CombinedText stringlengths 4 3.42M |
|---|
require 'rails_helper'
# Request specs for the leader intake endpoint (POST /v1/leaders/intake).
RSpec.describe 'V1::Leaders', type: :request do
  describe 'POST /v1/leaders/intake' do
    let(:club) { create(:club) }

    # Baseline valid payload; individual examples drop keys to exercise
    # validation failures.
    let(:req_body) do
      {
        name: 'Foo Bar',
        email: 'foo@bar.com',
        gender: 'Other',
        year: '2016',
        phone_number: '444-444-4444',
        slack_username: 'foo_bar',
        github_username: 'foo_bar',
        twitter_username: 'foobar',
        address: '4242 Fake St, Some City, CA 90210',
        club_id: club.id
      }
    end

    context 'with valid attributes' do
      let!(:starting_letter_count) { Letter.count }
      let(:welcome) { class_double(Hackbot::Interactions::Welcome).as_stubbed_const }

      before do
        # Stub the leader welcome interaction so the suite isn't trying to
        # send a real Slack message every time the test is run.
        allow(welcome).to receive(:trigger)
        post '/v1/leaders/intake', params: req_body
      end

      it 'creates the leader' do
        expect(response.status).to eq(201)
        # Check to make sure expected attributes are set
        #
        # We don't check for club_id because leaders can have multiple clubs and
        # the intake form doesn't support creating a leader with multiple clubs.
        req_body.except(:club_id).each do |k, v|
          expect(json[k.to_s]).to eq(v)
        end
      end

      it 'geocodes the address' do
        # These are really decimals, but are encoded as strings in JSON to
        # preserve accuracy.
        expect(json['latitude']).to be_a String
        expect(json['longitude']).to be_a String
      end

      it 'adds the leader to the given club' do
        # Gotta do this to get the parsed JSON representation of the club
        club_json = JSON.parse(club.to_json)
        expect(json['clubs']).to eq([club_json])
      end

      it 'creates a letter for the leader' do
        expect(Letter.count).to eq(starting_letter_count + 1)
        expect(Letter.last.name).to eq(req_body[:name])
      end
    end

    it "doesn't create the leader with invalid attributes" do
      post '/v1/leaders/intake', params: req_body.except(:name)
      expect(response.status).to eq(422)
      expect(json['errors']['name']).to eq(["can't be blank"])
    end

    it "doesn't create the leader without a club_id" do
      req_body.delete(:club_id)
      post '/v1/leaders/intake', params: req_body
      expect(response.status).to eq(422)
      expect(json['errors']['club_id']).to eq(["can't be blank"])
    end
  end
end
Fix RSpec tests by stubbing the leader welcome Slack interaction
require 'rails_helper'
# Request specs for the leader intake endpoint (POST /v1/leaders/intake).
RSpec.describe 'V1::Leaders', type: :request do
  describe 'POST /v1/leaders/intake' do
    let(:club) { create(:club) }

    # Baseline valid payload; individual examples drop keys to exercise
    # validation failures.
    let(:req_body) do
      {
        name: 'Foo Bar',
        email: 'foo@bar.com',
        gender: 'Other',
        year: '2016',
        phone_number: '444-444-4444',
        slack_username: 'foo_bar',
        github_username: 'foo_bar',
        twitter_username: 'foobar',
        address: '4242 Fake St, Some City, CA 90210',
        club_id: club.id
      }
    end

    context 'with valid attributes' do
      let!(:starting_letter_count) { Letter.count }
      # No space between `let` and the paren (Ruby style).
      let(:welcome) { class_double(Hackbot::Interactions::Welcome).as_stubbed_const }

      before do
        # Mock the leader welcome interaction so we're not trying to send a
        # Slack message every time the test is run.
        allow(welcome).to receive(:trigger)
        post '/v1/leaders/intake', params: req_body
      end

      it 'creates the leader' do
        expect(response.status).to eq(201)
        # Check to make sure expected attributes are set
        #
        # We don't check for club_id because leaders can have multiple clubs and
        # the intake form doesn't support creating a leader with multiple clubs.
        req_body.except(:club_id).each do |k, v|
          expect(json[k.to_s]).to eq(v)
        end
      end

      it 'geocodes the address' do
        # These are really decimals, but are encoded as strings in JSON to
        # preserve accuracy.
        expect(json['latitude']).to be_a String
        expect(json['longitude']).to be_a String
      end

      it 'adds the leader to the given club' do
        # Gotta do this to get the parsed JSON representation of the club
        club_json = JSON.parse(club.to_json)
        expect(json['clubs']).to eq([club_json])
      end

      it 'creates a letter for the leader' do
        expect(Letter.count).to eq(starting_letter_count + 1)
        expect(Letter.last.name).to eq(req_body[:name])
      end
    end

    it "doesn't create the leader with invalid attributes" do
      post '/v1/leaders/intake', params: req_body.except(:name)
      expect(response.status).to eq(422)
      expect(json['errors']['name']).to eq(["can't be blank"])
    end

    it "doesn't create the leader without a club_id" do
      req_body.delete(:club_id)
      post '/v1/leaders/intake', params: req_body
      expect(response.status).to eq(422)
      expect(json['errors']['club_id']).to eq(["can't be blank"])
    end
  end
end
|
# Capybara helpers shared by feature specs: generic page-expectation
# shortcuts plus helpers for driving Select2 widgets and has_many nested
# forms.
module CapybaraHelpers
  # Run the block scoped to the form input container for +input_id+.
  def on_input_ctx(input_id, &block)
    within("li##{input_id}_input") do
      block.call
    end
  end

  # Click the "Filter" button inside the filters sidebar.
  def click_filter_btn
    within("#filters_sidebar_section") do
      click_button("Filter")
    end
  end

  def expect_text(text)
    expect(page).to have_text(text)
  end

  def not_expect_text(text)
    expect(page).not_to have_text(text)
  end

  def expect_css(css)
    expect(page).to have_css(css)
  end

  # Select 2 helpers

  # The rendered Select2 widget container.
  def select2_options_container
    find(".select2-container")
  end

  # The Select2 search text field.
  def select2_input
    find(".select2-search__field")
  end

  # Open the Select2 dropdown by clicking its container.
  def open_select2_options
    select2_options_container.click
  end

  # Open the dropdown and type +item_text+ into the search field.
  def fill_select2_input(item_text)
    open_select2_options
    select2_input.set(item_text)
  end

  def fill_select2_input_and_press_return(item_text)
    fill_select2_input(item_text)
    select2_input.native.send_keys(:return)
  end

  # Type +item_text+ and click the matching option. +display_name+ defaults
  # to the typed text; pass it when the rendered option label differs.
  def pick_select2_entered_option(item_text, display_name = nil)
    display_name = item_text unless display_name
    fill_select2_input(item_text)
    click_select2_option(display_name)
  end

  def click_select2_option(display_name)
    page.find(:xpath, "//li[text()='#{display_name}']").click
  end

  # Assert the underlying <select> carries data-<option>="<value>".
  def expect_select2_data_option(option, value)
    expect(page).to have_xpath("//select[@data-#{option}='#{value}']")
  end

  def expect_select2_selection(text)
    expect(page).to have_css(".select2-selection__rendered", text: /#{text}/)
  end

  def expect_select2_empty_selection
    expect(page).not_to have_css(".select2-selection__rendered")
  end

  def expect_select2_choices_count_to_eq(count)
    expect(page).to have_css("li.select2-selection__choice", count: count)
  end

  def expect_select2_options_count_to_eq(count)
    expect(page).to have_css("select.select2-hidden-accessible option", count: count)
  end

  # Assert the Nth dropdown result (1-based) matches +text+.
  def expect_select2_result_text_to_eq(result_number, text)
    expect(page).to have_css(
      "li.select2-results__option:nth-child(#{result_number})", text: /#{text}/
    )
  end

  # Assert the dropdown shows +count+ results. Select2 renders an empty
  # result set as a single "No results found" entry, hence the special case.
  def expect_select2_results_count_to_eq(count)
    klass = "li.select2-results__option"
    no_results = "No results found"
    if count.zero?
      expect(page).to have_css(klass, count: 1)
      expect(page).to have_content(no_results)
    else
      expect(page).to have_css(klass, count: count)
      expect(page).not_to have_content(no_results)
    end
  end

  # Nested (has_many) form helpers

  def click_add_nested
    find("a.has_many_add").click
  end

  # Run the block scoped to the Nth nested fieldset (1-based; +1 skips the
  # container's first child).
  def on_nested_ctx(resource_number, &block)
    within("li.has_many_container fieldset:nth-child(#{resource_number + 1}) ") do
      block.call
    end
  end

  def expect_nested_select2_result_text_to_eq(result_number, text)
    expect(page).to have_css(
      "li.nested_level:nth-child(#{result_number})", text: /#{text}/
    )
  end
end
fix(tests): submit the filter form directly via JS because the Filter button is not visible to the driver
# Capybara helpers shared by feature specs: generic page-expectation
# shortcuts plus helpers for driving Select2 widgets and has_many nested
# forms.
module CapybaraHelpers
  # Run the block scoped to the form input container for +input_id+.
  def on_input_ctx(input_id, &block)
    within("li##{input_id}_input") do
      block.call
    end
  end

  # Submit the filter form via JavaScript instead of clicking the "Filter"
  # button, which may not be visible to the driver.
  def click_filter_btn
    page.execute_script("document.getElementsByClassName('filter_form')[0].submit()")
  end

  def expect_text(text)
    expect(page).to have_text(text)
  end

  def not_expect_text(text)
    expect(page).not_to have_text(text)
  end

  def expect_css(css)
    expect(page).to have_css(css)
  end

  # Select 2 helpers

  # The rendered Select2 widget container.
  def select2_options_container
    find(".select2-container")
  end

  # The Select2 search text field.
  def select2_input
    find(".select2-search__field")
  end

  # Open the Select2 dropdown by clicking its container.
  def open_select2_options
    select2_options_container.click
  end

  # Open the dropdown and type +item_text+ into the search field.
  def fill_select2_input(item_text)
    open_select2_options
    select2_input.set(item_text)
  end

  def fill_select2_input_and_press_return(item_text)
    fill_select2_input(item_text)
    select2_input.native.send_keys(:return)
  end

  # Type +item_text+ and click the matching option. +display_name+ defaults
  # to the typed text; pass it when the rendered option label differs.
  def pick_select2_entered_option(item_text, display_name = nil)
    display_name = item_text unless display_name
    fill_select2_input(item_text)
    click_select2_option(display_name)
  end

  def click_select2_option(display_name)
    page.find(:xpath, "//li[text()='#{display_name}']").click
  end

  # Assert the underlying <select> carries data-<option>="<value>".
  def expect_select2_data_option(option, value)
    expect(page).to have_xpath("//select[@data-#{option}='#{value}']")
  end

  def expect_select2_selection(text)
    expect(page).to have_css(".select2-selection__rendered", text: /#{text}/)
  end

  def expect_select2_empty_selection
    expect(page).not_to have_css(".select2-selection__rendered")
  end

  def expect_select2_choices_count_to_eq(count)
    expect(page).to have_css("li.select2-selection__choice", count: count)
  end

  def expect_select2_options_count_to_eq(count)
    expect(page).to have_css("select.select2-hidden-accessible option", count: count)
  end

  # Assert the Nth dropdown result (1-based) matches +text+.
  def expect_select2_result_text_to_eq(result_number, text)
    expect(page).to have_css(
      "li.select2-results__option:nth-child(#{result_number})", text: /#{text}/
    )
  end

  # Assert the dropdown shows +count+ results. Select2 renders an empty
  # result set as a single "No results found" entry, hence the special case.
  def expect_select2_results_count_to_eq(count)
    klass = "li.select2-results__option"
    no_results = "No results found"
    if count.zero?
      expect(page).to have_css(klass, count: 1)
      expect(page).to have_content(no_results)
    else
      expect(page).to have_css(klass, count: count)
      expect(page).not_to have_content(no_results)
    end
  end

  # Nested (has_many) form helpers

  def click_add_nested
    find("a.has_many_add").click
  end

  # Run the block scoped to the Nth nested fieldset (1-based; +1 skips the
  # container's first child).
  def on_nested_ctx(resource_number, &block)
    within("li.has_many_container fieldset:nth-child(#{resource_number + 1}) ") do
      block.call
    end
  end

  def expect_nested_select2_result_text_to_eq(result_number, text)
    expect(page).to have_css(
      "li.nested_level:nth-child(#{result_number})", text: /#{text}/
    )
  end
end
|
# True when running on a Windows build of Ruby (mswin, mingw, ...).
def windows?
  !!(RUBY_PLATFORM =~ /mswin|mingw|windows/)
end

# Any non-Windows platform is treated as unix-like.
def unix?
  !windows?
end

# Platform-dependent constants used by the specs.
if windows?
  LINE_ENDING = "\r\n".freeze
  ECHO_LC_ALL = "echo %LC_ALL%".freeze
else
  LINE_ENDING = "\n".freeze
  ECHO_LC_ALL = "echo $LC_ALL".freeze
end

# Whether the current process runs as the superuser; never true on Windows,
# where effective UIDs don't apply.
def root?
  return false if windows?
  Process.euid == 0
end
Remove the unused platform helper
Signed-off-by: Tim Smith <764ef62106582a09ed09dfa0b6bff7c05fd7d1e4@chef.io>
# True when running on a Windows build of Ruby (mswin, mingw, ...).
def windows?
  RUBY_PLATFORM =~ /mswin|mingw|windows/ ? true : false
end

# Any non-Windows platform is treated as unix-like.
def unix?
  !windows?
end

# Platform-dependent constants used by the specs.
if windows?
  LINE_ENDING = "\r\n".freeze
  ECHO_LC_ALL = "echo %LC_ALL%".freeze
else
  LINE_ENDING = "\n".freeze
  ECHO_LC_ALL = "echo $LC_ALL".freeze
end

# Whether the current process runs as the superuser; never true on Windows,
# where effective UIDs don't apply.
def root?
  return false if windows?
  Process.euid.zero?
end
|
require 'chefspec'
# ChefSpec coverage for the sys::autofs recipe: no-op without attributes,
# plain map configuration, and LDAP-backed configuration.
describe 'sys::autofs' do
  let(:chef_run) { ChefSpec::Runner.new.converge(described_recipe) }

  context 'node.sys.autofs.maps and node.sys.autofs.ldap is empty' do
    it 'does nothing' do
      expect(chef_run.run_context.resource_collection).to be_empty
    end
  end

  context 'with basic attributes' do
    before do
      chef_run.node.default['sys']['autofs']['maps'] = {
        "/mount/point" => { "path" => "config"}
      }
      chef_run.converge(described_recipe)
    end

    it 'installs autofs' do
      expect(chef_run).to install_package('autofs')
    end

    it 'manages /etc/auto.master' do
      expect(chef_run).to create_template('/etc/auto.master').with_mode("0644").with(
        :variables => {
          :maps => { "/mount/point" => { "path" => "config" }}
        }
      )
    end
  end

  context 'with ldap attributes' do
    before do
      chef_run.node.automatic['fqdn'] = 'node.example.com'
      chef_run.node.automatic['sys']['autofs']['ldap']['servers'] = [
        'ldap01.example.com', 'ldap02.example.com'
      ]
      chef_run.node.default['sys']['autofs']['maps'] = {
        "/mount/point" => { "map" => "ldap:ou=autofs.mount,dc=example,dc=com"}
      }
      chef_run.node.default['sys']['autofs']['ldap'] = {:omg => :lol}
      chef_run.node.default['sys']['krb5']['realm'] = 'EXAMPLE.COM'
      chef_run.node.default['sys']['autofs']['ldap']['searchbase'] = 'dc=example,dc=com'
      chef_run.converge(described_recipe)
    end

    it 'install autofs, autofs-ldap and kstart' do
      expect(chef_run).to install_package('autofs')
      expect(chef_run).to install_package('autofs-ldap')
      expect(chef_run).to install_package('kstart')
    end

    it 'manages /etc/auto.master' do
      expect(chef_run).to create_template('/etc/auto.master').with_mode("0644").with(
        :variables => {
          :maps => { "/mount/point" => { "map" => "ldap:ou=autofs.mount,dc=example,dc=com" }}
        })
      expect(chef_run).to render_file('/etc/auto.master').with_content(
        "/mount/point ldap:ou=autofs.mount,dc=example,dc=com"
      )
    end

    it 'manages /etc/autofs_ldap_auth.conf' do
      expect(chef_run).to create_template('/etc/autofs_ldap_auth.conf').with_mode("0600").with(
        :variables => {
          :principal => 'node.example.com',
          :realm => 'EXAMPLE.COM'
        }
      )
      expect(chef_run).to render_file('/etc/autofs_ldap_auth.conf').with_content(
        "clientprinc=\"autofsclient/node.example.com@EXAMPLE.COM\""
      )
    end

    it 'manages /etc/default/autofs' do
      expect(chef_run).to create_template('/etc/default/autofs').with_mode("0644").with(
        :variables => {
          :uris => [ 'ldap01.example.com', 'ldap02.example.com' ],
          :searchbase => 'dc=example,dc=com',
          # The template also receives :browsemode; the `with` matcher
          # compares the whole variables hash, so omitting it fails.
          :browsemode => 'no'
        }
      )
      expect(chef_run).to render_file('/etc/default/autofs').with_content(
        "MASTER_MAP_NAME=/etc/auto.master"
      )
      expect(chef_run).to render_file('/etc/default/autofs').with_content(
        'LDAP_URI="ldap://ldap01.example.com/ ldap://ldap02.example.com/'
      )
    end

    it 'manages /etc/init.d/autofs' do
      expect(chef_run).to create_cookbook_file('/etc/init.d/autofs').with_mode("0755")
    end
  end
end
Fix test case: include the browsemode variable in the /etc/default/autofs template expectation
require 'chefspec'
# ChefSpec coverage for the sys::autofs recipe: no-op without attributes,
# plain map configuration, and LDAP-backed configuration.
describe 'sys::autofs' do
  let(:chef_run) { ChefSpec::Runner.new.converge(described_recipe) }

  context 'node.sys.autofs.maps and node.sys.autofs.ldap is empty' do
    it 'does nothing' do
      expect(chef_run.run_context.resource_collection).to be_empty
    end
  end

  context 'with basic attributes' do
    before do
      chef_run.node.default['sys']['autofs']['maps'] = {
        "/mount/point" => { "path" => "config"}
      }
      chef_run.converge(described_recipe)
    end

    it 'installs autofs' do
      expect(chef_run).to install_package('autofs')
    end

    it 'manages /etc/auto.master' do
      expect(chef_run).to create_template('/etc/auto.master').with_mode("0644").with(
        :variables => {
          :maps => { "/mount/point" => { "path" => "config" }}
        }
      )
    end
  end

  context 'with ldap attributes' do
    before do
      chef_run.node.automatic['fqdn'] = 'node.example.com'
      chef_run.node.automatic['sys']['autofs']['ldap']['servers'] = [
        'ldap01.example.com', 'ldap02.example.com'
      ]
      chef_run.node.default['sys']['autofs']['maps'] = {
        "/mount/point" => { "map" => "ldap:ou=autofs.mount,dc=example,dc=com"}
      }
      chef_run.node.default['sys']['autofs']['ldap'] = {:omg => :lol}
      chef_run.node.default['sys']['krb5']['realm'] = 'EXAMPLE.COM'
      chef_run.node.default['sys']['autofs']['ldap']['searchbase'] = 'dc=example,dc=com'
      chef_run.converge(described_recipe)
    end

    it 'install autofs, autofs-ldap and kstart' do
      expect(chef_run).to install_package('autofs')
      expect(chef_run).to install_package('autofs-ldap')
      expect(chef_run).to install_package('kstart')
    end

    it 'manages /etc/auto.master' do
      expect(chef_run).to create_template('/etc/auto.master').with_mode("0644").with(
        :variables => {
          :maps => { "/mount/point" => { "map" => "ldap:ou=autofs.mount,dc=example,dc=com" }}
        })
      expect(chef_run).to render_file('/etc/auto.master').with_content(
        "/mount/point ldap:ou=autofs.mount,dc=example,dc=com"
      )
    end

    it 'manages /etc/autofs_ldap_auth.conf' do
      expect(chef_run).to create_template('/etc/autofs_ldap_auth.conf').with_mode("0600").with(
        :variables => {
          :principal => 'node.example.com',
          :realm => 'EXAMPLE.COM'
        }
      )
      expect(chef_run).to render_file('/etc/autofs_ldap_auth.conf').with_content(
        "clientprinc=\"autofsclient/node.example.com@EXAMPLE.COM\""
      )
    end

    it 'manages /etc/default/autofs' do
      # The `with` matcher compares the full variables hash, so every
      # template variable (including browsemode) must be listed.
      expect(chef_run).to create_template('/etc/default/autofs').with_mode("0644").with(
        :variables => {
          :uris => [ 'ldap01.example.com', 'ldap02.example.com' ],
          :searchbase => 'dc=example,dc=com',
          :browsemode => 'no'
        }
      )
      expect(chef_run).to render_file('/etc/default/autofs').with_content(
        "MASTER_MAP_NAME=/etc/auto.master"
      )
      expect(chef_run).to render_file('/etc/default/autofs').with_content(
        'LDAP_URI="ldap://ldap01.example.com/ ldap://ldap02.example.com/'
      )
    end

    it 'manages /etc/init.d/autofs' do
      expect(chef_run).to create_cookbook_file('/etc/init.d/autofs').with_mode("0755")
    end
  end
end
|
require 'spec_helper'
module Resync
  # Specs for the XML helper's input coercion: XML.element accepts an
  # existing element, a full document (string or REXML::Document), or a
  # fragment, and always yields a REXML::Element.
  describe XML do
    describe '#element' do
      it 'returns an element unchanged' do
        elem = REXML::Element.new('foo')
        expect(XML.element(elem)).to be(elem)
      end

      it 'returns the root element of a string document' do
        xml_str = '<?xml version="1.0"?><foo><bar/><baz/></foo>'
        elem = XML.element(xml_str)
        expect(elem).to be_a(REXML::Element)
        # The XML declaration is dropped; only the root element remains.
        expect(elem).to be_xml('<foo><bar/><baz/></foo>')
      end

      it 'returns the root element of a REXML::Document' do
        xml_str = '<?xml version="1.0"?><foo><bar/><baz/></foo>'
        doc = REXML::Document.new(xml_str)
        elem = XML.element(doc)
        expect(elem).to be_a(REXML::Element)
        expect(elem).to be_xml(doc.root)
      end

      it 'parses an XML fragment as an element' do
        xml_str = '<foo><bar/><baz/></foo>'
        elem = XML.element(xml_str)
        expect(elem).to be_a(REXML::Element)
        expect(elem).to be_xml(xml_str)
      end
    end
  end
end
Add test covering non-XML input to XML.element
require 'spec_helper'
module Resync
  # Specs for the XML helper's input coercion: XML.element accepts an
  # existing element, a full document (string or REXML::Document), or a
  # fragment, and always yields a REXML::Element.
  describe XML do
    describe '#element' do
      it 'returns an element unchanged' do
        elem = REXML::Element.new('foo')
        expect(XML.element(elem)).to be(elem)
      end

      it 'returns the root element of a string document' do
        xml_str = '<?xml version="1.0"?><foo><bar/><baz/></foo>'
        elem = XML.element(xml_str)
        expect(elem).to be_a(REXML::Element)
        # The XML declaration is dropped; only the root element remains.
        expect(elem).to be_xml('<foo><bar/><baz/></foo>')
      end

      it 'returns the root element of a REXML::Document' do
        xml_str = '<?xml version="1.0"?><foo><bar/><baz/></foo>'
        doc = REXML::Document.new(xml_str)
        elem = XML.element(doc)
        expect(elem).to be_a(REXML::Element)
        expect(elem).to be_xml(doc.root)
      end

      it 'parses an XML fragment as an element' do
        xml_str = '<foo><bar/><baz/></foo>'
        elem = XML.element(xml_str)
        expect(elem).to be_a(REXML::Element)
        expect(elem).to be_xml(xml_str)
      end

      it 'fails when it gets something other than XML' do
        data = 12_345
        # NOTE(review): a bare raise_exception matcher accepts any error and
        # triggers an RSpec warning; consider pinning the expected class once
        # XML.element's failure mode is confirmed.
        expect { XML.element(data) }.to raise_exception
      end
    end
  end
end
|
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# PrestaShop release to download and install (updated from 1.7.6.5).
default['prestashop']['version'] = '1.7.8.2'
# Name of the database used by the shop.
default['prestashop']['db']['name'] = 'prestashop'
# Packages needed only during installation — presumably removed afterwards;
# confirm against the recipe that consumes this attribute.
default['prestashop']['temp_packages'] = ['unzip']
VM - Prestashop - update to version 1.7.8.2 (#1599)
Co-authored-by: Carlos Noguera <2d0c02e8c18864a31f6e3cb560d5d7a7e7f18732@gmail.com>
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# PrestaShop release to download and install.
default['prestashop']['version'] = '1.7.8.2'
# Name of the database used by the shop.
default['prestashop']['db']['name'] = 'prestashop'
# Packages needed only during installation — presumably removed afterwards;
# confirm against the recipe that consumes this attribute.
default['prestashop']['temp_packages'] = ['unzip']
|
Normalized and documented Summarize
|
require 'fileutils'
module Jekyll
  # A Jekyll Page not backed by a file on disk; lets the plugin render the
  # bundled feed template without requiring feed.xml in the site source.
  class PageWithoutAFile < Page
    def read_yaml(*)
      @data ||= {}
    end
  end

  # Liquid tag rendering the <link> element that advertises the Atom feed.
  class FeedMetaTag < Liquid::Tag
    # Site configuration hash.
    def config
      @context.registers[:site].config
    end

    # Site URL: prefer the explicit `url` setting, fall back to the GitHub
    # Pages metadata URL; nil when neither is configured.
    def url
      if config["url"]
        config["url"]
      elsif config["github"] && config["github"]["url"]
        # `config` is a plain Hash — the value must be read with hash keys;
        # `config.github.url` raises NoMethodError on a Hash.
        config["github"]["url"]
      end
    end

    def render(context)
      @context = context
      "<link type=\"application/atom+xml\" rel=\"alternate\" href=\"#{url}/feed.xml\" />"
    end
  end

  class JekyllRssFeed < Jekyll::Generator
    safe true
    priority :lowest

    # Main plugin action, called by Jekyll-core
    def generate(site)
      @site = site
      @site.config["time"] = Time.new
      unless feed_exists?
        write
        @site.keep_files ||= []
        @site.keep_files << "feed.xml"
      end
    end

    # Path to feed.xml template file bundled next to this plugin
    def source_path
      File.expand_path "feed.xml", File.dirname(__FILE__)
    end

    # Destination for feed.xml file within the site destination directory
    def destination_path
      if @site.respond_to?(:in_dest_dir)
        @site.in_dest_dir("feed.xml")
      else
        Jekyll.sanitized_path(@site.dest, "feed.xml")
      end
    end

    # Render the feed template and write it to the destination
    def write
      FileUtils.mkdir_p File.dirname(destination_path)
      File.open(destination_path, 'w') { |f| f.write(sitemap_content) }
    end

    # Render the feed template against the site payload; whitespace between
    # lines is collapsed before rendering.
    def sitemap_content
      site_map = PageWithoutAFile.new(@site, File.dirname(__FILE__), "", "feed.xml")
      site_map.content = File.read(source_path).gsub(/\s*\n\s*/, "\n").gsub(/\n{%/, "{%")
      site_map.data["layout"] = nil
      site_map.render(Hash.new, @site.site_payload)
      site_map.output
    end

    # Checks if a feed.xml already exists in the site source, in which case
    # the user's file wins and the plugin does nothing.
    def feed_exists?
      # File.exist? — File.exists? is deprecated and removed in Ruby 3.2.
      if @site.respond_to?(:in_source_dir)
        File.exist? @site.in_source_dir("feed.xml")
      else
        File.exist? Jekyll.sanitized_path(@site.source, "feed.xml")
      end
    end
  end
end

Liquid::Template.register_tag('feed_meta', Jekyll::FeedMetaTag)
Properly read site.github.url via hash keys instead of method calls on the config Hash
require 'fileutils'
module Jekyll
  # A Jekyll Page not backed by a file on disk; lets the plugin render the
  # bundled feed template without requiring feed.xml in the site source.
  class PageWithoutAFile < Page
    def read_yaml(*)
      @data ||= {}
    end
  end

  # Liquid tag rendering the <link> element that advertises the Atom feed.
  class FeedMetaTag < Liquid::Tag
    # Site configuration hash.
    def config
      @context.registers[:site].config
    end

    # Site URL: prefer the explicit `url` setting, fall back to the GitHub
    # Pages metadata URL; nil when neither is configured.
    def url
      if config["url"]
        config["url"]
      elsif config["github"] && config["github"]["url"]
        config["github"]["url"]
      end
    end

    def render(context)
      @context = context
      "<link type=\"application/atom+xml\" rel=\"alternate\" href=\"#{url}/feed.xml\" />"
    end
  end

  class JekyllRssFeed < Jekyll::Generator
    safe true
    priority :lowest

    # Main plugin action, called by Jekyll-core
    def generate(site)
      @site = site
      @site.config["time"] = Time.new
      unless feed_exists?
        write
        @site.keep_files ||= []
        @site.keep_files << "feed.xml"
      end
    end

    # Path to feed.xml template file bundled next to this plugin
    def source_path
      File.expand_path "feed.xml", File.dirname(__FILE__)
    end

    # Destination for feed.xml file within the site destination directory
    def destination_path
      if @site.respond_to?(:in_dest_dir)
        @site.in_dest_dir("feed.xml")
      else
        Jekyll.sanitized_path(@site.dest, "feed.xml")
      end
    end

    # Render the feed template and write it to the destination
    def write
      FileUtils.mkdir_p File.dirname(destination_path)
      File.open(destination_path, 'w') { |f| f.write(sitemap_content) }
    end

    # Render the feed template against the site payload; whitespace between
    # lines is collapsed before rendering.
    def sitemap_content
      site_map = PageWithoutAFile.new(@site, File.dirname(__FILE__), "", "feed.xml")
      site_map.content = File.read(source_path).gsub(/\s*\n\s*/, "\n").gsub(/\n{%/, "{%")
      site_map.data["layout"] = nil
      site_map.render(Hash.new, @site.site_payload)
      site_map.output
    end

    # Checks if a feed.xml already exists in the site source, in which case
    # the user's file wins and the plugin does nothing.
    def feed_exists?
      # File.exist? — File.exists? is deprecated and removed in Ruby 3.2.
      if @site.respond_to?(:in_source_dir)
        File.exist? @site.in_source_dir("feed.xml")
      else
        File.exist? Jekyll.sanitized_path(@site.source, "feed.xml")
      end
    end
  end
end

Liquid::Template.register_tag('feed_meta', Jekyll::FeedMetaTag)
|
# `jira status`: print the current user's issues grouped by workflow state,
# as Markdown headings + bullet lists suitable for Release Notes.
command :status do |c|
  c.syntax = 'jira status [options]'
  c.summary = 'List out the task status for including in Release Notes'
  c.description = ''
  c.option '-t DEPTH', Integer, 'Header depth'

  c.action do |args, options|
    options.defaults :t => 4
    project = $cfg['.jira.project']
    jira = JiraUtils.new(args, options)
    hh = '#' * options.t.to_i # FIXME: this isn't working.

    # Heading => JQL status literal (quoted where the value has spaces).
    # Driving the four identical query/print sections from data keeps the
    # output identical while removing the copy-paste.
    {
      'Done'        => "'Pending Release'",
      'Testing'     => 'Testing',
      'In Progress' => "'In Progress'",
      'To Do'       => 'Open'
    }.each do |heading, status|
      puts "#{hh} #{heading}"
      query = "assignee = #{jira.username} AND project = #{project} AND status = #{status}"
      jira.getIssues(query).each do |item|
        puts "- #{item['key']} #{item.access('fields.summary')}"
      end
    end
  end
end
# vim: set sw=2 ts=2 :
Loop cleanup
# `jira status`: print the current user's issues grouped by workflow state,
# as Markdown headings + bullet lists suitable for Release Notes.
command :status do |c|
  c.syntax = 'jira status [options]'
  c.summary = 'List out the task status for including in Release Notes'
  c.description = ''
  c.option '-t DEPTH', Integer, 'Header depth'

  c.action do |args, options|
    options.defaults :t => 4
    project = $cfg['.jira.project']
    jira = JiraUtils.new(args, options)
    hh = '#' * options.t.to_i # FIXME: this isn't working.

    # Heading => JQL status literal (quoted where the value has spaces).
    # Driving the four identical query/print sections from data keeps the
    # output identical while removing the copy-paste.
    {
      'Done'        => "'Pending Release'",
      'Testing'     => 'Testing',
      'In Progress' => "'In Progress'",
      'To Do'       => 'Open'
    }.each do |heading, status|
      puts "#{hh} #{heading}"
      query = "assignee = #{jira.username} AND project = #{project} AND status = #{status}"
      jira.getIssues(query).each do |item|
        puts "- #{item['key']} #{item.access('fields.summary')}"
      end
    end
  end
end
# vim: set sw=2 ts=2 :
|
require 'json'
require 'bigdecimal'
module JSON::LD
class Context
include Utils
# Term Definitions specify how properties and values have to be interpreted
# as well as the current vocabulary mapping and the default language
class TermDefinition
  # @return [RDF::URI] IRI map
  attr_accessor :id

  # @return [String] term name
  attr_accessor :term

  # @return [String] Type mapping
  attr_accessor :type_mapping

  # @return [String] Container mapping
  attr_accessor :container_mapping

  # Language mapping of term, `false` is used if there is explicitly no
  # language mapping for this term.
  # @return [String] Language mapping
  attr_accessor :language_mapping

  # @return [Boolean] Reverse Property
  attr_accessor :reverse_property

  # This is a simple term definition, not an expanded term definition
  # @return [Boolean] simple
  attr_accessor :simple

  # This is a simple term definition, not an expanded term definition
  # @return [Boolean] simple
  def simple?; simple; end

  # Create a new Term Mapping with an ID
  # @param [String] term
  # @param [String] id
  def initialize(term, id = nil)
    @term = term
    @id = id.to_s if id
  end

  ##
  # Output Hash or String definition for this definition considering @language and @vocab
  #
  # @param [Context] context
  # @return [String, Hash{String => Array[String], String}]
  def to_context_definition(context)
    # Compact the IRI: strip the vocabulary prefix when the id falls under
    # @vocab, otherwise try to express it as a CURIE via a known prefix.
    cid = if context.vocab && id.start_with?(context.vocab)
      # Nothing to return unless it's the same as the vocab
      id == context.vocab ? context.vocab : id.to_s[context.vocab.length..-1]
    else
      # Find a term to act as a prefix
      iri, prefix = context.iri_to_term.detect {|i,p| id.to_s.start_with?(i.to_s)}
      iri && iri != id ? "#{prefix}:#{id.to_s[iri.length..-1]}" : id
    end

    if language_mapping.nil? &&
      container_mapping.nil? &&
      type_mapping.nil? &&
      reverse_property.nil?
      # Simple definition: serialize as a bare string (omitted entirely when
      # the compacted id equals the term under an active @vocab).
      cid.to_s unless cid == term && context.vocab
    else
      # Expanded definition: build the keyword hash.
      defn = {}
      defn[reverse_property ? '@reverse' : '@id'] = cid.to_s unless cid == term && !reverse_property
      if type_mapping
        defn['@type'] = if KEYWORDS.include?(type_mapping)
          type_mapping
        else
          context.compact_iri(type_mapping, vocab: true)
        end
      end
      defn['@container'] = container_mapping if container_mapping
      # Language set as false to be output as null
      defn['@language'] = (language_mapping ? language_mapping : nil) unless language_mapping.nil?
      defn
    end
  end

  # Compact single-line representation for debugging.
  def inspect
    v = %w([TD)
    v << "id=#{@id}"
    v << "term=#{@term}"
    v << "rev" if reverse_property
    v << "container=#{container_mapping}" if container_mapping
    v << "lang=#{language_mapping.inspect}" unless language_mapping.nil?
    v << "type=#{type_mapping}" unless type_mapping.nil?
    v.join(" ") + "]"
  end
end
# The base.
#
# @return [RDF::URI] Current base IRI, used for expanding relative IRIs.
attr_reader :base

# The document base.
#
# @return [RDF::URI] Document base IRI, used to initialize `base`.
attr_reader :doc_base

# @return [RDF::URI] base IRI of the context, if loaded remotely. XXX
attr_accessor :context_base

# Term definitions
# @return [Hash{String => TermDefinition}]
attr_reader :term_definitions

# @return [Hash{RDF::URI => String}] Reverse mappings from IRI to term only for terms, not CURIEs XXX
attr_accessor :iri_to_term

# Default language
#
# This adds a language to plain strings that aren't otherwise coerced
# @return [String]
attr_reader :default_language

# Default vocabulary
#
# Sets the default vocabulary used for expanding terms which
# aren't otherwise absolute IRIs
# @return [RDF::URI]
attr_reader :vocab

# @return [Hash{Symbol => Object}] Global options used in generating IRIs
attr_accessor :options

# @return [Context] A context provided to us that we can use without re-serializing XXX
attr_accessor :provided_context

# @return [BlankNodeNamer]
attr_accessor :namer
##
# Create new evaluation context
# @param [Hash] options
# @option options [String, #to_s] :base
#   The Base IRI to use when expanding the document. This overrides the value of `input` if it is a _IRI_. If not specified and `input` is not an _IRI_, the base IRI defaults to the current document IRI if in a browser context, or the empty string if there is no document context.
# @option options [Proc] :documentLoader
#   The callback of the loader to be used to retrieve remote documents and contexts. If specified, it must be used to retrieve remote documents and contexts; otherwise, if not specified, the processor's built-in loader must be used. See {API.documentLoader} for the method signature.
# @option options [Hash{Symbol => String}] :prefixes
#   See `RDF::Reader#initialize`
# @option options [Boolean] :simple_compact_iris (false)
#   When compacting IRIs, do not use terms with expanded term definitions
# @option options [String, #to_s] :vocab
#   Initial value for @vocab
# @option options [String, #to_s] :language
#   Initial value for @language
# @yield [ec]
# @yieldparam [Context]
# @return [Context]
def initialize(options = {})
  if options[:base]
    # Document base: canonicalized with fragment and query stripped.
    @base = @doc_base = RDF::URI(options[:base])
    @doc_base.canonicalize!
    @doc_base.fragment = nil
    @doc_base.query = nil
  end
  options[:documentLoader] ||= JSON::LD::API.method(:documentLoader)
  @term_definitions = {}
  # Seed the reverse IRI->term map with the rdf/xsd vocabularies.
  @iri_to_term = {
    RDF.to_uri.to_s => "rdf",
    RDF::XSD.to_uri.to_s => "xsd"
  }
  @namer = BlankNodeMapper.new("t")
  @options = options

  # Load any defined prefixes
  (options[:prefixes] || {}).each_pair do |k, v|
    next if k.nil?
    @iri_to_term[v.to_s] = k
    @term_definitions[k.to_s] = TermDefinition.new(k, v.to_s)
    # Prefix-derived terms count as simple definitions.
    @term_definitions[k.to_s].simple = true
  end

  self.vocab = options[:vocab] if options[:vocab]
  self.default_language = options[:language] if options[:language]
  #debug("init") {"iri_to_term: #{iri_to_term.inspect}"}

  yield(self) if block_given?
end
##
# Initial context, without mappings, vocab or default language
#
# @return [Boolean]
def empty?
  [@term_definitions.empty?, vocab.nil?, default_language.nil?].all?
end
# Set the current base IRI.
#
# @param [String] value must be an absolute IRI
def base=(value)
  # Clearing the base is always allowed.
  return @base = nil unless value

  raise JsonLdError::InvalidBaseIRI, "@base must be a string: #{value.inspect}" unless value.is_a?(String) || value.is_a?(RDF::URI)
  @base = RDF::URI(value)
  @base.canonicalize!
  # The base never carries fragment or query components.
  @base.fragment = nil
  @base.query = nil
  raise JsonLdError::InvalidBaseIRI, "@base must be an absolute IRI: #{value.inspect}" unless @base.absolute?
  @base
end
# Set the default language; stored lowercased.
#
# @param [String] value
def default_language=(value)
  # A falsy value clears the default language.
  return @default_language = nil unless value

  raise JsonLdError::InvalidDefaultLanguage, "@language must be a string: #{value.inspect}" unless value.is_a?(String)
  @default_language = value.downcase
end
# @param [String] value must be an absolute IRI
def vocab=(value)
  @vocab = case value
  when /_:/
    # Blank-node "vocabs" are passed through verbatim.
    value
  when String
    # NOTE(review): as_resource comes from the included Utils module; the
    # error messages say "@value" but presumably mean "@vocab" — confirm.
    v = as_resource(value)
    raise JsonLdError::InvalidVocabMapping, "@value must be an absolute IRI: #{value.inspect}" if v.uri? && v.relative?
    v
  when nil
    nil
  else
    raise JsonLdError::InvalidVocabMapping, "@value must be a string: #{value.inspect}"
  end
end
# Create an Evaluation Context
#
# When processing a JSON-LD data structure, each processing rule is applied using information provided by the active context. This section describes how to produce an active context.
#
# The active context contains the active term definitions which specify how properties and values have to be interpreted as well as the current base IRI, the vocabulary mapping and the default language. Each term definition consists of an IRI mapping, a boolean flag reverse property, an optional type mapping or language mapping, and an optional container mapping. A term definition can not only be used to map a term to an IRI, but also to map a term to a keyword, in which case it is referred to as a keyword alias.
#
# When processing, the active context is initialized without any term definitions, vocabulary mapping, or default language. If a local context is encountered during processing, a new active context is created by cloning the existing active context. Then the information from the local context is merged into the new active context. Given that local contexts may contain references to remote contexts, this includes their retrieval.
#
# @param [String, #read, Array, Hash, Context] local_context
# @param [Array<String>] remote_contexts
#   IRIs of remote contexts already being dereferenced, used for cycle detection
# @return [Context] a new context; the receiver is not modified
# @raise [JsonLdError]
#   on a remote context load error, syntax error, or a reference to a term which is not defined.
# @see http://json-ld.org/spec/latest/json-ld-api/index.html#context-processing-algorithm
def parse(local_context, remote_contexts = [])
  result = self.dup
  # Remember the originally-provided context so serialize can round-trip it
  result.provided_context = local_context if self.empty?

  # Normalize to an array; each element is merged in turn
  local_context = [local_context] unless local_context.is_a?(Array)

  local_context.each do |context|
    depth do
      case context
      when nil
        # 3.1 If nil, set to a new empty context
        result = Context.new(options)
      when Context
        # An already-parsed context is used as-is (duplicated)
        debug("parse") {"context: #{context.inspect}"}
        result = context.dup
      when IO, StringIO
        debug("parse") {"io: #{context}"}
        # Load context document, if it is a string
        begin
          ctx = JSON.load(context)
          raise JSON::LD::JsonLdError::InvalidRemoteContext, "Context missing @context key" if @options[:validate] && ctx['@context'].nil?
          result = parse(ctx["@context"] ? ctx["@context"].dup : {})
          # Only record the provided context when this was the sole input
          result.provided_context = ctx["@context"] if [context] == local_context
          result
        rescue JSON::ParserError => e
          debug("parse") {"Failed to parse @context from remote document at #{context}: #{e.message}"}
          raise JSON::LD::JsonLdError::InvalidRemoteContext, "Failed to parse remote context at #{context}: #{e.message}" if @options[:validate]
          # Without validation, a parse failure falls back to an unchanged copy
          self.dup
        end
      when String, RDF::URI
        debug("parse") {"remote: #{context}, base: #{result.context_base || result.base}"}
        # Load context document, if it is a string
        # 3.2.1) Set context to the result of resolving value against the base IRI which is established as specified in section 5.1 Establishing a Base URI of [RFC3986]. Only the basic algorithm in section 5.2 of [RFC3986] is used; neither Syntax-Based Normalization nor Scheme-Based Normalization are performed. Characters additionally allowed in IRI references are treated in the same way that unreserved characters are treated in URI references, per section 6.5 of [RFC3987].
        context = RDF::URI(result.context_base || result.base).join(context)

        # Cycle detection: a remote context must not (transitively) include itself
        raise JsonLdError::RecursiveContextInclusion, "#{context}" if remote_contexts.include?(context.to_s)
        remote_contexts << context.to_s

        # The remote context is parsed against a copy with no base IRI
        context_no_base = self.dup
        context_no_base.base = nil
        context_no_base.context_base = context.to_s

        begin
          context_opts = @options.dup
          if context_opts.has_key?(:header)
            context_opts[:header] = context_opts[:header].dup
            context_opts[:header].delete('Cache-Control') # Allow context to be cached
          end
          @options[:documentLoader].call(context.to_s, context_opts) do |remote_doc|
            # 3.2.5) Dereference context. If the dereferenced document has no top-level JSON object with an @context member, an invalid remote context has been detected and processing is aborted; otherwise, set context to the value of that member.
            jo = case remote_doc.document
            when String then JSON.parse(remote_doc.document)
            else remote_doc.document
            end
            raise JsonLdError::InvalidRemoteContext, "#{context}" unless jo.is_a?(Hash) && jo.has_key?('@context')
            context = jo['@context']
            if @options[:processingMode] == "json-ld-1.0"
              context_no_base.provided_context = context.dup
            end
          end
        rescue JsonLdError
          raise
        rescue Exception => e
          # Any other failure (network, loader, etc.) is wrapped in a JsonLdError
          debug("parse") {"Failed to retrieve @context from remote document at #{context_no_base.context_base.inspect}: #{e.message}"}
          raise JsonLdError::LoadingRemoteContextFailed, "#{context_no_base.context_base}", e.backtrace
        end

        # 3.2.6) Set context to the result of recursively calling this algorithm, passing context no base for active context, context for local context, and remote contexts.
        context = context_no_base.parse(context, remote_contexts.dup)
        context.provided_context = result.provided_context
        context.base ||= result.base
        result = context
        debug("parse") {"=> provided_context: #{context.inspect}"}
      when Hash
        # If context has a @vocab member: if its value is not a valid absolute IRI or null trigger an INVALID_VOCAB_MAPPING error; otherwise set the active context's vocabulary mapping to its value and remove the @vocab member from context.
        context = context.dup # keep from modifying a hash passed as a param

        # Apply @base/@language/@vocab via their setters, removing them from
        # the hash so only term definitions remain
        {
          '@base' => :base=,
          '@language' => :default_language=,
          '@vocab' => :vocab=
        }.each do |key, setter|
          v = context.fetch(key, false)
          unless v == false
            context.delete(key)
            debug("parse") {"Set #{key} to #{v.inspect}"}
            result.send(setter, v)
          end
        end

        defined = {}
        # For each key-value pair in context invoke the Create Term Definition subalgorithm, passing result for active context, context for local context, key, and defined
        depth do
          context.keys.each do |key|
            result.create_term_definition(context, key, defined)
          end
        end
      else
        # 3.3) If context is not a JSON object, an invalid local context error has been detected and processing is aborted.
        raise JsonLdError::InvalidLocalContext, context.inspect
      end
    end
  end
  result
end
##
# Non-destructive merge: duplicate this context, fold in `context`'s
# settings, then replace the duplicate's term definitions with a copy of
# `context`'s own.
#
# @param [Context] context
# @return [Context] a new merged context; neither receiver nor argument is modified
def merge(context)
  merged = dup.merge!(context)
  merged.instance_variable_set(:@term_definitions, context.term_definitions.dup)
  merged
end
##
# Destructively update this context with definitions from `context`.
# Settings (@language, @vocab, @base) are only overwritten when the
# incoming context actually has them.
#
# @param [Context] context
# @return [self]
def merge!(context)
  # FIXME: if the new context removes the default language, this won't do anything
  incoming_language = context.default_language
  self.default_language = incoming_language if incoming_language

  incoming_vocab = context.vocab
  self.vocab = incoming_vocab if incoming_vocab

  incoming_base = context.base
  self.base = incoming_base if incoming_base

  # Merge in Term Definitions (incoming definitions win on collision)
  term_definitions.merge!(context.term_definitions)
  self
end
##
# Create Term Definition
#
# Term definitions are created by parsing the information in the given local context for the given term. If the given term is a compact IRI, it may omit an IRI mapping by depending on its prefix having its own term definition. If the prefix is a key in the local context, then its term definition must first be created, through recursion, before continuing. Because a term definition can depend on other term definitions, a mechanism must be used to detect cyclical dependencies. The solution employed here uses a map, defined, that keeps track of whether or not a term has been defined or is currently in the process of being defined. This map is checked before any recursion is attempted.
#
# After all dependencies for a term have been defined, the rest of the information in the local context for the given term is taken into account, creating the appropriate IRI mapping, container mapping, and type mapping or language mapping for the term.
#
# @param [Hash] local_context
# @param [String] term
# @param [Hash] defined
#   mutable map: true = term fully defined, false = definition in progress
# @raise [JsonLdError]
#   Represents a cyclical term dependency
# @see http://json-ld.org/spec/latest/json-ld-api/index.html#create-term-definition
def create_term_definition(local_context, term, defined)
  # Expand a string value, unless it matches a keyword
  debug("create_term_definition") {"term = #{term.inspect}"}

  # If defined contains the key term, then the associated value must be true, indicating that the term definition has already been created, so return. Otherwise, a cyclical term definition has been detected, which is an error.
  case defined[term]
  when TrueClass then return
  when nil
    # Mark this term as "in progress" so recursive calls can detect cycles
    defined[term] = false
  else
    raise JsonLdError::CyclicIRIMapping, "Cyclical term dependency found: #{term.inspect}"
  end

  # Since keywords cannot be overridden, term must not be a keyword. Otherwise, an invalid value has been detected, which is an error.
  if KEYWORDS.include?(term) && !%w(@vocab @language).include?(term)
    raise JsonLdError::KeywordRedefinition, "term must not be a keyword: #{term.inspect}" if
      @options[:validate]
  elsif !term_valid?(term) && @options[:validate]
    raise JsonLdError::InvalidTermDefinition, "term is invalid: #{term.inspect}"
  end

  # Remove any existing term definition for term in active context.
  term_definitions.delete(term)

  # Initialize value to the value associated with the key term in local context.
  value = local_context.fetch(term, false)
  # A plain-string definition is shorthand for {'@id' => value}
  simple_term = value.is_a?(String)
  value = {'@id' => value} if value.is_a?(String)

  case value
  when nil, {'@id' => nil}
    # If value equals null or value is a JSON object containing the key-value pair (@id-null), then set the term definition in active context to null, set the value associated with defined's key term to true, and return.
    debug("") {"=> nil"}
    term_definitions[term] = TermDefinition.new(term)
    defined[term] = true
    return
  when Hash
    debug("") {"Hash[#{term.inspect}] = #{value.inspect}"}
    definition = TermDefinition.new(term)
    definition.simple = simple_term

    # --- @type: expand and validate the type mapping ---
    if value.has_key?('@type')
      type = value['@type']
      # SPEC FIXME: @type may be nil
      type = case type
      when nil
        type
      when String
        begin
          expand_iri(type, vocab: true, documentRelative: false, local_context: local_context, defined: defined)
        rescue JsonLdError::InvalidIRIMapping
          raise JsonLdError::InvalidTypeMapping, "invalid mapping for '@type': #{type.inspect} on term #{term.inspect}"
        end
      else
        # Non-nil, non-String types are invalid; the sentinel fails the check below
        :error
      end
      unless %w(@id @vocab).include?(type) || type.is_a?(RDF::URI) && type.absolute?
        raise JsonLdError::InvalidTypeMapping, "unknown mapping for '@type': #{type.inspect} on term #{term.inspect}"
      end
      debug("") {"type_mapping: #{type.inspect}"}
      definition.type_mapping = type
    end

    # --- @reverse: reverse-property definitions (exclusive with @id) ---
    if value.has_key?('@reverse')
      raise JsonLdError::InvalidReverseProperty, "unexpected key in #{value.inspect} on term #{term.inspect}" if
        value.keys.any? {|k| %w(@id).include?(k)}
      raise JsonLdError::InvalidIRIMapping, "expected value of @reverse to be a string: #{value['@reverse'].inspect} on term #{term.inspect}" unless
        value['@reverse'].is_a?(String)

      # Otherwise, set the IRI mapping of definition to the result of using the IRI Expansion algorithm, passing active context, the value associated with the @reverse key for value, true for vocab, true for document relative, local context, and defined. If the result is not an absolute IRI, i.e., it contains no colon (:), an invalid IRI mapping error has been detected and processing is aborted.
      definition.id = expand_iri(value['@reverse'],
        vocab: true,
        documentRelative: true,
        local_context: local_context,
        defined: defined)
      raise JsonLdError::InvalidIRIMapping, "non-absolute @reverse IRI: #{definition.id} on term #{term.inspect}" unless
        definition.id.is_a?(RDF::URI) && definition.id.absolute?

      # If value contains an @container member, set the container mapping of definition to its value; if its value is neither @set, nor @index, nor null, an invalid reverse property error has been detected (reverse properties only support set- and index-containers) and processing is aborted.
      if (container = value.fetch('@container', false))
        raise JsonLdError::InvalidReverseProperty,
          "unknown mapping for '@container' to #{container.inspect} on term #{term.inspect}" unless
          ['@set', '@index', nil].include?(container)
        definition.container_mapping = container
      end
      definition.reverse_property = true
    elsif value.has_key?('@id') && value['@id'] != term
      # --- explicit @id mapping (differing from the term itself) ---
      raise JsonLdError::InvalidIRIMapping, "expected value of @id to be a string: #{value['@id'].inspect} on term #{term.inspect}" unless
        value['@id'].is_a?(String)
      definition.id = expand_iri(value['@id'],
        vocab: true,
        documentRelative: true,
        local_context: local_context,
        defined: defined)
      raise JsonLdError::InvalidKeywordAlias, "expected value of @id to not be @context on term #{term.inspect}" if
        definition.id == '@context'
    elsif term.include?(':')
      # If term is a compact IRI with a prefix that is a key in local context then a dependency has been found. Use this algorithm recursively passing active context, local context, the prefix as term, and defined.
      prefix, suffix = term.split(':')
      depth {create_term_definition(local_context, prefix, defined)} if local_context.has_key?(prefix)

      definition.id = if td = term_definitions[prefix]
        # If term's prefix has a term definition in active context, set the IRI mapping for definition to the result of concatenating the value associated with the prefix's IRI mapping and the term's suffix.
        td.id + suffix
      else
        # Otherwise, term is an absolute IRI. Set the IRI mapping for definition to term
        term
      end
      debug("") {"=> #{definition.id}"}
    else
      # Otherwise, active context must have a vocabulary mapping, otherwise an invalid value has been detected, which is an error. Set the IRI mapping for definition to the result of concatenating the value associated with the vocabulary mapping and term.
      raise JsonLdError::InvalidIRIMapping, "relative term definition without vocab: #{term} on term #{term.inspect}" unless vocab
      definition.id = vocab + term
      debug("") {"=> #{definition.id}"}
    end

    # Keep the reverse (IRI -> term) index current for simple terms
    @iri_to_term[definition.id] = term if simple_term && definition.id

    # --- @container for non-reverse terms ---
    if value.has_key?('@container')
      container = value['@container']
      raise JsonLdError::InvalidContainerMapping, "unknown mapping for '@container' to #{container.inspect} on term #{term.inspect}" unless %w(@list @set @language @index).include?(container)
      debug("") {"container_mapping: #{container.inspect}"}
      definition.container_mapping = container
    end

    # --- @language: nil means "no language" (stored as false) ---
    if value.has_key?('@language')
      language = value['@language']
      # NOTE(review): the error message below contains a stray "}" after the
      # inspect interpolation; left as-is (runtime string, behavior-preserving edit).
      raise JsonLdError::InvalidLanguageMapping, "language must be null or a string, was #{language.inspect}} on term #{term.inspect}" unless language.nil? || (language || "").is_a?(String)
      language = language.downcase if language.is_a?(String)
      debug("") {"language_mapping: #{language.inspect}"}
      definition.language_mapping = language || false
    end

    term_definitions[term] = definition
    defined[term] = true
  else
    raise JsonLdError::InvalidTermDefinition, "Term definition for #{term.inspect} is an #{value.class} on term #{term.inspect}"
  end
end
##
# Generate @context
#
# If a context was supplied in global options, use that, otherwise, generate one
# from this representation.
#
# Fix: the debug messages previously misspelled "serialize" as "serlialize".
#
# @param [Hash{Symbol => Object}] options ({})
# @return [Hash] a hash with an '@context' key, or an empty hash when the
#   generated/reused context is itself nil or empty
def serialize(options = {})
  depth(options) do
    # FIXME: not setting provided_context now
    use_context = case provided_context
    when String, RDF::URI
      # A remote context reference is re-emitted as its IRI string
      debug "serialize: reuse context: #{provided_context.inspect}"
      provided_context.to_s
    when Hash, Array
      # An inline context is re-emitted as-is
      debug "serialize: reuse context: #{provided_context.inspect}"
      provided_context
    else
      # No provided context: generate one from this context's state
      debug("serialize: generate context")
      debug("") {"=> context: #{inspect}"}
      ctx = {}
      ctx['@base'] = base.to_s if base && base != doc_base
      ctx['@language'] = default_language.to_s if default_language
      ctx['@vocab'] = vocab.to_s if vocab

      # Term Definitions
      term_definitions.keys.sort.each do |term|
        defn = term_definitions[term].to_context_definition(self)
        ctx[term] = defn if defn
      end

      debug("") {"start_doc: context=#{ctx.inspect}"}
      ctx
    end

    # Return hash with @context, or empty
    r = {}
    r['@context'] = use_context unless use_context.nil? || use_context.empty?
    r
  end
end
##
# Build a context from an RDF::Vocabulary definition.
#
# @example building from an external vocabulary definition
#
#   g = RDF::Graph.load("http://schema.org/docs/schema_org_rdfa.html")
#
#   context = JSON::LD::Context.new.from_vocabulary(g,
#     vocab: "http://schema.org/",
#     prefixes: {schema: "http://schema.org/"},
#     language: "en")
#
# @param [RDF::Queryable] graph
#
# @return [self]
def from_vocabulary(graph)
  statements = {}
  ranges = {}

  # Add term definitions for each class and property not in schema:, and
  # for those properties having an object range
  graph.each do |statement|
    # Skip blank-node subjects; they can't yield usable term names
    next if statement.subject.node?
    (statements[statement.subject] ||= []) << statement

    # Keep track of predicate ranges
    if [RDF::RDFS.range, RDF::SCHEMA.rangeIncludes].include?(statement.predicate)
      (ranges[statement.subject] ||= []) << statement.object
    end
  end

  # Add term definitions for each class and property not in vocab, and
  # for those properties having an object range
  statements.each do |subject, values|
    types = values.select {|v| v.predicate == RDF.type}.map(&:object)
    is_property = types.any? {|t| t.to_s.include?("Property")}

    # Term name is the last path/fragment segment of the subject IRI
    term = subject.to_s.split(/[\/\#]/).last

    if !is_property
      # Ignore if there's a default vocabulary and this is not a property
      next if vocab && subject.to_s.start_with?(vocab)

      # otherwise, create a term definition
      td = term_definitions[term] = TermDefinition.new(term, subject.to_s)
    else
      prop_ranges = ranges.fetch(subject, [])
      # If any range is empty or member of range includes rdfs:Literal or schema:Text
      # NOTE(review): && binds tighter than ||, so this skips when
      # (vocab && prop_ranges.empty?) OR the range includes Text/Literal —
      # confirm this grouping matches the comment's intent.
      next if vocab && prop_ranges.empty? ||
        prop_ranges.include?(RDF::SCHEMA.Text) ||
        prop_ranges.include?(RDF::RDFS.Literal)
      td = term_definitions[term] = TermDefinition.new(term, subject.to_s)

      # Set context typing based on first element in range
      case r = prop_ranges.first
      when RDF::XSD.string
        # Plain strings: suppress any context default language for this term
        if self.default_language
          td.language_mapping = false
        end
      when RDF::XSD.boolean, RDF::SCHEMA.Boolean, RDF::XSD.date, RDF::SCHEMA.Date,
        RDF::XSD.dateTime, RDF::SCHEMA.DateTime, RDF::XSD.time, RDF::SCHEMA.Time,
        RDF::XSD.duration, RDF::SCHEMA.Duration, RDF::XSD.decimal, RDF::SCHEMA.Number,
        RDF::XSD.float, RDF::SCHEMA.Float, RDF::XSD.integer, RDF::SCHEMA.Integer
        # Datatyped ranges become an explicit type mapping
        td.type_mapping = r
        td.simple = false
      else
        # It's an object range (includes schema:URL)
        td.type_mapping = '@id'
      end
    end
  end

  self
end
# Set term mapping
#
# Registers a simple term definition mapping `term` to `value`, refreshing
# the reverse (IRI -> term) index and any :prefixes option along the way.
#
# @param [#to_s] term
# @param [RDF::URI, String, nil] value
#
# @return [TermDefinition] the newly created definition
def set_mapping(term, value)
  debug("") {"map #{term.inspect} to #{value.inspect}"}
  term = term.to_s
  definition = TermDefinition.new(term, value)
  definition.simple = true
  term_definitions[term] = definition

  # An empty term cannot be symbolized; use "" as the prefixes key instead
  term_sym = term.empty? ? "" : term.to_sym
  # Drop any stale reverse-index entry for this definition's IRI
  iri_to_term.delete(definition.id.to_s) if definition.id.is_a?(String)
  @options[:prefixes][term_sym] = value if @options.has_key?(:prefixes)
  iri_to_term[value.to_s] = term
  definition
end
##
# Find a term definition
#
# @param [Term, #to_s] term in unexpanded form
# @return [Term] the argument itself when it is already a TermDefinition,
#   otherwise the definition registered under its string form (may be nil)
def find_definition(term)
  return term if term.is_a?(TermDefinition)
  term_definitions[term.to_s]
end
##
# Retrieve container mapping, add it if `value` is provided
#
# @graph implicitly has a set container; other keywords map to themselves.
#
# @param [Term, #to_s] term in unexpanded form
# @return [String]
def container(term)
  return '@set' if term == '@graph'
  return term if KEYWORDS.include?(term)
  defn = find_definition(term)
  defn&.container_mapping
end
##
# Retrieve the language associated with a term, or the default language otherwise
#
# A definition's language mapping may be false ("no language"); only a nil
# mapping falls back to the context's default language.
#
# @param [Term, #to_s] term in unexpanded form
# @return [String]
def language(term)
  defn = find_definition(term)
  mapping = defn && defn.language_mapping
  if mapping.nil?
    @default_language
  else
    mapping
  end
end
##
# Is this a reverse term
# @param [Term, #to_s] term in unexpanded form
# @return [Boolean]
def reverse?(term)
  defn = find_definition(term)
  defn&.reverse_property
end
##
# Given a term or IRI, find a reverse term definition matching that term. If the term is already reversed, find a non-reversed version.
#
# @param [Term, #to_s] term
# @return [Term] related term definition
def reverse_term(term)
  # Resolve the argument to a TermDefinition if it isn't one already
  unless term.is_a?(TermDefinition)
    key = term.to_s
    if term_definitions.has_key?(key)
      # Direct lookup by term name
      term = term_definitions[key]
    else
      # Treat it as an IRI: scan existing definitions, falling back to a
      # temporary definition built from the expanded IRI
      existing = term_definitions.values.detect {|t| t.id == key}
      term = existing || TermDefinition.new(key, expand_iri(term, vocab: true))
    end
  end

  # Return a definition for the same IRI whose reverse flag is opposite
  term_definitions.values.detect do |t|
    t.id == term.id && t.reverse_property != term.reverse_property
  end
end
##
# Expand an IRI. Relative IRIs are expanded against any document base.
#
# Fix: removed a stray debug statement that referenced `result` before it
# was assigned (it always logged "=> nil" on the non-compact-IRI path), and
# dropped a redundant inner `result =` assignment in the prefix branch.
#
# @param [String] value
#   A keyword, term, prefix:suffix or possibly relative IRI
# @param [Hash{Symbol => Object}] options
# @option options [Boolean] documentRelative (false)
# @option options [Boolean] vocab (false)
# @option options [Hash] local_context
#   Used during Context Processing.
# @option options [Hash] defined
#   Used during Context Processing.
# @return [RDF::URI, String]
#   IRI or String, if it's a keyword
# @raise [JSON::LD::JsonLdError::InvalidIRIMapping] if the value cannot be expanded
# @see http://json-ld.org/spec/latest/json-ld-api/#iri-expansion
def expand_iri(value, options = {})
  # Non-strings and keywords pass through unchanged
  return value unless value.is_a?(String)
  return value if KEYWORDS.include?(value)
  depth(options) do
    debug("expand_iri") {"value: #{value.inspect}"} unless options[:quiet]
    local_context = options[:local_context]
    defined = options.fetch(:defined, {})

    # If local context is not null, it contains a key that equals value, and the value associated with the key that equals value in defined is not true, then invoke the Create Term Definition subalgorithm, passing active context, local context, value as term, and defined. This will ensure that a term definition is created for value in active context during Context Processing.
    if local_context && local_context.has_key?(value) && !defined[value]
      depth {create_term_definition(local_context, value, defined)}
    end

    # If vocab is true and the active context has a term definition for value, return the associated IRI mapping.
    if options[:vocab] && (v_td = term_definitions[value])
      debug("") {"match with #{v_td.id}"} unless options[:quiet]
      return v_td.id
    end

    # If value contains a colon (:), it is either an absolute IRI or a compact IRI:
    if value.include?(':')
      prefix, suffix = value.split(':', 2)
      debug("") {"prefix: #{prefix.inspect}, suffix: #{suffix.inspect}, vocab: #{vocab.inspect}"} unless options[:quiet]

      # If prefix is underscore (_) or suffix begins with double-forward-slash (//), return value as it is already an absolute IRI or a blank node identifier.
      return RDF::Node.new(namer.get_sym(suffix)) if prefix == '_'
      return RDF::URI(value) if suffix[0,2] == '//'

      # If local context is not null, it contains a key that equals prefix, and the value associated with the key that equals prefix in defined is not true, invoke the Create Term Definition algorithm, passing active context, local context, prefix as term, and defined. This will ensure that a term definition is created for prefix in active context during Context Processing.
      if local_context && local_context.has_key?(prefix) && !defined[prefix]
        create_term_definition(local_context, prefix, defined)
      end

      # If active context contains a term definition for prefix, return the result of concatenating the IRI mapping associated with prefix and suffix.
      result = if (td = term_definitions[prefix])
        td.id + suffix
      else
        # (Otherwise) Return value as it is already an absolute IRI.
        RDF::URI(value)
      end

      debug("") {"=> #{result.inspect}"} unless options[:quiet]
      return result
    end

    result = if options[:vocab] && vocab
      # If vocab is true, and active context has a vocabulary mapping, return the result of concatenating the vocabulary mapping with value.
      vocab + value
    elsif options[:documentRelative] && base = options.fetch(:base, self.base)
      # Otherwise, if document relative is true, set value to the result of resolving value against the base IRI. Only the basic algorithm in section 5.2 of [RFC3986] is used; neither Syntax-Based Normalization nor Scheme-Based Normalization are performed. Characters additionally allowed in IRI references are treated in the same way that unreserved characters are treated in URI references, per section 6.5 of [RFC3987].
      RDF::URI(base).join(value)
    elsif local_context && RDF::URI(value).relative?
      # If local context is not null and value is not an absolute IRI, an invalid IRI mapping error has been detected and processing is aborted.
      raise JSON::LD::JsonLdError::InvalidIRIMapping, "not an absolute IRI: #{value}"
    else
      RDF::URI(value)
    end
    debug("") {"=> #{result}"} unless options[:quiet]
    result
  end
end
##
# Compacts an absolute IRI to the shortest matching term or compact IRI
#
# Strategy, in order: (1) inverse-context term selection (vocab mode only),
# (2) vocabulary-mapping suffix, (3) compact IRI (prefix:suffix), (4) when
# :standard_prefixes is set, well-known RDF vocabularies, (5) otherwise the
# IRI itself (made base-relative unless vocab mode).
#
# @param [RDF::URI] iri
# @param [Hash{Symbol => Object}] options ({})
# @option options [Object] :value
#   Value, used to select among various maps for the same IRI
# @option options [Boolean] :vocab
#   specifies whether the passed iri should be compacted using the active context's vocabulary mapping
# @option options [Boolean] :reverse
#   specifies whether a reverse property is being compacted
#
# @return [String] compacted form of IRI
# @see http://json-ld.org/spec/latest/json-ld-api/#iri-compaction
def compact_iri(iri, options = {})
  return if iri.nil?
  iri = iri.to_s
  debug("compact_iri(#{iri.inspect}", options) {options.inspect} unless options[:quiet]
  depth(options) do
    value = options.fetch(:value, nil)

    # (1) Term selection via the inverse context
    if options[:vocab] && inverse_context.has_key?(iri)
      debug("") {"vocab and key in inverse context"} unless options[:quiet]
      default_language = self.default_language || @none
      containers = []
      # tl = "type or language"; tl_value = its concrete value
      tl, tl_value = "@language", "@null"
      containers << '@index' if index?(value)
      if options[:reverse]
        tl, tl_value = "@type", "@reverse"
        containers << '@set'
      elsif list?(value)
        debug("") {"list(#{value.inspect})"} unless options[:quiet]
        # if value is a list object, then set type/language and type/language value to the most specific values that work for all items in the list as follows:
        containers << "@list" unless index?(value)
        list = value['@list']
        common_type = nil
        common_language = default_language if list.empty?
        list.each do |item|
          item_language, item_type = "@none", "@none"
          if value?(item)
            if item.has_key?('@language')
              item_language = item['@language']
            elsif item.has_key?('@type')
              item_type = item['@type']
            else
              item_language = "@null"
            end
          else
            item_type = '@id'
          end
          common_language ||= item_language
          if item_language != common_language && value?(item)
            debug("") {"-- #{item_language} conflicts with #{common_language}, use @none"} unless options[:quiet]
            common_language = '@none'
          end
          common_type ||= item_type
          if item_type != common_type
            common_type = '@none'
            debug("") {"#{item_type} conflicts with #{common_type}, use @none"} unless options[:quiet]
          end
        end
        common_language ||= '@none'
        common_type ||= '@none'
        debug("") {"common type: #{common_type}, common language: #{common_language}"} unless options[:quiet]
        # A specific common type wins over a common language
        if common_type != '@none'
          tl, tl_value = '@type', common_type
        else
          tl_value = common_language
        end
        debug("") {"list: containers: #{containers.inspect}, type/language: #{tl.inspect}, type/language value: #{tl_value.inspect}"} unless options[:quiet]
      else
        # Scalar @value objects contribute their @language or @type
        if value?(value)
          if value.has_key?('@language') && !index?(value)
            tl_value = value['@language']
            containers << '@language'
          elsif value.has_key?('@type')
            tl_value = value['@type']
            tl = '@type'
          end
        else
          tl, tl_value = '@type', '@id'
        end
        containers << '@set'
        debug("") {"value: containers: #{containers.inspect}, type/language: #{tl.inspect}, type/language value: #{tl_value.inspect}"} unless options[:quiet]
      end
      # '@none' is always the last-resort container/value
      containers << '@none'
      tl_value ||= '@null'
      preferred_values = []
      preferred_values << '@reverse' if tl_value == '@reverse'
      if %w(@id @reverse).include?(tl_value) && value.is_a?(Hash) && value.has_key?('@id')
        # Prefer @vocab when the nested @id itself compacts to a known term
        t_iri = compact_iri(value['@id'], vocab: true, document_relative: true)
        if (r_td = term_definitions[t_iri]) && r_td.id == value['@id']
          preferred_values.concat(%w(@vocab @id @none))
        else
          preferred_values.concat(%w(@id @vocab @none))
        end
      else
        preferred_values.concat([tl_value, '@none'])
      end
      debug("") {"preferred_values: #{preferred_values.inspect}"} unless options[:quiet]
      if p_term = select_term(iri, containers, tl, preferred_values)
        debug("") {"=> term: #{p_term.inspect}"} unless options[:quiet]
        return p_term
      end
    end

    # (2) At this point, there is no simple term that iri can be compacted to. If vocab is true and active context has a vocabulary mapping:
    if options[:vocab] && vocab && iri.start_with?(vocab) && iri.length > vocab.length
      suffix = iri[vocab.length..-1]
      debug("") {"=> vocab suffix: #{suffix.inspect}"} unless options[:quiet]
      # Only use the bare suffix when it doesn't collide with an existing term
      return suffix unless term_definitions.has_key?(suffix)
    end

    # (3) The iri could not be compacted using the active context's vocabulary mapping. Try to create a compact IRI, starting by initializing compact IRI to null. This variable will be used to store the created compact IRI, if any.
    candidates = []
    term_definitions.each do |term, td|
      next if term.include?(":")
      next if td.nil? || td.id.nil? || td.id == iri || !iri.start_with?(td.id)

      # Also skip term if it was not a simple term and the :simple_compact_iris flag is true
      next if @options[:simple_compact_iris] && !td.simple?

      suffix = iri[td.id.length..-1]
      ciri = "#{term}:#{suffix}"
      candidates << ciri unless value && term_definitions.has_key?(ciri)
    end

    if !candidates.empty?
      debug("") {"=> compact iri: #{candidates.term_sort.first.inspect}"} unless options[:quiet]
      return candidates.term_sort.first
    end

    # (4) If we still don't have any terms and we're using standard_prefixes,
    # try those, and add to mapping
    if @options[:standard_prefixes]
      candidates = RDF::Vocabulary.
        select {|v| iri.start_with?(v.to_uri.to_s) && iri != v.to_uri.to_s}.
        map do |v|
          prefix = v.__name__.to_s.split('::').last.downcase
          set_mapping(prefix, v.to_uri.to_s)
          iri.sub(v.to_uri.to_s, "#{prefix}:").sub(/:$/, '')
        end

      if !candidates.empty?
        # NOTE(review): "prefies" typo in this debug message left as-is
        # (runtime string; this is a behavior-preserving documentation edit).
        debug("") {"=> standard prefies: #{candidates.term_sort.first.inspect}"} unless options[:quiet]
        return candidates.term_sort.first
      end
    end

    # (5) Fall back to the IRI itself
    if !options[:vocab]
      # transform iri to a relative IRI using the document's base IRI
      iri = remove_base(iri)
      debug("") {"=> relative iri: #{iri.inspect}"} unless options[:quiet]
      return iri
    else
      debug("") {"=> absolute iri: #{iri.inspect}"} unless options[:quiet]
      return iri
    end
  end
end
##
# If active property has a type mapping in the active context set to @id or @vocab, a JSON object with a single member @id whose value is the result of using the IRI Expansion algorithm on value is returned.
#
# Otherwise, the result will be a JSON object containing an @value member whose value is the passed value. Additionally, an @type member will be included if there is a type mapping associated with the active property or an @language member if value is a string and there is language mapping associated with the active property.
#
# @param [String] property
#   Associated property used to find coercion rules
# @param [Hash, String] value
#   Value (literal or IRI) to be expanded
# @param [Hash{Symbol => Object}] options
# @option options [Boolean] :useNativeTypes (false) use native representations
#
# @return [Hash] Object representation of value
# @raise [RDF::ReaderError] if the iri cannot be expanded
# @see http://json-ld.org/spec/latest/json-ld-api/#value-expansion
def expand_value(property, value, options = {})
  options = {useNativeTypes: false}.merge(options)
  depth(options) do
    debug("expand_value") {"property: #{property.inspect}, value: #{value.inspect}"}

    # If the active property has a type mapping in active context that is @id, return a new JSON object containing a single key-value pair where the key is @id and the value is the result of using the IRI Expansion algorithm, passing active context, value, and true for document relative.
    # (An empty TermDefinition is used as fallback so td is never nil below.)
    if (td = term_definitions.fetch(property, TermDefinition.new(property))) && td.type_mapping == '@id'
      debug("") {"as relative IRI: #{value.inspect}"}
      return {'@id' => expand_iri(value, documentRelative: true).to_s}
    end

    # If active property has a type mapping in active context that is @vocab, return a new JSON object containing a single key-value pair where the key is @id and the value is the result of using the IRI Expansion algorithm, passing active context, value, true for vocab, and true for document relative.
    if td.type_mapping == '@vocab'
      debug("") {"as vocab IRI: #{value.inspect}"}
      return {'@id' => expand_iri(value, vocab: true, documentRelative: true).to_s}
    end

    # Normalize Ruby date/time objects to RDF literals before dispatching
    value = RDF::Literal(value) if
      value.is_a?(Date) ||
      value.is_a?(DateTime) ||
      value.is_a?(Time)

    result = case value
    when RDF::URI, RDF::Node
      debug("URI | BNode") { value.to_s }
      {'@id' => value.to_s}
    when RDF::Literal
      debug("Literal") {"datatype: #{value.datatype.inspect}"}
      res = {}
      if options[:useNativeTypes] && [RDF::XSD.boolean, RDF::XSD.integer, RDF::XSD.double].include?(value.datatype)
        # Native representation: keep the Ruby object, note any coercion type
        res['@value'] = value.object
        res['@type'] = uri(coerce(property)) if coerce(property)
      else
        # Doubles are canonicalized so lexical forms are deterministic
        value.canonicalize! if value.datatype == RDF::XSD.double
        res['@value'] = value.to_s
        if coerce(property)
          res['@type'] = uri(coerce(property)).to_s
        elsif value.has_datatype?
          res['@type'] = uri(value.datatype).to_s
        elsif value.has_language? || language(property)
          res['@language'] = (value.language || language(property)).to_s
        end
      end
      res
    else
      # Otherwise, initialize result to a JSON object with an @value member whose value is set to value.
      res = {'@value' => value}
      if td.type_mapping
        res['@type'] = td.type_mapping.to_s
      elsif value.is_a?(String)
        # Term language mapping wins; false suppresses the default language
        if td.language_mapping
          res['@language'] = td.language_mapping
        elsif default_language && td.language_mapping.nil?
          res['@language'] = default_language
        end
      end
      res
    end

    debug("") {"=> #{result.inspect}"}
    result
  end
end
##
# Compact a value
#
# @param [String] property
#   Associated property used to find coercion rules
# @param [Hash] value
#   Value (literal or IRI), in full object representation, to be compacted
# @param [Hash{Symbol => Object}] options
#
# @return [Hash] Object representation of value
# @raise [JsonLdError] if the iri cannot be expanded
# @see http://json-ld.org/spec/latest/json-ld-api/#value-compaction
# FIXME: revisit the specification version of this.
def compact_value(property, value, options = {})
  depth(options) do
    debug("compact_value") {"property: #{property.inspect}, value: #{value.inspect}"}

    # Count members, discounting an @index entry when the property uses an
    # @index container (the index is carried by the container instead)
    num_members = value.keys.length
    num_members -= 1 if index?(value) && container(property) == '@index'
    if num_members > 2
      # Too many members to compact losslessly; return unchanged
      debug("") {"can't compact value with # members > 2"}
      return value
    end

    result = case
    when coerce(property) == '@id' && value.has_key?('@id') && num_members == 1
      # Compact an @id coercion
      debug("") {" (@id & coerce)"}
      compact_iri(value['@id'])
    when coerce(property) == '@vocab' && value.has_key?('@id') && num_members == 1
      # Compact an @id coercion
      debug("") {" (@id & coerce & vocab)"}
      compact_iri(value['@id'], vocab: true)
    when value.has_key?('@id')
      debug("") {" (@id)"}
      # return value as is
      value
    when value['@type'] && expand_iri(value['@type'], vocab: true) == coerce(property)
      # Compact common datatype
      debug("") {" (@type & coerce) == #{coerce(property)}"}
      value['@value']
    when value['@language'] && (value['@language'] == language(property))
      # Compact language
      debug("") {" (@language) == #{language(property).inspect}"}
      value['@value']
    when num_members == 1 && !value['@value'].is_a?(String)
      debug("") {" (native)"}
      value['@value']
    # NOTE(review): && binds tighter than ||, so this branch fires when
    # (num_members == 1 && default_language.nil?) OR language(property) == false —
    # confirm the grouping is intentional.
    when num_members == 1 && default_language.nil? || language(property) == false
      debug("") {" (!@language)"}
      value['@value']
    else
      # Otherwise, use original value
      debug("") {" (no change)"}
      value
    end

    # If the result is an object, transform keys using any term keyword aliases
    if result.is_a?(Hash) && result.keys.any? {|k| self.alias(k) != k}
      debug("") {" (map to key aliases)"}
      new_element = {}
      result.each do |k, v|
        new_element[self.alias(k)] = v
      end
      result = new_element
    end
    debug("") {"=> #{result.inspect}"}
    result
  end
end
def inspect
v = %w([Context)
v << "base=#{base}" if base
v << "vocab=#{vocab}" if vocab
v << "def_language=#{default_language}" if default_language
v << "term_definitions[#{term_definitions.length}]=#{term_definitions}"
v.join(" ") + "]"
end
def dup
# Also duplicate mappings, coerce and list
that = self
ec = super
ec.instance_eval do
@term_definitions = that.term_definitions.dup
@iri_to_term = that.iri_to_term.dup
end
ec
end
protected
##
# Look up the type coercion for a property.
#
# @param [String] property term in unexpanded form
#
# @return [RDF::URI, '@id', nil] the term's type mapping, '@id' for
#   @type / rdf:type, or nil when the term has no definition or no mapping
def coerce(property)
  # @type (in either keyword or IRI form) always coerces to an IRI
  return '@id' if property == '@type' || property == RDF.type
  td = term_definitions[property]
  td && td.type_mapping
end
##
# Determine if `term` is a suitable term.
# Any JSON string is acceptable.
#
# @param [String] term
# @return [Boolean]
def term_valid?(term)
  String === term
end
# Reverse term mapping, typically used for finding aliases for keys.
#
# Returns the registered term for `value` when one exists in the
# IRI-to-term table; otherwise returns `value` unchanged.
#
# @example
#   {"@context": {"id": "@id"}, "@id": "foo"} => {"id": "foo"}
#
# @param [RDF::URI, String] value
# @return [String]
def alias(value)
  iri_to_term.key?(value) ? iri_to_term[value] : value
end
private
# Interpret a string as an RDF resource: "_:name" identifiers become
# (renamed, cached) blank nodes via #bnode; anything else becomes an
# RDF::URI, optionally validated / canonicalized / interned per @options.
#
# @param [#to_s] value
# @return [RDF::Node, RDF::URI]
def uri(value)
  case value.to_s
  when /^_:(.*)$/
    # Map BlankNodes if a namer is given
    debug "uri(bnode)#{value}: #{$1}"
    bnode(namer.get_sym($1))
  else
    value = RDF::URI.new(value)
    value.validate! if @options[:validate]
    value.canonicalize! if @options[:canonicalize]
    value = RDF::URI.intern(value) if @options[:intern]
    value
  end
end
# Clear the provided context, used for testing.
# @return [Context] self
def clear_provided_context
  tap { @provided_context = nil }
end
# Keep track of allocated BNodes.
#
# Don't actually use the name provided, to prevent name alias issues:
# the label is only a cache key, the node itself is freshly allocated.
#
# NOTE(review): @@bnode_cache is a class variable, so the cache is shared
# across every Context instance (and subclass) — confirm that sharing is
# intended.
#
# @param [#to_s] value label used as cache key
# @return [RDF::Node]
def bnode(value = nil)
  @@bnode_cache ||= {}
  @@bnode_cache[value.to_s] ||= RDF::Node.new(value)
end
##
# Inverse Context creation
#
# When there is more than one term that could be chosen to compact an IRI, it has to be ensured that the term selection is both deterministic and represents the most context-appropriate choice whilst taking into consideration algorithmic complexity.
#
# In order to make term selections, the concept of an inverse context is introduced. An inverse context is essentially a reverse lookup table that maps container mappings, type mappings, and language mappings to a simple term for a given active context. An inverse context only needs to be generated for an active context if it is being used for compaction.
#
# To make use of an inverse context, a list of preferred container mappings and the type mapping or language mapping are gathered for a particular value associated with an IRI. These parameters are then fed to the Term Selection algorithm, which will find the term that most appropriately matches the value's mappings.
#
# @return [Hash{String => Hash{String => String}}]
#   IRI => container => {'@language' => {...}, '@type' => {...}} => term
def inverse_context
  # Memoized: rebuilt only when @inverse_context is nil.
  @inverse_context ||= begin
    result = {}
    default_language = self.default_language || '@none'
    # Process terms shortest-first (ties broken lexicographically) so the
    # shortest suitable term claims each ||= slot below.
    term_definitions.keys.sort do |a, b|
      a.length == b.length ? (a <=> b) : (a.length <=> b.length)
    end.each do |term|
      next unless td = term_definitions[term]
      container = td.container_mapping || '@none'
      container_map = result[td.id.to_s] ||= {}
      tl_map = container_map[container] ||= {'@language' => {}, '@type' => {}}
      type_map = tl_map['@type']
      language_map = tl_map['@language']
      if td.reverse_property
        type_map['@reverse'] ||= term
      elsif td.type_mapping
        type_map[td.type_mapping.to_s] ||= term
      elsif !td.language_mapping.nil?
        # language_mapping == false means "explicitly no language": file under @null
        language = td.language_mapping || '@null'
        language_map[language] ||= term
      else
        # Terms with neither type nor language act as defaults for all three slots
        language_map[default_language] ||= term
        language_map['@none'] ||= term
        type_map['@none'] ||= term
      end
    end
    result
  end
end
##
# This algorithm, invoked via the IRI Compaction algorithm, makes use of an active context's inverse context to find the term that is best used to compact an IRI. Other information about a value associated with the IRI is given, including which container mappings and which type mapping or language mapping would be best used to express the value.
#
# @param [String] iri
# @param [Array<String>] containers
#   represents an ordered list of preferred container mappings
# @param [String] type_language
#   indicates whether to look for a term with a matching type mapping or language mapping
# @param [Array<String>] preferred_values
#   for the type mapping or language mapping
# @return [String, nil] the selected term, or nil when nothing matches
def select_term(iri, containers, type_language, preferred_values)
  depth do
    debug("select_term") {
      "iri: #{iri.inspect}, " +
      "containers: #{containers.inspect}, " +
      "type_language: #{type_language.inspect}, " +
      "preferred_values: #{preferred_values.inspect}"
    }
    container_map = inverse_context[iri]
    debug(" ") {"container_map: #{container_map.inspect}"}
    # Walk containers in preference order, then candidate values in
    # preference order: the first hit wins.
    containers.each do |container|
      next unless container_map.has_key?(container)
      tl_map = container_map[container]
      value_map = tl_map[type_language]
      preferred_values.each do |item|
        next unless value_map.has_key?(item)
        debug("=>") {value_map[item].inspect}
        return value_map[item]
      end
    end
    debug("=>") {"nil"}
    nil
  end
end
##
# Removes a base IRI from the given absolute IRI.
#
# @param [String] iri the absolute IRI
# @return [String]
#   the relative IRI if relative to base, otherwise the absolute IRI.
def remove_base(iri)
  return iri unless base
  # Cache base plus each of its parent IRIs; the index into this list
  # determines how many "../" segments a relative reference needs.
  @base_and_parents ||= begin
    u = base
    iri_set = u.to_s.end_with?('/') ? [u.to_s] : []
    iri_set << u.to_s while (u = u.parent)
    iri_set
  end
  b = base.to_s
  # Same-document reference: keep only the query/fragment part.
  return iri[b.length..-1] if iri.start_with?(b) && %w(? #).include?(iri[b.length, 1])
  # NOTE(review): the block parameter below shadows the outer `b`; rename
  # one of them if this method is ever touched.
  @base_and_parents.each_with_index do |b, index|
    next unless iri.start_with?(b)
    rel = "../" * index + iri[b.length..-1]
    return rel.empty? ? "./" : rel
  end
  iri
end
## Used for testing
# Build a Hash of every term name to its IRI mapping (nil when the term
# has no definition).
#
# @return [Hash{String => RDF::URI, nil}]
def mappings
  term_definitions.each_with_object({}) do |(term, defn), acc|
    acc[term] = defn && defn.id
  end
end
## Used for testing
# Look up the IRI mapping for a single term.
#
# @param [String, #to_s] term
#
# @return [RDF::URI, String, nil] nil when the term is undefined
def mapping(term)
  defn = term_definitions[term]
  defn && defn.id
end
## Used for testing
# Build a Hash of every term name to its language mapping
# (`false` marks an explicit "no language").
#
# @return [Hash{String => String, false, nil}]
# @deprecated
def languages
  term_definitions.each_with_object({}) do |(term, defn), acc|
    acc[term] = defn.language_mapping
  end
end
end
end
Back off on absolute IRI validation in context unless :validate flag is set.
require 'json'
require 'bigdecimal'
module JSON::LD
class Context
include Utils
# Term Definitions specify how properties and values have to be interpreted as well as the current vocabulary mapping and the default language
class TermDefinition
  # @return [RDF::URI] IRI map
  attr_accessor :id

  # @return [String] term name
  attr_accessor :term

  # @return [String] Type mapping
  attr_accessor :type_mapping

  # @return [String] Container mapping
  attr_accessor :container_mapping

  # Language mapping of term, `false` is used if there is explicitly no language mapping for this term.
  # @return [String] Language mapping
  attr_accessor :language_mapping

  # @return [Boolean] Reverse Property
  attr_accessor :reverse_property

  # This is a simple term definition, not an expanded term definition
  # @return [Boolean] simple
  attr_accessor :simple

  # Predicate form of {#simple}.
  # @return [Boolean] true when this is a simple (non-expanded) term definition
  def simple?; simple; end

  # Create a new Term Mapping with an ID
  # @param [String] term
  # @param [String] id
  #   NOTE(review): stored via #to_s as a String even though the reader is
  #   documented as RDF::URI — confirm which type callers expect.
  def initialize(term, id = nil)
    @term = term
    @id = id.to_s if id
  end

  ##
  # Output Hash or String definition for this definition considering @language and @vocab
  #
  # @param [Context] context
  # @return [String, Hash{String => Array[String], String}]
  def to_context_definition(context)
    # Compact the IRI: prefer trimming the context's vocab, otherwise try
    # any registered prefix whose IRI is a prefix of ours.
    cid = if context.vocab && id.start_with?(context.vocab)
      # Nothing to return unless it's the same as the vocab
      id == context.vocab ? context.vocab : id.to_s[context.vocab.length..-1]
    else
      # Find a term to act as a prefix
      iri, prefix = context.iri_to_term.detect {|i,p| id.to_s.start_with?(i.to_s)}
      iri && iri != id ? "#{prefix}:#{id.to_s[iri.length..-1]}" : id
    end

    if language_mapping.nil? &&
       container_mapping.nil? &&
       type_mapping.nil? &&
       reverse_property.nil?
      # Simple definition: emit just the compacted IRI, or nothing at all
      # when it would be redundant with the vocab mapping.
      cid.to_s unless cid == term && context.vocab
    else
      # Expanded definition
      defn = {}
      defn[reverse_property ? '@reverse' : '@id'] = cid.to_s unless cid == term && !reverse_property
      if type_mapping
        defn['@type'] = if KEYWORDS.include?(type_mapping)
          type_mapping
        else
          context.compact_iri(type_mapping, vocab: true)
        end
      end
      defn['@container'] = container_mapping if container_mapping
      # Language set as false to be output as null
      defn['@language'] = (language_mapping ? language_mapping : nil) unless language_mapping.nil?
      defn
    end
  end

  # Debug-friendly summary of this definition.
  def inspect
    v = %w([TD)
    v << "id=#{@id}"
    v << "term=#{@term}"
    v << "rev" if reverse_property
    v << "container=#{container_mapping}" if container_mapping
    v << "lang=#{language_mapping.inspect}" unless language_mapping.nil?
    v << "type=#{type_mapping}" unless type_mapping.nil?
    v.join(" ") + "]"
  end
end
# The active base IRI.
#
# @return [RDF::URI] Current base IRI, used for expanding relative IRIs.
attr_reader :base

# The document base IRI.
#
# @return [RDF::URI] Document base IRI, used to initialize `base`.
attr_reader :doc_base

# @return [RDF::URI] base IRI of the context, if loaded remotely. XXX
attr_accessor :context_base

# Term definitions
# @return [Hash{String => TermDefinition}]
attr_reader :term_definitions

# @return [Hash{RDF::URI => String}] Reverse mappings from IRI to term only for terms, not CURIEs XXX
attr_accessor :iri_to_term

# Default language
#
# This adds a language to plain strings that aren't otherwise coerced
# @return [String]
attr_reader :default_language

# Default vocabulary
#
# Sets the default vocabulary used for expanding terms which
# aren't otherwise absolute IRIs
# @return [RDF::URI]
attr_reader :vocab

# @return [Hash{Symbol => Object}] Global options used in generating IRIs
attr_accessor :options

# @return [Context] A context provided to us that we can use without re-serializing XXX
attr_accessor :provided_context

# @return [BlankNodeNamer]
attr_accessor :namer
##
# Create new evaluation context
# @param [Hash] options
# @option options [String, #to_s] :base
#   The Base IRI to use when expanding the document. This overrides the value of `input` if it is a _IRI_. If not specified and `input` is not an _IRI_, the base IRI defaults to the current document IRI if in a browser context, or the empty string if there is no document context.
# @option options [Proc] :documentLoader
#   The callback of the loader to be used to retrieve remote documents and contexts. If specified, it must be used to retrieve remote documents and contexts; otherwise, if not specified, the processor's built-in loader must be used. See {API.documentLoader} for the method signature.
# @option options [Hash{Symbol => String}] :prefixes
#   See `RDF::Reader#initialize`
# @option options [Boolean] :simple_compact_iris (false)
#   When compacting IRIs, do not use terms with expanded term definitions
# @option options [String, #to_s] :vocab
#   Initial value for @vocab
# @option options [String, #to_s] :language
#   Initial value for @language
# @yield [ec]
# @yieldparam [Context]
# @return [Context]
def initialize(options = {})
  if options[:base]
    # Canonicalize the document base and strip any fragment/query part
    @base = @doc_base = RDF::URI(options[:base])
    @doc_base.canonicalize!
    @doc_base.fragment = nil
    @doc_base.query = nil
  end
  # NOTE(review): this mutates the caller's options hash by inserting a
  # default documentLoader — confirm callers don't rely on it being untouched.
  options[:documentLoader] ||= JSON::LD::API.method(:documentLoader)
  @term_definitions = {}
  # Seed reverse mappings with well-known vocabularies
  @iri_to_term = {
    RDF.to_uri.to_s => "rdf",
    RDF::XSD.to_uri.to_s => "xsd"
  }
  @namer = BlankNodeMapper.new("t")
  @options = options

  # Load any defined prefixes
  (options[:prefixes] || {}).each_pair do |k, v|
    next if k.nil?
    @iri_to_term[v.to_s] = k
    @term_definitions[k.to_s] = TermDefinition.new(k, v.to_s)
    @term_definitions[k.to_s].simple = true
  end

  self.vocab = options[:vocab] if options[:vocab]
  self.default_language = options[:language] if options[:language]
  #debug("init") {"iri_to_term: #{iri_to_term.inspect}"}

  yield(self) if block_given?
end
##
# True when this context carries no information: no term definitions,
# no vocabulary mapping, and no default language.
#
# @return [Boolean]
def empty?
  [vocab, default_language].all?(&:nil?) && @term_definitions.empty?
end
# Set the base IRI. The value is canonicalized and its fragment and query
# components are stripped. Absoluteness is only enforced when the
# :validate option is set (validation is deliberately backed off otherwise).
#
# @param [String, RDF::URI, nil] value must be an absolute IRI; nil clears the base
# @raise [JsonLdError::InvalidBaseIRI] when value has the wrong type, or is
#   relative while :validate is set
def base=(value)
  if value
    raise JsonLdError::InvalidBaseIRI, "@base must be a string: #{value.inspect}" unless value.is_a?(String) || value.is_a?(RDF::URI)
    @base = RDF::URI(value)
    @base.canonicalize!
    @base.fragment = nil
    @base.query = nil
    # Only enforce absoluteness when validating
    raise JsonLdError::InvalidBaseIRI, "@base must be an absolute IRI: #{value.inspect}" unless @base.absolute? || !@options[:validate]
    @base
  else
    @base = nil
  end
end
# Set the default language, downcased for case-insensitive comparison.
#
# @param [String, nil] value language tag, or nil to clear the default
# @raise [JsonLdError::InvalidDefaultLanguage] when a truthy non-String is given
def default_language=(value)
  unless value
    @default_language = nil
    return
  end
  raise JsonLdError::InvalidDefaultLanguage, "@language must be a string: #{value.inspect}" unless value.is_a?(String)
  @default_language = value.downcase
end
# Set the default vocabulary mapping. Relative-IRI rejection only applies
# when the :validate option is set.
#
# @param [String, nil] value must be an absolute IRI; nil clears the vocab
# @raise [JsonLdError::InvalidVocabMapping]
def vocab=(value)
  @vocab = case value
  when /_:/
    # NOTE(review): this matches "_:" appearing anywhere in the string, not
    # just a blank-node prefix — confirm /^_:/ wasn't intended.
    value
  when String
    v = as_resource(value)
    # Only enforce absoluteness when validating
    raise JsonLdError::InvalidVocabMapping, "@value must be an absolute IRI: #{value.inspect}" if v.uri? && v.relative? && @options[:validate]
    v
  when nil
    nil
  else
    raise JsonLdError::InvalidVocabMapping, "@value must be a string: #{value.inspect}"
  end
end
# Create an Evaluation Context
#
# When processing a JSON-LD data structure, each processing rule is applied using information provided by the active context. This section describes how to produce an active context.
#
# The active context contains the active term definitions which specify how properties and values have to be interpreted as well as the current base IRI, the vocabulary mapping and the default language. Each term definition consists of an IRI mapping, a boolean flag reverse property, an optional type mapping or language mapping, and an optional container mapping. A term definition can not only be used to map a term to an IRI, but also to map a term to a keyword, in which case it is referred to as a keyword alias.
#
# When processing, the active context is initialized without any term definitions, vocabulary mapping, or default language. If a local context is encountered during processing, a new active context is created by cloning the existing active context. Then the information from the local context is merged into the new active context. Given that local contexts may contain references to remote contexts, this includes their retrieval.
#
# @param [String, #read, Array, Hash, Context] local_context
#   one context, or an array of contexts, in any supported form
# @param [Array<String>] remote_contexts
#   IRIs already dereferenced on this path, used to detect recursive inclusion
# @return [Context] a new Context; the receiver is not modified
# @raise [JsonLdError]
#   on a remote context load error, syntax error, or a reference to a term which is not defined.
# @see http://json-ld.org/spec/latest/json-ld-api/index.html#context-processing-algorithm
def parse(local_context, remote_contexts = [])
  result = self.dup
  result.provided_context = local_context if self.empty?
  local_context = [local_context] unless local_context.is_a?(Array)
  local_context.each do |context|
    depth do
      case context
      when nil
        # 3.1 If nil, set to a new empty context
        result = Context.new(options)
      when Context
        # Already-parsed context: adopt a copy directly
        debug("parse") {"context: #{context.inspect}"}
        result = context.dup
      when IO, StringIO
        debug("parse") {"io: #{context}"}
        # Load context document, if it is a string
        begin
          ctx = JSON.load(context)
          raise JSON::LD::JsonLdError::InvalidRemoteContext, "Context missing @context key" if @options[:validate] && ctx['@context'].nil?
          result = parse(ctx["@context"] ? ctx["@context"].dup : {})
          result.provided_context = ctx["@context"] if [context] == local_context
          result
        rescue JSON::ParserError => e
          debug("parse") {"Failed to parse @context from remote document at #{context}: #{e.message}"}
          raise JSON::LD::JsonLdError::InvalidRemoteContext, "Failed to parse remote context at #{context}: #{e.message}" if @options[:validate]
          # Best-effort when not validating: keep the current context
          self.dup
        end
      when String, RDF::URI
        debug("parse") {"remote: #{context}, base: #{result.context_base || result.base}"}
        # Load context document, if it is a string
        # 3.2.1) Set context to the result of resolving value against the base IRI which is established as specified in section 5.1 Establishing a Base URI of [RFC3986]. Only the basic algorithm in section 5.2 of [RFC3986] is used; neither Syntax-Based Normalization nor Scheme-Based Normalization are performed. Characters additionally allowed in IRI references are treated in the same way that unreserved characters are treated in URI references, per section 6.5 of [RFC3987].
        context = RDF::URI(result.context_base || result.base).join(context)

        raise JsonLdError::RecursiveContextInclusion, "#{context}" if remote_contexts.include?(context.to_s)
        remote_contexts << context.to_s

        context_no_base = self.dup
        context_no_base.base = nil
        context_no_base.context_base = context.to_s

        begin
          context_opts = @options.dup
          if context_opts.has_key?(:header)
            context_opts[:header] = context_opts[:header].dup
            context_opts[:header].delete('Cache-Control') # Allow context to be cached
          end
          @options[:documentLoader].call(context.to_s, context_opts) do |remote_doc|
            # 3.2.5) Dereference context. If the dereferenced document has no top-level JSON object with an @context member, an invalid remote context has been detected and processing is aborted; otherwise, set context to the value of that member.
            jo = case remote_doc.document
            when String then JSON.parse(remote_doc.document)
            else remote_doc.document
            end
            raise JsonLdError::InvalidRemoteContext, "#{context}" unless jo.is_a?(Hash) && jo.has_key?('@context')
            context = jo['@context']
            if @options[:processingMode] == "json-ld-1.0"
              context_no_base.provided_context = context.dup
            end
          end
        rescue JsonLdError
          raise
        rescue Exception => e
          # NOTE(review): Exception (not StandardError) is caught deliberately
          # here so any loader failure is re-raised as a JsonLdError — confirm
          # before narrowing.
          debug("parse") {"Failed to retrieve @context from remote document at #{context_no_base.context_base.inspect}: #{e.message}"}
          raise JsonLdError::LoadingRemoteContextFailed, "#{context_no_base.context_base}", e.backtrace
        end

        # 3.2.6) Set context to the result of recursively calling this algorithm, passing context no base for active context, context for local context, and remote contexts.
        context = context_no_base.parse(context, remote_contexts.dup)
        context.provided_context = result.provided_context
        context.base ||= result.base
        result = context
        debug("parse") {"=> provided_context: #{context.inspect}"}
      when Hash
        # If context has a @vocab member: if its value is not a valid absolute IRI or null trigger an INVALID_VOCAB_MAPPING error; otherwise set the active context's vocabulary mapping to its value and remove the @vocab member from context.
        context = context.dup # keep from modifying a hash passed as a param

        # Apply @base/@language/@vocab via their setters, removing them from
        # the map so only term definitions remain.
        {
          '@base' => :base=,
          '@language' => :default_language=,
          '@vocab' => :vocab=
        }.each do |key, setter|
          v = context.fetch(key, false)
          unless v == false
            context.delete(key)
            debug("parse") {"Set #{key} to #{v.inspect}"}
            result.send(setter, v)
          end
        end

        defined = {}
        # For each key-value pair in context invoke the Create Term Definition subalgorithm, passing result for active context, context for local context, key, and defined
        depth do
          context.keys.each do |key|
            result.create_term_definition(context, key, defined)
          end
        end
      else
        # 3.3) If context is not a JSON object, an invalid local context error has been detected and processing is aborted.
        raise JsonLdError::InvalidLocalContext, context.inspect
      end
    end
  end
  result
end
##
# Merge in a context, creating a new context with updates from `context`
#
# @param [Context] context
# @return [Context] a new Context; neither receiver nor argument is mutated
def merge(context)
  c = self.dup.merge!(context)
  # NOTE(review): this replaces the merged term definitions with only those
  # from `context`, discarding the receiver's terms that merge! just merged
  # in — confirm this overwrite is intended and not a bug.
  c.instance_variable_set(:@term_definitions, context.term_definitions.dup)
  c
end
##
# Update context with definitions from `context`
#
# Mutates the receiver: default language, vocab and base are overwritten
# when set on `context`, and `context`'s term definitions win on conflict.
#
# @param [Context] context
# @return [self]
def merge!(context)
  # FIXME: if new context removes the default language, this won't do anything
  self.default_language = context.default_language if context.default_language
  self.vocab = context.vocab if context.vocab
  self.base = context.base if context.base

  # Merge in Term Definitions
  term_definitions.merge!(context.term_definitions)
  self
end
##
# Create Term Definition
#
# Term definitions are created by parsing the information in the given local context for the given term. If the given term is a compact IRI, it may omit an IRI mapping by depending on its prefix having its own term definition. If the prefix is a key in the local context, then its term definition must first be created, through recursion, before continuing. Because a term definition can depend on other term definitions, a mechanism must be used to detect cyclical dependencies. The solution employed here uses a map, defined, that keeps track of whether or not a term has been defined or is currently in the process of being defined. This map is checked before any recursion is attempted.
#
# After all dependencies for a term have been defined, the rest of the information in the local context for the given term is taken into account, creating the appropriate IRI mapping, container mapping, and type mapping or language mapping for the term.
#
# @param [Hash] local_context
# @param [String] term
# @param [Hash] defined
#   tracks definition state: true = done, false = in progress
# @raise [JsonLdError]
#   Represents a cyclical term dependency
# @see http://json-ld.org/spec/latest/json-ld-api/index.html#create-term-definition
def create_term_definition(local_context, term, defined)
  # Expand a string value, unless it matches a keyword
  debug("create_term_definition") {"term = #{term.inspect}"}

  # If defined contains the key term, then the associated value must be true, indicating that the term definition has already been created, so return. Otherwise, a cyclical term definition has been detected, which is an error.
  case defined[term]
  when TrueClass then return
  when nil
    defined[term] = false
  else
    raise JsonLdError::CyclicIRIMapping, "Cyclical term dependency found: #{term.inspect}"
  end

  # Since keywords cannot be overridden, term must not be a keyword. Otherwise, an invalid value has been detected, which is an error.
  if KEYWORDS.include?(term) && !%w(@vocab @language).include?(term)
    raise JsonLdError::KeywordRedefinition, "term must not be a keyword: #{term.inspect}" if
      @options[:validate]
  elsif !term_valid?(term) && @options[:validate]
    raise JsonLdError::InvalidTermDefinition, "term is invalid: #{term.inspect}"
  end

  # Remove any existing term definition for term in active context.
  term_definitions.delete(term)

  # Initialize value to the value associated with the key term in local context.
  value = local_context.fetch(term, false)
  simple_term = value.is_a?(String)
  # Normalize a string value to expanded-definition form
  value = {'@id' => value} if value.is_a?(String)

  case value
  when nil, {'@id' => nil}
    # If value equals null or value is a JSON object containing the key-value pair (@id-null), then set the term definition in active context to null, set the value associated with defined's key term to true, and return.
    debug("") {"=> nil"}
    term_definitions[term] = TermDefinition.new(term)
    defined[term] = true
    return
  when Hash
    debug("") {"Hash[#{term.inspect}] = #{value.inspect}"}
    definition = TermDefinition.new(term)
    definition.simple = simple_term

    if value.has_key?('@type')
      type = value['@type']
      # SPEC FIXME: @type may be nil
      type = case type
      when nil
        type
      when String
        begin
          expand_iri(type, vocab: true, documentRelative: false, local_context: local_context, defined: defined)
        rescue JsonLdError::InvalidIRIMapping
          raise JsonLdError::InvalidTypeMapping, "invalid mapping for '@type': #{type.inspect} on term #{term.inspect}"
        end
      else
        # Non-nil, non-string @type is invalid; sentinel fails the check below
        :error
      end
      unless %w(@id @vocab).include?(type) || type.is_a?(RDF::URI) && type.absolute?
        raise JsonLdError::InvalidTypeMapping, "unknown mapping for '@type': #{type.inspect} on term #{term.inspect}"
      end
      debug("") {"type_mapping: #{type.inspect}"}
      definition.type_mapping = type
    end

    if value.has_key?('@reverse')
      raise JsonLdError::InvalidReverseProperty, "unexpected key in #{value.inspect} on term #{term.inspect}" if
        value.keys.any? {|k| %w(@id).include?(k)}
      raise JsonLdError::InvalidIRIMapping, "expected value of @reverse to be a string: #{value['@reverse'].inspect} on term #{term.inspect}" unless
        value['@reverse'].is_a?(String)

      # Otherwise, set the IRI mapping of definition to the result of using the IRI Expansion algorithm, passing active context, the value associated with the @reverse key for value, true for vocab, true for document relative, local context, and defined. If the result is not an absolute IRI, i.e., it contains no colon (:), an invalid IRI mapping error has been detected and processing is aborted.
      definition.id = expand_iri(value['@reverse'],
                                 vocab: true,
                                 documentRelative: true,
                                 local_context: local_context,
                                 defined: defined)
      raise JsonLdError::InvalidIRIMapping, "non-absolute @reverse IRI: #{definition.id} on term #{term.inspect}" unless
        definition.id.is_a?(RDF::URI) && definition.id.absolute?

      # If value contains an @container member, set the container mapping of definition to its value; if its value is neither @set, nor @index, nor null, an invalid reverse property error has been detected (reverse properties only support set- and index-containers) and processing is aborted.
      if (container = value.fetch('@container', false))
        raise JsonLdError::InvalidReverseProperty,
              "unknown mapping for '@container' to #{container.inspect} on term #{term.inspect}" unless
              ['@set', '@index', nil].include?(container)
        definition.container_mapping = container
      end
      definition.reverse_property = true
    elsif value.has_key?('@id') && value['@id'] != term
      raise JsonLdError::InvalidIRIMapping, "expected value of @id to be a string: #{value['@id'].inspect} on term #{term.inspect}" unless
        value['@id'].is_a?(String)
      definition.id = expand_iri(value['@id'],
                                 vocab: true,
                                 documentRelative: true,
                                 local_context: local_context,
                                 defined: defined)
      raise JsonLdError::InvalidKeywordAlias, "expected value of @id to not be @context on term #{term.inspect}" if
        definition.id == '@context'
    elsif term.include?(':')
      # If term is a compact IRI with a prefix that is a key in local context then a dependency has been found. Use this algorithm recursively passing active context, local context, the prefix as term, and defined.
      prefix, suffix = term.split(':')
      depth {create_term_definition(local_context, prefix, defined)} if local_context.has_key?(prefix)

      definition.id = if td = term_definitions[prefix]
        # If term's prefix has a term definition in active context, set the IRI mapping for definition to the result of concatenating the value associated with the prefix's IRI mapping and the term's suffix.
        td.id + suffix
      else
        # Otherwise, term is an absolute IRI. Set the IRI mapping for definition to term
        term
      end
      debug("") {"=> #{definition.id}"}
    else
      # Otherwise, active context must have a vocabulary mapping, otherwise an invalid value has been detected, which is an error. Set the IRI mapping for definition to the result of concatenating the value associated with the vocabulary mapping and term.
      raise JsonLdError::InvalidIRIMapping, "relative term definition without vocab: #{term} on term #{term.inspect}" unless vocab
      definition.id = vocab + term
      debug("") {"=> #{definition.id}"}
    end

    # Record the reverse mapping only for simple terms
    @iri_to_term[definition.id] = term if simple_term && definition.id

    if value.has_key?('@container')
      container = value['@container']
      raise JsonLdError::InvalidContainerMapping, "unknown mapping for '@container' to #{container.inspect} on term #{term.inspect}" unless %w(@list @set @language @index).include?(container)
      debug("") {"container_mapping: #{container.inspect}"}
      definition.container_mapping = container
    end

    if value.has_key?('@language')
      language = value['@language']
      # NOTE(review): stray "}" inside this error message string.
      raise JsonLdError::InvalidLanguageMapping, "language must be null or a string, was #{language.inspect}} on term #{term.inspect}" unless language.nil? || (language || "").is_a?(String)
      language = language.downcase if language.is_a?(String)
      debug("") {"language_mapping: #{language.inspect}"}
      # nil @language is stored as false, meaning "explicitly no language"
      definition.language_mapping = language || false
    end

    term_definitions[term] = definition
    defined[term] = true
  else
    raise JsonLdError::InvalidTermDefinition, "Term definition for #{term.inspect} is an #{value.class} on term #{term.inspect}"
  end
end
##
# Generate @context
#
# If a context was supplied in global options, use that, otherwise, generate one
# from this representation.
#
# Fix: the debug messages below misspelled "serialize" as "serlialize".
#
# @param [Hash{Symbol => Object}] options ({})
# @return [Hash] {"@context" => ...}, or an empty Hash when there is
#   nothing to serialize
def serialize(options = {})
  depth(options) do
    # FIXME: not setting provided_context now
    use_context = case provided_context
    when String, RDF::URI
      # Reuse a context reference supplied by the caller verbatim
      debug "serialize: reuse context: #{provided_context.inspect}"
      provided_context.to_s
    when Hash, Array
      debug "serialize: reuse context: #{provided_context.inspect}"
      provided_context
    else
      # Otherwise, generate a context from this object's state
      debug("serialize: generate context")
      debug("") {"=> context: #{inspect}"}
      ctx = {}
      ctx['@base'] = base.to_s if base && base != doc_base
      ctx['@language'] = default_language.to_s if default_language
      ctx['@vocab'] = vocab.to_s if vocab

      # Term Definitions, sorted for deterministic output
      term_definitions.keys.sort.each do |term|
        defn = term_definitions[term].to_context_definition(self)
        ctx[term] = defn if defn
      end
      debug("") {"start_doc: context=#{ctx.inspect}"}
      ctx
    end

    # Return hash with @context, or empty
    r = {}
    r['@context'] = use_context unless use_context.nil? || use_context.empty?
    r
  end
end
##
# Build a context from an RDF::Vocabulary definition.
#
# @example building from an external vocabulary definition
#
#   g = RDF::Graph.load("http://schema.org/docs/schema_org_rdfa.html")
#
#   context = JSON::LD::Context.new.from_vocabulary(g,
#         vocab: "http://schema.org/",
#         prefixes: {schema: "http://schema.org/"},
#         language: "en")
#
# @param [RDF::Queryable] graph
#
# @return [self]
def from_vocabulary(graph)
  statements = {}
  ranges = {}

  # Group statements by (non-blank) subject, and collect declared
  # rdfs:range / schema:rangeIncludes objects per predicate
  graph.each do |statement|
    next if statement.subject.node?
    (statements[statement.subject] ||= []) << statement

    # Keep track of predicate ranges
    if [RDF::RDFS.range, RDF::SCHEMA.rangeIncludes].include?(statement.predicate)
      (ranges[statement.subject] ||= []) << statement.object
    end
  end

  # Add term definitions for each class and property not in vocab, and
  # for those properties having an object range
  statements.each do |subject, values|
    types = values.select {|v| v.predicate == RDF.type}.map(&:object)
    is_property = types.any? {|t| t.to_s.include?("Property")}

    # Local name: last path or fragment segment of the subject IRI
    term = subject.to_s.split(/[\/\#]/).last

    if !is_property
      # Ignore if there's a default vocabulary and this is not a property
      next if vocab && subject.to_s.start_with?(vocab)

      # otherwise, create a term definition
      # NOTE(review): the local `td` is unused on this branch.
      td = term_definitions[term] = TermDefinition.new(term, subject.to_s)
    else
      prop_ranges = ranges.fetch(subject, [])
      # If any range is empty or member of range includes rdfs:Literal or schema:Text
      # NOTE(review): && binds tighter than ||, so this parses as
      # (vocab && prop_ranges.empty?) || include?(Text) || include?(Literal)
      # — confirm the grouping is intended.
      next if vocab && prop_ranges.empty? ||
              prop_ranges.include?(RDF::SCHEMA.Text) ||
              prop_ranges.include?(RDF::RDFS.Literal)
      td = term_definitions[term] = TermDefinition.new(term, subject.to_s)

      # Set context typing based on first element in range
      case r = prop_ranges.first
      when RDF::XSD.string
        # Plain strings: suppress the default language, if any
        if self.default_language
          td.language_mapping = false
        end
      when RDF::XSD.boolean, RDF::SCHEMA.Boolean, RDF::XSD.date, RDF::SCHEMA.Date,
           RDF::XSD.dateTime, RDF::SCHEMA.DateTime, RDF::XSD.time, RDF::SCHEMA.Time,
           RDF::XSD.duration, RDF::SCHEMA.Duration, RDF::XSD.decimal, RDF::SCHEMA.Number,
           RDF::XSD.float, RDF::SCHEMA.Float, RDF::XSD.integer, RDF::SCHEMA.Integer
        # Datatype range: coerce to that datatype
        td.type_mapping = r
        td.simple = false
      else
        # It's an object range (includes schema:URL)
        td.type_mapping = '@id'
      end
    end
  end
  self
end
# Set term mapping
#
# Registers a simple term definition mapping +term+ to +value+, keeps the
# reverse (IRI-to-term) index in sync, and mirrors the mapping into the
# :prefixes option when one was supplied.
#
# @param [#to_s] term
# @param [RDF::URI, String, nil] value
#
# @return [TermDefinition]
def set_mapping(term, value)
debug("") {"map #{term.inspect} to #{value.inspect}"}
term = term.to_s
term_definitions[term] = TermDefinition.new(term, value)
term_definitions[term].simple = true
# Prefix registration uses a Symbol key, except for the empty term.
term_sym = term.empty? ? "" : term.to_sym
# Drop any stale reverse mapping for the term's previous IRI.
iri_to_term.delete(term_definitions[term].id.to_s) if term_definitions[term].id.is_a?(String)
@options[:prefixes][term_sym] = value if @options.has_key?(:prefixes)
iri_to_term[value.to_s] = term
term_definitions[term]
end
##
# Find a term definition
#
# @param [Term, #to_s] term in unexpanded form
# @return [Term] the matching definition, or nil when none is registered
def find_definition(term)
  # A TermDefinition passes straight through; anything else is looked up.
  return term if term.is_a?(TermDefinition)
  term_definitions[term.to_s]
end
##
# Retrieve container mapping, add it if `value` is provided
#
# @param [Term, #to_s] term in unexpanded form
# @return [String] the container mapping, or nil when the term has none
def container(term)
  # @graph always behaves as a set; keywords map to themselves.
  return '@set' if term == '@graph'
  return term if KEYWORDS.include?(term)
  defn = find_definition(term)
  defn && defn.container_mapping
end
##
# Retrieve the language associated with a term, or the default language otherwise
#
# @param [Term, #to_s] term in unexpanded form
# @return [String, false] the term's language mapping (false means
#   "explicitly no language"); the context default when the term has none
def language(term)
  defn = find_definition(term)
  mapping = defn && defn.language_mapping
  # Only a nil mapping falls back to the default; false is preserved.
  mapping.nil? ? @default_language : mapping
end
##
# Is this a reverse term
#
# @param [Term, #to_s] term in unexpanded form
# @return [Boolean]
def reverse?(term)
  defn = find_definition(term)
  defn && defn.reverse_property
end
##
# Given a term or IRI, find a reverse term definition matching that term. If the term is already reversed, find a non-reversed version.
#
# @param [Term, #to_s] term
# @return [Term, nil] related term definition, or nil when no definition
#   with the same IRI and opposite reverse_property exists
def reverse_term(term)
# Direct lookup of term
term = term_definitions[term.to_s] if term_definitions.has_key?(term.to_s) && !term.is_a?(TermDefinition)
# Lookup term, assuming term is an IRI
unless term.is_a?(TermDefinition)
td = term_definitions.values.detect {|t| t.id == term.to_s}
# Otherwise create a temporary term definition
term = td || TermDefinition.new(term.to_s, expand_iri(term, vocab:true))
end
# Now, return a term, which reverses this term
term_definitions.values.detect {|t| t.id == term.id && t.reverse_property != term.reverse_property}
end
##
# Expand an IRI. Relative IRIs are expanded against any document base.
#
# @param [String] value
#   A keyword, term, prefix:suffix or possibly relative IRI
# @param [Hash{Symbol => Object}] options
# @option options [Boolean] documentRelative (false)
# @option options [Boolean] vocab (false)
# @option options [Hash] local_context
#   Used during Context Processing.
# @option options [Hash] defined
#   Used during Context Processing.
# @return [RDF::URI, String]
#   IRI or String, if it's a keyword
# @raise [JSON::LD::JsonLdError::InvalidIRIMapping] if the value cannot be expanded
# @see http://json-ld.org/spec/latest/json-ld-api/#iri-expansion
def expand_iri(value, options = {})
  return value unless value.is_a?(String)
  return value if KEYWORDS.include?(value)
  depth(options) do
    debug("expand_iri") {"value: #{value.inspect}"} unless options[:quiet]
    local_context = options[:local_context]
    defined = options.fetch(:defined, {})
    # During Context Processing, ensure a term definition exists for value
    # before resolving it against the active context.
    if local_context && local_context.has_key?(value) && !defined[value]
      depth {create_term_definition(local_context, value, defined)}
    end
    # Vocabulary-relative expansion: a direct term match wins.
    if options[:vocab] && (v_td = term_definitions[value])
      debug("") {"match with #{v_td.id}"} unless options[:quiet]
      return v_td.id
    end
    # A colon means value is an absolute IRI, a compact IRI, or a blank node.
    if value.include?(':')
      prefix, suffix = value.split(':', 2)
      debug("") {"prefix: #{prefix.inspect}, suffix: #{suffix.inspect}, vocab: #{vocab.inspect}"} unless options[:quiet]
      # Blank node identifier, or already absolute (suffix starts with //).
      return RDF::Node.new(namer.get_sym(suffix)) if prefix == '_'
      return RDF::URI(value) if suffix[0,2] == '//'
      # Ensure a term definition exists for the prefix during Context Processing.
      if local_context && local_context.has_key?(prefix) && !defined[prefix]
        create_term_definition(local_context, prefix, defined)
      end
      # Compact IRI: concatenate the prefix's IRI mapping and the suffix;
      # otherwise the value is already an absolute IRI.
      # (Fix: the redundant inner `result =` assignment was dropped.)
      result = if (td = term_definitions[prefix])
        td.id + suffix
      else
        RDF::URI(value)
      end
      debug("") {"=> #{result.inspect}"} unless options[:quiet]
      return result
    end
    # (Fix: a stray duplicate debug line that referenced `result` before it
    # was assigned — always logging nil — has been removed here.)
    result = if options[:vocab] && vocab
      # If vocab is true, and active context has a vocabulary mapping, return the result of concatenating the vocabulary mapping with value.
      vocab + value
    elsif options[:documentRelative] && base = options.fetch(:base, self.base)
      # Resolve value against the base IRI using only the basic algorithm in
      # section 5.2 of RFC 3986 (no syntax- or scheme-based normalization).
      RDF::URI(base).join(value)
    elsif local_context && RDF::URI(value).relative?
      # If local context is not null and value is not an absolute IRI, an invalid IRI mapping error has been detected and processing is aborted.
      raise JSON::LD::JsonLdError::InvalidIRIMapping, "not an absolute IRI: #{value}"
    else
      RDF::URI(value)
    end
    debug("") {"=> #{result}"} unless options[:quiet]
    result
  end
end
##
# Compacts an absolute IRI to the shortest matching term or compact IRI
#
# @param [RDF::URI] iri
# @param [Hash{Symbol => Object}] options ({})
# @option options [Object] :value
# Value, used to select among various maps for the same IRI
# @option options [Boolean] :vocab
# specifies whether the passed iri should be compacted using the active context's vocabulary mapping
# @option options [Boolean] :reverse
# specifies whether a reverse property is being compacted
#
# @return [String] compacted form of IRI
# @see http://json-ld.org/spec/latest/json-ld-api/#iri-compaction
def compact_iri(iri, options = {})
return if iri.nil?
iri = iri.to_s
debug("compact_iri(#{iri.inspect}", options) {options.inspect} unless options[:quiet]
depth(options) do
value = options.fetch(:value, nil)
# Stage 1: try to select a simple term via the inverse context, using
# preferred containers and a type/language value derived from `value`.
if options[:vocab] && inverse_context.has_key?(iri)
debug("") {"vocab and key in inverse context"} unless options[:quiet]
default_language = self.default_language || @none
containers = []
tl, tl_value = "@language", "@null"
containers << '@index' if index?(value)
if options[:reverse]
tl, tl_value = "@type", "@reverse"
containers << '@set'
elsif list?(value)
debug("") {"list(#{value.inspect})"} unless options[:quiet]
# if value is a list object, then set type/language and type/language value to the most specific values that work for all items in the list as follows:
containers << "@list" unless index?(value)
list = value['@list']
common_type = nil
common_language = default_language if list.empty?
list.each do |item|
item_language, item_type = "@none", "@none"
if value?(item)
if item.has_key?('@language')
item_language = item['@language']
elsif item.has_key?('@type')
item_type = item['@type']
else
item_language = "@null"
end
else
item_type = '@id'
end
common_language ||= item_language
if item_language != common_language && value?(item)
debug("") {"-- #{item_language} conflicts with #{common_language}, use @none"} unless options[:quiet]
common_language = '@none'
end
common_type ||= item_type
if item_type != common_type
common_type = '@none'
debug("") {"#{item_type} conflicts with #{common_type}, use @none"} unless options[:quiet]
end
end
common_language ||= '@none'
common_type ||= '@none'
debug("") {"common type: #{common_type}, common language: #{common_language}"} unless options[:quiet]
if common_type != '@none'
tl, tl_value = '@type', common_type
else
tl_value = common_language
end
debug("") {"list: containers: #{containers.inspect}, type/language: #{tl.inspect}, type/language value: #{tl_value.inspect}"} unless options[:quiet]
else
if value?(value)
if value.has_key?('@language') && !index?(value)
tl_value = value['@language']
containers << '@language'
elsif value.has_key?('@type')
tl_value = value['@type']
tl = '@type'
end
else
tl, tl_value = '@type', '@id'
end
containers << '@set'
debug("") {"value: containers: #{containers.inspect}, type/language: #{tl.inspect}, type/language value: #{tl_value.inspect}"} unless options[:quiet]
end
containers << '@none'
tl_value ||= '@null'
preferred_values = []
preferred_values << '@reverse' if tl_value == '@reverse'
if %w(@id @reverse).include?(tl_value) && value.is_a?(Hash) && value.has_key?('@id')
t_iri = compact_iri(value['@id'], vocab: true, document_relative: true)
if (r_td = term_definitions[t_iri]) && r_td.id == value['@id']
preferred_values.concat(%w(@vocab @id @none))
else
preferred_values.concat(%w(@id @vocab @none))
end
else
preferred_values.concat([tl_value, '@none'])
end
debug("") {"preferred_values: #{preferred_values.inspect}"} unless options[:quiet]
if p_term = select_term(iri, containers, tl, preferred_values)
debug("") {"=> term: #{p_term.inspect}"} unless options[:quiet]
return p_term
end
end
# At this point, there is no simple term that iri can be compacted to. If vocab is true and active context has a vocabulary mapping:
# Stage 2: strip the vocabulary mapping, if the remaining suffix does not
# collide with an existing term.
if options[:vocab] && vocab && iri.start_with?(vocab) && iri.length > vocab.length
suffix = iri[vocab.length..-1]
debug("") {"=> vocab suffix: #{suffix.inspect}"} unless options[:quiet]
return suffix unless term_definitions.has_key?(suffix)
end
# The iri could not be compacted using the active context's vocabulary mapping. Try to create a compact IRI, starting by initializing compact IRI to null. This variable will be used to store the created compact IRI, if any.
candidates = []
term_definitions.each do |term, td|
next if term.include?(":")
next if td.nil? || td.id.nil? || td.id == iri || !iri.start_with?(td.id)
# Also skip term if it was not a simple term and the :simple_compact_iris flag is true
next if @options[:simple_compact_iris] && !td.simple?
suffix = iri[td.id.length..-1]
ciri = "#{term}:#{suffix}"
candidates << ciri unless value && term_definitions.has_key?(ciri)
end
if !candidates.empty?
debug("") {"=> compact iri: #{candidates.term_sort.first.inspect}"} unless options[:quiet]
return candidates.term_sort.first
end
# If we still don't have any terms and we're using standard_prefixes,
# try those, and add to mapping
if @options[:standard_prefixes]
candidates = RDF::Vocabulary.
select {|v| iri.start_with?(v.to_uri.to_s) && iri != v.to_uri.to_s}.
map do |v|
prefix = v.__name__.to_s.split('::').last.downcase
set_mapping(prefix, v.to_uri.to_s)
iri.sub(v.to_uri.to_s, "#{prefix}:").sub(/:$/, '')
end
if !candidates.empty?
debug("") {"=> standard prefies: #{candidates.term_sort.first.inspect}"} unless options[:quiet]
return candidates.term_sort.first
end
end
# Final fallback: a document-relative IRI (when not compacting against the
# vocabulary) or the absolute IRI itself.
if !options[:vocab]
# transform iri to a relative IRI using the document's base IRI
iri = remove_base(iri)
debug("") {"=> relative iri: #{iri.inspect}"} unless options[:quiet]
return iri
else
debug("") {"=> absolute iri: #{iri.inspect}"} unless options[:quiet]
return iri
end
end
end
##
# If active property has a type mapping in the active context set to @id or @vocab, a JSON object with a single member @id whose value is the result of using the IRI Expansion algorithm on value is returned.
#
# Otherwise, the result will be a JSON object containing an @value member whose value is the passed value. Additionally, an @type member will be included if there is a type mapping associated with the active property or an @language member if value is a string and there is language mapping associated with the active property.
#
# @param [String] property
# Associated property used to find coercion rules
# @param [Hash, String] value
# Value (literal or IRI) to be expanded
# @param [Hash{Symbol => Object}] options
# @option options [Boolean] :useNativeTypes (false) use native representations
#
# @return [Hash] Object representation of value
# @raise [RDF::ReaderError] if the iri cannot be expanded
# @see http://json-ld.org/spec/latest/json-ld-api/#value-expansion
def expand_value(property, value, options = {})
options = {useNativeTypes: false}.merge(options)
depth(options) do
debug("expand_value") {"property: #{property.inspect}, value: #{value.inspect}"}
# If the active property has a type mapping in active context that is @id, return a new JSON object containing a single key-value pair where the key is @id and the value is the result of using the IRI Expansion algorithm, passing active context, value, and true for document relative.
# NOTE: the fetch default yields an empty TermDefinition, so td is always
# truthy and the guard reduces to the type_mapping comparison.
if (td = term_definitions.fetch(property, TermDefinition.new(property))) && td.type_mapping == '@id'
debug("") {"as relative IRI: #{value.inspect}"}
return {'@id' => expand_iri(value, documentRelative: true).to_s}
end
# If active property has a type mapping in active context that is @vocab, return a new JSON object containing a single key-value pair where the key is @id and the value is the result of using the IRI Expansion algorithm, passing active context, value, true for vocab, and true for document relative.
if td.type_mapping == '@vocab'
debug("") {"as vocab IRI: #{value.inspect}"}
return {'@id' => expand_iri(value, vocab: true, documentRelative: true).to_s}
end
# Promote Ruby date/time objects to RDF literals so they are typed below.
value = RDF::Literal(value) if
value.is_a?(Date) ||
value.is_a?(DateTime) ||
value.is_a?(Time)
result = case value
when RDF::URI, RDF::Node
debug("URI | BNode") { value.to_s }
{'@id' => value.to_s}
when RDF::Literal
debug("Literal") {"datatype: #{value.datatype.inspect}"}
res = {}
if options[:useNativeTypes] && [RDF::XSD.boolean, RDF::XSD.integer, RDF::XSD.double].include?(value.datatype)
res['@value'] = value.object
res['@type'] = uri(coerce(property)) if coerce(property)
else
value.canonicalize! if value.datatype == RDF::XSD.double
res['@value'] = value.to_s
if coerce(property)
res['@type'] = uri(coerce(property)).to_s
elsif value.has_datatype?
res['@type'] = uri(value.datatype).to_s
elsif value.has_language? || language(property)
res['@language'] = (value.language || language(property)).to_s
end
end
res
else
# Otherwise, initialize result to a JSON object with an @value member whose value is set to value.
res = {'@value' => value}
if td.type_mapping
res['@type'] = td.type_mapping.to_s
elsif value.is_a?(String)
if td.language_mapping
res['@language'] = td.language_mapping
elsif default_language && td.language_mapping.nil?
res['@language'] = default_language
end
end
res
end
debug("") {"=> #{result.inspect}"}
result
end
end
##
# Compact a value
#
# @param [String] property
# Associated property used to find coercion rules
# @param [Hash] value
# Value (literal or IRI), in full object representation, to be compacted
# @param [Hash{Symbol => Object}] options
#
# @return [Hash] Object representation of value
# @raise [JsonLdError] if the iri cannot be expanded
# @see http://json-ld.org/spec/latest/json-ld-api/#value-compaction
# FIXME: revisit the specification version of this.
def compact_value(property, value, options = {})
depth(options) do
debug("compact_value") {"property: #{property.inspect}, value: #{value.inspect}"}
num_members = value.keys.length
# An @index member does not count when the container is @index.
num_members -= 1 if index?(value) && container(property) == '@index'
if num_members > 2
debug("") {"can't compact value with # members > 2"}
return value
end
result = case
when coerce(property) == '@id' && value.has_key?('@id') && num_members == 1
# Compact an @id coercion
debug("") {" (@id & coerce)"}
compact_iri(value['@id'])
when coerce(property) == '@vocab' && value.has_key?('@id') && num_members == 1
# Compact an @id coercion
debug("") {" (@id & coerce & vocab)"}
compact_iri(value['@id'], vocab: true)
when value.has_key?('@id')
debug("") {" (@id)"}
# return value as is
value
when value['@type'] && expand_iri(value['@type'], vocab: true) == coerce(property)
# Compact common datatype
debug("") {" (@type & coerce) == #{coerce(property)}"}
value['@value']
when value['@language'] && (value['@language'] == language(property))
# Compact language
debug("") {" (@language) == #{language(property).inspect}"}
value['@value']
when num_members == 1 && !value['@value'].is_a?(String)
debug("") {" (native)"}
value['@value']
# NOTE(review): `&&` binds tighter than `||`, so this groups as
# (num_members == 1 && default_language.nil?) || language(property) == false
# — confirm that grouping is the intended condition.
when num_members == 1 && default_language.nil? || language(property) == false
debug("") {" (!@language)"}
value['@value']
else
# Otherwise, use original value
debug("") {" (no change)"}
value
end
# If the result is an object, transform keys using any term keyword aliases
if result.is_a?(Hash) && result.keys.any? {|k| self.alias(k) != k}
debug("") {" (map to key aliases)"}
new_element = {}
result.each do |k, v|
new_element[self.alias(k)] = v
end
result = new_element
end
debug("") {"=> #{result.inspect}"}
result
end
end
# Human-readable summary of this context: base, vocabulary, default
# language, and the number (and content) of term definitions.
#
# @return [String]
def inspect
  parts = ["[Context"]
  parts << "base=#{base}" if base
  parts << "vocab=#{vocab}" if vocab
  parts << "def_language=#{default_language}" if default_language
  parts << "term_definitions[#{term_definitions.length}]=#{term_definitions}"
  parts.join(" ") + "]"
end
# Duplicate this context, copying the term-definition and IRI-to-term
# tables so mutations of the copy do not leak back into the original.
#
# @return [Context]
def dup
# Also duplicate mappings, coerce and list
that = self
ec = super
# instance_eval is used to reach into the copy's private state; `that`
# captures the original across the changed `self`.
ec.instance_eval do
@term_definitions = that.term_definitions.dup
@iri_to_term = that.iri_to_term.dup
end
ec
end
protected
##
# Retrieve term coercion
#
# @param [String] property in unexpanded form
#
# @return [RDF::URI, '@id', nil] the type mapping for the property, or nil
def coerce(property)
  # @type (in either spelling) is always coerced to an IRI.
  return '@id' if property == RDF.type || property == '@type'
  defn = term_definitions[property]
  defn && defn.type_mapping
end
##
# Determine if `term` is a suitable term.
# Term may be any valid JSON string.
#
# @param [String] term
# @return [Boolean]
def term_valid?(term)
  term.kind_of?(String)
end
# Reverse term mapping, typically used for finding aliases for keys.
#
# Returns either the original value, or a mapping for this value.
#
# @example
# {"@context": {"id": "@id"}, "@id": "foo"} => {"id": "foo"}
#
# @param [RDF::URI, String] value
# @return [String]
def alias(value)
  iri_to_term.key?(value) ? iri_to_term[value] : value
end
private
# Coerce a value into an RDF::URI or RDF::Node.
#
# Blank-node identifiers (_:name) are routed through the namer to stable
# cached nodes; everything else becomes an RDF::URI, optionally validated,
# canonicalized and interned according to @options.
#
# @param [#to_s] value
# @return [RDF::URI, RDF::Node]
def uri(value)
case value.to_s
when /^_:(.*)$/
# Map BlankNodes if a namer is given
debug "uri(bnode)#{value}: #{$1}"
bnode(namer.get_sym($1))
else
value = RDF::URI.new(value)
value.validate! if @options[:validate]
value.canonicalize! if @options[:canonicalize]
value = RDF::URI.intern(value) if @options[:intern]
value
end
end
# Clear the provided context, used for testing
# @return [Context] self
def clear_provided_context
  tap { @provided_context = nil }
end
# Keep track of allocated BNodes
#
# Don't actually use the name provided, to prevent name alias issues.
#
# NOTE(review): @@bnode_cache is a class variable, so the cache is shared
# by every Context in the process and never evicted — confirm intended.
# @return [RDF::Node]
def bnode(value = nil)
@@bnode_cache ||= {}
@@bnode_cache[value.to_s] ||= RDF::Node.new(value)
end
##
# Inverse Context creation
#
# When there is more than one term that could be chosen to compact an IRI, it has to be ensured that the term selection is both deterministic and represents the most context-appropriate choice whilst taking into consideration algorithmic complexity.
#
# In order to make term selections, the concept of an inverse context is introduced. An inverse context is essentially a reverse lookup table that maps container mappings, type mappings, and language mappings to a simple term for a given active context. A inverse context only needs to be generated for an active context if it is being used for compaction.
#
# To make use of an inverse context, a list of preferred container mappings and the type mapping or language mapping are gathered for a particular value associated with an IRI. These parameters are then fed to the Term Selection algorithm, which will find the term that most appropriately matches the value's mappings.
#
# @return [Hash{String => Hash{String => String}}] memoized in @inverse_context
def inverse_context
@inverse_context ||= begin
result = {}
default_language = self.default_language || '@none'
# Shortest terms first (ties broken lexicographically) so that the
# shortest term wins each ||= slot below.
term_definitions.keys.sort do |a, b|
a.length == b.length ? (a <=> b) : (a.length <=> b.length)
end.each do |term|
next unless td = term_definitions[term]
container = td.container_mapping || '@none'
container_map = result[td.id.to_s] ||= {}
tl_map = container_map[container] ||= {'@language' => {}, '@type' => {}}
type_map = tl_map['@type']
language_map = tl_map['@language']
if td.reverse_property
type_map['@reverse'] ||= term
elsif td.type_mapping
type_map[td.type_mapping.to_s] ||= term
elsif !td.language_mapping.nil?
# An explicit language mapping of false is recorded under '@null'.
language = td.language_mapping || '@null'
language_map[language] ||= term
else
# Untyped, language-free terms are usable for any of these slots.
language_map[default_language] ||= term
language_map['@none'] ||= term
type_map['@none'] ||= term
end
end
result
end
end
##
# This algorithm, invoked via the IRI Compaction algorithm, makes use of an active context's inverse context to find the term that is best used to compact an IRI. Other information about a value associated with the IRI is given, including which container mappings and which type mapping or language mapping would be best used to express the value.
#
# @param [String] iri
# @param [Array<String>] containers
# represents an ordered list of preferred container mappings
# @param [String] type_language
# indicates whether to look for a term with a matching type mapping or language mapping
# @param [Array<String>] preferred_values
# for the type mapping or language mapping
# @return [String, nil] the selected term, or nil when no candidate matches
def select_term(iri, containers, type_language, preferred_values)
depth do
debug("select_term") {
"iri: #{iri.inspect}, " +
"containers: #{containers.inspect}, " +
"type_language: #{type_language.inspect}, " +
"preferred_values: #{preferred_values.inspect}"
}
container_map = inverse_context[iri]
debug(" ") {"container_map: #{container_map.inspect}"}
# Containers and preferred values are both tried in priority order;
# the first hit wins.
containers.each do |container|
next unless container_map.has_key?(container)
tl_map = container_map[container]
value_map = tl_map[type_language]
preferred_values.each do |item|
next unless value_map.has_key?(item)
debug("=>") {value_map[item].inspect}
return value_map[item]
end
end
debug("=>") {"nil"}
nil
end
end
##
# Removes a base IRI from the given absolute IRI.
#
# @param [String] iri the absolute IRI
# @return [String]
#   the relative IRI if relative to base, otherwise the absolute IRI.
def remove_base(iri)
  return iri unless base
  # Cache base plus each of its parent IRIs (memoized per instance).
  @base_and_parents ||= begin
    u = base
    iri_set = u.to_s.end_with?('/') ? [u.to_s] : []
    iri_set << u.to_s while (u = u.parent)
    iri_set
  end
  b = base.to_s
  # A query or fragment attached directly to base stays as "?..." / "#...".
  return iri[b.length..-1] if iri.start_with?(b) && %w(? #).include?(iri[b.length, 1])
  # Otherwise climb the hierarchy: each parent level adds one "../" segment.
  # (Fix: block parameter renamed from `b`, which shadowed the local above
  # and triggered a Ruby shadowing warning.)
  @base_and_parents.each_with_index do |ancestor, index|
    next unless iri.start_with?(ancestor)
    rel = "../" * index + iri[ancestor.length..-1]
    return rel.empty? ? "./" : rel
  end
  iri
end
## Used for testing
# Retrieve term mappings
#
# Maps every registered term to its IRI mapping (nil for absent
# definitions).
#
# @return [Hash{String => RDF::URI}]
def mappings
  term_definitions.each_with_object({}) do |(term, defn), acc|
    acc[term] = defn && defn.id
  end
end
## Used for testing
# Retrieve term mapping
#
# @param [String, #to_s] term
#
# @return [RDF::URI, String, nil] the IRI mapping, or nil when the term
#   has no definition
def mapping(term)
  defn = term_definitions[term]
  defn && defn.id
end
## Used for testing
# Retrieve language mappings
#
# @return [Hash{String => String}] term => language mapping
# @deprecated
def languages
  term_definitions.inject({}) do |memo, (t,td)|
    # Guard against nil definitions for consistency with #mappings;
    # previously td.language_mapping raised NoMethodError on a nil entry.
    memo[t] = td && td.language_mapping
    memo
  end
end
end
end
|
module Kashflow
  # SOAP client for the KashFlow accounting web API.
  #
  # API calls are declared in config/kf_api_methods.yml and dispatched
  # dynamically: e.g. `client.get_customers` is routed through
  # #method_missing to #api_call.
  class Client
    # [Savon::Client] the underlying SOAP service handle.
    attr_reader :service

    # Path of the YAML file listing the remote API methods.
    def self.yaml_path
      File.join(Kashflow.root, 'config', 'kf_api_methods.yml')
    end

    # Memoized API method descriptions parsed from the YAML file.
    def self.api_methods
      @@api_methods ||= YAML.load_file(yaml_path)
    end

    # @param [String] login KashFlow account username
    # @param [String] password KashFlow account password
    # @raise [RuntimeError] when either credential is missing
    def initialize(login, password)
      raise "missing login/password" unless login and password
      @login, @password = login, password
      @service = Savon::Client.new do |wsdl, http|
        wsdl.document = "https://securedwebapp.com/api/service.asmx?wsdl"
        # Peer certificate verification is disabled for this endpoint.
        http.auth.ssl.verify_mode = :none
      end
    end

    # Find the declared API method whose underscored name matches +name+.
    # @return [Object, nil] the YAML-backed method description
    def lookup_api_method(name)
      self.class.api_methods.detect{ |api_method| api_method.name.underscore == name.to_s }
    end

    # Route unknown messages to the remote API when a matching method is
    # declared in the YAML catalogue; otherwise fail loudly.
    def method_missing(m, *args, &block)
      api_method = lookup_api_method(m)
      if api_method
        api_call(m, api_method.name, args)
      else
        raise "method_missing: No method for #{m}"
      end
    end

    # Perform an API call and unwrap the SOAP response.
    #
    # @param [Symbol] name underscored method name (e.g. :get_customers)
    # @param [String] method CamelCase remote method name
    # @param [Array] args request parameters
    # @return [String, Array<OpenStruct>, Object]
    # @raise [RuntimeError] when the API status is anything but 'OK'
    def api_call(name, method, args)
      soap_return = soap_call(name, method, args)
      response = soap_return["#{name}_response".to_sym]
      raise "API call failed: [#{response[:status_detail]}]\n\n #{response.inspect}" unless response[:status] == 'OK'
      r = response["#{name}_result".to_sym]
      if r.is_a?(String)
        r
      else
        if r.is_a?(Enumerable)
          if r.values.all?{|v| v.is_a?(Array) }# || r.keys.size == 1
            object_type, attrs = r.first
          else
            # Single record: look up its type and wrap it so the mapping
            # below is uniform.
            object_type = lookup_api_method(name).response_attrs.first[:type]
            attrs = r.first.last.is_a?(Hash) ? [r.first.last] : [r]
          end
          # Present each record as an OpenStruct tagged with its type.
          ostructs = attrs.map do |record_attrs|
            OpenStruct.new(record_attrs.merge(:object_type => object_type.to_s))
          end
        else
          r
        end
      end
    end

    # Build the opening/closing XML wrapper tags for insert/update calls.
    #
    # @param [Symbol, String] name underscored API method name
    # @param [String, nil] params_xml rendered request parameters
    # @return [Array(String, String)] opening-tag prefix and closing tag,
    #   or two empty strings when no wrapper is needed
    def object_wrapper(name, params_xml)
      object_alias = {:customer => "custr", :quote => "quote", :invoice => "Inv", :supplier => "supl", :receipt => "Inv", :line => "InvLine", :payment => "InvoicePayment"}
      needs_object = [ "insert", "update" ]
      operation, object, line = name.to_s.split("_")
      if needs_object.include? operation
        text = line ? object_alias[line.to_sym] : object_alias[object.to_sym]
        text = "sup" if operation == "update" and object == "supplier"
        # Fix (issue #3): UpdateReceipt expects a <Receipt> wrapper, not the
        # <Inv> alias used when inserting receipts.
        text = "Receipt" if operation == "update" and object == "receipt"
        if line == "line" # prevent add_invoice_payment trying to do below actions
          case name.to_s
          when "insert_invoice_line_with_invoice_number"
            line_id = "<InvoiceNumber>#{params_xml.match(/<InvoiceNumber>(.*?)<\/InvoiceNumber>/)[1]}</InvoiceNumber>\n\t\t"
          else
            line_id = "<ReceiptID>#{params_xml.match(/<ReceiptID>(.*?)<\/ReceiptID>/)[1]}</ReceiptID>\n\t\t" if object == "receipt"
            line_id = "<InvoiceID>#{params_xml.match(/<InvoiceID>(.*?)<\/InvoiceID>/)[1]}</InvoiceID>\n\t\t" if object == "invoice"
          end
        end
        return ["#{line_id}<#{text}>", "</#{text}>"]
      else
        return ["",""]
      end
    end

    # Issue the raw SOAP request. Called with the CamelCase method name.
    #
    # @return [Hash, false] the response hash, or false on a SOAP fault
    def soap_call(name, method, params = {})
      begin
        result = @service.request(name) do |soap|
          params = params.pop if params.is_a?(Array)
          params_xml = params.map do |field, value|
            xml_tag = field.to_s.camelize
            "<#{xml_tag}>#{value}</#{xml_tag}>"
          end.join("\n") unless params.blank?
          # KashFlow expects ID/DBID capitalization camelize won't produce.
          params_xml = params_xml.gsub(/Id>/,"ID>") if params_xml
          params_xml = params_xml.gsub(/Dbid>/,"DBID>") if params_xml
          pretext, posttext = object_wrapper(name, params_xml)
          soap.xml = %[<?xml version="1.0" encoding="utf-8"?>
<soap:Envelope xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns:xsd="http://www.w3.org/2001/XMLSchema" xmlns:soap="http://schemas.xmlsoap.org/soap/envelope/">
<soap:Body>
<#{method} xmlns="KashFlow">
<UserName>#{@login}</UserName>
<Password>#{@password}</Password>
#{pretext}
#{params_xml}
#{posttext}
</#{method}>
</soap:Body>
</soap:Envelope>]
        end.to_hash
      rescue Savon::SOAP::Fault => e
        puts "soap fault:" + e.inspect
        return false
      end
    end
  end
end
Fix for UpdateReceipt from issue #3.
module Kashflow
class Client
attr_reader :service
# Absolute path of the YAML catalogue describing the KashFlow API methods.
def self.yaml_path
  File.join(Kashflow.root, *%w[config kf_api_methods.yml])
end
# Memoized API method descriptions loaded from the YAML catalogue.
# NOTE(review): @@api_methods is a class variable, shared with any future
# subclasses — confirm that is acceptable.
def self.api_methods
@@api_methods ||= YAML.load_file(yaml_path)
end
# Build a SOAP client for the KashFlow service.
#
# @param [String] login KashFlow account username
# @param [String] password KashFlow account password
# @raise [RuntimeError] when either credential is missing
def initialize(login, password)
raise "missing login/password" unless login and password
@login, @password = login, password
@service = Savon::Client.new do |wsdl, http|
wsdl.document = "https://securedwebapp.com/api/service.asmx?wsdl"
# Peer certificate verification is disabled for this endpoint.
http.auth.ssl.verify_mode = :none
end
end
# Find the YAML-declared API method whose underscored name matches +name+.
#
# @param [Symbol, String] name underscored method name (e.g. :get_customers)
# @return [Object, nil] the method description, or nil when unknown
def lookup_api_method(name)
  wanted = name.to_s
  self.class.api_methods.detect { |api_method| api_method.name.underscore == wanted }
end
# Dispatch unknown messages as remote API calls when the method name is
# declared in the YAML catalogue.
# NOTE(review): consider delegating to super and defining
# respond_to_missing? so unknown names raise NoMethodError.
def method_missing(m, *args, &block)
api_method = lookup_api_method(m)
if api_method
# puts "found api_method #{api_method.name} for #{m}: #{api_method.request_attrs.inspect}"
# puts "you're calling with #{args.inspect}"
api_call(m, api_method.name, args)
else
raise "method_missing: No method for #{m}"
end
end
# Perform a declared API call and unwrap the SOAP response.
#
# @param [Symbol] name underscored method name (e.g. :get_customers)
# @param [String] method CamelCase remote method name
# @param [Array] args request parameters forwarded to the SOAP call
# @return [String, Array<OpenStruct>, Object] a bare string result, a list
#   of OpenStruct records for enumerable results, or the raw value
# @raise [RuntimeError] when the API status is anything but 'OK'
def api_call(name, method, args)
soap_return = soap_call(name, method, args)
response = soap_return["#{name}_response".to_sym]
# puts "got response: " + response.inspect
raise "API call failed: [#{response[:status_detail]}]\n\n #{response.inspect}" unless response[:status] == 'OK'
r = response["#{name}_result".to_sym]
if r.is_a?(String)
r
else
if r.is_a?(Enumerable)
if r.values.all?{|v| v.is_a?(Array) }# || r.keys.size == 1
object_type, attrs = r.first
else
# puts "arrayifying #{r.inspect}"
# Single record: look up its type and wrap it so mapping is uniform.
object_type = lookup_api_method(name).response_attrs.first[:type]
attrs = r.first.last.is_a?(Hash) ? [r.first.last] : [r]
end
# puts "it's an enumerable... #{object_type} | #{attrs.inspect}"
# Each record is presented as an OpenStruct tagged with its type.
ostructs = attrs.map do |record_attrs|
# puts "making new ostruct with #{record_attrs.inspect}"
OpenStruct.new(record_attrs.merge(:object_type => object_type.to_s))
end
#r.first.last
else
#puts "it's a #{r.class}"
r
end
end
end
def object_wrapper(name, params_xml)
object_alias = {:customer => "custr", :quote => "quote", :invoice => "Inv", :supplier => "supl", :receipt => "Inv", :line => "InvLine", :payment => "InvoicePayment"}
needs_object = [ "insert", "update" ]
operation, object, line = name.to_s.split("_")
if needs_object.include? operation
text = line ? object_alias[line.to_sym] : object_alias[object.to_sym]
text = "sup" if operation == "update" and object == "supplier"
text = "Receipt" if operation == "update" and object == "receipt"
if line == "line" # prevent add_invoice_payment trying to do below actions
case name.to_s
when "insert_invoice_line_with_invoice_number"
line_id = "<InvoiceNumber>#{params_xml.match(/<InvoiceNumber>(.*?)<\/InvoiceNumber>/)[1]}</InvoiceNumber>\n\t\t"
else
line_id = "<ReceiptID>#{params_xml.match(/<ReceiptID>(.*?)<\/ReceiptID>/)[1]}</ReceiptID>\n\t\t" if object == "receipt"
line_id = "<InvoiceID>#{params_xml.match(/<InvoiceID>(.*?)<\/InvoiceID>/)[1]}</InvoiceID>\n\t\t" if object == "invoice"
end
end
return ["#{line_id}<#{text}>", "</#{text}>"]
else
return ["",""]
end
end
# called with CamelCase version of method name
def soap_call(name, method, params = {})
# puts "name = #{name}, method = #{method}, params = #{params.inspect}"
begin
result = @service.request(name) do |soap|
# soap.action = "KashFlow/#{method}"
params = params.pop if params.is_a?(Array)
params_xml = params.map do |field, value|
xml_tag = field.to_s.camelize
"<#{xml_tag}>#{value}</#{xml_tag}>"
end.join("\n") unless params.blank?
params_xml = params_xml.gsub(/Id>/,"ID>") if params_xml
params_xml = params_xml.gsub(/Dbid>/,"DBID>") if params_xml
pretext, posttext = object_wrapper(name, params_xml)
soap.xml = %[<?xml version="1.0" encoding="utf-8"?>
<soap:Envelope xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns:xsd="http://www.w3.org/2001/XMLSchema" xmlns:soap="http://schemas.xmlsoap.org/soap/envelope/">
<soap:Body>
<#{method} xmlns="KashFlow">
<UserName>#{@login}</UserName>
<Password>#{@password}</Password>
#{pretext}
#{params_xml}
#{posttext}
</#{method}>
</soap:Body>
</soap:Envelope>]
end.to_hash
rescue Savon::SOAP::Fault => e
puts "soap fault:" + e.inspect
return false
end
end
end
end
|
# keyring: System keyring abstraction library
# License: MIT (http://www.opensource.org/licenses/mit-license.php)
# Namespace for the keyring gem; holds the release identifier.
class Keyring
  # Gem release number, following semantic versioning.
  VERSION = '0.2.0'
end
Bump version to 0.3.0
# keyring: System keyring abstraction library
# License: MIT (http://www.opensource.org/licenses/mit-license.php)
# Namespace for the keyring gem; holds the release identifier.
class Keyring
  # Gem release number, following semantic versioning.
  VERSION = '0.3.0'
end
|
require 'nokogiri'
require 'open-uri'
require 'json'
require 'kirpich/providers/google_image'
require 'kirpich/providers/lurk'
require 'kirpich/providers/text'
require 'kirpich/providers/image'
require 'kirpich/providers/fga'
require 'kirpich/providers/slack_user'
module Kirpich
  # Builds the bot's replies: canned phrases from Kirpich::* word lists
  # (defined elsewhere) plus images/texts scraped live via Faraday, so
  # most results are random and network-dependent.
  class Answers
    # Address a random member of the Slack channel by name.
    def random_user(channel)
      user = Kirpich::Providers::SlackUser.random(channel)
      if user
        name = user['real_name']
        name = user['name'] if name.empty?
        appeal_text(name, 0)
      end
    end

    # Random canned phrase from the NO_FAP list.
    def no_fap
      Kirpich::NO_FAP.sample
    end

    # Drop the trigger word and randomly sprinkle interjections between
    # words (~1 in 7 after words longer than 3 chars with no punctuation).
    def materialize(text)
      result = []
      text.split(' ').each do |word|
        if word != 'материализуй'
          result << word
          if word.size > 3 && !(word =~ /[,.:;!?'\"\/#$%^&*()]/) && rand(7) == 5
            result << Kirpich::MEJ.sample
          end
        end
      end
      result.join(' ')
    end

    # Build a "let me google that for you" link.
    # NOTE(review): URI.encode was removed in Ruby 3.0 — this only works
    # on older Rubies.
    def google_search(text)
      "http://lmgtfy.com/?q=#{URI::encode(text)}"
    end

    # Fixed canned reply.
    def pidor_text
      'пидора ответ'
    end

    # Random kitten image URL; the timestamp query busts caches.
    def cat_image
      "http://www.randomkittengenerator.com/images/cats/rotator.php?#{Time.now.to_i}"
    end

    # Run text through the huifikator web service (cp1251 response);
    # falls back to a canned phrase on any error.
    def huifikatorr_text(text)
      url = "http://huifikator.ru/api.php?text=#{text}"
      response = Faraday.get url
      response.body.force_encoding("cp1251").encode("utf-8", undef: :replace)
    rescue
      Kirpich::CALL.sample
    end

    # Reply based on the last word of the incoming text.
    def response_text(text)
      return do_not_know_text unless text
      text = text.split(' ').last
      huifikatorr_text(text)
    end

    # Delegate to the image provider.
    def les_400_image
      Kirpich::Providers::Image.les_400_image
    end

    # Follow developerslife.ru's random redirect and scrape the gif URL
    # plus its caption; returns nil when no redirect is present.
    def developerslife_image
      response = Faraday.get "http://developerslife.ru/random"
      link = response.headers["location"]
      if link
        response = Faraday.get link
        page = Nokogiri::HTML(response.body)
        image = page.css('.entry .gif img')
        text = page.css('.entry .code .value')
        if image && text
          [image.first["src"], text.first.text.gsub("'",'')]
        end
      end
    end

    # Google image search with a canned fallback when nothing is found.
    def search_image(q, random)
      img = Kirpich::Providers::GoogleImage.search(q, random)
      img || NO_GIRLS.sample
    end

    # Like #search_image but against the xxx-filtered search.
    def search_xxx_image(q, random)
      img = Kirpich::Providers::GoogleImage.search_xxx(q, random)
      img || NO_GIRLS.sample
    end

    # Pick one of the offered options at random.
    def choose_text(options)
      text = options.sample
      appeal_text(text, 4)
    end

    # Random canned phrase from the DEN list.
    def den_text
      DEN.sample
    end

    # Random dance URL with a cache-busting timestamp.
    def dance_text
      "#{Kirpich::DANCE.sample}?#{Time.now.to_i}"
    end

    # Random headline + body scraped from breakingmad.me, with
    # interjections mixed in.
    def brakingmad_text
      response = Faraday.get 'http://breakingmad.me/ru/'
      txts = Nokogiri::HTML(response.body).css(".news-row").map { |e| e }.sample
      txts = "#{txts.css("h2").first.text}.\n\n#{txts.css('.news-full-forspecial').first.text}"
      materialize txts
    end

    # Random media image from pikabu's front page.
    def pikabu_image
      response = Faraday.get 'http://pikabu.ru/'
      urls = Nokogiri::HTML(response.body).css(".b-story__content_type_media img").map do |src|
        src['src']
      end
      urls.sample
    end

    # News text with interjections.
    # NOTE(review): rand(1) is always 0, so the interfax branch is
    # always taken — confirm whether rand(2) was intended.
    def news_text
      text = if rand(1) == 0
        Kirpich::Providers::Text.interfax
      else
        Kirpich::Providers::Text.ria
      end
      materialize text
    end

    # USD/EUR -> RUB exchange rates from Yahoo's YQL finance API.
    def currency
      response = Faraday.get "https://query.yahooapis.com/v1/public/yql?q=select+*+from+yahoo.finance.xchange+where+pair+=+%22USDRUB,EURRUB%22&format=json&env=store%3A%2F%2Fdatatables.org%2Falltableswithkeys&callback="
      result = JSON.parse response.body
      text = result["query"]["results"]["rate"].map do |rate|
        "#{rate["Name"]}: #{rate["Rate"]}"
      end
      text.join("\n")
    end

    # Google Maps search link.
    def geo_search(q)
      "https://www.google.ru/maps/search/#{q}"
    end

    # Random canned phrase from the GLAV list.
    def chef_text
      Kirpich::GLAV.sample
    end

    # The static rules text.
    def rules_text
      Kirpich::RULES
    end

    # Random canned phrase from the POX list.
    def poh_text
      Kirpich::POX.sample
    end

    # Random "don't know" phrase.
    def do_not_know_text
      Kirpich::HZ.sample
    end

    # Append a random form of address roughly 1 time in r (always when
    # r is 0); punctuation in the text suppresses the suffix.
    def appeal_text(text, r)
      if rand(r) === 0 && !(text =~ /[,.:;!?'\"\/#$%^&*()]/) || r === 0
        text + ", #{Kirpich::APPEAL.sample}"
      else
        text
      end
    end

    # Random greeting.
    def hello_text
      text = Kirpich::HELLO.sample
      appeal_text(text, 3)
    end

    # Random approval phrase.
    def ok_text
      text = Kirpich::ZBS.sample
      appeal_text(text, 2)
    end

    # Random "sin" phrase.
    def sin_text
      text = Kirpich::SIN.sample
      appeal_text(text, 2)
    end

    # Random dismissal phrase.
    def nah_text
      text = Kirpich::NAX.sample
      appeal_text(text, 2)
    end

    # Random call-out phrase.
    def call_text
      text = Kirpich::CALL.sample
      appeal_text(text, 4)
    end

    # Lurkmore search; canned fallback when no results.
    def lurk_search(text)
      result = Kirpich::Providers::Lurk.search(text)
      if result.empty?
        do_not_know_text
      else
        result
      end
    end

    # Random entry from the FGA provider.
    def fga_random
      Kirpich::Providers::Fga.random
    end

    # Random Lurkmore article.
    def lurk_random
      Kirpich::Providers::Lurk.random
    end

    # Image with a canned fallback if the provider raises.
    def random_ass_image
      Kirpich::Providers::Image.les_400_image
    rescue => e
      p e
      Kirpich::NO_GIRLS.sample
    end

    # Delegate to the image provider.
    def random_boobs_image
      Kirpich::Providers::Image.lesaintdesseins_image
    end
  end
end
Mention user
require 'nokogiri'
require 'open-uri'
require 'json'
require 'kirpich/providers/google_image'
require 'kirpich/providers/lurk'
require 'kirpich/providers/text'
require 'kirpich/providers/image'
require 'kirpich/providers/fga'
require 'kirpich/providers/slack_user'
module Kirpich
  # Builds the bot's replies: canned phrases from Kirpich::* word lists
  # (defined elsewhere) plus images/texts scraped live via Faraday, so
  # most results are random and network-dependent.
  class Answers
    # Address a random member of the Slack channel with an @-mention.
    def random_user(channel)
      user = Kirpich::Providers::SlackUser.random(channel)
      if user
        name = user['real_name']
        name = user['name'] if name.empty?
        # Prefix with "@" so Slack renders a mention.
        name = "@#{name}"
        appeal_text(name, 0)
      end
    end

    # Random canned phrase from the NO_FAP list.
    def no_fap
      Kirpich::NO_FAP.sample
    end

    # Drop the trigger word and randomly sprinkle interjections between
    # words (~1 in 7 after words longer than 3 chars with no punctuation).
    def materialize(text)
      result = []
      text.split(' ').each do |word|
        if word != 'материализуй'
          result << word
          if word.size > 3 && !(word =~ /[,.:;!?'\"\/#$%^&*()]/) && rand(7) == 5
            result << Kirpich::MEJ.sample
          end
        end
      end
      result.join(' ')
    end

    # Build a "let me google that for you" link.
    # NOTE(review): URI.encode was removed in Ruby 3.0 — this only works
    # on older Rubies.
    def google_search(text)
      "http://lmgtfy.com/?q=#{URI::encode(text)}"
    end

    # Fixed canned reply.
    def pidor_text
      'пидора ответ'
    end

    # Random kitten image URL; the timestamp query busts caches.
    def cat_image
      "http://www.randomkittengenerator.com/images/cats/rotator.php?#{Time.now.to_i}"
    end

    # Run text through the huifikator web service (cp1251 response);
    # falls back to a canned phrase on any error.
    def huifikatorr_text(text)
      url = "http://huifikator.ru/api.php?text=#{text}"
      response = Faraday.get url
      response.body.force_encoding("cp1251").encode("utf-8", undef: :replace)
    rescue
      Kirpich::CALL.sample
    end

    # Reply based on the last word of the incoming text.
    def response_text(text)
      return do_not_know_text unless text
      text = text.split(' ').last
      huifikatorr_text(text)
    end

    # Delegate to the image provider.
    def les_400_image
      Kirpich::Providers::Image.les_400_image
    end

    # Follow developerslife.ru's random redirect and scrape the gif URL
    # plus its caption; returns nil when no redirect is present.
    def developerslife_image
      response = Faraday.get "http://developerslife.ru/random"
      link = response.headers["location"]
      if link
        response = Faraday.get link
        page = Nokogiri::HTML(response.body)
        image = page.css('.entry .gif img')
        text = page.css('.entry .code .value')
        if image && text
          [image.first["src"], text.first.text.gsub("'",'')]
        end
      end
    end

    # Google image search with a canned fallback when nothing is found.
    def search_image(q, random)
      img = Kirpich::Providers::GoogleImage.search(q, random)
      img || NO_GIRLS.sample
    end

    # Like #search_image but against the xxx-filtered search.
    def search_xxx_image(q, random)
      img = Kirpich::Providers::GoogleImage.search_xxx(q, random)
      img || NO_GIRLS.sample
    end

    # Pick one of the offered options at random.
    def choose_text(options)
      text = options.sample
      appeal_text(text, 4)
    end

    # Random canned phrase from the DEN list.
    def den_text
      DEN.sample
    end

    # Random dance URL with a cache-busting timestamp.
    def dance_text
      "#{Kirpich::DANCE.sample}?#{Time.now.to_i}"
    end

    # Random headline + body scraped from breakingmad.me, with
    # interjections mixed in.
    def brakingmad_text
      response = Faraday.get 'http://breakingmad.me/ru/'
      txts = Nokogiri::HTML(response.body).css(".news-row").map { |e| e }.sample
      txts = "#{txts.css("h2").first.text}.\n\n#{txts.css('.news-full-forspecial').first.text}"
      materialize txts
    end

    # Random media image from pikabu's front page.
    def pikabu_image
      response = Faraday.get 'http://pikabu.ru/'
      urls = Nokogiri::HTML(response.body).css(".b-story__content_type_media img").map do |src|
        src['src']
      end
      urls.sample
    end

    # News text with interjections.
    # NOTE(review): rand(1) is always 0, so the interfax branch is
    # always taken — confirm whether rand(2) was intended.
    def news_text
      text = if rand(1) == 0
        Kirpich::Providers::Text.interfax
      else
        Kirpich::Providers::Text.ria
      end
      materialize text
    end

    # USD/EUR -> RUB exchange rates from Yahoo's YQL finance API.
    def currency
      response = Faraday.get "https://query.yahooapis.com/v1/public/yql?q=select+*+from+yahoo.finance.xchange+where+pair+=+%22USDRUB,EURRUB%22&format=json&env=store%3A%2F%2Fdatatables.org%2Falltableswithkeys&callback="
      result = JSON.parse response.body
      text = result["query"]["results"]["rate"].map do |rate|
        "#{rate["Name"]}: #{rate["Rate"]}"
      end
      text.join("\n")
    end

    # Google Maps search link.
    def geo_search(q)
      "https://www.google.ru/maps/search/#{q}"
    end

    # Random canned phrase from the GLAV list.
    def chef_text
      Kirpich::GLAV.sample
    end

    # The static rules text.
    def rules_text
      Kirpich::RULES
    end

    # Random canned phrase from the POX list.
    def poh_text
      Kirpich::POX.sample
    end

    # Random "don't know" phrase.
    def do_not_know_text
      Kirpich::HZ.sample
    end

    # Append a random form of address roughly 1 time in r (always when
    # r is 0); punctuation in the text suppresses the suffix.
    def appeal_text(text, r)
      if rand(r) === 0 && !(text =~ /[,.:;!?'\"\/#$%^&*()]/) || r === 0
        text + ", #{Kirpich::APPEAL.sample}"
      else
        text
      end
    end

    # Random greeting.
    def hello_text
      text = Kirpich::HELLO.sample
      appeal_text(text, 3)
    end

    # Random approval phrase.
    def ok_text
      text = Kirpich::ZBS.sample
      appeal_text(text, 2)
    end

    # Random "sin" phrase.
    def sin_text
      text = Kirpich::SIN.sample
      appeal_text(text, 2)
    end

    # Random dismissal phrase.
    def nah_text
      text = Kirpich::NAX.sample
      appeal_text(text, 2)
    end

    # Random call-out phrase.
    def call_text
      text = Kirpich::CALL.sample
      appeal_text(text, 4)
    end

    # Lurkmore search; canned fallback when no results.
    def lurk_search(text)
      result = Kirpich::Providers::Lurk.search(text)
      if result.empty?
        do_not_know_text
      else
        result
      end
    end

    # Random entry from the FGA provider.
    def fga_random
      Kirpich::Providers::Fga.random
    end

    # Random Lurkmore article.
    def lurk_random
      Kirpich::Providers::Lurk.random
    end

    # Image with a canned fallback if the provider raises.
    def random_ass_image
      Kirpich::Providers::Image.les_400_image
    rescue => e
      p e
      Kirpich::NO_GIRLS.sample
    end

    # Delegate to the image provider.
    def random_boobs_image
      Kirpich::Providers::Image.lesaintdesseins_image
    end
  end
end
|
# Lacquer gem namespace; exposes the current release string.
module Lacquer
  # Semantic version of the lacquer gem.
  VERSION = '0.6.2'
end
Version bump.
# Lacquer gem namespace; exposes the current release string.
module Lacquer
  # Semantic version of the lacquer gem.
  VERSION = '0.6.3'
end
|
require 'optparse'
require 'logger'
require_relative 'certificate'
module LetsCert
  # Command-line entry point for letscert: parses options and drives the
  # certificate workflow (only revocation is wired up so far).
  class Runner
    # Custom logger formatter: "[timestamp] LEVEL: message\n".
    class LoggerFormatter < Logger::Formatter
      # @private
      # Trailing newline added: Logger expects the formatter to
      # terminate each line.
      FORMAT = "[%s] %5s: %s\n"

      # @param [String] severity
      # @param [Datetime] time
      # @param [nil,String] progname
      # @param [String] msg
      # @return [String]
      def call(severity, time, progname, msg)
        FORMAT % [format_datetime(time), severity, msg2str(msg)]
      end

      private

      # Timestamp as "YYYY-MM-DD HH:MM:SS".
      # Fixed: the directive was "%Y-%d-%d", which printed the day twice
      # and never the month.
      def format_datetime(time)
        time.strftime("%Y-%m-%d %H:%M:%S")
      end
    end

    # Exit value for OK
    RETURN_OK = 1
    # Exit value for OK but with creation/renewal of certificate data
    RETURN_OK_CERT = 0
    # Exit value for error(s)
    RETURN_ERROR = 2

    # @return [Logger]
    attr_reader :logger

    # Run LetsCert
    # @return [Integer]
    # @see #run
    def self.run
      runner = new
      runner.parse_options
      runner.run
    end

    # Set up default options and a STDOUT logger.
    def initialize
      @options = {
        verbose: 0,
        domains: [],
        files: [],
        cert_key_size: 4096,
        # Fixed key (was :validmin): the option parser stores --valid-min
        # under :valid_min, so the default was invisible to readers of
        # @options[:valid_min].
        valid_min: 2_592_000,
        account_key_public_exponent: 65537,
        account_key_size: 4096,
        tos_sha256: '33d233c8ab558ba6c8ebc370a509acdded8b80e5d587aa5d192193f35226540f',
        user_agent: 'letscert/0',
        server: 'https://acme-v01.api.letsencrypt.org/directory',
      }
      @logger = Logger.new(STDOUT)
      @logger.formatter = LoggerFormatter.new
    end

    # @return [Integer] exit code
    #   * 0 if certificate data were created or updated
    #   * 1 if renewal was not necessary
    #   * 2 in case of errors
    def run
      if @options[:print_help]
        puts @opt_parser
        exit RETURN_OK
      end

      if @options[:show_version]
        puts "letscert #{LetsCert::VERSION}"
        puts "Copyright (c) 2016 Sylvain Daubert"
        puts "License MIT: see http://opensource.org/licenses/MIT"
        exit RETURN_OK
      end

      # Map the -v count onto a logger severity.
      case @options[:verbose]
      when 0
        @logger.level = Logger::Severity::WARN
      when 1
        @logger.level = Logger::Severity::INFO
      when 2..5
        @logger.level = Logger::Severity::DEBUG
      end

      @logger.debug { "options are: #{@options.inspect}" }

      begin
        if @options[:revoke]
          revoke
        end
        RETURN_OK
      rescue Error => ex
        # Report the failure instead of silently returning an error code.
        puts "Error: #{ex.message}"
        RETURN_ERROR
      end
    end

    # Build the option parser and populate @options from ARGV.
    def parse_options
      @opt_parser = OptionParser.new do |opts|
        # Fixed typo: "lestcert" -> "letscert".
        opts.banner = "Usage: letscert [options]"
        opts.separator('')

        opts.on('-h', '--help', 'Show this help message and exit') do
          @options[:print_help] = true
        end
        opts.on('-V', '--version', 'Show version and exit') do |v|
          @options[:show_version] = v
        end
        opts.on('-v', '--verbose', 'Run verbosely') { |v| @options[:verbose] += 1 if v }

        opts.separator("\nWebroot manager:")
        opts.on('-d', '--domain DOMAIN[:PATH]',
                'Domain name to include in the certificate.',
                'Must be specified at least once.',
                'Its path on the disk must also be provided.') do |domain|
          @options[:domains] << domain
        end
        opts.on('--default_root PATH', 'Default webroot path',
                'Use for all domains (nor need for PATH part',
                'of --domain DOMAIN:PATH)') do |path|
          @options[:default_root] = path
        end

        opts.separator("\nCertificate data files:")
        opts.on('--revoke', 'Revoke existing certificates') do |revoke|
          @options[:revoke] = revoke
        end
        opts.on("-f", "--file FILE", 'Input/output file.',
                'Can be specified multiple times',
                'Allowed values: account_key.json, cert.der,',
                'cert.pem, chain.pem, xternal.sh, full.pem,',
                'fullchain.pem, key.der, key.pem.') do |file|
          @options[:files] << file
        end
        opts.on('--cert-key-size BITS', Integer,
                'Certificate key size in bits',
                '(default: 4096)') do |bits|
          @options[:cert_key_size] = bits
        end
        opts.on('--valid-min SECONDS', Integer, 'Minimum validity of the resulting',
                'certificate (default: 2592000 (30 days))') do |time|
          @options[:valid_min] = time
        end

        opts.separator("\nRegistration:")
        # Fixed typo: "with he ACME" -> "with the ACME".
        opts.separator(" Automatically register an account with the ACME CA specified" +
                       " by --server")
        opts.separator('')
        opts.on('--account-key-public-exponent BITS', Integer,
                'Account key public exponent value (default: 65537)') do |bits|
          @options[:account_key_public_exponent] = bits
        end
        opts.on('--account-key-size BITS', Integer,
                'Account key size (default: 4096)') do |bits|
          @options[:account_key_size] = bits
        end
        opts.on('--tos-sha256 HASH', String,
                'SHA-256 digest of the content of Terms Of Service URI') do |hash|
          @options[:tos_sha256] = hash
        end
        opts.on('--email EMAIL', String,
                'E-mail address. CA is likely to use it to',
                'remind about expiring certificates, as well',
                'as for account recovery. It is highly',
                'recommended to set this value.') do |email|
          @options[:email] = email
        end

        opts.separator("\nHTTP:")
        opts.separator(' Configure properties of HTTP requests and responses.')
        opts.separator('')
        opts.on('--user-agent NAME', 'User-Agent sent in all HTTP requests',
                '(default: letscert/0)') do |ua|
          @options[:user_agent] = ua
        end
        opts.on('--server URI', 'URI for the CA ACME API endpoint',
                '(default: https://acme-v01.api.letsencrypt.org/directory)') do |uri|
          @options[:server] = uri
        end
      end

      @opt_parser.parse!
    end

    # Revoke existing certificates. Not implemented yet.
    def revoke
    end
  end
end
Runner::LoggerFormatter: add "\n" to format.
Runner#run: puts error message in case of error.
require 'optparse'
require 'logger'
require_relative 'certificate'
module LetsCert
  # Command-line entry point for letscert: parses options and drives the
  # certificate workflow (only revocation is wired up so far).
  class Runner
    # Custom logger formatter: "[timestamp] LEVEL: message\n".
    class LoggerFormatter < Logger::Formatter
      # @private
      FORMAT = "[%s] %5s: %s\n"

      # @param [String] severity
      # @param [Datetime] time
      # @param [nil,String] progname
      # @param [String] msg
      # @return [String]
      def call(severity, time, progname, msg)
        FORMAT % [format_datetime(time), severity, msg2str(msg)]
      end

      private

      # Timestamp as "YYYY-MM-DD HH:MM:SS".
      # Fixed: the directive was "%Y-%d-%d", which printed the day twice
      # and never the month.
      def format_datetime(time)
        time.strftime("%Y-%m-%d %H:%M:%S")
      end
    end

    # Exit value for OK
    RETURN_OK = 1
    # Exit value for OK but with creation/renewal of certificate data
    RETURN_OK_CERT = 0
    # Exit value for error(s)
    RETURN_ERROR = 2

    # @return [Logger]
    attr_reader :logger

    # Run LetsCert
    # @return [Integer]
    # @see #run
    def self.run
      runner = new
      runner.parse_options
      runner.run
    end

    # Set up default options and a STDOUT logger.
    def initialize
      @options = {
        verbose: 0,
        domains: [],
        files: [],
        cert_key_size: 4096,
        # Fixed key (was :validmin): the option parser stores --valid-min
        # under :valid_min, so the default was invisible to readers of
        # @options[:valid_min].
        valid_min: 2_592_000,
        account_key_public_exponent: 65537,
        account_key_size: 4096,
        tos_sha256: '33d233c8ab558ba6c8ebc370a509acdded8b80e5d587aa5d192193f35226540f',
        user_agent: 'letscert/0',
        server: 'https://acme-v01.api.letsencrypt.org/directory',
      }
      @logger = Logger.new(STDOUT)
      @logger.formatter = LoggerFormatter.new
    end

    # @return [Integer] exit code
    #   * 0 if certificate data were created or updated
    #   * 1 if renewal was not necessary
    #   * 2 in case of errors
    def run
      if @options[:print_help]
        puts @opt_parser
        exit RETURN_OK
      end

      if @options[:show_version]
        puts "letscert #{LetsCert::VERSION}"
        puts "Copyright (c) 2016 Sylvain Daubert"
        puts "License MIT: see http://opensource.org/licenses/MIT"
        exit RETURN_OK
      end

      # Map the -v count onto a logger severity.
      case @options[:verbose]
      when 0
        @logger.level = Logger::Severity::WARN
      when 1
        @logger.level = Logger::Severity::INFO
      when 2..5
        @logger.level = Logger::Severity::DEBUG
      end

      @logger.debug { "options are: #{@options.inspect}" }

      begin
        if @options[:revoke]
          revoke
        end
        RETURN_OK
      rescue Error => ex
        puts "Error: #{ex.message}"
        RETURN_ERROR
      end
    end

    # Build the option parser and populate @options from ARGV.
    def parse_options
      @opt_parser = OptionParser.new do |opts|
        # Fixed typo: "lestcert" -> "letscert".
        opts.banner = "Usage: letscert [options]"
        opts.separator('')

        opts.on('-h', '--help', 'Show this help message and exit') do
          @options[:print_help] = true
        end
        opts.on('-V', '--version', 'Show version and exit') do |v|
          @options[:show_version] = v
        end
        opts.on('-v', '--verbose', 'Run verbosely') { |v| @options[:verbose] += 1 if v }

        opts.separator("\nWebroot manager:")
        opts.on('-d', '--domain DOMAIN[:PATH]',
                'Domain name to include in the certificate.',
                'Must be specified at least once.',
                'Its path on the disk must also be provided.') do |domain|
          @options[:domains] << domain
        end
        opts.on('--default_root PATH', 'Default webroot path',
                'Use for all domains (nor need for PATH part',
                'of --domain DOMAIN:PATH)') do |path|
          @options[:default_root] = path
        end

        opts.separator("\nCertificate data files:")
        opts.on('--revoke', 'Revoke existing certificates') do |revoke|
          @options[:revoke] = revoke
        end
        opts.on("-f", "--file FILE", 'Input/output file.',
                'Can be specified multiple times',
                'Allowed values: account_key.json, cert.der,',
                'cert.pem, chain.pem, xternal.sh, full.pem,',
                'fullchain.pem, key.der, key.pem.') do |file|
          @options[:files] << file
        end
        opts.on('--cert-key-size BITS', Integer,
                'Certificate key size in bits',
                '(default: 4096)') do |bits|
          @options[:cert_key_size] = bits
        end
        opts.on('--valid-min SECONDS', Integer, 'Minimum validity of the resulting',
                'certificate (default: 2592000 (30 days))') do |time|
          @options[:valid_min] = time
        end

        opts.separator("\nRegistration:")
        # Fixed typo: "with he ACME" -> "with the ACME".
        opts.separator(" Automatically register an account with the ACME CA specified" +
                       " by --server")
        opts.separator('')
        opts.on('--account-key-public-exponent BITS', Integer,
                'Account key public exponent value (default: 65537)') do |bits|
          @options[:account_key_public_exponent] = bits
        end
        opts.on('--account-key-size BITS', Integer,
                'Account key size (default: 4096)') do |bits|
          @options[:account_key_size] = bits
        end
        opts.on('--tos-sha256 HASH', String,
                'SHA-256 digest of the content of Terms Of Service URI') do |hash|
          @options[:tos_sha256] = hash
        end
        opts.on('--email EMAIL', String,
                'E-mail address. CA is likely to use it to',
                'remind about expiring certificates, as well',
                'as for account recovery. It is highly',
                'recommended to set this value.') do |email|
          @options[:email] = email
        end

        opts.separator("\nHTTP:")
        opts.separator(' Configure properties of HTTP requests and responses.')
        opts.separator('')
        opts.on('--user-agent NAME', 'User-Agent sent in all HTTP requests',
                '(default: letscert/0)') do |ua|
          @options[:user_agent] = ua
        end
        opts.on('--server URI', 'URI for the CA ACME API endpoint',
                '(default: https://acme-v01.api.letsencrypt.org/directory)') do |uri|
          @options[:server] = uri
        end
      end

      @opt_parser.parse!
    end

    # Revoke existing certificates. Not implemented yet.
    def revoke
    end
  end
end
|
# Homebrew cask for the Noto Sans Old Turkic font.
cask 'font-noto-sans-old-turkic' do
  # Upstream serves an unversioned, in-place-updated archive, so no
  # version pin or checksum is possible.
  version :latest
  sha256 :no_check

  # noto-website-2.storage.googleapis.com/ was verified as official when first introduced to the cask
  url 'https://noto-website-2.storage.googleapis.com/pkgs/NotoSansOldTurkic-unhinted.zip'
  name 'Noto Sans Old Turkic'
  homepage 'https://www.google.com/get/noto/#sans-orkh'

  # Font file installed from the archive.
  font 'NotoSansOldTurkic-Regular.ttf'
end
font-noto-sans-old-turkic.rb: fix for new style
# Homebrew cask for the Noto Sans Old Turkic font.
cask "font-noto-sans-old-turkic" do
  # Upstream serves an unversioned, in-place-updated archive, so no
  # version pin or checksum is possible.
  version :latest
  sha256 :no_check

  # noto-website-2.storage.googleapis.com/ was verified as official when first introduced to the cask
  url "https://noto-website-2.storage.googleapis.com/pkgs/NotoSansOldTurkic-unhinted.zip"
  name "Noto Sans Old Turkic"
  homepage "https://www.google.com/get/noto/#sans-orkh"

  # Font file installed from the archive.
  font "NotoSansOldTurkic-Regular.ttf"
end
|
# Homebrew cask for the Photo Supreme single-user edition.
cask "photo-supreme-single-user" do
  version "7.4.2.4629"
  sha256 :no_check # required as upstream package is updated in-place

  url "https://trial.idimager.com/PhotoSupreme_V#{version.major}.pkg"
  name "Photo Supreme Single User"
  desc "Digital Asset Management"
  homepage "https://www.idimager.com/home"

  livecheck do
    url "https://www.idimager.com/what-s-new-in-photo-supreme-v#{version.major}"
    # Fixed: ">s*" matched a literal run of "s" characters; "\s*" skips
    # whitespace between the closing ">" and the version number.
    regex(/>\s*(\d+(?:\.\d+)+)[\s<]/i)
  end

  pkg "PhotoSupreme_V#{version.major}.pkg"

  uninstall pkgutil: "com.idimager.idimagersu"
end
photo-supreme-single-user 7.4.2.4635
Update photo-supreme-single-user from 7.4.2.4629 to 7.4.2.4635
Closes #135708.
Signed-off-by: BrewTestBot <8a898ee6867e4f2028e63d2a6319b2224641c06c@users.noreply.github.com>
# Homebrew cask for the Photo Supreme single-user edition.
cask "photo-supreme-single-user" do
  version "7.4.2.4635"
  sha256 :no_check # required as upstream package is updated in-place

  url "https://trial.idimager.com/PhotoSupreme_V#{version.major}.pkg"
  name "Photo Supreme Single User"
  desc "Digital Asset Management"
  homepage "https://www.idimager.com/home"

  livecheck do
    url "https://www.idimager.com/what-s-new-in-photo-supreme-v#{version.major}"
    # Fixed: ">s*" matched a literal run of "s" characters; "\s*" skips
    # whitespace between the closing ">" and the version number.
    regex(/>\s*(\d+(?:\.\d+)+)[\s<]/i)
  end

  pkg "PhotoSupreme_V#{version.major}.pkg"

  uninstall pkgutil: "com.idimager.idimagersu"
end
|
#!/bin/ruby
require 'find'
require 'json'
require 'octokit'
require 'fileutils'
require 'erb'
require 'yaml'
require 'digest'
$public_html = "public_html"
$markdown_src = "src/markdown"
$template_src = "src/templates"
$cache_file = "cache/pages.yaml"
## Page class stores data for each markdown file.
class Page
  attr_reader :title, :source, :target, :content, :date, :section

  # Registry of every Page instance created; powers the class-level
  # query methods below. NOTE(review): a class variable, shared with any
  # subclass.
  @@instance_collector = []

  ## Initialize the class: derive all metadata from the source path and
  ## render the markdown to HTML (reads and writes the global $cache).
  def initialize(in_file)
    @source = in_file
    @title = source2title in_file
    @tags = source2tags in_file
    @section = source2section in_file
    @content = md2html in_file
    @date = source2date in_file, @section
    @target = source2target in_file, @section
    @@instance_collector << self
  end

  ## "dir/My_Page#tag.md" -> "My Page".
  ## NOTE(review): the dot in /.md$/ is unescaped, so any character
  ## followed by "md" at end of string matches — confirm intent.
  def source2title(in_file)
    title = File.basename in_file
    title = title.sub /.md$/, '' # Remove the extension
    title = title.sub /#.*/, '' # Remove the tags
    title.gsub /_/, ' ' # Convert underscore to spaces
  end

  ## "My_Page#a#b.md" -> ["a", "b"].
  def source2tags(in_file)
    tags = File.basename in_file
    tags = tags.sub /.md$/, '' # Remove the extension
    tags = tags.split '#' # Separate the tags
    tags.drop 1 # Drop the title
  end

  ## Output path under $public_html, inside the section directory when
  ## the page belongs to one.
  def source2target(in_file, section)
    out_file = File.basename(in_file).sub /.md$/, ".html"
    if section != nil
      "#{$public_html}/#{section}/#{out_file}"
    else
      "#{$public_html}/#{out_file}"
    end
  end

  ## First path component below $markdown_src, or nil for top-level files.
  def source2section(in_file)
    section = File.dirname(in_file).sub /^#{$markdown_src}/, ''
    section.split('/')[1]
  end

  ## Dated posts live under section/YYYY/MM/DD/ — parse that path;
  ## otherwise fall back to the source file's mtime.
  def source2date(in_file, section)
    if section and File.dirname(in_file) != "#{$markdown_src}/#{section}"
      date = File.dirname(in_file).sub /^#{$markdown_src}\/#{section}\//, ''
      date = date.split('/')
      Time.new date[0], date[1], date[2]
    else
      File.mtime in_file
    end
  end

  ## Render markdown to HTML via the GitHub API, memoized in $cache
  ## keyed by source path with an MD5 freshness check.
  def md2html(in_file)
    ## Only regenerate if what is in cache doesn't match
    md5_in = Digest::MD5.hexdigest File.read(in_file)
    if $cache[in_file] != nil
      md5_cache = $cache[in_file]["md5sum"]
      return $cache[in_file]["content"] if md5_in == md5_cache
    end
    ## If there is an access token in the environment, we can use that to auth
    token = ENV['TOKEN']
    if token != nil
      client = Octokit::Client.new :access_token => token
      content = client.markdown File.read(in_file), :mode => "gfm"
    else
      content = Octokit.markdown File.read(in_file), :mode => "gfm"
    end
    ## Update the cache
    $cache[in_file] = { "md5sum" => md5_in, "content" => content }
    ## We are done
    return content
  end

  ## Re-render this page's HTML from its source file.
  def refresh_content
    @content = md2html @source
  end

  ## Check if this page is an index
  def is_index?
    @source =~ /\/index.md$/
  end

  ## Return a link to the page (site-relative); index pages link to
  ## their directory instead of the file.
  def link
    if @title == "index"
      File.dirname(@target).sub(/^#{$public_html}/, '') + "/"
    else
      @target.sub /^#{$public_html}/, ''
    end
  end

  def to_s
    @title
  end

  ## Write the full html page: pre + main + post ERB templates, each
  ## evaluated against this instance's binding.
  ## NOTE(review): positional ERB.new args (safe_level) were removed in
  ## Ruby 3.x; modern code would pass trim_mode: '-'.
  def render
    b = binding
    ## Load the templates
    pre_template = ERB.new(File.read("#{$template_src}/pre.html.erb"), 0, '-')
    main_template = ERB.new(File.read("#{$template_src}/main.html.erb"), 0, '-')
    post_template = ERB.new(File.read("#{$template_src}/post.html.erb"), 0, '-')
    ## Generate the html page
    pre = pre_template.result b
    post = post_template.result b
    main = main_template.result b
    File.open(@target, "w") { |f| f.write pre + main + post }
  end

  ## Return array of each page
  def self.all_pages
    @@instance_collector
  end

  ## Return all sections as array (nil sections excluded)
  def self.all_sections
    sections = {}
    @@instance_collector.each do |page|
      sections[page.section] = true
    end
    array = []
    sections.each_key { |k| array << k if k }
    array
  end

  ## Return all the pages that are part of a section (indexes excluded)
  def self.section(section)
    p = []
    @@instance_collector.each do |x|
      next if x.is_index?
      p << x if x.section == section
    end
    return p
  end

  ## Find the page with the matching title, or nil when absent
  def self.with_title(title)
    @@instance_collector.each do |x|
      return x if x.title == title
    end
    return nil
  end
end
## Regenerate the whole site: reset public_html, render every markdown
## source through Page, and persist the markdown->HTML cache.
def render_site
  ## Clear the existing public_html directory
  FileUtils::rm_rf $public_html
  FileUtils::mkdir_p $public_html

  ## Symlink the shared static asset directories into the output tree
  FileUtils::symlink "../assets", $public_html
  FileUtils::symlink "../bower_components", $public_html

  ## Load/initialize the cache.
  ## Fixed: File.exist? — the File.exists? alias was removed in Ruby 3.2.
  if File.exist? $cache_file
    $cache = YAML::load_file $cache_file
  else
    FileUtils::mkdir_p File.dirname($cache_file)
    $cache = {}
  end

  ## Load the data for the pages
  Find.find($markdown_src) do |in_file|
    ## Only operate on files
    next unless File.file? in_file
    ## Only operate on markdown files.
    ## Fixed: dot escaped — /.md$/ also matched names like "foomd".
    next unless in_file =~ /\.md$/
    Page.new in_file
  end

  ## Mirror the source directory layout under public_html
  Find.find($markdown_src) do |src_dir|
    ## We only care about directories
    next unless File.directory? src_dir
    # Convert the path name
    target_dir = src_dir.sub /^#{$markdown_src}/, $public_html
    # Create the directory
    FileUtils::mkdir_p target_dir
  end

  ## Generate each page
  Page.all_pages.each { |page| page.render }

  ## Save the cache file
  File.open($cache_file, "w") { |f| f.write YAML::dump($cache) }
end

render_site
Organized code into classes
#!/bin/ruby
require 'find'
require 'json'
require 'octokit'
require 'fileutils'
require 'erb'
require 'yaml'
require 'digest'
$public_html = "public_html"
$markdown_src = "src/markdown"
$template_src = "src/templates"
$cache_file = "cache/pages.yaml"
## Page class stores data for each markdown file.
class Page
  attr_reader :title, :source, :target, :content, :date, :section

  # Registry of every Page instance created; powers the class-level
  # query methods below. NOTE(review): a class variable, shared with any
  # subclass.
  @@instance_collector = []

  ## Initialize the class: derive all metadata from the source path and
  ## render the markdown to HTML (reads and writes the global $cache).
  def initialize(in_file)
    @source = in_file
    @title = source2title in_file
    @tags = source2tags in_file
    @section = source2section in_file
    @content = md2html in_file
    @date = source2date in_file, @section
    @target = source2target in_file, @section
    @@instance_collector << self
  end

  ## "dir/My_Page#tag.md" -> "My Page".
  ## NOTE(review): the dot in /.md$/ is unescaped, so any character
  ## followed by "md" at end of string matches — confirm intent.
  def source2title(in_file)
    title = File.basename in_file
    title = title.sub /.md$/, '' # Remove the extension
    title = title.sub /#.*/, '' # Remove the tags
    title.gsub /_/, ' ' # Convert underscore to spaces
  end

  ## "My_Page#a#b.md" -> ["a", "b"].
  def source2tags(in_file)
    tags = File.basename in_file
    tags = tags.sub /.md$/, '' # Remove the extension
    tags = tags.split '#' # Separate the tags
    tags.drop 1 # Drop the title
  end

  ## Output path under $public_html, inside the section directory when
  ## the page belongs to one.
  def source2target(in_file, section)
    out_file = File.basename(in_file).sub /.md$/, ".html"
    if section != nil
      "#{$public_html}/#{section}/#{out_file}"
    else
      "#{$public_html}/#{out_file}"
    end
  end

  ## First path component below $markdown_src, or nil for top-level files.
  def source2section(in_file)
    section = File.dirname(in_file).sub /^#{$markdown_src}/, ''
    section.split('/')[1]
  end

  ## Dated posts live under section/YYYY/MM/DD/ — parse that path;
  ## otherwise fall back to the source file's mtime.
  def source2date(in_file, section)
    if section and File.dirname(in_file) != "#{$markdown_src}/#{section}"
      date = File.dirname(in_file).sub /^#{$markdown_src}\/#{section}\//, ''
      date = date.split('/')
      Time.new date[0], date[1], date[2]
    else
      File.mtime in_file
    end
  end

  ## Render markdown to HTML via the GitHub API, memoized in $cache
  ## keyed by source path with an MD5 freshness check.
  def md2html(in_file)
    ## Only regenerate if what is in cache doesn't match
    md5_in = Digest::MD5.hexdigest File.read(in_file)
    if $cache[in_file] != nil
      md5_cache = $cache[in_file]["md5sum"]
      return $cache[in_file]["content"] if md5_in == md5_cache
    end
    ## If there is an access token in the environment, we can use that to auth
    token = ENV['TOKEN']
    if token != nil
      client = Octokit::Client.new :access_token => token
      content = client.markdown File.read(in_file), :mode => "gfm"
    else
      content = Octokit.markdown File.read(in_file), :mode => "gfm"
    end
    ## Update the cache
    $cache[in_file] = { "md5sum" => md5_in, "content" => content }
    ## We are done
    return content
  end

  ## Re-render this page's HTML from its source file.
  def refresh_content
    @content = md2html @source
  end

  ## Check if this page is an index
  def is_index?
    @source =~ /\/index.md$/
  end

  ## Return a link to the page (site-relative); index pages link to
  ## their directory instead of the file.
  def link
    if @title == "index"
      File.dirname(@target).sub(/^#{$public_html}/, '') + "/"
    else
      @target.sub /^#{$public_html}/, ''
    end
  end

  def to_s
    @title
  end

  ## Write the full html page: pre + main + post ERB templates, each
  ## evaluated against this instance's binding.
  ## NOTE(review): positional ERB.new args (safe_level) were removed in
  ## Ruby 3.x; modern code would pass trim_mode: '-'.
  def render
    b = binding
    ## Load the templates
    pre_template = ERB.new(File.read("#{$template_src}/pre.html.erb"), 0, '-')
    main_template = ERB.new(File.read("#{$template_src}/main.html.erb"), 0, '-')
    post_template = ERB.new(File.read("#{$template_src}/post.html.erb"), 0, '-')
    ## Generate the html page
    pre = pre_template.result b
    post = post_template.result b
    main = main_template.result b
    ## Ensure the target directory exists before writing
    FileUtils::mkdir_p File.dirname @target
    File.open(@target, "w") { |f| f.write pre + main + post }
  end

  ## Return array of each page
  def self.all_pages
    @@instance_collector
  end

  ## Return all sections as array (nil sections excluded)
  def self.all_sections
    sections = {}
    @@instance_collector.each do |page|
      sections[page.section] = true
    end
    array = []
    sections.each_key { |k| array << k if k }
    array
  end

  ## Return all the pages that are part of a section (indexes excluded)
  def self.section(section)
    p = []
    @@instance_collector.each do |x|
      next if x.is_index?
      p << x if x.section == section
    end
    return p
  end

  ## Find the page with the matching title, or nil when absent
  def self.with_title(title)
    @@instance_collector.each do |x|
      return x if x.title == title
    end
    return nil
  end

  ## Generate all pages
  def self.read_all
    ## Load the data for the pages
    Find.find($markdown_src) do |in_file|
      ## Only operate on files
      next unless File.file? in_file
      ## Only operate on markdown
      next unless in_file =~ /.md$/
      Page.new in_file
    end
  end

  ## Render all pages
  def self.render_all
    @@instance_collector.each { |p| p.render }
  end
end
## Persists the markdown-rendering cache as a YAML file on disk.
class Cache
  ## Read the cache file; returns an empty hash when it doesn't exist.
  ## Fixed: File.exist? — the File.exists? alias was removed in Ruby 3.2.
  def self.read(cache_file)
    return {} unless File.exist? cache_file
    YAML::load_file cache_file
  end

  ## Save the cache, creating the parent directory if needed.
  def self.write(cache_file, cache)
    FileUtils::mkdir_p File.dirname(cache_file)
    File.write cache_file, YAML::dump(cache)
  end
end
## Orchestrates a full site build: output directory setup, cache
## loading, page rendering, cache persistence.
class Site
  ## Reset public_html and symlink the shared static asset directories in.
  def self.init_public_html
    ## Clear the existing public_html directory
    FileUtils::rm_rf $public_html
    FileUtils::mkdir_p $public_html
    ## Symlink the needful
    FileUtils::symlink "../assets", $public_html
    FileUtils::symlink "../bower_components", $public_html
  end

  ## Build the whole site end to end.
  def self.render
    ## Initialize the site
    Site.init_public_html
    ## Load the cache
    $cache = Cache.read $cache_file
    ## Load the data for the pages
    Page.read_all
    ## Generate each page
    Page.render_all
    ## Save the cache
    Cache.write $cache_file, $cache
  end
end

## Build the site when this script is executed.
Site.render
|
require "thor"
require "uri"
# Thor CLI: vendors an external repository into the current tree.
class Gip < Thor
desc "import REPOSITORY_URL [target]",
"Imports the repository at target in the current tree. If target is absent, the repository's base name will be used. If --commit is specified, that commit will be imported, else HEAD."
method_options :commit => :optional
# Announce the import of repository_url into target at the chosen commit.
def import(repository_url, target=nil)
uri = URI.parse(repository_url)
# Default the target to the repo's base name with its extension stripped
# ("/path/foo.git" -> "foo").
target ||= File.basename(uri.path).sub(File.extname(uri.path), "")
commit = options[:commit] || 'HEAD'
puts "Importing #{repository_url} into #{target} at #{commit}"
end
end
Implemented import command
require "thor"
require "uri"
require "csv"
# Thor CLI for vendoring ("importing") external git repositories into
# subdirectories of the current repository via git read-tree, recording the
# mapping of remote names to URLs in a .gipinfo CSV file.
class Gip < Thor
desc "import REPOSITORY_URL [target]", <<DESC
Imports the repository at target in the current tree.
If target is absent, the repository's base name will be used.
--remote specifies the name of the remote. If unspecified, the repository's base name will be used.
--commit specifies which commit to import. If unspecified, 'REMOTE/master' will be used. You can use any <tree-ish> that Git will recognize (SHA-1, branch name, tag name). The remote's name will always be prefixed to this value.
In all cases, a .gipinfo file will be created/updated with the correct remotes specified. The .gipinfo file is a CSV file with 2 columns: remote name,repository URL.
DESC
method_options :commit => :optional, :remote => :optional, :verbose => 0
# Add the repo as a remote, fetch it, graft the requested tree-ish under
# target/, record the remote in .gipinfo and commit the result.
def import(repository_url, target=nil)
uri = URI.parse(repository_url)
# Default both target directory and remote name to the repository's base
# name with its extension (e.g. ".git") stripped.
target = File.basename(uri.path).sub(File.extname(uri.path), "") unless target
remote_name = options[:remote]
remote_name = File.basename(uri.path).sub(File.extname(uri.path), "") unless remote_name
commit = options[:commit]
commit = "master" unless commit
# The commit is always qualified by the remote, e.g. "origin/master".
commit = "#{remote_name}/#{commit}"
puts "Importing #{repository_url} into #{target} at #{commit}"
begin
git :remote, :add, remote_name, repository_url
rescue CommandError => e
# 128 means remote already exists
raise unless e.exitstatus == 128
end
git :fetch, remote_name
# Graft the fetched tree under target/ in the index and working tree.
git :"read-tree", "--prefix=#{target}/", "-u", commit
gipinfo(remote_name => repository_url)
git :add, ".gipinfo"
git :commit, "-m", "Vendored #{repository_url} at #{commit}", :verbose => true
end
private
# Merge the given remote-name => URL pairs into the on-disk .gipinfo.
def gipinfo(remotes)
info = read_gipinfo
info.merge!(remotes)
write_gipinfo(info)
end
# Read .gipinfo into a Hash of remote name => URL, skipping comment rows.
# Returns an empty Hash when the file does not exist.
def read_gipinfo
if File.file?(".gipinfo")
CSV.read(".gipinfo").inject(Hash.new) do |hash, (name,url)|
next hash if name =~ /\s*#/
hash[name] = url
hash
end
else
Hash.new
end
end
# Rewrite .gipinfo: a header of comment rows followed by one name,url row
# per remote.
def write_gipinfo(remotes)
CSV.open(".gipinfo", "w") do |io|
io << ["# This is the GIP gipinfo file. See http://github.com/francois/gip for details. Gip is a RubyGem: sudo gem install francois-gip."]
io << ["# This file maps a series of remote names to repository URLs. This file is here to ease the work of your team."]
io << ["# Run 'gip remotify' to generate the appropriate remotes in your repository."]
remotes.each do |name,url|
io << [name, url]
end
end
end
# Run a git subcommand through run_cmd.
def git(*args)
run_cmd :git, *args
end
# Build and run a shell command line. A trailing Hash is treated as options
# (:verbose => echo the command's output). Args containing whitespace or
# shell metacharacters are single-quoted; falsy args become "". LANGUAGE is
# forced to "C" so the tool's output is locale-independent.
def run_cmd(executable, *args)
opts = args.last.is_a?(Hash) ? args.pop : Hash.new
args.collect! {|arg| arg.to_s =~ /\s|\*|\?|"|\n|\r/ ? %Q('#{arg}') : arg}
args.collect! {|arg| arg ? arg : '""'}
cmd = %Q|#{executable} #{args.join(' ')}|
p cmd if options[:verbose] > 0
original_language = ENV["LANGUAGE"]
begin
ENV["LANGUAGE"] = "C"
value = run_real(cmd)
p value if options[:verbose] > 1 && !value.to_s.strip.empty?
puts value if opts[:verbose]
return value
ensure
ENV["LANGUAGE"] = original_language
end
end
begin
raise LoadError, "Not implemented on Win32 machines" if RUBY_PLATFORM =~ /mswin32/
require "open4"
# open4-backed runner: captures stdout/stderr and raises CommandError on
# failure. NOTE(review): exit status 1 is tolerated in both runners —
# presumably for commands where 1 is benign; confirm before relying on it.
def run_real(cmd)
begin
pid, stdin, stdout, stderr = Open4::popen4(cmd)
_, cmdstatus = Process.waitpid2(pid)
raise CommandError.new("#{cmd.inspect} exited with status: #{cmdstatus.exitstatus}\n#{stderr.read}", cmdstatus) unless cmdstatus.success? || cmdstatus.exitstatus == 1
return stdout.read
rescue Errno::ENOENT
raise BadCommand, cmd.inspect
end
end
rescue LoadError
# On platforms where open4 is unavailable, we fallback to running using
# the backtick method of Kernel.
def run_real(cmd)
out = `#{cmd}`
raise BadCommand, cmd.inspect if $?.exitstatus == 127
raise CommandError.new("#{cmd.inspect} exited with status: #{$?.exitstatus}", $?) unless $?.success? || $?.exitstatus == 1
out
end
end
# Raised when the executable itself cannot be run (ENOENT / exit 127).
class BadCommand < StandardError; end
# Raised when a command exits with a failure status; wraps Process::Status.
class CommandError < StandardError
def initialize(message, status)
super(message)
@status = status
end
def exitstatus
@status.exitstatus
end
end
end
|
require 'lightspeed/base'
module Lightspeed
# An inventory item resource from the Lightspeed API.
# Accessor names intentionally mirror the API's camelCase/PascalCase field
# names verbatim so attributes can be assigned straight from decoded API
# payloads; ItemShops and Prices carry embedded sub-resources.
class Item < Lightspeed::Base
attr_accessor :systemSku, :defaultCost, :avgCost, :discountable, :tax,
:archived, :itemType, :description, :modelYear, :upc, :ean, :customSku,
:manufacturerSku, :createTime, :timeStamp,
# Association keys
:categoryID, :taxClassID, :departmentID, :itemMatrixID, :manufacturerID, :seasonID,
:defaultVendorID, :itemECommerceID,
# Embedded
:ItemShops, :Prices
private
# The API identifies this resource by its "itemID" field.
def self.id_field
"itemID"
end
end
end
Items may come with notes if they've been edited
So I would know if I thoroughly read the API documentation, but... well, ain't nobody got time for that
require 'lightspeed/base'
module Lightspeed
# An inventory item resource from the Lightspeed API.
# Accessor names intentionally mirror the API's camelCase/PascalCase field
# names verbatim so attributes can be assigned straight from decoded API
# payloads; ItemShops, Prices and Note carry embedded sub-resources.
class Item < Lightspeed::Base
attr_accessor :systemSku, :defaultCost, :avgCost, :discountable, :tax,
:archived, :itemType, :description, :modelYear, :upc, :ean, :customSku,
:manufacturerSku, :createTime, :timeStamp,
# Association keys
:categoryID, :taxClassID, :departmentID, :itemMatrixID, :manufacturerID, :seasonID,
:defaultVendorID, :itemECommerceID,
# Embedded (per commit note: Note appears when an item has been edited)
:ItemShops, :Prices, :Note
private
# The API identifies this resource by its "itemID" field.
def self.id_field
"itemID"
end
end
end
|
module Linear
# A single-term function of the form f(x) = slope * x**power + y_intercept.
class Function
attr_reader :slope, :y_intercept, :power

# slope and y_intercept are Numeric coefficients; power is the exponent
# applied to x (default 1, i.e. an ordinary linear function).
def initialize(slope, y_intercept, power=1)
@slope, @y_intercept, @power = slope, y_intercept, power
end

# Evaluate the function at x. Raises ArgumentError for non-numeric input.
def execute(x)
raise ArgumentError unless x.kind_of? Numeric
slope * x ** power + y_intercept
end
alias f execute

# Human-readable form, e.g. "f(x) = 2x + 3" or "f(x) = 5x".
# BUGFIX: the predicate was called as `direct_variation` (missing `?`),
# which raised NameError on every call to to_s.
def to_s
"f(x) = #{idx slope}x#{power_string unless power == 1}#{" + #{@y_intercept}" unless direct_variation?}"
end

# A direct variation has no constant term and power 1 (f(x) = kx).
def direct_variation?
y_intercept.zero? && power == 1
end
alias dv? direct_variation?

# Convert to a DirectVariation; raises when the function has a constant
# term or a non-unit power.
def to_direct_variation
if direct_variation?
require "linear/direct_variation"
DirectVariation.new slope
else
raise "Unable to convert to DirectVariation"
end
end
alias to_dv to_direct_variation

private

# Unicode superscript glyphs for each decimal digit.
SUPERSCRIPTS = {
"0" => "\u2070", "1" => "\u00b9", "2" => "\u00b2", "3" => "\u00b3",
"4" => "\u2074", "5" => "\u2075", "6" => "\u2076", "7" => "\u2077",
"8" => "\u2078", "9" => "\u2079"
}.freeze

# Render @power as Unicode superscripts, with a leading superscript minus
# for negative exponents.
# BUGFIX: the previous case-based lookup used the signed value directly,
# so negative or multi-digit powers looked up nil and raised TypeError.
def power_string
final = String.new
final << "\u207b" if @power < 0
@power.abs.to_s.each_char { |digit| final << SUPERSCRIPTS[digit] }
final
end

# Coefficient prefix: "" for 1, "-" for -1, otherwise the number itself.
def idx(s)
case s
when 1 then String.new
when -1 then "-"
else s
end
end
end
end
Update function.rb
module Linear
# A single-term function of the form f(x) = slope * x**power + y_intercept.
class Function
attr_reader :slope, :y_intercept, :power

# slope and y_intercept are Numeric coefficients; power is the exponent
# applied to x (default 1, i.e. an ordinary linear function).
def initialize(slope, y_intercept, power=1)
@slope, @y_intercept, @power = slope, y_intercept, power
end

# Evaluate the function at x. Raises ArgumentError for non-numeric input.
def execute(x)
raise ArgumentError unless x.kind_of? Numeric
slope * x ** power + y_intercept
end
alias f execute

# Human-readable form, e.g. "f(x) = 2x + 3" or "f(x) = 5x".
def to_s
"f(x) = #{idx slope}x#{power_string unless power == 1}#{" + #{@y_intercept}" unless direct_variation?}"
end

# A direct variation has no constant term and power 1 (f(x) = kx).
def direct_variation?
y_intercept.zero? && power == 1
end
alias dv? direct_variation?

# Convert to a DirectVariation; raises when the function has a constant
# term or a non-unit power.
def to_direct_variation
if direct_variation?
require "linear/direct_variation"
DirectVariation.new slope
else
raise "Unable to convert to DirectVariation"
end
end
alias to_dv to_direct_variation

private

# Unicode superscript glyphs for each decimal digit.
SUPERSCRIPTS = {
"0" => "\u2070", "1" => "\u00b9", "2" => "\u00b2", "3" => "\u00b3",
"4" => "\u2074", "5" => "\u2075", "6" => "\u2076", "7" => "\u2077",
"8" => "\u2078", "9" => "\u2079"
}.freeze

# Render @power as Unicode superscripts, with a leading superscript minus
# for negative exponents.
# BUGFIX: the previous case-based lookup used the signed value directly,
# so negative or multi-digit powers looked up nil and raised TypeError.
def power_string
final = String.new
final << "\u207b" if @power < 0
@power.abs.to_s.each_char { |digit| final << SUPERSCRIPTS[digit] }
final
end

# Coefficient prefix: "" for 1, "-" for -1, otherwise the number itself.
def idx(s)
case s
when 1 then String.new
when -1 then "-"
else s
end
end
end
end
|
require 'generic_api_client'
# Client for the network location API.
# BUGFIX: the class must inherit from GenericAPIClient — `config`, `get` and
# `return_nil_on_error` are class-level helpers it provides; without the
# superclass every call to .query raised NoMethodError.
class LocationClient < GenericAPIClient
# Look up the location of the given MSISDN.
# Returns the API response, or nil when the service reports an error.
def self.query(msisdn)
qparams={
:requestedAccuracy=>2,
:acceptableAccuracy=>5,
:responseTime=>3,
:tolerance=>"LowDelay",
:maximumAge=>100,
:address => "#{msisdn}"
}
# Merge in deployment-specific parameters from the client configuration.
qparams=qparams.merge config[:params]
return get("",qparams) do |request,response|
return_nil_on_error(response)
end
end
end
Update location_client.rb
require 'generic_api_client'
# Client for the network location API, built on GenericAPIClient's
# class-level `config`, `get` and `return_nil_on_error` helpers.
class LocationClient < GenericAPIClient
# Look up the location of the given MSISDN.
# Returns the API response, or nil when the service reports an error.
def self.query(msisdn)
query_params = {
requestedAccuracy: 2,
acceptableAccuracy: 5,
responseTime: 3,
tolerance: "LowDelay",
maximumAge: 100,
address: msisdn.to_s
}.merge(config[:params])
get("", query_params) do |request, response|
return_nil_on_error(response)
end
end
end
|
#!/usr/bin/env ruby
require "log_line_parser/version"
require "strscan"
require "time"
require "date"
module LogLineParser
class Tokenizer
class << self
attr_reader :special_token_re, :non_special_token_re
def tokenize(str)
@scanner.string = str
tokens = []
token = true # to start looping, you should assign a truthy value
while token
tokens.push token if token = scan_token
end
tokens.push @scanner.rest unless @scanner.eos?
tokens
end
def setup(special_tokens, unescaped_special_tokens=[])
@special_tokens = special_tokens
@unescaped_special_tokens = unescaped_special_tokens
@scanner = StringScanner.new("".freeze)
@special_token_re, @non_special_token_re = compose_re(@special_tokens)
end
private
def scan_token
@scanner.scan(@special_token_re) ||
@scanner.scan_until(@non_special_token_re)
end
def compose_special_tokens_str(special_tokens)
sorted = special_tokens.sort {|x, y| y.length <=> x.length }
escaped = sorted.map {|token| Regexp.escape(token) }
escaped.concat @unescaped_special_tokens if @unescaped_special_tokens
escaped.join('|')
end
def compose_re(special_tokens)
tokens_str = compose_special_tokens_str(special_tokens)
return Regexp.compile(tokens_str), Regexp.compile("(?=#{tokens_str})")
end
end
setup(%w([ ] - \\ "), ['\s+']) #"
end
class NodeStack
attr_reader :current_node
class << self
attr_reader :root_node_class
def setup(root_node_class)
@root_node_class = root_node_class
end
end
def initialize
@current_node = self.class.root_node_class.new
@stack = [@current_node]
end
def push_node(node)
@current_node.push node
@current_node = node
@stack.push node
end
def pop
popped = @stack.pop
@current_node = @stack[-1]
popped
end
def push_token(token)
@current_node.push token
end
def push(token)
if @current_node.kind_of? EscapeNode
push_escaped_token(token)
elsif @current_node.end_tag?(token)
pop
elsif subnode_class = @current_node.subnode_class(token)
push_node(subnode_class.new)
elsif @current_node.can_ignore?(token)
nil
else
push_token(token)
end
end
def push_escaped_token(token)
part_to_be_escaped = @current_node.part_to_be_escaped(token)
remaining_part = nil
if part_to_be_escaped
remaining_part = @current_node.remove_escaped_part(token)
push_token(part_to_be_escaped)
end
pop
push_token(remaining_part) if remaining_part
end
def root
@stack[0]
end
end
class Node
class << self
attr_reader :start_tag, :end_tag, :subnode_classes
attr_reader :start_tag_to_subnode, :tokens_to_be_ignored
def register_subnode_classes(*subnode_classes)
@subnode_classes = subnode_classes
subnode_classes.each do |subnode|
@start_tag_to_subnode[subnode.start_tag] = subnode
end
end
def setup(start_tag, end_tag, to_be_ignored=[])
@start_tag_to_subnode = {}
@tokens_to_be_ignored = []
@start_tag = start_tag
@end_tag = end_tag
@tokens_to_be_ignored.concat(to_be_ignored) if to_be_ignored
end
end
attr_reader :subnodes
def initialize
@subnodes = []
end
def to_s
@subnodes.join
end
def subnode_class(token)
self.class.start_tag_to_subnode[token]
end
def end_tag?(token)
self.class.end_tag == token
end
def can_ignore?(token)
self.class.tokens_to_be_ignored.include?(token)
end
def push(token)
@subnodes.push token
end
end
class EscapeNode < Node
class << self
attr_reader :to_be_escaped, :to_be_escaped_re
def setup(start_tag, end_tag, to_be_ignored=[], to_be_escaped=[])
super(start_tag, end_tag, to_be_ignored)
@to_be_escaped = to_be_escaped
@to_be_escaped_re = compile_to_be_escaped_re(to_be_escaped)
end
def compile_to_be_escaped_re(to_be_escaped)
re_str = to_be_escaped.map {|e| Regexp.escape(e) }.join("|")
/\A(?:#{re_str})/
end
end
def remove_escaped_part(token)
token.sub(self.class.to_be_escaped_re, ''.freeze)
end
def part_to_be_escaped(token)
self.class.to_be_escaped.each do |e|
return e if token.start_with?(e)
end
nil
end
end
class RootNode < Node
setup(nil, nil, [" "])
end
class TimeNode < Node
setup("[", "]", [])
end
class StringNode < Node
setup('"', '"', [])
end
class StringEscapeNode < EscapeNode
setup('\\', nil, [], ['\\', '"', 't', 'n', 'r'])
ESCAPED = {
'\\' => '\\',
'"' => '"',
't' => "\t",
'n' => "\n",
'r' => "\r",
}
def to_s
ESCAPED[@subnodes[0]] || ''.freeze
end
end
RootNode.register_subnode_classes(TimeNode, StringNode)
StringNode.register_subnode_classes(StringEscapeNode)
class LogLineNodeStack < NodeStack
setup(RootNode)
def to_a
root.subnodes.map {|node| node.to_s }
end
def to_record
CombinedLogRecord.create(to_a)
end
end
# LogFormat "%h %l %u %t \"%r\" %>s %b \"%{Referer}i\" \"%{User-agent}i\"" combined
CombinedLogRecord = Struct.new(:remote_host,
:remote_logname,
:remote_user,
:time,
:first_line_of_request,
:last_request_status,
:size_of_response,
:referer,
:user_agent)
class CombinedLogRecord
DATE_TIME_SEP = /:/
SPACE_RE = / /
SLASH_RE = /\//
SLASH = '/'
attr_reader :method, :protocol, :resource, :referer_url, :referer_resource
class << self
def create(log_fields)
new(*log_fields).tap do |rec|
rec.last_request_status = rec.last_request_status.to_i
rec.size_of_response = response_size(rec)
rec.time = parse_time(rec.time)
rec.parse_request
rec.parse_referer
end
end
private
def response_size(rec)
size_str = rec.size_of_response
size_str == "-".freeze ? 0 : size_str.to_i
end
def parse_time(time_str)
Time.parse(time_str.sub(DATE_TIME_SEP, " ".freeze))
end
end
def date(offset=0)
DateTime.parse((self.time + offset * 86400).to_s)
end
def parse_request
request = self.first_line_of_request.split(SPACE_RE)
@method = request.shift
@protocol = request.pop
@resource = request.size == 1 ? request[0] : request.join(" ".freeze)
end
def parse_referer
return if self.referer == "-"
parts = self.referer.split(SLASH_RE, 4)
if parts[0] == "http:".freeze
@referer_url = parts.shift(3).join(SLASH).concat(SLASH)
@referer_resource = SLASH + parts.shift unless parts.empty?
else
@referer_resource = self.referer
end
end
end
# Parse one log line into a LogLineNodeStack.
# BUGFIX (per the follow-up commit): strip the trailing newline first —
# otherwise it rides along on the final token and corrupts the last field.
def self.parse(line)
stack = LogLineNodeStack.new
tokens = Tokenizer.tokenize(line.chomp)
tokens.each {|token| stack.push token }
stack
end
end
The trailing newline should be removed before parsing a line
#!/usr/bin/env ruby
require "log_line_parser/version"
require "strscan"
require "time"
require "date"
module LogLineParser
class Tokenizer
class << self
attr_reader :special_token_re, :non_special_token_re
def tokenize(str)
@scanner.string = str
tokens = []
token = true # to start looping, you should assign a truthy value
while token
tokens.push token if token = scan_token
end
tokens.push @scanner.rest unless @scanner.eos?
tokens
end
def setup(special_tokens, unescaped_special_tokens=[])
@special_tokens = special_tokens
@unescaped_special_tokens = unescaped_special_tokens
@scanner = StringScanner.new("".freeze)
@special_token_re, @non_special_token_re = compose_re(@special_tokens)
end
private
def scan_token
@scanner.scan(@special_token_re) ||
@scanner.scan_until(@non_special_token_re)
end
def compose_special_tokens_str(special_tokens)
sorted = special_tokens.sort {|x, y| y.length <=> x.length }
escaped = sorted.map {|token| Regexp.escape(token) }
escaped.concat @unescaped_special_tokens if @unescaped_special_tokens
escaped.join('|')
end
def compose_re(special_tokens)
tokens_str = compose_special_tokens_str(special_tokens)
return Regexp.compile(tokens_str), Regexp.compile("(?=#{tokens_str})")
end
end
setup(%w([ ] - \\ "), ['\s+']) #"
end
class NodeStack
attr_reader :current_node
class << self
attr_reader :root_node_class
def setup(root_node_class)
@root_node_class = root_node_class
end
end
def initialize
@current_node = self.class.root_node_class.new
@stack = [@current_node]
end
def push_node(node)
@current_node.push node
@current_node = node
@stack.push node
end
def pop
popped = @stack.pop
@current_node = @stack[-1]
popped
end
def push_token(token)
@current_node.push token
end
def push(token)
if @current_node.kind_of? EscapeNode
push_escaped_token(token)
elsif @current_node.end_tag?(token)
pop
elsif subnode_class = @current_node.subnode_class(token)
push_node(subnode_class.new)
elsif @current_node.can_ignore?(token)
nil
else
push_token(token)
end
end
def push_escaped_token(token)
part_to_be_escaped = @current_node.part_to_be_escaped(token)
remaining_part = nil
if part_to_be_escaped
remaining_part = @current_node.remove_escaped_part(token)
push_token(part_to_be_escaped)
end
pop
push_token(remaining_part) if remaining_part
end
def root
@stack[0]
end
end
class Node
class << self
attr_reader :start_tag, :end_tag, :subnode_classes
attr_reader :start_tag_to_subnode, :tokens_to_be_ignored
def register_subnode_classes(*subnode_classes)
@subnode_classes = subnode_classes
subnode_classes.each do |subnode|
@start_tag_to_subnode[subnode.start_tag] = subnode
end
end
def setup(start_tag, end_tag, to_be_ignored=[])
@start_tag_to_subnode = {}
@tokens_to_be_ignored = []
@start_tag = start_tag
@end_tag = end_tag
@tokens_to_be_ignored.concat(to_be_ignored) if to_be_ignored
end
end
attr_reader :subnodes
def initialize
@subnodes = []
end
def to_s
@subnodes.join
end
def subnode_class(token)
self.class.start_tag_to_subnode[token]
end
def end_tag?(token)
self.class.end_tag == token
end
def can_ignore?(token)
self.class.tokens_to_be_ignored.include?(token)
end
def push(token)
@subnodes.push token
end
end
class EscapeNode < Node
class << self
attr_reader :to_be_escaped, :to_be_escaped_re
def setup(start_tag, end_tag, to_be_ignored=[], to_be_escaped=[])
super(start_tag, end_tag, to_be_ignored)
@to_be_escaped = to_be_escaped
@to_be_escaped_re = compile_to_be_escaped_re(to_be_escaped)
end
def compile_to_be_escaped_re(to_be_escaped)
re_str = to_be_escaped.map {|e| Regexp.escape(e) }.join("|")
/\A(?:#{re_str})/
end
end
def remove_escaped_part(token)
token.sub(self.class.to_be_escaped_re, ''.freeze)
end
def part_to_be_escaped(token)
self.class.to_be_escaped.each do |e|
return e if token.start_with?(e)
end
nil
end
end
class RootNode < Node
setup(nil, nil, [" "])
end
class TimeNode < Node
setup("[", "]", [])
end
class StringNode < Node
setup('"', '"', [])
end
class StringEscapeNode < EscapeNode
setup('\\', nil, [], ['\\', '"', 't', 'n', 'r'])
ESCAPED = {
'\\' => '\\',
'"' => '"',
't' => "\t",
'n' => "\n",
'r' => "\r",
}
def to_s
ESCAPED[@subnodes[0]] || ''.freeze
end
end
RootNode.register_subnode_classes(TimeNode, StringNode)
StringNode.register_subnode_classes(StringEscapeNode)
class LogLineNodeStack < NodeStack
setup(RootNode)
def to_a
root.subnodes.map {|node| node.to_s }
end
def to_record
CombinedLogRecord.create(to_a)
end
end
# LogFormat "%h %l %u %t \"%r\" %>s %b \"%{Referer}i\" \"%{User-agent}i\"" combined
CombinedLogRecord = Struct.new(:remote_host,
:remote_logname,
:remote_user,
:time,
:first_line_of_request,
:last_request_status,
:size_of_response,
:referer,
:user_agent)
class CombinedLogRecord
DATE_TIME_SEP = /:/
SPACE_RE = / /
SLASH_RE = /\//
SLASH = '/'
attr_reader :method, :protocol, :resource, :referer_url, :referer_resource
class << self
def create(log_fields)
new(*log_fields).tap do |rec|
rec.last_request_status = rec.last_request_status.to_i
rec.size_of_response = response_size(rec)
rec.time = parse_time(rec.time)
rec.parse_request
rec.parse_referer
end
end
private
def response_size(rec)
size_str = rec.size_of_response
size_str == "-".freeze ? 0 : size_str.to_i
end
def parse_time(time_str)
Time.parse(time_str.sub(DATE_TIME_SEP, " ".freeze))
end
end
def date(offset=0)
DateTime.parse((self.time + offset * 86400).to_s)
end
def parse_request
request = self.first_line_of_request.split(SPACE_RE)
@method = request.shift
@protocol = request.pop
@resource = request.size == 1 ? request[0] : request.join(" ".freeze)
end
def parse_referer
return if self.referer == "-"
parts = self.referer.split(SLASH_RE, 4)
if parts[0] == "http:".freeze
@referer_url = parts.shift(3).join(SLASH).concat(SLASH)
@referer_resource = SLASH + parts.shift unless parts.empty?
else
@referer_resource = self.referer
end
end
end
# Parse one log line into a LogLineNodeStack.
# The trailing newline is chomped first so it cannot leak into the final
# token of the parsed record.
def self.parse(line)
stack = LogLineNodeStack.new
tokens = Tokenizer.tokenize(line.chomp)
tokens.each {|token| stack.push token }
stack
end
end
|
#!/usr/bin/env ruby
require "log_line_parser/version"
require "strscan"
module LogLineParser
class Tokenizer
class << self
attr_reader :special_token_re, :non_special_token_re
def tokenize(str)
@scanner.string = str
tokens = []
token = true # to start looping, you should assign a truthy value
while token
tokens.push token if token = scan_token
end
tokens.push @scanner.rest unless @scanner.eos?
tokens
end
def setup(special_tokens, unescaped_special_tokens=[])
@special_tokens = special_tokens
@unescaped_special_tokens = unescaped_special_tokens
@scanner = StringScanner.new("".freeze)
@special_token_re, @non_special_token_re = compose_re(@special_tokens)
end
private
def scan_token
@scanner.scan(@special_token_re) ||
@scanner.scan_until(@non_special_token_re)
end
def compose_special_tokens_str(special_tokens)
sorted = special_tokens.sort {|x, y| y.length <=> x.length }
escaped = sorted.map {|token| Regexp.escape(token) }
escaped.concat @unescaped_special_tokens if @unescaped_special_tokens
escaped.join('|')
end
def compose_re(special_tokens)
tokens_str = compose_special_tokens_str(special_tokens)
return Regexp.compile(tokens_str), Regexp.compile("(?=#{tokens_str})")
end
end
setup(%w([ ] - \\ "), ['\s+']) #"
end
class NodeStack
attr_reader :current_node
def initialize
@stack = []
end
def push_node(node)
@current_node = node
@stack.push node
end
def pop
popped = @stack.pop
@current_node = @stack[-1]
popped
end
def push_token(token)
@current_node.push token
end
def push(token)
if @current_node.end_tag?(token)
pop
elsif subnode_class = @current_node.subnode_class(token)
push_node(subnode_class.new)
elsif @current_node.can_ignore?(token)
nil
else
push_token(token)
end
end
def root
@stack[0]
end
end
# A node in the parse tree. Per-class configuration (delimiting tags, tokens
# to ignore, allowed child node classes) is installed once via Node.setup on
# each concrete subclass; instances only accumulate their sub-tokens.
class Node
class << self
attr_reader :start_tag, :end_tag, :subnode_classes
attr_reader :start_tag_to_subnode, :tokens_to_be_ignored
# Register the node classes that may open as children of this node,
# indexed by their start tag.
def register_subnode_classes(*subnode_classes)
@subnode_classes = subnode_classes
subnode_classes.each do |subnode|
@start_tag_to_subnode[subnode.start_tag] = subnode
end
end
# Configure the class: the tags delimiting the node, tokens to drop, and
# any child node classes.
def setup(start_tag, end_tag, to_be_ignored=[], *subnode_classes)
@start_tag_to_subnode = {}
@tokens_to_be_ignored = []
@start_tag = start_tag
@end_tag = end_tag
@tokens_to_be_ignored.concat(to_be_ignored) if to_be_ignored
register_subnode_classes(*subnode_classes)
end
end
def initialize
@subnodes = []
end
# The node's textual content: its sub-tokens joined together.
def to_s
@subnodes.join
end
# The child node class opened by this token, or nil.
def subnode_class(token)
self.class.start_tag_to_subnode[token]
end
def end_tag?(token)
self.class.end_tag == token
end
# BUGFIX: the ignore list lives on the CLASS (populated by Node.setup),
# not on the instance — the instance variable @tokens_to_be_ignored was
# always nil here, so this method raised NoMethodError on every call.
def can_ignore?(token)
self.class.tokens_to_be_ignored.include?(token)
end
def push(token)
@subnodes.push token
end
end
end
bug fix in Node: Node#can_ignore? should refer to the value of a class variable
#!/usr/bin/env ruby
require "log_line_parser/version"
require "strscan"
module LogLineParser
class Tokenizer
class << self
attr_reader :special_token_re, :non_special_token_re
def tokenize(str)
@scanner.string = str
tokens = []
token = true # to start looping, you should assign a truthy value
while token
tokens.push token if token = scan_token
end
tokens.push @scanner.rest unless @scanner.eos?
tokens
end
def setup(special_tokens, unescaped_special_tokens=[])
@special_tokens = special_tokens
@unescaped_special_tokens = unescaped_special_tokens
@scanner = StringScanner.new("".freeze)
@special_token_re, @non_special_token_re = compose_re(@special_tokens)
end
private
def scan_token
@scanner.scan(@special_token_re) ||
@scanner.scan_until(@non_special_token_re)
end
def compose_special_tokens_str(special_tokens)
sorted = special_tokens.sort {|x, y| y.length <=> x.length }
escaped = sorted.map {|token| Regexp.escape(token) }
escaped.concat @unescaped_special_tokens if @unescaped_special_tokens
escaped.join('|')
end
def compose_re(special_tokens)
tokens_str = compose_special_tokens_str(special_tokens)
return Regexp.compile(tokens_str), Regexp.compile("(?=#{tokens_str})")
end
end
setup(%w([ ] - \\ "), ['\s+']) #"
end
class NodeStack
attr_reader :current_node
def initialize
@stack = []
end
def push_node(node)
@current_node = node
@stack.push node
end
def pop
popped = @stack.pop
@current_node = @stack[-1]
popped
end
def push_token(token)
@current_node.push token
end
def push(token)
if @current_node.end_tag?(token)
pop
elsif subnode_class = @current_node.subnode_class(token)
push_node(subnode_class.new)
elsif @current_node.can_ignore?(token)
nil
else
push_token(token)
end
end
def root
@stack[0]
end
end
# A node in the parse tree. Per-class configuration (delimiting tags, tokens
# to ignore, allowed child node classes) is installed once via Node.setup on
# each concrete subclass; instances only accumulate their sub-tokens.
class Node
class << self
attr_reader :start_tag, :end_tag, :subnode_classes
attr_reader :start_tag_to_subnode, :tokens_to_be_ignored
# Register the node classes that may open as children of this node,
# indexed by their start tag.
def register_subnode_classes(*subnode_classes)
@subnode_classes = subnode_classes
subnode_classes.each do |subnode|
@start_tag_to_subnode[subnode.start_tag] = subnode
end
end
# Configure the class: the tags delimiting the node, tokens to drop, and
# any child node classes.
def setup(start_tag, end_tag, to_be_ignored=[], *subnode_classes)
@start_tag_to_subnode = {}
@tokens_to_be_ignored = []
@start_tag = start_tag
@end_tag = end_tag
@tokens_to_be_ignored.concat(to_be_ignored) if to_be_ignored
register_subnode_classes(*subnode_classes)
end
end
def initialize
@subnodes = []
end
# The node's textual content: its sub-tokens joined together.
def to_s
@subnodes.join
end
# The child node class opened by this token, or nil.
def subnode_class(token)
self.class.start_tag_to_subnode[token]
end
def end_tag?(token)
self.class.end_tag == token
end
# The ignore list lives on the class, populated by Node.setup.
def can_ignore?(token)
self.class.tokens_to_be_ignored.include?(token)
end
def push(token)
@subnodes.push token
end
end
end
|
require "bigdecimal"
require "addressable/uri"
require "active_support/inflector"
require "logeater/params_parser"
require "logeater/parser_errors"
module Logeater
# Parses lines of a Rails application log into structured hashes keyed by
# the kind of event each line represents (:generic, :request_started, ...).
class Parser
LINE_MATCHER = /^
[A-Z],\s
\[(?<timestamp>[^\s\]]+)(?:\s[^\]]*)?\]\s+
(?<log_level>[A-Z]+)\s+\-\-\s:\s+
(?<message>.*)
$/x.freeze
TIMESTAMP_MATCHER = /
(?<year>\d\d\d\d)\-
(?<month>\d\d)\-
(?<day>\d\d)T
(?<hours>\d\d):
(?<minutes>\d\d):
(?<seconds>\d\d(?:\.\d+))
/x.freeze
REQUEST_LINE_MATCHER = /^
\[(?<subdomain>[^\]]+)\]\s
\[(?<uuid>[\w\-]{36})\]\s+
(?:\[(?:guest|user\.(?<user_id>\d+)(?<tester_bar>:cph)?)\]\s+)?
(?<message>.*)
$/x.freeze
REQUEST_STARTED_MATCHER = /^
Started\s
(?<http_method>[A-Z]+)\s
"(?<path>[^"]+)"\sfor\s
(?<remote_ip>[\d\.]+)
/x.freeze
REQUEST_CONTROLLER_MATCHER = /^
Processing\sby\s
(?<controller>[A-Za-z0-9:]+)\#
(?<action>[a-z_0-9]+)\sas\s
(?<format>.*)
/x.freeze
REQUEST_PARAMETERS_MATCHER = /^
Parameters:\s
(?<params>\{.*\})
$/x.freeze
REQUEST_COMPLETED_MATCHER = /^
Completed\s
(?<http_status>\d\d\d)\s
(?:(?<http_response>.*)\s)?in\s
(?<duration>[\d\.]+)(?<units>ms)\b
/x.freeze # optional: (Views: 0.1ms | ActiveRecord: 50.0ms)
# Parse one raw log line. Always returns at least :type, :timestamp,
# :log_level and :message; request lines are further decomposed.
# Raises UnmatchedLine or MalformedTimestamp on input it cannot handle.
def parse!(line)
match = line.match LINE_MATCHER
raise UnmatchedLine.new(line) unless match
timestamp = match["timestamp"]
time = timestamp.match TIMESTAMP_MATCHER
raise MalformedTimestamp.new(timestamp) unless time
# BUGFIX: BigDecimal.new was removed from Ruby (deprecated in 2.7); the
# Kernel#BigDecimal conversion method is the supported spelling.
# Seconds stay a BigDecimal to preserve sub-second precision.
time = Time.new(*time.captures[0...-1], BigDecimal(time["seconds"]))
message = match["message"]
result = {
type: :generic,
timestamp: time,
log_level: match["log_level"],
message: message }
result.merge(parse_message(message))
end
# Decompose a request-scoped message ("[subdomain] [uuid] ...").
# Returns {} for messages that are not request lines.
def parse_message(message)
match = message.match REQUEST_LINE_MATCHER
return {} unless match
message = match["message"]
{ subdomain: match["subdomain"],
uuid: match["uuid"],
type: :request_line,
user_id: match["user_id"] && match["user_id"].to_i,
tester_bar: !!match["tester_bar"],
message: message }.merge(
parse_message_extra(message))
end
# Try each request-event matcher in turn; {} when none applies.
def parse_message_extra(message)
match = message.match(REQUEST_STARTED_MATCHER)
return parse_request_started_message(match) if match
match = message.match(REQUEST_CONTROLLER_MATCHER)
return parse_request_controller_message(match) if match
match = message.match(REQUEST_PARAMETERS_MATCHER)
return parse_request_params_message(match) if match
match = message.match(REQUEST_COMPLETED_MATCHER)
return parse_request_completed_message(match) if match
{}
end
# "Started GET "/path" for 1.2.3.4" — query strings are dropped.
def parse_request_started_message(match)
uri = Addressable::URI.parse(match["path"])
{ type: :request_started,
http_method: match["http_method"],
path: uri.path,
remote_ip: match["remote_ip"] }
end
# "Processing by FooController#bar as HTML" — controller is normalized to
# its underscored name without the "_controller" suffix.
def parse_request_controller_message(match)
{ type: :request_controller,
controller: match["controller"].underscore.gsub(/_controller$/, ""),
action: match["action"],
format: match["format"] }
end
# "Parameters: {...}" — falls back to the raw string when the params
# cannot be parsed.
def parse_request_params_message(match)
params = ParamsParser.new(match["params"])
{ type: :request_params,
params: params.parse! }
rescue Logeater::Parser::MalformedParameters
log "Unable to parse parameters: #{match["params"].inspect}"
{ params: match["params"] }
end
# "Completed 200 OK in 52.2ms" — duration is truncated to whole ms.
def parse_request_completed_message(match)
{ type: :request_completed,
http_status: match["http_status"].to_i,
http_response: match["http_response"],
duration: match["duration"].to_i }
end
# Write a warning to stderr (in yellow).
def log(statement)
$stderr.puts "\e[33m#{statement}\e[0m"
end
end
end
[refactor] Optimized parser by caching parses of various fields (10m)
require "addressable/uri"
require "active_support/inflector"
require "logeater/params_parser"
require "logeater/parser_errors"
module Logeater
class Parser
LINE_MATCHER = /^
[A-Z],\s
\[(?<timestamp>[^\s\]]+)(?:\s[^\]]*)?\]\s+
(?<log_level>[A-Z]+)\s+\-\-\s:\s+
(?<message>.*)
$/x.freeze
TIMESTAMP_MATCHER = /
(?<year>\d\d\d\d)\-
(?<month>\d\d)\-
(?<day>\d\d)T
(?<hours>\d\d):
(?<minutes>\d\d):
(?<seconds>\d\d(?:\.\d+))
/x.freeze
REQUEST_LINE_MATCHER = /^
\[(?<subdomain>[^\]]+)\]\s
\[(?<uuid>[\w\-]{36})\]\s+
(?:\[(?:guest|user\.(?<user_id>\d+)(?<tester_bar>:cph)?)\]\s+)?
(?<message>.*)
$/x.freeze
REQUEST_STARTED_MATCHER = /^
Started\s
(?<http_method>[A-Z]+)\s
"(?<path>[^"]+)"\sfor\s
(?<remote_ip>[\d\.]+)
/x.freeze
REQUEST_CONTROLLER_MATCHER = /^
Processing\sby\s
(?<controller>[A-Za-z0-9:]+)\#
(?<action>[a-z_0-9]+)\sas\s
(?<format>.*)
/x.freeze
REQUEST_PARAMETERS_MATCHER = /^
Parameters:\s
(?<params>\{.*\})
$/x.freeze
REQUEST_COMPLETED_MATCHER = /^
Completed\s
(?<http_status>\d\d\d)\s
(?:(?<http_response>.*)\s)?in\s
(?<duration>[\d\.]+)(?<units>ms)\b
/x.freeze # optional: (Views: 0.1ms | ActiveRecord: 50.0ms)
def parse!(line)
match = line.match LINE_MATCHER
raise UnmatchedLine.new(line) unless match
timestamp = match["timestamp"]
time = timestamp.match TIMESTAMP_MATCHER
raise MalformedTimestamp.new(timestamp) unless time
time = Time.new(*time.captures[0...-1], BigDecimal.new(time["seconds"]))
message = match["message"]
result = {
type: :generic,
timestamp: time,
log_level: match["log_level"],
message: message }
result.merge(parse_message(message))
end
def parse_message(message)
match = message.match REQUEST_LINE_MATCHER
return {} unless match
message = match["message"]
{ subdomain: match["subdomain"],
uuid: match["uuid"],
type: :request_line,
user_id: match["user_id"] && match["user_id"].to_i,
tester_bar: !!match["tester_bar"],
message: message }.merge(
parse_message_extra(message))
end
def parse_message_extra(message)
match = message.match(REQUEST_STARTED_MATCHER)
return parse_request_started_message(match) if match
match = message.match(REQUEST_CONTROLLER_MATCHER)
return parse_request_controller_message(match) if match
match = message.match(REQUEST_PARAMETERS_MATCHER)
return parse_request_params_message(match) if match
match = message.match(REQUEST_COMPLETED_MATCHER)
return parse_request_completed_message(match) if match
{}
end
def parse_request_started_message(match)
{ type: :request_started,
http_method: match["http_method"],
path: parsed_uri[match["path"]],
remote_ip: match["remote_ip"] }
end
# Attributes for a 'Processing by Controller#action as FORMAT' line.
# normalized_controller_name is a memoizing hash (see #initialize) that
# underscores the name and strips the trailing "_controller".
def parse_request_controller_message(match)
  { type: :request_controller,
    controller: normalized_controller_name[match["controller"]],
    action: match["action"],
    format: match["format"] }
end
# Attributes for a 'Parameters: {...}' line. On a parse failure the raw
# params string is kept and a warning is logged.
# NOTE(review): the rescue branch omits type: :request_params, so a
# malformed-params line keeps the merged-in :request_line type — confirm
# this is intentional.
def parse_request_params_message(match)
  { type: :request_params,
    params: ParamsParser.new(match["params"]).parse! }
rescue Logeater::Parser::MalformedParameters
  log "Unable to parse parameters: #{match["params"].inspect}"
  { params: match["params"] }
end
# Attributes for a 'Completed 200 OK in 12ms' line.
# NOTE(review): duration is truncated to whole milliseconds even though
# the matcher accepts a decimal — confirm integer ms is intended.
def parse_request_completed_message(match)
  status_code = match["http_status"].to_i
  response_text = match["http_response"]
  { type: :request_completed,
    http_status: status_code,
    http_response: response_text,
    duration: match["duration"].to_i }
end
# Writes a diagnostic statement to STDERR, wrapped in yellow ANSI codes.
def log(statement)
  $stderr.puts("\e[33m%s\e[0m" % statement)
end
# Sets up two lazily-populated caches:
# * normalized_controller_name — controller class name, underscored with
#   the trailing "_controller" stripped
# * parsed_uri — the path component of a request URI
# Each cache computes a value on first lookup and memoizes it.
def initialize
  @normalized_controller_name = Hash.new do |cache, controller_name|
    cache[controller_name] = controller_name.underscore.gsub(/_controller$/, "")
  end
  @parsed_uri = Hash.new do |cache, uri|
    cache[uri] = Addressable::URI.parse(uri).path
  end
end
private
attr_reader :normalized_controller_name,
:parsed_uri
end
end
|
module Madness
  # Gem version, bumped on each release.
  VERSION = "0.0.7"
end
Bump version to 0.0.8
module Madness
  # Gem version, bumped on each release.
  VERSION = "0.0.8"
end
|
class Maildir
  # Gem version, bumped on each release.
  VERSION = '2.2.1'
end
Bump version to 2.2.2
class Maildir
  # Gem version, bumped on each release.
  VERSION = '2.2.2'
end
|
# -------------------------------------------------------------------------- #
# Copyright 2002-2014, OpenNebula Project (OpenNebula.org), C12G Labs #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); you may #
# not use this file except in compliance with the License. You may obtain #
# a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
#--------------------------------------------------------------------------- #
require 'opennebula/pool_element'
module OpenNebula
class Group < PoolElement
    #######################################################################
    # Constants and Class Methods
    #######################################################################

    # XML-RPC endpoint name for each Group operation
    GROUP_METHODS = {
        :info           => "group.info",
        :allocate       => "group.allocate",
        :update         => "group.update",
        :delete         => "group.delete",
        :quota          => "group.quota",
        :add_provider   => "group.addprovider",
        :del_provider   => "group.delprovider"
    }

    # Flag for requesting connected user's group info
    SELF = -1

    # Default resource ACL's for group users (create)
    GROUP_DEFAULT_ACLS = "VM+IMAGE+NET+TEMPLATE+DOCUMENT"

    # Special cluster ID meaning every cluster in the zone
    ALL_CLUSTERS_IN_ZONE = 10

    # The default view for group and group admins, must be defined in
    # sunstone_views.yaml
    GROUP_ADMIN_SUNSTONE_VIEWS = "vdcadmin"

    # Creates a Group description with just its identifier
    # this method should be used to create plain Group objects.
    # +pe_id+ the id of the group
    #
    # Example:
    #   group = Group.new(Group.build_xml(3),rpc_client)
    #
    def Group.build_xml(pe_id=nil)
        if pe_id
            group_xml = "<GROUP><ID>#{pe_id}</ID></GROUP>"
        else
            group_xml = "<GROUP></GROUP>"
        end

        XMLElement.build_xml(group_xml,'GROUP')
    end

    # Class constructor
    def initialize(xml, client)
        super(xml,client)
    end

    #######################################################################
    # XML-RPC Methods for the Group Object
    #######################################################################

    # Retrieves the information of the given Group.
    def info()
        super(GROUP_METHODS[:info], 'GROUP')
    end

    alias_method :info!, :info

    # Creates a group based in a group definition hash
    #   group_hash[:name] the group name
    #   group_hash[:group_admin] the admin user definition hash, see def
    #     create_admin_user function description for details.
    #   group_hash[:resource_providers]
    #   group_hash[:resource_providers][:zone_id]
    #   group_hash[:resource_providers][:cluster_id]
    #
    # Returns 0 on success, or an OpenNebula::Error.
    def create(group_hash)
        # Check arguments
        if !group_hash[:name]
            return OpenNebula::Error.new("Group name not defined")
        end

        if group_hash[:group_admin]
            if group_hash[:group_admin][:name] && !group_hash[:group_admin][:password]
                error_msg = "Admin user password not defined"
                return OpenNebula::Error.new(error_msg)
            end
        end

        # Allocate group
        rc = self.allocate(group_hash[:name])
        return rc if OpenNebula.is_error?(rc)

        # Handle resource providers; "ALL" selects every cluster in the zone
        group_hash[:resource_providers].each { |rp|
            next if rp[:zone_id].nil? && rp[:cluster_id].nil?

            if rp[:cluster_id].class == String && rp[:cluster_id] == "ALL"
                add_provider(rp[:zone_id],ALL_CLUSTERS_IN_ZONE)
            else
                add_provider(rp[:zone_id],rp[:cluster_id])
            end
        } if !group_hash[:resource_providers].nil?

        # Set group ACLs to create resources
        rc, msg = create_default_acls(group_hash[:resources])

        if OpenNebula.is_error?(rc)
            error_msg = "Error creating group ACL's: #{rc.message}"
            return OpenNebula::Error.new(error_msg)
        end

        # Create associated group admin if needed
        rc = create_admin_user(group_hash)

        if OpenNebula.is_error?(rc)
            self.delete
            error_msg = "Error creating admin group: #{rc.message}"
            return OpenNebula::Error.new(error_msg)
        end

        # Add default Sunstone views for the group.
        # BUGFIX: update in append mode (second argument true); a plain
        # replace would wipe the template attributes set earlier.
        if group_hash[:views]
            str = "SUNSTONE_VIEWS=\"#{group_hash[:views].join(",")}\"\n"
            self.update(str, true)
        end

        return 0
    end

    # Allocates a new Group in OpenNebula
    #
    # +groupname+ A string containing the name of the Group.
    def allocate(groupname)
        super(GROUP_METHODS[:allocate], groupname)
    end

    # Replaces the template contents
    #
    # @param new_template [String] New template contents
    # @param append [true, false] True to append new attributes instead of
    #   replace the whole template
    #
    # @return [nil, OpenNebula::Error] nil in case of success, Error
    #   otherwise
    def update(new_template=nil, append=false)
        super(GROUP_METHODS[:update], new_template, append ? 1 : 0)
    end

    # Deletes the Group
    def delete()
        super(GROUP_METHODS[:delete])
    end

    # Sets the group quota limits
    # @param quota [String] a template (XML or txt) with the new quota limits
    #
    # @return [nil, OpenNebula::Error] nil in case of success, Error
    #   otherwise
    def set_quota(quota)
        return Error.new('ID not defined') if !@pe_id

        rc = @client.call(GROUP_METHODS[:quota],@pe_id, quota)
        rc = nil if !OpenNebula.is_error?(rc)

        return rc
    end

    # Adds a resource provider to this group
    # @param zone_id [Integer] Zone ID
    # @param cluster_id [Integer] Cluster ID
    #
    # @return [nil, OpenNebula::Error] nil in case of success, Error
    #   otherwise
    def add_provider(zone_id, cluster_id)
        return call(GROUP_METHODS[:add_provider], @pe_id, zone_id.to_i, cluster_id.to_i)
    end

    # Deletes a resource provider from this group
    # @param zone_id [Integer] Zone ID
    # @param cluster_id [Integer] Cluster ID
    #
    # @return [nil, OpenNebula::Error] nil in case of success, Error
    #   otherwise
    def del_provider(zone_id, cluster_id)
        return call(GROUP_METHODS[:del_provider], @pe_id, zone_id.to_i, cluster_id.to_i)
    end

    # ---------------------------------------------------------------------
    # Helpers to get information
    # ---------------------------------------------------------------------

    # Returns whether or not the user with id 'uid' is part of this group
    def contains(uid)
        #This doesn't work in ruby 1.8.5
        #return self["USERS/ID[.=#{uid}]"] != nil

        id_array = retrieve_elements('USERS/ID')
        return id_array != nil && id_array.include?(uid.to_s)
    end

    # Returns an array with the numeric user ids
    def user_ids
        array = Array.new

        self.each("USERS/ID") do |id|
            array << id.text.to_i
        end

        return array
    end

    private

    #######################################################################
    #######################################################################

    # Creates an acl array of acl strings. Returns true or error and
    # an array with the new acl ids
    def create_group_acls(acls)
        acls_ids = Array.new

        acls.each{|rule|
            acl = OpenNebula::Acl.new(OpenNebula::Acl.build_xml,@client)

            rule_ast = "#{rule} *" #Add all zone id's

            parsed_acl = OpenNebula::Acl.parse_rule(rule_ast)
            return parsed_acl, [] if OpenNebula.is_error?(parsed_acl)

            rc = acl.allocate(*parsed_acl)
            return rc, "" if OpenNebula.is_error?(rc)

            acls_ids << acl.id
        }

        return true, acls_ids
    end

    # Grants the group CREATE rights over the given resources (defaults
    # to GROUP_DEFAULT_ACLS).
    def create_default_acls(resources=nil)
        resources = GROUP_DEFAULT_ACLS if !resources

        acls = Array.new
        acls << "@#{self.id} #{resources}/* CREATE"

        create_group_acls(acls)
    end

    # Creates a group admin and user based on the group definition hash
    # @param gdef [Hash] keys are ruby symbols
    #   gdef[:group_admin] the group admin hash
    #   gdef[:group_admin][:name] username for group admin
    #   gdef[:group_admin][:password] password for group admin
    #   gdef[:group_admin][:auth_driver] auth driver for group admin
    #   gdef[:group_admin][:resources] resources that group admin manage
    #   gdef[:group_admin][:manage_users] whether group admin manages
    #     group users (NOTE(review): earlier docs said :manage_resources,
    #     but the code reads :manage_users)
    #   gdef[:resources] resources that group users manage
    #
    # @return [nil, OpenNebula::Error] nil in case of success, Error
    def create_admin_user(gdef)
        return nil if gdef[:group_admin].nil? || gdef[:group_admin][:name].nil?

        # Create group admin
        uadmin  = gdef[:group_admin][:name]
        upasswd = gdef[:group_admin][:password]
        udriver = gdef[:group_admin][:auth_driver]

        if !uadmin.nil? && !upasswd.nil?
            group_admin = OpenNebula::User.new(OpenNebula::User.build_xml,
                @client)

            if udriver
                rc = group_admin.allocate(uadmin, upasswd, udriver)
            else
                rc = group_admin.allocate(uadmin, upasswd)
            end

            if OpenNebula.is_error?(rc)
                return rc
            end
        end

        # Set admin user groups to self
        rc = group_admin.chgrp(self.id)

        if OpenNebula.is_error?(rc)
            group_admin.delete
            return rc
        end

        #Create admin group acls
        acls = Array.new

        acls_str = (gdef[:group_admin][:resources] || \
                    gdef[:resources] || GROUP_DEFAULT_ACLS)

        manage_users = gdef[:group_admin][:manage_users] || "YES"

        if manage_users.upcase == "YES"
            acls << "##{group_admin.id} USER/@#{self.id} CREATE+USE+MANAGE+ADMIN"
        end

        acls << "##{group_admin.id} #{acls_str}/@#{self.id} " +
                "CREATE+USE+MANAGE"

        rc, tmp = create_group_acls(acls)

        if OpenNebula.is_error?(rc)
            group_admin.delete
            return rc
        end

        #Set Sunstone Views for the group.
        # BUGFIX: append mode here too, so GROUP_ADMINS does not replace
        # the rest of the group template.
        gtmpl =  "GROUP_ADMINS=#{gdef[:group_admin][:name]}\n"
        gtmpl << "GROUP_ADMIN_VIEWS=#{GROUP_ADMIN_SUNSTONE_VIEWS}\n"

        self.update(gtmpl, true)

        return nil
    end
end
end
Fix template update for group creation
# -------------------------------------------------------------------------- #
# Copyright 2002-2014, OpenNebula Project (OpenNebula.org), C12G Labs #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); you may #
# not use this file except in compliance with the License. You may obtain #
# a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
#--------------------------------------------------------------------------- #
require 'opennebula/pool_element'
module OpenNebula
class Group < PoolElement
    #######################################################################
    # Constants and Class Methods
    #######################################################################

    # XML-RPC endpoint name for each Group operation
    GROUP_METHODS = {
        :info           => "group.info",
        :allocate       => "group.allocate",
        :update         => "group.update",
        :delete         => "group.delete",
        :quota          => "group.quota",
        :add_provider   => "group.addprovider",
        :del_provider   => "group.delprovider"
    }

    # Flag for requesting connected user's group info
    SELF = -1

    # Default resource ACL's for group users (create)
    GROUP_DEFAULT_ACLS = "VM+IMAGE+NET+TEMPLATE+DOCUMENT"

    # Special cluster ID meaning every cluster in the zone
    ALL_CLUSTERS_IN_ZONE = 10

    # The default view for group and group admins, must be defined in
    # sunstone_views.yaml
    GROUP_ADMIN_SUNSTONE_VIEWS = "vdcadmin"

    # Creates a Group description with just its identifier
    # this method should be used to create plain Group objects.
    # +pe_id+ the id of the group
    #
    # Example:
    #   group = Group.new(Group.build_xml(3),rpc_client)
    #
    def Group.build_xml(pe_id=nil)
        if pe_id
            group_xml = "<GROUP><ID>#{pe_id}</ID></GROUP>"
        else
            group_xml = "<GROUP></GROUP>"
        end

        XMLElement.build_xml(group_xml,'GROUP')
    end

    # Class constructor
    def initialize(xml, client)
        super(xml,client)
    end

    #######################################################################
    # XML-RPC Methods for the Group Object
    #######################################################################

    # Retrieves the information of the given Group.
    def info()
        super(GROUP_METHODS[:info], 'GROUP')
    end

    alias_method :info!, :info

    # Creates a group based in a group definition hash
    #   group_hash[:name] the group name
    #   group_hash[:group_admin] the admin user definition hash, see def
    #     create_admin_user function description for details.
    #   group_hash[:resource_providers]
    #   group_hash[:resource_providers][:zone_id]
    #   group_hash[:resource_providers][:cluster_id]
    #
    # Returns 0 on success, or an OpenNebula::Error.
    def create(group_hash)
        # Check arguments
        if !group_hash[:name]
            return OpenNebula::Error.new("Group name not defined")
        end

        if group_hash[:group_admin]
            if group_hash[:group_admin][:name] && !group_hash[:group_admin][:password]
                error_msg = "Admin user password not defined"
                return OpenNebula::Error.new(error_msg)
            end
        end

        # Allocate group
        rc = self.allocate(group_hash[:name])
        return rc if OpenNebula.is_error?(rc)

        # Handle resource providers; "ALL" selects every cluster in the zone
        group_hash[:resource_providers].each { |rp|
            next if rp[:zone_id].nil? && rp[:cluster_id].nil?

            if rp[:cluster_id].class == String && rp[:cluster_id] == "ALL"
                add_provider(rp[:zone_id],ALL_CLUSTERS_IN_ZONE)
            else
                add_provider(rp[:zone_id],rp[:cluster_id])
            end
        } if !group_hash[:resource_providers].nil?

        # Set group ACLs to create resources
        rc, msg = create_default_acls(group_hash[:resources])

        if OpenNebula.is_error?(rc)
            error_msg = "Error creating group ACL's: #{rc.message}"
            return OpenNebula::Error.new(error_msg)
        end

        # Create associated group admin if needed
        rc = create_admin_user(group_hash)

        if OpenNebula.is_error?(rc)
            self.delete
            error_msg = "Error creating admin group: #{rc.message}"
            return OpenNebula::Error.new(error_msg)
        end

        # Add default Sunstone views for the group
        # (append mode so the attributes set above are preserved)
        if group_hash[:views]
            str = "SUNSTONE_VIEWS=\"#{group_hash[:views].join(",")}\"\n"
            self.update(str, true)
        end

        return 0
    end

    # Allocates a new Group in OpenNebula
    #
    # +groupname+ A string containing the name of the Group.
    def allocate(groupname)
        super(GROUP_METHODS[:allocate], groupname)
    end

    # Replaces the template contents
    #
    # @param new_template [String] New template contents
    # @param append [true, false] True to append new attributes instead of
    #   replace the whole template
    #
    # @return [nil, OpenNebula::Error] nil in case of success, Error
    #   otherwise
    def update(new_template=nil, append=false)
        super(GROUP_METHODS[:update], new_template, append ? 1 : 0)
    end

    # Deletes the Group
    def delete()
        super(GROUP_METHODS[:delete])
    end

    # Sets the group quota limits
    # @param quota [String] a template (XML or txt) with the new quota limits
    #
    # @return [nil, OpenNebula::Error] nil in case of success, Error
    #   otherwise
    def set_quota(quota)
        return Error.new('ID not defined') if !@pe_id

        rc = @client.call(GROUP_METHODS[:quota],@pe_id, quota)
        rc = nil if !OpenNebula.is_error?(rc)

        return rc
    end

    # Adds a resource provider to this group
    # @param zone_id [Integer] Zone ID
    # @param cluster_id [Integer] Cluster ID
    #
    # @return [nil, OpenNebula::Error] nil in case of success, Error
    #   otherwise
    def add_provider(zone_id, cluster_id)
        return call(GROUP_METHODS[:add_provider], @pe_id, zone_id.to_i, cluster_id.to_i)
    end

    # Deletes a resource provider from this group
    # @param zone_id [Integer] Zone ID
    # @param cluster_id [Integer] Cluster ID
    #
    # @return [nil, OpenNebula::Error] nil in case of success, Error
    #   otherwise
    def del_provider(zone_id, cluster_id)
        return call(GROUP_METHODS[:del_provider], @pe_id, zone_id.to_i, cluster_id.to_i)
    end

    # ---------------------------------------------------------------------
    # Helpers to get information
    # ---------------------------------------------------------------------

    # Returns whether or not the user with id 'uid' is part of this group
    def contains(uid)
        #This doesn't work in ruby 1.8.5
        #return self["USERS/ID[.=#{uid}]"] != nil

        id_array = retrieve_elements('USERS/ID')
        return id_array != nil && id_array.include?(uid.to_s)
    end

    # Returns an array with the numeric user ids
    def user_ids
        array = Array.new

        self.each("USERS/ID") do |id|
            array << id.text.to_i
        end

        return array
    end

    private

    #######################################################################
    #######################################################################

    # Creates an acl array of acl strings. Returns true or error and
    # an array with the new acl ids
    def create_group_acls(acls)
        acls_ids = Array.new

        acls.each{|rule|
            acl = OpenNebula::Acl.new(OpenNebula::Acl.build_xml,@client)

            rule_ast = "#{rule} *" #Add all zone id's

            parsed_acl = OpenNebula::Acl.parse_rule(rule_ast)
            return parsed_acl, [] if OpenNebula.is_error?(parsed_acl)

            rc = acl.allocate(*parsed_acl)
            return rc, "" if OpenNebula.is_error?(rc)

            acls_ids << acl.id
        }

        return true, acls_ids
    end

    # Grants the group CREATE rights over the given resources (defaults
    # to GROUP_DEFAULT_ACLS).
    def create_default_acls(resources=nil)
        resources = GROUP_DEFAULT_ACLS if !resources

        acls = Array.new
        acls << "@#{self.id} #{resources}/* CREATE"

        create_group_acls(acls)
    end

    # Creates a group admin and user based on the group definition hash
    # @param gdef [Hash] keys are ruby symbols
    #   gdef[:group_admin] the group admin hash
    #   gdef[:group_admin][:name] username for group admin
    #   gdef[:group_admin][:password] password for group admin
    #   gdef[:group_admin][:auth_driver] auth driver for group admin
    #   gdef[:group_admin][:resources] resources that group admin manage
    #   gdef[:group_admin][:manage_users] whether group admin manages
    #     group users (NOTE(review): earlier docs said :manage_resources,
    #     but the code reads :manage_users)
    #   gdef[:resources] resources that group users manage
    #
    # @return [nil, OpenNebula::Error] nil in case of success, Error
    def create_admin_user(gdef)
        return nil if gdef[:group_admin].nil? || gdef[:group_admin][:name].nil?

        # Create group admin
        uadmin  = gdef[:group_admin][:name]
        upasswd = gdef[:group_admin][:password]
        udriver = gdef[:group_admin][:auth_driver]

        if !uadmin.nil? && !upasswd.nil?
            group_admin = OpenNebula::User.new(OpenNebula::User.build_xml,
                @client)

            if udriver
                rc = group_admin.allocate(uadmin, upasswd, udriver)
            else
                rc = group_admin.allocate(uadmin, upasswd)
            end

            if OpenNebula.is_error?(rc)
                return rc
            end
        end

        # Set admin user groups to self
        rc = group_admin.chgrp(self.id)

        if OpenNebula.is_error?(rc)
            group_admin.delete
            return rc
        end

        #Create admin group acls
        acls = Array.new

        acls_str = (gdef[:group_admin][:resources] || \
                    gdef[:resources] || GROUP_DEFAULT_ACLS)

        manage_users = gdef[:group_admin][:manage_users] || "YES"

        if manage_users.upcase == "YES"
            acls << "##{group_admin.id} USER/@#{self.id} CREATE+USE+MANAGE+ADMIN"
        end

        acls << "##{group_admin.id} #{acls_str}/@#{self.id} " +
                "CREATE+USE+MANAGE"

        rc, tmp = create_group_acls(acls)

        if OpenNebula.is_error?(rc)
            group_admin.delete
            return rc
        end

        #Set Sunstone Views for the group
        # (append mode so the rest of the group template is preserved)
        gtmpl =  "GROUP_ADMINS=#{gdef[:group_admin][:name]}\n"
        gtmpl << "GROUP_ADMIN_VIEWS=#{GROUP_ADMIN_SUNSTONE_VIEWS}\n"

        self.update(gtmpl, true)

        return nil
    end
end
end
|
#!/usr/bin/env ruby
#
# The abstractish superclass of all MIDIator MIDI drivers.
#
# == Authors
#
# * Ben Bleything <ben@bleything.net>
#
# == Copyright
#
# Copyright (c) 2008 Ben Bleything
#
# This code released under the terms of the MIT license.
#
require 'midiator'
require 'midiator/driver_registry'
class MIDIator::Driver
  ##########################################################################
  ### M I D I   C O M M A N D   C O N S T A N T S
  ##########################################################################

  # Note on
  ON = 0x90

  # Note off
  OFF = 0x80

  # Control change (added so interfaces can send CC messages)
  CC = 0xb0

  # Program change
  PC = 0xc0

  ##########################################################################
  ### M A G I C   H O O K S
  ##########################################################################

  ### Auto-registers subclasses of MIDIator::Driver with the driver registry.
  def self::inherited( driver_class )
    driver_name = driver_class.to_s.underscore
    MIDIator::DriverRegistry.instance.register( driver_name, driver_class )
  end

  ##########################################################################
  ### I N T E R F A C E   A P I
  ##########################################################################
  # These methods are the same across all drivers and are the interface that
  # MIDIator::Interface interacts with.
  ##########################################################################

  ### Do any pre-open setup necessary.  Often will not be overridden.
  def initialize
    open
  end

  ### Shortcut to send a note_on message.
  def note_on( note, channel, velocity )
    message( ON | channel, note, velocity )
  end

  ### Shortcut to send a note_off message.
  def note_off( note, channel, velocity = 0 )
    message( OFF | channel, note, velocity )
  end

  ### Shortcut to send a control change: controller +number+ set to +value+
  ### on the given +channel+.
  def control_change( number, channel, value )
    message( CC | channel, number, value )
  end

  ### Shortcut to send a program_change message.
  def program_change( channel, program )
    message( PC | channel, program )
  end

  ##########################################################################
  ### D R I V E R   A P I
  ##########################################################################
  # subclasses must implement these methods.
  ##########################################################################

  protected

  ##########################################################################

  ### Open the channel to the MIDI service.
  def open
    raise NotImplementedError, "You must implement #open in your driver."
  end

  ### Close the channel to the MIDI service.
  def close
    raise NotImplementedError, "You must implement #close in your driver."
  end

  ### Send MIDI message to the MIDI service.
  def message( *args )
    raise NotImplementedError, "You must implement #message in your driver."
  end

  ### The only non-required method.  Override this to give the user instructions
  ### if necessary.
  def instruct_user!
  end
end
Implemented control changes.
#!/usr/bin/env ruby
#
# The abstractish superclass of all MIDIator MIDI drivers.
#
# == Authors
#
# * Ben Bleything <ben@bleything.net>
#
# == Copyright
#
# Copyright (c) 2008 Ben Bleything
#
# This code released under the terms of the MIT license.
#
require 'midiator'
require 'midiator/driver_registry'
class MIDIator::Driver
  ##########################################################################
  ### M I D I   C O M M A N D   C O N S T A N T S
  ##########################################################################

  # Note on
  ON = 0x90

  # Note off
  OFF = 0x80

  # Control change
  CC = 0xb0

  # Program change
  PC = 0xc0

  ##########################################################################
  ### M A G I C   H O O K S
  ##########################################################################

  ### Auto-registers subclasses of MIDIator::Driver with the driver registry.
  def self::inherited( driver_class )
    driver_name = driver_class.to_s.underscore
    MIDIator::DriverRegistry.instance.register( driver_name, driver_class )
  end

  ##########################################################################
  ### I N T E R F A C E   A P I
  ##########################################################################
  # These methods are the same across all drivers and are the interface that
  # MIDIator::Interface interacts with.
  ##########################################################################

  ### Do any pre-open setup necessary.  Often will not be overridden.
  def initialize
    open
  end

  ### Shortcut to send a note_on message.
  def note_on( note, channel, velocity )
    message( ON | channel, note, velocity )
  end

  ### Shortcut to send a note_off message.
  def note_off( note, channel, velocity = 0 )
    message( OFF | channel, note, velocity )
  end

  ### Shortcut to send a control change: controller +number+ set to +value+
  ### on the given +channel+.
  def control_change( number, channel, value )
    message( CC | channel, number, value )
  end

  ### Shortcut to send a program_change message.
  def program_change( channel, program )
    message( PC | channel, program )
  end

  ##########################################################################
  ### D R I V E R   A P I
  ##########################################################################
  # subclasses must implement these methods.
  ##########################################################################

  protected

  ##########################################################################

  ### Open the channel to the MIDI service.
  def open
    raise NotImplementedError, "You must implement #open in your driver."
  end

  ### Close the channel to the MIDI service.
  def close
    raise NotImplementedError, "You must implement #close in your driver."
  end

  ### Send MIDI message to the MIDI service.
  def message( *args )
    raise NotImplementedError, "You must implement #message in your driver."
  end

  ### The only non-required method.  Override this to give the user instructions
  ### if necessary.
  def instruct_user!
  end
end
|
Require 'socket' explicitly to be safe
|
module OrientDB::AR
  module Relations

    # Defines accessors for a one-to-one relation to +klass+.
    #
    # BUGFIX: the field name was computed as options[:name].to_s ||
    # field_name_for(...), but nil.to_s is "" (truthy), so the computed
    # fallback name was never used when :name was omitted.
    def has_one(klass, options = {})
      klass      = klass_for klass
      name       = (options[:name] || field_name_for(klass, true)).to_s
      field_type = klass.embedded? ? :embedded : :link
      field name, [OrientDB::FIELD_TYPES[field_type], klass.oclass]
      class_eval <<-eorb, __FILE__, __LINE__ + 1
        def #{name}
          doc = odocument[:#{name}]
          doc ? #{klass.name}.new_from_doc(doc) : nil
        end
      eorb
      #
      class_eval <<-eorb, __FILE__, __LINE__ + 1
        def #{name}=(value)
          raise "Invalid value for [#{name}]" unless value.is_a?(#{klass})
          odocument[:#{name}] = value.odocument
          #{name}
        end
      eorb
    end

    # Defines accessors (reader, writer and an add_<singular> appender)
    # for a one-to-many relation to +klass+.
    # Same nil.to_s fix as in #has_one.
    def has_many(klass, options = {})
      klass      = klass_for klass
      name       = (options[:name] || field_name_for(klass, false)).to_s
      field_type = klass.embedded? ? :embedded : :link
      field name, [OrientDB::FIELD_TYPES[field_type], klass.oclass]
      class_eval <<-eorb, __FILE__, __LINE__ + 1
        def #{name}
          docs = odocument[:#{name}]
          docs ? #{klass.name}.new_from_docs(docs) : nil
        end
      eorb
      class_eval <<-eorb, __FILE__, __LINE__ + 1
        def #{name}=(value)
          odocument[:#{name}] = value.map{|x| x.odocument }
          #{name}
        end
      eorb
      class_eval <<-eorb, __FILE__, __LINE__ + 1
        def add_#{name.singularize}(value)
          odocument[:#{name}] << value.odocument
          #{name}
        end
      eorb
    end

    private

    # Resolves +klass+ to a Class: passes Class objects through, and
    # constantizes symbols/strings ("addresses" -> Address).
    def klass_for(klass)
      return klass if klass.class.name == 'Class'
      klass.to_s.singularize.camelize.constantize
    rescue
      raise "Problem getting klass for [#{klass}]"
    end

    # Default field name: underscored class name, singularized or
    # pluralized, with namespace separators flattened to '__'.
    def field_name_for(klass, singular)
      klass.to_s.underscore.send(singular ? :singularize : :pluralize).gsub('/', '__')
    end
  end
end
More relations
module OrientDB::AR
  module Relations

    # Defines accessors for an embedded one-to-one relation to +klass+.
    # (Method name spelling kept for backward compatibility.)
    #
    # BUGFIX: the field name was computed as options[:name].to_s ||
    # field_name_for(...), but nil.to_s is "" (truthy), so the computed
    # fallback name was never used when :name was omitted.
    def embedds_one(klass, options = {})
      klass = klass_for klass
      name  = (options[:name] || field_name_for(klass, true)).to_s
      field name, [OrientDB::FIELD_TYPES[:embedded], klass.oclass]
      class_eval <<-eorb, __FILE__, __LINE__ + 1
        def #{name}
          doc = odocument[:#{name}]
          doc ? #{klass.name}.new_from_doc(doc) : nil
        end
      eorb
      #
      class_eval <<-eorb, __FILE__, __LINE__ + 1
        def #{name}=(value)
          raise "Invalid value for [#{name}]" unless value.is_a?(#{klass})
          odocument[:#{name}] = value.odocument
          #{name}
        end
      eorb
    end

    # Defines accessors for a linked one-to-one relation to +klass+.
    def belongs_to(klass, options = {})
      klass = klass_for klass
      name  = options.delete(:name) || field_name_for(klass, true)
      field name, [OrientDB::FIELD_TYPES[:link], klass.oclass]
      class_eval <<-eorb, __FILE__, __LINE__ + 1
        def #{name}
          doc = odocument[:#{name}]
          doc ? #{klass.name}.new_from_doc(doc) : nil
        end
      eorb
      #
      class_eval <<-eorb, __FILE__, __LINE__ + 1
        def #{name}=(value)
          raise "Invalid value for [#{name}]" unless value.is_a?(#{klass})
          odocument[:#{name}] = value.odocument
          #{name}
        end
      eorb
    end

    # Defines accessors (reader, writer and an add_<singular> appender)
    # for a one-to-many relation to +klass+.
    # Same nil.to_s fix as in #embedds_one.
    def has_many(klass, options = {})
      klass      = klass_for klass
      name       = (options[:name] || field_name_for(klass, false)).to_s
      field_type = klass.embedded? ? :embedded : :link
      field name, [OrientDB::FIELD_TYPES[field_type], klass.oclass]
      class_eval <<-eorb, __FILE__, __LINE__ + 1
        def #{name}
          docs = odocument[:#{name}]
          docs ? #{klass.name}.new_from_docs(docs) : nil
        end
      eorb
      class_eval <<-eorb, __FILE__, __LINE__ + 1
        def #{name}=(value)
          odocument[:#{name}] = value.map{|x| x.odocument }
          #{name}
        end
      eorb
      class_eval <<-eorb, __FILE__, __LINE__ + 1
        def add_#{name.singularize}(value)
          odocument[:#{name}] << value.odocument
          #{name}
        end
      eorb
    end

    private

    # Resolves +klass+ to a Class: passes Class objects through, and
    # constantizes symbols/strings ("addresses" -> Address).
    def klass_for(klass)
      return klass if klass.class.name == 'Class'
      klass.to_s.singularize.camelize.constantize
    rescue
      raise "Problem getting klass for [#{klass}]"
    end

    # Default field name: underscored class name, singularized or
    # pluralized, with namespace separators flattened to '__'.
    def field_name_for(klass, singular)
      klass.to_s.underscore.send(singular ? :singularize : :pluralize).gsub('/', '__')
    end
  end
end
require 'curses'
require 'fileutils'
require 'tmpdir'
require 'rubygems/package'
require 'zip'
require 'zip/filesystem'
require_relative 'rfd/commands'
require_relative 'rfd/item'
require_relative 'rfd/windows'
module Rfd
VERSION = Gem.loaded_specs['rfd'].version.to_s
# :nodoc:
# One-time curses setup: raw keyboard mode, hidden cursor, color pairs
# and mouse-click reporting.
def self.init_curses
  Curses.init_screen
  Curses.raw
  Curses.noecho
  Curses.curs_set 0            # hide the hardware cursor
  Curses.stdscr.keypad = true  # deliver arrow/function keys as single codes
  Curses.start_color

  # Pair each foreground color with a black background (pair no. == color no.).
  [Curses::COLOR_WHITE, Curses::COLOR_CYAN, Curses::COLOR_MAGENTA, Curses::COLOR_GREEN, Curses::COLOR_RED].each do |c|
    Curses.init_pair c, c, Curses::COLOR_BLACK
  end

  # Report single and double left-button clicks only.
  Curses.mousemask Curses::BUTTON1_CLICKED | Curses::BUTTON1_DOUBLE_CLICKED
end
# Start the app here!
#
# ==== Parameters
# * +dir+ - The initial directory.
#
# Returns the Rfd::Controller driving the session (its #run starts the
# event loop).
def self.start(dir = '.')
  init_curses
  Rfd::Window.draw_borders
  Curses.refresh
  rfd = Rfd::Controller.new
  rfd.cd dir
  rfd
end
class Controller
include Rfd::Commands
attr_reader :header_l, :header_r, :main, :command_line, :items, :displayed_items, :current_row, :current_page, :current_dir, :current_zip
# :nodoc:
# Builds the four curses windows and resets all per-session state.
def initialize
  @main = MainWindow.new
  @header_l = HeaderLeftWindow.new
  @header_r = HeaderRightWindow.new
  @command_line = CommandLineWindow.new
  @direction = nil
  @dir_history = []
  @last_command = nil
  @times = nil
  @yanked_items = nil
end
# The main loop.
#
# Reads one keystroke (or mouse event) per iteration and dispatches it
# to the command method of the same name from Rfd::Commands. Exits when
# a command raises StopIteration; any other error is displayed in the
# command line window. The screen is restored on the way out.
def run
  loop do
    begin
      number_pressed = false
      case (c = Curses.getch).ord
      when 10, 13 # enter, return
        enter
      when 27 # ESC
        q
      when 32 # space
        space
      when 127 # DEL
        del
      when Curses::KEY_DOWN
        j
      when Curses::KEY_UP
        k
      when Curses::KEY_LEFT
        h
      when Curses::KEY_RIGHT
        l
      when Curses::KEY_CTRL_A..Curses::KEY_CTRL_Z
        # Map the control code back to its letter and call e.g. ctrl_a.
        chr = ((c - 1 + 65) ^ 0b0100000).chr
        public_send "ctrl_#{chr}" if respond_to?("ctrl_#{chr}")
      when 48..57 # ?0..?9
        public_send c.chr
        number_pressed = true
      when 0..255
        if respond_to? c.chr
          public_send c.chr
        else
          debug "key: #{c}" if ENV['DEBUG']
        end
      when Curses::KEY_MOUSE
        if (mouse_event = Curses.getmouse)
          case mouse_event.bstate
          when Curses::BUTTON1_CLICKED
            click y: mouse_event.y, x: mouse_event.x
          when Curses::BUTTON1_DOUBLE_CLICKED
            double_click y: mouse_event.y, x: mouse_event.x
          end
        end
      else
        debug "key: #{c}" if ENV['DEBUG']
      end
      # Digit keys accumulate a repeat count; any other key resets it.
      @times = nil unless number_pressed
    rescue StopIteration
      raise
    rescue => e
      command_line.show_error e.to_s
      raise if ENV['DEBUG']
    end
  end
ensure
  Curses.close_screen
end
# Change the number of columns in the main window.
# Redraws the item list and resets the cursor to the first row of the
# first page.
def spawn_panes(num)
  main.spawn_panes num
  draw_items
  @current_row = @current_page = 0
end
# Number of times to repeat the next command.
# @times accumulates pressed digit keys; defaults to 1 when unset.
def times
  @times ? @times.to_i : 1
end
# The file or directory on which the cursor is on.
def current_item
items[current_row]
end
# * marked files and directories.
def marked_items
items.select(&:marked?)
end
# Marked files and directories or Array(the current file or directory).
#
# . and .. will not be included.
def selected_items
((m = marked_items).any? ? m : Array(current_item)).reject {|i| %w(. ..).include? i.name}
end
# Move the cursor to specified row.
#
# The main window and the headers will be updated reflecting the displayed files and directories.
# The row number can be out of range of the current page.
def move_cursor(row = nil)
  if row
    # Which page does the target row live on? (item_index_in_page is unused.)
    page, item_index_in_page = row.divmod max_items
    # Redraw the previously current item without the cursor highlight.
    if (prev_item = items[current_row])
      main.draw_item prev_item
    end
    switch_page page if page != current_page
    # Each pane shows maxy rows, so row / maxy selects the pane to activate.
    main.activate_pane row / maxy
    @current_row = row
  else
    @current_row = 0
  end
  # Draw the newly current item highlighted and refresh the left header.
  item = items[current_row]
  main.draw_item item, current: true
  main.display current_page
  header_l.draw_current_file_info item
  header_l.wrefresh
  @current_row
end
# Change the current directory.
#
# ==== Parameters
# * +dir+   - Target directory: an Item, or a path String (defaults to '~').
# * +pushd+ - When true, remember the previous directory so #popd can return to it.
def cd(dir = '~', pushd: true)
  dir = load_item expand_path(dir) unless dir.is_a? Item
  unless dir.zip?
    Dir.chdir dir
    @current_zip = nil
  else
    # Entering a .zip: keep the real cwd and browse the archive as a directory.
    @current_zip = dir
  end
  @dir_history << current_dir if current_dir && pushd
  # Reset paging/cursor state for the new directory.
  @current_dir, @current_page, @current_row = dir, 0, nil
  main.activate_pane 0
  ls
end
# cd to the previous directory.
def popd
cd @dir_history.pop, pushd: false if @dir_history.any?
end
# Fetch files from current directory.
# Then update each windows reflecting the newest information.
def ls
  fetch_items_from_filesystem_or_zip
  sort_items_according_to_current_direction
  @current_page ||= 0
  draw_items
  # Keep the cursor on the same row when possible, clamped to the last item.
  move_cursor (current_row ? [current_row, items.size - 1].min : nil)
  draw_marked_items
  draw_total_items
end
# Sort the whole files and directories in the current directory, then refresh the screen.
#
# ==== Parameters
# * +direction+ - Sort order in a String.
# nil : order by name
# r : reverse order by name
# s, S : order by file size
# sr, Sr: reverse order by file size
# t : order by mtime
# tr : reverse order by mtime
# c : order by ctime
# cr : reverse order by ctime
# u : order by atime
# ur : reverse order by atime
# e : order by extname
# er : reverse order by extname
def sort(direction = nil)
@direction, @current_page = direction, 0
sort_items_according_to_current_direction
switch_page 0
move_cursor 0
end
# Change the file permission of the selected files and directories.
#
# ==== Parameters
# * +mode+ - Unix chmod string (e.g. +w, g-r, 755, 0644)
def chmod(mode = nil)
return unless mode
begin
Integer mode
mode = Integer mode.size == 3 ? "0#{mode}" : mode
rescue ArgumentError
end
FileUtils.chmod mode, selected_items.map(&:path)
ls
end
# Change the file owner of the selected files and directories.
#
# ==== Parameters
# * +user_and_group+ - user name and group name separated by : (e.g. alice, nobody:nobody, :admin)
def chown(user_and_group)
return unless user_and_group
user, group = user_and_group.split(':').map {|s| s == '' ? nil : s}
FileUtils.chown user, group, selected_items.map(&:path)
ls
end
# Fetch files from current directory or current .zip file.
def fetch_items_from_filesystem_or_zip
unless in_zip?
@items = Dir.foreach(current_dir).map {|fn|
stat = File.lstat current_dir.join(fn)
Item.new dir: current_dir, name: fn, stat: stat, window_width: main.width
}.to_a.partition {|i| %w(. ..).include? i.name}.flatten
else
@items = [Item.new(dir: current_dir, name: '.', stat: File.stat(current_dir), window_width: main.width),
Item.new(dir: current_dir, name: '..', stat: File.stat(File.dirname(current_dir)), window_width: main.width)]
zf = Zip::File.new current_dir
zf.each {|entry|
next if entry.name_is_directory?
stat = zf.file.stat entry.name
@items << Item.new(dir: current_dir, name: entry.name, stat: stat, window_width: main.width)
}
end
end
# Focus at the first file or directory whose name starts with the given
# String. Searches below the cursor first, then wraps to the top.
def find(str)
  matcher = ->(item) { item.name.start_with? str }
  target = items.index {|item| item.index > current_row && matcher[item]} || items.index(&matcher)
  move_cursor target if target
end
# Focus at the last file or directory whose name starts with the given
# String. Searches above the cursor first, then wraps to the bottom.
def find_reverse(str)
  matcher = ->(item) { item.name.start_with? str }
  reversed = items.reverse
  pos = reversed.index {|item| item.index < current_row && matcher[item]} || reversed.index(&matcher)
  move_cursor items.size - pos - 1 if pos
end
# Width of the currently active pane.
def maxx
main.maxx
end
# Height of the currently active pane.
def maxy
main.maxy
end
# Number of files or directories that the current main window can show in a page.
def max_items
main.max_items
end
# Update the main window with the loaded files and directories. Also update the header.
def draw_items
main.newpad items
@displayed_items = items[current_page * max_items, max_items]
main.display current_page
header_l.draw_path_and_page_number path: current_dir.path, current: current_page + 1, total: total_pages
end
# Sort the loaded files and directories in already given sort order.
def sort_items_according_to_current_direction
case @direction
when nil
@items = items.shift(2) + items.partition(&:directory?).flat_map(&:sort)
when 'r'
@items = items.shift(2) + items.partition(&:directory?).flat_map {|arr| arr.sort.reverse}
when 'S', 's'
@items = items.shift(2) + items.partition(&:directory?).flat_map {|arr| arr.sort_by {|i| -i.size}}
when 'Sr', 'sr'
@items = items.shift(2) + items.partition(&:directory?).flat_map {|arr| arr.sort_by(&:size)}
when 't'
@items = items.shift(2) + items.partition(&:directory?).flat_map {|arr| arr.sort {|x, y| y.mtime <=> x.mtime}}
when 'tr'
@items = items.shift(2) + items.partition(&:directory?).flat_map {|arr| arr.sort_by(&:mtime)}
when 'c'
@items = items.shift(2) + items.partition(&:directory?).flat_map {|arr| arr.sort {|x, y| y.ctime <=> x.ctime}}
when 'cr'
@items = items.shift(2) + items.partition(&:directory?).flat_map {|arr| arr.sort_by(&:ctime)}
when 'u'
@items = items.shift(2) + items.partition(&:directory?).flat_map {|arr| arr.sort {|x, y| y.atime <=> x.atime}}
when 'ur'
@items = items.shift(2) + items.partition(&:directory?).flat_map {|arr| arr.sort_by(&:atime)}
when 'e'
@items = items.shift(2) + items.partition(&:directory?).flat_map {|arr| arr.sort {|x, y| y.extname <=> x.extname}}
when 'er'
@items = items.shift(2) + items.partition(&:directory?).flat_map {|arr| arr.sort_by(&:extname)}
end
items.each.with_index {|item, index| item.index = index}
end
# Search files and directories from the current directory, and update the screen.
#
# * +pattern+ - Search pattern against file names in Ruby Regexp string.
#
# === Example
#
# a : Search files that contains the letter "a" in their file name
# .*\.pdf$ : Search PDF files
def grep(pattern = '.*')
regexp = Regexp.new(pattern)
fetch_items_from_filesystem_or_zip
@items = items.shift(2) + items.select {|i| i.name =~ regexp}
sort_items_according_to_current_direction
switch_page 0
move_cursor 0
draw_total_items
end
# Copy selected files and directories to the destination.
def cp(dest)
unless in_zip?
src = (m = marked_items).any? ? m.map(&:path) : current_item
FileUtils.cp_r src, expand_path(dest)
else
raise 'cping multiple items in .zip is not supported.' if selected_items.size > 1
Zip::File.open(current_zip) do |zip|
entry = zip.find_entry(selected_items.first.name).dup
entry.name, entry.name_length = dest, dest.size
zip.instance_variable_get(:@entry_set) << entry
end
end
ls
end
# Move selected files and directories to the destination.
def mv(dest)
unless in_zip?
src = (m = marked_items).any? ? m.map(&:path) : current_item
FileUtils.mv src, expand_path(dest)
else
raise 'mving multiple items in .zip is not supported.' if selected_items.size > 1
rename "#{selected_items.first.name}/#{dest}"
end
ls
end
# Rename selected files and directories.
#
# ==== Parameters
# * +pattern+ - / separated Regexp like string
def rename(pattern)
from, to = pattern.split '/'
from = Regexp.new from
unless in_zip?
selected_items.each do |item|
name = item.name.gsub from, to
FileUtils.mv item, current_dir.join(name) if item.name != name
end
else
Zip::File.open(current_zip) do |zip|
selected_items.each do |item|
name = item.name.gsub from, to
zip.rename item.name, name
end
end
end
ls
end
# Soft delete selected files and directories.
#
# If the OS is not OSX, performs the same as `delete` command.
def trash
unless in_zip?
if osx?
FileUtils.mv selected_items.map(&:path), File.expand_path('~/.Trash/')
else
#TODO support other OS
FileUtils.rm_rf selected_items.map(&:path)
end
else
return unless ask %Q[Trashing zip entries is not supported. Actually the files will be deleted. Are you sure want to proceed? (y/n)]
delete
end
@current_row -= selected_items.count {|i| i.index <= current_row}
ls
end
# Delete selected files and directories.
def delete
unless in_zip?
FileUtils.rm_rf selected_items.map(&:path)
else
Zip::File.open(current_zip) do |zip|
zip.select {|e| selected_items.map(&:name).include? e.to_s}.each do |entry|
if entry.name_is_directory?
zip.dir.delete entry.to_s
else
zip.file.delete entry.to_s
end
end
end
end
@current_row -= selected_items.count {|i| i.index <= current_row}
ls
end
# Create a new directory.
def mkdir(dir)
unless in_zip?
FileUtils.mkdir_p current_dir.join(dir)
else
Zip::File.open(current_zip) do |zip|
zip.dir.mkdir dir
end
end
ls
end
# Create a new empty file.
def touch(filename)
unless in_zip?
FileUtils.touch current_dir.join(filename)
else
Zip::File.open(current_zip) do |zip|
# zip.file.open(filename, 'w') {|_f| } #HAXX this code creates an unneeded temporary file
zip.instance_variable_get(:@entry_set) << Zip::Entry.new(current_zip, filename)
end
end
ls
end
# Create a symlink to the current file or directory.
def symlink(name)
FileUtils.ln_s current_item, name
ls
end
# Yank selected file / directory names.
def yank
@yanked_items = selected_items
end
# Paste yanked files / directories here.
def paste
if @yanked_items
if current_item.directory?
FileUtils.cp_r @yanked_items.map(&:path), current_item
else
@yanked_items.each do |item|
if items.include? item
i = 1
while i += 1
new_item = Item.new dir: current_dir, name: "#{item.basename}_#{i}#{item.extname}", stat: item.stat, window_width: maxx
break unless File.exist? new_item.path
end
FileUtils.cp_r item, new_item
else
FileUtils.cp_r item, current_dir
end
end
end
ls
end
end
# Copy selected files and directories' path into clipboard on OSX.
def clipboard
IO.popen('pbcopy', 'w') {|f| f << selected_items.map(&:path).join(' ')} if osx?
end
# Archive selected files and directories into a .zip file.
def zip(zipfile_name)
return unless zipfile_name
zipfile_name << '.zip' unless zipfile_name.end_with? '.zip'
Zip::File.open(zipfile_name, Zip::File::CREATE) do |zipfile|
selected_items.each do |item|
next if item.symlink?
if item.directory?
Dir[item.join('**/**')].each do |file|
zipfile.add file.sub("#{current_dir}/", ''), file
end
else
zipfile.add item.name, item
end
end
end
ls
end
# Unarchive .zip and .tar.gz files within selected files and directories into current_directory.
def unarchive
unless in_zip?
zips, gzs = selected_items.partition(&:zip?).tap {|z, others| break [z, *others.partition(&:gz?)]}
zips.each do |item|
FileUtils.mkdir_p current_dir.join(item.basename)
Zip::File.open(item) do |zip|
zip.each do |entry|
FileUtils.mkdir_p File.join(item.basename, File.dirname(entry.to_s))
zip.extract(entry, File.join(item.basename, entry.to_s)) { true }
end
end
end
gzs.each do |item|
Zlib::GzipReader.open(item) do |gz|
Gem::Package::TarReader.new(gz) do |tar|
dest_dir = current_dir.join (gz.orig_name || item.basename).sub(/\.tar$/, '')
tar.each do |entry|
dest = nil
if entry.full_name == '././@LongLink'
dest = File.join dest_dir, entry.read.strip
next
end
dest ||= File.join dest_dir, entry.full_name
if entry.directory?
FileUtils.mkdir_p dest, :mode => entry.header.mode
elsif entry.file?
FileUtils.mkdir_p dest_dir
File.open(dest, 'wb') {|f| f.print entry.read}
FileUtils.chmod entry.header.mode, dest
elsif entry.header.typeflag == '2' # symlink
File.symlink entry.header.linkname, dest
end
unless Dir.exist? dest_dir
FileUtils.mkdir_p dest_dir
File.open(File.join(dest_dir, gz.orig_name || item.basename), 'wb') {|f| f.print gz.read}
end
end
end
end
end
else
Zip::File.open(current_zip) do |zip|
zip.select {|e| selected_items.map(&:name).include? e.to_s}.each do |entry|
FileUtils.mkdir_p File.join(current_zip.dir, current_zip.basename, File.dirname(entry.to_s))
zip.extract(entry, File.join(current_zip.dir, current_zip.basename, entry.to_s)) { true }
end
end
end
ls
end
# Current page is the first page?
def first_page?
current_page == 0
end
# Do we have more pages?
def last_page?
current_page == total_pages - 1
end
# Number of pages in the current directory.
#
# Uses ceiling division so a directory holding an exact multiple of
# max_items does not report a trailing empty page (the old
# `size / max_items + 1` did). items always contains at least '.' and '..'.
def total_pages
  (items.size - 1) / max_items + 1
end
# Move to the given page number.
#
# ==== Parameters
# * +page+ - Target page number
def switch_page(page)
main.display (@current_page = page)
@displayed_items = items[current_page * max_items, max_items]
header_l.draw_path_and_page_number path: current_dir.path, current: current_page + 1, total: total_pages
end
# Update the right header with the count and total byte size of the
# currently marked files and directories.
def draw_marked_items
  marked = marked_items
  header_r.draw_marked_items count: marked.size, size: marked.sum(&:size)
end
# Update the right header with the count and total byte size of every
# file and directory in the current directory.
def draw_total_items
  header_r.draw_total_items count: items.size, size: items.sum(&:size)
end
# Swktch on / off marking on the current file or directory.
def toggle_mark
main.toggle_mark current_item
end
# Get a char as a String from user input.
#
# Returns nil for keys whose code falls outside 0..255 (function keys etc.).
# NOTE(review): Curses.getch can return nil in non-blocking mode, in which
# case #ord would raise NoMethodError — assumes blocking mode here; confirm.
def get_char
  c = Curses.getch
  c if (0..255) === c.ord
end
# Accept user input, and directly execute it as a Ruby method call to the controller.
#
# ==== Parameters
# * +preset_command+ - A command that would be displayed at the command line before user input.
def process_command_line(preset_command: nil)
prompt = preset_command ? ":#{preset_command} " : ':'
command_line.set_prompt prompt
cmd, *args = command_line.get_command(prompt: prompt).split(' ')
if cmd && !cmd.empty? && respond_to?(cmd)
self.public_send cmd, *args
command_line.wclear
command_line.wrefresh
end
rescue Interrupt
command_line.wclear
command_line.wrefresh
end
# Accept user input, and directly execute it in an external shell.
def process_shell_command
command_line.set_prompt ':!'
cmd = command_line.get_command(prompt: ':!')[1..-1]
execute_external_command pause: true do
system cmd
end
rescue Interrupt
ensure
command_line.wclear
command_line.wrefresh
end
# Let the user answer y or n.
#
# Returns true when Y/y is typed, false for N/n, ^c or esc.
#
# ==== Parameters
# * +prompt+ - Prompt message
def ask(prompt = '(y/n)')
  command_line.set_prompt prompt
  command_line.wrefresh
  while (c = Curses.getch)
    # Accept only Y/y/N/n, ^c (3) and esc (27). getch may return a String
    # (printable keys) or an Integer (control keys).
    next unless [?N, ?Y, ?n, ?y, 3, 27].include? c
    command_line.wclear
    command_line.wrefresh
    # The original `%[Y y]` built a String, so `.include?(c)` raised
    # TypeError when c was the Integer 3 or 27; an Array compares safely
    # and yields false (i.e. "no") for those keys.
    break %w(Y y).include?(c)
  end
end
# Open current file or directory with the editor.
def edit
execute_external_command do
editor = ENV['EDITOR'] || 'vim'
unless in_zip?
system %Q[#{editor} "#{current_item.path}"]
else
begin
tmpdir, tmpfile_name = nil
Zip::File.open(current_zip) do |zip|
tmpdir = Dir.mktmpdir
FileUtils.mkdir_p File.join(tmpdir, File.dirname(current_item.name))
tmpfile_name = File.join(tmpdir, current_item.name)
File.open(tmpfile_name, 'w') {|f| f.puts zip.file.read(current_item.name)}
system %Q[#{editor} "#{tmpfile_name}"]
zip.add(current_item.name, tmpfile_name) { true }
end
ls
ensure
FileUtils.remove_entry_secure tmpdir if tmpdir
end
end
end
end
# Open current file or directory with the viewer.
def view
pager = ENV['PAGER'] || 'less'
execute_external_command do
unless in_zip?
system %Q[#{pager} "#{current_item.path}"]
else
begin
tmpdir, tmpfile_name = nil
Zip::File.open(current_zip) do |zip|
tmpdir = Dir.mktmpdir
FileUtils.mkdir_p File.join(tmpdir, File.dirname(current_item.name))
tmpfile_name = File.join(tmpdir, current_item.name)
File.open(tmpfile_name, 'w') {|f| f.puts zip.file.read(current_item.name)}
end
system %Q[#{pager} "#{tmpfile_name}"]
ensure
FileUtils.remove_entry_secure tmpdir if tmpdir
end
end
end
end
def move_cursor_by_click(y: nil, x: nil)
if (idx = main.pane_index_at(y: y, x: x))
row = current_page * max_items + main.maxy * idx + y - main.begy
move_cursor row if (row >= 0) && (row < items.size)
end
end
private
# Suspend curses, run the given block (typically launching an editor,
# pager or shell command), then restore the terminal UI.
#
# ==== Parameters
# * +pause+ - When true, wait for one keypress before redrawing the UI,
#   so the user can read the external command's output.
def execute_external_command(pause: false)
  Curses.def_prog_mode
  Curses.close_screen
  yield
ensure
  Curses.reset_prog_mode
  Curses.getch if pause
  #NOTE needs to draw borders and ls again here since the stdlib Curses.refresh fails to retrieve the previous screen
  Rfd::Window.draw_borders
  ls
  Curses.refresh
end
# Expand +path+ into an absolute path. Absolute ('/...') and home ('~...')
# paths pass straight through to File.expand_path; relative paths are
# resolved against the current directory when one is set.
def expand_path(path)
  base = if path.start_with?('/') || path.start_with?('~')
           path
         elsif current_dir
           current_dir.join(path)
         else
           path
         end
  File.expand_path base
end
def load_item(path)
stat = File.lstat path
Item.new dir: File.dirname(path), name: File.basename(path), stat: stat, window_width: maxx
end
# Whether the host OS is OS X / macOS (Darwin). Memoized.
#
# NOTE(review): `=~` returns nil on non-Darwin hosts, so `||=` re-runs the
# match there on every call; harmless, the memo only sticks on Darwin.
def osx?
  @_osx ||= RbConfig::CONFIG['host_os'] =~ /darwin/
end
def in_zip?
@current_zip
end
def debug(str)
header_r.debug str
end
end
end
No need to call `#ord` on the value returned by `Curses.getch`
require 'curses'
require 'fileutils'
require 'tmpdir'
require 'rubygems/package'
require 'zip'
require 'zip/filesystem'
require_relative 'rfd/commands'
require_relative 'rfd/item'
require_relative 'rfd/windows'
module Rfd
VERSION = Gem.loaded_specs['rfd'].version.to_s
# :nodoc:
def self.init_curses
Curses.init_screen
Curses.raw
Curses.noecho
Curses.curs_set 0
Curses.stdscr.keypad = true
Curses.start_color
[Curses::COLOR_WHITE, Curses::COLOR_CYAN, Curses::COLOR_MAGENTA, Curses::COLOR_GREEN, Curses::COLOR_RED].each do |c|
Curses.init_pair c, c, Curses::COLOR_BLACK
end
Curses.mousemask Curses::BUTTON1_CLICKED | Curses::BUTTON1_DOUBLE_CLICKED
end
# Start the app here!
#
# ==== Parameters
# * +dir+ - The initial directory.
def self.start(dir = '.')
init_curses
Rfd::Window.draw_borders
Curses.refresh
rfd = Rfd::Controller.new
rfd.cd dir
rfd
end
class Controller
include Rfd::Commands
attr_reader :header_l, :header_r, :main, :command_line, :items, :displayed_items, :current_row, :current_page, :current_dir, :current_zip
# :nodoc:
def initialize
@main = MainWindow.new
@header_l = HeaderLeftWindow.new
@header_r = HeaderRightWindow.new
@command_line = CommandLineWindow.new
@direction, @dir_history, @last_command, @times, @yanked_items = nil, [], nil, nil, nil
end
# The main loop.
def run
loop do
begin
number_pressed = false
case (c = Curses.getch)
when 10, 13 # enter, return
enter
when 27 # ESC
q
when ' ' # space
space
when 127 # DEL
del
when Curses::KEY_DOWN
j
when Curses::KEY_UP
k
when Curses::KEY_LEFT
h
when Curses::KEY_RIGHT
l
when Curses::KEY_CTRL_A..Curses::KEY_CTRL_Z
chr = ((c - 1 + 65) ^ 0b0100000).chr
public_send "ctrl_#{chr}" if respond_to?("ctrl_#{chr}")
when ?0..?9
public_send c
number_pressed = true
when ?!..?~
if respond_to? c
public_send c
else
debug "key: #{c}" if ENV['DEBUG']
end
when Curses::KEY_MOUSE
if (mouse_event = Curses.getmouse)
case mouse_event.bstate
when Curses::BUTTON1_CLICKED
click y: mouse_event.y, x: mouse_event.x
when Curses::BUTTON1_DOUBLE_CLICKED
double_click y: mouse_event.y, x: mouse_event.x
end
end
else
debug "key: #{c}" if ENV['DEBUG']
end
@times = nil unless number_pressed
rescue StopIteration
raise
rescue => e
command_line.show_error e.to_s
raise if ENV['DEBUG']
end
end
ensure
Curses.close_screen
end
# Change the number of columns in the main window.
def spawn_panes(num)
main.spawn_panes num
draw_items
@current_row = @current_page = 0
end
# Number of times to repeat the next command.
def times
(@times || 1).to_i
end
# The file or directory on which the cursor is on.
def current_item
items[current_row]
end
# * marked files and directories.
def marked_items
items.select(&:marked?)
end
# Marked files and directories or Array(the current file or directory).
#
# . and .. will not be included.
def selected_items
((m = marked_items).any? ? m : Array(current_item)).reject {|i| %w(. ..).include? i.name}
end
# Move the cursor to specified row.
#
# The main window and the headers will be updated reflecting the displayed files and directories.
# The row number can be out of range of the current page.
def move_cursor(row = nil)
if row
page, item_index_in_page = row.divmod max_items
if (prev_item = items[current_row])
main.draw_item prev_item
end
switch_page page if page != current_page
main.activate_pane row / maxy
@current_row = row
else
@current_row = 0
end
item = items[current_row]
main.draw_item item, current: true
main.display current_page
header_l.draw_current_file_info item
header_l.wrefresh
@current_row
end
# Change the current directory.
def cd(dir = '~', pushd: true)
dir = load_item expand_path(dir) unless dir.is_a? Item
unless dir.zip?
Dir.chdir dir
@current_zip = nil
else
@current_zip = dir
end
@dir_history << current_dir if current_dir && pushd
@current_dir, @current_page, @current_row = dir, 0, nil
main.activate_pane 0
ls
end
# cd to the previous directory.
def popd
cd @dir_history.pop, pushd: false if @dir_history.any?
end
# Fetch files from current directory.
# Then update each windows reflecting the newest information.
def ls
fetch_items_from_filesystem_or_zip
sort_items_according_to_current_direction
@current_page ||= 0
draw_items
move_cursor (current_row ? [current_row, items.size - 1].min : nil)
draw_marked_items
draw_total_items
end
# Sort the whole files and directories in the current directory, then refresh the screen.
#
# ==== Parameters
# * +direction+ - Sort order in a String.
# nil : order by name
# r : reverse order by name
# s, S : order by file size
# sr, Sr: reverse order by file size
# t : order by mtime
# tr : reverse order by mtime
# c : order by ctime
# cr : reverse order by ctime
# u : order by atime
# ur : reverse order by atime
# e : order by extname
# er : reverse order by extname
def sort(direction = nil)
@direction, @current_page = direction, 0
sort_items_according_to_current_direction
switch_page 0
move_cursor 0
end
# Change the file permission of the selected files and directories.
#
# ==== Parameters
# * +mode+ - Unix chmod string (e.g. +w, g-r, 755, 0644)
def chmod(mode = nil)
return unless mode
begin
Integer mode
mode = Integer mode.size == 3 ? "0#{mode}" : mode
rescue ArgumentError
end
FileUtils.chmod mode, selected_items.map(&:path)
ls
end
# Change the file owner of the selected files and directories.
#
# ==== Parameters
# * +user_and_group+ - user name and group name separated by : (e.g. alice, nobody:nobody, :admin)
def chown(user_and_group)
return unless user_and_group
user, group = user_and_group.split(':').map {|s| s == '' ? nil : s}
FileUtils.chown user, group, selected_items.map(&:path)
ls
end
# Fetch files from current directory or current .zip file.
def fetch_items_from_filesystem_or_zip
unless in_zip?
@items = Dir.foreach(current_dir).map {|fn|
stat = File.lstat current_dir.join(fn)
Item.new dir: current_dir, name: fn, stat: stat, window_width: main.width
}.to_a.partition {|i| %w(. ..).include? i.name}.flatten
else
@items = [Item.new(dir: current_dir, name: '.', stat: File.stat(current_dir), window_width: main.width),
Item.new(dir: current_dir, name: '..', stat: File.stat(File.dirname(current_dir)), window_width: main.width)]
zf = Zip::File.new current_dir
zf.each {|entry|
next if entry.name_is_directory?
stat = zf.file.stat entry.name
@items << Item.new(dir: current_dir, name: entry.name, stat: stat, window_width: main.width)
}
end
end
# Focus at the first file or directory of which name starts with the given String.
def find(str)
index = items.index {|i| i.index > current_row && i.name.start_with?(str)} || items.index {|i| i.name.start_with? str}
move_cursor index if index
end
# Focus at the last file or directory of which name starts with the given String.
def find_reverse(str)
index = items.reverse.index {|i| i.index < current_row && i.name.start_with?(str)} || items.reverse.index {|i| i.name.start_with? str}
move_cursor items.size - index - 1 if index
end
# Width of the currently active pane.
def maxx
main.maxx
end
# Height of the currently active pane.
def maxy
main.maxy
end
# Number of files or directories that the current main window can show in a page.
def max_items
main.max_items
end
# Update the main window with the loaded files and directories. Also update the header.
def draw_items
main.newpad items
@displayed_items = items[current_page * max_items, max_items]
main.display current_page
header_l.draw_path_and_page_number path: current_dir.path, current: current_page + 1, total: total_pages
end
# Sort the loaded files and directories in already given sort order.
def sort_items_according_to_current_direction
case @direction
when nil
@items = items.shift(2) + items.partition(&:directory?).flat_map(&:sort)
when 'r'
@items = items.shift(2) + items.partition(&:directory?).flat_map {|arr| arr.sort.reverse}
when 'S', 's'
@items = items.shift(2) + items.partition(&:directory?).flat_map {|arr| arr.sort_by {|i| -i.size}}
when 'Sr', 'sr'
@items = items.shift(2) + items.partition(&:directory?).flat_map {|arr| arr.sort_by(&:size)}
when 't'
@items = items.shift(2) + items.partition(&:directory?).flat_map {|arr| arr.sort {|x, y| y.mtime <=> x.mtime}}
when 'tr'
@items = items.shift(2) + items.partition(&:directory?).flat_map {|arr| arr.sort_by(&:mtime)}
when 'c'
@items = items.shift(2) + items.partition(&:directory?).flat_map {|arr| arr.sort {|x, y| y.ctime <=> x.ctime}}
when 'cr'
@items = items.shift(2) + items.partition(&:directory?).flat_map {|arr| arr.sort_by(&:ctime)}
when 'u'
@items = items.shift(2) + items.partition(&:directory?).flat_map {|arr| arr.sort {|x, y| y.atime <=> x.atime}}
when 'ur'
@items = items.shift(2) + items.partition(&:directory?).flat_map {|arr| arr.sort_by(&:atime)}
when 'e'
@items = items.shift(2) + items.partition(&:directory?).flat_map {|arr| arr.sort {|x, y| y.extname <=> x.extname}}
when 'er'
@items = items.shift(2) + items.partition(&:directory?).flat_map {|arr| arr.sort_by(&:extname)}
end
items.each.with_index {|item, index| item.index = index}
end
# Search files and directories from the current directory, and update the screen.
#
# * +pattern+ - Search pattern against file names in Ruby Regexp string.
#
# === Example
#
# a : Search files that contains the letter "a" in their file name
# .*\.pdf$ : Search PDF files
def grep(pattern = '.*')
regexp = Regexp.new(pattern)
fetch_items_from_filesystem_or_zip
@items = items.shift(2) + items.select {|i| i.name =~ regexp}
sort_items_according_to_current_direction
switch_page 0
move_cursor 0
draw_total_items
end
# Copy selected files and directories to the destination.
def cp(dest)
unless in_zip?
src = (m = marked_items).any? ? m.map(&:path) : current_item
FileUtils.cp_r src, expand_path(dest)
else
raise 'cping multiple items in .zip is not supported.' if selected_items.size > 1
Zip::File.open(current_zip) do |zip|
entry = zip.find_entry(selected_items.first.name).dup
entry.name, entry.name_length = dest, dest.size
zip.instance_variable_get(:@entry_set) << entry
end
end
ls
end
# Move selected files and directories to the destination.
def mv(dest)
unless in_zip?
src = (m = marked_items).any? ? m.map(&:path) : current_item
FileUtils.mv src, expand_path(dest)
else
raise 'mving multiple items in .zip is not supported.' if selected_items.size > 1
rename "#{selected_items.first.name}/#{dest}"
end
ls
end
# Rename selected files and directories.
#
# ==== Parameters
# * +pattern+ - / separated Regexp like string
def rename(pattern)
from, to = pattern.split '/'
from = Regexp.new from
unless in_zip?
selected_items.each do |item|
name = item.name.gsub from, to
FileUtils.mv item, current_dir.join(name) if item.name != name
end
else
Zip::File.open(current_zip) do |zip|
selected_items.each do |item|
name = item.name.gsub from, to
zip.rename item.name, name
end
end
end
ls
end
# Soft delete selected files and directories.
#
# If the OS is not OSX, performs the same as `delete` command.
def trash
unless in_zip?
if osx?
FileUtils.mv selected_items.map(&:path), File.expand_path('~/.Trash/')
else
#TODO support other OS
FileUtils.rm_rf selected_items.map(&:path)
end
else
return unless ask %Q[Trashing zip entries is not supported. Actually the files will be deleted. Are you sure want to proceed? (y/n)]
delete
end
@current_row -= selected_items.count {|i| i.index <= current_row}
ls
end
# Delete selected files and directories.
def delete
unless in_zip?
FileUtils.rm_rf selected_items.map(&:path)
else
Zip::File.open(current_zip) do |zip|
zip.select {|e| selected_items.map(&:name).include? e.to_s}.each do |entry|
if entry.name_is_directory?
zip.dir.delete entry.to_s
else
zip.file.delete entry.to_s
end
end
end
end
@current_row -= selected_items.count {|i| i.index <= current_row}
ls
end
# Create a new directory.
def mkdir(dir)
unless in_zip?
FileUtils.mkdir_p current_dir.join(dir)
else
Zip::File.open(current_zip) do |zip|
zip.dir.mkdir dir
end
end
ls
end
# Create a new empty file.
def touch(filename)
unless in_zip?
FileUtils.touch current_dir.join(filename)
else
Zip::File.open(current_zip) do |zip|
# zip.file.open(filename, 'w') {|_f| } #HAXX this code creates an unneeded temporary file
zip.instance_variable_get(:@entry_set) << Zip::Entry.new(current_zip, filename)
end
end
ls
end
# Create a symlink to the current file or directory.
def symlink(name)
FileUtils.ln_s current_item, name
ls
end
# Yank selected file / directory names.
def yank
@yanked_items = selected_items
end
# Paste yanked files / directories here.
def paste
if @yanked_items
if current_item.directory?
FileUtils.cp_r @yanked_items.map(&:path), current_item
else
@yanked_items.each do |item|
if items.include? item
i = 1
while i += 1
new_item = Item.new dir: current_dir, name: "#{item.basename}_#{i}#{item.extname}", stat: item.stat, window_width: maxx
break unless File.exist? new_item.path
end
FileUtils.cp_r item, new_item
else
FileUtils.cp_r item, current_dir
end
end
end
ls
end
end
# Copy selected files and directories' path into clipboard on OSX.
def clipboard
IO.popen('pbcopy', 'w') {|f| f << selected_items.map(&:path).join(' ')} if osx?
end
# Archive selected files and directories into a .zip file.
def zip(zipfile_name)
return unless zipfile_name
zipfile_name << '.zip' unless zipfile_name.end_with? '.zip'
Zip::File.open(zipfile_name, Zip::File::CREATE) do |zipfile|
selected_items.each do |item|
next if item.symlink?
if item.directory?
Dir[item.join('**/**')].each do |file|
zipfile.add file.sub("#{current_dir}/", ''), file
end
else
zipfile.add item.name, item
end
end
end
ls
end
# Unarchive .zip and .tar.gz files within selected files and directories into current_directory.
def unarchive
unless in_zip?
zips, gzs = selected_items.partition(&:zip?).tap {|z, others| break [z, *others.partition(&:gz?)]}
zips.each do |item|
FileUtils.mkdir_p current_dir.join(item.basename)
Zip::File.open(item) do |zip|
zip.each do |entry|
FileUtils.mkdir_p File.join(item.basename, File.dirname(entry.to_s))
zip.extract(entry, File.join(item.basename, entry.to_s)) { true }
end
end
end
gzs.each do |item|
Zlib::GzipReader.open(item) do |gz|
Gem::Package::TarReader.new(gz) do |tar|
dest_dir = current_dir.join (gz.orig_name || item.basename).sub(/\.tar$/, '')
tar.each do |entry|
dest = nil
if entry.full_name == '././@LongLink'
dest = File.join dest_dir, entry.read.strip
next
end
dest ||= File.join dest_dir, entry.full_name
if entry.directory?
FileUtils.mkdir_p dest, :mode => entry.header.mode
elsif entry.file?
FileUtils.mkdir_p dest_dir
File.open(dest, 'wb') {|f| f.print entry.read}
FileUtils.chmod entry.header.mode, dest
elsif entry.header.typeflag == '2' # symlink
File.symlink entry.header.linkname, dest
end
unless Dir.exist? dest_dir
FileUtils.mkdir_p dest_dir
File.open(File.join(dest_dir, gz.orig_name || item.basename), 'wb') {|f| f.print gz.read}
end
end
end
end
end
else
Zip::File.open(current_zip) do |zip|
zip.select {|e| selected_items.map(&:name).include? e.to_s}.each do |entry|
FileUtils.mkdir_p File.join(current_zip.dir, current_zip.basename, File.dirname(entry.to_s))
zip.extract(entry, File.join(current_zip.dir, current_zip.basename, entry.to_s)) { true }
end
end
end
ls
end
# Whether the currently displayed page is the first one.
def first_page?
  current_page.zero?
end
# Whether the currently displayed page is the final one.
def last_page?
  total_pages - 1 == current_page
end
# Number of pages needed to show all items in the current directory.
#
# An empty directory still counts as one page. Unlike the previous
# `items.size / max_items + 1`, this does not produce a trailing empty
# page when the item count is an exact multiple of the page size.
def total_pages
  return 1 if items.empty?
  (items.size - 1) / max_items + 1
end
# Move to the given page number.
#
# ==== Parameters
# * +page+ - Target page number
def switch_page(page)
# The argument to main.display is the value of the assignment, i.e. the
# new page number (intentional assignment inside the call).
main.display (@current_page = page)
# Cache the slice of items shown on this page.
@displayed_items = items[current_page * max_items, max_items]
# Page numbers are shown 1-based to the user.
header_l.draw_path_and_page_number path: current_dir.path, current: current_page + 1, total: total_pages
end
# Refresh the right-hand header with the count and total byte size of
# the currently marked files and directories.
def draw_marked_items
  marked = marked_items
  total_size = marked.sum(&:size)
  header_r.draw_marked_items count: marked.size, size: total_size
end
# Refresh the right-hand header with the count and total byte size of
# every item in the current directory.
def draw_total_items
  all = items
  header_r.draw_total_items count: all.size, size: all.sum(&:size)
end
# Switch on / off marking on the current file or directory.
def toggle_mark
main.toggle_mark current_item
end
# Read one keystroke and return it as a single-character String.
#
# Returns nil when no key was read (Curses.getch can return nil, e.g.
# on timeout) or when the key's codepoint is outside 0..255 (function
# keys, wide characters). The previous version called `c.ord` on nil
# and raised NoMethodError.
def get_char
  c = Curses.getch
  c if c && (0..255) === c.ord
end
# Accept user input, and directly execute it as a Ruby method call to the controller.
#
# ==== Parameters
# * +preset_command+ - A command that would be displayed at the command line before user input.
def process_command_line(preset_command: nil)
prompt = preset_command ? ":#{preset_command} " : ':'
command_line.set_prompt prompt
# First word is the method name, the rest are positional arguments.
cmd, *args = command_line.get_command(prompt: prompt).split(' ')
if cmd && !cmd.empty? && respond_to?(cmd)
# NOTE(review): public_send on user input — respond_to? limits it to
# this controller's public methods, but any of them is callable.
self.public_send cmd, *args
command_line.wclear
command_line.wrefresh
end
# ^C during input just clears the command line.
rescue Interrupt
command_line.wclear
command_line.wrefresh
end
# Accept user input, and directly execute it in an external shell.
def process_shell_command
command_line.set_prompt ':!'
# [1..-1] strips the leading '!' — presumably get_command returns the
# prompt character as part of the string; confirm its contract.
cmd = command_line.get_command(prompt: ':!')[1..-1]
# pause: true waits for a keypress so the user can read the output.
execute_external_command pause: true do
system cmd
end
rescue Interrupt
ensure
command_line.wclear
command_line.wrefresh
end
# Let the user answer y or n.
#
# ==== Parameters
# * +prompt+ - Prompt message
def ask(prompt = '(y/n)')
command_line.set_prompt prompt
command_line.wrefresh
while (c = Curses.getch)
# Accept only N, Y, n, y, ^c or esc; anything else keeps waiting.
# NOTE(review): the list mixes Strings (?N..?y) with Integers (3, 27) —
# Curses.getch may return either form; confirm both occur in practice.
next unless [?N, ?Y, ?n, ?y, 3, 27] .include? c # N, Y, n, y, ^c, esc
command_line.wclear
command_line.wrefresh
# true only for an affirmative answer; ^c/esc/N/n all yield false.
break (c == 'y') || (c == 'Y')
end
end
# Open current file or directory with the editor.
def edit
execute_external_command do
editor = ENV['EDITOR'] || 'vim'
unless in_zip?
system %Q[#{editor} "#{current_item.path}"]
else
# Editing inside a zip: extract the entry to a temp dir, edit the
# copy there, then write it back into the archive.
begin
tmpdir, tmpfile_name = nil
Zip::File.open(current_zip) do |zip|
tmpdir = Dir.mktmpdir
FileUtils.mkdir_p File.join(tmpdir, File.dirname(current_item.name))
tmpfile_name = File.join(tmpdir, current_item.name)
File.open(tmpfile_name, 'w') {|f| f.puts zip.file.read(current_item.name)}
system %Q[#{editor} "#{tmpfile_name}"]
# The `{ true }` block answers "overwrite existing entry?" affirmatively.
zip.add(current_item.name, tmpfile_name) { true }
end
ls
ensure
# Always clean up the temp dir, even if the editor failed.
FileUtils.remove_entry_secure tmpdir if tmpdir
end
end
end
end
# Open current file or directory with the viewer.
def view
pager = ENV['PAGER'] || 'less'
execute_external_command do
unless in_zip?
system %Q[#{pager} "#{current_item.path}"]
else
# Viewing inside a zip: extract the entry to a temp dir first, then
# page the copy (read-only — nothing is written back).
begin
tmpdir, tmpfile_name = nil
Zip::File.open(current_zip) do |zip|
tmpdir = Dir.mktmpdir
FileUtils.mkdir_p File.join(tmpdir, File.dirname(current_item.name))
tmpfile_name = File.join(tmpdir, current_item.name)
File.open(tmpfile_name, 'w') {|f| f.puts zip.file.read(current_item.name)}
end
system %Q[#{pager} "#{tmpfile_name}"]
ensure
# Always clean up the temp dir, even if the pager failed.
FileUtils.remove_entry_secure tmpdir if tmpdir
end
end
end
end
# Move the cursor to the item under a mouse click, if any.
#
# ==== Parameters
# * +y+, +x+ - Click coordinates on the screen
def move_cursor_by_click(y: nil, x: nil)
if (idx = main.pane_index_at(y: y, x: x))
# Translate pane index plus row-within-pane into an absolute item
# index on the current page.
row = current_page * max_items + main.maxy * idx + y - main.begy
move_cursor row if (row >= 0) && (row < items.size)
end
end
private
# Suspend curses, run the given block (typically an external program),
# then restore and redraw the screen.
#
# ==== Parameters
# * +pause+ - Wait for a keypress before restoring the screen, so the
#   user can read the command's output.
def execute_external_command(pause: false)
Curses.def_prog_mode
Curses.close_screen
yield
ensure
Curses.reset_prog_mode
Curses.getch if pause
#NOTE needs to draw borders and ls again here since the stdlib Curses.refresh fails to retrieve the previous screen
Rfd::Window.draw_borders
ls
Curses.refresh
end
# Expand +path+ to an absolute path.
#
# Absolute paths and home-relative paths are expanded as-is; any other
# (relative) path is first resolved against the current directory when
# one is set.
def expand_path(path)
  return File.expand_path(path) if path.start_with?('/', '~')
  base = current_dir ? current_dir.join(path) : path
  File.expand_path(base)
end
# Build an Item for the file or directory at +path+, using lstat so a
# symlink is described itself rather than followed.
def load_item(path)
  file_stat = File.lstat(path)
  Item.new dir: File.dirname(path),
           name: File.basename(path),
           stat: file_stat,
           window_width: maxx
end
# Whether we are running on macOS, memoized as a boolean.
#
# The previous `@_osx ||= ... =~ /darwin/` never cached the nil result
# on non-mac hosts (so the lookup re-ran on every call) and returned a
# match index instead of a boolean; callers only rely on truthiness, so
# returning true/false is backward compatible.
def osx?
  @_osx = !(RbConfig::CONFIG['host_os'] =~ /darwin/).nil? if @_osx.nil?
  @_osx
end
# Whether we are currently browsing inside a zip archive.
# Returns the zip item itself (truthy) rather than a boolean, or nil.
def in_zip?
@current_zip
end
# Show a debug message in the right-hand header area.
def debug(str)
header_r.debug str
end
end
end
|
require 'yaml'
module Rip
  autoload :Parser,      'rip/parser'
  autoload :Environment, 'rip/environment'
  autoload :Helpers,     'rip/helpers'
  autoload :Package,     'rip/package'
  autoload :GitPackage,  'rip/packages/git_package'
  autoload :GemPackage,  'rip/packages/gem_package'

  extend self

  # Root rip directory and the name of the active environment.
  attr_accessor :dir, :env

  # Path of the package list file.
  def packages
    "#{dir}/.packages"
  end

  # Path of the download cache directory.
  def cache
    "#{dir}/.cache"
  end

  # Path of the symlink pointing at the active environment.
  def active
    "#{dir}/active"
  end

  # Path of the current environment's directory.
  def envdir
    "#{dir}/#{env}"
  end

  # Names of all environments under +dir+, excluding the `active`
  # symlink and hidden entries.
  def envs
    entries = Dir["#{dir}/*"].map { |path| File.basename(path) }
    entries.reject { |name| name == 'active' || name.start_with?('.') }
  end

  # MD5 hex digest of +string+ (anything responding to #to_s).
  def md5(string)
    require 'digest'
    Digest::MD5.hexdigest(string.to_s)
  end
end
Add a helper for getting the shell's Ruby platform
require 'yaml'
module Rip
  autoload :Parser,      'rip/parser'
  autoload :Environment, 'rip/environment'
  autoload :Helpers,     'rip/helpers'
  autoload :Package,     'rip/package'
  autoload :GitPackage,  'rip/packages/git_package'
  autoload :GemPackage,  'rip/packages/gem_package'

  extend self

  # Root rip directory and the name of the active environment.
  attr_accessor :dir, :env

  # Path of the package list file.
  def packages
    "#{dir}/.packages"
  end

  # Path of the download cache directory.
  def cache
    "#{dir}/.cache"
  end

  # Path of the symlink pointing at the active environment.
  def active
    "#{dir}/active"
  end

  # Path of the current environment's directory.
  def envdir
    "#{dir}/#{env}"
  end

  # Names of all environments under +dir+, excluding the `active`
  # symlink and hidden entries.
  def envs
    entries = Dir["#{dir}/*"].map { |path| File.basename(path) }
    entries.reject { |name| name == 'active' || name.start_with?('.') }
  end

  # Digest of the shell ruby's sitearchdir — changes whenever the
  # shell's active ruby/platform changes.
  def platform_hash
    md5(shell_ruby_platform)
  end

  # Shell out to ruby so we always get the shells activate ruby,
  # not whatever ruby version is running rip.
  def shell_ruby_platform
    `ruby -rrbconfig -e "puts RbConfig::CONFIG['sitearchdir']"`
  end

  # MD5 hex digest of +string+ (anything responding to #to_s).
  def md5(string)
    require 'digest'
    Digest::MD5.hexdigest(string.to_s)
  end
end
|
module Msgr
  # The Dispatcher receives incoming messages,
  # process them through a middleware stack and
  # delegate them to a new and fresh consumer instance.
  #
  class Dispatcher
    include Logging

    def initialize
    end

    # Log the incoming message, acknowledge it, and log the ack.
    #
    # The artificial `sleep 10 * rand` removed here delayed every ack by
    # up to ten seconds, throttling the whole dispatcher for no
    # functional benefit — the broker only needs the ack itself.
    def call(message)
      log(:debug) { "Receive dispatched message: #{message.payload}" }
      message.ack
      log(:debug) { 'Dispatched message acknowledged.' }
    end

    # Human-readable identifier used in log output.
    def to_s
      self.class.name
    end
  end
end
Remove sleep.
module Msgr
# The Dispatcher receives incoming messages,
# process them through a middleware stack and
# delegate them to a new and fresh consumer instance.
#
class Dispatcher
include Logging
def initialize
end
# Log the incoming message, acknowledge it, and log the ack.
def call(message)
log(:debug) { "Receive dispatched message: #{message.payload}" }
message.ack
log(:debug) { 'Dispatched message acknowledged.' }
end
# Human-readable identifier used in log output.
def to_s
self.class.name
end
end
end
|
require 'multi_part_date/version'
require 'active_support/concern'
require 'active_support'
require 'date'
require 'reform'
module MultiPartDate
extend ActiveSupport::Concern
class_methods do
# Declare a Reform date property +field_name+ edited through three
# virtual integer parts (<key>_day / <key>_month / <key>_year).
#
# options:
#   :as          - base name for the part accessors (defaults to field_name)
#   :discard_*   - pin day/month/year to 1 instead of exposing an input
#   :on          - forwarded to every generated property
#   :type        - type of the date property (default Types::Form::Date)
#   :validate_if - condition guarding the date validation
def multi_part_date(field_name, options = {})
key = options[:as] || field_name
discard_options = options.slice(:discard_day, :discard_month, :discard_year)
on_options = options.slice(:on)
type = options[:type] || Types::Form::Date
create_methods_for(field_name, key, discard_options)
property :"#{key}_month", { type: Types::Coercible::Int, virtual: true }.merge(on_options)
property :"#{key}_day", { type: Types::Coercible::Int, virtual: true }.merge(on_options)
property :"#{key}_year", { type: Types::Coercible::Int, virtual: true }.merge(on_options)
property field_name, { type: type }.merge(on_options)
validate_if_required(field_name, options[:validate_if])
end
# Define every generated helper method for the field.
def create_methods_for(field_name, key, discard_options)
create_date_values_methods_for(key)
create_getters_for(field_name, key, discard_options)
create_setters_for(field_name, key)
create_validation_helper_methods_for(field_name, key)
end
# Part getters: a discarded part always reads 1; otherwise the part is
# read from the composed date when one is set, falling back to the
# virtual property itself.
def create_getters_for(field_name, key, discard_options)
%i(day month year).each do |type|
if discard_options[:"discard_#{type}"]
define_method(:"#{key}_#{type}") do
1
end
else
define_method(:"#{key}_#{type}") do
if send(field_name)
send(field_name).send(type) if send(field_name).respond_to?(type)
else
super()
end
end
end
end
end
# Part setters: after each part is written, rebuild the composed date
# once all three parts are present (and form a valid date).
def create_setters_for(field_name, key)
%i(day month year).each do |type|
define_method(:"#{key}_#{type}=") do |value|
super(value)
send(:"set_#{field_name}") if send(:"#{field_name}_parts_present?")
end
end
define_method(:"set_#{field_name}") do
return nil unless send(:"valid_#{field_name}_date?")
date = ::Date.new(
send(:"#{key}_year_value"),
send(:"#{key}_month_value"),
send(:"#{key}_day_value")
)
send(:"#{field_name}=", date)
end
end
# Register the date validation, optionally guarded by :validate_if.
def validate_if_required(field_name, validate_if_option)
if validate_if_option
validate :"validate_#{field_name}_date", if: validate_if_option
else
validate :"validate_#{field_name}_date"
end
end
# <key>_<part>_value readers: the raw part coerced with #to_i.
def create_date_values_methods_for(key)
%i(day month year).each do |type|
define_method(:"#{key}_#{type}_value") do
send(:"#{key}_#{type}").to_i
end
end
end
def create_validation_helper_methods_for(field_name, key)
create_valid_date_method_for(field_name, key)
create_parts_present_method_for(field_name, key)
create_validate_date_method_for(field_name)
end
# valid_<field>_date?: whether the three parts form a real calendar date.
def create_valid_date_method_for(field_name, key)
define_method(:"valid_#{field_name}_date?") do
::Date.valid_date?(
send(:"#{key}_year_value"),
send(:"#{key}_month_value"),
send(:"#{key}_day_value")
)
end
end
# <field>_parts_present?: whether all three parts are filled in.
# NOTE(review): this predicate clears the composed field as a side
# effect (so the part getters fall back to the virtual properties) —
# confirm that is intentional.
def create_parts_present_method_for(field_name, key)
define_method(:"#{field_name}_parts_present?") do
send(:"#{field_name}=", nil)
[
send(:"#{key}_year_value"),
send(:"#{key}_month_value"),
send(:"#{key}_day_value")
].all?(&:present?)
end
end
# validate_<field>_date: adds an error unless the parts form a valid date.
def create_validate_date_method_for(field_name)
define_method(:"validate_#{field_name}_date") do
return true if send(:"valid_#{field_name}_date?")
errors.add(field_name, 'is not a valid date')
end
end
end
end
Revert part coercion to Types::Form::Int
require 'multi_part_date/version'
require 'active_support/concern'
require 'active_support'
require 'date'
require 'reform'
module MultiPartDate
extend ActiveSupport::Concern
class_methods do
# Declare a Reform date property +field_name+ edited through three
# virtual integer parts (<key>_day / <key>_month / <key>_year).
#
# options:
#   :as          - base name for the part accessors (defaults to field_name)
#   :discard_*   - pin day/month/year to 1 instead of exposing an input
#   :on          - forwarded to every generated property
#   :type        - type of the date property (default Types::Form::Date)
#   :validate_if - condition guarding the date validation
def multi_part_date(field_name, options = {})
key = options[:as] || field_name
discard_options = options.slice(:discard_day, :discard_month, :discard_year)
on_options = options.slice(:on)
type = options[:type] || Types::Form::Date
create_methods_for(field_name, key, discard_options)
property :"#{key}_month", { type: Types::Form::Int, virtual: true }.merge(on_options)
property :"#{key}_day", { type: Types::Form::Int, virtual: true }.merge(on_options)
property :"#{key}_year", { type: Types::Form::Int, virtual: true }.merge(on_options)
property field_name, { type: type }.merge(on_options)
validate_if_required(field_name, options[:validate_if])
end
# Define every generated helper method for the field.
def create_methods_for(field_name, key, discard_options)
create_date_values_methods_for(key)
create_getters_for(field_name, key, discard_options)
create_setters_for(field_name, key)
create_validation_helper_methods_for(field_name, key)
end
# Part getters: a discarded part always reads 1; otherwise the part is
# read from the composed date when one is set, falling back to the
# virtual property itself.
def create_getters_for(field_name, key, discard_options)
%i(day month year).each do |type|
if discard_options[:"discard_#{type}"]
define_method(:"#{key}_#{type}") do
1
end
else
define_method(:"#{key}_#{type}") do
if send(field_name)
send(field_name).send(type) if send(field_name).respond_to?(type)
else
super()
end
end
end
end
end
# Part setters: after each part is written, rebuild the composed date
# once all three parts are present (and form a valid date).
def create_setters_for(field_name, key)
%i(day month year).each do |type|
define_method(:"#{key}_#{type}=") do |value|
super(value)
send(:"set_#{field_name}") if send(:"#{field_name}_parts_present?")
end
end
define_method(:"set_#{field_name}") do
return nil unless send(:"valid_#{field_name}_date?")
date = ::Date.new(
send(:"#{key}_year_value"),
send(:"#{key}_month_value"),
send(:"#{key}_day_value")
)
send(:"#{field_name}=", date)
end
end
# Register the date validation, optionally guarded by :validate_if.
def validate_if_required(field_name, validate_if_option)
if validate_if_option
validate :"validate_#{field_name}_date", if: validate_if_option
else
validate :"validate_#{field_name}_date"
end
end
# <key>_<part>_value readers: the raw part coerced with #to_i.
def create_date_values_methods_for(key)
%i(day month year).each do |type|
define_method(:"#{key}_#{type}_value") do
send(:"#{key}_#{type}").to_i
end
end
end
def create_validation_helper_methods_for(field_name, key)
create_valid_date_method_for(field_name, key)
create_parts_present_method_for(field_name, key)
create_validate_date_method_for(field_name)
end
# valid_<field>_date?: whether the three parts form a real calendar date.
def create_valid_date_method_for(field_name, key)
define_method(:"valid_#{field_name}_date?") do
::Date.valid_date?(
send(:"#{key}_year_value"),
send(:"#{key}_month_value"),
send(:"#{key}_day_value")
)
end
end
# <field>_parts_present?: whether all three parts are filled in.
# NOTE(review): this predicate clears the composed field as a side
# effect (so the part getters fall back to the virtual properties) —
# confirm that is intentional.
def create_parts_present_method_for(field_name, key)
define_method(:"#{field_name}_parts_present?") do
send(:"#{field_name}=", nil)
[
send(:"#{key}_year_value"),
send(:"#{key}_month_value"),
send(:"#{key}_day_value")
].all?(&:present?)
end
end
# validate_<field>_date: adds an error unless the parts form a valid date.
def create_validate_date_method_for(field_name)
define_method(:"validate_#{field_name}_date") do
return true if send(:"valid_#{field_name}_date?")
errors.add(field_name, 'is not a valid date')
end
end
end
end
module MyGists
  # Public: Encapsulates logic for getting public entities like tags.
  #
  # Examples
  #
  #   MyGists::Public.tags(page)
  class Public
    # Public: Integer number of tags per page.
    TAGS_PER_PAGE = 60

    # Public: Gets, paginates and decorates public tags.
    #
    # page - The String or Integer of current page.
    #
    # Returns an Array of paginated and decorated ActsAsTaggableOn::Tags tags.
    def self.tags(page)
      ActsAsTaggableOn::Tag
        .public_tags
        .paginate(page: page, per_page: TAGS_PER_PAGE)
        .decorate
    end
  end
end
Tags per page bump
module MyGists
  # Public: Encapsulates logic for getting public entities like tags.
  #
  # Examples
  #
  #   MyGists::Public.tags(page)
  class Public
    # Public: Integer number of tags per page.
    TAGS_PER_PAGE = 120

    # Public: Gets, paginates and decorates public tags.
    #
    # page - The String or Integer of current page.
    #
    # Returns an Array of paginated and decorated ActsAsTaggableOn::Tags tags.
    def self.tags(page)
      ActsAsTaggableOn::Tag
        .public_tags
        .paginate(page: page, per_page: TAGS_PER_PAGE)
        .decorate
    end
  end
end
|
module EventMachine
module WebSocket
class Connection
# Identifier shared by every tab of one browser session.
attr_accessor :browser_id
# Yield (and return) all connections belonging to the same browser,
# including this one.
def all_tabs
Nali::Clients.list
.select { |client| client.browser_id == self.browser_id }
.each{ |client| yield( client ) if block_given? }
end
# Same as #all_tabs but excluding this connection.
def other_tabs
Nali::Clients.list
.select { |client| client != self and client.browser_id == self.browser_id }
.each{ |client| yield( client ) if block_given? }
end
# Drop all per-connection state (storage and watches).
def reset
@storage = {}
@watches = {}
self
end
# Arbitrary per-connection key/value store.
def storage
@storage ||= {}
end
# Read a stored value; with no key, return the whole store.
def []( name = nil )
name ? ( storage[ name ] or nil ) : storage
end
def []=( name, value )
storage[ name ] = value
end
# Map of "<ClassName><id>" => last-synced updated_at (Float).
def watches
@watches ||= {}
end
# Start watching a model (0 means never synced yet).
def watch( model )
watches[ model.class.name + model.id.to_s ] ||= 0
end
def unwatch( model )
watches.delete model.class.name + model.id.to_s
end
def watch?( model )
if watches[ model.class.name + model.id.to_s ] then true else false end
end
# Timestamp of the last sync for the model, 0 when unwatched.
def watch_time( model )
watches[ model.class.name + model.id.to_s ] or 0
end
def watch_time_up( model )
watches[ model.class.name + model.id.to_s ] = model.updated_at.to_f
end
# Serialize the hash as JSON and send it over the socket.
def send_json( hash )
send hash.to_json
self
end
# Push the given models (and their relations, recursively) to the
# client when they changed since the last sync or were destroyed.
def sync( *models )
models.flatten.compact.each do |model|
# NOTE(review): get_sync_params is computed even when the client is
# already up to date; the watch_time check below only gates sending.
params, relations = model.get_sync_params( self )
if not params.empty? and ( watch_time( model ) < model.updated_at.to_f or model.destroyed? )
if model.destroyed? then unwatch( model ) else watch_time_up model end
relations.each { |relation| sync relation }
send_json action: :sync, params: params
end
end
self
end
# Ask the client to invoke +method+ on the given model (or named
# client-side object) with +params+.
def call_method( method, model, params = nil )
model = "#{ model.class.name }.#{ model.id }" if model.is_a?( ActiveRecord::Base )
send_json action: 'callMethod', model: model, method: method, params: params
self
end
# Client-side notification helpers.
def notice( method, params = nil )
call_method method, 'Notice', params
self
end
def info( params )
notice :info, params
self
end
def warning( params )
notice :warning, params
self
end
def error( params )
notice :error, params
self
end
# Run a client-side application method.
def app_run( method, params = nil )
send_json action: 'appRun', method: method, params: params
self
end
end
end
end
Do not compute sync params if the model version is current
module EventMachine
module WebSocket
class Connection
# Identifier shared by every tab of one browser session.
attr_accessor :browser_id
# Yield (and return) all connections belonging to the same browser,
# including this one.
def all_tabs
Nali::Clients.list
.select { |client| client.browser_id == self.browser_id }
.each{ |client| yield( client ) if block_given? }
end
# Same as #all_tabs but excluding this connection.
def other_tabs
Nali::Clients.list
.select { |client| client != self and client.browser_id == self.browser_id }
.each{ |client| yield( client ) if block_given? }
end
# Drop all per-connection state (storage and watches).
def reset
@storage = {}
@watches = {}
self
end
# Arbitrary per-connection key/value store.
def storage
@storage ||= {}
end
# Read a stored value; with no key, return the whole store.
def []( name = nil )
name ? ( storage[ name ] or nil ) : storage
end
def []=( name, value )
storage[ name ] = value
end
# Map of "<ClassName><id>" => last-synced updated_at (Float).
def watches
@watches ||= {}
end
# Start watching a model (0 means never synced yet).
def watch( model )
watches[ model.class.name + model.id.to_s ] ||= 0
end
def unwatch( model )
watches.delete model.class.name + model.id.to_s
end
def watch?( model )
if watches[ model.class.name + model.id.to_s ] then true else false end
end
# Timestamp of the last sync for the model, 0 when unwatched.
def watch_time( model )
watches[ model.class.name + model.id.to_s ] or 0
end
def watch_time_up( model )
watches[ model.class.name + model.id.to_s ] = model.updated_at.to_f
end
# Serialize the hash as JSON and send it over the socket.
def send_json( hash )
send hash.to_json
self
end
# Push the given models (and their relations, recursively) to the
# client when they changed since the last sync or were destroyed.
# Sync params are only computed when the model is actually stale.
def sync( *models )
models.flatten.compact.each do |model|
if watch_time( model ) < model.updated_at.to_f or model.destroyed?
params, relations = model.get_sync_params( self )
unless params.empty?
if model.destroyed? then unwatch( model ) else watch_time_up model end
relations.each { |relation| sync relation }
send_json action: :sync, params: params
end
end
end
self
end
# Ask the client to invoke +method+ on the given model (or named
# client-side object) with +params+.
def call_method( method, model, params = nil )
model = "#{ model.class.name }.#{ model.id }" if model.is_a?( ActiveRecord::Base )
send_json action: 'callMethod', model: model, method: method, params: params
self
end
# Client-side notification helpers.
def notice( method, params = nil )
call_method method, 'Notice', params
self
end
def info( params )
notice :info, params
self
end
def warning( params )
notice :warning, params
self
end
def error( params )
notice :error, params
self
end
# Run a client-side application method.
def app_run( method, params = nil )
send_json action: 'appRun', method: method, params: params
self
end
end
end
end
|
require 'sqlite3'
module Orbacle
  # Builds a SQLite index (constants, methods, klasslikes, typed nodes)
  # for every Ruby file under a project root.
  class Indexer
    def initialize(db_adapter:)
      @db_adapter = db_adapter
    end

    # Parse all *.rb files under +project_root+, run the typing service,
    # and persist everything through the DB adapter. Files with syntax
    # errors are skipped with a warning.
    def call(project_root:)
      project_root_path = Pathname.new(project_root)
      @db = @db_adapter.new(project_root: project_root_path)
      @db.reset
      @db.create_table_constants
      @db.create_table_metods
      @db.create_table_klasslikes
      @db.create_table_nodes
      files = Dir.glob("#{project_root_path}/**/*.rb")
      @parser = DataFlowGraph.new
      files.each do |file_path|
        begin
          file_content = File.read(file_path)
          puts "Processing #{file_path}"
          @parser.process_file(file_content, file_path)
        rescue Parser::SyntaxError
          puts "Warning: Skipped #{file_path} because of syntax error"
        end
      end
      puts "Typing..."
      typing_result = TypingService.new.(@parser.result.graph, @parser.result.message_sends, @parser.result.tree)
      puts "Saving..."
      store_result(@parser.result, typing_result)
    end

    # Persist parsed constants, methods, klasslikes and typing results.
    def store_result(result, typing_result)
      puts "Saving constants..."
      result.tree.constants.each do |c|
        @db.add_constant(
          scope: c.scope.absolute_str,
          name: c.name,
          type: type_of(c),
          path: c.position.uri,
          line: c.position.position_range.start.line)
      end
      puts "Saving methods..."
      result.tree.metods.each do |m|
        # Meta-programmed methods may have no source location, so guard
        # every step of the position chain with safe navigation instead
        # of crashing on a nil position.
        @db.add_metod(
          name: m.name,
          file: m.position&.uri,
          line: m.position&.position_range&.start&.line)
      end
      puts "Saving klasslikes..."
      klasslikes = result.tree.constants.select {|c| [GlobalTree::Klass, GlobalTree::Mod].include?(c.class)}
      klasslikes.each do |kl|
        @db.add_klasslike(
          scope: kl.scope.absolute_str,
          name: kl.name,
          type: type_of(kl),
          inheritance: type_of(kl) == "klass" ? kl.inheritance_ref&.full_name : nil,
          nesting: nil)
      end
      puts "Saving typings..."
      @db.bulk_add_nodes(typing_result)
    end

    # Map a tree constant to its storage type string.
    def type_of(c)
      case c
      when GlobalTree::Klass then "klass"
      when GlobalTree::Mod then "mod"
      when GlobalTree::Constant then "other"
      end
    end
  end
end
Sometimes a meta-method has no location
require 'sqlite3'
module Orbacle
# Builds a SQLite index (constants, methods, klasslikes, typed nodes)
# for every Ruby file under a project root.
class Indexer
def initialize(db_adapter:)
@db_adapter = db_adapter
end
# Parse all *.rb files under +project_root+, run the typing service,
# and persist everything through the DB adapter. Files with syntax
# errors are skipped with a warning.
def call(project_root:)
project_root_path = Pathname.new(project_root)
@db = @db_adapter.new(project_root: project_root_path)
@db.reset
@db.create_table_constants
@db.create_table_metods
@db.create_table_klasslikes
@db.create_table_nodes
files = Dir.glob("#{project_root_path}/**/*.rb")
@parser = DataFlowGraph.new
files.each do |file_path|
begin
file_content = File.read(file_path)
puts "Processing #{file_path}"
@parser.process_file(file_content, file_path)
rescue Parser::SyntaxError
puts "Warning: Skipped #{file_path} because of syntax error"
end
end
puts "Typing..."
typing_result = TypingService.new.(@parser.result.graph, @parser.result.message_sends, @parser.result.tree)
puts "Saving..."
store_result(@parser.result, typing_result)
end
# Persist parsed constants, methods, klasslikes and typing results.
def store_result(result, typing_result)
puts "Saving constants..."
result.tree.constants.each do |c|
@db.add_constant(
scope: c.scope.absolute_str,
name: c.name,
type: type_of(c),
path: c.position.uri,
line: c.position.position_range.start.line)
end
puts "Saving methods..."
result.tree.metods.each do |m|
# Meta-programmed methods may have no source location, hence the
# safe navigation through the whole position chain.
@db.add_metod(
name: m.name,
file: m.position&.uri,
line: m.position&.position_range&.start&.line)
end
puts "Saving klasslikes..."
klasslikes = result.tree.constants.select {|c| [GlobalTree::Klass, GlobalTree::Mod].include?(c.class)}
klasslikes.each do |kl|
@db.add_klasslike(
scope: kl.scope.absolute_str,
name: kl.name,
type: type_of(kl),
inheritance: type_of(kl) == "klass" ? kl.inheritance_ref&.full_name : nil,
nesting: nil)
end
puts "Saving typings..."
@db.bulk_add_nodes(typing_result)
end
# Map a tree constant to its storage type string.
def type_of(c)
case c
when GlobalTree::Klass then "klass"
when GlobalTree::Mod then "mod"
when GlobalTree::Constant then "other"
end
end
end
end
# Copyright (c) 2010 Tricycle I.T. Pty Ltd
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
# (from http://www.opensource.org/licenses/mit-license.php)
class PackageManager < Toolbase
# Install the given apt packages, skipping ones already installed.
#
# +names+ may be individual names or arrays; a trailing options hash is
# passed through to the task DSL. `task`/`check`/`execute` and
# `shell_or_die` are presumably provided by Toolbase — confirm.
def install(*names)
names = names.flatten
options = extract_options!(names)
task "install packages #{names.join(', ')}", options do
# One `dpkg -l` snapshot is reused for every per-package check.
packages_installed = shell_or_die('dpkg -l')
names.each do |name|
check "package #{name} is installed" do
# "ii" marks a package as installed in dpkg -l output.
packages_installed =~ /^ii +#{name} /
end
end
execute do
shell_or_die "apt-get install -y --assume-yes #{names.join(' ')}"
@packages_installed = nil
end
end
end
alias :default :install
end
Fix package manager to support --force-yes and debconf overrides
# Copyright (c) 2010 Tricycle I.T. Pty Ltd
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
# (from http://www.opensource.org/licenses/mit-license.php)
class PackageManager < Toolbase
# Install the given apt packages, skipping ones already installed.
#
# Options (removed from the hash before it reaches the task DSL):
#   :force   - pass --force-yes to apt-get
#   :debconf - path to a debconf database overriding default answers
def install(*names)
names = names.flatten
options = extract_options!(names)
force = options.delete(:force) ? '--force-yes' : ''
debconf = options.delete(:debconf)
task "install packages #{names.join(', ')}", options do
# One `dpkg -l` snapshot is reused for every per-package check.
packages_installed = shell_or_die('dpkg -l')
names.each do |name|
check "package #{name} is installed" do
# "ii" marks a package as installed in dpkg -l output.
packages_installed =~ /^ii +#{name} /
end
end
execute do
# NOTE(review): mutates process-wide ENV and never restores it —
# confirm later tasks are not affected.
ENV['DEBCONF_DB_OVERRIDE'] = "File{#{debconf}}" if debconf
ENV['DEBIAN_FRONTEND'] = 'noninteractive'
shell_or_die "apt-get install --assume-yes #{force} #{names.join(' ')}"
@packages_installed = nil
end
end
end
alias :default :install
end
|
# encoding: UTF-8
# frozen_string_literal: true
require 'parallel'
require 'active_support'
require 'active_support/core_ext'
class Array
# Submit destroy requests for every eligible object in the array as a
# single batch. Returns the BatchOperation (inspect it for responses).
def destroy
batch = Parse::BatchOperation.new
each do |o|
next unless o.respond_to?(:destroy_request)
r = o.destroy_request
batch.add(r) unless r.nil?
end
batch.submit
batch
end
# Save every Parse::Object in the array as one batch.
#
# merge: false - submit without applying server results to the objects.
# force: true  - forwarded to change_requests (include unchanged attrs).
def save(merge: true, force: false)
batch = Parse::BatchOperation.new
objects = {}
each do |o|
next unless o.is_a?(Parse::Object)
# Requests are tagged with object_id so responses can be matched back.
objects[o.object_id] = o
batch.add o.change_requests(force)
end
if merge == false
batch.submit
return batch
end
#rebind updates
batch.submit do |request, response|
next unless request.tag.present? && response.present? && response.success?
o = objects[request.tag]
next unless o.is_a?(Parse::Object)
result = response.result
# New objects receive their server-assigned id; then server state wins.
o.id = result['objectId'] if o.id.blank?
o.set_attributes!(result)
o.clear_changes!
end
batch
end #save!
end
module Parse
# Convenience constructor for a batch operation.
def self.batch(reqs = nil)
BatchOperation.new(reqs)
end
class BatchOperation
attr_accessor :requests, :responses
include Enumerable
# Client used to transmit the batch.
def client
@client ||= Parse::Client.session
end
def initialize(reqs = nil)
@requests = []
@responses = []
reqs = [reqs] unless reqs.is_a?(Enumerable)
reqs.each { |r| add(r) } if reqs.is_a?(Enumerable)
end
# Add a request, an array of requests, another batch, or any object
# responding to #change_requests. Non-Parse::Request entries are
# silently dropped. Returns the accumulated request list.
def add(req)
if req.respond_to?(:change_requests)
requests = req.change_requests.select { |r| r.is_a?(Parse::Request) }
@requests += requests
elsif req.is_a?(Array)
requests = req.select { |r| r.is_a?(Parse::Request) }
@requests += requests
elsif req.is_a?(BatchOperation)
@requests += req.requests if req.is_a?(BatchOperation)
else
@requests.push(req) if req.is_a?(Parse::Request)
end
@requests
end
# make Batching interoperable with object methods. This allows adding a batch
# to another batch.
def change_requests
@requests
end
# NOTE(review): `each(&Proc.new)` relies on parameterless Proc.new
# capturing the enclosing method's block, which was removed in Ruby
# 3.0 — confirm the supported Ruby range or switch to an explicit
# &block parameter.
def each
return enum_for(:each) unless block_given?
@requests.each(&Proc.new)
self
end
def as_json(*args)
{ requests: requests }.as_json
end
def count
@requests.count
end
def clear!
@requests.clear
end
# True when the last submit produced only successful responses.
def success?
return false if @responses.empty?
@responses.compact.all?(&:success?)
end
def error?
return false if @responses.empty?
! success?
end
# Note that N requests sent in a batch will still count toward
# your request limit as N requests.
# Submit in slices of +segment+ requests; duplicate requests (same
# signature) are sent only once. Yields request/response pairs when a
# block is given (same Proc.new caveat as #each).
def submit(segment = 50)
@responses = []
@requests.uniq!(&:signature)
@requests.each_slice(segment) do |slice|
@responses << client.batch_request( BatchOperation.new(slice) )
#throttle
# sleep (slice.count.to_f / MAX_REQ_SEC.to_f )
end
@responses.flatten!
#puts "Requests: #{@requests.count} == Response: #{@responses.count}"
@requests.zip(@responses).each(&Proc.new) if block_given?
@responses
end
alias_method :save, :submit
end
module API
#object fetch methods
module Batch
# POST the batch to the API; returns the per-request responses on
# success, otherwise the raw response.
def batch_request(batch_operations)
unless batch_operations.is_a?(Parse::BatchOperation)
batch_operations = Parse::BatchOperation.new batch_operations
end
response = request(:post, "batch", body: batch_operations.as_json)
response.success? && response.batch? ? response.batch_responses : response
end
end
end
end
Uses client instead of session for batch requests.
# encoding: UTF-8
# frozen_string_literal: true
require 'parallel'
require 'active_support'
require 'active_support/core_ext'
class Array
# Submit destroy requests for every eligible object in the array as a
# single batch. Returns the BatchOperation (inspect it for responses).
def destroy
batch = Parse::BatchOperation.new
each do |o|
next unless o.respond_to?(:destroy_request)
r = o.destroy_request
batch.add(r) unless r.nil?
end
batch.submit
batch
end
# Save every Parse::Object in the array as one batch.
#
# merge: false - submit without applying server results to the objects.
# force: true  - forwarded to change_requests (include unchanged attrs).
def save(merge: true, force: false)
batch = Parse::BatchOperation.new
objects = {}
each do |o|
next unless o.is_a?(Parse::Object)
# Requests are tagged with object_id so responses can be matched back.
objects[o.object_id] = o
batch.add o.change_requests(force)
end
if merge == false
batch.submit
return batch
end
#rebind updates
batch.submit do |request, response|
next unless request.tag.present? && response.present? && response.success?
o = objects[request.tag]
next unless o.is_a?(Parse::Object)
result = response.result
# New objects receive their server-assigned id; then server state wins.
o.id = result['objectId'] if o.id.blank?
o.set_attributes!(result)
o.clear_changes!
end
batch
end #save!
end
module Parse
# Convenience constructor for a batch operation.
def self.batch(reqs = nil)
BatchOperation.new(reqs)
end
class BatchOperation
attr_accessor :requests, :responses
include Enumerable
# Client used to transmit the batch.
def client
@client ||= Parse::Client.client
end
def initialize(reqs = nil)
@requests = []
@responses = []
reqs = [reqs] unless reqs.is_a?(Enumerable)
reqs.each { |r| add(r) } if reqs.is_a?(Enumerable)
end
# Add a request, an array of requests, another batch, or any object
# responding to #change_requests. Non-Parse::Request entries are
# silently dropped. Returns the accumulated request list.
def add(req)
if req.respond_to?(:change_requests)
requests = req.change_requests.select { |r| r.is_a?(Parse::Request) }
@requests += requests
elsif req.is_a?(Array)
requests = req.select { |r| r.is_a?(Parse::Request) }
@requests += requests
elsif req.is_a?(BatchOperation)
@requests += req.requests if req.is_a?(BatchOperation)
else
@requests.push(req) if req.is_a?(Parse::Request)
end
@requests
end
# make Batching interoperable with object methods. This allows adding a batch
# to another batch.
def change_requests
@requests
end
# NOTE(review): `each(&Proc.new)` relies on parameterless Proc.new
# capturing the enclosing method's block, which was removed in Ruby
# 3.0 — confirm the supported Ruby range or switch to an explicit
# &block parameter.
def each
return enum_for(:each) unless block_given?
@requests.each(&Proc.new)
self
end
def as_json(*args)
{ requests: requests }.as_json
end
def count
@requests.count
end
def clear!
@requests.clear
end
# True when the last submit produced only successful responses.
def success?
return false if @responses.empty?
@responses.compact.all?(&:success?)
end
def error?
return false if @responses.empty?
! success?
end
# Note that N requests sent in a batch will still count toward
# your request limit as N requests.
# Submit in slices of +segment+ requests; duplicate requests (same
# signature) are sent only once. Yields request/response pairs when a
# block is given (same Proc.new caveat as #each).
def submit(segment = 50)
@responses = []
@requests.uniq!(&:signature)
@requests.each_slice(segment) do |slice|
@responses << client.batch_request( BatchOperation.new(slice) )
#throttle
# sleep (slice.count.to_f / MAX_REQ_SEC.to_f )
end
@responses.flatten!
#puts "Requests: #{@requests.count} == Response: #{@responses.count}"
@requests.zip(@responses).each(&Proc.new) if block_given?
@responses
end
alias_method :save, :submit
end
module API
#object fetch methods
module Batch
# POST the batch to the API; returns the per-request responses on
# success, otherwise the raw response.
def batch_request(batch_operations)
unless batch_operations.is_a?(Parse::BatchOperation)
batch_operations = Parse::BatchOperation.new batch_operations
end
response = request(:post, "batch", body: batch_operations.as_json)
response.success? && response.batch? ? response.batch_responses : response
end
end
end
end
|
module Parser
# Parse mail log file and create rows in db
class MailLog
# log_file - path to a postfix mail log file.
def initialize(log_file)
@path = log_file
@postfix_id = postfix_pattern
@subject = subject_pattern
@message = message_pattern
@status = status_pattern
end
# Scan the log line by line; create Message rows for new queue ids and
# attach subject, message-id and per-recipient delivery status as the
# corresponding log lines are encountered.
def parse_log
File.foreach(@path) do |line|
data = @postfix_id.match(line)
create_row data
subject = @subject.match(line)
update_subject subject
message = @message.match(line)
update_message message
recipient = @status.match(line)
create_recipient recipient
end
end
private
# smtpd line announcing a new queue id from the configured client.
# NOTE(review): the class [aA-Z0-9] matches only 'a', A-Z and 0-9 —
# probably intended [a-zA-Z0-9]; postfix queue ids are usually
# uppercase hex so it happens to work, but confirm.
def postfix_pattern
%r{
postfix\/smtpd\[\d*\]:\s([aA-Z0-9]+):\s
client=#{Rails.application.secrets.log_mail_server}
}x
end
# cleanup line carrying the message's Subject header.
def subject_pattern
%r{
postfix\/cleanup\[\d*\]:\s([aA-Z0-9]+):\sinfo:\sheader\sSubject:\s
(.*)\sfrom\s#{Rails.application.secrets.log_mail_server}
}x
end
# cleanup line carrying the message-id.
def message_pattern
%r{postfix\/cleanup\[\d*\]: ([aA-Z0-9]+): message-id=(.*)}
end
# smtp delivery line: captures queue id, recipient, raw status text,
# status word and the optional three-digit SMTP reply code.
def status_pattern
%r{
postfix\/smtp\[\d*\]:\s([aA-Z0-9]+):\s
to=<(.*)>.*status=((.*)\s\(([0-9]{3})?(.*)\))
}x
end
# Create a Message row for a newly seen queue id.
def create_row(data)
return if data.nil? || data[1].nil?
Message.create(postfix_queue_id: data[1],
client: Rails.application.secrets.log_mail_server)
end
# Attach the subject to the message with the matching queue id.
def update_subject(subject)
return if subject.nil?
row = Message.where(postfix_queue_id: subject[1]).first
row.update_attribute(:subject, subject[2]) unless row.nil?
end
# Attach the message-id to the message with the matching queue id.
def update_message(message)
return if message.nil?
row = Message.where(postfix_queue_id: message[1]).first
row.update_attribute(:postfix_message_id, message[2]) unless row.nil?
end
# Record a delivery attempt; a 250 reply code marks it delivered.
def create_recipient(recipient, delivered = false)
return if recipient.nil?
row = Message.where(postfix_queue_id: recipient[1]).first
delivered = true if recipient[5] == '250'
row.recipients
.create(address: recipient[2], status: recipient[4],
delivered: delivered, status_raw: recipient[3]) unless row.nil?
end
end
end
Fix for parsing old log file
module Parser
# Parse mail log file and create rows in db
class MailLog
# log_file: path to the postfix log to be parsed.
def initialize(log_file)
@path = log_file
@postfix_id = postfix_pattern
@subject = subject_pattern
@message = message_pattern
@status = status_pattern
end
# Streams the log line by line; each matching pattern creates or updates
# the corresponding Message/recipient rows.
def parse_log
File.foreach(@path) do |line|
data = @postfix_id.match(line)
create_row data
subject = @subject.match(line)
update_subject subject
message = @message.match(line)
update_message message
recipient = @status.match(line)
create_recipient recipient
end
end
private
# smtpd line that introduces a queue id for our configured client.
# NOTE(review): [aA-Z0-9] matches 'a', A-Z and digits — probably meant
# [A-Z0-9]; confirm against real postfix queue ids before changing.
def postfix_pattern
%r{
postfix\/smtpd\[\d*\]:\s([aA-Z0-9]+):\s
client=#{Rails.application.secrets.log_mail_server}
}x
end
# cleanup line carrying the Subject header.
def subject_pattern
%r{
postfix\/cleanup\[\d*\]:\s([aA-Z0-9]+):\sinfo:\sheader\sSubject:\s
(.*)\sfrom\s#{Rails.application.secrets.log_mail_server}
}x
end
# cleanup line carrying the message-id.
def message_pattern
%r{postfix\/cleanup\[\d*\]: ([aA-Z0-9]+): message-id=(.*)}
end
# smtp delivery line: recipient address plus delivery status/SMTP code.
def status_pattern
%r{
postfix\/smtp\[\d*\]:\s([aA-Z0-9]+):\s
to=<(.*)>.*status=((.*)\s\(([0-9]{3})?(.*)\))
}x
end
# first_or_create keeps re-parsing the same log idempotent (no dup rows).
def create_row(data)
return if data.nil? || data[1].nil?
Message.where(postfix_queue_id: data[1],
client: Rails.application.secrets.log_mail_server)
.first_or_create
end
def update_subject(subject)
return if subject.nil?
row = Message.where(postfix_queue_id: subject[1]).first
row.update_attribute(:subject, subject[2]) unless row.nil?
end
def update_message(message)
return if message.nil?
row = Message.where(postfix_queue_id: message[1]).first
row.update_attribute(:postfix_message_id, message[2]) unless row.nil?
end
# Records a recipient; delivered is derived from the SMTP code (250 == ok).
# first_or_create avoids duplicate recipient rows on repeated parses.
def create_recipient(recipient, delivered = false)
return if recipient.nil?
row = Message.where(postfix_queue_id: recipient[1]).first
delivered = true if recipient[5] == '250'
row.recipients
.where(address: recipient[2], status: recipient[4],
delivered: delivered, status_raw: recipient[3])
.first_or_create unless row.nil?
end
end
end
# Copyright 2012 Xtreme Labs
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
require 'digest/sha1'
require 'json'
require 'openssl'
require 'zip/zip'
require 'zip/zipfilesystem'
module Passbook
# Pkpass is the class responsible for managing the content of a pkpass and also signing the package
class Pkpass
attr_accessor :files, :translations, :json, :pass_type_id, :serial_number, :config
# Loads the template files and pass.json for the given pass type.
# Raises ArgumentError when the initializer/config is missing.
def initialize(pass_type_id, serial_number)
self.pass_type_id = pass_type_id
self.serial_number = serial_number
self.translations = Hash.new
raise(ArgumentError, "Don't forget to run the generator to create the initializer") unless Config.instance.pass_config
self.config = Config.instance.pass_config[self.pass_type_id]
raise(ArgumentError, "Could not find configuration for #{self.pass_type_id}") unless self.config
# NOTE(review): checks for the symbol key :files but then reads the
# string key 'files' — one of these is probably wrong; verify against
# how Config builds pass_config.
if self.config.include? :files
self.files = self.config['files'].dup
else
self.files = Config.instance.load_files self.config['template_path']
end
if self.files.include? 'pass.json'
self.json = JSON.parse(self.files['pass.json'])
else
self.json = {}
puts "Warning: your template_path does not contain pass.json"
end
end
# Add a file to your pkpass
#
# example:
# pass.add_file "stripe.png", image_content
#
# == Parameters:
# filename::
# A String for the name of the file. It can contain a folder, so it is really a relative path within the pkpass
# content::
# Binary content for what will be inside that file
def add_file filename, content
self.files[filename] = content
end
# Registers a localized string for the given language's pass.strings file.
def add_translation_string source, destination, language
self.translations[language] = Hash.new unless self.translations.include?(language)
self.translations[language][source] = destination
end
# Builds the final .pkpass: writes JSON and translations, generates the
# manifest, signs it, and returns the zipped package as a StringIO.
def package
#TODO: write a library that checks that all the right files are included in the package
#those requirements are going to be different depending on pass_type_id
self.write_json
self.write_translation_strings
self.generate_json_manifest
self.sign_manifest
self.compress_pass_file
end
# @private
def write_json
self.files['pass.json'] = JSON.pretty_generate(self.json)
end
# @private
# Appends each registered translation as a "key" = "value"; line to the
# per-language pass.strings file.
def write_translation_strings
self.translations.each do |language, trans|
self.files["#{language}.lproj/pass.strings"] ||= ""
trans.each do |key, value|
#TODO: escape key and value
self.files["#{language}.lproj/pass.strings"] << "\n\"#{key}\" = \"#{value}\";"
end
end
end
# @private
# manifest.json maps every file in the package to its SHA1 digest.
def generate_json_manifest
manifest = {}
self.files.each do |filename, content|
manifest[filename] = Digest::SHA1.hexdigest(content)
end
self.files['manifest.json'] = JSON.pretty_generate(manifest)
end
# @private
# Produces a detached PKCS7 signature of manifest.json using the pass
# certificate plus Apple's WWDR intermediate.
# NOTE(review): force_encoding('UTF-8') on DER (binary) output looks
# suspect — 'BINARY' would be the usual choice; confirm downstream usage.
def sign_manifest
flag = OpenSSL::PKCS7::BINARY|OpenSSL::PKCS7::DETACHED
signed = OpenSSL::PKCS7::sign(config['p12_certificate'].certificate, config['p12_certificate'].key, self.files['manifest.json'], [Config.instance.wwdr_certificate], flag)
self.files['signature'] = signed.to_der.force_encoding('UTF-8')
end
# @private
# Zips all files into an in-memory buffer and returns the rewound StringIO.
# NOTE(review): Zip::ZipOutputStream is the legacy rubyzip (< 1.0) API that
# matches the `require 'zip/zip'` above; rubyzip 1.x renamed these.
def compress_pass_file
stringio = Zip::ZipOutputStream::write_buffer do |z|
self.files.each do |filename, content|
z.put_next_entry filename
z.print content
end
end
stringio.set_encoding "binary"
stringio.rewind
stringio
# stringio.sysread
end
end
end
Update pkpass.rb
# Copyright 2012 Xtreme Labs
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
require 'digest/sha1'
require 'json'
require 'openssl'
require 'zip'
require 'zip/zipfilesystem'
module Passbook
# Pkpass is the class responsible for managing the content of a pkpass and also signing the package
class Pkpass
attr_accessor :files, :translations, :json, :pass_type_id, :serial_number, :config
# Loads the template files and pass.json for the given pass type.
# Raises ArgumentError when the initializer/config is missing.
def initialize(pass_type_id, serial_number)
self.pass_type_id = pass_type_id
self.serial_number = serial_number
self.translations = Hash.new
raise(ArgumentError, "Don't forget to run the generator to create the initializer") unless Config.instance.pass_config
self.config = Config.instance.pass_config[self.pass_type_id]
raise(ArgumentError, "Could not find configuration for #{self.pass_type_id}") unless self.config
# NOTE(review): checks for the symbol key :files but then reads the
# string key 'files' — one of these is probably wrong; verify against
# how Config builds pass_config.
if self.config.include? :files
self.files = self.config['files'].dup
else
self.files = Config.instance.load_files self.config['template_path']
end
if self.files.include? 'pass.json'
self.json = JSON.parse(self.files['pass.json'])
else
self.json = {}
puts "Warning: your template_path does not contain pass.json"
end
end
# Add a file to your pkpass
#
# example:
# pass.add_file "stripe.png", image_content
#
# == Parameters:
# filename::
# A String for the name of the file. It can contain a folder, so it is really a relative path within the pkpass
# content::
# Binary content for what will be inside that file
def add_file filename, content
self.files[filename] = content
end
# Registers a localized string for the given language's pass.strings file.
def add_translation_string source, destination, language
self.translations[language] = Hash.new unless self.translations.include?(language)
self.translations[language][source] = destination
end
# Builds the final .pkpass: writes JSON and translations, generates the
# manifest, signs it, and returns the zipped package as a StringIO.
def package
#TODO: write a library that checks that all the right files are included in the package
#those requirements are going to be different depending on pass_type_id
self.write_json
self.write_translation_strings
self.generate_json_manifest
self.sign_manifest
self.compress_pass_file
end
# @private
def write_json
self.files['pass.json'] = JSON.pretty_generate(self.json)
end
# @private
# Appends each registered translation as a "key" = "value"; line to the
# per-language pass.strings file.
def write_translation_strings
self.translations.each do |language, trans|
self.files["#{language}.lproj/pass.strings"] ||= ""
trans.each do |key, value|
#TODO: escape key and value
self.files["#{language}.lproj/pass.strings"] << "\n\"#{key}\" = \"#{value}\";"
end
end
end
# @private
# manifest.json maps every file in the package to its SHA1 digest.
def generate_json_manifest
manifest = {}
self.files.each do |filename, content|
manifest[filename] = Digest::SHA1.hexdigest(content)
end
self.files['manifest.json'] = JSON.pretty_generate(manifest)
end
# @private
# Produces a detached PKCS7 signature of manifest.json using the pass
# certificate plus Apple's WWDR intermediate.
# NOTE(review): force_encoding('UTF-8') on DER (binary) output looks
# suspect — 'BINARY' would be the usual choice; confirm downstream usage.
def sign_manifest
flag = OpenSSL::PKCS7::BINARY|OpenSSL::PKCS7::DETACHED
signed = OpenSSL::PKCS7::sign(config['p12_certificate'].certificate, config['p12_certificate'].key, self.files['manifest.json'], [Config.instance.wwdr_certificate], flag)
self.files['signature'] = signed.to_der.force_encoding('UTF-8')
end
# @private
# Zips all files into an in-memory buffer and returns the rewound StringIO.
# NOTE(review): still uses the legacy Zip::ZipOutputStream name even though
# the require was updated to 'zip' — confirm the installed rubyzip version
# still exposes it (1.x renamed it to Zip::OutputStream).
def compress_pass_file
stringio = Zip::ZipOutputStream::write_buffer do |z|
self.files.each do |filename, content|
z.put_next_entry filename
z.print content
end
end
stringio.set_encoding "binary"
stringio.rewind
stringio
# stringio.sysread
end
end
end
|
require "securerandom"
require "uri_config"
require "payments/client/version"
require "payments/client/config"
require "payments/client/facade"
require "payments/client/gateway"
require "payments/client/operations"
require "payments/client/railtie" if defined?(Rails)
module Payments
  module Client
    # Base URL for the payments API; override with the PAYMENTS_API_URL
    # environment variable.
    ENDPOINT = ENV.fetch("PAYMENTS_API_URL") do
      "https://payments.everydayhero.io/api"
    end

    # Friendly gateway names mapped to the adapter actually used.
    GATEWAY_ALIASES = {
      http: :excon,
    }

    # Overrides the id attached to outgoing requests (useful in tests).
    def self.request_id=(value)
      @request_id = value
    end

    # Lazily generated unique id used for request tracing.
    def self.request_id
      @request_id ||= SecureRandom.uuid
    end

    # @example
    #   client = Payments::Client.v1
    #   client = Payments::Client.v1(:rack, Payments::Application)
    #
    #   client.get_merchant(merchant_id)
    #
    # Defaults to the :http gateway so callers can omit the argument;
    # passing a name explicitly behaves exactly as before.
    def self.v1(name = :http, *gateway_options)
      name = GATEWAY_ALIASES[name] || name
      gateway = Gateway.new(name, Config.new(ENDPOINT), *gateway_options)
      Facade.new(gateway)
    end
  end
end
Default to http gateway
require "securerandom"
require "uri_config"
require "payments/client/version"
require "payments/client/config"
require "payments/client/facade"
require "payments/client/gateway"
require "payments/client/operations"
require "payments/client/railtie" if defined?(Rails)
module Payments
module Client
# Base URL for the payments API; override with the PAYMENTS_API_URL
# environment variable.
ENDPOINT = ENV.fetch("PAYMENTS_API_URL") do
"https://payments.everydayhero.io/api"
end
# Friendly gateway names mapped to the adapter actually used.
GATEWAY_ALIASES = {
http: :excon,
}
# Overrides the id attached to outgoing requests (useful in tests).
def self.request_id=(value)
@request_id = value
end
# Lazily generated unique id used for request tracing.
def self.request_id
@request_id ||= SecureRandom.uuid
end
# @example
# client = Payments::Client.v1
# client = Payments::Client.v1(:rack, Payments::Application)
#
# client.get_merchant(merchant_id)
#
# Defaults to the :http gateway so callers can omit the argument.
def self.v1(name = :http, *gateway_options)
name = GATEWAY_ALIASES[name] || name
gateway = Gateway.new(name, Config.new(ENDPOINT), *gateway_options)
Facade.new(gateway)
end
end
end
|
module Peddler
# Gem version string, bumped on release.
VERSION = '0.19.0'
end
Release 1.0.0
module Peddler
# Gem version string, bumped on release.
VERSION = '1.0.0'
end
|
# All countries, ordered by country code.
#
# Definitions are in the format:
# NDC >> National | NDC >> National | # ...
#
# As soon as a NDC matches, it goes on to the National part. Then breaks off.
# If the NDC does not match, it go on to the next (|, or "or") NDC.
#
# Available matching/splitting methods:
# * none: Does not have a national destination code, e.g. Denmark, Iceland.
# * one_of: Matches one of the following numbers. Splits if it does.
# * match: Try to match the regex, and if it matches, splits it off.
# * fixed: Always splits off a fixed length ndc. (Always use last in a | chain as a catchall) Offers a "zero" formatting option (default true).
#
# For the national number part, there are two:
# * split: Use this number group splitting.
# * matched_split: Give a hash of regex => format array, with a :fallback => format option.
# (See Norway how it looks.)
#
# The third parameter to country are validations.
# Currently, there is one method available:
# * invalid_ndcs: Give a regexp or string to describe invalid ndc(s).
#
# Note: The ones that are commented are defined in their special files.
#
Phony.define do
# Reserved.
#
country '0', fixed(1) >> split(10) # Reserved.
# USA, Canada, etc.
#
country '1',
fixed(3, :zero => false) >> split(3,4),
invalid_ndcs('911') # /911/ would also work.
# Kazakhstan (Republic of) & Russian Federation.
# also Abhasia and South Osetia autonomous regions / recognized by some states as independent countries
#country '7', fixed(3) >> split(3,2,2)
# see special file
# Egypt.
#
country '20', one_of('800') >> split(7) | # Egypt
one_of('2', '3') >> split(8) | # Cairo/Giza, Alexandria
fixed(2) >> split(8)
# :mobile? => /^1[01246-9]\d+$/, :service? => /^800\d+$/
# South Africa.
#
country '27', fixed(2) >> split(3,4)
# Greece.
#
country '30', match(/^(2[3-8]?1|69[0345789]|800)\d+$/) >> split(8) | # Geo/Mobile
fixed(4) >> split(6) # 3-digit NDCs
# country '31' # Netherlands, see special file.
# Belgium.
#
country '32', match(/^(70|800|90\d)\d+$/) >> split(3,3) | # Service
match(/^(4[789]\d)\d{6}$/) >> split(6) | # Mobile
one_of('2','3','4','9') >> split(3,5) | # Short NDCs
fixed(2) >> split(3,5) # 2-digit NDCs
# France.
#
country '33', fixed(1) >> split(2,2,2,2) # :service? => /^8.*$/, :mobile? => /^[67].*$/
# Spain.
#
country '34',
fixed(2) >> split(3,4)
# Hungary.
#
# TODO Mobile.
#
country '36',
one_of('104','105','107','112') >> split(3,3) | # Service
one_of('1') >> split(3,4) | # Budapest
fixed(2) >> split(3,4) # 2-digit NDCs
# country '39' # Italy, see special file.
# Romania.
#
country '40',
match(/^(112|800|90[036])\d+$/) >> split(3,3) | # Service
match(/^(7[1-8])\d+$/) >> split(3,4) | # Mobile
one_of('21', '31') >> split(3,4) | # Bucureşti
fixed(3) >> split(3,4) # 3-digit NDCs
# Switzerland.
#
country '41',
match(/^(8(00|4[0248]))\d+$/) >> split(3,3) |
fixed(2) >> split(3,2,2)
# country '43' # Austria, see special file.
# country '44' # UnitedKingdom, see special file.
# Denmark.
#
country '45',
none >> split(2,2,2,2)
# country '46' # Sweden, see special file.
# Norway.
#
country '47',
none >> matched_split(/^[1].*$/ => [3],
/^[489].*$/ => [3,2,3],
:fallback => [2,2,2,2])
# Poland (Republic of)
# Although the NDCs are 2 digits, the representation is 3 digits.
# Note: http://wapedia.mobi/en/Telephone_numbers_in_Poland, mobile not yet correct
#
country '48',
match(/^(5[013]\d|6[069]\d|7[02389]\d|80[01]|88\d)/) >> split(3,3) |
fixed(2) >> split(3,2,2)
# country '49' # Germany, see special file.
# Peru.
#
country '51',
one_of('103', '105') >> split(3,3) | # Service.
one_of('1', '9') >> split(4,4) | # Lima and mobile.
fixed(2) >> split(4,4) # 2-digit NDCs.
# Mexico.
#
country '52',
match(/^(0\d{2})\d+$/) >> split(2,2,2,2) |
match(/^(33|55|81)\d+$/) >> split(2,2,2,2) |
match(/^(\d{3})\d+$/) >> split(3,2,2) # catchall.
# Cuba.
#
country '53',
match(/^(5\d{3})\d+$/) >> split(4) | # Mobile
match(/^(7|2[123]|4[1-8]|3[1-3])/) >> split(7) | # Short NDCs
fixed(3) >> split(7) # 3-digit NDCs
# Argentine Republic.
#
country '54',
one_of('11', '911') >> split(4,4) | # Fixed & Mobile
match(/^(22[0137]|237|26[14]|29[179]|34[1235]|35[138]|38[1578])/) >> split(3,4) | # Fixed
match(/^(922[0137]|9237|926[14]|929[179]|934[1235]|935[138]|938[1578])/) >> split(3,4) | # Mobile
match(/^(9\d{4})/) >> split(2,4) | # Mobile
fixed(4) >> split(2,4) # Fixed
# Brazil (Federative Republic of).
# http://en.wikipedia.org/wiki/Telephone_numbers_in_Brazil
#
brazilian_service = /^(1(00|28|9[0-4789]))\d+$/
country '55',
match(brazilian_service) >> split(3,3) | # Service.
fixed(2) >> split(4,4) # NDCs
# Chile.
#
country '56',
match(/^(13[0-79]|14[79])\d+$/) >> split(3,3) | # Service
one_of('2', '9') >> split(8) | # Santiago, Mobile
fixed(2) >> split(8) # 2-digit NDCs
# TODO Colombia.
#
country '57', todo
# Venezuela (Bolivarian Republic of)
#
country '58',
fixed(3) >> split(7)
# country '60' # Malaysia, see special file.
# Australia.
#
country '61',
match(/^(4\d\d)\d+$/) >> split(3,3) | # Mobile
fixed(1) >> split(4,4) # Rest
country '62', todo # TODO Indonesia (Republic of)
country '63', todo # TODO Philippines (Republic of the)
# New Zealand.
#
# TODO Mobile?
#
country '64',
fixed(1) >> split(3,4)
# Singapore (Republic of).
#
country '65',
none >> split(4,4) # TODO Short Codes.
# Thailand.
#
country '66',
one_of('2') >> split(3,4) | # Bangkok
fixed(2) >> split(3,3) # Rest
country '81', todo # TODO Japan
# country '82' # SouthKorea, see special file.
country '84', # Viet Nam (Socialist Republic of)
one_of('4', '8') >> split(7) |
match(/^(2[025679]|3[0136789]|5[23456789]|6[01234678]|7[02345679]|9[0-8])\d/) >> split(6) |
fixed(3) >> split(5)
# country '86' # China, see special file.
# Turkey.
#
country '90',
fixed(3) >> split(3,4) # Wiki says 7, but the examples say 3, 4.
country '91', todo # TODO India (Republic of)
country '92', todo # TODO Pakistan (Islamic Republic of), http://en.wikipedia.org/wiki/Telephone_numbers_in_Pakistan, NDC 2-5
# Afghanistan.
#
# From http://www.wtng.info/wtng-93-af.html
#
country '93', fixed(2) >> split(7) # Note: the document says 6, but the examples use 7.
country '94', fixed(2) >> split(3,2,2) # TODO Sri Lanka (Democratic Socialist Republic of)
country '95', fixed(2) >> split(3,2,2) # TODO Myanmar (Union of)
country '98', fixed(2) >> split(3,2,2) # TODO Iran (Islamic Republic of)
country '210', todo # -
country '211', todo # South Sudan
country '212', todo # Morocco
country '213', fixed(2) >> split(3,4) # Algeria
country '214', todo # -
country '215', todo # -
country '216', fixed(1) >> split(3,4) # Tunisia
country '217', todo # -
country '218', todo # Lybia
country '219', todo # -
country '220', todo # Gambia
country '221', todo # Senegal
country '222', todo # Mauritania
country '223', todo # Mali
country '224', todo # Guinea
country '225', todo # Côte d'Ivoire
country '226', todo # Burkina Faso
country '227', todo # Niger
country '228', todo # Togolese Republic
country '229', todo # Benin
country '230', todo # Mauritius
country '231', todo # Liberia
country '232', todo # Sierra Leone
# Ghana
#
# From http://www.itu.int/oth/T0202000052/en
#
country '233', fixed(2) >> split(3,4)
# Nigeria
# Wikipedia says 3 4 split, many local number with no splitting
# Fixed: the mobile regexes contained stray "]" characters (`702\d]` and
# `8[0,1]\d]`) which required a literal ']' in the number, so those
# branches could never match; `[0,1]` also matched a literal comma.
country '234',
one_of('1', '2', '9') >> split(3,4) | # Lagos, Ibadan and Abuja
match(/^(702\d)\d+$/) >> split(3,4) | # Mobile (4-digit 702x NDCs)
match(/^(70[3-9])\d+$/) >> split(3,4) | # Mobile
match(/^(8[01]\d)\d+$/) >> split(3,4) | # Mobile
fixed(2) >> split(3,4) # 2-digit NDC
country '235', todo # Chad
country '236', todo # Central African Republic
country '237', todo # Cameroon
country '238', todo # Cape Verde
country '239', todo # Sao Tome and Principe
country '240', todo # Equatorial Guinea
country '241', todo # Gabonese Republic
country '242', todo # Congo
country '243', todo # Democratic Republic of the Congo
country '244', todo # Angola
country '245', todo # Guinea-Bissau
country '246', todo # Diego Garcia
country '247', todo # Ascension
country '248', todo # Seychelles
country '249', todo # Sudan
country '250', todo # Rwanda
country '251', todo # Ethiopia
country '252', todo # Somali Democratic Republic
country '253', todo # Djibouti
country '254', fixed(2) >> split(7) # Kenya
# Tanzania.
#
country '255',
match(/^([89]\d\d)/) >> split(3,3) | # Special/Premium.
one_of('112', '118') >> split(3,3) | # Short Codes.
fixed(2) >> split(3,4) # Geographic.
# Uganda.
#
country '256',
match(/^(46[45]|4[78]\d)/) >> split(6) | # Geo 1.
fixed(2) >> split(7) # Geo 2.
country '257', todo # Burundi
country '258', todo # Mozambique
country '259', todo # -
country '260', todo # Zambia
country '261', todo # Madagascar
country '262', todo # Reunion / Mayotte (new)
country '263', todo # Zimbabwe
country '264', todo # Namibia
country '265', todo # Malawi
country '266', todo # Lesotho
country '267', todo # Botswana
country '268', todo # Swaziland
country '269', todo # Comoros
country '280', todo # -
country '281', todo # -
country '282', todo # -
country '283', todo # -
country '284', todo # -
country '285', todo # -
country '286', todo # -
country '287', todo # -
country '288', todo # -
country '289', todo # -
country '290', todo # Saint Helena
country '291', todo # Eritrea
country '292', todo # -
country '293', todo # -
country '294', todo # -
country '295', todo # -
country '296', todo # -
country '297', todo # Aruba
country '298', todo # Faroe Islands
country '299', todo # Greenland
country '350', todo # Gibraltar
# Portugal.
#
country '351',
one_of('700', '800') >> split(3,3) | # Service.
match(/^(9\d)\d+$/) >> split(3,4) | # Mobile.
one_of('21', '22') >> split(3,4) | # Lisboa & Porto
fixed(3) >> split(3,4) # 3-digit NDCs
# Luxembourg
#
country '352',
one_of('4') >> split(2,2,2) | # Luxembourg City
match(/^(2[4|6|7]\d{2})$/) >> split(2,2,2) | # 4-digit NDC
match(/^(6\d1)\d+$/) >> split(3,3) | # mobile
match(/^(60\d{2})\d{8}$/) >> split(2,2,2,2) | # mobile machine to machine
match(/^([2-9]\d)/) >> split(2,2,2) # 2-digit NDC
# country '353' # Republic of Ireland, see special file.
country '354', none >> split(3,4) # Iceland
country '355', todo # Albania
country '356', todo # Malta
country '357', todo # Cyprus
# Finland.
#
country '358',
match(/^([6-8]00)\d+$/) >> split(3,3) | # Service
match(/^(4\d|50)\d+$/) >> split(3,2,2) | # Mobile
one_of('2','3','5','6','8','9') >> split(3,3) | # Short NDCs
fixed(2) >> split(3,3) # 2-digit NDCs
# Bulgaria.
#
country '359',
fixed(2) >> split(3,2,2) # Bulgaria
# Lithuania.
#
country '370',
one_of('700', '800') >> split(2,3) | # Service
match(/^(6\d\d)\d+$/) >> split(2,3) | # Mobile
one_of('5') >> split(3,2,2) | # Vilnius
one_of('37','41') >> split(2,2,2) | # Kaunas, Šiauliai
fixed(3) >> split(1,2,2) # 3-digit NDCs.
country '371', todo # Latvia
country '372', todo # Estonia
country '373', todo # Moldova
country '374', todo # Armenia
country '375', todo # Belarus
country '376', todo # Andorra
country '377', todo # Monaco
country '378', todo # San Marino
country '379', todo # Vatican City State
country '380', todo # Ukraine
country '381', todo # Serbia and Montenegro
country '382', todo # -
country '383', todo # -
country '384', todo # -
# Croatia.
#
country '385', one_of('1') >> split(3,5) | # Zagreb
fixed(2) >> split(3,5) # 2-digit NDCs
country '386', fixed(2) >> split(3,2,2) # Slovenia
country '387', fixed(2) >> split(3,2,2) # Bosnia and Herzegovina
country '388', fixed(2) >> split(3,2,2) # Group of countries, shared code
country '389', fixed(2) >> split(3,2,2) # The Former Yugoslav Republic of Macedonia
country '420', fixed(3) >> split(3,3) # Czech Republic
# Slovak Republic.
#
country '421', match(/^(9\d\d).+$/) >> split(6) | # Mobile
one_of('2') >> split(8) | # Bratislava
fixed(2) >> split(7) # 2-digit NDCs
country '422', todo # Spare code
country '423', none >> split(3,2,2) # Liechtenstein (Principality of)
country '424', todo # -
country '425', todo # -
country '426', todo # -
country '427', todo # -
country '428', todo # -
country '429', todo # -
country '500', todo # Falkland Islands (Malvinas)
country '501', todo # Belize
country '502', todo # Guatemala (Republic of)
country '503', todo # El Salvador (Republic of)
country '504', todo # Honduras (Republic of)
country '505', todo # Nicaragua
country '506', todo # Costa Rica
country '507', todo # Panama (Republic of)
country '508', todo # Saint Pierre and Miquelon (Collectivité territoriale de la République française)
country '509', todo # Haiti (Republic of)
country '590', todo # Guadeloupe (French Department of)
country '591', todo # Bolivia (Republic of)
country '592', todo # Guyana
country '593', todo # Ecuador
country '594', todo # French Guiana (French Department of)
country '595', todo # Paraguay (Republic of)
country '596', todo # Martinique (French Department of)
country '597', todo # Suriname (Republic of)
country '598', todo # Uruguay (Eastern Republic of)
country '599', todo # Netherlands Antilles
country '670', todo # Democratic Republic of Timor-Leste
country '671', todo # Spare code
country '672', todo # Australian External Territories
country '673', todo # Brunei Darussalam
country '674', todo # Nauru (Republic of)
country '675', todo # Papua New Guinea
country '676', todo # Tonga (Kingdom of)
country '677', todo # Solomon Islands
country '678', todo # Vanuatu (Republic of)
country '679', todo # Fiji (Republic of)
country '680', todo # Palau (Republic of)
country '681', todo # Wallis and Futuna (Territoire français d'outre-mer)
country '682', todo # Cook Islands
country '683', todo # Niue
country '684', todo # -
country '685', todo # Samoa (Independent State of)
country '686', todo # Kiribati (Republic of)
country '687', todo # New Caledonia (Territoire français d'outre-mer)
country '688', todo # Tuvalu
country '689', todo # French Polynesia (Territoire français d'outre-mer)
country '690', todo # Tokelau
country '691', todo # Micronesia (Federated States of)
country '692', todo # Marshall Islands (Republic of the)
country '693', todo # -
country '694', todo # -
country '695', todo # -
country '696', todo # -
country '697', todo # -
country '698', todo # -
country '699', todo # -
country '800', todo # International Freephone Service
country '801', todo # -
country '802', todo # -
country '803', todo # -
country '804', todo # -
country '805', todo # -
country '806', todo # -
country '807', todo # -
country '808', todo # International Shared Cost Service (ISCS)
country '809', todo # -
country '830', todo # -
country '831', todo # -
country '832', todo # -
country '833', todo # -
country '834', todo # -
country '835', todo # -
country '836', todo # -
country '837', todo # -
country '838', todo # -
country '839', todo # -
country '850', todo # Democratic People's Republic of Korea
country '851', todo # Spare code
country '852', todo # Hong Kong, China
country '853', todo # Macao, China
country '854', todo # Spare code
country '855', todo # Cambodia (Kingdom of)
country '856', todo # Lao People's Democratic Republic
country '857', todo # Spare code
country '858', todo # Spare code
country '859', todo # Spare code
country '870', todo # Inmarsat SNAC
country '871', todo # Inmarsat (Atlantic Ocean-East)
country '872', todo # Inmarsat (Pacific Ocean)
country '873', todo # Inmarsat (Indian Ocean)
country '874', todo # Inmarsat (Atlantic Ocean-West)
country '875', todo # Reserved - Maritime Mobile Service Applications
country '876', todo # Reserved - Maritime Mobile Service Applications
country '877', todo # Reserved - Maritime Mobile Service Applications
country '878', todo # Universal Personal Telecommunication Service (UPT)
country '879', todo # Reserved for national non-commercial purposes
country '880', todo # Bangladesh (People's Republic of)
country '881', todo # International Mobile, shared code
country '882', todo # International Networks, shared code
country '883', todo # -
country '884', todo # -
country '885', todo # -
country '886', todo # Reserved
country '887', todo # -
country '888', todo # Reserved for future global service
country '889', todo # -
country '890', todo # -
country '891', todo # -
country '892', todo # -
country '893', todo # -
country '894', todo # -
country '895', todo # -
country '896', todo # -
country '897', todo # -
country '898', todo # -
country '899', todo # -
country '960', todo # Maldives (Republic of)
country '961', todo # Lebanon
country '962', todo # Jordan (Hashemite Kingdom of)
country '963', todo # Syrian Arab Republic
country '964', todo # Iraq (Republic of)
country '965', todo # Kuwait (State of)
country '966', todo # Saudi Arabia (Kingdom of)
country '967', todo # Yemen (Republic of)
country '968', todo # Oman (Sultanate of)
country '969', todo # Reserved - reservation currently under investigation
country '970', todo # Reserved
country '971', todo # United Arab Emirates
country '972', todo # Israel (State of)
country '973', todo # Bahrain (Kingdom of)
country '974', todo # Qatar (State of)
country '975', todo # Bhutan (Kingdom of)
country '976', todo # Mongolia
country '977', todo # Nepal
country '978', todo # -
country '979', todo # International Premium Rate Service (IPRS)
country '990', todo # Spare code
country '991', todo # Trial of a proposed new international telecommunication public correspondence service, shared code
country '992', todo # Tajikistan (Republic of)
country '993', todo # Turkmenistan
country '994', todo # Azerbaijani Republic
country '995', todo # Georgia
country '996', todo # Kyrgyz Republic
country '997', todo # Spare code
country '998', todo # Uzbekistan (Republic of)
country '999', todo # Reserved for possible future use within the Telecommunications for Disaster Relief (TDR) concept
end
+ More mobile number NDC for LU
# All countries, ordered by country code.
#
# Definitions are in the format:
# NDC >> National | NDC >> National | # ...
#
# As soon as a NDC matches, it goes on to the National part. Then breaks off.
# If the NDC does not match, it go on to the next (|, or "or") NDC.
#
# Available matching/splitting methods:
# * none: Does not have a national destination code, e.g. Denmark, Iceland.
# * one_of: Matches one of the following numbers. Splits if it does.
# * match: Try to match the regex, and if it matches, splits it off.
# * fixed: Always splits off a fixed length ndc. (Always use last in a | chain as a catchall) Offers a "zero" formatting option (default true).
#
# For the national number part, there are two:
# * split: Use this number group splitting.
# * matched_split: Give a hash of regex => format array, with a :fallback => format option.
# (See Norway how it looks.)
#
# The third parameter to country are validations.
# Currently, there is one method available:
# * invalid_ndcs: Give a regexp or string to describe invalid ndc(s).
#
# Note: The ones that are commented are defined in their special files.
#
Phony.define do
# Reserved.
#
country '0', fixed(1) >> split(10) # Reserved.
# USA, Canada, etc.
#
country '1',
fixed(3, :zero => false) >> split(3,4),
invalid_ndcs('911') # /911/ would also work.
# Kazakhstan (Republic of) & Russian Federation.
# also Abhasia and South Osetia autonomous regions / recognized by some states as independent countries
#country '7', fixed(3) >> split(3,2,2)
# see special file
# Egypt.
#
country '20', one_of('800') >> split(7) | # Egypt
one_of('2', '3') >> split(8) | # Cairo/Giza, Alexandria
fixed(2) >> split(8)
# :mobile? => /^1[01246-9]\d+$/, :service? => /^800\d+$/
# South Africa.
#
country '27', fixed(2) >> split(3,4)
# Greece.
#
country '30', match(/^(2[3-8]?1|69[0345789]|800)\d+$/) >> split(8) | # Geo/Mobile
fixed(4) >> split(6) # 3-digit NDCs
# country '31' # Netherlands, see special file.
# Belgium.
#
country '32', match(/^(70|800|90\d)\d+$/) >> split(3,3) | # Service
match(/^(4[789]\d)\d{6}$/) >> split(6) | # Mobile
one_of('2','3','4','9') >> split(3,5) | # Short NDCs
fixed(2) >> split(3,5) # 2-digit NDCs
# France.
#
country '33', fixed(1) >> split(2,2,2,2) # :service? => /^8.*$/, :mobile? => /^[67].*$/
# Spain.
#
country '34',
fixed(2) >> split(3,4)
# Hungary.
#
# TODO Mobile.
#
country '36',
one_of('104','105','107','112') >> split(3,3) | # Service
one_of('1') >> split(3,4) | # Budapest
fixed(2) >> split(3,4) # 2-digit NDCs
# country '39' # Italy, see special file.
# Romania.
#
country '40',
match(/^(112|800|90[036])\d+$/) >> split(3,3) | # Service
match(/^(7[1-8])\d+$/) >> split(3,4) | # Mobile
one_of('21', '31') >> split(3,4) | # Bucureşti
fixed(3) >> split(3,4) # 3-digit NDCs
# Switzerland.
#
country '41',
match(/^(8(00|4[0248]))\d+$/) >> split(3,3) |
fixed(2) >> split(3,2,2)
# country '43' # Austria, see special file.
# country '44' # UnitedKingdom, see special file.
# Denmark.
#
country '45',
none >> split(2,2,2,2)
# country '46' # Sweden, see special file.
# Norway.
#
country '47',
none >> matched_split(/^[1].*$/ => [3],
/^[489].*$/ => [3,2,3],
:fallback => [2,2,2,2])
# Poland (Republic of)
# Although the NDCs are 2 digits, the representation is 3 digits.
# Note: http://wapedia.mobi/en/Telephone_numbers_in_Poland, mobile not yet correct
#
country '48',
match(/^(5[013]\d|6[069]\d|7[02389]\d|80[01]|88\d)/) >> split(3,3) |
fixed(2) >> split(3,2,2)
# country '49' # Germany, see special file.
# Peru.
#
country '51',
one_of('103', '105') >> split(3,3) | # Service.
one_of('1', '9') >> split(4,4) | # Lima and mobile.
fixed(2) >> split(4,4) # 2-digit NDCs.
# Mexico.
#
country '52',
match(/^(0\d{2})\d+$/) >> split(2,2,2,2) |
match(/^(33|55|81)\d+$/) >> split(2,2,2,2) |
match(/^(\d{3})\d+$/) >> split(3,2,2) # catchall.
# Cuba.
#
country '53',
match(/^(5\d{3})\d+$/) >> split(4) | # Mobile
match(/^(7|2[123]|4[1-8]|3[1-3])/) >> split(7) | # Short NDCs
fixed(3) >> split(7) # 3-digit NDCs
# Argentine Republic.
#
country '54',
one_of('11', '911') >> split(4,4) | # Fixed & Mobile
match(/^(22[0137]|237|26[14]|29[179]|34[1235]|35[138]|38[1578])/) >> split(3,4) | # Fixed
match(/^(922[0137]|9237|926[14]|929[179]|934[1235]|935[138]|938[1578])/) >> split(3,4) | # Mobile
match(/^(9\d{4})/) >> split(2,4) | # Mobile
fixed(4) >> split(2,4) # Fixed
# Brazil (Federative Republic of).
# http://en.wikipedia.org/wiki/Telephone_numbers_in_Brazil
#
brazilian_service = /^(1(00|28|9[0-4789]))\d+$/
country '55',
match(brazilian_service) >> split(3,3) | # Service.
fixed(2) >> split(4,4) # NDCs
# Chile.
#
country '56',
match(/^(13[0-79]|14[79])\d+$/) >> split(3,3) | # Service
one_of('2', '9') >> split(8) | # Santiago, Mobile
fixed(2) >> split(8) # 2-digit NDCs
# TODO Colombia.
#
country '57', todo
# Venezuela (Bolivarian Republic of)
#
country '58',
fixed(3) >> split(7)
# country '60' # Malaysia, see special file.
# Australia.
#
country '61',
match(/^(4\d\d)\d+$/) >> split(3,3) | # Mobile
fixed(1) >> split(4,4) # Rest
country '62', todo # TODO Indonesia (Republic of)
country '63', todo # TODO Philippines (Republic of the)
# New Zealand.
#
# TODO Mobile?
#
country '64',
fixed(1) >> split(3,4)
# Singapore (Republic of).
#
country '65',
none >> split(4,4) # TODO Short Codes.
# Thailand.
#
country '66',
one_of('2') >> split(3,4) | # Bangkok
fixed(2) >> split(3,3) # Rest
country '81', todo # TODO Japan
# country '82' # SouthKorea, see special file.
country '84', # Viet Nam (Socialist Republic of)
one_of('4', '8') >> split(7) |
match(/^(2[025679]|3[0136789]|5[23456789]|6[01234678]|7[02345679]|9[0-8])\d/) >> split(6) |
fixed(3) >> split(5)
# country '86' # China, see special file.
# Turkey.
#
country '90',
fixed(3) >> split(3,4) # Wiki says 7, but the examples say 3, 4.
country '91', todo # TODO India (Republic of)
country '92', todo # TODO Pakistan (Islamic Republic of), http://en.wikipedia.org/wiki/Telephone_numbers_in_Pakistan, NDC 2-5
# Afghanistan.
#
# From http://www.wtng.info/wtng-93-af.html
#
country '93', fixed(2) >> split(7) # Note: the document says 6, but the examples use 7.
country '94', fixed(2) >> split(3,2,2) # TODO Sri Lanka (Democratic Socialist Republic of)
country '95', fixed(2) >> split(3,2,2) # TODO Myanmar (Union of)
country '98', fixed(2) >> split(3,2,2) # TODO Iran (Islamic Republic of)
country '210', todo # -
country '211', todo # South Sudan
country '212', todo # Morocco
country '213', fixed(2) >> split(3,4) # Algeria
country '214', todo # -
country '215', todo # -
country '216', fixed(1) >> split(3,4) # Tunisia
country '217', todo # -
country '218', todo # Lybia
country '219', todo # -
country '220', todo # Gambia
country '221', todo # Senegal
country '222', todo # Mauritania
country '223', todo # Mali
country '224', todo # Guinea
country '225', todo # Côte d'Ivoire
country '226', todo # Burkina Faso
country '227', todo # Niger
country '228', todo # Togolese Republic
country '229', todo # Benin
country '230', todo # Mauritius
country '231', todo # Liberia
country '232', todo # Sierra Leone
# Ghana
#
# From http://www.itu.int/oth/T0202000052/en
#
country '233', fixed(2) >> split(3,4)
# Nigeria
# Wikipedia says 3 4 split, many local number with no splitting
country '234',
one_of('1', '2', '9') >> split(3,4) | # Lagos, Ibadan and Abuja
match(/^(702\d])\d+$/) >> split(3,4) | # Mobile
match(/^(70[3-9])\d+$/) >> split(3,4) | # Mobile
match(/^(8[0,1]\d])\d+$/) >> split(3,4) | # Mobile
fixed(2) >> split(3,4) # 2-digit NDC
country '235', todo # Chad
country '236', todo # Central African Republic
country '237', todo # Cameroon
country '238', todo # Cape Verde
country '239', todo # Sao Tome and Principe
country '240', todo # Equatorial Guinea
country '241', todo # Gabonese Republic
country '242', todo # Congo
country '243', todo # Democratic Republic of the Congo
country '244', todo # Angola
country '245', todo # Guinea-Bissau
country '246', todo # Diego Garcia
country '247', todo # Ascension
country '248', todo # Seychelles
country '249', todo # Sudan
country '250', todo # Rwanda
country '251', todo # Ethiopia
country '252', todo # Somali Democratic Republic
country '253', todo # Djibouti
country '254', fixed(2) >> split(7) # Kenya
# Tanzania.
#
country '255',
match(/^([89]\d\d)/) >> split(3,3) | # Special/Premium.
one_of('112', '118') >> split(3,3) | # Short Codes.
fixed(2) >> split(3,4) # Geographic.
# Uganda.
#
country '256',
match(/^(46[45]|4[78]\d)/) >> split(6) | # Geo 1.
fixed(2) >> split(7) # Geo 2.
country '257', todo # Burundi
country '258', todo # Mozambique
country '259', todo # -
country '260', todo # Zambia
country '261', todo # Madagascar
country '262', todo # Reunion / Mayotte (new)
country '263', todo # Zimbabwe
country '264', todo # Namibia
country '265', todo # Malawi
country '266', todo # Lesotho
country '267', todo # Botswana
country '268', todo # Swaziland
country '269', todo # Comoros
country '280', todo # -
country '281', todo # -
country '282', todo # -
country '283', todo # -
country '284', todo # -
country '285', todo # -
country '286', todo # -
country '287', todo # -
country '288', todo # -
country '289', todo # -
country '290', todo # Saint Helena
country '291', todo # Eritrea
country '292', todo # -
country '293', todo # -
country '294', todo # -
country '295', todo # -
country '296', todo # -
country '297', todo # Aruba
country '298', todo # Faroe Islands
country '299', todo # Greenland
country '350', todo # Gibraltar
# Portugal.
#
country '351',
one_of('700', '800') >> split(3,3) | # Service.
match(/^(9\d)\d+$/) >> split(3,4) | # Mobile.
one_of('21', '22') >> split(3,4) | # Lisboa & Porto
fixed(3) >> split(3,4) # 3-digit NDCs
# Luxembourg
#
country '352',
one_of('4') >> split(2,2,2) | # Luxembourg City
match(/^(2[4|6|7]\d{2})$/) >> split(2,2,2) | # 4-digit NDC
match(/^(6\d[1|8])\d+$/) >> split(3,3) | # mobile
match(/^(60\d{2})\d{8}$/) >> split(2,2,2,2) | # mobile machine to machine
match(/^([2-9]\d)/) >> split(2,2,2) # 2-digit NDC
# country '353' # Republic of Ireland, see special file.
country '354', none >> split(3,4) # Iceland
country '355', todo # Albania
country '356', todo # Malta
country '357', todo # Cyprus
# Finland.
#
country '358',
match(/^([6-8]00)\d+$/) >> split(3,3) | # Service
match(/^(4\d|50)\d+$/) >> split(3,2,2) | # Mobile
one_of('2','3','5','6','8','9') >> split(3,3) | # Short NDCs
fixed(2) >> split(3,3) # 2-digit NDCs
# Bulgaria.
#
country '359',
fixed(2) >> split(3,2,2) # Bulgaria
# Lithuania.
#
country '370',
one_of('700', '800') >> split(2,3) | # Service
match(/^(6\d\d)\d+$/) >> split(2,3) | # Mobile
one_of('5') >> split(3,2,2) | # Vilnius
one_of('37','41') >> split(2,2,2) | # Kaunas, Šiauliai
fixed(3) >> split(1,2,2) # 3-digit NDCs.
country '371', todo # Latvia
country '372', todo # Estonia
country '373', todo # Moldova
country '374', todo # Armenia
country '375', todo # Belarus
country '376', todo # Andorra
country '377', todo # Monaco
country '378', todo # San Marino
country '379', todo # Vatican City State
country '380', todo # Ukraine
country '381', todo # Serbia and Montenegro
country '382', todo # -
country '383', todo # -
country '384', todo # -
# Croatia.
#
country '385', one_of('1') >> split(3,5) | # Zagreb
fixed(2) >> split(3,5) # 2-digit NDCs
country '386', fixed(2) >> split(3,2,2) # Slovenia
country '387', fixed(2) >> split(3,2,2) # Bosnia and Herzegovina
country '388', fixed(2) >> split(3,2,2) # Group of countries, shared code
country '389', fixed(2) >> split(3,2,2) # The Former Yugoslav Republic of Macedonia
country '420', fixed(3) >> split(3,3) # Czech Republic
# Slovak Republic.
#
country '421', match(/^(9\d\d).+$/) >> split(6) | # Mobile
one_of('2') >> split(8) | # Bratislava
fixed(2) >> split(7) # 2-digit NDCs
country '422', todo # Spare code
country '423', none >> split(3,2,2) # Liechtenstein (Principality of)
country '424', todo # -
country '425', todo # -
country '426', todo # -
country '427', todo # -
country '428', todo # -
country '429', todo # -
country '500', todo # Falkland Islands (Malvinas)
country '501', todo # Belize
country '502', todo # Guatemala (Republic of)
country '503', todo # El Salvador (Republic of)
country '504', todo # Honduras (Republic of)
country '505', todo # Nicaragua
country '506', todo # Costa Rica
country '507', todo # Panama (Republic of)
country '508', todo # Saint Pierre and Miquelon (Collectivité territoriale de la République française)
country '509', todo # Haiti (Republic of)
country '590', todo # Guadeloupe (French Department of)
country '591', todo # Bolivia (Republic of)
country '592', todo # Guyana
country '593', todo # Ecuador
country '594', todo # French Guiana (French Department of)
country '595', todo # Paraguay (Republic of)
country '596', todo # Martinique (French Department of)
country '597', todo # Suriname (Republic of)
country '598', todo # Uruguay (Eastern Republic of)
country '599', todo # Netherlands Antilles
country '670', todo # Democratic Republic of Timor-Leste
country '671', todo # Spare code
country '672', todo # Australian External Territories
country '673', todo # Brunei Darussalam
country '674', todo # Nauru (Republic of)
country '675', todo # Papua New Guinea
country '676', todo # Tonga (Kingdom of)
country '677', todo # Solomon Islands
country '678', todo # Vanuatu (Republic of)
country '679', todo # Fiji (Republic of)
country '680', todo # Palau (Republic of)
country '681', todo # Wallis and Futuna (Territoire français d'outre-mer)
country '682', todo # Cook Islands
country '683', todo # Niue
country '684', todo # -
country '685', todo # Samoa (Independent State of)
country '686', todo # Kiribati (Republic of)
country '687', todo # New Caledonia (Territoire français d'outre-mer)
country '688', todo # Tuvalu
country '689', todo # French Polynesia (Territoire français d'outre-mer)
country '690', todo # Tokelau
country '691', todo # Micronesia (Federated States of)
country '692', todo # Marshall Islands (Republic of the)
country '693', todo # -
country '694', todo # -
country '695', todo # -
country '696', todo # -
country '697', todo # -
country '698', todo # -
country '699', todo # -
country '800', todo # International Freephone Service
country '801', todo # -
country '802', todo # -
country '803', todo # -
country '804', todo # -
country '805', todo # -
country '806', todo # -
country '807', todo # -
country '808', todo # International Shared Cost Service (ISCS)
country '809', todo # -
country '830', todo # -
country '831', todo # -
country '832', todo # -
country '833', todo # -
country '834', todo # -
country '835', todo # -
country '836', todo # -
country '837', todo # -
country '838', todo # -
country '839', todo # -
country '850', todo # Democratic People's Republic of Korea
country '851', todo # Spare code
country '852', todo # Hong Kong, China
country '853', todo # Macao, China
country '854', todo # Spare code
country '855', todo # Cambodia (Kingdom of)
country '856', todo # Lao People's Democratic Republic
country '857', todo # Spare code
country '858', todo # Spare code
country '859', todo # Spare code
country '870', todo # Inmarsat SNAC
country '871', todo # Inmarsat (Atlantic Ocean-East)
country '872', todo # Inmarsat (Pacific Ocean)
country '873', todo # Inmarsat (Indian Ocean)
country '874', todo # Inmarsat (Atlantic Ocean-West)
country '875', todo # Reserved - Maritime Mobile Service Applications
country '876', todo # Reserved - Maritime Mobile Service Applications
country '877', todo # Reserved - Maritime Mobile Service Applications
country '878', todo # Universal Personal Telecommunication Service (UPT)
country '879', todo # Reserved for national non-commercial purposes
country '880', todo # Bangladesh (People's Republic of)
country '881', todo # International Mobile, shared code
country '882', todo # International Networks, shared code
country '883', todo # -
country '884', todo # -
country '885', todo # -
country '886', todo # Reserved
country '887', todo # -
country '888', todo # Reserved for future global service
country '889', todo # -
country '890', todo # -
country '891', todo # -
country '892', todo # -
country '893', todo # -
country '894', todo # -
country '895', todo # -
country '896', todo # -
country '897', todo # -
country '898', todo # -
country '899', todo # -
country '960', todo # Maldives (Republic of)
country '961', todo # Lebanon
country '962', todo # Jordan (Hashemite Kingdom of)
country '963', todo # Syrian Arab Republic
country '964', todo # Iraq (Republic of)
country '965', todo # Kuwait (State of)
country '966', todo # Saudi Arabia (Kingdom of)
country '967', todo # Yemen (Republic of)
country '968', todo # Oman (Sultanate of)
country '969', todo # Reserved - reservation currently under investigation
country '970', todo # Reserved
country '971', todo # United Arab Emirates
country '972', todo # Israel (State of)
country '973', todo # Bahrain (Kingdom of)
country '974', todo # Qatar (State of)
country '975', todo # Bhutan (Kingdom of)
country '976', todo # Mongolia
country '977', todo # Nepal
country '978', todo # -
country '979', todo # International Premium Rate Service (IPRS)
country '990', todo # Spare code
country '991', todo # Trial of a proposed new international telecommunication public correspondence service, shared code
country '992', todo # Tajikistan (Republic of)
country '993', todo # Turkmenistan
country '994', todo # Azerbaijani Republic
country '995', todo # Georgia
country '996', todo # Kyrgyz Republic
country '997', todo # Spare code
country '998', todo # Uzbekistan (Republic of)
country '999', todo # Reserved for possible future use within the Telecommunications for Disaster Relief (TDR) concept
end
|
module Rack
class Prerender
require 'net/http'
require 'active_support'
def initialize(app, options={})
# googlebot, yahoo, and bingbot are not in this list because
# we support _escaped_fragment_ and want to ensure people aren't
# penalized for cloaking.
@crawler_user_agents = [
# 'googlebot',
# 'yahoo',
# 'bingbot',
'baiduspider',
'facebookexternalhit',
'twitterbot',
'rogerbot',
'linkedinbot',
'embedly',
'bufferbot',
'quora link preview',
'showyoubot',
'outbrain',
'pinterest',
'developers.google.com/+/web/snippet',
'www.google.com/webmasters/tools/richsnippets',
'slackbot',
'vkShare',
'W3C_Validator',
'redditbot',
'Applebot'
]
@extensions_to_ignore = [
'.js',
'.css',
'.xml',
'.less',
'.png',
'.jpg',
'.jpeg',
'.gif',
'.pdf',
'.doc',
'.txt',
'.ico',
'.rss',
'.zip',
'.mp3',
'.rar',
'.exe',
'.wmv',
'.doc',
'.avi',
'.ppt',
'.mpg',
'.mpeg',
'.tif',
'.wav',
'.mov',
'.psd',
'.ai',
'.xls',
'.mp4',
'.m4a',
'.swf',
'.dat',
'.dmg',
'.iso',
'.flv',
'.m4v',
'.torrent'
]
@options = options
@options[:whitelist] = [@options[:whitelist]] if @options[:whitelist].is_a? String
@options[:blacklist] = [@options[:blacklist]] if @options[:blacklist].is_a? String
@extensions_to_ignore = @options[:extensions_to_ignore] if @options[:extensions_to_ignore]
@crawler_user_agents = @options[:crawler_user_agents] if @options[:crawler_user_agents]
@app = app
end
def call(env)
if should_show_prerendered_page(env)
cached_response = before_render(env)
if cached_response
return cached_response.finish
end
prerendered_response = get_prerendered_page_response(env)
if prerendered_response
response = build_rack_response_from_prerender(prerendered_response)
after_render(env, prerendered_response)
return response.finish
end
end
@app.call(env)
end
def should_show_prerendered_page(env)
user_agent = env['HTTP_USER_AGENT']
buffer_agent = env['X-BUFFERBOT']
is_requesting_prerendered_page = false
return false if !user_agent
return false if env['REQUEST_METHOD'] != 'GET'
request = Rack::Request.new(env)
is_requesting_prerendered_page = true if Rack::Utils.parse_query(request.query_string).has_key?('_escaped_fragment_')
#if it is a bot...show prerendered page
is_requesting_prerendered_page = true if @crawler_user_agents.any? { |crawler_user_agent| user_agent.downcase.include?(crawler_user_agent.downcase) }
#if it is BufferBot...show prerendered page
is_requesting_prerendered_page = true if buffer_agent
#if it is a bot and is requesting a resource...dont prerender
return false if @extensions_to_ignore.any? { |extension| request.fullpath.include? extension }
#if it is a bot and not requesting a resource and is not whitelisted...dont prerender
return false if @options[:whitelist].is_a?(Array) && @options[:whitelist].all? { |whitelisted| !Regexp.new(whitelisted).match(request.fullpath) }
#if it is a bot and not requesting a resource and is not blacklisted(url or referer)...dont prerender
if @options[:blacklist].is_a?(Array) && @options[:blacklist].any? { |blacklisted|
blacklistedUrl = false
blacklistedReferer = false
regex = Regexp.new(blacklisted)
blacklistedUrl = !!regex.match(request.fullpath)
blacklistedReferer = !!regex.match(request.referer) if request.referer
blacklistedUrl || blacklistedReferer
}
return false
end
return is_requesting_prerendered_page
end
def get_prerendered_page_response(env)
begin
url = URI.parse(build_api_url(env))
headers = {
'User-Agent' => env['HTTP_USER_AGENT'],
'Accept-Encoding' => 'gzip'
}
headers['X-Prerender-Token'] = ENV['PRERENDER_TOKEN'] if ENV['PRERENDER_TOKEN']
headers['X-Prerender-Token'] = @options[:prerender_token] if @options[:prerender_token]
req = Net::HTTP::Get.new(url.request_uri, headers)
req.basic_auth(ENV['PRERENDER_USERNAME'], ENV['PRERENDER_PASSWORD']) if @options[:basic_auth]
response = Net::HTTP.start(url.host, url.port) { |http| http.request(req) }
if response['Content-Encoding'] == 'gzip'
response.body = ActiveSupport::Gzip.decompress(response.body)
response['Content-Length'] = response.body.length
response.delete('Content-Encoding')
end
response
rescue
nil
end
end
def build_api_url(env)
new_env = env
if env["CF-VISITOR"]
match = /"scheme":"(http|https)"/.match(env['CF-VISITOR'])
new_env["HTTPS"] = true and new_env["rack.url_scheme"] = "https" and new_env["SERVER_PORT"] = 443 if (match && match[1] == "https")
new_env["HTTPS"] = false and new_env["rack.url_scheme"] = "http" and new_env["SERVER_PORT"] = 80 if (match && match[1] == "http")
end
if env["X-FORWARDED-PROTO"]
new_env["HTTPS"] = true and new_env["rack.url_scheme"] = "https" and new_env["SERVER_PORT"] = 443 if env["X-FORWARDED-PROTO"].split(',')[0] == "https"
new_env["HTTPS"] = false and new_env["rack.url_scheme"] = "http" and new_env["SERVER_PORT"] = 80 if env["X-FORWARDED-PROTO"].split(',')[0] == "http"
end
if @options[:protocol]
new_env["HTTPS"] = true and new_env["rack.url_scheme"] = "https" and new_env["SERVER_PORT"] = 443 if @options[:protocol] == "https"
new_env["HTTPS"] = false and new_env["rack.url_scheme"] = "http" and new_env["SERVER_PORT"] = 80 if @options[:protocol] == "http"
end
url = Rack::Request.new(new_env).url
prerender_url = get_prerender_service_url()
forward_slash = prerender_url[-1, 1] == '/' ? '' : '/'
"#{prerender_url}#{forward_slash}#{url}"
end
def get_prerender_service_url
@options[:prerender_service_url] || ENV['PRERENDER_SERVICE_URL'] || 'http://service.prerender.io/'
end
def build_rack_response_from_prerender(prerendered_response)
response = Rack::Response.new(prerendered_response.body, prerendered_response.code, prerendered_response.header)
@options[:build_rack_response_from_prerender].call(response, prerendered_response) if @options[:build_rack_response_from_prerender]
response
end
def before_render(env)
return nil unless @options[:before_render]
cached_render = @options[:before_render].call(env)
if cached_render && cached_render.is_a?(String)
Rack::Response.new(cached_render, 200, { 'Content-Type' => 'text/html; charset=utf-8' })
elsif cached_render && cached_render.is_a?(Rack::Response)
cached_render
else
nil
end
end
def after_render(env, response)
return true unless @options[:after_render]
@options[:after_render].call(env, response)
end
end
end
Add Qwantify to the crawler list.
Qwantify is the crawler of the Qwant search engine (https://www.qwant.com/). Its user agent is:
Mozilla/5.0 (compatible; Qwantify/2.3w; +https://www.qwant.com/)/2.3w
module Rack
class Prerender
require 'net/http'
require 'active_support'
def initialize(app, options={})
# googlebot, yahoo, and bingbot are not in this list because
# we support _escaped_fragment_ and want to ensure people aren't
# penalized for cloaking.
@crawler_user_agents = [
# 'googlebot',
# 'yahoo',
# 'bingbot',
'baiduspider',
'facebookexternalhit',
'twitterbot',
'rogerbot',
'linkedinbot',
'embedly',
'bufferbot',
'quora link preview',
'showyoubot',
'outbrain',
'pinterest',
'developers.google.com/+/web/snippet',
'www.google.com/webmasters/tools/richsnippets',
'slackbot',
'vkShare',
'W3C_Validator',
'redditbot',
'Applebot',
'Qwantify'
]
@extensions_to_ignore = [
'.js',
'.css',
'.xml',
'.less',
'.png',
'.jpg',
'.jpeg',
'.gif',
'.pdf',
'.doc',
'.txt',
'.ico',
'.rss',
'.zip',
'.mp3',
'.rar',
'.exe',
'.wmv',
'.doc',
'.avi',
'.ppt',
'.mpg',
'.mpeg',
'.tif',
'.wav',
'.mov',
'.psd',
'.ai',
'.xls',
'.mp4',
'.m4a',
'.swf',
'.dat',
'.dmg',
'.iso',
'.flv',
'.m4v',
'.torrent'
]
@options = options
@options[:whitelist] = [@options[:whitelist]] if @options[:whitelist].is_a? String
@options[:blacklist] = [@options[:blacklist]] if @options[:blacklist].is_a? String
@extensions_to_ignore = @options[:extensions_to_ignore] if @options[:extensions_to_ignore]
@crawler_user_agents = @options[:crawler_user_agents] if @options[:crawler_user_agents]
@app = app
end
def call(env)
if should_show_prerendered_page(env)
cached_response = before_render(env)
if cached_response
return cached_response.finish
end
prerendered_response = get_prerendered_page_response(env)
if prerendered_response
response = build_rack_response_from_prerender(prerendered_response)
after_render(env, prerendered_response)
return response.finish
end
end
@app.call(env)
end
def should_show_prerendered_page(env)
user_agent = env['HTTP_USER_AGENT']
buffer_agent = env['X-BUFFERBOT']
is_requesting_prerendered_page = false
return false if !user_agent
return false if env['REQUEST_METHOD'] != 'GET'
request = Rack::Request.new(env)
is_requesting_prerendered_page = true if Rack::Utils.parse_query(request.query_string).has_key?('_escaped_fragment_')
#if it is a bot...show prerendered page
is_requesting_prerendered_page = true if @crawler_user_agents.any? { |crawler_user_agent| user_agent.downcase.include?(crawler_user_agent.downcase) }
#if it is BufferBot...show prerendered page
is_requesting_prerendered_page = true if buffer_agent
#if it is a bot and is requesting a resource...dont prerender
return false if @extensions_to_ignore.any? { |extension| request.fullpath.include? extension }
#if it is a bot and not requesting a resource and is not whitelisted...dont prerender
return false if @options[:whitelist].is_a?(Array) && @options[:whitelist].all? { |whitelisted| !Regexp.new(whitelisted).match(request.fullpath) }
#if it is a bot and not requesting a resource and is not blacklisted(url or referer)...dont prerender
if @options[:blacklist].is_a?(Array) && @options[:blacklist].any? { |blacklisted|
blacklistedUrl = false
blacklistedReferer = false
regex = Regexp.new(blacklisted)
blacklistedUrl = !!regex.match(request.fullpath)
blacklistedReferer = !!regex.match(request.referer) if request.referer
blacklistedUrl || blacklistedReferer
}
return false
end
return is_requesting_prerendered_page
end
def get_prerendered_page_response(env)
begin
url = URI.parse(build_api_url(env))
headers = {
'User-Agent' => env['HTTP_USER_AGENT'],
'Accept-Encoding' => 'gzip'
}
headers['X-Prerender-Token'] = ENV['PRERENDER_TOKEN'] if ENV['PRERENDER_TOKEN']
headers['X-Prerender-Token'] = @options[:prerender_token] if @options[:prerender_token]
req = Net::HTTP::Get.new(url.request_uri, headers)
req.basic_auth(ENV['PRERENDER_USERNAME'], ENV['PRERENDER_PASSWORD']) if @options[:basic_auth]
response = Net::HTTP.start(url.host, url.port) { |http| http.request(req) }
if response['Content-Encoding'] == 'gzip'
response.body = ActiveSupport::Gzip.decompress(response.body)
response['Content-Length'] = response.body.length
response.delete('Content-Encoding')
end
response
rescue
nil
end
end
def build_api_url(env)
new_env = env
if env["CF-VISITOR"]
match = /"scheme":"(http|https)"/.match(env['CF-VISITOR'])
new_env["HTTPS"] = true and new_env["rack.url_scheme"] = "https" and new_env["SERVER_PORT"] = 443 if (match && match[1] == "https")
new_env["HTTPS"] = false and new_env["rack.url_scheme"] = "http" and new_env["SERVER_PORT"] = 80 if (match && match[1] == "http")
end
if env["X-FORWARDED-PROTO"]
new_env["HTTPS"] = true and new_env["rack.url_scheme"] = "https" and new_env["SERVER_PORT"] = 443 if env["X-FORWARDED-PROTO"].split(',')[0] == "https"
new_env["HTTPS"] = false and new_env["rack.url_scheme"] = "http" and new_env["SERVER_PORT"] = 80 if env["X-FORWARDED-PROTO"].split(',')[0] == "http"
end
if @options[:protocol]
new_env["HTTPS"] = true and new_env["rack.url_scheme"] = "https" and new_env["SERVER_PORT"] = 443 if @options[:protocol] == "https"
new_env["HTTPS"] = false and new_env["rack.url_scheme"] = "http" and new_env["SERVER_PORT"] = 80 if @options[:protocol] == "http"
end
url = Rack::Request.new(new_env).url
prerender_url = get_prerender_service_url()
forward_slash = prerender_url[-1, 1] == '/' ? '' : '/'
"#{prerender_url}#{forward_slash}#{url}"
end
def get_prerender_service_url
@options[:prerender_service_url] || ENV['PRERENDER_SERVICE_URL'] || 'http://service.prerender.io/'
end
def build_rack_response_from_prerender(prerendered_response)
response = Rack::Response.new(prerendered_response.body, prerendered_response.code, prerendered_response.header)
@options[:build_rack_response_from_prerender].call(response, prerendered_response) if @options[:build_rack_response_from_prerender]
response
end
def before_render(env)
return nil unless @options[:before_render]
cached_render = @options[:before_render].call(env)
if cached_render && cached_render.is_a?(String)
Rack::Response.new(cached_render, 200, { 'Content-Type' => 'text/html; charset=utf-8' })
elsif cached_render && cached_render.is_a?(Rack::Response)
cached_render
else
nil
end
end
def after_render(env, response)
return true unless @options[:after_render]
@options[:after_render].call(env, response)
end
end
end
|
# encoding: utf-8
module Protocop
# Encapsulates behaviour for writing to a string of bytes that conforms to
# the Protocol Buffer wire protocol.
#
# @since 0.0.0
class Buffer
# Constant for binary string encoding.
BINARY = "BINARY"
# @attribute [r] bytes The wrapped string of bytes.
attr_reader :bytes
# Check if this buffer is equal to the other object. Simply checks the
# bytes against the other's bytes.
#
# @example Check buffer equality.
# buffer == other
#
# @param [ Object ] other The object to check against.
#
# @return [ true, false ] If the buffer is equal to the object.
#
# @since 0.0.0
def ==(other)
bytes == other.bytes
end
# Instantiate a new buffer.
#
# @example Instantiate the buffer.
# Protocop::Buffer.new
#
# @since 0.0.0
def initialize
@bytes = "".force_encoding(BINARY)
end
# Read a boolen from the buffer, removing the byte from the buffer in the
# process.
#
# @example Read a boolean from the buffer.
# buffer.read_boolean
#
# @return [ true, false ] value The boolean value.
#
# @see https://developers.google.com/protocol-buffers/docs/encoding
#
# @since 0.0.0
def read_boolean
read(1).ord != 0
end
# Read a 64bit double from the buffer, removing the bytes from the buffer
# in the process.
#
# @example Read a double from the buffer.
# buffer.read_double
#
# @return [ Float ] value The double value.
#
# @see https://developers.google.com/protocol-buffers/docs/encoding
#
# @since 0.0.0
def read_double
read(8).unpack("E")[0]
end
# Read a fixed size 32 bit integer from the buffer (little endian),
# removing the bytes from the buffer in the process.
#
# @example Read the fixed 32 bit value.
# buffer.read_fixed32
#
# @return [ Integer ] The integer value.
#
# @see https://developers.google.com/protocol-buffers/docs/encoding
#
# @since 0.0.0
def read_fixed32
read(4).unpack("V")[0]
end
# Read a fixed size 64 bit integer from the buffer (little endian),
# removing the bytes from the buffer in the process.
#
# @example Read the fixed 64 bit value.
# buffer.read_fixed64
#
# @return [ Integer ] The integer value.
#
# @see https://developers.google.com/protocol-buffers/docs/encoding
#
# @since 0.0.0
def read_fixed64
values = read(8).unpack("VV")
values[0] + (values[1] << 32)
end
# Read a 32bit float from the buffer, removing the bytes from the buffer
# in the process.
#
# @example Read a float from the buffer.
# buffer.read_float
#
# @return [ Float ] value The float value.
#
# @see https://developers.google.com/protocol-buffers/docs/encoding
#
# @since 0.0.0
def read_float
read(4).unpack("e")[0]
end
# Read a 32 bit integer from the buffer. The number of bytes that are read
# will depend on the value of the variable length integer.
#
# @example Read the integer from the buffer.
# buffer.read_int32
#
# @return [ Integer ] The integer.
#
# @see https://developers.google.com/protocol-buffers/docs/encoding
#
# @since 0.0.0
def read_int32
read_int64
end
# Read a 64 bit integer from the buffer. The number of bytes that are read
# will depend on the value of the variable length integer.
#
# @example Read the integer from the buffer.
# buffer.read_int64
#
# @return [ Integer ] The integer.
#
# @see https://developers.google.com/protocol-buffers/docs/encoding
#
# @since 0.0.0
def read_int64
value = read_varint
value -= (1 << 64) if value > Integer::MAX_SIGNED_64BIT
value
end
# Read a signed fixed size 32 bit integer from the buffer (little endian),
# removing the bytes from the buffer in the process. The stored value is
# zig-zag decoded on the way out.
#
# @example Read the signed fixed 32 bit value.
#   buffer.read_sfixed32
#
# @return [ Integer ] The signed integer value.
#
# @see https://developers.google.com/protocol-buffers/docs/encoding
#
# @since 0.0.0
def read_sfixed32
un_zig_zag(read_fixed32)
end
# Read a signed fixed size 64 bit integer from the buffer (little endian),
# removing the bytes from the buffer in the process. The stored value is
# zig-zag decoded on the way out.
#
# @example Read the signed fixed 64 bit value.
#   buffer.read_sfixed64
#
# @return [ Integer ] The signed integer value.
#
# @see https://developers.google.com/protocol-buffers/docs/encoding
#
# @since 0.0.0
def read_sfixed64
un_zig_zag(read_fixed64)
end
# Read a 32 bit signed integer (zig-zag encoded varint) from the buffer,
# removing the bytes from the buffer in the process.
#
# @example Read the signed integer from the buffer.
#   buffer.read_sint32
#
# @return [ Integer ] The signed integer value.
#
# @see https://developers.google.com/protocol-buffers/docs/encoding
#
# @since 0.0.0
def read_sint32
un_zig_zag(read_varint)
end
# Read a 64 bit signed integer (zig-zag encoded varint) from the buffer,
# removing the bytes from the buffer in the process.
#
# @example Read the signed integer from the buffer.
#   buffer.read_sint64
#
# @return [ Integer ] The signed integer value.
#
# @see https://developers.google.com/protocol-buffers/docs/encoding
#
# @since 0.0.0
def read_sint64
un_zig_zag(read_varint)
end
# Read a length-delimited string from the buffer, removing the bytes from
# the buffer in the process. The leading varint holds the length.
#
# @example Read a string from the buffer.
#   buffer.read_string
#
# @return [ String ] The string value.
#
# @see https://developers.google.com/protocol-buffers/docs/encoding
#
# @since 0.0.0
def read_string
read(read_varint)
end
# Read a 32 bit unsigned integer (varint) from the buffer, removing the
# bytes from the buffer in the process.
#
# @example Read the unsigned integer from the buffer.
#   buffer.read_uint32
#
# @return [ Integer ] The unsigned integer value.
#
# @see https://developers.google.com/protocol-buffers/docs/encoding
#
# @since 0.0.0
def read_uint32
read_varint
end
# Read a 64 bit unsigned integer (varint) from the buffer, removing the
# bytes from the buffer in the process.
#
# @example Read the unsigned integer from the buffer.
#   buffer.read_uint64
#
# @return [ Integer ] The unsigned integer value.
#
# @see https://developers.google.com/protocol-buffers/docs/encoding
#
# @since 0.0.0
def read_uint64
read_varint
end
# Read a variable byte length integer from the buffer. The number of bytes
# that are read will depend on the value of the integer.
#
# @example Read the varint from the buffer.
# buffer.read_varint
#
# @return [ Integer ] The integer.
#
# @see https://developers.google.com/protocol-buffers/docs/encoding
#
# @since 0.0.0
def read_varint
result = 0
offset = 0
loop do
byte = read(1).ord
result |= (byte & 0x7F) << offset
# A clear high bit marks the final byte of the varint.
return result if (byte & 0x80).zero?
offset += 7
end
end
# Write a boolean to the buffer.
#
# @example Write a true value to the buffer.
# buffer.write_boolean(true)
#
# @param [ true, false ] value The boolean value.
#
# @return [ Buffer ] The buffer.
#
# @see https://developers.google.com/protocol-buffers/docs/encoding
#
# @since 0.0.0
def write_boolean(value)
write_uint64(value ? 1 : 0)
end
# Write a 64bit double to the buffer.
#
# @example Write the double to the buffer.
# buffer.write_double(1.22)
#
# @param [ Float ] value The double value.
#
# @return [ Buffer ] The buffer.
#
# @see https://developers.google.com/protocol-buffers/docs/encoding
#
# @since 0.0.0
def write_double(value)
bytes << [ value ].pack("E")
self
end
# Write a fixed size 32 bit integer to the buffer (little endian).
#
# @example Write the fixed 32 bit value.
# buffer.write_fixed32(1000)
#
# @param [ Integer ] value The value to write.
#
# @raise [ Errors::InvalidInt32 ] If the value is invalid.
#
# @return [ Buffer ] The buffer.
#
# @see https://developers.google.com/protocol-buffers/docs/encoding
#
# @since 0.0.0
def write_fixed32(value)
validate_int32!(value)
bytes << [ value ].pack("V")
self
end
# Write a fixed size 64 bit integer to the buffer (little endian).
#
# @example Write the fixed 64 bit value.
# buffer.write_fixed64(1000)
#
# @param [ Integer ] value The value to write.
#
# @raise [ Errors::InvalidInt64 ] If the value is invalid.
#
# @return [ Buffer ] The buffer.
#
# @see https://developers.google.com/protocol-buffers/docs/encoding
#
# @since 0.0.0
def write_fixed64(value)
validate_int64!(value)
bytes << [ value & 0xFFFFFFFF, value >> 32 ].pack("VV")
self
end
# Write a 32bit float to the buffer.
#
# @example Write the float to the buffer.
# buffer.write_float(1.22)
#
# @param [ Float ] value The float value.
#
# @return [ Buffer ] The buffer.
#
# @see https://developers.google.com/protocol-buffers/docs/encoding
#
# @since 0.0.0
def write_float(value)
bytes << [ value ].pack("e")
self
end
# Write a 32 bit integer to the buffer.
#
# @example Write the integer to the buffer.
# buffer.write_int32(14)
#
# @note If you find you are writing negative numbers more than positive
# ones for int32 it is more efficient to be using sint32 instead since
# negative int32s will take up more space.
#
# @param [ Integer ] value The integer.
#
# @raise [ Errors::InvalidInt32 ] If the value is invalid.
#
# @return [ Buffer ] The buffer.
#
# @see https://developers.google.com/protocol-buffers/docs/encoding
#
# @since 0.0.0
def write_int32(value)
validate_int32!(value)
write_varint(value)
end
# Write a 64 bit integer to the buffer.
#
# @example Write the integer to the buffer.
# buffer.write_int64(14)
#
# @note If you find you are writing negative numbers more than positive
# ones for int64 it is more efficient to be using sint64 instead since
# negative int64s will take up more space.
#
# @param [ Integer ] value The integer.
#
# @raise [ Errors::InvalidInt64 ] If the value is invalid.
#
# @return [ Buffer ] The buffer.
#
# @see https://developers.google.com/protocol-buffers/docs/encoding
#
# @since 0.0.0
def write_int64(value)
validate_int64!(value)
write_varint(value)
end
# Write a signed fixed size 32 bit integer to the buffer (little endian).
#
# @example Write the signed fixed 32 bit value.
# buffer.write_sfixed32(1000)
#
# @param [ Integer ] value The value to write.
#
# @raise [ Errors::InvalidInt32 ] If the value is invalid.
#
# @return [ Buffer ] The buffer.
#
# @see https://developers.google.com/protocol-buffers/docs/encoding
#
# @since 0.0.0
def write_sfixed32(value)
write_fixed32(zig_zag32(value))
end
# Write a signed fixed size 64 bit integer to the buffer (little endian).
#
# @example Write the signed fixed 64 bit value.
#   buffer.write_sfixed64(1000)
#
# @param [ Integer ] value The value to write.
#
# @raise [ Errors::InvalidInt64 ] If the value is invalid.
#
# @return [ Buffer ] The buffer.
#
# @see https://developers.google.com/protocol-buffers/docs/encoding
#
# @since 0.0.0
def write_sfixed64(value)
write_fixed64(zig_zag64(value))
end
# Write a 32 bit signed integer to the buffer.
#
# @example Write the integer to the buffer.
# buffer.write_sint32(14)
#
# @param [ Integer ] value The integer.
#
# @raise [ Errors::InvalidInt32 ] If the value is invalid.
#
# @return [ Buffer ] The buffer.
#
# @see https://developers.google.com/protocol-buffers/docs/encoding
#
# @since 0.0.0
def write_sint32(value)
validate_int32!(value)
write_varint(zig_zag32(value))
end
# Write a 64 bit signed integer to the buffer.
#
# @example Write the integer to the buffer.
# buffer.write_sint64(14)
#
# @param [ Integer ] value The integer.
#
# @raise [ Errors::InvalidInt64 ] If the value is invalid.
#
# @return [ Buffer ] The buffer.
#
# @see https://developers.google.com/protocol-buffers/docs/encoding
#
# @since 0.0.0
def write_sint64(value)
validate_int64!(value)
write_varint(zig_zag64(value))
end
# Write a 32 bit unsigned integer to the buffer.
#
# @example Write the integer to the buffer.
# buffer.write_uint32(14)
#
# @param [ Integer ] value The integer.
#
# @raise [ Errors::InvalidUint32 ] If the value is invalid.
#
# @return [ Buffer ] The buffer.
#
# @see https://developers.google.com/protocol-buffers/docs/encoding
#
# @since 0.0.0
def write_uint32(value)
validate_uint32!(value)
write_varint(value)
end
# Write a 64 bit unsigned integer to the buffer.
#
# @example Write the integer to the buffer.
# buffer.write_uint64(14)
#
# @param [ Integer ] value The integer.
#
# @raise [ Errors::InvalidUint64 ] If the value is invalid.
#
# @return [ Buffer ] The buffer.
#
# @see https://developers.google.com/protocol-buffers/docs/encoding
#
# @since 0.0.0
def write_uint64(value)
validate_uint64!(value)
write_varint(value)
end
# Write a varint to the buffer.
#
# @example Write a varint.
# buffer.write_varint(10)
#
# @note The shift for negative numbers is explained in the protobuf
# documentation: "If you use int32 or int64 as the type for a negative
# number, the resulting varint is always ten bytes long – it is,
# effectively, treated like a very large unsigned integer."
#
# @param [ Integer ] value The integer to write.
#
# @return [ Buffer ] The buffer.
#
# @see https://developers.google.com/protocol-buffers/docs/encoding#varints
#
# @since 0.0.0
def write_varint(value)
# Negative values are encoded via their 64 bit two's complement, so they
# always occupy the full ten bytes on the wire.
value += (1 << 64) if value < 0
until value <= 0x7F
bytes << ((value & 0x7F) | 0x80)
value >>= 7
end
bytes << (value & 0x7F)
self
end
# Write a string to the buffer via the Protocol Buffer specification.
#
# @example Write a string to the buffer.
# buffer.write_string("test")
#
# @param [ String ] value The string to write.
#
# @return [ Buffer ] The buffer.
#
# @see https://developers.google.com/protocol-buffers/docs/encoding
#
# @since 0.0.0
def write_string(value)
return self unless value
# Use the byte length, not the character length: the wire format prefixes
# the field with its size in bytes, which differs from the character count
# for multibyte (e.g. UTF-8) strings.
write_varint(value.bytesize)
bytes << value
self
end
alias :write_bytes :write_string
# Exception used for validating integers are in the valid range for the
# specified type.
#
# @since 0.0.0
class OutsideRange < Exception; end
private
# Read the provided number of bytes from the buffer and remove them.
#
# @api private
#
# @example Read 4 bytes from the buffer.
#   buffer.read(4)
#
# @param [ Integer ] length The number of bytes to read.
#
# @return [ String ] The raw bytes.
#
# @since 0.0.0
def read(length)
bytes.slice!(0, length)
end
# Decode a "zig-zag" encoded integer back into its signed value.
#
# @api private
#
# @example Un-zig-zag the value.
#   buffer.un_zig_zag(3)
#
# @param [ Integer ] value The zig-zag encoded value.
#
# @return [ Integer ] The decoded signed integer.
#
# @since 0.0.0
def un_zig_zag(value)
# Odd encodings map to negative numbers; x ^ -1 is equivalent to ~x.
magnitude = value >> 1
value.odd? ? ~magnitude : magnitude
end
# Validate that the value is a proper signed 32 bit integer.
#
# @api private
#
# @example Validate the value.
# buffer.validate_int32!(1024)
#
# @param [ Integer ] value The integer to validate.
#
# @raise [ Errors::InvalidInt32 ] If the value is invalid.
#
# @since 0.0.0
def validate_int32!(value)
unless value.int32?
raise OutsideRange.new("#{value} is not a valid 32 bit int.")
end
end
# Validate that the value is a proper signed 64 bit integer.
#
# @api private
#
# @example Validate the value.
# buffer.validate_int64!(1024)
#
# @param [ Integer ] value The integer to validate.
#
# @raise [ Errors::InvalidInt64 ] If the value is invalid.
#
# @since 0.0.0
def validate_int64!(value)
unless value.int64?
raise OutsideRange.new("#{value} is not a valid 64 bit int.")
end
end
# Validate that the value is a proper unsigned 32 bit integer.
#
# @api private
#
# @example Validate the value.
# buffer.validate_uint32!(1024)
#
# @param [ Integer ] value The integer to validate.
#
# @raise [ Errors::InvalidUint32 ] If the value is invalid.
#
# @since 0.0.0
def validate_uint32!(value)
unless value.uint32?
raise OutsideRange.new("#{value} is not a valid 32 bit unsigned int.")
end
end
# Validate that the value is a proper unsigned 64 bit integer.
#
# @api private
#
# @example Validate the value.
# buffer.validate_uint64!(1024)
#
# @param [ Integer ] value The integer to validate.
#
# @raise [ Errors::InvalidUint64 ] If the value is invalid.
#
# @since 0.0.0
def validate_uint64!(value)
# Bug fix: this previously checked uint32?, which wrongly rejected valid
# values in the (2**32 - 1, 2**64 - 1] range. Assumes Integer#uint64? is
# defined alongside the other range predicates used above.
unless value.uint64?
raise OutsideRange.new("#{value} is not a valid 64 bit unsigned int.")
end
end
# "Zig-zag" shift a 32 bit value.
#
# @api private
#
# @example Zig-zag shift the value.
# buffer.zig_zag32(234)
#
# @param [ Integer ] value The integer to encode.
#
# @return [ Integer ] The zig-zaged integer.
#
# @see https://developers.google.com/protocol-buffers/docs/encoding
#
# @since 0.0.0
def zig_zag32(value)
# Interleave positives and negatives: n -> 2n, -n -> 2n - 1.
doubled = value << 1
sign_mask = value >> 31
doubled ^ sign_mask
end
# "Zig-zag" shift a 64 bit value.
#
# @api private
#
# @example Zig-zag shift the value.
# buffer.zig_zag64(234)
#
# @param [ Integer ] value The integer to encode.
#
# @return [ Integer ] The zig-zaged integer.
#
# @see https://developers.google.com/protocol-buffers/docs/encoding
#
# @since 0.0.0
def zig_zag64(value)
# Interleave positives and negatives: n -> 2n, -n -> 2n - 1.
doubled = value << 1
sign_mask = value >> 63
doubled ^ sign_mask
end
end
end
Add rdoc to read and un_zig_zag
# encoding: utf-8
module Protocop
# Encapsulates behaviour for writing to a string of bytes that conforms to
# the Protocol Buffer wire protocol.
#
# @since 0.0.0
class Buffer
# Constant for binary string encoding.
BINARY = "BINARY"
# @attribute [r] bytes The wrapped string of bytes.
attr_reader :bytes
# Check if this buffer is equal to the other object. Simply checks the
# bytes against the other's bytes.
#
# @example Check buffer equality.
# buffer == other
#
# @param [ Object ] other The object to check against.
#
# @return [ true, false ] If the buffer is equal to the object.
#
# @since 0.0.0
def ==(other)
bytes == other.bytes
end
# Instantiate a new buffer.
#
# @example Instantiate the buffer.
# Protocop::Buffer.new
#
# @since 0.0.0
def initialize
@bytes = "".force_encoding(BINARY)
end
# Read a boolen from the buffer, removing the byte from the buffer in the
# process.
#
# @example Read a boolean from the buffer.
# buffer.read_boolean
#
# @return [ true, false ] value The boolean value.
#
# @see https://developers.google.com/protocol-buffers/docs/encoding
#
# @since 0.0.0
def read_boolean
read(1).ord != 0
end
# Read a 64bit double from the buffer, removing the bytes from the buffer
# in the process.
#
# @example Read a double from the buffer.
# buffer.read_double
#
# @return [ Float ] value The double value.
#
# @see https://developers.google.com/protocol-buffers/docs/encoding
#
# @since 0.0.0
def read_double
read(8).unpack("E")[0]
end
# Read a fixed size 32 bit integer from the buffer (little endian),
# removing the bytes from the buffer in the process.
#
# @example Read the fixed 32 bit value.
# buffer.read_fixed32
#
# @return [ Integer ] The integer value.
#
# @see https://developers.google.com/protocol-buffers/docs/encoding
#
# @since 0.0.0
def read_fixed32
read(4).unpack("V")[0]
end
# Read a fixed size 64 bit integer from the buffer (little endian),
# removing the bytes from the buffer in the process.
#
# @example Read the fixed 64 bit value.
# buffer.read_fixed64
#
# @return [ Integer ] The integer value.
#
# @see https://developers.google.com/protocol-buffers/docs/encoding
#
# @since 0.0.0
def read_fixed64
values = read(8).unpack("VV")
values[0] + (values[1] << 32)
end
# Read a 32bit float from the buffer, removing the bytes from the buffer
# in the process.
#
# @example Read a float from the buffer.
# buffer.read_float
#
# @return [ Float ] value The float value.
#
# @see https://developers.google.com/protocol-buffers/docs/encoding
#
# @since 0.0.0
def read_float
read(4).unpack("e")[0]
end
# Read a 32 bit integer from the buffer. The number of bytes that are read
# will depend on the value of the variable length integer.
#
# @example Read the integer from the buffer.
# buffer.read_int32
#
# @return [ Integer ] The integer.
#
# @see https://developers.google.com/protocol-buffers/docs/encoding
#
# @since 0.0.0
def read_int32
read_int64
end
# Read a 64 bit integer from the buffer. The number of bytes that are read
# will depend on the value of the variable length integer.
#
# @example Read the integer from the buffer.
# buffer.read_int64
#
# @return [ Integer ] The integer.
#
# @see https://developers.google.com/protocol-buffers/docs/encoding
#
# @since 0.0.0
def read_int64
value = read_varint
value -= (1 << 64) if value > Integer::MAX_SIGNED_64BIT
value
end
def read_sfixed32
un_zig_zag(read_fixed32)
end
def read_sfixed64
un_zig_zag(read_fixed64)
end
def read_sint32
un_zig_zag(read_varint)
end
def read_sint64
un_zig_zag(read_varint)
end
def read_string
read(read_varint)
end
def read_uint32
read_varint
end
def read_uint64
read_varint
end
# Read a variable byte length integer from the buffer. The number of bytes
# that are read will depend on the value of the integer.
#
# @example Read the varint from the buffer.
# buffer.read_varint
#
# @return [ Integer ] The integer.
#
# @see https://developers.google.com/protocol-buffers/docs/encoding
#
# @since 0.0.0
def read_varint
value, shift = 0, 0
while (byte = read(1).ord) do
value |= (byte & 0x7F) << shift
shift += 7
return value if (byte & 0x80) == 0
end
end
# Write a boolean to the buffer.
#
# @example Write a true value to the buffer.
# buffer.write_boolean(true)
#
# @param [ true, false ] value The boolean value.
#
# @return [ Buffer ] The buffer.
#
# @see https://developers.google.com/protocol-buffers/docs/encoding
#
# @since 0.0.0
def write_boolean(value)
write_uint64(value ? 1 : 0)
end
# Write a 64bit double to the buffer.
#
# @example Write the double to the buffer.
# buffer.write_double(1.22)
#
# @param [ Float ] value The double value.
#
# @return [ Buffer ] The buffer.
#
# @see https://developers.google.com/protocol-buffers/docs/encoding
#
# @since 0.0.0
def write_double(value)
bytes << [ value ].pack("E")
self
end
# Write a fixed size 32 bit integer to the buffer (little endian).
#
# @example Write the fixed 32 bit value.
# buffer.write_fixed32(1000)
#
# @param [ Integer ] value The value to write.
#
# @raise [ Errors::InvalidInt32 ] If the value is invalid.
#
# @return [ Buffer ] The buffer.
#
# @see https://developers.google.com/protocol-buffers/docs/encoding
#
# @since 0.0.0
def write_fixed32(value)
validate_int32!(value)
bytes << [ value ].pack("V")
self
end
# Write a fixed size 64 bit integer to the buffer (little endian).
#
# @example Write the fixed 64 bit value.
# buffer.write_fixed64(1000)
#
# @param [ Integer ] value The value to write.
#
# @raise [ Errors::InvalidInt64 ] If the value is invalid.
#
# @return [ Buffer ] The buffer.
#
# @see https://developers.google.com/protocol-buffers/docs/encoding
#
# @since 0.0.0
def write_fixed64(value)
validate_int64!(value)
bytes << [ value & 0xFFFFFFFF, value >> 32 ].pack("VV")
self
end
# Write a 32bit float to the buffer.
#
# @example Write the float to the buffer.
# buffer.write_float(1.22)
#
# @param [ Float ] value The float value.
#
# @return [ Buffer ] The buffer.
#
# @see https://developers.google.com/protocol-buffers/docs/encoding
#
# @since 0.0.0
def write_float(value)
bytes << [ value ].pack("e")
self
end
# Write a 32 bit integer to the buffer.
#
# @example Write the integer to the buffer.
# buffer.write_int32(14)
#
# @note If you find you are writing negative numbers more than positive
# ones for int32 it is more efficient to be using sint32 instead since
# negative int32s will take up more space.
#
# @param [ Integer ] value The integer.
#
# @raise [ Errors::InvalidInt32 ] If the value is invalid.
#
# @return [ Buffer ] The buffer.
#
# @see https://developers.google.com/protocol-buffers/docs/encoding
#
# @since 0.0.0
def write_int32(value)
validate_int32!(value)
write_varint(value)
end
# Write a 64 bit integer to the buffer.
#
# @example Write the integer to the buffer.
# buffer.write_int64(14)
#
# @note If you find you are writing negative numbers more than positive
# ones for int64 it is more efficient to be using sint64 instead since
# negative int64s will take up more space.
#
# @param [ Integer ] value The integer.
#
# @raise [ Errors::InvalidInt64 ] If the value is invalid.
#
# @return [ Buffer ] The buffer.
#
# @see https://developers.google.com/protocol-buffers/docs/encoding
#
# @since 0.0.0
def write_int64(value)
validate_int64!(value)
write_varint(value)
end
# Write a signed fixed size 32 bit integer to the buffer (little endian).
#
# @example Write the signed fixed 32 bit value.
# buffer.write_sfixed32(1000)
#
# @param [ Integer ] value The value to write.
#
# @raise [ Errors::InvalidInt32 ] If the value is invalid.
#
# @return [ Buffer ] The buffer.
#
# @see https://developers.google.com/protocol-buffers/docs/encoding
#
# @since 0.0.0
def write_sfixed32(value)
write_fixed32(zig_zag32(value))
end
# Write a signed fixed size 64 bit integer to the buffer (little endian).
#
# @example Write the signed fixed 64 bit value.
#   buffer.write_sfixed64(1000)
#
# @param [ Integer ] value The value to write.
#
# @raise [ Errors::InvalidInt64 ] If the value is invalid.
#
# @return [ Buffer ] The buffer.
#
# @see https://developers.google.com/protocol-buffers/docs/encoding
#
# @since 0.0.0
def write_sfixed64(value)
write_fixed64(zig_zag64(value))
end
# Write a 32 bit signed integer to the buffer.
#
# @example Write the integer to the buffer.
# buffer.write_sint32(14)
#
# @param [ Integer ] value The integer.
#
# @raise [ Errors::InvalidInt32 ] If the value is invalid.
#
# @return [ Buffer ] The buffer.
#
# @see https://developers.google.com/protocol-buffers/docs/encoding
#
# @since 0.0.0
def write_sint32(value)
validate_int32!(value)
write_varint(zig_zag32(value))
end
# Write a 64 bit signed integer to the buffer.
#
# @example Write the integer to the buffer.
# buffer.write_sint64(14)
#
# @param [ Integer ] value The integer.
#
# @raise [ Errors::InvalidInt64 ] If the value is invalid.
#
# @return [ Buffer ] The buffer.
#
# @see https://developers.google.com/protocol-buffers/docs/encoding
#
# @since 0.0.0
def write_sint64(value)
validate_int64!(value)
write_varint(zig_zag64(value))
end
# Write a 32 bit unsigned integer to the buffer.
#
# @example Write the integer to the buffer.
# buffer.write_uint32(14)
#
# @param [ Integer ] value The integer.
#
# @raise [ Errors::InvalidUint32 ] If the value is invalid.
#
# @return [ Buffer ] The buffer.
#
# @see https://developers.google.com/protocol-buffers/docs/encoding
#
# @since 0.0.0
def write_uint32(value)
validate_uint32!(value)
write_varint(value)
end
# Write a 64 bit unsigned integer to the buffer.
#
# @example Write the integer to the buffer.
# buffer.write_uint64(14)
#
# @param [ Integer ] value The integer.
#
# @raise [ Errors::InvalidUint64 ] If the value is invalid.
#
# @return [ Buffer ] The buffer.
#
# @see https://developers.google.com/protocol-buffers/docs/encoding
#
# @since 0.0.0
def write_uint64(value)
validate_uint64!(value)
write_varint(value)
end
# Write a varint to the buffer.
#
# @example Write a varint.
# buffer.write_varint(10)
#
# @note The shift for negative numbers is explained in the protobuf
# documentation: "If you use int32 or int64 as the type for a negative
# number, the resulting varint is always ten bytes long – it is,
# effectively, treated like a very large unsigned integer."
#
# @param [ Integer ] value The integer to write.
#
# @return [ Buffer ] The buffer.
#
# @see https://developers.google.com/protocol-buffers/docs/encoding#varints
#
# @since 0.0.0
def write_varint(value)
value += (1 << 64) if value < 0
while (value > 0x7F) do
bytes << ((value & 0x7F) | 0x80)
value >>= 7
end
bytes << (value & 0x7F) and self
end
# Write a string to the buffer via the Protocol Buffer specification.
#
# @example Write a string to the buffer.
# buffer.write_string("test")
#
# @param [ String ] value The string to write.
#
# @return [ Buffer ] The buffer.
#
# @see https://developers.google.com/protocol-buffers/docs/encoding
#
# @since 0.0.0
def write_string(value)
return self unless value
# Use the byte length, not the character length: the wire format prefixes
# the field with its size in bytes, which differs from the character count
# for multibyte (e.g. UTF-8) strings.
write_varint(value.bytesize)
bytes << value
self
end
alias :write_bytes :write_string
# Exception used for validating integers are in the valid range for the
# specified type.
#
# @since 0.0.0
class OutsideRange < Exception; end
private
# Read the provided number of bytes from the buffer and remove them.
#
# @api private
#
# @example Read 4 bytes from the buffer.
# buffer.read(4)
#
# @param [ Integer ] length The number of bytes to read.
#
# @return [ String ] The raw bytes.
#
# @since 0.0.0
def read(length)
bytes.slice!(0, length)
end
# Decodes a value that was encoded with "zig-zag" encoding, such as signed
# integers that aren't varints.
#
# @api private
#
# @example Un-zig-zag the value.
#   buffer.un_zig_zag(1241111344224111)
#
# @param [ Integer ] value The zig-zag encoded value.
#
# @return [ Integer ] The decoded value.
#
# @since 0.0.0
def un_zig_zag(value)
(value >> 1) ^ -(value & 1)
end
# Validate that the value is a proper signed 32 bit integer.
#
# @api private
#
# @example Validate the value.
# buffer.validate_int32!(1024)
#
# @param [ Integer ] value The integer to validate.
#
# @raise [ Errors::InvalidInt32 ] If the value is invalid.
#
# @since 0.0.0
def validate_int32!(value)
unless value.int32?
raise OutsideRange.new("#{value} is not a valid 32 bit int.")
end
end
# Validate that the value is a proper signed 64 bit integer.
#
# @api private
#
# @example Validate the value.
# buffer.validate_int64!(1024)
#
# @param [ Integer ] value The integer to validate.
#
# @raise [ Errors::InvalidInt64 ] If the value is invalid.
#
# @since 0.0.0
def validate_int64!(value)
unless value.int64?
raise OutsideRange.new("#{value} is not a valid 64 bit int.")
end
end
# Validate that the value is a proper unsigned 32 bit integer.
#
# @api private
#
# @example Validate the value.
# buffer.validate_uint32!(1024)
#
# @param [ Integer ] value The integer to validate.
#
# @raise [ Errors::InvalidUint32 ] If the value is invalid.
#
# @since 0.0.0
def validate_uint32!(value)
unless value.uint32?
raise OutsideRange.new("#{value} is not a valid 32 bit unsigned int.")
end
end
# Validate that the value is a proper unsigned 64 bit integer.
#
# @api private
#
# @example Validate the value.
# buffer.validate_uint64!(1024)
#
# @param [ Integer ] value The integer to validate.
#
# @raise [ Errors::InvalidUint64 ] If the value is invalid.
#
# @since 0.0.0
def validate_uint64!(value)
# Bug fix: this previously checked uint32?, which wrongly rejected valid
# values in the (2**32 - 1, 2**64 - 1] range. Assumes Integer#uint64? is
# defined alongside the other range predicates used above.
unless value.uint64?
raise OutsideRange.new("#{value} is not a valid 64 bit unsigned int.")
end
end
# "Zig-zag" shift a 32 bit value.
#
# @api private
#
# @example Zig-zag shift the value.
# buffer.zig_zag32(234)
#
# @param [ Integer ] value The integer to encode.
#
# @return [ Integer ] The zig-zaged integer.
#
# @see https://developers.google.com/protocol-buffers/docs/encoding
#
# @since 0.0.0
def zig_zag32(value)
(value << 1) ^ (value >> 31)
end
# "Zig-zag" shift a 64 bit value.
#
# @api private
#
# @example Zig-zag shift the value.
# buffer.zig_zag64(234)
#
# @param [ Integer ] value The integer to encode.
#
# @return [ Integer ] The zig-zaged integer.
#
# @see https://developers.google.com/protocol-buffers/docs/encoding
#
# @since 0.0.0
def zig_zag64(value)
(value << 1) ^ (value >> 63)
end
end
end
|
module TaskMapper::Provider
# This is the Github Provider for taskmapper
module Github
include TaskMapper::Provider::Base
class << self
attr_accessor :login, :api, :user_token
end
# This is for cases when you want to instantiate using TaskMapper::Provider::Github.new(auth)
def self.new(auth = {})
TaskMapper.new(:github, auth)
end
def provider
TaskMapper::Provider::Github
end
def new_github_client(auth)
Octokit::Client.new auth
end
# declare needed overloaded methods here
def authorize(auth = {})
auth[:login] = auth[:login] || auth[:username]
raise TaskMapper::Exception.new('Please provide at least a username') if auth[:login].blank?
provider.login = auth[:login]
provider.user_token = auth[:password] || auth[:oauth_token]
provider.api = new_github_client auth
end
# Check whether the configured Github client has usable credentials.
#
# @return [ true, false ] Whether the client is authenticated via basic
#   auth or OAuth.
def valid?
# Consistency: use the provider helper instead of repeating the full
# constant path twice (matches how the rest of this module accesses the
# shared client).
provider.api.authenticated? || provider.api.oauthed?
end
end
end
Unified api method calls
module TaskMapper::Provider
# This is the Github Provider for taskmapper
module Github
include TaskMapper::Provider::Base
class << self
attr_accessor :login, :api, :user_token
end
# This is for cases when you want to instantiate using TaskMapper::Provider::Github.new(auth)
def self.new(auth = {})
TaskMapper.new(:github, auth)
end
def provider
TaskMapper::Provider::Github
end
def new_github_client(auth)
Octokit::Client.new auth
end
# declare needed overloaded methods here
# Authorize against Github with the given credentials and store the shared
# Octokit client on the provider module.
#
# @param [ Hash ] auth The credentials. Accepts :login (or :username), and
#   :password or :token.
#
# @raise [ TaskMapper::Exception ] If neither :login nor :username is given.
def authorize(auth = {})
# NOTE(review): @authentication is memoized here but never read in this
# method — presumably consumed elsewhere; confirm it is still needed.
@authentication ||= TaskMapper::Authenticator.new(auth)
auth[:login] = auth[:login] || auth[:username]
raise TaskMapper::Exception.new('Please provide at least a username') if auth[:login].blank?
provider.login = auth[:login]
provider.user_token = auth[:password] || auth[:token]
provider.api = new_github_client auth
end
# Check whether the configured Github client has usable credentials.
#
# @return [ true, false ] Whether the client is authenticated via basic
#   auth or OAuth.
def valid?
client = provider.api
client.authenticated? || client.oauthed?
end
end
end
|
require 'active_support'
require 'action_view'
# +public_activity+ keeps track of changes made to models
# and allows for easy displaying of them.
#
# Basic usage requires adding one line to your models:
#
# class Article < ActiveRecord::Base
# tracked
# end
#
# And creating a table for activities, by doing this:
# rails generate public_activity:migration
# rake db:migrate
#
# Now when saved, public_activity will create
# an Activity record containing information about that changed/created
# model.
# == Displaying Activities:
#
# Minimal example would be:
#
# <% for activity in PublicActivity::Activity.all %>
# <%= activity.text %><br/>
# <% end %>
# Now you will need to add translations in your locale .yml, for the example
# provided above that would be:
# en:
# activity:
# create: 'New article has been created'
# update: 'Someone modified the article'
# destroy: 'Someone deleted the article!'
#
# Check {PublicActivity::ClassMethods#tracked} for more details about customizing and specifing
# ownership to users.
module PublicActivity
extend ActiveSupport::Concern
extend ActiveSupport::Autoload
autoload :Activist
autoload :Activity
autoload :StoreController
autoload :Tracked
autoload :Creation
autoload :Update
autoload :Destruction
autoload :VERSION
autoload :Common
module Model
extend ActiveSupport::Concern
included do
include Tracked
include Activist
end
end
end
require 'public_activity/view_helpers.rb'
require 'active_record' in public_activity.rb
it's a real dependency on `autoload :Activity`
require 'active_support'
require 'action_view'
require 'active_record'
# +public_activity+ keeps track of changes made to models
# and allows for easy displaying of them.
#
# Basic usage requires adding one line to your models:
#
# class Article < ActiveRecord::Base
# tracked
# end
#
# And creating a table for activities, by doing this:
# rails generate public_activity:migration
# rake db:migrate
#
# Now when saved, public_activity will create
# an Activity record containing information about that changed/created
# model.
# == Displaying Activities:
#
# Minimal example would be:
#
# <% for activity in PublicActivity::Activity.all %>
# <%= activity.text %><br/>
# <% end %>
# Now you will need to add translations in your locale .yml, for the example
# provided above that would be:
# en:
# activity:
# create: 'New article has been created'
# update: 'Someone modified the article'
# destroy: 'Someone deleted the article!'
#
# Check {PublicActivity::ClassMethods#tracked} for more details about customizing and specifing
# ownership to users.
module PublicActivity
extend ActiveSupport::Concern
extend ActiveSupport::Autoload
autoload :Activist
autoload :Activity
autoload :StoreController
autoload :Tracked
autoload :Creation
autoload :Update
autoload :Destruction
autoload :VERSION
autoload :Common
module Model
extend ActiveSupport::Concern
included do
include Tracked
include Activist
end
end
end
require 'public_activity/view_helpers.rb'
|
module Pushapp
VERSION = '0.1.3'
end
v0.1.4
module Pushapp
# The current released version of the gem.
VERSION = '0.1.4'
end
|
module QUnited
VERSION = '0.3.0'
end
Bump version to 0.3.1
module QUnited
# The current released version of the gem.
VERSION = '0.3.1'
end
|
# Nozomi: An opinionated Rails template
#
# Assumes you skipped the Gemfile, test/unit and prototype JS which is done through options like this:
#
# rails new myapp -m rails3-template.rb --skip-gemfile --skip-test-unit --skip-prototype
#
# Written on a nozomi shinkansen so you know it's awesome
def git_commit(message, &block)
yield if block
git :add => '.'
git :commit => "-m'Nozomi: #{message}'"
end
# init the repo and commit the rails generated files to git
def initial_git_commit(message)
git :init
git_commit message
end
def copy_db_yml
run 'cp config/database.yml config/database.example.yml'
end
def remove_public_files
%w{index.html favicon.ico}.each do |f|
run "rm public/#{f}"
end
end
# Download jQuery into public/javascripts/vendor.
def install_jquery
filename = "jquery-1.4.2.min.js"
url = "http://ajax.googleapis.com/ajax/libs/jquery/1.4.2/jquery.min.js"
run 'mkdir -p public/javascripts/vendor'
inside('public/javascripts/vendor') do
# Double quotes are required for interpolation: the previous single-quoted
# string handed the literal text '#{url}' (and a garbled output name) to
# wget. This also makes use of the otherwise-unused filename local.
run "wget --output-document=#{filename} #{url}"
end
end
def add_gemfile
file "Gemfile", <<-RUBY
# TODO shouldn't this be gemcutter?
source 'http://rubygems.org'
gem 'rails', '3.0.0'
# view
gem 'haml'
gem 'compass'
# persistence
gem 'sqlite3-ruby', :require => 'sqlite3'
# deployment
gem 'capistrano'
group :development, :test do
gem 'rspec'
gem 'rspec-rails'
gem 'capybara'
gem 'ruby-debug'
gem 'awesome_print', :require => 'ap'
gem 'wirble'
gem 'hirb'
end
RUBY
end
# generate 'rspec'
# run "haml --rails #{run "pwd"}"
#
#
#
#
# run 'echo TODO > README'
# run 'touch tmp/.gitignore log/.gitignore vendor/.gitignore'
# run %{find . -type d -empty | grep -v "vendor" | grep -v ".git" | grep -v "tmp" | xargs -I xxx touch xxx/.gitignore}
#
initial_git_commit "Initial commit from rails"
git_commit("Remove public files") { remove_public_files }
git_commit("Bundler gemfile") { add_gemfile }
git_commit("Copy database.yml") { copy_db_yml }
git_commit("Install jQuery") { install_jquery }
puts <<-MSG
Nozomi Rails template complete! Your next steps are:
1. Edit config/database.yml
2. rake db:create:all
3. rake db:migrate (so we can get a db/schema.rb)
4. Get to work!
MSG
Add formtastic
# Nozomi: An opinionated Rails template
#
# Assumes you skipped the Gemfile, test/unit and prototype JS which is done through options like this:
#
# rails new myapp -m rails3-template.rb --skip-gemfile --skip-test-unit --skip-prototype
#
# Written on a nozomi shinkansen so you know it's awesome
def git_commit(message, &block)
yield if block
git :add => '.'
git :commit => "-m'Nozomi: #{message}'"
end
# init the repo and commit the rails generated files to git
def initial_git_commit(message)
git :init
git_commit message
end
def copy_db_yml
run 'cp config/database.yml config/database.example.yml'
end
# Delete the static placeholder files Rails generates under public/.
def remove_public_files
  ['index.html', 'favicon.ico'].each do |filename|
    run "rm public/#{filename}"
  end
end
# Vendors jQuery 1.4.2 into public/javascripts/vendor.
def install_jquery
  filename = "jquery-1.4.2.min.js"
  url = "http://ajax.googleapis.com/ajax/libs/jquery/1.4.2/jquery.min.js"
  run 'mkdir -p public/javascripts/vendor'
  inside('public/javascripts/vendor') do
    # Double quotes so #{filename} and #{url} interpolate; the original
    # single-quoted string passed the placeholder text literally to the
    # shell, so wget never received the real output name or URL.
    run "wget --output-document=#{filename} #{url}"
  end
end
def add_gemfile
  # Overwrite the generated Gemfile with the template's curated dependency
  # list (this revision adds formtastic). Everything from here to the RUBY
  # terminator is the literal file body written to disk, not template comments.
  file "Gemfile", <<-RUBY
# TODO shouldn't this be gemcutter?
source 'http://rubygems.org'
gem 'rails', '3.0.0'
# view
gem 'haml'
gem 'compass'
gem 'formtastic'
# persistence
gem 'sqlite3-ruby', :require => 'sqlite3'
# deployment
gem 'capistrano'
group :development, :test do
gem 'rspec'
gem 'rspec-rails'
gem 'capybara'
gem 'ruby-debug'
gem 'awesome_print', :require => 'ap'
gem 'wirble'
gem 'hirb'
end
RUBY
end
# Possible future template steps, kept commented out for reference:
# generate 'rspec'
# run "haml --rails #{run "pwd"}"
#
#
#
#
# run 'echo TODO > README'
# run 'touch tmp/.gitignore log/.gitignore vendor/.gitignore'
# run %{find . -type d -empty | grep -v "vendor" | grep -v ".git" | grep -v "tmp" | xargs -I xxx touch xxx/.gitignore}
#

# Template driver: each step runs in its own git commit so the generated
# project's history documents exactly what the template changed.
initial_git_commit "Initial commit from rails"
git_commit("Remove public files") { remove_public_files }
git_commit("Bundler gemfile") { add_gemfile }
git_commit("Copy database.yml") { copy_db_yml }
git_commit("Install jQuery") { install_jquery }

# Final instructions for the user (heredoc body is printed verbatim).
puts <<-MSG
Nozomi Rails template complete! Your next steps are:
1. Edit config/database.yml
2. rake db:create:all
3. rake db:migrate (so we can get a db/schema.rb)
4. Get to work!
MSG
|
module RailsDb
  # Rails engine wiring for the rails_db gem: isolates the namespace,
  # registers assets for precompilation, and injects the view helper.
  class Engine < ::Rails::Engine
    isolate_namespace RailsDb
    # Make the engine's lib/ directory autoloadable.
    config.autoload_paths += Dir["#{config.root}/lib"]
    # Register engine assets with Sprockets so they survive precompilation.
    initializer 'any_login.assets_precompile', :group => :all do |app|
      app.config.assets.precompile += ['vendor/modernizr.js', 'rails_db/up_arrow.gif', 'rails_db/down_arrow.gif', 'rails_db/logo.png']
      app.config.assets.precompile += ["codemirror*", "codemirror/**/*"]
    end
    # Expose the data-table helper to host-application views.
    initializer 'rails_db.helpers' do
      ActiveSupport.on_load :action_view do
        ActionView::Base.send :include, RailsDb::RailsDbDataTableHelper
      end
    end
  end
end
added logo to precompile
module RailsDb
  # Rails engine wiring for the rails_db gem: isolates the namespace,
  # registers assets for precompilation, and injects the view helper.
  class Engine < ::Rails::Engine
    isolate_namespace RailsDb

    # Make the engine's lib/ directory autoloadable.
    config.autoload_paths += Dir["#{config.root}/lib"]

    # Register engine assets with Sprockets so they survive precompilation.
    initializer 'any_login.assets_precompile', group: :all do |app|
      app.config.assets.precompile += [
        'rails_db/logo_mini.png',
        'rails_db/*',
        'vendor/modernizr.js',
        'rails_db/up_arrow.gif',
        'rails_db/down_arrow.gif',
        'rails_db/logo.png'
      ]
      app.config.assets.precompile += ["codemirror*", "codemirror/**/*"]
    end

    # Expose the data-table helper to host-application views.
    initializer 'rails_db.helpers' do
      ActiveSupport.on_load :action_view do
        ActionView::Base.send(:include, RailsDb::RailsDbDataTableHelper)
      end
    end
  end
end
|
module Raygun
  # Mixin providing a quick end-to-end check of a Raygun installation.
  module Testable
    # Raised (and reported) purely to confirm that error tracking works.
    class ItWorksException < StandardError; end

    # Sends a canned exception to Raygun via Raygun.track_exception.
    def track_test_exception
      Raygun.track_exception(ItWorksException.new("Woohoo!"))
    end
  end
end
Add some informative output to the test task
module Raygun
  # Mixin providing a quick end-to-end check of a Raygun installation.
  module Testable
    # Raised (and reported) purely to confirm that error tracking works.
    class ItWorksException < StandardError; end

    # Raises and reports a canned exception, printing whether Raygun
    # accepted it. Reporting silencing is disabled for the duration of the
    # call and then restored: the original implementation flipped
    # silence_reporting to false permanently, mutating global configuration
    # as a side effect of a diagnostic helper.
    def track_test_exception
      previous_silence = Raygun.configuration.silence_reporting
      Raygun.configuration.silence_reporting = false
      raise ItWorksException.new("Woohoo!")
    rescue ItWorksException => e
      if Raygun.track_exception(e).success?
        puts "Success! Now go check your Raygun.io Dashboard"
      else
        puts "Oh-oh, something went wrong - double check your API key"
      end
    ensure
      # Restore the caller's setting.
      Raygun.configuration.silence_reporting = previous_silence
    end
  end
end
# Prefer Nokogiri for XML processing. Nokogiri is deliberately skipped on
# JRuby (forced LoadError), and a missing gem is tolerated: the reader then
# falls back to REXML, loaded on demand in Reader#initialize.
begin
  raise LoadError, "not with java" if RUBY_PLATFORM == "java"
  require 'nokogiri'
rescue LoadError => e
  # Expression value only; no library is loaded here — REXML support is
  # required later via "rdf/rdfa/reader/rexml".
  :rexml
end
require 'rdf/ntriples'
require 'rdf/xsd'
module RDF::RDFa
##
# An RDFa parser in Ruby
#
# This class supports [Nokogiri][] for HTML
# processing, and will automatically select the most performant
# implementation (Nokogiri or LibXML) that is available. If need be, you
# can explicitly override the used implementation by passing in a
# `:library` option to `Reader.new` or `Reader.open`.
#
# [Nokogiri]: http://nokogiri.org/
#
# Based on processing rules described here:
# @see http://www.w3.org/TR/rdfa-syntax/#s_model RDFa 1.0
# @see http://www.w3.org/TR/2012/REC-rdfa-core-20120607/
# @see http://www.w3.org/TR/2012/CR-xhtml-rdfa-20120313/
# @see http://dev.w3.org/html5/rdfa/
#
# @author [Gregg Kellogg](http://kellogg-assoc.com/)
class Reader < RDF::Reader
format Format
include Expansion
# XHTML namespace URI.
XHTML = "http://www.w3.org/1999/xhtml"

# Content model for @about and @resource. In RDFa 1.0, this was URIorSafeCURIE
# (keyed by parser version; values are the token kinds accepted).
SafeCURIEorCURIEorIRI = {
  :"rdfa1.0" => [:safe_curie, :uri, :bnode],
  :"rdfa1.1" => [:safe_curie, :curie, :uri, :bnode],
}

# Content model for @datatype. In RDFa 1.0, this was CURIE
# Also plural TERMorCURIEorAbsIRIs, content model for @rel, @rev, @property and @typeof
TERMorCURIEorAbsIRI = {
  :"rdfa1.0" => [:term, :curie],
  :"rdfa1.1" => [:term, :curie, :absuri],
}
# This expression matches an NCName as defined in
# [XML-NAMES](http://www.w3.org/TR/2009/REC-xml-names-20091208/#NT-NCName)
#
# FIX: the trailing character class previously read [0-9a-zA-Z_\.-/], in
# which ".-/" forms a *range* (U+002E..U+002F). A literal hyphen was
# therefore never matched, so valid hyphenated names such as "dc-terms"
# were rejected. The hyphen is now placed last in the class so it is
# literal; the accepted set is otherwise unchanged.
#
# @see http://www.w3.org/TR/2009/REC-xml-names-20091208/#NT-NCName
NC_REGEXP = Regexp.new(
  %{^
    ( [a-zA-Z_]
    | \\\\u[0-9a-fA-F]{4}
    )
    ( [0-9a-zA-Z_\./-]
    | \\\\u([0-9a-fA-F]{4})
    )*
  $},
  Regexp::EXTENDED)
# This expression matches an term as defined in
# [RDFA-CORE](http://www.w3.org/TR/2012/REC-rdfa-core-20120607/#s_terms)
#
# For the avoidance of doubt, this definition means a 'term'
# in RDFa is an XML NCName that also permits slash as a non-leading character.
#
# FIX: as with NC_REGEXP, the trailing character class previously contained
# the accidental range ".-/" and so never matched a literal hyphen, even
# though NCName (and therefore a term) permits it. The hyphen is now last
# in the class, making it literal.
#
# @see http://www.w3.org/TR/2012/REC-rdfa-core-20120607/#s_terms
TERM_REGEXP = Regexp.new(
  %{^
    (?!\\\\u0301) # ́ is a non-spacing acute accent.
                  # It is legal within an XML Name, but not as the first character.
    ( [a-zA-Z_]
    | \\\\u[0-9a-fA-F]{4}
    )
    ( [0-9a-zA-Z_\./-]
    | \\\\u([0-9a-fA-F]{4})
    )*
  $},
  Regexp::EXTENDED)
# Host language
# @!attribute [r] host_language
# @return [:xml, :xhtml1, :xhtml5, :html4, :html5, :svg]
attr_reader :host_language

# Version
# @!attribute [r] version
# @return [:"rdfa1.0", :"rdfa1.1"]
attr_reader :version

# Repository used for collecting triples.
# @!attribute [r] repository
# @return [RDF::Repository]
attr_reader :repository

# Returns the XML implementation module for this reader instance
# (selected in #initialize from the :library option and mixed into self).
#
# @!attribute [rw] implementation
# @return [Module]
attr_reader :implementation
# The Recursive Baggage: the per-scope state threaded through #traverse.
# @private
class EvaluationContext # :nodoc:
  ##
  # The base URI.
  #
  # Usually the URL of the document being processed, but it may be any URL
  # set by another mechanism (such as the (X)HTML base element); what
  # matters is that relative paths can be resolved against it.
  #
  # @!attribute [rw] base
  # @return [RDF::URI]
  attr_accessor :base

  ##
  # The parent subject. Starts out equal to base and usually changes as
  # processing descends the tree.
  #
  # @!attribute [rw] parent_subject
  # @return [RDF::URI]
  attr_accessor :parent_subject

  ##
  # The parent object. When the object of a statement becomes the subject
  # of nested statements, this slot carries that value down; it may be a
  # bnode grouping several nested statements.
  #
  # @!attribute [rw] parent_object
  # @return [RDF::URI]
  attr_accessor :parent_object

  ##
  # The current, in-scope URI mappings (prefix => URI).
  #
  # @!attribute [rw] uri_mappings
  # @return [Hash{Symbol => String}]
  attr_accessor :uri_mappings

  ##
  # The current, in-scope Namespaces: the subset of uri_mappings that was
  # declared using xmlns.
  #
  # @!attribute [rw] namespaces
  # @return [Hash{String => Namespace}]
  attr_accessor :namespaces

  ##
  # The list of incomplete triples.
  #
  # A triple is incomplete when @rel or @rev supplies a predicate without
  # an object resource; it is completed once the next subject is specified
  # (part of the process called chaining).
  #
  # @!attribute [rw] incomplete_triples
  # @return [Array<Array<RDF::URI, RDF::Resource>>]
  attr_accessor :incomplete_triples

  ##
  # The language. Note that there is no default language.
  #
  # @!attribute [rw] language
  # @return [Symbol]
  attr_accessor :language

  ##
  # The term mappings (term => URI). The specification defines no initial
  # list; Host Languages may supply one via an RDFa Context document.
  #
  # @!attribute [rw] term_mappings
  # @return [Hash{Symbol => RDF::URI}]
  attr_accessor :term_mappings

  ##
  # The default vocabulary: the prefix URI applied when a bare term is
  # used. The specification defines no initial setting.
  #
  # @!attribute [rw] default_vocabulary
  # @return [RDF::URI]
  attr_accessor :default_vocabulary

  ##
  # lists: a hash associating lists with properties.
  #
  # @!attribute [rw] list_mapping
  # @return [Hash{RDF::URI => Array<RDF::Resource>}]
  attr_accessor :list_mapping

  # @param [RDF::URI] base
  # @param [Hash] host_defaults
  # @option host_defaults [Hash{String => RDF::URI}] :term_mappings Hash of NCName => URI
  # @option host_defaults [Hash{String => RDF::URI}] :vocabulary Hash of prefix => URI
  def initialize(base, host_defaults)
    # Initial evaluation context, per [5.1].
    @base               = base
    @parent_subject     = @base
    @parent_object      = nil
    @namespaces         = {}
    @incomplete_triples = []
    @language           = nil
    @uri_mappings       = host_defaults.fetch(:uri_mappings, {})
    @term_mappings      = host_defaults.fetch(:term_mappings, {})
    @default_vocabulary = host_defaults.fetch(:vocabulary, nil)
  end

  # Copy this Evaluation Context.
  #
  # Mutable collections are cloned so that edits in a child scope do not
  # leak into the parent; list_mapping is deliberately shared.
  #
  # @param [EvaluationContext] from
  def initialize_copy(from)
    @uri_mappings       = from.uri_mappings.clone
    @incomplete_triples = from.incomplete_triples.clone
    @namespaces         = from.namespaces.clone
    @list_mapping       = from.list_mapping # shared, not cloned
  end

  # One-line summary: scalar slots plus collection sizes.
  def inspect
    parts = %w(base parent_subject parent_object language default_vocabulary).map do |name|
      value = send(name)
      "#{name}=#{value.respond_to?(:to_ntriples) ? value.to_ntriples : value.inspect}"
    end
    parts << "uri_mappings[#{uri_mappings.keys.length}]"
    parts << "incomplete_triples[#{incomplete_triples.length}]"
    parts << "term_mappings[#{term_mappings.keys.length}]"
    parts << "lists[#{list_mapping.keys.length}]" if list_mapping
    parts.join(", ")
  end
end
##
# Initializes the RDFa reader instance.
#
# @param [IO, File, String] input
# the input stream to read
# @param [Hash{Symbol => Object}] options
# any additional options (see `RDF::Reader#initialize`)
# @option options [Symbol] :library
# One of :nokogiri or :rexml. If nil/unspecified uses :nokogiri if available, :rexml otherwise.
# @option options [Boolean] :vocab_expansion (false)
# whether to perform OWL2 expansion on the resulting graph
# @option options [Boolean] :reference_folding (true)
# whether to perform RDFa property copying on the resulting graph
# @option options [:xml, :xhtml1, :xhtml5, :html4, :html5, :svg] :host_language (:html5)
# Host Language
# @option options [:"rdfa1.0", :"rdfa1.1"] :version (:"rdfa1.1")
# Parser version information
# @option options [Proc] :processor_callback (nil)
# Callback used to provide processor graph triples.
# @option options [Array<Symbol>] :rdfagraph ([:output])
# Used to indicate if either or both of the :output or :processor graphs are output.
# Value is an array containing on or both of :output or :processor.
# @option options [Repository] :vocab_repository (nil)
# Repository to save loaded vocabularies.
# @option options [Array] :debug
# Array to place debug messages
# @return [reader]
# @yield [reader] `self`
# @yieldparam [RDF::Reader] reader
# @yieldreturn [void] ignored
# @raise [RDF::ReaderError] if _validate_
def initialize(input = $stdin, options = {}, &block)
  super do
    @debug = options[:debug]
    # reference_folding (RDFa property copying) defaults on unless disabled.
    @options = {:reference_folding => true}.merge(@options)
    @repository = RDF::Repository.new

    # Normalize :rdfagraph to an array drawn from [:output, :processor];
    # anything else is dropped, and :output is the fallback.
    @options[:rdfagraph] = case @options[:rdfagraph]
    when String, Symbol then @options[:rdfagraph].to_s.split(',').map(&:strip).map(&:to_sym)
    when Array then @options[:rdfagraph].map {|o| o.to_s.to_sym}
    else []
    end.select {|o| [:output, :processor].include?(o)}
    @options[:rdfagraph] << :output if @options[:rdfagraph].empty?

    # Pick the XML backend and mix its module into this instance.
    @library = case options[:library]
    when nil
      # Use Nokogiri when available, and REXML otherwise:
      (defined?(::Nokogiri) && RUBY_PLATFORM != 'java') ? :nokogiri : :rexml
    when :nokogiri, :rexml
      options[:library]
    else
      raise ArgumentError.new("expected :rexml or :nokogiri, but got #{options[:library].inspect}")
    end
    require "rdf/rdfa/reader/#{@library}"
    @implementation = case @library
    when :nokogiri then Nokogiri
    when :rexml then REXML
    end
    self.extend(@implementation)

    # Sniff host language / RDFa version before parsing the document proper.
    detect_host_language_version(input, options)
    add_info(@doc, "version = #{@version}, host_language = #{@host_language}, library = #{@library}, rdfagraph = #{@options[:rdfagraph].inspect}, expand = #{@options[:vocab_expansion]}")

    begin
      initialize_xml(input, options)
    rescue
      add_error(nil, "Malformed document: #{$!.message}")
    end
    add_error(nil, "Empty document") if root.nil?
    add_error(nil, "Syntax errors:\n#{doc_errors}") if !doc_errors.empty?

    # Section 4.2 RDFa Host Language Conformance
    #
    # The Host Language may require the automatic inclusion of one or more Initial Contexts
    @host_defaults = {
      :vocabulary => nil,
      :uri_mappings => {},
      :initial_contexts => [],
    }

    if @version == :"rdfa1.0"
      # Add default term mappings (fixed @rel/@rev vocabulary in RDFa 1.0).
      @host_defaults[:term_mappings] = %w(
        alternate appendix bookmark cite chapter contents copyright first glossary help icon index
        last license meta next p3pv1 prev role section stylesheet subsection start top up
      ).inject({}) { |hash, term| hash[term] = RDF::XHV[term]; hash }
    end

    # Initial contexts depend on the detected host language.
    case @host_language
    when :xml, :svg
      @host_defaults[:initial_contexts] = [XML_RDFA_CONTEXT]
    when :xhtml1
      @host_defaults[:initial_contexts] = [XML_RDFA_CONTEXT, XHTML_RDFA_CONTEXT]
    when :xhtml5, :html4, :html5
      @host_defaults[:initial_contexts] = [XML_RDFA_CONTEXT, HTML_RDFA_CONTEXT]
    end

    block.call(self) if block_given?
  end
end
##
# Iterates the given block for each RDF statement in the input.
#
# On the first call, parses the whole document into @repository (including
# any embedded RDF/XML, script blocks, and microdata) and performs property
# copying / vocabulary expansion as configured; subsequent calls replay the
# repository. Yields default-graph statements when :output is requested and
# processor-graph statements (re-homed to the default graph) when
# :processor is requested.
#
# @yield [statement]
# @yieldparam [RDF::Statement] statement
# @return [void]
def each_statement(&block)
  unless @processed || @root.nil?
    # Add prefix definitions from host defaults
    @host_defaults[:uri_mappings].each_pair do |prefix, value|
      prefix(prefix, value)
    end

    # parse
    parse_whole_document(@doc, RDF::URI(base_uri))

    # Helper defined lazily on first parse (kept nested to preserve the
    # original structure): feeds an embedded data block through whatever
    # reader is registered for its media type.
    def extract_script(el, input, type, options, &block)
      add_debug(el, "script element of type #{type}")
      begin
        # Formats don't exist unless they've been required
        case type
        when 'application/rdf+xml' then require 'rdf/rdfxml'
        when 'text/ntriples'       then require 'rdf/ntriples'
        # FIX: was `require 'text/turtle'`, which is not a loadable path —
        # the Turtle reader lives in the rdf-turtle gem as 'rdf/turtle',
        # so embedded Turtle could never be processed.
        when 'text/turtle'         then require 'rdf/turtle'
        end
      rescue
        # Best effort: a missing reader gem just means this block is skipped.
      end

      if reader = RDF::Reader.for(:content_type => type)
        add_debug(el, "=> reader #{reader.to_sym}")
        reader.new(input, options).each(&block)
      end
    end

    # Look for Embedded Turtle and RDF/XML
    unless @root.xpath("//rdf:RDF", "xmlns:rdf" => "http://www.w3.org/1999/02/22-rdf-syntax-ns#").empty?
      extract_script(@root, @doc, "application/rdf+xml", @options) do |statement|
        @repository << statement
      end
    end

    # Look for Embedded scripts
    @root.css("script[type]").each do |el|
      # FIX: #attribute returns an attribute object, not a String; without
      # .to_s the string comparisons in extract_script (and the reader
      # lookup by content type) never match.
      type = el.attribute("type").to_s
      extract_script(el, el.inner_text, type, @options) do |statement|
        @repository << statement
      end
    end

    # Look for Embedded microdata
    unless @root.xpath("//@itemscope").empty?
      begin
        require 'rdf/microdata'
        add_debug(@doc, "process microdata")
        @repository << RDF::Microdata::Reader.new(@doc, options)
      rescue
        add_debug(@doc, "microdata detected, not processed")
      end
    end

    # Perform property copying
    copy_properties(@repository) if @options[:reference_folding]

    # Perform vocabulary expansion
    expand(@repository) if @options[:vocab_expansion]

    @processed = true
  end

  # Return statements in the default graph for
  # statements in the associated named or default graph from the
  # processed repository
  @repository.each do |statement|
    case statement.context
    when nil
      yield statement if @options[:rdfagraph].include?(:output)
    when RDF::RDFA.ProcessorGraph
      yield RDF::Statement.new(*statement.to_triple) if @options[:rdfagraph].include?(:processor)
    end
  end
end
##
# Iterates the given block for each RDF triple in the input, by unpacking
# every statement produced by #each_statement.
#
# @yield [subject, predicate, object]
# @yieldparam [RDF::Resource] subject
# @yieldparam [RDF::URI] predicate
# @yieldparam [RDF::Value] object
# @return [void]
def each_triple(&block)
  each_statement do |statement|
    triple = statement.to_triple
    block.call(*triple)
  end
end
private

# Allocate (or reuse) the blank node for +value+; memoized per reader so
# the same label always maps to the same RDF::Node.
def bnode(value = nil)
  @bnode_cache ||= {}
  key = value.to_s
  @bnode_cache[key] ||= RDF::Node.new(value)
end
# Render "<base_uri>" followed by the node's document path (when the node
# is an Element/Attribute responding to #display_path) or its plain text.
def node_path(node)
  suffix = node.respond_to?(:display_path) ? node.display_path : node
  "<#{base_uri}>#{suffix}"
end
# Record a debug event (skipped entirely unless global debugging is on or
# a @debug sink was supplied).
#
# @param [#display_path, #to_s] node XML Node or string for showing context
# @param [String] message
# @yieldreturn [String] appended to message, allowing lazy evaluation
def add_debug(node, message = "")
  return unless ::RDF::RDFa.debug? || @debug
  message += yield if block_given?
  add_processor_message(node, message, RDF::RDFA.Info)
end
# Record an informational processor-graph message.
def add_info(node, message, process_class = RDF::RDFA.Info)
  add_processor_message(node, message, process_class)
end

# Record a warning-level processor-graph message.
def add_warning(node, message, process_class = RDF::RDFA.Warning)
  add_processor_message(node, message, process_class)
end

# Record an error-level processor-graph message; additionally raises
# RDF::ReaderError when the reader is in validation mode.
def add_error(node, message, process_class = RDF::RDFA.Error)
  add_processor_message(node, message, process_class)
  raise RDF::ReaderError, message if validate?
end
# Fan a processor message out to every configured sink: STDOUT when global
# debugging is enabled, the @debug array when supplied, and — when the
# :processor graph or a :processor_callback is requested — a set of triples
# in the RDF::RDFA.ProcessorGraph context (with an XPath pointer node when
# the source node supports #path).
def add_processor_message(node, message, process_class)
  puts "#{node_path(node)}: #{message}" if ::RDF::RDFa.debug?
  @debug << "#{node_path(node)}: #{message}" if @debug.is_a?(Array)
  if @options[:processor_callback] || @options[:rdfagraph].include?(:processor)
    # Fresh bnode identifying this message in the processor graph.
    n = RDF::Node.new
    processor_statements = [
      RDF::Statement.new(n, RDF["type"], process_class, :context => RDF::RDFA.ProcessorGraph),
      RDF::Statement.new(n, RDF::DC.description, message, :context => RDF::RDFA.ProcessorGraph),
      RDF::Statement.new(n, RDF::DC.date, RDF::Literal::Date.new(DateTime.now), :context => RDF::RDFA.ProcessorGraph)
    ]
    processor_statements << RDF::Statement.new(n, RDF::RDFA.context, base_uri, :context => RDF::RDFA.ProcessorGraph) if base_uri
    if node.respond_to?(:path)
      # Attach a pointer to the exact document location of the message.
      nc = RDF::Node.new
      processor_statements += [
        RDF::Statement.new(n, RDF::RDFA.context, nc, :context => RDF::RDFA.ProcessorGraph),
        RDF::Statement.new(nc, RDF["type"], RDF::PTR.XPathPointer, :context => RDF::RDFA.ProcessorGraph),
        RDF::Statement.new(nc, RDF::PTR.expression, node.path, :context => RDF::RDFA.ProcessorGraph)
      ]
    end
    @repository.insert(*processor_statements)
    if cb = @options[:processor_callback]
      processor_statements.each {|s| cb.call(s)}
    end
  end
end
##
# add a statement, object can be literal or URI or bnode
# Yields {RDF::Statement} to the saved callback
#
# @param [#display_path, #to_s] node XML Node or string for showing context
# @param [RDF::Resource] subject the subject of the statement
# @param [RDF::URI] predicate the predicate of the statement
# @param [RDF::Value] object the object of the statement
# @param [RDF::Value] context the context of the statement
# @raise [RDF::ReaderError] Checks parameter types and raises if they are incorrect if parsing mode is _validate_.
def add_triple(node, subject, predicate, object, context = nil)
  # NOTE(review): `context` is accepted but never attached to the
  # statement — confirm whether a quad was intended here.
  statement = RDF::Statement.new(subject, predicate, object)
  # Invalid statements are reported (and raise in validation mode)...
  add_error(node, "statement #{RDF::NTriples.serialize(statement)} is invalid") unless statement.valid?
  # ...and only fully-populated statements are stored.
  if subject && predicate && object # Basic sanity checking
    add_info(node, "statement: #{RDF::NTriples.serialize(statement)}")
    repository << statement
  end
end
# Parsing an RDFa document (this is *not* the recursive method).
# Builds the initial EvaluationContext (loading RDFa 1.1 initial contexts
# first) and kicks off #traverse from the document root.
def parse_whole_document(doc, base)
  base = doc_base(base)
  if (base)
    # Strip any fragment from base
    base = base.to_s.split("#").first
    base = uri(base)
    add_debug("") {"parse_whole_doc: base='#{base}'"}
  end

  # initialize the evaluation context with the appropriate base
  evaluation_context = EvaluationContext.new(base, @host_defaults)

  if @version != :"rdfa1.0"
    # Process default vocabularies (initial contexts are an RDFa 1.1
    # mechanism; 1.0 has none).
    load_initial_contexts(@host_defaults[:initial_contexts]) do |which, value|
      add_debug(root) { "parse_whole_document, #{which}: #{value.inspect}"}
      case which
      when :uri_mappings then evaluation_context.uri_mappings.merge!(value)
      when :term_mappings then evaluation_context.term_mappings.merge!(value)
      when :default_vocabulary then evaluation_context.default_vocabulary = value
      end
    end
  end

  traverse(root, evaluation_context)
  add_debug("", "parse_whole_doc: traversal complete'")
end
# Parse and process URI mappings, Term mappings and a default vocabulary from @context
#
# Yields each mapping as [:uri_mappings|:term_mappings|:default_vocabulary, value]
def load_initial_contexts(initial_contexts)
  initial_contexts.
    map {|uri| uri(uri).normalize}.
    each do |uri|
      # Don't try to open ourselves!
      if base_uri == uri
        add_debug(root) {"load_initial_contexts: skip recursive context <#{uri}>"}
        next
      end

      old_debug = RDF::RDFa.debug?
      begin
        add_info(root, "load_initial_contexts: load <#{uri}>")
        # Silence global debugging while the context document itself parses.
        RDF::RDFa.debug = false
        context = Context.find(uri)
      # NOTE: rescues Exception broadly, but always re-raises after
      # recording the error, so nothing is silently swallowed.
      rescue Exception => e
        RDF::RDFa.debug = old_debug
        add_error(root, e.message)
        raise # In case we're not in strict mode, we need to be sure processing stops
      ensure
        # Always restore the caller's debug setting.
        RDF::RDFa.debug = old_debug
      end

      # Add URI Mappings to prefixes
      context.prefixes.each_pair do |prefix, value|
        prefix(prefix, value)
      end
      yield :uri_mappings, context.prefixes unless context.prefixes.empty?
      yield :term_mappings, context.terms unless context.terms.empty?
      yield :default_vocabulary, context.vocabulary if context.vocabulary
    end
end
# Extract the prefix mappings from an element, updating uri_mappings and
# namespaces in place from (a) xmlns declarations and (b) the @prefix
# attribute (RDFa 1.1 only).
def extract_mappings(element, uri_mappings, namespaces)
  # look for xmlns
  # (note, this may be dependent on @host_language)
  # Regardless of how the mapping is declared, the value to be mapped must be converted to lower case,
  # and the URI is not processed in any way; in particular if it is a relative path it is
  # not resolved against the current base.
  ns_defs = {}
  element.namespaces.each do |prefix, href|
    # "xmlns" alone declares the default namespace (nil prefix).
    prefix = nil if prefix == "xmlns"
    add_debug("extract_mappings") { "ns: #{prefix}: #{href}"}
    ns_defs[prefix] = href
  end

  # HTML parsing doesn't create namespace_definitions
  if ns_defs.empty?
    ns_defs = {}
    element.attributes.each do |attr, href|
      next unless attr =~ /^xmlns(?:\:(.+))?/
      prefix = $1
      add_debug("extract_mappings") { "ns(attr): #{prefix}: #{href}"}
      ns_defs[prefix] = href.to_s
    end
  end

  ns_defs.each do |prefix, href|
    # A Conforming RDFa Processor must ignore any definition of a mapping for the '_' prefix.
    next if prefix == "_"

    # Downcase prefix for RDFa 1.1
    pfx_lc = (@version == :"rdfa1.0" || prefix.nil?) ? prefix : prefix.downcase
    if prefix
      # NOTE(review): the warning below reads uri_mappings[pfx_lc] with a
      # String key while entries are stored under Symbol keys — verify the
      # "from <...>" value is ever populated.
      if uri_mappings.fetch(pfx_lc.to_sym, href) != href
        add_warning(element, "Redefining prefix #{pfx_lc}: from <#{uri_mappings[pfx_lc]}> to <#{href}>", RDF::RDFA.PrefixRedefinition)
      end
      uri_mappings[pfx_lc.to_sym] = href
      namespaces[pfx_lc] ||= href
      prefix(pfx_lc, href)
      add_info(element, "extract_mappings: #{prefix} => <#{href}>")
    else
      add_info(element, "extract_mappings: nil => <#{href}>")
      namespaces[""] ||= href
    end
  end

  # Set mappings from @prefix
  # prefix is a whitespace separated list of prefix-name URI pairs of the form
  #   NCName ':' ' '+ xs:anyURI
  mappings = element.attribute("prefix").to_s.strip.split(/\s+/)
  while mappings.length > 0 do
    prefix, uri = mappings.shift.downcase, mappings.shift
    #puts "uri_mappings prefix #{prefix} <#{uri}>"
    next unless prefix.match(/:$/)
    prefix.chop!

    # Validate the prefix against the NCName production.
    unless prefix.match(NC_REGEXP)
      add_error(element, "extract_mappings: Prefix #{prefix.inspect} does not match NCName production")
      next
    end

    # A Conforming RDFa Processor must ignore any definition of a mapping for the '_' prefix.
    next if prefix == "_"

    # An empty prefix declares the default (nil) mapping.
    pfx_index = prefix.to_s.empty? ? nil : prefix.to_s.to_sym
    if uri_mappings.fetch(pfx_index, uri) != uri
      add_warning(element, "Redefining prefix #{prefix}: from <#{uri_mappings[pfx_index]}> to <#{uri}>", RDF::RDFA.PrefixRedefinition)
    end
    uri_mappings[pfx_index] = uri
    prefix(prefix, uri)
    add_info(element, "extract_mappings: prefix #{prefix} => <#{uri}>")
  end unless @version == :"rdfa1.0"
end
# The recursive helper function
def traverse(element, evaluation_context)
if element.nil?
add_error(element, "Can't parse nil element")
return nil
end
add_debug(element) { "ec: #{evaluation_context.inspect}" }
# local variables [7.5 Step 1]
recurse = true
skip = false
new_subject = nil
typed_resource = nil
current_object_resource = nil
uri_mappings = evaluation_context.uri_mappings.clone
namespaces = evaluation_context.namespaces.clone
incomplete_triples = []
language = evaluation_context.language
term_mappings = evaluation_context.term_mappings.clone
default_vocabulary = evaluation_context.default_vocabulary
list_mapping = evaluation_context.list_mapping
xml_base = element.base
base = xml_base.to_s if xml_base && ![:xhtml1, :html4, :html5].include?(@host_language)
add_debug(element) {"base: #{base.inspect}"} if base
base ||= evaluation_context.base
# Pull out the attributes needed for the skip test.
attrs = {}
%w(
about
content
datatype
datetime
href
id
inlist
property
rel
resource
rev
role
src
typeof
value
vocab
).each do |a|
attrs[a.to_sym] = element.attributes[a].to_s.strip if element.attributes[a]
end
add_debug(element) {"attrs " + attrs.inspect} unless attrs.empty?
# If @property and @rel/@rev are on the same elements, the non-CURIE and non-URI @rel/@rev values are ignored. If, after this, the value of @rel/@rev becomes empty, then the then the processor must act as if the attribute is not present.
if attrs.has_key?(:property) && @version == :"rdfa1.1" && (@host_language == :html5 || @host_language == :xhtml5 || @host_language == :html4)
[:rel, :rev].each do |attr|
next unless attrs.has_key?(attr)
add_debug(element) {"Remove non-CURIE/non-IRI @#{attr} values from #{attrs[attr].inspect}"}
attrs[attr] = attrs[attr].
split(/\s+/).
select {|a| a.index(':')}.
join(" ")
add_debug(element) {" => #{attrs[attr].inspect}"}
attrs.delete(attr) if attrs[attr].empty?
end
end
# Default vocabulary [7.5 Step 2]
# Next the current element is examined for any change to the default vocabulary via @vocab.
# If @vocab is present and contains a value, its value updates the local default vocabulary.
# If the value is empty, then the local default vocabulary must be reset to the Host Language defined default.
if attrs[:vocab]
default_vocabulary = if attrs[:vocab].empty?
# Set default_vocabulary to host language default
add_debug(element) {
"[Step 3] reset default_vocaulary to #{@host_defaults.fetch(:vocabulary, nil).inspect}"
}
@host_defaults.fetch(:vocabulary, nil)
else
# Generate a triple indicating that the vocabulary is used
add_triple(element, base, RDF::RDFA.usesVocabulary, uri(attrs[:vocab]))
uri(attrs[:vocab])
end
add_debug(element) {
"[Step 2] default_vocaulary: #{default_vocabulary.inspect}"
}
end
# Local term mappings [7.5 Step 3]
# Next, the current element is then examined for URI mapping s and these are added to the local list of URI mappings.
# Note that a URI mapping will simply overwrite any current mapping in the list that has the same name
extract_mappings(element, uri_mappings, namespaces)
# Language information [7.5 Step 4]
language = element.language || language
language = nil if language.to_s.empty?
add_debug(element) {"HTML5 [3.2.3.3] lang: #{language.inspect}"} if language
# From HTML5, if the property attribute and the rel and/or rev attribute exists on the same element, the non-CURIE and non-URI rel and rev values are ignored. If, after this, the value of rel and/or rev becomes empty, then the processor must act as if the respective attribute is not present.
if [:html5, :xhtml5].include?(@host_language) && attrs[:property] && (attrs[:rel] || attrs[:rev])
old_rel, old_rev = attrs[:rel], attrs[:rev]
if old_rel
attrs[:rel] = (attrs[:rel]).split(/\s+/m).select {|r| !r.index(':').nil?}.join(" ")
attrs.delete(:rel) if attrs[:rel].empty?
add_debug(element) {"HTML5: @rel was #{old_rel}, now #{attrs[:rel]}"}
end
if old_rev
attrs[:rev] = (attrs[:rev]).split(/\s+/m).select {|r| !r.index(':').nil?}.join(" ")
attrs.delete(:rev) if attrs[:rev].empty?
add_debug(element) {"HTML5: @rev was #{old_rev}, now #{attrs[:rev]}"}
end
end
# rels and revs
rels = process_uris(element, attrs[:rel], evaluation_context, base,
:uri_mappings => uri_mappings,
:term_mappings => term_mappings,
:vocab => default_vocabulary,
:restrictions => TERMorCURIEorAbsIRI.fetch(@version, []))
revs = process_uris(element, attrs[:rev], evaluation_context, base,
:uri_mappings => uri_mappings,
:term_mappings => term_mappings,
:vocab => default_vocabulary,
:restrictions => TERMorCURIEorAbsIRI.fetch(@version, []))
add_debug(element) do
"rels: #{rels.join(" ")}, revs: #{revs.join(" ")}"
end unless (rels + revs).empty?
if !(attrs[:rel] || attrs[:rev])
# Establishing a new subject if no rel/rev [7.5 Step 5]
if @version == :"rdfa1.0"
new_subject = if attrs[:about]
process_uri(element, attrs[:about], evaluation_context, base,
:uri_mappings => uri_mappings,
:restrictions => SafeCURIEorCURIEorIRI.fetch(@version, []))
elsif attrs[:resource]
process_uri(element, attrs[:resource], evaluation_context, base,
:uri_mappings => uri_mappings,
:restrictions => SafeCURIEorCURIEorIRI.fetch(@version, []))
elsif attrs[:href] || attrs[:src]
process_uri(element, (attrs[:href] || attrs[:src]), evaluation_context, base, :restrictions => [:uri])
end
# If no URI is provided by a resource attribute, then the first match from the following rules
# will apply:
new_subject ||= if [:xhtml1, :xhtml5, :html4, :html5].include?(@host_language) && element.name =~ /^(head|body)$/
# From XHTML+RDFa 1.1:
# if no URI is provided, then first check to see if the element is the head or body element.
# If it is, then act as if the new subject is set to the parent object.
uri(base)
elsif element == root && base
# if the element is the root element of the document, then act as if there is an empty @about present,
# and process it according to the rule for @about, above;
uri(base)
elsif attrs[:typeof]
RDF::Node.new
else
# otherwise, if parent object is present, new subject is set to the value of parent object.
skip = true unless attrs[:property]
evaluation_context.parent_object
end
# if the @typeof attribute is present, set typed resource to new subject
typed_resource = new_subject if attrs[:typeof]
else # rdfa1.1
# If the current element contains no @rel or @rev attribute, then the next step is to establish a value for new subject.
# This step has two possible alternatives.
# 1. If the current element contains the @property attribute, but does not contain the @content or the @datatype attributes, then
if attrs[:property] && !(attrs[:content] || attrs[:datatype])
# new subject is set to the resource obtained from the first match from the following rule:
new_subject ||= if attrs[:about]
# by using the resource from @about, if present, obtained according to the section on CURIE and IRI Processing;
process_uri(element, attrs[:about], evaluation_context, base,
:uri_mappings => uri_mappings,
:restrictions => SafeCURIEorCURIEorIRI.fetch(@version, []))
elsif [:xhtml1, :xhtml5, :html4, :html5].include?(@host_language) && element.name =~ /^(head|body)$/
# From XHTML+RDFa 1.1:
# if no URI is provided, then first check to see if the element is the head or body element. If it is, then act as if the new subject is set to the parent object.
evaluation_context.parent_object
elsif element == root && base
# otherwise, if the element is the root element of the document, then act as if there is an empty @about present, and process it according to the rule for @about, above;
uri(base)
end
# if the @typeof attribute is present, set typed resource to new subject
typed_resource = new_subject if attrs[:typeof]
# otherwise, if parent object is present, new subject is set to the value of parent object.
new_subject ||= evaluation_context.parent_object
# If @typeof is present then typed resource is set to the resource obtained from the first match from the following rules:
# by using the resource from @about, if present, obtained according to the section on CURIE and IRI Processing; (done above)
# otherwise, if the element is the root element of the document, then act as if there is an empty @about present and process it according to the previous rule; (done above)
if attrs[:typeof] && typed_resource.nil?
# otherwise,
typed_resource ||= if attrs[:resource]
# by using the resource from @resource, if present, obtained according to the section on CURIE and IRI Processing;
process_uri(element, attrs[:resource], evaluation_context, base,
:uri_mappings => uri_mappings,
:restrictions => SafeCURIEorCURIEorIRI.fetch(@version, []))
elsif attrs[:href] || attrs[:src]
# otherwise, by using the IRI from @href, if present, obtained according to the section on CURIE and IRI Processing;
# otherwise, by using the IRI from @src, if present, obtained according to the section on CURIE and IRI Processing;
process_uri(element, (attrs[:href] || attrs[:src]), evaluation_context, base,
:restrictions => [:uri])
else
# otherwise, the value of typed resource is set to a newly created bnode.
RDF::Node.new
end
# The value of the current object resource is set to the value of typed resource.
current_object_resource = typed_resource
end
else
# otherwise (ie, the @content or @datatype)
new_subject =
process_uri(element, (attrs[:about] || attrs[:resource]),
evaluation_context, base,
:uri_mappings => uri_mappings,
:restrictions => SafeCURIEorCURIEorIRI.fetch(@version, [])) if attrs[:about] ||attrs[:resource]
new_subject ||=
process_uri(element, (attrs[:href] || attrs[:src]), evaluation_context, base,
:restrictions => [:uri]) if attrs[:href] || attrs[:src]
# If no URI is provided by a resource attribute, then the first match from the following rules
# will apply:
new_subject ||= if [:xhtml1, :xhtml5, :html4, :html5].include?(@host_language) && element.name =~ /^(head|body)$/
# From XHTML+RDFa 1.1:
# if no URI is provided, then first check to see if the element is the head or body element.
# If it is, then act as if the new subject is set to the parent object.
evaluation_context.parent_object
elsif element == root
# if the element is the root element of the document, then act as if there is an empty @about present,
# and process it according to the rule for @about, above;
uri(base)
elsif attrs[:typeof]
RDF::Node.new
else
# otherwise, if parent object is present, new subject is set to the value of parent object.
# Additionally, if @property is not present then the skip element flag is set to 'true'.
skip = true unless attrs[:property]
evaluation_context.parent_object
end
# If @typeof is present then typed resource is set to the resource obtained from the first match from the following rules:
typed_resource = new_subject if attrs[:typeof]
end
end
add_debug(element) {
"[Step 5] new_subject: #{new_subject.to_ntriples rescue 'nil'}, " +
"typed_resource: #{typed_resource.to_ntriples rescue 'nil'}, " +
"current_object_resource: #{current_object_resource.to_ntriples rescue 'nil'}, " +
"skip = #{skip}"
}
else
# [7.5 Step 6]
# If the current element does contain a @rel or @rev attribute, then the next step is to
# establish both a value for new subject and a value for current object resource:
new_subject = process_uri(element, attrs[:about], evaluation_context, base,
:uri_mappings => uri_mappings,
:restrictions => SafeCURIEorCURIEorIRI.fetch(@version, []))
new_subject ||= process_uri(element, attrs[:src], evaluation_context, base,
:uri_mappings => uri_mappings,
:restrictions => [:uri]) if @version == :"rdfa1.0"
# if the @typeof attribute is present, set typed resource to new subject
typed_resource = new_subject if attrs[:typeof]
# If no URI is provided then the first match from the following rules will apply
new_subject ||= if element == root && base
uri(base)
elsif [:xhtml1, :xhtml5, :html4, :html5].include?(@host_language) && element.name =~ /^(head|body)$/
# From XHTML+RDFa 1.1:
# if no URI is provided, then first check to see if the element is the head or body element.
# If it is, then act as if the new subject is set to the parent object.
evaluation_context.parent_object
elsif attrs[:typeof] && @version == :"rdfa1.0"
RDF::Node.new
else
# if it's null, it's null and nothing changes
evaluation_context.parent_object
# no skip flag set this time
end
# Then the current object resource is set to the URI obtained from the first match from the following rules:
current_object_resource = process_uri(element, attrs[:resource], evaluation_context, base,
:uri_mappings => uri_mappings,
:restrictions => SafeCURIEorCURIEorIRI.fetch(@version, [])) if attrs[:resource]
current_object_resource ||= process_uri(element, attrs[:href], evaluation_context, base,
:restrictions => [:uri]) if attrs[:href]
current_object_resource ||= process_uri(element, attrs[:src], evaluation_context, base,
:restrictions => [:uri]) if attrs[:src] && @version != :"rdfa1.0"
current_object_resource ||= RDF::Node.new if attrs[:typeof] && !attrs[:about] && @version != :"rdfa1.0"
# and also set the value typed resource to this bnode
if attrs[:typeof]
if @version == :"rdfa1.0"
typed_resource = new_subject
else
typed_resource = current_object_resource if !attrs[:about]
end
end
add_debug(element) {
"[Step 6] new_subject: #{new_subject}, " +
"current_object_resource = #{current_object_resource.nil? ? 'nil' : current_object_resource} " +
"typed_resource: #{typed_resource.to_ntriples rescue 'nil'}, "
}
end
# [Step 7] If in any of the previous steps a typed resource was set to a non-null value, it is now used to provide a subject for type values;
if typed_resource
# Typeof is TERMorCURIEorAbsIRIs
types = process_uris(element, attrs[:typeof], evaluation_context, base,
:uri_mappings => uri_mappings,
:term_mappings => term_mappings,
:vocab => default_vocabulary,
:restrictions => TERMorCURIEorAbsIRI.fetch(@version, []))
add_debug(element, "[Step 7] typeof: #{attrs[:typeof]}")
types.each do |one_type|
add_triple(element, typed_resource, RDF["type"], one_type)
end
end
# Create new List mapping [step 8]
#
# If in any of the previous steps a new subject was set to a non-null value different from the parent object;
# The list mapping taken from the evaluation context is set to a new, empty mapping.
if (new_subject && (new_subject != evaluation_context.parent_subject || list_mapping.nil?))
list_mapping = {}
add_debug(element) do
"[Step 8]: create new list mapping(#{list_mapping.object_id}) " +
"ns: #{new_subject.to_ntriples}, " +
"ps: #{evaluation_context.parent_subject.to_ntriples rescue 'nil'}"
end
end
# Generate triples with given object [Step 9]
#
# If in any of the previous steps a current object resource was set to a non-null value, it is now used to generate triples and add entries to the local list mapping:
if new_subject && current_object_resource && (attrs[:rel] || attrs[:rev])
add_debug(element) {"[Step 9] rels: #{rels.inspect} revs: #{revs.inspect}"}
rels.each do |r|
if attrs[:inlist]
# If the current list mapping does not contain a list associated with this IRI,
# instantiate a new list
unless list_mapping[r]
list_mapping[r] = RDF::List.new
add_debug(element) {"list(#{r}): create #{list_mapping[r].inspect}"}
end
add_debug(element) {"[Step 9] add #{current_object_resource.to_ntriples} to #{r} #{list_mapping[r].inspect}"}
list_mapping[r] << current_object_resource
else
# Predicates for the current object resource can be set by using one or both of the @rel and the @rev attributes but, in case of the @rel attribute, only if the @inlist is not present:
add_triple(element, new_subject, r, current_object_resource)
end
end
revs.each do |r|
add_triple(element, current_object_resource, r, new_subject)
end
elsif attrs[:rel] || attrs[:rev]
# Incomplete triples and bnode creation [Step 10]
add_debug(element) {"[Step 10] incompletes: rels: #{rels}, revs: #{revs}"}
current_object_resource = RDF::Node.new
# predicate: full IRI
# direction: forward/reverse
# lists: Save into list, don't generate triple
rels.each do |r|
if attrs[:inlist]
# If the current list mapping does not contain a list associated with this IRI,
# instantiate a new list
unless list_mapping[r]
list_mapping[r] = RDF::List.new
add_debug(element) {"[Step 10] list(#{r}): create #{list_mapping[r].inspect}"}
end
incomplete_triples << {:list => list_mapping[r], :direction => :none}
else
incomplete_triples << {:predicate => r, :direction => :forward}
end
end
revs.each do |r|
incomplete_triples << {:predicate => r, :direction => :reverse}
end
end
# Establish current object literal [Step 11]
#
# If the current element has a @inlist attribute, add the property to the
# list associated with that property, creating a new list if necessary.
if attrs[:property]
properties = process_uris(element, attrs[:property], evaluation_context, base,
:uri_mappings => uri_mappings,
:term_mappings => term_mappings,
:vocab => default_vocabulary,
:restrictions => TERMorCURIEorAbsIRI.fetch(@version, []))
properties.reject! do |p|
if p.is_a?(RDF::URI)
false
else
add_warning(element, "[Step 11] predicate #{p.to_ntriples} must be a URI")
true
end
end
datatype = process_uri(element, attrs[:datatype], evaluation_context, base,
:uri_mappings => uri_mappings,
:term_mappings => term_mappings,
:vocab => default_vocabulary,
:restrictions => TERMorCURIEorAbsIRI.fetch(@version, [])) unless attrs[:datatype].to_s.empty?
begin
current_property_value = case
when datatype && ![RDF.XMLLiteral, RDF.HTML].include?(datatype)
# typed literal
add_debug(element, "[Step 11] typed literal (#{datatype})")
RDF::Literal.new(attrs[:datetime] || attrs[:value] || attrs[:content] || element.inner_text.to_s, :datatype => datatype, :language => language, :validate => validate?, :canonicalize => canonicalize?)
when @version == :"rdfa1.1"
case
when datatype == RDF.XMLLiteral
# XML Literal
add_debug(element) {"[Step 11] XML Literal: #{element.inner_html}"}
# In order to maintain maximum portability of this literal, any children of the current node that are
# elements must have the current in scope XML namespace declarations (if any) declared on the
# serialized element using their respective attributes. Since the child element node could also
# declare new XML namespaces, the RDFa Processor must be careful to merge these together when
# generating the serialized element definition. For avoidance of doubt, any re-declarations on the
# child node must take precedence over declarations that were active on the current node.
begin
c14nxl = element.children.c14nxl(
:library => @library,
:language => language,
:namespaces => {nil => XHTML}.merge(namespaces))
RDF::Literal.new(c14nxl,
:library => @library,
:datatype => RDF.XMLLiteral,
:validate => validate?,
:canonicalize => canonicalize?)
rescue ArgumentError => e
add_error(element, e.message)
end
when datatype == RDF.HTML
# HTML Literal
add_debug(element) {"[Step 11] HTML Literal: #{element.inner_html}"}
# Just like XMLLiteral, but without the c14nxl
begin
RDF::Literal.new(element.children.to_html,
:library => @library,
:datatype => RDF.HTML,
:validate => validate?,
:canonicalize => canonicalize?)
rescue ArgumentError => e
add_error(element, e.message)
end
when attrs[:datatype]
# otherwise, as a plain literal if @datatype is present but has an empty value.
# The actual literal is either the value of @content (if present) or a string created by
# concatenating the value of all descendant text nodes, of the current element in turn.
# typed literal
add_debug(element, "[Step 11] plain plain (#{datatype})")
RDF::Literal.new(attrs[:content] || element.inner_text.to_s, :language => language, :validate => validate?, :canonicalize => canonicalize?)
when attrs[:content]
# plain literal
add_debug(element, "[Step 11] plain literal (content)")
RDF::Literal.new(attrs[:content], :language => language, :validate => validate?, :canonicalize => canonicalize?)
when element.name == 'time'
# HTML5 support
# Lexically scan value and assign appropriate type, otherwise, leave untyped
v = (attrs[:datetime] || element.inner_text).to_s
datatype = %w(Date Time DateTime Year YearMonth Duration).map {|t| RDF::Literal.const_get(t)}.detect do |dt|
v.match(dt::GRAMMAR)
end || RDF::Literal
add_debug(element) {"[Step 11] <time> literal: #{datatype} #{v.inspect}"}
datatype.new(v, :language => language)
when (attrs[:resource] || attrs[:href] || attrs[:src]) &&
!(attrs[:rel] || attrs[:rev]) &&
@version != :"rdfa1.0"
add_debug(element, "[Step 11] resource (resource|href|src)")
res = process_uri(element, attrs[:resource], evaluation_context, base,
:uri_mappings => uri_mappings,
:restrictions => SafeCURIEorCURIEorIRI.fetch(@version, [])) if attrs[:resource]
res ||= process_uri(element, (attrs[:href] || attrs[:src]), evaluation_context, base, :restrictions => [:uri])
when typed_resource && !attrs[:about] && @version != :"rdfa1.0"
add_debug(element, "[Step 11] typed_resource")
typed_resource
else
# plain literal
add_debug(element, "[Step 11] plain literal (inner text)")
RDF::Literal.new(element.inner_text.to_s, :language => language, :validate => validate?, :canonicalize => canonicalize?)
end
else # rdfa1.0
if element.text_content? || (element.children.length == 0) || attrs[:datatype] == ""
# plain literal
add_debug(element, "[Step 11 (1.0)] plain literal")
RDF::Literal.new(attrs[:content] || element.inner_text.to_s, :language => language, :validate => validate?, :canonicalize => canonicalize?)
elsif !element.text_content? and (datatype == nil or datatype == RDF.XMLLiteral)
# XML Literal
add_debug(element) {"[Step 11 (1.0)] XML Literal: #{element.inner_html}"}
recurse = false
c14nxl = element.children.c14nxl(
:library => @library,
:language => language,
:namespaces => {nil => XHTML}.merge(namespaces))
RDF::Literal.new(c14nxl,
:library => @library,
:datatype => RDF.XMLLiteral,
:validate => validate?,
:canonicalize => canonicalize?)
end
end
rescue ArgumentError => e
add_error(element, e.message)
end
# add each property
properties.each do |p|
# Lists: If element has an @inlist attribute, add the value to a list
if attrs[:inlist]
# If the current list mapping does not contain a list associated with this IRI,
# instantiate a new list
unless list_mapping[p]
list_mapping[p] = RDF::List.new
add_debug(element) {"[Step 11] lists(#{p}): create #{list_mapping[p].inspect}"}
end
add_debug(element) {"[Step 11] add #{current_property_value.to_ntriples} to #{p.to_ntriples} #{list_mapping[p].inspect}"}
list_mapping[p] << current_property_value
elsif new_subject
add_triple(element, new_subject, p, current_property_value)
end
end
end
if !skip and new_subject && !evaluation_context.incomplete_triples.empty?
# Complete the incomplete triples from the evaluation context [Step 12]
add_debug(element) do
"[Step 12] complete incomplete triples: " +
"new_subject=#{new_subject.to_ntriples}, " +
"completes=#{evaluation_context.incomplete_triples.inspect}"
end
evaluation_context.incomplete_triples.each do |trip|
case trip[:direction]
when :none
add_debug(element) {"[Step 12] add #{new_subject.to_ntriples} to #{trip[:list].inspect}"}
trip[:list] << new_subject
when :forward
add_triple(element, evaluation_context.parent_subject, trip[:predicate], new_subject)
when :reverse
add_triple(element, new_subject, trip[:predicate], evaluation_context.parent_subject)
end
end
end
# Create a new evaluation context and proceed recursively [Step 13]
if recurse
if skip
if language == evaluation_context.language &&
uri_mappings == evaluation_context.uri_mappings &&
term_mappings == evaluation_context.term_mappings &&
default_vocabulary == evaluation_context.default_vocabulary &&
base == evaluation_context.base &&
list_mapping == evaluation_context.list_mapping
new_ec = evaluation_context
add_debug(element, "[Step 13] skip: reused ec")
else
new_ec = evaluation_context.clone
new_ec.base = base
new_ec.language = language
new_ec.uri_mappings = uri_mappings
new_ec.namespaces = namespaces
new_ec.term_mappings = term_mappings
new_ec.default_vocabulary = default_vocabulary
new_ec.list_mapping = list_mapping
add_debug(element, "[Step 13] skip: cloned ec")
end
else
# create a new evaluation context
new_ec = EvaluationContext.new(base, @host_defaults)
new_ec.parent_subject = new_subject || evaluation_context.parent_subject
new_ec.parent_object = current_object_resource || new_subject || evaluation_context.parent_subject
new_ec.uri_mappings = uri_mappings
new_ec.namespaces = namespaces
new_ec.incomplete_triples = incomplete_triples
new_ec.language = language
new_ec.term_mappings = term_mappings
new_ec.default_vocabulary = default_vocabulary
new_ec.list_mapping = list_mapping
add_debug(element, "[Step 13] new ec")
end
element.children.each do |child|
# recurse only if it's an element
traverse(child, new_ec) if child.element?
end
# Step 14: after traversing through child elements, for each list associated with
# a property
(list_mapping || {}).each do |p, l|
# if that list is different from the evaluation context
ec_list = evaluation_context.list_mapping[p] if evaluation_context.list_mapping
add_debug(element) {"[Step 14] time to create #{l.inspect}? #{(ec_list != l).inspect}"}
if ec_list != l
add_debug(element) {"[Step 14] list(#{p}) create #{l.inspect}"}
# Generate an rdf:List with the elements of that list.
l.each_statement do |st|
add_triple(element, st.subject, st.predicate, st.object) unless st.object == RDF.List
end
# Generate a triple relating new_subject, property and the list BNode,
# or rdf:nil if the list is empty.
if l.empty?
add_triple(element, new_subject, p, RDF.nil)
else
add_triple(element, new_subject, p, l.subject)
end
end
end
# Role processing
# @id is used as subject, bnode otherwise.
# Predicate is xhv:role
# Objects are TERMorCURIEorAbsIRIs.
# Act as if the default vocabulary is XHV
if attrs[:role]
subject = attrs[:id] ? uri(base, "##{attrs[:id]}") : RDF::Node.new
roles = process_uris(element, attrs[:role], evaluation_context, base,
:uri_mappings => uri_mappings,
:term_mappings => term_mappings,
:vocab => RDF::XHV.to_s,
:restrictions => TERMorCURIEorAbsIRI.fetch(@version, []))
add_debug(element) {"role: about: #{subject.to_ntriples}, roles: #{roles.map(&:to_ntriples).inspect}"}
roles.each do |r|
add_triple(element, subject, RDF::XHV.role, r)
end
end
end
end
# Resolve a white-space separated attribute value
# (TERMorCURIEorAbsIRIs or SafeCURIEorCURIEorIRIs) into resources.
#
# @param [Object] element node being processed (used for debug output)
# @param [#to_s] value the raw attribute value
# @param [EvaluationContext] evaluation_context
# @param [RDF::URI, String] base
# @param [Hash] options passed through to #process_uri
# @return [Array<RDF::Resource>] resolved resources; entries that could
#   not be resolved are dropped
def process_uris(element, value, evaluation_context, base, options)
  text = value.to_s
  return [] if text.empty?
  add_debug(element) {"process_uris: #{value}"}
  resolved = text.split(/\s+/).map do |token|
    process_uri(element, token, evaluation_context, base, options)
  end
  resolved.compact
end
##
# Resolve a single attribute value to an IRI, CURIE, SafeCURIE or term,
# depending on the restrictions in effect for the attribute.
#
# @param [Object] element node being processed (for debug/warning output)
# @param [#to_s, nil] value attribute value to resolve; nil returns nil
# @param [EvaluationContext] evaluation_context supplies parent_subject for bnode/CURIE resolution
# @param [RDF::URI, String] base base IRI used for resolving relative IRIs
# @param [Hash{Symbol => Object}] options
# @option options [Hash] :uri_mappings in-scope prefix mappings (defaults to {})
# @option options [Hash] :term_mappings in-scope term mappings; presence enables term processing
# @option options [Array<Symbol>] :restrictions content-model restrictions, e.g. [:safe_curie, :curie, :uri]
# @return [RDF::Resource, nil] the resolved resource, or nil when the value must be ignored
def process_uri(element, value, evaluation_context, base, options = {})
  return if value.nil?
  restrictions = options[:restrictions]
  add_debug(element) {"process_uri: #{value}, restrictions = #{restrictions.inspect}"}
  options = {:uri_mappings => {}}.merge(options)
  if !options[:term_mappings] && options[:uri_mappings] && restrictions.include?(:safe_curie) && value.to_s.match(/^\[(.*)\]$/)
    # SafeCURIEorCURIEorIRI
    # When the value is surrounded by square brackets, then the content within the brackets is
    # evaluated as a CURIE according to the CURIE Syntax definition. If it is not a valid CURIE, the
    # value must be ignored.
    uri = curie_to_resource_or_bnode(element, $1, options[:uri_mappings], evaluation_context.parent_subject, restrictions)
    if uri
      add_debug(element) {"process_uri: #{value} => safeCURIE => <#{uri}>"}
    else
      add_warning(element, "#{value} not matched as a safeCURIE", RDF::RDFA.UnresolvedCURIE)
    end
    uri
  elsif options[:term_mappings] && TERM_REGEXP.match(value.to_s) && restrictions.include?(:term)
    # TERMorCURIEorAbsIRI
    # If the value is an NCName, then it is evaluated as a term according to General Use of Terms in
    # Attributes. Note that this step may mean that the value is to be ignored.
    uri = process_term(element, value.to_s, options)
    add_debug(element) {"process_uri: #{value} => term => <#{uri}>"}
    uri
  else
    # SafeCURIEorCURIEorIRI or TERMorCURIEorAbsIRI
    # Otherwise, the value is evaluated as a CURIE.
    # If it is a valid CURIE, the resulting URI is used; otherwise, the value will be processed as a URI.
    uri = curie_to_resource_or_bnode(element, value, options[:uri_mappings], evaluation_context.parent_subject, restrictions)
    if uri
      add_debug(element) {"process_uri: #{value} => CURIE => <#{uri}>"}
    elsif @version == :"rdfa1.0" && value.to_s.match(/^xml/i)
      # Special case to not allow anything starting with XML to be treated as a URI
    elsif restrictions.include?(:absuri) || restrictions.include?(:uri)
      begin
        # AbsURI does not use xml:base
        if restrictions.include?(:absuri)
          uri = uri(value)
          unless uri.absolute?
            # Warn before discarding the value, so the message shows the
            # offending IRI rather than nil.
            add_warning(element, "Malformed IRI #{uri.inspect}")
            uri = nil
          end
        else
          uri = uri(base, Addressable::URI.parse(value))
        end
      rescue Addressable::URI::InvalidURIError => e
        add_warning(element, "Malformed IRI #{value}")
      rescue RDF::ReaderError => e
        add_debug(element, e.message)
        # NOTE: was /^\(^\w\):/, which contains a mid-pattern anchor after a
        # literal paren and can never match, leaving the "Undefined prefix"
        # warning unreachable. Match a CURIE-style prefix instead.
        if value.to_s =~ /^(\w+):/
          add_warning(element, "Undefined prefix #{$1}")
        else
          add_warning(element, "Relative URI #{value}")
        end
      end
      add_debug(element) {"process_uri: #{value} => URI => <#{uri}>"}
    end
    uri
  end
end
# [7.4.3] General Use of Terms in Attributes
#
# Resolve a bare term to an IRI: first against the local default
# vocabulary, then against the in-scope term mappings (exact symbol match
# before a case-insensitive scan). Emits an UnresolvedTerm warning and
# returns nil when the term cannot be resolved.
def process_term(element, value, options)
  vocab = options[:vocab]
  # If there is a local default vocabulary, the IRI is obtained by concatenating that value and the term
  return uri(vocab + value) if vocab
  mappings = options[:term_mappings]
  if mappings.is_a?(Hash)
    # If the term is in the local term mappings, use the associated URI (case sensitive).
    key = value.to_s.to_sym
    return uri(mappings[key]) if mappings.has_key?(key)
    # Otherwise, check for a case-insensitive match
    wanted = value.to_s.downcase
    mappings.each_pair do |term, mapped|
      return uri(mapped) if term.to_s.downcase == wanted
    end
  end
  # Finally, if there is no local default vocabulary, the term has no associated URI and must be ignored.
  add_warning(element, "Term #{value} is not defined", RDF::RDFA.UnresolvedTerm)
  nil
end
# From section 6. CURIE Syntax Definition
#
# Resolve a CURIE to a resource: "_:name" becomes a bnode (when allowed),
# a leading-colon CURIE uses the default XHV mapping, a prefixless value
# is not a CURIE here (it may be handled as a term elsewhere), and any
# other prefix is looked up in the in-scope URI mappings.
def curie_to_resource_or_bnode(element, curie, uri_mappings, subject, restrictions)
  text = curie.to_s
  prefix, reference = text.split(":", 2)
  if prefix == "_" && restrictions.include?(:bnode)
    # A non-nil name is forced, otherwise a fresh bnode would be generated.
    # As a special case, _: is also a valid reference for one specific bnode.
    return bnode(reference)
  end
  # Default prefix: URI mappings for CURIEs default to XHV, rather than the
  # default doc namespace
  return RDF::XHV[reference.to_s] if text.match(/^:/)
  # No prefix, undefined (in this context, it is evaluated as a term elsewhere)
  return nil unless text.match(/:/)
  # Prefixes always downcased (RDFa 1.1 and later)
  prefix = prefix.to_s.downcase unless @version == :"rdfa1.0"
  add_debug(element) do
    "curie_to_resource_or_bnode check for #{prefix.to_s.to_sym.inspect} in #{uri_mappings.inspect}"
  end
  ns = uri_mappings[prefix.to_s.to_sym]
  unless ns
    add_debug(element) {"curie_to_resource_or_bnode No namespace mapping for #{prefix.inspect}"}
    return nil
  end
  uri(ns + reference.to_s)
end
# Build an RDF::URI from value, optionally joined against append, applying
# this reader's validation, canonicalization and interning settings.
#
# @param [#to_s] value the IRI text or object
# @param [#to_s, nil] append optional path to join onto value
# @return [RDF::URI]
def uri(value, append = nil)
  result = RDF::URI.new(value)
  result = result.join(append) if append
  result.validate! if validate?
  result.canonicalize! if canonicalize?
  intern? ? RDF::URI.intern(result) : result
end
end
end
Fix the term-matching regular expression (TERM_REGEXP).
begin
raise LoadError, "not with java" if RUBY_PLATFORM == "java"
require 'nokogiri'
rescue LoadError => e
:rexml
end
require 'rdf/ntriples'
require 'rdf/xsd'
module RDF::RDFa
##
# An RDFa parser in Ruby
#
# This class supports [Nokogiri][] for HTML
# processing, and will automatically select the most performant
# implementation (Nokogiri or LibXML) that is available. If need be, you
# can explicitly override the used implementation by passing in a
# `:library` option to `Reader.new` or `Reader.open`.
#
# [Nokogiri]: http://nokogiri.org/
#
# Based on processing rules described here:
# @see http://www.w3.org/TR/rdfa-syntax/#s_model RDFa 1.0
# @see http://www.w3.org/TR/2012/REC-rdfa-core-20120607/
# @see http://www.w3.org/TR/2012/CR-xhtml-rdfa-20120313/
# @see http://dev.w3.org/html5/rdfa/
#
# @author [Gregg Kellogg](http://kellogg-assoc.com/)
class Reader < RDF::Reader
# Associate this reader with the RDFa Format class (RDF::Reader registration).
format Format
# Vocabulary-expansion behavior; Expansion is defined elsewhere in this library.
include Expansion
# The XHTML namespace; used as the default namespace when serializing
# XML Literals (merged into the in-scope namespaces).
XHTML = "http://www.w3.org/1999/xhtml".freeze
# Content model for @about and @resource. In RDFa 1.0, this was URIorSafeCURIE
# Frozen: these tables are read-only lookup data keyed by RDFa version.
SafeCURIEorCURIEorIRI = {
  :"rdfa1.0" => [:safe_curie, :uri, :bnode],
  :"rdfa1.1" => [:safe_curie, :curie, :uri, :bnode],
}.freeze
# Content model for @datatype. In RDFa 1.0, this was CURIE
# Also plural TERMorCURIEorAbsIRIs, content model for @rel, @rev, @property and @typeof
TERMorCURIEorAbsIRI = {
  :"rdfa1.0" => [:term, :curie],
  :"rdfa1.1" => [:term, :curie, :absuri],
}.freeze
# This expression matches an NCName as defined in
# [XML-NAMES](http://www.w3.org/TR/2009/REC-xml-names-20091208/#NT-NCName)
#
# An NCName may contain letters, digits, underscore, dot and hyphen after
# the first character, but never a slash (slash is only legal in terms —
# see TERM_REGEXP). The previous character class `[0-9a-zA-Z_\.-/]` was
# parsed as the range `\.`-`/`, which accidentally admitted `/` and
# dropped `-` from the set; the hyphen is now placed first so it matches
# literally.
#
# The `\\u[0-9a-fA-F]{4}` alternatives match literal backslash-u escape
# sequences appearing in the source text, not decoded code points.
#
# @see http://www.w3.org/TR/2009/REC-xml-names-20091208/#NT-NCName
NC_REGEXP = Regexp.new(
  %{^
    ( [a-zA-Z_]
    | \\\\u[0-9a-fA-F]{4}
    )
    ( [-0-9a-zA-Z_\.]
    | \\\\u([0-9a-fA-F]{4})
    )*
  $},
  Regexp::EXTENDED)
# This expression matches a term as defined in
# [RDFA-CORE](http://www.w3.org/TR/2012/REC-rdfa-core-20120607/#s_terms)
#
# For the avoidance of doubt, this definition means a 'term'
# in RDFa is an XML NCName that also permits slash as a non-leading character.
#
# Note: the `\\u[0-9a-fA-F]{4}` alternatives match literal backslash-u
# escape sequences appearing in the source text (not decoded code points),
# mirroring NC_REGEXP; likewise the leading negative lookahead rejects a
# value beginning with the literal escape for U+0301.
#
# @see http://www.w3.org/TR/2012/REC-rdfa-core-20120607/#s_terms
TERM_REGEXP = Regexp.new(
  %{^
    (?!\\\\u0301) # ́ is a non-spacing acute accent.
    # It is legal within an XML Name, but not as the first character.
    ( [a-zA-Z_]
    | \\\\u[0-9a-fA-F]{4}
    )
    ( [-0-9a-zA-Z_\.\/]
    | \\\\u([0-9a-fA-F]{4})
    )*
  $},
  Regexp::EXTENDED)
# Host language in effect for this parse.
# @!attribute [r] host_language
# @return [:xml, :xhtml1, :xhtml5, :html4, :html5, :svg]
attr_reader :host_language
# RDFa version being processed.
# @!attribute [r] version
# @return [:"rdfa1.0", :"rdfa1.1"]
attr_reader :version
# Repository used for collecting triples.
# @!attribute [r] repository
# @return [RDF::Repository]
attr_reader :repository
# Returns the XML implementation module for this reader instance.
#
# @!attribute [r] implementation
# @return [Module]
attr_reader :implementation
# The Recursive Baggage — the per-element evaluation state threaded through
# the RDFa processing rules.
# @private
class EvaluationContext # :nodoc:
  # Base IRI against which relative paths are resolved. Usually the URL of
  # the document being processed, though it may be overridden (e.g. by an
  # (X)HTML base element).
  # @!attribute [rw] base
  # @return [RDF::URI]
  attr_accessor :base

  # Subject inherited from the enclosing element. Starts out equal to base
  # and usually changes during processing.
  # @!attribute [rw] parent_subject
  # @return [RDF::URI]
  attr_accessor :parent_subject

  # Object carried down from the enclosing statement; becomes the subject of
  # nested statements. May be a bnode when several nested statements are
  # grouped together on one bnode set in the containing statement.
  # @!attribute [rw] parent_object
  # @return [RDF::URI]
  attr_accessor :parent_object

  # In-scope prefix => IRI mappings.
  # @!attribute [rw] uri_mappings
  # @return [Hash{Symbol => String}]
  attr_accessor :uri_mappings

  # In-scope Namespaces: the subset of uri_mappings declared via xmlns.
  # @!attribute [rw] namespaces
  # @return [Hash{String => Namespace}]
  attr_accessor :namespaces

  # Triples that are awaiting a resource. A triple is incomplete when @rel
  # or @rev supplies a predicate but no object resource is available yet;
  # it is completed when the next subject is established (chaining).
  # @!attribute [rw] incomplete_triples
  # @return [Array<Array<RDF::URI, RDF::Resource>>]
  attr_accessor :incomplete_triples

  # The current language. There is no default language.
  # @!attribute [rw] language
  # @return [Symbol]
  attr_accessor :language

  # Term => IRI mappings. No initial list is defined by the spec; a Host
  # Language may seed one via an RDFa Context document.
  # @!attribute [rw] term_mappings
  # @return [Hash{Symbol => RDF::URI}]
  attr_accessor :term_mappings

  # Prefix IRI applied when a bare term is used. No initial setting is
  # defined by the spec.
  # @!attribute [rw] default_vocabulary
  # @return [RDF::URI]
  attr_accessor :default_vocabulary

  # Property => list associations used for @inlist processing.
  # @!attribute [rw] list_mapping
  # @return [Hash{RDF::URI => Array<RDF::Resource>}]
  attr_accessor :list_mapping

  # Build the initial evaluation context per RDFa Core [5.1].
  #
  # @param [RDF::URI] base
  # @param [Hash] host_defaults
  # @option host_defaults [Hash{String => RDF::URI}] :term_mappings Hash of NCName => URI
  # @option host_defaults [Hash{String => RDF::URI}] :vocabulary Hash of prefix => URI
  def initialize(base, host_defaults)
    @base               = base
    @parent_subject     = @base
    @parent_object      = nil
    @namespaces         = {}
    @incomplete_triples = []
    @language           = nil
    @uri_mappings       = host_defaults.fetch(:uri_mappings, {})
    @term_mappings      = host_defaults.fetch(:term_mappings, {})
    @default_vocabulary = host_defaults.fetch(:vocabulary, nil)
  end

  # Copy this Evaluation Context. Mutable collections are cloned so the
  # copy can diverge; list_mapping is intentionally shared with the source.
  #
  # @param [EvaluationContext] from
  def initialize_copy(from)
    @uri_mappings       = from.uri_mappings.clone
    @incomplete_triples = from.incomplete_triples.clone
    @namespaces         = from.namespaces.clone
    @list_mapping       = from.list_mapping # deliberately NOT cloned
  end

  # Compact one-line summary of the context for debug output.
  def inspect
    summary = %w(base parent_subject parent_object language default_vocabulary).map do |attr|
      value = send(attr)
      shown = value.respond_to?(:to_ntriples) ? value.to_ntriples : value.inspect
      "#{attr}=#{shown}"
    end
    summary << "uri_mappings[#{uri_mappings.keys.length}]"
    summary << "incomplete_triples[#{incomplete_triples.length}]"
    summary << "term_mappings[#{term_mappings.keys.length}]"
    summary << "lists[#{list_mapping.keys.length}]" if list_mapping
    summary.join(", ")
  end
end
##
# Initializes the RDFa reader instance.
#
# @param [IO, File, String] input
# the input stream to read
# @param [Hash{Symbol => Object}] options
# any additional options (see `RDF::Reader#initialize`)
# @option options [Symbol] :library
# One of :nokogiri or :rexml. If nil/unspecified uses :nokogiri if available, :rexml otherwise.
# @option options [Boolean] :vocab_expansion (false)
# whether to perform OWL2 expansion on the resulting graph
# @option options [Boolean] :reference_folding (true)
# whether to perform RDFa property copying on the resulting graph
# @option options [:xml, :xhtml1, :xhtml5, :html4, :html5, :svg] :host_language (:html5)
# Host Language
# @option options [:"rdfa1.0", :"rdfa1.1"] :version (:"rdfa1.1")
# Parser version information
# @option options [Proc] :processor_callback (nil)
# Callback used to provide processor graph triples.
# @option options [Array<Symbol>] :rdfagraph ([:output])
# Used to indicate if either or both of the :output or :processor graphs are output.
# Value is an array containing on or both of :output or :processor.
# @option options [Repository] :vocab_repository (nil)
# Repository to save loaded vocabularies.
# @option options [Array] :debug
# Array to place debug messages
# @return [reader]
# @yield [reader] `self`
# @yieldparam [RDF::Reader] reader
# @yieldreturn [void] ignored
# @raise [RDF::ReaderError] if _validate_
def initialize(input = $stdin, options = {}, &block)
super do
@debug = options[:debug]
@options = {:reference_folding => true}.merge(@options)
@repository = RDF::Repository.new
# Normalize :rdfagraph to an array drawn from [:output, :processor];
# a comma-separated String/Symbol or an Array is accepted.
@options[:rdfagraph] = case @options[:rdfagraph]
when String, Symbol then @options[:rdfagraph].to_s.split(',').map(&:strip).map(&:to_sym)
when Array then @options[:rdfagraph].map {|o| o.to_s.to_sym}
else []
end.select {|o| [:output, :processor].include?(o)}
# Default to emitting the output graph only.
@options[:rdfagraph] << :output if @options[:rdfagraph].empty?
# Choose the XML parser backend.
@library = case options[:library]
when nil
# Use Nokogiri when available, and REXML otherwise:
(defined?(::Nokogiri) && RUBY_PLATFORM != 'java') ? :nokogiri : :rexml
when :nokogiri, :rexml
options[:library]
else
raise ArgumentError.new("expected :rexml or :nokogiri, but got #{options[:library].inspect}")
end
require "rdf/rdfa/reader/#{@library}"
@implementation = case @library
when :nokogiri then Nokogiri
when :rexml then REXML
end
self.extend(@implementation)
# Sniff @version and @host_language from the input document.
detect_host_language_version(input, options)
add_info(@doc, "version = #{@version}, host_language = #{@host_language}, library = #{@library}, rdfagraph = #{@options[:rdfagraph].inspect}, expand = #{@options[:vocab_expansion]}")
begin
initialize_xml(input, options)
rescue
add_error(nil, "Malformed document: #{$!.message}")
end
add_error(nil, "Empty document") if root.nil?
add_error(nil, "Syntax errors:\n#{doc_errors}") if !doc_errors.empty?
# Section 4.2 RDFa Host Language Conformance
#
# The Host Language may require the automatic inclusion of one or more Initial Contexts
@host_defaults = {
:vocabulary => nil,
:uri_mappings => {},
:initial_contexts => [],
}
if @version == :"rdfa1.0"
# Add default term mappings
@host_defaults[:term_mappings] = %w(
alternate appendix bookmark cite chapter contents copyright first glossary help icon index
last license meta next p3pv1 prev role section stylesheet subsection start top up
).inject({}) { |hash, term| hash[term] = RDF::XHV[term]; hash }
end
# Select the initial-context documents mandated by the host language.
case @host_language
when :xml, :svg
@host_defaults[:initial_contexts] = [XML_RDFA_CONTEXT]
when :xhtml1
@host_defaults[:initial_contexts] = [XML_RDFA_CONTEXT, XHTML_RDFA_CONTEXT]
when :xhtml5, :html4, :html5
@host_defaults[:initial_contexts] = [XML_RDFA_CONTEXT, HTML_RDFA_CONTEXT]
end
block.call(self) if block_given?
end
end
##
# Iterates the given block for each RDF statement in the input.
#
# Reads to graph and performs expansion if required.
#
# @yield [statement]
# @yieldparam [RDF::Statement] statement
# @return [void]
##
# Yields each RDF statement: on first call, parses the whole document
# (including embedded RDF/XML, script-element data blocks and microdata),
# applies property copying and vocabulary expansion as configured, then
# replays the repository, filtering by the requested :rdfagraph graphs.
def each_statement(&block)
unless @processed || @root.nil?
# Add prefix definitions from host defaults
@host_defaults[:uri_mappings].each_pair do |prefix, value|
prefix(prefix, value)
end
# parse
parse_whole_document(@doc, RDF::URI(base_uri))
# Helper (defined lazily on first use): run a secondary reader over an
# embedded data block of the given media type.
def extract_script(el, input, type, options, &block)
add_debug(el, "script element of type #{type}")
begin
# Formats don't exist unless they've been required
case type
when 'application/rdf+xml' then require 'rdf/rdfxml'
when 'text/ntriples' then require 'rdf/ntriples'
# FIX: Turtle support lives in the rdf-turtle gem, loaded via
# 'rdf/turtle'; the previous require 'text/turtle' could never
# succeed (and the failure was hidden by the rescue below).
when 'text/turtle' then require 'rdf/turtle'
end
rescue
end
if reader = RDF::Reader.for(:content_type => type)
add_debug(el, "=> reader #{reader.to_sym}")
reader.new(input, options).each(&block)
end
end
# Look for Embedded Turtle and RDF/XML
unless @root.xpath("//rdf:RDF", "xmlns:rdf" => "http://www.w3.org/1999/02/22-rdf-syntax-ns#").empty?
extract_script(@root, @doc, "application/rdf+xml", @options) do |statement|
@repository << statement
end
end
# Look for Embedded scripts
@root.css("script[type]").each do |el|
type = el.attribute("type")
extract_script(el, el.inner_text, type, @options) do |statement|
@repository << statement
end
end
# Look for Embedded microdata
unless @root.xpath("//@itemscope").empty?
begin
require 'rdf/microdata'
add_debug(@doc, "process microdata")
@repository << RDF::Microdata::Reader.new(@doc, options)
rescue
add_debug(@doc, "microdata detected, not processed")
end
end
# Perform property copying
copy_properties(@repository) if @options[:reference_folding]
# Perform vocabulary expansion
expand(@repository) if @options[:vocab_expansion]
@processed = true
end
# Return statements in the default graph for
# statements in the associated named or default graph from the
# processed repository
@repository.each do |statement|
case statement.context
when nil
yield statement if @options[:rdfagraph].include?(:output)
when RDF::RDFA.ProcessorGraph
yield RDF::Statement.new(*statement.to_triple) if @options[:rdfagraph].include?(:processor)
end
end
end
##
# Iterates the given block for each RDF triple in the input.
#
# Delegates to #each_statement and explodes every statement into its
# subject/predicate/object components.
#
# @yield [subject, predicate, object]
# @yieldparam [RDF::Resource] subject
# @yieldparam [RDF::URI] predicate
# @yieldparam [RDF::Value] object
# @return [void]
def each_triple(&block)
  each_statement do |stmt|
    block.call(*stmt.to_triple)
  end
end
private
# Return the blank node for +value+, memoized per reader so that repeated
# references to the same label yield the identical node object.
def bnode(value = nil)
  (@bnode_cache ||= {})[value.to_s] ||= RDF::Node.new(value)
end
# Describe a node's location within the source document for diagnostics,
# prefixed with the document's base URI. Elements/attributes that know
# their display_path use it; anything else is stringified as-is.
def node_path(node)
  location = node.respond_to?(:display_path) ? node.display_path : node
  "<#{base_uri}>#{location}"
end
# Record a debug-level processor message, but only when debugging is
# active (either the library-wide flag or a reader-local @debug sink).
#
# @param [#display_path, #to_s] node XML Node or string for showing context
# @param [String] message
# @yieldreturn [String] appended to message, to allow lazy evaluation of message
def add_debug(node, message = "")
  return unless ::RDF::RDFa.debug? || @debug
  message += yield if block_given?
  add_processor_message(node, message, RDF::RDFA.Info)
end
# Record an informational processor message (default class RDF::RDFA.Info).
def add_info(node, message, process_class = RDF::RDFA.Info)
add_processor_message(node, message, process_class)
end
# Record a warning processor message (default class RDF::RDFA.Warning).
def add_warning(node, message, process_class = RDF::RDFA.Warning)
add_processor_message(node, message, process_class)
end
# Record an error processor message; when the reader is validating, also
# abort the parse by raising RDF::ReaderError.
def add_error(node, message, process_class = RDF::RDFA.Error)
add_processor_message(node, message, process_class)
raise RDF::ReaderError, message if validate?
end
# Fan a processor message out to the configured sinks: STDOUT when global
# debugging is on, the @debug array when one was supplied, and — when a
# :processor_callback or the :processor rdfagraph is requested — the
# processor graph inside @repository (plus the callback itself).
def add_processor_message(node, message, process_class)
puts "#{node_path(node)}: #{message}" if ::RDF::RDFa.debug?
@debug << "#{node_path(node)}: #{message}" if @debug.is_a?(Array)
if @options[:processor_callback] || @options[:rdfagraph].include?(:processor)
# Describe the message as a typed resource in the ProcessorGraph context.
n = RDF::Node.new
processor_statements = [
RDF::Statement.new(n, RDF["type"], process_class, :context => RDF::RDFA.ProcessorGraph),
RDF::Statement.new(n, RDF::DC.description, message, :context => RDF::RDFA.ProcessorGraph),
RDF::Statement.new(n, RDF::DC.date, RDF::Literal::Date.new(DateTime.now), :context => RDF::RDFA.ProcessorGraph)
]
processor_statements << RDF::Statement.new(n, RDF::RDFA.context, base_uri, :context => RDF::RDFA.ProcessorGraph) if base_uri
if node.respond_to?(:path)
# When the node knows its document position, attach an XPath pointer.
nc = RDF::Node.new
processor_statements += [
RDF::Statement.new(n, RDF::RDFA.context, nc, :context => RDF::RDFA.ProcessorGraph),
RDF::Statement.new(nc, RDF["type"], RDF::PTR.XPathPointer, :context => RDF::RDFA.ProcessorGraph),
RDF::Statement.new(nc, RDF::PTR.expression, node.path, :context => RDF::RDFA.ProcessorGraph)
]
end
@repository.insert(*processor_statements)
if cb = @options[:processor_callback]
processor_statements.each {|s| cb.call(s)}
end
end
end
##
# add a statement, object can be literal or URI or bnode
# Yields {RDF::Statement} to the saved callback
#
# @param [#display_path, #to_s] node XML Node or string for showing context
# @param [RDF::Resource] subject the subject of the statement
# @param [RDF::URI] predicate the predicate of the statement
# @param [RDF::Value] object the object of the statement
# @param [RDF::Value] context the context of the statement
#   NOTE(review): currently unused — the statement is constructed without
#   it; confirm before relying on this parameter.
# @raise [RDF::ReaderError] Checks parameter types and raises if they are incorrect if parsing mode is _validate_.
def add_triple(node, subject, predicate, object, context = nil)
statement = RDF::Statement.new(subject, predicate, object)
# Report (and, when validating, raise on) structurally invalid statements.
add_error(node, "statement #{RDF::NTriples.serialize(statement)} is invalid") unless statement.valid?
if subject && predicate && object # Basic sanity checking
add_info(node, "statement: #{RDF::NTriples.serialize(statement)}")
repository << statement
end
end
# Parsing an RDFa document (this is *not* the recursive method)
#
# Establishes the root evaluation context — fragment-stripped base IRI plus
# the prefix/term/vocabulary mappings from the host's initial contexts —
# then kicks off the recursive traversal from the document root.
def parse_whole_document(doc, base)
base = doc_base(base)
if (base)
# Strip any fragment from base
base = base.to_s.split("#").first
base = uri(base)
add_debug("") {"parse_whole_doc: base='#{base}'"}
end
# initialize the evaluation context with the appropriate base
evaluation_context = EvaluationContext.new(base, @host_defaults)
if @version != :"rdfa1.0"
# Process default vocabularies (initial contexts are an RDFa 1.1 feature)
load_initial_contexts(@host_defaults[:initial_contexts]) do |which, value|
add_debug(root) { "parse_whole_document, #{which}: #{value.inspect}"}
case which
when :uri_mappings then evaluation_context.uri_mappings.merge!(value)
when :term_mappings then evaluation_context.term_mappings.merge!(value)
when :default_vocabulary then evaluation_context.default_vocabulary = value
end
end
end
traverse(root, evaluation_context)
add_debug("", "parse_whole_doc: traversal complete'")
end
# Parse and process URI mappings, Term mappings and a default vocabulary from @context
#
# Yields each mapping
#
# @param [Array<RDF::URI>] initial_contexts context document IRIs to load
# @yield which, value — one of :uri_mappings, :term_mappings or
#   :default_vocabulary together with the extracted value
def load_initial_contexts(initial_contexts)
initial_contexts.
map {|uri| uri(uri).normalize}.
each do |uri|
# Don't try to open ourselves!
if base_uri == uri
add_debug(root) {"load_initial_contexts: skip recursive context <#{uri}>"}
next
end
# Suppress debug output while the context document itself is parsed;
# the previous setting is restored in the ensure clause.
old_debug = RDF::RDFa.debug?
begin
add_info(root, "load_initial_contexts: load <#{uri}>")
RDF::RDFa.debug = false
context = Context.find(uri)
rescue Exception => e
RDF::RDFa.debug = old_debug
add_error(root, e.message)
raise # In case we're not in strict mode, we need to be sure processing stops
ensure
RDF::RDFa.debug = old_debug
end
# Add URI Mappings to prefixes
context.prefixes.each_pair do |prefix, value|
prefix(prefix, value)
end
yield :uri_mappings, context.prefixes unless context.prefixes.empty?
yield :term_mappings, context.terms unless context.terms.empty?
yield :default_vocabulary, context.vocabulary if context.vocabulary
end
end
# Extract the prefix mappings from an element
#
# Collects namespace declarations (xmlns / xmlns:*) and, for RDFa 1.1,
# @prefix declarations, merging them into uri_mappings/namespaces in place.
def extract_mappings(element, uri_mappings, namespaces)
# look for xmlns
# (note, this may be dependent on @host_language)
# Regardless of how the mapping is declared, the value to be mapped must be converted to lower case,
# and the URI is not processed in any way; in particular if it is a relative path it is
# not resolved against the current base.
ns_defs = {}
element.namespaces.each do |prefix, href|
prefix = nil if prefix == "xmlns"
add_debug("extract_mappings") { "ns: #{prefix}: #{href}"}
ns_defs[prefix] = href
end
# HTML parsing doesn't create namespace_definitions
if ns_defs.empty?
# Fall back to scanning raw attributes for xmlns / xmlns:foo.
ns_defs = {}
element.attributes.each do |attr, href|
next unless attr =~ /^xmlns(?:\:(.+))?/
prefix = $1
add_debug("extract_mappings") { "ns(attr): #{prefix}: #{href}"}
ns_defs[prefix] = href.to_s
end
end
ns_defs.each do |prefix, href|
# A Conforming RDFa Processor must ignore any definition of a mapping for the '_' prefix.
next if prefix == "_"
# Downcase prefix for RDFa 1.1
pfx_lc = (@version == :"rdfa1.0" || prefix.nil?) ? prefix : prefix.downcase
if prefix
# Warn when an existing mapping for the same prefix is replaced.
if uri_mappings.fetch(pfx_lc.to_sym, href) != href
add_warning(element, "Redefining prefix #{pfx_lc}: from <#{uri_mappings[pfx_lc]}> to <#{href}>", RDF::RDFA.PrefixRedefinition)
end
uri_mappings[pfx_lc.to_sym] = href
namespaces[pfx_lc] ||= href
prefix(pfx_lc, href)
add_info(element, "extract_mappings: #{prefix} => <#{href}>")
else
add_info(element, "extract_mappings: nil => <#{href}>")
namespaces[""] ||= href
end
end
# Set mappings from @prefix
# prefix is a whitespace separated list of prefix-name URI pairs of the form
# NCName ':' ' '+ xs:anyURI
# (@prefix is only honored for RDFa 1.1 — see the trailing unless.)
mappings = element.attribute("prefix").to_s.strip.split(/\s+/)
while mappings.length > 0 do
prefix, uri = mappings.shift.downcase, mappings.shift
#puts "uri_mappings prefix #{prefix} <#{uri}>"
next unless prefix.match(/:$/)
prefix.chop!
unless prefix.match(NC_REGEXP)
add_error(element, "extract_mappings: Prefix #{prefix.inspect} does not match NCName production")
next
end
# A Conforming RDFa Processor must ignore any definition of a mapping for the '_' prefix.
next if prefix == "_"
pfx_index = prefix.to_s.empty? ? nil : prefix.to_s.to_sym
if uri_mappings.fetch(pfx_index, uri) != uri
add_warning(element, "Redefining prefix #{prefix}: from <#{uri_mappings[pfx_index]}> to <#{uri}>", RDF::RDFA.PrefixRedefinition)
end
uri_mappings[pfx_index] = uri
prefix(prefix, uri)
add_info(element, "extract_mappings: prefix #{prefix} => <#{uri}>")
end unless @version == :"rdfa1.0"
end
# The recursive helper function
def traverse(element, evaluation_context)
if element.nil?
add_error(element, "Can't parse nil element")
return nil
end
add_debug(element) { "ec: #{evaluation_context.inspect}" }
# local variables [7.5 Step 1]
recurse = true
skip = false
new_subject = nil
typed_resource = nil
current_object_resource = nil
uri_mappings = evaluation_context.uri_mappings.clone
namespaces = evaluation_context.namespaces.clone
incomplete_triples = []
language = evaluation_context.language
term_mappings = evaluation_context.term_mappings.clone
default_vocabulary = evaluation_context.default_vocabulary
list_mapping = evaluation_context.list_mapping
xml_base = element.base
base = xml_base.to_s if xml_base && ![:xhtml1, :html4, :html5].include?(@host_language)
add_debug(element) {"base: #{base.inspect}"} if base
base ||= evaluation_context.base
# Pull out the attributes needed for the skip test.
attrs = {}
%w(
about
content
datatype
datetime
href
id
inlist
property
rel
resource
rev
role
src
typeof
value
vocab
).each do |a|
attrs[a.to_sym] = element.attributes[a].to_s.strip if element.attributes[a]
end
add_debug(element) {"attrs " + attrs.inspect} unless attrs.empty?
# If @property and @rel/@rev are on the same elements, the non-CURIE and non-URI @rel/@rev values are ignored. If, after this, the value of @rel/@rev becomes empty, then the then the processor must act as if the attribute is not present.
if attrs.has_key?(:property) && @version == :"rdfa1.1" && (@host_language == :html5 || @host_language == :xhtml5 || @host_language == :html4)
[:rel, :rev].each do |attr|
next unless attrs.has_key?(attr)
add_debug(element) {"Remove non-CURIE/non-IRI @#{attr} values from #{attrs[attr].inspect}"}
attrs[attr] = attrs[attr].
split(/\s+/).
select {|a| a.index(':')}.
join(" ")
add_debug(element) {" => #{attrs[attr].inspect}"}
attrs.delete(attr) if attrs[attr].empty?
end
end
# Default vocabulary [7.5 Step 2]
# Next the current element is examined for any change to the default vocabulary via @vocab.
# If @vocab is present and contains a value, its value updates the local default vocabulary.
# If the value is empty, then the local default vocabulary must be reset to the Host Language defined default.
if attrs[:vocab]
default_vocabulary = if attrs[:vocab].empty?
# Set default_vocabulary to host language default
add_debug(element) {
"[Step 3] reset default_vocaulary to #{@host_defaults.fetch(:vocabulary, nil).inspect}"
}
@host_defaults.fetch(:vocabulary, nil)
else
# Generate a triple indicating that the vocabulary is used
add_triple(element, base, RDF::RDFA.usesVocabulary, uri(attrs[:vocab]))
uri(attrs[:vocab])
end
add_debug(element) {
"[Step 2] default_vocaulary: #{default_vocabulary.inspect}"
}
end
# Local term mappings [7.5 Step 3]
# Next, the current element is then examined for URI mapping s and these are added to the local list of URI mappings.
# Note that a URI mapping will simply overwrite any current mapping in the list that has the same name
extract_mappings(element, uri_mappings, namespaces)
# Language information [7.5 Step 4]
language = element.language || language
language = nil if language.to_s.empty?
add_debug(element) {"HTML5 [3.2.3.3] lang: #{language.inspect}"} if language
# From HTML5, if the property attribute and the rel and/or rev attribute exists on the same element, the non-CURIE and non-URI rel and rev values are ignored. If, after this, the value of rel and/or rev becomes empty, then the processor must act as if the respective attribute is not present.
if [:html5, :xhtml5].include?(@host_language) && attrs[:property] && (attrs[:rel] || attrs[:rev])
old_rel, old_rev = attrs[:rel], attrs[:rev]
if old_rel
attrs[:rel] = (attrs[:rel]).split(/\s+/m).select {|r| !r.index(':').nil?}.join(" ")
attrs.delete(:rel) if attrs[:rel].empty?
add_debug(element) {"HTML5: @rel was #{old_rel}, now #{attrs[:rel]}"}
end
if old_rev
attrs[:rev] = (attrs[:rev]).split(/\s+/m).select {|r| !r.index(':').nil?}.join(" ")
attrs.delete(:rev) if attrs[:rev].empty?
add_debug(element) {"HTML5: @rev was #{old_rev}, now #{attrs[:rev]}"}
end
end
# rels and revs
rels = process_uris(element, attrs[:rel], evaluation_context, base,
:uri_mappings => uri_mappings,
:term_mappings => term_mappings,
:vocab => default_vocabulary,
:restrictions => TERMorCURIEorAbsIRI.fetch(@version, []))
revs = process_uris(element, attrs[:rev], evaluation_context, base,
:uri_mappings => uri_mappings,
:term_mappings => term_mappings,
:vocab => default_vocabulary,
:restrictions => TERMorCURIEorAbsIRI.fetch(@version, []))
add_debug(element) do
"rels: #{rels.join(" ")}, revs: #{revs.join(" ")}"
end unless (rels + revs).empty?
if !(attrs[:rel] || attrs[:rev])
# Establishing a new subject if no rel/rev [7.5 Step 5]
if @version == :"rdfa1.0"
new_subject = if attrs[:about]
process_uri(element, attrs[:about], evaluation_context, base,
:uri_mappings => uri_mappings,
:restrictions => SafeCURIEorCURIEorIRI.fetch(@version, []))
elsif attrs[:resource]
process_uri(element, attrs[:resource], evaluation_context, base,
:uri_mappings => uri_mappings,
:restrictions => SafeCURIEorCURIEorIRI.fetch(@version, []))
elsif attrs[:href] || attrs[:src]
process_uri(element, (attrs[:href] || attrs[:src]), evaluation_context, base, :restrictions => [:uri])
end
# If no URI is provided by a resource attribute, then the first match from the following rules
# will apply:
new_subject ||= if [:xhtml1, :xhtml5, :html4, :html5].include?(@host_language) && element.name =~ /^(head|body)$/
# From XHTML+RDFa 1.1:
# if no URI is provided, then first check to see if the element is the head or body element.
# If it is, then act as if the new subject is set to the parent object.
uri(base)
elsif element == root && base
# if the element is the root element of the document, then act as if there is an empty @about present,
# and process it according to the rule for @about, above;
uri(base)
elsif attrs[:typeof]
RDF::Node.new
else
# otherwise, if parent object is present, new subject is set to the value of parent object.
skip = true unless attrs[:property]
evaluation_context.parent_object
end
# if the @typeof attribute is present, set typed resource to new subject
typed_resource = new_subject if attrs[:typeof]
else # rdfa1.1
# If the current element contains no @rel or @rev attribute, then the next step is to establish a value for new subject.
# This step has two possible alternatives.
# 1. If the current element contains the @property attribute, but does not contain the @content or the @datatype attributes, then
if attrs[:property] && !(attrs[:content] || attrs[:datatype])
# new subject is set to the resource obtained from the first match from the following rule:
new_subject ||= if attrs[:about]
# by using the resource from @about, if present, obtained according to the section on CURIE and IRI Processing;
process_uri(element, attrs[:about], evaluation_context, base,
:uri_mappings => uri_mappings,
:restrictions => SafeCURIEorCURIEorIRI.fetch(@version, []))
elsif [:xhtml1, :xhtml5, :html4, :html5].include?(@host_language) && element.name =~ /^(head|body)$/
# From XHTML+RDFa 1.1:
# if no URI is provided, then first check to see if the element is the head or body element. If it is, then act as if the new subject is set to the parent object.
evaluation_context.parent_object
elsif element == root && base
# otherwise, if the element is the root element of the document, then act as if there is an empty @about present, and process it according to the rule for @about, above;
uri(base)
end
# if the @typeof attribute is present, set typed resource to new subject
typed_resource = new_subject if attrs[:typeof]
# otherwise, if parent object is present, new subject is set to the value of parent object.
new_subject ||= evaluation_context.parent_object
# If @typeof is present then typed resource is set to the resource obtained from the first match from the following rules:
# by using the resource from @about, if present, obtained according to the section on CURIE and IRI Processing; (done above)
# otherwise, if the element is the root element of the document, then act as if there is an empty @about present and process it according to the previous rule; (done above)
if attrs[:typeof] && typed_resource.nil?
# otherwise,
typed_resource ||= if attrs[:resource]
# by using the resource from @resource, if present, obtained according to the section on CURIE and IRI Processing;
process_uri(element, attrs[:resource], evaluation_context, base,
:uri_mappings => uri_mappings,
:restrictions => SafeCURIEorCURIEorIRI.fetch(@version, []))
elsif attrs[:href] || attrs[:src]
# otherwise, by using the IRI from @href, if present, obtained according to the section on CURIE and IRI Processing;
# otherwise, by using the IRI from @src, if present, obtained according to the section on CURIE and IRI Processing;
process_uri(element, (attrs[:href] || attrs[:src]), evaluation_context, base,
:restrictions => [:uri])
else
# otherwise, the value of typed resource is set to a newly created bnode.
RDF::Node.new
end
# The value of the current object resource is set to the value of typed resource.
current_object_resource = typed_resource
end
else
# otherwise (ie, the @content or @datatype)
new_subject =
process_uri(element, (attrs[:about] || attrs[:resource]),
evaluation_context, base,
:uri_mappings => uri_mappings,
:restrictions => SafeCURIEorCURIEorIRI.fetch(@version, [])) if attrs[:about] ||attrs[:resource]
new_subject ||=
process_uri(element, (attrs[:href] || attrs[:src]), evaluation_context, base,
:restrictions => [:uri]) if attrs[:href] || attrs[:src]
# If no URI is provided by a resource attribute, then the first match from the following rules
# will apply:
new_subject ||= if [:xhtml1, :xhtml5, :html4, :html5].include?(@host_language) && element.name =~ /^(head|body)$/
# From XHTML+RDFa 1.1:
# if no URI is provided, then first check to see if the element is the head or body element.
# If it is, then act as if the new subject is set to the parent object.
evaluation_context.parent_object
elsif element == root
# if the element is the root element of the document, then act as if there is an empty @about present,
# and process it according to the rule for @about, above;
uri(base)
elsif attrs[:typeof]
RDF::Node.new
else
# otherwise, if parent object is present, new subject is set to the value of parent object.
# Additionally, if @property is not present then the skip element flag is set to 'true'.
skip = true unless attrs[:property]
evaluation_context.parent_object
end
# If @typeof is present then typed resource is set to the resource obtained from the first match from the following rules:
typed_resource = new_subject if attrs[:typeof]
end
end
add_debug(element) {
"[Step 5] new_subject: #{new_subject.to_ntriples rescue 'nil'}, " +
"typed_resource: #{typed_resource.to_ntriples rescue 'nil'}, " +
"current_object_resource: #{current_object_resource.to_ntriples rescue 'nil'}, " +
"skip = #{skip}"
}
else
# [7.5 Step 6]
# If the current element does contain a @rel or @rev attribute, then the next step is to
# establish both a value for new subject and a value for current object resource:
new_subject = process_uri(element, attrs[:about], evaluation_context, base,
:uri_mappings => uri_mappings,
:restrictions => SafeCURIEorCURIEorIRI.fetch(@version, []))
new_subject ||= process_uri(element, attrs[:src], evaluation_context, base,
:uri_mappings => uri_mappings,
:restrictions => [:uri]) if @version == :"rdfa1.0"
# if the @typeof attribute is present, set typed resource to new subject
typed_resource = new_subject if attrs[:typeof]
# If no URI is provided then the first match from the following rules will apply
new_subject ||= if element == root && base
uri(base)
elsif [:xhtml1, :xhtml5, :html4, :html5].include?(@host_language) && element.name =~ /^(head|body)$/
# From XHTML+RDFa 1.1:
# if no URI is provided, then first check to see if the element is the head or body element.
# If it is, then act as if the new subject is set to the parent object.
evaluation_context.parent_object
elsif attrs[:typeof] && @version == :"rdfa1.0"
RDF::Node.new
else
# if it's null, it's null and nothing changes
evaluation_context.parent_object
# no skip flag set this time
end
# Then the current object resource is set to the URI obtained from the first match from the following rules:
current_object_resource = process_uri(element, attrs[:resource], evaluation_context, base,
:uri_mappings => uri_mappings,
:restrictions => SafeCURIEorCURIEorIRI.fetch(@version, [])) if attrs[:resource]
current_object_resource ||= process_uri(element, attrs[:href], evaluation_context, base,
:restrictions => [:uri]) if attrs[:href]
current_object_resource ||= process_uri(element, attrs[:src], evaluation_context, base,
:restrictions => [:uri]) if attrs[:src] && @version != :"rdfa1.0"
current_object_resource ||= RDF::Node.new if attrs[:typeof] && !attrs[:about] && @version != :"rdfa1.0"
# and also set the value typed resource to this bnode
if attrs[:typeof]
if @version == :"rdfa1.0"
typed_resource = new_subject
else
typed_resource = current_object_resource if !attrs[:about]
end
end
add_debug(element) {
"[Step 6] new_subject: #{new_subject}, " +
"current_object_resource = #{current_object_resource.nil? ? 'nil' : current_object_resource} " +
"typed_resource: #{typed_resource.to_ntriples rescue 'nil'}, "
}
end
# [Step 7] If in any of the previous steps a typed resource was set to a non-null value, it is now used to provide a subject for type values;
if typed_resource
# Typeof is TERMorCURIEorAbsIRIs
types = process_uris(element, attrs[:typeof], evaluation_context, base,
:uri_mappings => uri_mappings,
:term_mappings => term_mappings,
:vocab => default_vocabulary,
:restrictions => TERMorCURIEorAbsIRI.fetch(@version, []))
add_debug(element, "[Step 7] typeof: #{attrs[:typeof]}")
types.each do |one_type|
add_triple(element, typed_resource, RDF["type"], one_type)
end
end
# Create new List mapping [step 8]
#
# If in any of the previous steps a new subject was set to a non-null value different from the parent object;
# The list mapping taken from the evaluation context is set to a new, empty mapping.
if (new_subject && (new_subject != evaluation_context.parent_subject || list_mapping.nil?))
list_mapping = {}
add_debug(element) do
"[Step 8]: create new list mapping(#{list_mapping.object_id}) " +
"ns: #{new_subject.to_ntriples}, " +
"ps: #{evaluation_context.parent_subject.to_ntriples rescue 'nil'}"
end
end
# Generate triples with given object [Step 9]
#
# If in any of the previous steps a current object resource was set to a non-null value, it is now used to generate triples and add entries to the local list mapping:
if new_subject && current_object_resource && (attrs[:rel] || attrs[:rev])
add_debug(element) {"[Step 9] rels: #{rels.inspect} revs: #{revs.inspect}"}
rels.each do |r|
if attrs[:inlist]
# If the current list mapping does not contain a list associated with this IRI,
# instantiate a new list
unless list_mapping[r]
list_mapping[r] = RDF::List.new
add_debug(element) {"list(#{r}): create #{list_mapping[r].inspect}"}
end
add_debug(element) {"[Step 9] add #{current_object_resource.to_ntriples} to #{r} #{list_mapping[r].inspect}"}
list_mapping[r] << current_object_resource
else
# Predicates for the current object resource can be set by using one or both of the @rel and the @rev attributes but, in case of the @rel attribute, only if the @inlist is not present:
add_triple(element, new_subject, r, current_object_resource)
end
end
revs.each do |r|
add_triple(element, current_object_resource, r, new_subject)
end
elsif attrs[:rel] || attrs[:rev]
# Incomplete triples and bnode creation [Step 10]
add_debug(element) {"[Step 10] incompletes: rels: #{rels}, revs: #{revs}"}
current_object_resource = RDF::Node.new
# predicate: full IRI
# direction: forward/reverse
# lists: Save into list, don't generate triple
rels.each do |r|
if attrs[:inlist]
# If the current list mapping does not contain a list associated with this IRI,
# instantiate a new list
unless list_mapping[r]
list_mapping[r] = RDF::List.new
add_debug(element) {"[Step 10] list(#{r}): create #{list_mapping[r].inspect}"}
end
incomplete_triples << {:list => list_mapping[r], :direction => :none}
else
incomplete_triples << {:predicate => r, :direction => :forward}
end
end
revs.each do |r|
incomplete_triples << {:predicate => r, :direction => :reverse}
end
end
# Establish current object literal [Step 11]
#
# If the current element has a @inlist attribute, add the property to the
# list associated with that property, creating a new list if necessary.
if attrs[:property]
properties = process_uris(element, attrs[:property], evaluation_context, base,
:uri_mappings => uri_mappings,
:term_mappings => term_mappings,
:vocab => default_vocabulary,
:restrictions => TERMorCURIEorAbsIRI.fetch(@version, []))
properties.reject! do |p|
if p.is_a?(RDF::URI)
false
else
add_warning(element, "[Step 11] predicate #{p.to_ntriples} must be a URI")
true
end
end
datatype = process_uri(element, attrs[:datatype], evaluation_context, base,
:uri_mappings => uri_mappings,
:term_mappings => term_mappings,
:vocab => default_vocabulary,
:restrictions => TERMorCURIEorAbsIRI.fetch(@version, [])) unless attrs[:datatype].to_s.empty?
begin
current_property_value = case
when datatype && ![RDF.XMLLiteral, RDF.HTML].include?(datatype)
# typed literal
add_debug(element, "[Step 11] typed literal (#{datatype})")
RDF::Literal.new(attrs[:datetime] || attrs[:value] || attrs[:content] || element.inner_text.to_s, :datatype => datatype, :language => language, :validate => validate?, :canonicalize => canonicalize?)
when @version == :"rdfa1.1"
case
when datatype == RDF.XMLLiteral
# XML Literal
add_debug(element) {"[Step 11] XML Literal: #{element.inner_html}"}
# In order to maintain maximum portability of this literal, any children of the current node that are
# elements must have the current in scope XML namespace declarations (if any) declared on the
# serialized element using their respective attributes. Since the child element node could also
# declare new XML namespaces, the RDFa Processor must be careful to merge these together when
# generating the serialized element definition. For avoidance of doubt, any re-declarations on the
# child node must take precedence over declarations that were active on the current node.
begin
c14nxl = element.children.c14nxl(
:library => @library,
:language => language,
:namespaces => {nil => XHTML}.merge(namespaces))
RDF::Literal.new(c14nxl,
:library => @library,
:datatype => RDF.XMLLiteral,
:validate => validate?,
:canonicalize => canonicalize?)
rescue ArgumentError => e
add_error(element, e.message)
end
when datatype == RDF.HTML
# HTML Literal
add_debug(element) {"[Step 11] HTML Literal: #{element.inner_html}"}
# Just like XMLLiteral, but without the c14nxl
begin
RDF::Literal.new(element.children.to_html,
:library => @library,
:datatype => RDF.HTML,
:validate => validate?,
:canonicalize => canonicalize?)
rescue ArgumentError => e
add_error(element, e.message)
end
when attrs[:datatype]
# otherwise, as a plain literal if @datatype is present but has an empty value.
# The actual literal is either the value of @content (if present) or a string created by
# concatenating the value of all descendant text nodes, of the current element in turn.
# typed literal
add_debug(element, "[Step 11] plain plain (#{datatype})")
RDF::Literal.new(attrs[:content] || element.inner_text.to_s, :language => language, :validate => validate?, :canonicalize => canonicalize?)
when attrs[:content]
# plain literal
add_debug(element, "[Step 11] plain literal (content)")
RDF::Literal.new(attrs[:content], :language => language, :validate => validate?, :canonicalize => canonicalize?)
when element.name == 'time'
# HTML5 support
# Lexically scan value and assign appropriate type, otherwise, leave untyped
v = (attrs[:datetime] || element.inner_text).to_s
datatype = %w(Date Time DateTime Year YearMonth Duration).map {|t| RDF::Literal.const_get(t)}.detect do |dt|
v.match(dt::GRAMMAR)
end || RDF::Literal
add_debug(element) {"[Step 11] <time> literal: #{datatype} #{v.inspect}"}
datatype.new(v, :language => language)
when (attrs[:resource] || attrs[:href] || attrs[:src]) &&
!(attrs[:rel] || attrs[:rev]) &&
@version != :"rdfa1.0"
add_debug(element, "[Step 11] resource (resource|href|src)")
res = process_uri(element, attrs[:resource], evaluation_context, base,
:uri_mappings => uri_mappings,
:restrictions => SafeCURIEorCURIEorIRI.fetch(@version, [])) if attrs[:resource]
res ||= process_uri(element, (attrs[:href] || attrs[:src]), evaluation_context, base, :restrictions => [:uri])
when typed_resource && !attrs[:about] && @version != :"rdfa1.0"
add_debug(element, "[Step 11] typed_resource")
typed_resource
else
# plain literal
add_debug(element, "[Step 11] plain literal (inner text)")
RDF::Literal.new(element.inner_text.to_s, :language => language, :validate => validate?, :canonicalize => canonicalize?)
end
else # rdfa1.0
if element.text_content? || (element.children.length == 0) || attrs[:datatype] == ""
# plain literal
add_debug(element, "[Step 11 (1.0)] plain literal")
RDF::Literal.new(attrs[:content] || element.inner_text.to_s, :language => language, :validate => validate?, :canonicalize => canonicalize?)
elsif !element.text_content? and (datatype == nil or datatype == RDF.XMLLiteral)
# XML Literal
add_debug(element) {"[Step 11 (1.0)] XML Literal: #{element.inner_html}"}
recurse = false
c14nxl = element.children.c14nxl(
:library => @library,
:language => language,
:namespaces => {nil => XHTML}.merge(namespaces))
RDF::Literal.new(c14nxl,
:library => @library,
:datatype => RDF.XMLLiteral,
:validate => validate?,
:canonicalize => canonicalize?)
end
end
rescue ArgumentError => e
add_error(element, e.message)
end
# add each property
properties.each do |p|
# Lists: If element has an @inlist attribute, add the value to a list
if attrs[:inlist]
# If the current list mapping does not contain a list associated with this IRI,
# instantiate a new list
unless list_mapping[p]
list_mapping[p] = RDF::List.new
add_debug(element) {"[Step 11] lists(#{p}): create #{list_mapping[p].inspect}"}
end
add_debug(element) {"[Step 11] add #{current_property_value.to_ntriples} to #{p.to_ntriples} #{list_mapping[p].inspect}"}
list_mapping[p] << current_property_value
elsif new_subject
add_triple(element, new_subject, p, current_property_value)
end
end
end
if !skip and new_subject && !evaluation_context.incomplete_triples.empty?
# Complete the incomplete triples from the evaluation context [Step 12]
add_debug(element) do
"[Step 12] complete incomplete triples: " +
"new_subject=#{new_subject.to_ntriples}, " +
"completes=#{evaluation_context.incomplete_triples.inspect}"
end
evaluation_context.incomplete_triples.each do |trip|
case trip[:direction]
when :none
add_debug(element) {"[Step 12] add #{new_subject.to_ntriples} to #{trip[:list].inspect}"}
trip[:list] << new_subject
when :forward
add_triple(element, evaluation_context.parent_subject, trip[:predicate], new_subject)
when :reverse
add_triple(element, new_subject, trip[:predicate], evaluation_context.parent_subject)
end
end
end
# Create a new evaluation context and proceed recursively [Step 13]
if recurse
if skip
if language == evaluation_context.language &&
uri_mappings == evaluation_context.uri_mappings &&
term_mappings == evaluation_context.term_mappings &&
default_vocabulary == evaluation_context.default_vocabulary &&
base == evaluation_context.base &&
list_mapping == evaluation_context.list_mapping
new_ec = evaluation_context
add_debug(element, "[Step 13] skip: reused ec")
else
new_ec = evaluation_context.clone
new_ec.base = base
new_ec.language = language
new_ec.uri_mappings = uri_mappings
new_ec.namespaces = namespaces
new_ec.term_mappings = term_mappings
new_ec.default_vocabulary = default_vocabulary
new_ec.list_mapping = list_mapping
add_debug(element, "[Step 13] skip: cloned ec")
end
else
# create a new evaluation context
new_ec = EvaluationContext.new(base, @host_defaults)
new_ec.parent_subject = new_subject || evaluation_context.parent_subject
new_ec.parent_object = current_object_resource || new_subject || evaluation_context.parent_subject
new_ec.uri_mappings = uri_mappings
new_ec.namespaces = namespaces
new_ec.incomplete_triples = incomplete_triples
new_ec.language = language
new_ec.term_mappings = term_mappings
new_ec.default_vocabulary = default_vocabulary
new_ec.list_mapping = list_mapping
add_debug(element, "[Step 13] new ec")
end
element.children.each do |child|
# recurse only if it's an element
traverse(child, new_ec) if child.element?
end
# Step 14: after traversing through child elements, for each list associated with
# a property
(list_mapping || {}).each do |p, l|
# if that list is different from the evaluation context
ec_list = evaluation_context.list_mapping[p] if evaluation_context.list_mapping
add_debug(element) {"[Step 14] time to create #{l.inspect}? #{(ec_list != l).inspect}"}
if ec_list != l
add_debug(element) {"[Step 14] list(#{p}) create #{l.inspect}"}
# Generate an rdf:List with the elements of that list.
l.each_statement do |st|
add_triple(element, st.subject, st.predicate, st.object) unless st.object == RDF.List
end
# Generate a triple relating new_subject, property and the list BNode,
# or rdf:nil if the list is empty.
if l.empty?
add_triple(element, new_subject, p, RDF.nil)
else
add_triple(element, new_subject, p, l.subject)
end
end
end
# Role processing
# @id is used as subject, bnode otherwise.
# Predicate is xhv:role
# Objects are TERMorCURIEorAbsIRIs.
# Act as if the default vocabulary is XHV
if attrs[:role]
subject = attrs[:id] ? uri(base, "##{attrs[:id]}") : RDF::Node.new
roles = process_uris(element, attrs[:role], evaluation_context, base,
:uri_mappings => uri_mappings,
:term_mappings => term_mappings,
:vocab => RDF::XHV.to_s,
:restrictions => TERMorCURIEorAbsIRI.fetch(@version, []))
add_debug(element) {"role: about: #{subject.to_ntriples}, roles: #{roles.map(&:to_ntriples).inspect}"}
roles.each do |r|
add_triple(element, subject, RDF::XHV.role, r)
end
end
end
end
# Expands a space-separated attribute value (TERMorCURIEorAbsIRI or
# SafeCURIEorCURIEorIRI) into an array of resolved URIs, dropping any
# tokens that fail to resolve.
def process_uris(element, value, evaluation_context, base, options)
  return [] if value.to_s.empty?
  add_debug(element) {"process_uris: #{value}"}
  value.to_s.split(/\s+/).each_with_object([]) do |token, uris|
    resolved = process_uri(element, token, evaluation_context, base, options)
    uris << resolved unless resolved.nil?
  end
end
# Resolves a single attribute token to a URI according to the RDFa rules
# selected by options[:restrictions] (safe CURIE, term, CURIE, or IRI).
# Returns the resolved RDF::URI/bnode, or nil when the value must be ignored
# (a warning is emitted in that case).
def process_uri(element, value, evaluation_context, base, options = {})
  return if value.nil?
  restrictions = options[:restrictions]
  add_debug(element) {"process_uri: #{value}, restrictions = #{restrictions.inspect}"}
  options = {:uri_mappings => {}}.merge(options)
  if !options[:term_mappings] && options[:uri_mappings] && restrictions.include?(:safe_curie) && value.to_s.match(/^\[(.*)\]$/)
    # SafeCURIEorCURIEorIRI
    # When the value is surrounded by square brackets, then the content within the brackets is
    # evaluated as a CURIE according to the CURIE Syntax definition. If it is not a valid CURIE, the
    # value must be ignored.
    uri = curie_to_resource_or_bnode(element, $1, options[:uri_mappings], evaluation_context.parent_subject, restrictions)
    if uri
      add_debug(element) {"process_uri: #{value} => safeCURIE => <#{uri}>"}
    else
      add_warning(element, "#{value} not matched as a safeCURIE", RDF::RDFA.UnresolvedCURIE)
    end
    uri
  elsif options[:term_mappings] && TERM_REGEXP.match(value.to_s) && restrictions.include?(:term)
    # TERMorCURIEorAbsIRI
    # If the value is an NCName, then it is evaluated as a term according to General Use of Terms in
    # Attributes. Note that this step may mean that the value is to be ignored.
    uri = process_term(element, value.to_s, options)
    add_debug(element) {"process_uri: #{value} => term => <#{uri}>"}
    uri
  else
    # SafeCURIEorCURIEorIRI or TERMorCURIEorAbsIRI
    # Otherwise, the value is evaluated as a CURIE.
    # If it is a valid CURIE, the resulting URI is used; otherwise, the value will be processed as a URI.
    uri = curie_to_resource_or_bnode(element, value, options[:uri_mappings], evaluation_context.parent_subject, restrictions)
    if uri
      add_debug(element) {"process_uri: #{value} => CURIE => <#{uri}>"}
    elsif @version == :"rdfa1.0" && value.to_s.match(/^xml/i)
      # Special case to not allow anything starting with XML to be treated as a URI
    elsif restrictions.include?(:absuri) || restrictions.include?(:uri)
      begin
        # AbsURI does not use xml:base
        if restrictions.include?(:absuri)
          uri = uri(value)
          unless uri.absolute?
            # FIX: warn before discarding, so the message names the offending
            # IRI (previously the warning interpolated the already-nil'd var
            # and always printed "Malformed IRI nil").
            add_warning(element, "Malformed IRI #{uri.inspect}")
            uri = nil
          end
        else
          uri = uri(base, Addressable::URI.parse(value))
        end
      rescue Addressable::URI::InvalidURIError => e
        add_warning(element, "Malformed IRI #{value}")
      rescue RDF::ReaderError => e
        add_debug(element, e.message)
        # FIX: the original pattern /^\(^\w\):/ could never match a prefixed
        # name (it required a literal "("), making the "Undefined prefix"
        # branch unreachable and leaving $1 unset.
        if value.to_s =~ /^(\w+):/
          add_warning(element, "Undefined prefix #{$1}")
        else
          add_warning(element, "Relative URI #{value}")
        end
      end
      add_debug(element) {"process_uri: #{value} => URI => <#{uri}>"}
    end
    uri
  end
end
# [7.4.3] General Use of Terms in Attributes
#
# Resolves a bare term to an IRI using the local default vocabulary or the
# term mappings. Returns nil (after emitting a warning) when the term has
# no associated IRI and must be ignored.
def process_term(element, value, options)
  if options[:vocab]
    # A local default vocabulary is in effect: concatenate vocab IRI + term.
    return uri(options[:vocab] + value)
  elsif options[:term_mappings].is_a?(Hash)
    mappings = options[:term_mappings]
    key = value.to_s.to_sym
    # Exact (case-sensitive) match wins.
    return uri(mappings[key]) if mappings.has_key?(key)
    # Otherwise fall back to the first case-insensitive match.
    mappings.each_pair do |term, mapped|
      return uri(mapped) if term.to_s.downcase == value.to_s.downcase
    end
  end
  # Finally, if there is no local default vocabulary, the term has no associated URI and must be ignored.
  add_warning(element, "Term #{value} is not defined", RDF::RDFA.UnresolvedTerm)
  nil
end
# From section 6. CURIE Syntax Definition
#
# Expands a CURIE to a resource or blank node. Prefix mappings for CURIEs
# default to XHV rather than the default document namespace. Returns nil
# when the value is not a CURIE (no colon) or its prefix is unmapped.
def curie_to_resource_or_bnode(element, curie, uri_mappings, subject, restrictions)
  prefix, reference = curie.to_s.split(":", 2)
  if prefix == "_" && restrictions.include?(:bnode)
    # Blank-node CURIE. A non-nil name is forced so the same label always
    # maps to the same bnode; "_:" by itself names one specific bnode.
    bnode(reference)
  elsif curie.to_s.start_with?(":")
    # Default prefix maps into the XHV vocabulary.
    RDF::XHV[reference.to_s]
  elsif !curie.to_s.include?(":")
    # No prefix: not a CURIE here (it is evaluated as a term elsewhere).
    nil
  else
    # Prefixes are compared case-insensitively from RDFa 1.1 on.
    prefix = prefix.to_s.downcase unless @version == :"rdfa1.0"
    add_debug(element) do
      "curie_to_resource_or_bnode check for #{prefix.to_s.to_sym.inspect} in #{uri_mappings.inspect}"
    end
    ns = uri_mappings[prefix.to_s.to_sym]
    if ns
      uri(ns + reference.to_s)
    else
      add_debug(element) {"curie_to_resource_or_bnode No namespace mapping for #{prefix.inspect}"}
      nil
    end
  end
end
# Builds an RDF::URI from value, optionally joined against append, applying
# the reader's validate/canonicalize/intern settings.
def uri(value, append = nil)
  result = RDF::URI.new(value)
  result = result.join(append) if append
  result.validate! if validate?
  result.canonicalize! if canonicalize?
  intern? ? RDF::URI.intern(result) : result
end
end
end
|
# Namespace for the Rebuild gem.
module Rebuild
# Gem release version string.
VERSION = '0.2.4'
end
Bump version to 0.2.5
# Namespace for the Rebuild gem.
module Rebuild
# Gem release version string.
VERSION = '0.2.5'
end
|
require_relative '../analyzer.rb'
# GladeGUI-backed main window of the code analyzer: runs the Analyzer over a
# user-chosen folder and swaps statistics views in and out of the
# 'dataviewport' container. Handler names match the Glade signal bindings.
class View
  include GladeGUI

  # Prompts for a folder before the window is shown; on accept, analyzes it
  # and seeds the data viewport with the general statistics view.
  def before_show()
    dialog_response, dialog = show_file_chooser_dialog
    if dialog_response == Gtk::Dialog::RESPONSE_ACCEPT
      analyzer = Analyzer::Analyzer.new
      @classes, @methods, @smells = analyzer.analyze(dialog.filename)
      @general_stats_view = build_general_stats_view
      @builder['dataviewport'].add(@general_stats_view)
    end
    dialog.destroy
  end

  #########################################
  # On button clicked methods — each clears the viewport, lazily builds its
  # view, and installs it.

  def on_general_stats_button_clicked
    clean_data_view
    unless @general_stats_view
      @general_stats_view = build_general_stats_view
    end
    @builder['dataviewport'].add(@general_stats_view)
  end

  def on_class_stats_button_clicked
    clean_data_view
    unless @class_stats_view
      @class_stats_view = build_class_stats_view
    end
    @builder['dataviewport'].add(@class_stats_view)
  end

  # Class diagram view is not implemented yet; shows an empty text view.
  def on_class_diag_button_clicked
    clean_data_view
    unless @class_diag_view
    end
    @builder['dataviewport'].add(Gtk::TextView.new)
  end

  # Class dependency view is not implemented yet; shows an empty text view.
  def on_class_dep_button_clicked
    clean_data_view
    unless @class_dep_view
    end
    @builder['dataviewport'].add(Gtk::TextView.new)
  end

  def on_method_stats_button_clicked
    clean_data_view
    unless @method_stats_view
      @method_stats_view = build_method_stats_view
    end
    @builder['dataviewport'].add(@method_stats_view)
  end

  # Method diagram view is not implemented yet; shows an empty text view.
  def on_method_diag_button_clicked
    clean_data_view
    unless @method_diag_view
    end
    @builder['dataviewport'].add(Gtk::TextView.new)
  end

  def on_if_smell_button_clicked
    clean_data_view
    unless @if_smell_view
      @if_smell_view = build_if_smell_view
    end
    @builder['dataviewport'].add(@if_smell_view)
  end

  def on_method_smell_button_clicked
    clean_data_view
    unless @method_smell_view
      @method_smell_view = build_method_smell_view
    end
    @builder['dataviewport'].add(@method_smell_view)
  end

  ###########################################
  # View builders

  def build_general_stats_view
    text_arr = ["Total number of classes: #{@classes.length} \n"]
    text_arr << "Total number of methods: #{@methods.length}"
    build_text_view(text_arr.join)
  end

  def build_class_stats_view
    text_arr = []
    build_text_view(text_arr.join)
  end

  def build_method_stats_view
    text_arr = []
    build_text_view(text_arr.join)
  end

  def build_if_smell_view
    text_arr = []
    build_text_view(text_arr.join)
  end

  def build_method_smell_view
    text_arr = []
    build_text_view(text_arr.join)
  end

  # Builds a read-only Gtk::TextView holding +text+.
  # FIX: call #show on the new widget — widgets added to an already-visible
  # container remain invisible until explicitly shown.
  def build_text_view(text)
    stats_view = Gtk::TextView.new
    stats_view.editable = false
    stats_view.cursor_visible = false
    stats_view.buffer.text = text
    stats_view.show
    stats_view
  end

  ##########################################

  # Opens a folder-selection dialog and returns [response_code, dialog].
  # The caller is responsible for destroying the dialog.
  def show_file_chooser_dialog
    dialog = Gtk::FileChooserDialog.new("Open File",
                                        nil,
                                        Gtk::FileChooser::ACTION_SELECT_FOLDER,
                                        nil,
                                        [Gtk::Stock::CANCEL, Gtk::Dialog::RESPONSE_CANCEL],
                                        [Gtk::Stock::OPEN, Gtk::Dialog::RESPONSE_ACCEPT])
    dialog_response = dialog.run
    return dialog_response, dialog
  end

  # Removes every child from the data viewport before a new view is added.
  def clean_data_view
    @builder['dataviewport'].each do |child|
      @builder['dataviewport'].remove(child)
    end
  end

  private :build_general_stats_view, :build_class_stats_view,
          :build_method_stats_view, :build_if_smell_view,
          :build_method_smell_view, :build_text_view
end
Update view
Added simple class and method stats.
require_relative '../analyzer.rb'
# GladeGUI-backed main window of the code analyzer: runs the Analyzer over a
# user-chosen folder and swaps statistics views in and out of the
# 'dataviewport' container. Handler names match the Glade signal bindings.
class View
include GladeGUI
# Prompts for a folder before the window is shown; on accept, analyzes it
# and seeds the data viewport with the general statistics view.
def before_show()
dialog_response, dialog = show_file_chooser_dialog
if dialog_response == Gtk::Dialog::RESPONSE_ACCEPT
analyzer = Analyzer::Analyzer.new
@classes, @methods, @smells = analyzer.analyze(dialog.filename)
@general_stats_view = build_general_stats_view
@builder['dataviewport'].add(@general_stats_view)
end
dialog.destroy
end
#########################################
# On button clicked methods — each clears the viewport, lazily builds its
# view (memoized in an ivar), and installs it.
def on_general_stats_button_clicked
clean_data_view
unless @general_stats_view
@general_stats_view = build_general_stats_view
end
@builder['dataviewport'].add(@general_stats_view)
end
def on_class_stats_button_clicked
clean_data_view
unless @class_stats_view
@class_stats_view = build_class_stats_view
end
@builder['dataviewport'].add(@class_stats_view)
end
# Not implemented yet; shows an empty text view.
def on_class_diag_button_clicked
clean_data_view
unless @class_diag_view
end
@builder['dataviewport'].add(Gtk::TextView.new)
end
# Not implemented yet; shows an empty text view.
def on_class_dep_button_clicked
clean_data_view
unless @class_dep_view
end
@builder['dataviewport'].add(Gtk::TextView.new)
end
def on_method_stats_button_clicked
clean_data_view
unless @method_stats_view
@method_stats_view = build_method_stats_view
end
@builder['dataviewport'].add(@method_stats_view)
end
# Not implemented yet; shows an empty text view.
def on_method_diag_button_clicked
clean_data_view
unless @method_diag_view
end
@builder['dataviewport'].add(Gtk::TextView.new)
end
def on_if_smell_button_clicked
clean_data_view
unless @if_smell_view
@if_smell_view = build_if_smell_view
end
@builder['dataviewport'].add(@if_smell_view)
end
def on_method_smell_button_clicked
clean_data_view
unless @method_smell_view
@method_smell_view = build_method_smell_view
end
@builder['dataviewport'].add(@method_smell_view)
end
###########################################
# View builders
# Totals for classes, methods and lines of code.
# NOTE(review): assumes each analyzer method object responds to #lines
# with a numeric line count — confirm against the Analyzer model.
def build_general_stats_view
lines = 0
@methods.each do |method|
lines += method.lines
end
text_arr = ["Total number of classes: #{@classes.length} \n"]
text_arr << "Total number of methods: #{@methods.length} \n"
text_arr << "Total number of lines of code: #{lines}"
build_text_view(text_arr.join)
end
# NOTE(review): .sort.pop picks the "largest" class, which assumes the
# analyzer's class objects are Comparable by size — confirm. Also raises
# if @classes is empty; verify the analyzer guarantees at least one class.
def build_class_stats_view
largest_class = @classes.sort.pop
text_arr = ["Largest class: #{largest_class} \n"]
text_arr << "Number of lines: #{largest_class.lines}"
build_text_view(text_arr.join)
end
# NOTE(review): same Comparable/empty-collection assumptions as above.
def build_method_stats_view
largest_method = @methods.sort.pop
text_arr = ["Largest method: #{largest_method} \n"]
text_arr << "Number of lines: #{largest_method.lines}"
build_text_view(text_arr.join)
end
# Placeholder: no smell data rendered yet.
def build_if_smell_view
text_arr = []
build_text_view(text_arr.join)
end
# Placeholder: no smell data rendered yet.
def build_method_smell_view
text_arr = []
build_text_view(text_arr.join)
end
# Builds a read-only Gtk::TextView holding +text+. #show makes the widget
# visible when it is added to an already-realized container.
def build_text_view(text)
stats_view = Gtk::TextView.new
stats_view.editable = false
stats_view.cursor_visible = false
stats_view.buffer.text = text
stats_view.show
stats_view
end
##########################################
# Opens a folder-selection dialog and returns [response_code, dialog].
# The caller is responsible for destroying the dialog.
def show_file_chooser_dialog
dialog = Gtk::FileChooserDialog.new("Open File",
nil,
Gtk::FileChooser::ACTION_SELECT_FOLDER,
nil,
[Gtk::Stock::CANCEL, Gtk::Dialog::RESPONSE_CANCEL],
[Gtk::Stock::OPEN, Gtk::Dialog::RESPONSE_ACCEPT])
dialog_response = dialog.run
return dialog_response, dialog
end
# Removes every child from the data viewport before a new view is added.
def clean_data_view
@builder['dataviewport'].each do |child|
@builder['dataviewport'].remove(child)
end
end
private :build_general_stats_view, :build_class_stats_view,
:build_method_stats_view, :build_if_smell_view,
:build_method_smell_view, :build_text_view
end
|
require 'redactor2_rails/version'
require 'orm_adapter'
# Top-level namespace and configuration for the redactor2-rails integration.
module Redactor2Rails
# MIME types accepted for image and generic file uploads.
IMAGE_TYPES = ['image/jpeg', 'image/png', 'image/gif', 'image/jpg', 'image/pjpeg', 'image/tiff', 'image/x-png']
FILE_TYPES = ['application/msword', 'application/pdf', 'text/plain', 'text/rtf', 'application/vnd.ms-excel']
# Lazily loaded helpers.
autoload :Http, 'redactor2_rails/http'
autoload :Devise, 'redactor2_rails/devise'
module Backend
autoload :CarrierWave, 'redactor2_rails/backend/carrierwave'
end
require 'redactor2_rails/orm/base'
require 'redactor2_rails/orm/active_record'
require 'redactor2_rails/orm/mongoid'
require 'redactor2_rails/engine'
# Allowed upload file extensions; overridable via the generated writers.
mattr_accessor :images_file_types, :files_file_types
@@images_file_types = ['jpg', 'jpeg', 'png', 'gif', 'tiff']
@@files_file_types = ['pdf', 'doc', 'docx', 'xls', 'xlsx', 'rtf', 'txt']
# Model class used to persist uploaded images.
def self.image_model
Redactor2Rails::Image
end
# Model class used to persist uploaded files.
def self.file_model
Redactor2Rails::File
end
# Devise scope that owns uploads.
def self.devise_user
:user
end
# Foreign-key column name for the devise scope, e.g. :user_id.
def self.devise_user_key
"#{self.devise_user.to_s}_id".to_sym
end
end
Add the ability to override the main config values, e.g. `Redactor2Rails.devise_user = :admin`
require 'redactor2_rails/version'
require 'orm_adapter'
# Top-level namespace and configuration for the redactor2-rails integration.
# Settings (devise_user, image_model, file_model, file-type lists) can be
# overridden via the generated writers, e.g. `Redactor2Rails.devise_user = :admin`.
module Redactor2Rails
  # MIME types accepted for image and generic file uploads.
  IMAGE_TYPES = ['image/jpeg', 'image/png', 'image/gif', 'image/jpg', 'image/pjpeg', 'image/tiff', 'image/x-png']
  FILE_TYPES = ['application/msword', 'application/pdf', 'text/plain', 'text/rtf', 'application/vnd.ms-excel']
  autoload :Http, 'redactor2_rails/http'
  autoload :Devise, 'redactor2_rails/devise'
  module Backend
    autoload :CarrierWave, 'redactor2_rails/backend/carrierwave'
  end
  require 'redactor2_rails/orm/base'
  require 'redactor2_rails/orm/active_record'
  require 'redactor2_rails/orm/mongoid'
  require 'redactor2_rails/engine'
  mattr_accessor :images_file_types, :files_file_types, :devise_user, :image_model, :file_model
  @@images_file_types = ['jpg', 'jpeg', 'png', 'gif', 'tiff']
  @@files_file_types = ['pdf', 'doc', 'docx', 'xls', 'xlsx', 'rtf', 'txt']
  # Initialize the overridable settings so the readers below can safely
  # reference the class variables before any writer has run.
  @@devise_user = nil
  @@image_model = nil
  @@file_model = nil

  # FIX: the mattr_accessor writers store into class variables (@@image_model
  # etc.), but these custom readers previously read class-*instance*
  # variables (@image_model), so assignments such as
  # `Redactor2Rails.devise_user = :admin` were silently ignored.

  # Model class used to persist uploaded images (default: Redactor2Rails::Image).
  def self.image_model
    @@image_model || Redactor2Rails::Image
  end

  # Model class used to persist uploaded files (default: Redactor2Rails::File).
  def self.file_model
    @@file_model || Redactor2Rails::File
  end

  # Devise scope that owns uploads (default: :user).
  def self.devise_user
    @@devise_user || :user
  end

  # Foreign-key column name for the devise scope, e.g. :user_id.
  def self.devise_user_key
    "#{self.devise_user.to_s}_id".to_sym
  end
end
|
module Redchick
  # Formatting helpers that render a tweet-like object as display strings.
  module Layout
    # One-line rendering: right-aligned (width 15) screen name, then the text.
    def self.simple(t)
      format('%15s: %s', t.user.screen_name, t.text)
    end

    # Multi-line rendering: author line, body, engagement counts, separator.
    def self.basic(t)
      header = "#{t.user.name} @#{t.user.screen_name} #{t.created_at}"
      counts = "rt: #{t.retweet_count}, like: #{t.favorite_count}"
      [header, t.text, counts, "--", ""].join("\n")
    end
  end
end
Show the tweet id in the basic layout
module Redchick
  # Formatting helpers that render a tweet-like object as display strings.
  module Layout
    # One-line rendering: right-aligned (width 15) screen name, then the text.
    def self.simple(t)
      format('%15s: %s', t.user.screen_name, t.text)
    end

    # Multi-line rendering: author line, body, counts (incl. id), separator.
    def self.basic(t)
      header = "#{t.user.name} @#{t.user.screen_name} #{t.created_at}"
      counts = "rt: #{t.retweet_count}, like: #{t.favorite_count}, id: #{t.id}"
      [header, t.text, counts, "--", ""].join("\n")
    end
  end
end
|
# Bridges the auto-time-entry workflow to Redmine's ActiveRecord models.
class RedmineAdapter
  # Issue id of the ticket that collects auto-generated time entries.
  AUTO_TIME_ENTRIES_TICKET = 916

  # Time entries parked on the auto-entries ticket, awaiting reassignment.
  def time_entries_to_assign
    TimeEntry.where(issue_id: RedmineAdapter::AUTO_TIME_ENTRIES_TICKET)
  end

  # Maps a global (project-independent) activity name to its id; the
  # name => id table is built once and memoized.
  def id_for_activity_name name
    if @mapping.nil?
      @mapping = {}
      TimeEntryActivity.where(project_id: nil).each { |t| @mapping[t.name] = t.id }
    end
    @mapping[name]
  end

  # Returns an unsaved copy of the given time entry.
  def duplicate_time_entry time_entry
    time_entry.dup
  end

  # Runs the block inside a TimeEntry database transaction.
  def transaction
    TimeEntry.transaction do
      yield
    end
  end

  # Tries successively broader activity fallbacks until the entry validates.
  # FIX: the project-scoped lookup assigned the whole relation instead of a
  # record — the `.first` call was missing.
  def set_default_time_entry_activity(new_time_entry)
    new_time_entry.activity = TimeEntryActivity.where(project_id: new_time_entry.issue.project_id).first unless new_time_entry.valid?
    new_time_entry.activity = TimeEntryActivity.default unless new_time_entry.valid?
    new_time_entry.activity = TimeEntryActivity.where(name: "Entwicklung").first unless new_time_entry.valid?
    new_time_entry.activity = TimeEntryActivity.where(parent_id: nil, project_id: nil).first unless new_time_entry.valid?
  end
end
Add missing `.first` call to the project-scoped activity lookup
# Bridges the auto-time-entry workflow to Redmine's ActiveRecord models.
class RedmineAdapter
  # Issue id of the ticket that collects auto-generated time entries.
  AUTO_TIME_ENTRIES_TICKET = 916

  # Time entries parked on the auto-entries ticket, awaiting reassignment.
  def time_entries_to_assign
    TimeEntry.where(issue_id: RedmineAdapter::AUTO_TIME_ENTRIES_TICKET)
  end

  # Maps a global (project-independent) activity name to its id; the
  # name => id table is built once and memoized.
  def id_for_activity_name name
    @mapping ||= TimeEntryActivity.where(project_id: nil)
                                  .each_with_object({}) { |activity, table| table[activity.name] = activity.id }
    @mapping[name]
  end

  # Returns an unsaved copy of the given time entry.
  def duplicate_time_entry time_entry
    time_entry.dup
  end

  # Runs the block inside a TimeEntry database transaction.
  def transaction(&block)
    TimeEntry.transaction(&block)
  end

  # Walks a list of increasingly generic activity candidates (lazily
  # evaluated), assigning each until the entry validates.
  def set_default_time_entry_activity(new_time_entry)
    fallbacks = [
      -> { TimeEntryActivity.where(project_id: new_time_entry.issue.project_id).first },
      -> { TimeEntryActivity.default },
      -> { TimeEntryActivity.where(name: "Entwicklung").first },
      -> { TimeEntryActivity.where(parent_id: nil, project_id: nil).first }
    ]
    fallbacks.each do |candidate|
      break if new_time_entry.valid?
      new_time_entry.activity = candidate.call
    end
  end
end
|
# Module RedSnow
#
# Top-level namespace for the RedSnow gem.
module RedSnow
# Gem version (release string).
VERSION = '0.3.2'
end
Prepare release 0.3.3
# Module RedSnow
#
# Top-level namespace for the RedSnow gem.
module RedSnow
# Gem version (release string).
VERSION = '0.3.3'
end
|
module RelaxDB
class Document
# Declares a persisted property: records it in the class's property list
# and generates a plain reader/writer pair for it.
def self.property(prop)
  # Class instance variables are not inherited, so the default properties
  # (_id/_rev) must be seeded explicitly on first use.
  @properties ||= [:_id, :_rev]
  @properties << prop
  # attr_accessor generates exactly the getter/setter pair the original
  # define_method calls produced (reads/writes @<prop>).
  attr_accessor prop
end
# All declared property names for this class (empty if none declared yet).
def self.properties
# Don't force clients to check that it's instantiated
@properties ||= []
end
# Instance-level convenience delegating to the class's property list.
def properties
self.class.properties
end
# Specifying these properties here is kinda ugly. Consider a better solution.
# _id/_rev mirror CouchDB's document metadata fields.
property :_id
property :_rev
# Builds a new document with a freshly generated _id; an optional hash
# seeds the instance attributes via set_attributes.
def initialize(hash=nil)
# The default _id will be overwritten if loaded from RelaxDB
self._id = UuidGenerator.uuid
set_attributes(hash) if hash
end
# Copies each key/value pair into an instance variable. Only used on
# creation — object references are resolved on demand. Values whose key
# ends in "_at" are coerced to a Time when possible, otherwise kept as-is.
def set_attributes(data)
  data.each do |key, val|
    coerced = if key =~ /_at$/
      (Time.local(*ParseDate.parsedate(val)) rescue val)
    else
      val
    end
    instance_variable_set("@#{key}".to_sym, coerced)
  end
end
# Developer-friendly rendering listing every set property plus the ids of
# belongs_to relationships (loaded targets are resolved to their _id).
def inspect
  out = "#<#{self.class}:#{self.object_id}"
  properties.each do |prop|
    val = instance_variable_get("@#{prop}".to_sym)
    out << ", #{prop}: #{val}" if val
  end
  belongs_to_rels.each do |rel|
    rel_id = instance_variable_get("@#{rel}_id".to_sym)
    if rel_id
      out << ", #{rel}_id: #{rel_id}"
    else
      target = instance_variable_get("@#{rel}".to_sym)
      out << ", #{rel}_id: #{target._id}" if target
    end
  end
  out << ">"
end
# Serializes the document for CouchDB storage: belongs_to surrogate ids,
# all non-nil properties, and a "class" marker used to re-instantiate the
# right type on load.
def to_json
data = {}
# Order is important - this codifies the relative importance of a relationship to its _id surrogate
# TODO: Revise - loading a parent just so the child can be saved could be considered donkey coding
belongs_to_rels.each do |relationship|
# Prefer the loaded parent's _id; fall back to a stored surrogate id.
parent = send(relationship)
if parent
data["#{relationship}_id"] = parent._id
else
id = instance_variable_get("@#{relationship}_id".to_sym)
data["#{relationship}_id"] = id if id
end
end
# Only non-nil properties are emitted.
properties.each do |prop|
prop_val = instance_variable_get("@#{prop}".to_sym)
data["#{prop}"] = prop_val if prop_val
end
data["class"] = self.class.name
data.to_json
end
# Persists the document via HTTP PUT to CouchDB, stamping created_at on
# first save and refreshing _rev from the response. Returns self.
def save
set_created_at_if_new
resp = RelaxDB::Database.std_db.put("#{_id}", to_json)
self._rev = JSON.parse(resp.body)["rev"]
self
end
# Stamps @created_at on unsaved documents (_rev is nil until first save)
# that declare a created_at property.
# FIX: `methods.include? "created_at"` never matches on Ruby >= 1.9, where
# #methods returns Symbols; respond_to? works on every Ruby version.
def set_created_at_if_new
  if respond_to?(:created_at) && _rev.nil?
    instance_variable_set(:@created_at, Time.now)
  end
end
# has_many methods
# Lazily builds and caches (in @proxy_<rel_name>) the HasManyProxy that
# backs a has_many relationship; returns the proxy.
def has_many_proxy(rel_name, opts=nil)
  ivar = "@proxy_#{rel_name}".to_sym
  cached = instance_variable_get(ivar)
  instance_variable_set(ivar, cached || HasManyProxy.new(self, rel_name, opts))
end
# Declares a one-to-many relationship. The generated reader returns the
# HasManyProxy; assignment is deliberately unsupported for now.
def self.has_many(relationship, opts=nil)
define_method(relationship) do
has_many_proxy(relationship, opts)
end
define_method("#{relationship}=") do
raise "You may not currently assign to a has_many relationship - to be implemented"
end
end
# has_one methods
# Lazily builds and caches (in @proxy_<rel_name>) the HasOneProxy that
# backs a has_one relationship; returns the proxy.
def has_one_proxy(rel_name)
  ivar = "@proxy_#{rel_name}".to_sym
  cached = instance_variable_get(ivar)
  instance_variable_set(ivar, cached || HasOneProxy.new(self, rel_name))
end
# Declares a one-to-one relationship; reader and writer delegate to the
# proxy's target.
# NOTE(review): opts is accepted but never used — confirm intended.
def self.has_one(rel_name, opts=nil)
define_method(rel_name) do
has_one_proxy(rel_name).target
end
define_method("#{rel_name}=") do |new_target|
has_one_proxy(rel_name).target = new_target
end
end
# belongs_to methods
# Lazily builds and caches (in @proxy_<rel_name>) the BelongsToProxy for
# the named relationship; returns the proxy.
def belongs_to_proxy(rel_name)
  ivar = "@proxy_#{rel_name}".to_sym
  cached = instance_variable_get(ivar)
  instance_variable_set(ivar, cached || BelongsToProxy.new(self, rel_name))
end
# Declare a belongs_to relationship. The name is recorded in
# @belongs_to_rels (consumed by to_json and inspect); reader and writer
# delegate to the lazily-built BelongsToProxy's target.
def self.belongs_to(rel_name)
  (@belongs_to_rels ||= []) << rel_name
  define_method(rel_name) { belongs_to_proxy(rel_name).target }
  define_method("#{rel_name}=") { |new_target| belongs_to_proxy(rel_name).target = new_target }
end
# Names of the belongs_to relationships declared on this class, lazily
# initialised so callers never see nil.
# NOTE(review): class *instance* variable - not shared with subclasses.
def self.belongs_to_rels
# Don't force clients to check that it's instantiated
@belongs_to_rels ||= []
end
# Instance-side convenience delegating to the class-level list.
def belongs_to_rels
self.class.belongs_to_rels
end
# Fetch every stored instance of this class via its "all" CouchDB view.
# If the GET fails the view is (re)created on the class's design document
# and the query retried once.
# NOTE(review): *any* error triggers the create-and-retry path, not just a
# missing view, and the rescued exception is discarded.
def self.all
database = RelaxDB::Database.std_db
view_path = "_view/#{self}/all"
begin
resp = database.get(view_path)
rescue => e
DesignDocument.get(self).add_all_view.save
resp = database.get(view_path)
end
objects_from_view_response(resp.body)
end
# As method names go, I'm not too enamoured with all_by - Post.all.sort_by might be nice
# Query instances keyed by the given attributes, e.g. Post.all_by(:created_at).
# An optional block receives the Query for refinement (key ranges etc.).
# Like .all, the backing view is created on demand if the first GET fails.
# NOTE(review): logs every query to stdout via puts - consider a logger.
def self.all_by(*atts)
database = RelaxDB::Database.std_db
q = Query.new(self.name, *atts)
yield q if block_given?
puts "RelaxDB submitting query to #{q.view_path}"
begin
resp = database.get(q.view_path)
rescue => e
DesignDocument.get(self).add_view_to_data(q.view_name, q.map_function).save
resp = database.get(q.view_path)
end
objects_from_view_response(resp.body)
end
# Should be able to take a query object too
# Run an arbitrary named view for this class and return the resulting
# documents. Unlike .all / .all_by the view is NOT created on demand, so
# it must already exist on the design document.
def self.view(view_name)
resp = RelaxDB::Database.std_db.get("_view/#{self}/#{view_name}")
objects_from_view_response(resp.body)
end
# Convert a raw CouchDB view response body into an array of document
# objects, one per row, via RelaxDB.create_from_hash.
#
# Fix: the original accumulated into the class-level ivar @objects, which
# leaked the last result on the class and was unsafe under concurrent use;
# a plain map keeps the result local.
def self.objects_from_view_response(resp_body)
  rows = JSON.parse(resp_body)["rows"]
  rows.map { |row| RelaxDB.create_from_hash(row["value"]) }
end
# TODO: Destroy should presumably destroy all children
# Destroy semantics in AR are that all callbacks are invoked (as opposed to delete)
# Destroy is also used by DM. To destroy all, DM uses e.g. Post.all.destroy! see below
# http://groups.google.com/group/datamapper/browse_thread/thread/866ead34237f0e7b
# Returning something other than the http response would be good too
# DELETE this document (by _id at its current _rev) from CouchDB.
# Children are NOT destroyed - see TODO above. Returns the raw response.
def destroy!
RelaxDB::Database.std_db.delete("#{_id}?rev=#{_rev}")
end
# Destroy every stored instance of this class, one DELETE per document.
# TODO: Meh! Use bulk update to do this efficiently.
# The class's DesignDocument is deliberately left intact. Should it be? probably...
def self.destroy_all!
  all.each(&:destroy!)
end
end
# Convenience alias for load_by_id.
def self.load(id)
self.load_by_id(id)
end
# GET the document with the given id and reconstitute it as an instance
# of the class recorded in its "class" attribute.
def self.load_by_id(id)
database = RelaxDB::Database.std_db
resp = database.get("#{id}")
data = JSON.parse(resp.body)
create_from_hash(data)
end
# Build a document object from a parsed CouchDB hash. The "class" entry
# (written by Document#to_json) selects the Ruby class; the remaining
# entries become the new instance's attributes.
# NOTE(review): Module.const_get on database-supplied data - safe only as
# long as stored documents are trusted; verify upstream.
def self.create_from_hash(data)
# revise use of string 'class' - it's a reserved word in JavaScript
klass = data.delete("class")
k = Module.const_get(klass)
k.new(data)
end
end
Add bulk save
module RelaxDB
class Document
# Define properties and property methods
def self.property(prop)
# Class instance varibles are not inherited, so the default properties must be explicitly listed
# Perhaps a better solution exists. Revise. I think Merb extlib contains a solution for this...
@properties ||= [:_id, :_rev]
@properties << prop
define_method(prop) do
instance_variable_get("@#{prop}".to_sym)
end
define_method("#{prop}=") do |val|
instance_variable_set("@#{prop}".to_sym, val)
end
end
def self.properties
# Don't force clients to check that it's instantiated
@properties ||= []
end
def properties
self.class.properties
end
# Specifying these properties here is kinda ugly. Consider a better solution.
property :_id
property :_rev
def initialize(hash=nil)
# The default _id will be overwritten if loaded from RelaxDB
self._id = UuidGenerator.uuid
set_attributes(hash) if hash
end
def set_attributes(data)
data.each do |key, val|
# Only set instance variables on creation - object references are resolved on demand
# If the variable name ends in _at try to convert it to a Time
if key =~ /_at$/
val = Time.local(*ParseDate.parsedate(val)) rescue val
end
instance_variable_set("@#{key}".to_sym, val)
end
end
def inspect
s = "#<#{self.class}:#{self.object_id}"
properties.each do |prop|
prop_val = instance_variable_get("@#{prop}".to_sym)
s << ", #{prop}: #{prop_val}" if prop_val
end
belongs_to_rels.each do |relationship|
id = instance_variable_get("@#{relationship}_id".to_sym)
if id
s << ", #{relationship}_id: #{id}" if id
else
obj = instance_variable_get("@#{relationship}".to_sym)
s << ", #{relationship}_id: #{obj._id}" if obj
end
end
s << ">"
end
def to_json
data = {}
# Order is important - this codifies the relative importance of a relationship to its _id surrogate
# TODO: Revise - loading a parent just so the child can be saved could be considered donkey coding
belongs_to_rels.each do |relationship|
parent = send(relationship)
if parent
data["#{relationship}_id"] = parent._id
else
id = instance_variable_get("@#{relationship}_id".to_sym)
data["#{relationship}_id"] = id if id
end
end
properties.each do |prop|
prop_val = instance_variable_get("@#{prop}".to_sym)
data["#{prop}"] = prop_val if prop_val
end
data["class"] = self.class.name
data.to_json
end
def save
set_created_at_if_new
resp = RelaxDB::Database.std_db.put("#{_id}", to_json)
self._rev = JSON.parse(resp.body)["rev"]
self
end
# Stamp created_at on first save only: the object declares a created_at
# property and has no _rev yet (i.e. it was never stored in CouchDB).
#
# Fix: the original tested `methods.include? "created_at"`, which silently
# stops matching on Ruby 1.9+ where #methods returns Symbols, not Strings.
# respond_to? behaves correctly on all Ruby versions.
def set_created_at_if_new
  if respond_to?(:created_at) && _rev.nil?
    instance_variable_set(:@created_at, Time.now)
  end
end
# has_many methods
def has_many_proxy(rel_name, opts=nil)
proxy_sym = "@proxy_#{rel_name}".to_sym
proxy = instance_variable_get(proxy_sym)
proxy ||= HasManyProxy.new(self, rel_name, opts)
instance_variable_set(proxy_sym, proxy)
proxy
end
def self.has_many(relationship, opts=nil)
define_method(relationship) do
has_many_proxy(relationship, opts)
end
define_method("#{relationship}=") do
raise "You may not currently assign to a has_many relationship - to be implemented"
end
end
# has_one methods
def has_one_proxy(rel_name)
proxy_sym = "@proxy_#{rel_name}".to_sym
proxy = instance_variable_get(proxy_sym)
proxy ||= HasOneProxy.new(self, rel_name)
instance_variable_set(proxy_sym, proxy)
proxy
end
def self.has_one(rel_name, opts=nil)
define_method(rel_name) do
has_one_proxy(rel_name).target
end
define_method("#{rel_name}=") do |new_target|
has_one_proxy(rel_name).target = new_target
end
end
# belongs_to methods
# Creates and returns the proxy for the named relationship
def belongs_to_proxy(rel_name)
proxy_sym = "@proxy_#{rel_name}".to_sym
proxy = instance_variable_get(proxy_sym)
proxy ||= BelongsToProxy.new(self, rel_name)
instance_variable_set(proxy_sym, proxy)
proxy
end
def self.belongs_to(rel_name)
@belongs_to_rels ||= []
@belongs_to_rels << rel_name
define_method(rel_name) do
belongs_to_proxy(rel_name).target
end
define_method("#{rel_name}=") do |new_target|
belongs_to_proxy(rel_name).target = new_target
end
end
def self.belongs_to_rels
# Don't force clients to check that it's instantiated
@belongs_to_rels ||= []
end
def belongs_to_rels
self.class.belongs_to_rels
end
def self.all
database = RelaxDB::Database.std_db
view_path = "_view/#{self}/all"
begin
resp = database.get(view_path)
rescue => e
DesignDocument.get(self).add_all_view.save
resp = database.get(view_path)
end
objects_from_view_response(resp.body)
end
# As method names go, I'm not too enamoured with all_by - Post.all.sort_by might be nice
def self.all_by(*atts)
database = RelaxDB::Database.std_db
q = Query.new(self.name, *atts)
yield q if block_given?
puts "RelaxDB submitting query to #{q.view_path}"
begin
resp = database.get(q.view_path)
rescue => e
DesignDocument.get(self).add_view_to_data(q.view_name, q.map_function).save
resp = database.get(q.view_path)
end
objects_from_view_response(resp.body)
end
# Should be able to take a query object too
def self.view(view_name)
resp = RelaxDB::Database.std_db.get("_view/#{self}/#{view_name}")
objects_from_view_response(resp.body)
end
# Convert a raw CouchDB view response body into an array of document
# objects, one per row, via RelaxDB.create_from_hash.
#
# Fix: the original accumulated into the class-level ivar @objects, which
# leaked the last result on the class and was unsafe under concurrent use;
# a plain map keeps the result local.
def self.objects_from_view_response(resp_body)
  rows = JSON.parse(resp_body)["rows"]
  rows.map { |row| RelaxDB.create_from_hash(row["value"]) }
end
# TODO: Destroy should presumably destroy all children
# Destroy semantics in AR are that all callbacks are invoked (as opposed to delete)
# Destroy is also used by DM. To destroy all, DM uses e.g. Post.all.destroy! see below
# http://groups.google.com/group/datamapper/browse_thread/thread/866ead34237f0e7b
# Returning something other than the http response would be good too
def destroy!
RelaxDB::Database.std_db.delete("#{_id}?rev=#{_rev}")
end
# TODO: Meh! Use bulk update to do this efficiently
# Leaves the corresponding DesignDoc for this class intact. Should it? probably...
def self.destroy_all!
self.all.each do |o|
o.destroy!
end
end
end
# POST all given documents to CouchDB in one _bulk_docs request.
#
# Fix: Document#to_json returns an already-serialised JSON *string*, so
# `{ "docs" => objs }.to_json` embedded each document as a quoted string
# rather than a JSON object. Re-parsing each document's JSON yields the
# hash form CouchDB's _bulk_docs endpoint expects.
# NOTE(review): unlike #save, the revisions returned by the server are not
# written back to the objects' _rev - a later individual save may conflict.
def self.bulk_save(*objs)
  database = RelaxDB::Database.std_db
  docs = objs.map { |o| JSON.parse(o.to_json) }
  database.post("_bulk_docs", { "docs" => docs }.to_json)
end
def self.load(id)
self.load_by_id(id)
end
def self.load_by_id(id)
database = RelaxDB::Database.std_db
resp = database.get("#{id}")
data = JSON.parse(resp.body)
create_from_hash(data)
end
def self.create_from_hash(data)
# revise use of string 'class' - it's a reserved word in JavaScript
klass = data.delete("class")
k = Module.const_get(klass)
k.new(data)
end
end |
module RenderPipeline
include HTML
# html-pipeline filter wrapping paragraphs / line breaks with Rails'
# simple_format helper. Inputs of 64KB or more are passed through
# untouched to avoid formatting huge payloads.
class SimpleFormatFilter < Pipeline::Filter
include ActionView::Helpers::TextHelper
def call
if html.size < 64000
# :sanitize => false - sanitisation is handled elsewhere in the pipeline
simple_format(html, {}, :sanitize => false)
else
html
end
end
end
# html-pipeline filter that HTML-escapes stray "<" characters which do not
# start a tag (i.e. "<" not followed by a letter or "/"), so text like
# "1 < 2" survives later HTML parsing.
class PreserveFormatting < Pipeline::Filter
  def call
    # Fix: the replacement must be the entity "&lt;" - replacing "<" with
    # itself was a no-op (apparent entity-decoding corruption). Args are
    # parenthesised to avoid Ruby's ambiguous-first-argument warning.
    html.gsub(%r{<([^/a-zA-Z])}, '&lt;\1')
  end
end
class BetterMentionFilter < Pipeline::MentionFilter
def self.mentioned_logins_in(text, username_pattern=Channel::UsernamePattern)
text.gsub Channel::MentionPatterns[Channel::UsernamePattern] do |match|
login = $1
yield match, login, false
end
end
def link_to_mention_info(text, info_url=nil)
Rails.logger.info context.inspect
self_mention = " own" if context[:user_login] && text.downcase == context[:user_login].downcase
return "@#{text}" if info_url.nil?
"<a href='#{info_url}' class='user-mention#{self_mention}'>" +
"@#{text}" +
"</a>"
end
end
class CustomEmojiFilter < Pipeline::EmojiFilter
def emoji_image_filter(text)
regex = emoji_pattern
text.gsub(regex) do |match|
emoji_image_tag($1)
end
end
def self.emoji_names
super + CustomEmoji.custom_emojis.map { |e| e[:aliases] }.flatten.sort
end
def self.emoji_pattern
last_update = CustomEmoji.last_update
if !@last_update || @last_update < last_update || !@emoji_pattern
@emoji_pattern = /:(#{emoji_names.map { |name| Regexp.escape(name) }.join('|')}):/
@last_update = last_update
end
@emoji_pattern
end
def emoji_image_tag(name)
"<img class='emoji' title=':#{name}:' alt=':#{name}:' src='#{emoji_url(name)}' height='20' width='20' align='absmiddle' />"
end
def emoji_url(name)
e = CustomEmoji.custom_emojis.find { |e| e[:aliases].include?(name) }
return e[:image] if e
super(name)
end
end
class AutoEmbedFilter < Pipeline::Filter
EMBEDS = {
redcursor: {
pattern: %r{https?://(#{(ENV["REDCURSOR_HOSTNAMES"] || "").gsub(/\./, "\\.").split(",").join("|")})/channels/([0-9]+(#[^ $]+)?)},
callback: proc do |content,id|
content.gsub(EMBEDS[:redcursor][:pattern], %{<a href="/channels/#{id}">#{Channel.find(id.to_i).title rescue "/channels/#{id}"}</a>})
end
},
redcursor_upload: {
pattern: %r{https?://files\.redcursor\.net/uploads/},
callback: proc do |content,id|
content.gsub(EMBEDS[:redcursor_upload][:pattern], %{https://redcursor.net/uploads/})
end
},
redcursor_tag: {
pattern: Channel::TagPattern,
callback: proc do |content, tag, id, m|
content.gsub(EMBEDS[:redcursor_tag][:pattern], %{#{m[1]}<a class="hash-tag" href="/channels/tags/#{m[3]}">##{m[3]}</a>})
end
},
twitter: {
pattern: %r{https?://(m\.|mobile\.)?twitter\.com/[^/]+/statuse?s?/(\d+)},
callback: proc do |content, id, post_id|
tweet = $redis.get "Tweets:#{id}"
if !tweet
Resque.enqueue(FetchTweetJob, id, post_id, :tweet)
content
else
tweet.gsub!(/<script.*>.*<\/script>/, "")
content.gsub(EMBEDS[:twitter][:pattern], tweet)
end
end
},
youtube: {
pattern: %r{https?://(www\.youtube\.com/watch\?v=|m\.youtube\.com/watch\?.*v=|youtu\.be/)([A-Za-z\-_0-9]+)[^ ]*},
callback: proc do |content, id|
content.gsub EMBEDS[:youtube][:pattern], %{<iframe width="560" height="315" src="//www.youtube.com/embed/#{id}" frameborder="0" allowfullscreen></iframe>}
end
},
instagram: {
pattern: %r{https?://(instagram\.com|instagr\.am)/p/([A-Za-z0-9]+)/?},
callback: proc do |content, id, post_id|
image = $redis.get "Instagram:#{id}"
if !image
Resque.enqueue(FetchTweetJob, id, post_id, :instagram)
content
else
content.gsub(EMBEDS[:instagram][:pattern], image)
end
end
},
facebook: {
pattern: %r{https?://www.facebook.com/[^/]+/((videos|posts)/[0-9]+)/?},
callback: proc do |content, id, post_id, match|
"<div class=\"fb-#{match[2].to_s.gsub(/s$/, '')}\" data-href=\"#{match[0]}\" data-width=\"500\" data-allowfullscreen=\"true\"></div>"
end
},
imgur: {
pattern: %r{https?://(i.)?imgur.com/([a-zA-Z0-9]+)\.gifv},
callback: proc do |content, id|
"<video poster=\"//i.imgur.com/#{id}.jpg\" preload=\"auto\" autoplay=\"autoplay\" muted=\"muted\" loop=\"loop\" webkit-playsinline=\"\" style=\"width: 480px; height: 270px;\"><source src=\"//i.imgur.com/#{id}.webm\" type=\"video/webm\"><source src=\"//i.imgur.com/#{id}.mp4\" type=\"video/mp4\"></video>"
end
}
}
def call
doc.search('.//text()').each do |node|
next unless node.respond_to?(:to_html)
content = node.to_html
EMBEDS.each do |k,embed|
if m = content.match(embed[:pattern])
html = embed[:callback].call(content, m[2], context[:post_id], m)
next if html == content
node.replace(html)
end
end
end
doc
end
end
PIPELINE_CONTEXT = {
:asset_root => "/images",
:base_url => "/users"
}
MARKDOWN_PIPELINE = Pipeline.new [
Pipeline::MarkdownFilter,
# Pipeline::ImageMaxWidthFilter,
BetterMentionFilter,
CustomEmojiFilter,
AutoEmbedFilter,
Pipeline::AutolinkFilter
], PIPELINE_CONTEXT
SIMPLE_PIPELINE = Pipeline.new [
SimpleFormatFilter,
# Pipeline::ImageMaxWidthFilter,
PreserveFormatting,
BetterMentionFilter,
CustomEmojiFilter,
AutoEmbedFilter,
Pipeline::AutolinkFilter
], PIPELINE_CONTEXT
TITLE_PIPELINE = Pipeline.new [
Pipeline::MarkdownFilter,
CustomEmojiFilter
], PIPELINE_CONTEXT
NOTIFICATION_PIPELINE = Pipeline.new [
Pipeline::MarkdownFilter,
BetterMentionFilter,
CustomEmojiFilter,
AutoEmbedFilter,
Pipeline::AutolinkFilter
], PIPELINE_CONTEXT
class << self
def markdown(text, post_id=nil, user_login=nil)
result = MARKDOWN_PIPELINE.call(text, {post_id: post_id, user_login: user_login})
result[:output].to_s
end
def simple(text, post_id=nil, user_login=nil)
result = SIMPLE_PIPELINE.call(text, {post_id: post_id, user_login: user_login})
result[:output].to_s
end
def title(text)
result = TITLE_PIPELINE.call(text)
result[:output].to_s
end
def notification(text)
result = NOTIFICATION_PIPELINE.call(text)
result[:output].to_s
end
end
end
Move old upload URL transformation into new filter class
module RenderPipeline
include HTML
class SimpleFormatFilter < Pipeline::Filter
include ActionView::Helpers::TextHelper
def call
if html.size < 64000
simple_format(html, {}, :sanitize => false)
else
html
end
end
end
class PreserveFormatting < Pipeline::Filter
def call
html.gsub /<([^\/a-zA-Z])/, '<\1'
end
end
# html-pipeline filter rewriting legacy upload URLs hosted on
# files.redcursor.net to their current location under redcursor.net,
# run ahead of markdown/autolink so links and images resolve.
class OldUploadsFilter < Pipeline::Filter
def call
html.gsub %r{https?://files\.redcursor\.net/uploads/}, %{https://redcursor.net/uploads/}
end
end
class BetterMentionFilter < Pipeline::MentionFilter
def self.mentioned_logins_in(text, username_pattern=Channel::UsernamePattern)
text.gsub Channel::MentionPatterns[Channel::UsernamePattern] do |match|
login = $1
yield match, login, false
end
end
def link_to_mention_info(text, info_url=nil)
Rails.logger.info context.inspect
self_mention = " own" if context[:user_login] && text.downcase == context[:user_login].downcase
return "@#{text}" if info_url.nil?
"<a href='#{info_url}' class='user-mention#{self_mention}'>" +
"@#{text}" +
"</a>"
end
end
class CustomEmojiFilter < Pipeline::EmojiFilter
def emoji_image_filter(text)
regex = emoji_pattern
text.gsub(regex) do |match|
emoji_image_tag($1)
end
end
def self.emoji_names
super + CustomEmoji.custom_emojis.map { |e| e[:aliases] }.flatten.sort
end
def self.emoji_pattern
last_update = CustomEmoji.last_update
if !@last_update || @last_update < last_update || !@emoji_pattern
@emoji_pattern = /:(#{emoji_names.map { |name| Regexp.escape(name) }.join('|')}):/
@last_update = last_update
end
@emoji_pattern
end
def emoji_image_tag(name)
"<img class='emoji' title=':#{name}:' alt=':#{name}:' src='#{emoji_url(name)}' height='20' width='20' align='absmiddle' />"
end
def emoji_url(name)
e = CustomEmoji.custom_emojis.find { |e| e[:aliases].include?(name) }
return e[:image] if e
super(name)
end
end
class AutoEmbedFilter < Pipeline::Filter
EMBEDS = {
redcursor: {
pattern: %r{https?://(#{(ENV["REDCURSOR_HOSTNAMES"] || "").gsub(/\./, "\\.").split(",").join("|")})/channels/([0-9]+(#[^ $]+)?)},
callback: proc do |content,id|
content.gsub(EMBEDS[:redcursor][:pattern], %{<a href="/channels/#{id}">#{Channel.find(id.to_i).title rescue "/channels/#{id}"}</a>})
end
},
redcursor_tag: {
pattern: Channel::TagPattern,
callback: proc do |content, tag, id, m|
content.gsub(EMBEDS[:redcursor_tag][:pattern], %{#{m[1]}<a class="hash-tag" href="/channels/tags/#{m[3]}">##{m[3]}</a>})
end
},
twitter: {
pattern: %r{https?://(m\.|mobile\.)?twitter\.com/[^/]+/statuse?s?/(\d+)},
callback: proc do |content, id, post_id|
tweet = $redis.get "Tweets:#{id}"
if !tweet
Resque.enqueue(FetchTweetJob, id, post_id, :tweet)
content
else
tweet.gsub!(/<script.*>.*<\/script>/, "")
content.gsub(EMBEDS[:twitter][:pattern], tweet)
end
end
},
youtube: {
pattern: %r{https?://(www\.youtube\.com/watch\?v=|m\.youtube\.com/watch\?.*v=|youtu\.be/)([A-Za-z\-_0-9]+)[^ ]*},
callback: proc do |content, id|
content.gsub EMBEDS[:youtube][:pattern], %{<iframe width="560" height="315" src="//www.youtube.com/embed/#{id}" frameborder="0" allowfullscreen></iframe>}
end
},
instagram: {
pattern: %r{https?://(instagram\.com|instagr\.am)/p/([A-Za-z0-9]+)/?},
callback: proc do |content, id, post_id|
image = $redis.get "Instagram:#{id}"
if !image
Resque.enqueue(FetchTweetJob, id, post_id, :instagram)
content
else
content.gsub(EMBEDS[:instagram][:pattern], image)
end
end
},
facebook: {
pattern: %r{https?://www.facebook.com/[^/]+/((videos|posts)/[0-9]+)/?},
callback: proc do |content, id, post_id, match|
"<div class=\"fb-#{match[2].to_s.gsub(/s$/, '')}\" data-href=\"#{match[0]}\" data-width=\"500\" data-allowfullscreen=\"true\"></div>"
end
},
imgur: {
pattern: %r{https?://(i.)?imgur.com/([a-zA-Z0-9]+)\.gifv},
callback: proc do |content, id|
"<video poster=\"//i.imgur.com/#{id}.jpg\" preload=\"auto\" autoplay=\"autoplay\" muted=\"muted\" loop=\"loop\" webkit-playsinline=\"\" style=\"width: 480px; height: 270px;\"><source src=\"//i.imgur.com/#{id}.webm\" type=\"video/webm\"><source src=\"//i.imgur.com/#{id}.mp4\" type=\"video/mp4\"></video>"
end
}
}
def call
doc.search('.//text()').each do |node|
next unless node.respond_to?(:to_html)
content = node.to_html
EMBEDS.each do |k,embed|
if m = content.match(embed[:pattern])
html = embed[:callback].call(content, m[2], context[:post_id], m)
next if html == content
node.replace(html)
end
end
end
doc
end
end
PIPELINE_CONTEXT = {
:asset_root => "/images",
:base_url => "/users"
}
MARKDOWN_PIPELINE = Pipeline.new [
OldUploadsFilter,
Pipeline::MarkdownFilter,
# Pipeline::ImageMaxWidthFilter,
BetterMentionFilter,
CustomEmojiFilter,
AutoEmbedFilter,
Pipeline::AutolinkFilter
], PIPELINE_CONTEXT
SIMPLE_PIPELINE = Pipeline.new [
OldUploadsFilter,
SimpleFormatFilter,
# Pipeline::ImageMaxWidthFilter,
PreserveFormatting,
BetterMentionFilter,
CustomEmojiFilter,
AutoEmbedFilter,
Pipeline::AutolinkFilter
], PIPELINE_CONTEXT
TITLE_PIPELINE = Pipeline.new [
OldUploadsFilter,
Pipeline::MarkdownFilter,
CustomEmojiFilter
], PIPELINE_CONTEXT
NOTIFICATION_PIPELINE = Pipeline.new [
OldUploadsFilter,
Pipeline::MarkdownFilter,
BetterMentionFilter,
CustomEmojiFilter,
AutoEmbedFilter,
Pipeline::AutolinkFilter
], PIPELINE_CONTEXT
class << self
def markdown(text, post_id=nil, user_login=nil)
result = MARKDOWN_PIPELINE.call(text, {post_id: post_id, user_login: user_login})
result[:output].to_s
end
def simple(text, post_id=nil, user_login=nil)
result = SIMPLE_PIPELINE.call(text, {post_id: post_id, user_login: user_login})
result[:output].to_s
end
def title(text)
result = TITLE_PIPELINE.call(text)
result[:output].to_s
end
def notification(text)
result = NOTIFICATION_PIPELINE.call(text)
result[:output].to_s
end
end
end
|
# -------------------------------------------------------------------------- #
# Copyright 2002-2013, OpenNebula Project (OpenNebula.org), C12G Labs #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); you may #
# not use this file except in compliance with the License. You may obtain #
# a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
#--------------------------------------------------------------------------- #
require 'one_helper'
require 'optparse/time'
class OneVMHelper < OpenNebulaHelper::OneHelper
MULTIPLE={
:name => "multiple",
:short => "-m x",
:large => "--multiple x",
:format => Integer,
:description => "Instance multiple VMs"
}
IMAGE = {
:name => "image",
:short => "-i id|name",
:large => "--image id|name" ,
:description => "Selects the image",
:format => String,
:proc => lambda { |o, options|
OpenNebulaHelper.rname_to_id(o, "IMAGE")
}
}
NETWORK = {
:name => "network",
:short => "-n id|name",
:large => "--network id|name" ,
:description => "Selects the virtual network",
:format => String,
:proc => lambda { |o, options|
OpenNebulaHelper.rname_to_id(o, "VNET")
}
}
FILE = {
:name => "file",
:short => "-f file",
:large => "--file file" ,
:description => "Selects the template file",
:format => String,
:proc => lambda { |o, options|
if File.file?(o)
options[:file] = o
else
exit -1
end
}
}
HOLD = {
:name => "hold",
:large => "--hold",
:description => "Creates the new VM on hold state instead of pending"
}
SCHEDULE = {
:name => "schedule",
:large => "--schedule TIME",
:description => "Schedules this action to be executed after the given time",
:format => Time
}
ALL_TEMPLATE = {
:name => "all",
:large => "--all",
:description => "Show all template data"
}
LIVE = {
:name => "live",
:large => "--live",
:description => "Do the action with the VM running"
}
HARD = {
:name => "hard",
:large => "--hard",
:description=> "Does not communicate with the guest OS"
}
RECREATE = {
:name => "recreate",
:large => "--recreate",
:description=> "Resubmits a fresh VM"
}
def self.rname
"VM"
end
def self.conf_file
"onevm.yaml"
end
# Map numeric VM and LCM state ids to the short state string shown in the
# STAT column. While a VM is active ("actv") the LCM (life-cycle) state is
# the informative one, so its short form is returned instead.
def self.state_to_str(id, lcm_id)
  state_str = VirtualMachine::VM_STATE[id.to_i]
  short_state_str = VirtualMachine::SHORT_VM_STATES[state_str]
  return short_state_str unless short_state_str == "actv"
  lcm_state_str = VirtualMachine::LCM_STATE[lcm_id.to_i]
  VirtualMachine::SHORT_LCM_STATES[lcm_state_str]
end
def format_pool(options)
config_file = self.class.table_conf
table = CLIHelper::ShowTable.new(config_file, self) do
column :ID, "ONE identifier for Virtual Machine", :size=>6 do |d|
d["ID"]
end
column :NAME, "Name of the Virtual Machine", :left,
:size=>15 do |d|
if d["RESCHED"] == "1"
"*#{d["NAME"]}"
else
d["NAME"]
end
end
column :USER, "Username of the Virtual Machine owner", :left,
:size=>8 do |d|
helper.user_name(d, options)
end
column :GROUP, "Group of the Virtual Machine", :left,
:size=>8 do |d|
helper.group_name(d, options)
end
column :STAT, "Actual status", :size=>4 do |d,e|
OneVMHelper.state_to_str(d["STATE"], d["LCM_STATE"])
end
column :UCPU, "CPU percentage used by the VM", :size=>4 do |d|
d["CPU"]
end
column :UMEM, "Memory used by the VM", :size=>7 do |d|
OpenNebulaHelper.unit_to_str(d["MEMORY"].to_i, options)
end
column :HOST, "Host where the VM is running", :left, :size=>10 do |d|
if d['HISTORY_RECORDS'] && d['HISTORY_RECORDS']['HISTORY']
state_str = VirtualMachine::VM_STATE[d['STATE'].to_i]
if %w{ACTIVE SUSPENDED POWEROFF}.include? state_str
d['HISTORY_RECORDS']['HISTORY']['HOSTNAME']
end
end
end
column :TIME, "Time since the VM was submitted", :size=>10 do |d|
stime = d["STIME"].to_i
etime = d["ETIME"]=="0" ? Time.now.to_i : d["ETIME"].to_i
dtime = etime-stime
OpenNebulaHelper.period_to_str(dtime, false)
end
default :ID, :USER, :GROUP, :NAME, :STAT, :UCPU, :UMEM, :HOST,
:TIME
end
table
end
# Schedule +action+ on each VM in +ids+ at options[:schedule]: appends a
# SCHED_ACTION entry to the VM's user template with an ID one greater
# than the current maximum (0 for the first scheduled action).
def schedule_actions(ids,options,action)
# Verbose by default
options[:verbose] = true
perform_actions(
ids, options,
"#{action} scheduled at #{options[:schedule]}") do |vm|
rc = vm.info
if OpenNebula.is_error?(rc)
puts rc.message
exit -1
end
# NOTE(review): `ids` here shadows/reuses the method parameter - these
# are the VM's existing scheduled-action IDs, used to compute max+1.
ids = vm.retrieve_elements('USER_TEMPLATE/SCHED_ACTION/ID')
id = 0
if (!ids.nil? && !ids.empty?)
ids.map! {|e| e.to_i }
id = ids.max + 1
end
# Append the new SCHED_ACTION and push the updated user template
tmp_str = vm.user_template_str
tmp_str << "\nSCHED_ACTION = [ID = #{id}, ACTION = #{action}, TIME = #{options[:schedule].to_i}]"
vm.update(tmp_str)
end
end
private
# Wrap an existing VM by id, or build an empty VirtualMachine from a
# fresh XML template when no id is given.
def factory(id=nil)
if id
OpenNebula::VirtualMachine.new_with_id(id, @client)
else
xml=OpenNebula::VirtualMachine.build_xml
OpenNebula::VirtualMachine.new(xml, @client)
end
end
# Pool of VMs visible under the given filter flag (-2 = all resources).
def factory_pool(user_flag=-2)
OpenNebula::VirtualMachinePool.new(@client, user_flag)
end
def format_resource(vm, options = {})
str_h1="%-80s"
str="%-20s: %-20s"
CLIHelper.print_header(
str_h1 % "VIRTUAL MACHINE #{vm['ID']} INFORMATION")
puts str % ["ID", vm.id.to_s]
puts str % ["NAME", vm.name]
puts str % ["USER", vm['UNAME']]
puts str % ["GROUP", vm['GNAME']]
puts str % ["STATE", vm.state_str]
puts str % ["LCM_STATE", vm.lcm_state_str]
puts str % ["RESCHED", OpenNebulaHelper.boolean_to_str(vm['RESCHED'])]
puts str % ["HOST",
vm['/VM/HISTORY_RECORDS/HISTORY[last()]/HOSTNAME']] if
%w{ACTIVE SUSPENDED}.include? vm.state_str
puts str % ["START TIME",
OpenNebulaHelper.time_to_str(vm['/VM/STIME'])]
puts str % ["END TIME",
OpenNebulaHelper.time_to_str(vm['/VM/ETIME'])]
value=vm['DEPLOY_ID']
puts str % ["DEPLOY ID", value=="" ? "-" : value]
puts
CLIHelper.print_header(str_h1 % "VIRTUAL MACHINE MONITORING",false)
poll_attrs = {
"USED MEMORY" => "MEMORY",
"USED CPU" => "CPU",
"NET_TX" => "NET_TX",
"NET_RX" => "NET_RX"
}
poll_attrs.each { |k,v|
if k == "USED CPU"
puts str % [k,vm[v]]
elsif k == "USED MEMORY"
puts str % [k, OpenNebulaHelper.unit_to_str(vm[v].to_i, {})]
else
puts str % [k, OpenNebulaHelper.unit_to_str(vm[v].to_i/1024, {})]
end
}
puts
CLIHelper.print_header(str_h1 % "PERMISSIONS",false)
["OWNER", "GROUP", "OTHER"].each { |e|
mask = "---"
mask[0] = "u" if vm["PERMISSIONS/#{e}_U"] == "1"
mask[1] = "m" if vm["PERMISSIONS/#{e}_M"] == "1"
mask[2] = "a" if vm["PERMISSIONS/#{e}_A"] == "1"
puts str % [e, mask]
}
if vm.has_elements?("/VM/TEMPLATE/DISK")
puts
CLIHelper.print_header(str_h1 % "VM DISKS",false)
CLIHelper::ShowTable.new(nil, self) do
column :ID, "", :size=>3 do |d|
d["DISK_ID"]
end
column :DATASTORE, "", :left, :size=>10 do |d|
d["DATASTORE"]
end
column :TARGET, "", :left, :size=>6 do |d|
d["TARGET"]
end
column :IMAGE, "", :left, :size=>35 do |d|
if d["IMAGE"]
d["IMAGE"]
else
case d["TYPE"].upcase
when "FS"
"#{d["FORMAT"]} - "<<
OpenNebulaHelper.unit_to_str(d["SIZE"].to_i,
{}, "M")
when "SWAP"
OpenNebulaHelper.unit_to_str(d["SIZE"].to_i,
{}, "M")
end
end
end
column :TYPE, "", :left, :size=>4 do |d|
d["TYPE"].downcase
end
column :"R/O", "", :size=>3 do |d|
d["READONLY"]
end
column :"SAVE", "", :size=>4 do |d|
d["SAVE"] || "NO"
end
column :"CLONE", "", :size=>5 do |d|
d["CLONE"]
end
column :"SAVE_AS", "", :size=>7 do |d|
d["SAVE_AS"] || "-"
end
default :ID, :TARGET, :IMAGE, :TYPE,
:SAVE, :SAVE_AS
end.show([vm.to_hash['VM']['TEMPLATE']['DISK']].flatten, {})
while vm.has_elements?("/VM/TEMPLATE/DISK")
vm.delete_element("/VM/TEMPLATE/DISK")
end if !options[:all]
end
if vm.has_elements?("/VM/TEMPLATE/NIC")
puts
CLIHelper.print_header(str_h1 % "VM NICS",false)
vm_nics = [vm.to_hash['VM']['TEMPLATE']['NIC']].flatten
nic_default = {"NETWORK" => "-",
"IP" => "-",
"MAC"=> "-",
"VLAN"=>"no",
"BRIDGE"=>"-"}
array_id = 0
vm_nics.each {|nic|
next if nic.has_key?("CLI_DONE")
if nic.has_key?("IP6_LINK")
ip6_link = {"IP" => nic.delete("IP6_LINK"),
"CLI_DONE" => true,
"DOUBLE_ENTRY" => true}
vm_nics.insert(array_id+1,ip6_link)
array_id += 1
end
if nic.has_key?("IP6_SITE")
ip6_link = {"IP" => nic.delete("IP6_SITE"),
"CLI_DONE" => true,
"DOUBLE_ENTRY" => true}
vm_nics.insert(array_id+1,ip6_link)
array_id += 1
end
if nic.has_key?("IP6_GLOBAL")
ip6_link = {"IP" => nic.delete("IP6_GLOBAL"),
"CLI_DONE" => true,
"DOUBLE_ENTRY" => true}
vm_nics.insert(array_id+1,ip6_link)
array_id += 1
end
nic.merge!(nic_default) {|k,v1,v2| v1}
array_id += 1
}
CLIHelper::ShowTable.new(nil, self) do
column :ID, "", :size=>3 do |d|
if d["DOUBLE_ENTRY"]
""
else
d["NIC_ID"]
end
end
column :NETWORK, "", :left, :size=>20 do |d|
if d["DOUBLE_ENTRY"]
""
else
d["NETWORK"]
end
end
column :VLAN, "", :size=>4 do |d|
if d["DOUBLE_ENTRY"]
""
else
d["VLAN"].downcase
end
end
column :BRIDGE, "", :left, :size=>12 do |d|
if d["DOUBLE_ENTRY"]
""
else
d["BRIDGE"]
end
end
column :IP, "",:left, :donottruncate, :size=>15 do |d|
d["IP"]
end
column :MAC, "", :left, :size=>17 do |d|
if d["DOUBLE_ENTRY"]
""
else
d["MAC"]
end
end
end.show(vm_nics,{})
while vm.has_elements?("/VM/TEMPLATE/NIC")
vm.delete_element("/VM/TEMPLATE/NIC")
end if !options[:all]
end
if vm.has_elements?("/VM/TEMPLATE/SNAPSHOT")
puts
CLIHelper.print_header(str_h1 % "SNAPSHOTS",false)
CLIHelper::ShowTable.new(nil, self) do
column :"ID", "", :size=>4 do |d|
d["SNAPSHOT_ID"] if !d.nil?
end
column :"TIME", "", :size=>12 do |d|
OpenNebulaHelper.time_to_str(d["TIME"], false) if !d.nil?
end
column :"NAME", "", :left, :size=>46 do |d|
d["NAME"] if !d.nil?
end
column :"HYPERVISOR_ID", "", :left, :size=>15 do |d|
d["HYPERVISOR_ID"] if !d.nil?
end
end.show([vm.to_hash['VM']['TEMPLATE']['SNAPSHOT']].flatten, {})
vm.delete_element("/VM/TEMPLATE/SNAPSHOT")
end
if vm.has_elements?("/VM/HISTORY_RECORDS")
puts
CLIHelper.print_header(str_h1 % "VIRTUAL MACHINE HISTORY",false)
format_history(vm)
end
if vm.has_elements?("/VM/USER_TEMPLATE/SCHED_ACTION")
puts
CLIHelper.print_header(str_h1 % "SCHEDULED ACTIONS",false)
CLIHelper::ShowTable.new(nil, self) do
column :"ID", "", :size=>2 do |d|
d["ID"] if !d.nil?
end
column :"ACTION", "", :left, :size=>15 do |d|
d["ACTION"] if !d.nil?
end
column :"SCHEDULED", "", :size=>12 do |d|
OpenNebulaHelper.time_to_str(d["TIME"], false) if !d.nil?
end
column :"DONE", "", :size=>12 do |d|
OpenNebulaHelper.time_to_str(d["DONE"], false) if !d.nil?
end
column :"MESSAGE", "", :left, :donottruncate, :size=>35 do |d|
d["MESSAGE"] if !d.nil?
end
end.show([vm.to_hash['VM']['USER_TEMPLATE']['SCHED_ACTION']].flatten, {})
end
if vm.has_elements?("/VM/USER_TEMPLATE")
puts
if !options[:all]
vm.delete_element("/VM/USER_TEMPLATE/SCHED_ACTION")
end
CLIHelper.print_header(str_h1 % "USER TEMPLATE",false)
puts vm.template_like_str('USER_TEMPLATE')
end
puts
CLIHelper.print_header(str_h1 % "VIRTUAL MACHINE TEMPLATE",false)
puts vm.template_str
end
# Print the VM's placement history as a table: one row per history
# record with host, state-change reason and timing information.
def format_history(vm)
table=CLIHelper::ShowTable.new(nil, self) do
column :SEQ, "Sequence number", :size=>4 do |d|
d["SEQ"]
end
column :HOST, "Host name of the VM container", :left, :size=>20 do |d|
d["HOSTNAME"]
end
column :REASON, "VM state change reason", :left, :size=>6 do |d|
VirtualMachine.get_reason d["REASON"]
end
column :START, "Time when the state changed", :size=>15 do |d|
OpenNebulaHelper.time_to_str(d['STIME'])
end
# ETIME == "0" marks a still-open record: measure up to the present
column :TIME, "Total time in this state", :size=>15 do |d|
stime = d["STIME"].to_i
etime = d["ETIME"]=="0" ? Time.now.to_i : d["ETIME"].to_i
dtime = etime-stime
OpenNebulaHelper.period_to_str(dtime)
end
column :PROLOG_TIME, "Prolog time for this state", :size=>15 do |d|
stime = d["PSTIME"].to_i
# PSTIME == "0" means no prolog ran for this record
if d["PSTIME"]=="0"
etime=0
else
etime = d["PETIME"]=="0" ? Time.now.to_i: d["PETIME"].to_i
end
dtime = etime-stime
OpenNebulaHelper.period_to_str(dtime)
end
default :SEQ, :HOST, :REASON, :START, :TIME, :PROLOG_TIME
end
# HISTORY may be a single hash or an array - wrapping + flatten normalises
vm_hash=vm.to_hash
history=[vm_hash['VM']['HISTORY_RECORDS']['HISTORY']].flatten
table.show(history)
end
end
Feature #1790: add a usage example to the --schedule option description
# -------------------------------------------------------------------------- #
# Copyright 2002-2013, OpenNebula Project (OpenNebula.org), C12G Labs #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); you may #
# not use this file except in compliance with the License. You may obtain #
# a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
#--------------------------------------------------------------------------- #
require 'one_helper'
require 'optparse/time'
class OneVMHelper < OpenNebulaHelper::OneHelper
MULTIPLE={
:name => "multiple",
:short => "-m x",
:large => "--multiple x",
:format => Integer,
:description => "Instance multiple VMs"
}
IMAGE = {
:name => "image",
:short => "-i id|name",
:large => "--image id|name" ,
:description => "Selects the image",
:format => String,
:proc => lambda { |o, options|
OpenNebulaHelper.rname_to_id(o, "IMAGE")
}
}
NETWORK = {
:name => "network",
:short => "-n id|name",
:large => "--network id|name" ,
:description => "Selects the virtual network",
:format => String,
:proc => lambda { |o, options|
OpenNebulaHelper.rname_to_id(o, "VNET")
}
}
FILE = {
:name => "file",
:short => "-f file",
:large => "--file file" ,
:description => "Selects the template file",
:format => String,
:proc => lambda { |o, options|
if File.file?(o)
options[:file] = o
else
exit -1
end
}
}
HOLD = {
:name => "hold",
:large => "--hold",
:description => "Creates the new VM on hold state instead of pending"
}
SCHEDULE = {
:name => "schedule",
:large => "--schedule TIME",
:description => "Schedules this action to be executed after" \
"the given time. For example: onevm resume 0 --schedule \"09/23 14:15\"",
:format => Time
}
ALL_TEMPLATE = {
:name => "all",
:large => "--all",
:description => "Show all template data"
}
LIVE = {
:name => "live",
:large => "--live",
:description => "Do the action with the VM running"
}
HARD = {
:name => "hard",
:large => "--hard",
:description=> "Does not communicate with the guest OS"
}
RECREATE = {
:name => "recreate",
:large => "--recreate",
:description=> "Resubmits a fresh VM"
}
def self.rname
"VM"
end
def self.conf_file
"onevm.yaml"
end
def self.state_to_str(id, lcm_id)
id = id.to_i
state_str = VirtualMachine::VM_STATE[id]
short_state_str = VirtualMachine::SHORT_VM_STATES[state_str]
if short_state_str=="actv"
lcm_id = lcm_id.to_i
lcm_state_str = VirtualMachine::LCM_STATE[lcm_id]
return VirtualMachine::SHORT_LCM_STATES[lcm_state_str]
end
return short_state_str
end
def format_pool(options)
config_file = self.class.table_conf
table = CLIHelper::ShowTable.new(config_file, self) do
column :ID, "ONE identifier for Virtual Machine", :size=>6 do |d|
d["ID"]
end
column :NAME, "Name of the Virtual Machine", :left,
:size=>15 do |d|
if d["RESCHED"] == "1"
"*#{d["NAME"]}"
else
d["NAME"]
end
end
column :USER, "Username of the Virtual Machine owner", :left,
:size=>8 do |d|
helper.user_name(d, options)
end
column :GROUP, "Group of the Virtual Machine", :left,
:size=>8 do |d|
helper.group_name(d, options)
end
column :STAT, "Actual status", :size=>4 do |d,e|
OneVMHelper.state_to_str(d["STATE"], d["LCM_STATE"])
end
column :UCPU, "CPU percentage used by the VM", :size=>4 do |d|
d["CPU"]
end
column :UMEM, "Memory used by the VM", :size=>7 do |d|
OpenNebulaHelper.unit_to_str(d["MEMORY"].to_i, options)
end
column :HOST, "Host where the VM is running", :left, :size=>10 do |d|
if d['HISTORY_RECORDS'] && d['HISTORY_RECORDS']['HISTORY']
state_str = VirtualMachine::VM_STATE[d['STATE'].to_i]
if %w{ACTIVE SUSPENDED POWEROFF}.include? state_str
d['HISTORY_RECORDS']['HISTORY']['HOSTNAME']
end
end
end
column :TIME, "Time since the VM was submitted", :size=>10 do |d|
stime = d["STIME"].to_i
etime = d["ETIME"]=="0" ? Time.now.to_i : d["ETIME"].to_i
dtime = etime-stime
OpenNebulaHelper.period_to_str(dtime, false)
end
default :ID, :USER, :GROUP, :NAME, :STAT, :UCPU, :UMEM, :HOST,
:TIME
end
table
end
def schedule_actions(ids,options,action)
# Verbose by default
options[:verbose] = true
perform_actions(
ids, options,
"#{action} scheduled at #{options[:schedule]}") do |vm|
rc = vm.info
if OpenNebula.is_error?(rc)
puts rc.message
exit -1
end
ids = vm.retrieve_elements('USER_TEMPLATE/SCHED_ACTION/ID')
id = 0
if (!ids.nil? && !ids.empty?)
ids.map! {|e| e.to_i }
id = ids.max + 1
end
tmp_str = vm.user_template_str
tmp_str << "\nSCHED_ACTION = [ID = #{id}, ACTION = #{action}, TIME = #{options[:schedule].to_i}]"
vm.update(tmp_str)
end
end
private
def factory(id=nil)
if id
OpenNebula::VirtualMachine.new_with_id(id, @client)
else
xml=OpenNebula::VirtualMachine.build_xml
OpenNebula::VirtualMachine.new(xml, @client)
end
end
def factory_pool(user_flag=-2)
OpenNebula::VirtualMachinePool.new(@client, user_flag)
end
def format_resource(vm, options = {})
str_h1="%-80s"
str="%-20s: %-20s"
CLIHelper.print_header(
str_h1 % "VIRTUAL MACHINE #{vm['ID']} INFORMATION")
puts str % ["ID", vm.id.to_s]
puts str % ["NAME", vm.name]
puts str % ["USER", vm['UNAME']]
puts str % ["GROUP", vm['GNAME']]
puts str % ["STATE", vm.state_str]
puts str % ["LCM_STATE", vm.lcm_state_str]
puts str % ["RESCHED", OpenNebulaHelper.boolean_to_str(vm['RESCHED'])]
puts str % ["HOST",
vm['/VM/HISTORY_RECORDS/HISTORY[last()]/HOSTNAME']] if
%w{ACTIVE SUSPENDED}.include? vm.state_str
puts str % ["START TIME",
OpenNebulaHelper.time_to_str(vm['/VM/STIME'])]
puts str % ["END TIME",
OpenNebulaHelper.time_to_str(vm['/VM/ETIME'])]
value=vm['DEPLOY_ID']
puts str % ["DEPLOY ID", value=="" ? "-" : value]
puts
CLIHelper.print_header(str_h1 % "VIRTUAL MACHINE MONITORING",false)
poll_attrs = {
"USED MEMORY" => "MEMORY",
"USED CPU" => "CPU",
"NET_TX" => "NET_TX",
"NET_RX" => "NET_RX"
}
poll_attrs.each { |k,v|
if k == "USED CPU"
puts str % [k,vm[v]]
elsif k == "USED MEMORY"
puts str % [k, OpenNebulaHelper.unit_to_str(vm[v].to_i, {})]
else
puts str % [k, OpenNebulaHelper.unit_to_str(vm[v].to_i/1024, {})]
end
}
puts
CLIHelper.print_header(str_h1 % "PERMISSIONS",false)
["OWNER", "GROUP", "OTHER"].each { |e|
mask = "---"
mask[0] = "u" if vm["PERMISSIONS/#{e}_U"] == "1"
mask[1] = "m" if vm["PERMISSIONS/#{e}_M"] == "1"
mask[2] = "a" if vm["PERMISSIONS/#{e}_A"] == "1"
puts str % [e, mask]
}
if vm.has_elements?("/VM/TEMPLATE/DISK")
puts
CLIHelper.print_header(str_h1 % "VM DISKS",false)
CLIHelper::ShowTable.new(nil, self) do
column :ID, "", :size=>3 do |d|
d["DISK_ID"]
end
column :DATASTORE, "", :left, :size=>10 do |d|
d["DATASTORE"]
end
column :TARGET, "", :left, :size=>6 do |d|
d["TARGET"]
end
column :IMAGE, "", :left, :size=>35 do |d|
if d["IMAGE"]
d["IMAGE"]
else
case d["TYPE"].upcase
when "FS"
"#{d["FORMAT"]} - "<<
OpenNebulaHelper.unit_to_str(d["SIZE"].to_i,
{}, "M")
when "SWAP"
OpenNebulaHelper.unit_to_str(d["SIZE"].to_i,
{}, "M")
end
end
end
column :TYPE, "", :left, :size=>4 do |d|
d["TYPE"].downcase
end
column :"R/O", "", :size=>3 do |d|
d["READONLY"]
end
column :"SAVE", "", :size=>4 do |d|
d["SAVE"] || "NO"
end
column :"CLONE", "", :size=>5 do |d|
d["CLONE"]
end
column :"SAVE_AS", "", :size=>7 do |d|
d["SAVE_AS"] || "-"
end
default :ID, :TARGET, :IMAGE, :TYPE,
:SAVE, :SAVE_AS
end.show([vm.to_hash['VM']['TEMPLATE']['DISK']].flatten, {})
while vm.has_elements?("/VM/TEMPLATE/DISK")
vm.delete_element("/VM/TEMPLATE/DISK")
end if !options[:all]
end
if vm.has_elements?("/VM/TEMPLATE/NIC")
puts
CLIHelper.print_header(str_h1 % "VM NICS",false)
vm_nics = [vm.to_hash['VM']['TEMPLATE']['NIC']].flatten
nic_default = {"NETWORK" => "-",
"IP" => "-",
"MAC"=> "-",
"VLAN"=>"no",
"BRIDGE"=>"-"}
array_id = 0
vm_nics.each {|nic|
next if nic.has_key?("CLI_DONE")
if nic.has_key?("IP6_LINK")
ip6_link = {"IP" => nic.delete("IP6_LINK"),
"CLI_DONE" => true,
"DOUBLE_ENTRY" => true}
vm_nics.insert(array_id+1,ip6_link)
array_id += 1
end
if nic.has_key?("IP6_SITE")
ip6_link = {"IP" => nic.delete("IP6_SITE"),
"CLI_DONE" => true,
"DOUBLE_ENTRY" => true}
vm_nics.insert(array_id+1,ip6_link)
array_id += 1
end
if nic.has_key?("IP6_GLOBAL")
ip6_link = {"IP" => nic.delete("IP6_GLOBAL"),
"CLI_DONE" => true,
"DOUBLE_ENTRY" => true}
vm_nics.insert(array_id+1,ip6_link)
array_id += 1
end
nic.merge!(nic_default) {|k,v1,v2| v1}
array_id += 1
}
CLIHelper::ShowTable.new(nil, self) do
column :ID, "", :size=>3 do |d|
if d["DOUBLE_ENTRY"]
""
else
d["NIC_ID"]
end
end
column :NETWORK, "", :left, :size=>20 do |d|
if d["DOUBLE_ENTRY"]
""
else
d["NETWORK"]
end
end
column :VLAN, "", :size=>4 do |d|
if d["DOUBLE_ENTRY"]
""
else
d["VLAN"].downcase
end
end
column :BRIDGE, "", :left, :size=>12 do |d|
if d["DOUBLE_ENTRY"]
""
else
d["BRIDGE"]
end
end
column :IP, "",:left, :donottruncate, :size=>15 do |d|
d["IP"]
end
column :MAC, "", :left, :size=>17 do |d|
if d["DOUBLE_ENTRY"]
""
else
d["MAC"]
end
end
end.show(vm_nics,{})
while vm.has_elements?("/VM/TEMPLATE/NIC")
vm.delete_element("/VM/TEMPLATE/NIC")
end if !options[:all]
end
if vm.has_elements?("/VM/TEMPLATE/SNAPSHOT")
puts
CLIHelper.print_header(str_h1 % "SNAPSHOTS",false)
CLIHelper::ShowTable.new(nil, self) do
column :"ID", "", :size=>4 do |d|
d["SNAPSHOT_ID"] if !d.nil?
end
column :"TIME", "", :size=>12 do |d|
OpenNebulaHelper.time_to_str(d["TIME"], false) if !d.nil?
end
column :"NAME", "", :left, :size=>46 do |d|
d["NAME"] if !d.nil?
end
column :"HYPERVISOR_ID", "", :left, :size=>15 do |d|
d["HYPERVISOR_ID"] if !d.nil?
end
end.show([vm.to_hash['VM']['TEMPLATE']['SNAPSHOT']].flatten, {})
vm.delete_element("/VM/TEMPLATE/SNAPSHOT")
end
if vm.has_elements?("/VM/HISTORY_RECORDS")
puts
CLIHelper.print_header(str_h1 % "VIRTUAL MACHINE HISTORY",false)
format_history(vm)
end
if vm.has_elements?("/VM/USER_TEMPLATE/SCHED_ACTION")
puts
CLIHelper.print_header(str_h1 % "SCHEDULED ACTIONS",false)
CLIHelper::ShowTable.new(nil, self) do
column :"ID", "", :size=>2 do |d|
d["ID"] if !d.nil?
end
column :"ACTION", "", :left, :size=>15 do |d|
d["ACTION"] if !d.nil?
end
column :"SCHEDULED", "", :size=>12 do |d|
OpenNebulaHelper.time_to_str(d["TIME"], false) if !d.nil?
end
column :"DONE", "", :size=>12 do |d|
OpenNebulaHelper.time_to_str(d["DONE"], false) if !d.nil?
end
column :"MESSAGE", "", :left, :donottruncate, :size=>35 do |d|
d["MESSAGE"] if !d.nil?
end
end.show([vm.to_hash['VM']['USER_TEMPLATE']['SCHED_ACTION']].flatten, {})
end
if vm.has_elements?("/VM/USER_TEMPLATE")
puts
if !options[:all]
vm.delete_element("/VM/USER_TEMPLATE/SCHED_ACTION")
end
CLIHelper.print_header(str_h1 % "USER TEMPLATE",false)
puts vm.template_like_str('USER_TEMPLATE')
end
puts
CLIHelper.print_header(str_h1 % "VIRTUAL MACHINE TEMPLATE",false)
puts vm.template_str
end
def format_history(vm)
table=CLIHelper::ShowTable.new(nil, self) do
column :SEQ, "Sequence number", :size=>4 do |d|
d["SEQ"]
end
column :HOST, "Host name of the VM container", :left, :size=>20 do |d|
d["HOSTNAME"]
end
column :REASON, "VM state change reason", :left, :size=>6 do |d|
VirtualMachine.get_reason d["REASON"]
end
column :START, "Time when the state changed", :size=>15 do |d|
OpenNebulaHelper.time_to_str(d['STIME'])
end
column :TIME, "Total time in this state", :size=>15 do |d|
stime = d["STIME"].to_i
etime = d["ETIME"]=="0" ? Time.now.to_i : d["ETIME"].to_i
dtime = etime-stime
OpenNebulaHelper.period_to_str(dtime)
end
column :PROLOG_TIME, "Prolog time for this state", :size=>15 do |d|
stime = d["PSTIME"].to_i
if d["PSTIME"]=="0"
etime=0
else
etime = d["PETIME"]=="0" ? Time.now.to_i: d["PETIME"].to_i
end
dtime = etime-stime
OpenNebulaHelper.period_to_str(dtime)
end
default :SEQ, :HOST, :REASON, :START, :TIME, :PROLOG_TIME
end
vm_hash=vm.to_hash
history=[vm_hash['VM']['HISTORY_RECORDS']['HISTORY']].flatten
table.show(history)
end
end
|
# rest-ftp-daemon bootstrap: loads third-party gems first, then the
# shared infrastructure, then the project's own files. The require
# order below matters (e.g. workers/* before job, remote before the
# protocol-specific remote_* variants) — do not reorder casually.

# Global libs
require "rubygems"
require "json"
require "haml"
require "uri"
require "timeout"
require "syslog"
require "net/http"
require "thread"
require "singleton"
require "grape"
require "grape-entity"
require "newrelic_rpm"

# Shared libs
require_relative "shared/logger_formatter"
require_relative "shared/logger_helper"
require_relative "shared/conf"
require_relative "shared/worker_base"

# Project's libs
require_relative "rest-ftp-daemon/constants"
require_relative "rest-ftp-daemon/array"
require_relative "rest-ftp-daemon/exceptions"
require_relative "rest-ftp-daemon/helpers"
require_relative "rest-ftp-daemon/logger_pool"
require_relative "rest-ftp-daemon/metrics"
require_relative "rest-ftp-daemon/paginate"
require_relative "rest-ftp-daemon/uri"
require_relative "rest-ftp-daemon/job_queue"
require_relative "rest-ftp-daemon/counters"
require_relative "rest-ftp-daemon/worker_pool"
require_relative "rest-ftp-daemon/workers/conchita"
require_relative "rest-ftp-daemon/workers/reporter"
require_relative "rest-ftp-daemon/workers/job"
require_relative "rest-ftp-daemon/job"
require_relative "rest-ftp-daemon/notification"
require_relative "rest-ftp-daemon/path"
require_relative "rest-ftp-daemon/remote"
require_relative "rest-ftp-daemon/remote_ftp"
require_relative "rest-ftp-daemon/remote_sftp"
require_relative "rest-ftp-daemon/api/job_presenter"
require_relative "rest-ftp-daemon/api/jobs"
require_relative "rest-ftp-daemon/api/dashboard"
require_relative "rest-ftp-daemon/api/status"
require_relative "rest-ftp-daemon/api/config"
require_relative "rest-ftp-daemon/api/debug"
require_relative "rest-ftp-daemon/api/root"

# Haml monkey-patching
require_relative "rest-ftp-daemon/patch_haml"
Rename JobWorker to TransferWorker
# Global libs
require "rubygems"
require "json"
require "haml"
require "uri"
require "timeout"
require "syslog"
require "net/http"
require "thread"
require "singleton"
require "grape"
require "grape-entity"
require "newrelic_rpm"
# Shared libs
require_relative "shared/logger_formatter"
require_relative "shared/logger_helper"
require_relative "shared/conf"
require_relative "shared/worker_base"
# Project's libs
require_relative "rest-ftp-daemon/constants"
require_relative "rest-ftp-daemon/array"
require_relative "rest-ftp-daemon/exceptions"
require_relative "rest-ftp-daemon/helpers"
require_relative "rest-ftp-daemon/logger_pool"
require_relative "rest-ftp-daemon/metrics"
require_relative "rest-ftp-daemon/paginate"
require_relative "rest-ftp-daemon/uri"
require_relative "rest-ftp-daemon/job_queue"
require_relative "rest-ftp-daemon/counters"
require_relative "rest-ftp-daemon/worker_pool"
require_relative "rest-ftp-daemon/workers/conchita"
require_relative "rest-ftp-daemon/workers/reporter"
require_relative "rest-ftp-daemon/workers/transfer"
require_relative "rest-ftp-daemon/job"
require_relative "rest-ftp-daemon/notification"
require_relative "rest-ftp-daemon/path"
require_relative "rest-ftp-daemon/remote"
require_relative "rest-ftp-daemon/remote_ftp"
require_relative "rest-ftp-daemon/remote_sftp"
require_relative "rest-ftp-daemon/api/job_presenter"
require_relative "rest-ftp-daemon/api/jobs"
require_relative "rest-ftp-daemon/api/dashboard"
require_relative "rest-ftp-daemon/api/status"
require_relative "rest-ftp-daemon/api/config"
require_relative "rest-ftp-daemon/api/debug"
require_relative "rest-ftp-daemon/api/root"
# Haml monkey-patching
require_relative "rest-ftp-daemon/patch_haml"
|
# -------------------------------------------------------------------------- #
# Copyright 2002-2013, OpenNebula Project (OpenNebula.org), C12G Labs #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); you may #
# not use this file except in compliance with the License. You may obtain #
# a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
#--------------------------------------------------------------------------- #
require 'one_helper'
require 'optparse/time'
class OneVMHelper < OpenNebulaHelper::OneHelper
MULTIPLE={
:name => "multiple",
:short => "-m x",
:large => "--multiple x",
:format => Integer,
:description => "Instance multiple VMs"
}
IMAGE = {
:name => "image",
:short => "-i id|name",
:large => "--image id|name" ,
:description => "Selects the image",
:format => String,
:proc => lambda { |o, options|
OpenNebulaHelper.rname_to_id(o, "IMAGE")
}
}
FILE = {
:name => "file",
:short => "-f file",
:large => "--file file" ,
:description => "Selects the template file",
:format => String,
:proc => lambda { |o, options|
if File.file?(o)
options[:file] = o
else
exit -1
end
}
}
HOLD = {
:name => "hold",
:large => "--hold",
:description => "Creates the new VM on hold state instead of pending"
}
SCHEDULE = {
:name => "schedule",
:large => "--schedule TIME",
:description => "Schedules this action to be executed after the given time",
:format => Time
}
def self.rname
"VM"
end
def self.conf_file
"onevm.yaml"
end
def self.state_to_str(id, lcm_id)
id = id.to_i
state_str = VirtualMachine::VM_STATE[id]
short_state_str = VirtualMachine::SHORT_VM_STATES[state_str]
if short_state_str=="actv"
lcm_id = lcm_id.to_i
lcm_state_str = VirtualMachine::LCM_STATE[lcm_id]
return VirtualMachine::SHORT_LCM_STATES[lcm_state_str]
end
return short_state_str
end
def format_pool(options)
config_file = self.class.table_conf
table = CLIHelper::ShowTable.new(config_file, self) do
column :ID, "ONE identifier for Virtual Machine", :size=>6 do |d|
d["ID"]
end
column :NAME, "Name of the Virtual Machine", :left,
:size=>15 do |d|
if d["RESCHED"] == "1"
"*#{d["NAME"]}"
else
d["NAME"]
end
end
column :USER, "Username of the Virtual Machine owner", :left,
:size=>8 do |d|
helper.user_name(d, options)
end
column :GROUP, "Group of the Virtual Machine", :left,
:size=>8 do |d|
helper.group_name(d, options)
end
column :STAT, "Actual status", :size=>4 do |d,e|
OneVMHelper.state_to_str(d["STATE"], d["LCM_STATE"])
end
column :UCPU, "CPU percentage used by the VM", :size=>4 do |d|
d["CPU"]
end
column :UMEM, "Memory used by the VM", :size=>7 do |d|
OpenNebulaHelper.unit_to_str(d["MEMORY"].to_i, options)
end
column :HOST, "Host where the VM is running", :left, :size=>10 do |d|
if d['HISTORY_RECORDS'] && d['HISTORY_RECORDS']['HISTORY']
state_str = VirtualMachine::VM_STATE[d['STATE'].to_i]
if %w{ACTIVE SUSPENDED}.include? state_str
d['HISTORY_RECORDS']['HISTORY']['HOSTNAME']
end
end
end
column :TIME, "Time since the VM was submitted", :size=>10 do |d|
stime = d["STIME"].to_i
etime = d["ETIME"]=="0" ? Time.now.to_i : d["ETIME"].to_i
dtime = etime-stime
OpenNebulaHelper.period_to_str(dtime, false)
end
default :ID, :USER, :GROUP, :NAME, :STAT, :UCPU, :UMEM, :HOST,
:TIME
end
table
end
def schedule_actions(ids,options,action)
# Verbose by default
options[:verbose] = true
perform_actions(
ids, options,
"#{action} scheduled at #{options[:schedule]}") do |vm|
rc = vm.info
if OpenNebula.is_error?(rc)
puts rc.message
exit -1
end
tmp_str = vm.user_template_str
tmp_str << "\nSCHED_ACTION = [ACTION = #{action}, TIME = #{options[:schedule].to_i}]"
vm.update(tmp_str)
end
end
private
def factory(id=nil)
if id
OpenNebula::VirtualMachine.new_with_id(id, @client)
else
xml=OpenNebula::VirtualMachine.build_xml
OpenNebula::VirtualMachine.new(xml, @client)
end
end
def factory_pool(user_flag=-2)
OpenNebula::VirtualMachinePool.new(@client, user_flag)
end
def format_resource(vm)
str_h1="%-80s"
str="%-20s: %-20s"
CLIHelper.print_header(
str_h1 % "VIRTUAL MACHINE #{vm['ID']} INFORMATION")
puts str % ["ID", vm.id.to_s]
puts str % ["NAME", vm.name]
puts str % ["USER", vm['UNAME']]
puts str % ["GROUP", vm['GNAME']]
puts str % ["STATE", vm.state_str]
puts str % ["LCM_STATE", vm.lcm_state_str]
puts str % ["RESCHED", OpenNebulaHelper.boolean_to_str(vm['RESCHED'])]
puts str % ["HOST",
vm['/VM/HISTORY_RECORDS/HISTORY[last()]/HOSTNAME']] if
%w{ACTIVE SUSPENDED}.include? vm.state_str
puts str % ["START TIME",
OpenNebulaHelper.time_to_str(vm['/VM/STIME'])]
puts str % ["END TIME",
OpenNebulaHelper.time_to_str(vm['/VM/ETIME'])]
value=vm['DEPLOY_ID']
puts str % ["DEPLOY ID", value=="" ? "-" : value]
puts
CLIHelper.print_header(str_h1 % "VIRTUAL MACHINE MONITORING",false)
poll_attrs = {
"USED MEMORY" => "MEMORY",
"USED CPU" => "CPU",
"NET_TX" => "NET_TX",
"NET_RX" => "NET_RX"
}
poll_attrs.each { |k,v|
if k == "USED CPU"
puts str % [k,vm[v]]
elsif k == "USED MEMORY"
puts str % [k, OpenNebulaHelper.unit_to_str(vm[v].to_i, {})]
else
puts str % [k, OpenNebulaHelper.unit_to_str(vm[v].to_i/1024, {})]
end
}
puts
CLIHelper.print_header(str_h1 % "PERMISSIONS",false)
["OWNER", "GROUP", "OTHER"].each { |e|
mask = "---"
mask[0] = "u" if vm["PERMISSIONS/#{e}_U"] == "1"
mask[1] = "m" if vm["PERMISSIONS/#{e}_M"] == "1"
mask[2] = "a" if vm["PERMISSIONS/#{e}_A"] == "1"
puts str % [e, mask]
}
puts
if vm.has_elements?("/VM/USER_TEMPLATE/SCHED_ACTION")
CLIHelper.print_header(str_h1 % "SCHEDULED ACTIONS",false)
CLIHelper::ShowTable.new(nil, self) do
column :"ACTION", "", :left, :size=>10 do |d|
d["ACTION"] if !d.nil?
end
column :"SCHEDULED", "", :size=>12 do |d|
OpenNebulaHelper.time_to_str(d["TIME"], false) if !d.nil?
end
column :"DONE", "", :size=>12 do |d|
OpenNebulaHelper.time_to_str(d["DONE"], false) if !d.nil?
end
column :"MESSAGE", "", :left, :donottruncate, :size=>43 do |d|
d["MESSAGE"] if !d.nil?
end
end.show([vm.to_hash['VM']['USER_TEMPLATE']['SCHED_ACTION']].flatten, {})
puts
end
CLIHelper.print_header(str_h1 % "VIRTUAL MACHINE TEMPLATE",false)
puts vm.template_str
if vm.has_elements?("/VM/USER_TEMPLATE")
puts
vm.delete_element("/VM/USER_TEMPLATE/SCHED_ACTION")
CLIHelper.print_header(str_h1 % "USER TEMPLATE",false)
puts vm.template_like_str('USER_TEMPLATE')
end
if vm.has_elements?("/VM/HISTORY_RECORDS")
puts
CLIHelper.print_header(str_h1 % "VIRTUAL MACHINE HISTORY",false)
format_history(vm)
end
end
def format_history(vm)
table=CLIHelper::ShowTable.new(nil, self) do
column :SEQ, "Sequence number", :size=>4 do |d|
d["SEQ"]
end
column :HOST, "Host name of the VM container", :left, :size=>15 do |d|
d["HOSTNAME"]
end
column :REASON, "VM state change reason", :left, :size=>6 do |d|
VirtualMachine.get_reason d["REASON"]
end
column :START, "Time when the state changed", :size=>15 do |d|
OpenNebulaHelper.time_to_str(d['STIME'])
end
column :TIME, "Total time in this state", :size=>15 do |d|
stime = d["STIME"].to_i
etime = d["ETIME"]=="0" ? Time.now.to_i : d["ETIME"].to_i
dtime = etime-stime
OpenNebulaHelper.period_to_str(dtime)
end
column :PROLOG_TIME, "Prolog time for this state", :size=>15 do |d|
stime = d["PSTIME"].to_i
if d["PSTIME"]=="0"
etime=0
else
etime = d["PETIME"]=="0" ? Time.now.to_i: d["PETIME"].to_i
end
dtime = etime-stime
OpenNebulaHelper.period_to_str(dtime)
end
default :SEQ, :HOST, :REASON, :START, :TIME, :PROLOG_TIME
end
vm_hash=vm.to_hash
history=[vm_hash['VM']['HISTORY_RECORDS']['HISTORY']].flatten
table.show(history)
end
end
Feature #1483: Add an ID to each SCHED_ACTION to make them easier to edit manually with onevm update
# -------------------------------------------------------------------------- #
# Copyright 2002-2013, OpenNebula Project (OpenNebula.org), C12G Labs #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); you may #
# not use this file except in compliance with the License. You may obtain #
# a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
#--------------------------------------------------------------------------- #
require 'one_helper'
require 'optparse/time'
class OneVMHelper < OpenNebulaHelper::OneHelper
MULTIPLE={
:name => "multiple",
:short => "-m x",
:large => "--multiple x",
:format => Integer,
:description => "Instance multiple VMs"
}
IMAGE = {
:name => "image",
:short => "-i id|name",
:large => "--image id|name" ,
:description => "Selects the image",
:format => String,
:proc => lambda { |o, options|
OpenNebulaHelper.rname_to_id(o, "IMAGE")
}
}
FILE = {
:name => "file",
:short => "-f file",
:large => "--file file" ,
:description => "Selects the template file",
:format => String,
:proc => lambda { |o, options|
if File.file?(o)
options[:file] = o
else
exit -1
end
}
}
HOLD = {
:name => "hold",
:large => "--hold",
:description => "Creates the new VM on hold state instead of pending"
}
SCHEDULE = {
:name => "schedule",
:large => "--schedule TIME",
:description => "Schedules this action to be executed after the given time",
:format => Time
}
def self.rname
"VM"
end
def self.conf_file
"onevm.yaml"
end
def self.state_to_str(id, lcm_id)
id = id.to_i
state_str = VirtualMachine::VM_STATE[id]
short_state_str = VirtualMachine::SHORT_VM_STATES[state_str]
if short_state_str=="actv"
lcm_id = lcm_id.to_i
lcm_state_str = VirtualMachine::LCM_STATE[lcm_id]
return VirtualMachine::SHORT_LCM_STATES[lcm_state_str]
end
return short_state_str
end
def format_pool(options)
config_file = self.class.table_conf
table = CLIHelper::ShowTable.new(config_file, self) do
column :ID, "ONE identifier for Virtual Machine", :size=>6 do |d|
d["ID"]
end
column :NAME, "Name of the Virtual Machine", :left,
:size=>15 do |d|
if d["RESCHED"] == "1"
"*#{d["NAME"]}"
else
d["NAME"]
end
end
column :USER, "Username of the Virtual Machine owner", :left,
:size=>8 do |d|
helper.user_name(d, options)
end
column :GROUP, "Group of the Virtual Machine", :left,
:size=>8 do |d|
helper.group_name(d, options)
end
column :STAT, "Actual status", :size=>4 do |d,e|
OneVMHelper.state_to_str(d["STATE"], d["LCM_STATE"])
end
column :UCPU, "CPU percentage used by the VM", :size=>4 do |d|
d["CPU"]
end
column :UMEM, "Memory used by the VM", :size=>7 do |d|
OpenNebulaHelper.unit_to_str(d["MEMORY"].to_i, options)
end
column :HOST, "Host where the VM is running", :left, :size=>10 do |d|
if d['HISTORY_RECORDS'] && d['HISTORY_RECORDS']['HISTORY']
state_str = VirtualMachine::VM_STATE[d['STATE'].to_i]
if %w{ACTIVE SUSPENDED}.include? state_str
d['HISTORY_RECORDS']['HISTORY']['HOSTNAME']
end
end
end
column :TIME, "Time since the VM was submitted", :size=>10 do |d|
stime = d["STIME"].to_i
etime = d["ETIME"]=="0" ? Time.now.to_i : d["ETIME"].to_i
dtime = etime-stime
OpenNebulaHelper.period_to_str(dtime, false)
end
default :ID, :USER, :GROUP, :NAME, :STAT, :UCPU, :UMEM, :HOST,
:TIME
end
table
end
def schedule_actions(ids,options,action)
# Verbose by default
options[:verbose] = true
perform_actions(
ids, options,
"#{action} scheduled at #{options[:schedule]}") do |vm|
rc = vm.info
if OpenNebula.is_error?(rc)
puts rc.message
exit -1
end
ids = vm.retrieve_elements('USER_TEMPLATE/SCHED_ACTION/ID')
id = 0
if (!ids.nil? && !ids.empty?)
ids.map! {|e| e.to_i }
id = ids.max + 1
end
tmp_str = vm.user_template_str
tmp_str << "\nSCHED_ACTION = [ID = #{id}, ACTION = #{action}, TIME = #{options[:schedule].to_i}]"
vm.update(tmp_str)
end
end
private
def factory(id=nil)
if id
OpenNebula::VirtualMachine.new_with_id(id, @client)
else
xml=OpenNebula::VirtualMachine.build_xml
OpenNebula::VirtualMachine.new(xml, @client)
end
end
def factory_pool(user_flag=-2)
OpenNebula::VirtualMachinePool.new(@client, user_flag)
end
def format_resource(vm)
str_h1="%-80s"
str="%-20s: %-20s"
CLIHelper.print_header(
str_h1 % "VIRTUAL MACHINE #{vm['ID']} INFORMATION")
puts str % ["ID", vm.id.to_s]
puts str % ["NAME", vm.name]
puts str % ["USER", vm['UNAME']]
puts str % ["GROUP", vm['GNAME']]
puts str % ["STATE", vm.state_str]
puts str % ["LCM_STATE", vm.lcm_state_str]
puts str % ["RESCHED", OpenNebulaHelper.boolean_to_str(vm['RESCHED'])]
puts str % ["HOST",
vm['/VM/HISTORY_RECORDS/HISTORY[last()]/HOSTNAME']] if
%w{ACTIVE SUSPENDED}.include? vm.state_str
puts str % ["START TIME",
OpenNebulaHelper.time_to_str(vm['/VM/STIME'])]
puts str % ["END TIME",
OpenNebulaHelper.time_to_str(vm['/VM/ETIME'])]
value=vm['DEPLOY_ID']
puts str % ["DEPLOY ID", value=="" ? "-" : value]
puts
CLIHelper.print_header(str_h1 % "VIRTUAL MACHINE MONITORING",false)
poll_attrs = {
"USED MEMORY" => "MEMORY",
"USED CPU" => "CPU",
"NET_TX" => "NET_TX",
"NET_RX" => "NET_RX"
}
poll_attrs.each { |k,v|
if k == "USED CPU"
puts str % [k,vm[v]]
elsif k == "USED MEMORY"
puts str % [k, OpenNebulaHelper.unit_to_str(vm[v].to_i, {})]
else
puts str % [k, OpenNebulaHelper.unit_to_str(vm[v].to_i/1024, {})]
end
}
puts
CLIHelper.print_header(str_h1 % "PERMISSIONS",false)
["OWNER", "GROUP", "OTHER"].each { |e|
mask = "---"
mask[0] = "u" if vm["PERMISSIONS/#{e}_U"] == "1"
mask[1] = "m" if vm["PERMISSIONS/#{e}_M"] == "1"
mask[2] = "a" if vm["PERMISSIONS/#{e}_A"] == "1"
puts str % [e, mask]
}
puts
if vm.has_elements?("/VM/USER_TEMPLATE/SCHED_ACTION")
CLIHelper.print_header(str_h1 % "SCHEDULED ACTIONS",false)
CLIHelper::ShowTable.new(nil, self) do
column :"ID", "", :size=>2 do |d|
d["ID"] if !d.nil?
end
column :"ACTION", "", :left, :size=>10 do |d|
d["ACTION"] if !d.nil?
end
column :"SCHEDULED", "", :size=>12 do |d|
OpenNebulaHelper.time_to_str(d["TIME"], false) if !d.nil?
end
column :"DONE", "", :size=>12 do |d|
OpenNebulaHelper.time_to_str(d["DONE"], false) if !d.nil?
end
column :"MESSAGE", "", :left, :donottruncate, :size=>40 do |d|
d["MESSAGE"] if !d.nil?
end
end.show([vm.to_hash['VM']['USER_TEMPLATE']['SCHED_ACTION']].flatten, {})
puts
end
CLIHelper.print_header(str_h1 % "VIRTUAL MACHINE TEMPLATE",false)
puts vm.template_str
if vm.has_elements?("/VM/USER_TEMPLATE")
puts
vm.delete_element("/VM/USER_TEMPLATE/SCHED_ACTION")
CLIHelper.print_header(str_h1 % "USER TEMPLATE",false)
puts vm.template_like_str('USER_TEMPLATE')
end
if vm.has_elements?("/VM/HISTORY_RECORDS")
puts
CLIHelper.print_header(str_h1 % "VIRTUAL MACHINE HISTORY",false)
format_history(vm)
end
end
def format_history(vm)
table=CLIHelper::ShowTable.new(nil, self) do
column :SEQ, "Sequence number", :size=>4 do |d|
d["SEQ"]
end
column :HOST, "Host name of the VM container", :left, :size=>15 do |d|
d["HOSTNAME"]
end
column :REASON, "VM state change reason", :left, :size=>6 do |d|
VirtualMachine.get_reason d["REASON"]
end
column :START, "Time when the state changed", :size=>15 do |d|
OpenNebulaHelper.time_to_str(d['STIME'])
end
column :TIME, "Total time in this state", :size=>15 do |d|
stime = d["STIME"].to_i
etime = d["ETIME"]=="0" ? Time.now.to_i : d["ETIME"].to_i
dtime = etime-stime
OpenNebulaHelper.period_to_str(dtime)
end
column :PROLOG_TIME, "Prolog time for this state", :size=>15 do |d|
stime = d["PSTIME"].to_i
if d["PSTIME"]=="0"
etime=0
else
etime = d["PETIME"]=="0" ? Time.now.to_i: d["PETIME"].to_i
end
dtime = etime-stime
OpenNebulaHelper.period_to_str(dtime)
end
default :SEQ, :HOST, :REASON, :START, :TIME, :PROLOG_TIME
end
vm_hash=vm.to_hash
history=[vm_hash['VM']['HISTORY_RECORDS']['HISTORY']].flatten
table.show(history)
end
end
|
require 'rest-gw2/server/cache'
require 'mime/types'
require 'rest-core'
require 'jellyfish'
require 'openssl'
require 'erb'
require 'cgi'
module RestGW2
CONFIG = ENV['RESTGW2_CONFIG'] || File.expand_path("#{__dir__}/../../.env")
def self.extract_env path
return {} unless File.exist?(path)
Hash[File.read(path).strip.squeeze("\n").each_line.map do |line|
name, value = line.split('=')
[name, value.chomp] if name && value
end.compact]
end
extract_env(CONFIG).each do |k, v|
ENV[k] ||= v
end
module DalliExtension
def [] *args
get(*args)
end
def []= *args
set(*args)
end
def store *args
set(*args)
end
end
def self.cache logger
@cache ||= Cache.pick(logger)
end
class ServerCore
include Jellyfish
SECRET = ENV['RESTGW2_SECRET'] || 'RESTGW2_SECRET'*2
COINS = %w[gold silver copper].zip(%w[
https://wiki.guildwars2.com/images/d/d1/Gold_coin.png
https://wiki.guildwars2.com/images/3/3c/Silver_coin.png
https://wiki.guildwars2.com/images/e/eb/Copper_coin.png
]).freeze
controller_include Module.new{
# VIEW
def render path
erb(:layout){ erb(path) }
end
def erb path, &block
ERB.new(views(path)).result(binding, &block)
end
def h str
CGI.escape_html(str) if str.kind_of?(String)
end
def u str
CGI.escape(str) if str.kind_of?(String)
end
def path str, q={}
RC::Middleware.request_uri(
RC::REQUEST_PATH => "#{ENV['RESTGW2_PREFIX']}#{str}",
RC::REQUEST_QUERY => q)
end
def views path
File.read("#{__dir__}/view/#{path}.erb")
end
def refresh_path
path(request.path, :r => '1', :t => t)
end
def menu item, title
href = path(item, :t => t)
if "#{ENV['RESTGW2_PREFIX']}#{request.fullpath}" == href
title
else
%Q{<a href="#{href}">#{title}</a>}
end
end
def menu_trans item, title
menu("/transactions#{item}", title)
end
# HELPER
def item_wiki item
page = item['name'].tr(' ', '_')
img = %Q{<img class="icon" title="#{item_title(item)}"} +
%Q{ src="#{h item['icon']}"/>}
%Q{<a href="http://wiki.guildwars2.com/wiki/#{u page}">#{img}</a>}
end
def item_title item
d = item['description']
d && d.unpack('U*').map{ |c| "&##{c};" }.join
end
# Render the stack count, e.g. "(250)"; nil (nothing shown) for a
# single item.
def item_count item
  count = item['count']
  return unless count > 1
  "(#{count})"
end
def item_price item
b = item['buys']
s = item['sells']
bb = b && price(b['unit_price'])
ss = s && price(s['unit_price'])
%Q{#{bb} / #{ss}} if bb || ss
end
# Render a raw copper amount as gold/silver/copper with coin icons,
# skipping leading zero denominations (e.g. 0g 3s 5c renders only the
# silver and copper parts). Returns '-' for zero.
# Relies on sibling helper `h` (HTML escape) and the COINS constant
# (denomination name/icon pairs).
def price copper
g = copper / 100_00
s = copper % 100_00 / 100
c = copper % 100
l = [g, s, c]
# index of the most significant nonzero denomination; nil means 0
n = l.index(&:nonzero?)
return '-' unless n
l.zip(COINS).drop(n).map do |(num, (title, src))|
%Q{#{num}<img class="price" title="#{h title}" src="#{h src}"/>}
end.join(' ')
end
def abbr_time_ago time, precision=1
return unless time
ago = time_ago(time)
short = ago.take(precision).join(' ')
%Q{(<abbr title="#{time}, #{ago.join(' ')} ago">#{short} ago</abbr>)}
end
# Decompose the age of `time` into unit strings, most significant
# first, e.g. ["2 days", "3 hours", "4 minutes", "5 seconds"].
# @param time [String] timestamp accepted by Time.parse
# @param precision [Integer] unused here — callers trim the result
#   with #take instead; NOTE(review): candidate for removal once call
#   sites are audited.
# NOTE(review): Time.parse needs `require 'time'`; presumably loaded
# transitively by a dependency — confirm.
def time_ago time, precision=1
delta = (Time.now - Time.parse(time)).to_i
result = []
# Divide the delta repeatedly by each unit size; each remainder is
# that unit's count. `break` merely stops the fold early — `result`
# already holds the answer and inject's return value is discarded.
[[ 60, :seconds],
[ 60, :minutes],
[ 24, :hours ],
[365, :days ],
[999, :years ]].
inject(delta) do |length, (divisor, name)|
quotient, remainder = length.divmod(divisor)
result.unshift("#{remainder} #{name}")
break if quotient == 0
quotient
end
result
end
# Total value of a list of trading-post transactions
# (price x quantity, summed over all entries; 0 for an empty list).
def sum_trans trans
  total = 0
  trans.each { |order| total += order['price'] * order['quantity'] }
  total
end
# Sum the instant buy/sell valuation for a list of items.
# Returns [total_buy, total_sell]. nil entries (empty slots) are
# skipped; an item missing its buys/sells side contributes nothing
# to that side.
def sum_items items
  buy_total = 0
  sell_total = 0
  items.each do |item|
    next unless item
    count = item['count']
    buys  = item['buys']
    sells = item['sells']
    buy_total  += buys['unit_price'] * count if buys
    sell_total += sells['unit_price'] * count if sells
  end
  [buy_total, sell_total]
end
# CONTROLLER
def gw2_call msg, *args
refresh = !!request.GET['r']
yield(gw2.public_send(msg, *args, 'cache.update' => refresh).itself)
rescue RestGW2::Error => e
@error = e.error['text']
render :error
end
def gw2
Client.new(:access_token => access_token,
:log_method => logger(env).method(:info),
:cache => RestGW2.cache(logger(env)))
end
# ACCESS TOKEN
def access_token
decrypted_access_token || ENV['RESTGW2_ACCESS_TOKEN']
rescue ArgumentError, OpenSSL::Cipher::CipherError => e
raise RestGW2::Error.new({'text' => e.message}, 0)
end
def decrypted_access_token
decrypt(t) if t
end
def t
@t ||= begin
r = request.GET['t']
r if r && !r.strip.empty?
end
end
# UTILITIES
# Encrypt `data` with AES-128-GCM under the server-wide SECRET.
# A fresh random IV is generated per call; the IV, ciphertext and GCM
# auth tag are all required for decryption, so all three are packed
# by encode_base64 into a single URL-safe token.
# NOTE(review): SECRET's fallback ('RESTGW2_SECRET'*2) is 28 bytes
# while aes-128-gcm expects a 16-byte key — OpenSSL versions differ
# on truncating vs. rejecting over-long keys; confirm key sizing.
def encrypt data
cipher = OpenSSL::Cipher.new('aes-128-gcm')
cipher.encrypt
cipher.key = SECRET
iv = cipher.random_iv
encrypted = cipher.update(data) + cipher.final
# auth_tag must be read AFTER final; it authenticates the ciphertext
tag = cipher.auth_tag
encode_base64(iv, encrypted, tag)
end
# Reverse of encrypt: unpack IV, ciphertext and auth tag from the
# URL-safe token, then decrypt and authenticate. OpenSSL raises
# OpenSSL::Cipher::CipherError on tampering or a wrong key; the
# caller (access_token) translates that into a RestGW2::Error.
def decrypt data
iv, encrypted, tag = decode_base64(data)
decipher = OpenSSL::Cipher.new('aes-128-gcm')
decipher.decrypt
decipher.key = SECRET
decipher.iv = iv
# setting auth_tag before final makes GCM verify integrity
decipher.auth_tag = tag
decipher.update(encrypted) + decipher.final
end
# Strict-Base64-encode each segment, join them with '.', then swap
# the URL-hostile characters (+ / =) for safe ones (- _ ~).
def encode_base64 *data
  segments = data.map { |segment| [segment].pack('m0') }
  segments.join('.').tr('+/=', '-_~')
end
# Inverse of encode_base64: split on '.', restore the standard
# Base64 alphabet, and strict-decode each segment.
def decode_base64 str
  str.split('.').map do |segment|
    segment.tr('-_~', '+/=').unpack('m0').first
  end
end
# MISC
def logger env
env['rack.logger'] || begin
require 'logger'
Logger.new(env['rack.errors'])
end
end
}
post '/access_token' do
t = encrypt(request.POST['access_token'])
r = request.POST['referrer']
u = if r == path('/') then path('/account') else r end
found "#{u}?t=#{t}"
end
get '/' do
render :index
end
get '/account' do
gw2_call(:account_with_detail) do |account|
@info = account
render :info
end
end
get '/characters' do
render :wip
end
get '/bank' do
gw2_call(:with_item_detail, 'v2/account/bank') do |items|
@items = items
@buy, @sell = sum_items(items)
render :items
end
end
get '/materials' do
gw2_call(:with_item_detail, 'v2/account/materials') do |items|
@items = items
@buy, @sell = sum_items(items)
render :items
end
end
get '/wallet' do
gw2_call(:wallet_with_detail) do |wallet|
@wallet = wallet
render :wallet
end
end
get '/transactions/buying' do
gw2_call(:transactions_with_detail, 'current/buys') do |trans|
@trans = trans
@total = sum_trans(trans)
render :transactions
end
end
get '/transactions/selling' do
gw2_call(:transactions_with_detail, 'current/sells') do |trans|
@trans = trans
@total = sum_trans(trans)
render :transactions
end
end
get '/transactions/bought' do
gw2_call(:transactions_with_detail_compact, 'history/buys') do |trans|
@trans = trans
@total = sum_trans(trans)
render :transactions
end
end
get '/transactions/sold' do
gw2_call(:transactions_with_detail_compact, 'history/sells') do |trans|
@trans = trans
@total = sum_trans(trans)
render :transactions
end
end
get '/tokeninfo' do
gw2_call(:get, 'v2/tokeninfo') do |info|
@info = info
render :info
end
end
end
Server = Jellyfish::Builder.app do
use Rack::CommonLogger
use Rack::Chunked
use Rack::ContentLength
use Rack::Deflater
use Rack::ContentType, 'text/html; charset=utf-8'
map '/assets' do
run Rack::Directory.new('public')
end
map '/' do
run ServerCore.new
end
end
end
Compare canonical menu paths instead of the raw fullpath; this is more accurate because transient query parameters such as r=1 are ignored.
require 'rest-gw2/server/cache'
require 'mime/types'
require 'rest-core'
require 'jellyfish'
require 'openssl'
require 'erb'
require 'cgi'
module RestGW2
CONFIG = ENV['RESTGW2_CONFIG'] || File.expand_path("#{__dir__}/../../.env")
# Parse a dotenv-style file into a Hash of NAME => VALUE pairs.
# Returns {} when the file does not exist. Lines without a '=' are
# skipped; values may themselves contain '=' characters.
#
# @param path [String] path to the .env file
# @return [Hash{String => String}]
def self.extract_env path
  return {} unless File.exist?(path)
  pairs = File.read(path).strip.squeeze("\n").each_line.map do |line|
    # Split on the FIRST '=' only, so "KEY=a=b" yields value "a=b"
    # (a bare split('=') silently truncated such values).
    name, value = line.split('=', 2)
    [name, value.chomp] if name && value
  end
  Hash[pairs.compact]
end
extract_env(CONFIG).each do |k, v|
ENV[k] ||= v
end
module DalliExtension
def [] *args
get(*args)
end
def []= *args
set(*args)
end
def store *args
set(*args)
end
end
def self.cache logger
@cache ||= Cache.pick(logger)
end
class ServerCore
include Jellyfish
SECRET = ENV['RESTGW2_SECRET'] || 'RESTGW2_SECRET'*2
COINS = %w[gold silver copper].zip(%w[
https://wiki.guildwars2.com/images/d/d1/Gold_coin.png
https://wiki.guildwars2.com/images/3/3c/Silver_coin.png
https://wiki.guildwars2.com/images/e/eb/Copper_coin.png
]).freeze
controller_include Module.new{
# VIEW
def render path
erb(:layout){ erb(path) }
end
def erb path, &block
ERB.new(views(path)).result(binding, &block)
end
def h str
CGI.escape_html(str) if str.kind_of?(String)
end
def u str
CGI.escape(str) if str.kind_of?(String)
end
def path str, q={}
RC::Middleware.request_uri(
RC::REQUEST_PATH => "#{ENV['RESTGW2_PREFIX']}#{str}",
RC::REQUEST_QUERY => q)
end
def views path
File.read("#{__dir__}/view/#{path}.erb")
end
def refresh_path
path(request.path, :r => '1', :t => t)
end
def menu item, title
href = path(item, :t => t)
if path(request.path, :t => t) == href
title
else
%Q{<a href="#{href}">#{title}</a>}
end
end
def menu_trans item, title
menu("/transactions#{item}", title)
end
# HELPER
def item_wiki item
page = item['name'].tr(' ', '_')
img = %Q{<img class="icon" title="#{item_title(item)}"} +
%Q{ src="#{h item['icon']}"/>}
%Q{<a href="http://wiki.guildwars2.com/wiki/#{u page}">#{img}</a>}
end
def item_title item
d = item['description']
d && d.unpack('U*').map{ |c| "&##{c};" }.join
end
def item_count item
c = item['count']
"(#{c})" if c > 1
end
def item_price item
b = item['buys']
s = item['sells']
bb = b && price(b['unit_price'])
ss = s && price(s['unit_price'])
%Q{#{bb} / #{ss}} if bb || ss
end
def price copper
g = copper / 100_00
s = copper % 100_00 / 100
c = copper % 100
l = [g, s, c]
n = l.index(&:nonzero?)
return '-' unless n
l.zip(COINS).drop(n).map do |(num, (title, src))|
%Q{#{num}<img class="price" title="#{h title}" src="#{h src}"/>}
end.join(' ')
end
def abbr_time_ago time, precision=1
return unless time
ago = time_ago(time)
short = ago.take(precision).join(' ')
%Q{(<abbr title="#{time}, #{ago.join(' ')} ago">#{short} ago</abbr>)}
end
def time_ago time, precision=1
delta = (Time.now - Time.parse(time)).to_i
result = []
[[ 60, :seconds],
[ 60, :minutes],
[ 24, :hours ],
[365, :days ],
[999, :years ]].
inject(delta) do |length, (divisor, name)|
quotient, remainder = length.divmod(divisor)
result.unshift("#{remainder} #{name}")
break if quotient == 0
quotient
end
result
end
def sum_trans trans
trans.inject(0) do |sum, t|
sum + t['price'] * t['quantity']
end
end
def sum_items items
items.inject([0, 0]) do |sum, i|
next sum unless i
b = i['buys']
s = i['sells']
sum[0] += b['unit_price'] * i['count'] if b
sum[1] += s['unit_price'] * i['count'] if s
sum
end
end
# CONTROLLER
def gw2_call msg, *args
refresh = !!request.GET['r']
yield(gw2.public_send(msg, *args, 'cache.update' => refresh).itself)
rescue RestGW2::Error => e
@error = e.error['text']
render :error
end
def gw2
Client.new(:access_token => access_token,
:log_method => logger(env).method(:info),
:cache => RestGW2.cache(logger(env)))
end
# ACCESS TOKEN
def access_token
decrypted_access_token || ENV['RESTGW2_ACCESS_TOKEN']
rescue ArgumentError, OpenSSL::Cipher::CipherError => e
raise RestGW2::Error.new({'text' => e.message}, 0)
end
def decrypted_access_token
decrypt(t) if t
end
def t
@t ||= begin
r = request.GET['t']
r if r && !r.strip.empty?
end
end
# UTILITIES
def encrypt data
cipher = OpenSSL::Cipher.new('aes-128-gcm')
cipher.encrypt
cipher.key = SECRET
iv = cipher.random_iv
encrypted = cipher.update(data) + cipher.final
tag = cipher.auth_tag
encode_base64(iv, encrypted, tag)
end
def decrypt data
iv, encrypted, tag = decode_base64(data)
decipher = OpenSSL::Cipher.new('aes-128-gcm')
decipher.decrypt
decipher.key = SECRET
decipher.iv = iv
decipher.auth_tag = tag
decipher.update(encrypted) + decipher.final
end
def encode_base64 *data
data.map{ |d| [d].pack('m0') }.join('.').tr('+/=', '-_~')
end
def decode_base64 str
str.split('.').map{ |d| d.tr('-_~', '+/=').unpack('m0').first }
end
# MISC
def logger env
env['rack.logger'] || begin
require 'logger'
Logger.new(env['rack.errors'])
end
end
}
post '/access_token' do
t = encrypt(request.POST['access_token'])
r = request.POST['referrer']
u = if r == path('/') then path('/account') else r end
found "#{u}?t=#{t}"
end
get '/' do
render :index
end
get '/account' do
gw2_call(:account_with_detail) do |account|
@info = account
render :info
end
end
get '/characters' do
render :wip
end
get '/bank' do
gw2_call(:with_item_detail, 'v2/account/bank') do |items|
@items = items
@buy, @sell = sum_items(items)
render :items
end
end
get '/materials' do
gw2_call(:with_item_detail, 'v2/account/materials') do |items|
@items = items
@buy, @sell = sum_items(items)
render :items
end
end
get '/wallet' do
gw2_call(:wallet_with_detail) do |wallet|
@wallet = wallet
render :wallet
end
end
get '/transactions/buying' do
gw2_call(:transactions_with_detail, 'current/buys') do |trans|
@trans = trans
@total = sum_trans(trans)
render :transactions
end
end
get '/transactions/selling' do
gw2_call(:transactions_with_detail, 'current/sells') do |trans|
@trans = trans
@total = sum_trans(trans)
render :transactions
end
end
get '/transactions/bought' do
gw2_call(:transactions_with_detail_compact, 'history/buys') do |trans|
@trans = trans
@total = sum_trans(trans)
render :transactions
end
end
get '/transactions/sold' do
gw2_call(:transactions_with_detail_compact, 'history/sells') do |trans|
@trans = trans
@total = sum_trans(trans)
render :transactions
end
end
get '/tokeninfo' do
gw2_call(:get, 'v2/tokeninfo') do |info|
@info = info
render :info
end
end
end
Server = Jellyfish::Builder.app do
use Rack::CommonLogger
use Rack::Chunked
use Rack::ContentLength
use Rack::Deflater
use Rack::ContentType, 'text/html; charset=utf-8'
map '/assets' do
run Rack::Directory.new('public')
end
map '/' do
run ServerCore.new
end
end
end
|
#--
# Copyright (C)2007 Tony Arcieri
# Includes portions originally Copyright (C)2005 Zed Shaw
# You can redistribute this under the terms of the Ruby license
# See file LICENSE for details
#++
require File.dirname(__FILE__) + '/../rev'
require File.dirname(__FILE__) + '/../http11_client'
module Rev
# A simple hash is returned for each request made by HttpClient with
# the headers that were given by the server for that request.
class HttpResponseHeader < Hash
# The reason returned in the http response ("OK","File not found",etc.)
attr_accessor :http_reason
# The HTTP version returned.
attr_accessor :http_version
# The status code (as a string!)
attr_accessor :http_status
# HTTP response status as an integer,
# or nil when the status line was missing or non-numeric.
def status
Integer(http_status) rescue nil
end
# Length of content as an integer, or nil if chunked/unspecified.
# Header keys are the Ragel parser's munged forms (e.g.
# "CONTENT_LENGTH"), hence the HttpClient constants.
def content_length
Integer(self[HttpClient::CONTENT_LENGTH]) rescue nil
end
# Is the transfer encoding chunked?
def chunked_encoding?
/chunked/i === self[HttpClient::TRANSFER_ENCODING]
end
end
# Header hash for a single chunk of a chunked-encoded response.
class HttpChunkHeader < Hash
  # When parsing chunked encodings this is set (hex string).
  attr_accessor :http_chunk_size

  # Size of the chunk as an integer. Memoized; a missing size header
  # means a zero-length (final) chunk.
  def chunk_size
    return @chunk_size unless @chunk_size.nil?
    # 16 is the hex radix. The original wrote `to_i(base=16)`, which
    # assigned a throwaway local named `base` — misleading, removed.
    @chunk_size = @http_chunk_size ? @http_chunk_size.to_i(16) : 0
  end
end
# Methods for building HTTP requests
module HttpEncoding
  HTTP_REQUEST_HEADER="%s %s HTTP/1.1\r\n"
  FIELD_ENCODING = "%s: %s\r\n"

  # Escapes a URI component (application/x-www-form-urlencoded style:
  # spaces become '+', other unsafe bytes become %XX).
  def escape(s)
    s.to_s.gsub(/([^ a-zA-Z0-9_.-]+)/n) {
      # Percent-encode byte-by-byte. Use bytesize, not size: under
      # Ruby >= 1.9 String#size counts characters, so a multi-byte
      # (UTF-8) match was under-unpacked and silently mis-encoded.
      '%'+$1.unpack('H2'*$1.bytesize).join('%').upcase
    }.tr(' ', '+')
  end

  # Unescapes a URI escaped string.
  def unescape(s)
    s.tr('+', ' ').gsub(/((?:%[0-9a-fA-F]{2})+)/n){
      [$1.delete('%')].pack('H*')
    }
  end

  # Map all header keys to a downcased string version
  def munge_header_keys(head)
    head.reduce({}) { |h, (k, v)| h[k.to_s.downcase] = v; h }
  end

  # HTTP is kind of retarded that you have to specify
  # a Host header, but if you include port 80 then further
  # redirects will tack on the :80 which is annoying.
  def encode_host
    remote_host + (remote_port.to_i != 80 ? ":#{remote_port}" : "")
  end

  # Request line, e.g. "GET /x?a=b HTTP/1.1\r\n".
  def encode_request(method, path, query)
    HTTP_REQUEST_HEADER % [method.to_s.upcase, encode_query(path, query)]
  end

  # Append an auto-escaped query string to path; query may be nil.
  def encode_query(path, query)
    return path unless query
    path + "?" + query.map { |k, v| encode_param(k, v) }.join('&')
  end

  # URL encodes a single k=v parameter.
  def encode_param(k, v)
    escape(k) + "=" + escape(v)
  end

  # Encode a field in an HTTP header
  def encode_field(k, v)
    FIELD_ENCODING % [k, v]
  end

  # Render a header hash, munging keys from foo-bar-baz to Foo-Bar-Baz.
  def encode_headers(head)
    head.reduce('') do |result, (k, v)|
      k = k.split('-').map(&:capitalize).join('-')
      result << encode_field(k, v)
    end
  end

  # One "Cookie: k=v" line per cookie pair.
  def encode_cookies(cookies)
    cookies.reduce('') { |result, (k, v)| result << encode_field('Cookie', encode_param(k, v)) }
  end
end
# HTTP client class implemented as a subclass of Rev::TCPSocket. Encodes
# requests and allows streaming consumption of the response. Response is
# parsed with a Ragel-generated whitelist parser which supports chunked
# HTTP encoding.
#
# == Example
#
# loop = Rev::Loop.default
# client = Rev::HttpClient.connect("www.google.com").attach
# client.get('/search', query: {q: 'foobar'})
# loop.run
#
class HttpClient < TCPSocket
include HttpEncoding
ALLOWED_METHODS=[:put, :get, :post, :delete, :head]
TRANSFER_ENCODING="TRANSFER_ENCODING"
CONTENT_LENGTH="CONTENT_LENGTH"
SET_COOKIE="SET_COOKIE"
LOCATION="LOCATION"
HOST="HOST"
CRLF="\r\n"
# Connect to the given server, with port 80 as the default
def self.connect(addr, port = 80, *args)
super
end
def initialize(socket)
super
@parser = HttpClientParser.new
@parser_nbytes = 0
@state = :response_header
@data = Rev::Buffer.new
@response_header = HttpResponseHeader.new
@chunk_header = HttpChunkHeader.new
end
# Send an HTTP request and consume the response.
# Supports the following options:
#
# head: {Key: Value}
# Specify an HTTP header, e.g. {'Connection': 'close'}
#
# query: {Key: Value}
# Specify query string parameters (auto-escaped)
#
# cookies: {Key: Value}
# Specify hash of cookies (auto-escaped)
#
# body: String
# Specify the request body (you must encode it for now)
#
# ssl: Boolean
# If true, initiates an HTTPS (SSL) request
#
def request(method, path, options = {})
raise ArgumentError, "invalid request path" unless path[0] == '/'
raise RuntimeError, "request already sent" if @requested
@method, @path, @options = method, path, options
@requested = true
return unless @connected
@options[:ssl] ? start_ssl : send_request
end
# Enable the HttpClient if it has been disabled
def enable
super
dispatch unless @data.empty?
end
# Called when response header has been received
def on_response_header(response_header)
end
# Called when part of the body has been read
def on_body_data(data)
STDOUT.write data
STDOUT.flush
end
# Called when the request has completed
def on_request_complete
close
end
# Called when an error occurs during the request
def on_error(reason)
close
raise RuntimeError, reason
end
#########
protected
#########
#
# Rev callbacks
#
def start_ssl
require 'rev/ssl'
extend Rev::SSL
ssl_start_client
end
def on_connect
@connected = true
if @options and @options[:ssl]
start_ssl
else
send_request if @method and @path
end
end
def on_ssl_connect
send_request if @method and @path
end
def on_read(data)
@data << data
dispatch
end
#
# Request sending
#
def send_request
send_request_header
send_request_body
end
def send_request_header
query = @options[:query]
head = @options[:head] ? munge_header_keys(@options[:head]) : {}
cookies = @options[:cookies]
body = @options[:body]
# Set the Host header if it hasn't been specified already
head['host'] ||= encode_host
# Set the Content-Length if it hasn't been specified already and a body was given
head['content-length'] ||= body ? body.length : 0
# Set the User-Agent if it hasn't been specified
head['user-agent'] ||= "Rev #{Rev::VERSION}"
# Default to Connection: close
head['connection'] ||= 'close'
# Build the request
request_header = encode_request(@method, @path, query)
request_header << encode_headers(head)
request_header << encode_cookies(cookies) if cookies
request_header << CRLF
write request_header
end
def send_request_body
write @options[:body] if @options[:body]
end
#
# Response processing
#
# Drive the response state machine over whatever bytes are buffered.
# Each parse_*/process_* handler returns true when it made progress
# (consumed input and advanced @state) and false when it needs more
# data; the loop therefore spins until progress stalls, the response
# is :finished/:invalid, or the watcher has been disabled.
def dispatch
while enabled? and case @state
when :response_header
parse_response_header
when :chunk_header
parse_chunk_header
when :chunk_body
process_chunk_body
when :chunk_footer
process_chunk_footer
when :response_footer
process_response_footer
when :body
process_body
when :finished, :invalid
break
else raise RuntimeError, "invalid state: #{@state}"
end
end
end
# Feed buffered bytes into the Ragel parser, filling `header` in
# place. Returns true only once a complete header has been parsed —
# the consumed bytes are then dropped from @data and the parser is
# reset for the next header. Returns false when more data is needed.
# Malformed input is reported via on_error and flips @state to
# :invalid (NOTE(review): the default on_error raises, so the
# :invalid assignment is only reached when on_error is overridden).
def parse_header(header)
return false if @data.empty?
begin
@parser_nbytes = @parser.execute(header, @data.to_str, @parser_nbytes)
rescue Rev::HttpClientParserError
on_error "invalid HTTP format, parsing fails"
@state = :invalid
end
return false unless @parser.finished?
# Clear parsed data from the buffer
@data.read(@parser_nbytes)
@parser.reset
@parser_nbytes = 0
true
end
def parse_response_header
return false unless parse_header(@response_header)
unless @response_header.http_status and @response_header.http_reason
on_error "no HTTP response"
@state = :invalid
return false
end
on_response_header(@response_header)
if @response_header.chunked_encoding?
@state = :chunk_header
else
@state = :body
@bytes_remaining = @response_header.content_length
end
true
end
def parse_chunk_header
return false unless parse_header(@chunk_header)
@bytes_remaining = @chunk_header.chunk_size
@chunk_header = HttpChunkHeader.new
@state = @bytes_remaining > 0 ? :chunk_body : :response_footer
true
end
# Emit chunk payload bytes to the on_body_data callback. If the
# buffer holds less than the rest of the chunk, consume everything
# and wait for more data (false); otherwise finish this chunk and
# advance to its trailing CRLF footer.
def process_chunk_body
if @data.size < @bytes_remaining
@bytes_remaining -= @data.size
on_body_data @data.read
return false
end
on_body_data @data.read(@bytes_remaining)
@bytes_remaining = 0
@state = :chunk_footer
true
end
def process_chunk_footer
return false if @data.size < 2
if @data.read(2) == CRLF
@state = :chunk_header
else
on_error "non-CRLF chunk footer"
@state = :invalid
end
true
end
def process_response_footer
return false if @data.size < 2
if @data.read(2) == CRLF
if @data.empty?
on_request_complete
@state = :finished
else
on_error "garbage at end of chunked response"
@state = :invalid
end
else
on_error "non-CRLF response footer"
@state = :invalid
end
false
end
# Consume a non-chunked response body.
# - With no Content-Length (@bytes_remaining nil): stream everything
#   until the peer closes the connection.
# - Otherwise emit up to @bytes_remaining bytes; once the declared
#   length is satisfied, any leftover buffered bytes are a protocol
#   error. Always returns false: body consumption never hands a new
#   state back to the dispatch loop for immediate reprocessing.
def process_body
if @bytes_remaining.nil?
on_body_data @data.read
return false
end
if @bytes_remaining.zero?
on_request_complete
@state = :finished
return false
end
if @data.size < @bytes_remaining
@bytes_remaining -= @data.size
on_body_data @data.read
return false
end
on_body_data @data.read(@bytes_remaining)
@bytes_remaining = 0
if @data.empty?
on_request_complete
@state = :finished
else
on_error "garbage at end of body"
@state = :invalid
end
false
end
end
end
Remove SSL support from HTTP client until it can be done properly
git-svn-id: f3f86cdbd6861aa9839b323b12b6af648189b59b@148 ea6e6bc6-682b-4e20-bb59-304e6edd756d
#--
# Copyright (C)2007 Tony Arcieri
# Includes portions originally Copyright (C)2005 Zed Shaw
# You can redistribute this under the terms of the Ruby license
# See file LICENSE for details
#++
require File.dirname(__FILE__) + '/../rev'
require File.dirname(__FILE__) + '/../http11_client'
module Rev
# A simple hash is returned for each request made by HttpClient with
# the headers that were given by the server for that request.
class HttpResponseHeader < Hash
# The reason returned in the http response ("OK","File not found",etc.)
attr_accessor :http_reason
# The HTTP version returned.
attr_accessor :http_version
# The status code (as a string!)
attr_accessor :http_status
# HTTP response status as an integer
def status
Integer(http_status) rescue nil
end
# Length of content as an integer, or nil if chunked/unspecified
def content_length
Integer(self[HttpClient::CONTENT_LENGTH]) rescue nil
end
# Is the transfer encoding chunked?
def chunked_encoding?
/chunked/i === self[HttpClient::TRANSFER_ENCODING]
end
end
class HttpChunkHeader < Hash
# When parsing chunked encodings this is set
attr_accessor :http_chunk_size
# Size of the chunk as an integer
def chunk_size
return @chunk_size unless @chunk_size.nil?
@chunk_size = @http_chunk_size ? @http_chunk_size.to_i(base=16) : 0
end
end
# Methods for building HTTP requests
module HttpEncoding
  HTTP_REQUEST_HEADER="%s %s HTTP/1.1\r\n"
  FIELD_ENCODING = "%s: %s\r\n"

  # Escapes a URI component (application/x-www-form-urlencoded style:
  # spaces become '+', other unsafe bytes become %XX).
  def escape(s)
    s.to_s.gsub(/([^ a-zA-Z0-9_.-]+)/n) {
      # Percent-encode byte-by-byte. Use bytesize, not size: under
      # Ruby >= 1.9 String#size counts characters, so a multi-byte
      # (UTF-8) match was under-unpacked and silently mis-encoded.
      '%'+$1.unpack('H2'*$1.bytesize).join('%').upcase
    }.tr(' ', '+')
  end

  # Unescapes a URI escaped string.
  def unescape(s)
    s.tr('+', ' ').gsub(/((?:%[0-9a-fA-F]{2})+)/n){
      [$1.delete('%')].pack('H*')
    }
  end

  # Map all header keys to a downcased string version
  def munge_header_keys(head)
    head.reduce({}) { |h, (k, v)| h[k.to_s.downcase] = v; h }
  end

  # HTTP is kind of retarded that you have to specify
  # a Host header, but if you include port 80 then further
  # redirects will tack on the :80 which is annoying.
  def encode_host
    remote_host + (remote_port.to_i != 80 ? ":#{remote_port}" : "")
  end

  # Request line, e.g. "GET /x?a=b HTTP/1.1\r\n".
  def encode_request(method, path, query)
    HTTP_REQUEST_HEADER % [method.to_s.upcase, encode_query(path, query)]
  end

  # Append an auto-escaped query string to path; query may be nil.
  def encode_query(path, query)
    return path unless query
    path + "?" + query.map { |k, v| encode_param(k, v) }.join('&')
  end

  # URL encodes a single k=v parameter.
  def encode_param(k, v)
    escape(k) + "=" + escape(v)
  end

  # Encode a field in an HTTP header
  def encode_field(k, v)
    FIELD_ENCODING % [k, v]
  end

  # Render a header hash, munging keys from foo-bar-baz to Foo-Bar-Baz.
  def encode_headers(head)
    head.reduce('') do |result, (k, v)|
      k = k.split('-').map(&:capitalize).join('-')
      result << encode_field(k, v)
    end
  end

  # One "Cookie: k=v" line per cookie pair.
  def encode_cookies(cookies)
    cookies.reduce('') { |result, (k, v)| result << encode_field('Cookie', encode_param(k, v)) }
  end
end
# HTTP client class implemented as a subclass of Rev::TCPSocket. Encodes
# requests and allows streaming consumption of the response. Response is
# parsed with a Ragel-generated whitelist parser which supports chunked
# HTTP encoding.
#
# == Example
#
# loop = Rev::Loop.default
# client = Rev::HttpClient.connect("www.google.com").attach
# client.get('/search', query: {q: 'foobar'})
# loop.run
#
class HttpClient < TCPSocket
include HttpEncoding
ALLOWED_METHODS=[:put, :get, :post, :delete, :head]
TRANSFER_ENCODING="TRANSFER_ENCODING"
CONTENT_LENGTH="CONTENT_LENGTH"
SET_COOKIE="SET_COOKIE"
LOCATION="LOCATION"
HOST="HOST"
CRLF="\r\n"
# Connect to the given server, with port 80 as the default
def self.connect(addr, port = 80, *args)
super
end
def initialize(socket)
super
@parser = HttpClientParser.new
@parser_nbytes = 0
@state = :response_header
@data = Rev::Buffer.new
@response_header = HttpResponseHeader.new
@chunk_header = HttpChunkHeader.new
end
# Send an HTTP request and consume the response.
# Supports the following options:
#
# head: {Key: Value}
# Specify an HTTP header, e.g. {'Connection': 'close'}
#
# query: {Key: Value}
# Specify query string parameters (auto-escaped)
#
# cookies: {Key: Value}
# Specify hash of cookies (auto-escaped)
#
# body: String
# Specify the request body (you must encode it for now)
#
def request(method, path, options = {})
raise ArgumentError, "invalid request path" unless path[0] == '/'
raise RuntimeError, "request already sent" if @requested
@method, @path, @options = method, path, options
@requested = true
return unless @connected
send_request
end
# Enable the HttpClient if it has been disabled
def enable
super
dispatch unless @data.empty?
end
# Called when response header has been received
def on_response_header(response_header)
end
# Called when part of the body has been read
def on_body_data(data)
STDOUT.write data
STDOUT.flush
end
# Called when the request has completed
def on_request_complete
close
end
# Called when an error occurs during the request
def on_error(reason)
close
raise RuntimeError, reason
end
#########
protected
#########
#
# Rev callbacks
#
def on_connect
@connected = true
send_request if @method and @path
end
def on_read(data)
@data << data
dispatch
end
#
# Request sending
#
def send_request
send_request_header
send_request_body
end
def send_request_header
query = @options[:query]
head = @options[:head] ? munge_header_keys(@options[:head]) : {}
cookies = @options[:cookies]
body = @options[:body]
# Set the Host header if it hasn't been specified already
head['host'] ||= encode_host
# Set the Content-Length if it hasn't been specified already and a body was given
head['content-length'] ||= body ? body.length : 0
# Set the User-Agent if it hasn't been specified
head['user-agent'] ||= "Rev #{Rev::VERSION}"
# Default to Connection: close
head['connection'] ||= 'close'
# Build the request
request_header = encode_request(@method, @path, query)
request_header << encode_headers(head)
request_header << encode_cookies(cookies) if cookies
request_header << CRLF
write request_header
end
def send_request_body
write @options[:body] if @options[:body]
end
#
# Response processing
#
def dispatch
while enabled? and case @state
when :response_header
parse_response_header
when :chunk_header
parse_chunk_header
when :chunk_body
process_chunk_body
when :chunk_footer
process_chunk_footer
when :response_footer
process_response_footer
when :body
process_body
when :finished, :invalid
break
else raise RuntimeError, "invalid state: #{@state}"
end
end
end
def parse_header(header)
return false if @data.empty?
begin
@parser_nbytes = @parser.execute(header, @data.to_str, @parser_nbytes)
rescue Rev::HttpClientParserError
on_error "invalid HTTP format, parsing fails"
@state = :invalid
end
return false unless @parser.finished?
# Clear parsed data from the buffer
@data.read(@parser_nbytes)
@parser.reset
@parser_nbytes = 0
true
end
def parse_response_header
return false unless parse_header(@response_header)
unless @response_header.http_status and @response_header.http_reason
on_error "no HTTP response"
@state = :invalid
return false
end
on_response_header(@response_header)
if @response_header.chunked_encoding?
@state = :chunk_header
else
@state = :body
@bytes_remaining = @response_header.content_length
end
true
end
def parse_chunk_header
return false unless parse_header(@chunk_header)
@bytes_remaining = @chunk_header.chunk_size
@chunk_header = HttpChunkHeader.new
@state = @bytes_remaining > 0 ? :chunk_body : :response_footer
true
end
def process_chunk_body
if @data.size < @bytes_remaining
@bytes_remaining -= @data.size
on_body_data @data.read
return false
end
on_body_data @data.read(@bytes_remaining)
@bytes_remaining = 0
@state = :chunk_footer
true
end
def process_chunk_footer
return false if @data.size < 2
if @data.read(2) == CRLF
@state = :chunk_header
else
on_error "non-CRLF chunk footer"
@state = :invalid
end
true
end
def process_response_footer
return false if @data.size < 2
if @data.read(2) == CRLF
if @data.empty?
on_request_complete
@state = :finished
else
on_error "garbage at end of chunked response"
@state = :invalid
end
else
on_error "non-CRLF response footer"
@state = :invalid
end
false
end
def process_body
if @bytes_remaining.nil?
on_body_data @data.read
return false
end
if @bytes_remaining.zero?
on_request_complete
@state = :finished
return false
end
if @data.size < @bytes_remaining
@bytes_remaining -= @data.size
on_body_data @data.read
return false
end
on_body_data @data.read(@bytes_remaining)
@bytes_remaining = 0
if @data.empty?
on_request_complete
@state = :finished
else
on_error "garbage at end of body"
@state = :invalid
end
false
end
end
end
|
#--
# Copyright (C)2007 Tony Arcieri
# Includes portions originally Copyright (C)2005 Zed Shaw
# You can redistribute this under the terms of the Ruby license
# See file LICENSE for details
#++
require File.dirname(__FILE__) + '/../rev'
require File.dirname(__FILE__) + '/../http11_client'
module Rev
# A simple hash is returned for each request made by HttpClient with
# the headers that were given by the server for that request.
class HttpResponse < Hash
# The reason returned in the http response ("OK","File not found",etc.)
attr_accessor :http_reason
# The HTTP version returned.
attr_accessor :http_version
# The status code (as a string!)
attr_accessor :http_status
# When parsing chunked encodings this is set
attr_accessor :http_chunk_size
def chunk_size
return @chunk_size unless @chunk_size.nil?
@chunk_size = @http_chunk_size ? @http_chunk_size.to_i(base=16) : 0
end
def last_chunk?
chunk_size == 0
end
end
class HttpClient < TCPSocket
TRANSFER_ENCODING="TRANSFER_ENCODING"
CONTENT_LENGTH="CONTENT_LENGTH"
SET_COOKIE="SET_COOKIE"
LOCATION="LOCATION"
HOST="HOST"
HTTP_REQUEST_HEADER="%s %s HTTP/1.1\r\n"
FIELD_ENCODING = "%s: %s\r\n"
REQ_CONTENT_LENGTH="Content-Length"
REQ_HOST="Host"
CHUNK_SIZE=1024 * 16
CRLF="\r\n"
def self.connect(addr, port = 80, *args)
super
end
def initialize(socket)
super
@parser = HttpClientParser.new
@parser_nbytes = 0
@header_data = ''
@header_parsed = false
@response = HttpResponse.new
@chunk_header_data = ''
@chunk_header_parsed = false
@chunk_header = HttpResponse.new
end
def request(method, uri, options = {})
raise RuntimeError, "request already sent" if @requested
@allowed_methods = options[:allowed_methods] || [:put, :get, :post, :delete, :head]
raise ArgumentError, "method not supported" unless @allowed_methods.include? method.to_sym
@method, @uri, @options = method, uri, options
@requested = true
return unless @connected
send_request
end
# Called when response header has been received
def on_response_header(response)
puts response.http_reason, response.http_version
puts response.http_status, response.inspect
puts chunked_encoding?.to_s
end
# Called when part of the body has been read
def on_body_data(data)
puts data
end
# Called when the request has completed
def on_request_complete
end
#########
protected
#########
def on_connect
@connected = true
send_request if @method and @uri
end
def on_read(data)
return parse_response_header(data) unless @header_parsed
decode_body(data)
end
def parse_response_header(data)
@header_data << data
@parser_nbytes = @parser.execute(@response, @header_data, @parser_nbytes)
return unless @parser.finished?
@header_parsed = true
process_response_header
# The remaining data is part of the body, so process it as such
@header_data.slice!(0, @parser_nbytes)
@parser_nbytes = 0
@parser.reset
decode_body(@header_data)
@header_data = ''
end
def process_response_header
on_response_header(@response)
end
def chunked_encoding?
/chunked/i === @response[TRANSFER_ENCODING]
end
# Route body bytes: identity bodies go straight to on_body_data; chunked
# bodies alternate between chunk headers and chunk payloads.
def decode_body(data)
return on_body_data(data) unless chunked_encoding?
return parse_chunk_header(data) unless @chunk_header_parsed
# Zero remaining means the terminating zero-length chunk was seen
return if @chunk_remaining.zero?
if data.size < @chunk_remaining
@chunk_remaining -= data.size
return on_body_data data
end
# Data extends past the current chunk: emit the rest of the chunk, then
# start parsing the next chunk header from the remainder.
on_body_data data.slice!(0, @chunk_remaining)
@chunk_header_parsed = false
parse_chunk_header data
end
# This is really the same as parse_response_header and should be DRYed out
def parse_chunk_header(data)
@chunk_header_data << data
@parser_nbytes = @parser.execute(@chunk_header, @chunk_header_data, @parser_nbytes)
return unless @parser.finished?
@chunk_header_parsed = true
@chunk_remaining = @chunk_header.chunk_size
# Bytes left over after the chunk header belong to the chunk body
@chunk_header_data.slice!(0, @parser_nbytes)
@parser_nbytes = 0
@parser.reset
decode_body(@chunk_header_data)
@chunk_header_data = ''
@chunk_header = HttpResponse.new
end
# Build and write the full request: request line, headers, cookies, body.
def send_request
query = @options[:query]
head = @options[:head] ? munge_header_keys(@options[:head]) : {}
cookies = @options[:cookies]
body = @options[:body]
# Set the Host header if it hasn't been specified already
head['host'] ||= encode_host
# Set the Content-Length if it hasn't been specified already and a body was given
head['content-length'] ||= body ? body.length : 0
# Set the User-Agent if it hasn't been specified
head['user-agent'] ||= "Rev #{Rev::VERSION}"
# Default to Connection: close
head['connection'] ||= 'close'
# Build the request
request = encode_request(@method, @uri, query)
request << encode_headers(head)
request << encode_cookies(cookies) if cookies
request << CRLF
request << body if body
write request
end
# Form-style URI escaping: percent-encode every character outside
# [ a-zA-Z0-9_.-], then encode spaces as '+'.
def escape(s)
  encoded = s.to_s.gsub(/([^ a-zA-Z0-9_.-]+)/n) do
    run = Regexp.last_match(1)
    '%' + run.unpack('H2' * run.size).join('%').upcase
  end
  encoded.tr(' ', '+')
end
# Reverse of escape: '+' back to space, then decode runs of %XX bytes.
def unescape(s)
  s.tr('+', ' ').gsub(/((?:%[0-9a-fA-F]{2})+)/n) do
    hex = Regexp.last_match(1).delete('%')
    [hex].pack('H*')
  end
end
# Map all header keys to a downcased string version.
def munge_header_keys(head)
  munged = {}
  head.each { |key, value| munged[key.to_s.downcase] = value }
  munged
end
# The Host header is mandatory in HTTP/1.1.  The default port 80 is
# omitted because redirects would otherwise keep tacking on ":80".
def encode_host
remote_host + (remote_port.to_i != 80 ? ":#{remote_port}" : "")
end
# Format the request line, e.g. "GET /path?a=b HTTP/1.1\r\n".
def encode_request(method, uri, query)
HTTP_REQUEST_HEADER % [method.to_s.upcase, encode_query(uri, query)]
end
# Append an encoded query string to the URI; the URI is returned
# untouched when no query parameters were given.
def encode_query(uri, query)
  return uri unless query
  params = query.map { |key, value| encode_param(key, value) }
  "#{uri}?#{params.join('&')}"
end
# URL encodes a single k=v parameter.
def encode_param(k, v)
  "#{escape(k)}=#{escape(v)}"
end
# Encode a field in an HTTP header
# Renders one "Name: value\r\n" line.
def encode_field(k, v)
FIELD_ENCODING % [k, v]
end
# Render all header fields, canonicalizing each key from
# foo-bar-baz to Foo-Bar-Baz.
def encode_headers(head)
  buffer = ''
  head.each do |key, value|
    canonical = key.split('-').map { |part| part.capitalize }.join('-')
    buffer << encode_field(canonical, value)
  end
  buffer
end
# Render each cookie as its own "Cookie: k=v\r\n" header line.
def encode_cookies(cookies)
cookies.reduce('') { |result, (k, v)| result << encode_field('Cookie', encode_param(k, v)) }
end
end
end
Properly working HTTP client with support for both identity and chunked encoding
git-svn-id: f3f86cdbd6861aa9839b323b12b6af648189b59b@51 ea6e6bc6-682b-4e20-bb59-304e6edd756d
#--
# Copyright (C)2007 Tony Arcieri
# Includes portions originally Copyright (C)2005 Zed Shaw
# You can redistribute this under the terms of the Ruby license
# See file LICENSE for details
#++
require File.dirname(__FILE__) + '/../rev'
require File.dirname(__FILE__) + '/../http11_client'
module Rev
# A simple hash is returned for each request made by HttpClient with
# the headers that were given by the server for that request.
class HttpResponseHeader < Hash
  # Reason phrase from the status line ("OK", "File not found", ...)
  attr_accessor :http_reason
  # HTTP version string reported by the server
  attr_accessor :http_version
  # Raw status code (note: stored as a string!)
  attr_accessor :http_status
  # Status code converted to an Integer, or nil when unparsable
  def status
    Integer(http_status)
  rescue
    nil
  end
  # Content-Length as an Integer, or nil if chunked/unspecified
  def content_length
    Integer(self[HttpClient::CONTENT_LENGTH])
  rescue
    nil
  end
  # True when the server declared Transfer-Encoding: chunked
  def chunked_encoding?
    /chunked/i === self[HttpClient::TRANSFER_ENCODING]
  end
end
class HttpChunkHeader < Hash
  # Hex chunk-size token, set by the parser while reading chunked bodies
  attr_accessor :http_chunk_size
  # Chunk size in bytes; a missing size token counts as zero.
  def chunk_size
    @chunk_size ||= @http_chunk_size ? @http_chunk_size.to_i(16) : 0
  end
  # The zero-length chunk marks the end of a chunked body.
  def last_chunk?
    chunk_size == 0
  end
end
# URI/header encoding helpers mixed into HttpClient.
module HttpEncoding
  HTTP_REQUEST_HEADER = "%s %s HTTP/1.1\r\n"
  FIELD_ENCODING = "%s: %s\r\n"

  # Form-style URI escaping: percent-encode everything outside
  # [ a-zA-Z0-9_.-], then turn spaces into '+'.
  def escape(s)
    encoded = s.to_s.gsub(/([^ a-zA-Z0-9_.-]+)/n) do
      run = Regexp.last_match(1)
      '%' + run.unpack('H2' * run.size).join('%').upcase
    end
    encoded.tr(' ', '+')
  end

  # Reverse of escape: '+' back to space, %XX sequences back to bytes.
  def unescape(s)
    s.tr('+', ' ').gsub(/((?:%[0-9a-fA-F]{2})+)/n) do
      [Regexp.last_match(1).delete('%')].pack('H*')
    end
  end

  # Map all header keys to a downcased string version.
  def munge_header_keys(head)
    munged = {}
    head.each { |key, value| munged[key.to_s.downcase] = value }
    munged
  end

  # Host header value; the default port 80 is omitted so redirects
  # don't accumulate a redundant ":80".
  def encode_host
    suffix = remote_port.to_i != 80 ? ":#{remote_port}" : ""
    remote_host + suffix
  end

  # Request line, e.g. "GET /index?a=b HTTP/1.1\r\n".
  def encode_request(method, uri, query)
    HTTP_REQUEST_HEADER % [method.to_s.upcase, encode_query(uri, query)]
  end

  # Append an encoded query string to the URI; the URI is returned
  # untouched when query is nil.
  def encode_query(uri, query)
    return uri unless query
    params = query.map { |key, value| encode_param(key, value) }
    "#{uri}?#{params.join('&')}"
  end

  # URL encodes a single k=v parameter.
  def encode_param(k, v)
    "#{escape(k)}=#{escape(v)}"
  end

  # One "Name: value\r\n" header line.
  def encode_field(k, v)
    FIELD_ENCODING % [k, v]
  end

  # All header lines, keys canonicalized from foo-bar-baz to Foo-Bar-Baz.
  def encode_headers(head)
    head.reduce('') do |buffer, (key, value)|
      canonical = key.split('-').map { |part| part.capitalize }.join('-')
      buffer << encode_field(canonical, value)
    end
  end

  # One "Cookie: k=v\r\n" line per cookie.
  def encode_cookies(cookies)
    buffer = ''
    cookies.each { |key, value| buffer << encode_field('Cookie', encode_param(key, value)) }
    buffer
  end
end
# Event-driven HTTP/1.1 client.  Drives a state machine over the states
# :response_header, then either :body (identity encoding) or
# :chunk_header / :chunk_body / :chunk_footer / :response_footer
# (chunked encoding), ending in :finished or :invalid.
class HttpClient < TCPSocket
include HttpEncoding
TRANSFER_ENCODING="TRANSFER_ENCODING"
CONTENT_LENGTH="CONTENT_LENGTH"
SET_COOKIE="SET_COOKIE"
LOCATION="LOCATION"
HOST="HOST"
CRLF="\r\n"
def self.connect(addr, port = 80, *args)
super
end
def initialize(socket)
super
@parser = HttpClientParser.new
@parser_nbytes = 0
@state = :response_header
# Scratch buffer shared by header parsing and CRLF matching
@data = ''
@response_header = HttpResponseHeader.new
@chunk_header = HttpChunkHeader.new
end
# Send an HTTP request and consume the response.  May only be called once.
def request(method, uri, options = {})
raise RuntimeError, "request already sent" if @requested
@allowed_methods = options[:allowed_methods] || [:put, :get, :post, :delete, :head]
raise ArgumentError, "method not supported" unless @allowed_methods.include? method.to_sym
@method, @uri, @options = method, uri, options
@requested = true
return unless @connected
send_request
end
# Called when response header has been received
def on_response_header(response_header)
end
# Called when part of the body has been read
def on_body_data(data)
STDOUT.write data
STDOUT.flush
end
# Called when the request has completed
def on_request_complete
close
end
# Called when an error occurs during the request
def on_error(reason)
raise RuntimeError, reason
end
#########
protected
#########
#
# Request sending
#
def send_request
send_request_header
send_request_body
end
def send_request_header
query = @options[:query]
head = @options[:head] ? munge_header_keys(@options[:head]) : {}
cookies = @options[:cookies]
body = @options[:body]
# Set the Host header if it hasn't been specified already
head['host'] ||= encode_host
# Set the Content-Length if it hasn't been specified already and a body was given
head['content-length'] ||= body ? body.length : 0
# Set the User-Agent if it hasn't been specified
head['user-agent'] ||= "Rev #{Rev::VERSION}"
# Default to Connection: close
head['connection'] ||= 'close'
# Build the request
request_header = encode_request(@method, @uri, query)
request_header << encode_headers(head)
request_header << encode_cookies(cookies) if cookies
request_header << CRLF
write request_header
end
def send_request_body
write @options[:body] if @options[:body]
end
#
# Rev callbacks
#
def on_connect
@connected = true
send_request if @method and @uri
end
# Feed incoming bytes through the state machine until it needs more
# input or reaches a terminal state.
def on_read(data)
until @state == :finished or @state == :invalid or data.empty?
@state, data = dispatch_data(@state, data)
end
end
#
# Response processing
#
# Each handler returns the next state plus any unconsumed bytes.
def dispatch_data(state, data)
case state
when :response_header then parse_response_header(data)
when :chunk_header then parse_chunk_header(data)
when :chunk_body then process_chunk_body(data)
when :chunk_footer then process_chunk_footer(data)
when :response_footer then process_response_footer(data)
when :body then process_body(data)
when :finished
when :invalid
else raise RuntimeError, "invalid state: #{@state}"
end
end
# Run the parser over buffered data; returns nil while the header is
# still incomplete, otherwise the bytes left over after the header.
def parse_header(header, data)
@data << data
@parser_nbytes = @parser.execute(header, @data, @parser_nbytes)
return unless @parser.finished?
remainder = @data.slice(@parser_nbytes, @data.size)
@data = ''
@parser.reset
@parser_nbytes = 0
remainder
end
# NOTE(review): content_length may be nil when the server sends neither
# Content-Length nor chunked encoding; process_body would then compare
# data.size against nil — confirm handling of read-until-close responses.
def parse_response_header(data)
data = parse_header(@response_header, data)
return :response_header, '' if data.nil?
unless @response_header.http_status and @response_header.http_reason
return on_error("no HTTP response")
end
on_response_header(@response_header)
if @response_header.chunked_encoding?
return :chunk_header, data
else
@bytes_remaining = @response_header.content_length
return :body, data
end
end
def parse_chunk_header(data)
data = parse_header(@chunk_header, data)
return :chunk_header, '' if data.nil?
@bytes_remaining = @chunk_header.chunk_size
@chunk_header = HttpChunkHeader.new
if @bytes_remaining > 0
return :chunk_body, data
else
# Zero-size chunk ends the body; a final CRLF is expected next
@bytes_remaining = 2
return :response_footer, data
end
end
def process_chunk_body(data)
if data.size < @bytes_remaining
@bytes_remaining -= data.size
on_body_data data
return :chunk_body, ''
end
on_body_data data.slice!(0, @bytes_remaining)
# Each chunk is terminated by a CRLF pair
@bytes_remaining = 2
return :chunk_footer, data
end
# Match an expected CRLF pair; returns nil while incomplete, otherwise
# [matched?, remaining data].
def process_crlf(data)
@data << data.slice!(0, @bytes_remaining)
@bytes_remaining = 2 - @data.size
return unless @bytes_remaining == 0
matches_crlf = (@data == CRLF)
@data = ''
return matches_crlf, data
end
def process_chunk_footer(data)
result, data = process_crlf(data)
return :chunk_footer, '' if result.nil?
if result
return :chunk_header, data
else
on_error "non-CRLF chunk footer"
return :invalid
end
end
def process_response_footer(data)
result, data = process_crlf(data)
return :response_footer, '' if result.nil?
if result
unless data.empty?
on_error "garbage at end of chunked response"
return :invalid
end
on_request_complete
return :finished
else
on_error "non-CRLF response footer"
return :invalid
end
end
# Identity-encoded body: emit exactly Content-Length bytes.
def process_body(data)
if data.size < @bytes_remaining
@bytes_remaining -= data.size
on_body_data data
return :body, ''
end
on_body_data data.slice!(0, @bytes_remaining)
unless data.empty?
on_error "garbage at end of body"
return :invalid
end
on_request_complete
return :finished
end
end
end
|
# Gem version constant for Rexport.
module Rexport
VERSION = '0.3.7'
end
version bump
# Gem version constant for Rexport.
module Rexport
VERSION = '0.3.8'
end
|
require 'rib'
# Rib plugin: prints the current backtrace, with optional filters to
# drop frames belonging to rib itself or to named gems.
module Rib::Caller
extend Rib::Plugin
Shell.use(self)
module Imp
# Filters may be Regexps or gem-name Strings; matching frames are removed.
def caller *filters
return if Rib::Caller.disabled?
# Drop this method's own frame, then format via the shell
backtrace = Rib.shell.format_backtrace(super().drop(1))
lib = %r{\brib-#{Rib::VERSION}/lib/rib/}
# Strip rib's own frames from whichever end of the trace they occupy
if backtrace.first =~ lib
backtrace.shift while backtrace.first =~ lib
elsif backtrace.last =~ lib
backtrace.pop while backtrace.last =~ lib
end
# Apply each filter as a successive grep_v pass over the backtrace
result = filters.map do |f|
case f
when Regexp
f
when String
%r{\bgems/#{Regexp.escape(f)}\-[\d\.]+/lib/}
end
end.inject(backtrace, &:grep_v)
puts result.map{ |l| " #{l}" }
Rib::Skip
end
end
Rib.extend(Imp)
end
so that we could have paging for Rib.caller
require 'rib'
# Rib plugin: prints the current backtrace, with optional filters to
# drop frames belonging to rib itself or to named gems.
module Rib::Caller
extend Rib::Plugin
Shell.use(self)
module Imp
# Filters may be Regexps or gem-name Strings; matching frames are removed.
def caller *filters
return if Rib::Caller.disabled?
# Drop this method's own frame, then format via the shell
backtrace = Rib.shell.format_backtrace(super().drop(1))
lib = %r{\brib-#{Rib::VERSION}/lib/rib/}
# Strip rib's own frames from whichever end of the trace they occupy
if backtrace.first =~ lib
backtrace.shift while backtrace.first =~ lib
elsif backtrace.last =~ lib
backtrace.pop while backtrace.last =~ lib
end
# Apply each filter as a successive grep_v pass over the backtrace
result = filters.map do |f|
case f
when Regexp
f
when String
%r{\bgems/#{Regexp.escape(f)}\-[\d\.]+/lib/}
end
end.inject(backtrace, &:grep_v)
# Output goes through the shell so it can be paged
Rib.shell.puts result.map{ |l| " #{l}" }.join("\n")
Rib::Skip
end
end
Rib.extend(Imp)
end
|
require 'roar/transport/net_http'
module Roar
  # Gives HTTP-power to representers. They can serialize, send, process and deserialize HTTP-requests.
  module HttpVerbs
    class << self
      # Transport used by default for all representers.
      attr_accessor :transport_engine
      def included(base)
        base.extend ClassMethods
      end
    end
    self.transport_engine = ::Roar::Transport::NetHTTP
    module ClassMethods
      # GETs +url+ with +format+ and returns deserialized represented object.
      def get(*args)
        new.get(*args)
      end
    end
    # Per-instance override of the class-wide transport engine.
    attr_writer :transport_engine
    def transport_engine
      @transport_engine || HttpVerbs.transport_engine
    end
    # Serializes the object, POSTs it to +url+ with +format+, deserializes the returned document
    # and updates properties accordingly.
    def post(options={}, &block)
      response = http.post_uri(options.merge(:body => serialize), &block)
      handle_response(response)
    end
    # GETs +url+ with +format+, deserializes the returned document and updates properties accordingly.
    def get(options={}, &block)
      response = http.get_uri(options, &block)
      handle_response(response)
    end
    # Serializes the object, PUTs it to +url+ with +format+, deserializes the returned document
    # and updates properties accordingly.
    def put(options={}, &block)
      response = http.put_uri(options.merge(:body => serialize), &block)
      handle_response(response)
      self
    end
    # Serializes the object, PATCHes it, and updates properties from the response.
    def patch(options={}, &block)
      response = http.patch_uri(options.merge(:body => serialize), &block)
      handle_response(response)
      self
    end
    # DELETEs the resource; the response body is ignored.
    def delete(options, &block)
      http.delete_uri(options, &block)
      self
    end
  private
    # Deserializes the response body into this representer.
    def handle_response(response)
      document = response.body
      deserialize(document)
    end
    # Fresh transport instance per request.
    def http
      transport_engine.new
    end
    # NOTE: the deprecated positional-argument shim (handle_deprecated_args)
    # was removed: nothing in this module called it, and its final
    # assignment unconditionally clobbered the options hash built on the
    # preceding line (using args[2], which is nil for the two-arg form).
  end
end
Remove dead code in HttpVerbs
Support for positional arguments removed in 7edbc979.
Signed-off-by: Alex Coles <60c6d277a8bd81de7fdde19201bf9c58a3df08f4@alexbcoles.com>
require 'roar/transport/net_http'
module Roar
# Gives HTTP-power to representers. They can serialize, send, process and deserialize HTTP-requests.
module HttpVerbs
class << self
# Transport used by default for all representers.
attr_accessor :transport_engine
def included(base)
base.extend ClassMethods
end
end
self.transport_engine = ::Roar::Transport::NetHTTP
module ClassMethods
# GETs +url+ with +format+ and returns deserialized represented object.
def get(*args)
new.get(*args)
end
end
# Per-instance override of the class-wide transport engine.
attr_writer :transport_engine
def transport_engine
@transport_engine || HttpVerbs.transport_engine
end
# Serializes the object, POSTs it to +url+ with +format+, deserializes the returned document
# and updates properties accordingly.
def post(options={}, &block)
response = http.post_uri(options.merge(:body => serialize), &block)
handle_response(response)
end
# GETs +url+ with +format+, deserializes the returned document and updates properties accordingly.
def get(options={}, &block)
response = http.get_uri(options, &block)
handle_response(response)
end
# Serializes the object, PUTs it to +url+ with +format+, deserializes the returned document
# and updates properties accordingly.
def put(options={}, &block)
response = http.put_uri(options.merge(:body => serialize), &block)
handle_response(response)
self
end
# Serializes the object, PATCHes it, and updates properties from the response.
def patch(options={}, &block)
response = http.patch_uri(options.merge(:body => serialize), &block)
handle_response(response)
self
end
# DELETEs the resource; the response body is ignored.
def delete(options, &block)
http.delete_uri(options, &block)
self
end
private
# Deserializes the response body into this representer.
def handle_response(response)
document = response.body
deserialize(document)
end
# Fresh transport instance per request.
def http
transport_engine.new
end
end
end
|
require 'rprogram/exceptions/program_not_found'
require 'rprogram/rprogram'
require 'env/variables'
module RProgram
  #
  # @since 0.3.0
  #
  module System
    extend Env::Variables
    @arch, @platform = RUBY_PLATFORM.split('-',2)
    @platform ||= @arch
    #
    # Determines the native architecture.
    #
    # @return [String]
    #   The native architecture.
    #
    # @example
    #   System.arch
    #   # => "x86-64"
    #
    # @since 0.3.0
    #
    def self.arch
      @arch
    end
    #
    # Determines the native platform.
    #
    # @return [String]
    #   The native platform.
    #
    # @example
    #   System.platform
    #   # => "linux"
    #
    def self.platform
      @platform
    end
    #
    # Determines if the platform is Windows.
    #
    # @return [Boolean]
    #   Specifies whether the platform is Windows.
    #
    # @since 0.3.0
    #
    def self.windows?
      if @platform
        @platform.include?('mingw') || @platform.include?('mswin')
      else
        false
      end
    end
    #
    # Determines if the current Ruby VM is from the 1.8.x family.
    #
    # @return [Boolean]
    #   Specifies if the current Ruby VM is from the 1.8.x family.
    #
    # @since 0.3.0
    #
    def self.ruby_1_8?
      RUBY_VERSION[0,4] == '1.8.'
    end
    #
    # Determines if the current Ruby VM is JRuby.
    #
    # @return [Boolean]
    #   Specifies whether the Ruby VM is JRuby.
    #
    # @since 0.3.0
    #
    def self.jruby?
      Object.const_defined?(:RUBY_ENGINE) && \
        Object.const_get(:RUBY_ENGINE) == 'jruby'
    end
    #
    # Finds the full-path of the program with the matching name.
    #
    # @param [String] name
    #   The name of the program to find.
    #
    # @return [Pathname, nil]
    #   The full-path of the desired program.
    #
    # @example
    #   System.find_program('as')
    #   #=> #<Pathname:/usr/bin/as>
    #
    def self.find_program(name)
      # add the `.exe` suffix to the name, if running on Windows
      name = "#{name}.exe" if windows?
      paths.each do |dir|
        candidate = dir.join(name).expand_path
        return candidate if candidate.file?
      end
      return nil
    end
    #
    # Finds the program matching one of the matching names.
    #
    # @param [Array] names
    #   The names of the program to use while searching for the program.
    #
    # @return [Pathname, nil]
    #   The first full-path for the program.
    #
    # @example
    #   System.find_program_by_names("gas","as")
    #   # => #<Pathname:/usr/bin/as>
    #
    def self.find_program_by_names(*names)
      names.each do |name|
        found = find_program(name)
        return found if found
      end
      return nil
    end
    #
    # Runs a program.
    #
    # @overload run(path,*arguments)
    #   Run the program with the given arguments.
    #
    #   @param [Pathname, String] path
    #     The path of the program to run.
    #
    #   @param [Array] arguments
    #     Additional arguments to run the program with.
    #
    # @overload run(path,*arguments,options)
    #   Run the program with the given arguments and options.
    #
    #   @param [Pathname, String] path
    #     The path of the program to run.
    #
    #   @param [Array] arguments
    #     Additional arguments to run the program with.
    #
    #   @param [Hash] options
    #     Additional options to execute the program with.
    #
    #   @option options [Hash{String => String}] :env
    #     Environment variables to execute the program with.
    #
    #   @option options [String] :popen
    #     Specifies to run the program using `IO.popen` with the given
    #     IO mode.
    #
    # @return [Boolean]
    #   Specifies whether the program exited successfully.
    #
    # @raise [RuntimeError]
    #   Passing `:popen`, `:env` or exec options is not supported
    #   before Ruby 1.9.1.
    #
    # @see http://rubydoc.info/stdlib/core/1.9.2/Kernel#spawn-instance_method
    #   For acceptable options.
    #
    def self.run(*arguments)
      # extract trailing options and ENV variables from arguments
      if arguments.last.kind_of?(Hash)
        options = arguments.pop
        env = (options.delete(:env) || {})
        popen = options.delete(:popen)
      else
        options = {}
        env = {}
      end
      # all arguments must be Strings
      arguments = arguments.map { |arg| arg.to_s }
      # print debugging information
      if RProgram.debug
        cmdline = ''
        env.each do |name,value|
          cmdline << "#{name}=#{value} "
        end
        cmdline << arguments.join(' ')
        cmdline << " #{options.inspect}" unless options.empty?
        warn ">>> #{cmdline}"
      end
      # passing ENV variables or exec options is not supported before 1.9.1
      if (!options.empty? && ruby_1_8?)
        raise("cannot pass exec options to Kernel.system in #{RUBY_VERSION}")
      end
      if popen
        # IO.popen does not accept multiple arguments on Ruby 1.8.x.
        raise("cannot use :popen on #{RUBY_VERSION}, please use 1.9.x") if ruby_1_8?
        # :popen can only be used on Unix, or on Windows with JRuby
        raise("cannot use :popen on Windows, unless under JRuby") if (windows? && !jruby?)
      end
      # re-add ENV variables and exec options
      arguments.unshift(env) unless env.empty?
      arguments.push(options) unless options.empty?
      if popen
        IO.popen(arguments,popen)
      else
        Kernel.system(*arguments)
      end
    end
    #
    # The path to the `sudo` program.
    #
    # @return [Pathname, nil]
    #   The path to the `sudo` program.
    #
    # @since 0.3.0
    #
    def self.sudo_path
      @sudo ||= find_program('sudo')
    end
    #
    # Sets the path to the `sudo` program.
    #
    # @param [String, Pathname] path
    #   The new path to use.
    #
    # @return [Pathname]
    #   The new path to the `sudo` program.
    #
    # @since 0.3.0
    #
    def self.sudo_path=(path)
      @sudo = Pathname.new(path)
    end
    #
    # Determines whether `sudo` is available on the system.
    #
    # @return [Boolean]
    #   Specifies whether the `sudo` program is installed on the system.
    #
    # @since 0.3.0
    #
    def self.sudo?
      !sudo_path.nil?
    end
    #
    # Runs a program under sudo.
    #
    # @overload run(path,*arguments)
    #   Run the program with the given arguments.
    #
    #   @param [Pathname, String] path
    #     The path of the program to run.
    #
    #   @param [Array] arguments
    #     Additional arguments to run the program with.
    #
    # @overload run(path,*arguments,options)
    #   Run the program with the given arguments and options.
    #
    #   @param [Pathname, String] path
    #     The path of the program to run.
    #
    #   @param [Array] arguments
    #     Additional arguments to run the program with.
    #
    #   @param [Hash] options
    #     Additional options to execute the program with.
    #
    # @return [Boolean]
    #   Specifies whether the program exited successfully.
    #
    # @raise [ProgramNotFound]
    #   Indicates that the `sudo` program could not be located.
    #
    # @since 0.1.8
    #
    # @see run
    #
    def self.sudo(*arguments)
      unless sudo?
        raise(ProgramNotFound,'could not find the "sudo" program')
      end
      return run(sudo_path,*arguments)
    end
  end
end
Style.
require 'rprogram/exceptions/program_not_found'
require 'rprogram/rprogram'
require 'env/variables'
module RProgram
#
# @since 0.3.0
#
module System
extend Env::Variables
# RUBY_PLATFORM is "arch-platform" (e.g. "x86_64-linux"); single-token
# platforms (e.g. "java") fall back to using the token for both.
@arch, @platform = RUBY_PLATFORM.split('-',2)
@platform ||= @arch
#
# Determines the native architecture.
#
# @return [String]
# The native architecture.
#
# @example
# System.arch
# # => "x86-64"
#
# @since 0.3.0
#
def self.arch
@arch
end
#
# Determines the native platform.
#
# @return [String]
# The native platform.
#
# @example
# System.platform
# # => "linux"
#
def self.platform
@platform
end
#
# Determines if the platform is Windows.
#
# @return [Boolean]
# Specifies whether the platform is Windows.
#
# @since 0.3.0
#
def self.windows?
if @platform
@platform.include?('mingw') || @platform.include?('mswin')
else
false
end
end
#
# Determines if the current Ruby VM is from the 1.8.x family.
#
# @return [Boolean]
# Specifies if the current Ruby VM is from the 1.8.x family.
#
# @since 0.3.0
#
def self.ruby_1_8?
RUBY_VERSION[0,4] == '1.8.'
end
#
# Determines if the current Ruby VM is JRuby.
#
# @return [Boolean]
# Specifies whether the Ruby VM is JRuby.
#
# @since 0.3.0
#
def self.jruby?
Object.const_defined?(:RUBY_ENGINE) && \
Object.const_get(:RUBY_ENGINE) == 'jruby'
end
#
# Finds the full-path of the program with the matching name.
#
# @param [String] name
# The name of the program to find.
#
# @return [Pathname, nil]
# The full-path of the desired program.
#
# @example
# System.find_program('as')
# #=> #<Pathname:/usr/bin/as>
#
def self.find_program(name)
# add the `.exe` suffix to the name, if running on Windows
if windows?
name = "#{name}.exe"
end
# `paths` comes from Env::Variables (the PATH directories)
paths.each do |dir|
full_path = dir.join(name).expand_path
return full_path if full_path.file?
end
return nil
end
#
# Finds the program matching one of the matching names.
#
# @param [Array] names
# The names of the program to use while searching for the program.
#
# @return [Pathname, nil]
# The first full-path for the program.
#
# @example
# System.find_program_by_names("gas","as")
# # => #<Pathname:/usr/bin/as>
#
def self.find_program_by_names(*names)
names.each do |name|
if (path = find_program(name))
return path
end
end
return nil
end
#
# Runs a program.
#
# @overload run(path,*arguments)
# Run the program with the given arguments.
#
# @param [Pathname, String] path
# The path of the program to run.
#
# @param [Array] arguments
# Additional arguments to run the program with.
#
# @overload run(path,*arguments,options)
# Run the program with the given arguments and options.
#
# @param [Pathname, String] path
# The path of the program to run.
#
# @param [Array] arguments
# Additional arguments to run the program with.
#
# @param [Hash] options
# Additional options to execute the program with.
#
# @option options [Hash{String => String}] :env
# Environment variables to execute the program with.
#
# @option options [String] :popen
# Specifies to run the program using `IO.popen` with the given
# IO mode.
#
# @return [Boolean]
# Specifies whether the program exited successfully.
#
# @raise [RuntimeError]
# Passing `:popen`, `:env` or exec options is not supported
# before Ruby 1.9.1.
#
# @see http://rubydoc.info/stdlib/core/1.9.2/Kernel#spawn-instance_method
# For acceptable options.
#
def self.run(*arguments)
# extract trailing options and ENV variables from arguments
if arguments.last.kind_of?(Hash)
options = arguments.pop
env = (options.delete(:env) || {})
popen = options.delete(:popen)
else
options = {}
env = {}
end
# all arguments must be Strings
arguments = arguments.map { |arg| arg.to_s }
# print debugging information
if RProgram.debug
command = ''
env.each do |name,value|
command << "#{name}=#{value} "
end
command << arguments.join(' ')
command << " #{options.inspect}" unless options.empty?
warn ">>> #{command}"
end
# passing ENV variables or exec options is not supported before 1.9.1
if (!options.empty? && ruby_1_8?)
raise("cannot pass exec options to Kernel.system in #{RUBY_VERSION}")
end
if popen
# IO.popen does not accept multiple arguments on Ruby 1.8.x.
if ruby_1_8?
raise("cannot use :popen on #{RUBY_VERSION}, please use 1.9.x")
end
# :popen can only be used on Unix, or on Windows with JRuby
if (windows? && !jruby?)
raise("cannot use :popen on Windows, unless under JRuby")
end
end
# re-add ENV variables and exec options
arguments.unshift(env) unless env.empty?
arguments.push(options) unless options.empty?
if popen
IO.popen(arguments,popen)
else
Kernel.system(*arguments)
end
end
#
# The path to the `sudo` program.
#
# @return [Pathname, nil]
# The path to the `sudo` program.
#
# @since 0.3.0
#
def self.sudo_path
@sudo ||= find_program('sudo')
end
#
# Sets the path to the `sudo` program.
#
# @param [String, Pathname] path
# The new path to use.
#
# @return [Pathname]
# The new path to the `sudo` program.
#
# @since 0.3.0
#
def self.sudo_path=(path)
@sudo = Pathname.new(path)
end
#
# Determines whether `sudo` is available on the system.
#
# @return [Boolean]
# Specifies whether the `sudo` program is installed on the system.
#
# @since 0.3.0
#
def self.sudo?
!sudo_path.nil?
end
#
# Runs a program under sudo.
#
# @overload run(path,*arguments)
# Run the program with the given arguments.
#
# @param [Pathname, String] path
# The path of the program to run.
#
# @param [Array] arguments
# Additional arguments to run the program with.
#
# @overload run(path,*arguments,options)
# Run the program with the given arguments and options.
#
# @param [Pathname, String] path
# The path of the program to run.
#
# @param [Array] arguments
# Additional arguments to run the program with.
#
# @param [Hash] options
# Additional options to execute the program with.
#
# @return [Boolean]
# Specifies whether the program exited successfully.
#
# @raise [ProgramNotFound]
# Indicates that the `sudo` program could not be located.
#
# @since 0.1.8
#
# @see run
#
def self.sudo(*arguments)
unless sudo?
raise(ProgramNotFound,'could not find the "sudo" program')
end
return run(sudo_path,*arguments)
end
end
end
|
require 'ruby_debug.so'
SCRIPT_LINES__ = {} unless defined? SCRIPT_LINES__
SCRIPT_TIMESTAMPS__ = {} unless defined? SCRIPT_TIMESTAMPS__
# Core ruby-debug module: per-thread debug contexts plus module-level
# helpers for source caching and post-mortem debugging.
module Debugger
# Per-thread execution context used by the debugger.
class Context
def interrupt
self.stop_next = 1
end
alias __c_frame_binding frame_binding
# Fall back to a synthesized binding when the C-level one is unavailable.
def frame_binding(frame)
__c_frame_binding(frame) || hbinding(frame)
end
private
# Builds a binding exposing the frame's locals by eval'ing assignments.
def hbinding(frame)
hash = frame_locals(frame)
code = hash.keys.map{|k| "#{k} = hash['#{k}']"}.join(';') + ';binding'
if obj = frame_self(frame)
obj.instance_eval code
else
eval code, TOPLEVEL_BINDING
end
end
def handler
Debugger.handler or raise 'No interface loaded'
end
def at_breakpoint(breakpoint)
handler.at_breakpoint(self, breakpoint)
end
def at_catchpoint(excpt)
handler.at_catchpoint(self, excpt)
end
def at_tracing(file, line)
handler.at_tracing(self, file, line)
end
def at_line(file, line)
handler.at_line(self, file, line)
end
end
@reload_source_on_change = false
class << self
# interface modules provide +handler+ object
attr_accessor :handler
# if <tt>true</tt>, checks the modification time of source files and reloads if it was modified
attr_accessor :reload_source_on_change
#
# Interrupts the current thread
#
def interrupt
current_context.interrupt
end
#
# Interrupts the last debugged thread
#
def interrupt_last
if context = last_context
return nil unless context.thread.alive?
context.interrupt
end
context
end
# Returns cached source lines for +file+, trying both the directory of
# the main script and the current directory.
def source_for(file) # :nodoc:
finder = lambda do
if File.exists?(file)
if SCRIPT_LINES__[file].nil? || SCRIPT_LINES__[file] == true
SCRIPT_LINES__[file] = File.readlines(file)
end
change_time = test(?M, file)
SCRIPT_TIMESTAMPS__[file] ||= change_time
if @reload_source_on_change && SCRIPT_TIMESTAMPS__[file] < change_time
SCRIPT_LINES__[file] = File.readlines(file)
end
SCRIPT_LINES__[file]
end
end
Dir.chdir(File.dirname($0)){finder.call} || finder.call ||
(SCRIPT_LINES__[file] == true ? nil : SCRIPT_LINES__[file])
end
# Drops cached source so it is re-read on next access.
def source_reload
SCRIPT_LINES__.keys.each do |file|
next unless File.exists?(file)
SCRIPT_LINES__[file] = nil
end
end
def line_at(file, line) # :nodoc:
lines = source_for(file)
if lines
line = lines[line-1]
return "\n" unless line
return "#{line.gsub(/^\s+/, '').chomp}\n"
end
return "\n"
end
#
# Activates the post-mortem mode. There are two ways of using it:
#
# == Global post-mortem mode
# By calling Debugger.post_mortem method without a block, you install
# at_exit hook that intercepts any unhandled by your script exceptions
# and enables post-mortem mode.
#
# == Local post-mortem mode
#
# If you know that a particular block of code raises an exception you can
# enable post-mortem mode by wrapping this block with Debugger.post_mortem, e.g.
#
# def offender
# raise 'error'
# end
# Debugger.post_mortem do
# ...
# offender
# ...
# end
def post_mortem
raise "Post-mortem is already activated" if self.post_mortem?
self.post_mortem = true
if block_given?
begin
yield
rescue Exception => exp
handle_post_mortem(exp)
raise
ensure
self.post_mortem = false
end
elsif $! && post_mortem?
debug_at_exit do
handle_post_mortem($!)
end
end
end
# Suspends other threads and drops into the handler at the raise site.
def handle_post_mortem(exp)
return if exp.__debug_context.stack_size == 0
Debugger.suspend
orig_tracing = Debugger.tracing, Debugger.current_context.tracing
Debugger.tracing = Debugger.current_context.tracing = false
handler.at_line(exp.__debug_context, exp.__debug_file, exp.__debug_line)
ensure
Debugger.tracing, Debugger.current_context.tracing = orig_tracing
Debugger.resume
end
private :handle_post_mortem
end
class DebugThread # :nodoc:
end
class ThreadsTable # :nodoc:
end
end
# Debug metadata attached to exceptions for post-mortem debugging.
class Exception # :nodoc:
attr_reader :__debug_file, :__debug_line, :__debug_binding, :__debug_context
end
module Kernel
#
# Stops the current thread after a number of _steps_ made.
#
def debugger(steps = 1)
Debugger.start unless Debugger.started?
Debugger.current_context.stop_next = steps
end
alias breakpoint debugger unless respond_to?(:breakpoint)
#
# Returns a binding of n-th call frame
#
def binding_n(n = 0)
Debugger.skip do
# +2 skips the binding_n and skip frames themselves
Debugger.current_context.frame_binding(n+2)
end
end
end
class Module
#
# Wraps the +meth+ method with Debugger.start {...} block.
#
def debug_method(meth)
old_meth = "__debugee_#{meth}"
# Setter methods (foo=) cannot be aliased under a name ending in '=',
# so rename them to foo_set.
old_meth = "#{$1}_set" if old_meth =~ /^(.+)=$/
alias_method old_meth.to_sym, meth
class_eval <<-EOD
def #{meth}(*args, &block)
Debugger.start do
debugger 2
#{old_meth}(*args, &block)
end
end
EOD
end
#
# Wraps the +meth+ method with Debugger.post_mortem {...} block.
#
def post_mortem_method(meth)
old_meth = "__postmortem_#{meth}"
# Same setter-name workaround as in debug_method above.
old_meth = "#{$1}_set" if old_meth =~ /^(.+)=$/
alias_method old_meth.to_sym, meth
class_eval <<-EOD
def #{meth}(*args, &block)
Debugger.start do |dbg|
dbg.post_mortem do
#{old_meth}(*args, &block)
end
end
end
EOD
end
end
parens around a print seem to give a warning. Remove them.
git-svn-id: 7ffbd09614c8d739a966cfae413e2c1c0f4cb91b@250 453b2852-3d18-0410-866e-d09c099698e4
require 'ruby_debug.so'
SCRIPT_LINES__ = {} unless defined? SCRIPT_LINES__
SCRIPT_TIMESTAMPS__ = {} unless defined? SCRIPT_TIMESTAMPS__
module Debugger
# Per-thread debugging context.  The primitive operations
# (frame_binding, frame_locals, frame_self, stop_next=) are presumably
# provided by the ruby_debug C extension required above; this reopening
# adds pure-Ruby helpers plus the callbacks the extension fires.
class Context
  # Stop this thread at the very next line event.
  def interrupt
    self.stop_next = 1
  end
  alias __c_frame_binding frame_binding
  # Binding for +frame+: prefer the native binding, fall back to a
  # synthesized one; a NameError during synthesis yields nil.
  def frame_binding(frame)
    begin
      __c_frame_binding(frame) || hbinding(frame)
    rescue NameError
    end
  end
  private
  # Synthesizes a binding exposing the frame's locals by eval'ing an
  # assignment of each local, then returning the resulting binding.
  def hbinding(frame)
    hash = frame_locals(frame)
    code = hash.keys.map{|k| "#{k} = hash['#{k}']"}.join(';') + ';binding'
    if obj = frame_self(frame)
      obj.instance_eval code
    else
      # no frame self: evaluate at top level instead
      eval code, TOPLEVEL_BINDING
    end
  end
  # The active UI handler; debugging without one is an error.
  def handler
    Debugger.handler or raise 'No interface loaded'
  end
  # -- callbacks dispatched to the UI handler --
  def at_breakpoint(breakpoint)
    handler.at_breakpoint(self, breakpoint)
  end
  def at_catchpoint(excpt)
    handler.at_catchpoint(self, excpt)
  end
  def at_tracing(file, line)
    handler.at_tracing(self, file, line)
  end
  def at_line(file, line)
    handler.at_line(self, file, line)
  end
end
@reload_source_on_change = false
class << self
# interface modules provide +handler+ object
attr_accessor :handler
# if <tt>true</tt>, checks the modification time of source files and reloads if it was modified
attr_accessor :reload_source_on_change
#
# Interrupts the current thread
#
def interrupt
current_context.interrupt
end
#
# Interrupts the last debugged thread
#
def interrupt_last
if context = last_context
return nil unless context.thread.alive?
context.interrupt
end
context
end
# Returns the source of +file+ as an array of lines, or nil when it
# cannot be found.  Lines are cached in SCRIPT_LINES__ and re-read when
# @reload_source_on_change is set and the file's mtime advanced.  The
# lookup is attempted relative to the script's directory first, then as
# given; a cached value of +true+ is treated as unavailable.
def source_for(file) # :nodoc:
  finder = lambda do
    # File.exist? -- File.exists? is a deprecated alias, removed in Ruby 3.2
    if File.exist?(file)
      if SCRIPT_LINES__[file].nil? || SCRIPT_LINES__[file] == true
        SCRIPT_LINES__[file] = File.readlines(file)
      end
      change_time = test(?M, file)
      SCRIPT_TIMESTAMPS__[file] ||= change_time
      if @reload_source_on_change && SCRIPT_TIMESTAMPS__[file] < change_time
        SCRIPT_LINES__[file] = File.readlines(file)
      end
      SCRIPT_LINES__[file]
    end
  end
  Dir.chdir(File.dirname($0)){finder.call} || finder.call ||
    (SCRIPT_LINES__[file] == true ? nil : SCRIPT_LINES__[file])
end
# Invalidates every SCRIPT_LINES__ cache entry whose file still exists
# on disk, so source_for re-reads it on next access.
def source_reload
  SCRIPT_LINES__.keys.each do |file|
    # File.exist? -- File.exists? is deprecated (removed in Ruby 3.2)
    next unless File.exist?(file)
    SCRIPT_LINES__[file] = nil
  end
end
# Returns source line +line+ of +file+ with leading whitespace stripped
# and exactly one trailing newline; returns "\n" when the file or the
# requested line is unavailable.
def line_at(file, line) # :nodoc:
  lines = source_for(file)
  return "\n" unless lines
  text = lines[line - 1]
  return "\n" unless text
  "#{text.gsub(/^\s+/, '').chomp}\n"
end
#
# Activates the post-mortem mode. There are two ways of using it:
#
# == Global post-mortem mode
# By calling Debugger.post_mortem method without a block, you install
# at_exit hook that intercepts any unhandled by your script exceptions
# and enables post-mortem mode.
#
# == Local post-mortem mode
#
# If you know that a particular block of code raises an exception you can
# enable post-mortem mode by wrapping this block with Debugger.post_mortem, e.g.
#
# def offender
# raise 'error'
# end
# Debugger.post_mortem do
# ...
# offender
# ...
# end
# Activates post-mortem mode (see the comment block above).
def post_mortem
  if block_given?
    # Local mode: a nested activation is ignored with a warning rather
    # than an error, so the wrapped code still runs.
    if self.post_mortem?
      print "post-mortem already activated, block ignored\n"
      return
    end
    begin
      self.post_mortem = true
      yield
    rescue Exception => exp
      # enter the debugger at the raise site, then propagate
      handle_post_mortem(exp)
      raise
    ensure
      # always clear the flag when the block exits
      self.post_mortem = false
    end
  else
    # Global mode: leave the flag set permanently; if an exception is
    # already pending in $!, defer handling to process exit.
    self.post_mortem = true
    if $!
      debug_at_exit do
        handle_post_mortem($!)
      end
    end
  end
end
# Enters the debugger UI for an exception caught in post-mortem mode.
# Suspends the other contexts and disables tracing while the handler is
# active; both are restored in the ensure clause.
def handle_post_mortem(exp)
  # nothing to show if the exception carries no captured stack
  return if exp.__debug_context.stack_size == 0
  Debugger.suspend
  # parallel assignment: saves both tracing flags as a two-element array
  orig_tracing = Debugger.tracing, Debugger.current_context.tracing
  Debugger.tracing = Debugger.current_context.tracing = false
  # show the UI at the file/line where the exception was raised
  handler.at_line(exp.__debug_context, exp.__debug_file, exp.__debug_line)
ensure
  # restore the saved flags (multiple assignment splats the array back)
  Debugger.tracing, Debugger.current_context.tracing = orig_tracing
  Debugger.resume
end
private :handle_post_mortem
end
class DebugThread # :nodoc:
end
class ThreadsTable # :nodoc:
end
end
class Exception # :nodoc:
attr_reader :__debug_file, :__debug_line, :__debug_binding, :__debug_context
end
module Kernel
#
# Stops the current thread after a number of _steps_ made.
#
def debugger(steps = 1)
Debugger.start unless Debugger.started?
Debugger.current_context.stop_next = steps
end
alias breakpoint debugger unless respond_to?(:breakpoint)
#
# Returns a binding of n-th call frame
#
def binding_n(n = 0)
Debugger.skip do
Debugger.current_context.frame_binding(n+2)
end
end
end
class Module
#
# Wraps the +meth+ method with Debugger.start {...} block.
#
def debug_method(meth)
old_meth = "__debugee_#{meth}"
old_meth = "#{$1}_set" if old_meth =~ /^(.+)=$/
alias_method old_meth.to_sym, meth
class_eval <<-EOD
def #{meth}(*args, &block)
Debugger.start do
debugger 2
#{old_meth}(*args, &block)
end
end
EOD
end
#
# Wraps the +meth+ method with Debugger.post_mortem {...} block.
#
def post_mortem_method(meth)
old_meth = "__postmortem_#{meth}"
old_meth = "#{$1}_set" if old_meth =~ /^(.+)=$/
alias_method old_meth.to_sym, meth
class_eval <<-EOD
def #{meth}(*args, &block)
Debugger.start do |dbg|
dbg.post_mortem do
#{old_meth}(*args, &block)
end
end
end
EOD
end
end
|
=begin
rubylexer - a ruby lexer written in ruby
Copyright (C) 2004,2005,2008 Caleb Clausen
This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with this library; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
=end
class RubyLexer
#-------------------------
# Base class for every lexer token.  A token carries its source text
# (@ident), its byte offset in the input, optional line bookkeeping,
# and an optional stand-in keyword (+as+).
class Token
  attr_accessor :ident, :offset, :as, :allow_ooo_offset, :endline, :tag
  attr_writer :startline

  def initialize(ident, offset = nil)
    @ident  = ident
    @offset = offset
  end

  # Token text; placeholder "<ClassName>" when no ident is set.
  def to_s
    @ident || "<#{self.class.name}>"
  end

  def error; end

  def has_no_block?
    false
  end

  # An explicitly assigned start line wins; otherwise mirror endline.
  def startline
    defined?(@startline) ? @startline : endline
  end

  # Number of newlines this token spans; plain tokens span none.
  def linecount
    0
  end

  alias orig_inspect inspect
  alias dump inspect

  # Re-statement of the stock Object#inspect: defining #to_s under
  # Ruby 1.9 clobbers the built-in inspect, so it is spelled out here.
  def inspect
    pairs = instance_variables.map do |name|
      name.to_s + '=' + instance_variable_get(name).inspect
    end
    %[#<#{self.class}: #{pairs.join(' ')}>]
  end
end
#-------------------------
class WToken< Token
def ===(pattern)
assert @ident
pattern===@ident
end
end
#-------------------------
class KeywordToken < WToken #also some operators
def initialize(*args)
if Hash===args.last
opts=args.pop
as=opts.delete :as
fail unless opts.empty?
end
super(*args)
self.as=as
end
#-----------------------------------
def set_callsite!(x=true)
@callsite=x
end
#-----------------------------------
def callsite?
@callsite if defined? @callsite
end
attr_accessor :value
#-----------------------------------
def set_infix!
@infix=true
end
#-----------------------------------
def unary= flag
@infix=!flag
end
#-----------------------------------
def infix?
@infix ||= nil
end
def prefix?; !infix? end
alias unary prefix?
#-----------------------------------
def has_end!
@has_end=true
end
#-----------------------------------
def has_end?
self===RubyLexer::BEGINWORDS and @has_end||=nil
end
attr_accessor :ternary, :grouping
def has_no_block!
@has_no_block=true
end
def has_no_block?
@has_no_block if defined? @has_no_block
end
def infix
@infix if defined? @infix
end
end
#-------------------------
class OperatorToken < WToken
def initialize(*args)
@tag=nil
super
end
attr_writer :as
def unary= flag; @tag=:unary if flag end
def unary; @tag==:unary end
alias prefix? unary
def infix?; !prefix? end
def as
return @as if defined? @as
if tag and ident[/^[,*&]$/]
tag.to_s+ident
end
end
end
#-------------------------
module TokenPat
@@TokenPats={}
def token_pat #used in various case statements...
result=self.dup
@@TokenPats[self] ||=
(class <<result
alias old_3eq ===
def ===(token)
WToken===token and old_3eq(token.ident)
end
end;result)
end
end
class ::String; include TokenPat; end
class ::Regexp; include TokenPat; end
#-------------------------
class VarNameToken < WToken
attr_accessor :lvar_type
attr_accessor :in_def
end
#-------------------------
# Numeric literal token; also represents ?c character literals (with
# @char_literal set), which are re-rendered in ?-syntax by to_s.
class NumberToken < Token
  # Unlex the token.  For char literals, a bare '?'+chr breaks when the
  # character is a space or a non-glyph (e.g. "? " or a raw newline does
  # not relex), so those are emitted in an escaped spelling.  This
  # matches the later revision of this class found below in this file.
  def to_s
    if defined? @char_literal and @char_literal
      chr=@ident.chr
      '?'+case chr
      when " "; '\s'             # ?\s -- a literal "? " would not relex
      when /[!-~]/; chr          # printable ASCII can appear as-is
      else chr.inspect[1...-1]   # escape via String#inspect, quotes stripped
      end
    else
      @ident.to_s
    end
  end
  # True when the literal renders with a leading minus sign.
  def negative; /\A-/ === to_s end
  attr_accessor :char_literal
end
#-------------------------
class SymbolToken < Token
attr_accessor :open,:close
attr :raw
def initialize(ident,offset=nil,starter=':')
@raw=ident
str=ident.to_s
str[0,2]='' if /\A%s/===str
super starter+str, offset
@open=":"
@close=""
# @char=':'
end
def to_s
return @ident
=begin
raw=@raw.to_s
raw=raw[1...-1] if StringToken===@raw
@open+raw+@close
=end
end
end
#-------------------------
class MethNameToken < Token # < SymbolToken
def initialize(ident,offset=nil,bogus=nil)
@ident= (VarNameToken===ident)? ident.ident : ident
@offset=offset
@has_no_block=false
# @char=''
end
def [](regex) #is this used?
regex===ident
end
def ===(pattern)
pattern===@ident
end
def has_no_block!
@has_no_block=true
end
def has_no_block?
@has_no_block
end
def has_equals; /[a-z_0-9]=$/i===ident end
end
#-------------------------
# Token for a logical line break; semantically it stands in for ';'.
class NewlineToken < Token
  def initialize(newline_text = "\n", offset = nil)
    super(newline_text, offset)
  end

  # A newline acts as an explicit statement separator.
  def as
    ';'
  end

  # Always spans exactly one line...
  def linecount
    1
  end

  # ...so it starts on the line before it ends.
  def startline
    @endline - 1
  end

  # startline is derived from endline; writes are deliberately ignored.
  def startline=(_ignored)
  end
end
#-------------------------
class StringToken < Token
attr :char
attr_accessor :modifiers #for regex only
attr_accessor :elems
attr_accessor :startline
attr_accessor :bs_handler
attr_accessor :open #exact sequence of chars used to start the str
attr_accessor :close #exact seq of (1) char to stop the str
attr_accessor :lvars #names used in named backrefs if this is a regex
def linecount; line-startline end
def utf8?
@utf8||=nil
end
def utf8!
@utf8=true
end
def with_line(line)
@endline=line
self
end
def line; @endline end
def line= l; @endline=l end
def initialize(type='"',ident='')
super(ident)
type=="'" and type='"'
@char=type
assert @char[/^[\[{"`\/]$/] #"
@elems=[ident.dup] #why .dup?
@modifiers=nil
@endline=nil
end
DQUOTE_ESCAPE_TABLE = [
["\n",'\n'],
["\r",'\r'],
["\t",'\t'],
["\v",'\v'],
["\f",'\f'],
["\e",'\e'],
["\b",'\b'],
["\a",'\a']
]
PREFIXERS={ '['=>"%w[", '{'=>'%W{' }
SUFFIXERS={ '['=>"]", '{'=>'}' }
def has_str_inc?
elems.size>1 or RubyCode===elems.first
end
def to_s transname=:transform
assert @char[/[\[{"`\/]/] #"
#on output, all single-quoted strings become double-quoted
assert(@elems.length==1) if @char=='['
result=open.dup
starter=result[-1,1]
ender=close
elems.each{|e|
case e
when String; result<<e
# strfrag=translate_escapes strfrag if RubyLexer::FASTER_STRING_ESCAPES
# result << send(transname,strfrag,starter,ender)
when VarNameToken;
if /^[$@]/===e.to_s
result << '#' + e.to_s
else
result << "\#{#{e}}"
end
when RubyCode; result << '#' + e.to_s
else fail
end
}
result << ender
if @char=='/'
result << modifiers if modifiers #regex only
result="%r"+result if RubyLexer::WHSPLF[result[1,1]]
end
return result
end
def to_term
result=[]
0.step(@elems.length-1,2) { |i|
result << ConstTerm.new(@elems[i].dup)
if e=@elems[i+1]
assert(e.kind_of?(RubyCode))
result << (RubyTerm.new e)
end
}
return result
end
def append(glob)
#assert @elems.last.kind_of?(String)
case glob
when String,Integer then append_str! glob
when RubyCode then append_code! glob
else raise "bad string contents: #{glob}, a #{glob.class}"
end
#assert @elems.last.kind_of?(String)
end
def append_token(strtok)
assert @elems.last.kind_of?(String)
#assert strtok.elems.last.kind_of?(String)
assert strtok.elems.first.kind_of?(String)
@elems.last << strtok.elems.shift
first=strtok.elems.first
assert( first.nil? || first.kind_of?(RubyCode) )
@elems += strtok.elems
@ident << strtok.ident
assert((!@modifiers or !strtok.modifiers))
@modifiers||=strtok.modifiers
#assert @elems.last.kind_of?(String)
@bs_handler ||=strtok.bs_handler
return self
end
def translate_escapes(str)
rl=RubyLexer.new("(string escape translation hack...)",'')
result=str.dup
seq=result.to_sequence
rl.instance_eval{@file=seq}
repls=[]
i=0
#ugly ugly ugly
while i<result.size and bs_at=result.index(/\\./m,i)
seq.pos=$~.end(0)-1
ch=rl.send(bs_handler,"\\",@open[-1,1],@close)
result[bs_at...seq.pos]=ch
i=bs_at+ch.size
end
return result
end
private
UNESC_DELIMS={}
#simpler transform, preserves original exactly
def simple_transform(strfrag,starter,ender) #appears to be unused
assert('[{/'[@char])
#strfrag.gsub!(/(\A|[^\\])(?:\\\\)*\#([{$@])/){$1+'\\#'+$2} unless @char=='[' #esc #{
delimchars=Regexp.quote starter+ender
delimchars+=Regexp.quote("#") unless @char=='[' #escape beginning of string iterpolations
#i think most or all of this method is useless now...
#escape curly brace in string interpolations (%W only)
strfrag.gsub!('#{', '#\\{') if @char=='{'
ckey=starter+ender
unesc_delim=
UNESC_DELIMS[ckey]||=
/(\A|[^\\](?:\\\\)*)([#{delimchars}]+)/
# /(\\)([^#{delimchars}#{RubyLexer::WHSPLF}]|\Z)/
#an even number (esp 0) of backslashes before delim becomes escaped delim
strfrag.gsub!(unesc_delim){
pre=$1; toesc=$2
pre+toesc.gsub(/(.)/){ "\\"+$1 }
}
#no need to double backslashes anymore... they should come pre-doubled
return strfrag
end
def transform(strfrag,starter,ender) #appears to be unused
strfrag.gsub!("\\",'\\'*4)
strfrag.gsub!(/#([{$@])/,'\\#\\1')
strfrag.gsub!(Regexp.new("[\\"+starter+"\\"+ender+"]"),'\\\\\\&') unless @char=='?'
DQUOTE_ESCAPE_TABLE.each {|pair|
strfrag.gsub!(*pair)
} unless @char=='/'
strfrag.gsub!(/[^ -~]/){|np| #nonprintables
"\\x"+sprintf('%02X',np[0])
}
#break up long lines (best done later?)
strfrag.gsub!(/(\\x[0-9A-F]{2}|\\?.){40}/i, "\\&\\\n")
return strfrag
end
def append_str!(str)
if @elems.last.kind_of?(String)
@elems.last << str
else
@elems << str
end
@ident << str
assert @elems.last.kind_of?(String)
end
def append_code!(code)
if @elems.last.kind_of?(String)
else
@elems.push ''
end
@elems.push code,''
@ident << "\#{#{code}}"
assert @elems.last.kind_of?(String)
end
end
#-------------------------
class RenderExactlyStringToken < StringToken
alias transform simple_transform
#transform isn't called anymore, so there's no need for this hacky class
end
#-------------------------
class HerePlaceholderToken < WToken
attr_reader :termex, :quote, :ender, :dash
attr_accessor :unsafe_to_use, :string
attr_accessor :bodyclass
attr_accessor :open, :close
def initialize(dash,quote,ender,quote_real=true)
@dash,@quote,@ender,@quote_real=dash,quote,ender,quote_real
@unsafe_to_use=true
@string=StringToken.new
#@termex=/^#{'[\s\v]*' if dash}#{Regexp.escape ender}$/
@termex=Regexp.new \
["^", ('[\s\v]*' if dash), Regexp.escape(ender), "$"].join
@bodyclass=HereBodyToken
end
def ===(bogus); false end
def to_s
# if @bodyclass==OutlinedHereBodyToken
result=if/[^a-z_0-9]/i===@ender
@ender.gsub(/[\\"]/, '\\\\'+'\\&')
else
@ender
end
return ["<<",@dash,@quote_real&&@quote,result,@quote_real&&@quote].join
# else
# assert !unsafe_to_use
# return @string.to_s
# end
end
def append s; @string.append s end
def append_token tok; @string.append_token tok end
#def with_line(line) @string.line=line; self end
def line; @line || @string.line end
def line=line; @line=line end
def startline; @line end
alias endline startline
def startline=x; end
alias endline= startline=
end
#-------------------------
module StillIgnoreToken
end
#-------------------------
class IgnoreToken < Token
include StillIgnoreToken
def initialize(ident,*stuff)
@linecount=ident.count "\n"
super
end
attr :linecount
end
#-------------------------
class WsToken < IgnoreToken
end
#-------------------------
class ZwToken < IgnoreToken
def initialize(offset)
super('',offset)
end
def explicit_form
abstract
end
def explicit_form_all; explicit_form end
end
#-------------------------
class NoWsToken < ZwToken
def explicit_form_all
"#nows#"
end
def explicit_form
nil
end
end
#-------------------------
class ShebangToken < IgnoreToken
def initialize(text)
super text,0
end
end
#-------------------------
class EncodingDeclToken < IgnoreToken
def initialize(text,encoding,offset)
text||=''
super text,offset
@encoding=encoding
end
attr :encoding
end
#-------------------------
# Zero-width '(' inserted where Ruby permits a parenless argument list;
# renders as nothing but stands for "(".
class ImplicitParamListStartToken < KeywordToken
  include StillIgnoreToken

  def initialize(offset)
    super("(", offset)
  end

  # Invisible on output.
  def to_s
    ''
  end

  def as
    "("
  end
end
#-------------------------
# Zero-width ')' closing an implicit argument list; renders as nothing
# but stands for ")".
class ImplicitParamListEndToken < KeywordToken
  include StillIgnoreToken

  def initialize(offset)
    super(")", offset)
  end

  # Invisible on output.
  def to_s
    ''
  end

  def as
    ")"
  end
end
#-------------------------
class AssignmentRhsListStartToken < ZwToken
def explicit_form
'*['
end
end
#-------------------------
class AssignmentRhsListEndToken < ZwToken
def explicit_form
']'
end
end
#-------------------------
class KwParamListStartToken < ZwToken
def explicit_form_all
"#((#"
end
def explicit_form
nil
end
end
#-------------------------
class KwParamListEndToken < ZwToken
def explicit_form_all
"#))#"
end
def explicit_form
nil
end
end
#-------------------------
# Zero-width marker for the end of a definition header; acts as ';'.
class EndHeaderToken < ZwToken
  def as
    ";"
  end
end
# Compatibility alias for the older class name.
EndDefHeaderToken = EndHeaderToken
#-------------------------
# An escaped newline (backslash at end of line); ignored by consumers
# but tracked for file/line accounting.
class EscNlToken < IgnoreToken
  attr_accessor :filename, :linenum

  def initialize(ident, offset, filename = nil, linenum = nil)
    super(ident, offset)
    @filename = filename
    @linenum = linenum
  end

  # An escaped newline always spans exactly one line.
  def linecount
    1
  end

  def startline
    @linenum - 1
  end

  def endline
    @linenum
  end

  # startline is derived from linenum; writes are deliberately ignored.
  def startline=(_ignored)
  end

  alias endline= linenum=
end
#-------------------------
# End-of-input marker; records which file ended, the cause, and where.
class EoiToken < Token
  attr_reader :file
  alias pos offset

  def initialize(cause, file, offset = nil, line = nil)
    super(cause, offset)
    @file    = file
    @endline = line
  end
end
#-------------------------
class HereBodyToken < IgnoreToken
#attr_accessor :ender
attr_accessor :open,:close
def initialize(headtok,linecount)
assert HerePlaceholderToken===headtok
@ident,@offset=headtok.string,headtok.string.offset
@headtok=headtok
@linecount=linecount
end
def line
@ident.line
end
alias endline line
def endline= line
@ident.line= line
end
def startline
line-@linecount+1
end
def to_s
@ident.to_s
end
attr :headtok
attr :linecount #num lines here body spans (including terminator)
end
#-------------------------
# Bookkeeping token recording the current source file and line number;
# ident holds the file name.
class FileAndLineToken < IgnoreToken
  attr_accessor :line

  def initialize(ident, line, offset = nil)
    super(ident, offset)
    @line = line
  end

  # Rendered in "#file:line" form.
  def to_s
    "##{@ident}:#{@line}"
  end

  def file
    @ident
  end

  def subitem
    @line
  end

  def endline
    @line
  end

  def startline
    @line
  end

  alias endline= line=

  # startline mirrors line; writes are deliberately ignored.
  def startline=(_ignored)
  end
end
#-------------------------
class OutlinedHereBodyToken < HereBodyToken #appears to be unused
def to_s
assert HerePlaceholderToken===@headtok
result=@headtok.string
result=result.to_s(:simple_transform).match(/^"(.*)"$/m)[1]
return result +
@headtok.ender +
"\n"
end
end
#-------------------------
module ErrorToken
attr_accessor :error
end
#-------------------------
# Token with a secondary component, rendered as ident + char2 + subitem.
class SubitemToken < Token
  attr_reader :char2, :subitem

  def initialize(ident, subitem)
    super(ident)
    @subitem = subitem
  end

  def to_s
    super + @char2 + @subitem.to_s
  end
end
#-------------------------
# A decorator of the form ^name=value; the ident is stored with a
# leading caret and rendered via SubitemToken#to_s with '=' as char2.
class DecoratorToken < SubitemToken
  def initialize(ident, subitem)
    super('^' + ident, subitem)
    @subitem = @subitem.to_s # original coerces the subitem to a String
    @char2 = '='
  end

  # NOTE(review): after initialize @subitem is a String (never nil), so
  # this can only be true once value= has stored nil -- verify intent.
  def needs_value?
    @subitem.nil?
  end

  def value=(v)
    @subitem = v
  end

  def value
    @subitem
  end
end
end
require "rubylexer/rubycode"
improve unlexing of char constants when char is space or non-glyph
=begin
rubylexer - a ruby lexer written in ruby
Copyright (C) 2004,2005,2008 Caleb Clausen
This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with this library; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
=end
class RubyLexer
#-------------------------
class Token
attr_accessor :ident
def to_s
@ident || "<#{self.class.name}>"
end
attr_accessor :offset #file offset of start of this token
attr_accessor :as #if defined, a KeywordToken which this token stands in for.
attr_accessor :allow_ooo_offset #hack
attr_accessor :endline
def initialize(ident,offset=nil)
@ident=ident
@offset=offset
end
def error; end
def has_no_block?; false end
attr_accessor :tag
attr_writer :startline
def startline
return @startline if defined? @startline
return endline
end
def linecount; 0 end
alias orig_inspect inspect
alias dump inspect
#this is merely the normal definition of inspect
#and is unneeded in ruby 1.8
#but in 1.9, defining to_s seemingly overrides the built-in Object#inspect
#and you can't get it back, no matter what.
#fucking 1.9
def inspect
ivars=instance_variables.map{|ivar|
ivar.to_s+'='+instance_variable_get(ivar).inspect
}.join(' ')
%[#<#{self.class}: #{ivars}>]
end
end
#-------------------------
class WToken< Token
def ===(pattern)
assert @ident
pattern===@ident
end
end
#-------------------------
class KeywordToken < WToken #also some operators
def initialize(*args)
if Hash===args.last
opts=args.pop
as=opts.delete :as
fail unless opts.empty?
end
super(*args)
self.as=as
end
#-----------------------------------
def set_callsite!(x=true)
@callsite=x
end
#-----------------------------------
def callsite?
@callsite if defined? @callsite
end
attr_accessor :value
#-----------------------------------
def set_infix!
@infix=true
end
#-----------------------------------
def unary= flag
@infix=!flag
end
#-----------------------------------
def infix?
@infix ||= nil
end
def prefix?; !infix? end
alias unary prefix?
#-----------------------------------
def has_end!
@has_end=true
end
#-----------------------------------
def has_end?
self===RubyLexer::BEGINWORDS and @has_end||=nil
end
attr_accessor :ternary, :grouping
def has_no_block!
@has_no_block=true
end
def has_no_block?
@has_no_block if defined? @has_no_block
end
def infix
@infix if defined? @infix
end
end
#-------------------------
class OperatorToken < WToken
def initialize(*args)
@tag=nil
super
end
attr_writer :as
def unary= flag; @tag=:unary if flag end
def unary; @tag==:unary end
alias prefix? unary
def infix?; !prefix? end
def as
return @as if defined? @as
if tag and ident[/^[,*&]$/]
tag.to_s+ident
end
end
end
#-------------------------
module TokenPat
@@TokenPats={}
def token_pat #used in various case statements...
result=self.dup
@@TokenPats[self] ||=
(class <<result
alias old_3eq ===
def ===(token)
WToken===token and old_3eq(token.ident)
end
end;result)
end
end
class ::String; include TokenPat; end
class ::Regexp; include TokenPat; end
#-------------------------
class VarNameToken < WToken
attr_accessor :lvar_type
attr_accessor :in_def
end
#-------------------------
# Numeric literal token; also represents ?c character literals (with
# @char_literal set), which are re-rendered in ?-syntax by to_s.
class NumberToken < Token
  def to_s
    if defined? @char_literal and @char_literal
      chr=@ident.chr
      # unlex the char constant: space and non-glyph characters need an
      # escaped spelling, since a bare "? " or raw control char would
      # not survive relexing
      '?'+case chr
      when " "; '\s'
      when /[!-~]/; chr
      else chr.inspect[1...-1]
      end
    else
      @ident.to_s
    end
  end
  # True when the literal renders with a leading minus sign.
  def negative; /\A-/ === to_s end
  attr_accessor :char_literal
end
#-------------------------
class SymbolToken < Token
attr_accessor :open,:close
attr :raw
def initialize(ident,offset=nil,starter=':')
@raw=ident
str=ident.to_s
str[0,2]='' if /\A%s/===str
super starter+str, offset
@open=":"
@close=""
# @char=':'
end
def to_s
return @ident
=begin
raw=@raw.to_s
raw=raw[1...-1] if StringToken===@raw
@open+raw+@close
=end
end
end
#-------------------------
class MethNameToken < Token # < SymbolToken
def initialize(ident,offset=nil,bogus=nil)
@ident= (VarNameToken===ident)? ident.ident : ident
@offset=offset
@has_no_block=false
# @char=''
end
def [](regex) #is this used?
regex===ident
end
def ===(pattern)
pattern===@ident
end
def has_no_block!
@has_no_block=true
end
def has_no_block?
@has_no_block
end
def has_equals; /[a-z_0-9]=$/i===ident end
end
#-------------------------
class NewlineToken < Token
def initialize(nlstr="\n",offset=nil)
super(nlstr,offset)
#@char=''
end
def as; ';' end
def linecount; 1 end
def startline
@endline-1
end
def startline=bogus; end
end
#-------------------------
class StringToken < Token
attr :char
attr_accessor :modifiers #for regex only
attr_accessor :elems
attr_accessor :startline
attr_accessor :bs_handler
attr_accessor :open #exact sequence of chars used to start the str
attr_accessor :close #exact seq of (1) char to stop the str
attr_accessor :lvars #names used in named backrefs if this is a regex
def linecount; line-startline end
def utf8?
@utf8||=nil
end
def utf8!
@utf8=true
end
def with_line(line)
@endline=line
self
end
def line; @endline end
def line= l; @endline=l end
def initialize(type='"',ident='')
super(ident)
type=="'" and type='"'
@char=type
assert @char[/^[\[{"`\/]$/] #"
@elems=[ident.dup] #why .dup?
@modifiers=nil
@endline=nil
end
DQUOTE_ESCAPE_TABLE = [
["\n",'\n'],
["\r",'\r'],
["\t",'\t'],
["\v",'\v'],
["\f",'\f'],
["\e",'\e'],
["\b",'\b'],
["\a",'\a']
]
PREFIXERS={ '['=>"%w[", '{'=>'%W{' }
SUFFIXERS={ '['=>"]", '{'=>'}' }
def has_str_inc?
elems.size>1 or RubyCode===elems.first
end
def to_s transname=:transform
assert @char[/[\[{"`\/]/] #"
#on output, all single-quoted strings become double-quoted
assert(@elems.length==1) if @char=='['
result=open.dup
starter=result[-1,1]
ender=close
elems.each{|e|
case e
when String; result<<e
# strfrag=translate_escapes strfrag if RubyLexer::FASTER_STRING_ESCAPES
# result << send(transname,strfrag,starter,ender)
when VarNameToken;
if /^[$@]/===e.to_s
result << '#' + e.to_s
else
result << "\#{#{e}}"
end
when RubyCode; result << '#' + e.to_s
else fail
end
}
result << ender
if @char=='/'
result << modifiers if modifiers #regex only
result="%r"+result if RubyLexer::WHSPLF[result[1,1]]
end
return result
end
def to_term
result=[]
0.step(@elems.length-1,2) { |i|
result << ConstTerm.new(@elems[i].dup)
if e=@elems[i+1]
assert(e.kind_of?(RubyCode))
result << (RubyTerm.new e)
end
}
return result
end
def append(glob)
#assert @elems.last.kind_of?(String)
case glob
when String,Integer then append_str! glob
when RubyCode then append_code! glob
else raise "bad string contents: #{glob}, a #{glob.class}"
end
#assert @elems.last.kind_of?(String)
end
def append_token(strtok)
assert @elems.last.kind_of?(String)
#assert strtok.elems.last.kind_of?(String)
assert strtok.elems.first.kind_of?(String)
@elems.last << strtok.elems.shift
first=strtok.elems.first
assert( first.nil? || first.kind_of?(RubyCode) )
@elems += strtok.elems
@ident << strtok.ident
assert((!@modifiers or !strtok.modifiers))
@modifiers||=strtok.modifiers
#assert @elems.last.kind_of?(String)
@bs_handler ||=strtok.bs_handler
return self
end
def translate_escapes(str)
rl=RubyLexer.new("(string escape translation hack...)",'')
result=str.dup
seq=result.to_sequence
rl.instance_eval{@file=seq}
repls=[]
i=0
#ugly ugly ugly
while i<result.size and bs_at=result.index(/\\./m,i)
seq.pos=$~.end(0)-1
ch=rl.send(bs_handler,"\\",@open[-1,1],@close)
result[bs_at...seq.pos]=ch
i=bs_at+ch.size
end
return result
end
private
UNESC_DELIMS={}
#simpler transform, preserves original exactly
def simple_transform(strfrag,starter,ender) #appears to be unused
assert('[{/'[@char])
#strfrag.gsub!(/(\A|[^\\])(?:\\\\)*\#([{$@])/){$1+'\\#'+$2} unless @char=='[' #esc #{
delimchars=Regexp.quote starter+ender
delimchars+=Regexp.quote("#") unless @char=='[' #escape beginning of string iterpolations
#i think most or all of this method is useless now...
#escape curly brace in string interpolations (%W only)
strfrag.gsub!('#{', '#\\{') if @char=='{'
ckey=starter+ender
unesc_delim=
UNESC_DELIMS[ckey]||=
/(\A|[^\\](?:\\\\)*)([#{delimchars}]+)/
# /(\\)([^#{delimchars}#{RubyLexer::WHSPLF}]|\Z)/
#an even number (esp 0) of backslashes before delim becomes escaped delim
strfrag.gsub!(unesc_delim){
pre=$1; toesc=$2
pre+toesc.gsub(/(.)/){ "\\"+$1 }
}
#no need to double backslashes anymore... they should come pre-doubled
return strfrag
end
def transform(strfrag,starter,ender) #appears to be unused
strfrag.gsub!("\\",'\\'*4)
strfrag.gsub!(/#([{$@])/,'\\#\\1')
strfrag.gsub!(Regexp.new("[\\"+starter+"\\"+ender+"]"),'\\\\\\&') unless @char=='?'
DQUOTE_ESCAPE_TABLE.each {|pair|
strfrag.gsub!(*pair)
} unless @char=='/'
strfrag.gsub!(/[^ -~]/){|np| #nonprintables
"\\x"+sprintf('%02X',np[0])
}
#break up long lines (best done later?)
strfrag.gsub!(/(\\x[0-9A-F]{2}|\\?.){40}/i, "\\&\\\n")
return strfrag
end
def append_str!(str)
if @elems.last.kind_of?(String)
@elems.last << str
else
@elems << str
end
@ident << str
assert @elems.last.kind_of?(String)
end
def append_code!(code)
if @elems.last.kind_of?(String)
else
@elems.push ''
end
@elems.push code,''
@ident << "\#{#{code}}"
assert @elems.last.kind_of?(String)
end
end
#-------------------------
class RenderExactlyStringToken < StringToken
alias transform simple_transform
#transform isn't called anymore, so there's no need for this hacky class
end
#-------------------------
class HerePlaceholderToken < WToken
attr_reader :termex, :quote, :ender, :dash
attr_accessor :unsafe_to_use, :string
attr_accessor :bodyclass
attr_accessor :open, :close
def initialize(dash,quote,ender,quote_real=true)
@dash,@quote,@ender,@quote_real=dash,quote,ender,quote_real
@unsafe_to_use=true
@string=StringToken.new
#@termex=/^#{'[\s\v]*' if dash}#{Regexp.escape ender}$/
@termex=Regexp.new \
["^", ('[\s\v]*' if dash), Regexp.escape(ender), "$"].join
@bodyclass=HereBodyToken
end
def ===(bogus); false end
def to_s
# if @bodyclass==OutlinedHereBodyToken
result=if/[^a-z_0-9]/i===@ender
@ender.gsub(/[\\"]/, '\\\\'+'\\&')
else
@ender
end
return ["<<",@dash,@quote_real&&@quote,result,@quote_real&&@quote].join
# else
# assert !unsafe_to_use
# return @string.to_s
# end
end
def append s; @string.append s end
def append_token tok; @string.append_token tok end
#def with_line(line) @string.line=line; self end
def line; @line || @string.line end
def line=line; @line=line end
def startline; @line end
alias endline startline
def startline=x; end
alias endline= startline=
end
#-------------------------
module StillIgnoreToken
end
#-------------------------
class IgnoreToken < Token
include StillIgnoreToken
def initialize(ident,*stuff)
@linecount=ident.count "\n"
super
end
attr :linecount
end
#-------------------------
class WsToken < IgnoreToken
end
#-------------------------
class ZwToken < IgnoreToken
def initialize(offset)
super('',offset)
end
def explicit_form
abstract
end
def explicit_form_all; explicit_form end
end
#-------------------------
class NoWsToken < ZwToken
def explicit_form_all
"#nows#"
end
def explicit_form
nil
end
end
#-------------------------
class ShebangToken < IgnoreToken
def initialize(text)
super text,0
end
end
#-------------------------
class EncodingDeclToken < IgnoreToken
def initialize(text,encoding,offset)
text||=''
super text,offset
@encoding=encoding
end
attr :encoding
end
#-------------------------
class ImplicitParamListStartToken < KeywordToken
include StillIgnoreToken
def initialize(offset)
super("(",offset)
end
def to_s; '' end
def as; "(" end
end
#-------------------------
class ImplicitParamListEndToken < KeywordToken
include StillIgnoreToken
def initialize(offset)
super(")",offset)
end
def to_s; '' end
def as; ")" end
end
#-------------------------
class AssignmentRhsListStartToken < ZwToken
def explicit_form
'*['
end
end
#-------------------------
class AssignmentRhsListEndToken < ZwToken
def explicit_form
']'
end
end
#-------------------------
class KwParamListStartToken < ZwToken
def explicit_form_all
"#((#"
end
def explicit_form
nil
end
end
#-------------------------
class KwParamListEndToken < ZwToken
def explicit_form_all
"#))#"
end
def explicit_form
nil
end
end
#-------------------------
class EndHeaderToken < ZwToken
def as; ";" end
end
EndDefHeaderToken=EndHeaderToken
#-------------------------
class EscNlToken < IgnoreToken
def initialize(ident,offset,filename=nil,linenum=nil)
super(ident,offset)
#@char='\\'
@filename=filename
@linenum=linenum
end
attr_accessor :filename,:linenum
def linecount; 1 end
def startline
@linenum-1
end
def endline
@linenum
end
def startline= bogus; end
alias endline= linenum=
end
#-------------------------
class EoiToken < Token
attr :file
alias :pos :offset
def initialize(cause,file, offset=nil,line=nil)
super(cause,offset)
@file=file
@endline=line
end
end
#-------------------------
class HereBodyToken < IgnoreToken
#attr_accessor :ender
attr_accessor :open,:close
def initialize(headtok,linecount)
assert HerePlaceholderToken===headtok
@ident,@offset=headtok.string,headtok.string.offset
@headtok=headtok
@linecount=linecount
end
def line
@ident.line
end
alias endline line
def endline= line
@ident.line= line
end
def startline
line-@linecount+1
end
def to_s
@ident.to_s
end
attr :headtok
attr :linecount #num lines here body spans (including terminator)
end
#-------------------------
class FileAndLineToken < IgnoreToken
attr_accessor :line
def initialize(ident,line,offset=nil)
super ident,offset
#@char='#'
@line=line
end
#def char; '#' end
def to_s()
%[##@ident:#@line]
end
def file() @ident end
def subitem() @line end #needed?
def endline; @line end
def startline; @line end
alias endline= line=
def startline= bogus; end
end
#-------------------------
class OutlinedHereBodyToken < HereBodyToken #appears to be unused
def to_s
assert HerePlaceholderToken===@headtok
result=@headtok.string
result=result.to_s(:simple_transform).match(/^"(.*)"$/m)[1]
return result +
@headtok.ender +
"\n"
end
end
#-------------------------
module ErrorToken
attr_accessor :error
end
#-------------------------
class SubitemToken < Token
attr :char2
attr :subitem
def initialize(ident,subitem)
super ident
@subitem=subitem
end
def to_s()
super+@char2+@subitem.to_s
end
end
#-------------------------
class DecoratorToken < SubitemToken
def initialize(ident,subitem)
super '^'+ident,subitem
@subitem=@subitem.to_s #why to_s?
#@char='^'
@char2='='
end
#alias to_s ident #parent has right implementation of to_s... i think
def needs_value?() @subitem.nil? end
def value=(v) @subitem=v end
def value() @subitem end
end
end
require "rubylexer/rubycode"
|
=begin
rubylexer - a ruby lexer written in ruby
Copyright (C) 2004,2005,2008 Caleb Clausen
This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with this library; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
=end
class RubyLexer
#-------------------------
class Token
attr_accessor :ident
def to_s
@ident || "<#{self.class.name}>"
end
attr_accessor :offset #file offset of start of this token
attr_accessor :as #if defined, a KeywordToken which this token stands in for.
attr_accessor :allow_ooo_offset #hack
attr_accessor :endline
def initialize(ident,offset=nil)
@ident=ident
@offset=offset
end
def error; end
def has_no_block?; false end
attr_accessor :tag
attr_writer :startline
def startline
return @startline if defined? @startline
return endline
end
def linecount; 0 end
end
#-------------------------
class WToken< Token
def ===(pattern)
assert @ident
pattern===@ident
end
end
#-------------------------
class KeywordToken < WToken #also some operators
def initialize(*args)
if Hash===args.last
opts=args.pop
as=opts.delete :as
fail unless opts.empty?
end
super(*args)
self.as=as
end
#-----------------------------------
def set_callsite!(x=true)
@callsite=x
end
#-----------------------------------
def callsite?
@callsite if defined? @callsite
end
attr_accessor :value
#-----------------------------------
def set_infix!
@infix=true
end
#-----------------------------------
def unary= flag
@infix=!flag
end
#-----------------------------------
def infix?
@infix ||= nil
end
def prefix?; !infix? end
alias unary prefix?
#-----------------------------------
def has_end!
@has_end=true
end
#-----------------------------------
def has_end?
self===RubyLexer::BEGINWORDS and @has_end||=nil
end
attr_accessor :ternary, :grouping
def has_no_block!
@has_no_block=true
end
def has_no_block?
@has_no_block if defined? @has_no_block
end
def infix
@infix if defined? @infix
end
end
#-------------------------
class OperatorToken < WToken
def initialize(*args)
@tag=nil
super
end
attr_writer :as
def unary= flag; @tag=:unary if flag end
def unary; @tag==:unary end
alias prefix? unary
def infix?; !prefix? end
def as
return @as if defined? @as
if tag and ident[/^[,*&]$/]
tag.to_s+ident
end
end
end
#-------------------------
module TokenPat
@@TokenPats={}
def token_pat #used in various case statements...
result=self.dup
@@TokenPats[self] ||=
(class <<result
alias old_3eq ===
def ===(token)
WToken===token and old_3eq(token.ident)
end
end;result)
end
end
class ::String; include TokenPat; end
class ::Regexp; include TokenPat; end
#-------------------------
class VarNameToken < WToken
attr_accessor :lvar_type
attr_accessor :in_def
end
#-------------------------
class NumberToken < Token
def to_s; @ident.to_s end
def negative; /\A-/ === ident end
end
#-------------------------
class SymbolToken < Token
attr_accessor :open,:close
attr :raw
def initialize(ident,offset=nil,starter=':')
@raw=ident
str=ident.to_s
str[0,2]='' if /\A%s/===str
super starter+str, offset
@open=":"
@close=""
# @char=':'
end
def to_s
return @ident
=begin
raw=@raw.to_s
raw=raw[1...-1] if StringToken===@raw
@open+raw+@close
=end
end
end
#-------------------------
class MethNameToken < Token # < SymbolToken
def initialize(ident,offset=nil,bogus=nil)
@ident= (VarNameToken===ident)? ident.ident : ident
@offset=offset
@has_no_block=false
# @char=''
end
def [](regex) #is this used?
regex===ident
end
def ===(pattern)
pattern===@ident
end
def has_no_block!
@has_no_block=true
end
def has_no_block?
@has_no_block
end
def has_equals; /[a-z_0-9]=$/i===ident end
end
#-------------------------
class NewlineToken < Token
def initialize(nlstr="\n",offset=nil)
super(nlstr,offset)
#@char=''
end
def as; ';' end
def linecount; 1 end
def startline
@endline-1
end
def startline=bogus; end
end
#-------------------------
class StringToken < Token
attr :char
attr_accessor :modifiers #for regex only
attr_accessor :elems
attr_accessor :startline
attr_accessor :bs_handler
attr_accessor :open #exact sequence of chars used to start the str
attr_accessor :close #exact seq of (1) char to stop the str
attr_accessor :lvars #names used in named backrefs if this is a regex
def linecount; line-startline end
def utf8?
@utf8||=nil
end
def utf8!
@utf8=true
end
def with_line(line)
@endline=line
self
end
def line; @endline end
def line= l; @endline=l end
def initialize(type='"',ident='')
super(ident)
type=="'" and type='"'
@char=type
assert @char[/^[\[{"`\/]$/] #"
@elems=[ident.dup] #why .dup?
@modifiers=nil
@endline=nil
end
DQUOTE_ESCAPE_TABLE = [
["\n",'\n'],
["\r",'\r'],
["\t",'\t'],
["\v",'\v'],
["\f",'\f'],
["\e",'\e'],
["\b",'\b'],
["\a",'\a']
]
PREFIXERS={ '['=>"%w[", '{'=>'%W{' }
SUFFIXERS={ '['=>"]", '{'=>'}' }
def has_str_inc?
elems.size>1 or RubyCode===elems.first
end
def to_s transname=:transform
assert @char[/[\[{"`\/]/] #"
#on output, all single-quoted strings become double-quoted
assert(@elems.length==1) if @char=='['
result=open.dup
starter=result[-1,1]
ender=close
elems.each{|e|
case e
when String; result<<e
# strfrag=translate_escapes strfrag if RubyLexer::FASTER_STRING_ESCAPES
# result << send(transname,strfrag,starter,ender)
when VarNameToken;
if /^[$@]/===e.to_s
result << '#' + e.to_s
else
result << "\#{#{e}}"
end
when RubyCode; result << '#' + e.to_s
else fail
end
}
result << ender
if @char=='/'
result << modifiers if modifiers #regex only
result="%r"+result if RubyLexer::WHSPLF[result[1,1]]
end
return result
end
def to_term
result=[]
0.step(@elems.length-1,2) { |i|
result << ConstTerm.new(@elems[i].dup)
if e=@elems[i+1]
assert(e.kind_of?(RubyCode))
result << (RubyTerm.new e)
end
}
return result
end
def append(glob)
#assert @elems.last.kind_of?(String)
case glob
when String,Integer then append_str! glob
when RubyCode then append_code! glob
else raise "bad string contents: #{glob}, a #{glob.class}"
end
#assert @elems.last.kind_of?(String)
end
def append_token(strtok)
assert @elems.last.kind_of?(String)
#assert strtok.elems.last.kind_of?(String)
assert strtok.elems.first.kind_of?(String)
@elems.last << strtok.elems.shift
first=strtok.elems.first
assert( first.nil? || first.kind_of?(RubyCode) )
@elems += strtok.elems
@ident << strtok.ident
assert((!@modifiers or !strtok.modifiers))
@modifiers||=strtok.modifiers
#assert @elems.last.kind_of?(String)
@bs_handler ||=strtok.bs_handler
return self
end
def translate_escapes(str)
rl=RubyLexer.new("(string escape translation hack...)",'')
result=str.dup
seq=result.to_sequence
rl.instance_eval{@file=seq}
repls=[]
i=0
#ugly ugly ugly
while i<result.size and bs_at=result.index(/\\./m,i)
seq.pos=$~.end(0)-1
ch=rl.send(bs_handler,"\\",@open[-1,1],@close)
result[bs_at...seq.pos]=ch
i=bs_at+ch.size
end
return result
end
private
UNESC_DELIMS={}
#simpler transform, preserves original exactly
def simple_transform(strfrag,starter,ender) #appears to be unused
assert('[{/'[@char])
#strfrag.gsub!(/(\A|[^\\])(?:\\\\)*\#([{$@])/){$1+'\\#'+$2} unless @char=='[' #esc #{
delimchars=Regexp.quote starter+ender
delimchars+=Regexp.quote("#") unless @char=='[' #escape beginning of string iterpolations
#i think most or all of this method is useless now...
#escape curly brace in string interpolations (%W only)
strfrag.gsub!('#{', '#\\{') if @char=='{'
ckey=starter+ender
unesc_delim=
UNESC_DELIMS[ckey]||=
/(\A|[^\\](?:\\\\)*)([#{delimchars}]+)/
# /(\\)([^#{delimchars}#{RubyLexer::WHSPLF}]|\Z)/
#an even number (esp 0) of backslashes before delim becomes escaped delim
strfrag.gsub!(unesc_delim){
pre=$1; toesc=$2
pre+toesc.gsub(/(.)/){ "\\"+$1 }
}
#no need to double backslashes anymore... they should come pre-doubled
return strfrag
end
def transform(strfrag,starter,ender) #appears to be unused
strfrag.gsub!("\\",'\\'*4)
strfrag.gsub!(/#([{$@])/,'\\#\\1')
strfrag.gsub!(Regexp.new("[\\"+starter+"\\"+ender+"]"),'\\\\\\&') unless @char=='?'
DQUOTE_ESCAPE_TABLE.each {|pair|
strfrag.gsub!(*pair)
} unless @char=='/'
strfrag.gsub!(/[^ -~]/){|np| #nonprintables
"\\x"+sprintf('%02X',np[0])
}
#break up long lines (best done later?)
strfrag.gsub!(/(\\x[0-9A-F]{2}|\\?.){40}/i, "\\&\\\n")
return strfrag
end
def append_str!(str)
if @elems.last.kind_of?(String)
@elems.last << str
else
@elems << str
end
@ident << str
assert @elems.last.kind_of?(String)
end
def append_code!(code)
if @elems.last.kind_of?(String)
else
@elems.push ''
end
@elems.push code,''
@ident << "\#{#{code}}"
assert @elems.last.kind_of?(String)
end
end
#-------------------------
class RenderExactlyStringToken < StringToken
alias transform simple_transform
#transform isn't called anymore, so there's no need for this hacky class
end
#-------------------------
class HerePlaceholderToken < WToken
attr_reader :termex, :quote, :ender, :dash
attr_accessor :unsafe_to_use, :string
attr_accessor :bodyclass
attr_accessor :open, :close
def initialize(dash,quote,ender,quote_real=true)
@dash,@quote,@ender,@quote_real=dash,quote,ender,quote_real
@unsafe_to_use=true
@string=StringToken.new
#@termex=/^#{'[\s\v]*' if dash}#{Regexp.escape ender}$/
@termex=Regexp.new \
["^", ('[\s\v]*' if dash), Regexp.escape(ender), "$"].join
@bodyclass=HereBodyToken
end
def ===(bogus); false end
def to_s
# if @bodyclass==OutlinedHereBodyToken
result=if/[^a-z_0-9]/i===@ender
@ender.gsub(/[\\"]/, '\\\\'+'\\&')
else
@ender
end
return ["<<",@dash,@quote_real&&@quote,result,@quote_real&&@quote].join
# else
# assert !unsafe_to_use
# return @string.to_s
# end
end
def append s; @string.append s end
def append_token tok; @string.append_token tok end
#def with_line(line) @string.line=line; self end
def line; @line || @string.line end
def line=line; @line=line end
def startline; @line end
alias endline startline
def startline=x; end
alias endline= startline=
end
#-------------------------
module StillIgnoreToken
end
#-------------------------
class IgnoreToken < Token
include StillIgnoreToken
def initialize(ident,*stuff)
@linecount=ident.count "\n"
super
end
attr :linecount
end
#-------------------------
class WsToken < IgnoreToken
end
#-------------------------
class ZwToken < IgnoreToken
def initialize(offset)
super('',offset)
end
def explicit_form
abstract
end
def explicit_form_all; explicit_form end
end
#-------------------------
class NoWsToken < ZwToken
def explicit_form_all
"#nows#"
end
def explicit_form
nil
end
end
#-------------------------
class ShebangToken < IgnoreToken
def initialize(text)
super text,0
end
end
#-------------------------
class EncodingDeclToken < IgnoreToken
def initialize(text,encoding,offset)
text||=''
super text,offset
@encoding=encoding
end
attr :encoding
end
#-------------------------
class ImplicitParamListStartToken < KeywordToken
include StillIgnoreToken
def initialize(offset)
super("(",offset)
end
def to_s; '' end
def as; "(" end
end
#-------------------------
class ImplicitParamListEndToken < KeywordToken
include StillIgnoreToken
def initialize(offset)
super(")",offset)
end
def to_s; '' end
def as; ")" end
end
#-------------------------
class AssignmentRhsListStartToken < ZwToken
def explicit_form
'*['
end
end
#-------------------------
class AssignmentRhsListEndToken < ZwToken
def explicit_form
']'
end
end
#-------------------------
class KwParamListStartToken < ZwToken
def explicit_form_all
"#((#"
end
def explicit_form
nil
end
end
#-------------------------
class KwParamListEndToken < ZwToken
def explicit_form_all
"#))#"
end
def explicit_form
nil
end
end
#-------------------------
class EndHeaderToken < ZwToken
def as; ";" end
end
EndDefHeaderToken=EndHeaderToken
#-------------------------
class EscNlToken < IgnoreToken
def initialize(ident,offset,filename=nil,linenum=nil)
super(ident,offset)
#@char='\\'
@filename=filename
@linenum=linenum
end
attr_accessor :filename,:linenum
def linecount; 1 end
def startline
@linenum-1
end
def endline
@linenum
end
def startline= bogus; end
alias endline= linenum=
end
#-------------------------
class EoiToken < Token
attr :file
alias :pos :offset
def initialize(cause,file, offset=nil,line=nil)
super(cause,offset)
@file=file
@endline=line
end
end
#-------------------------
class HereBodyToken < IgnoreToken
#attr_accessor :ender
attr_accessor :open,:close
def initialize(headtok,linecount)
assert HerePlaceholderToken===headtok
@ident,@offset=headtok.string,headtok.string.offset
@headtok=headtok
@linecount=linecount
end
def line
@ident.line
end
alias endline line
def endline= line
@ident.line= line
end
def startline
line-@linecount+1
end
def to_s
@ident.to_s
end
attr :headtok
attr :linecount #num lines here body spans (including terminator)
end
#-------------------------
class FileAndLineToken < IgnoreToken
attr_accessor :line
def initialize(ident,line,offset=nil)
super ident,offset
#@char='#'
@line=line
end
#def char; '#' end
def to_s()
['#', @ident, ':', @line].to_s
end
def file() @ident end
def subitem() @line end #needed?
def endline; @line end
def startline; @line end
alias endline= line=
def startline= bogus; end
end
#-------------------------
class OutlinedHereBodyToken < HereBodyToken #appears to be unused
def to_s
assert HerePlaceholderToken===@headtok
result=@headtok.string
result=result.to_s(:simple_transform).match(/^"(.*)"$/m)[1]
return result +
@headtok.ender +
"\n"
end
end
#-------------------------
module ErrorToken
attr_accessor :error
end
#-------------------------
class SubitemToken < Token
attr :char2
attr :subitem
def initialize(ident,subitem)
super ident
@subitem=subitem
end
def to_s()
super+@char2+@subitem.to_s
end
end
#-------------------------
class DecoratorToken < SubitemToken
def initialize(ident,subitem)
super '^'+ident,subitem
@subitem=@subitem.to_s #why to_s?
#@char='^'
@char2='='
end
#alias to_s ident #parent has right implementation of to_s... i think
def needs_value?() @subitem.nil? end
def value=(v) @subitem=v end
def value() @subitem end
end
end
require "rubylexer/rubycode"
allow number tok to be declared a char literal, in which case it unparses as one
=begin
rubylexer - a ruby lexer written in ruby
Copyright (C) 2004,2005,2008 Caleb Clausen
This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with this library; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
=end
class RubyLexer
#-------------------------
class Token
attr_accessor :ident
def to_s
@ident || "<#{self.class.name}>"
end
attr_accessor :offset #file offset of start of this token
attr_accessor :as #if defined, a KeywordToken which this token stands in for.
attr_accessor :allow_ooo_offset #hack
attr_accessor :endline
def initialize(ident,offset=nil)
@ident=ident
@offset=offset
end
def error; end
def has_no_block?; false end
attr_accessor :tag
attr_writer :startline
def startline
return @startline if defined? @startline
return endline
end
def linecount; 0 end
end
#-------------------------
class WToken< Token
def ===(pattern)
assert @ident
pattern===@ident
end
end
#-------------------------
class KeywordToken < WToken #also some operators
def initialize(*args)
if Hash===args.last
opts=args.pop
as=opts.delete :as
fail unless opts.empty?
end
super(*args)
self.as=as
end
#-----------------------------------
def set_callsite!(x=true)
@callsite=x
end
#-----------------------------------
def callsite?
@callsite if defined? @callsite
end
attr_accessor :value
#-----------------------------------
def set_infix!
@infix=true
end
#-----------------------------------
def unary= flag
@infix=!flag
end
#-----------------------------------
def infix?
@infix ||= nil
end
def prefix?; !infix? end
alias unary prefix?
#-----------------------------------
def has_end!
@has_end=true
end
#-----------------------------------
def has_end?
self===RubyLexer::BEGINWORDS and @has_end||=nil
end
attr_accessor :ternary, :grouping
def has_no_block!
@has_no_block=true
end
def has_no_block?
@has_no_block if defined? @has_no_block
end
def infix
@infix if defined? @infix
end
end
#-------------------------
class OperatorToken < WToken
def initialize(*args)
@tag=nil
super
end
attr_writer :as
def unary= flag; @tag=:unary if flag end
def unary; @tag==:unary end
alias prefix? unary
def infix?; !prefix? end
def as
return @as if defined? @as
if tag and ident[/^[,*&]$/]
tag.to_s+ident
end
end
end
#-------------------------
module TokenPat
@@TokenPats={}
def token_pat #used in various case statements...
result=self.dup
@@TokenPats[self] ||=
(class <<result
alias old_3eq ===
def ===(token)
WToken===token and old_3eq(token.ident)
end
end;result)
end
end
class ::String; include TokenPat; end
class ::Regexp; include TokenPat; end
#-------------------------
class VarNameToken < WToken
attr_accessor :lvar_type
attr_accessor :in_def
end
#-------------------------
class NumberToken < Token
def to_s
if defined? @char_literal and @char_literal
'?'+@ident.chr
else
@ident.to_s
end
end
def negative; /\A-/ === to_s end
attr_accessor :char_literal
end
#-------------------------
class SymbolToken < Token
attr_accessor :open,:close
attr :raw
def initialize(ident,offset=nil,starter=':')
@raw=ident
str=ident.to_s
str[0,2]='' if /\A%s/===str
super starter+str, offset
@open=":"
@close=""
# @char=':'
end
def to_s
return @ident
=begin
raw=@raw.to_s
raw=raw[1...-1] if StringToken===@raw
@open+raw+@close
=end
end
end
#-------------------------
class MethNameToken < Token # < SymbolToken
def initialize(ident,offset=nil,bogus=nil)
@ident= (VarNameToken===ident)? ident.ident : ident
@offset=offset
@has_no_block=false
# @char=''
end
def [](regex) #is this used?
regex===ident
end
def ===(pattern)
pattern===@ident
end
def has_no_block!
@has_no_block=true
end
def has_no_block?
@has_no_block
end
def has_equals; /[a-z_0-9]=$/i===ident end
end
#-------------------------
class NewlineToken < Token
def initialize(nlstr="\n",offset=nil)
super(nlstr,offset)
#@char=''
end
def as; ';' end
def linecount; 1 end
def startline
@endline-1
end
def startline=bogus; end
end
#-------------------------
class StringToken < Token
attr :char
attr_accessor :modifiers #for regex only
attr_accessor :elems
attr_accessor :startline
attr_accessor :bs_handler
attr_accessor :open #exact sequence of chars used to start the str
attr_accessor :close #exact seq of (1) char to stop the str
attr_accessor :lvars #names used in named backrefs if this is a regex
def linecount; line-startline end
def utf8?
@utf8||=nil
end
def utf8!
@utf8=true
end
def with_line(line)
@endline=line
self
end
def line; @endline end
def line= l; @endline=l end
def initialize(type='"',ident='')
super(ident)
type=="'" and type='"'
@char=type
assert @char[/^[\[{"`\/]$/] #"
@elems=[ident.dup] #why .dup?
@modifiers=nil
@endline=nil
end
DQUOTE_ESCAPE_TABLE = [
["\n",'\n'],
["\r",'\r'],
["\t",'\t'],
["\v",'\v'],
["\f",'\f'],
["\e",'\e'],
["\b",'\b'],
["\a",'\a']
]
PREFIXERS={ '['=>"%w[", '{'=>'%W{' }
SUFFIXERS={ '['=>"]", '{'=>'}' }
def has_str_inc?
elems.size>1 or RubyCode===elems.first
end
def to_s transname=:transform
assert @char[/[\[{"`\/]/] #"
#on output, all single-quoted strings become double-quoted
assert(@elems.length==1) if @char=='['
result=open.dup
starter=result[-1,1]
ender=close
elems.each{|e|
case e
when String; result<<e
# strfrag=translate_escapes strfrag if RubyLexer::FASTER_STRING_ESCAPES
# result << send(transname,strfrag,starter,ender)
when VarNameToken;
if /^[$@]/===e.to_s
result << '#' + e.to_s
else
result << "\#{#{e}}"
end
when RubyCode; result << '#' + e.to_s
else fail
end
}
result << ender
if @char=='/'
result << modifiers if modifiers #regex only
result="%r"+result if RubyLexer::WHSPLF[result[1,1]]
end
return result
end
def to_term
result=[]
0.step(@elems.length-1,2) { |i|
result << ConstTerm.new(@elems[i].dup)
if e=@elems[i+1]
assert(e.kind_of?(RubyCode))
result << (RubyTerm.new e)
end
}
return result
end
def append(glob)
#assert @elems.last.kind_of?(String)
case glob
when String,Integer then append_str! glob
when RubyCode then append_code! glob
else raise "bad string contents: #{glob}, a #{glob.class}"
end
#assert @elems.last.kind_of?(String)
end
def append_token(strtok)
assert @elems.last.kind_of?(String)
#assert strtok.elems.last.kind_of?(String)
assert strtok.elems.first.kind_of?(String)
@elems.last << strtok.elems.shift
first=strtok.elems.first
assert( first.nil? || first.kind_of?(RubyCode) )
@elems += strtok.elems
@ident << strtok.ident
assert((!@modifiers or !strtok.modifiers))
@modifiers||=strtok.modifiers
#assert @elems.last.kind_of?(String)
@bs_handler ||=strtok.bs_handler
return self
end
def translate_escapes(str)
rl=RubyLexer.new("(string escape translation hack...)",'')
result=str.dup
seq=result.to_sequence
rl.instance_eval{@file=seq}
repls=[]
i=0
#ugly ugly ugly
while i<result.size and bs_at=result.index(/\\./m,i)
seq.pos=$~.end(0)-1
ch=rl.send(bs_handler,"\\",@open[-1,1],@close)
result[bs_at...seq.pos]=ch
i=bs_at+ch.size
end
return result
end
private
UNESC_DELIMS={}
#simpler transform, preserves original exactly
def simple_transform(strfrag,starter,ender) #appears to be unused
assert('[{/'[@char])
#strfrag.gsub!(/(\A|[^\\])(?:\\\\)*\#([{$@])/){$1+'\\#'+$2} unless @char=='[' #esc #{
delimchars=Regexp.quote starter+ender
delimchars+=Regexp.quote("#") unless @char=='[' #escape beginning of string iterpolations
#i think most or all of this method is useless now...
#escape curly brace in string interpolations (%W only)
strfrag.gsub!('#{', '#\\{') if @char=='{'
ckey=starter+ender
unesc_delim=
UNESC_DELIMS[ckey]||=
/(\A|[^\\](?:\\\\)*)([#{delimchars}]+)/
# /(\\)([^#{delimchars}#{RubyLexer::WHSPLF}]|\Z)/
#an even number (esp 0) of backslashes before delim becomes escaped delim
strfrag.gsub!(unesc_delim){
pre=$1; toesc=$2
pre+toesc.gsub(/(.)/){ "\\"+$1 }
}
#no need to double backslashes anymore... they should come pre-doubled
return strfrag
end
def transform(strfrag,starter,ender) #appears to be unused
strfrag.gsub!("\\",'\\'*4)
strfrag.gsub!(/#([{$@])/,'\\#\\1')
strfrag.gsub!(Regexp.new("[\\"+starter+"\\"+ender+"]"),'\\\\\\&') unless @char=='?'
DQUOTE_ESCAPE_TABLE.each {|pair|
strfrag.gsub!(*pair)
} unless @char=='/'
strfrag.gsub!(/[^ -~]/){|np| #nonprintables
"\\x"+sprintf('%02X',np[0])
}
#break up long lines (best done later?)
strfrag.gsub!(/(\\x[0-9A-F]{2}|\\?.){40}/i, "\\&\\\n")
return strfrag
end
def append_str!(str)
if @elems.last.kind_of?(String)
@elems.last << str
else
@elems << str
end
@ident << str
assert @elems.last.kind_of?(String)
end
def append_code!(code)
if @elems.last.kind_of?(String)
else
@elems.push ''
end
@elems.push code,''
@ident << "\#{#{code}}"
assert @elems.last.kind_of?(String)
end
end
#-------------------------
class RenderExactlyStringToken < StringToken
alias transform simple_transform
#transform isn't called anymore, so there's no need for this hacky class
end
#-------------------------
class HerePlaceholderToken < WToken
attr_reader :termex, :quote, :ender, :dash
attr_accessor :unsafe_to_use, :string
attr_accessor :bodyclass
attr_accessor :open, :close
def initialize(dash,quote,ender,quote_real=true)
@dash,@quote,@ender,@quote_real=dash,quote,ender,quote_real
@unsafe_to_use=true
@string=StringToken.new
#@termex=/^#{'[\s\v]*' if dash}#{Regexp.escape ender}$/
@termex=Regexp.new \
["^", ('[\s\v]*' if dash), Regexp.escape(ender), "$"].join
@bodyclass=HereBodyToken
end
def ===(bogus); false end
def to_s
# if @bodyclass==OutlinedHereBodyToken
result=if/[^a-z_0-9]/i===@ender
@ender.gsub(/[\\"]/, '\\\\'+'\\&')
else
@ender
end
return ["<<",@dash,@quote_real&&@quote,result,@quote_real&&@quote].join
# else
# assert !unsafe_to_use
# return @string.to_s
# end
end
def append s; @string.append s end
def append_token tok; @string.append_token tok end
#def with_line(line) @string.line=line; self end
def line; @line || @string.line end
def line=line; @line=line end
def startline; @line end
alias endline startline
def startline=x; end
alias endline= startline=
end
#-------------------------
module StillIgnoreToken
end
#-------------------------
class IgnoreToken < Token
include StillIgnoreToken
def initialize(ident,*stuff)
@linecount=ident.count "\n"
super
end
attr :linecount
end
#-------------------------
class WsToken < IgnoreToken
end
#-------------------------
class ZwToken < IgnoreToken
def initialize(offset)
super('',offset)
end
def explicit_form
abstract
end
def explicit_form_all; explicit_form end
end
#-------------------------
class NoWsToken < ZwToken
def explicit_form_all
"#nows#"
end
def explicit_form
nil
end
end
#-------------------------
class ShebangToken < IgnoreToken
def initialize(text)
super text,0
end
end
#-------------------------
class EncodingDeclToken < IgnoreToken
def initialize(text,encoding,offset)
text||=''
super text,offset
@encoding=encoding
end
attr :encoding
end
#-------------------------
class ImplicitParamListStartToken < KeywordToken
include StillIgnoreToken
def initialize(offset)
super("(",offset)
end
def to_s; '' end
def as; "(" end
end
#-------------------------
class ImplicitParamListEndToken < KeywordToken
include StillIgnoreToken
def initialize(offset)
super(")",offset)
end
def to_s; '' end
def as; ")" end
end
#-------------------------
class AssignmentRhsListStartToken < ZwToken
def explicit_form
'*['
end
end
#-------------------------
class AssignmentRhsListEndToken < ZwToken
def explicit_form
']'
end
end
#-------------------------
class KwParamListStartToken < ZwToken
def explicit_form_all
"#((#"
end
def explicit_form
nil
end
end
#-------------------------
class KwParamListEndToken < ZwToken
def explicit_form_all
"#))#"
end
def explicit_form
nil
end
end
#-------------------------
class EndHeaderToken < ZwToken
def as; ";" end
end
EndDefHeaderToken=EndHeaderToken
#-------------------------
class EscNlToken < IgnoreToken
def initialize(ident,offset,filename=nil,linenum=nil)
super(ident,offset)
#@char='\\'
@filename=filename
@linenum=linenum
end
attr_accessor :filename,:linenum
def linecount; 1 end
def startline
@linenum-1
end
def endline
@linenum
end
def startline= bogus; end
alias endline= linenum=
end
#-------------------------
class EoiToken < Token
attr :file
alias :pos :offset
def initialize(cause,file, offset=nil,line=nil)
super(cause,offset)
@file=file
@endline=line
end
end
#-------------------------
class HereBodyToken < IgnoreToken
#attr_accessor :ender
attr_accessor :open,:close
def initialize(headtok,linecount)
assert HerePlaceholderToken===headtok
@ident,@offset=headtok.string,headtok.string.offset
@headtok=headtok
@linecount=linecount
end
def line
@ident.line
end
alias endline line
def endline= line
@ident.line= line
end
def startline
line-@linecount+1
end
def to_s
@ident.to_s
end
attr :headtok
attr :linecount #num lines here body spans (including terminator)
end
#-------------------------
class FileAndLineToken < IgnoreToken
attr_accessor :line
def initialize(ident,line,offset=nil)
super ident,offset
#@char='#'
@line=line
end
#def char; '#' end
def to_s()
['#', @ident, ':', @line].to_s
end
def file() @ident end
def subitem() @line end #needed?
def endline; @line end
def startline; @line end
alias endline= line=
def startline= bogus; end
end
#-------------------------
class OutlinedHereBodyToken < HereBodyToken #appears to be unused
def to_s
assert HerePlaceholderToken===@headtok
result=@headtok.string
result=result.to_s(:simple_transform).match(/^"(.*)"$/m)[1]
return result +
@headtok.ender +
"\n"
end
end
#-------------------------
module ErrorToken
attr_accessor :error
end
#-------------------------
class SubitemToken < Token
attr :char2
attr :subitem
def initialize(ident,subitem)
super ident
@subitem=subitem
end
def to_s()
super+@char2+@subitem.to_s
end
end
#-------------------------
class DecoratorToken < SubitemToken
# A '^name=value' decorator; the value may be filled in later.
def initialize(ident,subitem)
super '^'+ident,subitem
@subitem=@subitem.to_s #why to_s?
#@char='^'
@char2='='
end
#alias to_s ident #parent has right implementation of to_s... i think
# True while the decorator's value has not yet been supplied.
def needs_value?() @subitem.nil? end
def value=(v) @subitem=v end
def value() @subitem end
end
end
require "rubylexer/rubycode"
|
require 'rubylisp/types'
module RubyLisp
# Reads (parses) RubyLisp source text into RubyLisp data structures.
# Tokenizer and recursive-descent reader are modeled on the mal
# (make-a-lisp) reader design.
class Reader
  # from kanaka/mal
  TOKEN_REGEX = %r{
    # ignore whitespace and commas
    [\s,]*
    # match any of...
    (
      # the splice-unquote reader macro
      ~@|
      # special characters
      [\[\]{}()'`~^@]|
      # strings
      "(?:\\.|[^\\"])*"|
      # comments
      ;.*|
      # any sequence of non-special characters
      # e.g. symbols, numbers, keywords, booleans, etc.
      [^\s\[\]{}('"`,;)]*
    )
  }x

  # tokens: list produced by #tokenize; position: index of the next token.
  attr_accessor :tokens, :position

  # Look at the next token without consuming it (nil at end of input).
  def peek
    @tokens[@position]
  end

  # Consume and return the next token.
  def next_token
    token = peek
    @position += 1
    token
  end

  # Read forms until end_token is seen, then wrap the collected forms in
  # type.  Raises on EOF, and on an odd number of forms for a HashMap.
  def read_seq(type, end_token, seq=[])
    case peek
    when nil
      raise RubyLisp::ParseError, "Unexpected EOF while parsing #{type}."
    when end_token
      if type == RubyLisp::HashMap
        if seq.size.odd?
          raise RubyLisp::ParseError,
            "A RubyLisp::HashMap must contain an even number of forms."
        else
          next_token
          # consecutive pairs become key/value entries
          hashmap = seq.each_slice(2).to_a.to_h
          type.new(hashmap)
        end
      else
        next_token
        type.new(seq)
      end
    else
      seq << read_form
      read_seq(type, end_token, seq)
    end
  end

  # Read the remainder of a list, up to the closing ')'.
  def read_list
    read_seq RubyLisp::List, ')'
  end

  # Read the remainder of a vector, up to the closing ']'.
  def read_vector
    read_seq RubyLisp::Vector, ']'
  end

  # Read the remainder of a hash-map, up to the closing '}'.
  def read_hashmap
    read_seq RubyLisp::HashMap, '}'
  end

  # Read a single atom: integer, string, keyword, nil/true/false or symbol.
  def read_atom
    token = next_token
    case token
    when nil
      nil
    when /^\-?\d+$/
      RubyLisp::Int.new(token.to_i)
    when /^".*"$/
      # it's safe to use eval here because the tokenizer ensures that
      # the token is an escaped string representation
      # NOTE(review): eval on token text — worth confirming the tokenizer's
      # string pattern cannot be fooled into passing arbitrary code.
      RubyLisp::String.new(eval(token))
    # it's a little weird that an unfinished string (e.g. "abc) gets
    # tokenized as "", but at least the behavior is consistent ¯\_(ツ)_/¯
    when ""
      raise RubyLisp::ParseError,
        "Unexpected EOF while parsing RubyLisp::String."
    when /^:/
      RubyLisp::Keyword.new(token[1..-1].to_sym)
    when 'nil'
      RubyLisp::Nil.new
    when 'true'
      RubyLisp::Boolean.new(true)
    when 'false'
      RubyLisp::Boolean.new(false)
    else
      RubyLisp::Symbol.new(token)
    end
  end

  # Expand 'form into (quote form).
  def read_quoted_form
    form = read_form
    unless form
      raise RubyLisp::ParseError,
        "Unexpected EOF while parsing quoted form."
    end
    RubyLisp::List.new([RubyLisp::Symbol.new("quote"), form])
  end

  # Expand `form into (quasiquote form).
  def read_quasiquoted_form
    form = read_form
    unless form
      raise RubyLisp::ParseError,
        "Unexpected EOF while parsing quasiquoted form."
    end
    RubyLisp::List.new([RubyLisp::Symbol.new("quasiquote"), form])
  end

  # Expand ~form into (unquote form).
  def read_unquoted_form
    form = read_form
    unless form
      raise RubyLisp::ParseError,
        "Unexpected EOF while parsing unquoted form."
    end
    RubyLisp::List.new([RubyLisp::Symbol.new("unquote"), form])
  end

  # Expand ~@form into (splice-unquote form).
  def read_splice_unquoted_form
    form = read_form
    unless form
      raise RubyLisp::ParseError,
        "Unexpected EOF while parsing splice-unquoted form."
    end
    RubyLisp::List.new([RubyLisp::Symbol.new("splice-unquote"), form])
  end

  # Dispatch on the next token and read one complete form.
  def read_form
    case peek
    when /^;/
      # ignore comments
      next_token
      read_form
    when '('
      next_token
      read_list
    when '['
      next_token
      read_vector
    when '{'
      next_token
      read_hashmap
    when ')'
      raise RubyLisp::ParseError, "Unexpected ')'."
    when ']'
      raise RubyLisp::ParseError, "Unexpected ']'."
    when '}'
      raise RubyLisp::ParseError, "Unexpected '}'."
    when "'"
      next_token
      read_quoted_form
    when "`"
      next_token
      read_quasiquoted_form
    when "~"
      next_token
      read_unquoted_form
    when "~@"
      next_token
      read_splice_unquoted_form
    else
      read_atom
    end
  end

  # Split str into tokens.  The trailing element is dropped: the regex's
  # final empty-match alternative always yields one "" at end of input.
  def tokenize str
    @tokens = str.scan(TOKEN_REGEX).flatten[0...-1]
    @position = 0
  end

  # Convenience entry point: tokenize str and read a single form from it.
  def Reader.read_str str
    reader = Reader.new
    reader.tokenize(str)
    reader.read_form
  end
end
end
I prefer single quotes.
require 'rubylisp/types'
module RubyLisp
# Reads (parses) RubyLisp source text into RubyLisp data structures.
# Tokenizer and recursive-descent reader are modeled on the mal
# (make-a-lisp) reader design.
class Reader
  # from kanaka/mal
  TOKEN_REGEX = %r{
    # ignore whitespace and commas
    [\s,]*
    # match any of...
    (
      # the splice-unquote reader macro
      ~@|
      # special characters
      [\[\]{}()'`~^@]|
      # strings
      "(?:\\.|[^\\"])*"|
      # comments
      ;.*|
      # any sequence of non-special characters
      # e.g. symbols, numbers, keywords, booleans, etc.
      [^\s\[\]{}('"`,;)]*
    )
  }x

  # tokens: list produced by #tokenize; position: index of the next token.
  attr_accessor :tokens, :position

  # Look at the next token without consuming it (nil at end of input).
  def peek
    @tokens[@position]
  end

  # Consume and return the next token.
  def next_token
    token = peek
    @position += 1
    token
  end

  # Read forms until end_token is seen, then wrap the collected forms in
  # type.  Raises on EOF, and on an odd number of forms for a HashMap.
  def read_seq(type, end_token, seq=[])
    case peek
    when nil
      raise RubyLisp::ParseError, "Unexpected EOF while parsing #{type}."
    when end_token
      if type == RubyLisp::HashMap
        if seq.size.odd?
          raise RubyLisp::ParseError,
            "A RubyLisp::HashMap must contain an even number of forms."
        else
          next_token
          # consecutive pairs become key/value entries
          hashmap = seq.each_slice(2).to_a.to_h
          type.new(hashmap)
        end
      else
        next_token
        type.new(seq)
      end
    else
      seq << read_form
      read_seq(type, end_token, seq)
    end
  end

  # Read the remainder of a list, up to the closing ')'.
  def read_list
    read_seq RubyLisp::List, ')'
  end

  # Read the remainder of a vector, up to the closing ']'.
  def read_vector
    read_seq RubyLisp::Vector, ']'
  end

  # Read the remainder of a hash-map, up to the closing '}'.
  def read_hashmap
    read_seq RubyLisp::HashMap, '}'
  end

  # Read a single atom: integer, string, keyword, nil/true/false or symbol.
  def read_atom
    token = next_token
    case token
    when nil
      nil
    when /^\-?\d+$/
      RubyLisp::Int.new(token.to_i)
    when /^".*"$/
      # it's safe to use eval here because the tokenizer ensures that
      # the token is an escaped string representation
      # NOTE(review): eval on token text — worth confirming the tokenizer's
      # string pattern cannot be fooled into passing arbitrary code.
      RubyLisp::String.new(eval(token))
    # it's a little weird that an unfinished string (e.g. "abc) gets
    # tokenized as "", but at least the behavior is consistent ¯\_(ツ)_/¯
    when ""
      raise RubyLisp::ParseError,
        "Unexpected EOF while parsing RubyLisp::String."
    when /^:/
      RubyLisp::Keyword.new(token[1..-1].to_sym)
    when 'nil'
      RubyLisp::Nil.new
    when 'true'
      RubyLisp::Boolean.new(true)
    when 'false'
      RubyLisp::Boolean.new(false)
    else
      RubyLisp::Symbol.new(token)
    end
  end

  # Expand 'form into (quote form).
  def read_quoted_form
    form = read_form
    unless form
      raise RubyLisp::ParseError,
        "Unexpected EOF while parsing quoted form."
    end
    RubyLisp::List.new([RubyLisp::Symbol.new("quote"), form])
  end

  # Expand `form into (quasiquote form).
  def read_quasiquoted_form
    form = read_form
    unless form
      raise RubyLisp::ParseError,
        "Unexpected EOF while parsing quasiquoted form."
    end
    RubyLisp::List.new([RubyLisp::Symbol.new("quasiquote"), form])
  end

  # Expand ~form into (unquote form).
  def read_unquoted_form
    form = read_form
    unless form
      raise RubyLisp::ParseError,
        "Unexpected EOF while parsing unquoted form."
    end
    RubyLisp::List.new([RubyLisp::Symbol.new("unquote"), form])
  end

  # Expand ~@form into (splice-unquote form).
  def read_splice_unquoted_form
    form = read_form
    unless form
      raise RubyLisp::ParseError,
        "Unexpected EOF while parsing splice-unquoted form."
    end
    RubyLisp::List.new([RubyLisp::Symbol.new("splice-unquote"), form])
  end

  # Dispatch on the next token and read one complete form.
  def read_form
    case peek
    when /^;/
      # ignore comments
      next_token
      read_form
    when '('
      next_token
      read_list
    when '['
      next_token
      read_vector
    when '{'
      next_token
      read_hashmap
    when ')'
      raise RubyLisp::ParseError, "Unexpected ')'."
    when ']'
      raise RubyLisp::ParseError, "Unexpected ']'."
    when '}'
      raise RubyLisp::ParseError, "Unexpected '}'."
    when "'"
      next_token
      read_quoted_form
    when '`'
      next_token
      read_quasiquoted_form
    when '~'
      next_token
      read_unquoted_form
    when '~@'
      next_token
      read_splice_unquoted_form
    else
      read_atom
    end
  end

  # Split str into tokens.  The trailing element is dropped: the regex's
  # final empty-match alternative always yields one "" at end of input.
  def tokenize str
    @tokens = str.scan(TOKEN_REGEX).flatten[0...-1]
    @position = 0
  end

  # Convenience entry point: tokenize str and read a single form from it.
  def Reader.read_str str
    reader = Reader.new
    reader.tokenize(str)
    reader.read_form
  end
end
end
|
require 'formula'
class Cairo < Formula
  # Homebrew formula for the cairo 2D graphics library, built without X11
  # (staticfloat/juliadeps tap; bottles hosted on the Julia S3 bucket).
  homepage 'http://cairographics.org/'
  url "http://cairographics.org/releases/cairo-1.14.0.tar.xz"
  mirror "http://www.mirrorservice.org/sites/ftp.netbsd.org/pub/pkgsrc/distfiles/cairo-1.14.0.tar.xz"
  sha256 "2cf5f81432e77ea4359af9dcd0f4faf37d015934501391c311bfd2d19a0134b7"

  # Prebuilt bottles are served from a custom S3 bucket.
  bottle do
    root_url 'https://juliabottles.s3.amazonaws.com'
    cellar :any
  end

  option :universal

  depends_on 'staticfloat/juliadeps/pkg-config' => :build
  depends_on 'freetype'
  depends_on 'fontconfig'
  depends_on 'libpng'
  depends_on 'pixman'
  depends_on 'staticfloat/juliadeps/glib'
  depends_on 'xz'=> :build

  # Use the standard build environment for universal (fat) builds.
  env :std if build.universal?

  def install
    ENV.universal_binary if build.universal?
    # We always build without x
    args = %W[
      --disable-dependency-tracking
      --prefix=#{prefix}
      --enable-xlib=no
      --enable-xlib-xrender=no
      --enable-quartz-image=yes
      --enable-gobject=yes
      --enable-svg=yes
    ]
    system "./configure", *args
    system "make install"
  end
end
Bottle cairo
require 'formula'
class Cairo < Formula
  # Homebrew formula for the cairo 2D graphics library, built without X11
  # (staticfloat/juliadeps tap; bottles hosted on the Julia S3 bucket).
  homepage 'http://cairographics.org/'
  url "http://cairographics.org/releases/cairo-1.14.0.tar.xz"
  mirror "http://www.mirrorservice.org/sites/ftp.netbsd.org/pub/pkgsrc/distfiles/cairo-1.14.0.tar.xz"
  sha256 "2cf5f81432e77ea4359af9dcd0f4faf37d015934501391c311bfd2d19a0134b7"

  # Prebuilt bottles (per-OS checksums) served from a custom S3 bucket.
  bottle do
    root_url 'https://juliabottles.s3.amazonaws.com'
    cellar :any
    sha1 "ccdbd258f4d9bdd9b7151b793e18290dde673c6d" => :yosemite
    sha1 "c17c00dda78731dd4293e4c7f7fcc1f3d3384997" => :mavericks
    sha1 "cbf83223fabea2a31896d5172020d41134e03dc1" => :mountain_lion
  end

  option :universal

  depends_on 'staticfloat/juliadeps/pkg-config' => :build
  depends_on 'freetype'
  depends_on 'fontconfig'
  depends_on 'libpng'
  depends_on 'pixman'
  depends_on 'staticfloat/juliadeps/glib'
  depends_on 'xz'=> :build

  # Use the standard build environment for universal (fat) builds.
  env :std if build.universal?

  def install
    ENV.universal_binary if build.universal?
    # We always build without x
    args = %W[
      --disable-dependency-tracking
      --prefix=#{prefix}
      --enable-xlib=no
      --enable-xlib-xrender=no
      --enable-quartz-image=yes
      --enable-gobject=yes
      --enable-svg=yes
    ]
    system "./configure", *args
    system "make install"
  end
end
|
#encoding:utf-8
require 'scene/base'
require 'scene/play'
require 'scene/autoload_view'
# 选曲场景。
module Scene
class SongList < Base
  # Song-selection scene: shows the chart list, lets the player move the
  # cursor, switch difficulty, and confirm a song to play.
  include Taiko
  include AutoloadView

  # Class variable so the cursor position survives across scene instances.
  @@index = 0

  # Data of the currently selected song (optionally offset from the cursor).
  def songdata(offset = 0)
    @songlist[songlist_index(offset)]
  end

  private

  # Scene start: shrink the window, build the song list, play a preview.
  def start
    Graphics.resize_screen(480, 272)
    make_song_list
    super
    play_demo
  end

  # Scene teardown: restore the window size and stop the music.
  def terminate
    super
    Graphics.resize_screen(544, 416)
    Audio.bgm_stop
  end

  # Per-frame update: cursor, difficulty switching and scene transition.
  def update
    super
    update_index
    update_course(true) if Input.trigger?(:RIGHT)
    update_course(false) if Input.trigger?(:LEFT)
    update_scene unless scene_changing?
  end

  # Cursor position wrapped into the song list's range.
  def songlist_index(offset = 0)
    (@@index + offset) % @songlist.size
  end

  # Collect every chart file under DIRECTORY into SongData entries.
  def make_song_list
    @songlist = Dir.glob("#{DIRECTORY}/**/*#{EXTNAME}").map do |name|
      SongData.new name.chomp(EXTNAME)
    end
    if @songlist.empty?
      raise "There is no tja file in the folder `#{DIRECTORY}'!"
    end
  end

  # Move the cursor with UP/DOWN; on a change, play a sound and new preview.
  def update_index
    last_index = @@index
    if Input.repeat?(:UP)
      @@index -= 1
    elsif Input.repeat?(:DOWN)
      @@index += 1
    end
    if last_index != @@index
      Audio.se_play 'Audio/SE/dong'
      play_demo
    end
  end

  # Switch difficulty.  to_next: whether to advance to the next course.
  # Does nothing when there is no course in that direction.
  def update_course(to_next)
    new_data = to_next ? songdata.next_course : songdata.prev_course
    if new_data
      @songlist[songlist_index] = new_data
      Audio.se_play 'Audio/SE/dong'
    end
  end

  # Play the preview track of the selected song.
  def play_demo
    Audio.bgm_stop
    Audio.bgm_play songdata.wave, songdata.songvol, 100
  end

  # Confirm with C: set up the Taiko state and transition to the play scene.
  def update_scene
    if Input.trigger?(:C)
      Audio.se_play 'Audio/SE/dong'
      Taiko.setup songdata
      fadeout_all(120)
      Scene.call(Play)
    end
  end
end
end
scene/song_list.rb: save index to file now
#encoding:utf-8
require 'scene/base'
require 'scene/play'
require 'scene/autoload_view'
# 选曲场景。
module Scene
class SongList < Base
  # Song-selection scene: shows the chart list, lets the player move the
  # cursor, switch difficulty, and confirm a song to play.  The cursor
  # position and per-song course choices are persisted to a data file.
  include Taiko
  include AutoloadView

  # File that persists the last cursor position and course choices.
  INDEX_FILENAME = 'Data/SONGLIST_INDEX'

  # Data of the currently selected song (optionally offset from the cursor).
  def songdata(offset = 0)
    @songlist[songlist_index(offset)]
  end

  private

  # Scene start: shrink the window, build the song list, restore the saved
  # selection, then play a preview.
  def start
    Graphics.resize_screen(480, 272)
    make_song_list
    load_index
    super
    play_demo
  end

  # Scene teardown: restore the window size and stop the music.
  def terminate
    super
    Graphics.resize_screen(544, 416)
    Audio.bgm_stop
  end

  # Per-frame update: cursor, difficulty switching and scene transition.
  def update
    super
    update_index
    update_course(1) if Input.trigger?(:RIGHT)
    update_course(-1) if Input.trigger?(:LEFT)
    update_scene unless scene_changing?
  end

  # Load the song position saved by the previous session and re-apply the
  # recorded course choice of every song.
  def load_index
    if File.exist?(INDEX_FILENAME)
      @index, @courses = load_data(INDEX_FILENAME)
    else
      @index = 0
      @courses = Hash.new(0)
    end
    @courses.each do |song_index, course|
      select_course(song_index, course)
    end
  end

  # Persist the current song position and course choices.
  def save_index
    save_data([@index, @courses], INDEX_FILENAME)
  end

  # Cursor position wrapped into the song list's range.
  def songlist_index(offset = 0)
    (@index + offset) % @songlist.size
  end

  # Collect every chart file under DIRECTORY into SongData entries.
  def make_song_list
    @songlist = Dir.glob("#{DIRECTORY}/**/*#{EXTNAME}").map do |name|
      SongData.new name.chomp(EXTNAME)
    end
    if @songlist.empty?
      raise "There is no tja file in the folder `#{DIRECTORY}'!"
    end
  end

  # Move the cursor with UP/DOWN; on a change, play a sound, a new preview,
  # and persist the new position.
  def update_index
    last_index = @index
    if Input.repeat?(:UP)
      @index -= 1
      @index %= @songlist.size
    elsif Input.repeat?(:DOWN)
      @index += 1
      @index %= @songlist.size
    end
    if last_index != @index
      Audio.se_play 'Audio/SE/dong'
      play_demo
      save_index
    end
  end

  # Change difficulty by course_diff steps; records and persists the choice
  # only when a change actually happened.
  def update_course(course_diff)
    if select_course(@index, course_diff)
      @courses[@index] += course_diff
      save_index
      Audio.se_play 'Audio/SE/dong'
    end
  end

  # Change the course of song song_index by course_diff steps (clamping at
  # the first/last available course).  Returns a falsy value when nothing
  # changed.
  def select_course(song_index, course_diff)
    last_data = data = @songlist[song_index]
    to_next = course_diff > 0
    course_diff.abs.times do
      data = (to_next ? data.next_course : data.prev_course) || data
    end
    @songlist[song_index] = data
    !last_data.equal?(data)
  end

  # Play the preview track of the selected song.
  def play_demo
    Audio.bgm_stop
    Audio.bgm_play songdata.wave, songdata.songvol, 100
  end

  # Confirm with C: set up the Taiko state and transition to the play scene.
  def update_scene
    if Input.trigger?(:C)
      Audio.se_play 'Audio/SE/dong'
      Taiko.setup songdata
      fadeout_all(120)
      Scene.call(Play)
    end
  end
end
end |
$: << File.join(File.dirname(__FILE__), '..', '..', 'vendor', 'feed_me', 'lib')
$: << File.join(File.dirname(__FILE__), '..', '..', 'vendor', 'mechanical_github', 'lib')
require 'rubygems'
require 'open-uri'
require 'dm-core'
require 'feed_me'
require 'mechanical_github'
require 'set'
module Seinfeld
# Some of this is destined to be broken out into modules when support for
# more services than just github is added.
class User
  # Tracks a GitHub user's daily commit activity and streaks
  # ("don't break the chain").  github_login/github_password are the site
  # account credentials used to message users through GitHub.
  class << self
    attr_accessor :github_login
    attr_accessor :github_password
  end

  include DataMapper::Resource

  property :id, Integer, :serial => true
  property :login, String, :unique => true
  property :email, String
  # Newest feed entry id already processed; lets later fetches stop early.
  property :last_entry_id, String
  property :current_streak, Integer, :default => 0, :index => true
  property :longest_streak, Integer, :default => 0, :index => true
  property :streak_start, Date
  property :streak_end, Date

  has n, :progressions, :class_name => "Seinfeld::Progression", :order => [:created_at.desc]

  # Yield every user, fetched in id-ordered batches of 15.
  def self.paginated_each(&block)
    max_id = 0
    while batch = next_batch(max_id)
      batch.each(&block)
      max_id = batch.map { |u| u.id }.max
    end
  end

  # The next batch of up to 15 users with id greater than the given id,
  # or nil when there are no more.
  def self.next_batch(id)
    batch = all :order => [:id], :limit => 15, :id.gt => id
    batch.size.zero? ? nil : batch
  end

  # Fetch the user's feed, store newly seen commit days as Progressions,
  # and recompute the streak columns.  Runs inside a transaction.
  def update_progress
    transaction do
      days = committed_days_in_feed
      save
      unless days.empty?
        # ignore days that already have a Progression row
        existing = progressions(:created_at => days).map { |p| p.created_at }
        # NOTE: the local current_streak below shadows the property accessor.
        streaks = [current_streak = Streak.new(streak_start, streak_end)]
        days = days - existing
        days.sort!
        days.each do |day|
          if current_streak.current?(day)
            # day extends the running streak
            current_streak.ended = day
          else
            # gap: start a new streak at this day
            streaks << (current_streak = Streak.new(day))
          end
          progressions.create(:created_at => day)
        end
        highest_streak = streaks.empty? ? 0 : streaks.max { |a, b| a.days <=> b.days }.days
        latest_streak = streaks.last
        self.streak_start = latest_streak.started if latest_streak
        self.streak_end = latest_streak.ended if latest_streak
        self.current_streak = latest_streak.days if latest_streak && latest_streak.current?
        self.longest_streak = highest_streak if highest_streak > longest_streak.to_i
        save
      end
    end
  end

  # Dates of commit entries in the user's Atom feed, recursing to the next
  # page until the previously seen entry id shows up.  Side effect: records
  # the newest entry id when reading page 1.
  def committed_days_in_feed(page = 1)
    feed = get_feed(page)
    return [] if feed.nil?
    entry_id = nil # track the first entry id to store in the user model
    skipped_early = nil
    return [] if feed.entries.empty?
    days = feed.entries.inject({}) do |selected, entry|
      this_entry_id = entry.item_id
      entry_id ||= this_entry_id
      if last_entry_id == this_entry_id
        # everything from here on was handled by a previous run
        skipped_early = true
        break selected
      end
      if entry.title =~ %r{^#{login} committed}
        updated = entry.updated_at
        date = Date.civil(updated.year, updated.month, updated.day)
        # hash keys double as a uniqueness set
        selected.update date => nil
      else
        selected
      end
    end.keys
    self.last_entry_id = entry_id if page == 1
    unless skipped_early
      days += committed_days_in_feed(page + 1)
      days.uniq!
    end
    days
  end

  # Set of committed Dates within the given calendar month.
  def progress_for(year, month)
    start = Date.new(year, month)
    Set.new progressions(:created_at => start..((start >> 1) - 1)).map { |p| Date.new(p.created_at.year, p.created_at.month, p.created_at.day) }
  end

  # Handle a "X sent you a message" GitHub notification subject: reply if
  # the login is already registered, otherwise create the user (yielding it
  # first if a block is given) and build its progress.
  # NOTE(review): `scan(...).first.to_s` relies on Ruby 1.8 Array#to_s
  # (join) semantics; under 1.9+ it yields an inspect string — confirm the
  # targeted Ruby version.
  def self.process_new_github_user(subject)
    login_name = subject.scan(/([\w\_\-]+) sent you a message/).first.to_s
    return if login_name.size.zero?
    if user = first(:login => login_name)
      if github_login && github_password
        session = MechanicalGitHub::Session.new
        session.login github_login, github_password
        session.send_message login_name, "[CAN] You've already registered!", "Thanks for your enthusiasm, but you've already registered for a Calendar About Nothing: http://calendaraboutnothing.com/~#{user.login}."
      end
      nil
    else
      user = new(:login => login_name)
      yield user if block_given?
      user.update_progress
    end
  end

  private

  # Fetch and parse one page of the user's GitHub Atom feed.
  # Returns nil on any error (bare rescue is a deliberate best-effort).
  def get_feed(page = 1)
    feed = nil
    open("http://github.com/#{login}.atom?page=#{page}") { |f| feed = FeedMe.parse(f.read) }
    feed
  rescue
    nil
  end
end
class Progression
  # One committed day for a user; created_at is the commit Date.
  include DataMapper::Resource
  property :id, Integer, :serial => true
  property :created_at, Date
  belongs_to :user, :class_name => "Seinfeld::User"
end
# Value object describing a run of consecutive committed days.
class Streak
  attr_accessor :started, :ended

  # A streak may be empty (no dates), single-day, or an inclusive range;
  # a lone start date implies a one-day streak.
  def initialize(started = nil, ended = nil)
    @started = started
    @ended = ended || started
  end

  # Inclusive number of days covered; 0 for an empty streak.
  def days
    return 0 unless @started && @ended
    (@ended - @started).to_i.abs + 1
  end

  # A streak is "current" relative to date if it ended no earlier than the
  # day before date.
  def current?(date = Date.today)
    @ended && (@ended + 1) >= date
  end

  # Whether date falls inside the streak's inclusive range.
  def include?(date)
    return false unless @started && @ended
    @started <= date && @ended >= date
  end

  # Compact "YYYY-M-D..YYYY-M-D:Streak" representation ("nil" for blanks).
  def inspect
    fmt = lambda { |d| d ? "#{d.year}-#{d.month}-#{d.day}" : :nil }
    "#{fmt.call(@started)}..#{fmt.call(@ended)}:Streak"
  end
end
end
different approach to recursion in #committed_days_in_feed
$: << File.join(File.dirname(__FILE__), '..', '..', 'vendor', 'feed_me', 'lib')
$: << File.join(File.dirname(__FILE__), '..', '..', 'vendor', 'mechanical_github', 'lib')
require 'rubygems'
require 'open-uri'
require 'dm-core'
require 'feed_me'
require 'mechanical_github'
require 'set'
module Seinfeld
# Some of this is destined to be broken out into modules when support for
# more services than just github is added.
class User
  # Tracks a GitHub user's daily commit activity and streaks
  # ("don't break the chain").  github_login/github_password are the site
  # account credentials used to message users through GitHub.
  class << self
    attr_accessor :github_login
    attr_accessor :github_password
  end

  include DataMapper::Resource

  property :id, Integer, :serial => true
  property :login, String, :unique => true
  property :email, String
  # Newest feed entry id already processed; lets later fetches stop early.
  property :last_entry_id, String
  property :current_streak, Integer, :default => 0, :index => true
  property :longest_streak, Integer, :default => 0, :index => true
  property :streak_start, Date
  property :streak_end, Date

  has n, :progressions, :class_name => "Seinfeld::Progression", :order => [:created_at.desc]

  # Yield every user, fetched in id-ordered batches of 15.
  def self.paginated_each(&block)
    max_id = 0
    while batch = next_batch(max_id)
      batch.each(&block)
      max_id = batch.map { |u| u.id }.max
    end
  end

  # The next batch of up to 15 users with id greater than the given id,
  # or nil when there are no more.
  def self.next_batch(id)
    batch = all :order => [:id], :limit => 15, :id.gt => id
    batch.size.zero? ? nil : batch
  end

  # Fetch the user's feed, store newly seen commit days as Progressions,
  # and recompute the streak columns.  Runs inside a transaction.
  # committed_days_in_feed may return nil (feed unavailable), hence || [].
  def update_progress
    transaction do
      days = committed_days_in_feed || []
      save
      unless days.empty?
        # ignore days that already have a Progression row
        existing = progressions(:created_at => days).map { |p| p.created_at }
        # NOTE: the local current_streak below shadows the property accessor.
        streaks = [current_streak = Streak.new(streak_start, streak_end)]
        days = days - existing
        days.sort!
        days.each do |day|
          if current_streak.current?(day)
            # day extends the running streak
            current_streak.ended = day
          else
            # gap: start a new streak at this day
            streaks << (current_streak = Streak.new(day))
          end
          progressions.create(:created_at => day)
        end
        highest_streak = streaks.empty? ? 0 : streaks.max { |a, b| a.days <=> b.days }.days
        latest_streak = streaks.last
        self.streak_start = latest_streak.started if latest_streak
        self.streak_end = latest_streak.ended if latest_streak
        self.current_streak = latest_streak.days if latest_streak && latest_streak.current?
        self.longest_streak = highest_streak if highest_streak > longest_streak.to_i
        save
      end
    end
  end

  # Dates of commit entries in the user's Atom feed, or nil when the page
  # could not be fetched or is empty.  Page 1 drives the pagination: it
  # keeps fetching successive pages until one returns nil (or the
  # previously seen entry id shows up), and records the newest entry id.
  def committed_days_in_feed(page = 1)
    feed = get_feed(page)
    return nil if feed.nil?
    entry_id = nil # track the first entry id to store in the user model
    skipped_early = nil
    return nil if feed.entries.empty?
    days = feed.entries.inject({}) do |selected, entry|
      this_entry_id = entry.item_id
      entry_id ||= this_entry_id
      if last_entry_id == this_entry_id
        # everything from here on was handled by a previous run
        skipped_early = true
        break selected
      end
      if entry.title =~ %r{^#{login} committed}
        updated = entry.updated_at
        date = Date.civil(updated.year, updated.month, updated.day)
        # hash keys double as a uniqueness set
        selected.update date => nil
      else
        selected
      end
    end.keys
    if page == 1
      self.last_entry_id = entry_id
      unless skipped_early
        # pull remaining pages iteratively until one yields nothing
        while paged_days = committed_days_in_feed(page += 1)
          days += paged_days
        end
        days.uniq!
      end
    end
    days
  end

  # Set of committed Dates within the given calendar month.
  def progress_for(year, month)
    start = Date.new(year, month)
    Set.new progressions(:created_at => start..((start >> 1) - 1)).map { |p| Date.new(p.created_at.year, p.created_at.month, p.created_at.day) }
  end

  # Handle a "X sent you a message" GitHub notification subject: reply if
  # the login is already registered, otherwise create the user (yielding it
  # first if a block is given) and build its progress.
  # NOTE(review): `scan(...).first.to_s` relies on Ruby 1.8 Array#to_s
  # (join) semantics; under 1.9+ it yields an inspect string — confirm the
  # targeted Ruby version.
  def self.process_new_github_user(subject)
    login_name = subject.scan(/([\w\_\-]+) sent you a message/).first.to_s
    return if login_name.size.zero?
    if user = first(:login => login_name)
      if github_login && github_password
        session = MechanicalGitHub::Session.new
        session.login github_login, github_password
        session.send_message login_name, "[CAN] You've already registered!", "Thanks for your enthusiasm, but you've already registered for a Calendar About Nothing: http://calendaraboutnothing.com/~#{user.login}."
      end
      nil
    else
      user = new(:login => login_name)
      yield user if block_given?
      user.update_progress
    end
  end

  private

  # Fetch and parse one page of the user's GitHub Atom feed.
  # Returns nil on any error (bare rescue is a deliberate best-effort).
  def get_feed(page = 1)
    feed = nil
    open("http://github.com/#{login}.atom?page=#{page}") { |f| feed = FeedMe.parse(f.read) }
    feed
  rescue
    nil
  end
end
class Progression
  # One committed day for a user; created_at is the commit Date.
  include DataMapper::Resource
  property :id, Integer, :serial => true
  property :created_at, Date
  belongs_to :user, :class_name => "Seinfeld::User"
end
# Value object describing a run of consecutive committed days.
class Streak
  attr_accessor :started, :ended

  # A streak may be empty (no dates), single-day, or an inclusive range;
  # a lone start date implies a one-day streak.
  def initialize(started = nil, ended = nil)
    @started = started
    @ended = ended || started
  end

  # Inclusive number of days covered; 0 for an empty streak.
  def days
    return 0 unless @started && @ended
    (@ended - @started).to_i.abs + 1
  end

  # A streak is "current" relative to date if it ended no earlier than the
  # day before date.
  def current?(date = Date.today)
    @ended && (@ended + 1) >= date
  end

  # Whether date falls inside the streak's inclusive range.
  def include?(date)
    return false unless @started && @ended
    @started <= date && @ended >= date
  end

  # Compact "YYYY-M-D..YYYY-M-D:Streak" representation ("nil" for blanks).
  def inspect
    fmt = lambda { |d| d ? "#{d.year}-#{d.month}-#{d.day}" : :nil }
    "#{fmt.call(@started)}..#{fmt.call(@ended)}:Streak"
  end
end
end |
# Top-level SequreISP namespace.
module SequreISP
  # Project homepage.
  URL = "http://www.sequreisp.com/"

  # Release/major/minor version triple for SequreISP.
  class Version
    RELEASE = 2
    MAJOR = 2
    MINOR = 0

    class << self
      # Version as a three-element array, e.g. [2, 2, 0].
      def to_a
        [RELEASE, MAJOR, MINOR]
      end

      # Dotted version string, e.g. "2.2.0".
      def to_s
        to_a.join(".")
      end
    end
  end
end
Version bump; I forgot to bump 3 and 4.
# Top-level SequreISP namespace.
module SequreISP
  # Project homepage.
  URL = "http://www.sequreisp.com/"

  # Release/major/minor version triple for SequreISP.
  class Version
    RELEASE = 2
    MAJOR = 5
    MINOR = 0

    class << self
      # Version as a three-element array, e.g. [2, 5, 0].
      def to_a
        [RELEASE, MAJOR, MINOR]
      end

      # Dotted version string, e.g. "2.5.0".
      def to_s
        to_a.join(".")
      end
    end
  end
end
|
module Sidetiq
  # Public: Sidetiq version namespace.
  module VERSION
    # Public: Sidetiq major version number.
    MAJOR = 0
    # Public: Sidetiq minor version number.
    MINOR = 4
    # Public: Sidetiq patch level.
    PATCH = 0
    # Public: Sidetiq version suffix (nil for final releases).
    SUFFIX = 'rc4'
    # Public: String representation of the current Sidetiq version,
    # built by joining the non-nil parts with dots.
    parts = [MAJOR, MINOR, PATCH]
    parts << SUFFIX if SUFFIX
    STRING = parts.join('.')
  end
end
Bump to v0.4.0.
module Sidetiq
  # Public: Sidetiq version namespace.
  module VERSION
    # Public: Sidetiq major version number.
    MAJOR = 0
    # Public: Sidetiq minor version number.
    MINOR = 4
    # Public: Sidetiq patch level.
    PATCH = 0
    # Public: Sidetiq version suffix (nil for final releases).
    SUFFIX = nil
    # Public: String representation of the current Sidetiq version,
    # built by joining the non-nil parts with dots.
    parts = [MAJOR, MINOR, PATCH]
    parts << SUFFIX if SUFFIX
    STRING = parts.join('.')
  end
end
|
# Detect the platform we're running on so we can tweak behaviour
# in various places.
module Sikuli
  # Platform detection and location of the sikuli-script.jar archive.
  class Platform
    # Truthy (match index) on the named host OS, nil otherwise.
    WINDOWS = RbConfig::CONFIG['host_os'] =~ /mswin/
    LINUX = RbConfig::CONFIG['host_os'] =~ /linux/
    MINGW = RbConfig::CONFIG['host_os'] =~ /mingw/
    OS_X = RbConfig::CONFIG['host_os'] =~ /darwin/

    # Absolute path to sikuli-script.jar: the Sikuli-IDE app bundle on
    # OS X, otherwise $SIKULI_HOME.  Raises LoadError when SIKULI_HOME is
    # unset (non-OS X) or the jar does not exist.
    def self.sikuli_script_path
      if OS_X
        path = "/Applications/Sikuli-IDE.app/sikuli-script.jar"
      else
        raise LoadError, no_sikuli_home_err_msg if ENV['SIKULI_HOME'].nil?
        path = "#{ENV['SIKULI_HOME']}/sikuli-script.jar"
      end
      unless File.exist?(path)
        raise LoadError, "Failed to load '#{path}'\nIs Sikuli installed?"
      end
      path
    end

    private

    # Platform-appropriate "SIKULI_HOME is not set" error message.
    # NOTE(review): `private` does not apply to `def self.` methods, so this
    # is still public — confirm whether private_class_method was intended.
    def self.no_sikuli_home_err_msg
      err = "Failed to load 'sikuli-script.jar' from the Sikuli home directory"
      return err + "\nMake sure %SIKULI_HOME% is set!" if WINDOWS
      return err + "\nMake sure $SIKULI_HOME is set!" if LINUX
      err + "\nMake sure %SIKULI_HOME% or $SIKULI_HOME is set!"
    end
  end
end
Set the correct path for the Mac OS X sikuli-script.jar.
# Detect the platform we're running on so we can tweak behaviour
# in various places.
module Sikuli
  # Platform detection and location of the sikuli-script.jar archive.
  class Platform
    # Truthy (match index) on the named host OS, nil otherwise.
    WINDOWS = RbConfig::CONFIG['host_os'] =~ /mswin/
    LINUX = RbConfig::CONFIG['host_os'] =~ /linux/
    MINGW = RbConfig::CONFIG['host_os'] =~ /mingw/
    OS_X = RbConfig::CONFIG['host_os'] =~ /darwin/

    # Absolute path to sikuli-script.jar: inside the Sikuli-IDE app bundle
    # on OS X, otherwise $SIKULI_HOME.  Raises LoadError when SIKULI_HOME
    # is unset (non-OS X) or the jar does not exist.
    def self.sikuli_script_path
      if OS_X
        path = "/Applications/Sikuli-IDE.app/Contents/Resources/Java/sikuli-script.jar"
      else
        raise LoadError, no_sikuli_home_err_msg if ENV['SIKULI_HOME'].nil?
        path = "#{ENV['SIKULI_HOME']}/sikuli-script.jar"
      end
      unless File.exist?(path)
        raise LoadError, "Failed to load '#{path}'\nIs Sikuli installed?"
      end
      path
    end

    private

    # Platform-appropriate "SIKULI_HOME is not set" error message.
    # NOTE(review): `private` does not apply to `def self.` methods, so this
    # is still public — confirm whether private_class_method was intended.
    def self.no_sikuli_home_err_msg
      err = "Failed to load 'sikuli-script.jar' from the Sikuli home directory"
      return err + "\nMake sure %SIKULI_HOME% is set!" if WINDOWS
      return err + "\nMake sure $SIKULI_HOME is set!" if LINUX
      err + "\nMake sure %SIKULI_HOME% or $SIKULI_HOME is set!"
    end
  end
end
|
# Sinatra top-level namespace.
module Sinatra
  # The released Sinatra version string.
  VERSION = "1.3.2"
end
Bump version.
# Sinatra top-level namespace.
module Sinatra
  # The released Sinatra version string.
  VERSION = "1.4.0"
end
|
# Sinatra top-level namespace.
module Sinatra
  # The released Sinatra version string (pre-release).
  VERSION = "2.0.0.beta1"
end
2.0.0.beta2
# Sinatra top-level namespace.
module Sinatra
  # The released Sinatra version string (pre-release).
  VERSION = "2.0.0.beta2"
end
|
module Sisimai
# Sisimai::Message convert bounce email text to data structure. It resolve
# email text into an UNIX From line, the header part of the mail, delivery
# status, and RFC822 header part. When the email given as a argument of "new"
# method is not a bounce email, the method returns nil.
class Message
# Imported from p5-Sisimail/lib/Sisimai/Message.pm
# :from [String] UNIX From line
# :header [Hash] Header part of an email
# :ds [Array] Parsed data by Sisimai::Lhost::* module
# :rfc822 [Hash] Header part of the original message
# :catch [Any] The results returned by hook method
attr_accessor :from, :header, :ds, :rfc822, :catch
require 'sisimai/mime'
require 'sisimai/order'
require 'sisimai/lhost'
require 'sisimai/string'
require 'sisimai/address'
require 'sisimai/rfc5322'
DefaultSet = Sisimai::Order.another
LhostTable = Sisimai::Lhost.path
# Constructor of Sisimai::Message
# @param [String] data Email text data
# @param [Hash] argvs Module to be loaded
# @options argvs [String] :data Entire email message
# @options argvs [Array] :load User defined MTA module list
# @options argvs [Array] :field Email header names to be captured
# @options argvs [Array] :order The order of MTA modules
# @options argvs [Code] :hook Reference to callback method
# @return [Sisimai::Message] Structured email data or nil if each
# value of the arguments are missing
def initialize(data: '', **argvs)
return nil if data.empty?
email = data.scrub('?').gsub("\r\n", "\n")
field = argvs[:field] || []
unless field.is_a? Array
# Unsupported value in "field"
warn ' ***warning: "field" accepts an array reference only'
return nil
end
methodargv = { 'data' => email, 'hook' => argvs[:hook] || nil, 'field' => field }
[:load, :order].each do |e|
# Order of MTA modules
next unless argvs[e]
next unless argvs[e].is_a? Array
next if argvs[e].empty?
methodargv[e.to_s] = argvs[e]
end
datasource = Sisimai::Message.make(methodargv)
return nil unless datasource
return nil unless datasource['ds']
@from = datasource['from']
@header = datasource['header']
@ds = datasource['ds']
@rfc822 = datasource['rfc822']
@catch = datasource['catch'] || nil
end
# Check whether the object has valid content or not
# @return [True,False] returns true if the object is void
def void
return true unless @ds
return false
end
# Make data structure from the email message(a body part and headers)
# @param [Hash] argvs Email data
# @options argvs [String] data Entire email message
# @options argvs [Array] load User defined MTA module list
# @options argvs [Array] field Email header names to be captured
# @options argvs [Array] order The order of MTA modules
# @options argvs [Code] hook Reference to callback method
# @return [Hash] Resolved data structure
def self.make(argvs)
email = argvs['data']
hookmethod = argvs['hook'] || nil
processing = {
'from' => '', # From_ line
'header' => {}, # Email header
'rfc822' => '', # Original message part
'ds' => [], # Parsed data, Delivery Status
'catch' => nil, # Data parsed by callback method
}
methodargv = {
'load' => argvs['load'] || [],
'order' => argvs['order'] || []
}
tobeloaded = Sisimai::Message.load(methodargv)
# 1. Split email data to headers and a body part.
return nil unless aftersplit = Sisimai::Message.divideup(email)
# 2. Convert email headers from text to hash reference
processing['from'] = aftersplit['from']
processing['header'] = Sisimai::Message.makemap(aftersplit['header'])
# 3. Remove "Fwd:" string from the Subject: header
if cv = processing['header']['subject'].downcase.match(/\A[ \t]*fwd?:[ ]*(.*)\z/)
# Delete quoted strings, quote symbols(>)
processing['header']['subject'] = cv[1]
aftersplit['body'] = aftersplit['body'].gsub(/^[>]+[ ]/, '').gsub(/^[>]$/, '')
end
# 4. Rewrite message body for detecting the bounce reason
tryonfirst = Sisimai::Order.make(processing['header']['subject'])
methodargv = {
'hook' => hookmethod,
'mail' => processing,
'body' => aftersplit['body'],
'tryonfirst' => tryonfirst,
'tobeloaded' => tobeloaded,
}
return nil unless bouncedata = Sisimai::Message.parse(methodargv)
return nil if bouncedata.empty?
# 5. Rewrite headers of the original message in the body part
%w|ds catch rfc822|.each { |e| processing[e] = bouncedata[e] }
p = bouncedata['rfc822']
p = aftersplit['body'] if p.empty?
processing['rfc822'] = p.is_a?(::String) ? Sisimai::Message.makemap(p, true) : p
return processing
end
# Load MTA modules which specified at 'order' and 'load' in the argument
# @param [Hash] argvs Module information to be loaded
# @options argvs [Array] load User defined MTA module list
# @options argvs [Array] order The order of MTA modules
# @return [Array] Module list
# @since v4.20.0
def self.load(argvs)
modulelist = []
tobeloaded = []
%w[load order].each do |e|
# The order of MTA modules specified by user
next unless argvs[e]
next unless argvs[e].is_a? Array
next if argvs[e].empty?
modulelist += argvs['order'] if e == 'order'
next unless e == 'load'
# Load user defined MTA module
argvs['load'].each do |v|
# Load user defined MTA module
begin
require v.to_s.gsub('::', '/').downcase
rescue LoadError
warn ' ***warning: Failed to load ' << v
next
end
tobeloaded << v
end
end
while e = modulelist.shift do
# Append the custom order of MTA modules
next if tobeloaded.index(e)
tobeloaded << e
end
return tobeloaded
end
# Divide email data up headers and a body part.
# @param [String] email Email data
# @return [Hash] Email data after split
def self.divideup(email)
return nil if email.empty?
block = { 'from' => '', 'header' => '', 'body' => '' }
email.gsub!(/\r\n/, "\n") if email.include?("\r\n")
email.gsub!(/[ \t]+$/, '') if email =~ /[ \t]+$/
(block['header'], block['body']) = email.split(/\n\n/, 2)
return nil unless block['header']
return nil unless block['body']
if block['header'].start_with?('From ')
# From MAILER-DAEMON Tue Feb 11 00:00:00 2014
block['from'] = block['header'].split(/\n/, 2)[0].delete("\r")
else
# Set pseudo UNIX From line
block['from'] = 'MAILER-DAEMON Tue Feb 11 00:00:00 2014'
end
block['header'] << "\n" unless block['header'].end_with?("\n")
block['body'] << "\n"
return block
end
# Convert a text including email headers to a hash reference.
# NOTE: mutates argv0 in place (gsub!) while stripping '>' quote symbols.
# A header which appears more than once keeps only its last value, except
# "Received:" whose values are all collected into an Array.
# @param [String] argv0 Email header data
# @param [Bool] argv1 Decode "Subject:" header
# @return [Hash] Structured email header data (keys are lower-cased names)
# @since v4.25.6
def self.makemap(argv0 = '', argv1 = nil)
  return {} if argv0.empty?
  argv0.gsub!(/^[>]+[ ]/m, '') # Remove '>' indent symbol of forwarded message

  # Select and convert all the headers in $argv0. The following regular expression
  # is based on https://gist.github.com/xtetsuji/b080e1f5551d17242f6415aba8a00239
  headermaps = { 'subject' => '' }
  recvheader = []
  argv0.scan(/^([\w-]+):[ ]*(.*?)\n(?![\s\t])/m) { |e| headermaps[e[0].downcase] = e[1] }
  headermaps.delete('received')
  # Unfold folded (multi-line) header values into a single line
  headermaps.each_key { |e| headermaps[e].gsub!(/\n[\s\t]+/, ' ') }

  if argv0.include?('Received:')
    # Capture values of each Received: header
    recvheader = argv0.scan(/^Received:[ ]*(.*?)\n(?![\s\t])/m).flatten
    recvheader.each { |e| e.gsub!(/\n[\s\t]+/, ' ') }
  end
  headermaps['received'] = recvheader

  return headermaps unless argv1
  return headermaps if headermaps['subject'].empty?

  # Convert MIME-Encoded subject
  if Sisimai::String.is_8bit(headermaps['subject'])
    # The value of ``Subject'' header is including multibyte character,
    # is not MIME-Encoded text.
    headermaps['subject'].scrub!('?')
  else
    # MIME-Encoded subject field or ASCII characters only
    r = []
    if Sisimai::MIME.is_mimeencoded(headermaps['subject'])
      # split the value of Subject by borderline
      headermaps['subject'].split(/ /).each do |v|
        # Insert value to the array if the string is MIME encoded text
        r << v if Sisimai::MIME.is_mimeencoded(v)
      end
    else
      # Subject line is not MIME encoded
      r << headermaps['subject']
    end
    headermaps['subject'] = Sisimai::MIME.mimedecode(r)
  end
  return headermaps
end
# @abstract Parse bounce mail with each MTA module
# @param [Hash] argvs Processing message entity.
# @param options argvs [Hash] mail Email message entity
# @param options mail [String] from From line of mbox
# @param options mail [Hash] header Email header data
# @param options mail [String] rfc822 Original message part
# @param options mail [Array] ds Delivery status list(parsed data)
# @param options argvs [String] body Email message body
# @param options argvs [Array] tryonfirst MTA module list to load on first
# @param options argvs [Array] tobeloaded User defined MTA module list
# @return [Hash] Parsed and structured bounce mails
# NOTE: consumes argvs['tobeloaded'] destructively (Array#shift) and may
# mutate the header Hash in argvs['mail'] (default values, Subject decode).
def self.parse(argvs)
  return nil unless argvs['mail']
  return nil unless argvs['body']

  mailheader = argvs['mail']['header']
  bodystring = argvs['body']
  hookmethod = argvs['hook'] || nil
  havecaught = nil
  return nil unless mailheader

  # PRECHECK_EACH_HEADER:
  # Set empty string if the value is nil
  mailheader['from'] ||= ''
  mailheader['subject'] ||= ''
  mailheader['content-type'] ||= ''

  if Sisimai::MIME.is_mimeencoded(mailheader['subject'])
    # Decode MIME-Encoded "Subject:" header
    mailheader['subject'] = Sisimai::MIME.mimedecode(mailheader['subject'].split(/[ ]/))
  end

  # Decode BASE64 Encoded message body, rewrite.
  mesgformat = (mailheader['content-type'] || '').downcase
  ctencoding = (mailheader['content-transfer-encoding'] || '').downcase

  if mesgformat.start_with?('text/plain', 'text/html')
    # Content-Type: text/plain; charset=UTF-8
    if ctencoding == 'base64'
      # Content-Transfer-Encoding: base64
      bodystring = Sisimai::MIME.base64d(bodystring)
    elsif ctencoding == 'quoted-printable'
      # Content-Transfer-Encoding: quoted-printable
      bodystring = Sisimai::MIME.qprintd(bodystring)
    end

    if mesgformat.start_with?('text/html;')
      # Content-Type: text/html;...
      bodystring = Sisimai::String.to_plain(bodystring, true)
    end
  else
    # NOT text/plain
    if mesgformat.start_with?('multipart/')
      # In case of Content-Type: multipart/*
      p = Sisimai::MIME.makeflat(mailheader['content-type'], bodystring)
      bodystring = p unless p.empty?
    end
  end
  bodystring = bodystring.scrub('?').delete("\r")

  haveloaded = {}
  parseddata = nil
  modulename = ''
  if hookmethod.is_a? Proc
    # Call the hook method; its return value is stored under 'catch' later.
    # A failing hook only emits a warning and never aborts parsing.
    begin
      p = {
        'datasrc' => 'email',
        'headers' => mailheader,
        'message' => bodystring,
        'bounces' => nil
      }
      havecaught = hookmethod.call(p)
    rescue StandardError => ce
      warn ' ***warning: Something is wrong in hook method :' << ce.to_s
    end
  end

  # :PARSER is thrown as soon as any module returns parsed data
  catch :PARSER do
    while true
      # 1. User-Defined Module
      # 2. MTA Module Candidates to be tried on first
      # 3. Sisimai::Lhost::*
      # 4. Sisimai::RFC3464
      # 5. Sisimai::ARF
      # 6. Sisimai::RFC3834
      while r = argvs['tobeloaded'].shift do
        # Call user defined MTA modules
        next if haveloaded[r]
        parseddata = Module.const_get(r).make(mailheader, bodystring)
        haveloaded[r] = true
        modulename = r
        throw :PARSER if parseddata
      end

      [argvs['tryonfirst'], DefaultSet].flatten.each do |r|
        # Try MTA module candidates
        next if haveloaded[r]
        require LhostTable[r]
        parseddata = Module.const_get(r).make(mailheader, bodystring)
        haveloaded[r] = true
        modulename = r
        throw :PARSER if parseddata
      end

      unless haveloaded['Sisimai::RFC3464']
        # When the all of Sisimai::Lhost::* modules did not return bounce
        # data, call Sisimai::RFC3464;
        require 'sisimai/rfc3464'
        parseddata = Sisimai::RFC3464.make(mailheader, bodystring)
        modulename = 'RFC3464'
        throw :PARSER if parseddata
      end

      unless haveloaded['Sisimai::ARF']
        # Feedback Loop message
        require 'sisimai/arf'
        parseddata = Sisimai::ARF.make(mailheader, bodystring) if Sisimai::ARF.is_arf(mailheader)
        throw :PARSER if parseddata
      end

      unless haveloaded['Sisimai::RFC3834']
        # Try to parse the message as auto reply message defined in RFC3834
        require 'sisimai/rfc3834'
        parseddata = Sisimai::RFC3834.make(mailheader, bodystring)
        modulename = 'RFC3834'
        throw :PARSER if parseddata
      end

      break # as of now, we have no sample email for coding this block
    end
  end

  return nil unless parseddata
  parseddata['catch'] = havecaught
  # Keep only the trailing module name, e.g. "Sisimai::Lhost::Foo" -> "Foo"
  modulename = modulename.sub(/\A.+::/, '')

  parseddata['ds'].each do |e|
    e['agent'] = modulename unless e['agent']
    e.each_key { |a| e[a] ||= '' } # Replace nil with ""
  end
  return parseddata
end
end
end
Bug fix: Subject decoding should be executed before calling Sisimai::Order.make; imported from commit https://github.com/sisimai/p5-Sisimai/commit/9880db1327087b4e1e638e4a3828ab76baac95cf
module Sisimai
# Sisimai::Message convert bounce email text to data structure. It resolve
# email text into an UNIX From line, the header part of the mail, delivery
# status, and RFC822 header part. When the email given as a argument of "new"
# method is not a bounce email, the method returns nil.
class Message
# Imported from p5-Sisimail/lib/Sisimai/Message.pm
# :from [String] UNIX From line
# :header [Hash] Header part of an email
# :ds [Array] Parsed data by Sisimai::Lhost::* module
# :rfc822 [Hash] Header part of the original message
# :catch [Any] The results returned by hook method
attr_accessor :from, :header, :ds, :rfc822, :catch
require 'sisimai/mime'
require 'sisimai/order'
require 'sisimai/lhost'
require 'sisimai/string'
require 'sisimai/address'
require 'sisimai/rfc5322'
DefaultSet = Sisimai::Order.another
LhostTable = Sisimai::Lhost.path
# Constructor of Sisimai::Message
# @param [String] data Email text data
# @param [Hash] argvs Module to be loaded
# @options argvs [String] :data Entire email message
# @options argvs [Array] :load User defined MTA module list
# @options argvs [Array] :field Email header names to be captured
# @options argvs [Array] :order The order of MTA modules
# @options argvs [Code] :hook Reference to callback method
# @return [Sisimai::Message] Structured email data or nil if each
# value of the arguments are missing
# Constructor of Sisimai::Message
# @param [String] data Email text data
# @param [Hash] argvs Module to be loaded
# @options argvs [String] :data Entire email message
# @options argvs [Array] :load User defined MTA module list
# @options argvs [Array] :field Email header names to be captured
# @options argvs [Array] :order The order of MTA modules
# @options argvs [Code] :hook Reference to callback method
# @return [Sisimai::Message] Structured email data or nil if each
#                            value of the arguments are missing
def initialize(data: '', **argvs)
  return nil if data.empty?
  # Replace invalid byte sequences with '?' and normalize CRLF to LF
  email = data.scrub('?').gsub("\r\n", "\n")
  field = argvs[:field] || []
  unless field.is_a? Array
    # Unsupported value in "field"
    warn ' ***warning: "field" accepts an array reference only'
    return nil
  end
  methodargv = { 'data' => email, 'hook' => argvs[:hook] || nil, 'field' => field }
  [:load, :order].each do |e|
    # Order of MTA modules: copied through only when a non-empty Array
    next unless argvs[e]
    next unless argvs[e].is_a? Array
    next if argvs[e].empty?
    methodargv[e.to_s] = argvs[e]
  end

  datasource = Sisimai::Message.make(methodargv)
  # NOTE: returning nil from #initialize does not prevent object creation;
  # the instance variables simply stay nil (callers check with #void)
  return nil unless datasource
  return nil unless datasource['ds']

  @from = datasource['from']
  @header = datasource['header']
  @ds = datasource['ds']
  @rfc822 = datasource['rfc822']
  @catch = datasource['catch'] || nil
end
# Check whether the object has valid content or not
# @return [True,False] true when no delivery status data was parsed
def void
  @ds ? false : true
end
# Make data structure from the email message(a body part and headers)
# @param [Hash] argvs Email data
# @options argvs [String] data Entire email message
# @options argvs [Array] load User defined MTA module list
# @options argvs [Array] field Email header names to be captured
# @options argvs [Array] order The order of MTA modules
# @options argvs [Code] hook Reference to callback method
# @return [Hash] Resolved data structure, or nil when the mail cannot be
#                split or no module produced delivery status data
def self.make(argvs)
  email = argvs['data']
  hookmethod = argvs['hook'] || nil
  processing = {
    'from' => '',    # From_ line
    'header' => {},  # Email header
    'rfc822' => '',  # Original message part
    'ds' => [],      # Parsed data, Delivery Status
    'catch' => nil,  # Data parsed by callback method
  }
  methodargv = {
    'load' => argvs['load'] || [],
    'order' => argvs['order'] || []
  }
  tobeloaded = Sisimai::Message.load(methodargv)

  # 1. Split email data to headers and a body part.
  return nil unless aftersplit = Sisimai::Message.divideup(email)

  # 2. Convert email headers from text to hash reference
  processing['from'] = aftersplit['from']
  processing['header'] = Sisimai::Message.makemap(aftersplit['header'])

  # 3. Decode and rewrite the "Subject:" header. This runs before
  #    Sisimai::Order.make below so module ordering sees the decoded value.
  unless processing['header']['subject'].empty?
    # Decode MIME-Encoded "Subject:" header
    s = processing['header']['subject']
    q = Sisimai::MIME.is_mimeencoded(s) ? Sisimai::MIME.mimedecode(s.split(/[ ]/)) : s

    # Remove "Fwd:" string from the Subject: header
    if cv = q.downcase.match(/\A[ \t]*fwd?:[ ]*(.*)\z/)
      # Delete quoted strings, quote symbols(>)
      q = cv[1]
      aftersplit['body'] = aftersplit['body'].gsub(/^[>]+[ ]/, '').gsub(/^[>]$/, '')
    end
    processing['header']['subject'] = q
  end

  # 4. Rewrite message body for detecting the bounce reason
  tryonfirst = Sisimai::Order.make(processing['header']['subject'])
  methodargv = {
    'hook' => hookmethod,
    'mail' => processing,
    'body' => aftersplit['body'],
    'tryonfirst' => tryonfirst,
    'tobeloaded' => tobeloaded,
  }
  return nil unless bouncedata = Sisimai::Message.parse(methodargv)
  return nil if bouncedata.empty?

  # 5. Rewrite headers of the original message in the body part
  %w|ds catch rfc822|.each { |e| processing[e] = bouncedata[e] }
  p = bouncedata['rfc822']
  p = aftersplit['body'] if p.empty?
  processing['rfc822'] = p.is_a?(::String) ? Sisimai::Message.makemap(p, true) : p

  return processing
end
# Load MTA modules which specified at 'order' and 'load' in the argument
# @param [Hash] argvs Module information to be loaded
# @options argvs [Array] load User defined MTA module list
# @options argvs [Array] order The order of MTA modules
# @return [Array] Module list
# @since v4.20.0
def self.load(argvs)
  userorder  = []
  tobeloaded = []
  %w[load order].each do |e|
    # Skip keys which are absent, not an Array, or empty
    list = argvs[e]
    next unless list.is_a?(::Array)
    next if list.empty?

    if e == 'order'
      # Remember the user defined order of MTA modules
      userorder.concat(list)
    else
      # Try to require each user defined MTA module
      list.each do |v|
        begin
          require v.to_s.gsub('::', '/').downcase
        rescue LoadError
          warn ' ***warning: Failed to load ' << v
          next
        end
        tobeloaded << v
      end
    end
  end
  # Append modules from "order" which have not been registered yet
  userorder.each { |e| tobeloaded << e unless tobeloaded.include?(e) }
  return tobeloaded
end
# Divide email data up headers and a body part.
# @param [String] email Email data
# @return [Hash] Email data after split, or nil for unsplittable input
def self.divideup(email)
  return nil if email.empty?

  # Normalize CRLF to LF and strip trailing blanks on each line.
  # Both gsub! calls intentionally mutate the given string in place.
  email.gsub!(/\r\n/, "\n") if email.include?("\r\n")
  email.gsub!(/[ \t]+$/, '') if email =~ /[ \t]+$/

  headertext, bodytext = email.split(/\n\n/, 2)
  return nil unless headertext
  return nil unless bodytext

  fromline = if headertext.start_with?('From ')
               # From MAILER-DAEMON Tue Feb 11 00:00:00 2014
               headertext.split(/\n/, 2)[0].delete("\r")
             else
               # Set pseudo UNIX From line
               'MAILER-DAEMON Tue Feb 11 00:00:00 2014'
             end

  headertext << "\n" unless headertext.end_with?("\n")
  bodytext << "\n"
  return { 'from' => fromline, 'header' => headertext, 'body' => bodytext }
end
# Convert a text including email headers to a hash reference.
# NOTE: mutates argv0 in place (gsub!) while stripping '>' quote symbols.
# A header which appears more than once keeps only its last value, except
# "Received:" whose values are all collected into an Array.
# @param [String] argv0 Email header data
# @param [Bool] argv1 Decode "Subject:" header
# @return [Hash] Structured email header data (keys are lower-cased names)
# @since v4.25.6
def self.makemap(argv0 = '', argv1 = nil)
  return {} if argv0.empty?
  argv0.gsub!(/^[>]+[ ]/m, '') # Remove '>' indent symbol of forwarded message

  # Select and convert all the headers in $argv0. The following regular expression
  # is based on https://gist.github.com/xtetsuji/b080e1f5551d17242f6415aba8a00239
  headermaps = { 'subject' => '' }
  recvheader = []
  argv0.scan(/^([\w-]+):[ ]*(.*?)\n(?![\s\t])/m) { |e| headermaps[e[0].downcase] = e[1] }
  headermaps.delete('received')
  # Unfold folded (multi-line) header values into a single line
  headermaps.each_key { |e| headermaps[e].gsub!(/\n[\s\t]+/, ' ') }

  if argv0.include?('Received:')
    # Capture values of each Received: header
    recvheader = argv0.scan(/^Received:[ ]*(.*?)\n(?![\s\t])/m).flatten
    recvheader.each { |e| e.gsub!(/\n[\s\t]+/, ' ') }
  end
  headermaps['received'] = recvheader

  return headermaps unless argv1
  return headermaps if headermaps['subject'].empty?

  # Convert MIME-Encoded subject
  if Sisimai::String.is_8bit(headermaps['subject'])
    # The value of ``Subject'' header is including multibyte character,
    # is not MIME-Encoded text.
    headermaps['subject'].scrub!('?')
  else
    # MIME-Encoded subject field or ASCII characters only
    r = []
    if Sisimai::MIME.is_mimeencoded(headermaps['subject'])
      # split the value of Subject by borderline
      headermaps['subject'].split(/ /).each do |v|
        # Insert value to the array if the string is MIME encoded text
        r << v if Sisimai::MIME.is_mimeencoded(v)
      end
    else
      # Subject line is not MIME encoded
      r << headermaps['subject']
    end
    headermaps['subject'] = Sisimai::MIME.mimedecode(r)
  end
  return headermaps
end
# @abstract Parse bounce mail with each MTA module
# @param [Hash] argvs Processing message entity.
# @param options argvs [Hash] mail Email message entity
# @param options mail [String] from From line of mbox
# @param options mail [Hash] header Email header data
# @param options mail [String] rfc822 Original message part
# @param options mail [Array] ds Delivery status list(parsed data)
# @param options argvs [String] body Email message body
# @param options argvs [Array] tryonfirst MTA module list to load on first
# @param options argvs [Array] tobeloaded User defined MTA module list
# @return [Hash] Parsed and structured bounce mails
# NOTE: consumes argvs['tobeloaded'] destructively (Array#shift) and sets
# default values into the header Hash of argvs['mail']. The "Subject:"
# header is expected to be decoded by the caller (Sisimai::Message.make).
def self.parse(argvs)
  return nil unless argvs['mail']
  return nil unless argvs['body']

  mailheader = argvs['mail']['header']
  bodystring = argvs['body']
  hookmethod = argvs['hook'] || nil
  havecaught = nil
  return nil unless mailheader

  # PRECHECK_EACH_HEADER:
  # Set empty string if the value is nil
  mailheader['from'] ||= ''
  mailheader['subject'] ||= ''
  mailheader['content-type'] ||= ''

  # Decode BASE64 Encoded message body, rewrite.
  mesgformat = (mailheader['content-type'] || '').downcase
  ctencoding = (mailheader['content-transfer-encoding'] || '').downcase

  if mesgformat.start_with?('text/plain', 'text/html')
    # Content-Type: text/plain; charset=UTF-8
    if ctencoding == 'base64'
      # Content-Transfer-Encoding: base64
      bodystring = Sisimai::MIME.base64d(bodystring)
    elsif ctencoding == 'quoted-printable'
      # Content-Transfer-Encoding: quoted-printable
      bodystring = Sisimai::MIME.qprintd(bodystring)
    end

    if mesgformat.start_with?('text/html;')
      # Content-Type: text/html;...
      bodystring = Sisimai::String.to_plain(bodystring, true)
    end
  else
    # NOT text/plain
    if mesgformat.start_with?('multipart/')
      # In case of Content-Type: multipart/*
      p = Sisimai::MIME.makeflat(mailheader['content-type'], bodystring)
      bodystring = p unless p.empty?
    end
  end
  bodystring = bodystring.scrub('?').delete("\r")

  haveloaded = {}
  parseddata = nil
  modulename = ''
  if hookmethod.is_a? Proc
    # Call the hook method; its return value is stored under 'catch' later.
    # A failing hook only emits a warning and never aborts parsing.
    begin
      p = {
        'datasrc' => 'email',
        'headers' => mailheader,
        'message' => bodystring,
        'bounces' => nil
      }
      havecaught = hookmethod.call(p)
    rescue StandardError => ce
      warn ' ***warning: Something is wrong in hook method :' << ce.to_s
    end
  end

  # :PARSER is thrown as soon as any module returns parsed data
  catch :PARSER do
    while true
      # 1. User-Defined Module
      # 2. MTA Module Candidates to be tried on first
      # 3. Sisimai::Lhost::*
      # 4. Sisimai::RFC3464
      # 5. Sisimai::ARF
      # 6. Sisimai::RFC3834
      while r = argvs['tobeloaded'].shift do
        # Call user defined MTA modules
        next if haveloaded[r]
        parseddata = Module.const_get(r).make(mailheader, bodystring)
        haveloaded[r] = true
        modulename = r
        throw :PARSER if parseddata
      end

      [argvs['tryonfirst'], DefaultSet].flatten.each do |r|
        # Try MTA module candidates
        next if haveloaded[r]
        require LhostTable[r]
        parseddata = Module.const_get(r).make(mailheader, bodystring)
        haveloaded[r] = true
        modulename = r
        throw :PARSER if parseddata
      end

      unless haveloaded['Sisimai::RFC3464']
        # When the all of Sisimai::Lhost::* modules did not return bounce
        # data, call Sisimai::RFC3464;
        require 'sisimai/rfc3464'
        parseddata = Sisimai::RFC3464.make(mailheader, bodystring)
        modulename = 'RFC3464'
        throw :PARSER if parseddata
      end

      unless haveloaded['Sisimai::ARF']
        # Feedback Loop message
        require 'sisimai/arf'
        parseddata = Sisimai::ARF.make(mailheader, bodystring) if Sisimai::ARF.is_arf(mailheader)
        throw :PARSER if parseddata
      end

      unless haveloaded['Sisimai::RFC3834']
        # Try to parse the message as auto reply message defined in RFC3834
        require 'sisimai/rfc3834'
        parseddata = Sisimai::RFC3834.make(mailheader, bodystring)
        modulename = 'RFC3834'
        throw :PARSER if parseddata
      end

      break # as of now, we have no sample email for coding this block
    end
  end

  return nil unless parseddata
  parseddata['catch'] = havecaught
  # Keep only the trailing module name, e.g. "Sisimai::Lhost::Foo" -> "Foo"
  modulename = modulename.sub(/\A.+::/, '')

  parseddata['ds'].each do |e|
    e['agent'] = modulename unless e['agent']
    e.each_key { |a| e[a] ||= '' } # Replace nil with ""
  end
  return parseddata
end
end
end
|
# Define the version number of Sisimai
module Sisimai
  # Frozen so the constant's string cannot be mutated at runtime
  # (matches later releases which freeze VERSION)
  VERSION = '4.17.0'.freeze
end
Bump up to v4.17.0p1
# Define the version number of Sisimai
module Sisimai
  # Frozen so the constant's string cannot be mutated at runtime
  # (matches later releases which freeze VERSION)
  VERSION = '4.17.0p1'.freeze
end
|
# Define the version number of Sisimai
module Sisimai
  # Release version string; frozen to prevent runtime mutation
  VERSION = '4.22.6'.freeze
end
Bump up the version: v4.22.6p1
# Define the version number of Sisimai
module Sisimai
  # Release version string; frozen to prevent runtime mutation
  VERSION = '4.22.6p1'.freeze
end
|
module Sitemap
  # Gem version string; frozen so the constant cannot be mutated at runtime
  VERSION = "0.3".freeze
end
Version bump.
module Sitemap
  # Gem version string; frozen so the constant cannot be mutated at runtime
  VERSION = "0.3.1".freeze
end
|
require 'fileutils'
require 'xcodeproj'
require 'json'
require 'yaml'
require 'shellwords'
module Xcodeproj
  class Project
    # Enable coverage generation on every build configuration of the project.
    # With :auto the format is chosen by Xcode version: gcov before Xcode 7,
    # clang (profdata) from Xcode 7 on.
    # @param format [Symbol] :gcov, :clang or :auto
    # @raise [StandardError] when an unsupported format is given
    def slather_setup_for_coverage(format = :auto)
      unless [:gcov, :clang, :auto].include?(format)
        raise StandardError, "Only supported formats for setup are gcov, clang or auto"
      end

      if format == :auto
        format = Slather.xcode_version[0] < 7 ? :gcov : :clang
      end

      build_configurations.each do |build_configuration|
        if format == :clang
          build_configuration.build_settings["CLANG_ENABLE_CODE_COVERAGE"] = "YES"
        else
          build_configuration.build_settings["GCC_INSTRUMENT_PROGRAM_FLOW_ARCS"] = "YES"
          build_configuration.build_settings["GCC_GENERATE_TEST_COVERAGE_FILES"] = "YES"
        end
      end

      # Patch xcschemes too
      if format == :clang
        if Gem::Requirement.new('~> 0.27') =~ Gem::Version.new(Xcodeproj::VERSION)
          # @todo This will require to bump the xcodeproj dependency to ~> 0.27
          # (which would require to bump cocoapods too)
          schemes_path = Xcodeproj::XCScheme.shared_data_dir(self.path)
          Xcodeproj::Project.schemes(self.path).each do |scheme_name|
            xcscheme_path = "#{schemes_path + scheme_name}.xcscheme"
            xcscheme = Xcodeproj::XCScheme.new(xcscheme_path)
            xcscheme.test_action.xml_element.attributes['codeCoverageEnabled'] = 'YES'
            xcscheme.save_as(self.path, scheme_name)
          end
        else
          # @todo In the meantime, simply inform the user to do it manually
          puts %Q(Ensure you enabled "Gather coverage data" in each of your scheme's Test action)
        end
      end
    end
  end
end
module Slather
class Project < Xcodeproj::Project
attr_accessor :build_directory, :ignore_list, :ci_service, :coverage_service, :coverage_access_token, :source_directory,
:output_directory, :xcodeproj, :show_html, :verbose_mode, :input_format, :scheme, :binary_file, :binary_basename
alias_method :setup_for_coverage, :slather_setup_for_coverage
# Open a project file and remember which path it was opened from.
def self.open(xcodeproj)
  super.tap { |project| project.xcodeproj = xcodeproj }
end
# Default Xcode DerivedData location. The trailing slash is kept because
# callers concatenate further path components directly onto this string.
def derived_data_path
  File.join(File.expand_path('~'), 'Library', 'Developer', 'Xcode', 'DerivedData') + '/'
end
private :derived_data_path
# Return coverage file objects for the configured input format.
def coverage_files
  self.input_format == "profdata" ? profdata_coverage_files : gcov_coverage_files
end
private :coverage_files
# Collect gcov coverage objects for every *.gcno under the build directory.
# @return [Array] deduplicated coverage file objects
# @raise [StandardError] when no usable coverage files exist
def gcov_coverage_files
  coverage_files = Dir["#{build_directory}/**/*.gcno"].map do |file|
    coverage_file = coverage_file_class.new(self, file)
    # If there's no source file for this gcno, it probably belongs to another project.
    coverage_file.source_file_pathname && !coverage_file.ignored? ? coverage_file : nil
  end.compact

  if coverage_files.empty?
    raise StandardError, "No coverage files found. Are you sure your project is setup for generating coverage files? Try `slather setup your/project.xcodeproj`"
  else
    dedupe(coverage_files)
  end
end
private :gcov_coverage_files
# Build coverage objects from llvm-cov output; the tool emits one section
# per source file, separated by a blank line.
def profdata_coverage_files
  profdata_llvm_cov_output.split("\n\n").each_with_object([]) do |source, files|
    coverage_file = coverage_file_class.new(self, source)
    files << coverage_file unless coverage_file.ignored?
  end
end
private :profdata_coverage_files
# Locate the directory holding Coverage.profdata, either by scheme name or
# by the first product's name.
#
# Fixes two defects: the PBXFileReference "name" attribute is often not
# present, so fall back to deriving the product name from its path; and
# File.join is used so a build_directory with a trailing slash does not
# produce a "//" glob. File.exist? replaces File.exists?, which is
# deprecated and removed in Ruby 3.2.
# @return [String] coverage directory path
# @raise [StandardError] when the build directory or coverage dir is missing
def profdata_coverage_dir
  raise StandardError, "The specified build directory (#{self.build_directory}) does not exist" unless File.exist?(self.build_directory)
  dir = if self.scheme
          Dir[File.join("#{build_directory}", "/**/CodeCoverage/#{self.scheme}")].first
        else
          Dir[File.join("#{build_directory}", "/**/#{first_product_name}")].first
        end
  raise StandardError, "No coverage directory found. Are you sure your project is setup for generating coverage files? Try `slather setup your/project.xcodeproj`" if dir.nil?
  dir
end

# Strip the final ".ext" component of a file name ("Foo.app" -> "Foo").
def remove_extension(path)
  path.split(".")[0..-2].join(".")
end

# Name of the project's first product; derived from the product path when
# the "name" attribute is absent.
def first_product_name
  first_product = self.products.first
  first_product.name || remove_extension(first_product.path)
end
# Absolute path of the Coverage.profdata file inside the coverage
# directory, or nil when none exists.
def profdata_file
  coverage_dir = self.profdata_coverage_dir
  if coverage_dir == nil
    raise StandardError, "No coverage directory found. Please make sure the \"Code Coverage\" checkbox is enabled in your scheme's Test action or the build_directory property is set."
  end

  found = Dir["#{coverage_dir}/**/Coverage.profdata"].first
  return nil if found == nil
  File.expand_path(found)
end
private :profdata_file
# The executable inside a .app bundle carries the bundle's base name.
def find_binary_file_for_app(app_bundle_file)
  executable = Pathname.new(app_bundle_file).basename.to_s.gsub(".app", "")
  Dir["#{app_bundle_file}/**/#{executable}"].first
end
# The dylib inside a .framework bundle sits at the top level and carries
# the bundle's base name.
def find_binary_file_for_dynamic_lib(framework_bundle_file)
  executable = Pathname.new(framework_bundle_file).basename.to_s.gsub(".framework", "")
  "#{framework_bundle_file}/#{executable}"
end
# The test binary inside a .xctest bundle carries the bundle's base name.
def find_binary_file_for_static_lib(xctest_bundle_file)
  executable = Pathname.new(xctest_bundle_file).basename.to_s.gsub(".xctest", "")
  Dir["#{xctest_bundle_file}/**/#{executable}"].first
end
# Run `xcrun llvm-cov show` against the profdata file and the binary and
# return its raw output (possibly invalid UTF-8 — see
# profdata_llvm_cov_output for the sanitized variant).
# @raise [StandardError] when the profdata or binary file is missing
def unsafe_profdata_llvm_cov_output
  profdata_file_arg = profdata_file
  if profdata_file_arg == nil
    raise StandardError, "No Coverage.profdata files found. Please make sure the \"Code Coverage\" checkbox is enabled in your scheme's Test action or the build_directory property is set."
  end

  if self.binary_file == nil
    raise StandardError, "No binary file found."
  end

  llvm_cov_args = %W(show -instr-profile #{profdata_file_arg} #{self.binary_file})
  `xcrun llvm-cov #{llvm_cov_args.shelljoin}`
end
private :unsafe_profdata_llvm_cov_output
# llvm-cov output re-encoded as UTF-8; invalid or undefined bytes are
# replaced so later string processing cannot raise on bad encoding.
def profdata_llvm_cov_output
  unsafe_profdata_llvm_cov_output.encode!('UTF-8', 'binary', :invalid => :replace, undef: :replace)
end
private :profdata_llvm_cov_output
# For each source path keep only the candidate with the best line coverage.
def dedupe(coverage_files)
  grouped = coverage_files.group_by(&:source_file_pathname)
  grouped.values.map { |candidates| candidates.max_by(&:percentage_lines_tested) }
end
private :dedupe
# Name of the per-project slather configuration file.
def self.yml_filename
  ".slather.yml"
end
# Parse .slather.yml at most once; {} when the file does not exist.
def self.yml
  return @yml if @yml
  @yml = File.exist?(yml_filename) ? YAML.load_file(yml_filename) : {}
end
# Resolve every setting from its explicit value, .slather.yml, or default.
def configure
  %w(build_directory ignore_list ci_service coverage_access_token
     coverage_service source_directory output_directory input_format
     scheme binary_file).each { |setting| send("configure_#{setting}") }

  if self.verbose_mode
    puts "\nProcessing coverage file: #{profdata_file}"
    puts "Against binary file: #{self.binary_file}\n\n"
  end
end
# Build directory: explicit setting, then .slather.yml, then DerivedData.
def configure_build_directory
  self.build_directory ||= self.class.yml["build_directory"] || derived_data_path
end
# Source directory from .slather.yml unless already set explicitly.
def configure_source_directory
  self.source_directory ||= self.class.yml["source_directory"] if self.class.yml["source_directory"]
end
# Output directory from .slather.yml unless already set explicitly.
def configure_output_directory
  self.output_directory ||= self.class.yml["output_directory"] if self.class.yml["output_directory"]
end
# Ignore list from .slather.yml, normalized to a flat Array.
def configure_ignore_list
  self.ignore_list ||= [(self.class.yml["ignore"] || [])].flatten
end
# CI service from .slather.yml; defaults to :travis_ci.
def configure_ci_service
  self.ci_service ||= (self.class.yml["ci_service"] || :travis_ci)
end
# Input format from .slather.yml; the input_format= writer converts a nil
# value into its "auto" default.
def configure_input_format
  self.input_format ||= self.class.yml["input_format"] || input_format
end
# Set the coverage input format; nil means "auto", which selects gcov
# before Xcode 7 and profdata from Xcode 7 on.
# @raise [StandardError] for unsupported formats
def input_format=(format)
  format ||= "auto"
  unless %w(gcov profdata auto).include?(format)
    raise StandardError, "Only supported input formats are gcov, profdata or auto"
  end

  @input_format = if format == "auto"
                    Slather.xcode_version[0] < 7 ? "gcov" : "profdata"
                  else
                    format
                  end
end
# Scheme from .slather.yml unless already set explicitly.
def configure_scheme
  self.scheme ||= self.class.yml["scheme"] if self.class.yml["scheme"]
end
# Normalize the CI service to a Symbol; nil/false pass through unchanged.
def ci_service=(service)
  @ci_service = service ? service.to_sym : service
end
# Coverage service from .slather.yml; defaults to terminal output.
def configure_coverage_service
  self.coverage_service ||= (self.class.yml["coverage_service"] || :terminal)
end
# Access token: environment variable first, then .slather.yml, then "".
def configure_coverage_access_token
  self.coverage_access_token ||= (ENV["COVERAGE_ACCESS_TOKEN"] || self.class.yml["coverage_access_token"] || "")
end
# Set the coverage output service and mix the matching output module into
# this instance.
# @param service [String, Symbol, nil] terminal, coveralls, hardcover,
#   gutter_json, cobertura_xml or html
# @raise [ArgumentError] for unknown services
def coverage_service=(service)
  service = service && service.to_sym
  case service
  when :coveralls
    extend(Slather::CoverageService::Coveralls)
  when :hardcover
    extend(Slather::CoverageService::Hardcover)
  when :terminal
    extend(Slather::CoverageService::SimpleOutput)
  when :gutter_json
    extend(Slather::CoverageService::GutterJsonOutput)
  when :cobertura_xml
    extend(Slather::CoverageService::CoberturaXmlOutput)
  when :html
    extend(Slather::CoverageService::HtmlOutput)
  else
    raise ArgumentError, "`#{coverage_service}` is not a valid coverage service. Try `terminal`, `coveralls`, `gutter_json`, `cobertura_xml` or `html`"
  end
  @coverage_service = service
end
# Resolve the binary that llvm-cov reads (profdata format only).
def configure_binary_file
  if self.input_format == "profdata"
    self.binary_file ||= self.class.yml["binary_file"] || File.expand_path(find_binary_file)
  end
end
# Locate the product binary to feed llvm-cov with, based on the bundles
# found next to the .xctest bundle inside the coverage directory.
# @return [String, nil] path of the binary file
# @raise [StandardError] when no .xctest bundle is found
def find_binary_file
  xctest_bundle = Dir["#{profdata_coverage_dir}/**/*.xctest"].reject { |bundle|
    # Skip UI-test runner plug-ins
    bundle.include? "-Runner.app/PlugIns/"
  }.first
  raise StandardError, "No product binary found in #{profdata_coverage_dir}. Are you sure your project is setup for generating coverage files? Try `slather setup your/project.xcodeproj`" unless xctest_bundle != nil

  # Find the matching binary file
  search_for = self.binary_basename || self.class.yml["binary_basename"] || '*'
  xctest_bundle_file_directory = Pathname.new(xctest_bundle).dirname
  app_bundle = Dir["#{xctest_bundle_file_directory}/#{search_for}.app"].first
  dynamic_lib_bundle = Dir["#{xctest_bundle_file_directory}/#{search_for}.framework"].first

  if app_bundle != nil
    find_binary_file_for_app(app_bundle)
  elsif dynamic_lib_bundle != nil
    find_binary_file_for_dynamic_lib(dynamic_lib_bundle)
  else
    find_binary_file_for_static_lib(xctest_bundle)
  end
end
end
end
Fixes how profdata_coverage_dir is created.
The previous implementation of this method was assuming that
the first product always has name, which doesn't always happen.
The documentation of PBXFileReference says that the 'name'
attribute is "often not present.". For more info check
https://github.com/CocoaPods/Xcodeproj/blob/master/lib/xcodeproj/project/object/file_reference.rb#L13
If the name attribute is not present the profdata_coverage_dir
created was the first child of the build directory (which by
default is the derived data directory). This is wrong.
Also the build directory was being concatenated using plain
string manipulation which led to path of the form:
"~/Library/Developer/Xcode/DerivedData//**/PRODUCT_NAME".
This is why File.join is used to join the build directory path
with '/**/PRODUCT_NAME' to accommodate the case of the build directory
already ending with '/'.
require 'fileutils'
require 'xcodeproj'
require 'json'
require 'yaml'
require 'shellwords'
module Xcodeproj
  class Project
    # Enable coverage generation on every build configuration of the project.
    # With :auto the format is chosen by Xcode version: gcov before Xcode 7,
    # clang (profdata) from Xcode 7 on.
    # @param format [Symbol] :gcov, :clang or :auto
    # @raise [StandardError] when an unsupported format is given
    def slather_setup_for_coverage(format = :auto)
      unless [:gcov, :clang, :auto].include?(format)
        raise StandardError, "Only supported formats for setup are gcov, clang or auto"
      end

      if format == :auto
        format = Slather.xcode_version[0] < 7 ? :gcov : :clang
      end

      build_configurations.each do |build_configuration|
        if format == :clang
          build_configuration.build_settings["CLANG_ENABLE_CODE_COVERAGE"] = "YES"
        else
          build_configuration.build_settings["GCC_INSTRUMENT_PROGRAM_FLOW_ARCS"] = "YES"
          build_configuration.build_settings["GCC_GENERATE_TEST_COVERAGE_FILES"] = "YES"
        end
      end

      # Patch xcschemes too
      if format == :clang
        if Gem::Requirement.new('~> 0.27') =~ Gem::Version.new(Xcodeproj::VERSION)
          # @todo This will require to bump the xcodeproj dependency to ~> 0.27
          # (which would require to bump cocoapods too)
          schemes_path = Xcodeproj::XCScheme.shared_data_dir(self.path)
          Xcodeproj::Project.schemes(self.path).each do |scheme_name|
            xcscheme_path = "#{schemes_path + scheme_name}.xcscheme"
            xcscheme = Xcodeproj::XCScheme.new(xcscheme_path)
            xcscheme.test_action.xml_element.attributes['codeCoverageEnabled'] = 'YES'
            xcscheme.save_as(self.path, scheme_name)
          end
        else
          # @todo In the meantime, simply inform the user to do it manually
          puts %Q(Ensure you enabled "Gather coverage data" in each of your scheme's Test action)
        end
      end
    end
  end
end
module Slather
class Project < Xcodeproj::Project
attr_accessor :build_directory, :ignore_list, :ci_service, :coverage_service, :coverage_access_token, :source_directory,
:output_directory, :xcodeproj, :show_html, :verbose_mode, :input_format, :scheme, :binary_file, :binary_basename
alias_method :setup_for_coverage, :slather_setup_for_coverage
# Open a project file and remember which path it was opened from.
def self.open(xcodeproj)
  super.tap { |project| project.xcodeproj = xcodeproj }
end
# Default Xcode DerivedData location. The trailing slash is kept because
# callers concatenate further path components directly onto this string.
def derived_data_path
  File.join(File.expand_path('~'), 'Library', 'Developer', 'Xcode', 'DerivedData') + '/'
end
private :derived_data_path
# Return coverage file objects for the configured input format.
def coverage_files
  self.input_format == "profdata" ? profdata_coverage_files : gcov_coverage_files
end
private :coverage_files
# Collect gcov coverage objects for every *.gcno under the build directory.
# @return [Array] deduplicated coverage file objects
# @raise [StandardError] when no usable coverage files exist
def gcov_coverage_files
  coverage_files = Dir["#{build_directory}/**/*.gcno"].map do |file|
    coverage_file = coverage_file_class.new(self, file)
    # If there's no source file for this gcno, it probably belongs to another project.
    coverage_file.source_file_pathname && !coverage_file.ignored? ? coverage_file : nil
  end.compact

  if coverage_files.empty?
    raise StandardError, "No coverage files found. Are you sure your project is setup for generating coverage files? Try `slather setup your/project.xcodeproj`"
  else
    dedupe(coverage_files)
  end
end
private :gcov_coverage_files
# Build coverage objects from llvm-cov output; the tool emits one section
# per source file, separated by a blank line.
def profdata_coverage_files
  profdata_llvm_cov_output.split("\n\n").each_with_object([]) do |source, files|
    coverage_file = coverage_file_class.new(self, source)
    files << coverage_file unless coverage_file.ignored?
  end
end
private :profdata_coverage_files
# Strip the last dot-separated component of a file name
# ("Foo.app" -> "Foo", "a.b.c" -> "a.b", "noext" -> "").
def remove_extension(path)
  pieces = path.split(".")
  pieces.pop
  pieces.join(".")
end
# Name of the project's first product. The PBXFileReference "name"
# attribute is often not present in project files; fall back to deriving
# the name from the product path in that case.
def first_product_name
  first_product = self.products.first
  # If name is not available it computes it using
  # the path by dropping the 'extension' of the path.
  first_product.name || remove_extension(first_product.path)
end
# Locate the directory holding Coverage.profdata, either by scheme name or
# by the first product's name. File.join avoids a "//" glob when
# build_directory already ends with a slash.
#
# File.exist? replaces File.exists?, which is deprecated and removed in
# Ruby 3.2.
# @return [String] coverage directory path
# @raise [StandardError] when the build directory or coverage dir is missing
def profdata_coverage_dir
  raise StandardError, "The specified build directory (#{self.build_directory}) does not exist" unless File.exist?(self.build_directory)
  dir = if self.scheme
          Dir[File.join("#{build_directory}", "/**/CodeCoverage/#{self.scheme}")].first
        else
          Dir[File.join("#{build_directory}", "/**/#{first_product_name}")].first
        end
  raise StandardError, "No coverage directory found. Are you sure your project is setup for generating coverage files? Try `slather setup your/project.xcodeproj`" if dir.nil?
  dir
end
# Absolute path to the Coverage.profdata file inside the coverage
# directory, or nil when no profdata has been generated yet.
#
# @raise [StandardError] when the coverage directory cannot be found
def profdata_file
  dir = profdata_coverage_dir
  if dir.nil?
    raise StandardError, "No coverage directory found. Please make sure the \"Code Coverage\" checkbox is enabled in your scheme's Test action or the build_directory property is set."
  end
  file = Dir["#{dir}/**/Coverage.profdata"].first
  return nil if file.nil?
  File.expand_path(file)
end
private :profdata_file
# Locates the executable inside a .app bundle: the file whose name is
# the bundle's basename with the ".app" suffix removed.
def find_binary_file_for_app(app_bundle_file)
  binary_name = Pathname.new(app_bundle_file).basename.to_s.gsub(".app", "")
  Dir["#{app_bundle_file}/**/#{binary_name}"].first
end
# Path of the dylib inside a .framework bundle; frameworks keep their
# binary at the top level, named after the bundle.
def find_binary_file_for_dynamic_lib(framework_bundle_file)
  binary_name = Pathname.new(framework_bundle_file).basename.to_s.gsub(".framework", "")
  "#{framework_bundle_file}/#{binary_name}"
end
# Locates the test binary inside a .xctest bundle: the file whose name
# is the bundle's basename with the ".xctest" suffix removed.
def find_binary_file_for_static_lib(xctest_bundle_file)
  binary_name = Pathname.new(xctest_bundle_file).basename.to_s.gsub(".xctest", "")
  Dir["#{xctest_bundle_file}/**/#{binary_name}"].first
end
# Shells out to `xcrun llvm-cov show` for the configured binary and
# profdata file and returns its raw stdout. Raises when either input is
# missing. The output may contain invalid UTF-8; callers should use
# #profdata_llvm_cov_output for the sanitized form.
def unsafe_profdata_llvm_cov_output
profdata_file_arg = profdata_file
if profdata_file_arg == nil
raise StandardError, "No Coverage.profdata files found. Please make sure the \"Code Coverage\" checkbox is enabled in your scheme's Test action or the build_directory property is set."
end
if self.binary_file == nil
raise StandardError, "No binary file found."
end
llvm_cov_args = %W(show -instr-profile #{profdata_file_arg} #{self.binary_file})
`xcrun llvm-cov #{llvm_cov_args.shelljoin}`
end
private :unsafe_profdata_llvm_cov_output
# llvm-cov output re-encoded to UTF-8 with invalid/undefined byte
# sequences replaced, so downstream string parsing cannot raise
# encoding errors.
def profdata_llvm_cov_output
unsafe_profdata_llvm_cov_output.encode!('UTF-8', 'binary', :invalid => :replace, undef: :replace)
end
private :profdata_llvm_cov_output
# Collapses duplicate coverage entries for the same source file, keeping
# the entry with the highest percentage of tested lines (first wins on
# ties, matching max_by semantics).
def dedupe(coverage_files)
  best = {}
  coverage_files.each do |cf|
    key = cf.source_file_pathname
    current = best[key]
    best[key] = cf if current.nil? || cf.percentage_lines_tested > current.percentage_lines_tested
  end
  best.values
end
private :dedupe
# Name of the per-project configuration file, looked up relative to the
# current working directory.
def self.yml_filename
'.slather.yml'
end
# Memoized contents of .slather.yml, or an empty hash when the file is
# absent.
def self.yml
@yml ||= File.exist?(yml_filename) ? YAML.load_file(yml_filename) : {}
end
# Resolves every configurable attribute, giving precedence to values
# already set by the caller, then .slather.yml, then defaults.
# configure_input_format must run before configure_binary_file, which
# only acts when the format is "profdata".
def configure
configure_build_directory
configure_ignore_list
configure_ci_service
configure_coverage_access_token
configure_coverage_service
configure_source_directory
configure_output_directory
configure_input_format
configure_scheme
configure_binary_file
if self.verbose_mode
puts "\nProcessing coverage file: #{profdata_file}"
puts "Against binary file: #{self.binary_file}\n\n"
end
end
# Each configure_* helper below fills one attribute: caller-set value
# first, then .slather.yml, then a default (where one exists).
def configure_build_directory
self.build_directory ||= self.class.yml["build_directory"] || derived_data_path
end
# Only applied when present in the yml; otherwise left unset.
def configure_source_directory
self.source_directory ||= self.class.yml["source_directory"] if self.class.yml["source_directory"]
end
def configure_output_directory
self.output_directory ||= self.class.yml["output_directory"] if self.class.yml["output_directory"]
end
# Accepts a single string or an array in the yml; always stores an array.
def configure_ignore_list
self.ignore_list ||= [(self.class.yml["ignore"] || [])].flatten
end
def configure_ci_service
self.ci_service ||= (self.class.yml["ci_service"] || :travis_ci)
end
# NOTE(review): the trailing `|| input_format` re-reads the getter, which
# can only be nil at this point - looks redundant; confirm before removing.
def configure_input_format
self.input_format ||= self.class.yml["input_format"] || input_format
end
# Validates and stores the input format. nil is treated as "auto",
# which selects gcov for Xcode 6 and earlier and profdata for Xcode 7+
# (via Slather.xcode_version).
#
# @raise [StandardError] for any value other than gcov/profdata/auto
def input_format=(format)
format ||= "auto"
unless %w(gcov profdata auto).include?(format)
raise StandardError, "Only supported input formats are gcov, profdata or auto"
end
if format == "auto"
@input_format = Slather.xcode_version[0] < 7 ? "gcov" : "profdata"
else
@input_format = format
end
end
# Only applied when a scheme is present in the yml; otherwise left unset.
def configure_scheme
self.scheme ||= self.class.yml["scheme"] if self.class.yml["scheme"]
end
# Stores the CI service name as a symbol; nil/false pass through as-is.
def ci_service=(service)
  @ci_service = service ? service.to_sym : service
end
# Defaults to the simple terminal output service.
def configure_coverage_service
self.coverage_service ||= (self.class.yml["coverage_service"] || :terminal)
end
# The environment variable wins over the yml; defaults to an empty
# string so callers can treat the token as always present.
def configure_coverage_access_token
self.coverage_access_token ||= (ENV["COVERAGE_ACCESS_TOKEN"] || self.class.yml["coverage_access_token"] || "")
end
# Selects the reporting backend and mixes the matching CoverageService
# module into this instance.
#
# @param service [String, Symbol, nil] backend name
# @raise [ArgumentError] for unknown names. The message now interpolates
#   the value that was passed in; previously it interpolated the
#   still-unset `coverage_service` attribute and always printed blank.
def coverage_service=(service)
  service = service && service.to_sym
  case service
  when :coveralls
    extend(Slather::CoverageService::Coveralls)
  when :hardcover
    extend(Slather::CoverageService::Hardcover)
  when :terminal
    extend(Slather::CoverageService::SimpleOutput)
  when :gutter_json
    extend(Slather::CoverageService::GutterJsonOutput)
  when :cobertura_xml
    extend(Slather::CoverageService::CoberturaXmlOutput)
  when :html
    extend(Slather::CoverageService::HtmlOutput)
  else
    raise ArgumentError, "`#{service}` is not a valid coverage service. Try `terminal`, `coveralls`, `gutter_json`, `cobertura_xml` or `html`"
  end
  @coverage_service = service
end
# Resolves the binary to feed llvm-cov; only meaningful for the
# profdata input format, so gcov runs leave binary_file untouched.
def configure_binary_file
if self.input_format == "profdata"
self.binary_file ||= self.class.yml["binary_file"] || File.expand_path(find_binary_file)
end
end
# Locates the product binary for llvm-cov by inspecting the coverage
# directory: find the .xctest bundle (ignoring UI-test runner plugins),
# then prefer a sibling .app, then a sibling .framework, and finally
# fall back to the binary inside the .xctest itself.
# binary_basename (CLI or yml) narrows the sibling search; '*' otherwise.
def find_binary_file
xctest_bundle = Dir["#{profdata_coverage_dir}/**/*.xctest"].reject { |bundle|
bundle.include? "-Runner.app/PlugIns/"
}.first
raise StandardError, "No product binary found in #{profdata_coverage_dir}. Are you sure your project is setup for generating coverage files? Try `slather setup your/project.xcodeproj`" unless xctest_bundle != nil
# Find the matching binary file
search_for = self.binary_basename || self.class.yml["binary_basename"] || '*'
xctest_bundle_file_directory = Pathname.new(xctest_bundle).dirname
app_bundle = Dir["#{xctest_bundle_file_directory}/#{search_for}.app"].first
dynamic_lib_bundle = Dir["#{xctest_bundle_file_directory}/#{search_for}.framework"].first
if app_bundle != nil
find_binary_file_for_app(app_bundle)
elsif dynamic_lib_bundle != nil
find_binary_file_for_dynamic_lib(dynamic_lib_bundle)
else
find_binary_file_for_static_lib(xctest_bundle)
end
end
end
end
|
module Snooper
##
# The library version for this module
#
# This should conform to SemVer. If this is changed it should be the only
# thing that changes in the commit.
VERSION = '0.1.0'
end
Bump Version
module Snooper
##
# The library version for this module
#
# This should conform to SemVer. If this is changed it should be the only
# thing that changes in the commit.
VERSION = '0.1.1'
end
module Soloist
# Gem version string (SemVer).
VERSION = "0.0.4"
end
bumping version to 0.0.5 - passing RACK_ENV - these should be configurable
module Soloist
# Gem version string (SemVer).
VERSION = "0.0.5"
end
|
# frozen_string_literal: true
require 'taglib'
module SouvlakiRS
module Tag
#
# retag a file fetched from web
#
# retag a file fetched from web.
# Reads the file's tags, rewrites the title to "<YYYYMMDD> <show name>"
# when it is missing/filename-like/forced, trims a redundant leading
# program name otherwise, then forces artist/album/year. Writes the tags
# back only when write_tags is true. Returns the tags hash.
def self.retag_file(file, def_album, def_artist, pub_date, write_tags, rewrite_title = true)
tags = audio_file_read_tags(file)
# prep the title - prepend the date to the album (show name) when
# 1. there's no title tag
# 2. the title tag equals the album title (show name)
# 3. the title tag looks like a file name w/ .mp3 extension
# 4. config forces it
if tags[:title].nil? ||
tags[:title] == def_album ||
(tags[:title].length.positive? && tags[:title].downcase.include?('mp3')) ||
rewrite_title
date = pub_date.strftime('%Y%m%d')
old_t = tags[:title] || ''
tags[:title] = "#{date} #{def_album}"
SouvlakiRS.logger.warn "Title ('#{old_t}') will be overwritten to '#{tags[:title]}'"
elsif tags[:title] && tags[:title].downcase.start_with?(def_album.downcase)
# title starts with program name - remove it to be less wordy and clean up leading -, :, or ws
tags[:title] = tags[:title][def_album.length..].gsub(/^[\sfor\-:]*/, '')
SouvlakiRS.logger.info "Trimmed title: '#{tags[:title]}'"
end
# override artist & album (program name) to our consistent one
tags[:artist] = def_artist
tags[:album] = def_album
# and set year because, why not
tags[:year] = pub_date.strftime('%Y').to_i
audio_file_write_tags(file, tags) if write_tags
tags
end
#
# tries to retag a user's file imported manually
#
# tries to retag a user's file imported manually.
# Returns the tags hash on success, nil when no title tag is present.
def self.retag_user_file(file, tags, def_album, def_artist = nil)
  # if there's no title set, do nothing. Return nil to indicate error
  if tags[:title].nil?
    SouvlakiRS.logger.error "No title tag set for #{file}"
    return nil
  end
  # if the title looks like a filename, remove the extension
  if tags[:title].downcase.end_with?('.mp3')
    SouvlakiRS.logger.warn "Title tag looks like a filename (#{file}) - removing extension from tag"
    # ".mp3" is four characters; the previous [0..-4] dropped only three
    # and left a trailing dot on the title.
    tags[:title] = tags[:title][0..-5]
  end
  # replace artist if specified
  tags[:artist] = def_artist if def_artist
  # force album (program name or type)
  tags[:album] = def_album
  audio_file_write_tags(file, tags)
  tags
end
#
# read tags from a file
#
# read tags from a file.
# Reads ID3v2 first, falling back to ID3v1 for title/artist only, and
# records the audio length (under :length) when available.
# NOTE(review): the `return tags` inside the open block exits the method
# early; the trailing `tags` presumably covers the case where TagLib
# never yields - confirm against taglib-ruby's File.open semantics.
def self.audio_file_read_tags(filepath)
tags = { title: nil, artist: nil, album: nil, year: nil }
TagLib::MPEG::File.open(filepath) do |file|
# Read basic attributes
id3v2 = file.id3v2_tag
if id3v2
SouvlakiRS.logger.info "ID3V2 title '#{id3v2.title}'"
tags[:title] = copy_tag(id3v2.title)
tags[:artist] = copy_tag(id3v2.artist)
tags[:album] = copy_tag(id3v2.album)
tags[:year] = id3v2.year if id3v2.year != 0
end
if tags[:title].nil? || tags[:artist].nil?
id3v1 = file.id3v1_tag
if id3v1
SouvlakiRS.logger.info "ID3V1 title '#{id3v1.title}'"
tags[:title] = copy_tag(id3v1.title) if tags[:title].nil?
tags[:artist] = copy_tag(id3v1.artist) if tags[:artist].nil?
end
end
tags[:length] = file.audio_properties.length if file.audio_properties
return tags
end
tags
end
# --------------------------------------------------------
# Returns a stripped copy of +tag+, or nil when the tag is missing or
# contains only whitespace.
def self.copy_tag(tag)
  return nil if tag.nil?
  stripped = tag.strip
  stripped.empty? ? nil : stripped
end
#
# tag a file
#
# tag a file.
# Writes album/artist/title/year to both ID3v1 and ID3v2 tags; album is
# always written, the others only when present. Returns true when the
# save succeeds, false (after logging) otherwise.
def self.audio_file_write_tags(filepath, tags)
status = TagLib::MPEG::File.open(filepath) do |file|
[file.id3v1_tag, file.id3v2_tag].each do |tag|
# Write basic attributes
tag.album = tags[:album]
tag.artist = tags[:artist] unless tags[:artist].nil?
tag.title = tags[:title] unless tags[:title].nil?
tag.year = tags[:year] unless tags[:year].nil?
end
file.save
end
return true if status
SouvlakiRS.logger.error "Failed to save tags for #{filepath}"
false
end
end
end
chore: comment out unused fn
# frozen_string_literal: true
require 'taglib'
module SouvlakiRS
module Tag
#
# retag a file fetched from web
def self.retag_file(file, def_album, def_artist, pub_date, write_tags, rewrite_title = true)
tags = audio_file_read_tags(file)
# prep the title - prepend the date to the album (show name) when
# 1. there's no title tag
# 2. the title tag equals the album title (show name)
# 3. the title tag looks like a file name w/ .mp3 extension
# 4. config forces it
if tags[:title].nil? ||
tags[:title] == def_album ||
(tags[:title].length.positive? && tags[:title].downcase.include?('mp3')) ||
rewrite_title
date = pub_date.strftime('%Y%m%d')
old_t = tags[:title] || ''
tags[:title] = "#{date} #{def_album}"
SouvlakiRS.logger.warn "Title ('#{old_t}') will be overwritten to '#{tags[:title]}'"
elsif tags[:title] && tags[:title].downcase.start_with?(def_album.downcase)
# title starts with program name - remove it to be less wordy and clean up leading -, :, or ws
tags[:title] = tags[:title][def_album.length..].gsub(/^[\sfor\-:]*/, '')
SouvlakiRS.logger.info "Trimmed title: '#{tags[:title]}'"
end
# override artist & album (program name) to our consistent one
tags[:artist] = def_artist
tags[:album] = def_album
# and set year because, why not
tags[:year] = pub_date.strftime('%Y').to_i
audio_file_write_tags(file, tags) if write_tags
tags
end
#
# tries to retag a user's file imported manually
# def self.retag_user_file(file, tags, def_album, def_artist = nil)
# # if there's no title set, do nothing. Return nil to indicate error
# if tags[:title].nil?
# SouvlakiRS.logger.error "No title tag set for #{file}"
# return nil
# end
# # if the title looks like a filename, remove the extension
# if tags[:title].downcase.end_with?('.mp3')
# SouvlakiRS.logger.warn "Title tag looks like a filename (#{file}) - removing extension from tag"
# tags[:title] = tags[:title][0..-4]
# end
# # replace artist if specified
# tags[:artist] = def_artist if def_artist
# # force album (program name or type)
# tags[:album] = def_album
# audio_file_write_tags(file, tags)
# tags
# end
#
# read tags from a file
def self.audio_file_read_tags(filepath)
tags = { title: nil, artist: nil, album: nil, year: nil }
TagLib::MPEG::File.open(filepath) do |file|
# Read basic attributes
id3v2 = file.id3v2_tag
if id3v2
SouvlakiRS.logger.info "ID3V2 title '#{id3v2.title}'"
tags[:title] = copy_tag(id3v2.title)
tags[:artist] = copy_tag(id3v2.artist)
tags[:album] = copy_tag(id3v2.album)
tags[:year] = id3v2.year if id3v2.year != 0
end
if tags[:title].nil? || tags[:artist].nil?
id3v1 = file.id3v1_tag
if id3v1
SouvlakiRS.logger.info "ID3V1 title '#{id3v1.title}'"
tags[:title] = copy_tag(id3v1.title) if tags[:title].nil?
tags[:artist] = copy_tag(id3v1.artist) if tags[:artist].nil?
end
end
tags[:length] = file.audio_properties.length if file.audio_properties
return tags
end
tags
end
# --------------------------------------------------------
# Strips +tag+ and returns it, or nil for a missing/whitespace-only tag.
def self.copy_tag(tag)
  return nil if tag.nil?
  cleaned = tag.strip
  cleaned.length.positive? ? cleaned : nil
end
#
# tag a file
#
# tag a file.
# Writes album (always) and artist/title/year (when present) to both
# ID3v1 and ID3v2 tags. Returns true on successful save, false after
# logging an error otherwise.
def self.audio_file_write_tags(filepath, tags)
status = TagLib::MPEG::File.open(filepath) do |file|
[file.id3v1_tag, file.id3v2_tag].each do |tag|
# Write basic attributes
tag.album = tags[:album]
tag.artist = tags[:artist] unless tags[:artist].nil?
tag.title = tags[:title] unless tags[:title].nil?
tag.year = tags[:year] unless tags[:year].nil?
end
file.save
end
return true if status
SouvlakiRS.logger.error "Failed to save tags for #{filepath}"
false
end
end
end
|
# frozen_string_literal: true
##
# Set the version (needed for Mercenary -v)
module Spaarti
# Gem release version (SemVer); bump here and nowhere else.
VERSION = '1.0.4'.freeze
end
update version
# frozen_string_literal: true
##
# Set the version (needed for Mercenary -v)
module Spaarti
# Gem release version (SemVer); bump here and nowhere else.
VERSION = '1.0.5'.freeze
end
|
require 'spiderfw/env'
require 'rubygems'
require 'find'
require 'fileutils'
require 'pathname'
require 'spiderfw/autoload'
require 'spiderfw/requires'
require 'spiderfw/version'
module Spider
@apps = {}; @apps_by_path = {}; @apps_by_short_name = {}; @loaded_apps = {}
@paths = {}
@resource_types = {}
@spawner = nil
class << self
# Everything here must be thread safe!!!
# An instance of the shared logger.
attr_reader :logger
# An hash of registered Spider::App, indexed by name.
attr_reader :apps
# An hash of registred Spider::App modules, indexed by path.
attr_reader :apps_by_path
# An hash of registred Spider::App modules, indexed by short name (name without namespace).
attr_reader :apps_by_short_name
# The current runmode (test, devel or production).
attr_reader :runmode
# An hash of runtime paths.
# :root:: The base runtime path.
# :apps:: Apps folder.
# :core_apps:: Spider apps folder.
# :config:: Config folder.
# :views:: Runtime views folder.
# :var:: Var folder. Must be writable. Contains cache, logs, and other files written by the server.
# :data:: Data folder. Holds static and dynamic files. Some subdirs may have to be writable.
# :certs:: Certificates folder.
# ::tmp:: Temp folder. Must be writable.
# ::log:: Log location.
attr_reader :paths
# Current Home
attr_reader :home
# Registered resource types
attr_reader :resource_types
# Main site
attr_accessor :site
attr_accessor :spawner
# Initializes the runtime environment. This method is called when spider is required. Apps may implement
# an app_init method, that will be called after Spider::init is done.
# Initializes the runtime environment. This method is called when spider
# is required. Apps may implement an app_init method, that will be
# called after Spider::init is done.
#
# @param force [Boolean] re-run initialization even if already done
def init(force=false)
  return if @init_done && !force
  init_base(force)
  start_loggers
  @paths[:spider] = $SPIDER_PATH
  if $SPIDER_CONFIG_SETS
    $SPIDER_CONFIG_SETS.each { |set| @configuration.include_set(set) }
  end
  init_file = File.join($SPIDER_RUN_PATH, 'init.rb')
  ENV['BUNDLE_GEMFILE'] ||= File.join($SPIDER_RUN_PATH, 'Gemfile')
  # File.exists? was removed in Ruby 3.2; File.exist? is the supported form.
  require 'bundler/setup' if File.exist?(ENV['BUNDLE_GEMFILE'])
  if File.exist?(init_file)
    @home.instance_eval(File.read(init_file), init_file)
  end
  @apps.each do |name, mod|
    mod.app_init if mod.respond_to?(:app_init)
  end
  GetText::LocalePath.memoize_clear # since new paths have been added to GetText
  @apps.each do |name, mod|
    if File.directory?(File.join(mod.path, 'po'))
      GetText.bindtextdomain(mod.short_name)
    end
  end
  @init_done = true
end
def init_done?
@init_done
end
def init_base(force=false)
return if @init_base_done && !force
@apps_to_load = []
@root = $SPIDER_RUN_PATH
@home = Home.new(@root)
require 'spiderfw/config/options/spider.rb'
setup_paths(@root)
all_apps = find_all_apps
all_apps.each do |path|
load_configuration(File.join(path, 'config'))
end
@runmode = nil
self.runmode = $SPIDER_RUNMODE if $SPIDER_RUNMODE
load_configuration File.join($SPIDER_PATH, 'config')
load_configuration File.join(@root, 'config')
Locale.default = Spider.conf.get('i18n.default_locale')
@init_base_done = true
end
#
# def stop
# @apps.each do |name, mod|
# mod.app_stop if mod.respond_to?(:app_stop)
# end
# end
# Invoked before a server is started. Apps may implement the app_startup method, that will be called.
# Invoked before a server is started. Apps may implement the app_startup
# method, that will be called.
#
# @raise [RuntimeError] when not started from the project root
def startup
  # File.exists? was removed in Ruby 3.2; File.exist? is the supported form.
  unless File.exist?(File.join(Spider.paths[:root], 'init.rb'))
    raise "The server must be started from the root directory"
  end
  FileUtils.mkdir_p(Spider.paths[:tmp])
  FileUtils.mkdir_p(Spider.paths[:var])
  if Spider.conf.get('template.cache.reload_on_restart')
    FileUtils.touch("#{Spider.paths[:tmp]}/templates_reload.txt")
  end
  unless Spider.runmode == 'test'
    if domain = Spider.conf.get('site.domain')
      ssl_port = Spider.conf.get('site.ssl') ? Spider.conf.get('site.ssl_port') : nil
      Spider.site = Site.new(domain, Spider.conf.get('site.port'), ssl_port)
    elsif File.exist?(Site.cache_file)
      Spider.site = Site.load_cache
    end
  end
  mutex_requests! if Spider.conf.get('request.mutex')
  @apps.each do |name, mod|
    mod.app_startup if mod.respond_to?(:app_startup)
  end
  @startup_done = true
end
def startup_done?
@startup_done
end
# Invoked when a server is shutdown. Apps may implement the app_shutdown method, that will be called.
def shutdown
return unless Thread.current == Thread.main
Debugger.post_mortem = false if Object.const_defined?(:Debugger) && Debugger.post_mortem?
@apps.each do |name, mod|
mod.app_shutdown if mod.respond_to?(:app_shutdown)
end
end
def current
Spider::Request.current
end
def request_started
@request_mutex.lock if (@request_mutex)
Spider::Request.current = {
:_start => Time.now
}
end
def request_finished
# Spider.logger.info("Done in #{(Time.now - Spider::Request.current[:_start])*1000}ms")
Spider::Request.reset_current
@request_mutex.unlock if (@request_mutex)
end
def mutex_requests!
@request_mutex = Mutex.new
end
def request_mutex
@request_mutex
end
def request_mutex=(val)
@request_mutex = val
end
# Closes any open loggers, and opens new ones based on configured settings.
def start_loggers(force=false)
return if @logger && !force
@logger ||= Spider::Logger
@logger.close_all
@logger.open(STDERR, Spider.conf.get('log.console')) if Spider.conf.get('log.console')
begin
FileUtils.mkdir(@paths[:log]) unless File.exist?(@paths[:log])
rescue => exc
@logger.error("Unable to create log folder") if File.exist?(File.dirname(@paths[:log]))
end
if @paths[:log] && File.exist?(@paths[:log])
@logger.open(File.join(@paths[:log], 'error.log'), :ERROR) if Spider.conf.get('log.errors')
if Spider.conf.get('log.level')
@logger.open(File.join(@paths[:log], Spider.conf.get('log.file_name')), Spider.conf.get('log.level'))
end
end
if RUBY_PLATFORM =~ /java/ && Spider.conf.get('log.apache_commons')
begin
require 'spiderfw/utils/loggers/apache_commons_logger'
l = Spider::Loggers::ApacheCommonsLogger.new
@logger.add('apache_commons_logger', l)
rescue NameError
$stderr << "Warning: Unable to load Java class org.apache.commons.logging.LogFactory\n"
end
end
$LOG = @logger
Object.const_set(:LOGGER, @logger)
end
# Bang variant of #start_loggers: forces loggers to be closed and
# reopened even when already started. Previously this passed false,
# which made it behave identically to the non-bang form.
def start_loggers!
  start_loggers(true)
end
# Sets the default paths (see #paths).
# Sets the default paths (see #paths): project subdirectories under
# +root+, plus the core apps folder under $SPIDER_PATH.
def setup_paths(root)
  config_dir = File.join(root, 'config')
  var_dir = File.join(root, 'var')
  @paths[:root] = root
  @paths[:apps] = File.join(root, 'apps')
  @paths[:core_apps] = File.join($SPIDER_PATH, 'apps')
  @paths[:config] = config_dir
  @paths[:layouts] = File.join(root, 'layouts')
  @paths[:var] = var_dir
  @paths[:certs] = File.join(config_dir, 'certs')
  @paths[:tmp] = File.join(root, 'tmp')
  @paths[:data] = File.join(root, 'data')
  @paths[:log] = File.join(var_dir, 'log')
end
# Finds an app by name, looking in paths[:apps] and paths[:core_apps]. Returns the found path.
# Finds an app by name, looking in paths[:apps] then paths[:core_apps].
# Returns the first candidate directory containing _init.rb, or nil.
def find_app(name)
  candidates = [@paths[:apps], @paths[:core_apps]].map { |base| File.join(base, name) }
  candidates.find { |candidate| File.exist?(File.join(candidate, '_init.rb')) }
end
def find_apps(name)
[@paths[:apps], @paths[:core_apps]].each do |base|
test = File.join(base, name)
if File.exist?(test)
return find_apps_in_folder(test)
end
end
end
def load_app(name)
paths = find_apps(name)
paths.each do |path|
load_app_at_path(path)
end
end
def load_app_at_path(path)
return if @loaded_apps[path]
relative_path = path
if path.index(Spider.paths[:root])
home = Pathname.new(Spider.paths[:root])
pname = Pathname.new(path)
relative_path = pname.relative_path_from(home).to_s
end
@loaded_apps[path] = true
last_name = File.basename(path)
app_files = ['_init.rb', last_name+'.rb', 'cmd.rb']
app_files.each{ |f| require File.join(relative_path, f) if File.exist?(File.join(path, f)) }
GetText::LocalePath.add_default_rule(File.join(path, "data/locale/%{lang}/LC_MESSAGES/%{name}.mo"))
end
def load_apps(*l)
l.each do |app|
load_app(app)
end
end
def load_all_apps
find_all_apps.each do |path|
load_app_at_path(path)
end
end
def find_all_apps
app_paths = []
Find.find(@paths[:core_apps], @paths[:apps]) do |path|
if (File.basename(path) == '_init.rb')
app_paths << File.dirname(path)
Find.prune
elsif File.exist?(File.join(path, '_init.rb'))
app_paths << path
Find.prune
end
end
return app_paths
end
# Recursively collects app directories (those containing _init.rb)
# under +path+. Returns nil when +path+ is not a directory.
def find_apps_in_folder(path)
  return unless File.directory?(path)
  return [path] if File.exist?(File.join(path, '_init.rb'))
  found = []
  Dir.new(path).each do |f|
    next if f[0].chr == '.'
    found_path = File.join(path, f)
    if File.exist?(File.join(found_path, '/_init.rb'))
      found << found_path
    else
      # The recursive call returns nil for plain files; guard against
      # `found += nil` (TypeError), which crashed on any stray file.
      sub_apps = find_apps_in_folder(found_path)
      found += sub_apps if sub_apps
    end
  end
  return found
end
def add_app(mod)
@apps[mod.name] = mod
@apps_by_path[mod.relative_path] = mod
@apps_by_short_name[mod.short_name] = mod
end
# True when +path_or_name+ matches a registered app's relative path or
# short name; always returns an exact true/false.
def app?(path_or_name)
  registered = @apps_by_path[path_or_name] || @apps_by_short_name[path_or_name]
  registered ? true : false
end
# Loads configuration from +path+: requires options.rb when present,
# then feeds every YAML file to the configuration object. Invalid YAML
# is reported (logged when a logger exists) rather than raised; other
# configuration errors propagate.
def load_configuration(path)
  return unless File.directory?(path)
  opts = File.join(path, 'options.rb')
  require opts if File.exist?(opts)
  Dir.new(path).each do |f|
    # Object#untaint was removed in Ruby 3.2; keep the legacy call only
    # where it still exists. FIXME: security parse
    f.untaint if f.respond_to?(:untaint)
    case f
    when /^\./
      next
    when /\.(yaml|yml)$/
      begin
        @configuration.load_yaml(File.join(path, f))
      rescue ConfigurationException => exc
        if exc.type == :yaml
          err = "Configuration file #{path+f} is not valid YAML"
          if @logger
            @logger.error(err)
          else
            puts err
          end
        else
          raise
        end
      end
    end
  end
end
# Returns the default controller.
def controller
require 'spiderfw/controller/spider_controller'
SpiderController
end
# Sets routes on the #controller for the given apps.
def route_apps(*apps)
options = {}
if apps[-1].is_a?(Hash)
options = apps.pop
end
@route_apps = apps.empty? ? true : apps
if (@route_apps)
apps_to_route = @route_apps == true ? self.apps.values : @route_apps.map{ |name| self.apps[name] }
end
if options[:except]
apps_to_route.reject{ |app| options[:except].include?(app) }
end
if (apps_to_route)
apps_to_route.each{ |app| @home.controller.route_app(app) }
end
end
# Adds a resource type
# name must be a symbol, extensions an array of extensions (strings, without the dot) for this resource.
# Options may be:
# :extensions an array of possible extensions. If given, find_resource will try appending the extensions
# when looking for the file.
# :path the path of the resource relative to resource root; if not given, name will be used.
#
# Adds a resource type.
# name must be a symbol; options may include :extensions (array of
# candidate file extensions, tried by find_resource) and :path (location
# relative to the resource root, defaulting to the name itself).
def register_resource_type(name, options={})
  entry = {
    :extensions => options[:extensions],
    :path => (options[:path] || name.to_s)
  }
  @resource_types[name] = entry
end
Spider.register_resource_type(:views, :extensions => ['shtml'])
def path
$SPIDER_PATH
end
def relative_path
'/spider'
end
# Returns the full path of a resource.
# resource_type may be :views, or any other type registered with #register_resource_type
# path is the path of the resource, relative to the resource folder
# cur_path, if provided, is the current working path
# owner_class, if provided, must respond to *app*
#
# Will look for the resource in the runtime root first, than in the
# app's :"#{resource_type}_path", and finally in the spider folder.
def find_resource(resource_type, path, cur_path=nil, owner_classes=nil, search_paths=[])
owner_classes = [owner_classes] unless owner_classes.is_a?(Enumerable)
# FIXME: security check for allowed paths?
def first_found(extensions, path)
extensions.each do |ext|
full = path
full += '.'+ext if ext
return full if (File.exist?(full))
end
return nil
end
search_paths ||= []
owner_classes.each do |owner_class| # FIXME: refactor
next if owner_class.is_a?(Spider::Home) # home is already checked for other owner_classes
# FIXME: maybe it shouldn't get here?
owner_class = nil if owner_class == NilClass
resource_config = @resource_types[resource_type]
raise "Unknown resource type #{resource_type}" unless resource_config
resource_rel_path = resource_config[:path]
extensions = [nil] + resource_config[:extensions]
path.strip!
if (path[0..3] == 'ROOT')
path.sub!(/^ROOT/, Spider.paths[:root])
return Resource.new(path, @home)
elsif (path[0..5] == 'SPIDER')
path.sub!(/^SPIDER/, $SPIDER_PATH)
return Resource.new(path, self)
elsif (cur_path)
if (path[0..1] == './')
return Resource.new(first_found(extensions, cur_path+path[1..-1]), owner_class)
elsif (path[0..1] == '../')
return Resource.new(first_found(extensions, File.dirname(cur_path)+path[2..-1]), owner_class)
end
end
app = nil
path_app = nil
if (path[0].chr == '/')
first_part = path[1..-1].split('/')[0]
Spider.apps_by_path.each do |p, a|
if path.index(p+'/') == 1 # FIXME: might not be correct
#if first_part == p
path_app = a
path = path[p.length+2..-1]
break
end
end
app = path_app
elsif owner_class <= Spider::App
app = owner_class
else
app = owner_class.app if (owner_class && owner_class.app)
end
return Resource.new(cur_path+'/'+path, owner_class) if cur_path && File.exist?(cur_path+'/'+path) # !app
raise "Can't find owner app for resource #{path}" unless app
search_locations = resource_search_locations(resource_type, app)
search_paths.each do |p|
p = [p, owner_class] unless p.is_a?(Array)
search_locations << p
end
search_locations.each do |p|
found = first_found(extensions, p[0]+'/'+path)
next if found == cur_path
definer = path_app || p[1]
return Resource.new(found, definer) if found
end
end
return Resource.new(path)
end
def resource_search_locations(resource_type, app=nil)
resource_config = @resource_types[resource_type]
resource_rel_path = resource_config[:path]
app_rel_path = app && app.respond_to?(:relative_path) ? app.relative_path : nil
search_locations = []
unless Spider.conf.get('resources.disable_custom')
root_search = File.join(Spider.paths[:root], resource_rel_path)
root_search = File.join(root_search, app_rel_path) if app_rel_path
# unless cur_path && cur_path == File.join(root_search, path)
search_locations = [[root_search, @home]]
# end
end
if app
if app.respond_to?("#{resource_type}_path")
search_locations << [app.send("#{resource_type}_path"), app]
else
search_locations << [File.join(app.path, resource_rel_path), app]
end
if Spider.runmode == 'test'
search_locations << [File.join(app.path, 'test', resource_rel_path), app]
end
end
spider_path = File.join($SPIDER_PATH, resource_rel_path)
search_locations << [spider_path, self]
search_locations
end
def list_resources(resource_type, owner_class=nil, start=nil, search_paths = [])
app = nil
if owner_class <= Spider::App
app = owner_class
else
app = owner_class.app if (owner_class && owner_class.app)
end
search_locations = resource_search_locations(resource_type, app)
resource_config = @resource_types[resource_type]
extensions = resource_config[:extensions]
search_paths.each do |p|
p = [p, owner_class] unless p.is_a?(Array)
search_locations << p
end
res = []
search_locations.reverse.each do |p|
pname = Pathname.new(p[0])
base = p[0]
base = File.join(base, start) if start
extensions.each do |ext|
Dir.glob(File.join(base, "*.#{ext}")).each do |f|
res << (Pathname.new(f).relative_path_from(pname)).to_s
end
end
end
res.uniq
end
def find_resource_path(resource_type, path, cur_path=nil, owner_classes=nil, search_paths=[])
res = find_resource(resource_type, path, cur_path, owner_classes, search_paths)
return res ? res.path : nil
end
# Source file management
def sources_in_dir(path)
loaded = []
$".each do |file|
basename = File.basename(file)
next if (basename == 'spider.rb' || basename == 'options.rb')
if (file[0..path.length-1] == path)
loaded.push(file)
else
$:.each do |dir|
file_path = File.join(dir, file)
if (file_path =~ /^#{path}/) # FileTest.exists?(file_path) &&
loaded.push(file_path)
end
end
end
end
return loaded
end
def reload_sources_in_dir(dir)
self.sources_in_dir(dir).each do |file|
load(file)
end
end
# Re-`load`s every already-required source file that lives under the run
# path or an apps directory.
# NOTE(review): Thread.critical was removed in Ruby 1.9 - on modern
# rubies this method raises NoMethodError and needs a Mutex instead.
def reload_sources
logger.debug("Reloading sources")
crit = Thread.critical
Thread.critical = true
$".each do |file|
if file =~ /^(#{$SPIDER_RUN_PATH}|apps)/
# logger.debug("RELOADING #{file}")
load(file)
else
# logger.debug("SKIPPING #{file}")
end
end
Thread.critical = crit
end
def respawn!
# TODO
raise "Unimplemented"
Spider.logger.info("Respawning")
@spawner.write('spawn')
@spawner.close
Process.kill "KILL", Process.pid
end
# Sets the runmode (test, devel or production). May only be set once;
# also activates the matching configuration set, optionally starts the
# debugger, and namespaces the var folder for non-production modes.
#
# @raise [RuntimeError] when the runmode was already set
def runmode=(mode)
  raise "Can't change runmode" if @runmode
  @runmode = mode
  @configuration.include_set(mode)
  # File.exists? was removed in Ruby 3.2; File.exist? is the supported form.
  if Spider.conf.get('debugger.start') || File.exist?(File.join($SPIDER_RUN_PATH, 'tmp', 'debug.txt'))
    init_debug
  end
  Spider.paths[:var] = File.join(Spider.paths[:var], mode) if mode != 'production'
  Bundler.require(:default, @runmode.to_sym) if defined?(Bundler)
end
# Starts ruby-debug: remotely (waiting for a connection) when
# tmp/debug.txt exists - the marker file is consumed - or locally
# otherwise. Load failures are logged as warnings, not raised.
def init_debug
  debug_marker = File.join($SPIDER_RUN_PATH, 'tmp', 'debug.txt')
  begin
    require 'ruby-debug'
    # File.exists? was removed in Ruby 3.2; File.exist? is the supported form.
    if File.exist?(debug_marker)
      Debugger.wait_connection = true
      Debugger.start_remote
      File.delete(debug_marker)
    else
      Debugger.start
    end
  rescue LoadError, RuntimeError => exc
    Spider.logger.warn(exc.message)
  end
end
def locale
Locale.current[0]
end
def i18n(l = self.locale)
Spider::I18n.provider(l)
end
def test_setup
end
def test_teardown
end
def _test_setup
@apps.each do |name, mod|
mod.test_setup if mod.respond_to?(:test_setup)
end
end
def _test_teardown
@apps.each do |name, mod|
mod.test_teardown if mod.respond_to?(:test_teardown)
end
end
end
end
Better error message when ruby-debug can't be loaded
require 'spiderfw/env'
require 'rubygems'
require 'find'
require 'fileutils'
require 'pathname'
require 'spiderfw/autoload'
require 'spiderfw/requires'
require 'spiderfw/version'
module Spider
@apps = {}; @apps_by_path = {}; @apps_by_short_name = {}; @loaded_apps = {}
@paths = {}
@resource_types = {}
@spawner = nil
class << self
# Everything here must be thread safe!!!
# An instance of the shared logger.
attr_reader :logger
# An hash of registered Spider::App, indexed by name.
attr_reader :apps
# An hash of registred Spider::App modules, indexed by path.
attr_reader :apps_by_path
# An hash of registred Spider::App modules, indexed by short name (name without namespace).
attr_reader :apps_by_short_name
# The current runmode (test, devel or production).
attr_reader :runmode
# An hash of runtime paths.
# :root:: The base runtime path.
# :apps:: Apps folder.
# :core_apps:: Spider apps folder.
# :config:: Config folder.
# :views:: Runtime views folder.
# :var:: Var folder. Must be writable. Contains cache, logs, and other files written by the server.
# :data:: Data folder. Holds static and dynamic files. Some subdirs may have to be writable.
# :certs:: Certificates folder.
# ::tmp:: Temp folder. Must be writable.
# ::log:: Log location.
attr_reader :paths
# Current Home
attr_reader :home
# Registered resource types
attr_reader :resource_types
# Main site
attr_accessor :site
attr_accessor :spawner
# Initializes the runtime environment. This method is called when spider is required. Apps may implement
# an app_init method, that will be called after Spider::init is done.
# Initializes the runtime environment. This method is called when spider
# is required. Apps may implement an app_init method, that will be
# called after Spider::init is done.
#
# @param force [Boolean] re-run initialization even if already done
def init(force=false)
  return if @init_done && !force
  init_base(force)
  start_loggers
  @paths[:spider] = $SPIDER_PATH
  if $SPIDER_CONFIG_SETS
    $SPIDER_CONFIG_SETS.each { |set| @configuration.include_set(set) }
  end
  init_file = File.join($SPIDER_RUN_PATH, 'init.rb')
  ENV['BUNDLE_GEMFILE'] ||= File.join($SPIDER_RUN_PATH, 'Gemfile')
  # File.exists? was removed in Ruby 3.2; File.exist? is the supported form.
  require 'bundler/setup' if File.exist?(ENV['BUNDLE_GEMFILE'])
  if File.exist?(init_file)
    @home.instance_eval(File.read(init_file), init_file)
  end
  @apps.each do |name, mod|
    mod.app_init if mod.respond_to?(:app_init)
  end
  GetText::LocalePath.memoize_clear # since new paths have been added to GetText
  @apps.each do |name, mod|
    if File.directory?(File.join(mod.path, 'po'))
      GetText.bindtextdomain(mod.short_name)
    end
  end
  @init_done = true
end
def init_done?
@init_done
end
def init_base(force=false)
return if @init_base_done && !force
@apps_to_load = []
@root = $SPIDER_RUN_PATH
@home = Home.new(@root)
require 'spiderfw/config/options/spider.rb'
setup_paths(@root)
all_apps = find_all_apps
all_apps.each do |path|
load_configuration(File.join(path, 'config'))
end
@runmode = nil
self.runmode = $SPIDER_RUNMODE if $SPIDER_RUNMODE
load_configuration File.join($SPIDER_PATH, 'config')
load_configuration File.join(@root, 'config')
Locale.default = Spider.conf.get('i18n.default_locale')
@init_base_done = true
end
#
# def stop
# @apps.each do |name, mod|
# mod.app_stop if mod.respond_to?(:app_stop)
# end
# end
# Invoked before a server is started. Apps may implement the app_startup method, that will be called.
#
# Raises a RuntimeError unless run from the project root (detected by init.rb).
def startup
    # File.exist? replaces File.exists?, which was removed in Ruby 3.2.
    unless File.exist?(File.join(Spider.paths[:root], 'init.rb'))
        raise "The server must be started from the root directory"
    end
    # Ensure writable runtime folders exist before anything logs or caches.
    FileUtils.mkdir_p(Spider.paths[:tmp])
    FileUtils.mkdir_p(Spider.paths[:var])
    if Spider.conf.get('template.cache.reload_on_restart')
        # Touching this file signals the template cache to invalidate.
        FileUtils.touch("#{Spider.paths[:tmp]}/templates_reload.txt")
    end
    unless Spider.runmode == 'test'
        if domain = Spider.conf.get('site.domain')
            ssl_port = Spider.conf.get('site.ssl') ? Spider.conf.get('site.ssl_port') : nil
            Spider.site = Site.new(domain, Spider.conf.get('site.port'), ssl_port)
        elsif File.exist?(Site.cache_file)
            # Fall back to the cached site definition when none is configured.
            Spider.site = Site.load_cache
        end
    end
    if Spider.conf.get('request.mutex')
        mutex_requests!
    end
    # Run each app's startup hook.
    @apps.each do |name, mod|
        mod.app_startup if mod.respond_to?(:app_startup)
    end
    @startup_done = true
end
def startup_done?
@startup_done
end
# Invoked when a server is shutdown. Apps may implement the app_shutdown method, that will be called.
# Only runs on the main thread; disables post-mortem debugging if active.
def shutdown
    return unless Thread.current == Thread.main
    if Object.const_defined?(:Debugger) && Debugger.post_mortem?
        Debugger.post_mortem = false
    end
    @apps.each_value do |app_module|
        app_module.app_shutdown if app_module.respond_to?(:app_shutdown)
    end
end
def current
Spider::Request.current
end
def request_started
@request_mutex.lock if (@request_mutex)
Spider::Request.current = {
:_start => Time.now
}
end
def request_finished
# Spider.logger.info("Done in #{(Time.now - Spider::Request.current[:_start])*1000}ms")
Spider::Request.reset_current
@request_mutex.unlock if (@request_mutex)
end
def mutex_requests!
@request_mutex = Mutex.new
end
def request_mutex
@request_mutex
end
def request_mutex=(val)
@request_mutex = val
end
# Closes any open loggers, and opens new ones based on configured settings.
def start_loggers(force=false)
return if @logger && !force
@logger ||= Spider::Logger
@logger.close_all
@logger.open(STDERR, Spider.conf.get('log.console')) if Spider.conf.get('log.console')
begin
FileUtils.mkdir(@paths[:log]) unless File.exist?(@paths[:log])
rescue => exc
@logger.error("Unable to create log folder") if File.exist?(File.dirname(@paths[:log]))
end
if @paths[:log] && File.exist?(@paths[:log])
@logger.open(File.join(@paths[:log], 'error.log'), :ERROR) if Spider.conf.get('log.errors')
if Spider.conf.get('log.level')
@logger.open(File.join(@paths[:log], Spider.conf.get('log.file_name')), Spider.conf.get('log.level'))
end
end
if RUBY_PLATFORM =~ /java/ && Spider.conf.get('log.apache_commons')
begin
require 'spiderfw/utils/loggers/apache_commons_logger'
l = Spider::Loggers::ApacheCommonsLogger.new
@logger.add('apache_commons_logger', l)
rescue NameError
$stderr << "Warning: Unable to load Java class org.apache.commons.logging.LogFactory\n"
end
end
$LOG = @logger
Object.const_set(:LOGGER, @logger)
end
def start_loggers!
start_loggers(false)
end
# Sets the default paths (see #paths).
# All locations are derived from the given runtime root, except
# :core_apps which lives under the spider installation itself.
def setup_paths(root)
    config_dir = File.join(root, 'config')
    var_dir = File.join(root, 'var')
    @paths.merge!(
        :root => root,
        :apps => File.join(root, 'apps'),
        :core_apps => File.join($SPIDER_PATH, 'apps'),
        :config => config_dir,
        :layouts => File.join(root, 'layouts'),
        :var => var_dir,
        :certs => File.join(config_dir, 'certs'),
        :tmp => File.join(root, 'tmp'),
        :data => File.join(root, 'data'),
        :log => File.join(var_dir, 'log')
    )
    # Preserve the original return value (the last assigned path).
    @paths[:log]
end
# Finds an app by name, looking in paths[:apps] and paths[:core_apps]. Returns the found path.
# An app directory is identified by the presence of an _init.rb file;
# returns nil when the name matches in neither location.
def find_app(name)
    candidates = [@paths[:apps], @paths[:core_apps]].map { |base| File.join(base, name) }
    candidates.find { |candidate| File.exist?(File.join(candidate, '_init.rb')) }
end
# Finds all app directories under the named folder, looking in
# paths[:apps] first, then paths[:core_apps].
#
# Returns an Array of app paths, or [] when the name exists in neither
# location. (Previously the method fell through and returned the result
# of `.each` — the two base paths themselves — which made callers like
# load_app iterate over non-app directories.)
def find_apps(name)
    [@paths[:apps], @paths[:core_apps]].each do |base|
        test = File.join(base, name)
        return find_apps_in_folder(test) if File.exist?(test)
    end
    []
end
def load_app(name)
paths = find_apps(name)
paths.each do |path|
load_app_at_path(path)
end
end
def load_app_at_path(path)
return if @loaded_apps[path]
relative_path = path
if path.index(Spider.paths[:root])
home = Pathname.new(Spider.paths[:root])
pname = Pathname.new(path)
relative_path = pname.relative_path_from(home).to_s
end
@loaded_apps[path] = true
last_name = File.basename(path)
app_files = ['_init.rb', last_name+'.rb', 'cmd.rb']
app_files.each{ |f| require File.join(relative_path, f) if File.exist?(File.join(path, f)) }
GetText::LocalePath.add_default_rule(File.join(path, "data/locale/%{lang}/LC_MESSAGES/%{name}.mo"))
end
def load_apps(*l)
l.each do |app|
load_app(app)
end
end
def load_all_apps
find_all_apps.each do |path|
load_app_at_path(path)
end
end
def find_all_apps
app_paths = []
Find.find(@paths[:core_apps], @paths[:apps]) do |path|
if (File.basename(path) == '_init.rb')
app_paths << File.dirname(path)
Find.prune
elsif File.exist?(File.join(path, '_init.rb'))
app_paths << path
Find.prune
end
end
return app_paths
end
# Recursively collects app directories (folders containing _init.rb)
# under path.
#
# Returns nil when path is not a directory, [path] when path itself is
# an app, otherwise an Array of the app paths found below it.
def find_apps_in_folder(path)
    return unless File.directory?(path)
    return [path] if File.exist?(File.join(path, '_init.rb'))
    found = []
    Dir.new(path).each do |f|
        next if f[0].chr == '.'
        found_path = File.join(path, f)
        # The recursive call returns nil for plain files; guard so we
        # don't raise "no implicit conversion of nil into Array" when a
        # folder contains loose files alongside app directories.
        sub = find_apps_in_folder(found_path)
        found += sub if sub
    end
    return found
end
# Registers an app module in all three lookup tables: by full module
# name, by relative path, and by short name.
def add_app(mod)
    { :name => @apps,
      :relative_path => @apps_by_path,
      :short_name => @apps_by_short_name }.each do |attribute, registry|
        registry[mod.send(attribute)] = mod
    end
end
# True if an app is registered under the given relative path or short
# name; false otherwise.
def app?(path_or_name)
    !!(@apps_by_path[path_or_name] || @apps_by_short_name[path_or_name])
end
# Loads all YAML configuration files found in path into @configuration.
# An options.rb file, if present, is required first so option
# definitions exist before values are loaded. Invalid YAML files are
# reported (logged, or printed before the logger is up) but do not
# abort loading of the remaining files.
def load_configuration(path)
    return unless File.directory?(path)
    opts = File.join(path, 'options.rb')
    require opts if File.exist?(opts)
    Dir.new(path).each do |f|
        # Object#untaint was removed in Ruby 3.2; guard the call so this
        # works on both old and new rubies. FIXME: security parse
        f.untaint if f.respond_to?(:untaint)
        case f
        when /^\./
            next
        when /\.(yaml|yml)$/
            begin
                @configuration.load_yaml(File.join(path, f))
            rescue ConfigurationException => exc
                if (exc.type == :yaml)
                    err = "Configuration file #{path+f} is not valid YAML"
                    if @logger
                        @logger.error(err)
                    else
                        puts err
                    end
                else
                    raise
                end
            end
        end
    end
end
# Returns the default controller.
def controller
require 'spiderfw/controller/spider_controller'
SpiderController
end
# Sets routes on the #controller for the given apps.
def route_apps(*apps)
options = {}
if apps[-1].is_a?(Hash)
options = apps.pop
end
@route_apps = apps.empty? ? true : apps
if (@route_apps)
apps_to_route = @route_apps == true ? self.apps.values : @route_apps.map{ |name| self.apps[name] }
end
if options[:except]
apps_to_route.reject{ |app| options[:except].include?(app) }
end
if (apps_to_route)
apps_to_route.each{ |app| @home.controller.route_app(app) }
end
end
# Adds a resource type
# name must be a symbol, extensions an array of extensions (strings, without the dot) for this resource.
# Options may be:
# :extensions an array of possible extensions. If given, find_resource will try appending the extensions
# when looking for the file.
# :path the path of the resource relative to resource root; if not given, name will be used.
#
def register_resource_type(name, options={})
@resource_types[name] = {
:extensions => options[:extensions],
:path => options[:path] || name.to_s
}
end
Spider.register_resource_type(:views, :extensions => ['shtml'])
def path
$SPIDER_PATH
end
def relative_path
'/spider'
end
# Returns the full path of a resource.
# resource_type may be :views, or any other type registered with #register_resource_type
# path is the path of the resource, relative to the resource folder
# cur_path, if provided, is the current working path
# owner_class, if provided, must respond to *app*
#
# Will look for the resource in the runtime root first, than in the
# app's :"#{resource_type}_path", and finally in the spider folder.
def find_resource(resource_type, path, cur_path=nil, owner_classes=nil, search_paths=[])
owner_classes = [owner_classes] unless owner_classes.is_a?(Enumerable)
# FIXME: security check for allowed paths?
def first_found(extensions, path)
extensions.each do |ext|
full = path
full += '.'+ext if ext
return full if (File.exist?(full))
end
return nil
end
search_paths ||= []
owner_classes.each do |owner_class| # FIXME: refactor
next if owner_class.is_a?(Spider::Home) # home is already checked for other owner_classes
# FIXME: maybe it shouldn't get here?
owner_class = nil if owner_class == NilClass
resource_config = @resource_types[resource_type]
raise "Unknown resource type #{resource_type}" unless resource_config
resource_rel_path = resource_config[:path]
extensions = [nil] + resource_config[:extensions]
path.strip!
if (path[0..3] == 'ROOT')
path.sub!(/^ROOT/, Spider.paths[:root])
return Resource.new(path, @home)
elsif (path[0..5] == 'SPIDER')
path.sub!(/^SPIDER/, $SPIDER_PATH)
return Resource.new(path, self)
elsif (cur_path)
if (path[0..1] == './')
return Resource.new(first_found(extensions, cur_path+path[1..-1]), owner_class)
elsif (path[0..1] == '../')
return Resource.new(first_found(extensions, File.dirname(cur_path)+path[2..-1]), owner_class)
end
end
app = nil
path_app = nil
if (path[0].chr == '/')
first_part = path[1..-1].split('/')[0]
Spider.apps_by_path.each do |p, a|
if path.index(p+'/') == 1 # FIXME: might not be correct
#if first_part == p
path_app = a
path = path[p.length+2..-1]
break
end
end
app = path_app
elsif owner_class <= Spider::App
app = owner_class
else
app = owner_class.app if (owner_class && owner_class.app)
end
return Resource.new(cur_path+'/'+path, owner_class) if cur_path && File.exist?(cur_path+'/'+path) # !app
raise "Can't find owner app for resource #{path}" unless app
search_locations = resource_search_locations(resource_type, app)
search_paths.each do |p|
p = [p, owner_class] unless p.is_a?(Array)
search_locations << p
end
search_locations.each do |p|
found = first_found(extensions, p[0]+'/'+path)
next if found == cur_path
definer = path_app || p[1]
return Resource.new(found, definer) if found
end
end
return Resource.new(path)
end
def resource_search_locations(resource_type, app=nil)
resource_config = @resource_types[resource_type]
resource_rel_path = resource_config[:path]
app_rel_path = app && app.respond_to?(:relative_path) ? app.relative_path : nil
search_locations = []
unless Spider.conf.get('resources.disable_custom')
root_search = File.join(Spider.paths[:root], resource_rel_path)
root_search = File.join(root_search, app_rel_path) if app_rel_path
# unless cur_path && cur_path == File.join(root_search, path)
search_locations = [[root_search, @home]]
# end
end
if app
if app.respond_to?("#{resource_type}_path")
search_locations << [app.send("#{resource_type}_path"), app]
else
search_locations << [File.join(app.path, resource_rel_path), app]
end
if Spider.runmode == 'test'
search_locations << [File.join(app.path, 'test', resource_rel_path), app]
end
end
spider_path = File.join($SPIDER_PATH, resource_rel_path)
search_locations << [spider_path, self]
search_locations
end
def list_resources(resource_type, owner_class=nil, start=nil, search_paths = [])
app = nil
if owner_class <= Spider::App
app = owner_class
else
app = owner_class.app if (owner_class && owner_class.app)
end
search_locations = resource_search_locations(resource_type, app)
resource_config = @resource_types[resource_type]
extensions = resource_config[:extensions]
search_paths.each do |p|
p = [p, owner_class] unless p.is_a?(Array)
search_locations << p
end
res = []
search_locations.reverse.each do |p|
pname = Pathname.new(p[0])
base = p[0]
base = File.join(base, start) if start
extensions.each do |ext|
Dir.glob(File.join(base, "*.#{ext}")).each do |f|
res << (Pathname.new(f).relative_path_from(pname)).to_s
end
end
end
res.uniq
end
def find_resource_path(resource_type, path, cur_path=nil, owner_classes=nil, search_paths=[])
res = find_resource(resource_type, path, cur_path, owner_classes, search_paths)
return res ? res.path : nil
end
# Source file management
def sources_in_dir(path)
loaded = []
$".each do |file|
basename = File.basename(file)
next if (basename == 'spider.rb' || basename == 'options.rb')
if (file[0..path.length-1] == path)
loaded.push(file)
else
$:.each do |dir|
file_path = File.join(dir, file)
if (file_path =~ /^#{path}/) # FileTest.exists?(file_path) &&
loaded.push(file_path)
end
end
end
end
return loaded
end
def reload_sources_in_dir(dir)
self.sources_in_dir(dir).each do |file|
load(file)
end
end
def reload_sources
logger.debug("Reloading sources")
crit = Thread.critical
Thread.critical = true
$".each do |file|
if file =~ /^(#{$SPIDER_RUN_PATH}|apps)/
# logger.debug("RELOADING #{file}")
load(file)
else
# logger.debug("SKIPPING #{file}")
end
end
Thread.critical = crit
end
def respawn!
# TODO
raise "Unimplemented"
Spider.logger.info("Respawning")
@spawner.write('spawn')
@spawner.close
Process.kill "KILL", Process.pid
end
# Sets the runmode (test, devel or production). May only be set once;
# raises if a runmode is already active. Applies the matching
# configuration set, optionally starts the debugger, and isolates the
# var folder per runmode outside production.
def runmode=(mode)
    raise "Can't change runmode" if @runmode
    @runmode = mode
    @configuration.include_set(mode)
    # File.exist? replaces File.exists?, which was removed in Ruby 3.2.
    if Spider.conf.get('debugger.start') || File.exist?(File.join($SPIDER_RUN_PATH,'tmp', 'debug.txt'))
        init_debug
    end
    # Keep per-runmode var dirs separate outside production.
    Spider.paths[:var] = File.join(Spider.paths[:var], mode) if mode != 'production'
    Bundler.require(:default, @runmode.to_sym) if defined?(Bundler)
end
# Starts the ruby-debug debugger. When tmp/debug.txt exists, waits for
# a remote debugger connection and removes the flag file; otherwise
# starts the debugger in-process. Failure to load or start the
# debugger is logged (or printed) but never fatal.
def init_debug
    begin
        require 'ruby-debug'
        # File.exist? replaces File.exists?, which was removed in Ruby 3.2.
        if File.exist?(File.join($SPIDER_RUN_PATH,'tmp', 'debug.txt'))
            Debugger.wait_connection = true
            Debugger.start_remote
            File.delete(File.join($SPIDER_RUN_PATH,'tmp', 'debug.txt'))
        else
            Debugger.start
        end
    rescue LoadError, RuntimeError => exc
        msg = _('Unable to start debugger. Ensure ruby-debug is installed (or set debugger.start to false).')
        if Spider.logger
            Spider.logger.warn(exc.message)
            Spider.logger.warn(msg)
        else
            puts msg
        end
    end
end
def locale
Locale.current[0]
end
def i18n(l = self.locale)
Spider::I18n.provider(l)
end
def test_setup
end
def test_teardown
end
# Invokes the test_setup hook on every registered app that defines one.
def _test_setup
    @apps.each_value { |app_module| app_module.test_setup if app_module.respond_to?(:test_setup) }
end
# Invokes the test_teardown hook on every registered app that defines one.
def _test_teardown
    @apps.each_value { |app_module| app_module.test_teardown if app_module.respond_to?(:test_teardown) }
end
end
end
|
module Spinach
# Spinach version.
VERSION = "0.6.0"
end
Version bump
module Spinach
# Spinach version.
VERSION = "0.6.1"
end
|
module Spinach
# Spinach version.
VERSION = "0.5.2"
end
Version bump
module Spinach
# Spinach version.
VERSION = "0.6.0"
end
|
require "spinning_cursor/cursor"
require "spinning_cursor/parser"
module SpinningCursor
extend self
#
# Sends passed block to Parser, and starts cursor thread
# It will execute the action block and kill the cursor
# thread if an action block is passed.
#
def start(&block)
if not @curs.nil?
if @curs.alive?
stop
end
end
@parsed = Parser.new(block)
@cursor = Cursor.new(@parsed.banner nil)
@curs = Thread.new { @cursor.spin(@parsed.type(nil), @parsed.delay(nil)) }
@start = @finish = @elapsed = nil
if @parsed.action.nil?
# record start time
do_exec_time
return
end
# The action
begin
do_exec_time do
@parsed.originator.instance_eval &@parsed.action
end
rescue Exception => e
set_message "#{e.message}\n#{e.backtrace.join("\n")}"
ensure
return stop
end
end
#
# Kills the cursor thread and prints the finished message
# Returns execution time
#
def stop
begin
@curs.kill
# Wait for the cursor to die -- can cause problems otherwise
while @curs.alive? ; end
# Set cursor to nil so set_banner method only works
# when cursor is actually running.
@cursor = nil
reset_line
puts (@parsed.message nil)
# Set parsed to nil so set_message method only works
# when cursor is actually running.
@parsed = nil
# Return execution time
get_exec_time
rescue NameError
raise CursorNotRunning.new "Can't stop, no cursor running."
end
end
#
# Determines whether the cursor thread is still running
#
def alive?
if @curs.nil?
return false
else
@curs.alive?
end
end
#
# Sets the finish message (to be used inside the action for
# non-deterministic output)
#
def set_message(msg)
begin
@parsed.message msg
rescue NameError
raise CursorNotRunning.new "Cursor isn't running... are you sure " +
"you're calling this from an action block?"
end
end
#
# Sets the banner message during execution
#
def set_banner(banner)
begin
@cursor.banner = banner
rescue NameError
raise CursorNotRunning.new "Cursor isn't running... are you sure " +
"you're calling this from an action block?"
end
end
#
# Retrieves execution time information
#
def get_exec_time
if not @start.nil?
if @finish.nil? && @curs.alive? == false
do_exec_time
end
return { :started => @start, :finished => @finish,
:elapsed => @elapsed }
else
raise NoTaskError.new "An execution hasn't started or finished."
end
end
private
#
# Takes a block, and returns the start, finish and elapsed times
#
def do_exec_time
if @curs.alive?
@start = Time.now
if block_given?
yield
@finish = Time.now
@elapsed = @finish - @start
end
else
@finish = Time.now
@elapsed = @finish - @start
end
end
class NoTaskError < Exception ; end
class CursorNotRunning < NoTaskError ; end
end
Improve conditional 'if'
'nil' evaluates to 'false' in 'if' conditionals
require "spinning_cursor/cursor"
require "spinning_cursor/parser"
module SpinningCursor
extend self
#
# Sends passed block to Parser, and starts cursor thread
# It will execute the action block and kill the cursor
# thread if an action block is passed.
#
def start(&block)
  # Restart cleanly if a cursor is already spinning.
  if @curs
    if @curs.alive?
      stop
    end
  end
  @parsed = Parser.new(block)
  @cursor = Cursor.new(@parsed.banner nil)
  # Spin the cursor on its own thread so the caller's action can run concurrently.
  @curs = Thread.new { @cursor.spin(@parsed.type(nil), @parsed.delay(nil)) }
  @start = @finish = @elapsed = nil
  # No action block: caller will stop the cursor explicitly later.
  if @parsed.action.nil?
    # record start time
    do_exec_time
    return
  end
  # The action
  begin
    do_exec_time do
      # Evaluate the action in the originator's context so its instance state is visible.
      @parsed.originator.instance_eval &@parsed.action
    end
  rescue Exception => e
    # NOTE(review): rescuing Exception appears deliberate — a failing action is
    # surfaced via the finish message instead of aborting the cursor. Confirm.
    set_message "#{e.message}\n#{e.backtrace.join("\n")}"
  ensure
    # Always stop the cursor; `return` inside ensure also discards any
    # exception still propagating at this point.
    return stop
  end
end
#
# Kills the cursor thread and prints the finished message
# Returns execution time
#
# Raises CursorNotRunning when no cursor thread exists.
def stop
  begin
    @curs.kill
    # Wait for the cursor thread to actually terminate. Thread#join
    # blocks properly instead of the previous `while @curs.alive? ; end`
    # busy-wait, which pinned a CPU core until the thread died.
    @curs.join
    # Set cursor to nil so set_banner method only works
    # when cursor is actually running.
    @cursor = nil
    reset_line
    puts (@parsed.message nil)
    # Set parsed to nil so set_message method only works
    # when cursor is actually running.
    @parsed = nil
    # Return execution time
    get_exec_time
  rescue NameError
    # @curs is nil (NoMethodError < NameError) when nothing was started.
    raise CursorNotRunning.new "Can't stop, no cursor running."
  end
end
#
# Determines whether the cursor thread is still running
#
def alive?
  @curs ? @curs.alive? : false
end
#
# Sets the finish message (to be used inside the action for
# non-deterministic output)
#
# Raises CursorNotRunning when called while no cursor is active
# (@parsed is nil, which triggers the NameError family).
def set_message(msg)
  @parsed.message msg
rescue NameError
  raise CursorNotRunning.new "Cursor isn't running... are you sure " +
    "you're calling this from an action block?"
end
#
# Sets the banner message during execution
#
# Raises CursorNotRunning when called while no cursor is active
# (@cursor is nil, which triggers the NameError family).
def set_banner(banner)
  @cursor.banner = banner
rescue NameError
  raise CursorNotRunning.new "Cursor isn't running... are you sure " +
    "you're calling this from an action block?"
end
#
# Retrieves execution time information
#
# Raises NoTaskError when no execution has started yet.
def get_exec_time
  raise NoTaskError.new "An execution hasn't started or finished." if @start.nil?
  # Lazily capture the finish time if the cursor has already died.
  do_exec_time if @finish.nil? && @curs.alive? == false
  { :started => @start, :finished => @finish,
    :elapsed => @elapsed }
end
private
#
# Takes a block, and returns the start, finish and elapsed times
#
# With a live cursor: records @start, and — only when a block is given —
# runs it and records @finish/@elapsed. With a dead cursor: stamps the
# finish time against the previously recorded @start.
def do_exec_time
  unless @curs.alive?
    @finish = Time.now
    return @elapsed = @finish - @start
  end
  @start = Time.now
  return unless block_given?
  yield
  @finish = Time.now
  @elapsed = @finish - @start
end
class NoTaskError < Exception ; end
class CursorNotRunning < NoTaskError ; end
end
|
require 'rubygems'
require 'bundler/setup'
require_relative 'aloader'
require_relative 'context'
Changed relative paths
require 'rubygems'
require 'bundler/setup'
require_relative 'splunk-sdk-ruby/aloader'
require_relative 'splunk-sdk-ruby/context'
|
# frozen_string_literal: true
require 'fileutils'
require 'sprockets/digest_utils'
module Sprockets
class Asset
attr_reader :logical_path
# Private: Intialize Asset wrapper from attributes Hash.
#
# Asset wrappers should not be initialized directly, only
# Environment#find_asset should vend them.
#
# attributes - Hash of ivars
#
# Returns Asset.
def initialize(attributes = {})
@attributes = attributes
@content_type = attributes[:content_type]
@filename = attributes[:filename]
@id = attributes[:id]
@load_path = attributes[:load_path]
@logical_path = attributes[:logical_path]
@metadata = attributes[:metadata]
@name = attributes[:name]
@source = attributes[:source]
@uri = attributes[:uri]
end
# Internal: Return all internal instance variables as a hash.
#
# Returns a Hash.
def to_hash
@attributes
end
# Public: Metadata accumulated from pipeline process.
#
# The API status of the keys is dependent on the pipeline processors
# itself. So some values maybe considered public and others internal.
# See the pipeline proccessor documentation itself.
#
# Returns Hash.
attr_reader :metadata
# Public: Returns String path of asset.
attr_reader :filename
# Internal: Unique asset object ID.
#
# Returns a String.
attr_reader :id
# Public: Internal URI to lookup asset by.
#
# NOT a publically accessible URL.
#
# Returns URI.
attr_reader :uri
# Public: Return logical path with digest spliced in.
#
# "foo/bar-37b51d194a7513e45b56f6524f2d51f2.js"
#
# Returns String.
def digest_path
logical_path.sub(/\.(\w+)$/) { |ext| "-#{etag}#{ext}" }
end
# Public: Return load path + logical path with digest spliced in.
#
# Returns String.
def full_digest_path
File.join(@load_path, digest_path)
end
# Public: Returns String MIME type of asset. Returns nil if type is unknown.
attr_reader :content_type
# Public: Get all externally linked asset filenames from asset.
#
# All linked assets should be compiled anytime this asset is.
#
# Returns Set of String asset URIs.
def links
metadata[:links] || Set.new
end
# Public: Return `String` of concatenated source.
#
# Returns String.
def source
if @source
@source
else
# File is read everytime to avoid memory bloat of large binary files
File.binread(filename)
end
end
# Public: Alias for #source.
#
# Returns String.
def to_s
source
end
# Public: Get charset of source.
#
# Returns a String charset name or nil if binary.
def charset
metadata[:charset]
end
# Public: Returns Integer length of source.
def length
metadata[:length]
end
alias_method :bytesize, :length
# Public: Returns String byte digest of source.
def digest
metadata[:digest]
end
# Private: Return the version of the environment where the asset was generated.
def environment_version
metadata[:environment_version]
end
# Public: Returns String hexdigest of source.
def hexdigest
DigestUtils.pack_hexdigest(digest)
end
# Pubic: ETag String of Asset.
def etag
DigestUtils.pack_hexdigest(environment_version + digest)
end
# Public: Returns String base64 digest of source.
def base64digest
DigestUtils.pack_base64digest(digest)
end
# Public: A "named information" URL for subresource integrity.
def integrity
DigestUtils.integrity_uri(metadata[:digest])
end
# Public: Add enumerator to allow `Asset` instances to be used as Rack
# compatible body objects.
#
# block
# part - String body chunk
#
# Returns nothing.
def each
yield to_s
end
# Deprecated: Save asset to disk.
#
# filename - String target
#
# Returns nothing.
def write_to(filename)
FileUtils.mkdir_p File.dirname(filename)
PathUtils.atomic_write(filename) do |f|
f.write source
end
nil
end
# Public: Pretty inspect
#
# Returns String.
def inspect
"#<#{self.class}:#{object_id.to_s(16)} #{uri.inspect}>"
end
# Public: Implements Object#hash so Assets can be used as a Hash key or
# in a Set.
#
# Returns Integer hash of the id.
def hash
id.hash
end
# Public: Compare assets.
#
# Assets are equal if they share the same path and digest.
#
# Returns true or false.
def eql?(other)
self.class == other.class && self.id == other.id
end
alias_method :==, :eql?
end
end
Use the digest method to calculate the integrity
# frozen_string_literal: true
require 'fileutils'
require 'sprockets/digest_utils'
module Sprockets
  class Asset
    # Public: String logical path the asset is addressed by (no load path, no digest).
    attr_reader :logical_path

    # Private: Initialize Asset wrapper from attributes Hash.
    #
    # Asset wrappers should not be initialized directly, only
    # Environment#find_asset should vend them.
    #
    # attributes - Hash of ivars
    #
    # Returns Asset.
    def initialize(attributes = {})
      @attributes = attributes
      @content_type = attributes[:content_type]
      @filename = attributes[:filename]
      @id = attributes[:id]
      @load_path = attributes[:load_path]
      @logical_path = attributes[:logical_path]
      @metadata = attributes[:metadata]
      @name = attributes[:name]
      @source = attributes[:source]
      @uri = attributes[:uri]
    end

    # Internal: Return all internal instance variables as a hash.
    #
    # Returns a Hash.
    def to_hash
      @attributes
    end

    # Public: Metadata accumulated from pipeline process.
    #
    # The API status of the keys is dependent on the pipeline processors
    # itself. So some values may be considered public and others internal.
    # See the pipeline processor documentation itself.
    #
    # Returns Hash.
    attr_reader :metadata

    # Public: Returns String path of asset.
    attr_reader :filename

    # Internal: Unique asset object ID.
    #
    # Returns a String.
    attr_reader :id

    # Public: Internal URI to lookup asset by.
    #
    # NOT a publicly accessible URL.
    #
    # Returns URI.
    attr_reader :uri

    # Public: Return logical path with digest spliced in.
    #
    #   "foo/bar-37b51d194a7513e45b56f6524f2d51f2.js"
    #
    # Returns String.
    def digest_path
      logical_path.sub(/\.(\w+)$/) { |ext| "-#{etag}#{ext}" }
    end

    # Public: Return load path + logical path with digest spliced in.
    #
    # Returns String.
    def full_digest_path
      File.join(@load_path, digest_path)
    end

    # Public: Returns String MIME type of asset. Returns nil if type is unknown.
    attr_reader :content_type

    # Public: Get all externally linked asset filenames from asset.
    #
    # All linked assets should be compiled anytime this asset is.
    #
    # Returns Set of String asset URIs.
    def links
      metadata[:links] || Set.new
    end

    # Public: Return `String` of concatenated source.
    #
    # Returns String.
    def source
      if @source
        @source
      else
        # File is read every time to avoid memory bloat of large binary files
        File.binread(filename)
      end
    end

    # Public: Alias for #source.
    #
    # Returns String.
    def to_s
      source
    end

    # Public: Get charset of source.
    #
    # Returns a String charset name or nil if binary.
    def charset
      metadata[:charset]
    end

    # Public: Returns Integer length of source.
    def length
      metadata[:length]
    end
    alias_method :bytesize, :length

    # Public: Returns String byte digest of source.
    def digest
      metadata[:digest]
    end

    # Private: Return the version of the environment where the asset was generated.
    def environment_version
      metadata[:environment_version]
    end

    # Public: Returns String hexdigest of source.
    def hexdigest
      DigestUtils.pack_hexdigest(digest)
    end

    # Public: ETag String of Asset.
    def etag
      DigestUtils.pack_hexdigest(environment_version + digest)
    end

    # Public: Returns String base64 digest of source.
    def base64digest
      DigestUtils.pack_base64digest(digest)
    end

    # Public: A "named information" URL for subresource integrity.
    def integrity
      DigestUtils.integrity_uri(digest)
    end

    # Public: Add enumerator to allow `Asset` instances to be used as Rack
    # compatible body objects.
    #
    # block
    #   part - String body chunk
    #
    # Returns nothing.
    def each
      yield to_s
    end

    # Deprecated: Save asset to disk.
    #
    # filename - String target
    #
    # Returns nothing.
    def write_to(filename)
      FileUtils.mkdir_p File.dirname(filename)
      PathUtils.atomic_write(filename) do |f|
        f.write source
      end
      nil
    end

    # Public: Pretty inspect
    #
    # Returns String.
    def inspect
      "#<#{self.class}:#{object_id.to_s(16)} #{uri.inspect}>"
    end

    # Public: Implements Object#hash so Assets can be used as a Hash key or
    # in a Set.
    #
    # Returns Integer hash of the id.
    def hash
      id.hash
    end

    # Public: Compare assets.
    #
    # Assets are equal if they share the same path and digest.
    #
    # Returns true or false.
    def eql?(other)
      self.class == other.class && self.id == other.id
    end
    alias_method :==, :eql?
  end
end
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.