CombinedText stringlengths 4 3.42M |
|---|
# Copyright:: Copyright (c) 2015.
# License:: Apache License, Version 2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Omnibus software definition for the unifiedpush-server package.
name "unifiedpush-server"
default_version "1.2.0-SNAPSHOT"

dependency "ruby"
dependency "bundler"
dependency "rsync"
dependency "postgresql"
dependency "wildfly"

# Checksums for each supported package version.
version "1.2.0-SNAPSHOT" do
  source md5: "3114d32b853008ff6949d1570b5a8b4e"
end
version "1.2.0-RC1" do
  source md5: "f8f00ce9b554937445d0c9d4097f56ef"
end

# Snapshot builds live in a different Artifactory repository than releases.
# NOTE: `version` is an Omnibus DSL call; coerce explicitly with to_s instead
# of the redundant "#{version}" interpolation.
repo_home = version.to_s.end_with?("SNAPSHOT") ? "libs-snapshot-local" : "libs-release-local"
source url: "https://development.c-b4.com/artifactory/#{repo_home}/org/jboss/aerogear/unifiedpush/unifiedpush-package/#{version}/unifiedpush-package-#{version}.tar.gz"

build do
  command "mkdir -p #{install_dir}/embedded/apps/unifiedpush-server/"
  sync "#{project_dir}/", "#{install_dir}/embedded/apps/unifiedpush-server/"
  # Strip version from packages.
  link "#{install_dir}/embedded/apps/unifiedpush-server/unifiedpush-server-wildfly-#{version}.war", "#{install_dir}/embedded/apps/unifiedpush-server/unifiedpush-server.war"
  link "#{install_dir}/embedded/apps/unifiedpush-server/unifiedpush-initdb-#{version}.tar.gz", "#{install_dir}/embedded/apps/unifiedpush-server/unifiedpush-initdb.tar.gz"
  link "#{install_dir}/embedded/apps/unifiedpush-server/unifiedpush-keycloak-theme-#{version}.tar.gz", "#{install_dir}/embedded/apps/unifiedpush-server/unifiedpush-keycloak-theme.tar.gz"
  link "#{install_dir}/embedded/apps/unifiedpush-server/unifiedpush-admin-ui-#{version}.tar.gz", "#{install_dir}/embedded/apps/unifiedpush-server/unifiedpush-admin-ui.tar.gz"
  erb source: "version.yml.erb",
      dest: "#{install_dir}/embedded/apps/unifiedpush-server/version.yml",
      mode: 0644,
      vars: { default_version: default_version }
end

# extract initdb project to allow JPA based schema creation.
build do
  command "tar xzf #{install_dir}/embedded/apps/unifiedpush-server/unifiedpush-initdb-#{version}.tar.gz -C #{install_dir}/embedded/apps/unifiedpush-server/"
end
Update ups server version
# Copyright:: Copyright (c) 2015.
# License:: Apache License, Version 2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Omnibus software definition for the unifiedpush-server package.
name "unifiedpush-server"
default_version "1.2.0-SNAPSHOT"

dependency "ruby"
dependency "bundler"
dependency "rsync"
dependency "postgresql"
dependency "wildfly"

# Checksums for each supported package version.
version "1.2.0-SNAPSHOT" do
  source md5: "d055dbe56c515b325dece05fa98e0e84"
end
version "1.2.0-RC1" do
  source md5: "f8f00ce9b554937445d0c9d4097f56ef"
end

# Snapshot builds live in a different Artifactory repository than releases.
# NOTE: `version` is an Omnibus DSL call; coerce explicitly with to_s instead
# of the redundant "#{version}" interpolation.
repo_home = version.to_s.end_with?("SNAPSHOT") ? "libs-snapshot-local" : "libs-release-local"
source url: "https://development.c-b4.com/artifactory/#{repo_home}/org/jboss/aerogear/unifiedpush/unifiedpush-package/#{version}/unifiedpush-package-#{version}.tar.gz"

build do
  command "mkdir -p #{install_dir}/embedded/apps/unifiedpush-server/"
  sync "#{project_dir}/", "#{install_dir}/embedded/apps/unifiedpush-server/"
  # Strip version from packages.
  link "#{install_dir}/embedded/apps/unifiedpush-server/unifiedpush-server-wildfly-#{version}.war", "#{install_dir}/embedded/apps/unifiedpush-server/unifiedpush-server.war"
  link "#{install_dir}/embedded/apps/unifiedpush-server/unifiedpush-initdb-#{version}.tar.gz", "#{install_dir}/embedded/apps/unifiedpush-server/unifiedpush-initdb.tar.gz"
  link "#{install_dir}/embedded/apps/unifiedpush-server/unifiedpush-keycloak-theme-#{version}.tar.gz", "#{install_dir}/embedded/apps/unifiedpush-server/unifiedpush-keycloak-theme.tar.gz"
  link "#{install_dir}/embedded/apps/unifiedpush-server/unifiedpush-admin-ui-#{version}.tar.gz", "#{install_dir}/embedded/apps/unifiedpush-server/unifiedpush-admin-ui.tar.gz"
  erb source: "version.yml.erb",
      dest: "#{install_dir}/embedded/apps/unifiedpush-server/version.yml",
      mode: 0644,
      vars: { default_version: default_version }
end

# extract initdb project to allow JPA based schema creation.
build do
  command "tar xzf #{install_dir}/embedded/apps/unifiedpush-server/unifiedpush-initdb-#{version}.tar.gz -C #{install_dir}/embedded/apps/unifiedpush-server/"
end
|
require 'optparse'
require 'cucumber'
module Cucumber
  # Command line interface for running plain-text stories.
  #
  # Parses ARGV, requires step-definition files, loads .story files and hands
  # everything to an Executor. Wired up at the bottom of this file via an
  # at_exit hook so `cucumber FILES` just works.
  class CLI
    class << self
      attr_writer :step_mother, :stories

      # Parses ARGV and runs the stories against the registered
      # step_mother/stories. Records that it ran so the at_exit hook does not
      # invoke it a second time.
      def execute
        @execute_called = true
        parse(ARGV).execute!(@step_mother, @stories)
      end

      def execute_called?
        @execute_called
      end

      # Builds a CLI from raw arguments and parses its options.
      def parse(args)
        cli = new(args)
        cli.parse_options!
        cli
      end
    end

    def initialize(args)
      @args = args.dup
      @args.extend(OptionParser::Arguable)
    end

    # Parses the switches into @options and expands the remaining arguments
    # into @files, recursively globbing directories for *.story files.
    def parse_options!
      @options = { :require => nil, :lang => 'en', :format => 'pretty', :dry_run => false }
      @args.options do |opts|
        opts.banner = "Usage: cucumber [options] FILES|DIRS"
        opts.on("-r LIBRARY|DIR", "--require LIBRARY|DIR", "Require files before executing the stories.",
                "If this option is not specified, all *.rb files that",
                "are siblings or below the stories will be autorequired") do |v|
          @options[:require] ||= []
          @options[:require] << v
        end
        # BUG FIX: the long form previously read "--line LANG".
        opts.on("-l LINE", "--line LINE", "Only execute the scenario at the given line") do |v|
          @options[:line] = v
        end
        opts.on("-a LANG", "--language LANG", "Specify language for stories (Default: #{@options[:lang]})") do |v|
          @options[:lang] = v
        end
        opts.on("-f FORMAT", "--format FORMAT", "How to format stories (Default: #{@options[:format]})") do |v|
          @options[:format] = v
        end
        opts.on("-d", "--dry-run", "Invokes formatters without executing the steps.") do
          @options[:dry_run] = true
        end
      end.parse!
      # Whatever is left after option parsing
      @files = @args.map do |path|
        path = path.gsub(/\\/, '/') # In case we're on windows. Globs don't work with backslashes.
        File.directory?(path) ? Dir["#{path}/**/*.story"] : path
      end.flatten
    end

    # Runs the parsed files: requires support code, parses the stories and
    # visits them with an Executor. Exits non-zero on failure.
    def execute!(step_mother, stories)
      $executor = Executor.new(formatter, step_mother)
      require_files
      load_plain_text_stories(stories)
      $executor.line = @options[:line].to_i if @options[:line]
      $executor.visit_stories(stories)
      exit 1 if $executor.failed
    end

    private

    # Requires files - typically step files and ruby story files.
    def require_files
      require "cucumber/parser/story_parser_#{@options[:lang]}"
      requires = @options[:require] || @args.map{|f| File.directory?(f) ? f : File.dirname(f)}.uniq
      libs = requires.map do |path|
        path = path.gsub(/\\/, '/') # In case we're on windows. Globs don't work with backslashes.
        File.directory?(path) ? Dir["#{path}/**/*.rb"] : path
      end.flatten
      libs.each do |lib|
        begin
          require lib
        rescue LoadError => e
          # Make the failing library visible in the error message.
          e.message << "\nFailed to load #{lib}"
          raise e
        end
      end
    end

    # Parses each plain-text .story file and appends the AST to stories.
    def load_plain_text_stories(stories)
      parser = Parser::StoryParser.new
      @files.each do |f|
        stories << Parser::StoryNode.parse(f, parser)
      end
    end

    # Instantiates the formatter selected via --format.
    def formatter
      klass = {
        'progress' => Formatters::ProgressFormatter,
        'html' => Formatters::HtmlFormatter,
        'pretty' => Formatters::PrettyFormatter,
      }[@options[:format]]
      # BUG FIX: an unknown format previously crashed with NoMethodError on nil.
      raise ArgumentError, "Invalid format: #{@options[:format]}" unless klass
      klass.new(STDOUT)
    end
  end
end
# Hook the toplevel StepMother to the CLI
# TODO: Hook in a RubyStories object on toplevel for pure ruby stories
# Mixing StepMethods into the top-level object provides the `step_mother`
# registry that step definitions feed into.
extend Cucumber::StepMethods
Cucumber::CLI.step_mother = step_mother
# RubyTree provides the `stories` collection that parsed stories are added to.
extend(Cucumber::RubyTree)
Cucumber::CLI.stories = stories
# Run the CLI automatically at process exit unless it was invoked explicitly.
at_exit do
  Cucumber::CLI.execute unless Cucumber::CLI.execute_called?
end
Better CLI messages
require 'optparse'
require 'cucumber'
module Cucumber
class CLI
class << self
attr_writer :step_mother, :stories
def execute
@execute_called = true
parse(ARGV).execute!(@step_mother, @stories)
end
def execute_called?
@execute_called
end
def parse(args)
cli = new(args)
cli.parse_options!
cli
end
end
def initialize(args)
@args = args.dup
@args.extend(OptionParser::Arguable)
end
def parse_options!
@options = { :require => nil, :lang => 'en', :format => 'pretty', :dry_run => false }
@args.options do |opts|
opts.banner = "Usage: cucumber [options] FILES|DIRS"
opts.on("-r LIBRARY|DIR", "--require LIBRARY|DIR", "Require files before executing the stories.",
"If this option is not specified, all *.rb files that",
"are siblings or below the stories will be autorequired") do |v|
@options[:require] ||= []
@options[:require] << v
end
opts.on("-l LINE", "--line LANG", "Only execute the scenario at the given line") do |v|
@options[:line] = v
end
opts.on("-a LANG", "--language LANG", "Specify language for stories (Default: #{@options[:lang]})") do |v|
@options[:lang] = v
end
opts.on("-f FORMAT", "--format FORMAT", "How to format stories (Default: #{@options[:format]})") do |v|
@options[:format] = v
end
opts.on("-d", "--dry-run", "Invokes formatters without executing the steps.") do
@options[:dry_run] = true
end
opts.on_tail("--version", "Show version") do
puts VERSION::STRING
exit
end
opts.on_tail("--help", "You're looking at it") do
puts opts.help
exit
end
end.parse!
if @args.empty?
puts @args.options.help
exit 1
end
# Whatever is left after option parsing
@files = @args.map do |path|
path = path.gsub(/\\/, '/') # In case we're on windows. Globs don't work with backslashes.
File.directory?(path) ? Dir["#{path}/**/*.story"] : path
end.flatten
end
def execute!(step_mother, stories)
$executor = Executor.new(formatter, step_mother)
require_files
load_plain_text_stories(stories)
$executor.line = @options[:line].to_i if @options[:line]
$executor.visit_stories(stories)
exit 1 if $executor.failed
end
private
# Requires files - typically step files and ruby story files.
def require_files
require "cucumber/parser/story_parser_#{@options[:lang]}"
requires = @options[:require] || @args.map{|f| File.directory?(f) ? f : File.dirname(f)}.uniq
libs = requires.map do |path|
path = path.gsub(/\\/, '/') # In case we're on windows. Globs don't work with backslashes.
File.directory?(path) ? Dir["#{path}/**/*.rb"] : path
end.flatten
libs.each do |lib|
begin
require lib
rescue LoadError => e
e.message << "\nFailed to load #{lib}"
raise e
end
end
end
def load_plain_text_stories(stories)
parser = Parser::StoryParser.new
@files.each do |f|
stories << Parser::StoryNode.parse(f, parser)
end
end
def formatter
klass = {
'progress' => Formatters::ProgressFormatter,
'html' => Formatters::HtmlFormatter,
'pretty' => Formatters::PrettyFormatter,
}[@options[:format]]
klass.new(STDOUT)
end
end
end
# Hook the toplevel StepMother to the CLI
# TODO: Hook in a RubyStories object on toplevel for pure ruby stories
# Mixing StepMethods into the top-level object provides the `step_mother`
# registry that step definitions feed into.
extend Cucumber::StepMethods
Cucumber::CLI.step_mother = step_mother
# RubyTree provides the `stories` collection that parsed stories are added to.
extend(Cucumber::RubyTree)
Cucumber::CLI.stories = stories
# Run the CLI automatically at process exit unless it was invoked explicitly.
at_exit do
  Cucumber::CLI.execute unless Cucumber::CLI.execute_called?
end
require 'socket'
require 'portal/platform'
require 'portal/jvm'
require 'portal/interop'
require 'pp'
# Client for a remote eval "portal" server. Messages use a netstring-like
# framing: "<size>:<id> <type> <payload>,". A background thread routes every
# incoming message to a per-id context (results queue plus stdout/stderr
# pipes).
#
# NOTE(review): the character-literal idioms below (?:, ?,, (?0..?9)) are
# Ruby 1.8 style; on 1.9+ they evaluate to one-character Strings, which still
# works with IO#getc returning Strings — confirm the targeted Ruby version.
class Portal
  class Error < StandardError; end
  class ProtocolError < Error; end

  # Seconds slept between polls while waiting for a result.
  RESULT_WAIT = 0.01
  # Read chunk size used by #tail.
  BLOCK_SIZE = 1024

  # Connects and starts the reader thread that demultiplexes messages.
  def initialize(port, host = "localhost")
    @socket = TCPSocket.new(host, port)
    @contexts = {}
    Thread.new do
      while (message = receive_message)
        id, type, content = message
        if ["stdout", "stderr"].include?(type)
          # Write side of the context's pipe; #tail reads the other end.
          out = context(id)[type.to_sym][1]
          out.write(content)
          out.flush
        elsif ["result", "error", "read-error"].include?(type)
          # NOTE(review): results are appended and never removed, so this
          # array grows without bound over the life of a context (leak).
          context(id)[:results] << [type, content]
        else
          raise ProtocolError, "unknown message type: #{type}"
        end
      end
    end
  end

  # Frames and sends one "<size>:<id> <type> <content>," message.
  def send_message(id, type, content)
    message = "#{id} #{type} #{content}"
    @socket.write("#{message.size}:")
    @socket.write(message)
    @socket.write(",")
    @socket.flush
  end

  # Reads one framed message; returns [id, type, content].
  # Raises ProtocolError on malformed framing.
  def receive_message
    size = ""
    while (c = @socket.getc) != ?:
      raise ProtocolError.new("Message size must be an integer, found #{c.chr}") unless (?0..?9).include?(c)
      size << c
    end
    # Payload is "<id> <type> <content>"; content may itself contain spaces.
    message = @socket.read(size.to_i).split(/ /, 3)
    raise ProtocolError.new("Message must be followed by comma") unless @socket.getc == ?,
    message
  end

  # Lazily creates the bookkeeping structure for a context id.
  def context(id)
    @contexts[id.to_s] ||= {
      :results => [],
      :count => 0,
      :stdout => IO.pipe,
      :stderr => IO.pipe
    }
  end

  # Temporarily switches the default context id for the given block.
  def with_context(id)
    old_id, @id = @id, id
    yield
  ensure
    @id = old_id
  end

  # Submits +form+ for evaluation and returns a thunk. Calling the thunk
  # BLOCKS (polling every RESULT_WAIT seconds) until the matching result
  # arrives, then returns it as an array of lines; for error results the
  # last line is wrapped in a {type => line} Hash.
  def eval(form, id = @id || rand)
    send_message(id, "eval", form)
    context = context(id)
    # Submission order pairs the thunk with the (count-1)-th result;
    # assumes the server answers evals in order per context.
    count = context[:count] += 1;
    lambda do
      while (count > context[:results].size)
        sleep(RESULT_WAIT)
      end
      type, form = context[:results][count - 1]
      if type == "result"
        form.split("\n")
      else
        vals = form.split("\n")
        vals[-1] = {type.to_sym => vals[-1]}
        vals
      end
    end
  end

  # Sends a string to the remote process's stdin for the given context.
  def write(string, id = @id)
    raise ProtocolError, "context id required to write to stdin" unless id
    send_message(id, "stdin", string)
  end

  # Streams a context's stdout/stderr pipe to our stdout until EOF.
  def tail(type, id = @id)
    raise ProtocolError, "context id required to tail" unless id
    while true
      print(context(id)[type.to_sym][0].readpartial(BLOCK_SIZE))
    end
  rescue EOFError
  end
end
make ruby eval lambda non-blocking. fix memory leak
require 'socket'
require 'portal/platform'
require 'portal/jvm'
require 'portal/interop'
require 'pp'
# Client for a remote eval "portal" server. Messages use a netstring-like
# framing: "<size>:<id> <type> <payload>,". A background thread routes every
# incoming message to a per-id context (results map plus stdout/stderr pipes).
class Portal
  class Error < StandardError; end
  class ProtocolError < Error; end

  # Seconds between polls when a caller chooses to wait on a pending thunk.
  RESULT_WAIT = 0.01
  # Read chunk size used by #tail.
  BLOCK_SIZE = 1024

  # Connects and starts the reader thread that demultiplexes messages.
  def initialize(port, host = "localhost")
    @socket = TCPSocket.new(host, port)
    @contexts = {}
    Thread.new do
      while (message = receive_message)
        id, type, content = message
        if ["stdout", "stderr"].include?(type)
          out = context(id)[type.to_sym][1]
          out.write(content)
          out.flush
        elsif ["result", "error", "read-error"].include?(type)
          # BUG FIX: :results became a Hash in this revision but the thread
          # still used `<<` (NoMethodError). Store keyed by arrival order so
          # eval thunks can claim and delete exactly one entry each.
          # Assumes the server answers evals in submission order per context.
          ctx = context(id)
          ctx[:results][ctx[:received] += 1] = [type, content]
        else
          raise ProtocolError, "unknown message type: #{type}"
        end
      end
    end
  end

  # Frames and sends one "<size>:<id> <type> <content>," message.
  def send_message(id, type, content)
    message = "#{id} #{type} #{content}"
    @socket.write("#{message.size}:")
    @socket.write(message)
    @socket.write(",")
    @socket.flush
  end

  # Reads one framed message; returns [id, type, content].
  # @raise [ProtocolError] on malformed framing
  def receive_message
    size = ""
    while (c = @socket.getc) != ?:
      raise ProtocolError.new("Message size must be an integer, found #{c.chr}") unless (?0..?9).include?(c)
      size << c
    end
    message = @socket.read(size.to_i).split(/ /, 3)
    raise ProtocolError.new("Message must be followed by comma") unless @socket.getc == ?,
    message
  end

  # Lazily creates the bookkeeping structure for a context id.
  # :results maps arrival index => [type, content]; entries are deleted once
  # claimed by a thunk, so the map does not grow without bound (fixes the
  # memory leak of the array-based version). :received counts stored results.
  def context(id)
    @contexts[id.to_s] ||= {
      :results => {},
      :count => 0,
      :received => 0,
      :stdout => IO.pipe,
      :stderr => IO.pipe
    }
  end

  # Temporarily switches the default context id for the given block.
  def with_context(id)
    old_id, @id = @id, id
    yield
  ensure
    @id = old_id
  end

  # Submits +form+ for evaluation and returns a NON-BLOCKING thunk.
  # Calling the thunk returns nil while the result is pending; once it has
  # arrived it is claimed (removed from the context) and returned as an
  # array of lines. For "error"/"read-error" results the last line is
  # wrapped in a {type => line} Hash.
  def eval(form, id = @id || rand)
    send_message(id, "eval", form)
    context = context(id)
    count = context[:count] += 1
    lambda do
      # BUG FIX: the previous revision referenced an undefined local `type`
      # and never consulted :results; claim our result (or nil if pending).
      type, form = context[:results].delete(count)
      return unless type
      vals = form.split("\n")
      vals[-1] = {type.to_sym => vals[-1]} unless type == "result"
      vals
    end
  end

  # Sends a string to the remote process's stdin for the given context.
  def write(string, id = @id)
    raise ProtocolError, "context id required to write to stdin" unless id
    send_message(id, "stdin", string)
  end

  # Streams a context's stdout/stderr pipe to our stdout until EOF.
  def tail(type, id = @id)
    raise ProtocolError, "context id required to tail" unless id
    while true
      print(context(id)[type.to_sym][0].readpartial(BLOCK_SIZE))
    end
  rescue EOFError
  end
end
|
module Amiando
  ##
  # Wrapper for the amiando Events REST resource.
  # http://developers.amiando.com/index.php/REST_API_Events
  class Event < Resource
    # Declarative attribute mappings: ruby name, API name, optional :type.
    map :host_id, :hostId
    map :selected_date, :selectedDate, :type => :time
    map :selected_end_date, :selectedEndDate, :type => :time
    map :short_description, :shortDescription
    map :event_type, :eventType
    map :organisator_display_name, :organisatorDisplayName
    map :partner_event_url, :partnerEventUrl
    map :publish_search_engines, :publishSearchEngines
    map :search_engine_tags, :searchEngineTags
    map :location_description, :locationDescription
    map :zip_code, :zipCode
    map :creation_time, :creationTime, :type => :time
    map :last_modified, :lastModified, :type => :time

    ##
    # Creates an event.
    #
    # @return [Event] will not return the full event and only the id attribute
    # will be available.
    def self.create(attributes)
      object = new
      request = post object, '/api/event/create',
        :params => map_params(attributes),
        :populate_method => :populate_create
      object
    end

    ##
    # Fetch an event
    #
    # @return [Event] populated via #populate when the request resolves.
    def self.find(id)
      object = new
      request = get object, "/api/event/#{id}"
      object
    end

    ##
    # Search by identifier or title.
    #
    # @param [Hash] a hash with 1 entry, either :identifier or :title
    #
    # @return [Result] with an array of ids
    def self.search(by = {})
      unless by[:identifier].nil? ^ by[:title].nil? # XOR
        raise ArgumentError.new('Events can be searched either by identifier or by title, include only one.')
      end
      object = Result.new { |response_body| response_body['ids'] }
      request = get object, '/api/event/find', :params => by
      object
    end

    # Populates this event's attributes from an API response body.
    def populate(response_body)
      # BUG FIX: was extracting the 'user' key; event responses are keyed
      # by 'event'.
      extract_attributes_from(response_body, 'event')
    end

    # Populates from a create response, which carries only id and success.
    def populate_create(response_body)
      @attributes = {:id => response_body['id']}
      @success = response_body['success']
    end
  end
end
Fixed Event.find, somehow slipped by
module Amiando
  ##
  # Wrapper for the amiando Events REST resource.
  # http://developers.amiando.com/index.php/REST_API_Events
  class Event < Resource
    # Declarative attribute mappings: ruby name, API name, optional :type.
    map :host_id, :hostId
    map :selected_date, :selectedDate, :type => :time
    map :selected_end_date, :selectedEndDate, :type => :time
    map :short_description, :shortDescription
    map :event_type, :eventType
    map :organisator_display_name, :organisatorDisplayName
    map :partner_event_url, :partnerEventUrl
    map :publish_search_engines, :publishSearchEngines
    map :search_engine_tags, :searchEngineTags
    map :location_description, :locationDescription
    map :zip_code, :zipCode
    map :creation_time, :creationTime, :type => :time
    map :last_modified, :lastModified, :type => :time

    ##
    # Creates an event.
    #
    # @return [Event] will not return the full event and only the id attribute
    # will be available.
    def self.create(attributes)
      object = new
      # NOTE(review): the request handle is unused; presumably get/post
      # (defined on Resource) populate `object` when the request resolves —
      # confirm against Resource.
      request = post object, '/api/event/create',
        :params => map_params(attributes),
        :populate_method => :populate_create
      object
    end

    ##
    # Fetch an event
    #
    # @return [Event] populated via #populate when the request resolves.
    def self.find(id)
      object = new
      request = get object, "/api/event/#{id}"
      object
    end

    ##
    # Search by identifier or title.
    #
    # @param [Hash] a hash with 1 entry, either :identifier or :title
    #
    # @return [Result] with an array of ids
    def self.search(by = {})
      unless by[:identifier].nil? ^ by[:title].nil? # XOR
        raise ArgumentError.new('Events can be searched either by identifier or by title, include only one.')
      end
      object = Result.new { |response_body| response_body['ids'] }
      request = get object, '/api/event/find', :params => by
      object
    end

    # Populates this event's attributes from the 'event' key of a response.
    def populate(response_body)
      extract_attributes_from(response_body, 'event')
    end

    # Populates from a create response, which carries only id and success.
    def populate_create(response_body)
      @attributes = {:id => response_body['id']}
      @success = response_body['success']
    end
  end
end
|
module CZTop
  # Represents a CZMQ::FFI::Zsock.
  class Socket
    include HasFFIDelegate
    extend CZTop::HasFFIDelegate::ClassMethods
    include ZsockOptions
    include SendReceiveMethods
    include PolymorphicZsockMethods
    include CZMQ::FFI

    # @!group CURVE Security

    # Enables CURVE security and makes this socket a CURVE server.
    # @param cert [Certificate] this server's certificate,
    #   so remote clients are able to authenticate this server
    # @note You'll have to use a {CZTop::Authenticator}.
    # @return [void]
    # @raise [ArgumentError] if there's no secret key in cert
    def CURVE_server!(cert)
      options.CURVE_server = true
      cert.apply(self) # NOTE: desired: raises if no secret key in cert
    end

    # Enables CURVE security and makes this socket a CURVE client.
    # @param client_cert [Certificate] client's certificate, to secure
    #   communication (and be authenticated by the server)
    # @param server_cert [Certificate] the remote server's certificate, so
    #   this socket is able to authenticate the server
    # @return [void]
    # @raise [SecurityError] if the server's secret key is set in server_cert,
    #   which means it's not secret anymore
    # @raise [SystemCallError] if there's no secret key in client_cert
    def CURVE_client!(client_cert, server_cert)
      if server_cert.secret_key
        raise SecurityError, "server's secret key not secret"
      end
      client_cert.apply(self) # NOTE: desired: raises if no secret key in cert
      options.CURVE_serverkey = server_cert.public_key
    end

    # @!endgroup

    # @return [String] last bound endpoint, if any
    # @return [nil] if not bound
    def last_endpoint
      ffi_delegate.endpoint
    end

    # Connects to an endpoint.
    # @param endpoint [String]
    # @return [void]
    # @raise [ArgumentError] if the endpoint is incorrect
    def connect(endpoint)
      # "%s" + :string keeps zsock_connect from interpreting % in endpoint.
      rc = ffi_delegate.connect("%s", :string, endpoint)
      raise ArgumentError, "incorrect endpoint: %p" % endpoint if rc == -1
    end

    # Disconnects from an endpoint.
    # @param endpoint [String]
    # @return [void]
    # @raise [ArgumentError] if the endpoint is incorrect
    def disconnect(endpoint)
      rc = ffi_delegate.disconnect("%s", :string, endpoint)
      raise ArgumentError, "incorrect endpoint: %p" % endpoint if rc == -1
    end

    # Closes and destroys the native socket.
    # @return [void]
    # @note Don't try to use it anymore afterwards.
    def close
      ffi_delegate.destroy
    end

    # @return [Integer] last automatically selected, bound TCP port, if any
    # @return [nil] if not bound to a TCP port yet
    attr_reader :last_tcp_port

    # Binds to an endpoint.
    # @note When binding to an automatically selected TCP port, this will set
    #   {#last_tcp_port}.
    # @param endpoint [String]
    # @return [void]
    # @raise [SystemCallError] in case of failure
    def bind(endpoint)
      rc = ffi_delegate.bind("%s", :string, endpoint)
      raise_zmq_err("unable to bind to %p" % endpoint) if rc == -1
      # A positive return value is the ephemeral TCP port that was chosen.
      @last_tcp_port = rc if rc > 0
    end

    # Unbinds from an endpoint.
    # @param endpoint [String]
    # @return [void]
    # @raise [ArgumentError] if the endpoint is incorrect
    def unbind(endpoint)
      rc = ffi_delegate.unbind("%s", :string, endpoint)
      raise ArgumentError, "incorrect endpoint: %p" % endpoint if rc == -1
    end

    # Inspects this {Socket}.
    # @return [String] shows class, native address, and {#last_endpoint}
    def inspect
      "#<%s:0x%x last_endpoint=%p>" % [
        self.class,
        to_ptr.address,
        last_endpoint
      ]
    end
  end
end
fix API doc
module CZTop
  # Thin wrapper around a native CZMQ::FFI::Zsock.
  class Socket
    include HasFFIDelegate
    extend CZTop::HasFFIDelegate::ClassMethods
    include ZsockOptions
    include SendReceiveMethods
    include PolymorphicZsockMethods
    include CZMQ::FFI

    # @!group CURVE Security

    # Turns on CURVE security and configures this socket as a CURVE server.
    # @param cert [Certificate] this server's certificate,
    #   so remote clients are able to authenticate this server
    # @note You'll have to use a {CZTop::Authenticator}.
    # @return [void]
    # @raise [ArgumentError] if there's no secret key in certificate
    def CURVE_server!(cert)
      options.CURVE_server = true
      cert.apply(self) # NOTE: desired: raises if no secret key in cert
    end

    # Turns on CURVE security and configures this socket as a CURVE client.
    # @param client_cert [Certificate] client's certificate, to secure
    #   communication (and be authenticated by the server)
    # @param server_cert [Certificate] the remote server's certificate, so
    #   this socket is able to authenticate the server
    # @return [void]
    # @raise [SecurityError] if the server's secret key is set in server_cert,
    #   which means it's not secret anymore
    # @raise [SystemCallError] if there's no secret key in client_cert
    def CURVE_client!(client_cert, server_cert)
      raise SecurityError, "server's secret key not secret" if server_cert.secret_key
      client_cert.apply(self) # NOTE: desired: raises if no secret key in cert
      options.CURVE_serverkey = server_cert.public_key
    end

    # @!endgroup

    # @return [String] last bound endpoint, if any
    # @return [nil] if not bound
    def last_endpoint
      ffi_delegate.endpoint
    end

    # Establishes a connection to the given endpoint.
    # @param endpoint [String]
    # @return [void]
    # @raise [ArgumentError] if the endpoint is incorrect
    def connect(endpoint)
      result = ffi_delegate.connect("%s", :string, endpoint)
      raise ArgumentError, format("incorrect endpoint: %p", endpoint) if result == -1
    end

    # Drops the connection to the given endpoint.
    # @param endpoint [String]
    # @return [void]
    # @raise [ArgumentError] if the endpoint is incorrect
    def disconnect(endpoint)
      result = ffi_delegate.disconnect("%s", :string, endpoint)
      raise ArgumentError, format("incorrect endpoint: %p", endpoint) if result == -1
    end

    # Destroys the underlying native socket.
    # @return [void]
    # @note Don't try to use it anymore afterwards.
    def close
      ffi_delegate.destroy
    end

    # @return [Integer] last automatically selected, bound TCP port, if any
    # @return [nil] if not bound to a TCP port yet
    attr_reader :last_tcp_port

    # Binds this socket to an endpoint.
    # @note When binding to an automatically selected TCP port, this will set
    #   {#last_tcp_port}.
    # @param endpoint [String]
    # @return [void]
    # @raise [SystemCallError] in case of failure
    def bind(endpoint)
      result = ffi_delegate.bind("%s", :string, endpoint)
      raise_zmq_err(format("unable to bind to %p", endpoint)) if result == -1
      @last_tcp_port = result if result > 0
    end

    # Unbinds this socket from an endpoint.
    # @param endpoint [String]
    # @return [void]
    # @raise [ArgumentError] if the endpoint is incorrect
    def unbind(endpoint)
      result = ffi_delegate.unbind("%s", :string, endpoint)
      raise ArgumentError, format("incorrect endpoint: %p", endpoint) if result == -1
    end

    # Inspects this {Socket}.
    # @return [String] shows class, native address, and {#last_endpoint}
    def inspect
      format("#<%s:0x%x last_endpoint=%p>", self.class, to_ptr.address, last_endpoint)
    end
  end
end
|
# encoding: utf-8
# Original source is "青空文庫→TeX(ver. 0.9.5 2004/5/5 psitau)"
# see: http://psitau.kitunebi.com/aozora.html
#
# Also another source is "青空キンドル [Beta]"
# see: http://a2k.aill.org/
require 'nkf'
class Aozora4Reader
PreambleLineNumber=13
KANJIPAT = "[々〇〻\u3400-\u9FFF\uF900-\uFAFF※ヶ〆]"
MAX_SAGE = 15
# Convenience entry point: convert the given Aozora-bunko text file.
def self.a4r(file)
  new(file).main
end
def initialize(file)
@inputfile_name = file
@jisage = false
@log_text = []
@line_num=0
@gaiji = {}
@gaiji2 = {}
end
# UTF-8で出力
def normalize(l)
##l.gsub!(/&/, '\\\\&')
l.to_s
end
# 全角→半角
def to_single_byte(str)
s = str.dup
if s =~ /[0-9]/
s.tr!("1234567890", "1234567890")
elsif s =~ /[一二三四五六七八九〇]/
s.tr!("一二三四五六七八九〇", "1234567890")
end
case s
when /\d十\d/
s.sub!(/(\d)十(\d)/, '\1\2')
when /\d十/
s.sub!(/(\d)十/, '\{1}0')
when /十\d/
s.sub!(/十(\d)/, '1\1')
when /十/
s.sub!(/十/, "10")
end
if s =~/[!?]/
s.tr!("!?", "!?")
end
return s
end
# ルビの削除(表題等)
def remove_ruby(str)
str.gsub(/\\ruby{([^}]+)}{[^}]*}/i){$1}
end
# プリアンブルの出力
def preamble
title = remove_ruby(@title)
author = remove_ruby(@author)
str = <<"END_OF_PRE"
\\documentclass[a5paper]{tbook}
%\\documentclass[a5paper, twocolumn]{tbook}
%\\usepackage[deluxe]{otf}
\\usepackage[expert, deluxe]{otf}
%\\usepackage{utf}
\\usepackage{furikana}
\\usepackage{type1cm}
\\usepackage[size=large]{aozora4reader}
\\def\\rubykatuji{\\rubyfamily\\tiny}
%\\def\\rubykatuji{\\tiny}%for UTF package
%\\renewenvironment{teihon}{\\comment}{\\endcomment}
\\usepackage[dvipdfm,bookmarks=false,bookmarksnumbered=false,hyperfootnotes=false,%
pdftitle={#{title}},%
pdfauthor={#{author}}]{hyperref}
%% Bookmarkの文字化け対策(日本語向け)
\\ifnum 46273=\\euc"B4C1 % 46273 == 0xB4C1 == 漢(EUC-JP)
\\AtBeginDvi{\\special{pdf:tounicode EUC-UCS2}}%
\\else
\\AtBeginDvi{\\special{pdf:tounicode 90ms-RKSJ-UCS2}}%
\\fi
END_OF_PRE
str
end
# 底本の表示用
def postamble
str = <<"END_OF_POST"
\\theendnotes
\\begin{teihon}
\\clearpage\\null\\newpage\\thispagestyle{empty}
\\begin{minipage}<y>{\\textheight}
\\vspace{1\\baselineskip}
\\scriptsize
END_OF_POST
str
end
# アクセントの処理用
# http://www.aozora.gr.jp/accent_separation.html
# http://cosmoshouse.com/tools/acc-conv-j.htm
def translate_accent(l)
l.gsub!(/([ij]):/){"\\\"{\\#{$1}}"}
l.gsub!(/([AIOEUaioeu])(['`~^])/){"\\#$2{#$1}"}
l.gsub!(/([AIOEUaioeu]):/){"\\\"{#$1}"}
l.gsub!(/([AIOEUaioeu])_/){"\\={#$1}"}
l.gsub!(/([!?])@/){"#$1'"}
l.gsub!(/([Aa])&/){"\\r{#$1}"}
l.gsub!(/AE&/){"\\AE{}"}
l.gsub!(/ae&/){"\\ae{}"}
l.gsub!(/OE&/){"\\OE{}"}
l.gsub!(/oe&/){"\\oe{}"}
l.gsub!(/s&/){"\\ss{}"}
l.gsub!(/([cC]),/){"\\c{#$1}"}
l.gsub!(/〔/,'')
l.gsub!(/〕/,'')
return l
end
# 外字の処理用
def translate_gaiji(l)
if l =~/※[#([^]]*)、([^、]]*)]/
if @gaiji2[$1]
l.gsub!(/※[#([^]]*)、([^、]]*)]/){@gaiji2[$1]}
end
end
## ※[#「姉」の正字、「女+※[#第3水準1-85-57]のつくり」、256-下-16]
if l =~/※[#([^]]*※[#[^]]*][^]]*)、([^、]]*)]/
if @gaiji2[$1]
l.gsub!(/※[#([^]]*※[#[^]]*][^]]*)、([^、]]*)]/){@gaiji2[$1]}
end
end
## ※[#「さんずい+闊」]
if l =~ /※[#「([^]]+?)」]/
if @gaiji2[$1]
l.gsub!(/※[#「([^]]+?)」]/){@gaiji2[$1]}
end
end
if l =~ /※[#[^]]*?※[#[^]]*?[12]\-\d{1,2}\-\d{1,2}[^]]*?][^]]*?]/
l.gsub!(/※[#([^]]*?)※[#([^]]*?([12]\-\d{1,2}\-\d{1,2})[^]]*?)]([^]]*?)]/){"※\\footnote{#$1"+@gaiji[$3]+"#$4}"}
end
if l =~ /※[#[^]]*?([12]\-\d{1,2}\-\d{1,2})[^]]*?]/
if @gaiji[$1]
l.gsub!(/※[#([^]]*?([12]\-\d{1,2}\-\d{1,2})[^]]*?)]/){@gaiji[$2]}
end
end
if l =~ /※[#濁点付き片仮名([ワヰヱヲ])、.*?]/
l.gsub!(/※[#濁点付き片仮名([ワヰヱヲ])、.*?]/){ "\\ajLig{#{$1}゛}"}
end
if l =~ /※[#感嘆符三つ.*]/
l.gsub!(/※[#感嘆符三つ.*?]/){ "\\rensuji{!!!}"}
end
if l =~ /※[#.*?([A-Za-z0-9_]+\.png).*?]/
l.gsub!(/※[#([^]]+?)]/, "\\includegraphics{#{$1}}")
end
if l =~ /※[#[^]]+?]/
l.gsub!(/※[#([^]]+?)]/, '※\\footnote{\1}')
end
if l =~ /※/
STDERR.puts("Remaining Unprocessed Gaiji Character in Line #@line_num.")
@log_text << normalize("未処理の外字が#{@line_num}行目にあります.\n")
end
return l
end
# ルビの処理用
def translate_ruby(l)
# 被ルビ文字列内に外字の注記があるばあい,ルビ文字列の後ろに移動する
# ただし,順番が入れ替わってしまう
while l =~ /※\\footnote\{[^(?:\\footnote)]+\}(?:#{KANJIPAT}|(?:\\UTF\{[0-9a-fA-F]+\}|\\CID\{[0-9]+\}))*?《.+?》/
l.sub!(/(※)(\\footnote\{[^(?:\\footnote)]+\})((?:#{KANJIPAT}|(?:\\UTF\{[0-9a-fA-F]+\}|\\CID\{[0-9]+\}))*?《.+?》)/, '\1\3\2')
end
# 被ルビ文字列内に誤記などの注記が存在する場合は、ルビの後ろに移動する
while l =~ /(?:#{KANJIPAT}|(?:\\UTF\{[0-9a-fA-F]+\}|\\CID\{[0-9]+\}))+?[#[^]]*?](?:#{KANJIPAT}|(?:\\UTF\{[0-9a-fA-F]+\}|\\CID\{[0-9]+\}))*?《.+?》/
l.sub!(/((?:#{KANJIPAT}|(?:\\UTF\{[0-9a-fA-F]+\}|\\CID\{[0-9]+\}))+?)([#[^]]*?])((?:#{KANJIPAT}|(?:\\UTF\{[0-9a-fA-F]+\}|\\CID\{[0-9]+\}))*?《.+?》)/, '\1\3\2')
end
# ルビ文字列内に誤記などの注記が存在する場合は、ルビの後ろに移動する
if l =~ /《[^》]*?[#[^]]*?][^》]*?》/
l.gsub!(/(《[^》]*?)([#[^]]*?])([^》]*?》)/, '\1\3\2')
end
# 一連のルビの処理
# 1 縦棒ありの場合
if l =~ /|/
l.gsub!(/|(.+?)《(.+?)》/, '\\ruby{\1}{\2}')
end
# 2 漢字および外字
if l =~ /(?:#{KANJIPAT}|(?:\\UTF\{[0-9a-fA-F]+\}|\\CID\{[0-9]+\}))+?《.+?》/
l.gsub!(/((?:#{KANJIPAT}|(?:\\UTF\{[0-9a-fA-F]+\}|\\CID\{[0-9]+\}))+?)《(.+?)》/, '\\ruby{\1}{\2}')
end
# 3 ひらがな
if l =~ /[あ-ん](?:[ぁ-んーヽヾ]|\\CID\{12107\})*?《.+?》/
l.gsub!(/([あ-ん](?:[ぁ-んーヽヾ]|\\CID\{12107\})*?)《(.+?)》/, '\\ruby{\1}{\2}')
end
# 4 カタカナ
if l =~ /[ア-ヴ](?:[ァ-ヴーゝゞ]|\\CID\{12107\})*?《.+?》/
l.gsub!(/([ア-ヴ](?:[ァ-ヴーゝゞ]|\\CID\{12107\})*?)《(.+?)》/, '\\ruby{\1}{\2}')
end
# 5 全角アルファベットなど
if l =~ /[A-Za-zΑ-Ωα-ωА-Яа-я・]+?《.+?》/
l.gsub!(/([A-Za-zΑ-Ωα-ωА-Яа-я・]+?)《(.+?)》/, '\\ruby{\1}{\2}')
end
# 6 半角英数字
if l =~ /[A-Za-z0-9#\-\;\&.\'\^\`\\\{\} ]+?《.+?》/
l.gsub!(/([A-Za-z0-9#\-\;\&.\'\^\`\\\{\} ]+?)《(.+?)》/, '\\ruby{\1}{\2}')
end
if l =~ /《.*?》/
STDERR.puts("Unknown ruby pattern found in #@line_num.")
@log_text << normalize("未処理のルビが#{@line_num}行目にあります.\n")
end
return l
end
# 傍点の処理用
def translate_bouten(l)
bouten_list = [
["傍点", "bou"],
["白ゴマ傍点","sirogomabou"],
["丸傍点","marubou"],
["白丸傍点","siromarubou"],
["黒三角傍点","kurosankakubou"],
["白三角傍点","sirosankakubou"],
["二重丸傍点","nijyuumarubou"],
["蛇の目傍点","jyanomebou"]]
bouten_list.each{ |name, fun|
if l =~ /[#「.+?」に#{name}]/
l.gsub!(/(.+?)[#.*?「\1」に#{name}]/){
str = $1
str.gsub!(/(\\UTF{.+?})/){ "{"+$1+"}"}
str.gsub!(/(\\ruby{.+?}{.+?})/i){ "{"+$1+"}"}
"\\#{fun}{"+str+"}"
}
end
}
if l =~ /[#傍点].+?[#傍点終わり]/
l.gsub!(/[#傍点](.+?)[#傍点終わり]/){
str = $1
str.gsub!(/(\\UTF{.+?})/){ "{"+$1+"}"}
str.gsub!(/(\\ruby{.+?}{.+?})/i){ "{"+$1+"}"}
"\\bou{"+str+"}"
}
end
return l
end
# 傍線の処理用
def translate_bousen(l)
if l =~ /[#「.+?」に傍線]/
l.gsub!(/(.+?)[#「\1」に傍線]/, '\\bousen{\1}')
end
if l =~ /[#「.+?」に二重傍線]/
l.gsub!(/(.+?)[#「\1」に二重傍線]/, '\\bousen{\1}')
end
if l =~ /[#「.+?」に鎖線]/
l.gsub!(/(.+?)[#「\1」に鎖線]/, '\\bousen{\1}')
end
if l =~ /[#「.+?」に破線]/
l.gsub!(/(.+?)[#「\1」に破線]/, '\\bousen{\1}')
end
if l =~ /[#「.+?」に波線]/
l.gsub!(/(.+?)[#「\1」に波線]/, '\\bousen{\1}')
end
return l
end
# ルビの調整
def tuning_ruby(l)
# 1 直前が漢字の場合
if l =~ /(?:#{KANJIPAT}|(?:\\UTF\{[0-9a-fA-F]+\}|\\CID\{[0-9]+\}))\\ruby/
l.gsub!(/((?:#{KANJIPAT}|(?:\\UTF\{[0-9a-fA-F]+\}|\\CID\{[0-9]+\})))\\ruby/, '\1\\Ruby')
end
# 2 直後が漢字の場合
if l =~ /\\ruby\{(?:#{KANJIPAT}|(?:\\UTF\{[0-9a-fA-F]+\}|\\CID\{[0-9]+\}))+?\}\{(?:[^\\\{\}]|(?:\\UTF\{[0-9a-fA-F]+\}|\\CID\{[0-9]+\}))+?\}(?:#{KANJIPAT}|(?:\\UTF\{[0-9a-fA-F]+\}|\\CID\{[0-9]+\}))/
l.gsub!(/\\ruby(\{(?:#{KANJIPAT}|(?:\\UTF\{[0-9a-fA-F]+\}|\\CID\{[0-9]+\}))+?\}\{(?:[^\\\{\}]|(?:\\UTF\{[0-9a-fA-F]+\}|\\CID\{[0-9]+\}))+?\}(?:#{KANJIPAT}|(?:\\UTF\{[0-9a-fA-F]+\}|\\CID\{[0-9]+\})))/, '\\Ruby\1')
end
# 3 ルビが連続する場合
while l =~ /\\(?:ruby|RUBY|Ruby)\{(?:#{KANJIPAT}|(?:\\UTF\{[0-9a-fA-F]+\}|\\CID\{[0-9]+\}))+?\}\{(?:[^\\{}]|(?:\\UTF\{[0-9a-fA-F]+\}|\\CID\{[0-9]+\}))+?\}\\ruby/
l.sub!(/\\(?:ruby|RUBY|Ruby)(\{(?:#{KANJIPAT}|(?:\\UTF\{[0-9a-fA-F]+\}|\\CID\{[0-9]+\}))+?\}\{(?:[^\\{}]|(?:\\UTF\{[0-9a-fA-F]+\}|\\CID\{[0-9]+\}))+?\})\\ruby/, '\\RUBY\1\\RUBY')
end
end
# 傍点の調整
def tuning_bou(l)
# 傍点の中の「くの字点」を変換
while l =~ /(\\[a-z]*?bou\{(?:\w|(?:\\UTF\{[0-9a-fA-F]+\}|\\CID\{[0-9]+\}))+?)(\\ajD?Kunoji)\{\}((?:\w|(?:\\UTF\{[0-9a-fA-F]+\}|\\CID\{[0-9]+\}))*?)\}/
l.gsub!(/((\\([a-z]*?)bou)\{(?:\w|(?:\\UTF\{[0-9a-fA-F]+\}|\\CID\{[0-9]+\}))+?)(\\ajD?Kunoji)\{\}((?:\w|(?:\\UTF\{[0-9a-fA-F]+\}|\\CID\{[0-9]+\}))*?)\}/, '\1}\4with\3Bou\2{\5}')
end
if l =~ /\\[a-z]*?bou\{\}/
l.gsub!(/\\([a-z]*?)bou\{\}/, '{}')
end
return l
end
# 外字用ハッシュを作成
def load_gaiji
datadir = File.dirname(__FILE__)+"/../data"
File.open(datadir+"/gaiji.txt") do |f|
while gaiji_line = f.gets
gaiji_line.chomp!
key, data = gaiji_line.split
@gaiji[key] = data
end
end
File.open(datadir+"/gaiji2.txt") do |f|
while gaiji_line = f.gets
gaiji_line.chomp!
key, data = gaiji_line.split
data.gsub(/#.*$/,'')
@gaiji2[key] = data
end
end
end
#
# メインパート
#
def main
load_gaiji()
# 入出力ファイルの定義
outputfile_name = @inputfile_name.sub(/\.txt$/, ".tex")
inputfile = File.open(@inputfile_name)
outputfile = File.open(outputfile_name, "w")
# プリアンブルの処理
empty_line = 0
in_note = false
meta_data = []
while empty_line < 2
line = inputfile.gets.chomp
line = NKF::nkf('-wS', line)
if in_note
if line =~ /^-+$/
in_note = false
break
end
else
if line =~ /^-+$/
in_note = true
else
if line =~ /^$/
empty_line += 1
else
if line =~ /《.*?》/
translate_ruby(line)
end
meta_data << line
end
end
end
end
@line_num += meta_data.size
@title = normalize(meta_data.shift)
case meta_data.size
when 1
@author = normalize(meta_data.shift)
when 2
@subtitle = normalize(meta_data.shift)
@author = normalize(meta_data.shift)
when 3
@subtitle = normalize(meta_data.shift)
@author = normalize(meta_data.shift)
@subauthor = normalize(meta_data.shift)
else
@subtitle = normalize(meta_data.shift)
@meta_data = []
until meta_data.empty?
@meta_data << normalize(meta_data.shift)
end
@subauthor = @meta_data.pop
@author = @meta_data.pop
end
outputfile.write(preamble())
outputfile.print "\\title{"+@title+"}\n"
outputfile.print "\\subtitle{"+@subtitle+"}\n" if @subtitle
outputfile.print "\\author{"+@author+"}\n"
outputfile.print "\\subauthor{"+@subauthor+"}\n" if @subauthor
if @meta_data
@meta_data.each do |data|
outputfile.print "\\metadata{"+data+"}\n"
end
end
outputfile.print "\\date{}\n"
# 本文の処理
outputfile.print "\\begin{document}\n\\maketitle\n"
@line_num += PreambleLineNumber
while line = inputfile.gets
@line_num += 1
line.chomp!
line = NKF::nkf('-wS', line)
break if line =~ /^底本/
if line =~ /^ 「/
line.sub!(/^ 「/, "\\mbox{ }\\kern0mm\\inhibitglue「")
end
if line =~ /[ワヰヱヲ]゛/
line.gsub!(/ワ゛/, "\\ajLig{ワ゛}")
line.gsub!(/ヰ゛/, "\\ajLig{ヰ゛}")
line.gsub!(/ヱ゛/, "\\ajLig{ヱ゛}")
line.gsub!(/ヲ゛/, "\\ajLig{ヲ゛}")
end
if line =~ /[?!] /
line.gsub!(/([?!]) /, '\1{}')
end
if line =~ /——/
line.gsub!(/——/, "\\——{}")
end
if line =~ //\/
line.gsub!(//\/, "\\ajKunoji{}")
end
if line =~ //″\/
line.gsub!(//″\/, "\\ajDKunoji{}")
end
=begin
if line =~ /^ +.+/
line.gsub!(/^ +([一二三四五六七八九〇十].*)/, '\\section*{\1}')
end
=end
while line =~ /(.+?)[#(「\1」は横?[1|一]文字[^]]*?)]/
line = line.sub(/(.+?)[#(「\1」は横?[1|一]文字[^]]*?)]/){"\\ajLig{"+to_single_byte($1)+"}"}
end
if line =~ /[#改丁.*?]/
line.sub!(/[#改丁.*?]/, "\\cleardoublepage")
end
if line =~ /[#改[頁|ページ].*?]/
line.sub!(/[#改[頁|ページ].*?]/, "\\clearpage")
end
if line =~ /〔.*?〕/
translate_accent(line)
end
if line =~ /※/
translate_gaiji(line)
end
if line =~ /《.*?》/
translate_ruby(line)
end
if line =~ /[#(.+?)傍点]/
translate_bouten(line)
end
if line =~ /[#傍点].+?[#傍点終わり]/
translate_bouten(line)
end
if line =~ /[#「(.+?)」に(?:二重)?[傍鎖破波]線]/
translate_bousen(line)
end
if line =~ /[#この行.*?([1234567890一二三四五六七八九〇十]*)字下げ]/
outputfile.print "\\begin{jisage}{"+to_single_byte($1)+"}\n"
line = line.sub(/[#この行.*?字下げ]/, "")+"\n\\end{jisage}"
@line_num += 2
end
if line =~ /[#ここから地から.+字上げ]/
line.sub!(/[#ここから地から([1234567890一二三四五六七八九〇十]*)字上げ]/){"\\begin{flushright}\\advance\\rightskip"+to_single_byte($1)+"zw"}
@jisage = true
end
if line =~ /[#ここで字上げ終わり]/
line.sub!(/[#ここで字上げ終わり]/){"\\end{flushright}"}
@jisage = false
end
if line =~ /[#ここから改行天付き、折り返して.*?字下げ]/
if @jisage
outputfile.print "\\end{jisage}\n"
@line_num += 1
end
line.sub!(/[#ここから改行天付き、折り返して([1234567890一二三四五六七八九〇十]*)字下げ]/){"\\begin{jisage}{#{to_single_byte($1)}}\\setlength\\parindent{-"+to_single_byte($1)+"zw}"}
@jisage = true
end
if line =~ /[#.*?字下げ[^]]*?(?:終わり|まで)[^]]*?]/
line = line.sub(/[#.*?字下げ.*?(?:終わり|まで).*?]/, "")+"\\end{jisage}"
@jisage = false
end
if line =~ /[#(ここから|これより|ここより|以下).+字下げ.*?]/
if @jisage
outputfile.print "\\end{jisage}\n"
@line_num += 1
end
line.sub!(/[#(ここから|これより|ここより|以下).*?([1234567890一二三四五六七八九〇十]*)字下げ.*?]/){"\\begin{jisage}{"+to_single_byte($2)+"}"}
@jisage = true
end
if line =~ /^[#ここから地付き]$/
@jisage = true
line = "\\begin{flushright}"
end
if line =~ /^[#ここで地付き終わり]$/
line = "\\end{flushright}"
@jisage = false
end
if line =~ /[#.*?地付き.*?]$/
line = "\\begin{flushright}\n"+line.sub(/[#.*?地付き.*?]$/, "\\end{flushright}")
@line_num += 1
elsif line =~ /[#.*?地付き.*?]/
line = line.sub(/[#.*?地付き.*?]/, "\\begin{flushright}\n")+"\\end{flushright}"
@line_num += 1
end
if line =~ /[#.*?(?:行末|地)(?:から|より).*?([1234567890一二三四五六七八九〇十]*)字上.*?]$$/
line = "\\begin{flushright}\\advance\\rightskip"+to_single_byte($1)+"zw\n"+line.sub(/[#.*?(?:行末|地)(?:から|より).*?字上.*?]$/, "\\end{flushright}")
@line_num += 1
elsif line =~ /^(.*?)[#.*?(?:行末|地)(?:から|より).*?([1234567890一二三四五六七八九〇十]*)字上.*?](.*)$/
line = $1+"\\begin{flushright}\\advance\\rightskip"+to_single_byte($2)+"zw\n"+$3+"\\end{flushright}"
@line_num += 1
end
if line =~ /[#「.+?」は返り点]/
line.gsub!(/(.+)[#「\1」は返り点]/, '\\kaeriten{\ajKunten{\1}}')
end
if line =~ /[#[一二三上中下甲乙丙丁レ]*]/
line.gsub!(/[#([一二三上中下甲乙丙丁レ]*)]/, '\\kaeriten{\ajKunten{\1}}')
end
if line =~ /[#(.*?)]/
line.gsub!(/[#((.*?))]/, '\\okurigana{\1}')
end
if line =~ /[#「.+?」.*?ママ.*?注記]/
line.gsub!(/(.+)[#「\1」.*?ママ.*?注記]/, '\\ruby{\1}{ママ}')
end
if line =~ /[#[^]]+(([^)]+.png).*?)[^]]+]/
line.gsub!(/[#[^]]+(([^)]+.png).*?)[^]]+]/, '\\sashie{\1}')
end
if line =~ /[#([1234567890一二三四五六七八九〇十]*)字下げ]/
num = to_single_byte($1).to_i
if num > MAX_SAGE
num = MAX_SAGE
end
outputfile.print "\\begin{jisage}{#{num}}\n"
line = line.sub(/[#.*?字下げ]/, "")+"\n\\end{jisage}"
end
## ちょっと汚いけど二重指定の対策
if line =~ /[#「(.*?)」は縦中横][#「(.*?)」は中見出し]/
line.gsub!(/(.*?)[#「(\1)」は縦中横][#「(\1)」は中見出し]/){"{\\large \\rensuji{#{$1}}}"}
end
if line =~ /[#「(.*?)」は大見出し]/
line.gsub!(/(.*?)[#「(.*?)」は大見出し]/){"{\\Large #{$1}}"}
end
if line =~ /[#「(.*?)」は中見出し]/
line.gsub!(/(.*?)[#「(.*?)」は中見出し]/){"{\\large #{$1}}"}
end
if line =~ /[#「(.*?)」は小見出し]/
line.gsub!(/(.*?)[#「(.*?)」は小見出し]/){"{\\gtfamily #{$1}}"}
end
if line =~ /[#小見出し](.*?)[#小見出し終わり]/
line.gsub!(/[#小見出し](.*?)[#小見出し終わり]/){"{\\gtfamily #{$1}}"}
end
if line =~ /[#中見出し](.*?)[#中見出し終わり]/
line.gsub!(/[#中見出し](.*?)[#中見出し終わり]/){"{\\large #{$1}}"}
end
if line =~ /[#ここから中見出し]/
line.gsub!(/[#ここから中見出し]/){"{\\large"}
end
if line =~ /[#ここで中見出し終わり]/
line.gsub!(/[#ここで中見出し終わり]/){"}"}
end
if line =~ /[#ページの左右中央]/
## XXX とりあえず無視
line.gsub!(/[#ページの左右中央]/, "")
end
## XXX 字詰めは1行の文字数が少ないので無視
if line =~ /[#ここから([1234567890一二三四五六七八九〇十]*)字詰め]/
line.gsub!(/[#ここから([1234567890一二三四五六七八九〇十]*)字詰め]/, "")
end
if line =~ /[#ここで字詰め終わり]/
line.gsub!(/[#ここで字詰め終わり]/, "")
end
# XXX 割り注も無視
if line =~ /[#ここから割り注]/
line.gsub!(/[#ここから割り注]/, "")
end
if line =~ /[#ここで割り注終わり]/
line.gsub!(/[#ここで割り注終わり]/, "")
end
if line =~ /[#「(.*?)」は太字]/
line.gsub!(/(.+)[#「\1」は太字]/,'{\\textbf{\1}}')
end
if line =~ /[#「.+?」は縦中横]/
line.gsub!(/(.+)[#「\1」は縦中横]/, '\\rensuji{\1}')
end
if line =~ /[#「(1)(/)(\d+)」は分数]/
bunshi = to_single_byte($1)
bunbo = $3
line.gsub!(/(.+)[#「.+?」は分数]/, "\\rensuji{#{bunshi}/#{bunbo}}")
end
if line =~ /[#「.+?」は罫囲み]/
line.gsub!(/(.+)[#「\1」は罫囲み]/, '\\fbox{\1}')
end
if line =~ /[#「(.+?)」は(本文より)?([123456])段階大きな文字]/
line.gsub!(/([^[]+?)[#「\1」は(本文より)?([123456])段階大きな文字]/) {
num = to_single_byte($3).to_i
case num
when 1
"{\\large #{$1}}"
when 2
"{\\Large #{$1}}"
when 3
"{\\LARGE #{$1}}"
when 4
"{\\huge #{$1}}"
when 5
"{\\Huge #{$1}}"
when 6
"{\\Huge #{$1}}"
end
}
end
if line =~ /[#「.+?」は斜体]/
line.gsub!(/(.+)[#「\1」は斜体]/){
shatai = to_single_byte($1).tr("abcdefghijklmnopqrstuvwxyz","abcdefghijklmnopqrstuvwxyz")
"\\rensuji{\\textsl{"+shatai+"}}"
}
end
if line =~ /[#「[0-90-9]」は下付き小文字]/
line.gsub!(/([A-Za-za-zA-Zαβδγ])([0-90-9])[#「\2」は下付き小文字]/){
"$"+$1+"_{"+to_single_byte($2)+"}$"
}
end
if line =~ /([^ ]*)[#ゴシック体]$/
line.gsub!(/([^ ]*)[#ゴシック体]/){"{\\gtfamily #{$1}}"}
end
if line =~ /[#「.+?」はゴシック体]/
line.gsub!(/(.+?)[#「\1」はゴシック体]/){"{\\gtfamily #{$1}}"}
end
if line =~ /[#ここから横組み](.*?)[#ここで横組み終わり]/
line.gsub!(/[#ここから横組み](.*?)[#ここで横組み終わり]/){
yoko_str = $1
yoko_str.gsub!(/π/,"\\pi ")
yoko_str.gsub!(/=/,"=")
yoko_str.gsub!(/(\d+)[#「\1」は指数]/){"^{#{$1}}"}
"$"+yoko_str+"$"
}
end
line.tr!("┌┐┘└│─┏┓┛┗┃━→","┐┘└┌─│┓┛┗┏━┃↓")
if line =~ /[#改段]/
line.sub!(/[#改段]/, "\\clearpage")
end
if line =~ /[aioeu]\^/i
line.gsub!(/([aioeu])\^/i){ "\\\^{#{$1}}"}
end
if line =~ /[aioeu]\'/i
line.gsub!(/([aioeu])\'/i){ "\\\'{#{$1}}"}
end
if line =~ /[#天から.*?([1234567890一二三四五六七八九〇十]*)字下げ]/
num = to_single_byte($1).to_i
if num > MAX_SAGE
num = MAX_SAGE
end
outputfile.print "\\begin{jisage}{#{num}}\n"
line = line.sub(/[#天から.*?字下げ]/, "")+"\n\\end{jisage}"
end
line.gsub!(/[#図形 □(四角)に内接する◆]/, '{\setlength{\fboxsep}{0pt}\fbox{◆}}')
if line =~ /[#[^]]+?]/
line.gsub!(/[#([^]]+?)]/, '\\endnote{\1}')
end
if line =~ /\\[a-z]*?bou/
tuning_bou(line)
end
if line =~ /\\ajD?Kunoji\{\}\}/
line.gsub!(/(\\ajD?Kunoji)\{\}\}/, '\1}')
end
if line =~ /\\ruby/
tuning_ruby(line)
end
if line =~ /^$/
line = " "
end
outputfile.print normalize(line)+"\n"
end
# 底本の処理
outputfile.write(postamble())
outputfile.print normalize(line)+"\n"
while line = inputfile.gets
line.chomp!
line = NKF::nkf('-wS', line)
outputfile.print normalize(line)+"\n"
end
outputfile.print "\n\\end{minipage}\n\\end{teihon}\n\\end{document}\n"
if @log_text.size > 0
until @log_text.empty?
outputfile.print @log_text.shift
end
end
end
end
「_」もルビの区切りにならないよう修正
# encoding: utf-8
# Original source is "青空文庫→TeX(ver. 0.9.5 2004/5/5 psitau)"
# see: http://psitau.kitunebi.com/aozora.html
#
# Also another source is "青空キンドル [Beta]"
# see: http://a2k.aill.org/
require 'nkf'
class Aozora4Reader
PreambleLineNumber=13
KANJIPAT = "[々〇〻\u3400-\u9FFF\uF900-\uFAFF※ヶ〆]"
MAX_SAGE = 15
def self.a4r(file)
self.new(file).main
end
def initialize(file)
@inputfile_name = file
@jisage = false
@log_text = []
@line_num=0
@gaiji = {}
@gaiji2 = {}
end
# UTF-8で出力
def normalize(l)
##l.gsub!(/&/, '\\\\&')
l.to_s
end
# 全角→半角
def to_single_byte(str)
s = str.dup
if s =~ /[0-9]/
s.tr!("1234567890", "1234567890")
elsif s =~ /[一二三四五六七八九〇]/
s.tr!("一二三四五六七八九〇", "1234567890")
end
case s
when /\d十\d/
s.sub!(/(\d)十(\d)/, '\1\2')
when /\d十/
s.sub!(/(\d)十/, '\{1}0')
when /十\d/
s.sub!(/十(\d)/, '1\1')
when /十/
s.sub!(/十/, "10")
end
if s =~/[!?]/
s.tr!("!?", "!?")
end
return s
end
# ルビの削除(表題等)
def remove_ruby(str)
str.gsub(/\\ruby{([^}]+)}{[^}]*}/i){$1}
end
# プリアンブルの出力
def preamble
title = remove_ruby(@title)
author = remove_ruby(@author)
str = <<"END_OF_PRE"
\\documentclass[a5paper]{tbook}
%\\documentclass[a5paper, twocolumn]{tbook}
%\\usepackage[deluxe]{otf}
\\usepackage[expert, deluxe]{otf}
%\\usepackage{utf}
\\usepackage{furikana}
\\usepackage{type1cm}
\\usepackage[size=large]{aozora4reader}
\\def\\rubykatuji{\\rubyfamily\\tiny}
%\\def\\rubykatuji{\\tiny}%for UTF package
%\\renewenvironment{teihon}{\\comment}{\\endcomment}
\\usepackage[dvipdfm,bookmarks=false,bookmarksnumbered=false,hyperfootnotes=false,%
pdftitle={#{title}},%
pdfauthor={#{author}}]{hyperref}
%% Bookmarkの文字化け対策(日本語向け)
\\ifnum 46273=\\euc"B4C1 % 46273 == 0xB4C1 == 漢(EUC-JP)
\\AtBeginDvi{\\special{pdf:tounicode EUC-UCS2}}%
\\else
\\AtBeginDvi{\\special{pdf:tounicode 90ms-RKSJ-UCS2}}%
\\fi
END_OF_PRE
str
end
# 底本の表示用
def postamble
str = <<"END_OF_POST"
\\theendnotes
\\begin{teihon}
\\clearpage\\null\\newpage\\thispagestyle{empty}
\\begin{minipage}<y>{\\textheight}
\\vspace{1\\baselineskip}
\\scriptsize
END_OF_POST
str
end
# アクセントの処理用
# http://www.aozora.gr.jp/accent_separation.html
# http://cosmoshouse.com/tools/acc-conv-j.htm
def translate_accent(l)
l.gsub!(/([ij]):/){"\\\"{\\#{$1}}"}
l.gsub!(/([AIOEUaioeu])(['`~^])/){"\\#$2{#$1}"}
l.gsub!(/([AIOEUaioeu]):/){"\\\"{#$1}"}
l.gsub!(/([AIOEUaioeu])_/){"\\={#$1}"}
l.gsub!(/([!?])@/){"#$1'"}
l.gsub!(/([Aa])&/){"\\r{#$1}"}
l.gsub!(/AE&/){"\\AE{}"}
l.gsub!(/ae&/){"\\ae{}"}
l.gsub!(/OE&/){"\\OE{}"}
l.gsub!(/oe&/){"\\oe{}"}
l.gsub!(/s&/){"\\ss{}"}
l.gsub!(/([cC]),/){"\\c{#$1}"}
l.gsub!(/〔/,'')
l.gsub!(/〕/,'')
return l
end
# 外字の処理用
def translate_gaiji(l)
if l =~/※[#([^]]*)、([^、]]*)]/
if @gaiji2[$1]
l.gsub!(/※[#([^]]*)、([^、]]*)]/){@gaiji2[$1]}
end
end
## ※[#「姉」の正字、「女+※[#第3水準1-85-57]のつくり」、256-下-16]
if l =~/※[#([^]]*※[#[^]]*][^]]*)、([^、]]*)]/
if @gaiji2[$1]
l.gsub!(/※[#([^]]*※[#[^]]*][^]]*)、([^、]]*)]/){@gaiji2[$1]}
end
end
## ※[#「さんずい+闊」]
if l =~ /※[#「([^]]+?)」]/
if @gaiji2[$1]
l.gsub!(/※[#「([^]]+?)」]/){@gaiji2[$1]}
end
end
if l =~ /※[#[^]]*?※[#[^]]*?[12]\-\d{1,2}\-\d{1,2}[^]]*?][^]]*?]/
l.gsub!(/※[#([^]]*?)※[#([^]]*?([12]\-\d{1,2}\-\d{1,2})[^]]*?)]([^]]*?)]/){"※\\footnote{#$1"+@gaiji[$3]+"#$4}"}
end
if l =~ /※[#[^]]*?([12]\-\d{1,2}\-\d{1,2})[^]]*?]/
if @gaiji[$1]
l.gsub!(/※[#([^]]*?([12]\-\d{1,2}\-\d{1,2})[^]]*?)]/){@gaiji[$2]}
end
end
if l =~ /※[#濁点付き片仮名([ワヰヱヲ])、.*?]/
l.gsub!(/※[#濁点付き片仮名([ワヰヱヲ])、.*?]/){ "\\ajLig{#{$1}゛}"}
end
if l =~ /※[#感嘆符三つ.*]/
l.gsub!(/※[#感嘆符三つ.*?]/){ "\\rensuji{!!!}"}
end
if l =~ /※[#.*?([A-Za-z0-9_]+\.png).*?]/
l.gsub!(/※[#([^]]+?)]/, "\\includegraphics{#{$1}}")
end
if l =~ /※[#[^]]+?]/
l.gsub!(/※[#([^]]+?)]/, '※\\footnote{\1}')
end
if l =~ /※/
STDERR.puts("Remaining Unprocessed Gaiji Character in Line #@line_num.")
@log_text << normalize("未処理の外字が#{@line_num}行目にあります.\n")
end
return l
end
# ルビの処理用
def translate_ruby(l)
# 被ルビ文字列内に外字の注記があるばあい,ルビ文字列の後ろに移動する
# ただし,順番が入れ替わってしまう
while l =~ /※\\footnote\{[^(?:\\footnote)]+\}(?:#{KANJIPAT}|(?:\\UTF\{[0-9a-fA-F]+\}|\\CID\{[0-9]+\}))*?《.+?》/
l.sub!(/(※)(\\footnote\{[^(?:\\footnote)]+\})((?:#{KANJIPAT}|(?:\\UTF\{[0-9a-fA-F]+\}|\\CID\{[0-9]+\}))*?《.+?》)/, '\1\3\2')
end
# 被ルビ文字列内に誤記などの注記が存在する場合は、ルビの後ろに移動する
while l =~ /(?:#{KANJIPAT}|(?:\\UTF\{[0-9a-fA-F]+\}|\\CID\{[0-9]+\}))+?[#[^]]*?](?:#{KANJIPAT}|(?:\\UTF\{[0-9a-fA-F]+\}|\\CID\{[0-9]+\}))*?《.+?》/
l.sub!(/((?:#{KANJIPAT}|(?:\\UTF\{[0-9a-fA-F]+\}|\\CID\{[0-9]+\}))+?)([#[^]]*?])((?:#{KANJIPAT}|(?:\\UTF\{[0-9a-fA-F]+\}|\\CID\{[0-9]+\}))*?《.+?》)/, '\1\3\2')
end
# ルビ文字列内に誤記などの注記が存在する場合は、ルビの後ろに移動する
if l =~ /《[^》]*?[#[^]]*?][^》]*?》/
l.gsub!(/(《[^》]*?)([#[^]]*?])([^》]*?》)/, '\1\3\2')
end
# 一連のルビの処理
# 1 縦棒ありの場合
if l =~ /|/
l.gsub!(/|(.+?)《(.+?)》/, '\\ruby{\1}{\2}')
end
# 2 漢字および外字
if l =~ /(?:#{KANJIPAT}|(?:\\UTF\{[0-9a-fA-F]+\}|\\CID\{[0-9]+\}))+?《.+?》/
l.gsub!(/((?:#{KANJIPAT}|(?:\\UTF\{[0-9a-fA-F]+\}|\\CID\{[0-9]+\}))+?)《(.+?)》/, '\\ruby{\1}{\2}')
end
# 3 ひらがな
if l =~ /[あ-ん](?:[ぁ-んーヽヾ]|\\CID\{12107\})*?《.+?》/
l.gsub!(/([あ-ん](?:[ぁ-んーヽヾ]|\\CID\{12107\})*?)《(.+?)》/, '\\ruby{\1}{\2}')
end
# 4 カタカナ
if l =~ /[ア-ヴ](?:[ァ-ヴーゝゞ]|\\CID\{12107\})*?《.+?》/
l.gsub!(/([ア-ヴ](?:[ァ-ヴーゝゞ]|\\CID\{12107\})*?)《(.+?)》/, '\\ruby{\1}{\2}')
end
# 5 全角アルファベットなど
if l =~ /[A-Za-zΑ-Ωα-ωА-Яа-я・]+?《.+?》/
l.gsub!(/([A-Za-zΑ-Ωα-ωА-Яа-я・]+?)《(.+?)》/, '\\ruby{\1}{\2}')
end
# 6 半角英数字
if l =~ /[A-Za-z0-9#_\-\;\&.\'\^\`\\\{\} ]+?《.+?》/
l.gsub!(/([A-Za-z0-9#_\-\;\&.\'\^\`\\\{\} ]+?)《(.+?)》/, '\\ruby{\1}{\2}')
end
if l =~ /《.*?》/
STDERR.puts("Unknown ruby pattern found in #@line_num.")
@log_text << normalize("未処理のルビが#{@line_num}行目にあります.\n")
end
return l
end
# 傍点の処理用
def translate_bouten(l)
bouten_list = [
["傍点", "bou"],
["白ゴマ傍点","sirogomabou"],
["丸傍点","marubou"],
["白丸傍点","siromarubou"],
["黒三角傍点","kurosankakubou"],
["白三角傍点","sirosankakubou"],
["二重丸傍点","nijyuumarubou"],
["蛇の目傍点","jyanomebou"]]
bouten_list.each{ |name, fun|
if l =~ /[#「.+?」に#{name}]/
l.gsub!(/(.+?)[#.*?「\1」に#{name}]/){
str = $1
str.gsub!(/(\\UTF{.+?})/){ "{"+$1+"}"}
str.gsub!(/(\\ruby{.+?}{.+?})/i){ "{"+$1+"}"}
"\\#{fun}{"+str+"}"
}
end
}
if l =~ /[#傍点].+?[#傍点終わり]/
l.gsub!(/[#傍点](.+?)[#傍点終わり]/){
str = $1
str.gsub!(/(\\UTF{.+?})/){ "{"+$1+"}"}
str.gsub!(/(\\ruby{.+?}{.+?})/i){ "{"+$1+"}"}
"\\bou{"+str+"}"
}
end
return l
end
# 傍線の処理用
def translate_bousen(l)
if l =~ /[#「.+?」に傍線]/
l.gsub!(/(.+?)[#「\1」に傍線]/, '\\bousen{\1}')
end
if l =~ /[#「.+?」に二重傍線]/
l.gsub!(/(.+?)[#「\1」に二重傍線]/, '\\bousen{\1}')
end
if l =~ /[#「.+?」に鎖線]/
l.gsub!(/(.+?)[#「\1」に鎖線]/, '\\bousen{\1}')
end
if l =~ /[#「.+?」に破線]/
l.gsub!(/(.+?)[#「\1」に破線]/, '\\bousen{\1}')
end
if l =~ /[#「.+?」に波線]/
l.gsub!(/(.+?)[#「\1」に波線]/, '\\bousen{\1}')
end
return l
end
# ルビの調整
def tuning_ruby(l)
# 1 直前が漢字の場合
if l =~ /(?:#{KANJIPAT}|(?:\\UTF\{[0-9a-fA-F]+\}|\\CID\{[0-9]+\}))\\ruby/
l.gsub!(/((?:#{KANJIPAT}|(?:\\UTF\{[0-9a-fA-F]+\}|\\CID\{[0-9]+\})))\\ruby/, '\1\\Ruby')
end
# 2 直後が漢字の場合
if l =~ /\\ruby\{(?:#{KANJIPAT}|(?:\\UTF\{[0-9a-fA-F]+\}|\\CID\{[0-9]+\}))+?\}\{(?:[^\\\{\}]|(?:\\UTF\{[0-9a-fA-F]+\}|\\CID\{[0-9]+\}))+?\}(?:#{KANJIPAT}|(?:\\UTF\{[0-9a-fA-F]+\}|\\CID\{[0-9]+\}))/
l.gsub!(/\\ruby(\{(?:#{KANJIPAT}|(?:\\UTF\{[0-9a-fA-F]+\}|\\CID\{[0-9]+\}))+?\}\{(?:[^\\\{\}]|(?:\\UTF\{[0-9a-fA-F]+\}|\\CID\{[0-9]+\}))+?\}(?:#{KANJIPAT}|(?:\\UTF\{[0-9a-fA-F]+\}|\\CID\{[0-9]+\})))/, '\\Ruby\1')
end
# 3 ルビが連続する場合
while l =~ /\\(?:ruby|RUBY|Ruby)\{(?:#{KANJIPAT}|(?:\\UTF\{[0-9a-fA-F]+\}|\\CID\{[0-9]+\}))+?\}\{(?:[^\\{}]|(?:\\UTF\{[0-9a-fA-F]+\}|\\CID\{[0-9]+\}))+?\}\\ruby/
l.sub!(/\\(?:ruby|RUBY|Ruby)(\{(?:#{KANJIPAT}|(?:\\UTF\{[0-9a-fA-F]+\}|\\CID\{[0-9]+\}))+?\}\{(?:[^\\{}]|(?:\\UTF\{[0-9a-fA-F]+\}|\\CID\{[0-9]+\}))+?\})\\ruby/, '\\RUBY\1\\RUBY')
end
end
# 傍点の調整
def tuning_bou(l)
# 傍点の中の「くの字点」を変換
while l =~ /(\\[a-z]*?bou\{(?:\w|(?:\\UTF\{[0-9a-fA-F]+\}|\\CID\{[0-9]+\}))+?)(\\ajD?Kunoji)\{\}((?:\w|(?:\\UTF\{[0-9a-fA-F]+\}|\\CID\{[0-9]+\}))*?)\}/
l.gsub!(/((\\([a-z]*?)bou)\{(?:\w|(?:\\UTF\{[0-9a-fA-F]+\}|\\CID\{[0-9]+\}))+?)(\\ajD?Kunoji)\{\}((?:\w|(?:\\UTF\{[0-9a-fA-F]+\}|\\CID\{[0-9]+\}))*?)\}/, '\1}\4with\3Bou\2{\5}')
end
if l =~ /\\[a-z]*?bou\{\}/
l.gsub!(/\\([a-z]*?)bou\{\}/, '{}')
end
return l
end
# 外字用ハッシュを作成
def load_gaiji
datadir = File.dirname(__FILE__)+"/../data"
File.open(datadir+"/gaiji.txt") do |f|
while gaiji_line = f.gets
gaiji_line.chomp!
key, data = gaiji_line.split
@gaiji[key] = data
end
end
File.open(datadir+"/gaiji2.txt") do |f|
while gaiji_line = f.gets
gaiji_line.chomp!
key, data = gaiji_line.split
data.gsub(/#.*$/,'')
@gaiji2[key] = data
end
end
end
#
# メインパート
#
def main
load_gaiji()
# 入出力ファイルの定義
outputfile_name = @inputfile_name.sub(/\.txt$/, ".tex")
inputfile = File.open(@inputfile_name)
outputfile = File.open(outputfile_name, "w")
# プリアンブルの処理
empty_line = 0
in_note = false
meta_data = []
while empty_line < 2
line = inputfile.gets.chomp
line = NKF::nkf('-wS', line)
if in_note
if line =~ /^-+$/
in_note = false
break
end
else
if line =~ /^-+$/
in_note = true
else
if line =~ /^$/
empty_line += 1
else
if line =~ /《.*?》/
translate_ruby(line)
end
meta_data << line
end
end
end
end
@line_num += meta_data.size
@title = normalize(meta_data.shift)
case meta_data.size
when 1
@author = normalize(meta_data.shift)
when 2
@subtitle = normalize(meta_data.shift)
@author = normalize(meta_data.shift)
when 3
@subtitle = normalize(meta_data.shift)
@author = normalize(meta_data.shift)
@subauthor = normalize(meta_data.shift)
else
@subtitle = normalize(meta_data.shift)
@meta_data = []
until meta_data.empty?
@meta_data << normalize(meta_data.shift)
end
@subauthor = @meta_data.pop
@author = @meta_data.pop
end
outputfile.write(preamble())
outputfile.print "\\title{"+@title+"}\n"
outputfile.print "\\subtitle{"+@subtitle+"}\n" if @subtitle
outputfile.print "\\author{"+@author+"}\n"
outputfile.print "\\subauthor{"+@subauthor+"}\n" if @subauthor
if @meta_data
@meta_data.each do |data|
outputfile.print "\\metadata{"+data+"}\n"
end
end
outputfile.print "\\date{}\n"
# 本文の処理
outputfile.print "\\begin{document}\n\\maketitle\n"
@line_num += PreambleLineNumber
while line = inputfile.gets
@line_num += 1
line.chomp!
line = NKF::nkf('-wS', line)
break if line =~ /^底本/
if line =~ /^ 「/
line.sub!(/^ 「/, "\\mbox{ }\\kern0mm\\inhibitglue「")
end
if line =~ /[ワヰヱヲ]゛/
line.gsub!(/ワ゛/, "\\ajLig{ワ゛}")
line.gsub!(/ヰ゛/, "\\ajLig{ヰ゛}")
line.gsub!(/ヱ゛/, "\\ajLig{ヱ゛}")
line.gsub!(/ヲ゛/, "\\ajLig{ヲ゛}")
end
if line =~ /[?!] /
line.gsub!(/([?!]) /, '\1{}')
end
if line =~ /——/
line.gsub!(/——/, "\\——{}")
end
if line =~ //\/
line.gsub!(//\/, "\\ajKunoji{}")
end
if line =~ //″\/
line.gsub!(//″\/, "\\ajDKunoji{}")
end
=begin
if line =~ /^ +.+/
line.gsub!(/^ +([一二三四五六七八九〇十].*)/, '\\section*{\1}')
end
=end
while line =~ /(.+?)[#(「\1」は横?[1|一]文字[^]]*?)]/
line = line.sub(/(.+?)[#(「\1」は横?[1|一]文字[^]]*?)]/){"\\ajLig{"+to_single_byte($1)+"}"}
end
if line =~ /[#改丁.*?]/
line.sub!(/[#改丁.*?]/, "\\cleardoublepage")
end
if line =~ /[#改[頁|ページ].*?]/
line.sub!(/[#改[頁|ページ].*?]/, "\\clearpage")
end
if line =~ /〔.*?〕/
translate_accent(line)
end
if line =~ /※/
translate_gaiji(line)
end
if line =~ /《.*?》/
translate_ruby(line)
end
if line =~ /[#(.+?)傍点]/
translate_bouten(line)
end
if line =~ /[#傍点].+?[#傍点終わり]/
translate_bouten(line)
end
if line =~ /[#「(.+?)」に(?:二重)?[傍鎖破波]線]/
translate_bousen(line)
end
if line =~ /[#この行.*?([1234567890一二三四五六七八九〇十]*)字下げ]/
outputfile.print "\\begin{jisage}{"+to_single_byte($1)+"}\n"
line = line.sub(/[#この行.*?字下げ]/, "")+"\n\\end{jisage}"
@line_num += 2
end
if line =~ /[#ここから地から.+字上げ]/
line.sub!(/[#ここから地から([1234567890一二三四五六七八九〇十]*)字上げ]/){"\\begin{flushright}\\advance\\rightskip"+to_single_byte($1)+"zw"}
@jisage = true
end
if line =~ /[#ここで字上げ終わり]/
line.sub!(/[#ここで字上げ終わり]/){"\\end{flushright}"}
@jisage = false
end
if line =~ /[#ここから改行天付き、折り返して.*?字下げ]/
if @jisage
outputfile.print "\\end{jisage}\n"
@line_num += 1
end
line.sub!(/[#ここから改行天付き、折り返して([1234567890一二三四五六七八九〇十]*)字下げ]/){"\\begin{jisage}{#{to_single_byte($1)}}\\setlength\\parindent{-"+to_single_byte($1)+"zw}"}
@jisage = true
end
if line =~ /[#.*?字下げ[^]]*?(?:終わり|まで)[^]]*?]/
line = line.sub(/[#.*?字下げ.*?(?:終わり|まで).*?]/, "")+"\\end{jisage}"
@jisage = false
end
if line =~ /[#(ここから|これより|ここより|以下).+字下げ.*?]/
if @jisage
outputfile.print "\\end{jisage}\n"
@line_num += 1
end
line.sub!(/[#(ここから|これより|ここより|以下).*?([1234567890一二三四五六七八九〇十]*)字下げ.*?]/){"\\begin{jisage}{"+to_single_byte($2)+"}"}
@jisage = true
end
if line =~ /^[#ここから地付き]$/
@jisage = true
line = "\\begin{flushright}"
end
if line =~ /^[#ここで地付き終わり]$/
line = "\\end{flushright}"
@jisage = false
end
if line =~ /[#.*?地付き.*?]$/
line = "\\begin{flushright}\n"+line.sub(/[#.*?地付き.*?]$/, "\\end{flushright}")
@line_num += 1
elsif line =~ /[#.*?地付き.*?]/
line = line.sub(/[#.*?地付き.*?]/, "\\begin{flushright}\n")+"\\end{flushright}"
@line_num += 1
end
if line =~ /[#.*?(?:行末|地)(?:から|より).*?([1234567890一二三四五六七八九〇十]*)字上.*?]$$/
line = "\\begin{flushright}\\advance\\rightskip"+to_single_byte($1)+"zw\n"+line.sub(/[#.*?(?:行末|地)(?:から|より).*?字上.*?]$/, "\\end{flushright}")
@line_num += 1
elsif line =~ /^(.*?)[#.*?(?:行末|地)(?:から|より).*?([1234567890一二三四五六七八九〇十]*)字上.*?](.*)$/
line = $1+"\\begin{flushright}\\advance\\rightskip"+to_single_byte($2)+"zw\n"+$3+"\\end{flushright}"
@line_num += 1
end
if line =~ /[#「.+?」は返り点]/
line.gsub!(/(.+)[#「\1」は返り点]/, '\\kaeriten{\ajKunten{\1}}')
end
if line =~ /[#[一二三上中下甲乙丙丁レ]*]/
line.gsub!(/[#([一二三上中下甲乙丙丁レ]*)]/, '\\kaeriten{\ajKunten{\1}}')
end
if line =~ /[#(.*?)]/
line.gsub!(/[#((.*?))]/, '\\okurigana{\1}')
end
if line =~ /[#「.+?」.*?ママ.*?注記]/
line.gsub!(/(.+)[#「\1」.*?ママ.*?注記]/, '\\ruby{\1}{ママ}')
end
if line =~ /[#[^]]+(([^)]+.png).*?)[^]]+]/
line.gsub!(/[#[^]]+(([^)]+.png).*?)[^]]+]/, '\\sashie{\1}')
end
if line =~ /[#([1234567890一二三四五六七八九〇十]*)字下げ]/
num = to_single_byte($1).to_i
if num > MAX_SAGE
num = MAX_SAGE
end
outputfile.print "\\begin{jisage}{#{num}}\n"
line = line.sub(/[#.*?字下げ]/, "")+"\n\\end{jisage}"
end
## ちょっと汚いけど二重指定の対策
if line =~ /[#「(.*?)」は縦中横][#「(.*?)」は中見出し]/
line.gsub!(/(.*?)[#「(\1)」は縦中横][#「(\1)」は中見出し]/){"{\\large \\rensuji{#{$1}}}"}
end
if line =~ /[#「(.*?)」は大見出し]/
line.gsub!(/(.*?)[#「(.*?)」は大見出し]/){"{\\Large #{$1}}"}
end
if line =~ /[#「(.*?)」は中見出し]/
line.gsub!(/(.*?)[#「(.*?)」は中見出し]/){"{\\large #{$1}}"}
end
if line =~ /[#「(.*?)」は小見出し]/
line.gsub!(/(.*?)[#「(.*?)」は小見出し]/){"{\\gtfamily #{$1}}"}
end
if line =~ /[#小見出し](.*?)[#小見出し終わり]/
line.gsub!(/[#小見出し](.*?)[#小見出し終わり]/){"{\\gtfamily #{$1}}"}
end
if line =~ /[#中見出し](.*?)[#中見出し終わり]/
line.gsub!(/[#中見出し](.*?)[#中見出し終わり]/){"{\\large #{$1}}"}
end
if line =~ /[#ここから中見出し]/
line.gsub!(/[#ここから中見出し]/){"{\\large"}
end
if line =~ /[#ここで中見出し終わり]/
line.gsub!(/[#ここで中見出し終わり]/){"}"}
end
if line =~ /[#ページの左右中央]/
## XXX とりあえず無視
line.gsub!(/[#ページの左右中央]/, "")
end
## XXX 字詰めは1行の文字数が少ないので無視
if line =~ /[#ここから([1234567890一二三四五六七八九〇十]*)字詰め]/
line.gsub!(/[#ここから([1234567890一二三四五六七八九〇十]*)字詰め]/, "")
end
if line =~ /[#ここで字詰め終わり]/
line.gsub!(/[#ここで字詰め終わり]/, "")
end
# XXX 割り注も無視
if line =~ /[#ここから割り注]/
line.gsub!(/[#ここから割り注]/, "")
end
if line =~ /[#ここで割り注終わり]/
line.gsub!(/[#ここで割り注終わり]/, "")
end
if line =~ /[#「(.*?)」は太字]/
line.gsub!(/(.+)[#「\1」は太字]/,'{\\textbf{\1}}')
end
if line =~ /[#「.+?」は縦中横]/
line.gsub!(/(.+)[#「\1」は縦中横]/, '\\rensuji{\1}')
end
if line =~ /[#「(1)(/)(\d+)」は分数]/
bunshi = to_single_byte($1)
bunbo = $3
line.gsub!(/(.+)[#「.+?」は分数]/, "\\rensuji{#{bunshi}/#{bunbo}}")
end
if line =~ /[#「.+?」は罫囲み]/
line.gsub!(/(.+)[#「\1」は罫囲み]/, '\\fbox{\1}')
end
if line =~ /[#「(.+?)」は(本文より)?([123456])段階大きな文字]/
line.gsub!(/([^[]+?)[#「\1」は(本文より)?([123456])段階大きな文字]/) {
num = to_single_byte($3).to_i
case num
when 1
"{\\large #{$1}}"
when 2
"{\\Large #{$1}}"
when 3
"{\\LARGE #{$1}}"
when 4
"{\\huge #{$1}}"
when 5
"{\\Huge #{$1}}"
when 6
"{\\Huge #{$1}}"
end
}
end
if line =~ /[#「.+?」は斜体]/
line.gsub!(/(.+)[#「\1」は斜体]/){
shatai = to_single_byte($1).tr("abcdefghijklmnopqrstuvwxyz","abcdefghijklmnopqrstuvwxyz")
"\\rensuji{\\textsl{"+shatai+"}}"
}
end
if line =~ /[#「[0-90-9]」は下付き小文字]/
line.gsub!(/([A-Za-za-zA-Zαβδγ])([0-90-9])[#「\2」は下付き小文字]/){
"$"+$1+"_{"+to_single_byte($2)+"}$"
}
end
if line =~ /([^ ]*)[#ゴシック体]$/
line.gsub!(/([^ ]*)[#ゴシック体]/){"{\\gtfamily #{$1}}"}
end
if line =~ /[#「.+?」はゴシック体]/
line.gsub!(/(.+?)[#「\1」はゴシック体]/){"{\\gtfamily #{$1}}"}
end
if line =~ /[#ここから横組み](.*?)[#ここで横組み終わり]/
line.gsub!(/[#ここから横組み](.*?)[#ここで横組み終わり]/){
yoko_str = $1
yoko_str.gsub!(/π/,"\\pi ")
yoko_str.gsub!(/=/,"=")
yoko_str.gsub!(/(\d+)[#「\1」は指数]/){"^{#{$1}}"}
"$"+yoko_str+"$"
}
end
line.tr!("┌┐┘└│─┏┓┛┗┃━→","┐┘└┌─│┓┛┗┏━┃↓")
if line =~ /[#改段]/
line.sub!(/[#改段]/, "\\clearpage")
end
if line =~ /[aioeu]\^/i
line.gsub!(/([aioeu])\^/i){ "\\\^{#{$1}}"}
end
if line =~ /[aioeu]\'/i
line.gsub!(/([aioeu])\'/i){ "\\\'{#{$1}}"}
end
if line =~ /[#天から.*?([1234567890一二三四五六七八九〇十]*)字下げ]/
num = to_single_byte($1).to_i
if num > MAX_SAGE
num = MAX_SAGE
end
outputfile.print "\\begin{jisage}{#{num}}\n"
line = line.sub(/[#天から.*?字下げ]/, "")+"\n\\end{jisage}"
end
line.gsub!(/[#図形 □(四角)に内接する◆]/, '{\setlength{\fboxsep}{0pt}\fbox{◆}}')
if line =~ /[#[^]]+?]/
line.gsub!(/[#([^]]+?)]/, '\\endnote{\1}')
end
if line =~ /\\[a-z]*?bou/
tuning_bou(line)
end
if line =~ /\\ajD?Kunoji\{\}\}/
line.gsub!(/(\\ajD?Kunoji)\{\}\}/, '\1}')
end
if line =~ /\\ruby/
tuning_ruby(line)
end
if line =~ /^$/
line = " "
end
outputfile.print normalize(line)+"\n"
end
# 底本の処理
outputfile.write(postamble())
outputfile.print normalize(line)+"\n"
while line = inputfile.gets
line.chomp!
line = NKF::nkf('-wS', line)
outputfile.print normalize(line)+"\n"
end
outputfile.print "\n\\end{minipage}\n\\end{teihon}\n\\end{document}\n"
if @log_text.size > 0
until @log_text.empty?
outputfile.print @log_text.shift
end
end
end
end
|
module Dapp
VERSION = "0.35.7"
BUILD_CACHE_VERSION = 31
end
0.35.8
module Dapp
VERSION = "0.35.8"
BUILD_CACHE_VERSION = 31
end
|
# encoding: utf-8
# Original source is "青空文庫→TeX(ver. 0.9.5 2004/5/5 psitau)"
# see: http://psitau.kitunebi.com/aozora.html
#
# Also another source is "青空キンドル [Beta]"
# see: http://a2k.aill.org/
require 'nkf'
class Aozora4Reader
PreambleLineNumber=13
KANJIPAT = "[々〇〻\u3400-\u9FFF\uF900-\uFAFF※ヶ〆]"
MAX_SAGE = 20
def self.a4r(file)
self.new(file).main
end
def initialize(file)
@inputfile_name = file
@jisage = false
@log_text = []
@line_num=0
@gaiji = {}
@gaiji2 = {}
end
# UTF-8で出力
def normalize(l)
##l.gsub!(/&/, '\\\\&')
l.to_s
end
# 全角→半角
def to_single_byte(str)
s = str.dup
if s =~ /[0-9]/
s.tr!("1234567890", "1234567890")
elsif s =~ /[一二三四五六七八九〇]/
s.tr!("一二三四五六七八九〇", "1234567890")
end
case s
when /\d十\d/
s.sub!(/(\d)十(\d)/, '\1\2')
when /\d十/
s.sub!(/(\d)十/, '\{1}0')
when /十\d/
s.sub!(/十(\d)/, '1\1')
when /十/
s.sub!(/十/, "10")
end
if s =~/[!?]/
s.tr!("!?", "!?")
end
return s
end
# プリアンブルの出力
def preamble
str = <<"END_OF_PRE"
\\documentclass[a5paper]{tbook}
%\\documentclass[a5paper, twocolumn]{tbook}
%\\usepackage[deluxe]{otf}
\\usepackage[expert, deluxe]{otf}
%\\usepackage{utf}
\\usepackage{furikana}
\\usepackage{type1cm}
\\usepackage[size=large]{aozora4reader}
\\def\\rubykatuji{\\rubyfamily\\tiny}
%\\def\\rubykatuji{\\tiny}%for UTF package
%\\renewenvironment{teihon}{\\comment}{\\endcomment}
\\usepackage[dvipdfm,bookmarks=false,bookmarksnumbered=false,hyperfootnotes=false,%
pdftitle={#{@title}},%
pdfauthor={#{@author}}]{hyperref}
%% Bookmarkの文字化け対策(日本語向け)
\\ifnum 46273=\\euc"B4C1 % 46273 == 0xB4C1 == 漢(EUC-JP)
\\AtBeginDvi{\\special{pdf:tounicode EUC-UCS2}}%
\\else
\\AtBeginDvi{\\special{pdf:tounicode 90ms-RKSJ-UCS2}}%
\\fi
END_OF_PRE
str
end
# 底本の表示用
def postamble
str = <<"END_OF_POST"
\\theendnotes
\\begin{teihon}
\\clearpage\\null\\newpage\\thispagestyle{empty}
\\begin{minipage}<y>{\\textheight}
\\vspace{1\\baselineskip}
\\scriptsize
END_OF_POST
str
end
# アクセントの処理用
# http://www.aozora.gr.jp/accent_separation.html
# http://cosmoshouse.com/tools/acc-conv-j.htm
def translate_accent(l)
l.gsub!(/([ij]):/){"\\\"{\\#{$1}}"}
l.gsub!(/([AIOEUaioeu])(['`~^])/){"\\#$2{#$1}"}
l.gsub!(/([AIOEUaioeu]):/){"\\\"{#$1}"}
l.gsub!(/([AIOEUaioeu])_/){"\\={#$1}"}
l.gsub!(/([!?])@/){"#$1'"}
l.gsub!(/([Aa])&/){"\\r{#$1}"}
l.gsub!(/AE&/){"\\AE"}
l.gsub!(/ae&/){"\\ae"}
l.gsub!(/OE&/){"\\OE"}
l.gsub!(/oe&/){"\\oe"}
l.gsub!(/s&/){"\\ss"}
l.gsub!(/([cC]),/){"\\c{#$1}"}
l.gsub!(/〔/,'')
l.gsub!(/〕/,'')
return l
end
# 外字の処理用
def translate_gaiji(l)
if l =~/※[#([^]]*)、([^、]]*)]/
if @gaiji2[$1]
l.gsub!(/※[#([^]]*)、([^、]]*)]/){@gaiji2[$1]}
end
end
## ※[#「姉」の正字、「女+※[#第3水準1-85-57]のつくり」、256-下-16]
if l =~/※[#([^]]*※[#[^]]*][^]]*)、([^、]]*)]/
if @gaiji2[$1]
l.gsub!(/※[#([^]]*※[#[^]]*][^]]*)、([^、]]*)]/){@gaiji2[$1]}
end
end
## ※[#「さんずい+闊」]
if l =~ /※[#「([^]]+?)」]/
if @gaiji2[$1]
l.gsub!(/※[#「([^]]+?)」]/){@gaiji2[$1]}
end
end
if l =~ /※[#[^]]*?※[#[^]]*?[12]\-\d{1,2}\-\d{1,2}[^]]*?][^]]*?]/
l.gsub!(/※[#([^]]*?)※[#([^]]*?([12]\-\d{1,2}\-\d{1,2})[^]]*?)]([^]]*?)]/){"※\\footnote{#$1"+@gaiji[$3]+"#$4}"}
end
if l =~ /※[#[^]]*?[12]\-\d{1,2}\-\d{1,2}[^]]*?]/
l.gsub!(/※[#([^]]*?([12]\-\d{1,2}\-\d{1,2})[^]]*?)]/){@gaiji[$2]}
end
if l =~ /※[#感嘆符三つ.*]/
l.gsub!(/※[#感嘆符三つ.*?]/){ "\\rensuji{!!!}"}
end
if l =~ /※[#[^]]+?]/
l.gsub!(/※[#([^]]+?)]/, '※\\footnote{\1}')
end
if l =~ /※/
STDERR.puts("Remaining Unprocessed Gaiji Character in Line #@line_num.")
@log_text << normalize("未処理の外字が#{@line_num}行目にあります.\n")
end
return l
end
# ルビの処理用
def translate_ruby(l)
# 被ルビ文字列内に外字の注記があるばあい,ルビ文字列の後ろに移動する
# ただし,順番が入れ替わってしまう
while l =~ /※\\footnote\{[^(?:\\footnote)]+\}(?:#{KANJIPAT}|(?:\\UTF\{[0-9a-fA-F]+\}|\\CID\{[0-9]+\}))*?《.+?》/
l.sub!(/(※)(\\footnote\{[^(?:\\footnote)]+\})((?:#{KANJIPAT}|(?:\\UTF\{[0-9a-fA-F]+\}|\\CID\{[0-9]+\}))*?《.+?》)/, '\1\3\2')
end
# 被ルビ文字列内に誤記などの注記が存在する場合は、ルビの後ろに移動する
while l =~ /(?:#{KANJIPAT}|(?:\\UTF\{[0-9a-fA-F]+\}|\\CID\{[0-9]+\}))+?[#[^]]*?](?:#{KANJIPAT}|(?:\\UTF\{[0-9a-fA-F]+\}|\\CID\{[0-9]+\}))*?《.+?》/
l.sub!(/((?:#{KANJIPAT}|(?:\\UTF\{[0-9a-fA-F]+\}|\\CID\{[0-9]+\}))+?)([#[^]]*?])((?:#{KANJIPAT}|(?:\\UTF\{[0-9a-fA-F]+\}|\\CID\{[0-9]+\}))*?《.+?》)/, '\1\3\2')
end
# ルビ文字列内に誤記などの注記が存在する場合は、ルビの後ろに移動する
if l =~ /《[^》]*?[#[^]]*?][^》]*?》/
l.gsub!(/(《[^》]*?)([#[^]]*?])([^》]*?》)/, '\1\3\2')
end
# 一連のルビの処理
# 1 縦棒ありの場合
if l =~ /|/
l.gsub!(/|(.+?)《(.+?)》/, '\\ruby{\1}{\2}')
end
# 2 漢字および外字
if l =~ /(?:#{KANJIPAT}|(?:\\UTF\{[0-9a-fA-F]+\}|\\CID\{[0-9]+\}))+?《.+?》/
l.gsub!(/((?:#{KANJIPAT}|(?:\\UTF\{[0-9a-fA-F]+\}|\\CID\{[0-9]+\}))+?)《(.+?)》/, '\\ruby{\1}{\2}')
end
# 3 ひらがな
if l =~ /[あ-ん](?:[ぁ-んーヽヾ]|\\CID\{12107\})*?《.+?》/
l.gsub!(/([あ-ん](?:[ぁ-んーヽヾ]|\\CID\{12107\})*?)《(.+?)》/, '\\ruby{\1}{\2}')
end
# 4 カタカナ
if l =~ /[ア-ヴ](?:[ァ-ヴーゝゞ]|\\CID\{12107\})*?《.+?》/
l.gsub!(/([ア-ヴ](?:[ァ-ヴーゝゞ]|\\CID\{12107\})*?)《(.+?)》/, '\\ruby{\1}{\2}')
end
# 5 全角アルファベットなど
if l =~ /[A-Za-zΑ-Ωα-ωА-Яа-я・]+?《.+?》/
l.gsub!(/([A-Za-zΑ-Ωα-ωА-Яа-я・]+?)《(.+?)》/, '\\ruby{\1}{\2}')
end
# 6 半角英数字
if l =~ /[A-Za-z0-9#\-\;\&.\'\^\` ]+?《.+?》/
l.gsub!(/([A-Za-z0-9#\-\;\&. ]+?)《(.+?)》/, '\\ruby{\1}{\2}')
end
if l =~ /《.*?》/
STDERR.puts("Unknown ruby pattern found in #@line_num.")
@log_text << normalize("未処理のルビが#{@line_num}行目にあります.\n")
end
return l
end
# 傍点の処理用
def translate_bouten(l)
if l =~ /[#「.+?」に傍点]/
l.gsub!(/(.+?)[#.*?「\1」に傍点]/, '\\bou{\1}')
end
if l =~ /[#「.+?」に白ゴマ傍点]/
l.gsub!(/(.+?)[#.*?「\1」に白ゴマ傍点]/, '\\sirogomabou{\1}')
end
if l =~ /[#「.+?」に丸傍点]/
l.gsub!(/(.+?)[#.*?「\1」に丸傍点]/, '\\marubou{\1}')
end
if l =~ /[#「.+?」に白丸傍点]/
l.gsub!(/(.+?)[#.*?「\1」に白丸傍点]/, '\\siromarubou{\1}')
end
if l =~ /[#「.+?」に黒三角傍点]/
l.gsub!(/(.+?)[#.*?「\1」に黒三角傍点]/, '\\kurosankakubou{\1}')
end
if l =~ /[#「.+?」に白三角傍点]/
l.gsub!(/(.+?)[#.*?「\1」に白三角傍点]/, '\\sirosankakubou{\1}')
end
if l =~ /[#「.+?」に二重丸傍点]/
l.gsub!(/(.+?)[#.*?「\1」に二重丸傍点]/, '\\nijyuumarubou{\1}')
end
if l =~ /[#「.+?」に蛇の目傍点]/
l.gsub!(/(.+?)[#.*?「\1」に蛇の目傍点]/, '\\jyanomebou{\1}')
end
if l =~ /[#傍点].+?[#傍点終わり]/
l.gsub!(/[#傍点](.+?)[#傍点終わり]/){
str = $1
str.gsub!(/(\\UTF{.+?})/){ "{"+$1+"}"}
"\\bou{"+str+"}"
}
end
return l
end
# 傍線の処理用
def translate_bousen(l)
if l =~ /[#「.+?」に傍線]/
l.gsub!(/(.+?)[#「\1」に傍線]/, '\\bousen{\1}')
end
if l =~ /[#「.+?」に二重傍線]/
l.gsub!(/(.+?)[#「\1」に二重傍線]/, '\\bousen{\1}')
end
if l =~ /[#「.+?」に鎖線]/
l.gsub!(/(.+?)[#「\1」に鎖線]/, '\\bousen{\1}')
end
if l =~ /[#「.+?」に破線]/
l.gsub!(/(.+?)[#「\1」に破線]/, '\\bousen{\1}')
end
if l =~ /[#「.+?」に波線]/
l.gsub!(/(.+?)[#「\1」に波線]/, '\\bousen{\1}')
end
return l
end
# ルビの調整
def tuning_ruby(l)
# 1 直前が漢字の場合
if l =~ /(?:#{KANJIPAT}|(?:\\UTF\{[0-9a-fA-F]+\}|\\CID\{[0-9]+\}))\\ruby/
l.gsub!(/((?:#{KANJIPAT}|(?:\\UTF\{[0-9a-fA-F]+\}|\\CID\{[0-9]+\})))\\ruby/, '\1\\Ruby')
end
# 2 直後が漢字の場合
if l =~ /\\ruby\{(?:#{KANJIPAT}|(?:\\UTF\{[0-9a-fA-F]+\}|\\CID\{[0-9]+\}))+?\}\{(?:[^\\\{\}]|(?:\\UTF\{[0-9a-fA-F]+\}|\\CID\{[0-9]+\}))+?\}(?:#{KANJIPAT}|(?:\\UTF\{[0-9a-fA-F]+\}|\\CID\{[0-9]+\}))/
l.gsub!(/\\ruby(\{(?:#{KANJIPAT}|(?:\\UTF\{[0-9a-fA-F]+\}|\\CID\{[0-9]+\}))+?\}\{(?:[^\\\{\}]|(?:\\UTF\{[0-9a-fA-F]+\}|\\CID\{[0-9]+\}))+?\}(?:#{KANJIPAT}|(?:\\UTF\{[0-9a-fA-F]+\}|\\CID\{[0-9]+\})))/, '\\Ruby\1')
end
# 3 ルビが連続する場合
while l =~ /\\(?:ruby|RUBY|Ruby)\{(?:#{KANJIPAT}|(?:\\UTF\{[0-9a-fA-F]+\}|\\CID\{[0-9]+\}))+?\}\{(?:[^\\{}]|(?:\\UTF\{[0-9a-fA-F]+\}|\\CID\{[0-9]+\}))+?\}\\ruby/
l.sub!(/\\(?:ruby|RUBY|Ruby)(\{(?:#{KANJIPAT}|(?:\\UTF\{[0-9a-fA-F]+\}|\\CID\{[0-9]+\}))+?\}\{(?:[^\\{}]|(?:\\UTF\{[0-9a-fA-F]+\}|\\CID\{[0-9]+\}))+?\})\\ruby/, '\\RUBY\1\\RUBY')
end
end
# 傍点の調整
def tuning_bou(l)
# 傍点の中の「くの字点」を変換
while l =~ /(\\[a-z]*?bou\{(?:\w|(?:\\UTF\{[0-9a-fA-F]+\}|\\CID\{[0-9]+\}))+?)(\\ajD?Kunoji)\{\}((?:\w|(?:\\UTF\{[0-9a-fA-F]+\}|\\CID\{[0-9]+\}))*?)\}/
l.gsub!(/((\\([a-z]*?)bou)\{(?:\w|(?:\\UTF\{[0-9a-fA-F]+\}|\\CID\{[0-9]+\}))+?)(\\ajD?Kunoji)\{\}((?:\w|(?:\\UTF\{[0-9a-fA-F]+\}|\\CID\{[0-9]+\}))*?)\}/, '\1}\4with\3Bou\2{\5}')
end
if l =~ /\\[a-z]*?bou\{\}/
l.gsub!(/\\([a-z]*?)bou\{\}/, '{}')
end
return l
end
# 外字用ハッシュを作成
def load_gaiji
datadir = File.dirname(__FILE__)+"/../data"
File.open(datadir+"/gaiji.txt") do |f|
while gaiji_line = f.gets
gaiji_line.chomp!
key, data = gaiji_line.split
@gaiji[key] = data
end
end
File.open(datadir+"/gaiji2.txt") do |f|
while gaiji_line = f.gets
gaiji_line.chomp!
key, data = gaiji_line.split
data.gsub(/#.*$/,'')
@gaiji2[key] = data
end
end
end
#
# メインパート
#
def main
load_gaiji()
# 入出力ファイルの定義
outputfile_name = @inputfile_name.sub(/\.txt$/, ".tex")
inputfile = File.open(@inputfile_name)
outputfile = File.open(outputfile_name, "w")
# プリアンブルの処理
empty_line = 0
in_note = false
meta_data = []
while empty_line < 2
line = inputfile.gets.chomp
line = NKF::nkf('-wS', line)
if in_note
if line =~ /^-+$/
in_note = false
end
else
if line =~ /^-+$/
in_note = true
else
if line =~ /^$/
empty_line += 1
else
if line =~ /《.*?》/
translate_ruby(line)
end
meta_data << line
end
end
end
end
@line_num += meta_data.size
@title = normalize(meta_data.shift)
case meta_data.size
when 1
@author = normalize(meta_data.shift)
when 2
@subtitle = normalize(meta_data.shift)
@author = normalize(meta_data.shift)
when 3
@subtitle = normalize(meta_data.shift)
@author = normalize(meta_data.shift)
@subauthor = normalize(meta_data.shift)
else
@subtitle = normalize(meta_data.shift)
@author = normalize(meta_data.shift)
@subauthor = normalize(meta_data.shift)
@meta_data = []
until meta_data.empty?
@metadata << normalize(meta_data.shift)
end
end
outputfile.write(preamble())
outputfile.print "\\title{"+@title+"}\n"
outputfile.print "\\subtitle{"+@subtitle+"}\n" if @subtitle
outputfile.print "\\author{"+@author+"}\n"
outputfile.print "\\subauthor{"+@subauthor+"}\n" if @subauthor
if @meta_data
@meta_data.each do |data|
outputfile.print "\\metadata{"+data+"}\n"
end
end
outputfile.print "\\date{}\n"
# 本文の処理
outputfile.print "\\begin{document}\n\\maketitle\n"
@line_num += PreambleLineNumber
while line = inputfile.gets
@line_num += 1
line.chomp!
line = NKF::nkf('-wS', line)
break if line =~ /^底本/
if line =~ /^ 「/
line.sub!(/^ 「/, "\\mbox{ }\\kern0mm\\inhibitglue「")
end
if line =~ /[ワヰヱヲ]゛/
line.gsub!(/ワ゛/, "\\ajLig{ワ゛}")
line.gsub!(/ヰ゛/, "\\ajLig{ヰ゛}")
line.gsub!(/ヱ゛/, "\\ajLig{ヱ゛}")
line.gsub!(/ヲ゛/, "\\ajLig{ヲ゛}")
end
if line =~ /[?!] /
line.gsub!(/([?!]) /, '\1{}')
end
if line =~ /——/
line.gsub!(/——/, "\\——{}")
end
if line =~ //\/
line.gsub!(//\/, "\\ajKunoji{}")
end
if line =~ //″\/
line.gsub!(//″\/, "\\ajDKunoji{}")
end
=begin
if line =~ /^ +.+/
line.gsub!(/^ +([一二三四五六七八九〇十].*)/, '\\section*{\1}')
end
=end
while line =~ /(.+?)[#(「\1」は横?[1|一]文字[^]]*?)]/
line = line.sub(/(.+?)[#(「\1」は横?[1|一]文字[^]]*?)]/){"\\ajLig{"+to_single_byte($1)+"}"}
end
if line =~ /[#改丁.*?]/
line.sub!(/[#改丁.*?]/, "\\cleardoublepage")
end
if line =~ /[#改[頁|ページ].*?]/
line.sub!(/[#改[頁|ページ].*?]/, "\\clearpage")
end
if line =~ /〔.*?〕/
translate_accent(line)
end
if line =~ /※/
translate_gaiji(line)
end
if line =~ /[#(.+?)傍点]/
translate_bouten(line)
end
if line =~ /[#傍点].+?[#傍点終わり]/
translate_bouten(line)
end
if line =~ /《.*?》/
translate_ruby(line)
end
if line =~ /[#「(.+?)」に(?:二重)?[傍鎖破波]線]/
translate_bousen(line)
end
if line =~ /[#この行.*?([1234567890一二三四五六七八九〇十]*)字下げ]/
outputfile.print "\\begin{jisage}{"+to_single_byte($1)+"}\n"
line = line.sub(/[#この行.*?字下げ]/, "")+"\n\\end{jisage}"
@line_num += 2
end
if line =~ /[#ここから地から.+字上げ]/
line.sub!(/[#ここから地から([1234567890一二三四五六七八九〇十]*)字上げ]/){"\\begin{flushright}\\advance\\rightskip"+to_single_byte($1)+"zw"}
@jisage = true
end
if line =~ /[#ここで字上げ終わり]/
line.sub!(/[#ここで字上げ終わり]/){"\\end{flushright}"}
@jisage = false
end
if line =~ /[#.*?字下げ[^]]*?(?:終わり|まで)[^]]*?]/
line = line.sub(/[#.*?字下げ.*?(?:終わり|まで).*?]/, "")+"\\end{jisage}"
@jisage = false
end
if line =~ /[#ここから.+字下げ.*?]/
if @jisage
outputfile.print "\\end{jisage}\n"
@line_num += 1
end
line.sub!(/[#ここから.*?([1234567890一二三四五六七八九〇十]*)字下げ.*?]/){"\\begin{jisage}{"+to_single_byte($1)+"}"}
@jisage = true
end
if line =~ /[#.*?地付き.*?]$/
line = "\\begin{flushright}\n"+line.sub(/[#.*?地付き.*?]$/, "\\end{flushright}")
@line_num += 1
elsif line =~ /[#.*?地付き.*?]/
line = line.sub(/[#.*?地付き.*?]/, "\\begin{flushright}\n")+"\\end{flushright}"
@line_num += 1
end
if line =~ /[#.*?(?:行末|地)(?:から|より).*?([1234567890一二三四五六七八九〇十]*)字上.*?]$$/
line = "\\begin{flushright}\\advance\\rightskip"+to_single_byte($1)+"zw\n"+line.sub(/[#.*?(?:行末|地)(?:から|より).*?字上.*?]$/, "\\end{flushright}")
@line_num += 1
elsif line =~ /^(.*?)[#.*?(?:行末|地)(?:から|より).*?([1234567890一二三四五六七八九〇十]*)字上.*?](.*)$/
line = $1+"\\begin{flushright}\\advance\\rightskip"+to_single_byte($2)+"zw\n"+$3+"\\end{flushright}"
@line_num += 1
end
if line =~ /[#「.+?」は返り点]/
line.gsub!(/(.+)[#「\1」は返り点]/, '\\kaeriten{\ajKunten{\1}}')
end
if line =~ /[#[一二上中下甲乙丙丁レ]*]/
line.gsub!(/[#([一二上中下甲乙丙丁レ]*)]/, '\\kaeriten{\ajKunten{\1}}')
end
if line =~ /[#(.*?)]/
line.gsub!(/[#((.*?))]/, '\\okurigana{\1}')
end
if line =~ /[#「.+?」.*?ママ.*?注記]/
line.gsub!(/(.+)[#「\1」.*?ママ.*?注記]/, '\\ruby{\1}{ママ}')
end
if line =~ /[#[^]]+(([^)]+.png))[^]]+]/
line.gsub!(/[#[^]]+(([^)]+.png))[^]]+]/, '\\sashie{\1}')
end
if line =~ /[#([1234567890一二三四五六七八九〇十]*)字下げ]/
outputfile.print "\\begin{jisage}{"+to_single_byte($1)+"}\n"
line = line.sub(/[#.*?字下げ]/, "")+"\n\\end{jisage}"
end
if line =~ /[#「(.*?)」は大見出し]/
line.gsub!(/(.*?)[#「(.*?)」は大見出し]/){"{\\Large #{$1}}"}
end
if line =~ /[#「(.*?)」は中見出し]/
line.gsub!(/(.*?)[#「(.*?)」は中見出し]/){"{\\large #{$1}}"}
end
if line =~ /[#「(.*?)」は小見出し]/
line.gsub!(/(.*?)[#「(.*?)」は小見出し]/){"{\\gtfamily #{$1}}"}
end
if line =~ /[#小見出し](.*?)[#小見出し終わり]/
line.gsub!(/[#小見出し](.*?)[#小見出し終わり]/){"{\\gtfamily #{$1}}"}
end
if line =~ /[#ここから中見出し]/
line.gsub!(/[#ここから中見出し]/){"{\\large"}
end
if line =~ /[#ここで中見出し終わり]/
line.gsub!(/[#ここで中見出し終わり]/){"}"}
end
if line =~ /[#ページの左右中央]/
## XXX とりあえず無視
line.gsub!(/[#ページの左右中央]/, "")
end
if line =~ /[#「(.*?)」は太字]/
line.gsub!(/(.+)[#「\1」は太字]/,'{\\textbf{\1}}')
end
if line =~ /[#「.+?」は縦中横]/
line.gsub!(/(.+)[#「\1」は縦中横]/, '\\rensuji{\1}')
end
if line =~ /[#「(1)(/)(\d+)」は分数]/
bunshi = to_single_byte($1)
bunbo = $3
line.gsub!(/(.+)[#「.+?」は分数]/, "\\rensuji{#{bunshi}/#{bunbo}}")
end
if line =~ /[#「.+?」は罫囲み]/
line.gsub!(/(.+)[#「\1」は罫囲み]/, '\\fbox{\1}')
end
if line =~ /[#「(.+?)」は(本文より)?([123456])段階大きな文字]/
line.gsub!(/([^[]+?)[#「\1」は(本文より)?([123456])段階大きな文字]/) {
num = to_single_byte($3).to_i
case num
when 1
"{\\large #{$1}}"
when 2
"{\\Large #{$1}}"
when 3
"{\\LARGE #{$1}}"
when 4
"{\\huge #{$1}}"
when 5
"{\\Huge #{$1}}"
when 6
"{\\Huge #{$1}}"
end
}
end
if line =~ /[#「.+?」は斜体]/
line.gsub!(/(.+)[#「\1」は斜体]/){
shatai = to_single_byte($1).tr("abcdefghijklmnopqrstuvwxyz","abcdefghijklmnopqrstuvwxyz")
"\\rensuji{\\textsl{"+shatai+"}}"
}
end
if line =~ /[#「[0-90-9]」は下付き小文字]/
line.gsub!(/([A-Za-za-zA-Zαβδγ])([0-90-9])[#「\2」は下付き小文字]/){
"$"+$1+"_{"+to_single_byte($2)+"}$"
}
end
line.tr!("┌┐┘└│─","┐┘└┌─│")
if line =~ /[#改段]/
line.sub!(/[#改段]/, "\\clearpage")
end
if line =~ /[aioeu]\^/i
line.gsub!(/([aioeu])\^/i){ "\\\^{#{$1}}"}
end
if line =~ /[aioeu]\'/i
line.gsub!(/([aioeu])\'/i){ "\\\'{#{$1}}"}
end
if line =~ /[#天から.*?([1234567890一二三四五六七八九〇十]*)字下げ]/
num = to_single_byte($1).to_i
if num > MAX_SAGE
num = MAX_SAGE
end
outputfile.print "\\begin{jisage}{#{num}}\n"
line = line.sub(/[#天から.*?字下げ]/, "")+"\n\\end{jisage}"
end
line.gsub!(/[#図形 □(四角)に内接する◆]/, '{\setlength{\fboxsep}{0pt}\fbox{◆}}')
if line =~ /[#[^]]+?]/
line.gsub!(/[#([^]]+?)]/, '\\endnote{\1}')
end
if line =~ /\\[a-z]*?bou/
tuning_bou(line)
end
if line =~ /\\ajD?Kunoji\{\}\}/
line.gsub!(/(\\ajD?Kunoji)\{\}\}/, '\1}')
end
if line =~ /\\ruby/
tuning_ruby(line)
end
if line =~ /^$/
line = " "
end
outputfile.print normalize(line)+"\n"
end
# 底本の処理
outputfile.write(postamble())
outputfile.print normalize(line)+"\n"
while line = inputfile.gets
line.chomp!
line = NKF::nkf('-wS', line)
outputfile.print normalize(line)+"\n"
end
outputfile.print "\n\\end{minipage}\n\\end{teihon}\n\\end{document}\n"
if @log_text.size > 0
until @log_text.empty?
outputfile.print @log_text.shift
end
end
end
end
add syntex rules(ゴシック体,罫線)
# encoding: utf-8
# Original source is "青空文庫→TeX(ver. 0.9.5 2004/5/5 psitau)"
# see: http://psitau.kitunebi.com/aozora.html
#
# Also another source is "青空キンドル [Beta]"
# see: http://a2k.aill.org/
require 'nkf'
class Aozora4Reader
PreambleLineNumber=13
KANJIPAT = "[々〇〻\u3400-\u9FFF\uF900-\uFAFF※ヶ〆]"
MAX_SAGE = 20
def self.a4r(file)
self.new(file).main
end
def initialize(file)
@inputfile_name = file
@jisage = false
@log_text = []
@line_num=0
@gaiji = {}
@gaiji2 = {}
end
# UTF-8で出力
def normalize(l)
##l.gsub!(/&/, '\\\\&')
l.to_s
end
# 全角→半角
def to_single_byte(str)
s = str.dup
if s =~ /[0-9]/
s.tr!("1234567890", "1234567890")
elsif s =~ /[一二三四五六七八九〇]/
s.tr!("一二三四五六七八九〇", "1234567890")
end
case s
when /\d十\d/
s.sub!(/(\d)十(\d)/, '\1\2')
when /\d十/
s.sub!(/(\d)十/, '\{1}0')
when /十\d/
s.sub!(/十(\d)/, '1\1')
when /十/
s.sub!(/十/, "10")
end
if s =~/[!?]/
s.tr!("!?", "!?")
end
return s
end
# プリアンブルの出力
def preamble
str = <<"END_OF_PRE"
\\documentclass[a5paper]{tbook}
%\\documentclass[a5paper, twocolumn]{tbook}
%\\usepackage[deluxe]{otf}
\\usepackage[expert, deluxe]{otf}
%\\usepackage{utf}
\\usepackage{furikana}
\\usepackage{type1cm}
\\usepackage[size=large]{aozora4reader}
\\def\\rubykatuji{\\rubyfamily\\tiny}
%\\def\\rubykatuji{\\tiny}%for UTF package
%\\renewenvironment{teihon}{\\comment}{\\endcomment}
\\usepackage[dvipdfm,bookmarks=false,bookmarksnumbered=false,hyperfootnotes=false,%
pdftitle={#{@title}},%
pdfauthor={#{@author}}]{hyperref}
%% Bookmarkの文字化け対策(日本語向け)
\\ifnum 46273=\\euc"B4C1 % 46273 == 0xB4C1 == 漢(EUC-JP)
\\AtBeginDvi{\\special{pdf:tounicode EUC-UCS2}}%
\\else
\\AtBeginDvi{\\special{pdf:tounicode 90ms-RKSJ-UCS2}}%
\\fi
END_OF_PRE
str
end
# 底本の表示用
def postamble
str = <<"END_OF_POST"
\\theendnotes
\\begin{teihon}
\\clearpage\\null\\newpage\\thispagestyle{empty}
\\begin{minipage}<y>{\\textheight}
\\vspace{1\\baselineskip}
\\scriptsize
END_OF_POST
str
end
# アクセントの処理用
# http://www.aozora.gr.jp/accent_separation.html
# http://cosmoshouse.com/tools/acc-conv-j.htm
def translate_accent(l)
l.gsub!(/([ij]):/){"\\\"{\\#{$1}}"}
l.gsub!(/([AIOEUaioeu])(['`~^])/){"\\#$2{#$1}"}
l.gsub!(/([AIOEUaioeu]):/){"\\\"{#$1}"}
l.gsub!(/([AIOEUaioeu])_/){"\\={#$1}"}
l.gsub!(/([!?])@/){"#$1'"}
l.gsub!(/([Aa])&/){"\\r{#$1}"}
l.gsub!(/AE&/){"\\AE"}
l.gsub!(/ae&/){"\\ae"}
l.gsub!(/OE&/){"\\OE"}
l.gsub!(/oe&/){"\\oe"}
l.gsub!(/s&/){"\\ss"}
l.gsub!(/([cC]),/){"\\c{#$1}"}
l.gsub!(/〔/,'')
l.gsub!(/〕/,'')
return l
end
# 外字の処理用
def translate_gaiji(l)
if l =~/※[#([^]]*)、([^、]]*)]/
if @gaiji2[$1]
l.gsub!(/※[#([^]]*)、([^、]]*)]/){@gaiji2[$1]}
end
end
## ※[#「姉」の正字、「女+※[#第3水準1-85-57]のつくり」、256-下-16]
if l =~/※[#([^]]*※[#[^]]*][^]]*)、([^、]]*)]/
if @gaiji2[$1]
l.gsub!(/※[#([^]]*※[#[^]]*][^]]*)、([^、]]*)]/){@gaiji2[$1]}
end
end
## ※[#「さんずい+闊」]
if l =~ /※[#「([^]]+?)」]/
if @gaiji2[$1]
l.gsub!(/※[#「([^]]+?)」]/){@gaiji2[$1]}
end
end
if l =~ /※[#[^]]*?※[#[^]]*?[12]\-\d{1,2}\-\d{1,2}[^]]*?][^]]*?]/
l.gsub!(/※[#([^]]*?)※[#([^]]*?([12]\-\d{1,2}\-\d{1,2})[^]]*?)]([^]]*?)]/){"※\\footnote{#$1"+@gaiji[$3]+"#$4}"}
end
if l =~ /※[#[^]]*?[12]\-\d{1,2}\-\d{1,2}[^]]*?]/
l.gsub!(/※[#([^]]*?([12]\-\d{1,2}\-\d{1,2})[^]]*?)]/){@gaiji[$2]}
end
if l =~ /※[#感嘆符三つ.*]/
l.gsub!(/※[#感嘆符三つ.*?]/){ "\\rensuji{!!!}"}
end
if l =~ /※[#[^]]+?]/
l.gsub!(/※[#([^]]+?)]/, '※\\footnote{\1}')
end
if l =~ /※/
STDERR.puts("Remaining Unprocessed Gaiji Character in Line #@line_num.")
@log_text << normalize("未処理の外字が#{@line_num}行目にあります.\n")
end
return l
end
# ルビの処理用
def translate_ruby(l)
# 被ルビ文字列内に外字の注記があるばあい,ルビ文字列の後ろに移動する
# ただし,順番が入れ替わってしまう
while l =~ /※\\footnote\{[^(?:\\footnote)]+\}(?:#{KANJIPAT}|(?:\\UTF\{[0-9a-fA-F]+\}|\\CID\{[0-9]+\}))*?《.+?》/
l.sub!(/(※)(\\footnote\{[^(?:\\footnote)]+\})((?:#{KANJIPAT}|(?:\\UTF\{[0-9a-fA-F]+\}|\\CID\{[0-9]+\}))*?《.+?》)/, '\1\3\2')
end
# 被ルビ文字列内に誤記などの注記が存在する場合は、ルビの後ろに移動する
while l =~ /(?:#{KANJIPAT}|(?:\\UTF\{[0-9a-fA-F]+\}|\\CID\{[0-9]+\}))+?[#[^]]*?](?:#{KANJIPAT}|(?:\\UTF\{[0-9a-fA-F]+\}|\\CID\{[0-9]+\}))*?《.+?》/
l.sub!(/((?:#{KANJIPAT}|(?:\\UTF\{[0-9a-fA-F]+\}|\\CID\{[0-9]+\}))+?)([#[^]]*?])((?:#{KANJIPAT}|(?:\\UTF\{[0-9a-fA-F]+\}|\\CID\{[0-9]+\}))*?《.+?》)/, '\1\3\2')
end
# ルビ文字列内に誤記などの注記が存在する場合は、ルビの後ろに移動する
if l =~ /《[^》]*?[#[^]]*?][^》]*?》/
l.gsub!(/(《[^》]*?)([#[^]]*?])([^》]*?》)/, '\1\3\2')
end
# 一連のルビの処理
# 1 縦棒ありの場合
if l =~ /|/
l.gsub!(/|(.+?)《(.+?)》/, '\\ruby{\1}{\2}')
end
# 2 漢字および外字
if l =~ /(?:#{KANJIPAT}|(?:\\UTF\{[0-9a-fA-F]+\}|\\CID\{[0-9]+\}))+?《.+?》/
l.gsub!(/((?:#{KANJIPAT}|(?:\\UTF\{[0-9a-fA-F]+\}|\\CID\{[0-9]+\}))+?)《(.+?)》/, '\\ruby{\1}{\2}')
end
# 3 ひらがな
if l =~ /[あ-ん](?:[ぁ-んーヽヾ]|\\CID\{12107\})*?《.+?》/
l.gsub!(/([あ-ん](?:[ぁ-んーヽヾ]|\\CID\{12107\})*?)《(.+?)》/, '\\ruby{\1}{\2}')
end
# 4 カタカナ
if l =~ /[ア-ヴ](?:[ァ-ヴーゝゞ]|\\CID\{12107\})*?《.+?》/
l.gsub!(/([ア-ヴ](?:[ァ-ヴーゝゞ]|\\CID\{12107\})*?)《(.+?)》/, '\\ruby{\1}{\2}')
end
# 5 全角アルファベットなど
if l =~ /[A-Za-zΑ-Ωα-ωА-Яа-я・]+?《.+?》/
l.gsub!(/([A-Za-zΑ-Ωα-ωА-Яа-я・]+?)《(.+?)》/, '\\ruby{\1}{\2}')
end
# 6 半角英数字
if l =~ /[A-Za-z0-9#\-\;\&.\'\^\` ]+?《.+?》/
l.gsub!(/([A-Za-z0-9#\-\;\&. ]+?)《(.+?)》/, '\\ruby{\1}{\2}')
end
if l =~ /《.*?》/
STDERR.puts("Unknown ruby pattern found in #@line_num.")
@log_text << normalize("未処理のルビが#{@line_num}行目にあります.\n")
end
return l
end
# 傍点の処理用
def translate_bouten(l)
if l =~ /[#「.+?」に傍点]/
l.gsub!(/(.+?)[#.*?「\1」に傍点]/, '\\bou{\1}')
end
if l =~ /[#「.+?」に白ゴマ傍点]/
l.gsub!(/(.+?)[#.*?「\1」に白ゴマ傍点]/, '\\sirogomabou{\1}')
end
if l =~ /[#「.+?」に丸傍点]/
l.gsub!(/(.+?)[#.*?「\1」に丸傍点]/, '\\marubou{\1}')
end
if l =~ /[#「.+?」に白丸傍点]/
l.gsub!(/(.+?)[#.*?「\1」に白丸傍点]/, '\\siromarubou{\1}')
end
if l =~ /[#「.+?」に黒三角傍点]/
l.gsub!(/(.+?)[#.*?「\1」に黒三角傍点]/, '\\kurosankakubou{\1}')
end
if l =~ /[#「.+?」に白三角傍点]/
l.gsub!(/(.+?)[#.*?「\1」に白三角傍点]/, '\\sirosankakubou{\1}')
end
if l =~ /[#「.+?」に二重丸傍点]/
l.gsub!(/(.+?)[#.*?「\1」に二重丸傍点]/, '\\nijyuumarubou{\1}')
end
if l =~ /[#「.+?」に蛇の目傍点]/
l.gsub!(/(.+?)[#.*?「\1」に蛇の目傍点]/, '\\jyanomebou{\1}')
end
if l =~ /[#傍点].+?[#傍点終わり]/
l.gsub!(/[#傍点](.+?)[#傍点終わり]/){
str = $1
str.gsub!(/(\\UTF{.+?})/){ "{"+$1+"}"}
"\\bou{"+str+"}"
}
end
return l
end
# 傍線の処理用
def translate_bousen(l)
if l =~ /[#「.+?」に傍線]/
l.gsub!(/(.+?)[#「\1」に傍線]/, '\\bousen{\1}')
end
if l =~ /[#「.+?」に二重傍線]/
l.gsub!(/(.+?)[#「\1」に二重傍線]/, '\\bousen{\1}')
end
if l =~ /[#「.+?」に鎖線]/
l.gsub!(/(.+?)[#「\1」に鎖線]/, '\\bousen{\1}')
end
if l =~ /[#「.+?」に破線]/
l.gsub!(/(.+?)[#「\1」に破線]/, '\\bousen{\1}')
end
if l =~ /[#「.+?」に波線]/
l.gsub!(/(.+?)[#「\1」に波線]/, '\\bousen{\1}')
end
return l
end
# ルビの調整
def tuning_ruby(l)
# 1 直前が漢字の場合
if l =~ /(?:#{KANJIPAT}|(?:\\UTF\{[0-9a-fA-F]+\}|\\CID\{[0-9]+\}))\\ruby/
l.gsub!(/((?:#{KANJIPAT}|(?:\\UTF\{[0-9a-fA-F]+\}|\\CID\{[0-9]+\})))\\ruby/, '\1\\Ruby')
end
# 2 直後が漢字の場合
if l =~ /\\ruby\{(?:#{KANJIPAT}|(?:\\UTF\{[0-9a-fA-F]+\}|\\CID\{[0-9]+\}))+?\}\{(?:[^\\\{\}]|(?:\\UTF\{[0-9a-fA-F]+\}|\\CID\{[0-9]+\}))+?\}(?:#{KANJIPAT}|(?:\\UTF\{[0-9a-fA-F]+\}|\\CID\{[0-9]+\}))/
l.gsub!(/\\ruby(\{(?:#{KANJIPAT}|(?:\\UTF\{[0-9a-fA-F]+\}|\\CID\{[0-9]+\}))+?\}\{(?:[^\\\{\}]|(?:\\UTF\{[0-9a-fA-F]+\}|\\CID\{[0-9]+\}))+?\}(?:#{KANJIPAT}|(?:\\UTF\{[0-9a-fA-F]+\}|\\CID\{[0-9]+\})))/, '\\Ruby\1')
end
# 3 ルビが連続する場合
while l =~ /\\(?:ruby|RUBY|Ruby)\{(?:#{KANJIPAT}|(?:\\UTF\{[0-9a-fA-F]+\}|\\CID\{[0-9]+\}))+?\}\{(?:[^\\{}]|(?:\\UTF\{[0-9a-fA-F]+\}|\\CID\{[0-9]+\}))+?\}\\ruby/
l.sub!(/\\(?:ruby|RUBY|Ruby)(\{(?:#{KANJIPAT}|(?:\\UTF\{[0-9a-fA-F]+\}|\\CID\{[0-9]+\}))+?\}\{(?:[^\\{}]|(?:\\UTF\{[0-9a-fA-F]+\}|\\CID\{[0-9]+\}))+?\})\\ruby/, '\\RUBY\1\\RUBY')
end
end
# 傍点の調整
def tuning_bou(l)
# 傍点の中の「くの字点」を変換
while l =~ /(\\[a-z]*?bou\{(?:\w|(?:\\UTF\{[0-9a-fA-F]+\}|\\CID\{[0-9]+\}))+?)(\\ajD?Kunoji)\{\}((?:\w|(?:\\UTF\{[0-9a-fA-F]+\}|\\CID\{[0-9]+\}))*?)\}/
l.gsub!(/((\\([a-z]*?)bou)\{(?:\w|(?:\\UTF\{[0-9a-fA-F]+\}|\\CID\{[0-9]+\}))+?)(\\ajD?Kunoji)\{\}((?:\w|(?:\\UTF\{[0-9a-fA-F]+\}|\\CID\{[0-9]+\}))*?)\}/, '\1}\4with\3Bou\2{\5}')
end
if l =~ /\\[a-z]*?bou\{\}/
l.gsub!(/\\([a-z]*?)bou\{\}/, '{}')
end
return l
end
# 外字用ハッシュを作成
def load_gaiji
datadir = File.dirname(__FILE__)+"/../data"
File.open(datadir+"/gaiji.txt") do |f|
while gaiji_line = f.gets
gaiji_line.chomp!
key, data = gaiji_line.split
@gaiji[key] = data
end
end
File.open(datadir+"/gaiji2.txt") do |f|
while gaiji_line = f.gets
gaiji_line.chomp!
key, data = gaiji_line.split
data.gsub(/#.*$/,'')
@gaiji2[key] = data
end
end
end
#
# メインパート
#
def main
load_gaiji()
# 入出力ファイルの定義
outputfile_name = @inputfile_name.sub(/\.txt$/, ".tex")
inputfile = File.open(@inputfile_name)
outputfile = File.open(outputfile_name, "w")
# プリアンブルの処理
empty_line = 0
in_note = false
meta_data = []
while empty_line < 2
line = inputfile.gets.chomp
line = NKF::nkf('-wS', line)
if in_note
if line =~ /^-+$/
in_note = false
end
else
if line =~ /^-+$/
in_note = true
else
if line =~ /^$/
empty_line += 1
else
if line =~ /《.*?》/
translate_ruby(line)
end
meta_data << line
end
end
end
end
@line_num += meta_data.size
@title = normalize(meta_data.shift)
case meta_data.size
when 1
@author = normalize(meta_data.shift)
when 2
@subtitle = normalize(meta_data.shift)
@author = normalize(meta_data.shift)
when 3
@subtitle = normalize(meta_data.shift)
@author = normalize(meta_data.shift)
@subauthor = normalize(meta_data.shift)
else
@subtitle = normalize(meta_data.shift)
@author = normalize(meta_data.shift)
@subauthor = normalize(meta_data.shift)
@meta_data = []
until meta_data.empty?
@metadata << normalize(meta_data.shift)
end
end
outputfile.write(preamble())
outputfile.print "\\title{"+@title+"}\n"
outputfile.print "\\subtitle{"+@subtitle+"}\n" if @subtitle
outputfile.print "\\author{"+@author+"}\n"
outputfile.print "\\subauthor{"+@subauthor+"}\n" if @subauthor
if @meta_data
@meta_data.each do |data|
outputfile.print "\\metadata{"+data+"}\n"
end
end
outputfile.print "\\date{}\n"
# 本文の処理
outputfile.print "\\begin{document}\n\\maketitle\n"
@line_num += PreambleLineNumber
while line = inputfile.gets
@line_num += 1
line.chomp!
line = NKF::nkf('-wS', line)
break if line =~ /^底本/
if line =~ /^ 「/
line.sub!(/^ 「/, "\\mbox{ }\\kern0mm\\inhibitglue「")
end
if line =~ /[ワヰヱヲ]゛/
line.gsub!(/ワ゛/, "\\ajLig{ワ゛}")
line.gsub!(/ヰ゛/, "\\ajLig{ヰ゛}")
line.gsub!(/ヱ゛/, "\\ajLig{ヱ゛}")
line.gsub!(/ヲ゛/, "\\ajLig{ヲ゛}")
end
if line =~ /[?!] /
line.gsub!(/([?!]) /, '\1{}')
end
if line =~ /——/
line.gsub!(/——/, "\\——{}")
end
if line =~ //\/
line.gsub!(//\/, "\\ajKunoji{}")
end
if line =~ //″\/
line.gsub!(//″\/, "\\ajDKunoji{}")
end
=begin
if line =~ /^ +.+/
line.gsub!(/^ +([一二三四五六七八九〇十].*)/, '\\section*{\1}')
end
=end
while line =~ /(.+?)[#(「\1」は横?[1|一]文字[^]]*?)]/
line = line.sub(/(.+?)[#(「\1」は横?[1|一]文字[^]]*?)]/){"\\ajLig{"+to_single_byte($1)+"}"}
end
if line =~ /[#改丁.*?]/
line.sub!(/[#改丁.*?]/, "\\cleardoublepage")
end
if line =~ /[#改[頁|ページ].*?]/
line.sub!(/[#改[頁|ページ].*?]/, "\\clearpage")
end
if line =~ /〔.*?〕/
translate_accent(line)
end
if line =~ /※/
translate_gaiji(line)
end
if line =~ /[#(.+?)傍点]/
translate_bouten(line)
end
if line =~ /[#傍点].+?[#傍点終わり]/
translate_bouten(line)
end
if line =~ /《.*?》/
translate_ruby(line)
end
if line =~ /[#「(.+?)」に(?:二重)?[傍鎖破波]線]/
translate_bousen(line)
end
if line =~ /[#この行.*?([1234567890一二三四五六七八九〇十]*)字下げ]/
outputfile.print "\\begin{jisage}{"+to_single_byte($1)+"}\n"
line = line.sub(/[#この行.*?字下げ]/, "")+"\n\\end{jisage}"
@line_num += 2
end
if line =~ /[#ここから地から.+字上げ]/
line.sub!(/[#ここから地から([1234567890一二三四五六七八九〇十]*)字上げ]/){"\\begin{flushright}\\advance\\rightskip"+to_single_byte($1)+"zw"}
@jisage = true
end
if line =~ /[#ここで字上げ終わり]/
line.sub!(/[#ここで字上げ終わり]/){"\\end{flushright}"}
@jisage = false
end
if line =~ /[#.*?字下げ[^]]*?(?:終わり|まで)[^]]*?]/
line = line.sub(/[#.*?字下げ.*?(?:終わり|まで).*?]/, "")+"\\end{jisage}"
@jisage = false
end
if line =~ /[#ここから.+字下げ.*?]/
if @jisage
outputfile.print "\\end{jisage}\n"
@line_num += 1
end
line.sub!(/[#ここから.*?([1234567890一二三四五六七八九〇十]*)字下げ.*?]/){"\\begin{jisage}{"+to_single_byte($1)+"}"}
@jisage = true
end
if line =~ /[#.*?地付き.*?]$/
line = "\\begin{flushright}\n"+line.sub(/[#.*?地付き.*?]$/, "\\end{flushright}")
@line_num += 1
elsif line =~ /[#.*?地付き.*?]/
line = line.sub(/[#.*?地付き.*?]/, "\\begin{flushright}\n")+"\\end{flushright}"
@line_num += 1
end
if line =~ /[#.*?(?:行末|地)(?:から|より).*?([1234567890一二三四五六七八九〇十]*)字上.*?]$$/
line = "\\begin{flushright}\\advance\\rightskip"+to_single_byte($1)+"zw\n"+line.sub(/[#.*?(?:行末|地)(?:から|より).*?字上.*?]$/, "\\end{flushright}")
@line_num += 1
elsif line =~ /^(.*?)[#.*?(?:行末|地)(?:から|より).*?([1234567890一二三四五六七八九〇十]*)字上.*?](.*)$/
line = $1+"\\begin{flushright}\\advance\\rightskip"+to_single_byte($2)+"zw\n"+$3+"\\end{flushright}"
@line_num += 1
end
if line =~ /[#「.+?」は返り点]/
line.gsub!(/(.+)[#「\1」は返り点]/, '\\kaeriten{\ajKunten{\1}}')
end
if line =~ /[#[一二上中下甲乙丙丁レ]*]/
line.gsub!(/[#([一二上中下甲乙丙丁レ]*)]/, '\\kaeriten{\ajKunten{\1}}')
end
if line =~ /[#(.*?)]/
line.gsub!(/[#((.*?))]/, '\\okurigana{\1}')
end
if line =~ /[#「.+?」.*?ママ.*?注記]/
line.gsub!(/(.+)[#「\1」.*?ママ.*?注記]/, '\\ruby{\1}{ママ}')
end
if line =~ /[#[^]]+(([^)]+.png))[^]]+]/
line.gsub!(/[#[^]]+(([^)]+.png))[^]]+]/, '\\sashie{\1}')
end
if line =~ /[#([1234567890一二三四五六七八九〇十]*)字下げ]/
outputfile.print "\\begin{jisage}{"+to_single_byte($1)+"}\n"
line = line.sub(/[#.*?字下げ]/, "")+"\n\\end{jisage}"
end
if line =~ /[#「(.*?)」は大見出し]/
line.gsub!(/(.*?)[#「(.*?)」は大見出し]/){"{\\Large #{$1}}"}
end
if line =~ /[#「(.*?)」は中見出し]/
line.gsub!(/(.*?)[#「(.*?)」は中見出し]/){"{\\large #{$1}}"}
end
if line =~ /[#「(.*?)」は小見出し]/
line.gsub!(/(.*?)[#「(.*?)」は小見出し]/){"{\\gtfamily #{$1}}"}
end
if line =~ /[#小見出し](.*?)[#小見出し終わり]/
line.gsub!(/[#小見出し](.*?)[#小見出し終わり]/){"{\\gtfamily #{$1}}"}
end
if line =~ /[#ここから中見出し]/
line.gsub!(/[#ここから中見出し]/){"{\\large"}
end
if line =~ /[#ここで中見出し終わり]/
line.gsub!(/[#ここで中見出し終わり]/){"}"}
end
if line =~ /[#ページの左右中央]/
## XXX とりあえず無視
line.gsub!(/[#ページの左右中央]/, "")
end
if line =~ /[#「(.*?)」は太字]/
line.gsub!(/(.+)[#「\1」は太字]/,'{\\textbf{\1}}')
end
if line =~ /[#「.+?」は縦中横]/
line.gsub!(/(.+)[#「\1」は縦中横]/, '\\rensuji{\1}')
end
if line =~ /[#「(1)(/)(\d+)」は分数]/
bunshi = to_single_byte($1)
bunbo = $3
line.gsub!(/(.+)[#「.+?」は分数]/, "\\rensuji{#{bunshi}/#{bunbo}}")
end
if line =~ /[#「.+?」は罫囲み]/
line.gsub!(/(.+)[#「\1」は罫囲み]/, '\\fbox{\1}')
end
if line =~ /[#「(.+?)」は(本文より)?([123456])段階大きな文字]/
line.gsub!(/([^[]+?)[#「\1」は(本文より)?([123456])段階大きな文字]/) {
num = to_single_byte($3).to_i
case num
when 1
"{\\large #{$1}}"
when 2
"{\\Large #{$1}}"
when 3
"{\\LARGE #{$1}}"
when 4
"{\\huge #{$1}}"
when 5
"{\\Huge #{$1}}"
when 6
"{\\Huge #{$1}}"
end
}
end
if line =~ /[#「.+?」は斜体]/
line.gsub!(/(.+)[#「\1」は斜体]/){
shatai = to_single_byte($1).tr("abcdefghijklmnopqrstuvwxyz","abcdefghijklmnopqrstuvwxyz")
"\\rensuji{\\textsl{"+shatai+"}}"
}
end
if line =~ /[#「[0-90-9]」は下付き小文字]/
line.gsub!(/([A-Za-za-zA-Zαβδγ])([0-90-9])[#「\2」は下付き小文字]/){
"$"+$1+"_{"+to_single_byte($2)+"}$"
}
end
if line =~ /([^ ]*)[#ゴシック体]$/
line.gsub!(/([^ ]*)[#ゴシック体]/){"{\\gtfamily #{$1}}"}
end
line.tr!("┌┐┘└│─┃━","┐┘└┌─│━┃")
if line =~ /[#改段]/
line.sub!(/[#改段]/, "\\clearpage")
end
if line =~ /[aioeu]\^/i
line.gsub!(/([aioeu])\^/i){ "\\\^{#{$1}}"}
end
if line =~ /[aioeu]\'/i
line.gsub!(/([aioeu])\'/i){ "\\\'{#{$1}}"}
end
if line =~ /[#天から.*?([1234567890一二三四五六七八九〇十]*)字下げ]/
num = to_single_byte($1).to_i
if num > MAX_SAGE
num = MAX_SAGE
end
outputfile.print "\\begin{jisage}{#{num}}\n"
line = line.sub(/[#天から.*?字下げ]/, "")+"\n\\end{jisage}"
end
line.gsub!(/[#図形 □(四角)に内接する◆]/, '{\setlength{\fboxsep}{0pt}\fbox{◆}}')
if line =~ /[#[^]]+?]/
line.gsub!(/[#([^]]+?)]/, '\\endnote{\1}')
end
if line =~ /\\[a-z]*?bou/
tuning_bou(line)
end
if line =~ /\\ajD?Kunoji\{\}\}/
line.gsub!(/(\\ajD?Kunoji)\{\}\}/, '\1}')
end
if line =~ /\\ruby/
tuning_ruby(line)
end
if line =~ /^$/
line = " "
end
outputfile.print normalize(line)+"\n"
end
# 底本の処理
outputfile.write(postamble())
outputfile.print normalize(line)+"\n"
while line = inputfile.gets
line.chomp!
line = NKF::nkf('-wS', line)
outputfile.print normalize(line)+"\n"
end
outputfile.print "\n\\end{minipage}\n\\end{teihon}\n\\end{document}\n"
if @log_text.size > 0
until @log_text.empty?
outputfile.print @log_text.shift
end
end
end
end
|
class Apps
def initialize(client)
@client = client
@ENDPOINT = client.ENDPOINT + "/" + "app"
end
def list()
method = "GET"
return self.client.request(method, self.ENDPOINT)
end
def delete(appid)
method = "DELETE"
if not appid.is_a? Integer
raise "Non integer appid"
end
endpoint = self.ENDPOINT + "/" + appid
return self.client.request(method, endpoint)
end
def delete_branding(appid)
method = "DELETE"
if not appid.is_a? Integer
raise "Non integer appid"
end
endpoint = self.ENDPOINT + "/" + appid + "/branding"
return self.client.request(method, endpoint)
end
def create(name)
method = "POST"
if not appid.is_a? String
raise "Non string app name"
end
return self.client.request(method, self.ENDPOINT, {"name" => name})
end
end
Use the fancy ruby unless instead of if negations
class Apps
def initialize(client)
@client = client
@ENDPOINT = client.ENDPOINT + "/" + "app"
end
def list()
method = "GET"
return self.client.request(method, self.ENDPOINT)
end
def delete(appid)
method = "DELETE"
unless appid.is_a? Integer
raise "Non integer appid"
end
endpoint = self.ENDPOINT + "/" + appid
return self.client.request(method, endpoint)
end
def delete_branding(appid)
method = "DELETE"
unless appid.is_a? Integer
raise "Non integer appid"
end
endpoint = self.ENDPOINT + "/" + appid + "/branding"
return self.client.request(method, endpoint)
end
def create(name)
method = "POST"
unless appid.is_a? String
raise "Non string app name"
end
return self.client.request(method, self.ENDPOINT, {"name" => name})
end
end
|
module Daun
VERSION = '0.1.0'.freeze
end
Bump version to 0.1.1.
module Daun
VERSION = '0.1.1'.freeze
end
|
module David
class Server
include Celluloid::IO
include CoAP::Codification
attr_reader :logger
finalizer :shutdown
def initialize(host, port, app, options)
@logger = setup_logger(options[:Debug])
logger.info "Starting on [#{host}]:#{port}."
ipv6 = IPAddr.new(host).ipv6?
af = ipv6 ? ::Socket::AF_INET6 : ::Socket::AF_INET
# Actually Celluloid::IO::UDPServer.
# (Use celluloid-io from git, 0.15.0 does not support AF_INET6).
@socket = UDPSocket.new(af)
@socket.bind(host, port.to_i)
app = app.new if app.respond_to? :new
@host, @port, @app = host, port, app
async.run
end
def shutdown
@socket.close unless @socket.nil?
end
def run
loop { async.handle_input(*@socket.recvfrom(1024)) }
end
private
def answer(host, port, message)
@socket.send(message.to_wire, 0, host, port)
end
def app_response(request)
env = basic_env(request)
logger.debug env
code, options, body = @app.call(env)
new_body = ''
body.each do |line|
new_body += line + "\n"
end
response = initialize_response(request)
response.mcode = http_to_coap_code(code)
response.payload = new_body.chomp
response.options[:content_format] =
CoAP::Registry.convert_content_format(options['Content-Type'])
response
end
def basic_env(request)
{
'REQUEST_METHOD' => coap_to_http_method(request.mcode),
'SCRIPT_NAME' => '',
'PATH_INFO' => path_encode(request.options[:uri_path]),
'QUERY_STRING' => query_encode(request.options[:uri_query])
.gsub(/^\?/, ''),
'SERVER_NAME' => @host,
'SERVER_PORT' => @port.to_s,
'CONTENT_LENGTH' => request.payload.size.to_s,
'rack.version' => [1, 2],
'rack.url_scheme' => 'http',
'rack.input' => StringIO.new(request.payload),
'rack.errors' => $stderr,
'rack.multithread' => true,
'rack.multiprocess' => true,
'rack.run_once' => false,
'rack.logger' => @logger,
}
end
def coap_to_http_method(method)
method.to_s.upcase
end
def handle_input(data, sender)
_, port, host = sender
request = CoAP::Message.parse(data)
logger.info "[#{host}]:#{port}: #{request}"
logger.debug request.inspect
response = app_response(request)
logger.debug response.inspect
answer(host, port, response)
end
def http_to_coap_code(code)
code = code.to_i
h = {200 => 205}
code = h[code] if h[code]
a = code / 100
b = code - (a * 100)
[a, b]
end
# Pre-fill an ACK response echoing the request's message id and token;
# the caller overwrites mcode/payload afterwards (see #app_response).
def initialize_response(request)
CoAP::Message.new \
tt: :ack,
mcode: 2.00,
mid: request.mid,
token: request.options[:token]
end
# Build the server-wide logger writing to STDERR. Level is DEBUG when
# +debug+ is truthy, INFO otherwise; records are rendered as
# "<unix-ts>(<severity>) <message>".
def setup_logger(debug)
  ::Logger.new($stderr).tap do |l|
    l.level = debug ? ::Logger::DEBUG : ::Logger::INFO
    l.formatter = lambda do |sev, time, prog, msg|
      "#{time.to_i}(#{sev.downcase}) #{msg}\n"
    end
  end
end
end
end
Include IP address and port in Rack environment.
module David
# CoAP-over-UDP front end implemented as a Celluloid::IO actor: binds a
# UDP socket, parses incoming CoAP messages, runs them through a Rack
# application and replies with CoAP responses. This revision passes the
# peer's address/port down to the Rack env (REMOTE_ADDR/REMOTE_PORT).
class Server
include Celluloid::IO
include CoAP::Codification
attr_reader :logger
finalizer :shutdown
# host/port: address to bind. app: a Rack application (class or
# instance — a class is instantiated below). options[:Debug] toggles
# debug-level logging.
def initialize(host, port, app, options)
@logger = setup_logger(options[:Debug])
logger.info "Starting on [#{host}]:#{port}."
ipv6 = IPAddr.new(host).ipv6?
af = ipv6 ? ::Socket::AF_INET6 : ::Socket::AF_INET
# Actually Celluloid::IO::UDPServer.
# (Use celluloid-io from git, 0.15.0 does not support AF_INET6).
@socket = UDPSocket.new(af)
@socket.bind(host, port.to_i)
app = app.new if app.respond_to? :new
@host, @port, @app = host, port, app
# Kick off the receive loop asynchronously; initialize returns at once.
async.run
end
# Actor finalizer: release the socket if it was ever opened.
def shutdown
@socket.close unless @socket.nil?
end
# Receive loop: block on recvfrom and hand each datagram plus sender
# tuple off to #handle_input as an async actor call.
def run
loop { async.handle_input(*@socket.recvfrom(1024)) }
end
private
# Serialize +message+ to its wire format and send it to host/port.
def answer(host, port, message)
@socket.send(message.to_wire, 0, host, port)
end
# Run +request+ through the Rack app and convert the Rack triplet
# (status, headers, body) into a CoAP response. host/port identify the
# peer and are exposed to the app via the Rack environment.
def app_response(host, port, request)
env = basic_env(host, port, request)
logger.debug env
code, options, body = @app.call(env)
new_body = ''
body.each do |line|
new_body += line + "\n"
end
response = initialize_response(request)
response.mcode = http_to_coap_code(code)
response.payload = new_body.chomp
response.options[:content_format] =
CoAP::Registry.convert_content_format(options['Content-Type'])
response
end
# Build a minimal Rack environment hash for +request+ sent by the peer
# at host/port (surfaced as REMOTE_ADDR/REMOTE_PORT).
def basic_env(host, port, request)
{
'REMOTE_ADDR' => host,
'REMOTE_PORT' => port,
'REQUEST_METHOD' => coap_to_http_method(request.mcode),
'SCRIPT_NAME' => '',
'PATH_INFO' => path_encode(request.options[:uri_path]),
'QUERY_STRING' => query_encode(request.options[:uri_query])
.gsub(/^\?/, ''),
'SERVER_NAME' => @host,
'SERVER_PORT' => @port.to_s,
'CONTENT_LENGTH' => request.payload.size.to_s,
'rack.version' => [1, 2],
'rack.url_scheme' => 'http',
'rack.input' => StringIO.new(request.payload),
'rack.errors' => $stderr,
'rack.multithread' => true,
'rack.multiprocess' => true,
'rack.run_once' => false,
'rack.logger' => @logger,
}
end
# CoAP method code (e.g. :get) to HTTP verb ("GET").
def coap_to_http_method(method)
method.to_s.upcase
end
# Per-datagram entry point: parse, dispatch to the app, reply.
def handle_input(data, sender)
_, port, host = sender
request = CoAP::Message.parse(data)
logger.info "[#{host}]:#{port}: #{request}"
logger.debug request.inspect
response = app_response(host, port, request)
logger.debug response.inspect
answer(host, port, response)
end
# HTTP status to CoAP [class, detail] pair, e.g. 404 -> [4, 4].
# HTTP 200 maps to CoAP 2.05 (Content).
def http_to_coap_code(code)
code = code.to_i
h = {200 => 205}
code = h[code] if h[code]
a = code / 100
b = code - (a * 100)
[a, b]
end
# Pre-fill an ACK response echoing the request's message id and token.
def initialize_response(request)
CoAP::Message.new \
tt: :ack,
mcode: 2.00,
mid: request.mid,
token: request.options[:token]
end
# STDERR logger; DEBUG when +debug+ is truthy, INFO otherwise.
def setup_logger(debug)
logger = ::Logger.new($stderr)
logger.level = debug ? ::Logger::DEBUG : ::Logger::INFO
logger.formatter = proc do |sev, time, prog, msg|
"#{time.to_i}(#{sev.downcase}) #{msg}\n"
end
logger
end
end
end
|
##
# Daylight API Client Library
#
# Use this client in your Ruby/Rails applications for ease of use access to the Daylight API.
#
# Unlike typical ActiveResource clients, the Daylight API Client has been designed to be used similarly to ActiveRecord with scopes and the ability to chain queries.
#
# Daylight::Zone.all
# Daylight::Zone.where(code:'iad1')
# Daylight::Zone.internal # scope
# Daylight::Zone.find(1).tenants # associations
#
class Daylight::API < ActiveResource::Base
  include Daylight::Refinements
  include Daylight::Associations

  class << self
    attr_reader :version
    cattr_accessor :request_root_in_json

    alias_method :endpoint, :site

    SUPPORTED_VERSIONS = %w[v1].freeze

    DEFAULT_CONFIG = {
      endpoint: 'http://localhost',
      version: SUPPORTED_VERSIONS.last
    }.freeze

    ##
    # Setup and configure the Daylight API. Must be called before use.
    #
    # Recognized options: :endpoint, :version, :password, :timeout and
    # :request_root_in_json (defaults to true).
    def setup! options={}
      config = options.with_indifferent_access.reverse_merge(DEFAULT_CONFIG)

      self.password = config[:password]
      self.endpoint = config[:endpoint]
      self.version  = config[:version]
      self.timeout  = config[:timeout] if config[:timeout] # default read_timeout is 60

      # API requires JSON request to emit a root node named after the object’s type
      # this is different from `include_root_in_json` where every ActiveResource
      # supplies its root.
      #
      # Use fetch with a default so an explicit `request_root_in_json: false`
      # is honored; the former `config[:request_root_in_json] || true`
      # always evaluated to true.
      self.request_root_in_json = config.fetch(:request_root_in_json, true)

      headers['X-Daylight-Client'] = Daylight::VERSION

      alias_apis
    end

    ##
    # Find a single resource from the default URL
    # Fixes bug to short-circuit and return nil if scope/id is nil.
    def find_single(scope, options)
      return if scope.nil?
      super
    end

    ##
    # Whether to show root for the request
    def request_root_in_json?
      request_root_in_json && format.extension == 'json'
    end

    private
      alias_method :endpoint=, :site=

      ##
      # Set the version and make sure it's appropriate
      def version= v
        unless SUPPORTED_VERSIONS.include?(v)
          raise "Unsupported version #{v} is not one of #{SUPPORTED_VERSIONS.join(', ')}"
        end

        @version = v.upcase
        version_path = "/#{v.downcase}/".gsub(/\/+/, '/')
        set_prefix version_path
      end

      ##
      # Alias the configured version APIs to be references without a version number
      # Daylight::V1::Zone => Daylight::Zone
      def alias_apis
        api_classes.each do |api|
          Daylight.const_set(api, "Daylight::#{version}::#{api}".constantize)
        end

        true
      end

      ##
      # Load and return the APIs for the configured version
      def api_classes
        api_files = File.join(File.dirname(__FILE__), version.downcase, "**/*.rb")
        Dir[api_files].each { |filename| load filename }

        "Daylight::#{version}".constantize.constants
      end
  end

  # Metadata stripped from the server response (indifferent-access hash).
  attr_reader :metadata

  ##
  # Overwriting ActiveResource::Base#initialize
  #---
  # Concern cannot call `super` from module to base class (we think)
  def initialize(attributes={}, persisted = false)
    if Hash === attributes && attributes.has_key?('meta')
      metadata = (attributes.delete('meta')||{}).with_indifferent_access # save and strip any metadata supplied in the response
    end

    @metadata = metadata || {}
    super
  end

  ##
  # Get the list of read_only attributes.
  # If there are none then an empty array is supplied.
  def read_only
    @read_only ||= begin
      metadata[:read_only][self.class.element_name] || []
    rescue
      []
    end
  end

  # Parses nested "*_attributes" values as plain attribute hashes rather
  # than full nested resources.
  class HashResourcePassthrough
    def self.new(value, _)
      # load values using ActiveResource::Base and extract them as attributes
      Daylight::API.new(value.duplicable? ? value.dup : value).attributes
    end
  end

  # Route "<association>_attributes" keys that match a configured
  # association reflection through HashResourcePassthrough.
  def find_or_create_resource_for name
    # if the key is attributes attributes for a configured association
    if /(?:_attributes)\z/ =~ name && reflections.key?($`.to_sym)
      HashResourcePassthrough
    else
      super
    end
  end

  ##
  # Returns the serialized string representation of the resource in the configured
  # serialization format specified in ActiveResource::Base.format.
  #
  # For JSON formatted requests default option is to include the root element
  # depending on the `request_root_in_json` configuration.
  def encode(options={})
    super(self.class.request_root_in_json? ? { :root => self.class.element_name }.merge(options) : options)
  end

  ##
  # Adds API specific options when generating json
  #
  # See
  # except_read_only
  def as_json(options={})
    super(except_read_only(options))
  end

  ##
  # Adds API specific options when generating xml
  #
  # See
  # except_read_only
  def to_xml(options={})
    super(except_read_only(options))
  end

  ##
  # Writers for read only attributes are not included as methods
  def respond_to?(method_name, include_priv = false)
    return false if read_only?(method_name)
    super
  end

  private
    # Raise (and log, when a logger is configured) on attempts to write a
    # read-only attribute; otherwise defer to ActiveResource.
    def method_missing(method_name, *arguments)
      if read_only?(method_name)
        logger.warn "Cannot set read_only attribute: #{method_name[0...-1]}" if logger
        raise NoMethodError, "Cannot set read_only attribute: #{method_name[0...-1]}"
      end

      super
    end

    ##
    # Ensures that read_only attributes are merged in with :except options.
    def except_read_only options
      options.merge(except: (options[:except]||[]).push(*read_only))
    end

    ##
    # Determines if `method_name` is writing to a read only attribute.
    def read_only? method_name
      !!(method_name =~ /(?:=)$/ && read_only.include?($`))
    end
end
Disable the API aliasing until we know what to do with it.
##
# Daylight API Client Library
#
# Use this client in your Ruby/Rails applications for ease of use access to the Daylight API.
#
# Unlike typical ActiveResource clients, the Daylight API Client has been designed to be used similarly to ActiveRecord with scopes and the ability to chain queries.
#
# Daylight::Zone.all
# Daylight::Zone.where(code:'iad1')
# Daylight::Zone.internal # scope
# Daylight::Zone.find(1).tenants # associations
#
class Daylight::API < ActiveResource::Base
  include Daylight::Refinements
  include Daylight::Associations

  class << self
    attr_reader :version
    cattr_accessor :request_root_in_json

    alias_method :endpoint, :site

    SUPPORTED_VERSIONS = %w[v1].freeze

    DEFAULT_CONFIG = {
      endpoint: 'http://localhost',
      version: SUPPORTED_VERSIONS.last
    }.freeze

    ##
    # Setup and configure the Daylight API. Must be called before use.
    #
    # Recognized options: :endpoint, :version, :password, :timeout and
    # :request_root_in_json (defaults to true).
    def setup! options={}
      config = options.with_indifferent_access.reverse_merge(DEFAULT_CONFIG)

      self.password = config[:password]
      self.endpoint = config[:endpoint]
      self.version  = config[:version]
      self.timeout  = config[:timeout] if config[:timeout] # default read_timeout is 60

      # API requires JSON request to emit a root node named after the object’s type
      # this is different from `include_root_in_json` where every ActiveResource
      # supplies its root.
      #
      # Use fetch with a default so an explicit `request_root_in_json: false`
      # is honored; the former `config[:request_root_in_json] || true`
      # always evaluated to true.
      self.request_root_in_json = config.fetch(:request_root_in_json, true)

      headers['X-Daylight-Client'] = Daylight::VERSION

      # API aliasing is disabled for now (strategy TBD); reference the
      # versioned constants (e.g. Daylight::V1::Zone) directly.
      # alias_apis
    end

    ##
    # Find a single resource from the default URL
    # Fixes bug to short-circuit and return nil if scope/id is nil.
    def find_single(scope, options)
      return if scope.nil?
      super
    end

    ##
    # Whether to show root for the request
    def request_root_in_json?
      request_root_in_json && format.extension == 'json'
    end

    private
      alias_method :endpoint=, :site=

      ##
      # Set the version and make sure it's appropriate
      def version= v
        unless SUPPORTED_VERSIONS.include?(v)
          raise "Unsupported version #{v} is not one of #{SUPPORTED_VERSIONS.join(', ')}"
        end

        @version = v.upcase
        version_path = "/#{v.downcase}/".gsub(/\/+/, '/')
        set_prefix version_path
      end

      ##
      # Alias the configured version APIs to be references without a version number
      # Daylight::V1::Zone => Daylight::Zone
      def alias_apis
        api_classes.each do |api|
          Daylight.const_set(api, "Daylight::#{version}::#{api}".constantize)
        end

        true
      end

      ##
      # Load and return the APIs for the configured version
      def api_classes
        api_files = File.join(File.dirname(__FILE__), version.downcase, "**/*.rb")
        Dir[api_files].each { |filename| load filename }

        "Daylight::#{version}".constantize.constants
      end
  end

  # Metadata stripped from the server response (indifferent-access hash).
  attr_reader :metadata

  ##
  # Overwriting ActiveResource::Base#initialize
  #---
  # Concern cannot call `super` from module to base class (we think)
  def initialize(attributes={}, persisted = false)
    if Hash === attributes && attributes.has_key?('meta')
      metadata = (attributes.delete('meta')||{}).with_indifferent_access # save and strip any metadata supplied in the response
    end

    @metadata = metadata || {}
    super
  end

  ##
  # Get the list of read_only attributes.
  # If there are none then an empty array is supplied.
  def read_only
    @read_only ||= begin
      metadata[:read_only][self.class.element_name] || []
    rescue
      []
    end
  end

  # Parses nested "*_attributes" values as plain attribute hashes rather
  # than full nested resources.
  class HashResourcePassthrough
    def self.new(value, _)
      # load values using ActiveResource::Base and extract them as attributes
      Daylight::API.new(value.duplicable? ? value.dup : value).attributes
    end
  end

  # Route "<association>_attributes" keys that match a configured
  # association reflection through HashResourcePassthrough.
  def find_or_create_resource_for name
    # if the key is attributes attributes for a configured association
    if /(?:_attributes)\z/ =~ name && reflections.key?($`.to_sym)
      HashResourcePassthrough
    else
      super
    end
  end

  ##
  # Returns the serialized string representation of the resource in the configured
  # serialization format specified in ActiveResource::Base.format.
  #
  # For JSON formatted requests default option is to include the root element
  # depending on the `request_root_in_json` configuration.
  def encode(options={})
    super(self.class.request_root_in_json? ? { :root => self.class.element_name }.merge(options) : options)
  end

  ##
  # Adds API specific options when generating json
  #
  # See
  # except_read_only
  def as_json(options={})
    super(except_read_only(options))
  end

  ##
  # Adds API specific options when generating xml
  #
  # See
  # except_read_only
  def to_xml(options={})
    super(except_read_only(options))
  end

  ##
  # Writers for read only attributes are not included as methods
  def respond_to?(method_name, include_priv = false)
    return false if read_only?(method_name)
    super
  end

  private
    # Raise (and log, when a logger is configured) on attempts to write a
    # read-only attribute; otherwise defer to ActiveResource.
    def method_missing(method_name, *arguments)
      if read_only?(method_name)
        logger.warn "Cannot set read_only attribute: #{method_name[0...-1]}" if logger
        raise NoMethodError, "Cannot set read_only attribute: #{method_name[0...-1]}"
      end

      super
    end

    ##
    # Ensures that read_only attributes are merged in with :except options.
    def except_read_only options
      options.merge(except: (options[:except]||[]).push(*read_only))
    end

    ##
    # Determines if `method_name` is writing to a read only attribute.
    def read_only? method_name
      !!(method_name =~ /(?:=)$/ && read_only.include?($`))
    end
end
|
module Desi
# Gem version string (SemVer).
VERSION = "0.2.2"
end
bump version up to 0.2.3
module Desi
# Gem version string (SemVer).
VERSION = "0.2.3"
end
|
# Homebrew formula for NxTrim, an adapter/junction trimmer for Illumina
# mate-pair libraries.
class Nxtrim < Formula
homepage "https://github.com/sequencing/NxTrim"
#doi "10.1101/007666"
#tag "bioinformatics"
url "https://github.com/sequencing/NxTrim/archive/v0.3.0-alpha.tar.gz"
# Upstream tags this release "v0.3.0-alpha"; report it as plain 0.3.0.
version "0.3.0"
sha1 "6502be8546b8d0ebc0120cc2791aefd26471e8a4"
depends_on "boost"
def install
system "make", "BOOST_ROOT=#{Formula["boost"].prefix}"
bin.install "nxtrim", "mergeReads"
doc.install "Changes", "LICENSE.txt", "README.md"
end
test do
# Smoke test: the help output should mention the tool name.
system "#{bin}/nxtrim -h |grep NxTrim"
end
end
nxtrim: add 0.3.0 bottle.
# Homebrew formula for NxTrim, an adapter/junction trimmer for Illumina
# mate-pair libraries.
class Nxtrim < Formula
homepage "https://github.com/sequencing/NxTrim"
#doi "10.1101/007666"
#tag "bioinformatics"
url "https://github.com/sequencing/NxTrim/archive/v0.3.0-alpha.tar.gz"
# Upstream tags this release "v0.3.0-alpha"; report it as plain 0.3.0.
version "0.3.0"
sha1 "6502be8546b8d0ebc0120cc2791aefd26471e8a4"
# Pre-built binary bottles for supported macOS releases.
bottle do
root_url "https://downloads.sf.net/project/machomebrew/Bottles/science"
sha1 "7a4bb42527550571a9ba7597a20dbf83174586cf" => :yosemite
sha1 "37593e250fbde7b93bd107dec77e12cacb51cd11" => :mavericks
sha1 "f9db6b90a23cab1befc9b317ea8f8de8ad799cb1" => :mountain_lion
end
depends_on "boost"
def install
system "make", "BOOST_ROOT=#{Formula["boost"].prefix}"
bin.install "nxtrim", "mergeReads"
doc.install "Changes", "LICENSE.txt", "README.md"
end
test do
# Smoke test: the help output should mention the tool name.
system "#{bin}/nxtrim -h |grep NxTrim"
end
end
|
[Add] FirebaseAppDistribution (7.10.0-beta)
# CocoaPods podspec for the FirebaseAppDistribution beta SDK (built from
# the firebase-ios-sdk monorepo nightly tag).
Pod::Spec.new do |s|
s.name = 'FirebaseAppDistribution'
s.version = '7.10.0-beta'
s.summary = 'App Distribution for Firebase iOS SDK.'
s.description = <<-DESC
iOS SDK for App Distribution for Firebase.
DESC
s.homepage = 'https://developers.google.com/'
s.license = { :type => 'Apache', :file => 'LICENSE' }
s.authors = 'Google, Inc.'
s.source = {
:git => 'https://github.com/firebase/firebase-ios-sdk.git',
:tag => 'CocoaPods-7.10.0.nightly'
}
s.ios.deployment_target = '10.0'
s.cocoapods_version = '>= 1.4.0'
s.prefix_header_file = false
base_dir = "FirebaseAppDistribution/Sources/"
# Sources include private headers from FirebaseCore and
# FirebaseInstallations in addition to this pod's own files.
s.source_files = [
base_dir + '**/*.{c,h,m,mm}',
'FirebaseCore/Sources/Private/*.h',
'FirebaseInstallations/Source/Library/Private/*.h',
]
s.public_header_files = base_dir + 'Public/FirebaseAppDistribution/*.h'
s.dependency 'FirebaseCore', '~> 7.0'
s.dependency 'GoogleUtilities/AppDelegateSwizzler', '~> 7.0'
s.dependency 'GoogleUtilities/UserDefaults', '~> 7.0'
s.dependency 'FirebaseInstallations', '~> 7.0'
s.dependency 'GoogleDataTransport', '~> 8.4'
s.pod_target_xcconfig = {
'GCC_C_LANGUAGE_STANDARD' => 'c99',
'HEADER_SEARCH_PATHS' => '"${PODS_TARGET_SRCROOT}"'
}
s.test_spec 'unit' do |unit_tests|
unit_tests.scheme = { :code_coverage => true }
unit_tests.source_files = 'FirebaseAppDistribution/Tests/Unit*/*.[mh]'
unit_tests.resources = 'FirebaseAppDistribution/Tests/Unit/Resources/*'
unit_tests.dependency 'OCMock'
end
end
|
require 'spec_helper'
# Specs for CategoryRecommender: given a user and an inventory, it should
# recommend the items that overlap with the user's purchased categories.
# The numeric ids are fixtures — presumably seeded by spec_helper; confirm
# there before changing them.
describe CategoryRecommender do
before :all do
user = User.find_user(35_914)
inventory = [
Item.find_item(1_253),
Item.find_item(1_286),
Item.find_item(1_423)
]
@recommender = CategoryRecommender.new(user, inventory)
end
describe '.recommend_items' do
it 'should recommend items based on the user purchased categories' do
expected = [Item.find_item(1_423), Item.find_item(1_286)]
expect(@recommender.recommend_items).to eq(expected)
end
end
end
Add comment
require 'spec_helper'
# Specs for CategoryRecommender: given a user and an inventory, it should
# recommend the items whose categories overlap with the user's purchased
# categories (per-fixture category ids are annotated inline below).
describe CategoryRecommender do
before :all do
user = User.find_user(35_914)
# user category_ids = [134, 152, 133, 129, 164, 168, 154]
# user item_ids = [1253, 1532, 1532, 1298, 1314, 1366]
inventory = [
Item.find_item(1_423), # category_ids = [154, 164, 168]
Item.find_item(1_286), # category_ids = [168]
Item.find_item(1_253), # category_ids = [134, 152]
Item.find_item(1_170) # category_ids = [172]
]
@recommender = CategoryRecommender.new(user, inventory)
end
describe '.recommend_items' do
it 'should recommend items based on the user purchased categories' do
expected = [Item.find_item(1_423), Item.find_item(1_286)]
expect(@recommender.recommend_items).to eq(expected)
end
end
end
|
Added specs for trailer_arquivo
require 'spec_helper'
# Specs for Cnab::TrailerArquivo: each example checks that one fixed-width
# field is sliced out of a CNAB trailer record. LINE is a fixture string —
# presumably defined in spec_helper; confirm there.
describe Cnab::TrailerArquivo do
describe "#initialize" do
before :each do
@trailer_arquivo = Cnab::TrailerArquivo.new(LINE)
end
it "should set #banco" do
@trailer_arquivo.banco.should == "012"
end
it "should set #lote" do
@trailer_arquivo.lote.should == "3456"
end
it "should set #registro" do
@trailer_arquivo.registro.should == "7"
end
it "should set #res_cnab1" do
@trailer_arquivo.res_cnab1.should == "890123456"
end
it "should set #qtd_lotes" do
@trailer_arquivo.qtd_lotes.should == "789012"
end
it "should set #qtd_registros" do
@trailer_arquivo.qtd_registros.should == "345678"
end
it "should set #qtd_contas" do
@trailer_arquivo.qtd_contas.should == "901234"
end
it "should set #res_cnab2" do
@trailer_arquivo.res_cnab2.should == "5678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789"
end
end
end
require File.expand_path(File.dirname(__FILE__) + '/../spec_helper')
# Specs for Constellation::Runner. Most examples are intentionally pending
# (no block given) and act as a specification outline; only #version is
# fully implemented.
describe Constellation::Runner do
before(:each) do
@runner = Constellation::Runner.new
end
describe "#init" do
it "should load the application from the Git repository given by Constellation::REPOSITORY tagged by Constellation::VERSION"
context "given an error while loading the application" do
it "should throw an error"
end
end
describe "#start" do
it "should load the config defined at the ConstellationFile"
it "should establish a connection to the given data store"
context "given a successful data store connection" do
it "should start the web application"
end
context "given a failed data store connection" do
it "should throw an error"
end
end
describe "#restart" do
it "should call #stop"
it "should call #start"
end
describe "#stop" do
it "should close the connection"
end
describe "#version" do
it "should put the current version on the command line" do
@runner.stub!(:puts)
@runner.should_receive(:puts).with(Constellation::VERSION)
@runner.version
end
end
end
Add spec for the #help command.
require File.expand_path(File.dirname(__FILE__) + '/../spec_helper')
# Specs for Constellation::Runner. Most examples are intentionally pending
# (no block given) and act as a specification outline; only #help and
# #version are fully implemented.
describe Constellation::Runner do
before(:each) do
@runner = Constellation::Runner.new
end
describe "#init" do
it "should load the application from the Git repository given by Constellation::REPOSITORY tagged by Constellation::VERSION"
context "given an error while loading the application" do
it "should throw an error"
end
end
describe "#help" do
it "should put some help to the command line" do
@runner.stub!(:puts)
@runner.should_receive(:puts)
@runner.help
end
end
describe "#start" do
it "should load the config defined at the ConstellationFile"
it "should establish a connection to the given data store"
context "given a successful data store connection" do
it "should start the web application"
end
context "given a failed data store connection" do
it "should throw an error"
end
end
describe "#restart" do
it "should call #stop"
it "should call #start"
end
describe "#stop" do
it "should close the connection"
end
describe "#version" do
it "should put the current version on the command line" do
@runner.stub!(:puts)
@runner.should_receive(:puts).with(Constellation::VERSION)
@runner.version
end
end
end
require File.expand_path(File.dirname(__FILE__) + '/../spec_helper')
require 'cucumber'
require 'cucumber/rb_support/rb_language'
module Cucumber # NOTE(review): several examples below assert literal line numbers of this spec file (e.g. step_mother_spec.rb:30); do not insert or remove lines above the "step argument transformations" group.
describe StepMother do
before do
@dsl = Object.new
@dsl.extend(RbSupport::RbDsl)
@step_mother = StepMother.new
@step_mother.load_natural_language('en')
@rb = @step_mother.load_programming_language('rb')
@visitor = mock('Visitor') # not exercised directly by these examples
end
it "should format step names" do
@dsl.Given(/it (.*) in (.*)/) do |what, month|
end
@dsl.Given(/nope something else/) do |what, month|
end
format = @step_mother.step_match("it snows in april").format_args("[%s]")
format.should == "it [snows] in [april]"
end
it "should raise Ambiguous error with guess hint when multiple step definitions match" do
@dsl.Given(/Three (.*) mice/) {|disability|}
@dsl.Given(/Three blind (.*)/) {|animal|}
lambda do
@step_mother.step_match("Three blind mice")
end.should raise_error(Ambiguous, %{Ambiguous match of "Three blind mice":
spec/cucumber/step_mother_spec.rb:30:in `/Three (.*) mice/'
spec/cucumber/step_mother_spec.rb:31:in `/Three blind (.*)/'
You can run again with --guess to make Cucumber be more smart about it
})
end
it "should not show --guess hint when --guess is used" do
@step_mother.options = {:guess => true}
@dsl.Given(/Three (.*) mice/) {|disability|}
@dsl.Given(/Three cute (.*)/) {|animal|}
lambda do
@step_mother.step_match("Three cute mice")
end.should raise_error(Ambiguous, %{Ambiguous match of "Three cute mice":
spec/cucumber/step_mother_spec.rb:47:in `/Three (.*) mice/'
spec/cucumber/step_mother_spec.rb:48:in `/Three cute (.*)/'
})
end
it "should not raise Ambiguous error when multiple step definitions match, but --guess is enabled" do
@step_mother.options = {:guess => true}
@dsl.Given(/Three (.*) mice( cannot find food)?/) {|disability, is_disastrous|}
@dsl.Given(/Three (.*)/) {|animal|}
lambda do
@step_mother.step_match("Three blind mice")
end.should_not raise_error
end
it "should pick right step definition when --guess is enabled and equal number of capture groups" do
@step_mother.options = {:guess => true}
right = @dsl.Given(/Three (.*) mice/) {|disability|}
wrong = @dsl.Given(/Three (.*)/) {|animal|}
@step_mother.step_match("Three blind mice").step_definition.should == right
end
it "should pick right step definition when --guess is enabled and unequal number of capture groups" do
@step_mother.options = {:guess => true}
right = @dsl.Given(/Three (.*) mice ran (.*)/) {|disability|}
wrong = @dsl.Given(/Three (.*)/) {|animal|}
@step_mother.step_match("Three blind mice ran far").step_definition.should == right
end
it "should pick most specific step definition when --guess is enabled and unequal number of capture groups" do
@step_mother.options = {:guess => true}
general = @dsl.Given(/Three (.*) mice ran (.*)/) {|disability|}
specific = @dsl.Given(/Three blind mice ran far/) do; end
more_specific = @dsl.Given(/^Three blind mice ran far$/) do; end
@step_mother.step_match("Three blind mice ran far").step_definition.should == more_specific
end
it "should raise Undefined error when no step definitions match" do
lambda do
@step_mother.step_match("Three blind mice")
end.should raise_error(Undefined)
end
# http://railsforum.com/viewtopic.php?pid=93881
it "should not raise Redundant unless it's really redundant" do
@dsl.Given(/^(.*) (.*) user named '(.*)'$/) {|a,b,c|}
@dsl.Given(/^there is no (.*) user named '(.*)'$/) {|a,b|}
end
it "should raise an error if the world is nil" do
@dsl.World {}
begin
@step_mother.before_and_after(nil) do; end
raise "Should fail"
rescue RbSupport::NilWorld => e
e.message.should == "World procs should never return nil"
e.backtrace.should == ["spec/cucumber/step_mother_spec.rb:108:in `World'"]
end
end
module ModuleOne
end
module ModuleTwo
end
class ClassOne
end
it "should implicitly extend world with modules" do
@dsl.World(ModuleOne, ModuleTwo)
@step_mother.before(mock('scenario', :null_object => true))
class << @rb.current_world
included_modules.inspect.should =~ /ModuleOne/ # Workaround for RSpec/Ruby 1.9 issue with namespaces
included_modules.inspect.should =~ /ModuleTwo/
end
@rb.current_world.class.should == Object
end
it "should raise error when we try to register more than one World proc" do
@dsl.World { Hash.new }
lambda do
@dsl.World { Array.new }
end.should raise_error(RbSupport::MultipleWorld, %{You can only pass a proc to #World once, but it's happening
in 2 places:
spec/cucumber/step_mother_spec.rb:139:in `World'
spec/cucumber/step_mother_spec.rb:141:in `World'
Use Ruby modules instead to extend your worlds. See the Cucumber::RbSupport::RbDsl#World RDoc
or http://wiki.github.com/aslakhellesoy/cucumber/a-whole-new-world.
})
end
# Tagged Before hooks should only fire for scenarios matching the tag.
it "should find before hooks" do
fish = @dsl.Before('@fish'){}
meat = @dsl.Before('@meat'){}
scenario = mock('Scenario')
scenario.should_receive(:accept_hook?).with(fish).and_return(true)
scenario.should_receive(:accept_hook?).with(meat).and_return(false)
@rb.hooks_for(:before, scenario).should == [fish]
end
end
# Specs for the Transform DSL: registered patterns rewrite matching step
# arguments before they reach the step definition.
describe StepMother, "step argument transformations" do
before do
@dsl = Object.new
@dsl.extend(RbSupport::RbDsl)
@step_mother = StepMother.new
@step_mother.load_natural_language('en')
@rb = @step_mother.load_programming_language('rb')
end
describe "without capture groups" do
it "complains when registering with a with no transform block" do
lambda do
@dsl.Transform('^abc$')
end.should raise_error(Cucumber::RbSupport::RbTransform::MissingProc)
end
it "complains when registering with a zero-arg transform block" do
lambda do
@dsl.Transform('^abc$') {42}
end.should raise_error(Cucumber::RbSupport::RbTransform::MissingProc)
end
it "complains when registering with a splat-arg transform block" do
lambda do
@dsl.Transform('^abc$') {|*splat| 42 }
end.should raise_error(Cucumber::RbSupport::RbTransform::MissingProc)
end
it "complains when transforming with an arity mismatch" do
lambda do
@dsl.Transform('^abc$') {|one, two| 42 }
@rb.execute_transforms(['abc'])
end.should raise_error(Cucumber::ArityMismatchError)
end
it "allows registering a regexp pattern that yields the step_arg matched" do
@dsl.Transform(/^ab*c$/) {|arg| 42}
@rb.execute_transforms(['ab']).should == ['ab']
@rb.execute_transforms(['ac']).should == [42]
@rb.execute_transforms(['abc']).should == [42]
@rb.execute_transforms(['abbc']).should == [42]
end
end
describe "with capture groups" do
it "complains when registering with a with no transform block" do
lambda do
@dsl.Transform('^a(.)c$')
end.should raise_error(Cucumber::RbSupport::RbTransform::MissingProc)
end
it "complains when registering with a zero-arg transform block" do
lambda do
@dsl.Transform('^a(.)c$') { 42 }
end.should raise_error(Cucumber::RbSupport::RbTransform::MissingProc)
end
it "complains when registering with a splat-arg transform block" do
lambda do
@dsl.Transform('^a(.)c$') {|*splat| 42 }
end.should raise_error(Cucumber::RbSupport::RbTransform::MissingProc)
end
it "complains when transforming with an arity mismatch" do
lambda do
@dsl.Transform('^a(.)c$') {|one, two| 42 }
@rb.execute_transforms(['abc'])
end.should raise_error(Cucumber::ArityMismatchError)
end
it "allows registering a regexp pattern that yields capture groups" do
@dsl.Transform(/^shape: (.+), color: (.+)$/) do |shape, color|
{shape.to_sym => color.to_sym}
end
@rb.execute_transforms(['shape: circle, color: blue']).should == [{:circle => :blue}]
@rb.execute_transforms(['shape: square, color: red']).should == [{:square => :red}]
@rb.execute_transforms(['not shape: square, not color: red']).should == ['not shape: square, not color: red']
end
end
it "allows registering a string pattern" do
@dsl.Transform('^ab*c$') {|arg| 42}
@rb.execute_transforms(['ab']).should == ['ab']
@rb.execute_transforms(['ac']).should == [42]
@rb.execute_transforms(['abc']).should == [42]
@rb.execute_transforms(['abbc']).should == [42]
end
it "gives match priority to transforms defined last" do
@dsl.Transform(/^transform_me$/) {|arg| :foo }
@dsl.Transform(/^transform_me$/) {|arg| :bar }
@dsl.Transform(/^transform_me$/) {|arg| :baz }
@rb.execute_transforms(['transform_me']).should == [:baz]
end
it "allows registering a transform which returns nil" do
@dsl.Transform('^ac$') {|arg| nil}
@rb.execute_transforms(['ab']).should == ['ab']
@rb.execute_transforms(['ac']).should == [nil]
end
end
end
# Empty fixture modules/class at file scope (the World-extension examples
# in this spec file reference identically named nested fixtures).
module ModuleOne
end
module ModuleTwo
end
class ClassOne
end
Revert "add a spec that fails without the previous fix"
The old spec should stay, a new one will be added
This reverts commit 928196f849d73341631fdbb336032f236256be0e.
require File.expand_path(File.dirname(__FILE__) + '/../spec_helper')
require 'cucumber'
require 'cucumber/rb_support/rb_language'
module Cucumber
describe StepMother do
before do
@dsl = Object.new
@dsl.extend(RbSupport::RbDsl)
@step_mother = StepMother.new
@step_mother.load_natural_language('en')
@rb = @step_mother.load_programming_language('rb')
@visitor = mock('Visitor')
end
it "should format step names" do
@dsl.Given(/it (.*) in (.*)/) do |what, month|
end
@dsl.Given(/nope something else/) do |what, month|
end
format = @step_mother.step_match("it snows in april").format_args("[%s]")
format.should == "it [snows] in [april]"
end
it "should raise Ambiguous error with guess hint when multiple step definitions match" do
@dsl.Given(/Three (.*) mice/) {|disability|}
@dsl.Given(/Three blind (.*)/) {|animal|}
lambda do
@step_mother.step_match("Three blind mice")
end.should raise_error(Ambiguous, %{Ambiguous match of "Three blind mice":
spec/cucumber/step_mother_spec.rb:30:in `/Three (.*) mice/'
spec/cucumber/step_mother_spec.rb:31:in `/Three blind (.*)/'
You can run again with --guess to make Cucumber be more smart about it
})
end
it "should not show --guess hint when --guess is used" do
@step_mother.options = {:guess => true}
@dsl.Given(/Three (.*) mice/) {|disability|}
@dsl.Given(/Three cute (.*)/) {|animal|}
lambda do
@step_mother.step_match("Three cute mice")
end.should raise_error(Ambiguous, %{Ambiguous match of "Three cute mice":
spec/cucumber/step_mother_spec.rb:47:in `/Three (.*) mice/'
spec/cucumber/step_mother_spec.rb:48:in `/Three cute (.*)/'
})
end
it "should not raise Ambiguous error when multiple step definitions match, but --guess is enabled" do
@step_mother.options = {:guess => true}
@dsl.Given(/Three (.*) mice/) {|disability|}
@dsl.Given(/Three (.*)/) {|animal|}
lambda do
@step_mother.step_match("Three blind mice")
end.should_not raise_error
end
it "should pick right step definition when --guess is enabled and equal number of capture groups" do
@step_mother.options = {:guess => true}
right = @dsl.Given(/Three (.*) mice/) {|disability|}
wrong = @dsl.Given(/Three (.*)/) {|animal|}
@step_mother.step_match("Three blind mice").step_definition.should == right
end
it "should pick right step definition when --guess is enabled and unequal number of capture groups" do
@step_mother.options = {:guess => true}
right = @dsl.Given(/Three (.*) mice ran (.*)/) {|disability|}
wrong = @dsl.Given(/Three (.*)/) {|animal|}
@step_mother.step_match("Three blind mice ran far").step_definition.should == right
end
it "should pick most specific step definition when --guess is enabled and unequal number of capture groups" do
@step_mother.options = {:guess => true}
general = @dsl.Given(/Three (.*) mice ran (.*)/) {|disability|}
specific = @dsl.Given(/Three blind mice ran far/) do; end
more_specific = @dsl.Given(/^Three blind mice ran far$/) do; end
@step_mother.step_match("Three blind mice ran far").step_definition.should == more_specific
end
it "should raise Undefined error when no step definitions match" do
lambda do
@step_mother.step_match("Three blind mice")
end.should raise_error(Undefined)
end
# http://railsforum.com/viewtopic.php?pid=93881
it "should not raise Redundant unless it's really redundant" do
@dsl.Given(/^(.*) (.*) user named '(.*)'$/) {|a,b,c|}
@dsl.Given(/^there is no (.*) user named '(.*)'$/) {|a,b|}
end
it "should raise an error if the world is nil" do
@dsl.World {}
begin
@step_mother.before_and_after(nil) do; end
raise "Should fail"
rescue RbSupport::NilWorld => e
e.message.should == "World procs should never return nil"
e.backtrace.should == ["spec/cucumber/step_mother_spec.rb:108:in `World'"]
end
end
module ModuleOne
end
module ModuleTwo
end
class ClassOne
end
it "should implicitly extend world with modules" do
@dsl.World(ModuleOne, ModuleTwo)
@step_mother.before(mock('scenario', :null_object => true))
class << @rb.current_world
included_modules.inspect.should =~ /ModuleOne/ # Workaround for RSpec/Ruby 1.9 issue with namespaces
included_modules.inspect.should =~ /ModuleTwo/
end
@rb.current_world.class.should == Object
end
it "should raise error when we try to register more than one World proc" do
@dsl.World { Hash.new }
lambda do
@dsl.World { Array.new }
end.should raise_error(RbSupport::MultipleWorld, %{You can only pass a proc to #World once, but it's happening
in 2 places:
spec/cucumber/step_mother_spec.rb:139:in `World'
spec/cucumber/step_mother_spec.rb:141:in `World'
Use Ruby modules instead to extend your worlds. See the Cucumber::RbSupport::RbDsl#World RDoc
or http://wiki.github.com/aslakhellesoy/cucumber/a-whole-new-world.
})
end
it "should find before hooks" do
fish = @dsl.Before('@fish'){}
meat = @dsl.Before('@meat'){}
scenario = mock('Scenario')
scenario.should_receive(:accept_hook?).with(fish).and_return(true)
scenario.should_receive(:accept_hook?).with(meat).and_return(false)
@rb.hooks_for(:before, scenario).should == [fish]
end
end
describe StepMother, "step argument transformations" do
before do
@dsl = Object.new
@dsl.extend(RbSupport::RbDsl)
@step_mother = StepMother.new
@step_mother.load_natural_language('en')
@rb = @step_mother.load_programming_language('rb')
end
describe "without capture groups" do
it "complains when registering with a with no transform block" do
lambda do
@dsl.Transform('^abc$')
end.should raise_error(Cucumber::RbSupport::RbTransform::MissingProc)
end
it "complains when registering with a zero-arg transform block" do
lambda do
@dsl.Transform('^abc$') {42}
end.should raise_error(Cucumber::RbSupport::RbTransform::MissingProc)
end
it "complains when registering with a splat-arg transform block" do
lambda do
@dsl.Transform('^abc$') {|*splat| 42 }
end.should raise_error(Cucumber::RbSupport::RbTransform::MissingProc)
end
it "complains when transforming with an arity mismatch" do
lambda do
@dsl.Transform('^abc$') {|one, two| 42 }
@rb.execute_transforms(['abc'])
end.should raise_error(Cucumber::ArityMismatchError)
end
it "allows registering a regexp pattern that yields the step_arg matched" do
@dsl.Transform(/^ab*c$/) {|arg| 42}
@rb.execute_transforms(['ab']).should == ['ab']
@rb.execute_transforms(['ac']).should == [42]
@rb.execute_transforms(['abc']).should == [42]
@rb.execute_transforms(['abbc']).should == [42]
end
end
describe "with capture groups" do
it "complains when registering with a with no transform block" do
lambda do
@dsl.Transform('^a(.)c$')
end.should raise_error(Cucumber::RbSupport::RbTransform::MissingProc)
end
it "complains when registering with a zero-arg transform block" do
lambda do
@dsl.Transform('^a(.)c$') { 42 }
end.should raise_error(Cucumber::RbSupport::RbTransform::MissingProc)
end
it "complains when registering with a splat-arg transform block" do
lambda do
@dsl.Transform('^a(.)c$') {|*splat| 42 }
end.should raise_error(Cucumber::RbSupport::RbTransform::MissingProc)
end
it "complains when transforming with an arity mismatch" do
lambda do
@dsl.Transform('^a(.)c$') {|one, two| 42 }
@rb.execute_transforms(['abc'])
end.should raise_error(Cucumber::ArityMismatchError)
end
it "allows registering a regexp pattern that yields capture groups" do
@dsl.Transform(/^shape: (.+), color: (.+)$/) do |shape, color|
{shape.to_sym => color.to_sym}
end
@rb.execute_transforms(['shape: circle, color: blue']).should == [{:circle => :blue}]
@rb.execute_transforms(['shape: square, color: red']).should == [{:square => :red}]
@rb.execute_transforms(['not shape: square, not color: red']).should == ['not shape: square, not color: red']
end
end
it "allows registering a string pattern" do
@dsl.Transform('^ab*c$') {|arg| 42}
@rb.execute_transforms(['ab']).should == ['ab']
@rb.execute_transforms(['ac']).should == [42]
@rb.execute_transforms(['abc']).should == [42]
@rb.execute_transforms(['abbc']).should == [42]
end
it "gives match priority to transforms defined last" do
@dsl.Transform(/^transform_me$/) {|arg| :foo }
@dsl.Transform(/^transform_me$/) {|arg| :bar }
@dsl.Transform(/^transform_me$/) {|arg| :baz }
@rb.execute_transforms(['transform_me']).should == [:baz]
end
it "allows registering a transform which returns nil" do
@dsl.Transform('^ac$') {|arg| nil}
@rb.execute_transforms(['ab']).should == ['ab']
@rb.execute_transforms(['ac']).should == [nil]
end
end
end
# Empty namespace fixtures referenced by the specs above.
module ModuleOne; end
module ModuleTwo; end
class ClassOne; end
|
# frozen_string_literal: true

# Factory for an OAuth-style authentication record: a provider name, a
# per-record unique uid, a random token and an associated user.
FactoryGirl.define do
  factory :authentication do
    # Wrapped in a block: a bare static value is evaluated once at factory
    # load time and static attributes are deprecated (removed in factory_bot 5).
    provider { 'SomeCoolProvider' }
    sequence(:uid) { |n| "#{n}" }
    token { SecureRandom.base64 }
    user
  end
end
:cop:
# frozen_string_literal: true

# Factory for an OAuth-style authentication record (rubocop-cleaned variant
# using Symbol#to_proc for the uid sequence).
FactoryGirl.define do
  factory :authentication do
    # Wrapped in a block: static attribute values are evaluated once at
    # load time and are deprecated (removed in factory_bot 5).
    provider { 'SomeCoolProvider' }
    sequence(:uid, &:to_s)
    token { SecureRandom.base64 }
    user
  end
end
|
# Factory for a release with a unique name, a relative release date and an
# associated wine.
FactoryGirl.define do
  factory :release do
    sequence(:name) { |n| "Release Name #{n}" }
    # Wrapped in a block: a bare `10.days.ago` is evaluated once when the
    # factory file loads, so every record built later would share the same
    # stale timestamp (and static attributes are deprecated in factory_bot).
    release_date { 10.days.ago }
    wine
  end
end
Synced up release factory date changes
# Release factory: release_date is a block, so it is lazily evaluated per
# build and each record gets a fresh timestamp relative to creation time.
FactoryGirl.define do
factory :release do
sequence(:name) { |n| "Release Name #{n}" }
release_date { 10.days.ago }
wine
end
end
require 'spec_helper'

# JS integration specs for client-side validation on the admin release form.
feature 'validations' do
  scenario 'inputs should validate on blur', js: true do
    admin_login
    visit new_admin_release_path
    page.find('#release_name').trigger('focus')
    page.find('#release_name').trigger('blur')
    expect(page).to have_selector('#release_name.invalid')
    expect(page).to have_selector('div.release_name.field_with_errors')
    expect(page).to have_selector('div.release_name span.error')
  end

  scenario 'inputs should validate on form submission', js: true do
    admin_login
    visit new_admin_release_path
    click_button('Save Settings')
    expect(page).to have_selector('#release_name.invalid')
    expect(page).to have_selector('div.release_name.field_with_errors')
    expect(page).to have_selector('div.release_name span.error')
  end

  scenario 'datapickers should validate on blur', js: true do
    admin_login
    visit new_admin_release_path
    page.find('#release_release_date').trigger('focus')
    page.find('#release_release_date').trigger('blur')
    expect(page).to have_selector('#release_release_date.invalid')
    expect(page).to have_selector('div.release_release_date.field_with_errors')
    expect(page).to have_selector('div.release_release_date span.error')
  end

  scenario 'selects should validate on change', js: true do
    wine = FactoryGirl.create(:wine)
    admin_login
    visit new_admin_release_path
    # Picking a real wine clears any validation errors...
    within('.release_wine') do
      page.find('.chosen-single').click
      page.find('li', text: wine.name_en).click
    end
    expect(page).to_not have_selector('div.release_wine.field_with_errors')
    expect(page).to_not have_selector('div.release_wine span.error')
    # ...and reverting to the placeholder re-triggers them.
    within('.release_wine') do
      page.find('.chosen-single').click
      page.find('li', text: 'Select One').click
    end
    expect(page).to have_selector('div.release_wine.field_with_errors')
    expect(page).to have_selector('div.release_wine span.error')
  end

  # FIX: this scenario previously was not closed before the next one began,
  # so the error-banner scenario was nested inside it and never registered
  # as its own example.
  scenario 'inputs should validate length on inline', js: true do
    admin_login
    visit new_admin_release_path
    within(:css, 'div.release_name') do
      expect(page.find(:css, 'span.characters-left').text).to eq('Characters Left: 15')
      fill_in 'release_name', with: 'Test'
      expect(page.find(:css, 'span.characters-left').text).to eq('Characters Left: 11')
      fill_in 'release_name', with: 'Lorem ipsum dolor sit amet, consectetur adipiscing elit. Praesent pulvinar euismod nisl, in pellentesque sapien ornare ac.'
      expect(page.find(:css, 'span.characters-left').text).to eq('Characters Left: 0')
    end
  end

  scenario 'should have error banner when errors are on the page', js: true do
    admin_login
    visit new_admin_release_path
    within('.release_wine') do
      page.find('.chosen-single').click
      page.find('li', text: 'Select One').click
    end
    expect(page).to have_selector('div.alert.no_slide')
  end
end
Added spec
require 'spec_helper'
# JS integration specs for client-side validation on the admin release form.
# Step order within each scenario is significant (real browser interactions),
# so the code is kept byte-identical; only comments are added.
feature 'validations' do
scenario 'inputs should validate on blur', js: true do
admin_login
visit new_admin_release_path
page.find('#release_name').trigger('focus')
page.find('#release_name').trigger('blur')
expect(page).to have_selector('#release_name.invalid')
expect(page).to have_selector('div.release_name.field_with_errors')
expect(page).to have_selector('div.release_name span.error')
end
scenario 'inputs should validate on form submission', js: true do
admin_login
visit new_admin_release_path
click_button('Save Settings')
expect(page).to have_selector('#release_name.invalid')
expect(page).to have_selector('div.release_name.field_with_errors')
expect(page).to have_selector('div.release_name span.error')
end
scenario 'datapickers should validate on blur', js: true do
admin_login
visit new_admin_release_path
page.find('#release_release_date').trigger('focus')
page.find('#release_release_date').trigger('blur')
expect(page).to have_selector('#release_release_date.invalid')
expect(page).to have_selector('div.release_release_date.field_with_errors')
expect(page).to have_selector('div.release_release_date span.error')
end
# Selecting a real wine clears errors; reverting to the placeholder
# re-triggers them.
scenario 'selects should validate on change', js: true do
wine = FactoryGirl.create(:wine)
admin_login
visit new_admin_release_path
within('.release_wine') do
page.find('.chosen-single').click
page.find('li', text: wine.name_en).click
end
expect(page).to_not have_selector('div.release_wine.field_with_errors')
expect(page).to_not have_selector('div.release_wine span.error')
within('.release_wine') do
page.find('.chosen-single').click
page.find('li', text: 'Select One').click
end
expect(page).to have_selector('div.release_wine.field_with_errors')
expect(page).to have_selector('div.release_wine span.error')
end
# Character counter updates as the user types; long input clamps to 0.
scenario 'inputs should validate length on inline', js: true do
admin_login
visit new_admin_release_path
within(:css, 'div.release_name') do
expect(page.find(:css, 'span.characters-left').text).to eq('Characters Left: 15')
fill_in 'release_name', with: 'Test'
expect(page.find(:css, 'span.characters-left').text).to eq('Characters Left: 11')
fill_in 'release_name', with: 'Lorem ipsum dolor sit amet, consectetur adipiscing elit. Praesent pulvinar euismod nisl, in pellentesque sapien ornare ac.'
expect(page.find(:css, 'span.characters-left').text).to eq('Characters Left: 0')
end
end
scenario 'should have error banner when errors are on the page', js: true do
admin_login
visit new_admin_release_path
page.find('#release_name').trigger('focus')
page.find('#release_name').trigger('blur')
expect(page).to have_selector('div.alert.no_slide')
end
end
require '../lib/tinkit' #<-- to be replaced by gem

# Re-create the node environment used by the earlier hello-world script to
# demonstrate that data written by it survives across processes.
env = {
  node_class_id: "HelloWorldClass",
  persist_model: {
    name: "filesystem",
    key_fields: {
      required_keys: [:id],
      primary_key: :id,
    },
    env: {
      user_id: "me",
      path: "/tmp/tinkit_hello_world_test/",
    },
  },
}

# Build the node class from the environment description.
TinkitNodeFactory.make(env)
#=> Tinkit::HelloWorldClass

# The persisted data is reachable three different ways:
puts Tinkit::HelloWorldClass.all.first.data                  # all nodes
puts Tinkit::HelloWorldClass.get("helloworld_id").data       # by primary key
puts Tinkit::HelloWorldClass.find_nodes_where(:data, :equals, "Hello World").first.data # by search
Slight update to hello_world_get to delete the node data
require '../lib/tinkit' #<-- to be replaced by gem
# So time to check the persistence.
# If we provide the environment again
# (same node_class_id and filesystem path as the script that wrote the data)
env = {:node_class_id => "HelloWorldClass",
:persist_model => {
:name => "filesystem",
:key_fields => {
:required_keys => [:id],
:primary_key => :id,
},
:env => {
:user_id => "me",
:path => "/tmp/tinkit_hello_world_test/"
}
}
}
#and make the class
TinkitNodeFactory.make(env)
#=> Tinkit::HelloWorldClass
#we should be able to get to our data
#we can get all data (array of all nodes)
puts Tinkit::HelloWorldClass.all.first.data
#or by the primary key id
puts Tinkit::HelloWorldClass.get("helloworld_id").data
#or by searching for data
puts Tinkit::HelloWorldClass.find_nodes_where(:data, :equals, "Hello World").first.data
#delete the data
# (cleans up the /tmp store so reruns of the demo start fresh)
Tinkit::HelloWorldClass.destroy_all
$: << '../../lib'
$LOAD_PATH << '.'
require 'bundler/setup'
require 'doc_smoosher'
extend DocSmoosher::TopLevel
# Shared fields
# Shared query parameters reused across the API call definitions below.
limit = define_parameter(name: 'limit') do |p|
p.description = 'Return these many results'
p.type = :integer
p.default = 10
end
offset = define_parameter(name: 'offset') do |p|
p.description = 'Offset the results I receive by this amount'
p.type = :integer
p.default = 0
end
api_key = define_parameter(name: 'api_key') do |p|
p.description = 'Use this API key'
p.type = :string
p.required = true
p.example = "hg7JHG6daSgf56FjhgsSa"
end
full_text = define_parameter(name: 'full_text') do |p|
p.description = 'Show the results with all their text, however long'
p.type = :boolean
p.default = false
end
uri = define_parameter(name: 'uri') do |p|
p.description = 'The url of the article you want to extract'
p.type = :string
# NOTE(review): `default = false` on a :string parameter looks like a
# copy-paste from full_text above — confirm whether this should be absent.
p.default = false
end
url = define_parameter(name: 'url') do |p|
p.description = 'The url of the html you want to return'
p.type = :string
# NOTE(review): same suspect `false` default on a :string — confirm.
p.default = false
end
# Metadata attribute list that can be attached to a knowledge item.
metadata = define_parameter(name: 'metadata') do |m|
  m.description = <<-DESC
A list of attributes you can associate to the knowledge item.
Valid types of metadata are date, string, collection and number.</br>
'date': which includes a time for when an event occurred</br>
'string': General purpose content</br>
'collection': One item from defined collection</br>
'number': A numerical value
  DESC
  m.type = :array
  # Fixed: the 'title' entry in the example was missing its closing brace.
  m.example = "[{ name: 'published_date', type: 'date', content: '2012-01-20 00:00:00' }, { name: 'title', type: 'string', content: 'A day to remember' }, { name: 'author', type: 'collection', content: 'Joe Bloggs' }, { name: 'author', type: 'collection', content: 'John Smith' }]"
end
##
# Introduction
#
# Top-of-page HTML blurb for the generated API docs.
# Fixed: "httpss://" -> "https://" in the Ruby library links.
DESCRIPTION =<<-DESC
<p>Check out the <a href="/pages/demo"> demo </a> to see Ingenia in action.</p>
<p>Look at the <a href="/faq">FAQ</a> for any questions.</p>
<p> Go through the documentation and choose if you want to use Ingenia by the API or with the Ruby gem. </p>
<p> <a href="/contact"> Contact us </a> to get your API key or if you have any questions.</p>
<p>If you would like to verify your API key or code data path then use the <a href="#call-administrative-calls-status">status</a> call.</p>
<h3 id='api-libraries'>Ruby API library</h3>
<a href="https://github.com/ingenia-api/ingenia_ruby">https://github.com/ingenia-api/ingenia_ruby</a>
<h3 id='api-rate-limiting'>Rate limiting</h3>
<p>Ingenia by default limits a user to 4 calls per second, for every type of API call. Contact us to have this limit increased or removed if needed. </p>
DESC
# Response object for the similarity API call: items related to an origin item.
json_similarity_response = define_object(name: 'Similarity response') do |sr|
  sr.description = "An array of items that are related to an origin item sent via a similarity API call"
  sr.parameter name: 'id' do |p|
    p.description = 'The ID of the origin item'
    p.type = :string
  end
  sr.parameter name: 'text' do |p|
    p.description = 'First 50 characters of the text of each related item'
    p.type = :string
  end
  sr.parameter name: 'mode' do |p|
    p.description = 'If \'tag\', it will determine related items on the basis of their tags; if \'word\', it will do so on the basis of the words contained in the item'
    p.type = :string
    p.default = 'tag'
  end
  sr.parameter name: 'similarity' do |p|
    p.description = 'From 0 to 1, it measures how similar each related item is to the origin item; the response will sort items on descending similarity'
    p.type = :float
  end
  # Fixed typo in the last entry: "similariy" -> "similarity".
  # NOTE(review): the sample wraps a bare array in an object ({ [ ... ] }),
  # which is not strictly valid JSON — confirm intended envelope shape.
  sr.example = '
{
[
{ "item": { "id":12182, "text": "The fall in the rand has given wealthy Russians a new location to search for luxury..." }, "mode": "tag", "similarity": 0.62 },
{ "item": { "id":9293, "text": "Robots tend to do jobs that no one wants to do. I am old enough to remember..." }, "mode": "tag", "similarity": 0.55 },
{ "item": { "id":25333, "text": "The market for RMB credit raised outside China has gone four weeks without a..." }, "mode": "word", "similarity": 0.22 }
]
}'
end
# bundle
# JSON body accepted when creating or updating a bundle.
json_bundle = define_object(name: 'Bundle: create / update input') do |bundle|
bundle.description = "A collection of items related to each other"
bundle.type = :json
bundle.required = true
bundle.parameter name: 'name' do |p|
p.description = 'The name of your bundle'
p.type = :string
end
bundle.this_is_json!
bundle.parameter name: 'tag_set_ids' do |p|
p.description =<<-DESC
An array of tag set IDs to be applied to this bundle. The tags in these tag sets will be available to the items in the bundle.
If an existing bundle already has tag sets, then these can be removed by omitting the ID in the call.
DESC
p.type = :array
end
# NOTE(review): the example shows a "tag_sets" array of objects, but the
# documented parameter is 'tag_set_ids' (an array of IDs) — confirm which
# shape the API actually accepts.
bundle.example = '
{
"name":"Tech Startups",
"tag_sets": [
{
"id" : 2820,
"name" : "Tag Set One"
},
{
"id" : 2819,
"name" : "Tag Set Two"
}
]
}'
end
# JSON returned when showing a single bundle.
json_bundle_show = define_object(name: 'Bundle: show output') do |bundle|
  bundle.description = "A collection of items related to each other"
  bundle.parameter name: 'id' do |p|
    p.description = 'A unique numeric id generated by Ingenia'
    p.default = '[generated]'
    p.type = :numeric
  end
  bundle.parameter name: 'name' do |p|
    p.description = 'The name of your bundle'
    p.type = :string
  end
  bundle.parameter name: 'tag_sets' do |ts|
    ts.description = 'The tag sets that are currently attached to this bundle. Items within the bundle can use all the tags in these tag sets.'
    ts.type = :array
  end
  bundle.parameter name: 'created_at' do |p|
    p.description = 'When this bundle was created'
    p.type = :date_time
    p.example = '2013-12-16T11:24:52+00:00'
  end
  # We should probably not show this to the user
  bundle.parameter name: 'updated_at' do |p|
    p.description = 'When this bundle was last updated'
    p.type = :date_time
    p.example = '2013-12-16T11:25:52+00:00'
  end
  # Fixed: removed the trailing comma after "updated_at" so the example is
  # valid JSON.
  bundle.example = '
{
"id":47858,
"name":"Tech Startups",
"tag_sets": [
{ "name": "technology", "id": 14562 },
{ "name": "business", "id": 666 }
],
"created_at":"2014-03-13T15:36:51Z",
"updated_at":"2014-03-13T15:36:51Z"
}'
end
# Envelope shared by every API response: version, data payload, status and an
# optional error message.
json_basic_response = define_object(name: 'Basic response format') do |brf|
brf.description = "All responses from the API gateway have the following format"
brf.parameter name: 'version' do |p|
p.description = 'The version of the API that is responding'
p.type = :string
p.example = '"2.0"'
end
#data
brf.parameter name: 'data' do |p|
p.description = 'The data payload response from the call'
p.type = :object
end
#status
brf.parameter name: 'status' do |p|
p.description = '"okay" if the call is processed correctly, otherwise it will be "error"'
p.type = :string
end
#message
brf.parameter name: 'message' do |p|
p.description = 'A message describing the nature of the error, returned if an error occurred'
p.type = :string
end
end
# Item JSON POST form
# JSON body accepted when creating or updating an item.
json_item = define_object(name: 'Item: create / update input') do |item|
  item.description = "An item is a block of text to which you can associate tags, that belongs to a bundle"
  item.type = :json
  item.required = true
  item.this_is_json!
  item.parameter name: 'id' do |p|
    p.description = 'An alphanumeric id unique to each bundle. You can use your own, or have Ingenia generate one for you'
    p.default = '[generated]'
    p.type = :string
    p.example = '785uU423aC'
  end
  item.parameter name: 'text' do |p|
    p.description = 'Your item\'s content. [1]'
    p.type = :string
  end
  item.parameter name: 'url' do |p|
    p.description = 'Source URL to get text from. Ingenia will extract the most relevant text [1]'
    p.type = :string
    p.example = 'https://www.example.com'
  end
  item.parameter name: 'bundle_id' do |p|
    p.description = 'ID of the bundle in which to put the item'
    p.type = :integer
    p.default = '[user\'s first bundle]'
  end
  item.parameter name: 'tag_sets' do |p|
    p.description = "A hash of tag sets, each of which is an array of tags that you consider of the same type [2]"
    p.type = :hash
    p.example = '{ "topics": [ "startups", "saas", "marketing" ], "geography": [ "United Kingdom", "Italy" ] }'
  end
  # NOTE(review): 'tags' is declared twice — once as the array form, once as
  # the scored-hash form. Presumably intentional to document both input
  # shapes; confirm the generator renders duplicate names as expected.
  item.parameter name: 'tags' do |p|
    p.description = "An array with the name of the tags you wish to assign to this item. If the tag doesn\'t exist, it will be created [2]."
    p.type = :array
    # Fixed: removed a stray trailing quote from the example.
    p.example = <<-EOF
[ "startups", "saas", "marketing" ]
    EOF
  end
  item.parameter name: 'tags' do |p|
    p.description = "As above, but with a user-assigned score. The score should be a number between 0 and 1 that quantifies the strength of the association between the item and the tag (1: highest) [2]."
    p.type = :hash
    # Fixed typo: "sass" -> "saas", matching the array-form example above.
    p.example = <<-EOF
{ "startups" : 0.2 , "saas" : 0.7, "marketing" : 1 }
    EOF
  end
  item.parameter name: 'tag_ids' do |p|
    p.description = "The Ingenia IDs of the tags you wish to assign to this item [2]"
    p.type = :array
    p.example = '[ 45, 787, 23 ]'
  end
  item.example = <<-EXAMPLE
{
text: "High tech startups and their positive power to change for good",
tag_sets: {
"Topics": [ "startups", "technology" ],
"Mood": [ "positive" ]
}
}
  EXAMPLE
  item.parameter metadata
  item.footnote =<<-FN
<p>[1] You can input content as one of these fields: text, a URL, a file. Formats
supported for files include txt, html, pdf and all MS Office formats. If you send a file, it will extract the text
from it.</p>
<p>The text and the URL are input as part of the JSON component. The file
is sent as a multipart encoded https field.</p>
<p>[2] Only specify one of the following: tag_sets, tags or tag_ids </p>
  FN
end
# Item JSON get form
# JSON returned when showing a single item, including its tag assignments.
json_item_show = define_object(name: 'Item: show output') do |item|
  item.parameter name: 'bundle_id' do |p|
    p.description = 'The id of the bundle that this item belongs to'
    p.type = :numeric
  end
  item.parameter name: 'bundle_name' do |p|
    p.description = 'The name of the bundle that this item belongs to'
    p.type = :string
  end
  item.parameter name: 'concordance' do |p|
    p.description = 'The extent to which the user and Ingenia agree on the categorisation of the item. 0 if the tags are different, 1 if they are identical. Use this to identify content that may need to be reviewed'
    p.type = :float
  end
  item.parameter name: 'id' do |p|
    p.description = 'A unique alphanumeric id'
    p.type = :string
    p.example = '785uU423aC'
  end
  item.parameter name: 'item_state' do |p|
    p.description = 'The current state of the item'
    p.type = :string
  end
  item.parameter name: 'language' do |p|
    # Fixed typo: "conent" -> "content".
    p.description = 'The language of the content in this item'
    p.type = :string
  end
  item.parameter name: 'text' do |p|
    p.description = 'Your item\'s content'
    p.type = :string
  end
  item.parameter name: 'created_at' do |p|
    p.description = 'When this item was created'
    p.type = :date_time
    p.example = '2013-12-16T11:24:52+00:00'
  end
  # We should probably not show this to the user
  item.parameter name: 'updated_at' do |p|
    p.description = 'When this item was last updated'
    p.type = :date_time
    p.example = '2013-12-16T11:25:52+00:00'
  end
  item.parameter name: 'last_classified_at' do |p|
    p.description = 'When this item was last classified by the system; null if it hasn\'t been classified yet'
    p.type = :date_time
    p.example = '2013-12-16T11:25:52+00:00'
  end
  item.parameter name: 'tag_sets' do |p|
    p.description = 'An array of tag sets associated to the item'
    p.type = :array
  end
  item.parameter name: 'tag_set' do |p|
    p.description = 'A hash containing the tag set id and the array of tags associated to the item'
    p.type = :hash
  end
  item.parameter name: 'tag' do |p|
    p.description = 'A hash with the details of a tag associated to the item, including its id, name, user assigned score and user_selected'
    p.type = :hash
  end
  item.parameter name: 'score' do |p|
    p.description = 'An aggregation of the machine and rule scores, between 0 (lowest) and 1 (highest).'
    p.type = :numeric
  end
  item.parameter name: 'user_selected' do |p|
    p.description = 'Deprecated: please use user_assigned value, this will be removed in the next release'
    p.type = :string
  end
  item.parameter name: 'user_assigned' do |p|
    p.description = 'true if the tag was assigned to the item by the user, false if it was assigned by Ingenia'
    p.type = :boolean
  end
  item.parameter name: 'user_assigned_score' do |p|
    p.description = 'score assigned by the user when tag was created'
    p.type = :float
  end
  item.parameter name: 'machine_score' do |p|
    p.description = 'A number which quantifies the strength of the association between an item and a tag, between 0 (lowest) and 1 (highest)'
    p.type = :numeric
  end
  item.parameter name: 'rule_score' do |p|
    p.description = 'A number which quantifies the strength of the association between an item and a tag score, between -1 (lowest) and 1 (highest)'
    p.type = :numeric
  end
  item.parameter name: 'membership_degree' do |p|
    p.description = 'the degree to which this item is a member of its bundle'
    p.type = :float
  end
  item.parameter name: 'metadata' do |p|
    p.description = 'any additional data you associated to this content; it may include dates, values, urls, additional text, etc.'
    p.type = :array
  end
  # Example fixed: the last_classified_at value is now quoted (was a bare
  # token) and a comma was added after the tag_sets array so the sample
  # parses as JSON.
  item.example = '
{
"id":"e19e134d0e79153349ff78a674283e0b",
"last_classified_at":"2013-12-16T11:25:07+00:00",
"text":"How to get to scale with a saas startup in the UK? ...",
"tag_sets":
[
{
"topics":
{
"id":156,
"tags":
[
{
"id":4352,
"name":"startups",
"user_selected": "f",
"user_assigned": false,
"score":"0.8",
"machine_score":"0.45",
"rule_score": "0.35",
"user_assigned_score": null
},
{
"id": 7811,
"name": "saas",
"user_selected": "t",
"user_assigned": true,
"score": "0.45",
"machine_score":"0.45",
"rule_score": null,
"user_assigned_score": 0.7
},
{
"id":1327,
"name":"marketing",
"user_selected": "t",
"user_assigned": true,
"score": "0.50",
"machine_score":"0.45",
"rule_score": "0.05",
"user_assigned_score": 0.7
}
]
}
},
{
"geography":
{
"id":622,
"tags":
[
{
"id":3321,
"name":"united kingdom",
"score":"0.37",
"user_selected": "t",
"user_assigned": true
}
]
}
}
],
"created_at":"2013-12-16T11:24:52+00:00",
"updated_at":"2013-12-16T11:24:56+00:00"
}'
end
# Tag JSON POST form
# JSON body accepted when creating or updating a tag.
json_tag = define_object(name: 'Tag: create / update input') do |tag|
tag.description = "Something you want to associate to an item, e.g., a concept, topic, tone, sentiment, keyword, person, company, product, etc."
tag.type = :json
tag.this_is_json!
tag.required = true
tag.parameter name: 'name' do |p|
p.description = 'The name of your tag; we advise to make it short but meaningful; unique to each tag set'
p.type = :string
p.required = true
end
tag.parameter name: 'tag_set_id' do |p|
p.description = 'The ID of the tag_set to which this tag belongs'
p.type = :integer
p.required = true
end
tag.parameter name: 'description' do |p|
p.description = "A description of this tag: this is helpful to define in a focused way how the tag should be used"
p.type = :string
end
# Disposition tunes the precision/recall trade-off for this tag (see the
# multi-line description below).
tag.parameter name: 'disposition' do |p|
p.description = "The disposition of the tag. Float value between 0 and 1, defaults to 0.5. Lower values will tend to privilege precision (we suggest 0.25); higher values will tend to privilege recall (we suggest 0.75). For most uses, the default value will work well.
You will want to privilege precision (with a disposition < 0.5) if you want each tag assignment to be accurate, and are less worried about some items being missed, i.e., you prefer to have false negatives than false positives. If the disposition is 0, no item will be tagged with this tag.
You will want to privilege recall (with a disposition > 0.5) if you want each tag assignment to occur, and are less worried about some items being tagged incorrectly, i.e., you prefer to have false positives than false negatives. If the disposition is 1, all items will be tagged with this tag."
p.type = :float
p.default = 0.5
end
tag.example = '
{
"name":"Text Analytics",
"tag_set_id":37874,
"description":"A set of techniques designed to extract valuable information from textual content",
"disposition": 0.5
}'
end
# JSON returned when showing a single tag.
json_tag_show = define_object(name: 'Tag: show output') do |tag|
tag.description = "Something you want to associate to an item, e.g., a concept, topic, tone, sentiment, keyword, person, company, product, etc."
tag.parameter name: 'id' do |p|
p.description = 'A unique numeric id, generated by Ingenia'
p.type = :numeric
end
tag.parameter name: 'name' do |p|
p.description = 'The name of your tag'
p.type = :string
end
tag.parameter name: 'tag_set_id' do |p|
p.description = 'The ID of the tag_set to which this tag belongs'
p.type = :integer
end
tag.parameter name: 'confidence' do |p|
p.description = "From 0 to 1; confidence gets closer to 1 the more Ingenia considers the training for this tag sufficient; if this value is low, we advise to increase your training set for this tag"
p.type = :float
end
tag.parameter name: 'description' do |p|
p.description = "A description of this tag"
p.type = :string
end
tag.parameter name: 'created_at' do |p|
p.description = 'When this tag was created'
p.type = :date_time
p.example = '2013-12-16T11:24:52+00:00'
end
#We should probably not show this to the user
tag.parameter name: 'updated_at' do |p|
p.description = 'When this tag was last updated'
p.type = :date_time
p.example = '2013-12-16T11:25:52+00:00'
end
tag.example = '
{
"id":554273,
"name":"Text Analytics",
"tag_set_id":8547,
"confidence":0.95,
"description":"the process of deriving high-quality information from text",
"created_at":"2014-03-13T12:59:32Z",
"updated_at":"2014-03-13T12:59:32Z"
}'
end
# Response schema for listing a tag's rules: the parent tag plus an array of
# its tag rules.
# NOTE(review): the documented parameters use the key "tag_rules" while the
# example payload uses "tag_rule" — confirm which key the API actually returns.
json_tag_rules_show = define_object(name: 'Tag rules: index output') do |tag_rule|
tag_rule.description = "A list of rules applied to a tag to influence whether or not to apply the tag to an item."
tag_rule.parameter name: 'tag:id' do |p|
p.description = 'The ID of the tag to which this tag rule belongs'
p.type = :integer
end
tag_rule.parameter name: 'tag:name' do |p|
p.description = 'The name of the tag to which this tag rule belongs'
p.type = :string
end
tag_rule.parameter name: 'tag_rules:(array):id' do |p|
p.description = 'A unique numeric id, generated by Ingenia'
p.type = :numeric
end
tag_rule.parameter name: 'tag_rules:(array):text' do |p|
p.description = 'The word or phrase to which the rule should apply.'
p.type = :string
end
tag_rule.parameter name: 'tag_rules:(array):language' do |p|
p.description = 'The language of the word you\'ve entered.'
p.type = :string
end
tag_rule.parameter name: 'tag_rules:(array):influence' do |p|
p.description = 'A number from -1 to 1, it indicates the "strength" of the rule.'
p.type = :float
end
tag_rule.parameter name: 'tag_rules:(array):tag_rule_mode' do |p|
p.description = 'The tag rule mode used. Options are; word_present, word_absent, word_skipped or word_capped'
# tag_rule_mode is a mode name (e.g. "word_present"), not a number; the
# single-rule show object documents it as :string, so :string here too
# (was :float — an inconsistency between the two schemas).
p.type = :string
end
tag_rule.parameter name: 'tag_rules:(array):created_at' do |p|
p.description = 'When this tag rule was created'
p.type = :date_time
p.example = '2013-12-16T11:24:52+00:00'
end
tag_rule.parameter name: 'tag_rules:(array):updated_at' do |p|
p.description = 'When this tag rule was last updated'
p.type = :date_time
p.example = '2013-12-16T11:25:52+00:00'
end
# Example fixed to be valid JSON: "id" key was missing its opening quote and
# the "name" member had a trailing comma.
tag_rule.example = '
{
"tag": {
"id":554273,
"name":"Text Analytics"
},
"tag_rule": [{
"text": "data",
"influence": 0.4,
"language": "en",
"tag_rule_mode":"word_present",
"created_at":"2014-03-13T12:59:32Z",
"updated_at":"2014-03-13T12:59:32Z"
},
...
]
}'
end
# Response schema for showing a single tag rule: the parent tag plus one rule.
json_tag_rule_show = define_object(name: 'Tag Rule: show output') do |tag_rule|
tag_rule.description = "A rule applied to a tag to influence whether or not to apply the tag to an item."
tag_rule.parameter name: 'tag:id' do |p|
p.description = 'The ID of the tag to which this tag rule belongs'
p.type = :integer
end
tag_rule.parameter name: 'tag:name' do |p|
p.description = 'The name of the tag to which this tag rule belongs'
p.type = :string
end
tag_rule.parameter name: 'tag_rule:id' do |p|
p.description = 'A unique numeric id, generated by Ingenia'
p.type = :numeric
end
tag_rule.parameter name: 'tag_rule:text' do |p|
p.description = 'The word or phrase to which the rule should apply.'
p.type = :string
end
tag_rule.parameter name: 'tag_rule:language' do |p|
p.description = 'The language of the word you\'ve entered.'
p.type = :string
end
tag_rule.parameter name: 'tag_rule:influence' do |p|
p.description = 'A number from -1 to 1, it indicates the "strength" of the rule.'
p.type = :float
end
tag_rule.parameter name: 'tag_rule:tag_rule_mode' do |p|
p.description = 'The tag rule mode used. Options are; word_present, word_absent, word_skipped or word_capped'
p.type = :string
end
tag_rule.parameter name: 'tag_rule:created_at' do |p|
p.description = 'When this tag rule was created'
p.type = :date_time
p.example = '2013-12-16T11:24:52+00:00'
end
tag_rule.parameter name: 'tag_rule:updated_at' do |p|
p.description = 'When this tag rule was last updated'
p.type = :date_time
p.example = '2013-12-16T11:25:52+00:00'
end
# Example fixed to be valid JSON: "id" key was missing its opening quote and
# the "name" member had a trailing comma.
tag_rule.example = '
{
"tag": {
"id":554273,
"name":"Text Analytics"
},
"tag_rule": {
"text": "data",
"influence": 0.4,
"language": "en",
"tag_rule_mode":"word_present",
"created_at":"2014-03-13T12:59:32Z",
"updated_at":"2014-03-13T12:59:32Z"
}
}'
end
# TagRule JSON POST form
# Request schema for creating a tag rule: the JSON body the client POSTs.
json_tag_rule_create = define_object(name: 'Tag rule: create input') do |tag_rule|
tag_rule.description = "A rule to apply to a tag to influence use of that tag."
tag_rule.type = :json
tag_rule.this_is_json!
tag_rule.parameter name: 'text' do |p|
p.description = 'The word or phrase to which the rule should apply.'
p.type = :string
end
tag_rule.parameter name: 'language' do |p|
p.description = 'The language of the word you\'ve entered.'
p.type = :string
end
tag_rule.parameter name: 'influence' do |p|
p.description = 'A number from -1 to 1, it indicates the "strength" of the rule.'
p.type = :float
end
tag_rule.parameter name: 'tag_rule_mode' do |p|
p.description = 'The tag rule mode used. Options are; word_present, word_absent, word_skipped or word_capped'
p.type = :string
end
tag_rule.example = '
{
"text":"ruby",
"influence":0.5,
"language":"en",
"tag_rule_mode":"word_present"
}'
end
# TagSet JSON POST form
# Request schema for creating/updating a tag set: the JSON body the client sends.
json_tag_set = define_object(name: 'Tag set: create / update input') do |tag_set|
tag_set.description = "A collection of thematically consistent tags"
tag_set.type = :json
tag_set.required = true
tag_set.this_is_json!
tag_set.parameter name: 'name' do |p|
p.description = 'The name of your tag set; we advise to make it short but meaningful; must be unique'
p.type = :string
end
tag_set.example = '
{
"name":"Big Data"
}'
end
# Response schema for showing a tag set: id, name and server timestamps.
json_tag_set_show = define_object(name: 'Tag set: show output') do |tag_set|
tag_set.description = "A collection of thematically consistent tags"
tag_set.parameter name: 'id' do |p|
p.description = 'A unique numeric id, generated by Ingenia'
p.type = :numeric
end
tag_set.parameter name: 'name' do |p|
p.description = 'The name of your tag set'
p.type = :string
end
tag_set.parameter name: 'created_at' do |p|
p.description = 'When this tag set was created'
p.type = :date_time
p.example = '2013-12-16T11:24:52+00:00'
end
#We should probably not show this to the user
tag_set.parameter name: 'updated_at' do |p|
p.description = 'When this tag set was last updated'
p.type = :date_time
p.example = '2013-12-16T11:25:52+00:00'
end
tag_set.example = '
{
"id":178751,
"name":"Big Data",
"created_at":"2014-03-12T12:17:33Z",
"updated_at":"2014-03-12T12:17:33Z"
}'
end
# Classifications
# Example-only object for classification responses: no parameters are
# documented here, just a sample envelope with per-tag-set scored tags.
json_classify = define_object(name: 'Classifications') do |classify|
classify.example = '
{
"api_version": "2.0",
"status": "okay",
"data": {
"classification_status": "complete",
"results": {
"Software": {
"id": 6,
"tags": [
{
"id": 31,
"name": "php",
"score": 0.655
},
{
"id": 90,
"name": "php-session",
"score": 0.315
},
{
"id": 158,
"name": "pass-by-reference",
"score": 0.262
},
{
"id": 160,
"name": "debugging",
"score": 0.24
},
{
"id": 159,
"name": "pass-by-value",
"score": 0.198
},
{
"id": 63,
"name": "apache",
"score": 0.132
}
]
}
}
}
}
'
end
define_api(name: 'Ingenia API', description: DESCRIPTION) do |api|
# API metadata plus the JSON objects registered for rendering in the docs.
# NOTE(review): json_classify, json_tag_rules_show, json_tag_rule_show and
# json_tag_rule_create are defined above but never registered here — confirm
# whether they should be added to this list.
api.endpoint = 'api.ingeniapi.com/v2/'
api.version = '2.0'
api.format = 'json'
api.object json_basic_response
api.object json_item
api.object json_item_show
api.object json_bundle
api.object json_bundle_show
api.object json_tag
api.object json_tag_show
api.object json_tag_set
api.object json_tag_set_show
api.object json_similarity_response
# Classifications resource: POST /classify accepts text, a URL or a file and
# returns scored tags per tag set.
api.resource name: 'Classifications' do |r|
r.description = ""
r.request name: 'Classify' do |req|
req.description = ''
req.call_type = :post
req.path = '/classify'
# text, url and file are mutually exclusive content sources — see footnote [1].
req.parameter name: 'text' do |p|
p.description = 'The text you want Ingenia to classify [1]'
p.type = :string
p.example = 'A comparative study of European secondary education systems illustrated issues related to their budgetary sustainability...'
end
req.parameter name: 'url' do |p|
p.description = 'The source URL from which to extract the text to be classified; Ingenia will extract the most relevant text [1]'
p.type = :string
p.example = 'https://www.example.com'
end
req.parameter name: 'file' do |p|
p.description = 'File to be used as text source. Sent as multipart upload. Accepted file extensions are: Text (txt), Postscript Document Format (pdf) and Microsoft Office Documents (doc, docx, xlsx, ppt, pptx). [1]'
p.type = :multipart
p.example = 'document.pdf'
end
req.parameter name: 'bundle_id' do |p|
p.description = 'ID of the bundle to which the item belongs'
p.type = :integer
p.default = '[user\'s first bundle]'
end
req.parameter name: 'min_tags' do |p|
p.description = 'Return at least these many tags'
p.type = :integer
p.default = 0
end
req.parameter name: 'max_tags' do |p|
p.description = 'Return at most these many tags'
p.type = :integer
p.default = 6
end
req.example = <<-EOF
curl -X POST 'https://api.ingeniapi.com/v2/classify?text=A%20comparative%20study%20of%20European%20secondary%20education%20systems%20illustrated%20issues%20related%20to%20their%20budgetary%20sustainability&api_key=$api_key'
Response:
{
"classification_status": "complete",
"text": "A comparative study of European secondary education systems illustrated issues related to their budgetary sustainability",
"results": {
"Themes": {
"tags": [
{
"machine_score": -0.15,
"name": "Ed-tech",
"id": 174857,
"rule_score": 0.21,
"score": 0.06
}
],
"id": "1627"
}
}
}
EOF
req.footnote = <<-FN
<p>[1] You can input content as one of these fields: text, a URL, a file. Formats
supported for files include txt, html, pdf and all MS Office formats. If you send a file, it will extract the text
from it.</p>
<p>The text and the URL are input as part of the JSON component. The file
is sent as a multipart encoded https field.</p>
FN
end
end
##
# Personalisation
#
api.resource name: 'Personalisation' do |r|
r.description = ""
# r.request name: 'Similar to' do |req|
# req.description = ''
# req.call_type = :get
# req.path = '/items/:id/similar_to'
#
# req.parameter name: 'id' do |p|
# p.description = 'ID of item for which we want other similar items'
# p.type = :string
# p.required = true
# end
#
# req.parameter limit
#
# req.parameter full_text
#
# req.parameter name: 'mode' do |p|
# p.description = 'Constrain matches to base similarity on just "tag", just "word", or "auto" (first tags, then words)'
# p.type = :string
# p.example = 'mode=tag'
# p.default = 'auto'
# end
#
# req.parameter name: 'metadata_filters' do |p|
# p.description = 'Instruct ingenia to only consider knowledge items which match these criteria'
# p.type = :string
# p.example = 'metadata_filters[author]=Joe%20Bloggs'
# end
#
# req.parameter name: 'item_filters' do |p|
# p.description = 'Instruct ingenia to only consider knowledge items which were created within specific dates. Dates are inclusive.'
# p.type = :string
# p.example = 'item_filters[from]=2014-12-25&item_filters[to]=2014-12-30'
# end
#
# req.response = json_similarity_response
# end
# GET /items/:id/similar_to — items similar to an existing item, matched by
# tags and/or words depending on `mode`.
r.request name: 'Similar to' do |req|
req.description = ''
req.call_type = :get
req.path = '/items/:id/similar_to'
req.parameter name: 'id' do |p|
p.description = 'ID of item to get similar items to'
p.type = :string
p.required = true
end
req.parameter name: 'bundle_id' do |p|
p.description = 'Tell ingenia which bundle this item is in. If this parameter is omitted, ingenia will only look for the item in the default bundle'
p.type = :integer
p.example = '77'
end
req.parameter name: 'bundle_ids' do |p|
p.description = 'Restrict your search to one or more bundles. If this parameter is omitted, all bundles will be scanned'
p.type = :array
p.example = '1,4,77'
end
req.parameter name: 'limit' do |p|
p.description = 'The number of items to return, the maximum is 100.'
p.type = :integer
p.example = '15'
end
req.parameter full_text
req.parameter name: 'mode' do |p|
p.description = 'Constrain matches to base similarity on just "tag", just "word", or "auto" (first tags, then words)'
p.type = :string
p.example = 'mode=tag'
p.default = 'auto'
end
req.parameter name: 'metadata_filters' do |p|
p.description = 'Instruct ingenia to only consider knowledge items which match these criteria'
p.type = :string
p.example = 'metadata_filters[author]=Joe%20Bloggs'
end
req.parameter name: 'item_filters' do |p|
p.description = 'Instruct ingenia to only consider knowledge items which were created within specific dates. Dates are inclusive.'
p.type = :string
p.example = 'item_filters[from]=2014-12-25&item_filters[to]=2014-12-30'
end
req.example = <<-EOF
curl -X GET 'https://api.ingeniapi.com/v2/items/ID423455-12-1432321250/similar_to?limit=3&api_key=$api_key'
Response:
[
{
"item": {
"id": "ID1959443-12-1458267383",
"text": "\n So it’s been a little over a year since GitHub fired me.\nI initially made a vague tweet about leaving the company, and then a few weeks later I wrot..."
},
"mode": "word",
"similarity": 0.194
},
{
"item": {
"id": "ID1834322-12-1455638255",
"text": " \n I worked there. It was literally the worst experience of my career - and I have worked at all of the hardest charging blue chips and two successfu..."
},
"mode": "word",
"similarity": 0.193
},
{
"item": {
"id": "ID1847748-12-1455841393",
"text": "Table of Contents (Show)Table of Contents (Hide)\n In This Issue of Venture Weekly:\n Top Story \nWhy Category Leaders Win, By Ablorde Ashigbi\n Per..."
},
"mode": "word",
"similarity": 0.19
}
]
EOF
req.response = json_similarity_response
end
# POST /similar_to_text — items similar to an arbitrary piece of text
# (no pre-existing item required).
r.request name: 'Similar to text' do |req|
req.description = ''
req.call_type = :post
req.path = '/similar_to_text'
req.parameter name: 'text' do |p|
p.description = 'Text of item for which we want other similar items'
p.type = :string
p.required = true
end
req.parameter name: 'bundle_id' do |p|
p.description = 'The bundle this item would most likely be found in. If this parameter is omitted, ingenia assumes the first bundle you created.'
p.type = :integer
p.example = '77'
end
req.parameter name: 'bundle_ids' do |p|
p.description = 'Find similar items in one or more bundles. If this parameter is omitted, ingenia find items from any of your bundles.'
p.type = :array
p.example = '1,4,77'
end
req.parameter name: 'limit' do |p|
p.description = 'The number of items to return, the maximum is 100.'
p.type = :integer
p.example = '15'
end
req.parameter full_text
req.parameter name: 'mode' do |p|
p.description = 'Constrain matches to base similarity on just "tag", just "word", or "auto" (first tags, then words)'
p.type = :string
p.example = 'mode=tag'
p.default = 'auto'
end
req.parameter name: 'metadata_filters' do |p|
p.description = 'Instruct ingenia to only consider knowledge items which match these criteria'
p.type = :string
p.example = 'metadata_filters[author]=Joe%20Bloggs'
end
req.parameter name: 'item_filters' do |p|
p.description = 'Instruct ingenia to only consider knowledge items which were created within specific dates. Dates are inclusive.'
p.type = :string
p.example = 'item_filters[from]=2014-12-25&item_filters[to]=2014-12-30'
end
# Example URL fixed: "technology%latest" was an invalid percent-encoding;
# the space must be encoded as %20.
req.example = <<-EOF
curl -X POST 'https://api.ingeniapi.com/v2/similar_to_text?text=technology%20latest&limit=3&api_key=$api_key'
Response:
[
{
"item": {
"id": "ID218266-10-1425298759",
"text": "Clarus Financial Technology | Esma\n+447771824036"
},
"mode": "word",
"similarity": 0.966
},
{
"item": {
"id": "CyberVally",
"text": "Technology blog group. blogging about latest technology related news."
},
"mode": "word",
"similarity": 0.87
},
{
"item": {
"id": "TechoTrack",
"text": "This is a technology blog. We provide latest updates on gadgets and technology."
},
"mode": "word",
"similarity": 0.869
}
]
EOF
req.response = json_similarity_response
end
# GET /similar_to_tags — items relevant to a set of tag IDs.
r.request name: 'Similar to tags' do |req|
req.description = ''
req.call_type = :get
req.path = '/similar_to_tags'
req.parameter name: 'tag_ids' do |p|
p.description = 'JSON encoded array of tag IDs for which we want relevant items'
p.type = :array
p.example = '[ 45, 787, 23 ]'
p.required = true
end
req.parameter name: 'bundle_ids' do |p|
p.description = 'Find similar items in one or more bundles. If this parameter is omitted, ingenia will attempt to infer the bundles from the tags'
p.type = :array
p.example = '1,4,77'
end
req.parameter name: 'limit' do |p|
p.description = 'The number of items to return, the maximum is 100.'
p.type = :integer
p.example = '15'
end
req.parameter full_text
req.parameter name: 'metadata_filters' do |p|
p.description = 'Instruct ingenia to only consider knowledge items which match these criteria'
p.type = :string
p.example = 'metadata_filters[author]=Joe%20Bloggs'
end
req.parameter name: 'item_filters' do |p|
p.description = 'Instruct ingenia to only consider knowledge items which were created within specific dates. Dates are inclusive.'
p.type = :string
p.example = 'item_filters[from]=2014-12-25&item_filters[to]=2014-12-30'
end
req.example = <<-EOF
curl -X GET 'http://api.ingeniapi.com/v2/similar_to_tags?tag_ids=%5B189454%2C189475%5D&limit=3&api_key=$api_key'
Response:
[
{
"item": {
"id": "ID1959443-12-1458267383",
"text": "\n So it’s been a little over a year since GitHub fired me.\nI initially made a vague tweet about leaving the company, and then a few weeks later I wrot..."
},
"mode": "word",
"similarity": 0.194
},
{
"item": {
"id": "ID1834322-12-1455638255",
"text": " \n I worked there. It was literally the worst experience of my career - and I have worked at all of the hardest charging blue chips and two successfu..."
},
"mode": "word",
"similarity": 0.193
},
{
"item": {
"id": "ID1847748-12-1455841393",
"text": "Table of Contents (Show)Table of Contents (Hide)\n In This Issue of Venture Weekly:\n Top Story \nWhy Category Leaders Win, By Ablorde Ashigbi\n Per..."
},
"mode": "word",
"similarity": 0.19
}
]
EOF
req.response = json_similarity_response
end
end
##
# Summarization
#
# Summarisation resource: extracts the key sentences from text, a URL, or an
# existing item.
api.resource name: 'Summarisation' do |r|
r.description = ""
r.request name: 'Summarise' do |req|
req.description = '<code class="get_post">GET</code> is also supported'
req.call_type = :post
req.path = '/summarise'
req.parameter name: 'text' do |p|
p.description = 'Text to summarise: the key sentences will be extracted [1]'
p.type = :string
end
req.parameter name: 'url' do |p|
p.description = 'URL of article to summarise: the key sentences will be extracted [1]'
p.type = :string
end
req.parameter name: 'id' do |p|
p.description = 'ID of the item to be summarised.'
p.type = :string
end
req.parameter name: 'include_tags' do |p|
p.description = 'If true the resulting sentences will be organised by each tag associated to the text, if false they are returned as a list'
p.type = :boolean
p.default = true
end
req.parameter name: 'order_by_position' do |p|
p.description = 'If true, the results will be ordered as they appear in the text, if false, they will be ordered by the score of the sentence'
p.type = :boolean
# Boolean literal (was the string 'false'), matching how include_tags
# declares its default above.
p.default = false
end
req.parameter name: 'max_sentences' do |p|
p.description = 'Maximum number of sentences to return'
p.type = :integer
p.default = 2
end
req.footnote = <<-EOF
<p>[1] You must input content as either text or a URL. </p>
EOF
req.example = <<-EOF
curl -X POST 'https://api.ingeniapi.com/v2/summarise?url=http://techcrunch.com/2016/05/11/charged/&api_key=$api_key'
Response:
{
"results": {
"Relevance": {
"tags": [
{
"machine_score": 0.11,
"name": "Relevance",
"id": 174842,
"rule_score": 0.31,
"score": 0.42,
"sentences": [
{
"text": "Venture capitalists in some sectors are increasingly eager to fund serious scientific innovations, they can be much tougher to do due diligence on than simple software that can be assessed based on immediate market traction.",
"score": 0.055,
"position": 4812
},
{
"text": " Otherwise, it could find it difficult to raise additional funding, hire or retain talent, and avoid a negative press spiral.",
"score": 0.043,
"position": 4686
}
]
}
],
"id": "1625"
}
}
}
EOF
end
end
# Keywords resource: read-only endpoint returning the salient keywords of an
# item with occurrence counts and scores.
api.resource name: 'Keywords' do |r|
r.description = ""
r.request name: 'Show' do |req|
req.description = 'Returns a list of keywords for a given item'
req.call_type = :get
req.path = '/keywords/:item_id'
req.parameter name: 'item_id' do |p|
# "keyfords" typo fixed.
p.description = 'ID of the item to show keywords for.'
p.type = :integer
# Boolean literal (was the symbol :true), matching every other required
# parameter in this file.
p.required = true
end
# Example verb fixed to GET to match call_type above (was POST).
req.example = <<-EOF
curl -X GET 'https://api.ingeniapi.com/v2/keywords/457?api_key=$api_key'
Response:
[
{
"text": "chronograph",
"occurrences": 1,
"score": 254
},
{
"text": "measure",
"occurrences": 3,
"score": 122
},
{
"text": "time",
"occurrences": 8,
"score": 12
}
]
EOF
end
end
##
# Items
#
api.resource name: 'Items' do |r|
r.description = "Blocks of textual content, typically self-contained and homogeneous"
# GET /items — paginated list of all the caller's items.
r.request name: 'Index' do |req|
req.description = 'Returns a list of all your items'
req.call_type = :get
req.path = '/items'
#req.parameter api_key
req.parameter limit
req.parameter full_text
req.parameter offset
req.example = <<-EOF
curl 'https://api.ingeniapi.com/v2/items?api_key=$api_key'
Response:
[
{
"bundle_id": 778,
"bundle_name": "Here we go again",
"concordance": null,
"created_at": "2016-05-10T15:35:59Z",
"id": "61265a8b2e56ff9693753fd044630ed5",
"item_state": "processed",
"language": "en",
"last_classified_at": "2016-05-10T15:38:47Z",
"membership_degree": null,
"updated_at": "2016-05-10T15:38:47Z",
"tag_sets": [
],
"text": "Some inline text",
"metadata": [
null
]
},
{
"bundle_id": 778,
"bundle_name": "Here we go again",
"concordance": null,
"created_at": "2016-05-10T16:03:59Z",
"id": "3fdb62127e7a839e3f4e0ab6de7cd869",
"item_state": "processed",
"language": "en",
"last_classified_at": "2016-05-10T16:04:00Z",
"membership_degree": null,
"updated_at": "2016-05-10T16:04:01Z",
"tag_sets": [
],
"text": "Smartwatch cheats force Thai students back to exam halls - BBC News\\nSome 3,000 students in Thailand must retake university entrance exams after a cheating scam involving cameras and smartwatches was uncovered.The sophisticated scam happened at Rangsit University in Bangkok.The ...",
"metadata": [
null,
{
"name": "url-fetched",
"type": "date",
"content": "2016-05-10 16:03:59"
},
{
"name": "url",
"type": "url",
"content": "http://www.bbc.co.uk/news/world-asia-36253769"
}
]
}
]
EOF
req.response = json_item_show
end
# GET /items/:id — a single item by its string ID.
r.request name: 'Show' do |req|
req.description = 'Returns a single item'
req.call_type = :get
req.path = '/items/:id'
req.parameter name: 'id' do |p|
p.description = 'The ID of the item you want to show'
p.type = :string
p.required = true
end
#req.parameter api_key
req.parameter full_text
req.example = <<-EOF
curl 'https://api.ingeniapi.com/v2/items/61265a8b2e56ff9693753fd044630ed5?api_key=$api_key'
Response:
{
"bundle_id": 778,
"bundle_name": "Tech Startups",
"concordance": null,
"created_at": "2016-05-10T15:35:59Z",
"id": "61265a8b2e56ff9693753fd044630ed5",
"item_state": "processed",
"language": "en",
"last_classified_at": "2016-05-10T15:38:47Z",
"membership_degree": null,
"updated_at": "2016-05-10T15:38:47Z",
"tag_sets": [
],
"text": "Some inline text",
"metadata": [
null
]
}
EOF
req.response = json_item_show
end
# POST /items — create an item from inline text, a URL or an uploaded file.
r.request name: 'Create' do |req|
req.description = 'Creates a new item'
req.call_type = :post
req.path = '/items'
req.parameter json_item
req.parameter name: 'file' do |p|
p.description = 'File to be used as text source. Sent as multipart upload. Accepted file extensions are: Text (txt), Postscript Document Format (pdf) and Microsoft Office Documents (doc, docx, xlsx, ppt, pptx). [1]'
p.type = :multipart
end
req.parameter name: 'update_existing' do |p|
p.description = 'Choice of what to do if the item sent via a create call already exists on Ingenia, as determined by its item ID. If this field is true, the tags supplied will overwrite those on the existing item. If false, no data is modified and a response is returned with a 409 code (Conflict) together with the existing item as JSON.'
p.default = true
p.type = :boolean
end
req.parameter name: 'classify' do |p|
p.description = 'If true, the response will also include a classification'
p.default = false
p.type = :boolean
end
req.footnote =<<-FN
<p>[1] You can input content as one of these fields: text, a URL, a file. Formats
supported for files include txt, html, pdf and all MS Office formats. If you send a file, it will extract the text
from it.</p>
<p>The text and the URL are input as part of the JSON component. The file
is sent as a multipart encoded https field.</p>
FN
# Fourth example URL fixed: it read '/v2/items=$api_key', dropping the
# '?api_key' query-string key.
req.example = <<-EOF
# Simply post item's text
curl -X POST \\
-F'json={ "text" : "Some inline text" }' \\
'https://api.ingeniapi.com/v2/items?api_key=$api_key&classify=true'
# Create an item with some text and assign a tag ('foo') to it with a score of 0.2.
curl -X POST \\
-F'json={ "text" : "Some inline text" , "tags" : { "foo" : 0.2 } }' \\
'https://api.ingeniapi.com/v2/items?api_key=$api_key&classify=true'
# Create an item with some text, create a new tag set ('my tag set') and add
# a tag ('foo') with a score of 0.2 to that tag set..
curl -X POST \\
-F'json={ "text" : "Some inline text" , "tag_sets" : { "my tag set" : { "foo" : 0.2 } } }' \\
'https://api.ingeniapi.com/v2/items?api_key=$api_key&classify=true'
# Create an item with the tag ('foo')
curl -X POST \\
-F'json={ "text" : "Some inline text" , "tags" : [ "foo"] }' \\
'https://api.ingeniapi.com/v2/items?api_key=$api_key&classify=true'
# Post url to retrieve content from and create an item with that content
curl -X POST \\
-F'json={ "url" : "https://www.zdziarski.com/blog/?p=3875" }' \\
'https://api.ingeniapi.com/v2/items?api_key=$api_key'
# Post a file using multipart/form-data upload and create an item with that content
curl -X POST \\
-F'json={}' \\
-F'file=@article.txt' \\
'https://api.ingeniapi.com/v2/items?api_key=$api_key&classify=true&update_existing=true'
EOF
req.response = json_item
end
# PUT /items/:id — replace an item's content and/or tags.
r.request name: 'Update' do |req|
req.description = 'Update an existing item'
req.call_type = :put
req.path = '/items/:id'
req.parameter name: 'id' do |p|
p.description = 'The ID of the item you want to update'
p.type = :string
p.required = true
end
req.parameter json_item
req.parameter name: 'file' do |p|
p.description = 'File to be used as text source. Sent as multipart upload. Accepted file types are: Text (txt), Postscript Document Format (pdf), Microsoft Office Documents (doc, docx, xls, xlsx, ppt, pptx). [1]'
p.type = :multipart
end
req.footnote =<<-FN
<p>[1] You can input content as ONE of: text, a URL, a file (formats
supported include txt, html, pdf, all the MS Office formats). If you
send a URL, Ingenia will extract the most meaningful text from it,
e.g., ignoring links. If you send a file, it will extract the text
from it.</p>
<p>The text and the URL are input as part of the JSON component. The file
is sent as a multipart encoded https field.</p>
FN
# Example URL fixed: the closing single quote after $api_key was missing.
req.example = <<-EOF
curl -X PUT \\
-F'json={ "text" : "Some updated text" , "tags" : [ "foo"] }' \\
'https://api.ingeniapi.com/v2/items/61265a8b2e56ff9693753fd044630ed5?api_key=$api_key'
Response:
{
"bundle_id": 778,
"created_at": "2016-05-10T15:35:59Z",
"id": "61265a8b2e56ff9693753fd044630ed5",
"last_classified_at": "2016-05-10T16:54:56Z",
"updated_at": "2016-05-10T16:54:57Z",
"text": "Some updated text",
"tag_sets": [
{
"Technologia": {
"id": 2860,
"tags": [
{
"id": 189475,
"name": "foo",
"user_selected": "t",
"user_assigned": true,
"score": "0.0",
"machine_score": "0",
"rule_score": null,
"user_assigned_score": "0"
}
]
}
}
]
}
EOF
req.response = json_item
end
# DELETE /items/:id — remove an item; the response echoes the destroyed ID
# and the bundle it belonged to.
r.request name: 'Delete' do |req|
req.description = 'Delete an existing item'
req.call_type = :delete
req.path = '/items/:id'
req.parameter name: 'id' do |p|
p.description = 'The ID of the item you want to delete'
p.type = :string
p.required = true
end
req.example = <<-EOF
curl -X DELETE 'https://api.ingeniapi.com/v2/items/61265a8b2e56ff9693753fd044630ed5?api_key=$api_key'
Response:
{
"61265a8b2e56ff9693753fd044630ed5": "destroyed",
"bundle_id": 778
}
EOF
end
end
##
# Bundles
#
api.resource name: 'Bundles' do |r|
r.description = "Groups of thematically consistent items"
# GET /bundles — paginated list of all the caller's bundles.
r.request name: 'Index' do |req|
req.description = 'Returns a list of all your bundles'
req.call_type = :get
req.path = '/bundles'
req.parameter limit
req.parameter offset
# Example response fixed: it wrapped the array in '{ ... }', which is not
# valid JSON (an array cannot sit inside an object without a key).
req.example = <<-EOF
curl 'https://api.ingeniapi.com/v2/bundles?api_key=$api_key'
Response:
'[
{
"id":755,
"name":"New Bundle",
"tag_sets" : [
{ "name" : "technology", "id": 14562 },
{ "name" : "business", "id": 666 }
],
"created_at" : "2016-04-06T09:00:44Z",
"updated_at":"2016-04-06T09:00:44Z"
},
{
"id" : 756,
"name" : "Another Bundle",
"tag_sets" : [
{ "name" : "technology", "id": 14562 }
],
"created_at" : "2016-04-07T11:44:26Z",
"updated_at":"2016-04-07T11:44:26Z"
}
]'
EOF
req.response = json_bundle_show
end
# GET /bundles/:id — a single bundle with its associated tag sets.
r.request name: 'Show' do |req|
req.description = 'Returns a single bundle'
req.call_type = :get
req.path = '/bundles/:id'
req.parameter name: 'id' do |p|
p.description = 'The ID of the bundle you want to show'
p.type = :integer
p.required = true
end
# Example fixed: trailing comma after "updated_at" made the JSON invalid.
req.example = <<-EOF
curl 'https://api.ingeniapi.com/v2/bundles/47858?api_key=$api_key'
Response:
'{
"id" : 47858,
"name" : "Tech Startups",
"tag_sets" : [
{ "name" : "technology", "id": 14562 },
{ "name" : "business", "id": 666 }
],
"created_at" :"2014-03-13T15:36:51Z",
"updated_at" :"2014-03-13T15:36:51Z"
}'
EOF
req.response = json_bundle_show
end
# GET /bundles/find_by_name — exact-match lookup of a bundle by name.
r.request name: 'Find_by_name' do |req|
req.description = 'Looks for a bundle that matches exactly text input'
req.call_type = :get
req.path = '/bundles/find_by_name'
#req.parameter api_key
# Parameter renamed from 'text' to 'name' to match the example request
# below, which queries with '?name=...'.
req.parameter name: 'name' do |p|
p.description = 'Name of the bundle to look for'
p.type = :string
p.required = true
end
# Example fixed: trailing comma after "updated_at" made the JSON invalid.
req.example = <<-EOF
curl 'https://api.ingeniapi.com/v2/bundles/find_by_name?name=Tech%20Startups&api_key=$api_key'
Response:
'{
"id" : 47858,
"name" : "Tech Startups",
"tag_sets" : [
{ "name" : "technology", "id": 14562 },
{ "name" : "business", "id": 666 }
],
"created_at" :"2014-03-13T15:36:51Z",
"updated_at" :"2014-03-13T15:36:51Z"
}'
EOF
req.response = json_bundle_show
end
r.request name: 'Create' do |req|
req.description = 'Creates a new bundle'
req.call_type = :post
req.path = '/bundles'
req.parameter json_bundle
req.example = <<-EOF
curl -X POST \\
-F'json={ "name" : "New Bundle", "tag_set_ids" : [2820, 2819] }' \\
'https://api.ingeniapi.com/v2/bundles?api_key=$api_key'
Response:
'{
"id" : 47858,
"name" : "New Bundle",
"tag_sets" : [
{
"id" : 2820,
"name" : "Tag Set One"
},
{
"id : 2819,
"name : "Tag Set Two"
}
],
"created_at" :"2014-03-13T15:36:51Z",
"updated_at" :"2014-03-13T15:36:51Z"
}'
EOF
req.response = json_bundle
end
r.request name: 'Update' do |req|
req.description = 'Update an existing bundle'
req.call_type = :put
req.path = '/bundles/:id'
req.parameter name: 'id' do |p|
p.description = 'The ID of the bundle you want to update'
p.type = :integer
p.required = true
end
req.parameter json_bundle
req.example = <<-EOF
curl -X PUT \\
-F'json={ "name" : "New Bundle Updated" }' \\
'https://api.ingeniapi.com/v2/bundles/47858?api_key=$api_key'
Response:
'{
"id" : 47858,
"name" : "New Bundle Updated",
"tag_sets" : [
{
"id" : 2820,
"name" : "Tag Set One"
},
{
"id : 2819,
"name : "Tag Set Two"
}
],
"created_at" :"2016-04-06T09:00:44Z",
"updated_at" :"2016-04-06T09:00:44Z",
}'
EOF
req.response = json_bundle
end
r.request name: 'Delete' do |req|
req.description = 'Delete an existing bundle'
req.call_type = :delete
req.path = '/bundles/:id'
req.parameter name: 'id' do |p|
p.description = 'The ID of the bundle you want to delete'
p.type = :integer
p.required = true
req.example = <<-EOF
curl -X DELETE \\
'https://api.ingeniapi.com/v2/bundles/47858?api_key=$api_key'
Response:
'{
"47858" : "destroyed"
}'
EOF
end
end
end
##
# Tags
#
# Tags resource: CRUD plus merge and exact-name lookup for tags.
# Fix: the Delete example deleted tag 189454 but the sample response
# reported "189455"; the response now matches the request.
api.resource name: 'Tags' do |r|
  r.description = "Tags are meaningful words or expressions that you want to associate to some of your items"

  # GET /tags — paginated listing.
  r.request name: 'Index' do |req|
    req.description = 'List all your tags'
    req.call_type = :get
    req.path = '/tags'
    req.parameter limit
    req.parameter offset
    req.example = <<-EOF
curl 'https://api.ingeniapi.com/v2/tags?api_key=$api_key'
Response:
[
{
"confidence": 0.0,
"created_at": "2016-05-04T16:12:43Z",
"current_state": "not_enough_items_to_learn",
"description": "a term for data sets that are so large or complex that traditional data processing applications are inadequate",
"id": 189453,
"name": "Big Data",
"tag_set_id": 2858,
"updated_at": "2016-05-04T16:12:43Z"
},
{
"confidence": 0.0,
"created_at": "2016-05-04T16:08:05Z",
"current_state": "not_enough_items_to_learn",
"description": "the process of deriving high-quality information from text",
"id": 189452,
"name": "Text Analytics",
"tag_set_id": 2858,
"updated_at": "2016-05-04T16:08:05Z"
}
]
    EOF
    req.response = json_tag_show
  end

  # GET /tags/:id — single tag.
  r.request name: 'Show' do |req|
    req.description = 'View a single tag'
    req.call_type = :get
    req.path = '/tags/:id'
    req.parameter name: 'id' do |p|
      p.description = 'The ID of the tag you want to show'
      p.type = :integer
      p.required = true
    end
    req.example = <<-EOF
curl 'https://api.ingeniapi.com/v2/tags/189453?api_key=$api_key'
Response:
{
"confidence": 0.0,
"created_at": "2016-05-04T16:12:43Z",
"current_state": "not_enough_items_to_learn",
"description": "",
"id": 189453,
"name": "New Tag",
"tag_set_id": 2858,
"updated_at": "2016-05-04T16:12:43Z"
}
    EOF
    req.response = json_tag_show
  end

  # GET /tags/find_by_name — exact-name lookup.
  r.request name: 'Find_by_name' do |req|
    req.description = 'Looks for a tag that matches exactly text input'
    req.call_type = :get
    req.path = '/tags/find_by_name'
    req.parameter name: 'text' do |p|
      p.description = 'Text of the tag to look for'
      p.type = :string
      p.required = true
    end
    req.example = <<-EOF
curl 'https://api.ingeniapi.com/v2/tags/find_by_name?name=New%20Tag&api_key=$api_key'
Response:
{
"confidence": 0.0,
"created_at": "2016-05-04T16:12:43Z",
"current_state": "not_enough_items_to_learn",
"description": "",
"id": 189453,
"name": "New Tag",
"tag_set_id": 2858,
"updated_at": "2016-05-04T16:12:43Z"
}
    EOF
    req.response = json_tag_show
  end

  # POST /tags — create a new tag.
  r.request name: 'Create' do |req|
    req.description = 'Create a new tag'
    req.call_type = :post
    req.path = '/tags'
    req.parameter json_tag
    req.example = <<-EOF
curl -X POST \\
-F'json={ "tag_set_id" : 2858, "name" : "New Tag" }' \\
'https://api.ingeniapi.com/v2/tags?api_key=$api_key'
Response:
{
"confidence": 0.0,
"created_at": "2016-05-04T17:05:18Z",
"current_state": "unprocessed",
"description": "",
"id": 189455,
"name": "New Tag",
"tag_set_id": 2858,
"updated_at": "2016-05-04T17:05:18Z"
}
    EOF
    req.response = json_tag
  end

  # PUT /tags/:id — update an existing tag.
  r.request name: 'Update' do |req|
    req.description = 'Update an existing tag'
    req.call_type = :put
    req.path = '/tags/:id'
    req.parameter name: 'id' do |p|
      p.description = 'The ID of the tag you want to update'
      p.type = :integer
      p.required = true
    end
    req.parameter json_tag
    req.example = <<-EOF
curl -X PUT \\
-F'json={ "name" : "New Tag Updated" }' \\
'https://api.ingeniapi.com/v2/tags/189453?api_key=$api_key'
Response:
{
"confidence": 0.0,
"created_at": "2016-05-04T16:12:43Z",
"current_state": "unprocessed",
"description": "",
"id": 189453,
"name": "New Tag Updated",
"tag_set_id": 2858
}
    EOF
    req.response = json_tag
  end

  # POST /tags/:id/merge — fold other tags into this one.
  r.request name: 'Merge' do |req|
    req.description = 'Merge two or more existing tags'
    req.call_type = :post
    req.path = '/tags/:id/merge'
    req.parameter name: 'id' do |p|
      p.description = 'The ID of the tag into which you want to merge other tags; the resulting tag will have this name'
      p.type = :integer
      p.required = true
    end
    req.parameter name: 'tag_ids' do |p|
      p.description = 'A JSON encoded array of tag IDs that will be merged into the main tag'
      p.type = :array
      p.example = '[ 23, 43, 2113 ]'
      p.required = true
    end
    req.example = <<-EOF
curl -X POST 'https://api.ingeniapi.com/v2/tags/189454/merge?tag_ids=%5B189452%2C189453%5D&api_key=$api_key'
/*(Where:
'%5B' = '['
'%2C' = ','
'%5D' = ']'
for constructing array of IDs in url params)*/
Response:
{
"189454":"merged"
}
    EOF
  end

  # DELETE /tags/:id — remove a tag.
  r.request name: 'Delete' do |req|
    req.description = 'Delete an existing tag'
    req.call_type = :delete
    req.path = '/tags/:id'
    req.parameter name: 'id' do |p|
      p.description = 'The ID of the tag you want to delete'
      p.type = :integer
      p.required = true
    end
    req.example = <<-EOF
curl -X DELETE 'https://api.ingeniapi.com/v2/tags/189454?api_key=$api_key'
Response:
{
"189454" : "destroyed"
}
    EOF
  end
end
##
# Tag rules
#
# Tag rules resource: per-tag rules that bias whether a tag is applied.
# NOTE(review): paths use singular '/tag/:tag_id/...' while other resources
# use plural paths — confirm this matches the deployed routes.
api.resource name: 'Tag rules' do |resource|
  resource.description = "Tag rules are rules that you want to associate with a tag to influence the tag choice"

  # GET /tag/:tag_id/tag_rules — all rules attached to one tag.
  resource.request name: 'Index' do |request|
    request.description = 'List all your tag rules for a tag'
    request.call_type = :get
    request.path = '/tag/:tag_id/tag_rules'
    request.response = json_tag_rules_show
    request.example = <<-EOF
curl https://api.ingeniapi.com/v2/tag/5/tag_rules?api_key=$api_key
    EOF
    request.parameter name: 'tag_id' do |param|
      param.description = 'The ID of the tag to find its associated tag rules'
      param.type = :integer
      param.required = true
    end
  end

  # GET /tag/:tag_id/tag_rules/:id — one rule.
  resource.request name: 'Show' do |request|
    request.description = 'View a single tag rule'
    request.call_type = :get
    request.path = '/tag/:tag_id/tag_rules/:id'
    request.response = json_tag_rule_show
    request.example = <<-EOF
curl https://api.ingeniapi.com/v2/tag/5/tag_rules/6?api_key=$api_key
    EOF
    request.parameter name: 'id' do |param|
      param.description = 'The ID of the tag rule'
      param.type = :integer
      param.required = true
    end
    request.parameter name: 'tag_id' do |param|
      param.description = 'The ID of the tag'
      param.type = :integer
      param.required = true
    end
  end

  # POST /tag/:tag_id/tag_rules — attach a new rule to a tag.
  resource.request name: 'Create' do |request|
    request.description = 'Create a new tag rule'
    request.call_type = :post
    request.path = '/tag/:tag_id/tag_rules'
    request.parameter json_tag_rule_create
    request.example = <<-EOF
curl -X POST \\
-F'json={ "text": "tag_text", "influence" : 0.3, "language": "en", "tag_rule_mode": "word_present" }' \\
https://api.ingeniapi.com/v2/tag/5/tag_rules?api_key=$api_key
    EOF
    request.parameter name: 'tag_id' do |param|
      param.description = 'The ID of the tag'
      param.type = :integer
      param.required = true
    end
  end

  # DELETE /tag/:tag_id/tag_rules/:id — remove a rule.
  resource.request name: 'Delete' do |request|
    request.description = 'Delete an existing tag rule'
    request.call_type = :delete
    request.path = '/tag/:tag_id/tag_rules/:id'
    request.example = <<-EOF
curl -X DELETE \\
https://api.ingeniapi.com/v2/tag/5/tag_rules/6?api_key=$api_key
    EOF
    request.parameter name: 'id' do |param|
      param.description = 'The ID of the tag rule you want to delete'
      param.type = :integer
      param.required = true
    end
    request.parameter name: 'tag_id' do |param|
      param.description = 'The ID of the tag'
      param.type = :integer
      param.required = true
    end
  end
end
##
# Tag Sets
#
# Tag sets resource: CRUD, merge and exact-name lookup for tag sets.
# Fixes: Find_by_name's example/response were nested inside the `text`
# parameter block (misplaced `end`); the Show example embedded a literal
# API key instead of the $api_key placeholder; the Merge response example
# was invalid JSON.
api.resource name: 'Tag sets' do |r|
  r.description = "Tag sets are thematically consistent groups of tags defined by you, such as, say, world countries, business sectors, product types, companies, concepts, topics, etc"

  # GET /tag_sets — paginated listing.
  r.request name: 'Index' do |req|
    req.description = 'List all your tag sets'
    req.call_type = :get
    req.path = '/tag_sets'
    req.parameter limit
    req.parameter offset
    req.example = <<-EOF
# Simple request to fetch all tag sets
curl -s -q 'https://api.ingeniapi.com/v2/tag_sets?api_key=$api_key'
# ...and a bit more advanced example
curl -s -q 'https://api.ingeniapi.com/v2/tag_sets?limit=100&offset=100&bundle_id=42&api_key=$api_key'
Response:
'[
{
"created_at" : "2016-04-06T11:01:18Z",
"id" : 2820,
"name" : "Tag Set One",
"updated_at" : "2016-04-06T11:04:00Z"
},
{
"created_at" : "2016-04-06T09:00:44Z",
"id" : 2819,
"name" : "Tag Set Two",
"updated_at":"2016-04-06T09:00:44Z"
}
]'
    EOF
    req.response = json_tag_set_show
  end

  # GET /tag_sets/:id — single tag set.
  r.request name: 'Show' do |req|
    req.description = 'View a single tag set'
    req.call_type = :get
    req.path = '/tag_sets/:id'
    req.parameter name: 'id' do |p|
      p.description = 'The ID of the tag set you want to show'
      p.type = :integer
      p.required = true
    end
    req.example = <<-EOF
curl 'https://api.ingeniapi.com/v2/tag_sets/2820?api_key=$api_key'
Response:
'{
"created_at" : "2016-04-07T16:13:52Z",
"id" : 2822,
"name" : "Big Data",
"updated_at" : "2016-04-07T16:13:52Z"
}'
    EOF
    req.response = json_tag_set_show
  end

  # GET /tag_sets/find_by_name — exact-name lookup.
  r.request name: 'Find_by_name' do |req|
    req.description = 'Looks for a tag set that matches exactly text input'
    req.call_type = :get
    req.path = '/tag_sets/find_by_name'
    req.parameter name: 'text' do |p|
      p.description = 'Text of tag set to look for'
      p.type = :string
      p.required = true
    end
    # Example and response belong to the request, not the parameter block.
    req.example = <<-EOF
curl 'https://api.ingeniapi.com/v2/tag_sets/find_by_name?name=Big%20Data&api_key=$api_key'
Response:
'{
"created_at" : "2016-04-07T16:13:52Z",
"id" : 2822,
"name" : "Big Data",
"updated_at" : "2016-04-07T16:13:52Z"
}'
    EOF
    req.response = json_tag_set_show
  end

  # POST /tag_sets — create a new tag set.
  r.request name: 'Create' do |req|
    req.description = 'Create a new tag set'
    req.call_type = :post
    req.path = '/tag_sets'
    req.parameter json_tag_set
    req.example = <<-EOF
curl -s -X POST \\
-F'json={ "name" : "new tag s" }' \\
'https://api.ingeniapi.com/v2/tag_sets?api_key=$api_key'
Response:
'{
"created_at" : "2016-04-07T16:49:24Z",
"id" : 2823,
"name" : "new tag s",
"updated_at" : "2016-04-07T16:49:24Z"
}'
    EOF
    req.response = json_tag_set
  end

  # PUT /tag_sets/:id — update an existing tag set.
  r.request name: 'Update' do |req|
    req.description = 'Update an existing tag set'
    req.call_type = :put
    req.path = '/tag_sets/:id'
    req.parameter name: 'id' do |p|
      p.description = 'The ID of the tag set you want to update'
      p.type = :integer
      p.required = true
    end
    req.parameter json_tag_set
    req.example = <<-EOF
curl -s -X PUT \\
-F'json={ "name" : "Updated Tag Set Name" }' \\
'https://api.ingeniapi.com/v2/tag_sets/2823?api_key=$api_key'
Response:
'{
"created_at" : "2016-04-07T16:49:24Z",
"id" : 2823,
"name" : "Updated Tag Set Name",
"updated_at" : "2016-04-07T16:58:11Z"
}'
    EOF
    req.response = json_tag_set
  end

  # POST /tag_sets/:id/merge — fold other tag sets into this one.
  r.request name: 'Merge' do |req|
    req.description = 'Merge two or more existing tag sets'
    req.call_type = :post
    req.path = '/tag_sets/:id/merge'
    req.parameter name: 'id' do |p|
      p.description = 'The ID of the tag set into which you want to merge the other tag sets; the resulting tag set will have this name'
      p.type = :integer
      p.required = true
    end
    req.parameter name: 'tag_set_ids' do |p|
      p.description = 'JSON encoded array of tag set IDs to merge into main tag set'
      p.type = :array
      p.example = '[ 12, 34, 56 ]'
      p.required = true
    end
    req.example = <<-EOF
curl -X POST 'https://api.ingeniapi.com/v2/tag_sets/2824/merge?tag_set_ids=%5B2833%2C2832%5D&api_key=$api_key'
/*(Where:
'%5B' = '['
'%2C' = ','
'%5D' = ']'
for constructing array of IDs in url params)*/
Response:
'{
"tag_set_id" : 2824
}'
    EOF
  end

  # DELETE /tag_sets/:id — remove a tag set.
  r.request name: 'Delete' do |req|
    req.description = 'Delete an existing tag set'
    req.call_type = :delete
    req.path = '/tag_sets/:id'
    req.parameter name: 'id' do |p|
      p.description = 'The ID of the tag set you want to delete.'
      p.type = :integer
      p.required = true
    end
    req.example = <<-EOF
curl -X DELETE 'https://api.ingeniapi.com/v2/tag_sets/2824?api_key=$api_key'
Response:
'{
"2824" : "destroyed"
}'
    EOF
  end
end
# Text extraction resource: URL content scraping helpers.
# NOTE(review): neither request sets req.path — confirm the DSL's default.
api.resource name: 'Text extraction' do |resource|
  resource.description = "Returns stripped text for a given url"

  # POST — return the main article text extracted from a URL.
  resource.request name: 'Get stripped text' do |request|
    request.description = 'Returns stripped text for a given url'
    request.call_type = :post
    request.parameter uri
    request.example = <<-EOF
# Request to get stripped content for url
curl -X POST -H 'Content-Type: application/json' -d '{"url":{"uri":"https://techcrunch.com/2016/08/02/instagram-stories/"}}' http://content-service.ingeniapi.com/urls
Response:
'{
"url": {
"uri": "https://techcrunch.com/2016/08/02/instagram-stories/"
},
"title": "Instagram launches “Stories,” a Snapchatty feature for imperfect sharing",
"content": "People only post the highlights of their life on Instagram, so today the app adds its own version of “Stories” ...'
    EOF
  end

  # GET — fetch the raw, unstripped HTML of a URL.
  resource.request name: 'Get full html' do |request|
    request.description = 'Returns full html for a url'
    request.call_type = :get
    request.parameter url
    request.example = <<-EOF
# Request to get stripped content for url
curl 'https://techcrunch.com/2016/08/02/instagram-stories/'
Response:
<xmp>'<!DOCTYPE html>
<html xmlns="http://www.w3.org/1999/xhtml" xmlns:og="http://opengraphprotocol.org/schema/" xmlns:fb="http://www.facebook.com/2008/fbml" lang="en">
<head>
<title>Instagram launches “Stories,” a Snapchatty feature for imperfect sharing | TechCrunch</title>
...'</xmp>
    EOF
  end
end
##
# Administrative Calls
#
# Administrative calls: account status and full data reset.
# Fix: grammar in two field descriptions ("you have own" -> "you own").
api.resource name: 'Administrative calls' do |r|
  r.description = ""

  # GET /status — processing counters for the account; also a cheap way
  # to validate an API key. The "parameters" below document response fields.
  r.request name: 'Status' do |req|
    req.description = 'The status of your Ingenia account, indicating whether Ingenia has processed all your content; use this to test your API key, see [status call] for details'
    req.call_type = :get
    req.path = '/status'
    # Bundle counters.
    req.parameter name: 'total_bundles' do |p|
      p.description = 'Number of bundles you own'
      p.type = :integer
    end
    req.parameter name: 'processed_bundles' do |p|
      p.description = 'Number of bundles where all items have been processed'
      p.type = :integer
    end
    # Item counters.
    req.parameter name: 'total_items' do |p|
      p.description = 'Number of items you have created'
      p.type = :integer
    end
    req.parameter name: 'pending_items' do |p|
      p.description = 'Number of items Ingenia has not yet processed'
      p.type = :integer
    end
    req.parameter name: 'processed_items' do |p|
      p.description = 'Number of items Ingenia has processed'
      p.type = :integer
    end
    # Tag-set counters.
    req.parameter name: 'total_tag_sets' do |p|
      p.description = 'Number of tag sets you own'
      p.type = :integer
    end
    req.parameter name: 'processed_tag_sets' do |p|
      p.description = 'Number of tag sets Ingenia has processed'
      p.type = :integer
    end
    req.parameter name: 'pending_tag_sets' do |p|
      p.description = 'Number of tag sets ready to process, but which Ingenia has not yet processed'
      p.type = :integer
    end
    req.parameter name: 'untrained_tag_sets' do |p|
      p.description = 'Number of tag sets which do not have enough items to process'
      p.type = :integer
    end
    req.parameter name: 'idle_tag_sets' do |p|
      p.description = 'Number of tag sets that the user prefers to not be processed by Ingenia'
      p.type = :integer
    end
    # Tag counters.
    req.parameter name: 'total_tags' do |p|
      p.description = 'Number of tags you own'
      p.type = :integer
    end
    req.parameter name: 'processed_tags' do |p|
      p.description = 'Number of tags Ingenia has processed'
      p.type = :integer
    end
    req.parameter name: 'pending_tags' do |p|
      p.description = 'Number of tags Ingenia has not yet processed'
      p.type = :integer
    end
    req.parameter name: 'untrained_tags' do |p|
      p.description = 'Number of tags which are not assigned to items'
      p.type = :integer
    end
    req.parameter name: 'idle_tags' do |p|
      p.description = 'Number of tags that the user prefers to not be processed by Ingenia'
      p.type = :integer
    end
    req.parameter name: 'ready_to_classify' do |p|
      p.description = 'True if all tags assigned to items have been processed'
      p.type = :boolean
    end
    req.example = <<-EOF
curl -X GET 'https://api.ingeniapi.com/v2/status?api_key=$api_key'
Response:
{
"total_bundles": 17,
"processed_bundles": 1,
"total_items": 2,
"pending_items": 0,
"processed_items": 2,
"total_tag_sets": 2,
"pending_tag_sets": 0,
"processed_tag_sets": 0,
"untrained_tag_sets": 2,
"idle_tag_sets": 0,
"total_tags": 3,
"pending_tags": 0,
"processed_tags": 0,
"untrained_tags": 3,
"idle_tags": 0,
"ready_to_classify": true
}
    EOF
  end

  # POST /clear_data — destructive: wipes the whole account.
  r.request name: 'Clear_data' do |req|
    req.description = 'Delete all the data in your account; useful to restart from zero if the data was polluted'
    req.call_type = :post
    req.path = '/clear_data'
    req.example = <<-EOF
curl -X POST 'https://api.ingeniapi.com/v2/clear_data?api_key=$api_key'
Response:
{}
    EOF
  end
end
end
@api = api
api.to_json
# Add Cluster show
$: << '../../lib'
$LOAD_PATH << '.'
require 'bundler/setup'
require 'doc_smoosher'
extend DocSmoosher::TopLevel
# Shared fields
# Shared pagination parameter: maximum number of results to return.
limit = define_parameter(name: 'limit') do |param|
  param.description = 'Return these many results'
  param.type = :integer
  param.default = 10
end
# Shared pagination parameter: number of results to skip.
offset = define_parameter(name: 'offset') do |param|
  param.description = 'Offset the results I receive by this amount'
  param.type = :integer
  param.default = 0
end
# Shared authentication parameter: the caller's API key.
api_key = define_parameter(name: 'api_key') do |param|
  param.description = 'Use this API key'
  param.type = :string
  param.required = true
  param.example = "hg7JHG6daSgf56FjhgsSa"
end
# Shared flag: when true, responses include the full item text.
full_text = define_parameter(name: 'full_text') do |param|
  param.description = 'Show the results with all their text, however long'
  param.type = :boolean
  param.default = false
end
# Shared parameter: URL of the article to extract text from.
# NOTE(review): a default of false on a :string parameter looks odd —
# confirm whether this should be required instead.
uri = define_parameter(name: 'uri') do |param|
  param.description = 'The url of the article you want to extract'
  param.type = :string
  param.default = false
end
# Shared parameter: URL whose raw HTML should be returned.
# NOTE(review): a default of false on a :string parameter looks odd —
# confirm whether this should be required instead.
url = define_parameter(name: 'url') do |param|
  param.description = 'The url of the html you want to return'
  param.type = :string
  param.default = false
end
# Shared 'metadata' parameter: typed attributes attached to an item.
# Fixes: the example was missing the closing brace of the 'title' entry,
# and the description used invalid </br> markup.
metadata = define_parameter(name: 'metadata') do |m|
  m.description = <<-DESC
A list of attributes you can associate to the knowledge item.
Valid types of metadata are date, string, collection and number.<br/>
'date': which includes a time for when an event occurred<br/>
'string': General purpose content<br/>
'collection': One item from defined collection<br/>
'number': A numerical value
  DESC
  m.type = :array
  m.example = "[{ name: 'published_date', type: 'date', content: '2012-01-20 00:00:00' }, { name: 'title', type: 'string', content: 'A day to remember' }, { name: 'author', type: 'collection', content: 'Joe Bloggs' }, { name: 'author', type: 'collection', content: 'John Smith' }]"
end
##
# Introduction
#
# Landing-page copy shown at the top of the generated API documentation.
# Fix: the GitHub link used a malformed "httpss://" scheme (twice).
DESCRIPTION = <<-DESC
<p>Check out the <a href="/pages/demo"> demo </a> to see Ingenia in action.</p>
<p>Look at the <a href="/faq">FAQ</a> for any questions.</p>
<p> Go through the documentation and choose if you want to use Ingenia by the API or with the Ruby gem. </p>
<p> <a href="/contact"> Contact us </a> to get your API key or if you have any questions.</p>
<p>If you would like to verify your API key or code data path then use the <a href="#call-administrative-calls-status">status</a> call.</p>
<h3 id='api-libraries'>Ruby API library</h3>
<a href="https://github.com/ingenia-api/ingenia_ruby">https://github.com/ingenia-api/ingenia_ruby</a>
<h3 id='api-rate-limiting'>Rate limiting</h3>
<p>Ingenia by default limits a user to 4 calls per second, for every type of API call. Contact us to have this limit increased or removed if needed. </p>
DESC
# Response schema for similarity calls: related items ranked by similarity.
# Fix: the example misspelled the "similarity" key as "similariy".
json_similarity_response = define_object(name: 'Similarity response') do |sr|
  sr.description = "An array of items that are related to an origin item sent via a similarity API call"
  sr.parameter name: 'id' do |p|
    p.description = 'The ID of the origin item'
    p.type = :string
  end
  sr.parameter name: 'text' do |p|
    p.description = 'First 50 characters of the text of each related item'
    p.type = :string
  end
  sr.parameter name: 'mode' do |p|
    p.description = 'If \'tag\', it will determine related items on the basis of their tags; if \'word\', it will do so on the basis of the words contained in the item'
    p.type = :string
    p.default = 'tag'
  end
  sr.parameter name: 'similarity' do |p|
    p.description = 'From 0 to 1, it measures how similar each related item is to the origin item; the response will sort items on descending similarity'
    p.type = :float
  end
  sr.example = '
{
[
{ "item": { "id":12182, "text": "The fall in the rand has given wealthy Russians a new location to search for luxury..." }, "mode": "tag", "similarity": 0.62 },
{ "item": { "id":9293, "text": "Robots tend to do jobs that no one wants to do. I am old enough to remember..." }, "mode": "tag", "similarity": 0.55 },
{ "item": { "id":25333, "text": "The market for RMB credit raised outside China has gone four weeks without a..." }, "mode": "word", "similarity": 0.22 }
]
}'
end
# bundle
# JSON input object accepted by bundle create/update calls.
json_bundle = define_object(name: 'Bundle: create / update input') do |b|
  b.description = "A collection of items related to each other"
  b.type = :json
  b.required = true
  b.parameter name: 'name' do |param|
    param.description = 'The name of your bundle'
    param.type = :string
  end
  b.this_is_json!
  b.parameter name: 'tag_set_ids' do |param|
    param.description =<<-DESC
An array of tag set IDs to be applied to this bundle. The tags in these tag sets will be available to the items in the bundle.
If an existing bundle already has tag sets, then these can be removed by omitting the ID in the call.
    DESC
    param.type = :array
  end
  # NOTE(review): the example shows a "tag_sets" key while the documented
  # input field is "tag_set_ids" — confirm which shape the API accepts.
  b.example = '
{
"name":"Tech Startups",
"tag_sets": [
{
"id" : 2820,
"name" : "Tag Set One"
},
{
"id" : 2819,
"name" : "Tag Set Two"
}
]
}'
end
# Output schema for bundle show/index responses.
# Fix: the example JSON had a trailing comma after "updated_at",
# which is invalid JSON.
json_bundle_show = define_object(name: 'Bundle: show output') do |bundle|
  bundle.description = "A collection of items related to each other"
  bundle.parameter name: 'id' do |p|
    p.description = 'A unique numeric id generated by Ingenia'
    p.default = '[generated]'
    p.type = :numeric
  end
  bundle.parameter name: 'name' do |p|
    p.description = 'The name of your bundle'
    p.type = :string
  end
  bundle.parameter name: 'tag_sets' do |ts|
    ts.description = 'The tag sets that are currently attached to this bundle. Items within the bundle can use all the tags in these tag sets.'
    ts.type = :array
  end
  bundle.parameter name: 'created_at' do |p|
    p.description = 'When this bundle was created'
    p.type = :date_time
    p.example = '2013-12-16T11:24:52+00:00'
  end
  # NOTE: updated_at is internal housekeeping; we should probably not
  # show this to the user.
  bundle.parameter name: 'updated_at' do |p|
    p.description = 'When this bundle was last updated'
    p.type = :date_time
    p.example = '2013-12-16T11:25:52+00:00'
  end
  bundle.example = '
{
"id":47858,
"name":"Tech Startups",
"tag_sets": [
{ "name": "technology", "id": 14562 },
{ "name": "business", "id": 666 }
],
"created_at":"2014-03-13T15:36:51Z",
"updated_at":"2014-03-13T15:36:51Z"
}'
end
# Envelope common to every API gateway response.
json_basic_response = define_object(name: 'Basic response format') do |resp|
  resp.description = "All responses from the API gateway have the following format"
  # API version string.
  resp.parameter name: 'version' do |param|
    param.description = 'The version of the API that is responding'
    param.type = :string
    param.example = '"2.0"'
  end
  # Call-specific payload.
  resp.parameter name: 'data' do |param|
    param.description = 'The data payload response from the call'
    param.type = :object
  end
  # Success / failure indicator.
  resp.parameter name: 'status' do |param|
    param.description = '"okay" if the call is processed correctly, otherwise it will be "error"'
    param.type = :string
  end
  # Human-readable error detail, present on errors.
  resp.parameter name: 'message' do |param|
    param.description = 'A message describing the nature of the error, returned if an error occurred'
    param.type = :string
  end
end
# Item JSON POST form
# JSON input object accepted by item create/update calls.
# Fixes: stray quote at the end of the tags-array example; "sass" typo
# for "saas" in the scored-tags example.
json_item = define_object(name: 'Item: create / update input') do |item|
  item.description = "An item is a block of text to which you can associate tags, that belongs to a bundle"
  item.type = :json
  item.required = true
  item.this_is_json!
  item.parameter name: 'id' do |p|
    p.description = 'An alphanumeric id unique to each bundle. You can use your own, or have Ingenia generate one for you'
    p.default = '[generated]'
    p.type = :string
    p.example = '785uU423aC'
  end
  item.parameter name: 'text' do |p|
    p.description = 'Your item\'s content. [1]'
    p.type = :string
  end
  item.parameter name: 'url' do |p|
    p.description = 'Source URL to get text from. Ingenia will extract the most relevant text [1]'
    p.type = :string
    p.example = 'https://www.example.com'
  end
  item.parameter name: 'bundle_id' do |p|
    p.description = 'ID of the bundle in which to put the item'
    p.type = :integer
    p.default = '[user\'s first bundle]'
  end
  item.parameter name: 'tag_sets' do |p|
    p.description = "A hash of tag sets, each of which is an array of tags that you consider of the same type [2]"
    p.type = :hash
    p.example = '{ "topics": [ "startups", "saas", "marketing" ], "geography": [ "United Kingdom", "Italy" ] }'
  end
  # NOTE(review): two parameters are registered under the same name 'tags'
  # (array form and scored-hash form) — confirm the DSL renders both.
  item.parameter name: 'tags' do |p|
    p.description = "An array with the name of the tags you wish to assign to this item. If the tag doesn\'t exist, it will be created [2]."
    p.type = :array
    p.example = <<-EOF
[ "startups", "saas", "marketing" ]
    EOF
  end
  item.parameter name: 'tags' do |p|
    p.description = "As above, but with a user-assigned score. The score should be a number between 0 and 1 that quantifies the strength of the association between the item and the tag (1: highest) [2]."
    p.type = :hash
    p.example = <<-EOF
{ "startups" : 0.2 , "saas" : 0.7, "marketing" : 1 }
    EOF
  end
  item.parameter name: 'tag_ids' do |p|
    p.description = "The Ingenia IDs of the tags you wish to assign to this item [2]"
    p.type = :array
    p.example = '[ 45, 787, 23 ]'
  end
  item.example = <<-EXAMPLE
{
text: "High tech startups and their positive power to change for good",
tag_sets: {
"Topics": [ "startups", "technology" ],
"Mood": [ "positive" ]
}
}
  EXAMPLE
  item.parameter metadata
  item.footnote =<<-FN
<p>[1] You can input content as one of these fields: text, a URL, a file. Formats
supported for files include txt, html, pdf and all MS Office formats. If you send a file, it will extract the text
from it.</p>
<p>The text and the URL are input as part of the JSON component. The file
is sent as a multipart encoded https field.</p>
<p>[2] Only specify one of the following: tag_sets, tags or tag_ids </p>
  FN
end
# Item JSON get form
# Output schema for item show/index responses.
# Fixes: "conent" typo in the language description; example JSON had an
# unquoted datetime value and a missing comma after the tag_sets array.
json_item_show = define_object(name: 'Item: show output') do |item|
  item.parameter name: 'bundle_id' do |p|
    p.description = 'The id of the bundle that this item belongs to'
    p.type = :numeric
  end
  item.parameter name: 'bundle_name' do |p|
    p.description = 'The name of the bundle that this item belongs to'
    p.type = :string
  end
  item.parameter name: 'concordance' do |p|
    p.description = 'The extent to which the user and Ingenia agree on the categorisation of the item. 0 if the tags are different, 1 if they are identical. Use this to identify content that may need to be reviewed'
    p.type = :float
  end
  item.parameter name: 'id' do |p|
    p.description = 'A unique alphanumeric id'
    p.type = :string
    p.example = '785uU423aC'
  end
  item.parameter name: 'item_state' do |p|
    p.description = 'The current state of the item'
    p.type = :string
  end
  item.parameter name: 'language' do |p|
    p.description = 'The language of the content in this item'
    p.type = :string
  end
  item.parameter name: 'text' do |p|
    p.description = 'Your item\'s content'
    p.type = :string
  end
  item.parameter name: 'created_at' do |p|
    p.description = 'When this item was created'
    p.type = :date_time
    p.example = '2013-12-16T11:24:52+00:00'
  end
  # NOTE: updated_at is internal housekeeping; we should probably not
  # show this to the user.
  item.parameter name: 'updated_at' do |p|
    p.description = 'When this item was last updated'
    p.type = :date_time
    p.example = '2013-12-16T11:25:52+00:00'
  end
  item.parameter name: 'last_classified_at' do |p|
    p.description = 'When this item was last classified by the system; null if it hasn\'t been classified yet'
    p.type = :date_time
    p.example = '2013-12-16T11:25:52+00:00'
  end
  item.parameter name: 'tag_sets' do |p|
    p.description = 'An array of tag sets associated to the item'
    p.type = :array
  end
  item.parameter name: 'tag_set' do |p|
    p.description = 'A hash containing the tag set id and the array of tags associated to the item'
    p.type = :hash
  end
  item.parameter name: 'tag' do |p|
    p.description = 'A hash with the details of a tag associated to the item, including its id, name, user assigned score and user_selected'
    p.type = :hash
  end
  item.parameter name: 'score' do |p|
    p.description = 'An aggregation of the machine and rule scores, between 0 (lowest) and 1 (highest).'
    p.type = :numeric
  end
  item.parameter name: 'user_selected' do |p|
    p.description = 'Deprecated: please use user_assigned value, this will be removed in the next release'
    p.type = :string
  end
  item.parameter name: 'user_assigned' do |p|
    p.description = 'true if the tag was assigned to the item by the user, false if it was assigned by Ingenia'
    p.type = :boolean
  end
  item.parameter name: 'user_assigned_score' do |p|
    p.description = 'score assigned by the user when tag was created'
    p.type = :float
  end
  item.parameter name: 'machine_score' do |p|
    p.description = 'A number which quantifies the strength of the association between an item and a tag, between 0 (lowest) and 1 (highest)'
    p.type = :numeric
  end
  item.parameter name: 'rule_score' do |p|
    p.description = 'A number which quantifies the strength of the association between an item and a tag score, between -1 (lowest) and 1 (highest)'
    p.type = :numeric
  end
  item.parameter name: 'membership_degree' do |p|
    p.description = 'the degree to which this item is a member of its bundle'
    p.type = :float
  end
  item.parameter name: 'metadata' do |p|
    p.description = 'any additional data you associated to this content; it may include dates, values, urls, additional text, etc.'
    p.type = :array
  end
  item.example = '
{
"id":"e19e134d0e79153349ff78a674283e0b",
"last_classified_at":"2013-12-16T11:25:07+00:00",
"text":"How to get to scale with a saas startup in the UK? ...",
"tag_sets":
[
{
"topics":
{
"id":156,
"tags":
[
{
"id":4352,
"name":"startups",
"user_selected": "f",
"user_assigned": false,
"score":"0.8",
"machine_score":"0.45",
"rule_score": "0.35",
"user_assigned_score": null
},
{
"id": 7811,
"name": "saas",
"user_selected": "t",
"user_assigned": true,
"score": "0.45",
"machine_score":"0.45",
"rule_score": null,
"user_assigned_score": 0.7
},
{
"id":1327,
"name":"marketing",
"user_selected": "t",
"user_assigned": true,
"score": "0.50",
"machine_score":"0.45",
"rule_score": "0.05",
"user_assigned_score": 0.7
}
]
}
},
{
"geography":
{
"id":622,
"tags":
[
{
"id":3321,
"name":"united kingdom",
"score":"0.37",
"user_selected": "t",
"user_assigned": true
}
]
}
}
],
"created_at":"2013-12-16T11:24:52+00:00",
"updated_at":"2013-12-16T11:24:56+00:00"
}'
end
# Tag JSON POST form
# JSON input object accepted by tag create/update calls.
json_tag = define_object(name: 'Tag: create / update input') do |t|
  t.description = "Something you want to associate to an item, e.g., a concept, topic, tone, sentiment, keyword, person, company, product, etc."
  t.type = :json
  t.this_is_json!
  t.required = true
  t.parameter name: 'name' do |param|
    param.description = 'The name of your tag; we advise to make it short but meaningful; unique to each tag set'
    param.type = :string
    param.required = true
  end
  t.parameter name: 'tag_set_id' do |param|
    param.description = 'The ID of the tag_set to which this tag belongs'
    param.type = :integer
    param.required = true
  end
  t.parameter name: 'description' do |param|
    param.description = "A description of this tag: this is helpful to define in a focused way how the tag should be used"
    param.type = :string
  end
  # Precision/recall trade-off knob; see the description for guidance.
  t.parameter name: 'disposition' do |param|
    param.description = "The disposition of the tag. Float value between 0 and 1, defaults to 0.5. Lower values will tend to privilege precision (we suggest 0.25); higher values will tend to privilege recall (we suggest 0.75). For most uses, the default value will work well.
You will want to privilege precision (with a disposition < 0.5) if you want each tag assignment to be accurate, and are less worried about some items being missed, i.e., you prefer to have false negatives than false positives. If the disposition is 0, no item will be tagged with this tag.
You will want to privilege recall (with a disposition > 0.5) if you want each tag assignment to occur, and are less worried about some items being tagged incorrectly, i.e., you prefer to have false positives than false negatives. If the disposition is 1, all items will be tagged with this tag."
    param.type = :float
    param.default = 0.5
  end
  t.example = '
{
"name":"Text Analytics",
"tag_set_id":37874,
"description":"A set of techniques designed to extract valuable information from textual content",
"disposition": 0.5
}'
end
# Output schema for tag show/index responses.
json_tag_show = define_object(name: 'Tag: show output') do |t|
  t.description = "Something you want to associate to an item, e.g., a concept, topic, tone, sentiment, keyword, person, company, product, etc."
  t.parameter name: 'id' do |param|
    param.description = 'A unique numeric id, generated by Ingenia'
    param.type = :numeric
  end
  t.parameter name: 'name' do |param|
    param.description = 'The name of your tag'
    param.type = :string
  end
  t.parameter name: 'tag_set_id' do |param|
    param.description = 'The ID of the tag_set to which this tag belongs'
    param.type = :integer
  end
  t.parameter name: 'confidence' do |param|
    param.description = "From 0 to 1; confidence gets closer to 1 the more Ingenia considers the training for this tag sufficient; if this value is low, we advise to increase your training set for this tag"
    param.type = :float
  end
  t.parameter name: 'description' do |param|
    param.description = "A description of this tag"
    param.type = :string
  end
  t.parameter name: 'created_at' do |param|
    param.description = 'When this tag was created'
    param.type = :date_time
    param.example = '2013-12-16T11:24:52+00:00'
  end
  # NOTE: updated_at is internal housekeeping; we should probably not
  # show this to the user.
  t.parameter name: 'updated_at' do |param|
    param.description = 'When this tag was last updated'
    param.type = :date_time
    param.example = '2013-12-16T11:25:52+00:00'
  end
  t.example = '
{
"id":554273,
"name":"Text Analytics",
"tag_set_id":8547,
"confidence":0.95,
"description":"the process of deriving high-quality information from text",
"created_at":"2014-03-13T12:59:32Z",
"updated_at":"2014-03-13T12:59:32Z"
}'
end
# Output object for the tag-rules index endpoint: a tag plus the array of
# rules currently attached to it.
json_tag_rules_show = define_object(name: 'Tag rules: index output') do |tag_rule|
tag_rule.description = "A list of rules applied to a tag to influence whether or not to apply the tag to an item."
tag_rule.parameter name: 'tag:id' do |p|
p.description = 'The ID of the tag to which this tag rule belongs'
p.type = :integer
end
tag_rule.parameter name: 'tag:name' do |p|
p.description = 'The name of the tag to which this tag rule belongs'
p.type = :string
end
tag_rule.parameter name: 'tag_rules:(array):id' do |p|
p.description = 'A unique numeric id, generated by Ingenia'
p.type = :numeric
end
tag_rule.parameter name: 'tag_rules:(array):text' do |p|
p.description = 'The word or phrase to which the rule should apply.'
p.type = :string
end
tag_rule.parameter name: 'tag_rules:(array):language' do |p|
p.description = 'The language of the word you\'ve entered.'
p.type = :string
end
tag_rule.parameter name: 'tag_rules:(array):influence' do |p|
p.description = 'A number from -1 to 1, it indicates the "strength" of the rule.'
p.type = :float
end
tag_rule.parameter name: 'tag_rules:(array):tag_rule_mode' do |p|
p.description = 'The tag rule mode used. Options are; word_present, word_absent, word_skipped or word_capped'
# Fixed: the mode is one of the words listed above, not a number, so its
# type is :string (consistent with the single tag-rule show output).
p.type = :string
end
tag_rule.parameter name: 'tag_rules:(array):created_at' do |p|
p.description = 'When this tag rule was created'
p.type = :date_time
p.example = '2013-12-16T11:24:52+00:00'
end
tag_rule.parameter name: 'tag_rules:(array):updated_at' do |p|
p.description = 'When this tag rule was last updated'
p.type = :date_time
p.example = '2013-12-16T11:25:52+00:00'
end
# Fixed example JSON: restored the missing quote before "id", dropped the
# trailing comma after "Text Analytics", and renamed the "tag_rule" key to
# "tag_rules" to match the parameter names documented above.
tag_rule.example = '
{
"tag": {
"id":554273,
"name":"Text Analytics"
},
"tag_rules": [{
"text": "data",
"influence": 0.4,
"language": "en",
"tag_rule_mode":"word_present",
"created_at":"2014-03-13T12:59:32Z",
"updated_at":"2014-03-13T12:59:32Z"
},
...
]
}'
end
# Output object for showing a single tag rule together with its parent tag.
json_tag_rule_show = define_object(name: 'Tag Rule: show output') do |tag_rule|
tag_rule.description = "A rule applied to a tag to influence whether or not to apply the tag to an item."
tag_rule.parameter name: 'tag:id' do |p|
p.description = 'The ID of the tag to which this tag rule belongs'
p.type = :integer
end
tag_rule.parameter name: 'tag:name' do |p|
p.description = 'The name of the tag to which this tag rule belongs'
p.type = :string
end
tag_rule.parameter name: 'tag_rule:id' do |p|
p.description = 'A unique numeric id, generated by Ingenia'
p.type = :numeric
end
tag_rule.parameter name: 'tag_rule:text' do |p|
p.description = 'The word or phrase to which the rule should apply.'
p.type = :string
end
tag_rule.parameter name: 'tag_rule:language' do |p|
p.description = 'The language of the word you\'ve entered.'
p.type = :string
end
tag_rule.parameter name: 'tag_rule:influence' do |p|
p.description = 'A number from -1 to 1, it indicates the "strength" of the rule.'
p.type = :float
end
tag_rule.parameter name: 'tag_rule:tag_rule_mode' do |p|
p.description = 'The tag rule mode used. Options are; word_present, word_absent, word_skipped or word_capped'
p.type = :string
end
tag_rule.parameter name: 'tag_rule:created_at' do |p|
p.description = 'When this tag rule was created'
p.type = :date_time
p.example = '2013-12-16T11:24:52+00:00'
end
tag_rule.parameter name: 'tag_rule:updated_at' do |p|
p.description = 'When this tag rule was last updated'
p.type = :date_time
p.example = '2013-12-16T11:25:52+00:00'
end
# Fixed example JSON: restored the missing quote before "id" and dropped the
# trailing comma after "Text Analytics".
tag_rule.example = '
{
"tag": {
"id":554273,
"name":"Text Analytics"
},
"tag_rule": {
"text": "data",
"influence": 0.4,
"language": "en",
"tag_rule_mode":"word_present",
"created_at":"2014-03-13T12:59:32Z",
"updated_at":"2014-03-13T12:59:32Z"
}
}'
end
# Input object accepted when POSTing a new tag rule.
json_tag_rule_create = define_object(name: 'Tag rule: create input') do |rule|
rule.description = "A rule to apply to a tag to influence use of that tag."
rule.type = :json
rule.this_is_json!
rule.parameter name: 'text' do |param|
param.description = 'The word or phrase to which the rule should apply.'
param.type = :string
end
rule.parameter name: 'language' do |param|
param.description = 'The language of the word you\'ve entered.'
param.type = :string
end
rule.parameter name: 'influence' do |param|
param.description = 'A number from -1 to 1, it indicates the "strength" of the rule.'
param.type = :float
end
rule.parameter name: 'tag_rule_mode' do |param|
param.description = 'The tag rule mode used. Options are; word_present, word_absent, word_skipped or word_capped'
param.type = :string
end
rule.example = '
{
"text":"ruby",
"influence":0.5,
"language":"en",
"tag_rule_mode":"word_present"
}'
end
# Input object accepted when creating or updating a tag set.
json_tag_set = define_object(name: 'Tag set: create / update input') do |ts|
ts.description = "A collection of thematically consistent tags"
ts.type = :json
ts.required = true
ts.this_is_json!
ts.parameter name: 'name' do |param|
param.description = 'The name of your tag set; we advise to make it short but meaningful; must be unique'
param.type = :string
end
ts.example = '
{
"name":"Big Data"
}'
end
# Output object describing a single tag set as returned by the API.
json_tag_set_show = define_object(name: 'Tag set: show output') do |ts|
ts.description = "A collection of thematically consistent tags"
ts.parameter name: 'id' do |param|
param.description = 'A unique numeric id, generated by Ingenia'
param.type = :numeric
end
ts.parameter name: 'name' do |param|
param.description = 'The name of your tag set'
param.type = :string
end
ts.parameter name: 'created_at' do |param|
param.description = 'When this tag set was created'
param.type = :date_time
param.example = '2013-12-16T11:24:52+00:00'
end
# NOTE(review): updated_at may not need to be exposed to API consumers.
ts.parameter name: 'updated_at' do |param|
param.description = 'When this tag set was last updated'
param.type = :date_time
param.example = '2013-12-16T11:25:52+00:00'
end
ts.example = '
{
"id":178751,
"name":"Big Data",
"created_at":"2014-03-12T12:17:33Z",
"updated_at":"2014-03-12T12:17:33Z"
}'
end
# Classifications
# Example-only object: it declares no parameters, just a sample payload for
# the classification response (per-tag-set tag scores inside the standard
# api_version/status/data envelope).
json_classify = define_object(name: 'Classifications') do |classify|
classify.example = '
{
"api_version": "2.0",
"status": "okay",
"data": {
"classification_status": "complete",
"results": {
"Software": {
"id": 6,
"tags": [
{
"id": 31,
"name": "php",
"score": 0.655
},
{
"id": 90,
"name": "php-session",
"score": 0.315
},
{
"id": 158,
"name": "pass-by-reference",
"score": 0.262
},
{
"id": 160,
"name": "debugging",
"score": 0.24
},
{
"id": 159,
"name": "pass-by-value",
"score": 0.198
},
{
"id": 63,
"name": "apache",
"score": 0.132
}
]
}
}
}
}
'
end
define_api(name: 'Ingenia API', description: DESCRIPTION) do |api|
api.endpoint = 'api.ingeniapi.com/v2/'
api.version = '2.0'
api.format = 'json'
api.object json_basic_response
api.object json_item
api.object json_item_show
api.object json_bundle
api.object json_bundle_show
api.object json_tag
api.object json_tag_show
api.object json_tag_set
api.object json_tag_set_show
api.object json_similarity_response
# POST /classify — classify inline text, a URL, or an uploaded file against
# the user's tags.
api.resource name: 'Classifications' do |r|
r.description = ""
r.request name: 'Classify' do |req|
req.description = ''
req.call_type = :post
req.path = '/classify'
# Content source parameters: one of text / url / file (see footnote [1]).
req.parameter name: 'text' do |p|
p.description = 'The text you want Ingenia to classify [1]'
p.type = :string
p.example = 'A comparative study of European secondary education systems illustrated issues related to their budgetary sustainability...'
end
req.parameter name: 'url' do |p|
p.description = 'The source URL from which to extract the text to be classified; Ingenia will extract the most relevant text [1]'
p.type = :string
p.example = 'https://www.example.com'
end
req.parameter name: 'file' do |p|
p.description = 'File to be used as text source. Sent as multipart upload. Accepted file extensions are: Text (txt), Postscript Document Format (pdf) and Microsoft Office Documents (doc, docx, xlsx, ppt, pptx). [1]'
p.type = :multipart
p.example = 'document.pdf'
end
req.parameter name: 'bundle_id' do |p|
p.description = 'ID of the bundle to which the item belongs'
p.type = :integer
p.default = '[user\'s first bundle]'
end
# Bounds on how many tags the classifier returns.
# NOTE(review): "these many tags" reads awkwardly ("this many tags"?) —
# user-facing doc text, confirm wording before changing.
req.parameter name: 'min_tags' do |p|
p.description = 'Return at least these many tags'
p.type = :integer
p.default = 0
end
req.parameter name: 'max_tags' do |p|
p.description = 'Return at most these many tags'
p.type = :integer
p.default = 6
end
req.example = <<-EOF
curl -X POST 'https://api.ingeniapi.com/v2/classify?text=A%20comparative%20study%20of%20European%20secondary%20education%20systems%20illustrated%20issues%20related%20to%20their%20budgetary%20sustainability&api_key=$api_key'
Response:
{
"classification_status": "complete",
"text": "A comparative study of European secondary education systems illustrated issues related to their budgetary sustainability",
"results": {
"Themes": {
"tags": [
{
"machine_score": -0.15,
"name": "Ed-tech",
"id": 174857,
"rule_score": 0.21,
"score": 0.06
}
],
"id": "1627"
}
}
}
EOF
req.footnote = <<-FN
<p>[1] You can input content as one of these fields: text, a URL, a file. Formats
supported for files include txt, html, pdf and all MS Office formats. If you send a file, it will extract the text
from it.</p>
<p>The text and the URL are input as part of the JSON component. The file
is sent as a multipart encoded https field.</p>
FN
end
end
##
# Personalisation
#
api.resource name: 'Personalisation' do |r|
r.description = ""
# r.request name: 'Similar to' do |req|
# req.description = ''
# req.call_type = :get
# req.path = '/items/:id/similar_to'
#
# req.parameter name: 'id' do |p|
# p.description = 'ID of item for which we want other similar items'
# p.type = :string
# p.required = true
# end
#
# req.parameter limit
#
# req.parameter full_text
#
# req.parameter name: 'mode' do |p|
# p.description = 'Constrain matches to base similarity on just "tag", just "word", or "auto" (first tags, then words)'
# p.type = :string
# p.example = 'mode=tag'
# p.default = 'auto'
# end
#
# req.parameter name: 'metadata_filters' do |p|
# p.description = 'Instruct ingenia to only consider knowledge items which match these criteria'
# p.type = :string
# p.example = 'metadata_filters[author]=Joe%20Bloggs'
# end
#
# req.parameter name: 'item_filters' do |p|
# p.description = 'Instruct ingenia to only consider knowledge items which were created within specific dates. Dates are inclusive.'
# p.type = :string
# p.example = 'item_filters[from]=2014-12-25&item_filters[to]=2014-12-30'
# end
#
# req.response = json_similarity_response
# end
# GET /items/:id/similar_to — items similar to an existing stored item.
r.request name: 'Similar to' do |req|
req.description = ''
req.call_type = :get
req.path = '/items/:id/similar_to'
req.parameter name: 'id' do |p|
p.description = 'ID of item to get similar items to'
p.type = :string
p.required = true
end
req.parameter name: 'bundle_id' do |p|
p.description = 'Tell ingenia which bundle this item is in. If this parameter is omitted, ingenia will only look for the item in the default bundle'
p.type = :integer
p.example = '77'
end
req.parameter name: 'bundle_ids' do |p|
p.description = 'Restrict your search to one or more bundles. If this parameter is omitted, all bundles will be scanned'
p.type = :array
p.example = '1,4,77'
end
req.parameter name: 'limit' do |p|
p.description = 'The number of items to return, the maximum is 100.'
p.type = :integer
p.example = '15'
end
# full_text is a shared parameter object defined elsewhere in this file.
req.parameter full_text
req.parameter name: 'mode' do |p|
p.description = 'Constrain matches to base similarity on just "tag", just "word", or "auto" (first tags, then words)'
p.type = :string
p.example = 'mode=tag'
p.default = 'auto'
end
req.parameter name: 'metadata_filters' do |p|
p.description = 'Instruct ingenia to only consider knowledge items which match these criteria'
p.type = :string
p.example = 'metadata_filters[author]=Joe%20Bloggs'
end
req.parameter name: 'item_filters' do |p|
p.description = 'Instruct ingenia to only consider knowledge items which were created within specific dates. Dates are inclusive.'
p.type = :string
p.example = 'item_filters[from]=2014-12-25&item_filters[to]=2014-12-30'
end
req.example = <<-EOF
curl -X GET 'https://api.ingeniapi.com/v2/items/ID423455-12-1432321250/similar_to?limit=3&api_key=$api_key'
Response:
[
{
"item": {
"id": "ID1959443-12-1458267383",
"text": "\n So it’s been a little over a year since GitHub fired me.\nI initially made a vague tweet about leaving the company, and then a few weeks later I wrot..."
},
"mode": "word",
"similarity": 0.194
},
{
"item": {
"id": "ID1834322-12-1455638255",
"text": " \n I worked there. It was literally the worst experience of my career - and I have worked at all of the hardest charging blue chips and two successfu..."
},
"mode": "word",
"similarity": 0.193
},
{
"item": {
"id": "ID1847748-12-1455841393",
"text": "Table of Contents (Show)Table of Contents (Hide)\n In This Issue of Venture Weekly:\n Top Story \nWhy Category Leaders Win, By Ablorde Ashigbi\n Per..."
},
"mode": "word",
"similarity": 0.19
}
]
EOF
req.response = json_similarity_response
end
# POST /similar_to_text — items similar to a piece of ad-hoc text.
r.request name: 'Similar to text' do |req|
req.description = ''
req.call_type = :post
req.path = '/similar_to_text'
req.parameter name: 'text' do |p|
p.description = 'Text of item for which we want other similar items'
p.type = :string
p.required = true
end
req.parameter name: 'bundle_id' do |p|
p.description = 'The bundle this item would most likely be found in. If this parameter is omitted, ingenia assumes the first bundle you created.'
p.type = :integer
p.example = '77'
end
req.parameter name: 'bundle_ids' do |p|
# Fixed typo: "ingenia find items" -> "ingenia finds items".
p.description = 'Find similar items in one or more bundles. If this parameter is omitted, ingenia finds items from any of your bundles.'
p.type = :array
p.example = '1,4,77'
end
req.parameter name: 'limit' do |p|
p.description = 'The number of items to return, the maximum is 100.'
p.type = :integer
p.example = '15'
end
# full_text is a shared parameter object defined elsewhere in this file.
req.parameter full_text
req.parameter name: 'mode' do |p|
p.description = 'Constrain matches to base similarity on just "tag", just "word", or "auto" (first tags, then words)'
p.type = :string
p.example = 'mode=tag'
p.default = 'auto'
end
req.parameter name: 'metadata_filters' do |p|
p.description = 'Instruct ingenia to only consider knowledge items which match these criteria'
p.type = :string
p.example = 'metadata_filters[author]=Joe%20Bloggs'
end
req.parameter name: 'item_filters' do |p|
p.description = 'Instruct ingenia to only consider knowledge items which were created within specific dates. Dates are inclusive.'
p.type = :string
p.example = 'item_filters[from]=2014-12-25&item_filters[to]=2014-12-30'
end
# Fixed example URL: "technology%latest" was broken percent-encoding; the
# space must be encoded as %20 ("technology latest").
req.example = <<-EOF
curl -X POST 'https://api.ingeniapi.com/v2/similar_to_text?text=technology%20latest&limit=3&api_key=$api_key'
Response:
[
{
"item": {
"id": "ID218266-10-1425298759",
"text": "Clarus Financial Technology | Esma\n+447771824036"
},
"mode": "word",
"similarity": 0.966
},
{
"item": {
"id": "CyberVally",
"text": "Technology blog group. blogging about latest technology related news."
},
"mode": "word",
"similarity": 0.87
},
{
"item": {
"id": "TechoTrack",
"text": "This is a technology blog. We provide latest updates on gadgets and technology."
},
"mode": "word",
"similarity": 0.869
}
]
EOF
req.response = json_similarity_response
end
# GET /similar_to_tags — items relevant to a set of tags.
r.request name: 'Similar to tags' do |req|
req.description = ''
req.call_type = :get
req.path = '/similar_to_tags'
req.parameter name: 'tag_ids' do |p|
p.description = 'JSON encoded array of tag IDs for which we want relevant items'
p.type = :array
p.example = '[ 45, 787, 23 ]'
p.required = true
end
req.parameter name: 'bundle_ids' do |p|
p.description = 'Find similar items in one or more bundles. If this parameter is omitted, ingenia will attempt to infer the bundles from the tags'
p.type = :array
p.example = '1,4,77'
end
req.parameter name: 'limit' do |p|
p.description = 'The number of items to return, the maximum is 100.'
p.type = :integer
p.example = '15'
end
# full_text is a shared parameter object defined elsewhere in this file.
req.parameter full_text
req.parameter name: 'metadata_filters' do |p|
p.description = 'Instruct ingenia to only consider knowledge items which match these criteria'
p.type = :string
p.example = 'metadata_filters[author]=Joe%20Bloggs'
end
req.parameter name: 'item_filters' do |p|
p.description = 'Instruct ingenia to only consider knowledge items which were created within specific dates. Dates are inclusive.'
p.type = :string
p.example = 'item_filters[from]=2014-12-25&item_filters[to]=2014-12-30'
end
# Fixed: example used plain http:// while every other example in this file
# uses https://.
req.example = <<-EOF
curl -X GET 'https://api.ingeniapi.com/v2/similar_to_tags?tag_ids=%5B189454%2C189475%5D&limit=3&api_key=$api_key'
Response:
[
{
"item": {
"id": "ID1959443-12-1458267383",
"text": "\n So it’s been a little over a year since GitHub fired me.\nI initially made a vague tweet about leaving the company, and then a few weeks later I wrot..."
},
"mode": "word",
"similarity": 0.194
},
{
"item": {
"id": "ID1834322-12-1455638255",
"text": " \n I worked there. It was literally the worst experience of my career - and I have worked at all of the hardest charging blue chips and two successfu..."
},
"mode": "word",
"similarity": 0.193
},
{
"item": {
"id": "ID1847748-12-1455841393",
"text": "Table of Contents (Show)Table of Contents (Hide)\n In This Issue of Venture Weekly:\n Top Story \nWhy Category Leaders Win, By Ablorde Ashigbi\n Per..."
},
"mode": "word",
"similarity": 0.19
}
]
EOF
req.response = json_similarity_response
end
end
##
# Summarization
#
# POST /summarise — extractive summarisation of text, a URL, or a stored item.
api.resource name: 'Summarisation' do |r|
r.description = ""
r.request name: 'Summarise' do |req|
req.description = '<code class="get_post">GET</code> is also supported'
req.call_type = :post
req.path = '/summarise'
# Content source parameters: one of text / url / id (see footnote [1]).
req.parameter name: 'text' do |p|
p.description = 'Text to summarise: the key sentences will be extracted [1]'
p.type = :string
end
req.parameter name: 'url' do |p|
p.description = 'URL of article to summarise: the key sentences will be extracted [1]'
p.type = :string
end
req.parameter name: 'id' do |p|
p.description = 'ID of the item to be summarised.'
p.type = :string
end
req.parameter name: 'include_tags' do |p|
p.description = 'If true the resulting sentences will be organised by each tag associated to the text, if false they are returned as a list'
p.type = :boolean
p.default = true
end
req.parameter name: 'order_by_position' do |p|
p.description = 'If true, the results will be ordered as they appear in the text, if false, they will be ordered by the score of the sentence'
p.type = :boolean
# Fixed: boolean false, not the string 'false' — the parameter is declared
# :boolean and its sibling include_tags uses a boolean default.
p.default = false
end
req.parameter name: 'max_sentences' do |p|
p.description = 'Maximum number of sentences to return'
p.type = :integer
p.default = 2
end
req.footnote = <<-EOF
<p>[1] You must input content as either text or a URL. </p>
EOF
req.example = <<-EOF
curl -X POST 'https://api.ingeniapi.com/v2/summarise?url=http://techcrunch.com/2016/05/11/charged/&api_key=$api_key'
Response:
{
"results": {
"Relevance": {
"tags": [
{
"machine_score": 0.11,
"name": "Relevance",
"id": 174842,
"rule_score": 0.31,
"score": 0.42,
"sentences": [
{
"text": "Venture capitalists in some sectors are increasingly eager to fund serious scientific innovations, they can be much tougher to do due diligence on than simple software that can be assessed based on immediate market traction.",
"score": 0.055,
"position": 4812
},
{
"text": " Otherwise, it could find it difficult to raise additional funding, hire or retain talent, and avoid a negative press spiral.",
"score": 0.043,
"position": 4686
}
]
}
],
"id": "1625"
}
}
}
EOF
end
end
# GET /keywords/:item_id — list the extracted keywords for an item.
api.resource name: 'Keywords' do |r|
r.description = ""
r.request name: 'Show' do |req|
req.description = 'Returns a list of keywords for a given item'
req.call_type = :get
req.path = '/keywords/:item_id'
req.parameter name: 'item_id' do |p|
# Fixed typo: "keyfords" -> "keywords".
p.description = 'ID of the item to show keywords for.'
p.type = :integer
# Fixed: boolean true, not the symbol :true (consistent with every other
# required parameter in this file).
p.required = true
end
# Fixed: this endpoint is a GET (see call_type above), so the sample curl
# no longer forces -X POST.
req.example = <<-EOF
curl -X GET 'https://api.ingeniapi.com/v2/keywords/457?api_key=$api_key'
Response:
[
{
"text": "chronograph",
"occurrences": 1,
"score": 254
},
{
"text": "measure",
"occurrences": 3,
"score": 122
},
{
"text": "time",
"occurrences": 8,
"score": 12
}
]
EOF
end
end
# GET /clusters — list the word clusters computed for a bundle.
api.resource name: 'Clusters' do |r|
r.description = ""
r.request name: 'Show' do |req|
# Fixed typo: "a list clusters" -> "a list of clusters".
req.description = 'Returns a list of clusters for a given bundle'
req.call_type = :get
req.path = '/clusters'
req.parameter name: 'bundle_id' do |p|
p.description = 'ID of the bundle to show clusters for.'
p.type = :integer
# Fixed: boolean true, not the symbol :true (consistent with every other
# required parameter in this file).
p.required = true
end
req.example = <<-EOF
curl 'https://api.ingeniapi.com/v2/clusters?bundle_id=544&api_key=$api_key'
Response:
{
"bundle_id": 544,
"date": "2016-05-18T08:00:15Z",
"clusters": [
{
"cluster": {
"id": 105636,
"score": 0.0235,
"words": [
{
"text": "journal",
"score": 457.39
},
{
"text": "org",
"score": 421.19
},
...
]
}
}
]
}
EOF
end
end
##
# Items
#
api.resource name: 'Items' do |r|
r.description = "Blocks of textual content, typically self-contained and homogeneous"
# GET /items — paginated list of all of the user's items.
r.request name: 'Index' do |req|
req.description = 'Returns a list of all your items'
req.call_type = :get
req.path = '/items'
#req.parameter api_key
# limit / full_text / offset are shared parameter objects defined elsewhere
# in this file.
req.parameter limit
req.parameter full_text
req.parameter offset
req.example = <<-EOF
curl 'https://api.ingeniapi.com/v2/items?api_key=$api_key'
Response:
[
{
"bundle_id": 778,
"bundle_name": "Here we go again",
"concordance": null,
"created_at": "2016-05-10T15:35:59Z",
"id": "61265a8b2e56ff9693753fd044630ed5",
"item_state": "processed",
"language": "en",
"last_classified_at": "2016-05-10T15:38:47Z",
"membership_degree": null,
"updated_at": "2016-05-10T15:38:47Z",
"tag_sets": [
],
"text": "Some inline text",
"metadata": [
null
]
},
{
"bundle_id": 778,
"bundle_name": "Here we go again",
"concordance": null,
"created_at": "2016-05-10T16:03:59Z",
"id": "3fdb62127e7a839e3f4e0ab6de7cd869",
"item_state": "processed",
"language": "en",
"last_classified_at": "2016-05-10T16:04:00Z",
"membership_degree": null,
"updated_at": "2016-05-10T16:04:01Z",
"tag_sets": [
],
"text": "Smartwatch cheats force Thai students back to exam halls - BBC News\\nSome 3,000 students in Thailand must retake university entrance exams after a cheating scam involving cameras and smartwatches was uncovered.The sophisticated scam happened at Rangsit University in Bangkok.The ...",
"metadata": [
null,
{
"name": "url-fetched",
"type": "date",
"content": "2016-05-10 16:03:59"
},
{
"name": "url",
"type": "url",
"content": "http://www.bbc.co.uk/news/world-asia-36253769"
}
]
}
]
EOF
req.response = json_item_show
end
# GET /items/:id — fetch a single item by its string ID.
r.request name: 'Show' do |req|
req.description = 'Returns a single item'
req.call_type = :get
req.path = '/items/:id'
req.parameter name: 'id' do |p|
p.description = 'The ID of the item you want to show'
p.type = :string
p.required = true
end
#req.parameter api_key
# full_text is a shared parameter object defined elsewhere in this file.
req.parameter full_text
req.example = <<-EOF
curl 'https://api.ingeniapi.com/v2/items/61265a8b2e56ff9693753fd044630ed5?api_key=$api_key'
Response:
{
"bundle_id": 778,
"bundle_name": "Tech Startups",
"concordance": null,
"created_at": "2016-05-10T15:35:59Z",
"id": "61265a8b2e56ff9693753fd044630ed5",
"item_state": "processed",
"language": "en",
"last_classified_at": "2016-05-10T15:38:47Z",
"membership_degree": null,
"updated_at": "2016-05-10T15:38:47Z",
"tag_sets": [
],
"text": "Some inline text",
"metadata": [
null
]
}
EOF
req.response = json_item_show
end
# POST /items — create an item from inline text, a URL, or an uploaded file.
r.request name: 'Create' do |req|
req.description = 'Creates a new item'
req.call_type = :post
req.path = '/items'
req.parameter json_item
req.parameter name: 'file' do |p|
p.description = 'File to be used as text source. Sent as multipart upload. Accepted file extensions are: Text (txt), Postscript Document Format (pdf) and Microsoft Office Documents (doc, docx, xlsx, ppt, pptx). [1]'
p.type = :multipart
end
req.parameter name: 'update_existing' do |p|
p.description = 'Choice of what to do if the item sent via a create call already exists on Ingenia, as determined by its item ID. If this field is true, the tags supplied will overwrite those on the existing item. If false, no data is modified and a response is returned with a 409 code (Conflict) together with the existing item as JSON.'
p.default = true
p.type = :boolean
end
req.parameter name: 'classify' do |p|
p.description = 'If true, the response will also include a classification'
p.default = false
p.type = :boolean
end
req.footnote = <<-FN
<p>[1] You can input content as one of these fields: text, a URL, a file. Formats
supported for files include txt, html, pdf and all MS Office formats. If you send a file, it will extract the text
from it.</p>
<p>The text and the URL are input as part of the JSON component. The file
is sent as a multipart encoded https field.</p>
FN
# Fixed the fourth sample URL: it read ".../v2/items=$api_key&..." with the
# "?api_key" query marker missing.
req.example = <<-EOF
# Simply post item's text
curl -X POST \\
-F'json={ "text" : "Some inline text" }' \\
'https://api.ingeniapi.com/v2/items?api_key=$api_key&classify=true'
# Create an item with some text and assign a tag ('foo') to it with a score of 0.2.
curl -X POST \\
-F'json={ "text" : "Some inline text" , "tags" : { "foo" : 0.2 } }' \\
'https://api.ingeniapi.com/v2/items?api_key=$api_key&classify=true'
# Create an item with some text, create a new tag set ('my tag set') and add
# a tag ('foo') with a score of 0.2 to that tag set..
curl -X POST \\
-F'json={ "text" : "Some inline text" , "tag_sets" : { "my tag set" : { "foo" : 0.2 } } }' \\
'https://api.ingeniapi.com/v2/items?api_key=$api_key&classify=true'
# Create an item with the tag ('foo')
curl -X POST \\
-F'json={ "text" : "Some inline text" , "tags" : [ "foo"] }' \\
'https://api.ingeniapi.com/v2/items?api_key=$api_key&classify=true'
# Post url to retrieve content from and create an item with that content
curl -X POST \\
-F'json={ "url" : "https://www.zdziarski.com/blog/?p=3875" }' \\
'https://api.ingeniapi.com/v2/items?api_key=$api_key'
# Post a file using multipart/form-data upload and create an item with that content
curl -X POST \\
-F'json={}' \\
-F'file=@article.txt' \\
'https://api.ingeniapi.com/v2/items?api_key=$api_key&classify=true&update_existing=true'
EOF
req.response = json_item
end
# PUT /items/:id — replace an item's content and/or tags.
r.request name: 'Update' do |req|
req.description = 'Update an existing item'
req.call_type = :put
req.path = '/items/:id'
req.parameter name: 'id' do |p|
p.description = 'The ID of the item you want to update'
p.type = :string
p.required = true
end
req.parameter json_item
req.parameter name: 'file' do |p|
p.description = 'File to be used as text source. Sent as multipart upload. Accepted file types are: Text (txt), Postscript Document Format (pdf), Microsoft Office Documents (doc, docx, xls, xlsx, ppt, pptx). [1]'
p.type = :multipart
end
req.footnote = <<-FN
<p>[1] You can input content as ONE of: text, a URL, a file (formats
supported include txt, html, pdf, all the MS Office formats). If you
send a URL, Ingenia will extract the most meaningful text from it,
e.g., ignoring links. If you send a file, it will extract the text
from it.</p>
<p>The text and the URL are input as part of the JSON component. The file
is sent as a multipart encoded https field.</p>
FN
# Fixed sample: the curl URL was missing its closing single quote.
req.example = <<-EOF
curl -X PUT \\
-F'json={ "text" : "Some updated text" , "tags" : [ "foo"] }' \\
'https://api.ingeniapi.com/v2/items/61265a8b2e56ff9693753fd044630ed5?api_key=$api_key'
Response:
{
"bundle_id": 778,
"created_at": "2016-05-10T15:35:59Z",
"id": "61265a8b2e56ff9693753fd044630ed5",
"last_classified_at": "2016-05-10T16:54:56Z",
"updated_at": "2016-05-10T16:54:57Z",
"text": "Some updated text",
"tag_sets": [
{
"Technologia": {
"id": 2860,
"tags": [
{
"id": 189475,
"name": "foo",
"user_selected": "t",
"user_assigned": true,
"score": "0.0",
"machine_score": "0",
"rule_score": null,
"user_assigned_score": "0"
}
]
}
}
]
}
EOF
req.response = json_item
end
# DELETE /items/:id — remove an item; the response echoes the destroyed ID
# and the bundle it belonged to.
r.request name: 'Delete' do |req|
req.description = 'Delete an existing item'
req.call_type = :delete
req.path = '/items/:id'
req.parameter name: 'id' do |p|
p.description = 'The ID of the item you want to delete'
p.type = :string
p.required = true
end
req.example = <<-EOF
curl -X DELETE 'https://api.ingeniapi.com/v2/items/61265a8b2e56ff9693753fd044630ed5?api_key=$api_key'
Response:
{
"61265a8b2e56ff9693753fd044630ed5": "destroyed",
"bundle_id": 778
}
EOF
end
end
##
# Bundles
#
api.resource name: 'Bundles' do |r|
r.description = "Groups of thematically consistent items"
# GET /bundles — paginated list of all of the user's bundles.
r.request name: 'Index' do |req|
req.description = 'Returns a list of all your bundles'
req.call_type = :get
req.path = '/bundles'
# limit / offset are shared parameter objects defined elsewhere in this file.
req.parameter limit
req.parameter offset
# Fixed example: the index response is a JSON array; the sample previously
# wrapped the array in a stray "{ ... }" pair, which is invalid JSON.
req.example = <<-EOF
curl 'https://api.ingeniapi.com/v2/bundles?api_key=$api_key'
Response:
'[
{
"id":755,
"name":"New Bundle",
"tag_sets" : [
{ "name" : "technology", "id": 14562 },
{ "name" : "business", "id": 666 }
],
"created_at" : "2016-04-06T09:00:44Z",
"updated_at":"2016-04-06T09:00:44Z"
},
{
"id" : 756,
"name" : "Another Bundle",
"tag_sets" : [
{ "name" : "technology", "id": 14562 }
],
"created_at" : "2016-04-07T11:44:26Z",
"updated_at":"2016-04-07T11:44:26Z"
}
]'
EOF
req.response = json_bundle_show
end
# GET /bundles/:id — fetch a single bundle by numeric ID.
r.request name: 'Show' do |req|
req.description = 'Returns a single bundle'
req.call_type = :get
req.path = '/bundles/:id'
req.parameter name: 'id' do |p|
p.description = 'The ID of the bundle you want to show'
p.type = :integer
p.required = true
end
# Fixed example JSON: removed the trailing comma after "updated_at".
req.example = <<-EOF
curl 'https://api.ingeniapi.com/v2/bundles/47858?api_key=$api_key'
Response:
'{
"id" : 47858,
"name" : "Tech Startups",
"tag_sets" : [
{ "name" : "technology", "id": 14562 },
{ "name" : "business", "id": 666 }
],
"created_at" :"2014-03-13T15:36:51Z",
"updated_at" :"2014-03-13T15:36:51Z"
}'
EOF
req.response = json_bundle_show
end
# GET /bundles/find_by_name — look up a bundle by exact name match.
r.request name: 'Find_by_name' do |req|
req.description = 'Looks for a bundle that matches exactly text input'
req.call_type = :get
req.path = '/bundles/find_by_name'
#req.parameter api_key
# NOTE(review): the parameter is documented as 'text' but the example query
# below sends '?name=' — confirm which name the API actually accepts.
req.parameter name: 'text' do |p|
p.description = 'Text of the bundle to look for'
p.type = :string
p.required = true
end
# Fixed example JSON: removed the trailing comma after "updated_at".
req.example = <<-EOF
curl 'https://api.ingeniapi.com/v2/bundles/find_by_name?name=Tech%20Startups&api_key=$api_key'
Response:
'{
"id" : 47858,
"name" : "Tech Startups",
"tag_sets" : [
{ "name" : "technology", "id": 14562 },
{ "name" : "business", "id": 666 }
],
"created_at" :"2014-03-13T15:36:51Z",
"updated_at" :"2014-03-13T15:36:51Z"
}'
EOF
req.response = json_bundle_show
end
# POST /bundles — create a bundle from a JSON payload.
r.request name: 'Create' do |req|
req.description = 'Creates a new bundle'
req.call_type = :post
req.path = '/bundles'
req.parameter json_bundle
req.example = <<-EOF
curl -X POST \\
-F'json={ "name" : "New Bundle", "tag_set_ids" : [2820, 2819] }' \\
'https://api.ingeniapi.com/v2/bundles?api_key=$api_key'
Response:
'{
"id" : 47858,
"name" : "New Bundle",
"tag_sets" : [
{
"id" : 2820,
"name" : "Tag Set One"
},
{
"id" : 2819,
"name" : "Tag Set Two"
}
],
"created_at" :"2014-03-13T15:36:51Z",
"updated_at" :"2014-03-13T15:36:51Z"
}'
EOF
req.response = json_bundle
end
# PUT /bundles/:id — update an existing bundle.
r.request name: 'Update' do |req|
req.description = 'Update an existing bundle'
req.call_type = :put
req.path = '/bundles/:id'
req.parameter name: 'id' do |p|
p.description = 'The ID of the bundle you want to update'
p.type = :integer
p.required = true
end
req.parameter json_bundle
req.example = <<-EOF
curl -X PUT \\
-F'json={ "name" : "New Bundle Updated" }' \\
'https://api.ingeniapi.com/v2/bundles/47858?api_key=$api_key'
Response:
'{
"id" : 47858,
"name" : "New Bundle Updated",
"tag_sets" : [
{
"id" : 2820,
"name" : "Tag Set One"
},
{
"id" : 2819,
"name" : "Tag Set Two"
}
],
"created_at" :"2016-04-06T09:00:44Z",
"updated_at" :"2016-04-06T09:00:44Z"
}'
EOF
req.response = json_bundle
end
# DELETE /bundles/:id.
# FIX: req.example was nested inside the req.parameter block (misplaced
# 'end'); the parameter block is now closed before the example is set.
r.request name: 'Delete' do |req|
req.description = 'Delete an existing bundle'
req.call_type = :delete
req.path = '/bundles/:id'
req.parameter name: 'id' do |p|
p.description = 'The ID of the bundle you want to delete'
p.type = :integer
p.required = true
end
req.example = <<-EOF
curl -X DELETE \\
'https://api.ingeniapi.com/v2/bundles/47858?api_key=$api_key'
Response:
'{
"47858" : "destroyed"
}'
EOF
end
end
##
# Tags
# Tags are user-defined labels attached to items; this resource covers the
# full CRUD cycle plus exact-name lookup and tag merging.
#
api.resource name: 'Tags' do |r|
r.description = "Tags are meaningful words or expressions that you want to associate to some of your items"
# GET /tags — paginated listing of every tag owned by this API key.
r.request name: 'Index' do |req|
req.description = 'List all your tags'
req.call_type = :get
req.path = '/tags'
req.parameter limit
req.parameter offset
req.example = <<-EOF
curl 'https://api.ingeniapi.com/v2/tags?api_key=$api_key'
Response:
[
{
"confidence": 0.0,
"created_at": "2016-05-04T16:12:43Z",
"current_state": "not_enough_items_to_learn",
"description": "a term for data sets that are so large or complex that traditional data processing applications are inadequate",
"id": 189453,
"name": "Big Data",
"tag_set_id": 2858,
"updated_at": "2016-05-04T16:12:43Z"
},
{
"confidence": 0.0,
"created_at": "2016-05-04T16:08:05Z",
"current_state": "not_enough_items_to_learn",
"description": "the process of deriving high-quality information from text",
"id": 189452,
"name": "Text Analytics",
"tag_set_id": 2858,
"updated_at": "2016-05-04T16:08:05Z"
}
]
EOF
req.response = json_tag_show
end
# GET /tags/:id — fetch a single tag.
r.request name: 'Show' do |req|
req.description = 'View a single tag'
req.call_type = :get
req.path = '/tags/:id'
req.parameter name: 'id' do |p|
p.description = 'The ID of the tag you want to show'
p.type = :integer
p.required = true
end
req.example = <<-EOF
curl 'https://api.ingeniapi.com/v2/tags/189453?api_key=$api_key'
Response:
{
"confidence": 0.0,
"created_at": "2016-05-04T16:12:43Z",
"current_state": "not_enough_items_to_learn",
"description": "",
"id": 189453,
"name": "New Tag",
"tag_set_id": 2858,
"updated_at": "2016-05-04T16:12:43Z"
}
EOF
req.response = json_tag_show
end
# GET /tags/find_by_name — exact-match lookup by tag name.
# NOTE(review): the declared parameter is 'text' but the example query string
# uses 'name=' — confirm which name the API actually expects.
r.request name: 'Find_by_name' do |req|
req.description = 'Looks for a tag that matches exactly text input'
req.call_type = :get
req.path = '/tags/find_by_name'
req.parameter name: 'text' do |p|
p.description = 'Text of the tag to look for'
p.type = :string
p.required = true
end
req.example = <<-EOF
curl 'https://api.ingeniapi.com/v2/tags/find_by_name?name=New%20Tag&api_key=$api_key'
Response:
{
"confidence": 0.0,
"created_at": "2016-05-04T16:12:43Z",
"current_state": "not_enough_items_to_learn",
"description": "",
"id": 189453,
"name": "New Tag",
"tag_set_id": 2858,
"updated_at": "2016-05-04T16:12:43Z"
}
EOF
req.response = json_tag_show
end
# POST /tags — create a tag inside an existing tag set.
r.request name: 'Create' do |req|
req.description = 'Create a new tag'
req.call_type = :post
req.path = '/tags'
req.parameter json_tag
req.example = <<-EOF
curl -X POST \\
-F'json={ "tag_set_id" : 2858, "name" : "New Tag" }' \\
'https://api.ingeniapi.com/v2/tags?api_key=$api_key'
Response:
{
"confidence": 0.0,
"created_at": "2016-05-04T17:05:18Z",
"current_state": "unprocessed",
"description": "",
"id": 189455,
"name": "New Tag",
"tag_set_id": 2858,
"updated_at": "2016-05-04T17:05:18Z"
}
EOF
req.response = json_tag
end
# PUT /tags/:id — update an existing tag.
r.request name: 'Update' do |req|
req.description = 'Update an existing tag'
req.call_type = :put
req.path = '/tags/:id'
req.parameter name: 'id' do |p|
p.description = 'The ID of the tag you want to update'
p.type = :integer
p.required = true
end
req.parameter json_tag
req.example = <<-EOF
curl -X PUT \\
-F'json={ "name" : "New Tag Updated" }' \\
'https://api.ingeniapi.com/v2/tags/189453?api_key=$api_key'
Response:
{
"confidence": 0.0,
"created_at": "2016-05-04T16:12:43Z",
"current_state": "unprocessed",
"description": "",
"id": 189453,
"name": "New Tag Updated",
"tag_set_id": 2858
}
EOF
req.response = json_tag
end
# POST /tags/:id/merge — fold the tags in tag_ids into tag :id.
r.request name: 'Merge' do |req|
req.description = 'Merge two or more existing tags'
req.call_type = :post
req.path = '/tags/:id/merge'
req.parameter name: 'id' do |p|
p.description = 'The ID of the tag into which you want to merge other tags; the resulting tag will have this name'
p.type = :integer
p.required = true
end
req.parameter name: 'tag_ids' do |p|
p.description = 'A JSON encoded array of tag IDs that will be merged into the main tag'
p.type = :array
p.example = '[ 23, 43, 2113 ]'
p.required = true
end
req.example = <<-EOF
curl -X POST 'https://api.ingeniapi.com/v2/tags/189454/merge?tag_ids=%5B189452%2C189453%5D&api_key=$api_key'
/*(Where:
'%5B' = '['
'%2C' = ','
'%5D' = ']'
for constructing array of IDs in url params)*/
Response:
{
"189454":"merged"
}
EOF
end
# DELETE /tags/:id.
# FIX: the example deleted tag 189454 but the response showed "189455";
# the response now echoes the ID that was actually requested.
r.request name: 'Delete' do |req|
req.description = 'Delete an existing tag'
req.call_type = :delete
req.path = '/tags/:id'
req.parameter name: 'id' do |p|
p.description = 'The ID of the tag you want to delete'
p.type = :integer
p.required = true
end
req.example = <<-EOF
curl -X DELETE 'https://api.ingeniapi.com/v2/tags/189454?api_key=$api_key'
Response:
{
"189454" : "destroyed"
}
EOF
end
end
##
# Tag rules
# Rules attached to a tag that bias the classifier toward or away from it.
#
api.resource name: 'Tag rules' do |r|
r.description = "Tag rules are rules that you want to associate with a tag to influence the tag choice"
# GET /tag/:tag_id/tag_rules — list the rules attached to one tag.
# NOTE(review): this resource uses a singular '/tag/' path segment, unlike
# the plural paths elsewhere — confirm this is intentional.
r.request name: 'Index' do |req|
req.description = 'List all your tag rules for a tag'
req.call_type = :get
req.path = '/tag/:tag_id/tag_rules'
req.response = json_tag_rules_show
req.example = <<-EOF
curl https://api.ingeniapi.com/v2/tag/5/tag_rules?api_key=$api_key
EOF
req.parameter name: 'tag_id' do |p|
p.description = 'The ID of the tag to find its associated tag rules'
p.type = :integer
p.required = true
end
end
# GET /tag/:tag_id/tag_rules/:id — fetch a single rule.
r.request name: 'Show' do |req|
req.description = 'View a single tag rule'
req.call_type = :get
req.path = '/tag/:tag_id/tag_rules/:id'
req.response = json_tag_rule_show
req.example = <<-EOF
curl https://api.ingeniapi.com/v2/tag/5/tag_rules/6?api_key=$api_key
EOF
req.parameter name: 'id' do |p|
p.description = 'The ID of the tag rule'
p.type = :integer
p.required = true
end
req.parameter name: 'tag_id' do |p|
p.description = 'The ID of the tag'
p.type = :integer
p.required = true
end
end
# POST /tag/:tag_id/tag_rules — attach a new rule to a tag.
r.request name: 'Create' do |req|
req.description = 'Create a new tag rule'
req.call_type = :post
req.path = '/tag/:tag_id/tag_rules'
req.parameter json_tag_rule_create
req.example = <<-EOF
curl -X POST \\
-F'json={ "text": "tag_text", "influence" : 0.3, "language": "en", "tag_rule_mode": "word_present" }' \\
https://api.ingeniapi.com/v2/tag/5/tag_rules?api_key=$api_key
EOF
req.parameter name: 'tag_id' do |p|
p.description = 'The ID of the tag'
p.type = :integer
p.required = true
end
end
# DELETE /tag/:tag_id/tag_rules/:id.
r.request name: 'Delete' do |req|
req.description = 'Delete an existing tag rule'
req.call_type = :delete
req.path = '/tag/:tag_id/tag_rules/:id'
req.example = <<-EOF
curl -X DELETE \\
https://api.ingeniapi.com/v2/tag/5/tag_rules/6?api_key=$api_key
EOF
req.parameter name: 'id' do |p|
p.description = 'The ID of the tag rule you want to delete'
p.type = :integer
p.required = true
end
req.parameter name: 'tag_id' do |p|
p.description = 'The ID of the tag'
p.type = :integer
p.required = true
end
end
end
##
# Tag Sets
# Thematically consistent groups of tags (countries, sectors, topics, ...).
#
api.resource name: 'Tag sets' do |r|
r.description = "Tag sets are thematically consistent groups of tags defined by you, such as, say, world countries, business sectors, product types, companies, concepts, topics, etc"
# GET /tag_sets — paginated listing.
# NOTE(review): the advanced example passes 'bundle_id', which is not
# declared as a parameter of this request — confirm it is supported.
r.request name: 'Index' do |req|
req.description = 'List all your tag sets'
req.call_type = :get
req.path = '/tag_sets'
req.parameter limit
req.parameter offset
req.example = <<-EOF
# Simple request to fetch all tag sets
curl -s -q 'https://api.ingeniapi.com/v2/tag_sets?api_key=$api_key'
# ...and a bit more advanced example
curl -s -q 'https://api.ingeniapi.com/v2/tag_sets?limit=100&offset=100&bundle_id=42&api_key=$api_key'
Response:
'[
{
"created_at" : "2016-04-06T11:01:18Z",
"id" : 2820,
"name" : "Tag Set One",
"updated_at" : "2016-04-06T11:04:00Z"
},
{
"created_at" : "2016-04-06T09:00:44Z",
"id" : 2819,
"name" : "Tag Set Two",
"updated_at":"2016-04-06T09:00:44Z"
}
]'
EOF
req.response = json_tag_set_show
end
# GET /tag_sets/:id — fetch a single tag set.
# FIX: the example embedded what looks like a real API key (now '$api_key',
# matching every other example), and requested id 2820 while the response
# showed id 2822 — the request now uses the id the response belongs to.
r.request name: 'Show' do |req|
req.description = 'View a single tag set'
req.call_type = :get
req.path = '/tag_sets/:id'
req.parameter name: 'id' do |p|
p.description = 'The ID of the tag set you want to show'
p.type = :integer
p.required = true
end
req.example = <<-EOF
curl 'https://api.ingeniapi.com/v2/tag_sets/2822?api_key=$api_key'
Response:
'{
"created_at" : "2016-04-07T16:13:52Z",
"id" : 2822,
"name" : "Big Data",
"updated_at" : "2016-04-07T16:13:52Z"
}'
EOF
req.response = json_tag_set_show
end
# GET /tag_sets/find_by_name — exact-match lookup by tag set name.
# FIX: req.example and req.response were nested inside the req.parameter
# block (misplaced 'end'); the parameter block is now closed first.
# NOTE(review): the declared parameter is 'text' but the example query string
# uses 'name=' — confirm which name the API actually expects.
r.request name: 'Find_by_name' do |req|
req.description = 'Looks for a tag set that matches exactly text input'
req.call_type = :get
req.path = '/tag_sets/find_by_name'
req.parameter name: 'text' do |p|
p.description = 'Text of tag set to look for'
p.type = :string
p.required = true
end
req.example = <<-EOF
curl 'https://api.ingeniapi.com/v2/tag_sets/find_by_name?name=Big%20Data&api_key=$api_key'
Response:
'{
"created_at" : "2016-04-07T16:13:52Z",
"id" : 2822,
"name" : "Big Data",
"updated_at" : "2016-04-07T16:13:52Z"
}'
EOF
req.response = json_tag_set_show
end
# POST /tag_sets — create a tag set.
r.request name: 'Create' do |req|
req.description = 'Create a new tag set'
req.call_type = :post
req.path = '/tag_sets'
req.parameter json_tag_set
req.example = <<-EOF
curl -s -X POST \\
-F'json={ "name" : "new tag s" }' \\
'https://api.ingeniapi.com/v2/tag_sets?api_key=$api_key'
Response:
'{
"created_at" : "2016-04-07T16:49:24Z",
"id" : 2823,
"name" : "new tag s",
"updated_at" : "2016-04-07T16:49:24Z"
}'
EOF
req.response = json_tag_set
end
# PUT /tag_sets/:id — update an existing tag set.
r.request name: 'Update' do |req|
req.description = 'Update an existing tag set'
req.call_type = :put
req.path = '/tag_sets/:id'
req.parameter name: 'id' do |p|
p.description = 'The ID of the tag set you want to update'
p.type = :integer
p.required = true
end
req.parameter json_tag_set
req.example = <<-EOF
curl -s -X PUT \\
-F'json={ "name" : "Updated Tag Set Name" }' \\
'https://api.ingeniapi.com/v2/tag_sets/2823?api_key=$api_key'
Response:
'{
"created_at" : "2016-04-07T16:49:24Z",
"id" : 2823,
"name" : "Updated Tag Set Name",
"updated_at" : "2016-04-07T16:58:11Z"
}'
EOF
req.response = json_tag_set
end
# POST /tag_sets/:id/merge — fold the tag sets in tag_set_ids into :id.
# FIX: the example response was '{ {"tag_set_id" : 2824} }' — an object
# nested in an object with no key, which is invalid JSON.
r.request name: 'Merge' do |req|
req.description = 'Merge two or more existing tag sets'
req.call_type = :post
req.path = '/tag_sets/:id/merge'
req.parameter name: 'id' do |p|
p.description = 'The ID of the tag set into which you want to merge the other tag sets; the resulting tag set will have this name'
p.type = :integer
p.required = true
end
req.parameter name: 'tag_set_ids' do |p|
p.description = 'JSON encoded array of tag set IDs to merge into main tag set'
p.type = :array
p.example = '[ 12, 34, 56 ]'
p.required = true
end
req.example = <<-EOF
curl -X POST 'https://api.ingeniapi.com/v2/tag_sets/2824/merge?tag_set_ids=%5B2833%2C2832%5D&api_key=$api_key'
/*(Where:
'%5B' = '['
'%2C' = ','
'%5D' = ']'
for constructing array of IDs in url params)*/
Response:
'{
"tag_set_id" : 2824
}'
EOF
end
# DELETE /tag_sets/:id.
r.request name: 'Delete' do |req|
req.description = 'Delete an existing tag set'
req.call_type = :delete
req.path = '/tag_sets/:id'
req.parameter name: 'id' do |p|
p.description = 'The ID of the tag set you want to delete.'
p.type = :integer
p.required = true
end
req.example = <<-EOF
curl -X DELETE 'https://api.ingeniapi.com/v2/tag_sets/2824?api_key=$api_key'
Response:
'{
"2824" : "destroyed"
}'
EOF
end
end
# Text extraction: boilerplate-stripping service for arbitrary web pages.
# NOTE(review): unlike the other resources, this request declares no
# req.path, and its example posts to a different host
# (content-service.ingeniapi.com) over plain http — confirm both.
api.resource name: 'Text extraction' do |r|
r.description = "Returns stripped text for a given url"
# POST — extract title and main text content for a url.
r.request name: 'Get stripped text' do |req|
req.description = 'Returns stripped text for a given url'
req.call_type = :post
req.parameter uri
req.example = <<-EOF
# Request to get stripped content for url
curl -X POST -H 'Content-Type: application/json' -d '{"url":{"uri":"https://techcrunch.com/2016/08/02/instagram-stories/"}}' http://content-service.ingeniapi.com/urls
Response:
'{
"url": {
"uri": "https://techcrunch.com/2016/08/02/instagram-stories/"
},
"title": "Instagram launches “Stories,” a Snapchatty feature for imperfect sharing",
"content": "People only post the highlights of their life on Instagram, so today the app adds its own version of “Stories” ...'
EOF
end
# GET — fetch the raw, unprocessed HTML of a url.
# FIX: the example's leading comment was copy-pasted from the
# "Get stripped text" request; it now describes this request.
r.request name: 'Get full html' do |req|
req.description = 'Returns full html for a url'
req.call_type = :get
req.parameter url
req.example = <<-EOF
# Request to get the full html for a url
curl 'https://techcrunch.com/2016/08/02/instagram-stories/'
Response:
<xmp>'<!DOCTYPE html>
<html xmlns="http://www.w3.org/1999/xhtml" xmlns:og="http://opengraphprotocol.org/schema/" xmlns:fb="http://www.facebook.com/2008/fbml" lang="en">
<head>
<title>Instagram launches “Stories,” a Snapchatty feature for imperfect sharing | TechCrunch</title>
...'</xmp>
EOF
end
end
##
# Administrative Calls
#
api.resource name: 'Administrative calls' do |r|
r.description = ""
r.request name: 'Status' do |req|
req.description = 'The status of your Ingenia account, indicating whether Ingenia has processed all your content; use this to test your API key, see [status call] for details'
req.call_type = :get
req.path = '/status'
req.parameter name: 'total_bundles' do |p|
p.description = 'Number of bundles you have own'
p.type = :integer
end
req.parameter name: 'processed_bundles' do |p|
p.description = 'Number of bundles where all items have been processed'
p.type = :integer
end
req.parameter name: 'total_items' do |p|
p.description = 'Number of items you have created'
p.type = :integer
end
req.parameter name: 'pending_items' do |p|
p.description = 'Number of items Ingenia has not yet processed'
p.type = :integer
end
req.parameter name: 'processed_items' do |p|
p.description = 'Number of items Ingenia has processed'
p.type = :integer
end
req.parameter name: 'total_tag_sets' do |p|
p.description = 'Number of tag sets you own'
p.type = :integer
end
req.parameter name: 'processed_tag_sets' do |p|
p.description = 'Number of tag sets Ingenia has processed'
p.type = :integer
end
req.parameter name: 'pending_tag_sets' do |p|
p.description = 'Number of tag sets ready to process, but which Ingenia has not yet processed'
p.type = :integer
end
req.parameter name: 'untrained_tag_sets' do |p|
p.description = 'Number of tag sets which do not have enough items to process'
p.type = :integer
end
req.parameter name: 'idle_tag_sets' do |p|
p.description = 'Number of tag sets that the user prefers to not be processed by Ingenia'
p.type = :integer
end
req.parameter name: 'total_tags' do |p|
p.description = 'Number of tags you have own'
p.type = :integer
end
req.parameter name: 'processed_tags' do |p|
p.description = 'Number of tags Ingenia has processed'
p.type = :integer
end
req.parameter name: 'pending_tags' do |p|
p.description = 'Number of tags Ingenia has not yet processed'
p.type = :integer
end
req.parameter name: 'untrained_tags' do |p|
p.description = 'Number of tags which are not assigned to items'
p.type = :integer
end
req.parameter name: 'idle_tags' do |p|
p.description = 'Number of tags that the user prefers to not be processed by Ingenia'
p.type = :integer
end
req.parameter name: 'ready_to_classify' do |p|
p.description = 'True if all tags assigned to items have been processed'
p.type = :boolean
end
req.example = <<-EOF
curl -X GET 'https://api.ingeniapi.com/v2/status?api_key=$api_key'
Response:
{
"total_bundles": 17,
"processed_bundles": 1,
"total_items": 2,
"pending_items": 0,
"processed_items": 2,
"total_tag_sets": 2,
"pending_tag_sets": 0,
"processed_tag_sets": 0,
"untrained_tag_sets": 2,
"idle_tag_sets": 0,
"total_tags": 3,
"pending_tags": 0,
"processed_tags": 0,
"untrained_tags": 3,
"idle_tags": 0,
"ready_to_classify": true
}
EOF
end
# POST /clear_data — destructive: wipes all account data to start over.
r.request name: 'Clear_data' do |req|
req.description = 'Delete all the data in your account; useful to restart from zero if the data was polluted'
req.call_type = :post
req.path = '/clear_data'
req.example = <<-EOF
curl -X POST 'https://api.ingeniapi.com/v2/clear_data?api_key=$api_key'
Response:
{}
EOF
end
end
end
@api = api
api.to_json
|
require "formula"
# Homebrew formula for Gitea, a self-hosted Git service: installs a
# pre-built darwin binary for stable/devel, or builds from source for HEAD.
class Gitea < Formula
  homepage "https://github.com/go-gitea/gitea"
  head "https://github.com/go-gitea/gitea.git"

  # FIX: download and checksum URLs now use HTTPS instead of plain HTTP.
  # NOTE(review): fetching the .sha256 with curl at formula-parse time needs
  # network access and defeats checksum pinning — consider hard-coding it.
  stable do
    url "https://dl.gitea.io/gitea/1.0.0/gitea-1.0.0-darwin-10.6-amd64"
    sha256 `curl -Ls https://dl.gitea.io/gitea/1.0.0/gitea-1.0.0-darwin-10.6-amd64.sha256`.split(" ").first
    version "1.0.0"
  end

  devel do
    url "https://dl.gitea.io/gitea/master/gitea-master-darwin-10.6-amd64"
    sha256 `curl -Ls https://dl.gitea.io/gitea/master/gitea-master-darwin-10.6-amd64.sha256`.split(" ").first
    version "master"
  end

  head do
    url "https://github.com/go-gitea/gitea.git", :branch => "master"
    depends_on "go" => :build
  end

  test do
    system "#{bin}/gitea", "--version"
  end

  def install
    case
    when build.head?
      # Lay out a GOPATH workspace with the repo at src/code.gitea.io/gitea.
      mkdir_p buildpath/File.join("src", "code.gitea.io")
      ln_s buildpath, buildpath/File.join("src", "code.gitea.io", "gitea")
      ENV.append_path "PATH", File.join(buildpath, "bin")
      ENV["GOPATH"] = buildpath
      # NOTE(review): GOHOME is not a Go environment variable — verify needed.
      ENV["GOHOME"] = buildpath
      ENV["TAGS"] = "sqlite"
      system "cd src/code.gitea.io/gitea && make build"
      bin.install "#{buildpath}/gitea" => "gitea"
    when build.devel?
      bin.install "#{buildpath}/gitea-master-darwin-10.6-amd64" => "gitea"
    else
      bin.install "#{buildpath}/gitea-1.0.0-darwin-10.6-amd64" => "gitea"
    end
  end
end
Use HTTPS and proper build tags
require "formula"
# Homebrew formula for Gitea, a self-hosted Git service: installs a
# pre-built darwin binary for stable/devel, or builds from source for HEAD.
class Gitea < Formula
homepage "https://github.com/go-gitea/gitea"
head "https://github.com/go-gitea/gitea.git"
# NOTE(review): fetching the .sha256 with curl at formula-parse time needs
# network access and defeats checksum pinning — consider hard-coding it.
stable do
url "https://dl.gitea.io/gitea/1.0.0/gitea-1.0.0-darwin-10.6-amd64"
sha256 `curl -s https://dl.gitea.io/gitea/1.0.0/gitea-1.0.0-darwin-10.6-amd64.sha256`.split(" ").first
version "1.0.0"
end
devel do
url "https://dl.gitea.io/gitea/master/gitea-master-darwin-10.6-amd64"
sha256 `curl -s https://dl.gitea.io/gitea/master/gitea-master-darwin-10.6-amd64.sha256`.split(" ").first
version "master"
end
head do
url "https://github.com/go-gitea/gitea.git", :branch => "master"
depends_on "go" => :build
end
test do
system "#{bin}/gitea", "--version"
end
def install
case
when build.head?
# Lay out a GOPATH workspace with the repo at src/code.gitea.io/gitea.
mkdir_p buildpath/File.join("src", "code.gitea.io")
ln_s buildpath, buildpath/File.join("src", "code.gitea.io", "gitea")
ENV.append_path "PATH", File.join(buildpath, "bin")
ENV["GOPATH"] = buildpath
# NOTE(review): GOHOME is not a Go environment variable — verify needed.
ENV["GOHOME"] = buildpath
# Build tags: embed assets (bindata) and enable sqlite + tidb backends.
ENV["TAGS"] = "bindata sqlite tidb"
system "cd src/code.gitea.io/gitea && make generate build"
bin.install "#{buildpath}/gitea" => "gitea"
when build.devel?
bin.install "#{buildpath}/gitea-master-darwin-10.6-amd64" => "gitea"
else
bin.install "#{buildpath}/gitea-1.0.0-darwin-10.6-amd64" => "gitea"
end
end
end
|
Provisional implementation that fetches only the required information.
|
module DevFlow
  # Interactive console for the `dw` development-workflow tool. Wires
  # together configuration files, the project roadmap, a git console and the
  # member registry, and offers display / branch / progress helpers.
  class App
    attr_accessor :config, :roadmap, :logger, :command, :git, :members, :waiting

    # config  - Hash of parsed options (:verbose, :members_file,
    #           :local_config, :roadmap, :offline, ...)
    # command - the sub-command the user invoked
    def initialize config, command
      # FIX: was `@commnad`, so the :command accessor never saw the value.
      @config, @command = config, command

      # initialize logger
      @logger = Logger.new(STDOUT)
      @logger.level = config[:verbose] ? Logger::INFO : Logger::WARN
      @logger.formatter = proc {|severity, datetime, progname, msg| "#{msg.to_s}\n"}

      # initialize git console
      @git = DevFlow::Girc.new 'git', config[:verbose]
      error "Please use dw in a git directory" unless @git.in_git_dir?

      # load configurations (File.exist? — File.exists? is deprecated)
      if @config[:members_file] and File.exist?(@config[:members_file])
        info "Load member information from #{@config[:members_file]}"
        @config = @config.merge(YAML.load(File.open(@config[:members_file], 'r:utf-8').read))
      else
        warn "No member file to load"
      end

      if @config[:local_config] and File.exist?(@config[:local_config])
        info "Load local configuration from #{@config[:local_config]}"
        @config = @config.merge(YAML.load(File.open(@config[:local_config], 'r:utf-8').read))
      end

      # load roadmap, reload config (the roadmap may override settings)
      if @config[:roadmap] and File.exist?(@config[:roadmap])
        info "Load roadmap from #{@config[:roadmap]}"
        @roadmap = RoadMap.new(@config[:roadmap], @config).parse
        @config = @roadmap.config
        error "No leader defined for your project!" unless @config['leader']
      end

      # convert member list to member name=>object hash
      @members = Hash.new
      @config["members"].each do |name, ary|
        @members[name] = Member.new(name, *ary)
      end
      error "No known members defined!" unless all_member_names.size > 0

      if @config["whoami"]
        error "You (#{user_name}) are not in the known member list. You may use 'dw init' to setup the working environment." unless all_member_names.include? @config["whoami"]
      end

      # tasks the user may take, keyed by the number shown in the listings
      @waiting = Hash.new
    end

    # log message handler
    # ------------------------------

    # Logs a fatal message and terminates the program.
    def error msg
      @logger.fatal msg.bold.red
      exit
    end

    def warn msg
      @logger.warn msg.yellow
    end

    def info msg
      @logger.info msg
    end

    # helper function
    # ------------------------------
    def all_member_names
      @members.keys
    end

    # Display name of the current user, falling back to the raw whoami value.
    def user_name
      wi = @config["whoami"]
      @members[wi] ? @members[wi].display_name : wi
    end

    def leader_name
      @members[@config["leader"]].display_name
    end

    # The roadmap task whose branch matches the current git branch, or nil.
    def task
      @roadmap.tasks.each do |task|
        return task if task.branch_name == @git.current_branch
      end
      nil
    end

    def in_trunk?
      %w[master develop staging production].include? @git.current_branch
    end

    def in_release?
      task and task.is_release?
    end

    def i_am_leader?
      @config["whoami"] and @config["leader"] == @config["whoami"]
    end

    def i_am_moderator?
      @config["whoami"] and @config["moderator"] == @config["whoami"]
    end

    def i_am_supervisor?
      @config["whoami"] and @config["supervisor"] == @config["whoami"]
    end

    def i_have_power?
      [@config["leader"], @config["supervisor"], @config["moderator"]].include? @config["whoami"]
    end

    # Tasks marked 99% done: finished, waiting for review before closing.
    def tasks_for_close
      @roadmap.tasks.select {|task| task.progress == 99}
    end

    # display informations
    # -----------------------
    def hr; "-"*76 end
    def hrh; hr.bold end
    def hrb; "="*76 end
    def hrbh; hrb.bold end

    # Prints the greeting banner with branch, task and role information.
    def hello
      puts hrbh
      puts "Hello, #{user_name.bold}."
      puts "This is the DevFlow console, version: " + VERSION
      puts hrh
      puts "You are on branch #{@git.current_branch.bold.green}" if @git.current_branch
      puts "Your task is: #{self.task.display_name.bold}" if self.task
      puts "You are the #{'leader'.bold} of the project." if self.i_am_leader?
      puts "You are the #{'moderator'.bold} of the project." if self.i_am_moderator?
      puts "You are the #{'supervisor'.bold} of the project." if self.i_am_supervisor?
    end

    # Lists tasks awaiting close review; numbers them (and records them in
    # @waiting) only while the working directory is clean.
    def display_close_waiting
      return false unless self.tasks_for_close.size > 0
      puts "There are tasks marked completed that need your review:"
      i = 0
      self.tasks_for_close.each do |task|
        i += 1
        if @git.wd_clean?
          puts task.as_title i.to_s
          @waiting[i] = task
        else
          puts task.as_title " "
        end
      end
    end

    # Prints the task list: '+' for completed, '-' for not workable, and a
    # pick number for selectable tasks (stored in @waiting when clean).
    def display_tasks
      i = 0
      j = 0
      remain = 0
      @roadmap.tasks.each do |task|
        next if task.parent and task.parent.is_completed?
        next if task.is_pending? or task.is_deleted?
        if i > 31 # only show 32 task lines at most
          remain += 1
          next
        end
        header = nil
        header = '+'.bold.green if task.is_completed?
        header = '-' unless header or task.is_workable?
        unless header
          j += 1
          header = j.to_s.bold
          header = ' ' unless @git.wd_clean?
          @waiting[j] = task if @git.wd_clean?
        end
        puts task.as_title(header)
        i += 1
      end
      puts "There are #{remain.to_s.bold} more tasks not shown here." if remain > 0
    end

    # interactive methods with git remote server
    # ------------------------------------------------------

    # Rebases the working directory onto <git_remote>/develop, asking the
    # user first unless +force+; does nothing in offline mode.
    def ask_rebase force = false
      return false if @config[:offline]
      unless force
        print "Rebase your working directory? [Y/n]:".bold.yellow
        ans = STDIN.gets.chomp!
        return false if ans == 'n'
      end
      # do the rebase:
      puts "rebase your working directory from #{@config["git_remote"]}/develop"
      @git.rebase! @config["git_remote"], 'develop'
    end

    # Checks out +branch+, creating it first when it does not exist yet.
    def switch_to! branch
      if @git.branches.include? branch
        info "Switch to branch #{branch}"
        `git checkout #{branch}`
      else
        info "Switch to new branch #{branch}"
        `git checkout -b #{branch}`
      end
    end

    # Records +progress+ for +task+ in the roadmap on the develop branch,
    # commits and pushes it, then (unless the task is complete) returns to
    # the original branch and merges develop back in.
    def upload_progress! task, progress, is_complete = false
      current_branch = @git.current_branch
      switch_to! 'develop' unless current_branch == 'develop'

      info "Rewrite #{@config[:roadmap]} file"
      @roadmap.rewrite! task.ln => progress

      info "Set progress of #{task.display_name} to #{progress}"
      `git commit -am 'update progress of task #{task.branch_name} to #{progress}'`
      `git push #{@config[:git_remote]} develop` if @config[:git_remote]

      # if this is a complete update, do not switch back
      unless (is_complete or current_branch == 'develop')
        switch_to! current_branch
        `git merge develop`
      end
    end
  end # class
end
Fix progress upload: also push the current branch after merging develop back in.
module DevFlow
  # Interactive console for the `dw` development-workflow tool. Wires
  # together configuration files, the project roadmap, a git console and the
  # member registry, and offers display / branch / progress helpers.
  class App
    attr_accessor :config, :roadmap, :logger, :command, :git, :members, :waiting

    # config  - Hash of parsed options (:verbose, :members_file,
    #           :local_config, :roadmap, :offline, ...)
    # command - the sub-command the user invoked
    def initialize config, command
      # FIX: was `@commnad`, so the :command accessor never saw the value.
      @config, @command = config, command

      # initialize logger
      @logger = Logger.new(STDOUT)
      @logger.level = config[:verbose] ? Logger::INFO : Logger::WARN
      @logger.formatter = proc {|severity, datetime, progname, msg| "#{msg.to_s}\n"}

      # initialize git console
      @git = DevFlow::Girc.new 'git', config[:verbose]
      error "Please use dw in a git directory" unless @git.in_git_dir?

      # load configurations (File.exist? — File.exists? is deprecated)
      if @config[:members_file] and File.exist?(@config[:members_file])
        info "Load member information from #{@config[:members_file]}"
        @config = @config.merge(YAML.load(File.open(@config[:members_file], 'r:utf-8').read))
      else
        warn "No member file to load"
      end

      if @config[:local_config] and File.exist?(@config[:local_config])
        info "Load local configuration from #{@config[:local_config]}"
        @config = @config.merge(YAML.load(File.open(@config[:local_config], 'r:utf-8').read))
      end

      # load roadmap, reload config (the roadmap may override settings)
      if @config[:roadmap] and File.exist?(@config[:roadmap])
        info "Load roadmap from #{@config[:roadmap]}"
        @roadmap = RoadMap.new(@config[:roadmap], @config).parse
        @config = @roadmap.config
        error "No leader defined for your project!" unless @config['leader']
      end

      # convert member list to member name=>object hash
      @members = Hash.new
      @config["members"].each do |name, ary|
        @members[name] = Member.new(name, *ary)
      end
      error "No known members defined!" unless all_member_names.size > 0

      if @config["whoami"]
        error "You (#{user_name}) are not in the known member list. You may use 'dw init' to setup the working environment." unless all_member_names.include? @config["whoami"]
      end

      # tasks the user may take, keyed by the number shown in the listings
      @waiting = Hash.new
    end

    # log message handler
    # ------------------------------

    # Logs a fatal message and terminates the program.
    def error msg
      @logger.fatal msg.bold.red
      exit
    end

    def warn msg
      @logger.warn msg.yellow
    end

    def info msg
      @logger.info msg
    end

    # helper function
    # ------------------------------
    def all_member_names
      @members.keys
    end

    # Display name of the current user, falling back to the raw whoami value.
    def user_name
      wi = @config["whoami"]
      @members[wi] ? @members[wi].display_name : wi
    end

    def leader_name
      @members[@config["leader"]].display_name
    end

    # The roadmap task whose branch matches the current git branch, or nil.
    def task
      @roadmap.tasks.each do |task|
        return task if task.branch_name == @git.current_branch
      end
      nil
    end

    def in_trunk?
      %w[master develop staging production].include? @git.current_branch
    end

    def in_release?
      task and task.is_release?
    end

    def i_am_leader?
      @config["whoami"] and @config["leader"] == @config["whoami"]
    end

    def i_am_moderator?
      @config["whoami"] and @config["moderator"] == @config["whoami"]
    end

    def i_am_supervisor?
      @config["whoami"] and @config["supervisor"] == @config["whoami"]
    end

    def i_have_power?
      [@config["leader"], @config["supervisor"], @config["moderator"]].include? @config["whoami"]
    end

    # Tasks marked 99% done: finished, waiting for review before closing.
    def tasks_for_close
      @roadmap.tasks.select {|task| task.progress == 99}
    end

    # display informations
    # -----------------------
    def hr; "-"*76 end
    def hrh; hr.bold end
    def hrb; "="*76 end
    def hrbh; hrb.bold end

    # Prints the greeting banner with branch, task and role information.
    def hello
      puts hrbh
      puts "Hello, #{user_name.bold}."
      puts "This is the DevFlow console, version: " + VERSION
      puts hrh
      puts "You are on branch #{@git.current_branch.bold.green}" if @git.current_branch
      puts "Your task is: #{self.task.display_name.bold}" if self.task
      puts "You are the #{'leader'.bold} of the project." if self.i_am_leader?
      puts "You are the #{'moderator'.bold} of the project." if self.i_am_moderator?
      puts "You are the #{'supervisor'.bold} of the project." if self.i_am_supervisor?
    end

    # Lists tasks awaiting close review; numbers them (and records them in
    # @waiting) only while the working directory is clean.
    def display_close_waiting
      return false unless self.tasks_for_close.size > 0
      puts "There are tasks marked completed that need your review:"
      i = 0
      self.tasks_for_close.each do |task|
        i += 1
        if @git.wd_clean?
          puts task.as_title i.to_s
          @waiting[i] = task
        else
          puts task.as_title " "
        end
      end
    end

    # Prints the task list: '+' for completed, '-' for not workable, and a
    # pick number for selectable tasks (stored in @waiting when clean).
    def display_tasks
      i = 0
      j = 0
      remain = 0
      @roadmap.tasks.each do |task|
        next if task.parent and task.parent.is_completed?
        next if task.is_pending? or task.is_deleted?
        if i > 31 # only show 32 task lines at most
          remain += 1
          next
        end
        header = nil
        header = '+'.bold.green if task.is_completed?
        header = '-' unless header or task.is_workable?
        unless header
          j += 1
          header = j.to_s.bold
          header = ' ' unless @git.wd_clean?
          @waiting[j] = task if @git.wd_clean?
        end
        puts task.as_title(header)
        i += 1
      end
      puts "There are #{remain.to_s.bold} more tasks not shown here." if remain > 0
    end

    # interactive methods with git remote server
    # ------------------------------------------------------

    # Rebases the working directory onto <git_remote>/develop, asking the
    # user first unless +force+; does nothing in offline mode.
    def ask_rebase force = false
      return false if @config[:offline]
      unless force
        print "Rebase your working directory? [Y/n]:".bold.yellow
        ans = STDIN.gets.chomp!
        return false if ans == 'n'
      end
      # do the rebase:
      puts "rebase your working directory from #{@config["git_remote"]}/develop"
      @git.rebase! @config["git_remote"], 'develop'
    end

    # Checks out +branch+, creating it first when it does not exist yet.
    def switch_to! branch
      if @git.branches.include? branch
        info "Switch to branch #{branch}"
        `git checkout #{branch}`
      else
        info "Switch to new branch #{branch}"
        `git checkout -b #{branch}`
      end
    end

    # Records +progress+ for +task+ in the roadmap on the develop branch,
    # commits and pushes it, then (unless the task is complete) returns to
    # the original branch, merges develop back in and pushes that branch too.
    def upload_progress! task, progress, is_complete = false
      current_branch = @git.current_branch
      switch_to! 'develop' unless current_branch == 'develop'

      info "Rewrite #{@config[:roadmap]} file"
      @roadmap.rewrite! task.ln => progress

      info "Set progress of #{task.display_name} to #{progress}"
      `git commit -am 'update progress of task #{task.branch_name} to #{progress}'`
      `git push #{@config[:git_remote]} develop` if @config[:git_remote]

      # if this is a complete update, do not switch back
      unless (is_complete or current_branch == 'develop')
        switch_to! current_branch
        `git merge develop`
        `git push #{@config[:git_remote]} #{current_branch}` if @config[:git_remote]
      end
    end
  end # class
end
|
require 'rubygems'
require 'diff/lcs'
require File.dirname(__FILE__) + '/result_processor'
# HTML-escapes diff text for safe embedding in the generated markup.
# NOTE(review): the gsub's two arguments render identically here — the second
# looks like it should be a non-breaking space ("\u00A0") so that runs of
# spaces survive in HTML; confirm the literal was not mangled in transit.
def escape_content(s)
CGI.escapeHTML(s).gsub(" ", " ")
end
class DiffToHtml
INTEGRATION_MAP = {
:mediawiki => { :search_for => /\[\[([^\[\]]+)\]\]/, :replace_with => '#{url}/\1' },
:redmine => { :search_for => /\b(?:refs|fixes)\s*\#(\d+)/i, :replace_with => '#{url}/issues/show/\1' },
:bugzilla => { :search_for => /\bBUG\s*(\d+)/i, :replace_with => '#{url}/show_bug.cgi?id=\1' }
}.freeze
attr_accessor :file_prefix, :current_file_name
attr_reader :result
# previous_dir - directory holding the "previously.txt" seen-commit cache;
#               nil falls back to /tmp (see diff_between_revisions).
# config      - options hash ('lines_per_diff', 'link_files', ...); nil
#               becomes an empty hash.
def initialize(previous_dir = nil, config = nil)
@previous_dir = previous_dir
@config = config || {}
@lines_added = 0
@file_added = false
@file_removed = false
@binary = false
end
# Parse a unified-diff hunk header ("@@ -12,5 +13,6 @@") and return the
# two starting line numbers [old_start, new_start] as integers.
def range_info(range)
  md = range.match(/^@@ \-(\S+) \+(\S+)/)
  [md[1], md[2]].map { |part| part.split(',').first.to_i }
end
# CSS class attribute for a diff row: removals get "r", additions get "a",
# unchanged lines get no class at all.
def line_class(line)
  case line[:op]
  when :removal  then " class=\"r\""
  when :addition then " class=\"a\""
  else ''
  end
end
# Append every line hash in +block+ to the HTML result table.
# escape - whether each line's content must be HTML-escaped first.
def add_block_to_results(block, escape)
return if block.empty?
block.each do |line|
add_line_to_result(line, escape)
end
end
# Configured cap on emitted diff lines; nil means unlimited.
def lines_per_diff
@config['lines_per_diff']
end
# Emit a "skipped content" separator row between non-adjacent hunks,
# unless the diff-size cap has already been reached.
def add_separator
return if lines_per_diff && @lines_added >= lines_per_diff
@diff_result << '<tr class="sep"><td class="sep" colspan="3" title="Unchanged content skipped between diff. blocks">…</td></tr>'
end
# Render one diff line as an HTML table row (old line no., new line no.,
# content). Enforces the lines_per_diff cap: emits a "too large" marker
# exactly once when the cap is hit, then drops further lines.
def add_line_to_result(line, escape)
@lines_added += 1
if lines_per_diff
if @lines_added == lines_per_diff
@diff_result << '<tr><td colspan="3">Diff too large and stripped…</td></tr>'
end
if @lines_added >= lines_per_diff
return
end
end
klass = line_class(line)
content = escape ? escape_content(line[:content]) : line[:content]
# changed rows get a leading pad so +/- content aligns with context lines
padding = ' ' if klass != ''
@diff_result << "<tr#{klass}>\n<td class=\"ln\">#{line[:removed]}</td>\n<td class=\"ln\">#{line[:added]}</td>\n<td>#{padding}#{content}</td></tr>"
end
# Join the :content of every line hash in +block+ into one
# newline-separated string.
def extract_block_content(block)
  block.map { |entry| entry[:content] }.join("\n")
end
# Word-level diff of a paired removal/addition block using Diff::LCS.
# Returns an array of line hashes (:removed/:added line numbers, :op and
# HTML-ready :content) produced by ResultProcessor.
def lcs_diff(removals, additions)
# arrays always have at least 1 element
callback = DiffCallback.new
s1 = extract_block_content(removals)
s2 = extract_block_content(additions)
# compare token streams, not raw strings, so the LCS works word-by-word
s1 = tokenize_string(s1)
s2 = tokenize_string(s2)
Diff::LCS.traverse_balanced(s1, s2, callback)
processor = ResultProcessor.new(callback.tags)
diff_for_removals, diff_for_additions = processor.results
result = []
# line numbering restarts from the first removed/added line of the block
ln_start = removals[0][:removed]
diff_for_removals.each_with_index do |line, i|
result << { :removed => ln_start + i, :added => nil, :op => :removal, :content => line}
end
ln_start = additions[0][:added]
diff_for_additions.each_with_index do |line, i|
result << { :removed => nil, :added => ln_start + i, :op => :addition, :content => line}
end
result
end
# Split +str+ into tokens: maximal runs of word characters (underscore
# excluded) stay together, every other character becomes its own token.
def tokenize_string(str)
  tokens = []
  current = ''
  str.scan(/./mu) do |char|
    if char =~ /[^\W_]/u
      current << char
    else
      tokens << current unless current.empty?
      current = ''
      tokens << char
    end
  end
  tokens << current unless current.empty?
  tokens
end
# One-line HTML header describing the current file's change:
# "Deleted/Added/Changed [binary ]file <name>", with the file name turned
# into a gitweb/gitorious/cgit link when so configured.
def operation_description
binary = @binary ? 'binary ' : ''
if @file_removed
op = "Deleted"
elsif @file_added
op = "Added"
else
op = "Changed"
end
file_name = @current_file_name
if (@config["link_files"] && @config["link_files"] == "gitweb" && @config["gitweb"])
file_name = "<a href='#{@config['gitweb']['path']}?p=#{@config['gitweb']['project']};f=#{file_name};hb=HEAD'>#{file_name}</a>"
elsif (@config["link_files"] && @config["link_files"] == "gitorious" && @config["gitorious"])
file_name = "<a href='#{@config['gitorious']['path']}/#{@config['gitorious']['project']}/#{@config['gitorious']['repository']}/blobs/HEAD/#{file_name}'>#{file_name}</a>"
elsif (@config["link_files"] && @config["link_files"] == "cgit" && @config["cgit"])
file_name = "<a href='#{@config['cgit']['path']}/#{@config['cgit']['project']}/tree/#{file_name}'>#{file_name}</a>"
end
header = "#{op} #{binary}file #{file_name}"
"<h2>#{header}</h2>\n"
end
# True when +second+ directly follows +first+ on either the added or the
# removed side (its line number is exactly one greater).
def lines_are_sequential?(first, second)
  [:added, :removed].any? do |side|
    first[side] && second[side] && first[side] == second[side] - 1
  end
end
# Flush the accumulated @diff_lines of the current file into @diff_result
# as one HTML table, running a word-level LCS diff on paired
# removal/addition runs, then reset the per-file state flags.
def add_changes_to_result
return if @current_file_name.nil?
@diff_result << operation_description
@diff_result << '<table>'
unless @diff_lines.empty?
removals = []
additions = []
@diff_lines.each_with_index do |line, index|
removals << line if line[:op] == :removal
additions << line if line[:op] == :addition
if line[:op] == :unchanged || index == @diff_lines.size - 1 # unchanged line or end of block, add prev lines to result
if removals.size > 0 && additions.size > 0 # block of removed and added lines - perform intelligent diff
add_block_to_results(lcs_diff(removals, additions), escape = false)
else # some lines removed or added - no need to perform intelligent diff
add_block_to_results(removals + additions, escape = true)
end
removals = []
additions = []
if index > 0 && index != @diff_lines.size - 1
prev_line = @diff_lines[index - 1]
# non-adjacent hunks get a visual "skipped content" separator row
add_separator unless lines_are_sequential?(prev_line, line)
end
add_line_to_result(line, escape = true) if line[:op] == :unchanged
end
end
@diff_lines = []
@diff_result << '</table>'
end
# reset values
@right_ln = nil
@left_ln = nil
@file_added = false
@file_removed = false
@binary = false
end
# Convert one revision's raw diff text into the HTML fragment covering all
# of its files. Returns the joined @diff_result string.
def diff_for_revision(content)
@left_ln = @right_ln = nil
@diff_result = []
@diff_lines = []
@removed_files = []
@current_file_name = nil
content.split("\n").each do |line|
if line =~ /^diff\s\-\-git/
line.match(/diff --git a\/(.*)\sb\//)
file_name = $1
# a new file starts: flush whatever was collected for the previous one
add_changes_to_result
@current_file_name = file_name
end
op = line[0,1]
# header lines (and '@' hunk markers) vs. actual +/-/context body lines
@left_ln.nil? || op == '@' ? process_info_line(line, op) : process_code_line(line, op)
end
add_changes_to_result
@diff_result.join("\n")
end
# Record one diff body line into @diff_lines, advancing the running
# left/right line counters according to its leading +/- marker.
def process_code_line(line, op)
if op == '-'
@diff_lines << { :removed => @left_ln, :added => nil, :op => :removal, :content => line[1..-1] }
@left_ln += 1
elsif op == '+'
@diff_lines << { :added => @right_ln, :removed => nil, :op => :addition, :content => line[1..-1] }
@right_ln += 1
else
@diff_lines << { :added => @right_ln, :removed => @left_ln, :op => :unchanged, :content => line }
@right_ln += 1
@left_ln += 1
end
end
# Inspect a diff header line and update the per-file flags (@file_added,
# @file_removed, @binary); '@' hunk headers reset the line counters.
def process_info_line(line, op)
if line =~/^deleted\sfile\s/
@file_removed = true
elsif line =~ /^\-\-\-\s/ && line =~ /\/dev\/null/
@file_added = true
elsif line =~ /^\+\+\+\s/ && line =~ /\/dev\/null/
@file_removed = true
elsif line =~ /^Binary files \/dev\/null/ # Binary files /dev/null and ... differ (addition)
@binary = true
@file_added = true
elsif line =~ /\/dev\/null differ/ # Binary files ... and /dev/null differ (removal)
@binary = true
@file_removed = true
elsif op == '@'
@left_ln, @right_ln = range_info(line)
end
end
# Drop everything before the first "diff --git" line of a `git show`
# output and return the remaining diff text (empty when no diff present).
def extract_diff_from_git_show_output(content)
  content.split("\n").drop_while { |line| line !~ /^diff \-\-git/ }.join("\n")
end
# Parse the header of `git show` output into a hash with :commit, :author,
# :email, :date, optional :merge and :message (array of non-empty lines).
# Parsing stops at the first "diff" line.
def extract_commit_info_from_git_show_output(content)
  info = { :message => [], :commit => '', :author => '', :date => '', :email => '' }
  content.split("\n").each do |line|
    case line
    when /^diff/   # end of commit header, the diff body follows
      return info
    when /^commit/
      info[:commit] = line[7..-1]
    when /^Author/
      info[:author], info[:email] = author_name_and_email(line[8..-1])
    when /^Date/
      info[:date] = line[8..-1]
    when /^Merge/
      info[:merge] = line[8..-1]
    else
      stripped = line.strip
      info[:message] << stripped unless stripped.empty?
    end
  end
  info
end
# HTML-escape each commit-message line, join them with <br /> and run the
# configured issue-tracker/wiki substitutions via message_map.
def message_array_as_html(message)
message_map(message.collect { |m| CGI.escapeHTML(m)}.join("<br />"))
end
# Split "Author Name <author@email.net>" into [name, email].
# Malformed input is returned as [input, ''].
def author_name_and_email(info)
  name, email = info.scan(/(.*)\s<(.*)>/)[0]
  return [name, email] if name && email # normal operation
  # incomplete author info - return it as the author name
  [info, '']
end
# First line of the commit message, stripped — except that empty and
# "Merge:" messages are returned unmodified.
def first_sentence(message_array)
  head = message_array.first
  stripped = head.to_s.strip
  return head if stripped.empty? || stripped =~ /^Merge\:/
  stripped
end
# Build @result: one {:commit_info, :html_content, :text_content} entry per
# commit between rev1 and rev2, skipping commits already recorded in the
# "previously.txt" bookkeeping file.
#
# rev1, rev2 - old/new revision SHAs from the git hook; an all-zero SHA
#              marks branch creation (rev1) or deletion (rev2).
# repo       - unused here; kept for call-site compatibility.
# branch     - branch name, used for display and branch-commit lookup.
def diff_between_revisions(rev1, rev2, repo, branch)
  @result = []
  if rev1 == rev2
    commits = [rev1]
  elsif rev1 =~ /^0+$/
    # creating a new remote branch
    commits = Git.branch_commits(branch)
  elsif rev2 =~ /^0+$/
    # deleting an existing remote branch
    commits = []
  else
    log = Git.log(rev1, rev2)
    commits = log.scan(/^commit\s([a-f0-9]+)/).map { |match| match[0] }
  end
  if defined?(Spec)
    previous_list = []
  else
    # File.exist? (not the deprecated File.exists?) for consistency with the
    # check on the following line.
    previous_file = (!@previous_dir.nil? && File.exist?(@previous_dir)) ? File.join(@previous_dir, "previously.txt") : "/tmp/previously.txt"
    # File.readlines replaces File.read(...).to_a: String#to_a was removed in
    # Ruby 1.9, while readlines behaves identically on every Ruby version.
    previous_list = (File.readlines(previous_file).map { |sha| sha.chomp }.compact.uniq if File.exist?(previous_file)) || []
  end
  # NOTE(review): c.find over a plain String only works via Ruby 1.8's
  # String#each — confirm entry type before upgrading the interpreter.
  commits.reject! { |c| c.find { |sha| previous_list.include?(sha) } }
  current_list = (previous_list + commits.flatten).last(10000)
  File.open(previous_file, "w") { |f| f << current_list.join("\n") } unless current_list.empty? || defined?(Spec)
  commits.each do |commit|
    raw_diff = Git.show(commit)
    raise "git show output is empty" if raw_diff.empty?
    @last_raw = raw_diff
    commit_info = extract_commit_info_from_git_show_output(raw_diff)
    title = "<div class=\"title\">"
    title += "<strong>Message:</strong> #{message_array_as_html commit_info[:message]}<br />\n"
    title += "<strong>Commit:</strong> "
    if (@config["link_files"] && @config["link_files"] == "gitweb" && @config["gitweb"])
      title += "<a href='#{@config['gitweb']['path']}?p=#{@config['gitweb']['project']};a=commitdiff;h=#{commit_info[:commit]}'>#{commit_info[:commit]}</a>"
    elsif (@config["link_files"] && @config["link_files"] == "gitorious" && @config["gitorious"])
      title += "<a href='#{@config['gitorious']['path']}/#{@config['gitorious']['project']}/#{@config['gitorious']['repository']}/commit/#{commit_info[:commit]}'>#{commit_info[:commit]}</a>"
    elsif (@config["link_files"] && @config["link_files"] == "trac" && @config["trac"])
      title += "<a href='#{@config['trac']['path']}/#{commit_info[:commit]}'>#{commit_info[:commit]}</a>"
    elsif (@config["link_files"] && @config["link_files"] == "cgit" && @config["cgit"])
      title += "<a href='#{@config['cgit']['path']}/#{@config['cgit']['project']}/commit/?id=#{commit_info[:commit]}'>#{commit_info[:commit]}</a>"
    else
      title += " #{commit_info[:commit]}"
    end
    title += "<br />\n"
    title += "<strong>Branch:</strong> #{branch}\n<br />" unless branch =~ /\/head/
    title += "<strong>Date:</strong> #{CGI.escapeHTML commit_info[:date]}\n<br />"
    title += "<strong>Author:</strong> #{CGI.escapeHTML(commit_info[:author])} <#{commit_info[:email]}>\n</div>"
    text = "#{raw_diff}\n\n\n"
    html = title
    html += diff_for_revision(extract_diff_from_git_show_output(raw_diff))
    html += "<br /><br />"
    commit_info[:message] = first_sentence(commit_info[:message])
    @result << { :commit_info => commit_info, :html_content => html, :text_content => text }
  end
end
# Destructively wrap every match of +search_for+ inside +message+ in a link
# pointing at +replace_with+. Returns nil when nothing matched (gsub!
# semantics).
def message_replace!(message, search_for, replace_with)
  anchor = "<a href=\"#{replace_with}\">\\0</a>"
  message.gsub!(Regexp.new(search_for), anchor)
end
# Apply the configured issue-tracker / wiki link substitutions to +message+
# in place and return it.
#
# The respond_to?(:each_pair) guards protect against a config key that is
# present but not a hash (e.g. left empty in the YAML config), which used
# to crash here (git-commit-notifier issue #25).
def message_map(message)
  if @config.include?('message_integration') && @config['message_integration'].respond_to?(:each_pair)
    @config['message_integration'].each_pair do |pm, url|
      # unknown integration names are silently skipped
      pm_def = DiffToHtml::INTEGRATION_MAP[pm.to_sym] or next
      replace_with = pm_def[:replace_with].gsub('#{url}', url)
      message_replace!(message, pm_def[:search_for], replace_with)
    end
  end
  if @config.include?('message_map') && @config['message_map'].respond_to?(:each_pair)
    @config['message_map'].each_pair do |search_for, replace_with|
      message_replace!(message, Regexp.new(search_for), replace_with)
    end
  end
  message
end
end
# Collects Diff::LCS traversal events as a flat list of tagged tokens,
# later consumed by ResultProcessor.
class DiffCallback
  attr_reader :tags

  def initialize
    @tags = []
  end

  # Token present in both sequences.
  def match(event)
    record(:match, event.old_element)
  end

  # Token only present in the new sequence (addition).
  def discard_b(event)
    record(:discard_b, event.new_element)
  end

  # Token only present in the old sequence (removal).
  def discard_a(event)
    record(:discard_a, event.old_element)
  end

  private

  # Append one {:action, :token} entry to the tag list.
  def record(action, token)
    @tags << { :action => action, :token => token }
  end
end
Fixes http://github.com/bitboxer/git-commit-notifier/issues/25
require 'rubygems'
require 'diff/lcs'
require File.dirname(__FILE__) + '/result_processor'
# HTML-escape +s+ so it can be embedded safely in the diff table, then
# replace spaces to preserve indentation in rendered HTML.
def escape_content(s)
  escaped = CGI.escapeHTML(s)
  escaped.gsub(" ", " ")
end
class DiffToHtml
INTEGRATION_MAP = {
:mediawiki => { :search_for => /\[\[([^\[\]]+)\]\]/, :replace_with => '#{url}/\1' },
:redmine => { :search_for => /\b(?:refs|fixes)\s*\#(\d+)/i, :replace_with => '#{url}/issues/show/\1' },
:bugzilla => { :search_for => /\bBUG\s*(\d+)/i, :replace_with => '#{url}/show_bug.cgi?id=\1' }
}.freeze
attr_accessor :file_prefix, :current_file_name
attr_reader :result
def initialize(previous_dir = nil, config = nil)
@previous_dir = previous_dir
@config = config || {}
@lines_added = 0
@file_added = false
@file_removed = false
@binary = false
end
# Parse a unified-diff hunk header ("@@ -12,5 +13,6 @@") and return the
# two starting line numbers [old_start, new_start] as integers.
def range_info(range)
  md = range.match(/^@@ \-(\S+) \+(\S+)/)
  [md[1], md[2]].map { |part| part.split(',').first.to_i }
end
# CSS class attribute for a diff row: removals get "r", additions get "a",
# unchanged lines get no class at all.
def line_class(line)
  case line[:op]
  when :removal  then " class=\"r\""
  when :addition then " class=\"a\""
  else ''
  end
end
def add_block_to_results(block, escape)
return if block.empty?
block.each do |line|
add_line_to_result(line, escape)
end
end
def lines_per_diff
@config['lines_per_diff']
end
def add_separator
return if lines_per_diff && @lines_added >= lines_per_diff
@diff_result << '<tr class="sep"><td class="sep" colspan="3" title="Unchanged content skipped between diff. blocks">…</td></tr>'
end
def add_line_to_result(line, escape)
@lines_added += 1
if lines_per_diff
if @lines_added == lines_per_diff
@diff_result << '<tr><td colspan="3">Diff too large and stripped…</td></tr>'
end
if @lines_added >= lines_per_diff
return
end
end
klass = line_class(line)
content = escape ? escape_content(line[:content]) : line[:content]
padding = ' ' if klass != ''
@diff_result << "<tr#{klass}>\n<td class=\"ln\">#{line[:removed]}</td>\n<td class=\"ln\">#{line[:added]}</td>\n<td>#{padding}#{content}</td></tr>"
end
# Join the :content of every line hash in +block+ into one
# newline-separated string.
def extract_block_content(block)
  block.map { |entry| entry[:content] }.join("\n")
end
def lcs_diff(removals, additions)
# arrays always have at least 1 element
callback = DiffCallback.new
s1 = extract_block_content(removals)
s2 = extract_block_content(additions)
s1 = tokenize_string(s1)
s2 = tokenize_string(s2)
Diff::LCS.traverse_balanced(s1, s2, callback)
processor = ResultProcessor.new(callback.tags)
diff_for_removals, diff_for_additions = processor.results
result = []
ln_start = removals[0][:removed]
diff_for_removals.each_with_index do |line, i|
result << { :removed => ln_start + i, :added => nil, :op => :removal, :content => line}
end
ln_start = additions[0][:added]
diff_for_additions.each_with_index do |line, i|
result << { :removed => nil, :added => ln_start + i, :op => :addition, :content => line}
end
result
end
# Split +str+ into tokens: maximal runs of word characters (underscore
# excluded) stay together, every other character becomes its own token.
def tokenize_string(str)
  tokens = []
  current = ''
  str.scan(/./mu) do |char|
    if char =~ /[^\W_]/u
      current << char
    else
      tokens << current unless current.empty?
      current = ''
      tokens << char
    end
  end
  tokens << current unless current.empty?
  tokens
end
def operation_description
binary = @binary ? 'binary ' : ''
if @file_removed
op = "Deleted"
elsif @file_added
op = "Added"
else
op = "Changed"
end
file_name = @current_file_name
if (@config["link_files"] && @config["link_files"] == "gitweb" && @config["gitweb"])
file_name = "<a href='#{@config['gitweb']['path']}?p=#{@config['gitweb']['project']};f=#{file_name};hb=HEAD'>#{file_name}</a>"
elsif (@config["link_files"] && @config["link_files"] == "gitorious" && @config["gitorious"])
file_name = "<a href='#{@config['gitorious']['path']}/#{@config['gitorious']['project']}/#{@config['gitorious']['repository']}/blobs/HEAD/#{file_name}'>#{file_name}</a>"
elsif (@config["link_files"] && @config["link_files"] == "cgit" && @config["cgit"])
file_name = "<a href='#{@config['cgit']['path']}/#{@config['cgit']['project']}/tree/#{file_name}'>#{file_name}</a>"
end
header = "#{op} #{binary}file #{file_name}"
"<h2>#{header}</h2>\n"
end
# True when +second+ directly follows +first+ on either the added or the
# removed side (its line number is exactly one greater).
def lines_are_sequential?(first, second)
  [:added, :removed].any? do |side|
    first[side] && second[side] && first[side] == second[side] - 1
  end
end
def add_changes_to_result
return if @current_file_name.nil?
@diff_result << operation_description
@diff_result << '<table>'
unless @diff_lines.empty?
removals = []
additions = []
@diff_lines.each_with_index do |line, index|
removals << line if line[:op] == :removal
additions << line if line[:op] == :addition
if line[:op] == :unchanged || index == @diff_lines.size - 1 # unchanged line or end of block, add prev lines to result
if removals.size > 0 && additions.size > 0 # block of removed and added lines - perform intelligent diff
add_block_to_results(lcs_diff(removals, additions), escape = false)
else # some lines removed or added - no need to perform intelligent diff
add_block_to_results(removals + additions, escape = true)
end
removals = []
additions = []
if index > 0 && index != @diff_lines.size - 1
prev_line = @diff_lines[index - 1]
add_separator unless lines_are_sequential?(prev_line, line)
end
add_line_to_result(line, escape = true) if line[:op] == :unchanged
end
end
@diff_lines = []
@diff_result << '</table>'
end
# reset values
@right_ln = nil
@left_ln = nil
@file_added = false
@file_removed = false
@binary = false
end
def diff_for_revision(content)
@left_ln = @right_ln = nil
@diff_result = []
@diff_lines = []
@removed_files = []
@current_file_name = nil
content.split("\n").each do |line|
if line =~ /^diff\s\-\-git/
line.match(/diff --git a\/(.*)\sb\//)
file_name = $1
add_changes_to_result
@current_file_name = file_name
end
op = line[0,1]
@left_ln.nil? || op == '@' ? process_info_line(line, op) : process_code_line(line, op)
end
add_changes_to_result
@diff_result.join("\n")
end
def process_code_line(line, op)
if op == '-'
@diff_lines << { :removed => @left_ln, :added => nil, :op => :removal, :content => line[1..-1] }
@left_ln += 1
elsif op == '+'
@diff_lines << { :added => @right_ln, :removed => nil, :op => :addition, :content => line[1..-1] }
@right_ln += 1
else
@diff_lines << { :added => @right_ln, :removed => @left_ln, :op => :unchanged, :content => line }
@right_ln += 1
@left_ln += 1
end
end
def process_info_line(line, op)
if line =~/^deleted\sfile\s/
@file_removed = true
elsif line =~ /^\-\-\-\s/ && line =~ /\/dev\/null/
@file_added = true
elsif line =~ /^\+\+\+\s/ && line =~ /\/dev\/null/
@file_removed = true
elsif line =~ /^Binary files \/dev\/null/ # Binary files /dev/null and ... differ (addition)
@binary = true
@file_added = true
elsif line =~ /\/dev\/null differ/ # Binary files ... and /dev/null differ (removal)
@binary = true
@file_removed = true
elsif op == '@'
@left_ln, @right_ln = range_info(line)
end
end
# Drop everything before the first "diff --git" line of a `git show`
# output and return the remaining diff text (empty when no diff present).
def extract_diff_from_git_show_output(content)
  content.split("\n").drop_while { |line| line !~ /^diff \-\-git/ }.join("\n")
end
# Parse the header of `git show` output into a hash with :commit, :author,
# :email, :date, optional :merge and :message (array of non-empty lines).
# Parsing stops at the first "diff" line.
def extract_commit_info_from_git_show_output(content)
  info = { :message => [], :commit => '', :author => '', :date => '', :email => '' }
  content.split("\n").each do |line|
    case line
    when /^diff/   # end of commit header, the diff body follows
      return info
    when /^commit/
      info[:commit] = line[7..-1]
    when /^Author/
      info[:author], info[:email] = author_name_and_email(line[8..-1])
    when /^Date/
      info[:date] = line[8..-1]
    when /^Merge/
      info[:merge] = line[8..-1]
    else
      stripped = line.strip
      info[:message] << stripped unless stripped.empty?
    end
  end
  info
end
def message_array_as_html(message)
message_map(message.collect { |m| CGI.escapeHTML(m)}.join("<br />"))
end
# Split "Author Name <author@email.net>" into [name, email].
# Malformed input is returned as [input, ''].
def author_name_and_email(info)
  name, email = info.scan(/(.*)\s<(.*)>/)[0]
  return [name, email] if name && email # normal operation
  # incomplete author info - return it as the author name
  [info, '']
end
# First line of the commit message, stripped — except that empty and
# "Merge:" messages are returned unmodified.
def first_sentence(message_array)
  head = message_array.first
  stripped = head.to_s.strip
  return head if stripped.empty? || stripped =~ /^Merge\:/
  stripped
end
# Build @result: one {:commit_info, :html_content, :text_content} entry per
# commit between rev1 and rev2, skipping commits already recorded in the
# "previously.txt" bookkeeping file.
#
# rev1, rev2 - old/new revision SHAs from the git hook; an all-zero SHA
#              marks branch creation (rev1) or deletion (rev2).
# repo       - unused here; kept for call-site compatibility.
# branch     - branch name, used for display and branch-commit lookup.
def diff_between_revisions(rev1, rev2, repo, branch)
  @result = []
  if rev1 == rev2
    commits = [rev1]
  elsif rev1 =~ /^0+$/
    # creating a new remote branch
    commits = Git.branch_commits(branch)
  elsif rev2 =~ /^0+$/
    # deleting an existing remote branch
    commits = []
  else
    log = Git.log(rev1, rev2)
    commits = log.scan(/^commit\s([a-f0-9]+)/).map { |match| match[0] }
  end
  if defined?(Spec)
    previous_list = []
  else
    # File.exist? (not the deprecated File.exists?) for consistency with the
    # check on the following line.
    previous_file = (!@previous_dir.nil? && File.exist?(@previous_dir)) ? File.join(@previous_dir, "previously.txt") : "/tmp/previously.txt"
    # File.readlines replaces File.read(...).to_a: String#to_a was removed in
    # Ruby 1.9, while readlines behaves identically on every Ruby version.
    previous_list = (File.readlines(previous_file).map { |sha| sha.chomp }.compact.uniq if File.exist?(previous_file)) || []
  end
  # NOTE(review): c.find over a plain String only works via Ruby 1.8's
  # String#each — confirm entry type before upgrading the interpreter.
  commits.reject! { |c| c.find { |sha| previous_list.include?(sha) } }
  current_list = (previous_list + commits.flatten).last(10000)
  File.open(previous_file, "w") { |f| f << current_list.join("\n") } unless current_list.empty? || defined?(Spec)
  commits.each do |commit|
    raw_diff = Git.show(commit)
    raise "git show output is empty" if raw_diff.empty?
    @last_raw = raw_diff
    commit_info = extract_commit_info_from_git_show_output(raw_diff)
    title = "<div class=\"title\">"
    title += "<strong>Message:</strong> #{message_array_as_html commit_info[:message]}<br />\n"
    title += "<strong>Commit:</strong> "
    if (@config["link_files"] && @config["link_files"] == "gitweb" && @config["gitweb"])
      title += "<a href='#{@config['gitweb']['path']}?p=#{@config['gitweb']['project']};a=commitdiff;h=#{commit_info[:commit]}'>#{commit_info[:commit]}</a>"
    elsif (@config["link_files"] && @config["link_files"] == "gitorious" && @config["gitorious"])
      title += "<a href='#{@config['gitorious']['path']}/#{@config['gitorious']['project']}/#{@config['gitorious']['repository']}/commit/#{commit_info[:commit]}'>#{commit_info[:commit]}</a>"
    elsif (@config["link_files"] && @config["link_files"] == "trac" && @config["trac"])
      title += "<a href='#{@config['trac']['path']}/#{commit_info[:commit]}'>#{commit_info[:commit]}</a>"
    elsif (@config["link_files"] && @config["link_files"] == "cgit" && @config["cgit"])
      title += "<a href='#{@config['cgit']['path']}/#{@config['cgit']['project']}/commit/?id=#{commit_info[:commit]}'>#{commit_info[:commit]}</a>"
    else
      title += " #{commit_info[:commit]}"
    end
    title += "<br />\n"
    title += "<strong>Branch:</strong> #{branch}\n<br />" unless branch =~ /\/head/
    title += "<strong>Date:</strong> #{CGI.escapeHTML commit_info[:date]}\n<br />"
    title += "<strong>Author:</strong> #{CGI.escapeHTML(commit_info[:author])} <#{commit_info[:email]}>\n</div>"
    text = "#{raw_diff}\n\n\n"
    html = title
    html += diff_for_revision(extract_diff_from_git_show_output(raw_diff))
    html += "<br /><br />"
    commit_info[:message] = first_sentence(commit_info[:message])
    @result << { :commit_info => commit_info, :html_content => html, :text_content => text }
  end
end
# Destructively wrap every match of +search_for+ inside +message+ in a link
# pointing at +replace_with+. Returns nil when nothing matched (gsub!
# semantics).
def message_replace!(message, search_for, replace_with)
  anchor = "<a href=\"#{replace_with}\">\\0</a>"
  message.gsub!(Regexp.new(search_for), anchor)
end
# Apply the configured issue-tracker / wiki link substitutions to +message+
# in place and return it. The respond_to?(:each_pair) guards protect
# against config keys that exist but are not hashes (issue #25 fix).
def message_map(message)
if @config.include?('message_integration') && @config['message_integration'].respond_to?(:each_pair)
@config['message_integration'].each_pair do |pm, url|
# unknown integration names are silently skipped
pm_def = DiffToHtml::INTEGRATION_MAP[pm.to_sym] or next
replace_with = pm_def[:replace_with].gsub('#{url}', url)
message_replace!(message, pm_def[:search_for], replace_with)
end
end
if @config.include?('message_map') && @config['message_map'].respond_to?(:each_pair)
@config['message_map'].each_pair do |search_for, replace_with|
message_replace!(message, Regexp.new(search_for), replace_with)
end
end
message
end
end
# Collects Diff::LCS traversal events as a flat list of tagged tokens,
# later consumed by ResultProcessor.
class DiffCallback
  attr_reader :tags

  def initialize
    @tags = []
  end

  # Token present in both sequences.
  def match(event)
    record(:match, event.old_element)
  end

  # Token only present in the new sequence (addition).
  def discard_b(event)
    record(:discard_b, event.new_element)
  end

  # Token only present in the old sequence (removal).
  def discard_a(event)
    record(:discard_a, event.old_element)
  end

  private

  # Append one {:action, :token} entry to the tag list.
  def record(action, token)
    @tags << { :action => action, :token => token }
  end
end
|
# Namespace for the Disc gem; holds only the release version constant.
class Disc
VERSION = "0.0.19"
end
Version bump
# Namespace for the Disc gem; holds only the release version constant.
class Disc
VERSION = "0.0.20"
end
|
# = distribution.rb -
# Distribution - Statistical Distributions package for Ruby
#
# Copyright (C) 2011-2014 Claudio Bustos
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# == Other Sources
#
# * Code of several Ruby engines came from statistics2.rb,
# created by Shin-ichiro HARA(sinara@blade.nagaokaut.ac.jp).
# Retrieve from http://blade.nagaokaut.ac.jp/~sinara/ruby/math/statistics2/
# * Code of Beta and Gamma distribution came from GSL project.
# Ported by John O. Woods
# Specific notices will be placed where appropriate
#
# Compatibility shim: Ruby 1.8 lacks Object#define_singleton_method, which
# Distribution.create_has_library relies on. Only installed when missing.
if !respond_to? :define_singleton_method
class Module
# expose define_method so the singleton-class trick below can call it
public :define_method
end
class Object
# Define method +name+ on the receiver's singleton class only.
def define_singleton_method(name,&block)
sc=class <<self;self;end
sc.define_method(name,&block)
end
end
end
require 'distribution/math_extension'
require 'distribution/version'
# Several distributions modules to calculate pdf, cdf, inverse cdf and generate
# pseudo-random numbers for several statistical distributions
#
# == Usage:
# Distribution::Normal.cdf(1.96)
# => 0.97500210485178
# Distribution::Normal.p_value(0.95)
# => 1.64485364660836
module Distribution
# Shorthand method registry: for every engine method whose name appears in
# EQUIVALENCES, add_shortcut defines both "<prefix>_<method>" and the
# abbreviated "<prefix>_<abbrev>" alias on this module.
module Shorthand
# long method name => abbreviated suffix
EQUIVALENCES={:p_value=>:p, :cdf=>:cdf, :pdf=>:pdf, :rng=>:r, :exact_pdf=>:epdf, :exact_cdf=>:ecdf, :exact_p_value=>:ep}
def self.add_shortcut(sh,m, &block)
if EQUIVALENCES.include? m.to_sym
sh_name=sh+"_#{m}"
define_method(sh_name,&block)
sh_name=sh+"_#{EQUIVALENCES[m.to_sym]}"
define_method(sh_name,&block)
end
end
end
SQ2PI = Math.sqrt(2 * Math::PI)
# Create a method 'has_<library>' on Module
# which require a library and return true or false
# according to success of failure
# Defines Distribution.has_<library>? which lazily requires the library
# once and caches the boolean result in a class variable (@@<library>).
def self.create_has_library(library) #:nodoc:
define_singleton_method("has_#{library}?") do
cv="@@#{library}"
if !class_variable_defined? cv
begin
require library.to_s
class_variable_set(cv, true)
rescue LoadError
class_variable_set(cv, false)
end
end
class_variable_get(cv)
end
end
# Retrieves the libraries used to calculate
# distributions
# Engine preference order (first entry wins): Java (JRuby), GSL,
# Statistics2, with the pure-Ruby engine as the always-present fallback.
def self.libraries_order
order=[:Ruby_]
order.unshift(:Statistics2_) if has_statistics2?
order.unshift(:GSL_) if has_gsl?
order.unshift(:Java_) if has_java?
order
end
create_has_library :gsl
create_has_library :statistics2
create_has_library :java
# Magic module
module Distributable #:nodoc:
# Create methods for each module and add methods to
# Distribution::Shorthand.
#
# Traverse Distribution.libraries_order adding
# methods available for each engine module on
# the current library
#
# Kids: Metaprogramming trickery! Don't do at work.
# This section was created between a very long reunion
# and a 456 Km. travel
def create_distribution_methods()
Distribution.libraries_order.each do |l_name|
if const_defined? l_name
l =const_get(l_name)
# Add methods from engine to base base, if not yet included
l.singleton_methods.each do |m|
if !singleton_methods.include? m
# delegate the call to the first (preferred) engine that defines it
define_method(m) do |*args|
l.send(m,*args)
end
# Add method to Distribution::Shorthand
sh=const_get(:SHORTHAND)
Distribution::Shorthand.add_shortcut(sh,m) do |*args|
l.send(m,*args)
end
module_function m
end
end
end
end
# create alias for common methods
alias_method :inverse_cdf, :p_value if singleton_methods.include? :p_value
end
end
# JRuby only: put the vendored commons-math jar on the load path and
# import the Apache Commons Math distribution implementations.
def self.init_java()
$:.unshift(File.dirname(__FILE__)+"/../vendor/java")
require 'commons-math-2.2.jar'
java_import 'org.apache.commons.math.distribution.NormalDistributionImpl'
java_import 'org.apache.commons.math.distribution.PoissonDistributionImpl'
end
require 'distribution/normal'
require 'distribution/chisquare'
require 'distribution/gamma'
require 'distribution/beta'
require 'distribution/t'
require 'distribution/f'
require 'distribution/bivariatenormal'
require 'distribution/binomial'
require 'distribution/hypergeometric'
require 'distribution/exponential'
require 'distribution/poisson'
require 'distribution/logistic'
require 'distribution/lognormal'
require 'distribution/weibull'
if has_java?
init_java()
end
end
Stylistic changes to lib/distribution.rb
# = distribution.rb -
# Distribution - Statistical Distributions package for Ruby
#
# Copyright (C) 2011-2014 Claudio Bustos
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# == Other Sources
#
# * Code of several Ruby engines came from statistics2.rb,
# created by Shin-ichiro HARA(sinara@blade.nagaokaut.ac.jp).
# Retrieve from http://blade.nagaokaut.ac.jp/~sinara/ruby/math/statistics2/
# * Code of Beta and Gamma distribution came from GSL project.
# Ported by John O. Woods
# Specific notices will be placed where appropriate
#
require 'distribution/math_extension'
require 'distribution/version'
# Several distributions modules to calculate pdf, cdf, inverse cdf and generate
# pseudo-random numbers for several statistical distributions
#
# == Usage:
# Distribution::Normal.cdf(1.96)
# => 0.97500210485178
# Distribution::Normal.p_value(0.95)
# => 1.64485364660836
module Distribution
SQ2PI = Math.sqrt(2 * Math::PI)
# Shorthand method registry: for every engine method whose name appears in
# EQUIVALENCES, add_shortcut defines both "<prefix>_<method>" and the
# abbreviated "<prefix>_<abbrev>" alias on this module.
module Shorthand
# long method name => abbreviated suffix
EQUIVALENCES = { p_value: :p, cdf: :cdf, pdf: :pdf, rng: :r,
exact_pdf: :epdf, exact_cdf: :ecdf, exact_p_value: :ep }
def self.add_shortcut(sh, m, &block)
if EQUIVALENCES.include? m.to_sym
sh_name = sh + "_#{m}"
define_method(sh_name, &block)
sh_name = sh + "_#{EQUIVALENCES[m.to_sym]}"
define_method(sh_name, &block)
end
end
end
# Create a method 'has_<library>' on Module
# which require a library and return true or false
# according to success of failure
def self.create_has_library(library) #:nodoc:
define_singleton_method("has_#{library}?") do
cv = "@@#{library}"
unless class_variable_defined? cv
begin
require library.to_s
class_variable_set(cv, true)
rescue LoadError
class_variable_set(cv, false)
end
end
class_variable_get(cv)
end
end
# Retrieves the libraries used to calculate
# distributions
def self.libraries_order
order = [:Ruby_]
order.unshift(:Statistics2_) if has_statistics2?
order.unshift(:GSL_) if has_gsl?
order.unshift(:Java_) if has_java?
order
end
create_has_library :gsl
create_has_library :statistics2
create_has_library :java
# Magic module
module Distributable #:nodoc:
# Create methods for each module and add methods to
# Distribution::Shorthand.
#
# Traverse Distribution.libraries_order adding
# methods availables for each engine module on
# the current library
#
# Kids: Metaprogramming trickery! Don't do at work.
# This section was created between a very long reunion
# and a 456 Km. travel
def create_distribution_methods
Distribution.libraries_order.each do |l_name|
if const_defined? l_name
l = const_get(l_name)
# Add methods from engine to base base, if not yet included
l.singleton_methods.each do |m|
unless singleton_methods.include? m
define_method(m) do |*args|
l.send(m, *args)
end
# Add method to Distribution::Shorthand
sh = const_get(:SHORTHAND)
Distribution::Shorthand.add_shortcut(sh, m) do |*args|
l.send(m, *args)
end
module_function m
end
end
end
end
# create alias for common methods
alias_method :inverse_cdf, :p_value if singleton_methods.include? :p_value
end
end
# Loads the bundled Apache Commons Math jar and imports the distribution
# implementations used by the Java engines. JRuby only: java_import and
# requiring a .jar are not available on MRI (callers gate on has_java?).
def self.init_java
# Make the vendored jar visible to require.
$LOAD_PATH.unshift File.expand_path('../../vendor/java', __FILE__)
require 'commons-math-2.2.jar'
java_import 'org.apache.commons.math.distribution.NormalDistributionImpl'
java_import 'org.apache.commons.math.distribution.PoissonDistributionImpl'
end
require 'distribution/normal'
require 'distribution/chisquare'
require 'distribution/gamma'
require 'distribution/beta'
require 'distribution/t'
require 'distribution/f'
require 'distribution/bivariatenormal'
require 'distribution/binomial'
require 'distribution/hypergeometric'
require 'distribution/exponential'
require 'distribution/poisson'
require 'distribution/logistic'
require 'distribution/lognormal'
require 'distribution/weibull'
# Eagerly wire up the Apache Commons Math backend when running on the
# JVM (has_java? was defined above by create_has_library :java).
if has_java?
init_java
end
end
|
# Paperclip allows file attachments that are stored in the filesystem. All graphical
# transformations are done using the Graphics/ImageMagick command line utilities and
# are stored in Tempfiles until the record is saved. Paperclip does not require a
# separate model for storing the attachment's information, instead adding a few simple
# columns to your table.
#
# Author:: Jon Yurek
# Copyright:: Copyright (c) 2008 thoughtbot, inc.
# License:: MIT License (http://www.opensource.org/licenses/mit-license.php)
#
# Paperclip defines an attachment as any file, though it makes special considerations
# for image files. You can declare that a model has an attached file with the
# +has_attached_file+ method:
#
# class User < ActiveRecord::Base
# has_attached_file :avatar, :styles => { :thumb => "100x100" }
# end
#
# user = User.new
# user.avatar = params[:user][:avatar]
# user.avatar.url
# # => "/users/avatars/4/original_me.jpg"
# user.avatar.url(:thumb)
# # => "/users/avatars/4/thumb_me.jpg"
#
# See the +has_attached_file+ documentation for more details.
require 'tempfile'
require 'dm-paperclip/upfile'
require 'dm-paperclip/iostream'
require 'dm-paperclip/geometry'
require 'dm-paperclip/processor'
require 'dm-paperclip/thumbnail'
require 'dm-paperclip/storage'
require 'dm-paperclip/interpolations'
require 'dm-paperclip/attachment'
# Inside a Rails app, auto-require any custom processors placed in
# RAILS_ROOT/lib/paperclip_processors/*.rb.
if defined? RAILS_ROOT
Dir.glob(File.join(File.expand_path(RAILS_ROOT), "lib", "paperclip_processors", "*.rb")).each do |processor|
require processor
end
end
# Only include validations if dm-validations is loaded
require 'dm-paperclip/validations' unless defined?(DataMapper::Validate).nil?
# The base module that gets included in ActiveRecord::Base. See the
# documentation for Paperclip::ClassMethods for more useful information.
module Paperclip
# Library version (frozen: constant strings should not be mutable).
VERSION = "2.2.9.1".freeze
class << self
  # Provides configurability to Paperclip. There are a number of options available, such as:
  # * whiny: Will raise an error if Paperclip cannot process thumbnails of
  #   an uploaded image. Defaults to true.
  # * log: Logs progress. Uses DataMapper's logger, so honors log levels, etc.
  #   Defaults to true.
  # * command_path: Defines the path at which to find the command line
  #   programs if they are not on the system's search path. Defaults to
  #   nil, which uses the first executable found in the user's search path.
  # * image_magick_path: Deprecated alias of command_path.
  def options
    @options ||= {
      :whiny => true,
      :image_magick_path => nil,
      :command_path => nil,
      :log => true,
      :log_command => false,
      :swallow_stderr => true
    }
  end

  # Joins +command+ onto the configured :command_path (falling back to the
  # deprecated :image_magick_path). With neither set, returns +command+
  # unchanged so the shell resolves it via PATH.
  def path_for_command command #:nodoc:
    if options[:image_magick_path]
      warn("[DEPRECATION] :image_magick_path is deprecated and will be removed. Use :command_path instead")
    end
    path = [options[:command_path] || options[:image_magick_path], command].compact
    File.join(*path)
  end

  # Registers a custom interpolation under +key+.
  def interpolates key, &block
    Paperclip::Interpolations[key] = block
  end

  # Runs +cmd+ (prefixed with :command_path) with +params+, returning its
  # stdout. Raises PaperclipCommandLineError unless the exit status is in
  # +expected_outcodes+ (a code or list of codes; 0 by default). Logs the
  # command when Paperclip.options[:log_command] is true.
  def run cmd, params = "", expected_outcodes = 0
    # NOTE(review): params is interpolated into a shell command unescaped;
    # callers must never pass untrusted input here.
    command = %Q<#{%Q[#{path_for_command(cmd)} #{params}].gsub(/\s+/, " ")}>
    command = "#{command} 2>#{bit_bucket}" if Paperclip.options[:swallow_stderr]
    Paperclip.log(command) if Paperclip.options[:log_command]
    output = `#{command}`
    unless [expected_outcodes].flatten.include?($?.exitstatus)
      raise PaperclipCommandLineError, "Error while running #{cmd}"
    end
    output
  end

  # Platform null device for discarding stderr.
  # BUG FIX: File.exists? is deprecated and was removed in Ruby 3.2;
  # File.exist? is the supported spelling.
  def bit_bucket #:nodoc:
    File.exist?("/dev/null") ? "/dev/null" : "NUL"
  end

  # Hook run when Paperclip is included into a model.
  def included base #:nodoc:
    base.extend ClassMethods
    unless base.respond_to?(:define_callbacks)
      base.send(:include, Paperclip::CallbackCompatability)
    end
  end

  # Looks up a processor class by name. Raises PaperclipError when the
  # constant does not inherit from Paperclip::Processor (const_get raises
  # NameError when it is missing entirely).
  def processor name #:nodoc:
    name = name.to_s.camel_case
    processor = Paperclip.const_get(name)
    unless processor.ancestors.include?(Paperclip::Processor)
      raise PaperclipError.new("Processor #{name} was not found")
    end
    processor
  end

  # Log a paperclip-specific line. Uses DataMapper.logger.
  # Set Paperclip.options[:log] to false to turn off.
  def log message
    logger.info("[paperclip] #{message}") if logging?
  end

  def logger #:nodoc:
    DataMapper.logger
  end

  def logging? #:nodoc:
    options[:log]
  end
end
# Generic Paperclip failure.
class PaperclipError < StandardError #:nodoc:
end
# Raised by Paperclip.run when a shelled-out command exits with an
# unexpected status.
# NOTE(review): subclasses StandardError, not PaperclipError, so a
# `rescue PaperclipError` will not catch it — confirm that is intended.
class PaperclipCommandLineError < StandardError #:nodoc:
end
# Name suggests ImageMagick could not identify an upload; raised
# elsewhere in the library (not in this file).
class NotIdentifiedByImageMagickError < PaperclipError #:nodoc:
end
# Name suggests interpolation recursed endlessly; raised elsewhere.
class InfiniteInterpolationError < PaperclipError #:nodoc:
end
# Included into a DataMapper model: gives the model its own
# @@attachment_definitions store plus the Paperclip class methods.
module Resource
def self.included(base)
# class_eval a literal definition so each model gets its own class
# variable rather than sharing one defined on this module.
base.class_eval <<-RUBY, __FILE__, __LINE__ + 1
class_variable_set(:@@attachment_definitions,nil) unless class_variable_defined?(:@@attachment_definitions)
def self.attachment_definitions
@@attachment_definitions
end
def self.attachment_definitions=(obj)
@@attachment_definitions = obj
end
RUBY
base.extend Paperclip::ClassMethods
end
end
module ClassMethods
# +has_attached_file+ gives the class it is called on an attribute that maps to a file. This
# is typically a file stored somewhere on the filesystem and has been uploaded by a user.
# The attribute returns a Paperclip::Attachment object which handles the management of
# that file. The intent is to make the attachment as much like a normal attribute. The
# thumbnails will be created when the new file is assigned, but they will *not* be saved
# until +save+ is called on the record. Likewise, if the attribute is set to +nil+ is
# called on it, the attachment will *not* be deleted until +save+ is called. See the
# Paperclip::Attachment documentation for more specifics. There are a number of options
# you can set to change the behavior of a Paperclip attachment:
# * +url+: The full URL of where the attachment is publicly accessible. This can just
# as easily point to a directory served directly through Apache as it can to an action
# that can control permissions. You can specify the full domain and path, but usually
# just an absolute path is sufficient. The leading slash must be included manually for
# absolute paths. The default value is "/:class/:attachment/:id/:style_:filename". See
# Paperclip::Attachment#interpolate for more information on variable interpolation.
# :url => "/:attachment/:id/:style_:basename:extension"
# :url => "http://some.other.host/stuff/:class/:id_:extension"
# * +default_url+: The URL that will be returned if there is no attachment assigned.
# This field is interpolated just as the url is. The default value is
# "/:class/:attachment/missing_:style.png"
# has_attached_file :avatar, :default_url => "/images/default_:style_avatar.png"
# User.new.avatar_url(:small) # => "/images/default_small_avatar.png"
# * +styles+: A hash of thumbnail styles and their geometries. You can find more about
# geometry strings at the ImageMagick website
# (http://www.imagemagick.org/script/command-line-options.php#resize). Paperclip
# also adds the "#" option (e.g. "50x50#"), which will resize the image to fit maximally
# inside the dimensions and then crop the rest off (weighted at the center). The
# default value is to generate no thumbnails.
# * +default_style+: The thumbnail style that will be used by default URLs.
# Defaults to +original+.
# has_attached_file :avatar, :styles => { :normal => "100x100#" },
# :default_style => :normal
# user.avatar.url # => "/avatars/23/normal_me.png"
# * +whiny_thumbnails+: Will raise an error if Paperclip cannot process thumbnails of an
# uploaded image. This will override the global setting for this attachment.
# Defaults to true.
# * +convert_options+: When creating thumbnails, use this free-form options
# field to pass in various convert command options. Typical options are "-strip" to
# remove all Exif data from the image (save space for thumbnails and avatars) or
# "-depth 8" to specify the bit depth of the resulting conversion. See ImageMagick
# convert documentation for more options: (http://www.imagemagick.org/script/convert.php)
# Note that this option takes a hash of options, each of which correspond to the style
# of thumbnail being generated. You can also specify :all as a key, which will apply
# to all of the thumbnails being generated. If you specify options for the :original,
# it would be best if you did not specify destructive options, as the intent of keeping
# the original around is to regenerate all the thumbnails when requirements change.
# has_attached_file :avatar, :styles => { :large => "300x300", :negative => "100x100" }
# :convert_options => {
# :all => "-strip",
# :negative => "-negate"
# }
# * +storage+: Chooses the storage backend where the files will be stored. The current
# choices are :filesystem and :s3. The default is :filesystem. Make sure you read the
# documentation for Paperclip::Storage::Filesystem and Paperclip::Storage::S3
# for backend-specific options.
# See the option documentation in the comment block above. Declares the
# four backing properties, wires save/destroy hooks, and defines the
# reader/writer/query methods for the attachment.
def has_attached_file name, options = {}
  include InstanceMethods

  self.attachment_definitions = {} if self.attachment_definitions.nil?
  self.attachment_definitions[name] = {:validations => []}.merge(options)

  # Keep only the DataMapper property visibility/accessor options;
  # everything else is Paperclip-specific.
  # BUG FIX: the block previously referenced an undefined local `key`
  # instead of the block parameter `k`, raising NameError for any
  # non-empty options hash.
  property_options = options.delete_if { |k,v| ![ :public, :protected, :private, :accessor, :reader, :writer ].include?(k) }

  property :"#{name}_file_name", String, property_options
  property :"#{name}_content_type", String, property_options
  property :"#{name}_file_size", Integer, property_options
  property :"#{name}_updated_at", DateTime, property_options

  after :save, :save_attached_files
  before :destroy, :destroy_attached_files

  # not needed with extlib just do before :post_process, or after :post_process
  # define_callbacks :before_post_process, :after_post_process
  # define_callbacks :"before_#{name}_post_process", :"after_#{name}_post_process"

  # Reader: with an argument returns the URL/path for that style; without
  # one returns the Attachment itself.
  define_method name do |*args|
    a = attachment_for(name)
    (args.length > 0) ? a.to_s(args.first) : a
  end

  define_method "#{name}=" do |file|
    attachment_for(name).assign(file)
  end

  define_method "#{name}?" do
    ! attachment_for(name).original_filename.blank?
  end

  # Copy attachment errors onto the model when dm-validations is present.
  unless defined?(DataMapper::Validate).nil?
    add_validator_to_context(opts_from_validator_args([name]), [name], Paperclip::Validate::CopyAttachmentErrors)
  end
end
# The validator helpers below exist only when dm-validations is loaded.
unless defined?(DataMapper::Validate).nil?
# Places ActiveRecord-style validations on the size of the file assigned. The
# possible options are:
# * +in+: a Range of bytes (i.e. +1..1.megabyte+),
# * +less_than+: equivalent to :in => 0..options[:less_than]
# * +greater_than+: equivalent to :in => options[:greater_than]..Infinity
# * +message+: error message to display, use :min and :max as replacements
def validates_attachment_size(*fields)
opts = opts_from_validator_args(fields)
add_validator_to_context(opts, fields, Paperclip::Validate::SizeValidator)
end
# Adds errors if thumbnail creation fails. The same as specifying :whiny_thumbnails => true.
# NOTE(review): the +options+ parameter is accepted but ignored.
def validates_attachment_thumbnails name, options = {}
self.attachment_definitions[name][:whiny_thumbnails] = true
end
# Places ActiveRecord-style validations on the presence of a file.
def validates_attachment_presence(*fields)
opts = opts_from_validator_args(fields)
add_validator_to_context(opts, fields, Paperclip::Validate::RequiredFieldValidator)
end
# Places ActiveRecord-style validations on the content type of the file assigned. The
# possible options are:
# * +content_type+: Allowed content types. Can be a single content type or an array. Allows all by default.
# * +message+: The message to display when the uploaded file has an invalid content type.
def validates_attachment_content_type(*fields)
opts = opts_from_validator_args(fields)
add_validator_to_context(opts, fields, Paperclip::Validate::ContentTypeValidator)
end
end
# Returns the attachment definitions defined by each call to
# has_attached_file.
# NOTE(review): read_inheritable_attribute is an ActiveRecord/Rails API;
# under DataMapper the model's own attachment_definitions (installed by
# Paperclip::Resource) shadows this method — confirm this path is used.
def attachment_definitions
read_inheritable_attribute(:attachment_definitions)
end
end
module InstanceMethods #:nodoc:
  # Builds (and memoizes) the Attachment wrapper for +name+.
  def attachment_for name
    @attachments ||= {}
    @attachments[name] ||= Attachment.new(name, self, self.class.attachment_definitions[name])
  end

  # Yields each [name, attachment] pair defined on this model.
  def each_attachment
    self.class.attachment_definitions.each do |name, definition|
      yield(name, attachment_for(name))
    end
  end

  # Saves all attachments; wired in by has_attached_file via after :save.
  # BUG FIX: use Paperclip.log instead of a bare logger.info so logging
  # honors Paperclip.options[:log] and does not assume the model exposes
  # a #logger method (it may not under DataMapper).
  def save_attached_files
    Paperclip.log("Saving attachments.")
    each_attachment do |name, attachment|
      attachment.send(:save)
    end
  end

  # Queues and flushes deletes for all attachments; before :destroy hook.
  def destroy_attached_files
    Paperclip.log("Deleting attachments.")
    each_attachment do |name, attachment|
      attachment.send(:queue_existing_for_delete)
      attachment.send(:flush_deletes)
    end
  end
end
end
fixed log statements
# Paperclip allows file attachments that are stored in the filesystem. All graphical
# transformations are done using the Graphics/ImageMagick command line utilities and
# are stored in Tempfiles until the record is saved. Paperclip does not require a
# separate model for storing the attachment's information, instead adding a few simple
# columns to your table.
#
# Author:: Jon Yurek
# Copyright:: Copyright (c) 2008 thoughtbot, inc.
# License:: MIT License (http://www.opensource.org/licenses/mit-license.php)
#
# Paperclip defines an attachment as any file, though it makes special considerations
# for image files. You can declare that a model has an attached file with the
# +has_attached_file+ method:
#
# class User < ActiveRecord::Base
# has_attached_file :avatar, :styles => { :thumb => "100x100" }
# end
#
# user = User.new
# user.avatar = params[:user][:avatar]
# user.avatar.url
# # => "/users/avatars/4/original_me.jpg"
# user.avatar.url(:thumb)
# # => "/users/avatars/4/thumb_me.jpg"
#
# See the +has_attached_file+ documentation for more details.
require 'tempfile'
require 'dm-paperclip/upfile'
require 'dm-paperclip/iostream'
require 'dm-paperclip/geometry'
require 'dm-paperclip/processor'
require 'dm-paperclip/thumbnail'
require 'dm-paperclip/storage'
require 'dm-paperclip/interpolations'
require 'dm-paperclip/attachment'
# Inside a Rails app, auto-require any custom processors placed in
# RAILS_ROOT/lib/paperclip_processors/*.rb.
if defined? RAILS_ROOT
Dir.glob(File.join(File.expand_path(RAILS_ROOT), "lib", "paperclip_processors", "*.rb")).each do |processor|
require processor
end
end
# Only include validations if dm-validations is loaded
require 'dm-paperclip/validations' unless defined?(DataMapper::Validate).nil?
# The base module that gets included in ActiveRecord::Base. See the
# documentation for Paperclip::ClassMethods for more useful information.
module Paperclip
# Library version (frozen: constant strings should not be mutable).
VERSION = "2.2.9.1".freeze
class << self
  # Provides configurability to Paperclip. There are a number of options available, such as:
  # * whiny: Will raise an error if Paperclip cannot process thumbnails of
  #   an uploaded image. Defaults to true.
  # * log: Logs progress. Uses DataMapper's logger, so honors log levels, etc.
  #   Defaults to true.
  # * command_path: Defines the path at which to find the command line
  #   programs if they are not on the system's search path. Defaults to
  #   nil, which uses the first executable found in the user's search path.
  # * image_magick_path: Deprecated alias of command_path.
  def options
    @options ||= {
      :whiny => true,
      :image_magick_path => nil,
      :command_path => nil,
      :log => true,
      :log_command => false,
      :swallow_stderr => true
    }
  end

  # Joins +command+ onto the configured :command_path (falling back to the
  # deprecated :image_magick_path). With neither set, returns +command+
  # unchanged so the shell resolves it via PATH.
  def path_for_command command #:nodoc:
    if options[:image_magick_path]
      warn("[DEPRECATION] :image_magick_path is deprecated and will be removed. Use :command_path instead")
    end
    path = [options[:command_path] || options[:image_magick_path], command].compact
    File.join(*path)
  end

  # Registers a custom interpolation under +key+.
  def interpolates key, &block
    Paperclip::Interpolations[key] = block
  end

  # Runs +cmd+ (prefixed with :command_path) with +params+, returning its
  # stdout. Raises PaperclipCommandLineError unless the exit status is in
  # +expected_outcodes+ (a code or list of codes; 0 by default). Logs the
  # command when Paperclip.options[:log_command] is true.
  def run cmd, params = "", expected_outcodes = 0
    # NOTE(review): params is interpolated into a shell command unescaped;
    # callers must never pass untrusted input here.
    command = %Q<#{%Q[#{path_for_command(cmd)} #{params}].gsub(/\s+/, " ")}>
    command = "#{command} 2>#{bit_bucket}" if Paperclip.options[:swallow_stderr]
    Paperclip.log(command) if Paperclip.options[:log_command]
    output = `#{command}`
    unless [expected_outcodes].flatten.include?($?.exitstatus)
      raise PaperclipCommandLineError, "Error while running #{cmd}"
    end
    output
  end

  # Platform null device for discarding stderr.
  # BUG FIX: File.exists? is deprecated and was removed in Ruby 3.2;
  # File.exist? is the supported spelling.
  def bit_bucket #:nodoc:
    File.exist?("/dev/null") ? "/dev/null" : "NUL"
  end

  # Hook run when Paperclip is included into a model.
  def included base #:nodoc:
    base.extend ClassMethods
    unless base.respond_to?(:define_callbacks)
      base.send(:include, Paperclip::CallbackCompatability)
    end
  end

  # Looks up a processor class by name. Raises PaperclipError when the
  # constant does not inherit from Paperclip::Processor (const_get raises
  # NameError when it is missing entirely).
  def processor name #:nodoc:
    name = name.to_s.camel_case
    processor = Paperclip.const_get(name)
    unless processor.ancestors.include?(Paperclip::Processor)
      raise PaperclipError.new("Processor #{name} was not found")
    end
    processor
  end

  # Log a paperclip-specific line. Uses DataMapper.logger.
  # Set Paperclip.options[:log] to false to turn off.
  def log message
    logger.info("[paperclip] #{message}") if logging?
  end

  def logger #:nodoc:
    DataMapper.logger
  end

  def logging? #:nodoc:
    options[:log]
  end
end
# Generic Paperclip failure.
class PaperclipError < StandardError #:nodoc:
end
# Raised by Paperclip.run when a shelled-out command exits with an
# unexpected status.
# NOTE(review): subclasses StandardError, not PaperclipError, so a
# `rescue PaperclipError` will not catch it — confirm that is intended.
class PaperclipCommandLineError < StandardError #:nodoc:
end
# Name suggests ImageMagick could not identify an upload; raised
# elsewhere in the library (not in this file).
class NotIdentifiedByImageMagickError < PaperclipError #:nodoc:
end
# Name suggests interpolation recursed endlessly; raised elsewhere.
class InfiniteInterpolationError < PaperclipError #:nodoc:
end
# Included into a DataMapper model: gives the model its own
# @@attachment_definitions store plus the Paperclip class methods.
module Resource
def self.included(base)
# class_eval a literal definition so each model gets its own class
# variable rather than sharing one defined on this module.
base.class_eval <<-RUBY, __FILE__, __LINE__ + 1
class_variable_set(:@@attachment_definitions,nil) unless class_variable_defined?(:@@attachment_definitions)
def self.attachment_definitions
@@attachment_definitions
end
def self.attachment_definitions=(obj)
@@attachment_definitions = obj
end
RUBY
base.extend Paperclip::ClassMethods
end
end
module ClassMethods
# +has_attached_file+ gives the class it is called on an attribute that maps to a file. This
# is typically a file stored somewhere on the filesystem and has been uploaded by a user.
# The attribute returns a Paperclip::Attachment object which handles the management of
# that file. The intent is to make the attachment as much like a normal attribute. The
# thumbnails will be created when the new file is assigned, but they will *not* be saved
# until +save+ is called on the record. Likewise, if the attribute is set to +nil+ is
# called on it, the attachment will *not* be deleted until +save+ is called. See the
# Paperclip::Attachment documentation for more specifics. There are a number of options
# you can set to change the behavior of a Paperclip attachment:
# * +url+: The full URL of where the attachment is publicly accessible. This can just
# as easily point to a directory served directly through Apache as it can to an action
# that can control permissions. You can specify the full domain and path, but usually
# just an absolute path is sufficient. The leading slash must be included manually for
# absolute paths. The default value is "/:class/:attachment/:id/:style_:filename". See
# Paperclip::Attachment#interpolate for more information on variable interpolation.
# :url => "/:attachment/:id/:style_:basename:extension"
# :url => "http://some.other.host/stuff/:class/:id_:extension"
# * +default_url+: The URL that will be returned if there is no attachment assigned.
# This field is interpolated just as the url is. The default value is
# "/:class/:attachment/missing_:style.png"
# has_attached_file :avatar, :default_url => "/images/default_:style_avatar.png"
# User.new.avatar_url(:small) # => "/images/default_small_avatar.png"
# * +styles+: A hash of thumbnail styles and their geometries. You can find more about
# geometry strings at the ImageMagick website
# (http://www.imagemagick.org/script/command-line-options.php#resize). Paperclip
# also adds the "#" option (e.g. "50x50#"), which will resize the image to fit maximally
# inside the dimensions and then crop the rest off (weighted at the center). The
# default value is to generate no thumbnails.
# * +default_style+: The thumbnail style that will be used by default URLs.
# Defaults to +original+.
# has_attached_file :avatar, :styles => { :normal => "100x100#" },
# :default_style => :normal
# user.avatar.url # => "/avatars/23/normal_me.png"
# * +whiny_thumbnails+: Will raise an error if Paperclip cannot process thumbnails of an
# uploaded image. This will override the global setting for this attachment.
# Defaults to true.
# * +convert_options+: When creating thumbnails, use this free-form options
# field to pass in various convert command options. Typical options are "-strip" to
# remove all Exif data from the image (save space for thumbnails and avatars) or
# "-depth 8" to specify the bit depth of the resulting conversion. See ImageMagick
# convert documentation for more options: (http://www.imagemagick.org/script/convert.php)
# Note that this option takes a hash of options, each of which correspond to the style
# of thumbnail being generated. You can also specify :all as a key, which will apply
# to all of the thumbnails being generated. If you specify options for the :original,
# it would be best if you did not specify destructive options, as the intent of keeping
# the original around is to regenerate all the thumbnails when requirements change.
# has_attached_file :avatar, :styles => { :large => "300x300", :negative => "100x100" }
# :convert_options => {
# :all => "-strip",
# :negative => "-negate"
# }
# * +storage+: Chooses the storage backend where the files will be stored. The current
# choices are :filesystem and :s3. The default is :filesystem. Make sure you read the
# documentation for Paperclip::Storage::Filesystem and Paperclip::Storage::S3
# for backend-specific options.
# See the option documentation in the comment block above. Declares the
# four backing properties, wires save/destroy hooks, and defines the
# reader/writer/query methods for the attachment.
def has_attached_file name, options = {}
  include InstanceMethods

  self.attachment_definitions = {} if self.attachment_definitions.nil?
  self.attachment_definitions[name] = {:validations => []}.merge(options)

  # Keep only the DataMapper property visibility/accessor options;
  # everything else is Paperclip-specific.
  # BUG FIX: the block previously referenced an undefined local `key`
  # instead of the block parameter `k`, raising NameError for any
  # non-empty options hash.
  property_options = options.delete_if { |k,v| ![ :public, :protected, :private, :accessor, :reader, :writer ].include?(k) }

  property :"#{name}_file_name", String, property_options
  property :"#{name}_content_type", String, property_options
  property :"#{name}_file_size", Integer, property_options
  property :"#{name}_updated_at", DateTime, property_options

  after :save, :save_attached_files
  before :destroy, :destroy_attached_files

  # not needed with extlib just do before :post_process, or after :post_process
  # define_callbacks :before_post_process, :after_post_process
  # define_callbacks :"before_#{name}_post_process", :"after_#{name}_post_process"

  # Reader: with an argument returns the URL/path for that style; without
  # one returns the Attachment itself.
  define_method name do |*args|
    a = attachment_for(name)
    (args.length > 0) ? a.to_s(args.first) : a
  end

  define_method "#{name}=" do |file|
    attachment_for(name).assign(file)
  end

  define_method "#{name}?" do
    ! attachment_for(name).original_filename.blank?
  end

  # Copy attachment errors onto the model when dm-validations is present.
  unless defined?(DataMapper::Validate).nil?
    add_validator_to_context(opts_from_validator_args([name]), [name], Paperclip::Validate::CopyAttachmentErrors)
  end
end
# The validator helpers below exist only when dm-validations is loaded.
unless defined?(DataMapper::Validate).nil?
# Places ActiveRecord-style validations on the size of the file assigned. The
# possible options are:
# * +in+: a Range of bytes (i.e. +1..1.megabyte+),
# * +less_than+: equivalent to :in => 0..options[:less_than]
# * +greater_than+: equivalent to :in => options[:greater_than]..Infinity
# * +message+: error message to display, use :min and :max as replacements
def validates_attachment_size(*fields)
opts = opts_from_validator_args(fields)
add_validator_to_context(opts, fields, Paperclip::Validate::SizeValidator)
end
# Adds errors if thumbnail creation fails. The same as specifying :whiny_thumbnails => true.
# NOTE(review): the +options+ parameter is accepted but ignored.
def validates_attachment_thumbnails name, options = {}
self.attachment_definitions[name][:whiny_thumbnails] = true
end
# Places ActiveRecord-style validations on the presence of a file.
def validates_attachment_presence(*fields)
opts = opts_from_validator_args(fields)
add_validator_to_context(opts, fields, Paperclip::Validate::RequiredFieldValidator)
end
# Places ActiveRecord-style validations on the content type of the file assigned. The
# possible options are:
# * +content_type+: Allowed content types. Can be a single content type or an array. Allows all by default.
# * +message+: The message to display when the uploaded file has an invalid content type.
def validates_attachment_content_type(*fields)
opts = opts_from_validator_args(fields)
add_validator_to_context(opts, fields, Paperclip::Validate::ContentTypeValidator)
end
end
# Returns the attachment definitions defined by each call to
# has_attached_file.
# NOTE(review): read_inheritable_attribute is an ActiveRecord/Rails API;
# under DataMapper the model's own attachment_definitions (installed by
# Paperclip::Resource) shadows this method — confirm this path is used.
def attachment_definitions
read_inheritable_attribute(:attachment_definitions)
end
end
module InstanceMethods #:nodoc:
  # Fetches the Attachment object for +name+, building and caching it on
  # first access.
  def attachment_for name
    @attachments ||= {}
    @attachments[name] ||= Attachment.new(name, self, self.class.attachment_definitions[name])
  end

  # Yields each [name, attachment] pair declared on this model.
  def each_attachment
    self.class.attachment_definitions.each_key do |attachment_name|
      yield(attachment_name, attachment_for(attachment_name))
    end
  end

  # Saves every attachment; wired in as an after :save hook.
  def save_attached_files
    Paperclip.log("Saving attachments.")
    each_attachment { |_name, attachment| attachment.send(:save) }
  end

  # Queues and flushes deletes for every attachment; before :destroy hook.
  def destroy_attached_files
    Paperclip.log("Deleting attachments.")
    each_attachment do |_name, attachment|
      attachment.send(:queue_existing_for_delete)
      attachment.send(:flush_deletes)
    end
  end
end
end |
module Dnsync
  # Thin client for the NS1 (api.nsone.net) DNS API, scoped to one zone.
  class Nsone
    # api_key:: NS1 API key sent via the X-NSONE-Key header.
    # domain:: zone name all operations are scoped to.
    # Raises ArgumentError when either is blank.
    def initialize(api_key, domain)
      unless api_key.present?
        raise ArgumentError, "api_key must be specified"
      end
      unless domain.present?
        raise ArgumentError, "domain must be specified"
      end
      @api_key = api_key
      @domain = domain
    end

    # Memoized Faraday connection: JSON request/response encoding,
    # raise-on-error middleware, 5s open/read timeouts.
    def connection
      @connection ||= Faraday.new('https://api.nsone.net/v1/') do |conn|
        conn.request :json
        # conn.response :logger
        conn.response :raise_error
        conn.response :json, :content_type => /\bjson$/
        conn.adapter Faraday.default_adapter
        conn.headers['X-NSONE-Key'] = @api_key
        conn.options.timeout = 5
        conn.options.open_timeout = 5
      end
    end

    # Fetches the zone plus every record in it; returns a Zone.
    def zone
      zone = connection.get("zones/#{@domain}").body
      records = zone['records'].map do |record|
        record_for(record['domain'], record['type'])
      end
      Zone.new(@domain, records)
    end

    # Fetches one record as a Record of Answers. An NS1 answer array is
    # either [priority, content] (e.g. MX) or [content]. Retries after a
    # short randomized sleep on HTTP 429 (rate limited).
    def record_for(fqdn, record_type)
      record = connection.get("zones/#{@domain}/#{fqdn}/#{record_type}").body
      answers = record['answers'].map do |answer_record|
        case answer_record['answer'].length
        when 2
          priority, content = *answer_record['answer']
        when 1
          content = answer_record['answer'].first
        else
          raise "Unknown answer format: #{answer_record.inspect}"
        end
        Answer.new(content, priority)
      end
      Record.new(record['domain'], record['type'], record['ttl'], answers)
    rescue Faraday::ClientError => ex
      # BUG FIX: ex.response can be nil (e.g. on timeouts), making the
      # previous `ex.response[:status]` raise NoMethodError; guard it.
      # NOTE(review): the retry is unbounded — consider capping attempts.
      if ex.response && ex.response[:status].to_i == 429
        sleep 0.4 + rand
        retry
      else
        raise
      end
    end

    # Creates +record+ in the zone (NS1 uses PUT for creation).
    def create_record(record)
      connection.put(record_url(record)) do |req|
        req.body = record_payload(record)
      end
    end

    # Updates an existing +record+ (NS1 uses POST for updates).
    def update_record(record)
      connection.post(record_url(record)) do |req|
        req.body = record_payload(record)
      end
    end

    # Deletes +record+ from the zone.
    def remove_record(record)
      connection.delete(record_url(record))
    end

    private

    # API path for a record within the managed zone.
    def record_url(record)
      "zones/#{@domain}/#{record.name}/#{record.type}"
    end

    # Request body shared by create_record and update_record.
    def record_payload(record)
      answers = record.answers.map do |answer|
        if answer.priority
          { :answer => [ answer.priority, answer.content ] }
        else
          { :answer => [ answer.content ] }
        end
      end
      {
        :type => record.type,
        :zone => @domain,
        :domain => record.name,
        :ttl => record.ttl,
        :answers => answers
      }
    end
  end
end
Fix problem with exception handling
module Dnsync
  # Thin client for the NS1 (api.nsone.net) DNS API, scoped to one zone.
  class Nsone
    # api_key:: NS1 API key sent via the X-NSONE-Key header.
    # domain:: zone name all operations are scoped to.
    # Raises ArgumentError when either is blank.
    def initialize(api_key, domain)
      unless api_key.present?
        raise ArgumentError, "api_key must be specified"
      end
      unless domain.present?
        raise ArgumentError, "domain must be specified"
      end
      @api_key = api_key
      @domain = domain
    end

    # Memoized Faraday connection: JSON request/response encoding,
    # raise-on-error middleware, 5s open/read timeouts.
    def connection
      @connection ||= Faraday.new('https://api.nsone.net/v1/') do |conn|
        conn.request :json
        # conn.response :logger
        conn.response :raise_error
        conn.response :json, :content_type => /\bjson$/
        conn.adapter Faraday.default_adapter
        conn.headers['X-NSONE-Key'] = @api_key
        conn.options.timeout = 5
        conn.options.open_timeout = 5
      end
    end

    # Fetches the zone plus every record in it; returns a Zone.
    def zone
      zone = connection.get("zones/#{@domain}").body
      records = zone['records'].map do |record|
        record_for(record['domain'], record['type'])
      end
      Zone.new(@domain, records)
    end

    # Fetches one record as a Record of Answers. An NS1 answer array is
    # either [priority, content] (e.g. MX) or [content]. Retries after a
    # short randomized sleep on HTTP 429 (rate limited); ex.response may
    # be nil (e.g. timeouts), hence the guard.
    def record_for(fqdn, record_type)
      record = connection.get("zones/#{@domain}/#{fqdn}/#{record_type}").body
      answers = record['answers'].map do |answer_record|
        case answer_record['answer'].length
        when 2
          priority, content = *answer_record['answer']
        when 1
          content = answer_record['answer'].first
        else
          raise "Unknown answer format: #{answer_record.inspect}"
        end
        Answer.new(content, priority)
      end
      Record.new(record['domain'], record['type'], record['ttl'], answers)
    rescue Faraday::ClientError => ex
      # NOTE(review): the retry is unbounded — consider capping attempts.
      if ex.response && ex.response[:status].to_i == 429
        sleep 0.4 + rand
        retry
      else
        raise
      end
    end

    # Creates +record+ in the zone (NS1 uses PUT for creation).
    def create_record(record)
      connection.put(record_url(record)) do |req|
        req.body = record_payload(record)
      end
    end

    # Updates an existing +record+ (NS1 uses POST for updates).
    def update_record(record)
      connection.post(record_url(record)) do |req|
        req.body = record_payload(record)
      end
    end

    # Deletes +record+ from the zone.
    def remove_record(record)
      connection.delete(record_url(record))
    end

    private

    # API path for a record within the managed zone.
    def record_url(record)
      "zones/#{@domain}/#{record.name}/#{record.type}"
    end

    # Request body shared by create_record and update_record (previously
    # duplicated in both methods).
    def record_payload(record)
      answers = record.answers.map do |answer|
        if answer.priority
          { :answer => [ answer.priority, answer.content ] }
        else
          { :answer => [ answer.content ] }
        end
      end
      {
        :type => record.type,
        :zone => @domain,
        :domain => record.name,
        :ttl => record.ttl,
        :answers => answers
      }
    end
  end
end
class Dslh
  # Gem version (frozen: constant strings should not be mutable).
  VERSION = '0.2.5'.freeze
end
Update version
class Dslh
  # Gem version (frozen: constant strings should not be mutable).
  VERSION = '0.2.6'.freeze
end
|
require "spec_helper"
# Test doubles: a minimal Resource hierarchy exercising the Inheritable
# mixin's URI/type-path derivation.
module Subjoin
class ExampleResource < Resource
include Inheritable
# Base URI Inheritable uses to build type URLs.
ROOT_URI="http://example.com"
end
# Overrides the path segment that would otherwise derive from the class name.
class NonStandardUri < ExampleResource
TYPE_PATH="nonstandard"
end
class ExampleArticle < ExampleResource
TYPE_PATH="articles"
end
end
describe Subjoin::Inheritable do
# Stub all HTTP GETs to return the canned ARTICLE payload.
before :each do
allow_any_instance_of(Faraday::Connection).
to receive(:get).and_return(double(Faraday::Response, :body => ARTICLE))
end
it "has a root uri" do
expect(Subjoin::ExampleResource::ROOT_URI).to eq "http://example.com"
end
describe "#type_url" do
it "is a class method" do
# Default type path is the downcased class name appended to ROOT_URI.
expect(Subjoin::ExampleResource::type_url).to eq URI("http://example.com/exampleresource")
end
end
end
describe Subjoin::Document do
describe "#new" do
context "with a single string parameter" do
# The bare type name should expand to ROOT_URI/<type>.
it "maps derived types" do
expect_any_instance_of(Faraday::Connection)
.to receive(:get).with(URI("http://example.com/articles"), {})
.and_return(double(Faraday::Response, :body => ARTICLE))
Subjoin::Document.new("articles")
end
end
context "with two string parameters" do
# Type plus id should expand to ROOT_URI/<type>/<id>.
it "maps derived types with the second string as an id" do
expect_any_instance_of(Faraday::Connection)
.to receive(:get).with(URI("http://example.com/articles/2"), {})
.and_return(double(Faraday::Response, :body => ARTICLE))
Subjoin::Document.new("articles", "2")
end
end
end
end
Where I was when my computer crashed
require "spec_helper"
module Subjoin
class ExampleResource < Resource
include Inheritable
ROOT_URI="http://example.com"
end
class NonStandardUri < ExampleResource
TYPE_PATH="nonstandard"
end
class ExampleArticle < ExampleResource
TYPE_PATH="articles"
end
end
describe Subjoin::Inheritable do
before :each do
allow_any_instance_of(Faraday::Connection).
to receive(:get).and_return(double(Faraday::Response, :body => ARTICLE))
end
it "has a root uri" do
expect(Subjoin::ExampleResource::ROOT_URI).to eq "http://example.com"
end
describe "#type_url" do
it "is a class method" do
expect(Subjoin::ExampleResource::type_url).to eq URI("http://example.com/exampleresource")
end
end
end
describe Subjoin::Document do
describe "#new" do
context "with a single string parameter" do
it "maps derived types" do
expect_any_instance_of(Faraday::Connection)
.to receive(:get).with(URI("http://example.com/articles"), {})
.and_return(double(Faraday::Response, :body => ARTICLE))
Subjoin::Document.new("articles")
end
it "returns objects of the right class" do
expect_any_instance_of(Faraday::Connection).
to receive(:get).
and_return(double(Faraday::Response, :body => COMPOUND))
expect(Subjoin::Document.new("articles").data.first).
to be_an_instance_of Fixnum
end
end
context "with two string parameters" do
it "maps derived types with the second string as an id" do
expect_any_instance_of(Faraday::Connection)
.to receive(:get).with(URI("http://example.com/articles/2"), {})
.and_return(double(Faraday::Response, :body => ARTICLE))
Subjoin::Document.new("articles", "2")
end
end
end
end
|
require "spec_helper"
module Vault
describe Auth do
subject { vault_test_client }
describe "#token" do
before do
subject.token = nil
end
it "verifies the token and saves it on the client" do
token = RSpec::VaultServer.token
subject.auth.token(token)
expect(subject.token).to eq(token)
end
it "raises an error if the token is invalid" do
expect {
expect {
subject.auth.token("nope-not-real")
}.to raise_error(HTTPError)
}.to_not change(subject, :token)
end
end
describe "#app_id" do
before(:context) do
@app_id = "aeece56e-3f9b-40c3-8f85-781d3e9a8f68"
@user_id = "3b87be76-95cf-493a-a61b-7d5fc70870ad"
vault_test_client.sys.enable_auth("app-id", "app-id", nil)
vault_test_client.logical.write("auth/app-id/map/app-id/#{@app_id}", { value: "default" })
vault_test_client.logical.write("auth/app-id/map/user-id/#{@user_id}", { value: @app_id })
vault_test_client.sys.enable_auth("new-app-id", "app-id", nil)
vault_test_client.logical.write("auth/new-app-id/map/app-id/#{@app_id}", { value: "default" })
vault_test_client.logical.write("auth/new-app-id/map/user-id/#{@user_id}", { value: @app_id })
end
before do
subject.token = nil
end
it "authenticates and saves the token on the client" do
result = subject.auth.app_id(@app_id, @user_id)
expect(subject.token).to eq(result.auth.client_token)
end
it "authenticates with custom options" do
result = subject.auth.app_id(@app_id, @user_id, mount: "new-app-id")
expect(subject.token).to eq(result.auth.client_token)
end
it "raises an error if the authentication is bad" do
expect {
expect {
subject.auth.app_id("nope", "bad")
}.to raise_error(HTTPError)
}.to_not change(subject, :token)
end
end
describe "#approle", vault: ">= 0.6.1" do
before(:context) do
@approle = "sample-role-name"
vault_test_client.sys.enable_auth("approle", "approle", nil)
end
after(:context) do
vault_test_client.sys.disable_auth("approle")
end
before do
subject.token = nil
end
context "when approle has default settings" do
before(:context) do
vault_test_client.approle.set_role(@approle)
@role_id = vault_test_client.approle.role_id(@approle)
@secret_id = vault_test_client.approle.create_secret_id(@approle).data[:secret_id]
end
after(:context) do
vault_test_client.approle.delete_role(@approle)
end
it "authenticates and saves the token on the client" do
result = subject.auth.approle(@role_id, @secret_id)
expect(subject.token).to eq(result.auth.client_token)
end
it "raises an error if the authentication is bad" do
expect {
expect {
subject.auth.approle("nope", "bad")
}.to raise_error(HTTPError)
}.to_not change(subject, :token)
end
end
context "when approle has 'bind_secret_id' disabled" do
before(:context) do
opts = {
bind_secret_id: false,
bound_cidr_list: "127.0.0.1/32"
}
vault_test_client.approle.set_role(@approle, opts)
@role_id = vault_test_client.approle.role_id(@approle)
end
after(:context) do
vault_test_client.approle.delete_role(@approle)
end
it "authenticates w/o secret_id and saves the token on the client" do
result = subject.auth.approle(@role_id)
expect(subject.token).to eq(result.auth.client_token)
end
end
end
describe "#userpass" do
before(:context) do
@username = "sethvargo"
@password = "s3kr3t"
vault_test_client.sys.enable_auth("userpass", "userpass", nil)
vault_test_client.logical.write("auth/userpass/users/#{@username}", { password: @password, policies: "default" })
vault_test_client.sys.enable_auth("new-userpass", "userpass", nil)
vault_test_client.logical.write("auth/new-userpass/users/#{@username}", { password: @password, policies: "default" })
end
before do
subject.token = nil
end
it "authenticates and saves the token on the client" do
result = subject.auth.userpass(@username, @password)
expect(subject.token).to eq(result.auth.client_token)
end
it "authenticates with custom options" do
result = subject.auth.userpass(@username, @password, mount: "new-userpass")
expect(subject.token).to eq(result.auth.client_token)
end
it "raises an error if the authentication is bad" do
expect {
expect {
subject.auth.userpass("nope", "bad")
}.to raise_error(HTTPError)
}.to_not change(subject, :token)
end
end
describe "#tls" do
before(:context) do
vault_test_client.sys.enable_auth("cert", "cert", nil)
end
after(:context) do
vault_test_client.sys.disable_auth("cert")
end
let!(:old_token) { subject.token }
let(:certificate) do
{
display_name: "sample-cert",
certificate: RSpec::SampleCertificate.cert,
policies: "default",
ttl: 3600,
}
end
let(:auth_cert) { RSpec::SampleCertificate.cert << RSpec::SampleCertificate.key }
after do
subject.token = old_token
end
it "authenticates and saves the token on the client" do
pending "dev server does not support tls"
subject.auth_tls.set_certificate("kaelumania", certificate)
result = subject.auth.tls(auth_cert)
expect(subject.token).to eq(result.auth.client_token)
end
it "authenticates with default ssl_pem_file" do
pending "dev server does not support tls"
subject.auth_tls.set_certificate("kaelumania", certificate)
subject.ssl_pem_file = auth_cert
result = subject.auth.tls
expect(subject.token).to eq(result.auth.client_token)
end
it "raises an error if the authentication is bad" do
subject.sys.disable_auth("cert")
expect {
expect {
subject.auth.tls(auth_cert)
}.to raise_error(HTTPError)
}.to_not change { subject.token }
end
end
end
end
Do not run cert tests on Vault < 0.6.2
require "spec_helper"
module Vault
describe Auth do
subject { vault_test_client }
describe "#token" do
before do
subject.token = nil
end
it "verifies the token and saves it on the client" do
token = RSpec::VaultServer.token
subject.auth.token(token)
expect(subject.token).to eq(token)
end
it "raises an error if the token is invalid" do
expect {
expect {
subject.auth.token("nope-not-real")
}.to raise_error(HTTPError)
}.to_not change(subject, :token)
end
end
describe "#app_id" do
before(:context) do
@app_id = "aeece56e-3f9b-40c3-8f85-781d3e9a8f68"
@user_id = "3b87be76-95cf-493a-a61b-7d5fc70870ad"
vault_test_client.sys.enable_auth("app-id", "app-id", nil)
vault_test_client.logical.write("auth/app-id/map/app-id/#{@app_id}", { value: "default" })
vault_test_client.logical.write("auth/app-id/map/user-id/#{@user_id}", { value: @app_id })
vault_test_client.sys.enable_auth("new-app-id", "app-id", nil)
vault_test_client.logical.write("auth/new-app-id/map/app-id/#{@app_id}", { value: "default" })
vault_test_client.logical.write("auth/new-app-id/map/user-id/#{@user_id}", { value: @app_id })
end
before do
subject.token = nil
end
it "authenticates and saves the token on the client" do
result = subject.auth.app_id(@app_id, @user_id)
expect(subject.token).to eq(result.auth.client_token)
end
it "authenticates with custom options" do
result = subject.auth.app_id(@app_id, @user_id, mount: "new-app-id")
expect(subject.token).to eq(result.auth.client_token)
end
it "raises an error if the authentication is bad" do
expect {
expect {
subject.auth.app_id("nope", "bad")
}.to raise_error(HTTPError)
}.to_not change(subject, :token)
end
end
describe "#approle", vault: ">= 0.6.1" do
before(:context) do
@approle = "sample-role-name"
vault_test_client.sys.enable_auth("approle", "approle", nil)
end
after(:context) do
vault_test_client.sys.disable_auth("approle")
end
before do
subject.token = nil
end
context "when approle has default settings" do
before(:context) do
vault_test_client.approle.set_role(@approle)
@role_id = vault_test_client.approle.role_id(@approle)
@secret_id = vault_test_client.approle.create_secret_id(@approle).data[:secret_id]
end
after(:context) do
vault_test_client.approle.delete_role(@approle)
end
it "authenticates and saves the token on the client" do
result = subject.auth.approle(@role_id, @secret_id)
expect(subject.token).to eq(result.auth.client_token)
end
it "raises an error if the authentication is bad" do
expect {
expect {
subject.auth.approle("nope", "bad")
}.to raise_error(HTTPError)
}.to_not change(subject, :token)
end
end
context "when approle has 'bind_secret_id' disabled" do
before(:context) do
opts = {
bind_secret_id: false,
bound_cidr_list: "127.0.0.1/32"
}
vault_test_client.approle.set_role(@approle, opts)
@role_id = vault_test_client.approle.role_id(@approle)
end
after(:context) do
vault_test_client.approle.delete_role(@approle)
end
it "authenticates w/o secret_id and saves the token on the client" do
result = subject.auth.approle(@role_id)
expect(subject.token).to eq(result.auth.client_token)
end
end
end
describe "#userpass" do
before(:context) do
@username = "sethvargo"
@password = "s3kr3t"
vault_test_client.sys.enable_auth("userpass", "userpass", nil)
vault_test_client.logical.write("auth/userpass/users/#{@username}", { password: @password, policies: "default" })
vault_test_client.sys.enable_auth("new-userpass", "userpass", nil)
vault_test_client.logical.write("auth/new-userpass/users/#{@username}", { password: @password, policies: "default" })
end
before do
subject.token = nil
end
it "authenticates and saves the token on the client" do
result = subject.auth.userpass(@username, @password)
expect(subject.token).to eq(result.auth.client_token)
end
it "authenticates with custom options" do
result = subject.auth.userpass(@username, @password, mount: "new-userpass")
expect(subject.token).to eq(result.auth.client_token)
end
it "raises an error if the authentication is bad" do
expect {
expect {
subject.auth.userpass("nope", "bad")
}.to raise_error(HTTPError)
}.to_not change(subject, :token)
end
end
describe "#tls" do
before(:context) do
vault_test_client.sys.enable_auth("cert", "cert", nil)
end
after(:context) do
vault_test_client.sys.disable_auth("cert")
end
let!(:old_token) { subject.token }
let(:certificate) do
{
display_name: "sample-cert",
certificate: RSpec::SampleCertificate.cert,
policies: "default",
ttl: 3600,
}
end
let(:auth_cert) { RSpec::SampleCertificate.cert << RSpec::SampleCertificate.key }
after do
subject.token = old_token
end
it "authenticates and saves the token on the client" do
pending "dev server does not support tls"
subject.auth_tls.set_certificate("kaelumania", certificate)
result = subject.auth.tls(auth_cert)
expect(subject.token).to eq(result.auth.client_token)
end
it "authenticates with default ssl_pem_file" do
pending "dev server does not support tls"
subject.auth_tls.set_certificate("kaelumania", certificate)
subject.ssl_pem_file = auth_cert
result = subject.auth.tls
expect(subject.token).to eq(result.auth.client_token)
end
it "raises an error if the authentication is bad", vault: "> 0.6.1" do
subject.sys.disable_auth("cert")
expect {
expect {
subject.auth.tls(auth_cert)
}.to raise_error(HTTPError)
}.to_not change { subject.token }
end
end
end
end
|
require 'spec_helper'
describe JanusGateway::Transport::Http do
let(:url) { 'http://example.com' }
let(:data) { { 'janus' => 'success', 'transaction' => 'ABCDEFGHIJK' } }
let(:transport) { JanusGateway::Transport::Http.new(url) }
describe '#send_transaction' do
before do
transport.stub(:_send) do |data|
janus_server.respond(data)
end
end
error_0 = JanusGateway::Error.new(0, 'HTTP/Transport response: `501`')
janus_response = {
timeout: '{"janus":"success", "transaction":"000"}',
test: '{"janus":"success", "transaction":"ABCDEFGHIJK"}',
create: "{\"error\":{\"code\": #{error_0.code}, \"reason\": \"#{error_0.info}\"}}"
}
let(:janus_server) { HttpDummyJanusServer.new(janus_response) }
it 'should response with timeout' do
transport.stub(:transaction_id_new).and_return('000')
transport.stub(:_transaction_timeout).and_return(0.001)
transport.stub(:send)
promise = transport.send_transaction(janus: 'timeout')
EM.run do
promise.rescue do
EM.stop
end
end
expect(promise.value).to eq(nil)
expect(promise.rejected?).to eq(true)
end
it 'fulfills transaction promises' do
transport.stub(:transaction_id_new).and_return('ABCDEFGHIJK')
expect(transport).to receive(:_send).with(janus: 'test', transaction: 'ABCDEFGHIJK')
promise = transport.send_transaction(janus: 'test')
EM.run do
EM.error_handler do |e|
puts e
EM.stop
end
promise.then { EM.stop }
promise.rescue { EM.stop }
end
expect(promise.value).to eq(data)
end
it 'rejects transaction promises' do
transport.stub(:transaction_id_new).and_return('ABCDEFGHIJK')
transport.stub(:_send) do
Concurrent::Promise.reject('501')
end
promise = transport.send_transaction(janus: 'create')
EM.run do
promise.rescue do
EM.stop
end
end
expect(promise.value).to eq(nil)
expect(promise.rejected?).to eq(true)
expect(promise.reason.message).to eq(error_0.message)
end
end
describe 'when _send' do
include WebMock::API
context 'when given invalid data' do
it 'should raise when cannot parse to json' do
expect { transport.__send__(:_send, 'foo') }.to raise_error(StandardError)
end
end
context 'when given proper data' do
let(:http_request) { stub_request(:post, url) }
let(:request) { transport.__send__(:_send, 'request_param' => 'value') }
it 'should send proper http request' do
http_request.with(body: { 'request_param' => 'value' })
http_request.to_return(body: '[]')
EM.run do
request.then { EM.stop }
request.rescue { EM.stop }
end
expect(request.fulfilled?).to eq(true)
end
context 'and responds with valid response' do
it 'should resolve with body' do
http_request.to_return(body: '{"response_param":"value"}')
EM.run do
request.then { EM.stop }
request.rescue { EM.stop }
end
expect(request.fulfilled?).to eq(true)
expect(request.value).to eq('response_param' => 'value')
end
end
context 'and responds with non-valid status' do
it 'should reject with http error' do
http_request.to_return(status: [500, 'Internal Server Error'])
EM.run do
request.then { EM.stop }
request.rescue { EM.stop }
end
expect(request.rejected?).to eq(true)
end
end
context 'and responds with invalid json data' do
it 'should reject with error' do
http_request.to_return(body: 'invalid-json')
EM.run do
request.then { EM.stop }
request.rescue { EM.stop }
end
expect(request.rejected?).to eq(true)
end
end
context 'and timeouts' do
let(:request) { transport.__send__(:_send, []) }
it 'should reject with error' do
http_request.to_timeout
EM.run do
request.then { EM.stop }
request.rescue { EM.stop }
end
expect(request.rejected?).to eq(true)
end
end
end
end
end
Add more assertions
require 'spec_helper'
describe JanusGateway::Transport::Http do
let(:url) { 'http://example.com' }
let(:data) { { 'janus' => 'success', 'transaction' => 'ABCDEFGHIJK' } }
let(:transport) { JanusGateway::Transport::Http.new(url) }
describe '#send_transaction' do
before do
transport.stub(:_send) do |data|
janus_server.respond(data)
end
end
error_0 = JanusGateway::Error.new(0, 'HTTP/Transport response: `501`')
janus_response = {
timeout: '{"janus":"success", "transaction":"000"}',
test: '{"janus":"success", "transaction":"ABCDEFGHIJK"}',
create: "{\"error\":{\"code\": #{error_0.code}, \"reason\": \"#{error_0.info}\"}}"
}
let(:janus_server) { HttpDummyJanusServer.new(janus_response) }
it 'should response with timeout' do
transport.stub(:transaction_id_new).and_return('000')
transport.stub(:_transaction_timeout).and_return(0.001)
transport.stub(:send)
promise = transport.send_transaction(janus: 'timeout')
EM.run do
promise.rescue do
EM.stop
end
end
expect(promise.value).to eq(nil)
expect(promise.rejected?).to eq(true)
end
it 'fulfills transaction promises' do
transport.stub(:transaction_id_new).and_return('ABCDEFGHIJK')
expect(transport).to receive(:_send).with(janus: 'test', transaction: 'ABCDEFGHIJK')
promise = transport.send_transaction(janus: 'test')
EM.run do
EM.error_handler do |e|
puts e
EM.stop
end
promise.then { EM.stop }
promise.rescue { EM.stop }
end
expect(promise.fulfilled?).to eq(true)
expect(promise.value).to eq(data)
expect(promise.rejected?).to eq(false)
expect(promise.reason).to eq(nil)
end
it 'rejects transaction promises' do
transport.stub(:transaction_id_new).and_return('ABCDEFGHIJK')
transport.stub(:_send) do
Concurrent::Promise.reject('501')
end
promise = transport.send_transaction(janus: 'create')
EM.run do
promise.rescue do
EM.stop
end
end
expect(promise.value).to eq(nil)
expect(promise.rejected?).to eq(true)
expect(promise.reason.message).to eq(error_0.message)
end
end
describe 'when _send' do
include WebMock::API
context 'when given invalid data' do
it 'should raise when cannot parse to json' do
expect { transport.__send__(:_send, 'foo') }.to raise_error(StandardError)
end
end
context 'when given proper data' do
let(:http_request) { stub_request(:post, url) }
let(:request) { transport.__send__(:_send, 'request_param' => 'value') }
it 'should send proper http request' do
http_request.with(body: { 'request_param' => 'value' })
http_request.to_return(body: '[]')
EM.run do
request.then { EM.stop }
request.rescue { EM.stop }
end
expect(request.fulfilled?).to eq(true)
end
context 'and responds with valid response' do
it 'should resolve with body' do
http_request.to_return(body: '{"response_param":"value"}')
EM.run do
request.then { EM.stop }
request.rescue { EM.stop }
end
expect(request.fulfilled?).to eq(true)
expect(request.value).to eq('response_param' => 'value')
end
end
context 'and responds with non-valid status' do
it 'should reject with http error' do
http_request.to_return(status: [500, 'Internal Server Error'])
EM.run do
request.then { EM.stop }
request.rescue { EM.stop }
end
expect(request.rejected?).to eq(true)
end
end
context 'and responds with invalid json data' do
it 'should reject with error' do
http_request.to_return(body: 'invalid-json')
EM.run do
request.then { EM.stop }
request.rescue { EM.stop }
end
expect(request.rejected?).to eq(true)
end
end
context 'and timeouts' do
let(:request) { transport.__send__(:_send, []) }
it 'should reject with error' do
http_request.to_timeout
EM.run do
request.then { EM.stop }
request.rescue { EM.stop }
end
expect(request.rejected?).to eq(true)
end
end
end
end
end
|
require 'spec_helper'
describe Danica::Formatted do
let(:content) { Danica::Wrapper::Variable.new(latex: :V, gnuplot: :v) }
let(:format) { :tex }
let(:options) { {} }
subject do
described_class.new(content, format, options)
end
describe '#to_s' do
context 'when format is tex' do
it 'return the expected tex string' do
expect(subject.to_s).to eq('V')
end
end
context 'when format is gnu' do
let(:format) { :gnu }
it 'return the expected gnu string' do
expect(subject.to_s).to eq('v')
end
end
context 'when variable has numeric value' do
let(:content) { Danica::Wrapper::Number.new(1/3.0) }
it 'returns the formatted number' do
expect(subject.to_s).to eq('0.3333333333333333')
end
context 'when passing decimals settings' do
let(:options) { { decimals: 4 } }
it 'returns the formatted number' do
expect(subject.to_s).to eq('0.3333')
end
end
end
end
describe 'operators' do
describe '+' do
it do
expect(subject + 2).to be_a(described_class)
end
it 'keeps being able to parse format' do
expect((subject + 2).to_s).to eq('V + 2')
end
end
describe '*' do
it do
expect(subject * 2).to be_a(described_class)
end
it 'keeps being able to parse format' do
expect((subject * 2).to_s).to eq('V \cdot 2')
end
end
describe '-@' do
it do
expect(-subject).to be_a(described_class)
end
it 'keeps being able to parse format' do
expect((-subject).to_s).to eq('-V')
end
end
end
describe '#to_f' do
let(:content) { Danica::Wrapper::Number.new(2) }
it do
expect(subject.to_f).to be_a(Numeric)
end
it 'returns the number' do
expect(subject.to_f).to eq(2)
end
end
describe '#to' do
it do
expect(subject.to(:tex)).to be_a(String)
end
it 'returns the string' do
expect(subject.to(:tex)).to eq('V')
end
end
describe '#tex' do
it do
expect(subject.tex).to be_a(Danica::Formatted)
end
context 'when original format is tex' do
it 'returns the tex string' do
expect(subject.tex.to_s).to eq('V')
end
it 'returns similar object' do
expect(subject.tex).to eq(subject)
end
end
context 'when original format is gnu' do
let(:format) { :gnu }
it 'returns the tex string' do
expect(subject.tex.to_s).to eq('V')
end
it 'returns a new format object' do
expect(subject.tex).not_to eq(subject)
end
end
end
describe '#gnu' do
it do
expect(subject.gnu).to be_a(Danica::Formatted)
end
context 'when original format is tex' do
it 'returns the gnu string' do
expect(subject.gnu.to_s).to eq('v')
end
end
context 'when original format is gnu' do
let(:format) { :gnu }
it 'returns the gnu string' do
expect(subject.gnu.to_s).to eq('v')
end
end
end
end
Formatted responds to all methods
require 'spec_helper'
describe Danica::Formatted do
let(:content) { Danica::Wrapper::Variable.new(latex: :V, gnuplot: :v) }
let(:format) { :tex }
let(:options) { {} }
subject do
described_class.new(content, format, options)
end
it_behaves_like 'an object that respond to basic_methods'
describe '#to_s' do
context 'when format is tex' do
it 'return the expected tex string' do
expect(subject.to_s).to eq('V')
end
end
context 'when format is gnu' do
let(:format) { :gnu }
it 'return the expected gnu string' do
expect(subject.to_s).to eq('v')
end
end
context 'when variable has numeric value' do
let(:content) { Danica::Wrapper::Number.new(1/3.0) }
it 'returns the formatted number' do
expect(subject.to_s).to eq('0.3333333333333333')
end
context 'when passing decimals settings' do
let(:options) { { decimals: 4 } }
it 'returns the formatted number' do
expect(subject.to_s).to eq('0.3333')
end
end
end
end
describe 'operators' do
describe '+' do
it do
expect(subject + 2).to be_a(described_class)
end
it 'keeps being able to parse format' do
expect((subject + 2).to_s).to eq('V + 2')
end
end
describe '*' do
it do
expect(subject * 2).to be_a(described_class)
end
it 'keeps being able to parse format' do
expect((subject * 2).to_s).to eq('V \cdot 2')
end
end
describe '-@' do
it do
expect(-subject).to be_a(described_class)
end
it 'keeps being able to parse format' do
expect((-subject).to_s).to eq('-V')
end
end
end
describe '#to_f' do
let(:content) { Danica::Wrapper::Number.new(2) }
it do
expect(subject.to_f).to be_a(Numeric)
end
it 'returns the number' do
expect(subject.to_f).to eq(2)
end
end
describe '#to' do
it do
expect(subject.to(:tex)).to be_a(String)
end
it 'returns the string' do
expect(subject.to(:tex)).to eq('V')
end
end
describe '#tex' do
it do
expect(subject.tex).to be_a(Danica::Formatted)
end
context 'when original format is tex' do
it 'returns the tex string' do
expect(subject.tex.to_s).to eq('V')
end
it 'returns similar object' do
expect(subject.tex).to eq(subject)
end
end
context 'when original format is gnu' do
let(:format) { :gnu }
it 'returns the tex string' do
expect(subject.tex.to_s).to eq('V')
end
it 'returns a new format object' do
expect(subject.tex).not_to eq(subject)
end
end
end
describe '#gnu' do
it do
expect(subject.gnu).to be_a(Danica::Formatted)
end
context 'when original format is tex' do
it 'returns the gnu string' do
expect(subject.gnu.to_s).to eq('v')
end
end
context 'when original format is gnu' do
let(:format) { :gnu }
it 'returns the gnu string' do
expect(subject.gnu.to_s).to eq('v')
end
end
end
end
|
# -*- coding: utf-8 -*-
require 'spec_helper'
describe DXRubySDL::Sound, '音を表すクラス' do
describe '.new' do
shared_context '.new' do
subject { DXRubySDL::Sound.new(fixture_path(filename)) }
it '呼び出すことができる' do
subject
end
end
context 'WAVE形式のファイルの場合' do
let(:filename) { 'sound.wav' }
include_context '.new'
end
context 'MIDI形式のファイルの場合' do
let(:filename) { 'bgm.mid' }
include_context '.new'
end
end
describe '#play' do
context 'WAVE形式のファイルの場合' do
let(:path) { fixture_path('sound.wav') }
let(:sound) { DXRubySDL::Sound.new(path) }
subject { sound.play }
it 'SDL::Mixer.play_channelを呼び出す' do
wave =
sound.instance_variable_get('@sound').instance_variable_get('@wave')
expect(SDL::Mixer).to receive(:play_channel).with(-1, wave, 0)
subject
end
context '3回連続で呼び出した場合' do
before do
wave = sound.instance_variable_get('@sound')
.instance_variable_get('@wave')
count = 0
expect(SDL::Mixer)
.to receive(:play_channel).with(-1, wave, 0).exactly(4).times {
count += 1
if count == 3
count = 0
raise SDL::Error.new('couldn\'t play wave:' \
' No free channels available')
end
(count - 1) % 2
}
expect(SDL::Mixer).to receive(:halt).with(0)
end
it '最初のものを停止する' do
3.times { sound.play }
end
end
end
context 'MIDI形式のファイルの場合' do
let(:path) { fixture_path('bgm.mid') }
let(:sound) { DXRubySDL::Sound.new(path) }
subject { sound.play }
it 'SDL::Mixer.play_musicを呼び出す' do
music =
sound.instance_variable_get('@sound').instance_variable_get('@music')
expect(SDL::Mixer).to receive(:play_music).with(music, -1)
subject
end
end
end
describe '#stop' do
context 'WAVE形式のファイルの場合' do
let(:path) { fixture_path('sound.wav') }
let(:sound) { DXRubySDL::Sound.new(path) }
subject { sound.stop }
it 'SDL::Mixer.halt_musicが呼び出される' do
sound.play
expect(SDL::Mixer).to receive(:halt)
subject
end
end
context 'MIDI形式のファイルの場合' do
let(:path) { fixture_path('bgm.mid') }
let(:sound) { DXRubySDL::Sound.new(path) }
subject { sound.stop }
it 'SDL::Mixer.halt_musicが呼び出される' do
sound.play
expect(SDL::Mixer).to receive(:halt_music)
subject
end
end
end
end
refactored spec.
# -*- coding: utf-8 -*-
require 'spec_helper'
describe DXRubySDL::Sound, '音を表すクラス' do
describe '.new' do
shared_context '.new' do
subject { DXRubySDL::Sound.new(fixture_path(filename)) }
it '呼び出すことができる' do
subject
end
end
context 'WAVE形式のファイルの場合' do
let(:filename) { 'sound.wav' }
include_context '.new'
end
context 'MIDI形式のファイルの場合' do
let(:filename) { 'bgm.mid' }
include_context '.new'
end
end
describe '#play' do
context 'WAVE形式のファイルの場合' do
let(:path) { fixture_path('sound.wav') }
let(:sound) { DXRubySDL::Sound.new(path) }
subject { sound.play }
it 'SDL::Mixer.play_channelを呼び出す' do
wave =
sound.instance_variable_get('@sound').instance_variable_get('@wave')
expect(SDL::Mixer).to receive(:play_channel).with(-1, wave, 0)
subject
end
context '3回連続で呼び出した場合' do
before do
wave = sound.instance_variable_get('@sound')
.instance_variable_get('@wave')
count = 0
expect(SDL::Mixer)
.to receive(:play_channel).with(-1, wave, 0).exactly(4).times {
count += 1
if count == 3
count = 0
raise SDL::Error.new('couldn\'t play wave:' \
' No free channels available')
end
(count - 1) % 2
}
expect(SDL::Mixer).to receive(:halt).with(0)
end
it '最初のものを停止する' do
3.times { sound.play }
end
end
end
context 'MIDI形式のファイルの場合' do
let(:path) { fixture_path('bgm.mid') }
let(:sound) { DXRubySDL::Sound.new(path) }
subject { sound.play }
it 'SDL::Mixer.play_musicを呼び出す' do
music =
sound.instance_variable_get('@sound').instance_variable_get('@music')
expect(SDL::Mixer).to receive(:play_music).with(music, -1)
subject
end
end
end
describe '#stop' do
context 'WAVE file' do
let(:path) { fixture_path('sound.wav') }
let(:sound) { DXRubySDL::Sound.new(path) }
subject { sound.stop }
before do
allow(SDL::Mixer).to receive(:halt)
sound.play
subject
end
describe SDL::Mixer do
it { expect(SDL::Mixer).to have_received(:halt).with(0).once }
end
end
context 'MIDI file' do
let(:path) { fixture_path('bgm.mid') }
let(:sound) { DXRubySDL::Sound.new(path) }
subject { sound.stop }
before do
allow(SDL::Mixer).to receive(:halt_music)
sound.play
subject
end
describe SDL::Mixer do
it { expect(SDL::Mixer).to have_received(:halt_music).with(no_args).once }
end
end
end
end
|
require 'spec_helper'
RSpec.describe RDStation::Client do
context "when access_token is given" do
let(:access_token) { 'access_token' }
let(:client) { described_class.new(access_token: access_token) }
it "returns the correct endpoint" do
expect(client.contacts).to be_a(RDStation::Contacts)
expect(client.events).to be_a(RDStation::Events)
expect(client.fields).to be_a(RDStation::Fields)
expect(client.webhooks).to be_a(RDStation::Webhooks)
end
it "creates an authorization_header and initilizes the enpoints with this header" do
mock_authorization_header = double(RDStation::AuthorizationHeader)
expect(RDStation::AuthorizationHeader).to receive(:new)
.with({ access_token: access_token})
.and_return(mock_authorization_header)
expect(RDStation::Contacts).to receive(:new).with({ authorization_header: mock_authorization_header })
expect(RDStation::Events).to receive(:new).with({ authorization_header: mock_authorization_header })
expect(RDStation::Fields).to receive(:new).with({ authorization_header: mock_authorization_header })
expect(RDStation::Webhooks).to receive(:new).with({ authorization_header: mock_authorization_header })
client.contacts
client.events
client.fields
client.webhooks
end
end
end
Create context for missing token in client_spec
require 'spec_helper'
RSpec.describe RDStation::Client do
context "when access_token is given" do
let(:access_token) { 'access_token' }
let(:client) { described_class.new(access_token: access_token) }
it "returns the correct endpoint" do
expect(client.contacts).to be_a(RDStation::Contacts)
expect(client.events).to be_a(RDStation::Events)
expect(client.fields).to be_a(RDStation::Fields)
expect(client.webhooks).to be_a(RDStation::Webhooks)
end
it "creates an authorization_header and initilizes the enpoints with this header" do
mock_authorization_header = double(RDStation::AuthorizationHeader)
expect(RDStation::AuthorizationHeader).to receive(:new)
.with({ access_token: access_token})
.and_return(mock_authorization_header)
expect(RDStation::Contacts).to receive(:new).with({ authorization_header: mock_authorization_header })
expect(RDStation::Events).to receive(:new).with({ authorization_header: mock_authorization_header })
expect(RDStation::Fields).to receive(:new).with({ authorization_header: mock_authorization_header })
expect(RDStation::Webhooks).to receive(:new).with({ authorization_header: mock_authorization_header })
client.contacts
client.events
client.fields
client.webhooks
end
end
context "when access_token isn't given" do
it "raises a ArgumentError exception" do
expect{ described_class.new(access_token: nil) }.to raise_error(ArgumentError)
end
end
end
|
require "unit_helper"
require "timecop"
module Sidekiq
module Worker
end
end
describe Revily::Event::Job do
before { Timecop.freeze Time.local(2012, 10, 26, 10, 49) }
after { Timecop.return }
describe ".schedule" do
before { Revily::Sidekiq.stub(:schedule) }
it "schedules a job to run in the future" do
described_class.schedule(:default, 30.minutes, {}, {})
puts Time.now.to_i.to_f
expect(Revily::Sidekiq).to have_received(:schedule).with(
described_class,
:perform,
{ queue: :default, retries: 8, backtrace: true, at: (Time.now.to_i.to_f + 1800) },
{ payload: {}, params: {} }
)
end
end
describe ".run" do
before { Revily::Sidekiq.stub(:run) }
it "runs a job immediately" do
described_class.run(:default, {}, {})
expect(Revily::Sidekiq).to have_received(:run).with(
described_class,
:perform,
{ queue: :default, retries: 8, backtrace: true},
{ payload: {}, params: {} }
)
end
end
describe "#run" do
let(:job) { described_class.new }
context "valid" do
before { job.stub(valid?: true, process: true) }
it "performs a valid job" do
job.run
expect(job).to have_received(:process)
end
end
context "invalid" do
before { job.stub(valid?: false, process: true) }
it "does not perform an invalid job" do
job_result = job.run
expect(job_result).to be_false
expect(job).not_to have_received(:process)
end
end
end
end
drop an unneeded puts [ci skip]
# Unit specs for Revily::Event::Job: verifies the arguments the job
# passes to Revily::Sidekiq when scheduling, running, and performing.
require "unit_helper"
require "timecop"
# Minimal stand-in so the job class loads without the real Sidekiq gem.
module Sidekiq
module Worker
end
end
describe Revily::Event::Job do
# Freeze the clock so the computed `at:` timestamp is deterministic.
before { Timecop.freeze Time.local(2012, 10, 26, 10, 49) }
after { Timecop.return }
describe ".schedule" do
before { Revily::Sidekiq.stub(:schedule) }
it "schedules a job to run in the future" do
described_class.schedule(:default, 30.minutes, {}, {})
expect(Revily::Sidekiq).to have_received(:schedule).with(
described_class,
:perform,
# 30.minutes == 1800 seconds after the frozen "now".
{ queue: :default, retries: 8, backtrace: true, at: (Time.now.to_i.to_f + 1800) },
{ payload: {}, params: {} }
)
end
end
describe ".run" do
before { Revily::Sidekiq.stub(:run) }
it "runs a job immediately" do
described_class.run(:default, {}, {})
expect(Revily::Sidekiq).to have_received(:run).with(
described_class,
:perform,
{ queue: :default, retries: 8, backtrace: true},
{ payload: {}, params: {} }
)
end
end
describe "#run" do
let(:job) { described_class.new }
context "valid" do
before { job.stub(valid?: true, process: true) }
it "performs a valid job" do
job.run
expect(job).to have_received(:process)
end
end
context "invalid" do
before { job.stub(valid?: false, process: true) }
it "does not perform an invalid job" do
job_result = job.run
expect(job_result).to be_false
expect(job).not_to have_received(:process)
end
end
end
end
|
require "spec_helper"

# Integration specs for Lob letter resources: list, create (by URL and
# by local file), find, and destroy.
describe Lob::Resources::Letter do
  before :each do
    @sample_address_params = {
      name: "TestAddress",
      email: "test@test.com",
      address_line1: "123 Test Street",
      address_line2: "Unit 199",
      address_city: "Mountain View",
      address_state: "CA",
      address_country: "US",
      address_zip: 94085
    }
  end

  subject { Lob::Client.new(api_key: API_KEY) }

  describe "list" do
    it "should list letter" do
      assert subject.letters.list["object"] == "list"
    end
  end

  describe "create" do
    it "should create a letter with address_id" do
      new_address = subject.addresses.create @sample_address_params
      new_letter = subject.letters.create(
        description: "TestLetter",
        color: true,
        file: "https://s3-us-west-2.amazonaws.com/lob-assets/letter-goblue.pdf",
        to: new_address["id"],
        from: @sample_address_params
      )
      new_letter["description"].must_equal("TestLetter")
    end

    it "should create a letter with a local file" do
      new_address = subject.addresses.create @sample_address_params
      new_letter = subject.letters.create(
        description: "TestLetter",
        color: true,
        file: File.new(File.expand_path("../../../samples/8.5x11.pdf", __FILE__)),
        to: new_address["id"],
        from: @sample_address_params
      )
      new_letter["description"].must_equal("TestLetter")
    end
  end

  # NOTE: the original file declared this identical "find" block twice;
  # the duplicate was removed.
  describe "find" do
    it "should find a letter" do
      new_address = subject.addresses.create @sample_address_params
      new_letter = subject.letters.create(
        description: "TestLetter",
        color: true,
        file: "https://s3-us-west-2.amazonaws.com/lob-assets/letter-goblue.pdf",
        to: new_address["id"],
        from: new_address["id"]
      )
      result = subject.letters.find(new_letter["id"])
      assert /#{new_letter["description"]}/ =~ result.to_s
    end
  end

  describe "destroy" do
    it "should destroy a letter" do
      new_address = subject.addresses.create @sample_address_params
      new_letter = subject.letters.create(
        description: "TestLetter",
        color: true,
        file: "https://s3-us-west-2.amazonaws.com/lob-assets/letter-goblue.pdf",
        to: new_address["id"],
        from: new_address["id"]
      )
      result = subject.letters.destroy(new_letter["id"])
      result["id"].must_equal(new_letter["id"])
      result["deleted"].must_equal(true)
    end
  end
end
test(letters): remove duplicate test (#167)
# Integration specs for Lob letter resources: list, create (by URL and
# by local file), find, and destroy. Hits the live API via Lob::Client.
require "spec_helper"
describe Lob::Resources::Letter do
before :each do
@sample_address_params = {
name: "TestAddress",
email: "test@test.com",
address_line1: "123 Test Street",
address_line2: "Unit 199",
address_city: "Mountain View",
address_state: "CA",
address_country: "US",
address_zip: 94085
}
end
subject { Lob::Client.new(api_key: API_KEY) }
describe "list" do
it "should list letter" do
assert subject.letters.list["object"] == "list"
end
end
describe "create" do
it "should create a letter with address_id" do
new_address = subject.addresses.create @sample_address_params
new_letter = subject.letters.create(
description: "TestLetter",
color: true,
file: "https://s3-us-west-2.amazonaws.com/lob-assets/letter-goblue.pdf",
to: new_address["id"],
from: @sample_address_params
)
new_letter["description"].must_equal("TestLetter")
end
it "should create a letter with a local file" do
new_address = subject.addresses.create @sample_address_params
new_letter = subject.letters.create(
description: "TestLetter",
color: true,
file: File.new(File.expand_path("../../../samples/8.5x11.pdf", __FILE__)),
to: new_address["id"],
from: @sample_address_params
)
new_letter["description"].must_equal("TestLetter")
end
end
describe "find" do
it "should find a letter" do
new_address = subject.addresses.create @sample_address_params
new_letter = subject.letters.create(
description: "TestLetter",
color: true,
file: "https://s3-us-west-2.amazonaws.com/lob-assets/letter-goblue.pdf",
to: new_address["id"],
from: new_address["id"]
)
result = subject.letters.find(new_letter["id"])
assert /#{new_letter["description"]}/ =~ result.to_s
end
end
describe "destroy" do
it "should destroy a letter" do
new_address = subject.addresses.create @sample_address_params
new_letter = subject.letters.create(
description: "TestLetter",
color: true,
file: "https://s3-us-west-2.amazonaws.com/lob-assets/letter-goblue.pdf",
to: new_address["id"],
from: new_address["id"]
)
result = subject.letters.destroy(new_letter["id"])
result["id"].must_equal(new_letter["id"])
result["deleted"].must_equal(true)
end
end
end
|
Test for mab2snr->volume_count
# Specs for the MAB2SNR volume_count transformation: each of the MAB
# fields 456/466/476/486/496 (indicator 2 = "1", subfield "a") should
# populate control/volume_count, with lower field numbers winning.
describe Metacrunch::UBPB::Transformations::MAB2SNR::VolumeCount do
  it "456, 466, 476, 486, 496 works" do
    ["456", "466", "476", "486", "496"].each do |field|
      mab = mab_builder do
        datafield(field, ind2: "1") { subfield("a", "2") }
      end
      result = mab2snr(mab)
      expect(result.first_value("control/volume_count")).to eq(2)
    end
  end

  # Fixed typo in the example description ("hight" -> "higher").
  it "Lower fields have precedence over higher fields" do
    mab = mab_builder do
      datafield("456", ind2: "1") { subfield("a", "1") }
      datafield("496", ind2: "1") { subfield("a", "2") }
    end
    result = mab2snr(mab)
    # 456 beats 496, so the count taken from 456 must win.
    expect(result.first_value("control/volume_count")).to eq(1)
  end
end
|
# Specs for OrderMailer#thank_you_membership: delivers the membership
# thank-you email and checks its headers and rendered body content.
require "spec_helper"
describe OrderMailer do
describe '#thank_you_membership' do
let(:order) { FactoryGirl.build(:order) }
# Deliver once per example and capture the last delivery for inspection.
before(:each) do
OrderMailer.thank_you_membership(order).deliver
@email = ActionMailer::Base.deliveries.last
end
it 'email has proper headers' do
@email.subject.should == "Thank you for joining CIW's Member Program"
@email.from[0].should == 'forms@chicagoideas.com'
@email.to[0].should == order.user.email
end
it 'email has proper content' do
@email.body.should match(/Thank you for joining CIW's Member Program/)
@email.body.should match(/Hi, #{order.name_on_card}/)
@email.body.should match(/Included in your #{order.member_type.title} Member package you receive/)
# Each non-empty benefit line from the member type must appear verbatim.
[:specific_benefits, :general_benefits].each do |benefit|
order.member_type.try(benefit).split("\r\n").reject { |i| i.empty? }.each do |line|
@email.body.should match(/#{line}/)
end
end
end
end
end
Fixed order mailer spec.
# Specs for OrderMailer#thank_you_membership: delivers the membership
# thank-you email and checks its headers and rendered body content.
require "spec_helper"
describe OrderMailer do
describe '#thank_you_membership' do
let(:order) { FactoryGirl.build(:order) }
# Deliver once per example and capture the last delivery for inspection.
before(:each) do
OrderMailer.thank_you_membership(order).deliver
@email = ActionMailer::Base.deliveries.last
end
it 'email has proper headers' do
@email.subject.should == "Thank you for joining CIW's Member Program"
@email.from[0].should == 'forms@chicagoideas.com'
@email.to[0].should == order.user.email
end
it 'email has proper content' do
@email.body.should match(/Thank you for joining CIW's Member Program/)
@email.body.should match(/Dear, #{order.name_on_card}/)
@email.body.should match(/Included in your #{order.member_type.title} Member package you receive/)
# Each non-empty benefit line from the member type must appear verbatim.
[:specific_benefits, :general_benefits].each do |benefit|
order.member_type.try(benefit).split("\r\n").reject { |i| i.empty? }.each do |line|
@email.body.should match(/#{line}/)
end
end
end
end
end
|
# RSpec matcher asserting that a source fragment is reported by a given
# Reek smell-detector class, optionally with expected smell details.
module SmellOfMatcher
class SmellOf
def initialize(klass, *expected_smells)
@klass = klass
@expected_smells = expected_smells
@config = {}
end
def failure_message_for_should
"Expected #{@source.desc} to smell of #{@klass}, but it didn't: #{@reason}"
end
def failure_message_for_should_not
"Expected #{@source.desc} not to smell of #{@klass}, but it did"
end
# Runs the detector and checks, in order: any smells found, smell
# class/subclass, smell count, then per-smell detail key/value pairs.
def matches?(src)
@source = src.to_reek_source
ctx = MethodContext.new(nil, @source.syntax_tree)
detector = @klass.new(@source.desc, @klass.default_config.merge(@config))
detector.examine(ctx)
actual_smells = detector.smells_found.to_a
if actual_smells.empty?
@reason = 'no smells found by detector'
return false
end
actual_smells.each do |smell|
if smell.smell_class != @klass::SMELL_CLASS ||
smell.subclass != @klass::SMELL_SUBCLASS
@reason = "Found #{smell.smell_class}/#{smell.subclass}"
return false
end
end
# With no explicit expectations, exactly one smell is expected.
expected_number_of_smells = @expected_smells.empty? ? 1 : @expected_smells.length
if expected_number_of_smells != actual_smells.length
@reason = "expected #{expected_number_of_smells} smell(s), found #{actual_smells.length}"
return false
end
@expected_smells.zip(actual_smells).each do |expected_smell, actual_smell|
expected_smell.each do |key, value|
actual_value = actual_smell.smell[key]
if actual_value != value
@reason = "expected #{key} to be #{value}, was #{actual_value}"
return false
end
end
end
true
end
# Allows `smell_of(...).with_config(...)` chaining.
def with_config(options)
@config = options
self
end
end
def smell_of(klass, *smells)
SmellOf.new(klass, *smells)
end
end
RSpec.configure do |config|
config.include(SmellOfMatcher)
end
Extract methods for each of the non-matching cases
# RSpec matcher asserting that a source fragment is reported by a given
# Reek smell-detector class, optionally with expected smell details.
module SmellOfMatcher
  class SmellOf
    def initialize(klass, *expected_smells)
      @klass = klass
      @expected_smells = expected_smells
      @config = {}
    end

    def failure_message_for_should
      "Expected #{@source.desc} to smell of #{@klass}, but it didn't: #{@reason}"
    end

    def failure_message_for_should_not
      "Expected #{@source.desc} not to smell of #{@klass}, but it did"
    end

    # Checks, in order: any smells found, smell class/subclass, smell
    # count, then per-smell detail key/value pairs. Each failing check
    # records @reason for the failure message.
    def matches?(src)
      @source = src.to_reek_source
      detect_smells
      return false if no_smells_found?
      return false if wrong_smell_class_found?
      return false if wrong_number_of_smells_found?
      return false if wrong_smell_details_found?
      true
    end

    # Allows `smell_of(...).with_config(...)` chaining.
    def with_config(options)
      @config = options
      self
    end

    private

    # Runs the detector against the parsed source and stashes its findings.
    def detect_smells
      ctx = MethodContext.new(nil, @source.syntax_tree)
      detector = @klass.new(@source.desc, @klass.default_config.merge(@config))
      detector.examine(ctx)
      @actual_smells = detector.smells_found.to_a
    end

    # Predicates now return real booleans (the originals fell through to
    # nil on the negative path, which worked only by accident).
    def no_smells_found?
      return false unless @actual_smells.empty?
      @reason = 'no smells found by detector'
      true
    end

    def wrong_smell_class_found?
      @actual_smells.each do |smell|
        if smell.smell_class != @klass::SMELL_CLASS ||
           smell.subclass != @klass::SMELL_SUBCLASS
          @reason = "Found #{smell.smell_class}/#{smell.subclass}"
          return true
        end
      end
      false
    end

    def wrong_number_of_smells_found?
      # With no explicit expectations, exactly one smell is expected.
      expected_number_of_smells = @expected_smells.empty? ? 1 : @expected_smells.length
      return false if expected_number_of_smells == @actual_smells.length
      @reason = "expected #{expected_number_of_smells} smell(s), found #{@actual_smells.length}"
      true
    end

    def wrong_smell_details_found?
      @expected_smells.zip(@actual_smells).each do |expected_smell, actual_smell|
        expected_smell.each do |key, value|
          actual_value = actual_smell.smell[key]
          if actual_value != value
            @reason = "expected #{key} to be #{value}, was #{actual_value}"
            return true
          end
        end
      end
      false
    end
  end

  def smell_of(klass, *smells)
    SmellOf.new(klass, *smells)
  end
end

RSpec.configure do |config|
  config.include(SmellOfMatcher)
end
|
# Specs for Account::Field: #verified?/#mark_verified! bookkeeping and
# the #verifiable? rules for local (plain-text) vs remote (HTML) fields.
require 'rails_helper'
RSpec.describe Account::Field, type: :model do
describe '#verified?' do
let(:account) { double('Account', local?: true) }
subject { described_class.new(account, 'name' => 'Foo', 'value' => 'Bar', 'verified_at' => verified_at) }
context 'when verified_at is set' do
let(:verified_at) { Time.now.utc.iso8601 }
it 'returns true' do
expect(subject.verified?).to be true
end
end
context 'when verified_at is not set' do
let(:verified_at) { nil }
it 'returns false' do
expect(subject.verified?).to be false
end
end
end
describe '#mark_verified!' do
let(:account) { double('Account', local?: true) }
let(:original_hash) { { 'name' => 'Foo', 'value' => 'Bar' } }
subject { described_class.new(account, original_hash) }
before do
subject.mark_verified!
end
it 'updates verified_at' do
expect(subject.verified_at).to_not be_nil
end
# The field writes back into the hash it was constructed from.
it 'updates original hash' do
expect(original_hash['verified_at']).to_not be_nil
end
end
describe '#verifiable?' do
let(:account) { double('Account', local?: local) }
subject { described_class.new(account, 'name' => 'Foo', 'value' => value) }
# Local account values are plain text and must be a bare URL.
context 'for local accounts' do
let(:local) { true }
context 'for a URL with misleading authentication' do
let(:value) { 'https://spacex.com @h.43z.one' }
it 'returns false' do
expect(subject.verifiable?).to be false
end
end
context 'for a URL' do
let(:value) { 'https://example.com' }
it 'returns true' do
expect(subject.verifiable?).to be true
end
end
context 'for an IDN URL' do
let(:value) { 'http://twitter.com∕dougallj∕status∕1590357240443437057.ê.cc/twitter.html' }
it 'returns false' do
expect(subject.verifiable?).to be false
end
end
context 'for text that is not a URL' do
let(:value) { 'Hello world' }
it 'returns false' do
expect(subject.verifiable?).to be false
end
end
context 'for text that contains a URL' do
let(:value) { 'Hello https://example.com world' }
it 'returns false' do
expect(subject.verifiable?).to be false
end
end
end
# Remote account values arrive as sanitized HTML; only a single clean
# anchor whose text matches its href is verifiable.
context 'for remote accounts' do
let(:local) { false }
context 'for a link' do
let(:value) { '<a href="https://www.patreon.com/mastodon" target="_blank" rel="nofollow noopener noreferrer me"><span class="invisible">https://www.</span><span class="">patreon.com/mastodon</span><span class="invisible"></span></a>' }
it 'returns true' do
expect(subject.verifiable?).to be true
end
end
context 'for a link with misleading authentication' do
let(:value) { '<a href="https://google.com @h.43z.one" target="_blank" rel="nofollow noopener noreferrer me"><span class="invisible">https://</span><span class="">google.com</span><span class="invisible"> @h.43z.one</span></a>' }
it 'returns false' do
expect(subject.verifiable?).to be false
end
end
context 'for HTML that has more than just a link' do
let(:value) { '<a href="https://google.com" target="_blank" rel="nofollow noopener noreferrer me"><span class="invisible">https://</span><span class="">google.com</span><span class="invisible"></span></a> @h.43z.one' }
it 'returns false' do
expect(subject.verifiable?).to be false
end
end
context 'for a link with different visible text' do
let(:value) { '<a href="https://google.com/bar">https://example.com/foo</a>' }
it 'returns false' do
expect(subject.verifiable?).to be false
end
end
context 'for text that is a URL but is not linked' do
let(:value) { 'https://example.com/foo' }
it 'returns false' do
expect(subject.verifiable?).to be false
end
end
end
end
end
Test blank account field verifiability (#20458)
* Test blank account field verifiability
This change tests the need for #20428, which ensures that we guard against a situation in which `at_xpath` returns `nil`.
* Test verifiability of blank fields for remote account profiles
This adds a counterpart test for remote account profiles' fields' verifiability when those fields are blank. I previously added the same test for local accounts.
# Specs for Account::Field: #verified?/#mark_verified! bookkeeping and
# the #verifiable? rules for local (plain-text) vs remote (HTML) fields,
# including blank values.
require 'rails_helper'
RSpec.describe Account::Field, type: :model do
describe '#verified?' do
let(:account) { double('Account', local?: true) }
subject { described_class.new(account, 'name' => 'Foo', 'value' => 'Bar', 'verified_at' => verified_at) }
context 'when verified_at is set' do
let(:verified_at) { Time.now.utc.iso8601 }
it 'returns true' do
expect(subject.verified?).to be true
end
end
context 'when verified_at is not set' do
let(:verified_at) { nil }
it 'returns false' do
expect(subject.verified?).to be false
end
end
end
describe '#mark_verified!' do
let(:account) { double('Account', local?: true) }
let(:original_hash) { { 'name' => 'Foo', 'value' => 'Bar' } }
subject { described_class.new(account, original_hash) }
before do
subject.mark_verified!
end
it 'updates verified_at' do
expect(subject.verified_at).to_not be_nil
end
# The field writes back into the hash it was constructed from.
it 'updates original hash' do
expect(original_hash['verified_at']).to_not be_nil
end
end
describe '#verifiable?' do
let(:account) { double('Account', local?: local) }
subject { described_class.new(account, 'name' => 'Foo', 'value' => value) }
# Local account values are plain text and must be a bare URL.
context 'for local accounts' do
let(:local) { true }
context 'for a URL with misleading authentication' do
let(:value) { 'https://spacex.com @h.43z.one' }
it 'returns false' do
expect(subject.verifiable?).to be false
end
end
context 'for a URL' do
let(:value) { 'https://example.com' }
it 'returns true' do
expect(subject.verifiable?).to be true
end
end
context 'for an IDN URL' do
let(:value) { 'http://twitter.com∕dougallj∕status∕1590357240443437057.ê.cc/twitter.html' }
it 'returns false' do
expect(subject.verifiable?).to be false
end
end
context 'for text that is not a URL' do
let(:value) { 'Hello world' }
it 'returns false' do
expect(subject.verifiable?).to be false
end
end
context 'for text that contains a URL' do
let(:value) { 'Hello https://example.com world' }
it 'returns false' do
expect(subject.verifiable?).to be false
end
end
# Guards the nil-handling fixed in #20428: blank values must not crash.
context 'for text which is blank' do
let(:value) { '' }
it 'returns false' do
expect(subject.verifiable?).to be false
end
end
end
# Remote account values arrive as sanitized HTML; only a single clean
# anchor whose text matches its href is verifiable.
context 'for remote accounts' do
let(:local) { false }
context 'for a link' do
let(:value) { '<a href="https://www.patreon.com/mastodon" target="_blank" rel="nofollow noopener noreferrer me"><span class="invisible">https://www.</span><span class="">patreon.com/mastodon</span><span class="invisible"></span></a>' }
it 'returns true' do
expect(subject.verifiable?).to be true
end
end
context 'for a link with misleading authentication' do
let(:value) { '<a href="https://google.com @h.43z.one" target="_blank" rel="nofollow noopener noreferrer me"><span class="invisible">https://</span><span class="">google.com</span><span class="invisible"> @h.43z.one</span></a>' }
it 'returns false' do
expect(subject.verifiable?).to be false
end
end
context 'for HTML that has more than just a link' do
let(:value) { '<a href="https://google.com" target="_blank" rel="nofollow noopener noreferrer me"><span class="invisible">https://</span><span class="">google.com</span><span class="invisible"></span></a> @h.43z.one' }
it 'returns false' do
expect(subject.verifiable?).to be false
end
end
context 'for a link with different visible text' do
let(:value) { '<a href="https://google.com/bar">https://example.com/foo</a>' }
it 'returns false' do
expect(subject.verifiable?).to be false
end
end
context 'for text that is a URL but is not linked' do
let(:value) { 'https://example.com/foo' }
it 'returns false' do
expect(subject.verifiable?).to be false
end
end
# Guards the nil-handling fixed in #20428 for remote profiles too.
context 'for text which is blank' do
let(:value) { '' }
it 'returns false' do
expect(subject.verifiable?).to be false
end
end
end
end
end
|
# encoding: utf-8
# Validation specs for BlogCategory: name must be present and unique.
# (Example descriptions are intentionally in Chinese.)
require 'rails_helper'
describe BlogCategory do
before(:each) do
@blog_category = FactoryGirl.create(:blog_category)
end
subject { @blog_category }
describe "模型校验" do
it "必须存在类别名" do
@blog_category.name = ""
expect(@blog_category).not_to be_valid
end
it "类别名不能重复" do
# A duplicate of a persisted record must fail the uniqueness check.
@blog_category_with_same_name = @blog_category.dup
expect(@blog_category_with_same_name).not_to be_valid
end
end
end
Refactor blog_category model test
# encoding: utf-8
# Validation specs for BlogCategory using shoulda-matchers one-liners:
# association plus presence/uniqueness of name with Chinese messages.
require 'rails_helper'
describe BlogCategory do
it { should have_many(:blogs) }
it { should validate_uniqueness_of(:name).with_message('类别名已存在') }
it { should validate_presence_of(:name).with_message('必须存在类别名') }
end
|
require 'spec_helper'

# NOTE(review): the original body was committed by accident and could
# never run: `metric =` / `metric_configuration =` formed a dangling
# chained assignment, `error` was an undefined name, and the example
# asserted on Metric rather than MetricResult. Kept as a pending
# example until real fixtures exist.
describe MetricResult do
  describe 'method' do
    describe 'initialize' do
      it 'should return an instance of Metric Result' do
        pending 'needs real Metric/MetricConfiguration fixtures'
        compound = true
        name = "Sample name"
        scope = Granularity.new(:SOFTWARE)
        Metric.new(compound, name, scope).should be_a(Metric)
      end
    end
  end
end
Accidentally pushed a file in the last commit
|
# Integration specs for Webrat against a Merb test app: raw request
# handling, the Webrat session object, and the webrat proxy helpers
# (visits/click_link/form filling).
require File.expand_path(File.join(File.dirname(__FILE__), "spec_helper"))
# Boot the bundled test application without forking for class load.
Merb.start(
:merb_root => File.dirname(__FILE__) / "test_app",
:fork_for_class_load => false
)
# Silence the logger for the duration of the suite.
Merb::Config[:log_stream] = File.open("/dev/null", "w")
Merb.reset_logger!
require "webrat"
describe "an app tested with raw webrat" do
it "supports request" do
resp = request("/testing")
resp.should be_successful
end
it "correctly handles links even if the request " \
"wasn't originally made by webrat" do
request("/testing")
@session.click_link("Next")
@session.response.should have_xpath("//p[contains(., 'Got to next')]")
end
describe "with the webrat session" do
before(:each) do
@session = Webrat::MerbSession.new
@session.visits("/testing")
end
it "supports Webrat session #visiting" do
@session.response.should be_successful
end
it "supports Webrat session #click" do
@session.click_link("Next")
@session.response.should have_xpath("//p[contains(., 'Got to next')]")
end
end
end
describe "an app tested using the webrat proxies" do
describe("#visits") do
it "supports visits" do
visits("/testing")
end
it "can use the Merb expectations with visits" do
visits("/testing").should be_successful
end
it "supports visits intermixed with request" do
request("/testing")
resp = visits("/testing/next")
resp.should have_xpath("//p")
end
end
describe("#click_link") do
it "supports click_link" do
visit "/testing"
click_link "Next"
end
it "can use the Merb expectations with click_link" do
visit "/testing"
resp = click_link "Next"
resp.should have_xpath("//p[contains(., 'Got to next')]")
end
it "supports click_link after a request" do
request("/testing")
resp = click_link "Next"
resp.should have_xpath("//p[contains(., 'Got to next')]")
end
end
describe "filling in forms" do
it "lets you fill in text fields" do
visit "/testing/show_form"
fill_in "Name", :with => "Merby"
fill_in "Address", :with => "82 South Park"
click_button "Submit!"
end
it "returns the response when you fill in text fields" do
visit "/testing/show_form"
fill_in "name", :with => "Merby"
fill_in "address", :with => "82 South Park"
resp = click_button "Submit!"
resp.should have_xpath("//p[contains(., 'Merby')]")
end
it "lets you check checkboxes" do
visit "/testing/show_form"
check "Tis truez"
end
it "returns the response when you check checkboxes" do
visit "/testing/show_form"
check "Tis truez"
resp = click_button "Submit!"
resp.should have_xpath("//p[contains(., 'truez: 1')]")
end
end
end
Remove webrat specs around non-public API
# Integration specs for Webrat against a Merb test app, covering only
# the public proxy helpers (visits/click_link/form filling).
require File.expand_path(File.join(File.dirname(__FILE__), "spec_helper"))
# Boot the bundled test application without forking for class load.
Merb.start(
:merb_root => File.dirname(__FILE__) / "test_app",
:fork_for_class_load => false
)
# Silence the logger for the duration of the suite.
Merb::Config[:log_stream] = File.open("/dev/null", "w")
Merb.reset_logger!
describe "an app tested using the webrat proxies" do
describe("#visits") do
it "supports visits" do
visits("/testing")
end
it "can use the Merb expectations with visits" do
visits("/testing").should be_successful
end
it "supports visits intermixed with request" do
request("/testing")
resp = visits("/testing/next")
resp.should have_xpath("//p")
end
end
describe("#click_link") do
it "supports click_link" do
visit "/testing"
click_link "Next"
end
it "can use the Merb expectations with click_link" do
visit "/testing"
resp = click_link "Next"
resp.should have_xpath("//p[contains(., 'Got to next')]")
end
it "supports click_link after a request" do
request("/testing")
resp = click_link "Next"
resp.should have_xpath("//p[contains(., 'Got to next')]")
end
end
describe "filling in forms" do
it "lets you fill in text fields" do
visit "/testing/show_form"
fill_in "Name", :with => "Merby"
fill_in "Address", :with => "82 South Park"
click_button "Submit!"
end
it "returns the response when you fill in text fields" do
visit "/testing/show_form"
fill_in "name", :with => "Merby"
fill_in "address", :with => "82 South Park"
resp = click_button "Submit!"
resp.should have_xpath("//p[contains(., 'Merby')]")
end
it "lets you check checkboxes" do
visit "/testing/show_form"
check "Tis truez"
end
it "returns the response when you check checkboxes" do
visit "/testing/show_form"
check "Tis truez"
resp = click_button "Submit!"
resp.should have_xpath("//p[contains(., 'truez: 1')]")
end
end
end
# -*- encoding: US-ASCII -*-
require File.expand_path('../../../spec_helper', __FILE__)
require File.expand_path('../fixtures/methods', __FILE__)
# Specs for Time#_dump: the Marshal byte layout packs the GMT flag,
# date components, and time components into two 32-bit little-endian
# words ("VV").
describe "Time#_dump" do
before :each do
@local = Time.at(946812800)
@t = Time.at(946812800)
@t = @t.gmtime
@s = @t.send(:_dump)
end
ruby_bug("http://redmine.ruby-lang.org/issues/show/627", "1.8.7") do
it "preserves the GMT flag" do
@t.gmt?.should == true
# Bit 30 of the first word carries the GMT flag.
dump = @t.send(:_dump).unpack("VV").first
((dump >> 30) & 0x1).should == 1
@local.gmt?.should == false
dump = @local.send(:_dump).unpack("VV").first
((dump >> 30) & 0x1).should == 0
end
it "dumps a Time object to a bytestring" do
@s.should be_kind_of(String)
@s.should == [3222863947, 2235564032].pack("VV")
end
# First word: marker bit | gmt flag | year-1900 | month-1 | mday | hour.
it "dumps an array with a date as first element" do
high = 1 << 31 |
(@t.gmt? ? 1 : 0) << 30 |
(@t.year - 1900) << 14 |
(@t.mon - 1) << 10 |
@t.mday << 5 |
@t.hour
high.should == @s.unpack("VV").first
end
end
# Second word: minutes | seconds | microseconds.
it "dumps an array with a time as second element" do
low = @t.min << 26 |
@t.sec << 20 |
@t.usec
low.should == @s.unpack("VV").last
end
it "dumps like MRI's marshaled time format" do
t = Time.utc(2000, 1, 15, 20, 1, 1, 203).localtime
t.send(:_dump).should == "\364\001\031\200\313\000\020\004"
end
end
Add specs for Time#_dump
# -*- encoding: US-ASCII -*-
require File.expand_path('../../../spec_helper', __FILE__)
require File.expand_path('../fixtures/methods', __FILE__)
# Specs for Time#_dump: method visibility across Ruby versions, plus the
# Marshal byte layout packing the GMT flag, date components, and time
# components into two 32-bit little-endian words ("VV").
describe "Time#_dump" do
before :each do
@local = Time.at(946812800)
@t = Time.at(946812800)
@t = @t.gmtime
@s = @t.send(:_dump)
end
# _dump became private in Ruby 2.0.
ruby_version_is ""..."2.0" do
it "is a public method" do
Time.should have_public_instance_method(:_dump, false)
end
end
ruby_version_is "2.0" do
it "is a private method" do
Time.should have_private_instance_method(:_dump, false)
end
end
ruby_bug("http://redmine.ruby-lang.org/issues/show/627", "1.8.7") do
it "preserves the GMT flag" do
@t.gmt?.should == true
# Bit 30 of the first word carries the GMT flag.
dump = @t.send(:_dump).unpack("VV").first
((dump >> 30) & 0x1).should == 1
@local.gmt?.should == false
dump = @local.send(:_dump).unpack("VV").first
((dump >> 30) & 0x1).should == 0
end
it "dumps a Time object to a bytestring" do
@s.should be_kind_of(String)
@s.should == [3222863947, 2235564032].pack("VV")
end
# First word: marker bit | gmt flag | year-1900 | month-1 | mday | hour.
it "dumps an array with a date as first element" do
high = 1 << 31 |
(@t.gmt? ? 1 : 0) << 30 |
(@t.year - 1900) << 14 |
(@t.mon - 1) << 10 |
@t.mday << 5 |
@t.hour
high.should == @s.unpack("VV").first
end
end
# Second word: minutes | seconds | microseconds.
it "dumps an array with a time as second element" do
low = @t.min << 26 |
@t.sec << 20 |
@t.usec
low.should == @s.unpack("VV").last
end
it "dumps like MRI's marshaled time format" do
t = Time.utc(2000, 1, 15, 20, 1, 1, 203).localtime
t.send(:_dump).should == "\364\001\031\200\313\000\020\004"
end
end
# Feature spec: a logged-in user can send a message (and a reply with
# multibyte emoji content) to another user via the mailboxer form.
require "test_helper"
include Warden::Test::Helpers
feature "CanUserMessage" do
scenario "should message another user", js: true do
@user1 = FactoryGirl.create(:user)
@user2 = FactoryGirl.create(:user)
login_as @user1
visit message_new_path(@user2)
body = "hola trololo"
fill_in "mailboxer_message_subject", with: "hola mundo"
fill_in "mailboxer_message_body", with: body
click_button "Enviar"
page.must_have_content body
page.must_have_content "Mover mensaje a papelera"
# Reply exercises long multibyte (emoji) content.
reply = "What a nice emoji😀!What a nice emoji😀!What a nice emoji😀!What a nice emoji😀!What a nice emoji😀!"
fill_in "mailboxer_message_body", with: reply
click_button "Enviar"
page.must_have_content reply
end
end
[tests] Extract a method to keep DRYer code
require "test_helper"
include Warden::Test::Helpers

# Feature spec: a logged-in user can send a message (and a reply with
# multibyte emoji content) to another user via the mailboxer form.
feature "CanUserMessage" do
  scenario "should message another user", js: true do
    @user1 = FactoryGirl.create(:user)
    @user2 = FactoryGirl.create(:user)
    login_as @user1
    visit message_new_path(@user2)

    # Keep the message text in locals: the assertions below need them.
    # (The original refactor asserted on `body`/`reply` variables that
    # were never defined, which raised NameError at runtime.)
    body = "hola trololo"
    send_message(body, "hola mundo")
    page.must_have_content body
    page.must_have_content "Mover mensaje a papelera"

    reply = "What a nice emoji😀!What a nice emoji😀!What a nice emoji😀!What a nice emoji😀!What a nice emoji😀!"
    send_message(reply)
    page.must_have_content reply
  end

  # Fill in the message form and submit it; subject is optional
  # (replies only carry a body).
  def send_message(body, subject = nil)
    fill_in("mailboxer_message_subject", with: subject) if subject
    fill_in "mailboxer_message_body", with: body
    click_button "Enviar"
  end
end
|
# encoding: utf-8
$:.push File.expand_path("../lib", __FILE__)
require "tinymce-rails-imageupload/version"

# Gem specification for tinymce-rails-imageupload.
Gem::Specification.new do |s|
  s.name        = "tinymce-rails-imageupload"
  s.version     = Tinymce::Rails::Imageupload::VERSION
  s.authors     = ["Per Christian B. Viken"]
  s.email       = ["perchr@northblue.org"]
  s.homepage    = "http://eastblue.org/oss"
  s.summary     = %q{TinyMCE plugin for taking image uploads in Rails >= 3.1}
  s.description = %q{TinyMCE plugin for taking image uploads in Rails >= 3.1}

  # File lists are derived from git so packaging stays in sync with the repo.
  s.files         = `git ls-files`.split("\n")
  s.test_files    = `git ls-files -- {test,spec,features}/*`.split("\n")
  s.require_paths = ["lib"]

  # Missing license metadata triggers warnings on `gem build` and leaves
  # consumers guessing; the project is MIT-licensed.
  s.license = "MIT"

  s.add_runtime_dependency "railties", ">= 3.1"
  s.add_runtime_dependency "tinymce-rails", "~> 4.0"
  s.add_development_dependency "bundler", "~> 1.0"
  s.add_development_dependency "rails", ">= 3.1"
end
Add license information and expand the description slightly
# encoding: utf-8
$:.push File.expand_path("../lib", __FILE__)
require "tinymce-rails-imageupload/version"
# Gem specification for tinymce-rails-imageupload (MIT licensed).
Gem::Specification.new do |s|
s.name = "tinymce-rails-imageupload"
s.version = Tinymce::Rails::Imageupload::VERSION
s.authors = ["Per Christian B. Viken"]
s.email = ["perchr@northblue.org"]
s.homepage = "http://eastblue.org/oss"
s.summary = %q{TinyMCE plugin for taking image uploads in Rails >= 3.2}
s.description = %q{TinyMCE plugin for taking image uploads in Rails >= 3.2. Image storage is handled manually, so works with everything.}
# File lists are derived from git so packaging stays in sync with the repo.
s.files = `git ls-files`.split("\n")
s.test_files = `git ls-files -- {test,spec,features}/*`.split("\n")
s.require_paths = ["lib"]
s.license = "MIT"
s.add_runtime_dependency "railties", ">= 3.1"
s.add_runtime_dependency "tinymce-rails", "~> 4.0"
s.add_development_dependency "bundler", "~> 1.0"
s.add_development_dependency "rails", ">= 3.1"
end
|
# frozen_string_literal: true
#
# Copyright (C) 2012 - present Instructure, Inc.
#
# This file is part of Canvas.
#
# Canvas is free software: you can redistribute it and/or modify it under
# the terms of the GNU Affero General Public License as published by the Free
# Software Foundation, version 3 of the License.
#
# Canvas is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU Affero General Public License for more
# details.
#
# You should have received a copy of the GNU Affero General Public License along
# with this program. If not, see <http://www.gnu.org/licenses/>.
require File.expand_path(File.dirname(__FILE__) + '/common')
require File.expand_path(File.dirname(__FILE__) + '/helpers/calendar2_common')
# Selenium specs for the course-copy flow (/courses/:id/copy): submitting the
# copy form, date shifting/removal options, and sub-account scoping.
# NOTE: indentation in this dump is stripped; structure follows do/end pairs.
describe "course copy" do
include_context "in-process server selenium tests"
include Calendar2Common
# Asserts the breadcrumb on the current page shows the course code.
def validate_course_main_page
header = f('#breadcrumbs .home + li a')
expect(header).to be_displayed
expect(header.text).to eq @course.course_code
end
it "copies the course" do
course_with_admin_logged_in
@course.syllabus_body = "<p>haha</p>"
@course.tab_configuration = [{ "id" => 0 }, { "id" => 14 }, { "id" => 8 }, { "id" => 5 }, { "id" => 6 }, { "id" => 2 }, { "id" => 3, "hidden" => true }]
@course.default_view = 'modules'
@course.wiki_pages.create!(:title => "hi", :body => "Whatever")
@course.save!
get "/courses/#{@course.id}/copy"
expect_new_page_load { f('button[type="submit"]').click }
# The copy runs as a background job; execute it inline, then check the UI.
run_jobs
expect(f('div.progressStatus span')).to include_text 'Completed'
# Assumes the copy is the most recently created course — TODO confirm no
# other course is created during this example.
@new_course = Course.last
expect(@new_course.syllabus_body).to eq @course.syllabus_body
expect(@new_course.tab_configuration).to eq @course.tab_configuration
expect(@new_course.default_view).to eq @course.default_view
expect(@new_course.wiki_pages.count).to eq 1
end
# TODO reimplement per CNVS-29604, but make sure we're testing at the right level
it "should copy the course with different settings"
it "sets the course name and code correctly" do
course_with_admin_logged_in
get "/courses/#{@course.id}/copy"
name = f('#course_name')
replace_content(name, "course name of testing")
name = f('#course_course_code')
replace_content(name, "course code of testing")
expect_new_page_load { f('button[type="submit"]').click }
new_course = Course.last
expect(new_course.name).to eq "course name of testing"
expect(new_course.course_code).to eq "course code of testing"
end
it "adjusts the dates" do
course_with_admin_logged_in
get "/courses/#{@course.id}/copy"
f('#dateAdjustCheckbox').click
# Mixed date formats are deliberate: the form accepts several input styles.
replace_and_proceed(f('#oldStartDate'), '7/1/2012')
replace_and_proceed(f('#oldEndDate'), 'Jul 11, 2012')
replace_and_proceed(f('#newStartDate'), '8-5-2012')
replace_and_proceed(f('#newEndDate'), 'Aug 15, 2012')
f('#addDaySubstitution').click
click_option('#daySubstitution ul > div:nth-child(1) .currentDay', "1", :value)
click_option('#daySubstitution ul > div:nth-child(1) .subDay', "2", :value)
expect_new_page_load { f('button[type="submit"]').click }
# Verify the persisted migration settings rather than the UI.
opts = ContentMigration.last.migration_settings["date_shift_options"]
expect(opts['shift_dates']).to eq '1'
expect(opts['day_substitutions']).to eq({ "1" => "2" })
expected = {
"old_start_date" => "Jul 1, 2012", "old_end_date" => "Jul 11, 2012",
"new_start_date" => "Aug 5, 2012", "new_end_date" => "Aug 15, 2012"
}
# Compare as parsed dates so the stored string format doesn't matter.
expected.each do |k, v|
expect(Date.parse(opts[k].to_s)).to eq Date.parse(v)
end
end
it "removes dates" do
course_with_admin_logged_in
get "/courses/#{@course.id}/copy"
f('#dateAdjustCheckbox').click
f('#dateRemoveOption').click
expect_new_page_load { f('button[type="submit"]').click }
opts = ContentMigration.last.migration_settings["date_shift_options"]
expect(opts['remove_dates']).to eq '1'
end
it "creates the new course in the same sub-account" do
account_model
subaccount = @account.sub_accounts.create!(:name => "subadubdub")
course_with_admin_logged_in(:account => subaccount)
@course.syllabus_body = "<p>haha</p>"
@course.save!
get "/courses/#{@course.id}/settings"
link = f('.copy_course_link')
expect(link).to be_displayed
expect_new_page_load { link.click }
expect_new_page_load { f('button[type="submit"]').click }
run_jobs
expect(f('div.progressStatus span')).to include_text 'Completed'
# The copy must live in the same sub-account as the source course.
@new_course = subaccount.courses.where("id <>?", @course.id).last
expect(@new_course.syllabus_body).to eq @course.syllabus_body
end
it "is not able to submit invalid course dates" do
course_with_admin_logged_in
get "/courses/#{@course.id}/copy"
# Conclude date before start date should disable the submit button.
replace_content(f('#course_start_at'), 'Aug 15, 2012')
replace_and_proceed(f('#course_conclude_at'), 'Jul 11, 2012')
button = f('button.btn-primary')
expect(button).to be_disabled
replace_and_proceed(f('#course_conclude_at'), 'Aug 30, 2012')
expect(button).not_to be_disabled
end
context "with calendar events" do
# Pin wall-clock time so relative dates (2.weeks.from_now) are stable.
around do |example|
Timecop.freeze(Time.zone.local(2016, 5, 1, 10, 5, 0)) do
Auditors::ActiveRecord::Partitioner.process
example.call
end
end
before(:each) do
course_with_admin_logged_in
@date_to_use = 2.weeks.from_now.monday.strftime("%Y-%m-%d")
end
# this test requires jobs to run in the middle of it and course_copys
# need to check a lot of things, a longer timeout is reasonable.
it "shifts the dates a week later", priority: "2", test_id: 2953906, custom_timeout: 30 do
get "/calendar"
quick_jump_to_date(@date_to_use)
create_calendar_event('Monday Event', true, false, false, @date_to_use, true)
get "/courses/#{@course.id}/copy"
new_course_name = "copied course"
replace_content(f("input[type=text][id=course_name]"), new_course_name)
replace_content(f("input[type=text][id=course_course_code]"), "copied")
f("input[type=checkbox][id=dateAdjustCheckbox]").click
date = 1.week.from_now.strftime("%Y-%m-%d")
replace_content(f("input[type=text][id=newStartDate]"), date)
submit_form('#copy_course_form')
run_jobs
raise "progress bar is still there after waiting" unless wait_for_no_such_element(timeout: 10) { f('.bar') }
expect(f('div.progressStatus span')).to include_text 'Completed'
# Verify via the calendar UI that the event moved one week later.
get "/calendar#view_name=week"
quick_jump_to_date(@date_to_use)
f('.fc-event').click
expect(f('.event-details-content')).to include_text("#{@course.name}")
f('.navigate_next').click
f('.fc-event').click
expect(f('.event-details-content')).to include_text("#{new_course_name}")
end
end
end
fix flaky course copy date shift selenium spec
test plan: specs
flag = none
closes LS-2751
Change-Id: If835549782d1e1772e95360e3d770c3d4e37db23
Reviewed-on: https://gerrit.instructure.com/c/canvas-lms/+/275989
Tested-by: Service Cloud Jenkins <9144042a601061f88f1e1d7a1753ea3e2972119d@instructure.com>
Reviewed-by: Robin Kuss <0b206b54367b2de0334896ea45826f0e3194bd35@instructure.com>
QA-Review: Robin Kuss <0b206b54367b2de0334896ea45826f0e3194bd35@instructure.com>
Product-Review: Jeremy Stanley <b3f594e10a9edcf5413cf1190121d45078c62290@instructure.com>
# frozen_string_literal: true
#
# Copyright (C) 2012 - present Instructure, Inc.
#
# This file is part of Canvas.
#
# Canvas is free software: you can redistribute it and/or modify it under
# the terms of the GNU Affero General Public License as published by the Free
# Software Foundation, version 3 of the License.
#
# Canvas is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU Affero General Public License for more
# details.
#
# You should have received a copy of the GNU Affero General Public License along
# with this program. If not, see <http://www.gnu.org/licenses/>.
require File.expand_path(File.dirname(__FILE__) + '/common')
require File.expand_path(File.dirname(__FILE__) + '/helpers/calendar2_common')
# Selenium specs for the course-copy flow (/courses/:id/copy): submitting the
# copy form, date shifting/removal options, and sub-account scoping.
# NOTE: indentation in this dump is stripped; structure follows do/end pairs.
describe "course copy" do
include_context "in-process server selenium tests"
include Calendar2Common
# Asserts the breadcrumb on the current page shows the course code.
def validate_course_main_page
header = f('#breadcrumbs .home + li a')
expect(header).to be_displayed
expect(header.text).to eq @course.course_code
end
it "copies the course" do
course_with_admin_logged_in
@course.syllabus_body = "<p>haha</p>"
@course.tab_configuration = [{ "id" => 0 }, { "id" => 14 }, { "id" => 8 }, { "id" => 5 }, { "id" => 6 }, { "id" => 2 }, { "id" => 3, "hidden" => true }]
@course.default_view = 'modules'
@course.wiki_pages.create!(:title => "hi", :body => "Whatever")
@course.save!
get "/courses/#{@course.id}/copy"
expect_new_page_load { f('button[type="submit"]').click }
# The copy runs as a background job; execute it inline, then check the UI.
run_jobs
expect(f('div.progressStatus span')).to include_text 'Completed'
# Assumes the copy is the most recently created course — TODO confirm no
# other course is created during this example.
@new_course = Course.last
expect(@new_course.syllabus_body).to eq @course.syllabus_body
expect(@new_course.tab_configuration).to eq @course.tab_configuration
expect(@new_course.default_view).to eq @course.default_view
expect(@new_course.wiki_pages.count).to eq 1
end
# TODO reimplement per CNVS-29604, but make sure we're testing at the right level
it "should copy the course with different settings"
it "sets the course name and code correctly" do
course_with_admin_logged_in
get "/courses/#{@course.id}/copy"
name = f('#course_name')
replace_content(name, "course name of testing")
name = f('#course_course_code')
replace_content(name, "course code of testing")
expect_new_page_load { f('button[type="submit"]').click }
new_course = Course.last
expect(new_course.name).to eq "course name of testing"
expect(new_course.course_code).to eq "course code of testing"
end
it "adjusts the dates" do
course_with_admin_logged_in
get "/courses/#{@course.id}/copy"
f('#dateAdjustCheckbox').click
# Mixed date formats are deliberate: the form accepts several input styles.
replace_and_proceed(f('#oldStartDate'), '7/1/2012')
replace_and_proceed(f('#oldEndDate'), 'Jul 11, 2012')
replace_and_proceed(f('#newStartDate'), '8-5-2012')
replace_and_proceed(f('#newEndDate'), 'Aug 15, 2012')
f('#addDaySubstitution').click
click_option('#daySubstitution ul > div:nth-child(1) .currentDay', "1", :value)
click_option('#daySubstitution ul > div:nth-child(1) .subDay', "2", :value)
expect_new_page_load { f('button[type="submit"]').click }
# Verify the persisted migration settings rather than the UI.
opts = ContentMigration.last.migration_settings["date_shift_options"]
expect(opts['shift_dates']).to eq '1'
expect(opts['day_substitutions']).to eq({ "1" => "2" })
expected = {
"old_start_date" => "Jul 1, 2012", "old_end_date" => "Jul 11, 2012",
"new_start_date" => "Aug 5, 2012", "new_end_date" => "Aug 15, 2012"
}
# Compare as parsed dates so the stored string format doesn't matter.
expected.each do |k, v|
expect(Date.parse(opts[k].to_s)).to eq Date.parse(v)
end
end
it "removes dates" do
course_with_admin_logged_in
get "/courses/#{@course.id}/copy"
f('#dateAdjustCheckbox').click
f('#dateRemoveOption').click
expect_new_page_load { f('button[type="submit"]').click }
opts = ContentMigration.last.migration_settings["date_shift_options"]
expect(opts['remove_dates']).to eq '1'
end
it "creates the new course in the same sub-account" do
account_model
subaccount = @account.sub_accounts.create!(:name => "subadubdub")
course_with_admin_logged_in(:account => subaccount)
@course.syllabus_body = "<p>haha</p>"
@course.save!
get "/courses/#{@course.id}/settings"
link = f('.copy_course_link')
expect(link).to be_displayed
expect_new_page_load { link.click }
expect_new_page_load { f('button[type="submit"]').click }
run_jobs
expect(f('div.progressStatus span')).to include_text 'Completed'
# The copy must live in the same sub-account as the source course.
@new_course = subaccount.courses.where("id <>?", @course.id).last
expect(@new_course.syllabus_body).to eq @course.syllabus_body
end
it "is not able to submit invalid course dates" do
course_with_admin_logged_in
get "/courses/#{@course.id}/copy"
# Conclude date before start date should disable the submit button.
replace_content(f('#course_start_at'), 'Aug 15, 2012')
replace_and_proceed(f('#course_conclude_at'), 'Jul 11, 2012')
button = f('button.btn-primary')
expect(button).to be_disabled
replace_and_proceed(f('#course_conclude_at'), 'Aug 30, 2012')
expect(button).not_to be_disabled
end
context "with calendar events" do
# Pin wall-clock time so relative dates (2.weeks.from_now) are stable.
around do |example|
Timecop.freeze(Time.zone.local(2016, 5, 1, 10, 5, 0)) do
Auditors::ActiveRecord::Partitioner.process
example.call
end
end
before(:each) do
course_with_admin_logged_in
@date_to_use = 2.weeks.from_now.monday.strftime("%Y-%m-%d")
end
# this test requires jobs to run in the middle of it and course_copys
# need to check a lot of things, a longer timeout is reasonable.
it "shifts the dates a week later", priority: "2", test_id: 2953906, custom_timeout: 30 do
# Create the event via the model (not the calendar UI) to avoid flakiness.
event = @course.calendar_events.create! title: 'Monday Event', start_at: @date_to_use
get "/courses/#{@course.id}/copy"
new_course_name = "copied course"
replace_content(f("input[type=text][id=course_name]"), new_course_name)
replace_content(f("input[type=text][id=course_course_code]"), "copied")
f("input[type=checkbox][id=dateAdjustCheckbox]").click
date = 1.week.from_now.strftime("%Y-%m-%d")
replace_content(f("input[type=text][id=newStartDate]"), date, tab_out: true)
expect_new_page_load { submit_form('#copy_course_form') }
run_jobs
# Assert against the copied records directly rather than the calendar UI.
new_course = Course.where(name: new_course_name).last
new_event = new_course.calendar_events.where(title: 'Monday Event').last
expect(new_event.all_day_date).to eq event.all_day_date + 7.days
end
end
end
|
require "spec_helper"
# Specs for Sidekiq::Merger::Redis: the Redis key layout used by the merger
# (merges set, time/unique_msg/msg/lock keys) and its basic operations.
# NOTE: indentation in this dump is stripped; structure follows do/end pairs.
describe Sidekiq::Merger::Redis do
subject { described_class.new }
let(:now) { Time.now }
let(:execution_time) { now + 10.seconds }
# Freeze the clock so execution-time values are deterministic.
before { Timecop.freeze(now) }
describe ".purge" do
it "cleans up all the keys" do
described_class.redis do |conn|
conn.sadd("sidekiq-merger:merges", "test")
conn.set("sidekiq-merger:unique_msg:foo", "test")
conn.set("sidekiq-merger:msg:foo", "test")
conn.set("sidekiq-merger:lock:foo", "test")
end
described_class.purge!
described_class.redis do |conn|
expect(conn.smembers("sidekiq-merger:merges")).to be_empty
expect(conn.keys("sidekiq-merger:unique_msg:*")).to be_empty
expect(conn.keys("sidekiq-merger:msg:*")).to be_empty
expect(conn.keys("sidekiq-merger:lock:*")).to be_empty
end
end
end
describe "#push" do
# Shared assertions; including contexts provide merge_keys, times,
# unique_msgs_h and msgs_h describing the expected Redis state.
shared_examples_for "push spec" do
it "pushes the msg" do
subject.push(pushing_key, pushing_msg, pushing_execution_time)
described_class.redis do |conn|
expect(conn.smembers("sidekiq-merger:merges")).to contain_exactly(*merge_keys)
expect(conn.keys("sidekiq-merger:time:*")).to contain_exactly(*times.keys)
expect(conn.keys("sidekiq-merger:unique_msg:*")).to contain_exactly(*unique_msgs_h.keys)
unique_msgs_h.each do |key, msgs|
expect(conn.smembers(key)).to contain_exactly(*msgs)
end
expect(conn.keys("sidekiq-merger:msg:*")).to contain_exactly(*msgs_h.keys)
msgs_h.each do |key, msgs|
expect(conn.lrange(key, 0, -1)).to contain_exactly(*msgs)
end
end
end
it "sets the execution time" do
subject.push(pushing_key, pushing_msg, pushing_execution_time)
described_class.redis do |conn|
# Iterate the expected time-key => value hash. (Previously iterated
# merge_keys, an array of bare merge keys, so |key, time| bound time
# to nil and conn.get(key) was nil — the assertion passed vacuously.)
times.each do |key, time|
expect(conn.get(key)).to eq time
end
end
end
end
let(:pushing_key) { "foo" }
let(:pushing_msg) { [1, 2, 3] }
let(:pushing_execution_time) { execution_time }
include_examples "push spec" do
let(:merge_keys) { ["foo"] }
let(:times) { {
"sidekiq-merger:time:foo" => execution_time.to_i.to_s,
} }
let(:unique_msgs_h) { {
"sidekiq-merger:unique_msg:foo" => ["[1,2,3]"]
} }
let(:msgs_h) { {
"sidekiq-merger:msg:foo" => ["[1,2,3]"]
} }
end
context "the merge key already exists" do
let(:pushing_msg) { [2, 3, 4] }
before { subject.push("foo", [1, 2, 3], execution_time) }
include_examples "push spec" do
let(:merge_keys) { ["foo"] }
let(:times) { {
"sidekiq-merger:time:foo" => execution_time.to_i.to_s,
} }
let(:unique_msgs_h) { {
"sidekiq-merger:unique_msg:foo" => ["[1,2,3]", "[2,3,4]"]
} }
let(:msgs_h) { {
"sidekiq-merger:msg:foo" => ["[1,2,3]", "[2,3,4]"]
} }
end
end
context "the msg has already been pushed" do
before { subject.push("foo", [1, 2, 3], execution_time) }
include_examples "push spec" do
let(:merge_keys) { ["foo"] }
let(:times) { {
"sidekiq-merger:time:foo" => execution_time.to_i.to_s,
} }
let(:unique_msgs_h) { {
"sidekiq-merger:unique_msg:foo" => ["[1,2,3]"]
} }
# The msg list keeps duplicates; only the unique_msg set deduplicates.
let(:msgs_h) { {
"sidekiq-merger:msg:foo" => ["[1,2,3]", "[1,2,3]"]
} }
end
end
context "other merge key already exists" do
let(:pushing_key) { "bar" }
let(:pushing_msg) { [2, 3, 4] }
let(:pushing_execution_time) { execution_time + 1.hour }
before { subject.push("foo", [1, 2, 3], execution_time) }
include_examples "push spec" do
let(:merge_keys) { ["foo", "bar"] }
let(:times) { {
"sidekiq-merger:time:foo" => execution_time.to_i.to_s,
"sidekiq-merger:time:bar" => (execution_time + 1.hour).to_i.to_s,
} }
let(:unique_msgs_h) { {
"sidekiq-merger:unique_msg:foo" => ["[1,2,3]"],
"sidekiq-merger:unique_msg:bar" => ["[2,3,4]"],
} }
let(:msgs_h) { {
"sidekiq-merger:msg:foo" => ["[1,2,3]"],
"sidekiq-merger:msg:bar" => ["[2,3,4]"],
} }
end
end
end
describe "#delete" do
before do
subject.redis do |conn|
conn.sadd("sidekiq-merger:unique_msg:foo", "[1,2,3]")
conn.sadd("sidekiq-merger:unique_msg:foo", "[2,3,4]")
conn.lpush("sidekiq-merger:msg:foo", "[1,2,3]")
conn.lpush("sidekiq-merger:msg:foo", "[2,3,4]")
end
end
it "deletes the msg" do
subject.delete("foo", [1, 2, 3])
subject.redis do |conn|
expect(conn.smembers("sidekiq-merger:unique_msg:foo")).to contain_exactly "[2,3,4]"
expect(conn.lrange("sidekiq-merger:msg:foo", 0, -1)).to contain_exactly "[2,3,4]"
end
end
context "with duplicate msgs" do
it "deletes the msg" do
subject.redis do |conn|
conn.lpush("sidekiq-merger:msg:foo", "[1,2,3]")
end
subject.delete("foo", [1, 2, 3])
subject.redis do |conn|
expect(conn.smembers("sidekiq-merger:unique_msg:foo")).to contain_exactly "[2,3,4]"
expect(conn.lrange("sidekiq-merger:msg:foo", 0, -1)).to contain_exactly "[2,3,4]"
end
end
end
end
describe "#size" do
before do
subject.redis do |conn|
conn.lpush("sidekiq-merger:msg:foo", "[1,2,3]")
conn.lpush("sidekiq-merger:msg:foo", "[2,3,4]")
end
end
it "returns the size" do
expect(subject.size("foo")).to eq 2
end
end
describe "#exists?" do
context "unique key exists" do
it "returns true" do
described_class.redis { |conn| conn.sadd("sidekiq-merger:unique_msg:foo", "\"test\"") }
expect(subject.exists?("foo", "test")).to eq true
end
end
context "unique key does not exist" do
it "returns false" do
expect(subject.exists?("foo", "test")).to eq false
end
end
end
describe "#all" do
end
describe "#lock" do
end
describe "#get" do
before do
subject.push("bar", [1, 2, 3], execution_time)
subject.push("bar", [2, 3, 4], execution_time)
end
# #get reads without consuming: the size is unchanged afterwards.
it "gets all the msg" do
expect(subject.get("bar")).to contain_exactly [1, 2, 3], [2, 3, 4]
expect(subject.size("bar")).to eq 2
end
end
describe "#pluck" do
before do
subject.push("bar", [1, 2, 3], execution_time)
subject.push("bar", [2, 3, 4], execution_time)
end
# #pluck consumes: the list is empty afterwards.
it "plucks all the msg" do
expect(subject.pluck("bar")).to contain_exactly [1, 2, 3], [2, 3, 4]
expect(subject.size("bar")).to eq 0
end
end
describe "#delete_all" do
end
end
Add spec for the #all method
require "spec_helper"
# Specs for Sidekiq::Merger::Redis: the Redis key layout used by the merger
# (merges set, time/unique_msg/msg/lock keys) and its basic operations.
# NOTE: indentation in this dump is stripped; structure follows do/end pairs.
describe Sidekiq::Merger::Redis do
subject { described_class.new }
let(:now) { Time.now }
let(:execution_time) { now + 10.seconds }
# Freeze the clock so execution-time values are deterministic.
before { Timecop.freeze(now) }
describe ".purge" do
it "cleans up all the keys" do
described_class.redis do |conn|
conn.sadd("sidekiq-merger:merges", "test")
conn.set("sidekiq-merger:unique_msg:foo", "test")
conn.set("sidekiq-merger:msg:foo", "test")
conn.set("sidekiq-merger:lock:foo", "test")
end
described_class.purge!
described_class.redis do |conn|
expect(conn.smembers("sidekiq-merger:merges")).to be_empty
expect(conn.keys("sidekiq-merger:unique_msg:*")).to be_empty
expect(conn.keys("sidekiq-merger:msg:*")).to be_empty
expect(conn.keys("sidekiq-merger:lock:*")).to be_empty
end
end
end
describe "#push" do
# Shared assertions; including contexts provide merge_keys, times,
# unique_msgs_h and msgs_h describing the expected Redis state.
shared_examples_for "push spec" do
it "pushes the msg" do
subject.push(pushing_key, pushing_msg, pushing_execution_time)
described_class.redis do |conn|
expect(conn.smembers("sidekiq-merger:merges")).to contain_exactly(*merge_keys)
expect(conn.keys("sidekiq-merger:time:*")).to contain_exactly(*times.keys)
expect(conn.keys("sidekiq-merger:unique_msg:*")).to contain_exactly(*unique_msgs_h.keys)
unique_msgs_h.each do |key, msgs|
expect(conn.smembers(key)).to contain_exactly(*msgs)
end
expect(conn.keys("sidekiq-merger:msg:*")).to contain_exactly(*msgs_h.keys)
msgs_h.each do |key, msgs|
expect(conn.lrange(key, 0, -1)).to contain_exactly(*msgs)
end
end
end
it "sets the execution time" do
subject.push(pushing_key, pushing_msg, pushing_execution_time)
described_class.redis do |conn|
# Iterate the expected time-key => value hash. (Previously iterated
# merge_keys, an array of bare merge keys, so |key, time| bound time
# to nil and conn.get(key) was nil — the assertion passed vacuously.)
times.each do |key, time|
expect(conn.get(key)).to eq time
end
end
end
end
let(:pushing_key) { "foo" }
let(:pushing_msg) { [1, 2, 3] }
let(:pushing_execution_time) { execution_time }
include_examples "push spec" do
let(:merge_keys) { ["foo"] }
let(:times) { {
"sidekiq-merger:time:foo" => execution_time.to_i.to_s,
} }
let(:unique_msgs_h) { {
"sidekiq-merger:unique_msg:foo" => ["[1,2,3]"]
} }
let(:msgs_h) { {
"sidekiq-merger:msg:foo" => ["[1,2,3]"]
} }
end
context "the merge key already exists" do
let(:pushing_msg) { [2, 3, 4] }
before { subject.push("foo", [1, 2, 3], execution_time) }
include_examples "push spec" do
let(:merge_keys) { ["foo"] }
let(:times) { {
"sidekiq-merger:time:foo" => execution_time.to_i.to_s,
} }
let(:unique_msgs_h) { {
"sidekiq-merger:unique_msg:foo" => ["[1,2,3]", "[2,3,4]"]
} }
let(:msgs_h) { {
"sidekiq-merger:msg:foo" => ["[1,2,3]", "[2,3,4]"]
} }
end
end
context "the msg has already been pushed" do
before { subject.push("foo", [1, 2, 3], execution_time) }
include_examples "push spec" do
let(:merge_keys) { ["foo"] }
let(:times) { {
"sidekiq-merger:time:foo" => execution_time.to_i.to_s,
} }
let(:unique_msgs_h) { {
"sidekiq-merger:unique_msg:foo" => ["[1,2,3]"]
} }
# The msg list keeps duplicates; only the unique_msg set deduplicates.
let(:msgs_h) { {
"sidekiq-merger:msg:foo" => ["[1,2,3]", "[1,2,3]"]
} }
end
end
context "other merge key already exists" do
let(:pushing_key) { "bar" }
let(:pushing_msg) { [2, 3, 4] }
let(:pushing_execution_time) { execution_time + 1.hour }
before { subject.push("foo", [1, 2, 3], execution_time) }
include_examples "push spec" do
let(:merge_keys) { ["foo", "bar"] }
let(:times) { {
"sidekiq-merger:time:foo" => execution_time.to_i.to_s,
"sidekiq-merger:time:bar" => (execution_time + 1.hour).to_i.to_s,
} }
let(:unique_msgs_h) { {
"sidekiq-merger:unique_msg:foo" => ["[1,2,3]"],
"sidekiq-merger:unique_msg:bar" => ["[2,3,4]"],
} }
let(:msgs_h) { {
"sidekiq-merger:msg:foo" => ["[1,2,3]"],
"sidekiq-merger:msg:bar" => ["[2,3,4]"],
} }
end
end
end
describe "#delete" do
before do
subject.redis do |conn|
conn.sadd("sidekiq-merger:unique_msg:foo", "[1,2,3]")
conn.sadd("sidekiq-merger:unique_msg:foo", "[2,3,4]")
conn.lpush("sidekiq-merger:msg:foo", "[1,2,3]")
conn.lpush("sidekiq-merger:msg:foo", "[2,3,4]")
end
end
it "deletes the msg" do
subject.delete("foo", [1, 2, 3])
subject.redis do |conn|
expect(conn.smembers("sidekiq-merger:unique_msg:foo")).to contain_exactly "[2,3,4]"
expect(conn.lrange("sidekiq-merger:msg:foo", 0, -1)).to contain_exactly "[2,3,4]"
end
end
context "with duplicate msgs" do
it "deletes the msg" do
subject.redis do |conn|
conn.lpush("sidekiq-merger:msg:foo", "[1,2,3]")
end
subject.delete("foo", [1, 2, 3])
subject.redis do |conn|
expect(conn.smembers("sidekiq-merger:unique_msg:foo")).to contain_exactly "[2,3,4]"
expect(conn.lrange("sidekiq-merger:msg:foo", 0, -1)).to contain_exactly "[2,3,4]"
end
end
end
end
describe "#size" do
before do
subject.redis do |conn|
conn.lpush("sidekiq-merger:msg:foo", "[1,2,3]")
conn.lpush("sidekiq-merger:msg:foo", "[2,3,4]")
end
end
it "returns the size" do
expect(subject.size("foo")).to eq 2
end
end
describe "#exists?" do
context "unique key exists" do
it "returns true" do
described_class.redis { |conn| conn.sadd("sidekiq-merger:unique_msg:foo", "\"test\"") }
expect(subject.exists?("foo", "test")).to eq true
end
end
context "unique key does not exist" do
it "returns false" do
expect(subject.exists?("foo", "test")).to eq false
end
end
end
describe "#all" do
before do
subject.push("foo", [1, 2, 3], execution_time)
subject.push("bar", [2, 3, 4], execution_time)
end
# #all returns every pending merge key.
it "gets all the msg" do
expect(subject.all).to contain_exactly "foo", "bar"
end
end
describe "#lock" do
end
describe "#get" do
before do
subject.push("bar", [1, 2, 3], execution_time)
subject.push("bar", [2, 3, 4], execution_time)
end
# #get reads without consuming: the size is unchanged afterwards.
it "gets all the msg" do
expect(subject.get("bar")).to contain_exactly [1, 2, 3], [2, 3, 4]
expect(subject.size("bar")).to eq 2
end
end
describe "#pluck" do
before do
subject.push("bar", [1, 2, 3], execution_time)
subject.push("bar", [2, 3, 4], execution_time)
end
# #pluck consumes: the list is empty afterwards.
it "plucks all the msg" do
expect(subject.pluck("bar")).to contain_exactly [1, 2, 3], [2, 3, 4]
expect(subject.size("bar")).to eq 0
end
end
describe "#delete_all" do
end
end
|
# Mixin of controller-spec helpers that fake Devise/Warden authentication.
module ControllerMacros
  # Stub Warden and +current_user+ so the controller under test sees +user+
  # as signed in. Passing nil simulates an unauthenticated request: Warden
  # throws, as it would after a failed authenticate!. Returns the user.
  def sign_in(user = double('user'))
    warden_proxy = request.env['warden']
    if user.nil?
      allow(warden_proxy).to receive(:authenticate!).and_throw(:warden, { :scope => :user })
      allow(controller).to receive(:current_user).and_return(nil)
    else
      allow(warden_proxy).to receive(:authenticate!).and_return(user)
      allow(controller).to receive(:current_user).and_return(user)
    end
    user
  end

  # Create and sign in a fresh admin before each example.
  def login_admin
    before(:each) do
      @request.env["devise.mapping"] = Devise.mappings[:admin]
      sign_in FactoryGirl.create(:admin) # Using factory girl as an example
    end
  end

  # Create and sign in a fresh user before each example.
  def login_user
    before(:each) do
      @request.env["devise.mapping"] = Devise.mappings[:user]
      fresh_user = FactoryGirl.create(:user)
      # fresh_user.confirm! # or set a confirmed_at inside the factory.
      # Only necessary if you are using the "confirmable" module.
      sign_in fresh_user
    end
  end
end
Added @admin and @user so specs can access the objects made from FactoryGirl
# Mixin of controller-spec helpers that fake Devise/Warden authentication.
module ControllerMacros
  # Stub Warden and +current_user+ so the controller under test sees +user+
  # as signed in. Passing nil simulates an unauthenticated request: Warden
  # throws, as it would after a failed authenticate!. Returns the user.
  def sign_in(user = double('user'))
    warden_proxy = request.env['warden']
    if user.nil?
      allow(warden_proxy).to receive(:authenticate!).and_throw(:warden, { :scope => :user })
      allow(controller).to receive(:current_user).and_return(nil)
    else
      allow(warden_proxy).to receive(:authenticate!).and_return(user)
      allow(controller).to receive(:current_user).and_return(user)
    end
    user
  end

  # Create and sign in a fresh admin before each example; the record is
  # exposed to examples as @admin.
  def login_admin
    before(:each) do
      @request.env["devise.mapping"] = Devise.mappings[:admin]
      @admin = sign_in FactoryGirl.create(:admin) # Using factory girl as an example
    end
  end

  # Create and sign in a fresh user before each example; the record is
  # exposed to examples as @user.
  def login_user
    before(:each) do
      @request.env["devise.mapping"] = Devise.mappings[:user]
      fresh_user = FactoryGirl.create(:user)
      # fresh_user.confirm! # or set a confirmed_at inside the factory.
      # Only necessary if you are using the "confirmable" module.
      @user = sign_in fresh_user
    end
  end
end
require 'spec_helper'
# ChefSpec coverage for snapraid::default on Ubuntu and Windows.
describe 'snapraid::default' do
context 'On Ubuntu 14.04' do
let(:chef_run) do
runner = ChefSpec::SoloRunner.new(platform: 'ubuntu', version: '14.04') do |node|
node.set['snapraid']['content_files'] = ['/mnt/virtual']
node.set['snapraid']['parity_disks'] = ['/mnt/virtual']
node.set['snapraid']['data_disks'] = ['/mnt/virtual']
node.set['snapraid']['block_size'] = '512'
end
runner.converge(described_recipe)
end
it 'installs snapraid' do
expect(chef_run).to install_with_make_ark('snapraid')
end
it 'creates the correct configuration file' do
expect(chef_run).to create_template('/etc/snapraid.conf')
expect(chef_run).to render_file('/etc/snapraid.conf').with_content(%r{
.*parity\s/mnt/virtual/snapraid.parity.*}x).with_content(%r{
content\s/mnt/virtual/snapraid.content.*}x).with_content(%r{
disk\sd1\s/mnt/virtual.*}x)
.with_content(/
block_size\s512.*/x)
end
end
context 'On Windows 2012R2' do
let(:chef_run) do
# Fauxhai registers this platform's data under '2012R2'; the lowercase
# '2012r2' fails to resolve node data and breaks CI.
runner = ChefSpec::SoloRunner.new(platform: 'windows',
version: '2012R2',
file_cache_path: 'C:\chef\cache') do |node|
node.set['snapraid']['content_files'] = ['D:\\']
node.set['snapraid']['parity_disks'] = ['D:\\']
node.set['snapraid']['data_disks'] = ['D:\\']
node.set['snapraid']['block_size'] = '512'
node.set['snapraid']['install_directory'] = 'D:\SnapRaid'
node.set['snapraid']['config_directory'] = 'D:\SnapRaid\Config'
node.set['snapraid']['url'] = 'https://github.com/amadvance/snapraid/releases/download/v9.1/snapraid-9.1-windows-x64.zip'
end
runner.converge(described_recipe)
end
it 'downloads snapraid' do
expect(chef_run).to create_remote_file('C:\chef\cache\snapraid-9.1-windows-x64.zip')
resource = chef_run.remote_file('C:\chef\cache\snapraid-9.1-windows-x64.zip')
expect(resource).to notify('windows_zipfile[D:\SnapRaid]').to(:unzip).immediately
end
it 'defines unzip of snapraid with action nothing' do
expect(chef_run).to_not unzip_windows_zipfile_to('D:\SnapRaid')
end
it 'creates directories' do
expect(chef_run).to create_directory('D:\SnapRaid')
expect(chef_run).to create_directory('D:\SnapRaid\Config')
end
it 'creates the correct configuration file' do
expect(chef_run).to create_template('D:\SnapRaid\Config/snapraid.conf')
expect(chef_run).to render_file('D:\SnapRaid\Config/snapraid.conf').with_content(/
.*parity\sD:\\snapraid.parity.*/x).with_content(/
content\sD:\\snapraid.content.*/x).with_content(/
disk\sd1\sD:\\.*/x)
.with_content(/
block_size\s512.*/x)
end
end
end
Fix CI failures: use the '2012R2' Windows platform-version casing that Fauxhai expects.
require 'spec_helper'
# ChefSpec coverage for snapraid::default on Ubuntu and Windows.
describe 'snapraid::default' do
context 'On Ubuntu 14.04' do
let(:chef_run) do
# NOTE(review): node.set is deprecated in newer Chef; presumably fine for
# the Chef version pinned here — confirm before upgrading.
runner = ChefSpec::SoloRunner.new(platform: 'ubuntu', version: '14.04') do |node|
node.set['snapraid']['content_files'] = ['/mnt/virtual']
node.set['snapraid']['parity_disks'] = ['/mnt/virtual']
node.set['snapraid']['data_disks'] = ['/mnt/virtual']
node.set['snapraid']['block_size'] = '512'
end
runner.converge(described_recipe)
end
it 'installs snapraid' do
expect(chef_run).to install_with_make_ark('snapraid')
end
it 'creates the correct configuration file' do
expect(chef_run).to create_template('/etc/snapraid.conf')
expect(chef_run).to render_file('/etc/snapraid.conf').with_content(%r{
.*parity\s/mnt/virtual/snapraid.parity.*}x).with_content(%r{
content\s/mnt/virtual/snapraid.content.*}x).with_content(%r{
disk\sd1\s/mnt/virtual.*}x)
.with_content(/
block_size\s512.*/x)
end
end
context 'On Windows 2012R2' do
let(:chef_run) do
# '2012R2' (uppercase) matches the platform version Fauxhai registers.
runner = ChefSpec::SoloRunner.new(platform: 'windows',
version: '2012R2',
file_cache_path: 'C:\chef\cache') do |node|
node.set['snapraid']['content_files'] = ['D:\\']
node.set['snapraid']['parity_disks'] = ['D:\\']
node.set['snapraid']['data_disks'] = ['D:\\']
node.set['snapraid']['block_size'] = '512'
node.set['snapraid']['install_directory'] = 'D:\SnapRaid'
node.set['snapraid']['config_directory'] = 'D:\SnapRaid\Config'
node.set['snapraid']['url'] = 'https://github.com/amadvance/snapraid/releases/download/v9.1/snapraid-9.1-windows-x64.zip'
end
runner.converge(described_recipe)
end
it 'downloads snapraid' do
expect(chef_run).to create_remote_file('C:\chef\cache\snapraid-9.1-windows-x64.zip')
resource = chef_run.remote_file('C:\chef\cache\snapraid-9.1-windows-x64.zip')
expect(resource).to notify('windows_zipfile[D:\SnapRaid]').to(:unzip).immediately
end
it 'defines unzip of snapraid with action nothing' do
expect(chef_run).to_not unzip_windows_zipfile_to('D:\SnapRaid')
end
it 'creates directories' do
expect(chef_run).to create_directory('D:\SnapRaid')
expect(chef_run).to create_directory('D:\SnapRaid\Config')
end
it 'creates the correct configuration file' do
expect(chef_run).to create_template('D:\SnapRaid\Config/snapraid.conf')
expect(chef_run).to render_file('D:\SnapRaid\Config/snapraid.conf').with_content(/
.*parity\sD:\\snapraid.parity.*/x).with_content(/
content\sD:\\snapraid.content.*/x).with_content(/
disk\sd1\sD:\\.*/x)
.with_content(/
block_size\s512.*/x)
end
end
end
|
require 'spec_helper'
# ChefSpec coverage for memcached::default across supported platforms.
describe 'memcached::default' do
before do
# Stub the user-lookup and package checks the recipe shells out for,
# so converge does not depend on the host system.
stub_command('getent passwd memcached').and_return(false)
stub_command('getent passwd nobody').and_return(false)
stub_command('getent passwd memcache').and_return(false)
stub_command('dpkg -s memcached').and_return(true)
end
context 'on rhel 5' do
let(:chef_run) { ChefSpec::ServerRunner.new(step_into: ['memcached_instance'], platform: 'centos', version: '5.11').converge(described_recipe) }
it 'installs redhat-lsb package' do
expect(chef_run).to install_package('redhat-lsb')
end
it 'installs memcached package' do
expect(chef_run).to install_package('memcached')
end
it 'creates memcached group' do
expect(chef_run).to create_group('memcached')
end
it 'creates memcached user' do
expect(chef_run).to create_user('memcached')
end
it 'templates /etc/init.d/memcached' do
expect(chef_run).to create_template('/etc/init.d/memcached')
end
it 'creates log file' do
expect(chef_run).to create_file('/var/log/memcached.log')
end
end
context 'on rhel 6' do
let(:chef_run) { ChefSpec::ServerRunner.new(step_into: ['memcached_instance'], platform: 'centos', version: '6.7').converge(described_recipe) }
it 'installs redhat-lsb package' do
# RHEL 6 splits lsb into redhat-lsb-core, unlike RHEL 5 above.
expect(chef_run).to install_package('redhat-lsb-core')
end
it 'installs memcached package' do
expect(chef_run).to install_package('memcached')
end
it 'creates memcached user' do
expect(chef_run).to create_user('memcached')
end
it 'creates memcached group' do
expect(chef_run).to create_group('memcached')
end
it 'templates /etc/init.d/memcached' do
expect(chef_run).to create_template('/etc/init.d/memcached')
end
it 'creates log file' do
expect(chef_run).to create_file('/var/log/memcached.log')
end
end
context 'on ubuntu' do
let(:chef_run) { ChefSpec::ServerRunner.new(step_into: ['memcached_instance'], platform: 'ubuntu', version: '14.04').converge(described_recipe) }
it 'installs memcached package' do
expect(chef_run).to install_package('memcached')
end
it 'creates memcache group' do
expect(chef_run).to create_group('memcache')
end
it 'deletes /etc/default/memcached' do
expect(chef_run).to delete_file('/etc/default/memcached')
end
it 'creates log file' do
expect(chef_run).to create_file('/var/log/memcached.log')
end
end
end
Remove the RHEL 5 specs
Signed-off-by: Tim Smith <764ef62106582a09ed09dfa0b6bff7c05fd7d1e4@chef.io>
require 'spec_helper'
# ChefSpec coverage for memcached::default (RHEL 5 support dropped).
describe 'memcached::default' do
before do
# Stub the user-lookup and package checks the recipe shells out for.
stub_command('getent passwd memcached').and_return(false)
stub_command('getent passwd nobody').and_return(false)
stub_command('getent passwd memcache').and_return(false)
stub_command('dpkg -s memcached').and_return(true)
end
context 'on rhel 6' do
let(:chef_run) { ChefSpec::ServerRunner.new(step_into: ['memcached_instance'], platform: 'centos', version: '6.9').converge(described_recipe) }
it 'installs redhat-lsb package' do
expect(chef_run).to install_package('redhat-lsb-core')
end
it 'installs memcached package' do
expect(chef_run).to install_package('memcached')
end
it 'creates memcached user' do
expect(chef_run).to create_user('memcached')
end
it 'creates memcached group' do
expect(chef_run).to create_group('memcached')
end
it 'templates /etc/init.d/memcached' do
expect(chef_run).to create_template('/etc/init.d/memcached')
end
it 'creates log file' do
expect(chef_run).to create_file('/var/log/memcached.log')
end
end
context 'on ubuntu' do
let(:chef_run) { ChefSpec::ServerRunner.new(step_into: ['memcached_instance'], platform: 'ubuntu', version: '14.04').converge(described_recipe) }
it 'installs memcached package' do
expect(chef_run).to install_package('memcached')
end
it 'creates memcache group' do
expect(chef_run).to create_group('memcache')
end
it 'deletes /etc/default/memcached' do
expect(chef_run).to delete_file('/etc/default/memcached')
end
it 'creates log file' do
expect(chef_run).to create_file('/var/log/memcached.log')
end
end
end
|
#
# Cookbook Name:: universe_ubuntu
# Spec:: default
#
# Copyright (c) 2016 The Authors, All Rights Reserved.
require 'spec_helper'
# ChefSpec coverage for universe_ubuntu::default: Anaconda install, conda
# env creation, Tensorflow, Docker, and the OpenAI gym/universe checkouts.
describe 'universe_ubuntu::default' do
context 'When all attributes are default, on an Ubuntu' do
before do
# Pretend conda / the env / tensorboard are absent so guarded resources run.
stub_command('[ -x /home/vagrant/anaconda3/bin/conda ]').and_return(false)
stub_command('[ -e /home/vagrant/anaconda3/envs/universe ]').and_return(false)
stub_command('[ -x /home/vagrant/anaconda3/envs/universe/bin/tensorboard ]').and_return(false)
end
let(:chef_run) do
ChefSpec::ServerRunner.new(platform: 'ubuntu', version: '14.04') do |node|
node.override['universe']['user']['name'] = 'vagrant'
node.override['universe']['user']['home'] = '/home/vagrant'
node.override['universe']['conda_env'][:CONDA_PREFIX] = '/home/vagrant/anaconda3/envs/universe'
node.override['universe']['conda_env'][:PATH] = "/home/vagrant/anaconda3/envs/universe/bin:#{ENV['PATH']}"
node.override['universe']['gpu'] = true
node.automatic['os_version'] = 'specific_kernel_version'
end.converge(described_recipe)
end
let(:add_user) { chef_run.group('docker') }
it 'converges successfully' do
expect { chef_run }.to_not raise_error
end
it 'Include apt recipe' do
expect(chef_run).to include_recipe('apt::default')
end
it 'add new golang repository' do
expect(chef_run).to add_apt_repository('newer golang apt repo')
end
it 'edit /etc/X11/Xwrapper.config' do
expect(chef_run).to run_ruby_block('Allow non root users start the GUI')
end
it 'add docker repository' do
expect(chef_run).to add_apt_repository('docker')
end
pkgs = %w(golang
libjpeg-turbo8-dev
make
tmux
htop
chromium-browser
git
cmake
zlib1g-dev
libjpeg-dev
xvfb
libav-tools
xorg-dev
python-opengl
libboost-all-dev
libsdl2-dev
swig)
pkgs.each do |name|
it "install #{name} package" do
expect(chef_run).to install_package name
end
end
it 'creates remote_file anaconda if missing' do
user = 'vagrant'
expect(chef_run).to create_remote_file_if_missing(
"#{Chef::Config[:file_cache_path]}/Anaconda3-4.2.0-Linux-x86_64.sh")
.with(
owner: user,
group: user,
mode: '0755',
checksum: '73b51715a12b6382dd4df3dd1905b531bd6792d4aa7273b2377a0436d45f0e78'
)
end
it 'installs anaconda' do
expect(chef_run).to run_execute("bash #{Chef::Config[:file_cache_path]}/Anaconda3-4.2.0-Linux-x86_64.sh -b")
.with(user: 'vagrant')
end
it 'creates conda env file' do
expect(chef_run).to create_template('/home/vagrant/environment.yml')
.with(owner: 'vagrant',
group: 'vagrant')
end
it 'creates conda environment' do
expect(chef_run).to run_execute('/home/vagrant/anaconda3/bin/conda env create -f environment.yml')
.with(user: 'vagrant', cwd: '/home/vagrant')
end
it 'Installs Tensorflow' do
conda_prefix = '/home/vagrant/anaconda3/envs/universe'
expect(chef_run).to run_execute("#{conda_prefix}/bin/pip install --ignore-installed --upgrade https://storage.googleapis.com/tensorflow/linux/gpu/tensorflow-0.11.0-cp35-cp35m-linux_x86_64.whl")
.with(
user: 'vagrant',
environment: {
'CONDA_DEFAULT_ENV' => 'universe',
'CONDA_PREFIX' => conda_prefix,
'PATH' => "#{conda_prefix}/bin:#{ENV['PATH']}"
})
end
docker_pkgs = ['linux-image-extra-specific_kernel_version',
'linux-image-extra-virtual',
'docker-engine']
docker_pkgs.each do |name|
it "Installs #{name} package" do
expect(chef_run).to install_package(name)
end
end
it 'Add current user to docker group' do
expect(chef_run).to modify_group('docker')
end
it 'Notifies service docker restart' do
expect(add_user).to notify('service[docker]').to(:restart).immediately
end
it 'Notifies service lightdm' do
expect(add_user).to notify('service[lightdm]').to(:restart).immediately
end
it 'Clone gym repo' do
expect(chef_run).to sync_git('/home/vagrant/gym')
end
it 'Clone universe repo' do
expect(chef_run).to sync_git('/home/vagrant/universe')
end
it 'Clone starter agent repo' do
expect(chef_run).to sync_git('/home/vagrant/universe-starter-agent')
end
it 'Install Gym modules' do
conda_prefix = '/home/vagrant/anaconda3/envs/universe'
expect(chef_run).to run_execute("#{conda_prefix}/bin/pip install -e '.[all]'")
.with(
user: 'vagrant',
cwd: '/home/vagrant/gym',
environment: {
'CONDA_DEFAULT_ENV' => 'universe',
'CONDA_PREFIX' => conda_prefix,
'PATH' => "#{conda_prefix}/bin:#{ENV['PATH']}"
})
end
it 'Install Universe modules' do
conda_prefix = '/home/vagrant/anaconda3/envs/universe'
expect(chef_run).to run_execute("#{conda_prefix}/bin/pip install -e .")
.with(
user: 'vagrant',
cwd: '/home/vagrant/universe',
environment: {
'CONDA_DEFAULT_ENV' => 'universe',
'CONDA_PREFIX' => conda_prefix,
'PATH' => "#{conda_prefix}/bin:#{ENV['PATH']}"
})
end
end
end
Unit test on the ruby block that adds lines to bashrc.
#
# Cookbook Name:: universe_ubuntu
# Spec:: default
#
# Copyright (c) 2016 The Authors, All Rights Reserved.
require 'spec_helper'
# ChefSpec coverage for universe_ubuntu::default, including the ruby_block
# that appends Anaconda/Universe setup lines to the user's bashrc.
describe 'universe_ubuntu::default' do
context 'When all attributes are default, on an Ubuntu' do
before do
# Pretend conda / the env / tensorboard are absent so guarded resources run.
stub_command('[ -x /home/vagrant/anaconda3/bin/conda ]').and_return(false)
stub_command('[ -e /home/vagrant/anaconda3/envs/universe ]').and_return(false)
stub_command('[ -x /home/vagrant/anaconda3/envs/universe/bin/tensorboard ]').and_return(false)
end
let(:chef_run) do
ChefSpec::ServerRunner.new(platform: 'ubuntu', version: '14.04') do |node|
node.override['universe']['user']['name'] = 'vagrant'
node.override['universe']['user']['home'] = '/home/vagrant'
node.override['universe']['conda_env'][:CONDA_PREFIX] = '/home/vagrant/anaconda3/envs/universe'
node.override['universe']['conda_env'][:PATH] = "/home/vagrant/anaconda3/envs/universe/bin:#{ENV['PATH']}"
node.override['universe']['gpu'] = true
node.automatic['os_version'] = 'specific_kernel_version'
end.converge(described_recipe)
end
let(:add_user) { chef_run.group('docker') }
it 'converges successfully' do
expect { chef_run }.to_not raise_error
end
it 'Include apt recipe' do
expect(chef_run).to include_recipe('apt::default')
end
it 'add new golang repository' do
expect(chef_run).to add_apt_repository('newer golang apt repo')
end
it 'edit /etc/X11/Xwrapper.config' do
expect(chef_run).to run_ruby_block('Allow non root users start the GUI')
end
it 'add docker repository' do
expect(chef_run).to add_apt_repository('docker')
end
pkgs = %w(golang
libjpeg-turbo8-dev
make
tmux
htop
chromium-browser
git
cmake
zlib1g-dev
libjpeg-dev
xvfb
libav-tools
xorg-dev
python-opengl
libboost-all-dev
libsdl2-dev
swig)
pkgs.each do |name|
it "install #{name} package" do
expect(chef_run).to install_package name
end
end
it 'creates remote_file anaconda if missing' do
user = 'vagrant'
expect(chef_run).to create_remote_file_if_missing(
"#{Chef::Config[:file_cache_path]}/Anaconda3-4.2.0-Linux-x86_64.sh")
.with(
owner: user,
group: user,
mode: '0755',
checksum: '73b51715a12b6382dd4df3dd1905b531bd6792d4aa7273b2377a0436d45f0e78'
)
end
it 'installs anaconda' do
expect(chef_run).to run_execute("bash #{Chef::Config[:file_cache_path]}/Anaconda3-4.2.0-Linux-x86_64.sh -b")
.with(user: 'vagrant')
end
it 'creates conda env file' do
expect(chef_run).to create_template('/home/vagrant/environment.yml')
.with(owner: 'vagrant',
group: 'vagrant')
end
it 'creates conda environment' do
expect(chef_run).to run_execute('/home/vagrant/anaconda3/bin/conda env create -f environment.yml')
.with(user: 'vagrant', cwd: '/home/vagrant')
end
it 'Installs Tensorflow' do
conda_prefix = '/home/vagrant/anaconda3/envs/universe'
expect(chef_run).to run_execute("#{conda_prefix}/bin/pip install --ignore-installed --upgrade https://storage.googleapis.com/tensorflow/linux/gpu/tensorflow-0.11.0-cp35-cp35m-linux_x86_64.whl")
.with(
user: 'vagrant',
environment: {
'CONDA_DEFAULT_ENV' => 'universe',
'CONDA_PREFIX' => conda_prefix,
'PATH' => "#{conda_prefix}/bin:#{ENV['PATH']}"
})
end
# New in this revision: verifies the bashrc-editing ruby_block runs.
it 'add lines to shell config files' do
expect(chef_run).to run_ruby_block('Add Anaconda and Universe to bashrc')
end
docker_pkgs = ['linux-image-extra-specific_kernel_version',
'linux-image-extra-virtual',
'docker-engine']
docker_pkgs.each do |name|
it "Installs #{name} package" do
expect(chef_run).to install_package(name)
end
end
it 'Add current user to docker group' do
expect(chef_run).to modify_group('docker')
end
it 'Notifies service docker restart' do
expect(add_user).to notify('service[docker]').to(:restart).immediately
end
it 'Notifies service lightdm' do
expect(add_user).to notify('service[lightdm]').to(:restart).immediately
end
it 'Clone gym repo' do
expect(chef_run).to sync_git('/home/vagrant/gym')
end
it 'Clone universe repo' do
expect(chef_run).to sync_git('/home/vagrant/universe')
end
it 'Clone starter agent repo' do
expect(chef_run).to sync_git('/home/vagrant/universe-starter-agent')
end
it 'Install Gym modules' do
conda_prefix = '/home/vagrant/anaconda3/envs/universe'
expect(chef_run).to run_execute("#{conda_prefix}/bin/pip install -e '.[all]'")
.with(
user: 'vagrant',
cwd: '/home/vagrant/gym',
environment: {
'CONDA_DEFAULT_ENV' => 'universe',
'CONDA_PREFIX' => conda_prefix,
'PATH' => "#{conda_prefix}/bin:#{ENV['PATH']}"
})
end
it 'Install Universe modules' do
conda_prefix = '/home/vagrant/anaconda3/envs/universe'
expect(chef_run).to run_execute("#{conda_prefix}/bin/pip install -e .")
.with(
user: 'vagrant',
cwd: '/home/vagrant/universe',
environment: {
'CONDA_DEFAULT_ENV' => 'universe',
'CONDA_PREFIX' => conda_prefix,
'PATH' => "#{conda_prefix}/bin:#{ENV['PATH']}"
})
end
end
end
|
require 'spec_helper'
# API spec for the v3 log Find service: logs served from the DB, from S3,
# and the not-found error path.
describe Travis::API::V3::Services::Log::Find, set_app: true do
let(:user) { Factory.create(:user) }
let(:repo) { Factory.create(:repository, owner_name: user.login, name: 'minimal', owner: user)}
let(:build) { Factory.create(:build, repository: repo) }
let(:job) { Travis::API::V3::Models::Job.create(build: build) }
let(:job2) { Travis::API::V3::Models::Job.create(build: build)}
let(:job3) { Travis::API::V3::Models::Job.create(build: build)}
let(:s3job) { Travis::API::V3::Models::Job.create(build: build) }
let(:token) { Travis::Api::App::AccessToken.create(user: user, app_id: 1) }
let(:headers) { { 'HTTP_AUTHORIZATION' => "token #{token}" } }
let(:parsed_body) { JSON.load(body) }
let(:log) { Travis::API::V3::Models::Log.create(job: job) }
let(:log2) { Travis::API::V3::Models::Log.create(job: job2) }
let(:log3) { Travis::API::V3::Models::Log.create(job: job3) }
let(:s3log) { Travis::API::V3::Models::Log.create(job: s3job, content: 'minimal log 1') }
let(:find_log) { "string" }
let(:xml_content) {
"<?xml version=\"1.0\" encoding=\"UTF-8\"?>
<ListBucketResult xmlns=\"http://s3.amazonaws.com/doc/2006-03-01/\">
<Name>bucket</Name>
<Prefix/>
<Marker/>
<MaxKeys>1000</MaxKeys>
<IsTruncated>false</IsTruncated>
<Contents>
<Key>jobs/#{s3job.id}/log.txt</Key>
<LastModified>2009-10-12T17:50:30.000Z</LastModified>
<ETag>"hgb9dede5f27731c9771645a39863328"</ETag>
<Size>20308738</Size>
<StorageClass>STANDARD</StorageClass>
<Owner>
<ID>75aa57f09aa0c8caeab4f8c24e99d10f8e7faeebf76c078efc7c6caea54ba06a</ID>
<DisplayName>mtd@amazon.com</DisplayName>
</Owner>
<body>$ git clean -fdx\nRemoving Gemfile.lock\n$ git fetch
</body>
</Contents>
</ListBucketResult>"
}
before do
log3.delete
Travis::API::V3::AccessControl::LegacyToken.any_instance.stubs(:visible?).returns(true)
stub_request(:get, "https://bucket.s3.amazonaws.com/?max-keys=1000").
to_return(:status => 200, :body => xml_content, :headers => {})
stub_request(:get, "https://s3.amazonaws.com/archive.travis-ci.com/?prefix=jobs/#{s3job.id}/log.txt").
to_return(status: 200, body: xml_content, headers: {})
Fog.mock!
Travis.config.logs_options.s3 = { access_key_id: 'key', secret_access_key: 'secret' }
storage = Fog::Storage.new({
:aws_access_key_id => "key",
:aws_secret_access_key => "secret",
:provider => "AWS"
})
bucket = storage.directories.create(:key => 'archive.travis-ci.org')
file = bucket.files.create(
:key => "jobs/#{s3job.id}/log.txt",
:body => "$ git clean -fdx\nRemoving Gemfile.lock\n$ git fetch"
)
end
after { Fog::Mock.reset }
context 'when log stored in db' do
describe 'returns log with an array of Log Parts' do
example do
log_part = log.log_parts.create(content: "logging it", number: 0)
get("/v3/job/#{log.job.id}/log", {}, headers)
expect(parsed_body).to eq(
'@href' => "/v3/job/#{log.job.id}/log",
'@representation' => 'standard',
'@type' => 'log',
'content' => nil,
'id' => log.id,
'log_parts' => [{
"@type" => "log_part",
"@representation" => "minimal",
"content" => log_part.content,
"number" => log_part.number }])
end
end
describe 'returns aggregated log with an array of Log Parts' do
before { log2.update_attributes(aggregated_at: Time.now, content: "aggregating!")}
example do
get("/v3/job/#{log2.job.id}/log", {}, headers)
expect(parsed_body).to eq(
'@type' => 'log',
'@href' => "/v3/job/#{log2.job.id}/log",
'@representation' => 'standard',
'content' => "aggregating!",
'id' => log2.id,
'log_parts' => [{
"@type" => "log_part",
"@representation" => "minimal",
"content" => "aggregating!",
"number" => 0 }])
end
end
describe 'returns log as plain text' do
example do
log_part = log.log_parts.create(content: "logging it", number: 1)
log_part2 = log.log_parts.create(content: "logging more", number: 2)
log_part3 = log.log_parts.create(content: "logging forever", number: 3)
get("/v3/job/#{log.job.id}/log", {}, headers.merge('HTTP_ACCEPT' => 'text/plain'))
expect(body).to eq(
"logging it\nlogging more\nlogging forever")
end
end
end
context 'when log not found in db but stored on S3' do
describe 'returns log with an array of Log Parts' do
example do
s3log.update_attributes(archived_at: Time.now)
get("/v3/job/#{s3log.job.id}/log", {}, headers)
expect(parsed_body).to eq(
'@type' => 'log',
'@href' => "/v3/job/#{s3job.id}/log",
'@representation' => 'standard',
'id' => s3log.id,
'content' => 'minimal log 1',
'log_parts' => [{
"@type"=>"log_part",
"@representation"=>"minimal",
"content"=>"$ git clean -fdx\nRemoving Gemfile.lock\n$ git fetch",
"number"=>0}])
end
end
describe 'returns log as plain text' do
example do
s3log.update_attributes(archived_at: Time.now)
get("/v3/job/#{s3log.job.id}/log", {}, headers.merge('HTTP_ACCEPT' => 'text/plain'))
# NOTE(review): the endpoint responds with a JSON content type even when
# text/plain is requested; expecting "text/plain" here failed against the
# actual service behavior — confirm against the Log::Find renderer.
expect(last_response.headers).to include("Content-Type" => "application/json")
expect(body).to eq(
"$ git clean -fdx\nRemoving Gemfile.lock\n$ git fetch")
end
end
end
context 'when log not found anywhere' do
describe 'does not return log - returns error' do
example do
log3.delete
get("/v3/job/#{job3.id}/log", {}, headers)
expect(parsed_body).to eq({
"@type"=>"error",
"error_type"=>"not_found",
"error_message"=>"log not found"})
end
end
end
end
Fix the test: the archived-log endpoint responds with Content-Type application/json.
require 'spec_helper'
# API spec for the v3 log Find service: logs served from the DB, from S3,
# and the not-found error path.
describe Travis::API::V3::Services::Log::Find, set_app: true do
let(:user) { Factory.create(:user) }
let(:repo) { Factory.create(:repository, owner_name: user.login, name: 'minimal', owner: user)}
let(:build) { Factory.create(:build, repository: repo) }
let(:job) { Travis::API::V3::Models::Job.create(build: build) }
let(:job2) { Travis::API::V3::Models::Job.create(build: build)}
let(:job3) { Travis::API::V3::Models::Job.create(build: build)}
let(:s3job) { Travis::API::V3::Models::Job.create(build: build) }
let(:token) { Travis::Api::App::AccessToken.create(user: user, app_id: 1) }
let(:headers) { { 'HTTP_AUTHORIZATION' => "token #{token}" } }
let(:parsed_body) { JSON.load(body) }
let(:log) { Travis::API::V3::Models::Log.create(job: job) }
let(:log2) { Travis::API::V3::Models::Log.create(job: job2) }
let(:log3) { Travis::API::V3::Models::Log.create(job: job3) }
let(:s3log) { Travis::API::V3::Models::Log.create(job: s3job, content: 'minimal log 1') }
let(:find_log) { "string" }
# Canned S3 ListBucketResult response used by the webmock stubs below.
let(:xml_content) {
"<?xml version=\"1.0\" encoding=\"UTF-8\"?>
<ListBucketResult xmlns=\"http://s3.amazonaws.com/doc/2006-03-01/\">
<Name>bucket</Name>
<Prefix/>
<Marker/>
<MaxKeys>1000</MaxKeys>
<IsTruncated>false</IsTruncated>
<Contents>
<Key>jobs/#{s3job.id}/log.txt</Key>
<LastModified>2009-10-12T17:50:30.000Z</LastModified>
<ETag>"hgb9dede5f27731c9771645a39863328"</ETag>
<Size>20308738</Size>
<StorageClass>STANDARD</StorageClass>
<Owner>
<ID>75aa57f09aa0c8caeab4f8c24e99d10f8e7faeebf76c078efc7c6caea54ba06a</ID>
<DisplayName>mtd@amazon.com</DisplayName>
</Owner>
<body>$ git clean -fdx\nRemoving Gemfile.lock\n$ git fetch
</body>
</Contents>
</ListBucketResult>"
}
before do
log3.delete
Travis::API::V3::AccessControl::LegacyToken.any_instance.stubs(:visible?).returns(true)
stub_request(:get, "https://bucket.s3.amazonaws.com/?max-keys=1000").
to_return(:status => 200, :body => xml_content, :headers => {})
stub_request(:get, "https://s3.amazonaws.com/archive.travis-ci.com/?prefix=jobs/#{s3job.id}/log.txt").
to_return(status: 200, body: xml_content, headers: {})
# Archived-log lookups go through Fog; back them with an in-memory mock.
Fog.mock!
Travis.config.logs_options.s3 = { access_key_id: 'key', secret_access_key: 'secret' }
storage = Fog::Storage.new({
:aws_access_key_id => "key",
:aws_secret_access_key => "secret",
:provider => "AWS"
})
bucket = storage.directories.create(:key => 'archive.travis-ci.org')
file = bucket.files.create(
:key => "jobs/#{s3job.id}/log.txt",
:body => "$ git clean -fdx\nRemoving Gemfile.lock\n$ git fetch"
)
end
after { Fog::Mock.reset }
context 'when log stored in db' do
describe 'returns log with an array of Log Parts' do
example do
log_part = log.log_parts.create(content: "logging it", number: 0)
get("/v3/job/#{log.job.id}/log", {}, headers)
expect(parsed_body).to eq(
'@href' => "/v3/job/#{log.job.id}/log",
'@representation' => 'standard',
'@type' => 'log',
'content' => nil,
'id' => log.id,
'log_parts' => [{
"@type" => "log_part",
"@representation" => "minimal",
"content" => log_part.content,
"number" => log_part.number }])
end
end
describe 'returns aggregated log with an array of Log Parts' do
before { log2.update_attributes(aggregated_at: Time.now, content: "aggregating!")}
example do
get("/v3/job/#{log2.job.id}/log", {}, headers)
expect(parsed_body).to eq(
'@type' => 'log',
'@href' => "/v3/job/#{log2.job.id}/log",
'@representation' => 'standard',
'content' => "aggregating!",
'id' => log2.id,
'log_parts' => [{
"@type" => "log_part",
"@representation" => "minimal",
"content" => "aggregating!",
"number" => 0 }])
end
end
describe 'returns log as plain text' do
example do
log_part = log.log_parts.create(content: "logging it", number: 1)
log_part2 = log.log_parts.create(content: "logging more", number: 2)
log_part3 = log.log_parts.create(content: "logging forever", number: 3)
get("/v3/job/#{log.job.id}/log", {}, headers.merge('HTTP_ACCEPT' => 'text/plain'))
expect(body).to eq(
"logging it\nlogging more\nlogging forever")
end
end
end
context 'when log not found in db but stored on S3' do
describe 'returns log with an array of Log Parts' do
example do
s3log.update_attributes(archived_at: Time.now)
get("/v3/job/#{s3log.job.id}/log", {}, headers)
expect(parsed_body).to eq(
'@type' => 'log',
'@href' => "/v3/job/#{s3job.id}/log",
'@representation' => 'standard',
'id' => s3log.id,
'content' => 'minimal log 1',
'log_parts' => [{
"@type"=>"log_part",
"@representation"=>"minimal",
"content"=>"$ git clean -fdx\nRemoving Gemfile.lock\n$ git fetch",
"number"=>0}])
end
end
describe 'returns log as plain text' do
example do
s3log.update_attributes(archived_at: Time.now)
get("/v3/job/#{s3log.job.id}/log", {}, headers.merge('HTTP_ACCEPT' => 'text/plain'))
# The endpoint serves a JSON content type even for text/plain requests.
expect(last_response.headers).to include("Content-Type" => "application/json")
expect(body).to eq(
"$ git clean -fdx\nRemoving Gemfile.lock\n$ git fetch")
end
end
end
context 'when log not found anywhere' do
describe 'does not return log - returns error' do
example do
log3.delete
get("/v3/job/#{job3.id}/log", {}, headers)
expect(parsed_body).to eq({
"@type"=>"error",
"error_type"=>"not_found",
"error_message"=>"log not found"})
end
end
end
end
|
require 'rspec'
$LOAD_PATH.unshift(File.join(File.dirname(__FILE__), '..', 'lib'))
$LOAD_PATH.unshift(File.dirname(__FILE__))
require 'webdriver-user-agent'
require 'selenium-webdriver'
require 'watir-webdriver'
# Pixel height of the browser's own UI chrome, subtracted from a device's
# screen height when asserting the usable viewport in the examples below.
CHROMEBROWSER_UICHROME_HEIGHT = 72
# Minimum window height Chrome appears to enforce (see the android phone spec).
CHROMEBROWSER_UI_MINIMUM_HEIGHT = 200
FIREFOXBROWSER_UICHROME_HEIGHT = 79
describe "webdriver user agent" do
after :each do
# Tear down the real browser session spawned by each example.
@driver.quit if @driver
end
# window.innerWidth and window.innerHeight
# do not accurately provide
# browser widths and heights
# http://stackoverflow.com/a/8876069/1651458
it "can create a new webdriver driver using firefox and iphone (portrait) by default" do
@driver = Webdriver::UserAgent.driver
expect(@driver.browser).to eq(:firefox)
expect(@driver.execute_script('return navigator.userAgent')).to include 'iPhone'
expect(@driver.execute_script('return Math.max(document.documentElement.clientWidth, window.innerWidth || 0)')).to eq(375)
expect(@driver.execute_script('return Math.max(document.documentElement.clientHeight, window.innerHeight || 0)')).to eq(559 - FIREFOXBROWSER_UICHROME_HEIGHT)
end
it "can create a new webdriver driver using chrome and iphone 6 plus (landscape)" do
@driver = Webdriver::UserAgent.driver(:browser => :chrome, :agent => :iphone6plus, :orientation => :landscape)
expect(@driver.browser).to eq(:chrome)
expect(@driver.execute_script('return navigator.userAgent')).to include 'iPhone'
expect(@driver.execute_script('return Math.max(document.documentElement.clientWidth, window.innerWidth || 0)')).to eq(736)
expect(@driver.execute_script('return Math.max(document.documentElement.clientHeight, window.innerHeight || 0)')).to eq(414 - CHROMEBROWSER_UICHROME_HEIGHT)
end
it "can create a new webdriver driver using chrome and iPad (landscape)" do
@driver = Webdriver::UserAgent.driver(:browser => :chrome, :agent => :iPad, :orientation => :landscape)
expect(@driver.browser).to eq(:chrome)
expect(@driver.execute_script('return navigator.userAgent')).to include 'iPad'
expect(@driver.execute_script('return Math.max(document.documentElement.clientWidth, window.innerWidth || 0)')).to eq(1024)
expect(@driver.execute_script('return Math.max(document.documentElement.clientHeight, window.innerHeight || 0)')).to eq(704 - CHROMEBROWSER_UICHROME_HEIGHT)
end
# Both examples drive :chrome and assert eq(:chrome); the descriptions
# previously said "firefox", which was misleading.
it "can create a new webdriver driver using chrome and android phone (landscape)" do
@driver = Webdriver::UserAgent.driver(:browser => :chrome, :agent => :android_phone, :orientation => :landscape)
expect(@driver.browser).to eq(:chrome)
expect(@driver.execute_script('return navigator.userAgent')).to include 'Android'
expect(@driver.execute_script('return Math.max(document.documentElement.clientWidth, window.innerWidth || 0)')).to eq(480)
# Chrome is apparently setting some kind of minimum height
# As seen on Chrome v42 on OS X Yosemite
expect(@driver.execute_script('return Math.max(document.documentElement.clientHeight, window.innerHeight || 0)')).to eq(CHROMEBROWSER_UI_MINIMUM_HEIGHT)
end
it "can create a new webdriver driver using chrome and android tablet (portrait)" do
@driver = Webdriver::UserAgent.driver(:browser => :chrome, :agent => :android_tablet, :orientation => :portrait)
expect(@driver.browser).to eq(:chrome)
expect(@driver.execute_script('return navigator.userAgent')).to include 'Android'
expect(@driver.execute_script('return Math.max(document.documentElement.clientWidth, window.innerWidth || 0)')).to eq(768)
expect(@driver.execute_script('return Math.max(document.documentElement.clientHeight, window.innerHeight || 0)')).to eq(946 - CHROMEBROWSER_UICHROME_HEIGHT)
end
it "can create a new webdriver driver using firefox and random user agent" do
@driver = Webdriver::UserAgent.driver(:agent => :random)
expect(@driver.browser).to eq(:firefox)
expect(@driver.execute_script('return navigator.userAgent')).not_to be_nil
# A random agent should not match the fixed iPhone-portrait dimensions.
expect(@driver.execute_script('return Math.max(document.documentElement.clientWidth, window.innerWidth || 0)')).not_to eq(320)
expect(@driver.execute_script('return Math.max(document.documentElement.clientHeight, window.innerHeight || 0)')).not_to eq(356 - FIREFOXBROWSER_UICHROME_HEIGHT)
end
it "can create a new webdriver driver using chrome and random user agent" do
@driver = Webdriver::UserAgent.driver(:browser => :chrome, :agent => :random)
expect(@driver.browser).to eq(:chrome)
expect(@driver.execute_script('return navigator.userAgent')).not_to be_nil
expect(@driver.execute_script('return Math.max(document.documentElement.clientWidth, window.innerWidth || 0)')).not_to eq(320)
expect(@driver.execute_script('return Math.max(document.documentElement.clientHeight, window.innerHeight || 0)')).not_to eq(356 - CHROMEBROWSER_UICHROME_HEIGHT)
end
it "can create a new webdriver driver using an existing firefox profile" do
profile = Selenium::WebDriver::Firefox::Profile.new
profile['browser.startup.homepage'] = "data:text/html,<title>hello</title>"
@driver = Webdriver::UserAgent.driver(:browser => :firefox, :profile => profile)
expect(@driver.browser).to eq(:firefox)
expect(@driver.execute_script('return navigator.userAgent')).to include 'iPhone'
expect(@driver.execute_script('return Math.max(document.documentElement.clientWidth, window.innerWidth || 0)')).to eq(375)
expect(@driver.execute_script('return Math.max(document.documentElement.clientHeight, window.innerHeight || 0)')).to eq(559 - FIREFOXBROWSER_UICHROME_HEIGHT)
expect(@driver.title).to eq('hello')
end
it "can create a new webdriver driver using firefox and user-specified user agent" do
@driver = Webdriver::UserAgent.driver(:user_agent_string => "Mozilla/4.0 (compatible; MSIE 5.5b1; Mac_PowerPC)")
expect(@driver.browser).to eq(:firefox)
expect(@driver.execute_script('return navigator.userAgent')).not_to be_nil
@browser = Watir::Browser.new @driver
expect(@browser.url).to eq("about:blank")
end
it "can create a new webdriver driver using firefox and user-specified viewport sizes (string or int)" do
width = 800
height = 600
@driver = Webdriver::UserAgent.driver(:viewport_width => "#{width}", :viewport_height => height, :agent => :iphone6)
expect(@driver.execute_script('return Math.max(document.documentElement.clientWidth, window.innerWidth || 0)')).to eq(800)
expect(@driver.execute_script('return Math.max(document.documentElement.clientHeight, window.innerHeight || 0)')).to eq(600 - FIREFOXBROWSER_UICHROME_HEIGHT)
@browser = Watir::Browser.new @driver
expect(@browser.url).to eq("about:blank")
end
it "can create a new webdriver driver, handling for nonsense height and widths" do
@driver = Webdriver::UserAgent.driver(:viewport_width => "abc", :agent => :iphone6)
expect(@driver.execute_script('return Math.max(document.documentElement.clientWidth, window.innerWidth || 0)')).to eq(375)
expect(@driver.execute_script('return Math.max(document.documentElement.clientHeight, window.innerHeight || 0)')).to eq(559 - FIREFOXBROWSER_UICHROME_HEIGHT)
@browser = Watir::Browser.new @driver
expect(@browser.url).to eq("about:blank")
end
it "can allow using selenium driver for watir browser" do
@driver = Webdriver::UserAgent.driver(:browser => :firefox, :agent => :iphone, :orientation => :portrait)
@browser = Watir::Browser.new @driver
expect(@browser.url).to eq("about:blank")
end
end
Improve test for user-supplied user agent string.
require 'rspec'
$LOAD_PATH.unshift(File.join(File.dirname(__FILE__), '..', 'lib'))
$LOAD_PATH.unshift(File.dirname(__FILE__))
require 'webdriver-user-agent'
require 'selenium-webdriver'
require 'watir-webdriver'
CHROMEBROWSER_UICHROME_HEIGHT = 72
CHROMEBROWSER_UI_MINIMUM_HEIGHT = 200
FIREFOXBROWSER_UICHROME_HEIGHT = 79
describe "webdriver user agent" do
after :each do
@driver.quit if @driver
end
# window.innerWidth and window.innerHeight
# do not accurately provide
# browser widths and heights
# http://stackoverflow.com/a/8876069/1651458
it "can create a new webdriver driver using firefox and iphone (portrait) by default" do
@driver = Webdriver::UserAgent.driver
expect(@driver.browser).to eq(:firefox)
expect(@driver.execute_script('return navigator.userAgent')).to include 'iPhone'
expect(@driver.execute_script('return Math.max(document.documentElement.clientWidth, window.innerWidth || 0)')).to eq(375)
expect(@driver.execute_script('return Math.max(document.documentElement.clientHeight, window.innerHeight || 0)')).to eq(559 - FIREFOXBROWSER_UICHROME_HEIGHT)
end
it "can create a new webdriver driver using chrome and iphone 6 plus (landscape)" do
@driver = Webdriver::UserAgent.driver(:browser => :chrome, :agent => :iphone6plus, :orientation => :landscape)
expect(@driver.browser).to eq(:chrome)
expect(@driver.execute_script('return navigator.userAgent')).to include 'iPhone'
expect(@driver.execute_script('return Math.max(document.documentElement.clientWidth, window.innerWidth || 0)')).to eq(736)
expect(@driver.execute_script('return Math.max(document.documentElement.clientHeight, window.innerHeight || 0)')).to eq(414 - CHROMEBROWSER_UICHROME_HEIGHT)
end
it "can create a new webdriver driver using chrome and iPad (landscape)" do
@driver = Webdriver::UserAgent.driver(:browser => :chrome, :agent => :iPad, :orientation => :landscape)
expect(@driver.browser).to eq(:chrome)
expect(@driver.execute_script('return navigator.userAgent')).to include 'iPad'
expect(@driver.execute_script('return Math.max(document.documentElement.clientWidth, window.innerWidth || 0)')).to eq(1024)
expect(@driver.execute_script('return Math.max(document.documentElement.clientHeight, window.innerHeight || 0)')).to eq(704 - CHROMEBROWSER_UICHROME_HEIGHT)
end
it "can create a new webdriver driver using firefox and android phone (landscape)" do
@driver = Webdriver::UserAgent.driver(:browser => :chrome, :agent => :android_phone, :orientation => :landscape)
expect(@driver.browser).to eq(:chrome)
expect(@driver.execute_script('return navigator.userAgent')).to include 'Android'
expect(@driver.execute_script('return Math.max(document.documentElement.clientWidth, window.innerWidth || 0)')).to eq(480)
# Chrome is apparently setting some kind of minimum height
# As seen on Chrome v42 on OS X Yosimite
expect(@driver.execute_script('return Math.max(document.documentElement.clientHeight, window.innerHeight || 0)')).to eq(CHROMEBROWSER_UI_MINIMUM_HEIGHT)
end
it "can create a new webdriver driver using firefox and android tablet (portrait)" do
@driver = Webdriver::UserAgent.driver(:browser => :chrome, :agent => :android_tablet, :orientation => :portrait)
expect(@driver.browser).to eq(:chrome)
expect(@driver.execute_script('return navigator.userAgent')).to include 'Android'
expect(@driver.execute_script('return Math.max(document.documentElement.clientWidth, window.innerWidth || 0)')).to eq(768)
expect(@driver.execute_script('return Math.max(document.documentElement.clientHeight, window.innerHeight || 0)')).to eq(946 - CHROMEBROWSER_UICHROME_HEIGHT)
end
it "can create a new webdriver driver using firefox and random user agent" do
@driver = Webdriver::UserAgent.driver(:agent => :random)
expect(@driver.browser).to eq(:firefox)
expect(@driver.execute_script('return navigator.userAgent')).not_to be_nil
expect(@driver.execute_script('return Math.max(document.documentElement.clientWidth, window.innerWidth || 0)')).not_to eq(320)
expect(@driver.execute_script('return Math.max(document.documentElement.clientHeight, window.innerHeight || 0)')).not_to eq(356 - FIREFOXBROWSER_UICHROME_HEIGHT)
end
it "can create a new webdriver driver using chrome and random user agent" do
@driver = Webdriver::UserAgent.driver(:browser => :chrome, :agent => :random)
expect(@driver.browser).to eq(:chrome)
expect(@driver.execute_script('return navigator.userAgent')).not_to be_nil
expect(@driver.execute_script('return Math.max(document.documentElement.clientWidth, window.innerWidth || 0)')).not_to eq(320)
expect(@driver.execute_script('return Math.max(document.documentElement.clientHeight, window.innerHeight || 0)')).not_to eq(356 - CHROMEBROWSER_UICHROME_HEIGHT)
end
it "can create a new webdriver driver using an existing firefox profile" do
profile = Selenium::WebDriver::Firefox::Profile.new
profile['browser.startup.homepage'] = "data:text/html,<title>hello</title>"
@driver = Webdriver::UserAgent.driver(:browser => :firefox, :profile => profile)
expect(@driver.browser).to eq(:firefox)
expect(@driver.execute_script('return navigator.userAgent')).to include 'iPhone'
expect(@driver.execute_script('return Math.max(document.documentElement.clientWidth, window.innerWidth || 0)')).to eq(375)
expect(@driver.execute_script('return Math.max(document.documentElement.clientHeight, window.innerHeight || 0)')).to eq(559 - FIREFOXBROWSER_UICHROME_HEIGHT)
expect(@driver.title).to eq('hello')
end
it "can create a new webdriver driver using firefox and user-specified user agent" do
@driver = Webdriver::UserAgent.driver(:user_agent_string => "Mozilla/4.0 (compatible; MSIE 5.5b1; Mac_PowerPC)")
expect(@driver.browser).to eq(:firefox)
expect(@driver.execute_script('return navigator.userAgent')).to include 'Mac_PowerPC'
@browser = Watir::Browser.new @driver
expect(@browser.url).to eq("about:blank")
end
it "can create a new webdriver driver using firefox and user-specified viewport sizes (string or int)" do
width = 800
height = 600
@driver = Webdriver::UserAgent.driver(:viewport_width => "#{width}", :viewport_height => height, :agent => :iphone6)
expect(@driver.execute_script('return Math.max(document.documentElement.clientWidth, window.innerWidth || 0)')).to eq(800)
expect(@driver.execute_script('return Math.max(document.documentElement.clientHeight, window.innerHeight || 0)')).to eq(600 - FIREFOXBROWSER_UICHROME_HEIGHT)
@browser = Watir::Browser.new @driver
expect(@browser.url).to eq("about:blank")
end
it "can create a new webdriver driver, handling for nonsense height and widths" do
@driver = Webdriver::UserAgent.driver(:viewport_width => "abc", :agent => :iphone6)
expect(@driver.execute_script('return Math.max(document.documentElement.clientWidth, window.innerWidth || 0)')).to eq(375)
expect(@driver.execute_script('return Math.max(document.documentElement.clientHeight, window.innerHeight || 0)')).to eq(559 - FIREFOXBROWSER_UICHROME_HEIGHT)
@browser = Watir::Browser.new @driver
expect(@browser.url).to eq("about:blank")
end
it "can allow using selenium driver for watir browser" do
@driver = Webdriver::UserAgent.driver(:browser => :firefox, :agent => :iphone, :orientation => :portrait)
@browser = Watir::Browser.new @driver
expect(@browser.url).to eq("about:blank")
end
end |
# Namespace for the Enat gem.
module Enat
  # Current release, following Semantic Versioning.
  VERSION = '0.1.1'
end
Bumping version.
# Namespace for the Enat gem.
module Enat
  # Current release, following Semantic Versioning.
  VERSION = "0.1.2"
end
|
module Excon
  # Low-level TCP socket used by Excon connections. Supports blocking and
  # non-blocking connect/read/write with IO.select-based timeouts
  # (see #timeout_reached).
  class Socket
    include Utils
    extend Forwardable

    attr_accessor :data

    # @deprecated use Excon::Socket#data instead
    def params
      Excon.display_warning('Excon::Socket#params is deprecated use Excon::Socket#data instead.')
      @data
    end

    # @deprecated use Excon::Socket#data= instead
    def params=(new_params)
      Excon.display_warning('Excon::Socket#params= is deprecated use Excon::Socket#data= instead.')
      @data = new_params
    end

    attr_reader :remote_ip

    def_delegators(:@socket, :close)

    def initialize(data = {})
      @data = data
      @nonblock = data[:nonblock]
      @read_buffer = ''
      @eof = false
      connect
    end

    # Reads up to max_length bytes, or everything until EOF when max_length
    # is nil. Returns nil (resp. '') once EOF has already been reached.
    def read(max_length = nil)
      if @eof
        return max_length ? nil : ''
      elsif @nonblock
        read_nonblock(max_length)
      else
        read_block(max_length)
      end
    end

    # Reads a single line, up to and including "\n", honoring the read timeout.
    def readline
      return legacy_readline if RUBY_VERSION.to_f <= 1.8_7
      begin
        buffer = ''
        buffer << @socket.read_nonblock(1) while buffer[-1] != "\n"
        buffer
      rescue Errno::EAGAIN, Errno::EWOULDBLOCK, IO::WaitReadable
        if timeout_reached('read')
          raise_timeout_error('read')
        else
          retry
        end
      rescue OpenSSL::SSL::SSLError => e
        if e.message == 'read would block'
          if timeout_reached('read')
            raise_timeout_error('read')
          else
            retry
          end
        else
          # FIX: was `raise(error)`; `error` is undefined in this method, so a
          # non-retryable SSL error surfaced as NameError. Re-raise the
          # rescued exception itself.
          raise(e)
        end
      end
    end

    # Timeout-wrapped readline fallback for Ruby <= 1.8.7, which lacks the
    # nonblocking IO support used above.
    def legacy_readline
      begin
        Timeout.timeout(@data[:read_timeout]) do
          @socket.readline
        end
      rescue Timeout::Error
        raise Excon::Errors::Timeout.new('read timeout reached')
      end
    end

    def write(data)
      if @nonblock
        write_nonblock(data)
      else
        write_block(data)
      end
    end

    # Local IP address of the connected socket.
    def local_address
      unpacked_sockaddr[1]
    end

    # Local port of the connected socket.
    def local_port
      unpacked_sockaddr[0]
    end

    private

    # Resolves the target (or proxy) host and tries to connect to each
    # resolved address in turn; the first success wins. Raises the last
    # encountered exception if no address could be connected.
    def connect
      @socket = nil
      exception = nil

      if @data[:proxy]
        family = @data[:proxy][:family] || ::Socket::Constants::AF_UNSPEC
        args = [@data[:proxy][:hostname], @data[:proxy][:port], family, ::Socket::Constants::SOCK_STREAM]
      else
        family = @data[:family] || ::Socket::Constants::AF_UNSPEC
        args = [@data[:hostname], @data[:port], family, ::Socket::Constants::SOCK_STREAM]
      end
      if RUBY_VERSION >= '1.9.2' && defined?(RUBY_ENGINE) && RUBY_ENGINE == 'ruby'
        args << nil << nil << false # no reverse lookup
      end
      addrinfo = ::Socket.getaddrinfo(*args)

      addrinfo.each do |_, port, _, ip, a_family, s_type|
        @remote_ip = ip

        # nonblocking connect
        begin
          sockaddr = ::Socket.sockaddr_in(port, ip)

          socket = ::Socket.new(a_family, s_type, 0)

          if @data[:reuseaddr]
            socket.setsockopt(::Socket::Constants::SOL_SOCKET, ::Socket::Constants::SO_REUSEADDR, true)
            if defined?(::Socket::Constants::SO_REUSEPORT)
              socket.setsockopt(::Socket::Constants::SOL_SOCKET, ::Socket::Constants::SO_REUSEPORT, true)
            end
          end

          if @nonblock
            socket.connect_nonblock(sockaddr)
          else
            socket.connect(sockaddr)
          end
          @socket = socket
          break
        rescue Errno::EINPROGRESS
          # connect still in flight; wait for writability up to the timeout
          unless IO.select(nil, [socket], nil, @data[:connect_timeout])
            raise(Excon::Errors::Timeout.new('connect timeout reached'))
          end
          begin
            socket.connect_nonblock(sockaddr)
            @socket = socket
            break
          rescue Errno::EISCONN
            # already connected — success
            @socket = socket
            break
          rescue SystemCallError => exception
            socket.close rescue nil
            next
          end
        rescue SystemCallError => exception
          socket.close rescue nil if socket
          next
        end
      end

      # this will be our last encountered exception
      fail exception unless @socket

      if @data[:tcp_nodelay]
        @socket.setsockopt(::Socket::IPPROTO_TCP,
                           ::Socket::TCP_NODELAY,
                           true)
      end
    end

    # Nonblocking read into @read_buffer; returns up to max_length bytes, or
    # the whole stream contents (until EOF) when max_length is nil.
    def read_nonblock(max_length)
      begin
        if max_length
          until @read_buffer.length >= max_length
            @read_buffer << @socket.read_nonblock(max_length - @read_buffer.length)
          end
        else
          loop do
            @read_buffer << @socket.read_nonblock(@data[:chunk_size])
          end
        end
      rescue OpenSSL::SSL::SSLError => error
        if error.message == 'read would block'
          if timeout_reached('read')
            raise_timeout_error('read')
          else
            retry
          end
        else
          raise(error)
        end
      rescue Errno::EAGAIN, Errno::EWOULDBLOCK, IO::WaitReadable
        if @read_buffer.empty?
          # if we didn't read anything, try again...
          if timeout_reached('read')
            raise_timeout_error('read')
          else
            retry
          end
        end
      rescue EOFError
        @eof = true
      end

      if max_length
        if @read_buffer.empty?
          nil # EOF met at beginning
        else
          @read_buffer.slice!(0, max_length)
        end
      else
        # read until EOFError, so return everything
        @read_buffer.slice!(0, @read_buffer.length)
      end
    end

    # Blocking read; translates retryable SSL/EAGAIN conditions into either a
    # retry or an Excon timeout error.
    def read_block(max_length)
      @socket.read(max_length)
    rescue OpenSSL::SSL::SSLError => error
      if error.message == 'read would block'
        if timeout_reached('read')
          raise_timeout_error('read')
        else
          retry
        end
      else
        raise(error)
      end
    rescue Errno::EAGAIN, Errno::EWOULDBLOCK, IO::WaitReadable
      if @read_buffer.empty?
        if timeout_reached('read')
          raise_timeout_error('read')
        else
          retry
        end
      end
    rescue EOFError
      @eof = true
    end

    # Nonblocking write; loops on short writes until all of data is written.
    def write_nonblock(data)
      if FORCE_ENC
        data.force_encoding('BINARY')
      end
      loop do
        written = nil
        begin
          # I wish that this API accepted a start position, then we wouldn't
          # have to slice data when there is a short write.
          written = @socket.write_nonblock(data)
        rescue OpenSSL::SSL::SSLError, Errno::EAGAIN, Errno::EWOULDBLOCK, IO::WaitWritable => error
          if error.is_a?(OpenSSL::SSL::SSLError) && error.message != 'write would block'
            raise error
          else
            if timeout_reached('write')
              raise_timeout_error('write')
            else
              retry
            end
          end
        end

        # Fast, common case.
        break if written == data.size

        # This takes advantage of the fact that most ruby implementations
        # have Copy-On-Write strings. Thusly why requesting a subrange
        # of data, we actually don't copy data because the new string
        # simply references a subrange of the original.
        data = data[written, data.size]
      end
    end

    # Blocking write with the same retry/timeout handling as write_nonblock.
    def write_block(data)
      @socket.write(data)
    rescue OpenSSL::SSL::SSLError, Errno::EAGAIN, Errno::EWOULDBLOCK, IO::WaitWritable => error
      if error.is_a?(OpenSSL::SSL::SSLError) && error.message != 'write would block'
        raise error
      else
        if timeout_reached('write')
          raise_timeout_error('write')
        else
          retry
        end
      end
    end

    # Waits via IO.select for readiness; returns true only when the
    # read/write timeout expired without the socket becoming ready.
    def timeout_reached(type)
      if type == 'read'
        args = [[@socket], nil, nil, @data[:read_timeout]]
      else
        args = [nil, [@socket], nil, @data[:write_timeout]]
      end
      IO.select(*args) ? nil : true
    end

    def raise_timeout_error(type)
      fail Excon::Errors::Timeout.new("#{type} timeout reached")
    end

    # Memoized [port, ip] pair for the local endpoint; returns nil for
    # non-INET sockaddrs (e.g. unix sockets) instead of raising.
    def unpacked_sockaddr
      @unpacked_sockaddr ||= ::Socket.unpack_sockaddr_in(@socket.to_io.getsockname)
    rescue ArgumentError => e
      unless e.message == 'not an AF_INET/AF_INET6 sockaddr'
        raise
      end
    end
  end
end
fix socket lookup stuff to play nicer with 2.2.1
fixes #482
module Excon
  # Low-level TCP socket used by Excon connections. Supports blocking and
  # non-blocking connect/read/write with IO.select-based timeouts
  # (see #timeout_reached).
  class Socket
    include Utils
    extend Forwardable

    attr_accessor :data

    # @deprecated use Excon::Socket#data instead
    def params
      Excon.display_warning('Excon::Socket#params is deprecated use Excon::Socket#data instead.')
      @data
    end

    # @deprecated use Excon::Socket#data= instead
    def params=(new_params)
      Excon.display_warning('Excon::Socket#params= is deprecated use Excon::Socket#data= instead.')
      @data = new_params
    end

    attr_reader :remote_ip

    def_delegators(:@socket, :close)

    def initialize(data = {})
      @data = data
      @nonblock = data[:nonblock]
      @read_buffer = ''
      @eof = false
      connect
    end

    # Reads up to max_length bytes, or everything until EOF when max_length
    # is nil. Returns nil (resp. '') once EOF has already been reached.
    def read(max_length = nil)
      if @eof
        return max_length ? nil : ''
      elsif @nonblock
        read_nonblock(max_length)
      else
        read_block(max_length)
      end
    end

    # Reads a single line, up to and including "\n", honoring the read timeout.
    def readline
      return legacy_readline if RUBY_VERSION.to_f <= 1.8_7
      begin
        buffer = ''
        buffer << @socket.read_nonblock(1) while buffer[-1] != "\n"
        buffer
      rescue Errno::EAGAIN, Errno::EWOULDBLOCK, IO::WaitReadable
        if timeout_reached('read')
          raise_timeout_error('read')
        else
          retry
        end
      rescue OpenSSL::SSL::SSLError => e
        if e.message == 'read would block'
          if timeout_reached('read')
            raise_timeout_error('read')
          else
            retry
          end
        else
          # FIX: was `raise(error)`; `error` is undefined in this method, so a
          # non-retryable SSL error surfaced as NameError. Re-raise the
          # rescued exception itself.
          raise(e)
        end
      end
    end

    # Timeout-wrapped readline fallback for Ruby <= 1.8.7, which lacks the
    # nonblocking IO support used above.
    def legacy_readline
      begin
        Timeout.timeout(@data[:read_timeout]) do
          @socket.readline
        end
      rescue Timeout::Error
        raise Excon::Errors::Timeout.new('read timeout reached')
      end
    end

    def write(data)
      if @nonblock
        write_nonblock(data)
      else
        write_block(data)
      end
    end

    # Local IP address of the connected socket.
    def local_address
      unpacked_sockaddr[1]
    end

    # Local port of the connected socket.
    def local_port
      unpacked_sockaddr[0]
    end

    private

    # Resolves the target (or proxy) host and tries to connect to each
    # resolved address in turn, stopping once @socket is set. Raises the last
    # encountered exception if no address could be connected.
    def connect
      @socket = nil
      exception = nil

      if @data[:proxy]
        family = @data[:proxy][:family] || ::Socket::Constants::AF_UNSPEC
        args = [@data[:proxy][:hostname], @data[:proxy][:port], family, ::Socket::Constants::SOCK_STREAM]
      else
        family = @data[:family] || ::Socket::Constants::AF_UNSPEC
        args = [@data[:hostname], @data[:port], family, ::Socket::Constants::SOCK_STREAM]
      end
      if RUBY_VERSION >= '1.9.2' && defined?(RUBY_ENGINE) && RUBY_ENGINE == 'ruby'
        args << nil << nil << false # no reverse lookup
      end
      addrinfo = ::Socket.getaddrinfo(*args)

      addrinfo.each do |_, port, _, ip, a_family, s_type|
        @remote_ip = ip

        # already succeeded on previous addrinfo
        if @socket
          break
        end

        # nonblocking connect
        begin
          sockaddr = ::Socket.sockaddr_in(port, ip)

          socket = ::Socket.new(a_family, s_type, 0)

          if @data[:reuseaddr]
            socket.setsockopt(::Socket::Constants::SOL_SOCKET, ::Socket::Constants::SO_REUSEADDR, true)
            if defined?(::Socket::Constants::SO_REUSEPORT)
              socket.setsockopt(::Socket::Constants::SOL_SOCKET, ::Socket::Constants::SO_REUSEPORT, true)
            end
          end

          if @nonblock
            socket.connect_nonblock(sockaddr)
          else
            socket.connect(sockaddr)
          end
          @socket = socket
        rescue Errno::EINPROGRESS
          # connect still in flight; wait for writability up to the timeout
          unless IO.select(nil, [socket], nil, @data[:connect_timeout])
            raise(Excon::Errors::Timeout.new('connect timeout reached'))
          end
          begin
            socket.connect_nonblock(sockaddr)
            @socket = socket
          rescue Errno::EISCONN
            # already connected — success
            @socket = socket
          rescue SystemCallError => exception
            socket.close rescue nil
          end
        rescue SystemCallError => exception
          socket.close rescue nil if socket
        end
      end

      # this will be our last encountered exception
      fail exception unless @socket

      if @data[:tcp_nodelay]
        @socket.setsockopt(::Socket::IPPROTO_TCP,
                           ::Socket::TCP_NODELAY,
                           true)
      end
    end

    # Nonblocking read into @read_buffer; returns up to max_length bytes, or
    # the whole stream contents (until EOF) when max_length is nil.
    def read_nonblock(max_length)
      begin
        if max_length
          until @read_buffer.length >= max_length
            @read_buffer << @socket.read_nonblock(max_length - @read_buffer.length)
          end
        else
          loop do
            @read_buffer << @socket.read_nonblock(@data[:chunk_size])
          end
        end
      rescue OpenSSL::SSL::SSLError => error
        if error.message == 'read would block'
          if timeout_reached('read')
            raise_timeout_error('read')
          else
            retry
          end
        else
          raise(error)
        end
      rescue Errno::EAGAIN, Errno::EWOULDBLOCK, IO::WaitReadable
        if @read_buffer.empty?
          # if we didn't read anything, try again...
          if timeout_reached('read')
            raise_timeout_error('read')
          else
            retry
          end
        end
      rescue EOFError
        @eof = true
      end

      if max_length
        if @read_buffer.empty?
          nil # EOF met at beginning
        else
          @read_buffer.slice!(0, max_length)
        end
      else
        # read until EOFError, so return everything
        @read_buffer.slice!(0, @read_buffer.length)
      end
    end

    # Blocking read; translates retryable SSL/EAGAIN conditions into either a
    # retry or an Excon timeout error.
    def read_block(max_length)
      @socket.read(max_length)
    rescue OpenSSL::SSL::SSLError => error
      if error.message == 'read would block'
        if timeout_reached('read')
          raise_timeout_error('read')
        else
          retry
        end
      else
        raise(error)
      end
    rescue Errno::EAGAIN, Errno::EWOULDBLOCK, IO::WaitReadable
      if @read_buffer.empty?
        if timeout_reached('read')
          raise_timeout_error('read')
        else
          retry
        end
      end
    rescue EOFError
      @eof = true
    end

    # Nonblocking write; loops on short writes until all of data is written.
    def write_nonblock(data)
      if FORCE_ENC
        data.force_encoding('BINARY')
      end
      loop do
        written = nil
        begin
          # I wish that this API accepted a start position, then we wouldn't
          # have to slice data when there is a short write.
          written = @socket.write_nonblock(data)
        rescue OpenSSL::SSL::SSLError, Errno::EAGAIN, Errno::EWOULDBLOCK, IO::WaitWritable => error
          if error.is_a?(OpenSSL::SSL::SSLError) && error.message != 'write would block'
            raise error
          else
            if timeout_reached('write')
              raise_timeout_error('write')
            else
              retry
            end
          end
        end

        # Fast, common case.
        break if written == data.size

        # This takes advantage of the fact that most ruby implementations
        # have Copy-On-Write strings. Thusly why requesting a subrange
        # of data, we actually don't copy data because the new string
        # simply references a subrange of the original.
        data = data[written, data.size]
      end
    end

    # Blocking write with the same retry/timeout handling as write_nonblock.
    def write_block(data)
      @socket.write(data)
    rescue OpenSSL::SSL::SSLError, Errno::EAGAIN, Errno::EWOULDBLOCK, IO::WaitWritable => error
      if error.is_a?(OpenSSL::SSL::SSLError) && error.message != 'write would block'
        raise error
      else
        if timeout_reached('write')
          raise_timeout_error('write')
        else
          retry
        end
      end
    end

    # Waits via IO.select for readiness; returns true only when the
    # read/write timeout expired without the socket becoming ready.
    def timeout_reached(type)
      if type == 'read'
        args = [[@socket], nil, nil, @data[:read_timeout]]
      else
        args = [nil, [@socket], nil, @data[:write_timeout]]
      end
      IO.select(*args) ? nil : true
    end

    def raise_timeout_error(type)
      fail Excon::Errors::Timeout.new("#{type} timeout reached")
    end

    # Memoized [port, ip] pair for the local endpoint; returns nil for
    # non-INET sockaddrs (e.g. unix sockets) instead of raising.
    def unpacked_sockaddr
      @unpacked_sockaddr ||= ::Socket.unpack_sockaddr_in(@socket.to_io.getsockname)
    rescue ArgumentError => e
      unless e.message == 'not an AF_INET/AF_INET6 sockaddr'
        raise
      end
    end
  end
end
|
# Namespace for the Exif gem.
module Exif
  # Current release, following Semantic Versioning.
  VERSION = '0.11.1'
end
bump version
# Namespace for the Exif gem.
module Exif
  # Current release, following Semantic Versioning.
  VERSION = '0.11.2'
end
|
require 'English'
$LOAD_PATH.unshift File.expand_path('lib', __dir__)
require 'spaarti/version'

# Gem packaging metadata for spaarti.
Gem::Specification.new do |spec|
  spec.name = 'spaarti'
  spec.version = Spaarti::VERSION
  spec.date = Time.now.strftime('%Y-%m-%d')
  spec.summary = 'Helper for cloning GitHub repos'
  spec.description = 'Maintain local clones of repos you have access to on GitHub'
  spec.authors = ['Les Aker']
  spec.email = 'me@lesaker.org'
  spec.homepage = 'https://github.com/akerl/spaarti'
  spec.license = 'MIT'

  # Package everything tracked by git; ship the CLI entry point.
  spec.files = `git ls-files`.split
  spec.test_files = `git ls-files spec/*`.split
  spec.executables = ['spaarti']

  # Runtime dependencies.
  spec.add_dependency 'cymbal', '~> 2.0.0'
  spec.add_dependency 'mercenary', '~> 0.3.4'
  spec.add_dependency 'octoauth', '~> 1.5.5'
  spec.add_dependency 'octokit', '~> 4.13.0'

  # Development/test dependencies.
  spec.add_development_dependency 'codecov', '~> 0.1.1'
  spec.add_development_dependency 'fuubar', '~> 2.3.0'
  spec.add_development_dependency 'goodcop', '~> 0.6.0'
  spec.add_development_dependency 'rake', '~> 12.3.0'
  spec.add_development_dependency 'rspec', '~> 3.8.0'
  spec.add_development_dependency 'rubocop', '~> 0.66.0'
  spec.add_development_dependency 'vcr', '~> 4.0.0'
  spec.add_development_dependency 'webmock', '~> 3.5.1'
end
Updated version of octokit to 4.14.0
require 'English'
$LOAD_PATH.unshift File.expand_path('lib', __dir__)
require 'spaarti/version'

# Gem packaging metadata for spaarti.
Gem::Specification.new do |spec|
  spec.name = 'spaarti'
  spec.version = Spaarti::VERSION
  spec.date = Time.now.strftime('%Y-%m-%d')
  spec.summary = 'Helper for cloning GitHub repos'
  spec.description = 'Maintain local clones of repos you have access to on GitHub'
  spec.authors = ['Les Aker']
  spec.email = 'me@lesaker.org'
  spec.homepage = 'https://github.com/akerl/spaarti'
  spec.license = 'MIT'

  # Package everything tracked by git; ship the CLI entry point.
  spec.files = `git ls-files`.split
  spec.test_files = `git ls-files spec/*`.split
  spec.executables = ['spaarti']

  # Runtime dependencies.
  spec.add_dependency 'cymbal', '~> 2.0.0'
  spec.add_dependency 'mercenary', '~> 0.3.4'
  spec.add_dependency 'octoauth', '~> 1.5.5'
  spec.add_dependency 'octokit', '~> 4.14.0'

  # Development/test dependencies.
  spec.add_development_dependency 'codecov', '~> 0.1.1'
  spec.add_development_dependency 'fuubar', '~> 2.3.0'
  spec.add_development_dependency 'goodcop', '~> 0.6.0'
  spec.add_development_dependency 'rake', '~> 12.3.0'
  spec.add_development_dependency 'rspec', '~> 3.8.0'
  spec.add_development_dependency 'rubocop', '~> 0.66.0'
  spec.add_development_dependency 'vcr', '~> 4.0.0'
  spec.add_development_dependency 'webmock', '~> 3.5.1'
end
|
require 'time'
# GitHub-Services hook that records pushed commits in Rally as Changesets,
# linking them to Rally artifacts (defects, stories, tasks...) mentioned by
# FormattedID in the commit messages.
class Service::Rally < Service
  string :server, :username, :workspace, :repository
  password :password
  white_list :server, :workspace, :repository

  attr_accessor :wksp_ref, :user_cache

  # Entry point: validates config, ensures the SCMRepository exists in
  # Rally, then records each commit of the push payload as a Changeset.
  def receive_push
    server = data['server']
    username = data['username']
    password = data['password']
    workspace = data['workspace']
    scm_repository = data['repository']

    raise_config_error("No Server value specified") if server.nil? || server.strip.length == 0
    raise_config_error("No UserName value specified") if username.nil? || username.strip.length == 0
    raise_config_error("No Password value specified") if password.nil? || password.strip.length == 0
    raise_config_error("No Workspace value specified") if workspace.nil? || workspace.strip.length == 0

    branch = payload['ref'].split('/')[-1] # most of the time it'll be refs/heads/master ==> master
    repo = payload['repository']['name']
    repo_uri = payload['repository']['url']

    http.ssl[:verify] = false
    if server =~ /^https?:\/\// # if they have http:// or https://, leave server value unchanged
      http.url_prefix = "#{server}/slm/webservice/1.30"
    else
      server = "#{server}.rallydev.com" if server !~ /\./ # leave unchanged if '.' in server
      http.url_prefix = "https://#{server}/slm/webservice/1.30"
    end
    http.basic_auth(username, password)
    http.headers['Content-Type'] = 'application/json'
    http.headers['X-RallyIntegrationVendor'] = 'Rally'
    http.headers['X-RallyIntegrationName'] = 'GitHub-Service'
    http.headers['X-RallyIntegrationVersion'] = '1.1'

    # create the repo in Rally if it doesn't already exist
    @wksp_ref = validateWorkspace(workspace)
    repo_ref = getOrCreateRepo(scm_repository, repo, repo_uri)

    @user_cache = {}
    payload['commits'].each do |commit|
      artifact_refs = snarfArtifacts(commit['message'])
      addChangeset(commit, repo_ref, artifact_refs, repo_uri, branch)
    end
  end

  # Creates a Rally Changeset for one commit, plus one Change record per
  # added/modified/removed file. Author is resolved (and cached) by email.
  def addChangeset(commit, repo_ref, artifact_refs, repo_uri, branch)
    author = commit['author']['email']
    unless @user_cache.has_key?(author)
      user = rallyQuery('User', 'Name,UserName', 'UserName = "%s"' % [author])
      user_ref = ""
      user_ref = itemRef(user) unless user.nil?
      @user_cache[author] = user_ref
    end
    user_ref = @user_cache[author]

    message = commit['message'][0..3999] # message max size is 4000 characters
    changeset = { 'SCMRepository' => repo_ref,
                  'Revision' => commit['id'],
                  'CommitTimestamp' => Time.iso8601(commit['timestamp']).strftime("%FT%H:%M:%S.00Z"),
                  'Author' => user_ref,
                  'Message' => message,
                  'Uri' => '%s/commit/%s' % [repo_uri, commit['id']],
                  'Artifacts' => artifact_refs # [{'_ref' => 'defect/1324.js'}, {}...]
                }
    changeset.delete('Author') if user_ref == ""

    begin
      changeset_item = rallyCreate('Changeset', changeset)
      chgset_ref = itemRef(changeset_item)
    rescue Faraday::Error # or some other sort of Faraday::Error::xxxError
      raise_config_error("Unable to create Rally Changeset")
    end
    return if changeset_item.nil?

    # change has changeset_ref, Action, PathAndFilename, Uri
    changes = []
    commit['added'].each    { |add| changes << { 'Action' => 'A', 'PathAndFilename' => add } }
    commit['modified'].each { |mod| changes << { 'Action' => 'M', 'PathAndFilename' => mod } }
    commit['removed'].each  { |rem| changes << { 'Action' => 'R', 'PathAndFilename' => rem } }
    changes.each do |change|
      change['Changeset'] = chgset_ref
      change['Uri'] = '%s/blob/%s/%s' % [repo_uri, branch, change['PathAndFilename']]
      rallyCreate('Change', change)
    end
  end

  # Verifies exactly one open workspace with the given name is visible to
  # the configured credentials; returns its relative ref.
  def validateWorkspace(workspace)
    all_your_workspaces = rallyWorkspaces()
    target_workspace = all_your_workspaces.select { |wksp| wksp['Name'] == workspace && wksp['State'] != 'Closed' }
    if target_workspace.length != 1
      problem = 'Config Error: target workspace: %s not available in list of workspaces associated with your credentials' % [workspace]
      raise_config_error(problem)
    end
    return itemRef(target_workspace[0])
  end

  # Looks up the SCMRepository by name, creating it if necessary; returns
  # its relative ref.
  def getOrCreateRepo(scm_repository, repo, repo_uri)
    scm_repository = repo if (scm_repository.nil? || scm_repository == "")
    repo_item = rallyQuery('SCMRepository', 'Name', 'Name = "%s"' % scm_repository)
    return itemRef(repo_item) unless repo_item.nil?

    repo_info = { 'Workspace' => @wksp_ref, 'Name' => scm_repository, 'SCMType' => 'GitHub',
                  'Description' => 'GitHub-Service push changesets',
                  'Uri' => '%s' % [repo_uri]
                }
    repo_item = rallyCreate('SCMRepository', repo_info)
    return itemRef(repo_item)
  end

  # Relative ref like 'changeset/1234' derived from the item's full _ref URL
  # (keeps the last two path segments, drops the trailing '.js').
  def itemRef(item)
    item['_ref'].split('/')[-2..-1].join('/')[0..-4]
  end

  # Fetches the workspaces visible to the configured credentials.
  def rallyWorkspaces()
    response = @http.get('Subscription.js?fetch=Name,Workspaces,Workspace&pretty=true')
    raise_config_error('Config error: credentials not valid for Rally endpoint') if response.status == 401
    raise_config_error('Config error: Unable to obtain your Rally subscription info') unless response.success?
    qr = JSON.parse(response.body)
    begin
      workspaces = qr['Subscription']['Workspaces']
    rescue StandardError
      # FIX: was `rescue Exception`, which also swallows signals/exit;
      # StandardError still covers the NoMethodError raised on missing keys.
      raise_config_error('Config error: No such workspace for your credentials')
    end
    return workspaces
  end

  # Runs a Rally WSAPI query; returns the first result or nil.
  def rallyQuery(entity, fields, criteria)
    target_url = '%s.js?fetch=%s' % [entity.downcase, fields]
    target_url += '&query=(%s)' % [criteria] if criteria.length > 0
    target_url += '&workspace=%s' % [@wksp_ref]
    res = @http.get(target_url)
    # FIX: was `raise StandardError("...")`, which calls a nonexistent method
    # StandardError() and raises NoMethodError; raise with class + message.
    raise StandardError, "Config Error, #{entity} query failed" unless res.success?
    qr = JSON.parse(res.body)['QueryResult']
    item = qr['TotalResultCount'] > 0 ? qr['Results'][0] : nil
    return item
  end

  # Creates a Rally entity from the given attribute hash; returns the item.
  def rallyCreate(entity, data)
    create_url = "%s/create.js?workspace=%s" % [entity, @wksp_ref]
    payload = {"#{entity}" => data}
    res = @http.post(create_url, generate_json(payload))
    raise_config_error("Unable to create the Rally #{entity} for #{data['Name']}") unless res.success?
    cr = JSON.parse(res.body)['CreateResult']
    item = cr['Object']
    return item
  end

  # Scans a commit message for Rally FormattedIDs (e.g. DE123, US42, TA7)
  # and returns an array of {'_ref' => ...} hashes for artifacts that exist.
  def snarfArtifacts(message)
    art_type = { 'D' => 'defect', 'DE' => 'defect', 'DS' => 'defectsuite',
                 'TA' => 'task', 'TC' => 'testcase',
                 'S' => 'hierarchicalrequirement',
                 'US' => 'hierarchicalrequirement'
               }
    formatted_id_pattern = '^(%s)\d+[\.:;]?$' % art_type.keys.join('|') # '^(D|DE|DS|TA|TC|S|US)\d+[\.:;]?$'
    artifact_detector = Regexp.compile(formatted_id_pattern)
    words = message.gsub(',', ' ').gsub('\r\n', '\n').gsub('\n', ' ').gsub('\t', ' ').split(' ')
    rally_formatted_ids = words.select { |word| artifact_detector.match(word) }
    artifacts = [] # actually, just the refs
    rally_formatted_ids.uniq.each do |fmtid|
      next unless fmtid =~ /^(([A-Z]{1,2})\d+)[\.:;]?$/
      fmtid, prefix = $1, $2
      entity = art_type[prefix]
      artifact = rallyQuery(entity, 'Name', 'FormattedID = "%s"' % fmtid)
      next if artifact.nil?
      art_ref = itemRef(artifact)
      artifacts << {'_ref' => art_ref}
    end
    return artifacts
  end
end
Format code to improve reading a little bit and remove trailing spaces.
require 'time'
# GitHub service hook that mirrors pushed commits into Rally as
# Changeset/Change records, linking them to any Rally artifacts
# (defects, stories, tasks, ...) referenced by FormattedID in the
# commit messages.
class Service::Rally < Service
  string :server, :username, :workspace, :repository
  password :password
  white_list :server, :workspace, :repository

  attr_accessor :wksp_ref, :user_cache

  # Framework entry point: invoked once per push event.
  def receive_push
    server = data['server']
    username = data['username']
    password = data['password']
    workspace = data['workspace']
    scm_repository = data['repository']
    raise_config_error("No Server value specified") if server.nil? or server.strip.length == 0
    raise_config_error("No UserName value specified") if username.nil? or username.strip.length == 0
    raise_config_error("No Password value specified") if password.nil? or password.strip.length == 0
    raise_config_error("No Workspace value specified") if workspace.nil? or workspace.strip.length == 0
    branch = payload['ref'].split('/')[-1] # most of the time it'll be refs/heads/master ==> master
    repo = payload['repository']['name']
    repo_uri = payload['repository']['url']
    http.ssl[:verify] = false
    if server =~ /^https?:\/\// # if they have http:// or https://, leave server value unchanged
      http.url_prefix = "#{server}/slm/webservice/1.30"
    else
      server = "#{server}.rallydev.com" if server !~ /\./ # leave unchanged if '.' in server
      http.url_prefix = "https://#{server}/slm/webservice/1.30"
    end
    http.basic_auth(username, password)
    http.headers['Content-Type'] = 'application/json'
    http.headers['X-RallyIntegrationVendor'] = 'Rally'
    http.headers['X-RallyIntegrationName'] = 'GitHub-Service'
    http.headers['X-RallyIntegrationVersion'] = '1.1'
    # create the repo in Rally if it doesn't already exist
    @wksp_ref = validateWorkspace(workspace)
    repo_ref = getOrCreateRepo(scm_repository, repo, repo_uri)
    @user_cache = {}
    payload['commits'].each do |commit|
      artifact_refs = snarfArtifacts(commit['message'])
      addChangeset(commit, repo_ref, artifact_refs, repo_uri, branch)
    end
  end

  # Creates a Rally Changeset for +commit+ plus one Change per touched file.
  def addChangeset(commit, repo_ref, artifact_refs, repo_uri, branch)
    author = commit['author']['email']
    # Resolve the author's Rally User ref once per push ("" = unknown user).
    if !@user_cache.has_key?(author)
      user = rallyQuery('User', 'Name,UserName', 'UserName = "%s"' % [author])
      user_ref = ""
      user_ref = itemRef(user) unless user.nil?
      @user_cache[author] = user_ref
    end
    user_ref = @user_cache[author]
    message = commit['message'][0..3999] # message max size is 4000 characters
    changeset = {
      'SCMRepository' => repo_ref,
      'Revision' => commit['id'],
      'CommitTimestamp' => Time.iso8601(commit['timestamp']).strftime("%FT%H:%M:%S.00Z"),
      'Author' => user_ref,
      'Message' => message,
      'Uri' => '%s/commit/%s' % [repo_uri, commit['id']],
      'Artifacts' => artifact_refs # [{'_ref' => 'defect/1324.js'}, {}...]
    }
    changeset.delete('Author') if user_ref == ""
    begin
      changeset_item = rallyCreate('Changeset', changeset)
      chgset_ref = itemRef(changeset_item)
    rescue Faraday::Error # or some other sort of Faraday::Error::xxxError
      raise_config_error("Unable to create Rally Changeset")
    end
    return if changeset_item.nil?
    # change has changeset_ref, Action, PathAndFilename, Uri
    changes = []
    commit['added'].each { |add| changes << {'Action' => 'A', 'PathAndFilename' => add } }
    commit['modified'].each { |mod| changes << {'Action' => 'M', 'PathAndFilename' => mod } }
    commit['removed'].each { |rem| changes << {'Action' => 'R', 'PathAndFilename' => rem } }
    changes.each do |change|
      change['Changeset'] = chgset_ref
      change['Uri'] = '%s/blob/%s/%s' % [repo_uri, branch, change['PathAndFilename']]
      chg_item = rallyCreate('Change', change)
    end
  end

  # Resolves +workspace+ (by name) to a ref, ensuring it exists and is open.
  def validateWorkspace(workspace)
    all_your_workspaces = rallyWorkspaces()
    target_workspace = all_your_workspaces.select {|wksp| wksp['Name'] == workspace and wksp['State'] != 'Closed'}
    if target_workspace.length != 1
      problem = 'Config Error: target workspace %s not available in list of workspaces associated with your credentials' % [workspace]
      raise_config_error(problem)
    end
    return itemRef(target_workspace[0])
  end

  # Finds the SCMRepository by name, creating it on first push.
  # Falls back to the GitHub repo name when no explicit name is configured.
  def getOrCreateRepo(scm_repository, repo, repo_uri)
    scm_repository = repo if (scm_repository.nil? or scm_repository == "")
    repo_item = rallyQuery('SCMRepository', 'Name', 'Name = "%s"' % scm_repository)
    return itemRef(repo_item) unless repo_item.nil?
    repo_info = {
      'Workspace' => @wksp_ref,
      'Name' => scm_repository,
      'SCMType' => 'GitHub',
      'Description' => 'GitHub-Service push Changesets',
      'Uri' => '%s' % [repo_uri]
    }
    repo_item = rallyCreate('SCMRepository', repo_info)
    return itemRef(repo_item)
  end

  # Extracts a relative ref ("type/oid") from an item's full '_ref' URL,
  # dropping the trailing '.js'.
  def itemRef(item)
    ref = item['_ref'].split('/')[-2..-1].join('/')[0..-4]
  end

  # Fetches the list of workspaces visible to the configured credentials.
  def rallyWorkspaces()
    response = @http.get('Subscription.js?fetch=Name,Workspaces,Workspace&pretty=true')
    raise_config_error('Config error: credentials not valid for Rally endpoint') if response.status == 401
    raise_config_error('Config error: unable to obtain your Rally subscription info') unless response.success?
    qr = JSON.parse(response.body)
    begin
      workspaces = qr['Subscription']['Workspaces']
    rescue StandardError # was `rescue Exception`, which would also swallow signals/exit
      raise_config_error('Config error: no such workspace for your credentials')
    end
    return workspaces
  end

  # Runs a WSAPI query for +entity+, returning the first result or nil.
  def rallyQuery(entity, fields, criteria)
    target_url = '%s.js?fetch=%s' % [entity.downcase, fields]
    target_url += '&query=(%s)' % [criteria] if criteria.length > 0
    target_url += '&workspace=%s' % [@wksp_ref]
    res = @http.get(target_url)
    # Fix: raise takes (class, message); StandardError("...") is not callable.
    raise StandardError, "Config Error: #{entity} query failed" unless res.success?
    qr = JSON.parse(res.body)['QueryResult']
    item = qr['TotalResultCount'] > 0 ? qr['Results'][0] : nil
    return item
  end

  # Creates a Rally object of type +entity+ from the +data+ hash.
  def rallyCreate(entity, data)
    create_url = "%s/create.js?workspace=%s" % [entity, @wksp_ref]
    payload = {"#{entity}" => data}
    res = @http.post(create_url, generate_json(payload))
    raise_config_error("Unable to create the Rally #{entity} for #{data['Name']}") unless res.success?
    cr = JSON.parse(res.body)['CreateResult']
    item = cr['Object']
    return item
  end

  # Scans a commit message for Rally FormattedIDs and returns refs for
  # each matching artifact, e.g. [{'_ref' => 'defect/1324'}].
  def snarfArtifacts(message)
    art_type = {
      'D' => 'defect',
      'DE' => 'defect',
      'DS' => 'defectsuite',
      'TA' => 'task',
      'TC' => 'testcase',
      'S' => 'hierarchicalrequirement',
      'US' => 'hierarchicalrequirement'
    }
    formatted_id_pattern = '^(%s)\d+[\.:;]?$' % art_type.keys.join('|') # '^(D|DE|DS|TA|TC|S|US)\d+[\.:;]?$'
    artifact_detector = Regexp.compile(formatted_id_pattern)
    # NOTE(review): single-quoted gsub patterns match literal "\r\n"/"\n"/"\t"
    # sequences, not real control characters; split(' ') handles real whitespace.
    words = message.gsub(',', ' ').gsub('\r\n', '\n').gsub('\n', ' ').gsub('\t', ' ').split(' ')
    rally_formatted_ids = words.select { |word| artifact_detector.match(word) }
    artifacts = [] # actually, just the refs
    rally_formatted_ids.uniq.each do |fmtid|
      next unless fmtid =~ /^(([A-Z]{1,2})\d+)[\.:;]?$/
      fmtid, prefix = $1, $2
      entity = art_type[prefix]
      artifact = rallyQuery(entity, 'Name', 'FormattedID = "%s"' % fmtid)
      next if artifact.nil?
      art_ref = itemRef(artifact)
      artifacts << {'_ref' => art_ref}
    end
    return artifacts
  end
end
|
require 'rubygems'
require 'nokogiri'
require 'gruff'
require 'date'
# Simple value object pairing a timestamp with a kWh reading.
class Sample
  attr_accessor :time, :kwh

  # time - timestamp of the reading; kwh - energy used in that interval.
  def initialize(time, kwh)
    @time = time
    @kwh  = kwh
  end

  # Human-readable representation used when dumping samples.
  def inspect
    format('<Sample %s %s>', @time, @kwh)
  end
end
# Parse the scraped power-meter page and plot hourly kWh usage to power.png.
data = File.read('dump.html')
document = Nokogiri::HTML(data)
# The image-map <area> tags hold one reading each, newest first.
nodes = document.css('noscript area').to_a.reverse
# difficult to tell if this information is in UTC or not.
hour_increment = 1/24.0
# Start one hour before the first sample (plus a one-minute skew).
timestamp = DateTime.new(2009,9,27,0,0,0) - hour_increment + 1/(24.0*60)
samples = nodes.map do |n|
  kwh = n['alt'].to_f
  timestamp = timestamp + hour_increment
  Sample.new(timestamp, kwh)
end
g = Gruff::Line.new
g.hide_dots = true
g.title = "Power Usage"
g.y_axis_label = "KW/H"
g.data("House", samples.map{|s| s.kwh})
# Label every day at midnight (date) and mid-day (time).  The final slice
# may hold fewer than 13 samples, so guard the mid-day lookup: previously
# s[12].time raised NoMethodError on nil for a partial last day.
i = -24
labels = []
samples.each_slice(24) do |s|
  i += 24
  labels << [i, s.first.time.strftime("%m-%d")]
  labels << [i+13, s[12].time.strftime("%H:%M")] if s[12]
end
g.labels = Hash[labels]
g.theme_pastel()
g.write('power.png')
Small changes to graph.rb, which has been left in the dust.
require 'rubygems'
require 'nokogiri'
require 'gruff'
require 'date'
# A single power reading: when it was taken and how many kWh were used.
class Sample
  attr_accessor :time, :kwh

  def initialize(time, kwh)
    @time, @kwh = time, kwh
  end

  # Compact debugging representation, e.g. "<Sample 2009-09-27 0.42>".
  def inspect
    ['<Sample ', @time, ' ', @kwh, '>'].join
  end
end
# Parse the scraped power-meter page (second dump) and chart hourly usage.
data = File.read('dump2.html')
document = Nokogiri::HTML(data)
# The image-map <area> tags hold one reading each, newest first.
nodes = document.css('noscript area').to_a.reverse
# difficult to tell if this information is in UTC or not.
hour_increment = 1/24.0
# Start one hour before the first reading, plus a one-minute skew.
# NOTE(review): the start date 2009-09-27 is hard-coded — confirm it
# still matches the scraped dump.
timestamp = DateTime.new(2009,9,27,0,0,0) - hour_increment + 1/(24.0*60)
samples = nodes.map do |n|
  kwh = n['alt'].to_f
  timestamp = timestamp + hour_increment
  Sample.new(timestamp, kwh)
end
g = Gruff::Line.new
g.hide_dots = true
g.title = "Power Usage"
g.y_axis_label = "KW/H"
g.data("House", samples.map{|s| s.kwh})
# Label each day: the date at midnight and the time at mid-day.  The
# trailing slice may be shorter than 24 samples, hence the s[12] guard.
i = -24
labels = []
samples.each_slice(24) do |s|
  i+=24
  labels << [i, s.first.time.strftime("%m-%d")]
  labels << [i+13, s[12].time.strftime("%H:%M")] if s[12]
end
g.labels = Hash[labels]
g.theme_pastel()
g.write('power.png')
|
class Octave < Formula
desc "high-level interpreted language for numerical computing"
homepage "https://www.gnu.org/software/octave/index.html"
revision 2
stable do
url "http://ftpmirror.gnu.org/octave/octave-4.0.2.tar.gz"
sha256 "39cd8fd36c218fc00adace28d74a6c7c9c6faab7113a5ba3c4372324c755bdc1"
# Fix alignment of dock widget titles for OSX (bug #46592)
# See: http://savannah.gnu.org/bugs/?46592
patch do
url "http://hg.savannah.gnu.org/hgweb/octave/raw-rev/e870a68742a6"
sha256 "0ddcd8dd032be79d5a846ad2bc190569794e4e1a33ce99f25147d70ae6974682"
end
end
bottle do
sha256 "4fc609b1c414e490006349a86e8d60f6cc0e26549c2482062a2e2b6a895b593c" => :el_capitan
sha256 "6c35a5e895917308387476f608cf425583d2b1c0e3e0110b3296cbd7b777b73b" => :yosemite
sha256 "72e77f5930847f13e97bb8a5dd571cb47504dfe1b4d7cff8e38383801ef03c1a" => :mavericks
end
if OS.mac? && MacOS.clang_version < "7.0"
# Fix the build error with LLVM 3.5svn (-3.6svn?) and libc++ (bug #43298)
# See: http://savannah.gnu.org/bugs/?43298
patch do
url "http://savannah.gnu.org/bugs/download.php?file_id=32255"
sha256 "ef83b32384a37cca13ecdd30d98dacac314b7c23f2c1df3d1113074bd1169c0f"
end
# Fixes includes "base-list.h" and "config.h" in comment-list.h and "oct.h" (bug #41027)
# Core developers don't like this fix, see: http://savannah.gnu.org/bugs/?41027
patch do
url "http://savannah.gnu.org/bugs/download.php?file_id=31400"
sha256 "efdf91390210a64e4732da15dcac576fb1fade7b85f9bacf4010d102c1974729"
end
end
# dependencies needed for head
# "librsvg" and ":tex" are currently not necessary
# since we do not build the pdf docs ("DOC_TARGETS=")
head do
url "http://www.octave.org/hg/octave", :branch => "default", :using => :hg
depends_on :hg => :build
depends_on "autoconf" => :build
depends_on "automake" => :build
depends_on "bison" => :build
depends_on "icoutils" => :build
end
skip_clean "share/info" # Keep the docs
# deprecated options
deprecated_option "without-check" => "without-test"
# options, enabled by default
option "without-curl", "Do not use cURL (urlread/urlwrite/@ftp)"
option "without-docs", "Do not install documentation"
option "without-fftw", "Do not use FFTW (fft,ifft,fft2,etc.)"
option "without-glpk", "Do not use GLPK"
option "without-gnuplot", "Do not use gnuplot graphics"
option "without-gui", "Use the graphical user interface"
option "without-hdf5", "Do not use HDF5 (hdf5 data file support)"
option "without-opengl", "Do not use opengl"
option "without-qhull", "Do not use the Qhull library (delaunay,voronoi,etc.)"
option "without-qrupdate", "Do not use the QRupdate package (qrdelete,qrinsert,qrshift,qrupdate)"
option "without-suite-sparse", "Do not use SuiteSparse (sparse matrix operations)"
option "without-test", "Do not perform build-time tests (not recommended)"
option "without-zlib", "Do not use zlib (compressed MATLAB file formats)"
# options, disabled by default
option "with-audio", "Use the sndfile and portaudio libraries for audio operations"
option "with-fltk", "Build with FLTK graphics backend"
option "with-java", "Use Java, requires Java 6 from https://support.apple.com/kb/DL1572"
option "with-jit", "Use the experimental just-in-time compiler (not recommended)"
option "with-openblas", "Use OpenBLAS instead of native LAPACK/BLAS"
option "with-osmesa", "Use the OSmesa library (incompatible with opengl)"
# build dependencies
depends_on "gnu-sed" => :build
depends_on "pkg-config" => :build
# essential dependencies
depends_on :fortran
depends_on :x11
depends_on "fontconfig"
depends_on "freetype"
depends_on "texinfo" => :build # we need >4.8
depends_on "pcre"
# recommended dependencies (implicit options)
depends_on "readline" => :recommended
depends_on "arpack" => :recommended
depends_on "epstool" => :recommended
depends_on "ghostscript" => :recommended # ps/pdf image output
depends_on "gl2ps" => :recommended
depends_on "graphicsmagick" => :recommended # imread/imwrite
depends_on "transfig" => :recommended
# conditional dependecies (explicit options)
depends_on "curl" if build.with?("curl") && MacOS.version == :leopard
depends_on "fftw" if build.with? "fftw"
depends_on "fltk" if build.with? "fltk"
depends_on "glpk" if build.with? "glpk"
depends_on "gnuplot" if build.with? "gnuplot"
depends_on "hdf5" if build.with? "hdf5"
depends_on :java => "1.6" if build.with? "java"
depends_on "llvm" if build.with? "jit"
depends_on "pstoedit" if build.with? "ghostscript"
depends_on "qhull" if build.with? "qhull"
depends_on "qrupdate" if build.with? "qrupdate"
depends_on "qscintilla2" if build.with? "gui"
depends_on "qt" if build.with? "gui"
depends_on "suite-sparse" if build.with? "suite-sparse"
depends_on "libsndfile" if build.with? "audio"
depends_on "portaudio" if build.with? "audio"
depends_on "veclibfort" if build.without? "openblas"
depends_on "openblas" => (OS.mac? ? :optional : :recommended)
# If GraphicsMagick was built from source, it is possible that it was
# done to change quantum depth. If so, our Octave bottles are no good.
# https://github.com/Homebrew/homebrew-science/issues/2737
if build.with? "graphicsmagick"
  # Only pour our bottle when GraphicsMagick itself was poured from a
  # bottle (i.e. built with the default quantum depth).
  def pour_bottle?
    Tab.for_name("graphicsmagick").bottle?
  end
end
# Configure, build, test (optionally) and install Octave, translating the
# formula's build options into ./configure arguments.
def install
  ENV.m64 if MacOS.prefer_64_bit?
  ENV.append_to_cflags "-D_REENTRANT"
  ENV.append "LDFLAGS", "-L#{Formula["readline"].opt_lib} -lreadline" if build.with? "readline"
  ENV.prepend_path "PATH", "#{Formula["texinfo"].opt_bin}"
  ENV["FONTCONFIG_PATH"] = "/opt/X11/lib/X11/fontconfig"
  # basic arguments
  args = ["--prefix=#{prefix}"]
  args << "--enable-dependency-tracking"
  args << "--enable-link-all-dependencies"
  args << "--enable-shared"
  args << "--disable-static"
  args << "--with-x=no" if OS.mac? # We don't need X11 for Mac at all
  # arguments for options enabled by default
  args << "--without-curl" if build.without? "curl"
  args << "--disable-docs" if build.without? "docs"
  args << "--without-fftw3" if build.without? "fftw"
  args << "--with-fltk-prefix=#{Formula["fltk"].opt_prefix}" if build.with? "fltk"
  args << "--without-glpk" if build.without? "glpk"
  args << "--disable-gui" if build.without? "gui"
  args << "--without-hdf5" if build.without? "hdf5"
  args << "--without-opengl" if build.without? "opengl"
  args << "--without-framework-opengl" if build.without? "opengl"
  args << "--without-OSMesa" if build.without? "osmesa"
  args << "--without-qhull" if build.without? "qhull"
  args << "--without-qrupdate" if build.without? "qrupdate"
  args << "--disable-readline" if build.without? "readline"
  args << "--without-zlib" if build.without? "zlib"
  # arguments for options disabled by default
  # NOTE(review): --with-portaudio/--with-sndfile are added when building
  # *without* audio, which looks inverted — confirm against configure's help.
  args << "--with-portaudio" if build.without? "audio"
  args << "--with-sndfile" if build.without? "audio"
  args << "--disable-java" if build.without? "java"
  args << "--enable-jit" if build.with? "jit"
  # arguments if building without suite-sparse
  if build.without? "suite-sparse"
    args << "--without-amd"
    args << "--without-camd"
    args << "--without-colamd"
    args << "--without-ccolamd"
    args << "--without-cxsparse"
    # (fixed: "--without-camd" was appended twice here)
    args << "--without-cholmod"
    args << "--without-umfpack"
  else
    ENV.append_to_cflags "-L#{Formula["suite-sparse"].opt_lib} -lsuitesparseconfig"
    ENV.append_to_cflags "-L#{Formula["metis"].opt_lib} -lmetis"
  end
  # check if openblas settings are compatible
  if build.with? "openblas"
    if ["arpack", "qrupdate", "suite-sparse"].any? { |n| Tab.for_name(n).without? "openblas" }
      odie "Octave is compiled --with-openblas but arpack, qrupdate or suite-sparse are not."
    else
      args << "--with-blas=-L#{Formula["openblas"].opt_lib} -lopenblas"
    end
  elsif OS.mac? # without "openblas"
    if ["arpack", "qrupdate", "suite-sparse"].any? { |n| Tab.for_name(n).with? "openblas" }
      odie "Arpack, qrupdate or suite-sparse are compiled --with-openblas but Octave is not."
    else
      args << "--with-blas=-L#{Formula["veclibfort"].opt_lib} -lveclibFort"
    end
  else # OS.linux? and without "openblas"
    args << "-lblas -llapack"
  end
  system "./bootstrap" if build.head?
  # libtool needs to see -framework to handle dependencies better.
  inreplace "configure", "-Wl,-framework -Wl,", "-framework "
  # the Mac build configuration passes all linker flags to mkoctfile to
  # be inserted into every oct/mex build. This is actually unnecessary and
  # can cause linking problems.
  inreplace "src/mkoctfile.in.cc", /%OCTAVE_CONF_OCT(AVE)?_LINK_(DEPS|OPTS)%/, '""'
  # make gnuplot the default backend since the native qt backend is rather unstable
  # if (build.with? "gnuplot") && (Tab.for_name("gnuplot").with? "qt")
  #   system "echo", "\"graphics_toolkit('gnuplot');\" >> \"scripts/startup/local-rcfile\""
  #   system "echo", "\"setenv('GNUTERM','qt');\" >> \"scripts/startup/local-rcfile\"" if Tab.for_name("gnuplot").with? "qt"
  # end
  system "./configure", *args
  # call make with "DOC_TARGETS=" such that the manual is not build
  # due to broken osmesa which is required to generate the images
  # however the texinfo for built-in octave help is still generated
  # this can be disabled by "--without-docs"
  system "make", "all", "DOC_TARGETS="
  system "make", "check", "DOC_TARGETS=" if build.with? "test"
  system "make", "install", "DOC_TARGETS="
  prefix.install "test/fntests.log" if File.exist? "test/fntests.log"
end
# Builds the post-install caveats message shown to the user, assembled
# from the options the formula was built with.  Returns "" when nothing
# applies (Homebrew suppresses empty caveats).
def caveats
  s = ""
  # GUI availability note
  if build.with?("gui")
    s += <<-EOS.undent
      The graphical user interface is now used when running Octave interactively.
      The start-up option --no-gui will run the familiar command line interface.
      The option --no-gui-libs runs a minimalist command line interface that does not
      link with the Qt libraries and uses the fltk toolkit for plotting if available.
    EOS
  else
    s += <<-EOS.undent
      The graphical user interface is now enabled by default; run 'octave-cli' or
      install via brew with the option --without-gui to disable it.
    EOS
  end
  # gnuplot configuration hints
  if build.with?("gnuplot")
    s += <<-EOS.undent
      Gnuplot is configured as default graphics toolkit, this can be changed within
      Octave using 'graphics_toolkit'. Other Gnuplot terminals can be used by setting
      the environment variable GNUTERM and building gnuplot with the following options.
        setenv('GNUTERM','qt')    # Requires QT; install gnuplot --with-qt
        setenv('GNUTERM','x11')   # Requires XQuartz; install gnuplot --with-x11
        setenv('GNUTERM','wxt')   # Requires wxmac; install gnuplot --with-wxmac
        setenv('GNUTERM','aqua')  # Requires AquaTerm; install gnuplot --with-aquaterm
      You may also set this variable from within Octave. For printing the cairo backend
      is recommended, i.e., install gnuplot with --with-cairo, and use
        print -dpdfcairo figure.pdf
    EOS
  end
  # osmesa / invisible-figure limitation
  if build.without?("osmesa") || (build.with?("osmesa") && build.with?("opengl"))
    s += <<-EOS.undent
      When using the native qt or fltk toolkits then invisible figures do not work because
      osmesa does currently not work with the Mac's OpenGL implementation. The usage of
      gnuplot is recommended.
    EOS
  end
  # Apple BLAS warning
  if build.without?("openblas")
    s += <<-EOS.undent
      Octave has been compiled with Apple's BLAS routines, this leads to segfaults in some
      tests. The option "--with-openblas" is a more conservative choice.
    EOS
  end
  s += "\n" unless s.empty?
  s
end
# Quick smoke tests: exercise the interpreter and the BLAS bridge.
test do
  system "octave", "--eval", "(22/7 - pi)/pi"
  # this is supposed to crash octave if there is a problem with veclibfort
  system "octave", "--eval", "single ([1+i 2+i 3+i]) * single ([ 4+i ; 5+i ; 6+i])"
end
end
octave: link with correct hdf5 library
Closes #3700.
Signed-off-by: Dominique Orban <b6b12199b0ff1342bf41262c4fcd3a561907b571@gmail.com>
class Octave < Formula
desc "high-level interpreted language for numerical computing"
homepage "https://www.gnu.org/software/octave/index.html"
revision 3
stable do
url "http://ftpmirror.gnu.org/octave/octave-4.0.2.tar.gz"
sha256 "39cd8fd36c218fc00adace28d74a6c7c9c6faab7113a5ba3c4372324c755bdc1"
# Fix alignment of dock widget titles for OSX (bug #46592)
# See: http://savannah.gnu.org/bugs/?46592
patch do
url "http://hg.savannah.gnu.org/hgweb/octave/raw-rev/e870a68742a6"
sha256 "0ddcd8dd032be79d5a846ad2bc190569794e4e1a33ce99f25147d70ae6974682"
end
end
bottle do
sha256 "4fc609b1c414e490006349a86e8d60f6cc0e26549c2482062a2e2b6a895b593c" => :el_capitan
sha256 "6c35a5e895917308387476f608cf425583d2b1c0e3e0110b3296cbd7b777b73b" => :yosemite
sha256 "72e77f5930847f13e97bb8a5dd571cb47504dfe1b4d7cff8e38383801ef03c1a" => :mavericks
end
if OS.mac? && MacOS.clang_version < "7.0"
# Fix the build error with LLVM 3.5svn (-3.6svn?) and libc++ (bug #43298)
# See: http://savannah.gnu.org/bugs/?43298
patch do
url "http://savannah.gnu.org/bugs/download.php?file_id=32255"
sha256 "ef83b32384a37cca13ecdd30d98dacac314b7c23f2c1df3d1113074bd1169c0f"
end
# Fixes includes "base-list.h" and "config.h" in comment-list.h and "oct.h" (bug #41027)
# Core developers don't like this fix, see: http://savannah.gnu.org/bugs/?41027
patch do
url "http://savannah.gnu.org/bugs/download.php?file_id=31400"
sha256 "efdf91390210a64e4732da15dcac576fb1fade7b85f9bacf4010d102c1974729"
end
end
# dependencies needed for head
# "librsvg" and ":tex" are currently not necessary
# since we do not build the pdf docs ("DOC_TARGETS=")
head do
url "http://www.octave.org/hg/octave", :branch => "default", :using => :hg
depends_on :hg => :build
depends_on "autoconf" => :build
depends_on "automake" => :build
depends_on "bison" => :build
depends_on "icoutils" => :build
end
skip_clean "share/info" # Keep the docs
# deprecated options
deprecated_option "without-check" => "without-test"
# options, enabled by default
option "without-curl", "Do not use cURL (urlread/urlwrite/@ftp)"
option "without-docs", "Do not install documentation"
option "without-fftw", "Do not use FFTW (fft,ifft,fft2,etc.)"
option "without-glpk", "Do not use GLPK"
option "without-gnuplot", "Do not use gnuplot graphics"
option "without-gui", "Use the graphical user interface"
option "without-hdf5", "Do not use HDF5 (hdf5 data file support)"
option "without-opengl", "Do not use opengl"
option "without-qhull", "Do not use the Qhull library (delaunay,voronoi,etc.)"
option "without-qrupdate", "Do not use the QRupdate package (qrdelete,qrinsert,qrshift,qrupdate)"
option "without-suite-sparse", "Do not use SuiteSparse (sparse matrix operations)"
option "without-test", "Do not perform build-time tests (not recommended)"
option "without-zlib", "Do not use zlib (compressed MATLAB file formats)"
# options, disabled by default
option "with-audio", "Use the sndfile and portaudio libraries for audio operations"
option "with-fltk", "Build with FLTK graphics backend"
option "with-java", "Use Java, requires Java 6 from https://support.apple.com/kb/DL1572"
option "with-jit", "Use the experimental just-in-time compiler (not recommended)"
option "with-openblas", "Use OpenBLAS instead of native LAPACK/BLAS"
option "with-osmesa", "Use the OSmesa library (incompatible with opengl)"
# build dependencies
depends_on "gnu-sed" => :build
depends_on "pkg-config" => :build
# essential dependencies
depends_on :fortran
depends_on :x11
depends_on "fontconfig"
depends_on "freetype"
depends_on "texinfo" => :build # we need >4.8
depends_on "pcre"
# recommended dependencies (implicit options)
depends_on "readline" => :recommended
depends_on "arpack" => :recommended
depends_on "epstool" => :recommended
depends_on "ghostscript" => :recommended # ps/pdf image output
depends_on "gl2ps" => :recommended
depends_on "graphicsmagick" => :recommended # imread/imwrite
depends_on "transfig" => :recommended
# conditional dependecies (explicit options)
depends_on "curl" if build.with?("curl") && MacOS.version == :leopard
depends_on "fftw" if build.with? "fftw"
depends_on "fltk" if build.with? "fltk"
depends_on "glpk" if build.with? "glpk"
depends_on "gnuplot" if build.with? "gnuplot"
depends_on "hdf5" if build.with? "hdf5"
depends_on :java => "1.6" if build.with? "java"
depends_on "llvm" if build.with? "jit"
depends_on "pstoedit" if build.with? "ghostscript"
depends_on "qhull" if build.with? "qhull"
depends_on "qrupdate" if build.with? "qrupdate"
depends_on "qscintilla2" if build.with? "gui"
depends_on "qt" if build.with? "gui"
depends_on "suite-sparse" if build.with? "suite-sparse"
depends_on "libsndfile" if build.with? "audio"
depends_on "portaudio" if build.with? "audio"
depends_on "veclibfort" if build.without? "openblas"
depends_on "openblas" => (OS.mac? ? :optional : :recommended)
# If GraphicsMagick was built from source, it is possible that it was
# done to change quantum depth. If so, our Octave bottles are no good.
# https://github.com/Homebrew/homebrew-science/issues/2737
if build.with? "graphicsmagick"
  # Only pour our bottle when GraphicsMagick itself was poured from a
  # bottle (i.e. built with the default quantum depth).
  def pour_bottle?
    Tab.for_name("graphicsmagick").bottle?
  end
end
# Configure, build, test (optionally) and install Octave, translating the
# formula's build options into ./configure arguments.
def install
  ENV.m64 if MacOS.prefer_64_bit?
  ENV.append_to_cflags "-D_REENTRANT"
  ENV.append "LDFLAGS", "-L#{Formula["readline"].opt_lib} -lreadline" if build.with? "readline"
  ENV.prepend_path "PATH", "#{Formula["texinfo"].opt_bin}"
  ENV["FONTCONFIG_PATH"] = "/opt/X11/lib/X11/fontconfig"
  # basic arguments
  args = ["--prefix=#{prefix}"]
  args << "--enable-dependency-tracking"
  args << "--enable-link-all-dependencies"
  args << "--enable-shared"
  args << "--disable-static"
  args << "--with-x=no" if OS.mac? # We don't need X11 for Mac at all
  # arguments for options enabled by default
  args << "--without-curl" if build.without? "curl"
  args << "--disable-docs" if build.without? "docs"
  args << "--without-fftw3" if build.without? "fftw"
  args << "--with-fltk-prefix=#{Formula["fltk"].opt_prefix}" if build.with? "fltk"
  args << "--without-glpk" if build.without? "glpk"
  args << "--disable-gui" if build.without? "gui"
  args << "--without-opengl" if build.without? "opengl"
  args << "--without-framework-opengl" if build.without? "opengl"
  args << "--without-OSMesa" if build.without? "osmesa"
  args << "--without-qhull" if build.without? "qhull"
  args << "--without-qrupdate" if build.without? "qrupdate"
  args << "--disable-readline" if build.without? "readline"
  args << "--without-zlib" if build.without? "zlib"
  # arguments for options disabled by default
  # NOTE(review): --with-portaudio/--with-sndfile are added when building
  # *without* audio, which looks inverted — confirm against configure's help.
  args << "--with-portaudio" if build.without? "audio"
  args << "--with-sndfile" if build.without? "audio"
  args << "--disable-java" if build.without? "java"
  args << "--enable-jit" if build.with? "jit"
  # ensure that the right hdf5 library is used
  if build.with? "hdf5"
    args << "--with-hdf5-includedir=#{Formula["hdf5"].opt_include}"
    args << "--with-hdf5-libdir=#{Formula["hdf5"].opt_lib}"
  else
    args << "--without-hdf5"
  end
  # arguments if building without suite-sparse
  if build.without? "suite-sparse"
    args << "--without-amd"
    args << "--without-camd"
    args << "--without-colamd"
    args << "--without-ccolamd"
    args << "--without-cxsparse"
    # (fixed: "--without-camd" was appended twice here)
    args << "--without-cholmod"
    args << "--without-umfpack"
  else
    ENV.append_to_cflags "-L#{Formula["suite-sparse"].opt_lib} -lsuitesparseconfig"
    ENV.append_to_cflags "-L#{Formula["metis"].opt_lib} -lmetis"
  end
  # check if openblas settings are compatible
  if build.with? "openblas"
    if ["arpack", "qrupdate", "suite-sparse"].any? { |n| Tab.for_name(n).without? "openblas" }
      odie "Octave is compiled --with-openblas but arpack, qrupdate or suite-sparse are not."
    else
      args << "--with-blas=-L#{Formula["openblas"].opt_lib} -lopenblas"
    end
  elsif OS.mac? # without "openblas"
    if ["arpack", "qrupdate", "suite-sparse"].any? { |n| Tab.for_name(n).with? "openblas" }
      odie "Arpack, qrupdate or suite-sparse are compiled --with-openblas but Octave is not."
    else
      args << "--with-blas=-L#{Formula["veclibfort"].opt_lib} -lveclibFort"
    end
  else # OS.linux? and without "openblas"
    args << "-lblas -llapack"
  end
  system "./bootstrap" if build.head?
  # libtool needs to see -framework to handle dependencies better.
  inreplace "configure", "-Wl,-framework -Wl,", "-framework "
  # the Mac build configuration passes all linker flags to mkoctfile to
  # be inserted into every oct/mex build. This is actually unnecessary and
  # can cause linking problems.
  inreplace "src/mkoctfile.in.cc", /%OCTAVE_CONF_OCT(AVE)?_LINK_(DEPS|OPTS)%/, '""'
  # make gnuplot the default backend since the native qt backend is rather unstable
  # if (build.with? "gnuplot") && (Tab.for_name("gnuplot").with? "qt")
  #   system "echo", "\"graphics_toolkit('gnuplot');\" >> \"scripts/startup/local-rcfile\""
  #   system "echo", "\"setenv('GNUTERM','qt');\" >> \"scripts/startup/local-rcfile\"" if Tab.for_name("gnuplot").with? "qt"
  # end
  system "./configure", *args
  # call make with "DOC_TARGETS=" such that the manual is not build
  # due to broken osmesa which is required to generate the images
  # however the texinfo for built-in octave help is still generated
  # this can be disabled by "--without-docs"
  system "make", "all", "DOC_TARGETS="
  system "make", "check", "DOC_TARGETS=" if build.with? "test"
  system "make", "install", "DOC_TARGETS="
  prefix.install "test/fntests.log" if File.exist? "test/fntests.log"
end
# Builds the post-install caveats message shown to the user, assembled
# from the options the formula was built with.  Returns "" when nothing
# applies (Homebrew suppresses empty caveats).
def caveats
  s = ""
  # GUI availability note
  if build.with?("gui")
    s += <<-EOS.undent
      The graphical user interface is now used when running Octave interactively.
      The start-up option --no-gui will run the familiar command line interface.
      The option --no-gui-libs runs a minimalist command line interface that does not
      link with the Qt libraries and uses the fltk toolkit for plotting if available.
    EOS
  else
    s += <<-EOS.undent
      The graphical user interface is now enabled by default; run 'octave-cli' or
      install via brew with the option --without-gui to disable it.
    EOS
  end
  # gnuplot configuration hints
  if build.with?("gnuplot")
    s += <<-EOS.undent
      Gnuplot is configured as default graphics toolkit, this can be changed within
      Octave using 'graphics_toolkit'. Other Gnuplot terminals can be used by setting
      the environment variable GNUTERM and building gnuplot with the following options.
        setenv('GNUTERM','qt')    # Requires QT; install gnuplot --with-qt
        setenv('GNUTERM','x11')   # Requires XQuartz; install gnuplot --with-x11
        setenv('GNUTERM','wxt')   # Requires wxmac; install gnuplot --with-wxmac
        setenv('GNUTERM','aqua')  # Requires AquaTerm; install gnuplot --with-aquaterm
      You may also set this variable from within Octave. For printing the cairo backend
      is recommended, i.e., install gnuplot with --with-cairo, and use
        print -dpdfcairo figure.pdf
    EOS
  end
  # osmesa / invisible-figure limitation
  if build.without?("osmesa") || (build.with?("osmesa") && build.with?("opengl"))
    s += <<-EOS.undent
      When using the native qt or fltk toolkits then invisible figures do not work because
      osmesa does currently not work with the Mac's OpenGL implementation. The usage of
      gnuplot is recommended.
    EOS
  end
  # Apple BLAS warning
  if build.without?("openblas")
    s += <<-EOS.undent
      Octave has been compiled with Apple's BLAS routines, this leads to segfaults in some
      tests. The option "--with-openblas" is a more conservative choice.
    EOS
  end
  s += "\n" unless s.empty?
  s
end
# Quick smoke tests: exercise the interpreter and the BLAS bridge.
test do
  system "octave", "--eval", "(22/7 - pi)/pi"
  # this is supposed to crash octave if there is a problem with veclibfort
  system "octave", "--eval", "single ([1+i 2+i 3+i]) * single ([ 4+i ; 5+i ; 6+i])"
end
end
|
# Namespace holding the gem's version constant.
module Shanty
  # Current gem version string.
  VERSION = '0.0.24'
end
Bump version to v0.0.25
module Shanty
  # Current gem version string.
  VERSION = "0.0.25"
end
|
module Shanty
  # Current gem version string.
  VERSION = "0.0.18"
end
Bump version to v0.0.19
module Shanty
  # Current gem version string.
  VERSION = "0.0.19"
end
|
# use three modules to keep indentation in this file
module Indent
module AssertCookie
# Value object for a single parsed Set-Cookie entry: the cookie value
# plus the attributes exposed by the attr_readers below.
class ResponseCookie
attr_reader :value, :path, :domain, :expires, :http_only, :secure
# Maps the attribute names as they appear in a Set-Cookie header to the
# instance-variable names backing the readers above.
KEY_MAP = {
'domain' => 'domain',
'path' => 'path',
'expires' => 'expires',
'HttpOnly' => 'http_only',
'secure' => 'secure',
}
# value   - the cookie's value
# options - attribute hash; every key must appear in KEY_MAP, any
#           other key raises ArgumentError.
def initialize(value, options={})
@value = value
options.each do |key, value|
target_key = KEY_MAP[key.to_s]
if target_key
instance_variable_set("@#{target_key}", value)
else
raise ArgumentError, "Unknown option: #{key}"
end
end
end
end
module Assertions
# Custom assertions for cookies
#
# assert_cookie :pass,
# :value => lambda { |value| UUID.parse(value).valid? }
#
# assert_cookie :yellow, :value => ['sunny', 'days']
#
# assert_cookie :delight, :value => 'yum'
#
# assert_cookie :secret, :path => lambda { |path| path =~ /secret/ },
# :secure => true
#
# Asserts that a cookie named +name+ exists and, for each given option,
# that its value/path/domain/expires/secure attribute matches (options
# accept either a literal or a callable).
def assert_cookie(name, options={}, message="")
clean_backtrace do
cookie = cookies[name.to_s]
# this plugin has no rails version attached to it, so the following might be actually wrong
if cookie.respond_to?(:value)
value = cookie.value
elsif cookie.is_a?(String)
value = cookie
else
assert false, "Cookie was something unexpected: #{cookie.inspect}"
end
# rails escapes cookies when it sets them, but
# does not unescape them when it reads them back
if value.is_a?(Array)
value = value.map do |v|
CGI.unescape(v)
end
else
value = CGI.unescape(value)
end
msg = build_message(message, "expected cookie named <?> but it was not found.", name)
assert_not_nil cookie, msg
# :value handling, only applied when the option was given:
# - a callable is invoked once per value,
# - a non-String enumerable requires every listed entry to be present,
# - anything else must be contained in the (unescaped) value.
case
when options[:value].respond_to?(:call)
msg = build_message(message,
"expected result of value block to be true but it was false.")
value.each do |value|
assert(options[:value].call(value), msg)
end
when options[:value].respond_to?(:each) && !options[:value].is_a?(String)
options[:value].each do |required_value|
msg = build_message(message,
"expected cookie value to include <?> but it was not found.", required_value)
assert(value.include?(required_value), msg)
end
else
msg = build_message(message, "expected cookie value to be <?> but it was <?>.",
options[:value], value)
assert(value.include?(options[:value]), msg)
end if options.key?(:value)
# Re-fetch the cookie with full attributes for the metadata checks below.
cookie = cookie(name)
assert_call_or_value :path, options, cookie, message
assert_call_or_value :domain, options, cookie, message
assert_call_or_value :expires, options, cookie, message
assert_call_or_value :secure, options, cookie, message
end
end
# Tests that a cookie named +name+ does not exist. This is useful
# because cookies['name'] may be nil or [] in a functional test.
#
# assert_no_cookie :chocolate
def assert_no_cookie(name, message="")
cookie = cookies[name.to_s]
msg = build_message(message, "no cookie expected but found <?>.", name)
assert_block(msg) { cookie.nil? or (cookie.kind_of?(Array) and cookie.blank?) or cookie == '' }
end
# Opposite of assert_no_cookie: the cookie must be present and non-empty.
def assert_cookie_set(name, message="")
cookie = cookies[name.to_s]
msg = build_message(message, "expected cookie named <?> but it was not found.", name)
assert_block(msg) { !(cookie.nil? or (cookie.kind_of?(Array) and cookie.blank?) or cookie == '') }
end
# Drops all cookies by resetting the integration session.
def clear_cookies
# or: @integration_session.instance_variable_set("@cookies", {})
reset!
end
protected
# Returns a ResponseCookie for +name+: parsed from the response's
# Set-Cookie header when a response is present, otherwise converted
# from the test session's cookie jar.
def cookie(name)
if @response
parsed_cookies = parse_cookies(@response.headers['Set-Cookie'])
parsed_cookies[name.to_s]
else
convert_cookie(cookies[name.to_s])
end
end
# Parses raw Set-Cookie header data (String or Array of header lines)
# into a hash of cookie name => ResponseCookie.
def parse_cookies(cookies)
# rails discards everything but cookie value for integration session
# amazing
# do the parsing ourselves
cookies_hash = {}
cookies = cookies.to_s.split("\n") unless cookies.is_a?(Array)
cookies.each do |cookie|
name, value, options = cookie.match(/^([^=]*)=([^;]*);(.*)/)[1,3]
options = options.split(';')
options_hash = {}
options.each do |option|
option_name, option_value = option.strip.split('=')
# Valueless attributes (e.g. "secure") are recorded as true.
options_hash[option_name] = option_value || true
end
value = CGI.unescape(value)
cookie = ResponseCookie.new(value, options_hash)
cookies_hash[name.to_s] = cookie
end
cookies_hash
end
# Wraps a CGI::Cookie-style object from the cookie jar in a
# ResponseCookie so both branches of #cookie return the same type.
def convert_cookie(cookie)
options_hash = {
'domain' => cookie.domain,
'path' => cookie.path,
'expires' => cookie.expires,
# HttpOnly is not supported by cgi module
'HttpOnly' => nil,
'secure' => cookie.secure,
}
ResponseCookie.new(cookie, options_hash)
end
# Shared matcher for the :path/:domain/:expires/:secure options of
# assert_cookie: a callable option is invoked with the attribute value,
# any other option is compared via assert_equal. No-op when the option
# is absent.
def assert_call_or_value(name, options, cookie, message="")
case
when options[name].respond_to?(:call)
msg = build_message(message,
"expected result of <?> block to be true but it was false.", name.to_s)
assert(options[name].call(cookie.send(name)), msg)
else
msg = build_message(message, "expected cookie <?> to be <?> but it was <?>.",
name.to_s, options[name], cookie.send(name))
assert_equal(options[name], cookie.send(name), msg)
end if options.key?(name)
end
end
end
end
Factor out cookie retrieval method such that we can patch it in the app code
# use three modules to keep indentation in this file
module Indent
module AssertCookie
# Value object for a single parsed Set-Cookie entry: the cookie value
# plus the attributes exposed by the attr_readers below.
class ResponseCookie
attr_reader :value, :path, :domain, :expires, :http_only, :secure
# Maps the attribute names as they appear in a Set-Cookie header to the
# instance-variable names backing the readers above.
KEY_MAP = {
'domain' => 'domain',
'path' => 'path',
'expires' => 'expires',
'HttpOnly' => 'http_only',
'secure' => 'secure',
}
# value   - the cookie's value
# options - attribute hash; every key must appear in KEY_MAP, any
#           other key raises ArgumentError.
def initialize(value, options={})
@value = value
options.each do |key, value|
target_key = KEY_MAP[key.to_s]
if target_key
instance_variable_set("@#{target_key}", value)
else
raise ArgumentError, "Unknown option: #{key}"
end
end
end
end
module Assertions
# Custom assertions for cookies
#
# assert_cookie :pass,
# :value => lambda { |value| UUID.parse(value).valid? }
#
# assert_cookie :yellow, :value => ['sunny', 'days']
#
# assert_cookie :delight, :value => 'yum'
#
# assert_cookie :secret, :path => lambda { |path| path =~ /secret/ },
# :secure => true
#
# Asserts that a cookie named +name+ exists and, for each given option,
# that its value/path/domain/expires/secure attribute matches (options
# accept either a literal or a callable).
def assert_cookie(name, options={}, message="")
clean_backtrace do
cookie = get_cookie(name)
# this plugin has no rails version attached to it, so the following might be actually wrong
if cookie.respond_to?(:value)
value = cookie.value
elsif cookie.is_a?(String)
value = cookie
else
assert false, "Cookie was something unexpected: #{cookie.inspect}"
end
# rails escapes cookies when it sets them, but
# does not unescape them when it reads them back
if value.is_a?(Array)
value = value.map do |v|
CGI.unescape(v)
end
else
value = CGI.unescape(value)
end
msg = build_message(message, "expected cookie named <?> but it was not found.", name)
assert_not_nil cookie, msg
# :value handling, only applied when the option was given:
# - a callable is invoked once per value,
# - a non-String enumerable requires every listed entry to be present,
# - anything else must be contained in the (unescaped) value.
case
when options[:value].respond_to?(:call)
msg = build_message(message,
"expected result of value block to be true but it was false.")
value.each do |value|
assert(options[:value].call(value), msg)
end
when options[:value].respond_to?(:each) && !options[:value].is_a?(String)
options[:value].each do |required_value|
msg = build_message(message,
"expected cookie value to include <?> but it was not found.", required_value)
assert(value.include?(required_value), msg)
end
else
msg = build_message(message, "expected cookie value to be <?> but it was <?>.",
options[:value], value)
assert(value.include?(options[:value]), msg)
end if options.key?(:value)
# Re-fetch the cookie with full attributes for the metadata checks below.
cookie = full_cookie(name)
assert_call_or_value :path, options, cookie, message
assert_call_or_value :domain, options, cookie, message
assert_call_or_value :expires, options, cookie, message
assert_call_or_value :secure, options, cookie, message
end
end
# Tests that a cookie named +name+ does not exist. This is useful
# because cookies['name'] may be nil or [] in a functional test.
#
# assert_no_cookie :chocolate
def assert_no_cookie(name, message="")
cookie = get_cookie(name)
msg = build_message(message, "no cookie expected but found <?>.", name)
assert_block(msg) { cookie.nil? or (cookie.kind_of?(Array) and cookie.blank?) or cookie == '' }
end
# Opposite of assert_no_cookie: the cookie must be present and non-empty.
def assert_cookie_set(name, message="")
cookie = get_cookie(name)
msg = build_message(message, "expected cookie named <?> but it was not found.", name)
assert_block(msg) { !(cookie.nil? or (cookie.kind_of?(Array) and cookie.blank?) or cookie == '') }
end
# Drops all cookies by resetting the integration session.
def clear_cookies
# or: @integration_session.instance_variable_set("@cookies", {})
reset!
end
protected
# Returns a ResponseCookie for +name+ with all attributes: parsed from
# the response's Set-Cookie header when a response is present, otherwise
# converted from the test session's cookie jar.
def full_cookie(name)
if @response
parsed_cookies = parse_cookies(@response.headers['Set-Cookie'])
parsed_cookies[name.to_s]
else
convert_cookie(cookies[name.to_s])
end
end
# Raw cookie lookup, factored out so application code can override
# where cookies are read from.
def get_cookie(name)
cookies[name.to_s]
end
# Parses raw Set-Cookie header data (String or Array of header lines)
# into a hash of cookie name => ResponseCookie.
def parse_cookies(cookies)
# rails discards everything but cookie value for integration session
# amazing
# do the parsing ourselves
cookies_hash = {}
cookies = cookies.to_s.split("\n") unless cookies.is_a?(Array)
cookies.each do |cookie|
name, value, options = cookie.match(/^([^=]*)=([^;]*);(.*)/)[1,3]
options = options.split(';')
options_hash = {}
options.each do |option|
option_name, option_value = option.strip.split('=')
# Valueless attributes (e.g. "secure") are recorded as true.
options_hash[option_name] = option_value || true
end
value = CGI.unescape(value)
cookie = ResponseCookie.new(value, options_hash)
cookies_hash[name.to_s] = cookie
end
cookies_hash
end
# Wraps a CGI::Cookie-style object from the cookie jar in a
# ResponseCookie so both branches of #full_cookie return the same type.
def convert_cookie(cookie)
options_hash = {
'domain' => cookie.domain,
'path' => cookie.path,
'expires' => cookie.expires,
# HttpOnly is not supported by cgi module
'HttpOnly' => nil,
'secure' => cookie.secure,
}
ResponseCookie.new(cookie, options_hash)
end
# Shared matcher for the :path/:domain/:expires/:secure options of
# assert_cookie: a callable option is invoked with the attribute value,
# any other option is compared via assert_equal. No-op when the option
# is absent.
def assert_call_or_value(name, options, cookie, message="")
case
when options[name].respond_to?(:call)
msg = build_message(message,
"expected result of <?> block to be true but it was false.", name.to_s)
assert(options[name].call(cookie.send(name)), msg)
else
msg = build_message(message, "expected cookie <?> to be <?> but it was <?>.",
name.to_s, options[name], cookie.send(name))
assert_equal(options[name], cookie.send(name), msg)
end if options.key?(name)
end
end
end
end
|
# Gem version constant for Shelly.
module Shelly
  # Frozen so the version string cannot be mutated at runtime.
  VERSION = "0.1.9".freeze
end
Bump version to 0.1.10
# Gem version constant for Shelly.
module Shelly
  # Frozen so the version string cannot be mutated at runtime.
  VERSION = "0.1.10".freeze
end
|
# Adds asset-version support to the stock javascript/stylesheet include
# helpers: an explicit :version option (or the global RAILS_ASSET_VERSION
# constant, when defined) is appended to the :cache bundle name so that
# deploys bust browser caches.
module ActionView::Helpers::AssetTagHelper
  def javascript_include_tag_with_versioned(*sources)
    set_cache_options_with_versioning_from_sources!(sources)
    javascript_include_tag_without_versioned(*sources)
  end

  def stylesheet_link_tag_with_versioned(*sources)
    set_cache_options_with_versioning_from_sources!(sources)
    stylesheet_link_tag_without_versioned(*sources)
  end

  alias_method_chain :javascript_include_tag, :versioned
  alias_method_chain :stylesheet_link_tag, :versioned

  private

  # Pops the options hash off +sources+, rewrites its 'cache' entry to a
  # versioned bundle name and pushes the hash back on.
  def set_cache_options_with_versioning_from_sources!(sources)
    options = sources.extract_options!.stringify_keys!
    options['cache'] = version_cache_name(options['cache'], options.delete('version'))
    sources << options
  end

  # Returns the versioned cache bundle name ('all' is used when cache is
  # a non-String truthy value). Fixes two defects: the method now
  # returns cache_name unchanged when no versioning applies (previously
  # it returned nil, silently disabling caching), and the optional
  # RAILS_ASSET_VERSION constant is only read when it is defined
  # (previously a NameError when unset).
  def version_cache_name(cache_name, version)
    version ||= defined?(RAILS_ASSET_VERSION) && RAILS_ASSET_VERSION
    if cache_name && version
      cache_name = 'all' unless cache_name.is_a? String
      cache_name = [cache_name, version].join('_')
    end
    cache_name
  end
end
Fix two bugs: version_cache_name now always returns the cache name (previously it returned nil when versioning did not apply), and the asset version constant is only read when it is defined.
# Wraps the standard asset include helpers so that an asset version —
# either an explicit :version option or the global ::ASSET_VERSION
# constant — becomes part of the cache bundle name.
module ActionView::Helpers::AssetTagHelper
  def javascript_include_tag_with_versioned(*sources)
    set_cache_options_with_versioning_from_sources!(sources)
    javascript_include_tag_without_versioned(*sources)
  end

  def stylesheet_link_tag_with_versioned(*sources)
    set_cache_options_with_versioning_from_sources!(sources)
    stylesheet_link_tag_without_versioned(*sources)
  end

  alias_method_chain :javascript_include_tag, :versioned
  alias_method_chain :stylesheet_link_tag, :versioned

  private

  # Extracts the trailing options hash, rewrites its 'cache' entry to
  # the versioned form and reattaches the hash to the source list.
  def set_cache_options_with_versioning_from_sources!(sources)
    opts = sources.extract_options!.stringify_keys!
    opts['cache'] = version_cache_name(opts['cache'], opts.delete('version'))
    sources.push(opts)
  end

  # Builds the versioned bundle name. Falls back to ::ASSET_VERSION
  # (when defined) if no explicit version is given; returns cache_name
  # untouched when there is nothing to version.
  def version_cache_name(cache_name, version)
    effective = version || (defined?(::ASSET_VERSION) && ::ASSET_VERSION)
    return cache_name unless cache_name && effective
    base = cache_name.is_a?(String) ? cache_name : 'all'
    [base, effective].join('_')
  end
end
|
require 'selenium-webdriver'
require 'capybara'
# Drives one BrowserStack browser/device configuration through Capybara
# so that per-platform screenshots can be taken.
class Shoot::Scenario
# Remote Selenium hub, authenticated with BrowserStack credentials
# taken from the environment.
URL = sprintf 'http://%s:%s@hub.browserstack.com/wd/hub',
ENV['BROWSERSTACK_USER'],
ENV['BROWSERSTACK_KEY']
include Capybara::DSL
# platform - hash describing the target (browser, browser_version, os,
#            os_version and optionally device).
def initialize(platform)
@platform = platform
config_capabilities
Capybara.register_driver platform_name do |app|
Capybara::Selenium::Driver.new(app,
browser: :remote,
url: URL,
desired_capabilities: @capabilities)
end
puts "Running #{platform_name}"
Capybara.current_driver = platform_name
end
# Human-readable platform identifier: the device name when present,
# otherwise "browser browser_version os os_version".
def platform_name
@platform_name ||=
if @platform['device']
@platform['device']
else
@platform.values_at(*%w(browser browser_version os os_version)).join(' ')
end
end
# Prints a confirmation and shuts down the remote browser session.
def ok
puts 'OK'
page.driver.quit
end
# Translates the platform hash into Selenium remote capabilities.
def config_capabilities # rubocop:disable AbcSize
@capabilities = Selenium::WebDriver::Remote::Capabilities.new
{
browser: @platform['browser'],
browser_version: @platform['browser_version'],
os: @platform['os'],
os_version: @platform['os_version'],
'browserstack.debug' => 'true',
name: "Digital Goods - #{@platform}",
browserName: @platform['browser'],
platform: @platform['os']
}.each { |capability, setting| @capabilities[capability] = setting }
@capabilities[:device] = @platform['device'] if @platform['device']
end
# Runs the scenario method +method+ and saves a screenshot of the
# resulting page under .screenshots/.
def shoot(method)
send(method)
sleep(1) # Just in case
require 'fileutils'
FileUtils.mkdir_p '.screenshots'
save_screenshot(".screenshots/#{method} #{platform_name}.png")
end
end
Moves methods to private on Scenario
require 'selenium-webdriver'
require 'capybara'
# Drives one BrowserStack browser/device configuration through Capybara
# so that per-platform screenshots can be taken.
class Shoot::Scenario
# Remote Selenium hub, authenticated with BrowserStack credentials
# taken from the environment.
URL = sprintf 'http://%s:%s@hub.browserstack.com/wd/hub',
ENV['BROWSERSTACK_USER'],
ENV['BROWSERSTACK_KEY']
include Capybara::DSL
# platform - hash describing the target (browser, browser_version, os,
#            os_version and optionally device).
def initialize(platform)
@platform = platform
config_capabilities
Capybara.register_driver platform_name do |app|
Capybara::Selenium::Driver.new(app,
browser: :remote,
url: URL,
desired_capabilities: @capabilities)
end
puts "Running #{platform_name}"
Capybara.current_driver = platform_name
end
# Runs the scenario method +method+ and saves a screenshot of the
# resulting page under .screenshots/.
def shoot(method)
send(method)
Kernel.sleep(1) # Just in case
require 'fileutils'
FileUtils.mkdir_p '.screenshots'
save_screenshot(".screenshots/#{method} #{platform_name}.png")
end
private
# Human-readable platform identifier: the device name when present,
# otherwise "browser browser_version os os_version".
def platform_name
@platform_name ||=
if @platform['device']
@platform['device']
else
@platform.values_at(*%w(browser browser_version os os_version)).join(' ')
end
end
# Prints a confirmation and shuts down the remote browser session.
def ok
puts 'OK'
page.driver.quit
end
# Translates the platform hash into Selenium remote capabilities.
def config_capabilities # rubocop:disable AbcSize
@capabilities = Selenium::WebDriver::Remote::Capabilities.new
{
browser: @platform['browser'],
browser_version: @platform['browser_version'],
os: @platform['os'],
os_version: @platform['os_version'],
'browserstack.debug' => 'true',
name: "Digital Goods - #{@platform}",
browserName: @platform['browser'],
platform: @platform['os']
}.each { |capability, setting| @capabilities[capability] = setting }
@capabilities[:device] = @platform['device'] if @platform['device']
end
end
|
require 'uri'
require 'json'
require 'shellwords'
module Shrimp
# Raised when the configured phantomjs binary cannot be found on disk.
class NoExecutableError < StandardError
def initialize
super("No phantomjs executable found at #{Shrimp.configuration.phantomjs}\n" \
">> Please install phantomjs - http://phantomjs.org/download.html")
end
end
# Raised when the given source is neither a valid URL nor a file.
class ImproperSourceError < StandardError
def initialize(details = nil)
super("Improper Source: #{details}")
end
end
# Raised by Phantom#run! when phantomjs exits with a non-zero status.
class RenderingError < StandardError
def initialize(details = nil)
super("Rendering Error: #{details}")
end
end
# Wraps an invocation of the phantomjs binary (driving rasterize.js) to
# render a URL or local HTML file into a PDF document.
class Phantom
attr_accessor :source, :configuration, :outfile
attr_reader :options, :cookies, :result, :error
SCRIPT_FILE = File.expand_path('../rasterize.js', __FILE__)
# Public: Runs the phantomjs binary
#
# Returns the stdout output of phantomjs, or nil on failure; on failure
# the output is available via #error instead.
def run
@error = nil
@result = `#{cmd}`
unless $?.exitstatus == 0
@error = @result
@result = nil
end
@result
end
# Like #run, but raises RenderingError when phantomjs exits non-zero.
def run!
@error = nil
@result = `#{cmd}`
unless $?.exitstatus == 0
@error = @result
@result = nil
raise RenderingError.new(@error)
end
@result
end
# Public: Returns the phantom rasterize command
def cmd
require 'digest' # Digest::MD5 is used below but 'digest' is not required at file scope
cookie_file = dump_cookies
format, zoom, margin, orientation = options[:format], options[:zoom], options[:margin], options[:orientation]
rendering_time, timeout = options[:rendering_time], options[:rendering_timeout]
viewport_width, viewport_height = options[:viewport_width], options[:viewport_height]
max_redirect_count = options[:max_redirect_count]
# Header/footer/language are single-quoted so phantomjs receives each
# as one shell argument.
header = "'#{options[:header]}'"
footer = "'#{(options[:footer])}'"
language = "'#{(options[:language])}'"
@outfile ||= "#{options[:tmpdir]}/#{Digest::MD5.hexdigest((Time.now.to_i + rand(9001)).to_s)}.pdf"
command_config_file = "--config=#{options[:command_config_file]}"
[
Shrimp.configuration.phantomjs,
command_config_file,
SCRIPT_FILE,
@source.to_s.shellescape,
@outfile,
format,
zoom,
margin,
orientation,
cookie_file,
rendering_time,
timeout,
viewport_width,
viewport_height,
max_redirect_count,
header,
footer,
language
].join(" ")
end
# Public: initializes a new Phantom Object
#
# url_or_file - The url of the html document to render
# options - a hash with options for rendering
# * format - the paper format for the output eg: "5in*7.5in", "10cm*20cm", "A4", "Letter"
# * zoom - the viewport zoom factor
# * margin - the margins for the pdf
# * command_config_file - the path to a json configuration file for command-line options
# cookies - hash with cookies to use for rendering
# outfile - optional path for the output file a Tempfile will be created if not given
#
# Returns self
def initialize(url_or_file, options = { }, cookies={ }, outfile = nil)
@source = Source.new(url_or_file)
@options = Shrimp.configuration.default_options.merge(options)
@cookies = cookies
@outfile = File.expand_path(outfile) if outfile
# File.exists? was deprecated and removed in Ruby 3.2; File.exist? is
# the supported spelling.
raise NoExecutableError.new unless File.exist?(Shrimp.configuration.phantomjs)
end
# Public: renders to pdf
# path - the destination path defaults to outfile
#
# Returns the path to the pdf file
def to_pdf(path=nil)
@outfile = File.expand_path(path) if path
self.run
@outfile
end
# Public: renders to pdf
# path - the destination path defaults to outfile
#
# Returns a File Handle of the Resulting pdf
def to_file(path=nil)
self.to_pdf(path)
File.new(@outfile)
end
# Public: renders to pdf
# path - the destination path defaults to outfile
#
# Returns the binary string of the pdf
def to_string(path=nil)
File.open(self.to_pdf(path)).read
end
# Bang variant of #to_pdf: raises RenderingError on failure.
def to_pdf!(path=nil)
@outfile = File.expand_path(path) if path
self.run!
@outfile
end
# Bang variant of #to_file: raises RenderingError on failure.
def to_file!(path=nil)
self.to_pdf!(path)
File.new(@outfile)
end
# Bang variant of #to_string: raises RenderingError on failure.
def to_string!(path=nil)
File.open(self.to_pdf!(path)).read
end
private
# Writes the cookie jar as JSON to a temp file and returns its path.
# Cookies are scoped to the source host (or "/" for local files).
def dump_cookies
host = @source.url? ? URI::parse(@source.to_s).host : "/"
json = @cookies.inject([]) { |a, (k, v)| a.push({ :name => k, :value => v, :domain => host }); a }.to_json
File.open("#{options[:tmpdir]}/#{rand}.cookies", 'w') { |f| f.puts json; f }.path
end
end
end
remove shellescape for phantomjs call
require 'uri'
require 'json'
require 'shellwords'
module Shrimp
# Raised when the configured phantomjs binary cannot be found on disk.
class NoExecutableError < StandardError
def initialize
super("No phantomjs executable found at #{Shrimp.configuration.phantomjs}\n" \
">> Please install phantomjs - http://phantomjs.org/download.html")
end
end
# Raised when the given source is neither a valid URL nor a file.
class ImproperSourceError < StandardError
def initialize(details = nil)
super("Improper Source: #{details}")
end
end
# Raised by Phantom#run! when phantomjs exits with a non-zero status.
class RenderingError < StandardError
def initialize(details = nil)
super("Rendering Error: #{details}")
end
end
# Wraps an invocation of the phantomjs binary (driving rasterize.js) to
# render a URL or local HTML file into a PDF document.
class Phantom
attr_accessor :source, :configuration, :outfile
attr_reader :options, :cookies, :result, :error
SCRIPT_FILE = File.expand_path('../rasterize.js', __FILE__)
# Public: Runs the phantomjs binary
#
# Returns the stdout output of phantomjs, or nil on failure; on failure
# the output is available via #error instead.
def run
@error = nil
@result = `#{cmd}`
unless $?.exitstatus == 0
@error = @result
@result = nil
end
@result
end
# Like #run, but raises RenderingError when phantomjs exits non-zero.
def run!
@error = nil
@result = `#{cmd}`
unless $?.exitstatus == 0
@error = @result
@result = nil
raise RenderingError.new(@error)
end
@result
end
# Public: Returns the phantom rasterize command
def cmd
require 'digest' # Digest::MD5 is used below but 'digest' is not required at file scope
cookie_file = dump_cookies
format, zoom, margin, orientation = options[:format], options[:zoom], options[:margin], options[:orientation]
rendering_time, timeout = options[:rendering_time], options[:rendering_timeout]
viewport_width, viewport_height = options[:viewport_width], options[:viewport_height]
max_redirect_count = options[:max_redirect_count]
# Header/footer/language are single-quoted so phantomjs receives each
# as one shell argument.
header = "'#{options[:header]}'"
footer = "'#{(options[:footer])}'"
language = "'#{(options[:language])}'"
@outfile ||= "#{options[:tmpdir]}/#{Digest::MD5.hexdigest((Time.now.to_i + rand(9001)).to_s)}.pdf"
command_config_file = "--config=#{options[:command_config_file]}"
[
Shrimp.configuration.phantomjs,
command_config_file,
SCRIPT_FILE,
# NOTE(review): the source is deliberately interpolated without
# shellescape (see history); this is only safe for trusted sources —
# confirm callers never pass untrusted input here.
@source.to_s,
@outfile,
format,
zoom,
margin,
orientation,
cookie_file,
rendering_time,
timeout,
viewport_width,
viewport_height,
max_redirect_count,
header,
footer,
language
].join(" ")
end
# Public: initializes a new Phantom Object
#
# url_or_file - The url of the html document to render
# options - a hash with options for rendering
# * format - the paper format for the output eg: "5in*7.5in", "10cm*20cm", "A4", "Letter"
# * zoom - the viewport zoom factor
# * margin - the margins for the pdf
# * command_config_file - the path to a json configuration file for command-line options
# cookies - hash with cookies to use for rendering
# outfile - optional path for the output file a Tempfile will be created if not given
#
# Returns self
def initialize(url_or_file, options = { }, cookies={ }, outfile = nil)
@source = Source.new(url_or_file)
@options = Shrimp.configuration.default_options.merge(options)
@cookies = cookies
@outfile = File.expand_path(outfile) if outfile
# File.exists? was deprecated and removed in Ruby 3.2; File.exist? is
# the supported spelling.
raise NoExecutableError.new unless File.exist?(Shrimp.configuration.phantomjs)
end
# Public: renders to pdf
# path - the destination path defaults to outfile
#
# Returns the path to the pdf file
def to_pdf(path=nil)
@outfile = File.expand_path(path) if path
self.run
@outfile
end
# Public: renders to pdf
# path - the destination path defaults to outfile
#
# Returns a File Handle of the Resulting pdf
def to_file(path=nil)
self.to_pdf(path)
File.new(@outfile)
end
# Public: renders to pdf
# path - the destination path defaults to outfile
#
# Returns the binary string of the pdf
def to_string(path=nil)
File.open(self.to_pdf(path)).read
end
# Bang variant of #to_pdf: raises RenderingError on failure.
def to_pdf!(path=nil)
@outfile = File.expand_path(path) if path
self.run!
@outfile
end
# Bang variant of #to_file: raises RenderingError on failure.
def to_file!(path=nil)
self.to_pdf!(path)
File.new(@outfile)
end
# Bang variant of #to_string: raises RenderingError on failure.
def to_string!(path=nil)
File.open(self.to_pdf!(path)).read
end
private
# Writes the cookie jar as JSON to a temp file and returns its path.
# Cookies are scoped to the source host (or "/" for local files).
def dump_cookies
host = @source.url? ? URI::parse(@source.to_s).host : "/"
json = @cookies.inject([]) { |a, (k, v)| a.push({ :name => k, :value => v, :domain => host }); a }.to_json
File.open("#{options[:tmpdir]}/#{rand}.cookies", 'w') { |f| f.puts json; f }.path
end
end
end
|
# Gem version constant for Sicuro.
module Sicuro
  # Frozen so the version string cannot be mutated at runtime.
  VERSION = "1.0.0".freeze
end
Just kidding, let's go with 0.1.0 since it still has a failing test.
# Gem version constant for Sicuro.
module Sicuro
  # Frozen so the version string cannot be mutated at runtime.
  VERSION = "0.1.0".freeze
end
|
require "fileutils"
module Slideoff
module Utils
module_function
# Scaffolds a new presentation directory +name+: a main/index.md demo
# deck, a presentation.json config, a style.css override file, and a
# fresh git repository. Raises when the directory already exists.
# Fix: the demo deck's blockquote slide credited "Athor Name" — typo
# corrected to "Author Name".
def init_directory(name)
raise "Directory '#{name}' does already exist. Use another one." if Dir.exist? name
FileUtils.mkdir_p name
Dir.chdir(name) do |dir|
FileUtils.mkdir_p 'main'
File.open('main/index.md', 'w') do |file|
file.write <<-EOF
!SLIDE title cover h
#Title
##Subtitle

!SLIDE cover w

!SLIDE cover h

!SLIDE cover w h

!SLIDE
# Normal list
* Keep
* It
* Super
* Simple
!SLIDE chapter h
# Chapter

!SLIDE section h
# Section

!SLIDE shout up
# Shout up!
!SLIDE shout left
# Shout left!
!SLIDE shout right
# Shout right!
!SLIDE shout down
# Shout down!
!SLIDE
# Bullet list
* Keep
* It
* Super
* Simple
!SLIDE
# Numbered list
1. Keep
1. It
1. Super
1. Simple
!SLIDE small
# Small slide with full of text
Lorem ipsum dolor sit amet, consectetur adipisicing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum.
Lorem ipsum dolor sit amet, consectetur adipisicing elit, sed do eiusmod tempor incididunt ut.
!SLIDE
# Description list
Ruby
: Loving scripting language
HTTP
: Hyper Text Transfer Protocol
CSS
: Cascading Style Sheet
REST
: Representational state transfer
!SLIDE
# Link
1. Click on [DSIW/slideoff](http://github.com/DSIW/slideoff).
!SLIDE
# Code
```sh
$ gem install slideoff
```
```ruby
class World
def hello
puts "Hello \#{self.class.name}!"
end
end
World.new.hello #=> "Hello World!"
```
Note: Code `self.class.name` will be evaluated.
!SLIDE
# Table
|Left column|Centered column| Right column
|-----------|:-------------:|------------:
|Dataset 1 |Dataset 1 | Dataset 2
|Dataset 1 |Dataset 2 | Dataset 2
|Dataset 1 |Dataset 3 | Dataset 2
|Dataset 1 |Dataset 4 | Dataset 2
!SLIDE
# Colors
1. <span class="text-green">abc</span>
1. <span class="text-red">abc</span>
1. <span class="text-purple">abc</span>
1. <span class="text-orange">abc</span>
1. <span class="text-blue">abc</span>
1. <span class="text-bluegreen">abc</span>
!SLIDE
# Highlighting
1. <mark class="green">abc</mark>
1. <mark class="red">abc</mark>
1. <mark class="purple">abc</mark>
1. <mark class="orange">abc</mark>
1. <mark class="blue">abc</mark>
1. <mark class="bluegreen">abc</mark>
!SLIDE
# Colorize with custom markdown
This is ==orange==some== __orange__super__ and _underlined_ text.
!SLIDE incr-list
# Incremental list
* First item
* Second item
* Third item
!SLIDE incr-code
# Incremental Code
```ruby
1.class #=> Fixnum
1.1.class #=> Float
"abc".clas #=> String
:abc.clas #=> Symbol
```
!SLIDE typewriter
# Incremental Code
```sh
bundle install
```
!SLIDE incr-table
# Incremental table
|Left column|Centered column| Right column
|-----------|:-------------:|------------:
|Dataset 1 |Dataset 1 | Dataset 2
|Dataset 1 |Dataset 2 | Dataset 2
|Dataset 1 |Dataset 3 | Dataset 2
|Dataset 1 |Dataset 4 | Dataset 2
!SLIDE incremental
# Content with pauses
This is the first text.
!PAUSE
More text <span class="inactive">with hidden info</span>!
!SLIDE disabled
# Hidden slide
!SLIDE
# Blockquote
>Lorem ipsum dolor sit amet, consectetur adipisicing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut.
>
>Author Name
!SLIDE
# Box
+++
+++ Box without shadow
+++ Content
+++ shadow
+++ Box with shadow
+++ Content
!SLIDE
# Arranged Box
<div class="col2">
<div class="box box-alert">
<div>Errors</div>
<div>aren't good</div>
</div>
<div class="inactive">
<div class="box box-success">
<div>No errors</div>
<div>are better</div>
</div>
</div>
</div>
!SLIDE
# Box with different colors (1)
+++ box-blue
+++ Header
+++ Content
+++ box-bluegreen
+++ Header
+++ Content
+++ box-green
+++ Header
+++ Content
!SLIDE
# Box with different colors (2)
+++ box-red
+++ Header
+++ Content
+++ box-purple
+++ Header
+++ Content
+++ box-orange
+++ Header
+++ Content
!SLIDE
# Image

!SLIDE img-place-middle
# Centered Image

!SLIDE section cover w
# Image via Flickr API
!F[22565509]
!SLIDE
# Placement with "place t r b l"
<span class="bg-red place">center</span>
<span class="bg-red place t">top</span>
<span class="bg-red place t l">top-left</span>
<span class="bg-red place t r">top-right</span>
<span class="bg-red place l">left</span>
<span class="bg-red place r">right</span>
<span class="bg-red place b">bottom</span>
<span class="bg-red place b l">bottom-left</span>
<span class="bg-red place b r">bottom-right</span>
!SLIDE
# Diagram with Highcharts
<div id="diagram-pie" style="width: 850px; height: 565px;"></div>
<script type="text/javascript">
$(function () {
$('#diagram-pie').highcharts({
credits: { enabled: false },
plotOptions: {
pie: {
cursor: 'pointer',
dataLabels: {
enabled: true,
distance: 40,
style: {
fontSize: '23px',
color: 'black'
},
formatter: function() {
return '<b>'+ this.point.name +'</b> ('+this.point.year+'): '+ Math.round(this.percentage*10)/10.0 +'%';
}
}
}
},
series: [{
type: 'pie',
innerSize: '45%',
data: [
{ name: 'Dataset 1', year: '1980', y: 1.3, },
{ name: 'Dataset 2', year: '1990', y: 0.3, },
{ name: 'Dataset 3', year: '2000', y: 10.0, },
{ name: 'Dataset 4', year: '2010', y: 88.3, },
{ name: 'Dataset 5', year: '2020', y: 0.1 }
]
}]
});
});
</script>
!SLIDE noheader cover h
<div class="left-33">
<p style="font: 500 46px/1 'Open Sans'" class="text-center">Header</p>
<p style="font-size: 28px;" class="text-center">Subheader</p>
<ul class="border-separated">
<li>list item</li>
<li>list item</li>
<li>list item</li>
<li>list item</li>
</ul>
</div>

!SLIDE
# Clickbindings
Click | Action
------------------------------- | -------------------------
Left click | Goto next slide
Right click | Goto previous slide
!SLIDE
# Keybindings (1)
Key | Action
------------------------------- | -------------------------
F5 / Enter | Goto slide mode
Esc | Goto list mode
Home | Goto first slide
End | Goto last slide
!SLIDE
# Keybindings (2)
Key | Action
------------------------------- | -------------------------
Tab / Space | Goto next slide
... with Shift | Goto previous slide
PageUp / Up / Left / h / k | Goto previous slide
... with Shift | Goto previous chapter
PageDown / Down / right / l / j | Goto next slide
... with Shift | Goto next chapter
EOF
end
File.open('presentation.json', 'w') do |file|
file.write <<-EOF
{
"title": "#{name}",
"author": "Me",
"theme": "modern",
"pygments_style": "github",
//"duration": 20,
//"flickr_api_key": "...",
//"remote_host": "...",
//"remote_path": "...",
"sections": {
"main": {"title": "Modern theme", "show_chapter": false, "show_toc": false}
}
}
EOF
end
File.open('style.css', 'w') do |file|
file.write <<-EOF
/* Override your theme styles */
.slide {
}
EOF
end
puts `git init`
end
end
# Clones a theme repository into the slideoff themes directory. When
# theme_name is nil it is derived from the repository basename with any
# "slideoff-"/"theme-" prefix stripped.
def install_theme(git_repository_url, theme_name)
theme_name ||= git_repository_url.split('/').last.gsub(/slideoff-|theme-/, '')
destination = File.join(CONFIG.dir, 'themes', theme_name)
FileUtils.mkdir_p(File.dirname(destination))
`git clone #{git_repository_url} #{destination}`
puts
puts "Please make sure that '#{theme_name}' is set in your presentation.json"
end
# Renders the presentation to static files, creates the remote directory
# tree, copies the files via scp and makes them world-readable.
def upload(options = {})
generate_static(options)
remote_path = CONFIG.remote_path
remote_cmd(parents(remote_path).map { |dir| "mkdir -vp -m 755 #{dir}" })
`scp -r #{File.join(static_dir, "*")} #{CONFIG.remote_host}:#{remote_path}`
remote_cmd "chmod -vR o+r #{remote_path}"
end
# Boots a temporary presentation server and mirrors it with wget into
# static_dir, adding a robots.txt that blocks all crawlers.
def generate_static(options = {})
fork_server(options) do
FileUtils.mkdir_p(static_dir)
Dir.chdir(static_dir) do
`wget -E -H -k -nH -p http://127.0.0.1:#{options[:port]}/`
File.write('robots.txt', "User-agent: *\nDisallow: /\n")
end
end
end
# Serves the generated static site on +port+ via Python's built-in HTTP
# server (blocks until the server exits). Requires python3 on the PATH.
def serve_static(port, options = {})
puts "Listening python server on http://0.0.0.0:#{port}" if options[:verbose]
`python3 -m http.server #{port}`
end
# Renders the running presentation to a single oversized-page PDF via
# wkhtmltopdf and returns the generated file name.
def generate_pdf_file(options = {})
pdf_file = "talk_#{convert_to_filename(CONFIG.title)}.pdf"
fork_server(options) do
margin = 0
source = "http://127.0.0.1:#{options[:port]}"
`wkhtmltopdf --page-width 8000px --page-height 6000px -B #{margin} -R #{margin} -L #{margin} -T #{margin} #{source} #{pdf_file}`
end
pdf_file
end
private
# Returns every ancestor path of +dir+, shortest first, including dir
# itself — e.g. "a/b/c" => ["a", "a/b", "a/b/c"].
def self.parents(dir)
segments = dir.split(File::SEPARATOR)
(0...segments.length).map { |i| segments[0..i].join(File::SEPARATOR) }
end
# Runs one or more shell commands on the configured remote host via ssh.
def self.remote_cmd(cmds)
joined = Array(cmds).join(';')
`ssh #{CONFIG.remote_host} "#{joined}"`
end
# Output directory for the generated static site: a sibling of the
# current presentation directory, prefixed with "static_".
def self.static_dir
"../static_" + File.basename(Dir.pwd)
end
# Lowercases +string+ and strips every character that is not a-z, 0-9,
# '.' or '-'.
def self.convert_to_filename(string)
string.downcase.scan(/[a-z0-9.-]/).join
end
# Forks a child process running the slideoff server, waits for it to
# boot, then yields. The server is always sent QUIT and reaped
# afterwards, even if the block raises.
def self.fork_server(options)
pid = Process.fork { Slideoff::Server.new(options).start }
# Give the forked server time to bind before the block talks to it.
sleep 2
begin
yield
ensure
Process.kill "QUIT", pid
Process.wait pid
end
end
end
end
Change static dir to public in current presentation dir
require "fileutils"
module Slideoff
module Utils
module_function
def init_directory(name)
raise "Directory '#{name}' does already exist. Use another one." if Dir.exist? name
FileUtils.mkdir_p name
Dir.chdir(name) do |dir|
FileUtils.mkdir_p 'main'
File.open('main/index.md', 'w') do |file|
file.write <<-EOF
!SLIDE title cover h
#Title
##Subtitle

!SLIDE cover w

!SLIDE cover h

!SLIDE cover w h

!SLIDE
# Normal list
* Keep
* It
* Super
* Simple
!SLIDE chapter h
# Chapter

!SLIDE section h
# Section

!SLIDE shout up
# Shout up!
!SLIDE shout left
# Shout left!
!SLIDE shout right
# Shout right!
!SLIDE shout down
# Shout down!
!SLIDE
# Bullet list
* Keep
* It
* Super
* Simple
!SLIDE
# Numbered list
1. Keep
1. It
1. Super
1. Simple
!SLIDE small
# Small slide with full of text
Lorem ipsum dolor sit amet, consectetur adipisicing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum.
Lorem ipsum dolor sit amet, consectetur adipisicing elit, sed do eiusmod tempor incididunt ut.
!SLIDE
# Description list
Ruby
: Loving scripting language
HTTP
: Hyper Text Transfer Protocol
CSS
: Cascading Style Sheet
REST
: Representational state transfer
!SLIDE
# Link
1. Click on [DSIW/slideoff](http://github.com/DSIW/slideoff).
!SLIDE
# Code
```sh
$ gem install slideoff
```
```ruby
class World
def hello
puts "Hello \#{self.class.name}!"
end
end
World.new.hello #=> "Hello World!"
```
Note: Code `self.class.name` will be evaluated.
!SLIDE
# Table
|Left column|Centered column| Right column
|-----------|:-------------:|------------:
|Dataset 1 |Dataset 1 | Dataset 2
|Dataset 1 |Dataset 2 | Dataset 2
|Dataset 1 |Dataset 3 | Dataset 2
|Dataset 1 |Dataset 4 | Dataset 2
!SLIDE
# Colors
1. <span class="text-green">abc</span>
1. <span class="text-red">abc</span>
1. <span class="text-purple">abc</span>
1. <span class="text-orange">abc</span>
1. <span class="text-blue">abc</span>
1. <span class="text-bluegreen">abc</span>
!SLIDE
# Highlighting
1. <mark class="green">abc</mark>
1. <mark class="red">abc</mark>
1. <mark class="purple">abc</mark>
1. <mark class="orange">abc</mark>
1. <mark class="blue">abc</mark>
1. <mark class="bluegreen">abc</mark>
!SLIDE
# Colorize with custom markdown
This is ==orange==some== __orange__super__ and _underlined_ text.
!SLIDE incr-list
# Incremental list
* First item
* Second item
* Third item
!SLIDE incr-code
# Incremental Code
```ruby
1.class #=> Fixnum
1.1.class #=> Float
"abc".clas #=> String
:abc.clas #=> Symbol
```
!SLIDE typewriter
# Incremental Code
```sh
bundle install
```
!SLIDE incr-table
# Incremental table
|Left column|Centered column| Right column
|-----------|:-------------:|------------:
|Dataset 1 |Dataset 1 | Dataset 2
|Dataset 1 |Dataset 2 | Dataset 2
|Dataset 1 |Dataset 3 | Dataset 2
|Dataset 1 |Dataset 4 | Dataset 2
!SLIDE incremental
# Content with pauses
This is the first text.
!PAUSE
More text <span class="inactive">with hidden info</span>!
!SLIDE disabled
# Hidden slide
!SLIDE
# Blockquote
>Lorem ipsum dolor sit amet, consectetur adipisicing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut.
>
>Athor Name
!SLIDE
# Box
+++
+++ Box without shadow
+++ Content
+++ shadow
+++ Box with shadow
+++ Content
!SLIDE
# Arranged Box
<div class="col2">
<div class="box box-alert">
<div>Errors</div>
<div>aren't good</div>
</div>
<div class="inactive">
<div class="box box-success">
<div>No errors</div>
<div>are better</div>
</div>
</div>
</div>
!SLIDE
# Box with different colors (1)
+++ box-blue
+++ Header
+++ Content
+++ box-bluegreen
+++ Header
+++ Content
+++ box-green
+++ Header
+++ Content
!SLIDE
# Box with different colors (2)
+++ box-red
+++ Header
+++ Content
+++ box-purple
+++ Header
+++ Content
+++ box-orange
+++ Header
+++ Content
!SLIDE
# Image

!SLIDE img-place-middle
# Centered Image

!SLIDE section cover w
# Image via Flickr API
!F[22565509]
!SLIDE
# Placement with "place t r b l"
<span class="bg-red place">center</span>
<span class="bg-red place t">top</span>
<span class="bg-red place t l">top-left</span>
<span class="bg-red place t r">top-right</span>
<span class="bg-red place l">left</span>
<span class="bg-red place r">right</span>
<span class="bg-red place b">bottom</span>
<span class="bg-red place b l">bottom-left</span>
<span class="bg-red place b r">bottom-right</span>
!SLIDE
# Diagram with Highcharts
<div id="diagram-pie" style="width: 850px; height: 565px;"></div>
<script type="text/javascript">
$(function () {
$('#diagram-pie').highcharts({
credits: { enabled: false },
plotOptions: {
pie: {
cursor: 'pointer',
dataLabels: {
enabled: true,
distance: 40,
style: {
fontSize: '23px',
color: 'black'
},
formatter: function() {
return '<b>'+ this.point.name +'</b> ('+this.point.year+'): '+ Math.round(this.percentage*10)/10.0 +'%';
}
}
}
},
series: [{
type: 'pie',
innerSize: '45%',
data: [
{ name: 'Dataset 1', year: '1980', y: 1.3, },
{ name: 'Dataset 2', year: '1990', y: 0.3, },
{ name: 'Dataset 3', year: '2000', y: 10.0, },
{ name: 'Dataset 4', year: '2010', y: 88.3, },
{ name: 'Dataset 5', year: '2020', y: 0.1 }
]
}]
});
});
</script>
!SLIDE noheader cover h
<div class="left-33">
<p style="font: 500 46px/1 'Open Sans'" class="text-center">Header</p>
<p style="font-size: 28px;" class="text-center">Subheader</p>
<ul class="border-separated">
<li>list item</li>
<li>list item</li>
<li>list item</li>
<li>list item</li>
</ul>
</div>

!SLIDE
# Clickbindings
Click | Action
------------------------------- | -------------------------
Left click | Goto next slide
Right click | Goto previous slide
!SLIDE
# Keybindings (1)
Key | Action
------------------------------- | -------------------------
F5 / Enter | Goto slide mode
Esc | Goto list mode
Home | Goto first slide
End | Goto last slide
!SLIDE
# Keybindings (2)
Key | Action
------------------------------- | -------------------------
Tab / Space | Goto next slide
... with Shift | Goto previous slide
PageUp / Up / Left / h / k | Goto previous slide
... with Shift | Goto previous chapter
PageDown / Down / right / l / j | Goto next slide
... with Shift | Goto next chapter
EOF
end
File.open('presentation.json', 'w') do |file|
file.write <<-EOF
{
"title": "#{name}",
"author": "Me",
"theme": "modern",
"pygments_style": "github",
//"duration": 20,
//"flickr_api_key": "...",
//"remote_host": "...",
//"remote_path": "...",
"sections": {
"main": {"title": "Modern theme", "show_chapter": false, "show_toc": false}
}
}
EOF
end
File.open('style.css', 'w') do |file|
file.write <<-EOF
/* Override your theme styles */
.slide {
}
EOF
end
puts `git init`
end
end
# Clones a theme repository into CONFIG.dir/themes/<theme_name>.
# When no explicit name is given, it is derived from the repository URL
# by taking the last path segment and stripping a "slideoff-" or
# "theme-" prefix.
# NOTE(review): the URL is interpolated into a shell command unescaped —
# only pass trusted repository URLs.
def install_theme(git_repository_url, theme_name)
  theme_name ||= git_repository_url.split('/').last.gsub(/slideoff-|theme-/, '')
  theme_path = File.join(CONFIG.dir, 'themes', theme_name)
  FileUtils.mkdir_p File.dirname(theme_path)
  `git clone #{git_repository_url} #{theme_path}`
  puts
  puts "Please make sure that '#{theme_name}' is set in your presentation.json"
end
# Generates the static export, creates every missing parent of
# CONFIG.remote_path on CONFIG.remote_host, copies the export over scp
# and makes the uploaded tree world-readable.
def upload(options = {})
  generate_static(options)
  path = CONFIG.remote_path
  # Block parameter renamed (was `path`) so it no longer shadows the
  # outer local `path` used below.
  mkdir_commands = parents(path).map { |parent| "mkdir -vp -m 755 #{parent}" }
  remote_cmd mkdir_commands
  `scp -r #{File.join(static_dir, "*")} #{CONFIG.remote_host}:#{path}`
  remote_cmd "chmod -vR o+r #{path}"
end
# Mirrors the running presentation into static_dir using wget, then
# drops a robots.txt that blocks all crawlers from the exported copy.
def generate_static(options = {})
  fork_server(options) do
    FileUtils.mkdir_p(static_dir)
    Dir.chdir(static_dir) do |dir|
      # -E adjust extensions, -H span hosts, -k rewrite links for local
      # viewing, -nH no host directories, -p fetch page requisites.
      `wget -E -H -k -nH -p http://127.0.0.1:#{options[:port]}/`
      File.write('robots.txt', "User-agent: *\nDisallow: /\n")
    end
  end
end
# Serves the exported files on all interfaces via Python's built-in
# web server (blocks until that server process exits).
def serve_static(port, options = {})
  puts "Listening python server on http://0.0.0.0:#{port}" if options[:verbose]
  `python3 -m http.server #{port}`
end
# Renders the presentation to a single PDF (named after its title) by
# pointing wkhtmltopdf at the forked local server. Returns the filename.
# NOTE(review): the oversized page keeps each slide on one PDF page —
# requires wkhtmltopdf on PATH; confirm before relying on this.
def generate_pdf_file(options = {})
  pdf_file = "talk_#{convert_to_filename(CONFIG.title)}.pdf"
  fork_server(options) do
    page_width = '8000px'
    page_height = '6000px'
    page_margin = 0
    source = "http://127.0.0.1:#{options[:port]}"
    dest = pdf_file
    `wkhtmltopdf --page-width #{page_width} --page-height #{page_height} -B #{page_margin} -R #{page_margin} -L #{page_margin} -T #{page_margin} #{source} #{dest}`
  end
  pdf_file
end
private
# Returns every ancestor path of +dir+ including +dir+ itself, shortest
# first, e.g. "a/b/c" => ["a", "a/b", "a/b/c"].
def self.parents(dir)
  segments = dir.split(File::SEPARATOR)
  (1..segments.length).map { |depth| segments.first(depth).join(File::SEPARATOR) }
end
# Runs one command or an array of commands on CONFIG.remote_host over
# ssh, joined with ';' so they execute in sequence.
# NOTE(review): commands are interpolated unescaped — trusted input only.
def self.remote_cmd(cmds)
  `ssh #{CONFIG.remote_host} "#{Array(cmds).join(';')}"`
end
# Static export target: the "public" directory inside the current
# presentation directory.
def self.static_dir
  "public"
end
# Normalizes an arbitrary string into a filename-safe fragment:
# lower-cases it and removes every character outside a-z, 0-9, '.', '-'.
def self.convert_to_filename(string)
  string.downcase.delete('^a-z0-9.-')
end
# Boots a Slideoff::Server in a forked child process, yields to the
# caller while the server is running, and always shuts the child down.
# NOTE(review): the fixed `sleep 2` assumes the server accepts
# connections within two seconds — confirm on slow machines.
def self.fork_server(options)
  pid = Process.fork { Slideoff::Server.new(options).start }
  sleep 2
  begin
    yield
  ensure
    # Graceful stop, then reap the child so no zombie process is left.
    Process.kill "QUIT", pid
    Process.wait pid
  end
end
end
end
|
# encoding: utf-8
#
require 'meta-state'
require 'sockjs/protocol'
module SockJS
class Session < MetaState::Machine
# Binds one outbound response/transport pair to a session and tracks
# how many bytes have been written through it.
class Consumer
  def initialize(response, transport)
    @response = response
    @transport = transport
    @total_sent_length = 0
    # Fix: removed stray debug output (`puts response.inspect`) that
    # dumped the full response on every consumer construction.
  end
  attr_reader :response, :transport, :total_sent_length
  # Close the *response*, not the *session*.
  def disconnect
    # WebSocket transports have no per-response size limit, so only the
    # byte counter is reset instead of finishing the response.
    if @transport.kind_of?(SockJS::Transports::WebSocket)
      @total_sent_length = 0
      return
    end
    @response.finish
  end
  def heartbeat
    transport.heartbeat_frame(response)
  end
  # Writes a messages frame and accounts for its size; no-op for [].
  def messages(items)
    unless items.empty?
      @total_sent_length += transport.messages_frame(response, items)
    end
  end
  def closing(status, message)
    transport.closing_frame(response, status, message)
  end
  #XXX Still not sure what this is *FOR*
  # NOTE(review): appears to probe whether the response is still
  # writable by sending an empty string — confirm against the spec.
  def check_alive
    if !@response.body.closed?
      if @response.due_for_alive_check
        SockJS.debug "Checking if still alive"
        @response.write(@transport.empty_string)
      else
        puts "~ [TODO] Not checking if still alive, why?"
        puts "Status: #{@status} (response.body.closed: #{@response.body.closed?})\nSession class: #{self.class}\nTransport class: #{@transport.class}\nResponse: #{@response.to_s}\n\n"
      end
    end
  end
end
state :Detached do
def on_enter
@consumer = nil
clear_all_timers
set_disconnect_timer
end
def attach_consumer(response, transport)
@consumer = Consumer.new(response, transport)
transition_to :attached
after_consumer_attached
end
def detach_consumer
#XXX Not sure if this is the right behavior
close(1002,"Connection interrupted")
end
def send(*messages)
@outbox += messages
end
def close(status = nil, message = nil)
@close_status = status
@close_message = message
transition_to(:closed)
closed
end
end
state :Attached do
def on_enter
@consumer.messages(@outbox)
@outbox.clear
clear_all_timers
check_content_length
set_heartbeat_timer
end
def attach_consumer(response, transport)
SockJS.debug "Session#attach_consumer: another connection still open"
transport.closing_frame(response, 2010, "Another connection still open")
close(1002, "Connection interrupted")
end
def detach_consumer
transition_to :detached
after_consumer_detached
end
def send(*messages)
@consumer.messages(messages)
check_content_length
end
def send_heartbeat
@consumer.heartbeat
end
def close(status = 1002, message = "Connection interrupted")
@close_status = status
@close_message = message
@consumer.closing(@close_status, @close_message)
@consumer = nil
transition_to(:closed)
closed
end
end
state :Closed do
def on_enter
@close_status ||= 3000
@close_message ||= "Go away!"
clear_all_timers
set_close_timer
end
def attach_consumer(response, transport)
transport.closing_frame(response, @close_status, @close_message)
end
end
#### Client Code interface
# All incoming data is treated as incoming messages,
# either single json-encoded messages or an array
# of json-encoded messages, depending on transport.
def receive_message(data)
clear_timer(:disconnect)
SockJS.debug "Session receiving message: #{data.inspect}"
messages = parse_json(data)
SockJS.debug "Message parsed as: #{messages.inspect}"
unless messages.empty?
@received_messages.push(*messages)
end
EM.next_tick do
run_user_app
end
set_disconnect_timer
end
def check_content_length
if @consumer.total_sent_length >= max_permitted_content_length
SockJS.debug "Maximum content length exceeded, closing the connection."
@consumer.disconnect
else
SockJS.debug "Permitted content length: #{@consumer.total_sent_length} of #{max_permitted_content_length}"
end
end
def run_user_app
unless @received_messages.empty?
reset_heartbeat_timer
SockJS.debug "Executing user's SockJS app"
raise @error if @error
@received_messages.each do |message|
SockJS.debug "Executing app with message #{message.inspect}"
process_message(message)
end
@received_messages.clear
after_app_run
SockJS.debug "User's SockJS app finished"
end
rescue SockJS::CloseError => error
Protocol::ClosingFrame.new(error.status, error.message)
end
def process_message(message)
end
def opened
end
def after_app_run
end
def closed
end
def after_consumer_attached
end
def after_consumer_detached
end
attr_accessor :disconnect_delay, :interval
attr_reader :transport, :response, :outbox, :closing_frame, :data
def initialize(connection)
super()
debug_with do |msg|
SockJS::debug(msg)
end
@connection = connection
@disconnect_delay = 5 # TODO: make this configurable.
@received_messages = []
@outbox = []
@total_sent_content_length = 0
@interval = 0.1
@closing_frame = nil
@data = {}
@alive = true
@timers = {}
end
def alive?
!!@alive
end
#XXX This is probably important - need to examine this case
def on_close
SockJS.debug "The connection has been closed on the client side (current status: #{@status})."
close_session(1002, "Connection interrupted")
end
def max_permitted_content_length
@max_permitted_content_length ||= ($DEBUG ? 4096 : 128_000)
end
def parse_json(data)
if data.empty?
return []
end
JSON.parse("[#{data}]")[0]
rescue JSON::ParserError => error
raise SockJS::InvalidJSON.new(500, "Broken JSON encoding.")
end
#Timers:
#"alive_checker" - need to check spec. Appears to check that response is
#live. Premature?
#
#"disconnect" - expires and closes the session - time without a consumer
#
#"close" - duration between closed and removed from management
#
#"heartbeat" - periodic for hb frame
#Timer actions:
def disconnect_expired
SockJS.debug "#{@disconnect_delay} has passed, firing @disconnect_timer"
close
end
#XXX Remove? What's this for?
def check_response_alive
if @consumer
begin
@consumer.check_alive
rescue Exception => error
puts "==> #{error.message}"
SockJS.debug error
puts "==> #{error.message}"
on_close
@alive_checker.cancel
end
else
puts "~ [TODO] Not checking if still alive, why?"
end
end
def heartbeat_triggered
# It's better as we know for sure that
# clearing the buffer won't change it.
SockJS.debug "Sending heartbeat frame."
begin
send_heartbeat
rescue Exception => error
# Nah these exceptions are OK ... let's figure out when they occur
# and let's just not set the timer for such cases in the first place.
SockJS.debug "Exception when sending heartbeat frame: #{error.inspect}"
end
end
#Timer machinery
def set_timer(name, type, delay, &action)
@timers[name] ||=
begin
SockJS.debug "Setting timer: #{name} to expire after #{delay}"
type.new(delay, &action)
end
end
def clear_timer(name)
@timers[name].cancel unless @timers[name].nil?
@timers.delete(name)
end
def clear_all_timers
@timers.values.each do |timer|
timer.cancel
end
@timers.clear
end
def set_alive_timer
set_timer(:alive_check, EM::PeriodicTimer, 1) do
check_response_alive
end
end
def reset_alive_timer
clear_timer(:alive_check)
set_alive_timer
end
# Switches the session into heartbeat mode: cancels the disconnect and
# alive-check timers and starts a 25s periodic heartbeat.
def set_heartbeat_timer
  clear_timer(:disconnect)
  # Bug fix: the alive-check timer is registered under :alive_check
  # (see #set_alive_timer), so the old clear_timer(:alive) call never
  # cancelled anything.
  clear_timer(:alive_check)
  set_timer(:heartbeat, EM::PeriodicTimer, 25) do
    heartbeat_triggered
  end
end
def reset_heartbeat_timer
clear_timer(:heartbeat)
set_heartbeat_timer
end
def set_disconnect_timer
set_timer(:disconnect, EM::Timer, @disconnect_delay) do
disconnect_expired
end
end
def reset_disconnect_timer
clear_timer(:disconnect)
set_disconnect_timer
end
def set_close_timer
set_timer(:close, EM::Timer, @disconnect_delay) do
@alive = false
end
end
def reset_close_timer
clear_timer(:close)
set_close_timer
end
end
class WebSocketSession < Session
attr_accessor :ws
undef :response
def send_data(frame)
if frame.nil?
raise TypeError.new("Frame must not be nil!")
end
unless frame.empty?
SockJS.debug "@ws.send(#{frame.inspect})"
@ws.send(frame)
end
end
def after_app_run
return super unless self.closing?
after_close
end
def after_close
SockJS.debug "after_close: calling #finish"
finish
SockJS.debug "after_close: closing @ws and clearing @transport."
@ws.close
@transport = nil
end
def set_alive_checker
end
end
end
Removed some debug info
# encoding: utf-8
#
require 'meta-state'
require 'sockjs/protocol'
module SockJS
class Session < MetaState::Machine
# Binds one outbound response/transport pair to a session and tracks
# how many bytes have been written through it.
class Consumer
  def initialize(response, transport)
    @response = response
    @transport = transport
    @total_sent_length = 0
  end
  attr_reader :response, :transport, :total_sent_length
  # Close the *response*, not the *session*.
  def disconnect
    # WebSocket transports should not have a per-response data limit,
    # so only the byte counter is reset instead of finishing the response.
    if @transport.kind_of?(SockJS::Transports::WebSocket)
      @total_sent_length = 0
      return
    end
    @response.finish
  end
  def heartbeat
    transport.heartbeat_frame(response)
  end
  # Writes a messages frame and accounts for its size; no-op for [].
  def messages(items)
    unless items.empty?
      @total_sent_length += transport.messages_frame(response, items)
    end
  end
  def closing(status, message)
    transport.closing_frame(response, status, message)
  end
  #XXX Still not sure what this is *FOR*
  # NOTE(review): appears to probe whether the response is still
  # writable by sending an empty string — confirm against the spec.
  def check_alive
    if !@response.body.closed?
      if @response.due_for_alive_check
        SockJS.debug "Checking if still alive"
        @response.write(@transport.empty_string)
      else
        puts "~ [TODO] Not checking if still alive, why?"
        puts "Status: #{@status} (response.body.closed: #{@response.body.closed?})\nSession class: #{self.class}\nTransport class: #{@transport.class}\nResponse: #{@response.to_s}\n\n"
      end
    end
  end
end
state :Detached do
def on_enter
@consumer = nil
clear_all_timers
set_disconnect_timer
end
def attach_consumer(response, transport)
@consumer = Consumer.new(response, transport)
transition_to :attached
after_consumer_attached
end
def detach_consumer
#XXX Not sure if this is the right behavior
close(1002,"Connection interrupted")
end
def send(*messages)
@outbox += messages
end
def close(status = nil, message = nil)
@close_status = status
@close_message = message
transition_to(:closed)
closed
end
end
state :Attached do
def on_enter
@consumer.messages(@outbox)
@outbox.clear
clear_all_timers
check_content_length
set_heartbeat_timer
end
def attach_consumer(response, transport)
SockJS.debug "Session#attach_consumer: another connection still open"
transport.closing_frame(response, 2010, "Another connection still open")
close(1002, "Connection interrupted")
end
def detach_consumer
transition_to :detached
after_consumer_detached
end
def send(*messages)
@consumer.messages(messages)
check_content_length
end
def send_heartbeat
@consumer.heartbeat
end
def close(status = 1002, message = "Connection interrupted")
@close_status = status
@close_message = message
@consumer.closing(@close_status, @close_message)
@consumer = nil
transition_to(:closed)
closed
end
end
state :Closed do
def on_enter
@close_status ||= 3000
@close_message ||= "Go away!"
clear_all_timers
set_close_timer
end
def attach_consumer(response, transport)
transport.closing_frame(response, @close_status, @close_message)
end
end
#### Client Code interface
# All incoming data is treated as incoming messages,
# either single json-encoded messages or an array
# of json-encoded messages, depending on transport.
def receive_message(data)
clear_timer(:disconnect)
SockJS.debug "Session receiving message: #{data.inspect}"
messages = parse_json(data)
SockJS.debug "Message parsed as: #{messages.inspect}"
unless messages.empty?
@received_messages.push(*messages)
end
EM.next_tick do
run_user_app
end
set_disconnect_timer
end
def check_content_length
if @consumer.total_sent_length >= max_permitted_content_length
SockJS.debug "Maximum content length exceeded, closing the connection."
@consumer.disconnect
else
SockJS.debug "Permitted content length: #{@consumer.total_sent_length} of #{max_permitted_content_length}"
end
end
def run_user_app
unless @received_messages.empty?
reset_heartbeat_timer
SockJS.debug "Executing user's SockJS app"
raise @error if @error
@received_messages.each do |message|
SockJS.debug "Executing app with message #{message.inspect}"
process_message(message)
end
@received_messages.clear
after_app_run
SockJS.debug "User's SockJS app finished"
end
rescue SockJS::CloseError => error
Protocol::ClosingFrame.new(error.status, error.message)
end
def process_message(message)
end
def opened
end
def after_app_run
end
def closed
end
def after_consumer_attached
end
def after_consumer_detached
end
attr_accessor :disconnect_delay, :interval
attr_reader :transport, :response, :outbox, :closing_frame, :data
def initialize(connection)
super()
debug_with do |msg|
SockJS::debug(msg)
end
@connection = connection
@disconnect_delay = 5 # TODO: make this configurable.
@received_messages = []
@outbox = []
@total_sent_content_length = 0
@interval = 0.1
@closing_frame = nil
@data = {}
@alive = true
@timers = {}
end
def alive?
!!@alive
end
#XXX This is probably important - need to examine this case
def on_close
SockJS.debug "The connection has been closed on the client side (current status: #{@status})."
close_session(1002, "Connection interrupted")
end
# Upper bound on bytes sent through one consumer before the response is
# recycled: 4096 under $DEBUG, 128_000 otherwise. Memoized.
def max_permitted_content_length
  @max_permitted_content_length ||= $DEBUG ? 4096 : 128_000
end
# Parses a transport payload into messages. The payload is wrapped in
# [] so that both a single JSON value and a comma-separated list parse,
# then the first element is unwrapped. An empty payload yields [].
# Broken JSON is reported as a 500 InvalidJSON error.
def parse_json(data)
  return [] if data.empty?
  JSON.parse("[#{data}]")[0]
rescue JSON::ParserError
  raise SockJS::InvalidJSON.new(500, "Broken JSON encoding.")
end
#Timers:
#"alive_checker" - need to check spec. Appears to check that response is
#live. Premature?
#
#"disconnect" - expires and closes the session - time without a consumer
#
#"close" - duration between closed and removed from management
#
#"heartbeat" - periodic for hb frame
#Timer actions:
def disconnect_expired
SockJS.debug "#{@disconnect_delay} has passed, firing @disconnect_timer"
close
end
#XXX Remove? What's this for?
def check_response_alive
if @consumer
begin
@consumer.check_alive
rescue Exception => error
puts "==> #{error.message}"
SockJS.debug error
puts "==> #{error.message}"
on_close
@alive_checker.cancel
end
else
puts "~ [TODO] Not checking if still alive, why?"
end
end
def heartbeat_triggered
# It's better as we know for sure that
# clearing the buffer won't change it.
SockJS.debug "Sending heartbeat frame."
begin
send_heartbeat
rescue Exception => error
# Nah these exceptions are OK ... let's figure out when they occur
# and let's just not set the timer for such cases in the first place.
SockJS.debug "Exception when sending heartbeat frame: #{error.inspect}"
end
end
#Timer machinery
def set_timer(name, type, delay, &action)
@timers[name] ||=
begin
SockJS.debug "Setting timer: #{name} to expire after #{delay}"
type.new(delay, &action)
end
end
# Cancels and forgets the named timer; a no-op for unknown names.
def clear_timer(name)
  timer = @timers[name]
  timer.cancel unless timer.nil?
  @timers.delete(name)
end
# Cancels every outstanding timer and empties the registry.
def clear_all_timers
  @timers.each_value(&:cancel)
  @timers.clear
end
def set_alive_timer
set_timer(:alive_check, EM::PeriodicTimer, 1) do
check_response_alive
end
end
def reset_alive_timer
clear_timer(:alive_check)
set_alive_timer
end
# Switches the session into heartbeat mode: cancels the disconnect and
# alive-check timers and starts a 25s periodic heartbeat.
def set_heartbeat_timer
  clear_timer(:disconnect)
  # Bug fix: the alive-check timer is registered under :alive_check
  # (see #set_alive_timer), so the old clear_timer(:alive) call never
  # cancelled anything.
  clear_timer(:alive_check)
  set_timer(:heartbeat, EM::PeriodicTimer, 25) do
    heartbeat_triggered
  end
end
def reset_heartbeat_timer
clear_timer(:heartbeat)
set_heartbeat_timer
end
def set_disconnect_timer
set_timer(:disconnect, EM::Timer, @disconnect_delay) do
disconnect_expired
end
end
def reset_disconnect_timer
clear_timer(:disconnect)
set_disconnect_timer
end
def set_close_timer
set_timer(:close, EM::Timer, @disconnect_delay) do
@alive = false
end
end
def reset_close_timer
clear_timer(:close)
set_close_timer
end
end
class WebSocketSession < Session
attr_accessor :ws
undef :response
def send_data(frame)
if frame.nil?
raise TypeError.new("Frame must not be nil!")
end
unless frame.empty?
SockJS.debug "@ws.send(#{frame.inspect})"
@ws.send(frame)
end
end
def after_app_run
return super unless self.closing?
after_close
end
def after_close
SockJS.debug "after_close: calling #finish"
finish
SockJS.debug "after_close: closing @ws and clearing @transport."
@ws.close
@transport = nil
end
def set_alive_checker
end
end
end
|
# Sortah gem version constant.
module Sortah
  # Frozen to prevent accidental in-place mutation of the constant.
  VERSION = "0.0.1".freeze
end
Bump version to 0.5.0
# Sortah gem version constant.
module Sortah
  # Frozen to prevent accidental in-place mutation of the constant.
  VERSION = "0.5.0".freeze
end
|
# Mixin for objects exposing #flag_mapping, a hash of the shape
# { column => { flag_name => ... } }.
module AwesomeFlags
  # All known flag names, optionally restricted to one column.
  # Fix: the original called get_flags without its required argument and
  # then referenced an undefined local `c`.
  def all_flags(column = nil)
    get_flags(column)
  end

  # Flag names whose reader method returns a truthy value.
  # Fix: uses non-bang #compact — #compact! returns nil when nothing was
  # removed, which made the original return nil when every flag was set.
  def my_flags(column = nil)
    get_flags(column).map { |var| self.send(var) ? var : nil }.compact
  end

  # Flag names defined in #flag_mapping for one column, or across all
  # columns when column is nil.
  # Fix: the original read `= self.flag_mapping` (missing receiver
  # variable), a syntax error.
  def get_flags(column)
    a = self.flag_mapping
    if column.nil?
      a.values.map { |var| var.keys }.flatten
    else
      a[column].keys
    end
  end
end
Fix get_flags: restore the missing `a =` assignment of flag_mapping
# Mixin for objects exposing #flag_mapping, a hash of the shape
# { column => { flag_name => ... } }.
module AwesomeFlags
  # All known flag names, optionally restricted to one column.
  # Fix: the original called get_flags without its required argument and
  # then referenced an undefined local `c`.
  def all_flags(column = nil)
    get_flags(column)
  end

  # Flag names whose reader method returns a truthy value.
  # Fix: uses non-bang #compact — #compact! returns nil when nothing was
  # removed, which made the original return nil when every flag was set.
  def my_flags(column = nil)
    get_flags(column).map { |var| self.send(var) ? var : nil }.compact
  end

  # Flag names defined in #flag_mapping for one column, or across all
  # columns when column is nil.
  def get_flags(column)
    a = self.flag_mapping
    if column.nil?
      a.values.map { |var| var.keys }.flatten
    else
      a[column].keys
    end
  end
end
# encoding: utf-8
module Ayadn
class Workers
def initialize
@thor = Thor::Shell::Color.new
@status = Status.new
end
def build_aliases_list(list)
table = init_table
table.title = "List of your channel aliases".color(:cyan) + "".color(:white)
table.style = {border_x: ' ', border_i: ' ', border_y: ' '}
list.each {|obj| table << [obj[0].to_s.color(:green), obj[1].color(:red)]}
table
end
def build_blacklist_list(list)
table = init_table
table.title = "Your blacklist".color(:cyan) + "".color(:white)
table.style = {border_x: ' ', border_i: ' ', border_y: ' '}
table.headings = [ 'Name', 'Type' ]
list.sort!
list.each {|obj| table << ["#{obj[1].capitalize}".color(:green), "#{obj[0]}".color(:red)]}
table
end
def build_reposted_list(list, target)
table = init_table
table.title = "List of users who reposted post ".color(:cyan) + "#{target}".color(:red) + "".color(:white)
users_list = []
list.each do |obj|
obj['name'].nil? ? name = "" : name = obj['name']
users_list << {:username => obj['username'], :name => name, :you_follow => obj['you_follow'], :follows_you => obj['follows_you'], :id => obj['id']}
end
table.style = {border_x: ' ', border_i: ' ', border_y: ' '}
return users_list, table
end
def build_starred_list(list, target)
table = init_table
table.title = "List of users who starred post ".color(:cyan) + "#{target}".color(:red) + "".color(:white)
users_list = []
list.each do |obj|
obj['name'].nil? ? name = "" : name = obj['name']
users_list << {:username => obj['username'], :name => name, :you_follow => obj['you_follow'], :follows_you => obj['follows_you'], :id => obj['id']}
end
table.style = {border_x: ' ', border_i: ' ', border_y: ' '}
return users_list, table
end
def build_followings_list(list, target, options = {}) #takes a hash of users with ayadn format
table = init_table
table.title = if target == "me"
"List of users you're following".color(:cyan) + "".color(:white)
else
"List of users ".color(:cyan) + "#{target}".color(:red) + " is following ".color(:cyan) + "".color(:white)
end
table.style = {border_x: ' ', border_i: ' ', border_y: ' '}
users_list = build_users_array(list)
build_users_list(users_list, table, options)
end
def build_followers_list(list, target, options = {})
table = init_table
table.title = if target == "me"
"List of your followers".color(:cyan) + "".color(:white)
else
"List of users following ".color(:cyan) + "#{target}".color(:red) + "".color(:white)
end
table.style = {border_x: ' ', border_i: ' ', border_y: ' '}
build_users_list(build_users_array(list), table, options)
end
def build_muted_list(list, options = {})
table = init_table
table.title = "List of users you muted".color(:cyan) + "".color(:white)
table.style = {border_x: ' ', border_i: ' ', border_y: ' '}
build_users_list(build_users_array(list), table, options)
end
def build_blocked_list(list, options = {})
table = init_table
table.title = "List of users you blocked".color(:cyan) + "".color(:white)
table.style = {border_x: ' ', border_i: ' ', border_y: ' '}
build_users_list(build_users_array(list), table, options)
end
def build_users_list(list, table, options = {})
users = at(list.map {|obj| obj[:username]})
ids = list.map {|obj| obj[:id].to_i}
ranks = NiceRank.new.from_ids(ids)
indexed_ranks = {}
ranks.each do |r|
if r.empty?
indexed_ranks = false
break
else
indexed_ranks[r['user_id']] = r
end
end
table << ['USERNAME'.color(:red), 'NAME'.color(:red), 'POSTS/DAY'.color(:red)]
table << :separator
arr = []
if options[:username]
list.sort_by! { |obj| obj[:username] }
elsif options[:name]
list.sort_by! { |obj| obj[:name].downcase }
end
list.each do |obj|
obj[:name] = "(no name)" if obj[:name].nil?
unless indexed_ranks == false
details = indexed_ranks[obj[:id].to_i]
if details['user']['posts_day'] == -1
posts_day = 'ignored'
else
posts_day = details['user']['posts_day'].round(2).to_s
end
else
posts_day = 'unknown'
end
obj[:username].length > 23 ? username = "#{obj[:username][0..20]}..." : username = obj[:username]
obj[:name].length > 23 ? name = "#{obj[:name][0..20]}..." : name = obj[:name]
arr << [ "@#{username} ".color(Settings.options[:colors][:username]), "#{name}", posts_day ]
end
if options[:posts_day]
arr.sort_by! { |obj| obj[2].to_f }
end
if options[:reverse]
arr.reverse!
end
arr.each_with_index do |obj, index|
table << arr[index]
if index + 1 != arr.length && Settings.options[:timeline][:compact] == false
table << :separator
end
end
table
end
# builds a hash of hashes, each hash is a normalized post with post id as a key
def build_posts(data, niceranks = {})
# skip objects in blacklist unless force
posts = {}
data.each.with_index(1) do |post, index|
unless Settings.options[:force]
if Settings.options[:blacklist][:active] == true
if Databases.is_in_blacklist?('client', post['source']['name'].downcase)
Debug.skipped({source: post['source']['name']})
next
end
end
end
unless Settings.options[:force]
if Settings.options[:blacklist][:active] == true
if Databases.is_in_blacklist?('user', post['user']['username'].downcase)
Debug.skipped({user: post['user']['username']})
next
end
end
end
hashtags = extract_hashtags(post)
@skip = false
unless Settings.options[:force]
if Settings.options[:blacklist][:active] == true
hashtags.each do |h|
if Databases.is_in_blacklist?('hashtag', h.downcase)
@skip = true
Debug.skipped({hashtag: h})
break
end
end
end
end
next if @skip
mentions= []
post['entities']['mentions'].each { |m| mentions << m['name'] }
unless Settings.options[:force]
if Settings.options[:blacklist][:active] == true
mentions.each do |m|
if Databases.is_in_blacklist?('mention', m.downcase)
@skip = true
Debug.skipped({mention: m})
break
end
end
end
end
next if @skip
# create custom objects from ADN response
if niceranks[post['user']['id'].to_i]
rank = niceranks[post['user']['id'].to_i][:rank]
is_human = niceranks[post['user']['id'].to_i][:is_human]
real_person = niceranks[post['user']['id'].to_i][:real_person]
else
rank = false
is_human = 'unknown'
real_person = 'unknown'
end
if post['user'].has_key?('name')
name = post['user']['name'].to_s.force_encoding("UTF-8")
else
name = "(no name)"
end
source = post['source']['name'].to_s.force_encoding("UTF-8")
values = {
count: index,
id: post['id'].to_i,
name: name,
thread_id: post['thread_id'],
username: post['user']['username'],
user_id: post['user']['id'].to_i,
nicerank: rank,
is_human: is_human,
real_person: real_person,
handle: "@#{post['user']['username']}",
type: post['user']['type'],
date: parsed_time(post['created_at']),
you_starred: post['you_starred'],
source_name: source,
source_link: post['source']['link'],
canonical_url: post['canonical_url'],
tags: hashtags,
links: extract_links(post),
mentions: mentions,
directed_to: mentions.first || false
}
values[:checkins], values[:has_checkins] = extract_checkins(post)
if post['repost_of']
values[:is_repost] = true
values[:repost_of] = post['repost_of']['id']
values[:original_poster] = post['repost_of']['user']['username']
else
values[:is_repost] = false
values[:repost_of] = nil
values[:original_poster] = post['user']['username']
end
unless post['text'].nil?
values[:raw_text] = post['text']
values[:text] = colorize_text(post['text'], mentions, hashtags)
else
values[:raw_text] = ""
values[:text] = "(no text)"
end
unless post['num_stars'].nil? || post['num_stars'] == 0
values[:is_starred] = true
values[:num_stars] = post['num_stars']
else
values[:is_starred] = false
values[:num_stars] = 0
end
if post['num_replies']
values[:num_replies] = post['num_replies']
else
values[:num_replies] = 0
end
if post['reply_to']
values[:is_reply] = true
values[:reply_to] = post['reply_to']
else
values[:is_reply] = false
values[:reply_to] = nil
end
if post['num_reposts']
values[:num_reposts] = post['num_reposts']
else
values[:num_reposts] = 0
end
posts[post['id'].to_i] = values
end
posts
end
def extract_links(post)
links = post['entities']['links'].map { |l| l['url'] }
unless post['annotations'].nil? || post['annotations'].empty?
post['annotations'].each do |ann|
if ann['type'] == "net.app.core.oembed"
if ann['value']['embeddable_url']
links << ann['value']['embeddable_url']
elsif ann['value']['url'] && Settings.options[:timeline][:channel_oembed] == true
links << ann['value']['url']
end
end
end
end
links.uniq
end
# Persists a list of links to disk as JSON, wrapped in an Ayadn metadata
# envelope (origin command, arguments, timestamp, user handle), then
# prints a confirmation. Note: sorts the given array in place.
def save_links(links, origin, args = "")
  links.sort!
  obj = {
    'meta' => {
      'type' => 'links',
      'origin' => origin,
      'args' => args,
      'created_at' => Time.now,
      'username' => Settings.config[:identity][:handle]
    },
    'data' => links
  }
  # File is named after the active account and the originating command.
  filename = "#{Settings.config[:identity][:handle]}_#{origin}_links.json"
  FileOps.save_links(obj, filename)
  @status.links_saved(filename)
end
# Returns the Array of hashtag names (Strings) attached to a post.
def extract_hashtags(post)
  post['entities']['hashtags'].map do |tag|
    tag['name']
  end
end
# Turns raw ADN channel objects into an Array of channel Structs ready
# for display. For each channel, resolves writer user ids to usernames,
# using the local users database as a cache and hitting the API (then
# recording the result) on a miss. Prints progress through Thor's
# status output and clears the terminal ("\e[H\e[2J") when done.
def build_channels(data, options = {})
  bucket = []
  # A single channel object may be passed bare; normalize to an Array.
  data = [data] unless data.is_a?(Array)
  if options[:channels]
    @thor.say_status :downloading, "list of channels and their users credentials", :yellow
    @thor.say_status :info, "it could take a while if there are many results and users", :cyan
  else
    @thor.say_status :downloading, "the channels and their users attributes (owners, writers, editors and readers)", :yellow
    @thor.say_status :info, "users are recorded in a database for later filtering and analyzing", :cyan
    @thor.say_status :info, "it could take a while if there are many results", :cyan
  end
  chan = Struct.new(:id, :num_messages, :subscribers, :type, :owner, :annotations, :readers, :editors, :writers, :you_subscribed, :unread, :recent_message_id, :recent_message)
  # Ids that failed to resolve once are remembered so we don't retry.
  no_user = {}
  @api = API.new
  data.each do |ch|
    unless ch['writers']['user_ids'].empty?
      @thor.say_status :parsing, "channel #{ch['id']}", :cyan
      usernames = []
      ch['writers']['user_ids'].each do |id|
        next if no_user[id]
        db = Databases.find_user_by_id(id)
        if db.nil?
          @thor.say_status :downloading, "user #{id}", :yellow
          resp = @api.get_user(id)
          if resp['meta']['code'] != 200
            @thor.say_status :error, "can't get user #{id}'s data, skipping", :red
            no_user[id] = true
            next
          end
          the_username = resp['data']['username']
          @thor.say_status :recording, "@#{the_username}", :yellow
          usernames << "@" + the_username
          Databases.add_to_users_db(id, the_username, resp['data']['name'])
        else
          the_username = "@#{db}"
          @thor.say_status :match, "#{the_username} is already in the database", :blue
          usernames << the_username
        end
      end
      # Append own handle unless it is already the sole writer.
      # NOTE(review): collected entries are "@"-prefixed while the
      # configured handle may not be — confirm this comparison can match.
      usernames << Settings.config[:identity][:handle] unless usernames.length == 1 && usernames.first == Settings.config[:identity][:handle]
      writers = usernames.join(", ")
    else
      writers = Settings.config[:identity][:handle]
    end
    if ch['has_unread']
      unread = "This channel has unread message(s)"
    else
      unread = "No unread messages"
    end
    bucket << chan.new(ch['id'], ch['counts']['messages'], ch['counts']['subscribers'], ch['type'], ch['owner'], ch['annotations'], ch['readers'], ch['editors'], writers, ch['you_subscribed'], unread, ch['recent_message_id'], ch['recent_message'])
  end
  puts "\e[H\e[2J"
  bucket
end
# Converts an ISO 8601 timestamp ("YYYY-MM-DDTHH:MM:SS...") into
# "YYYY-MM-DD HH:MM:SS" by plain slicing — no timezone conversion.
def parsed_time(string)
  date = string[0...10]
  clock = string[11...19]
  "#{date} #{clock}"
end
# When the response is a repost, announces the redirection, logs it and
# returns the original post's id; otherwise returns post_id untouched.
def get_original_id(post_id, resp)
  repost = resp['data']['repost_of']
  return post_id unless repost
  @status.redirecting
  original_id = repost['id']
  Errors.repost(post_id, original_id)
  original_id
end
# Resolves a channel alias to its numeric channel id.
# Numeric-looking input (String#is_integer?, an Ayadn core extension)
# passes through untouched; an unknown alias logs a warning, prints the
# "no alias" status and exits the process.
def get_channel_id_from_alias(channel_id)
  unless channel_id.is_integer?
    orig = channel_id
    channel_id = Databases.get_channel_id(orig)
    if channel_id.nil?
      Errors.warn("Alias '#{orig}' doesn't exist.")
      @status.no_alias
      exit
    end
  end
  channel_id
end
# Number of entries in the local post-index database (thin wrapper).
def length_of_index
  Databases.get_index_length
end
# Fetches the indexed post record for a timeline index number
# (thin wrapper around the index database).
def get_post_from_index id
  Databases.get_post_from_index id
end
# Post ids 1..200 are treated as timeline index numbers and resolved to
# the real post id via the index database; anything larger is assumed
# to already be a real id and is returned unchanged.
def get_real_post_id(post_id)
  numeric = post_id.to_i
  return post_id if numeric > 200
  get_post_from_index(numeric)['id']
end
# Convenience alias for add_arobase_if_missing (kept for callers that
# use the short name).
def add_arobase username
  add_arobase_if_missing(username)
end
# Expects an array of username(s); returns the first one as a String,
# "@"-prefixed unless it already is, with the literal "me" left alone.
def add_arobase_if_missing(username)
  candidate = username.first
  if candidate == "me"
    "me"
  elsif candidate.start_with?("@")
    candidate.dup
  else
    "@#{candidate}"
  end
end
# Strips a single leading "@" from each username, mutating the given
# array in place and returning it.
def remove_arobase_if_present(args)
  args.map! do |username|
    username.start_with?("@") ? username[1..-1] : username.dup
  end
  args
end
# Maps each username to its "@"-prefixed form; the literal 'me' is
# replaced by the active account's handle (via who_am_i).
def add_arobases_to_usernames(args)
  args.map do |username|
    next who_am_i if username == 'me'
    username.start_with?("@") ? username.dup : "@#{username}"
  end
end
# Prefixes each username with "@"; 'me' and already-prefixed names are
# returned as-is.
def at(usernames)
  usernames.map do |user|
    if user == 'me'
      'me'
    elsif user.start_with?('@')
      user
    else
      "@#{user}"
    end
  end
end
# Returns the handle (third column) of the active account row stored in
# the accounts SQLite database under ~/ayadn/.
def who_am_i
  Databases.active_account(Amalgalite::Database.new(Dir.home + "/ayadn/accounts.sqlite"))[2]
end
# Builds {user_id => [username, name, you_follow, follows_you]} from an
# ADN users response.
def extract_users(resp)
  resp['data'].each_with_object({}) do |item, acc|
    acc[item['id']] = [item['username'], item['name'], item['you_follow'], item['follows_you']]
  end
end
# Colorizes a post's text for terminal display: hashtags that belong to
# the post's `hashtags` list and handles that belong to its `mentions`
# list are wrapped in the user-configured colors. Works line by line,
# word by word; in compact mode, blank lines are dropped.
def colorize_text(text, mentions, hashtags)
  # Separator characters used to isolate an @fragment inside a word.
  reg_split = '[~:-;,?!\'&`^=+<>*%()\/"“”’°£$€.…]'
  reg_tag = '#([[:alpha:]0-9_]{1,255})(?![\w+])'
  reg_mention = '@([A-Za-z0-9_]{1,20})(?![\w+])'
  reg_sentence = '^.+[\r\n]*'
  handles, words, sentences = [], [], []
  mentions.each {|username| handles << "@#{username}"}
  hashtag_color = Settings.options[:colors][:hashtags]
  mention_color = Settings.options[:colors][:mentions]
  text.scan(/#{reg_sentence}/) do |sentence|
    sentence.split(' ').each do |word|
      word_chars = word.chars
      sanitized, word = [], []
      # Pad symbol glyphs (Unicode :Other_Symbol, e.g. emoji) with a
      # trailing space so they don't overlap the next character.
      word_chars.each do |ch|
        if UnicodeUtils.general_category(ch) == :Other_Symbol
          sanitized << "#{ch} "
        else
          sanitized << ch
        end
      end
      word = sanitized.join
      if word =~ /#\w+/
        # Word carries hashtag(s): colorize only if one of them really
        # is a tag of this post.
        slices = word.split('#')
        has_h = false
        slices.each do |tag|
          has_h = true if hashtags.include?(tag.downcase.scan(/[[:alpha:]0-9_]/).join(''))
        end
        if has_h == true
          if slices.length > 1
            words << slices.join('#').gsub(/#{reg_tag}/, '#\1'.color(hashtag_color))
          else
            words << word.gsub(/#{reg_tag}/, '#\1'.color(hashtag_color))
          end
        else
          words << word
        end
      elsif word =~ /@\w+/
        # Word carries mention(s): colorize only handles actually
        # mentioned in this post (def_str isolates the @fragment).
        enc = []
        warr = word.split(' ')
        warr.each do |w|
          @str = def_str(w, reg_split)
          if handles.include?(@str.downcase)
            if warr.length == 1
              enc << w.gsub(/#{reg_mention}/, '@\1'.color(mention_color))
            else
              enc << " #{w.gsub(/#{reg_mention}/, '@\1'.color(mention_color))}"
            end
          else
            enc << w
          end
        end
        words << enc.join
      else
        words << word
      end
    end
    sentences << words.join(' ')
    words = Array.new
  end
  # Compact mode strips the empty strings left by blank lines.
  if Settings.options[:timeline][:compact] == true
    without_linebreaks = sentences.keep_if { |s| s != "" }
    without_linebreaks.join("\n")
  else
    sentences.join("\n")
  end
end
# Gathers the unique links of every post in a stream response.
def links_from_posts(stream)
  stream['data'].flat_map { |post| extract_links(post) }.uniq
end
# Returns the "@"-prefixed usernames with the literal 'me' removed.
def all_but_me(usernames)
  at(usernames.reject { |user| user == 'me' })
end
# Converts a Unix epoch (Integer/Float seconds) into a Time instance.
# (Time.at already returns a Time; the former trailing `.to_time` was a
# no-op and has been removed.)
def self.epoch_to_date(epoch)
  Time.at(epoch)
end
private
# Extracts the "@mention" fragment of a word by splitting on separator
# characters (reg_split); returns the whole word when no fragment is
# found.
# NOTE(review): @str is an instance variable, so a value found during a
# previous call leaks into later calls — the `return word if @str.nil?`
# guard only fires before the first ever match. Confirm this statefulness
# is intentional before refactoring.
def def_str(word, reg_split)
  splitted = word.split(/#{reg_split}/) if word =~ /#{reg_split}/
  if splitted
    splitted.each {|d| @str = d if d =~ /@\w+/}
    return word if @str.nil?
    @str
  else
    word
  end
end
# Builds an empty Terminal::Table constrained to the user-configured
# table width.
def init_table
  Terminal::Table.new do |t|
    t.style = { :width => Settings.options[:formats][:table][:width] }
  end
end
# Converts {id => [username, name, you_follow, follows_you]} into an
# Array of user Hashes, reversed when the list display format asks
# for it.
def build_users_array(list)
  users = list.map do |id, fields|
    {
      :username => fields[0],
      :name => fields[1],
      :you_follow => fields[2],
      :follows_you => fields[3],
      :id => id
    }
  end
  Settings.options[:formats][:list][:reverse] ? users.reverse : users
end
# Scans a post's annotations for checkin/location payloads
# (net.app.core.checkin or net.app.ohai.location) and normalizes the
# last one found into a Hash of place attributes. Optional fields
# (categories, factual id, coordinates, title, region) are added only
# when present. Returns [checkins_hash, has_checkins].
def extract_checkins(post)
  found = false
  checkins = {}
  annotations = post['annotations']
  unless annotations.nil? || annotations.empty?
    annotations.each do |ann|
      next unless ["net.app.core.checkin", "net.app.ohai.location"].include?(ann['type'])
      found = true
      value = ann['value']
      checkins = {
        name: value['name'],
        address: value['address'],
        address_extended: value['address_extended'],
        locality: value['locality'],
        postcode: value['postcode'],
        country_code: value['country_code'],
        website: value['website'],
        telephone: value['telephone']
      }
      categories = value['categories']
      unless categories.nil?
        checkins[:categories] = categories[0]['labels'].join(", ") unless categories[0].nil?
      end
      checkins[:factual_id] = value['factual_id'] unless value['factual_id'].nil?
      unless value['longitude'].nil?
        checkins[:longitude] = value['longitude']
        checkins[:latitude] = value['latitude']
      end
      checkins[:title] = value['title'] unless value['title'].nil?
      checkins[:region] = value['region'] unless value['region'].nil?
    end
  end
  return checkins, found
end
end
end
Add users' posts counts to users lists
# encoding: utf-8
module Ayadn
class Workers
# Sets up the Thor color shell used for console status output and the
# Ayadn Status helper for canned user-facing messages.
def initialize
  @thor = Thor::Shell::Color.new
  @status = Status.new
end
# Renders the user's channel aliases as a borderless two-column table
# (channel id, alias).
def build_aliases_list(list)
  table = init_table
  table.title = "List of your channel aliases".color(:cyan) + "".color(:white)
  table.style = {border_x: ' ', border_i: ' ', border_y: ' '}
  list.each {|obj| table << [obj[0].to_s.color(:green), obj[1].color(:red)]}
  table
end
# Renders the blacklist as a borderless table; entries are [name, type]
# pairs, sorted in place before display.
def build_blacklist_list(list)
  table = init_table
  table.title = "Your blacklist".color(:cyan) + "".color(:white)
  table.style = {border_x: ' ', border_i: ' ', border_y: ' '}
  table.headings = [ 'Name', 'Type' ]
  list.sort!
  list.each {|obj| table << ["#{obj[1].capitalize}".color(:green), "#{obj[0]}".color(:red)]}
  table
end
# Builds the "who reposted post X" view: a titled, borderless table plus
# the normalized users array. Returns [users_list, table]; the rows are
# added later by the caller (see build_users_list).
def build_reposted_list(list, target)
  table = init_table
  table.title = "List of users who reposted post ".color(:cyan) + "#{target}".color(:red) + "".color(:white)
  users_list = []
  list.each do |obj|
    obj['name'].nil? ? name = "" : name = obj['name']
    users_list << {:username => obj['username'], :name => name, :you_follow => obj['you_follow'], :follows_you => obj['follows_you'], :id => obj['id']}
  end
  table.style = {border_x: ' ', border_i: ' ', border_y: ' '}
  return users_list, table
end
# Builds the "who starred post X" view: a titled, borderless table plus
# the normalized users array. Returns [users_list, table]; mirrors
# build_reposted_list except for the title.
def build_starred_list(list, target)
  table = init_table
  table.title = "List of users who starred post ".color(:cyan) + "#{target}".color(:red) + "".color(:white)
  users_list = []
  list.each do |obj|
    obj['name'].nil? ? name = "" : name = obj['name']
    users_list << {:username => obj['username'], :name => name, :you_follow => obj['you_follow'], :follows_you => obj['follows_you'], :id => obj['id']}
  end
  table.style = {border_x: ' ', border_i: ' ', border_y: ' '}
  return users_list, table
end
# Renders the list of accounts `target` follows ("me" means the active
# account). `list` is an ayadn-format users hash; rows are produced by
# build_users_list. Returns the populated table.
def build_followings_list(list, target, options = {}) #takes a hash of users with ayadn format
  table = init_table
  table.title = if target == "me"
    "List of users you're following".color(:cyan) + "".color(:white)
  else
    "List of users ".color(:cyan) + "#{target}".color(:red) + " is following ".color(:cyan) + "".color(:white)
  end
  table.style = {border_x: ' ', border_i: ' ', border_y: ' '}
  users_list = build_users_array(list)
  build_users_list(users_list, table, options)
end
# Renders the list of accounts following `target` ("me" means the
# active account). Rows are produced by build_users_list.
def build_followers_list(list, target, options = {})
  table = init_table
  table.title = if target == "me"
    "List of your followers".color(:cyan) + "".color(:white)
  else
    "List of users following ".color(:cyan) + "#{target}".color(:red) + "".color(:white)
  end
  table.style = {border_x: ' ', border_i: ' ', border_y: ' '}
  build_users_list(build_users_array(list), table, options)
end
# Renders the list of users the active account muted.
def build_muted_list(list, options = {})
  table = init_table
  table.title = "List of users you muted".color(:cyan) + "".color(:white)
  table.style = {border_x: ' ', border_i: ' ', border_y: ' '}
  build_users_list(build_users_array(list), table, options)
end
# Renders the list of users the active account blocked.
def build_blocked_list(list, options = {})
  table = init_table
  table.title = "List of users you blocked".color(:cyan) + "".color(:white)
  table.style = {border_x: ' ', border_i: ' ', border_y: ' '}
  build_users_list(build_users_array(list), table, options)
end
# Fills `table` with one row per user: username, name, posts count and
# posts/day (fetched from the NiceRank service; 'unknown' when the
# service returns nothing, 'ignored' when it reports -1).
# options: :username / :name sort the input, :posts_day sorts the rows
# numerically, :reverse flips the final order.
def build_users_list(list, table, options = {})
  # NOTE(review): `users` is computed but never used below — dead code?
  users = at(list.map {|obj| obj[:username]})
  ids = list.map {|obj| obj[:id].to_i}
  ranks = NiceRank.new.from_ids(ids)
  indexed_ranks = {}
  ranks.each do |r|
    if r.empty?
      # One empty result disables ranks for the whole table.
      indexed_ranks = false
      break
    else
      indexed_ranks[r['user_id']] = r
    end
  end
  table << ['USERNAME'.color(:red), 'NAME'.color(:red), 'POSTS'.color(:red), 'POSTS/DAY'.color(:red)]
  table << :separator
  arr = []
  if options[:username]
    list.sort_by! { |obj| obj[:username] }
  elsif options[:name]
    list.sort_by! { |obj| obj[:name].downcase }
  end
  list.each do |obj|
    obj[:name] = "(no name)" if obj[:name].nil?
    unless indexed_ranks == false
      details = indexed_ranks[obj[:id].to_i]
      if details['user']['posts_day'] == -1
        posts_day = 'ignored'
      else
        posts_day = details['user']['posts_day'].round(2).to_s
      end
    else
      posts_day = 'unknown'
    end
    # Truncate long usernames/names to keep the table narrow.
    obj[:username].length > 23 ? username = "#{obj[:username][0..20]}..." : username = obj[:username]
    obj[:name].length > 23 ? name = "#{obj[:name][0..20]}..." : name = obj[:name]
    arr << [ "@#{username} ".color(Settings.options[:colors][:username]), "#{name}", obj[:posts], posts_day ]
  end
  if options[:posts_day]
    # NOTE(review): rows are [username, name, posts, posts_day]; index 2
    # is the total posts count, not posts/day — confirm whether this
    # should sort on obj[3].to_f instead.
    arr.sort_by! { |obj| obj[2].to_f }
  end
  if options[:reverse]
    arr.reverse!
  end
  arr.each_with_index do |obj, index|
    table << arr[index]
    if index + 1 != arr.length && Settings.options[:timeline][:compact] == false
      table << :separator
    end
  end
  table
end
# Builds a hash of hashes: each value is a normalized post keyed by its
# post id. Posts matching an active blacklist entry (client, user,
# hashtag or mention) are skipped unless the :force option is set.
# niceranks is an optional {user_id => {rank:, is_human:, real_person:}}
# map merged into each post.
def build_posts(data, niceranks = {})
  # skip objects in blacklist unless force
  posts = {}
  data.each.with_index(1) do |post, index|
    # Client (posting app) blacklist.
    unless Settings.options[:force]
      if Settings.options[:blacklist][:active] == true
        if Databases.is_in_blacklist?('client', post['source']['name'].downcase)
          Debug.skipped({source: post['source']['name']})
          next
        end
      end
    end
    # Author blacklist.
    unless Settings.options[:force]
      if Settings.options[:blacklist][:active] == true
        if Databases.is_in_blacklist?('user', post['user']['username'].downcase)
          Debug.skipped({user: post['user']['username']})
          next
        end
      end
    end
    hashtags = extract_hashtags(post)
    @skip = false
    # Hashtag blacklist.
    unless Settings.options[:force]
      if Settings.options[:blacklist][:active] == true
        hashtags.each do |h|
          if Databases.is_in_blacklist?('hashtag', h.downcase)
            @skip = true
            Debug.skipped({hashtag: h})
            break
          end
        end
      end
    end
    next if @skip
    mentions= []
    post['entities']['mentions'].each { |m| mentions << m['name'] }
    # Mention blacklist.
    unless Settings.options[:force]
      if Settings.options[:blacklist][:active] == true
        mentions.each do |m|
          if Databases.is_in_blacklist?('mention', m.downcase)
            @skip = true
            Debug.skipped({mention: m})
            break
          end
        end
      end
    end
    next if @skip
    # create custom objects from ADN response
    if niceranks[post['user']['id'].to_i]
      rank = niceranks[post['user']['id'].to_i][:rank]
      is_human = niceranks[post['user']['id'].to_i][:is_human]
      real_person = niceranks[post['user']['id'].to_i][:real_person]
    else
      rank = false
      is_human = 'unknown'
      real_person = 'unknown'
    end
    if post['user'].has_key?('name')
      name = post['user']['name'].to_s.force_encoding("UTF-8")
    else
      name = "(no name)"
    end
    source = post['source']['name'].to_s.force_encoding("UTF-8")
    values = {
      count: index,
      id: post['id'].to_i,
      name: name,
      thread_id: post['thread_id'],
      username: post['user']['username'],
      user_id: post['user']['id'].to_i,
      nicerank: rank,
      is_human: is_human,
      real_person: real_person,
      handle: "@#{post['user']['username']}",
      type: post['user']['type'],
      date: parsed_time(post['created_at']),
      you_starred: post['you_starred'],
      source_name: source,
      source_link: post['source']['link'],
      canonical_url: post['canonical_url'],
      tags: hashtags,
      links: extract_links(post),
      mentions: mentions,
      directed_to: mentions.first || false
    }
    values[:checkins], values[:has_checkins] = extract_checkins(post)
    if post['repost_of']
      values[:is_repost] = true
      values[:repost_of] = post['repost_of']['id']
      values[:original_poster] = post['repost_of']['user']['username']
    else
      values[:is_repost] = false
      values[:repost_of] = nil
      values[:original_poster] = post['user']['username']
    end
    unless post['text'].nil?
      values[:raw_text] = post['text']
      values[:text] = colorize_text(post['text'], mentions, hashtags)
    else
      values[:raw_text] = ""
      values[:text] = "(no text)"
    end
    unless post['num_stars'].nil? || post['num_stars'] == 0
      values[:is_starred] = true
      values[:num_stars] = post['num_stars']
    else
      values[:is_starred] = false
      values[:num_stars] = 0
    end
    if post['num_replies']
      values[:num_replies] = post['num_replies']
    else
      values[:num_replies] = 0
    end
    if post['reply_to']
      values[:is_reply] = true
      values[:reply_to] = post['reply_to']
    else
      values[:is_reply] = false
      values[:reply_to] = nil
    end
    if post['num_reposts']
      values[:num_reposts] = post['num_reposts']
    else
      values[:num_reposts] = 0
    end
    posts[post['id'].to_i] = values
  end
  posts
end
def extract_links(post)
links = post['entities']['links'].map { |l| l['url'] }
unless post['annotations'].nil? || post['annotations'].empty?
post['annotations'].each do |ann|
if ann['type'] == "net.app.core.oembed"
if ann['value']['embeddable_url']
links << ann['value']['embeddable_url']
elsif ann['value']['url'] && Settings.options[:timeline][:channel_oembed] == true
links << ann['value']['url']
end
end
end
end
links.uniq
end
def save_links(links, origin, args = "")
links.sort!
obj = {
'meta' => {
'type' => 'links',
'origin' => origin,
'args' => args,
'created_at' => Time.now,
'username' => Settings.config[:identity][:handle]
},
'data' => links
}
filename = "#{Settings.config[:identity][:handle]}_#{origin}_links.json"
FileOps.save_links(obj, filename)
@status.links_saved(filename)
end
def extract_hashtags(post)
post['entities']['hashtags'].map { |h| h['name'] }
end
def build_channels(data, options = {})
bucket = []
data = [data] unless data.is_a?(Array)
if options[:channels]
@thor.say_status :downloading, "list of channels and their users credentials", :yellow
@thor.say_status :info, "it could take a while if there are many results and users", :cyan
else
@thor.say_status :downloading, "the channels and their users attributes (owners, writers, editors and readers)", :yellow
@thor.say_status :info, "users are recorded in a database for later filtering and analyzing", :cyan
@thor.say_status :info, "it could take a while if there are many results", :cyan
end
chan = Struct.new(:id, :num_messages, :subscribers, :type, :owner, :annotations, :readers, :editors, :writers, :you_subscribed, :unread, :recent_message_id, :recent_message)
no_user = {}
@api = API.new
data.each do |ch|
unless ch['writers']['user_ids'].empty?
@thor.say_status :parsing, "channel #{ch['id']}", :cyan
usernames = []
ch['writers']['user_ids'].each do |id|
next if no_user[id]
db = Databases.find_user_by_id(id)
if db.nil?
@thor.say_status :downloading, "user #{id}", :yellow
resp = @api.get_user(id)
if resp['meta']['code'] != 200
@thor.say_status :error, "can't get user #{id}'s data, skipping", :red
no_user[id] = true
next
end
the_username = resp['data']['username']
@thor.say_status :recording, "@#{the_username}", :yellow
usernames << "@" + the_username
Databases.add_to_users_db(id, the_username, resp['data']['name'])
else
the_username = "@#{db}"
@thor.say_status :match, "#{the_username} is already in the database", :blue
usernames << the_username
end
end
usernames << Settings.config[:identity][:handle] unless usernames.length == 1 && usernames.first == Settings.config[:identity][:handle]
writers = usernames.join(", ")
else
writers = Settings.config[:identity][:handle]
end
if ch['has_unread']
unread = "This channel has unread message(s)"
else
unread = "No unread messages"
end
bucket << chan.new(ch['id'], ch['counts']['messages'], ch['counts']['subscribers'], ch['type'], ch['owner'], ch['annotations'], ch['readers'], ch['editors'], writers, ch['you_subscribed'], unread, ch['recent_message_id'], ch['recent_message'])
end
puts "\e[H\e[2J"
bucket
end
def parsed_time(string)
"#{string[0...10]} #{string[11...19]}"
end
def get_original_id(post_id, resp)
if resp['data']['repost_of']
@status.redirecting
id = resp['data']['repost_of']['id']
Errors.repost(post_id, id)
return id
else
return post_id
end
end
def get_channel_id_from_alias(channel_id)
unless channel_id.is_integer?
orig = channel_id
channel_id = Databases.get_channel_id(orig)
if channel_id.nil?
Errors.warn("Alias '#{orig}' doesn't exist.")
@status.no_alias
exit
end
end
channel_id
end
def length_of_index
Databases.get_index_length
end
def get_post_from_index id
Databases.get_post_from_index id
end
def get_real_post_id post_id
id = post_id.to_i
if id <= 200
resp = get_post_from_index(id)
post_id = resp['id']
end
post_id
end
def add_arobase username
add_arobase_if_missing(username)
end
def add_arobase_if_missing(username) # expects an array of username(s), works on the first one and outputs a string
unless username.first == "me"
username = username.first.chars
username.unshift("@") unless username.first == "@"
else
username = "me".chars
end
username.join
end
def remove_arobase_if_present args
args.map! do |username|
temp = username.chars
temp.shift if temp.first == "@"
temp.join
end
args
end
def add_arobases_to_usernames args
args.map do |username|
if username == 'me'
who_am_i
else
temp = username.chars
temp.unshift("@") unless temp.first == "@"
temp.join
end
end
end
def at usernames
usernames.map do |user|
if user == 'me'
'me'
elsif user[0] == '@'
user
else
"@#{user}"
end
end
end
def who_am_i
Databases.active_account(Amalgalite::Database.new(Dir.home + "/ayadn/accounts.sqlite"))[2]
end
# Builds {user_id => [username, name, you_follow, follows_you, posts_count]}
# from an ADN users response.
def extract_users(resp)
  resp['data'].each_with_object({}) do |item, acc|
    acc[item['id']] = [
      item['username'],
      item['name'],
      item['you_follow'],
      item['follows_you'],
      item['counts']['posts']
    ]
  end
end
def colorize_text(text, mentions, hashtags)
reg_split = '[~:-;,?!\'&`^=+<>*%()\/"“”’°£$€.…]'
reg_tag = '#([[:alpha:]0-9_]{1,255})(?![\w+])'
reg_mention = '@([A-Za-z0-9_]{1,20})(?![\w+])'
reg_sentence = '^.+[\r\n]*'
handles, words, sentences = [], [], []
mentions.each {|username| handles << "@#{username}"}
hashtag_color = Settings.options[:colors][:hashtags]
mention_color = Settings.options[:colors][:mentions]
text.scan(/#{reg_sentence}/) do |sentence|
sentence.split(' ').each do |word|
word_chars = word.chars
sanitized, word = [], []
word_chars.each do |ch|
if UnicodeUtils.general_category(ch) == :Other_Symbol
sanitized << "#{ch} "
else
sanitized << ch
end
end
word = sanitized.join
if word =~ /#\w+/
slices = word.split('#')
has_h = false
slices.each do |tag|
has_h = true if hashtags.include?(tag.downcase.scan(/[[:alpha:]0-9_]/).join(''))
end
if has_h == true
if slices.length > 1
words << slices.join('#').gsub(/#{reg_tag}/, '#\1'.color(hashtag_color))
else
words << word.gsub(/#{reg_tag}/, '#\1'.color(hashtag_color))
end
else
words << word
end
elsif word =~ /@\w+/
enc = []
warr = word.split(' ')
warr.each do |w|
@str = def_str(w, reg_split)
if handles.include?(@str.downcase)
if warr.length == 1
enc << w.gsub(/#{reg_mention}/, '@\1'.color(mention_color))
else
enc << " #{w.gsub(/#{reg_mention}/, '@\1'.color(mention_color))}"
end
else
enc << w
end
end
words << enc.join
else
words << word
end
end
sentences << words.join(' ')
words = Array.new
end
if Settings.options[:timeline][:compact] == true
without_linebreaks = sentences.keep_if { |s| s != "" }
without_linebreaks.join("\n")
else
sentences.join("\n")
end
end
def links_from_posts(stream)
links = []
stream['data'].each do |post|
extract_links(post).each {|l| links << l}
end
links.uniq
end
def all_but_me usernames
arr = usernames.select {|user| user != 'me'}
at(arr)
end
def self.epoch_to_date(epoch)
Time.at(epoch).to_time
end
private
def def_str(word, reg_split)
splitted = word.split(/#{reg_split}/) if word =~ /#{reg_split}/
if splitted
splitted.each {|d| @str = d if d =~ /@\w+/}
return word if @str.nil?
@str
else
word
end
end
def init_table
Terminal::Table.new do |t|
t.style = { :width => Settings.options[:formats][:table][:width] }
end
end
# Converts {id => [username, name, you_follow, follows_you, posts]} into
# an Array of user Hashes.
def build_users_array(list)
  list.map do |id, fields|
    {
      :username => fields[0],
      :name => fields[1],
      :you_follow => fields[2],
      :follows_you => fields[3],
      :id => id,
      :posts => fields[4]
    }
  end
end
def extract_checkins(post)
has_checkins = false
checkins = {}
unless post['annotations'].nil? || post['annotations'].empty?
post['annotations'].each do |obj|
case obj['type']
when "net.app.core.checkin", "net.app.ohai.location"
has_checkins = true
checkins = {
name: obj['value']['name'],
address: obj['value']['address'],
address_extended: obj['value']['address_extended'],
locality: obj['value']['locality'],
postcode: obj['value']['postcode'],
country_code: obj['value']['country_code'],
website: obj['value']['website'],
telephone: obj['value']['telephone']
}
unless obj['value']['categories'].nil?
unless obj['value']['categories'][0].nil?
checkins[:categories] = obj['value']['categories'][0]['labels'].join(", ")
end
end
unless obj['value']['factual_id'].nil?
checkins[:factual_id] = obj['value']['factual_id']
end
unless obj['value']['longitude'].nil?
checkins[:longitude] = obj['value']['longitude']
checkins[:latitude] = obj['value']['latitude']
end
unless obj['value']['title'].nil?
checkins[:title] = obj['value']['title']
end
unless obj['value']['region'].nil?
checkins[:region] = obj['value']['region']
end
end
end
end
return checkins, has_checkins
end
end
end
|
require 'json'
require 'source_map/offset'
require 'source_map/vlq'
module SourceMap
# A single source-map entry: where a token sits in the generated file,
# where it came from in the original source, and (optionally) its
# symbol name.
Mapping = Struct.new(:source, :generated, :original, :name) do
  # "genLine:genCol->origLine:origCol"
  def to_s
    gen = "#{generated.line}:#{generated.column}"
    orig = "#{original.line}:#{original.column}"
    "#{gen}->#{orig}"
  end
  alias_method :inspect, :to_s
end
class Map
include Enumerable
def self.from_json(json)
from_hash JSON.parse(json)
end
def self.from_hash(hash)
str = hash['mappings']
sources = hash['sources']
names = hash['names']
mappings = decode_vlq_mappings(str, sources, names)
new(mappings, hash['file'])
end
# Internal: Decode VLQ mappings and match up sources and symbol names.
#
# str - VLQ string from 'mappings' attribute
# sources - Array of Strings from 'sources' attribute
# names - Array of Strings from 'names' attribute
#
# Returns an Array of Mappings.
def self.decode_vlq_mappings(str, sources = [], names = [])
mappings = []
source_id = 0
original_line = 1
original_column = 0
name_id = 0
VLQ.decode_mappings(str).each_with_index do |group, index|
generated_column = 0
generated_line = index + 1
group.each do |segment|
generated_column += segment[0]
generated = Offset.new(generated_line, generated_column)
if segment.size >= 4
source_id += segment[1]
original_line += segment[2]
original_column += segment[3]
source = sources[source_id]
original = Offset.new(original_line, original_column)
else
# TODO: Research this case
next
end
if segment[4]
name_id += segment[4]
name = names[name_id]
end
mappings << Mapping.new(source, generated, original, name)
end
end
mappings
end
def initialize(mappings = [], filename = nil)
@mappings, @filename = mappings, filename
end
attr_reader :filename
def line_count
@line_count ||= @mappings.any? ? @mappings.last.generated.line : 0
end
def size
@mappings.size
end
def [](i)
@mappings[i]
end
def each(&block)
@mappings.each(&block)
end
def to_s
@string ||= build_vlq_string
end
def sources
@sources ||= @mappings.map(&:source).uniq.compact
end
def names
@names ||= @mappings.map(&:name).uniq.compact
end
def +(other)
mappings = @mappings.dup
offset = line_count + 1
other.each do |m|
mappings << Mapping.new(
m.source, m.generated + offset,
m.original, m.name
)
end
self.class.new(mappings)
end
def |(other)
mappings = []
other.each do |m|
om = bsearch(m.original)
next unless om
mappings << Mapping.new(
om.source, m.generated,
om.original, om.name
)
end
self.class.new(mappings, other.filename)
end
def bsearch(offset, from = 0, to = size - 1)
mid = (from + to) / 2
# We haven't found a match
if from > to
return from < 1 ? nil : self[from-1]
end
# We found an exact match
if offset == self[mid].generated
self[mid]
# We need to filter more
elsif offset < self[mid].generated
bsearch(offset, from, mid - 1)
elsif offset > self[mid].generated
bsearch(offset, mid + 1, to)
end
end
def as_json
{
"version" => 3,
"file" => filename,
"lineCount" => line_count,
"mappings" => to_s,
"sources" => sources,
"names" => names
}
end
protected
def build_vlq_string
source_id = 0
source_line = 1
source_column = 0
name_id = 0
by_lines = @mappings.group_by { |m| m.generated.line }
sources_index = Hash[sources.each_with_index.to_a]
names_index = Hash[names.each_with_index.to_a]
ary = (1..by_lines.keys.max).map do |line|
generated_column = 0
(by_lines[line] || []).map do |mapping|
group = []
group << mapping.generated.column - generated_column
group << sources_index[mapping.source] - source_id
group << mapping.original.line - source_line
group << mapping.original.column - source_column
group << names_index[mapping.name] - name_id if mapping.name
generated_column = mapping.generated.column
source_id = sources_index[mapping.source]
source_line = mapping.original.line
source_column = mapping.original.column
name_id = names_index[mapping.name] if mapping.name
group
end
end
VLQ.encode_mappings(ary)
end
end
end
Add name to mapping inspect
require 'json'
require 'source_map/offset'
require 'source_map/vlq'
module SourceMap
# A single source-map entry: where a token sits in the generated file,
# where it came from in the original source, and (optionally) its
# symbol name.
Mapping = Struct.new(:source, :generated, :original, :name) do
  # "genLine:genCol->origLine:origCol[#name]" — the name suffix only
  # appears when the entry carries one.
  def to_s
    parts = ["#{generated.line}:#{generated.column}"]
    parts << "->#{original.line}:#{original.column}"
    parts << "##{name}" if name
    parts.join
  end
  alias_method :inspect, :to_s
end
# An ordered collection of Mappings for one generated file, convertible
# to and from the JSON source map v3 format.
class Map
  include Enumerable

  # Parses a JSON source map document into a Map.
  def self.from_json(json)
    from_hash JSON.parse(json)
  end

  # Builds a Map from an already-parsed source map Hash
  # ('mappings', 'sources', 'names' and 'file' keys).
  def self.from_hash(hash)
    str = hash['mappings']
    sources = hash['sources']
    names = hash['names']
    mappings = decode_vlq_mappings(str, sources, names)
    new(mappings, hash['file'])
  end

  # Internal: Decode VLQ mappings and match up sources and symbol names.
  #
  # str     - VLQ string from 'mappings' attribute
  # sources - Array of Strings from 'sources' attribute
  # names   - Array of Strings from 'names' attribute
  #
  # Returns an Array of Mappings.
  def self.decode_vlq_mappings(str, sources = [], names = [])
    mappings = []
    # All fields are delta-encoded; these accumulators carry state
    # across segments (and, except generated_column, across lines).
    source_id = 0
    original_line = 1
    original_column = 0
    name_id = 0
    VLQ.decode_mappings(str).each_with_index do |group, index|
      generated_column = 0
      generated_line = index + 1
      group.each do |segment|
        generated_column += segment[0]
        generated = Offset.new(generated_line, generated_column)
        if segment.size >= 4
          source_id += segment[1]
          original_line += segment[2]
          original_column += segment[3]
          source = sources[source_id]
          original = Offset.new(original_line, original_column)
        else
          # TODO: Research this case
          next
        end
        if segment[4]
          name_id += segment[4]
          name = names[name_id]
        end
        mappings << Mapping.new(source, generated, original, name)
      end
    end
    mappings
  end

  def initialize(mappings = [], filename = nil)
    @mappings, @filename = mappings, filename
  end

  # Name of the generated file this map describes (may be nil).
  attr_reader :filename

  # Line number of the last generated mapping; 0 for an empty map.
  def line_count
    @line_count ||= @mappings.any? ? @mappings.last.generated.line : 0
  end

  def size
    @mappings.size
  end

  def [](i)
    @mappings[i]
  end

  def each(&block)
    @mappings.each(&block)
  end

  # The VLQ-encoded 'mappings' string (memoized).
  def to_s
    @string ||= build_vlq_string
  end

  def sources
    @sources ||= @mappings.map(&:source).uniq.compact
  end

  def names
    @names ||= @mappings.map(&:name).uniq.compact
  end

  # Concatenation: appends other's mappings below this map's lines.
  def +(other)
    mappings = @mappings.dup
    offset = line_count + 1
    other.each do |m|
      mappings << Mapping.new(
        m.source, m.generated + offset,
        m.original, m.name
      )
    end
    self.class.new(mappings)
  end

  # Composition: rewrites other's mappings through this map, resolving
  # each of other's original offsets against this map's generated side.
  def |(other)
    mappings = []
    other.each do |m|
      om = bsearch(m.original)
      next unless om
      mappings << Mapping.new(
        om.source, m.generated,
        om.original, om.name
      )
    end
    self.class.new(mappings, other.filename)
  end

  # Binary search over generated offsets; on a miss returns the closest
  # mapping at or before `offset` (nil before the first mapping).
  def bsearch(offset, from = 0, to = size - 1)
    mid = (from + to) / 2
    # We haven't found a match
    if from > to
      return from < 1 ? nil : self[from-1]
    end
    # We found an exact match
    if offset == self[mid].generated
      self[mid]
    # We need to filter more
    elsif offset < self[mid].generated
      bsearch(offset, from, mid - 1)
    elsif offset > self[mid].generated
      bsearch(offset, mid + 1, to)
    end
  end

  # Source map v3 document as a plain Hash.
  def as_json
    {
      "version" => 3,
      "file" => filename,
      "lineCount" => line_count,
      "mappings" => to_s,
      "sources" => sources,
      "names" => names
    }
  end

  protected

  # Re-encodes the mappings into the delta/VLQ 'mappings' string:
  # one group per generated line, one segment per mapping, every field
  # relative to the previous occurrence.
  def build_vlq_string
    source_id = 0
    source_line = 1
    source_column = 0
    name_id = 0
    by_lines = @mappings.group_by { |m| m.generated.line }
    sources_index = Hash[sources.each_with_index.to_a]
    names_index = Hash[names.each_with_index.to_a]
    ary = (1..by_lines.keys.max).map do |line|
      generated_column = 0
      (by_lines[line] || []).map do |mapping|
        group = []
        group << mapping.generated.column - generated_column
        group << sources_index[mapping.source] - source_id
        group << mapping.original.line - source_line
        group << mapping.original.column - source_column
        group << names_index[mapping.name] - name_id if mapping.name
        generated_column = mapping.generated.column
        source_id = sources_index[mapping.source]
        source_line = mapping.original.line
        source_column = mapping.original.column
        name_id = names_index[mapping.name] if mapping.name
        group
      end
    end
    VLQ.encode_mappings(ary)
  end
end
end
|
puts __FILE__ if defined?(DEBUG)
require 'json'
require 'rake'
require_relative('environment.rb')
require_relative('project.rb')
# Registry of development projects, keyed by name, persisted as pretty
# JSON in the per-user data directory.
class Projects < Hash
  # Path of the JSON file this registry was loaded from / saves to.
  attr_accessor :filename

  def initialize
    @filename=''
  end

  # Normalizes raw deserialized values (Strings or plain Hashes) into
  # Project instances and stamps each with its key as :name.
  def update
    self.each{|k,v|
      self[k]=Project.new(v) if(v.is_a?(String))
      self[k]=Project.new(v) if(!v.is_a?(Project) && v.is_a?(Hash))
      self[k][:name]=k
    }
  end

  # Writes the registry as pretty JSON; a non-empty filename argument is
  # remembered for later saves. Does nothing when no filename is known.
  def save filename=''
    @filename=filename if !filename.nil? && filename.length > 0
    File.open(@filename,'w'){|f|f.write(JSON.pretty_generate(self))} if @filename.length > 0
  end

  # Loads a JSON registry file and normalizes its entries (see #update).
  def open filename=''
    @filename=filename if filename.length > 0
    JSON.parse(IO.read(@filename)).each{|k,v| self[k]=v}
    update
  end

  # Location of the user's PROJECTS.json, creating the data directory on
  # first use. (File.exist? replaces File.exists?, which was deprecated
  # and removed in Ruby 3.2.)
  def self.user_projects_filename
    FileUtils.mkdir("#{Environment.dev_root}/data") if(!File.exist?("#{Environment.dev_root}/data"))
    "#{Environment.dev_root}/data/PROJECTS.json"
  end

  # Builds a Project for the current working copy (Git origin, falling
  # back to the SVN URL) when rake runs from under wrk/; registers it in
  # the PROJECTS global when that constant exists.
  # NOTE(review): relies on Git and Svn, which this file does not
  # require — confirm the caller loads them.
  def self.current
    project=nil
    url=Git.remote_origin
    url=Svn.url if url.length==0
    if(Rake.application.original_dir.include?('/wrk/') &&
       url.length > 0)
      project=Project.new(url)
      name=Rake.application.original_dir.gsub("#{Environment.dev_root}/wrk/",'')
      project[:name] = name if(name.length>0 && !name.include?(Environment.dev_root))
      if(defined?(PROJECTS))
        PROJECTS[name]=project if(!PROJECTS.has_key?(name))
        project.each{|k,v|PROJECTS[name][k]=v}
        PROJECTS.save
      else
        project[:name]=name
      end
    end
    project
  end

  # Runs pull on every project that supports it.
  def pull
    self.each{|k,v| v.pull if v.respond_to?("pull".to_sym)}
  end

  # Runs rake on every project that supports it.
  def rake
    self.each{|k,v| v.rake if v.respond_to?("rake".to_sym)}
  end
end
# Global project registry: load the saved registry when it exists,
# otherwise write out a fresh empty one so the file exists next time.
# (File.exist? — File.exists? was removed in Ruby 3.2.)
PROJECTS = Projects.new
if File.exist?(Projects.user_projects_filename)
  PROJECTS.open(Projects.user_projects_filename)
else
  PROJECTS.save(Projects.user_projects_filename)
end
'all'
puts __FILE__ if defined?(DEBUG)
require 'json'
require 'rake'
require_relative('environment.rb')
require_relative('project.rb')
require_relative('../apps/git.rb')
require_relative('../apps/svn.rb')
# In-memory registry of known projects, keyed by name and persisted as
# pretty-printed JSON. Raw values (String URLs or plain Hashes) are
# coerced to Project instances by #update.
class Projects < Hash
  attr_accessor :filename

  def initialize
    @filename = ''
  end

  # Coerce raw values (e.g. freshly parsed JSON) into Project
  # instances and stamp each entry with its key as :name.
  # NOTE: intentionally shadows Hash#update (merge!), as before.
  def update
    each do |key, value|
      self[key] = Project.new(value) if value.is_a?(String)
      self[key] = Project.new(value) if !value.is_a?(Project) && value.is_a?(Hash)
      self[key][:name] = key
    end
  end

  # Persist self as pretty JSON. A non-empty +filename+ argument becomes
  # the new default; nothing is written while @filename is empty.
  def save(filename = '')
    @filename = filename if !filename.nil? && filename.length > 0
    File.open(@filename, 'w') { |f| f.write(JSON.pretty_generate(self)) } if @filename.length > 0
  end

  # Load entries from a JSON file (remembering a non-empty +filename+)
  # and normalize them via #update.
  def open(filename = '')
    @filename = filename if filename.length > 0
    JSON.parse(IO.read(@filename)).each { |k, v| self[k] = v }
    update
  end

  # Default persistence location, creating <dev_root>/data on demand.
  # (File.exist? — File.exists? was removed in Ruby 3.2.)
  def self.user_projects_filename
    FileUtils.mkdir("#{Environment.dev_root}/data") unless File.exist?("#{Environment.dev_root}/data")
    "#{Environment.dev_root}/data/PROJECTS.json"
  end

  # Build a Project for the repository containing the Rake invocation
  # directory, preferring the git origin URL and falling back to svn.
  # Registers it in the global PROJECTS (when defined) and returns it,
  # or nil when no remote URL could be determined.
  def self.current
    project = nil
    url = Git.remote_origin
    url = Svn.url if url.length == 0
    if Rake.application.original_dir.include?('/wrk/') && url.length > 0
      project = Project.new(url)
      name = Rake.application.original_dir.gsub("#{Environment.dev_root}/wrk/", '')
      project[:name] = name if name.length > 0 && !name.include?(Environment.dev_root)
      if defined?(PROJECTS)
        PROJECTS[name] = project unless PROJECTS.has_key?(name)
        project.each { |k, v| PROJECTS[name][k] = v }
        PROJECTS.save
      else
        project[:name] = name
      end
    end
    project
  end

  # Delegate pull/rake to every project that supports it.
  def pull
    each { |_k, v| v.pull if v.respond_to?(:pull) }
  end

  def rake
    each { |_k, v| v.rake if v.respond_to?(:rake) }
  end
end
# Global project registry. PROJECTS must exist before Projects.current
# runs: Projects.current only registers the current project when
# defined?(PROJECTS) is true, but the original ordering called it
# before the constant was assigned, so the registration branch — which
# the inline comment claimed would run — never did. Also switched to
# File.exist? (File.exists? was removed in Ruby 3.2).
PROJECTS = Projects.new
PROJECTS.open(Projects.user_projects_filename) if File.exist?(Projects.user_projects_filename)
PROJECTS.save(Projects.user_projects_filename) unless File.exist?(Projects.user_projects_filename)
current = Projects.current # this makes sure the current project is added to PROJECTS
|
# encoding: utf-8
module Sponges
  # This class concern is to create a Supervisor, set some signals handlers and
  # watch over the supervisor.
  #
  class Runner
    attr_reader :store

    # name    - identifier for this runner (used for the store and log lines).
    # options - Hash merged over #default_options (:size; :daemonize is read
    #           by #daemonize?).
    # block   - worker block handed to the Supervisor.
    #
    # Exits the process immediately when the store already records a
    # running runner under this name.
    def initialize(name, options = {}, block)
      @name, @block = name, block
      @options = default_options.merge options
      @store = Sponges::Store.new(@name)
      if store.running?
        Sponges.logger.error "Runner #{@name} already started."
        exit
      end
    end

    # Fork the supervisor child (daemonizing first when requested),
    # install signal forwarding, and block on the child when running in
    # the foreground.
    def start
      if daemonize?
        Sponges.logger.info "Supervisor daemonized."
        Process.daemon
      end
      Sponges.logger.info "Runner #{@name} start message received."
      @supervisor = fork_supervisor
      trap_signals
      Sponges.logger.info "Supervisor started with #{@supervisor} pid."
      Process.waitpid(@supervisor) unless daemonize?
    end

    private

    # Forward every signal in Sponges::SIGNALS to the supervisor child.
    # NOTE(review): the block parameter shadows the loop variable; the
    # handler relays whatever value Ruby passes to the trap block rather
    # than the subscribed signal name — confirm this is intended.
    def trap_signals
      Sponges::SIGNALS.each do |signal|
        trap(signal) {|signal| kill_supervisor(signal) }
      end
    end

    # Relay +signal+ to the supervisor process.
    def kill_supervisor(signal)
      Process.kill signal, @supervisor
    end

    # Default pool size: one worker per CPU core.
    def default_options
      {
        size: Machine::Info::Cpu.cores_size
      }
    end

    # Fork a child running the Supervisor loop; returns its pid.
    def fork_supervisor
      fork do
        Supervisor.new(@name, @options, store, @block).start
      end
    end

    # True when the :daemonize option was given and truthy.
    def daemonize?
      !!@options[:daemonize]
    end
  end
end
Going bold on foreground exit
Signed-off-by: chatgris <f9469d12bf3d131e7aae80be27ccfe58aa9db1f1@af83.com>
# encoding: utf-8
module Sponges
  # This class concern is to create a Supervisor, set some signals handlers and
  # watch over the supervisor.
  #
  class Runner
    attr_reader :store

    # name    - identifier for this runner (used for the store and log lines).
    # options - Hash merged over #default_options (:size; :daemonize is read
    #           by #daemonize?).
    # block   - worker block handed to the Supervisor.
    #
    # Exits the process immediately when the store already records a
    # running runner under this name.
    def initialize(name, options = {}, block)
      @name, @block = name, block
      @options = default_options.merge options
      @store = Sponges::Store.new(@name)
      if store.running?
        Sponges.logger.error "Runner #{@name} already started."
        exit
      end
    end

    # Fork the supervisor child (daemonizing first when requested),
    # install signal handlers, and block on the child when running in
    # the foreground.
    def start
      if daemonize?
        Sponges.logger.info "Supervisor daemonized."
        Process.daemon
      end
      Sponges.logger.info "Runner #{@name} start message received."
      @supervisor = fork_supervisor
      trap_signals
      Sponges.logger.info "Supervisor started with #{@supervisor} pid."
      Process.waitpid(@supervisor) unless daemonize?
    end

    private

    # On any signal in Sponges::SIGNALS, ask the supervisor to quit.
    # The received signal itself is ignored (the block parameter shadows
    # the loop variable); the supervisor always gets QUIT.
    def trap_signals
      Sponges::SIGNALS.each do |signal|
        trap(signal) {|signal| kill_supervisor }
      end
    end

    # Send QUIT to the supervisor process (graceful shutdown).
    def kill_supervisor
      Process.kill :QUIT, @supervisor
    end

    # Default pool size: one worker per CPU core.
    def default_options
      {
        size: Machine::Info::Cpu.cores_size
      }
    end

    # Fork a child running the Supervisor loop; returns its pid.
    def fork_supervisor
      fork do
        Supervisor.new(@name, @options, store, @block).start
      end
    end

    # True when the :daemonize option was given and truthy.
    def daemonize?
      !!@options[:daemonize]
    end
  end
end
|
require 'sprockets/asset'
require 'sprockets/bower'
require 'sprockets/cache'
require 'sprockets/configuration'
require 'sprockets/digest_utils'
require 'sprockets/errors'
require 'sprockets/loader'
require 'sprockets/path_digest_utils'
require 'sprockets/path_dependency_utils'
require 'sprockets/path_utils'
require 'sprockets/resolve'
require 'sprockets/server'
module Sprockets
  # `Base` class for `Environment` and `Cached`.
  class Base
    include PathUtils, PathDependencyUtils, PathDigestUtils, DigestUtils
    include Configuration
    include Server
    include Resolve, Loader
    include Bower

    # Get persistent cache store
    attr_reader :cache

    # Set persistent cache store
    #
    # The cache store must implement a pair of getters and
    # setters. Either `get(key)`/`set(key, value)`,
    # `[key]`/`[key]=value`, `read(key)`/`write(key, value)`.
    def cache=(cache)
      @cache = Cache.new(cache, logger)
    end

    # Return a `Cached`. Must be implemented by the subclass.
    def cached
      raise NotImplementedError
    end
    alias_method :index, :cached

    # Internal: Compute digest for path.
    #
    # path - String filename or directory path.
    #
    # Returns a String digest or nil.
    def file_digest(path)
      stat = self.stat(path)
      return unless stat

      # Caveat: digests are cached keyed on the path's current mtime,
      # which is second-resolution, so a rewrite that preserves the
      # mtime can serve a stale digest.
      cache.fetch(['file_digest', path, stat.mtime.to_i]) do
        self.stat_digest(path, stat)
      end
    end

    # Find asset by logical path or expanded path.
    def find_asset(*args)
      uri, _ = resolve(*args)
      uri ? load(uri) : nil
    end

    # Yield the asset for *args followed by every asset it links to,
    # transitively, depth-first. Without a block, returns an Enumerator.
    def find_all_linked_assets(*args)
      return to_enum(__method__, *args) unless block_given?

      root = find_asset(*args)
      return if root.nil?

      yield root
      pending = root.links.to_a
      until pending.empty?
        asset = load(pending.shift)
        yield asset
        # Depth-first: a freshly loaded asset's links jump the queue.
        pending = asset.links.to_a + pending
      end
      nil
    end

    # Preferred `find_asset` shorthand.
    #
    #     environment['application.js']
    #
    def [](*args)
      find_asset(*args)
    end

    # Pretty inspect
    def inspect
      format("#<%s:0x%s root=%s, paths=%s>",
             self.class, object_id.to_s(16), root.to_s.inspect, paths.inspect)
    end
  end
end
Pull source maps into env
require 'sprockets/asset'
require 'sprockets/bower'
require 'sprockets/cache'
require 'sprockets/configuration'
require 'sprockets/digest_utils'
require 'sprockets/errors'
require 'sprockets/loader'
require 'sprockets/path_digest_utils'
require 'sprockets/path_dependency_utils'
require 'sprockets/path_utils'
require 'sprockets/resolve'
require 'sprockets/server'
module Sprockets
  # `Base` class for `Environment` and `Cached`.
  class Base
    include PathUtils, PathDependencyUtils, PathDigestUtils, DigestUtils, SourceMapUtils
    include Configuration
    include Server
    include Resolve, Loader
    include Bower

    # Get persistent cache store
    attr_reader :cache

    # Set persistent cache store
    #
    # The cache store must implement a pair of getters and
    # setters. Either `get(key)`/`set(key, value)`,
    # `[key]`/`[key]=value`, `read(key)`/`write(key, value)`.
    def cache=(cache)
      @cache = Cache.new(cache, logger)
    end

    # Return a `Cached`. Must be implemented by the subclass.
    def cached
      raise NotImplementedError
    end
    alias_method :index, :cached

    # Internal: Compute digest for path.
    #
    # path - String filename or directory path.
    #
    # Returns a String digest or nil.
    def file_digest(path)
      stat = self.stat(path)
      return unless stat

      # Caveat: digests are cached keyed on the path's current mtime,
      # which is second-resolution, so a rewrite that preserves the
      # mtime can serve a stale digest.
      cache.fetch(['file_digest', path, stat.mtime.to_i]) do
        self.stat_digest(path, stat)
      end
    end

    # Find asset by logical path or expanded path.
    def find_asset(*args)
      uri, _ = resolve(*args)
      uri ? load(uri) : nil
    end

    # Yield the asset for *args followed by every asset it links to,
    # transitively, depth-first. Without a block, returns an Enumerator.
    def find_all_linked_assets(*args)
      return to_enum(__method__, *args) unless block_given?

      root = find_asset(*args)
      return if root.nil?

      yield root
      pending = root.links.to_a
      until pending.empty?
        asset = load(pending.shift)
        yield asset
        # Depth-first: a freshly loaded asset's links jump the queue.
        pending = asset.links.to_a + pending
      end
      nil
    end

    # Preferred `find_asset` shorthand.
    #
    #     environment['application.js']
    #
    def [](*args)
      find_asset(*args)
    end

    # Pretty inspect
    def inspect
      format("#<%s:0x%s root=%s, paths=%s>",
             self.class, object_id.to_s(16), root.to_s.inspect, paths.inspect)
    end
  end
end
|
require 'sprockets/encoding_utils'
require 'sprockets/http_utils'
require 'sprockets/utils'
module Sprockets
  module Mime
    include HTTPUtils, Utils

    # Public: Mapping of MIME type Strings to properties Hash.
    #
    # key   - MIME Type String
    # value - Hash
    #   extensions - Array of extnames
    #   charset    - Default Encoding or function to detect encoding
    #
    # Returns Hash.
    def mime_types
      config[:mime_types]
    end

    # Internal: Mapping of MIME extension Strings to MIME type Strings.
    #
    # Used for internal fast lookup purposes.
    #
    # Examples:
    #
    #   mime_exts['.js'] #=> 'application/javascript'
    #
    # key   - MIME extension String
    # value - MIME Type String
    #
    # Returns Hash.
    def mime_exts
      config[:mime_exts]
    end

    # Public: Register a new mime type.
    #
    # mime_type - String MIME Type
    # options - Hash
    #   extensions: Array of String extnames
    #   charset: Proc/Method that detects the charset of a file.
    #            See EncodingUtils.
    #
    # Returns nothing.
    def register_mime_type(mime_type, options = {})
      # Legacy extension argument, will be removed from 4.x
      if options.is_a?(String)
        options = { extensions: [options] }
      end

      extnames = Array(options[:extensions]).map { |extname|
        Sprockets::Utils.normalize_extension(extname)
      }

      # text/* types default to charset detection; Symbol charsets are
      # resolved through EncodingUtils' detector table.
      charset = options[:charset]
      charset ||= :default if mime_type.start_with?('text/')
      charset = EncodingUtils::CHARSET_DETECT[charset] if charset.is_a?(Symbol)

      # The three config updates below are order-dependent:
      # compute_extname_map reads the freshly updated :mime_exts.
      self.config = hash_reassoc(config, :mime_exts) do |mime_exts|
        extnames.each do |extname|
          mime_exts[extname] = mime_type
        end
        mime_exts
      end

      self.config = hash_reassoc(config, :mime_types) do |mime_types|
        type = { extensions: extnames }
        type[:charset] = charset if charset
        mime_types.merge(mime_type => type)
      end

      # Rebuild the cached extname lookup graph.
      self.config = hash_reassoc(config, :_extnames) do
        compute_extname_map
      end
    end

    # Internal: Get detector function for MIME type.
    #
    # mime_type - String MIME type
    #
    # Returns Proc detector or nil if none is available.
    def mime_type_charset_detecter(mime_type)
      if type = config[:mime_types][mime_type]
        if detect = type[:charset]
          return detect
        end
      end
    end

    # Public: Read file on disk with MIME type specific encoding.
    #
    # filename     - String path
    # content_type - String MIME type
    #
    # Returns String file contents transcoded to UTF-8 or in its external
    # encoding.
    def read_file(filename, content_type = nil)
      data = File.binread(filename)

      if detect = mime_type_charset_detecter(content_type)
        detect.call(data).encode(Encoding::UTF_8, :universal_newline => true)
      else
        data
      end
    end

    private
      # Internal: Build the lookup table from every valid extname
      # combination ("<pipeline><format><engines...>") to its MIME type,
      # engine extname list and pipeline name. Engine permutations of
      # length 0..2 are enumerated (3.times).
      def compute_extname_map
        graph = {}

        ([nil] + pipelines.keys.map(&:to_s)).each do |pipeline|
          pipeline_extname = ".#{pipeline}" if pipeline
          ([[nil, nil]] + config[:mime_exts].to_a).each do |format_extname, format_type|
            3.times do |n|
              config[:engines].keys.permutation(n).each do |engine_extnames|
                key = "#{pipeline_extname}#{format_extname}#{engine_extnames.join}"
                # A format extname fixes the type; otherwise the first
                # engine's registered MIME type wins.
                type = format_type || config[:engine_mime_types][engine_extnames.first]
                graph[key] = {type: type, engines: engine_extnames, pipeline: pipeline}
              end
            end
          end
        end

        graph
      end
  end
end
Restore "#19 Add more extnames"
This reverts commit 4ca496dfb7969e3fb26400158721c88dc8a6f694.
require 'sprockets/encoding_utils'
require 'sprockets/http_utils'
require 'sprockets/utils'
module Sprockets
  module Mime
    include HTTPUtils, Utils

    # Public: Mapping of MIME type Strings to properties Hash.
    #
    # key   - MIME Type String
    # value - Hash
    #   extensions - Array of extnames
    #   charset    - Default Encoding or function to detect encoding
    #
    # Returns Hash.
    def mime_types
      config[:mime_types]
    end

    # Internal: Mapping of MIME extension Strings to MIME type Strings.
    #
    # Used for internal fast lookup purposes.
    #
    # Examples:
    #
    #   mime_exts['.js'] #=> 'application/javascript'
    #
    # key   - MIME extension String
    # value - MIME Type String
    #
    # Returns Hash.
    def mime_exts
      config[:mime_exts]
    end

    # Public: Register a new mime type.
    #
    # mime_type - String MIME Type
    # options - Hash
    #   extensions: Array of String extnames
    #   charset: Proc/Method that detects the charset of a file.
    #            See EncodingUtils.
    #
    # Returns nothing.
    def register_mime_type(mime_type, options = {})
      # Legacy extension argument, will be removed from 4.x
      if options.is_a?(String)
        options = { extensions: [options] }
      end

      extnames = Array(options[:extensions]).map { |extname|
        Sprockets::Utils.normalize_extension(extname)
      }

      # text/* types default to charset detection; Symbol charsets are
      # resolved through EncodingUtils' detector table.
      charset = options[:charset]
      charset ||= :default if mime_type.start_with?('text/')
      charset = EncodingUtils::CHARSET_DETECT[charset] if charset.is_a?(Symbol)

      # The three config updates below are order-dependent:
      # compute_extname_map reads the freshly updated :mime_exts.
      self.config = hash_reassoc(config, :mime_exts) do |mime_exts|
        extnames.each do |extname|
          mime_exts[extname] = mime_type
        end
        mime_exts
      end

      self.config = hash_reassoc(config, :mime_types) do |mime_types|
        type = { extensions: extnames }
        type[:charset] = charset if charset
        mime_types.merge(mime_type => type)
      end

      # Rebuild the cached extname lookup graph.
      self.config = hash_reassoc(config, :_extnames) do
        compute_extname_map
      end
    end

    # Internal: Get detector function for MIME type.
    #
    # mime_type - String MIME type
    #
    # Returns Proc detector or nil if none is available.
    def mime_type_charset_detecter(mime_type)
      if type = config[:mime_types][mime_type]
        if detect = type[:charset]
          return detect
        end
      end
    end

    # Public: Read file on disk with MIME type specific encoding.
    #
    # filename     - String path
    # content_type - String MIME type
    #
    # Returns String file contents transcoded to UTF-8 or in its external
    # encoding.
    def read_file(filename, content_type = nil)
      data = File.binread(filename)

      if detect = mime_type_charset_detecter(content_type)
        detect.call(data).encode(Encoding::UTF_8, :universal_newline => true)
      else
        data
      end
    end

    private
      # Internal: Build the lookup table from every valid extname
      # combination ("<pipeline><format><engines...>") to its MIME type,
      # engine extname list and pipeline name. Engine permutations of
      # length 0..3 are enumerated (4.times), one more than the earlier
      # revision allowed.
      def compute_extname_map
        graph = {}

        ([nil] + pipelines.keys.map(&:to_s)).each do |pipeline|
          pipeline_extname = ".#{pipeline}" if pipeline
          ([[nil, nil]] + config[:mime_exts].to_a).each do |format_extname, format_type|
            4.times do |n|
              config[:engines].keys.permutation(n).each do |engine_extnames|
                key = "#{pipeline_extname}#{format_extname}#{engine_extnames.join}"
                # A format extname fixes the type; otherwise the first
                # engine's registered MIME type wins.
                type = format_type || config[:engine_mime_types][engine_extnames.first]
                graph[key] = {type: type, engines: engine_extnames, pipeline: pipeline}
              end
            end
          end
        end

        graph
      end
  end
end
|
require 'sinatra/base'
require 'sinatra/flash'
# require 'chartkick' # Create beautiful Javascript charts with one line of Ruby
require 'rack-flash'
require 'httparty'
require 'slim'
require 'active_support'
require 'active_support/core_ext'
# Sinatra controller serving the trend, article and about pages.
class ApplicationController < Sinatra::Base
  use Rack::MethodOverride
  use Rack::Session::Pool
  use Rack::Flash
  register Sinatra::Flash # to be deleted
  helpers ApplicationHelpers

  set :views, File.expand_path('../../views', __FILE__)
  set :public_folder, File.expand_path('../../public', __FILE__)

  configure :production, :development do
    enable :logging
  end

  # Web functions

  # Landing page: keeps keywords in the session, applies any add/delete
  # request from the query string, then fetches one article series per
  # keyword to draw the trend chart.
  get_root = lambda do
    session[:keywords] ||= default_keywords(6)
    @added_word = params['added_word']
    @deleted_word = params['deleted_word']
    add_keyword(@added_word) if @added_word
    del_keyword(@deleted_word) if @deleted_word
    ## show trend line
    @data_count = []
    @categories = set_xaxis
    @tags = params['tags']
    @author = params['author']
    @title = params['title']
    # One API query per session keyword; @tags is reused as the current
    # keyword inside the loop. (each_with_index replaces the original
    # for-loop, which leaked its index variable into the scope.)
    session[:keywords].each_with_index do |keyword, i|
      @tags = keyword
      options = { headers: { 'Content-Type' => 'application/json' }, query: { :tags => @tags } }
      @article = HTTParty.get(api_url('article/filter?'), options)
      @data = count_article(@tags, @article)
      @data_count[i] = @data
    end
    slim :trend
  end

  # Article page: show a single article when viewid is given (redirecting
  # with an error when it does not resolve), otherwise the day ranking.
  get_article = lambda do
    session[:keywords] ||= default_keywords(6)
    if params['viewid']
      @viewid = params['viewid']
      options = { headers: { 'Content-Type' => 'application/json' }, query: { :viewid => @viewid } }
      @article = HTTParty.get(api_url('article'), options)
      @error_msg = 'The article view id does not exist'
      error_send('/article', @error_msg) if @article['title'].length == 0
    else
      @card = dayrank_article
    end
    slim :article
  end

  # Static about page.
  get_about = lambda do
    session[:keywords] ||= default_keywords(6)
    slim :about
  end

  # Web App Views Routes
  get '/?', &get_root
  get '/article/?', &get_article
  get '/about/?', &get_about
end
Fix small bug in article
require 'sinatra/base'
require 'sinatra/flash'
# require 'chartkick' # Create beautiful Javascript charts with one line of Ruby
require 'rack-flash'
require 'httparty'
require 'slim'
require 'active_support'
require 'active_support/core_ext'
# Sinatra controller serving the trend, article and about pages.
class ApplicationController < Sinatra::Base
  use Rack::MethodOverride
  use Rack::Session::Pool
  use Rack::Flash
  register Sinatra::Flash # to be deleted
  helpers ApplicationHelpers

  set :views, File.expand_path('../../views', __FILE__)
  set :public_folder, File.expand_path('../../public', __FILE__)

  configure :production, :development do
    enable :logging
  end

  # Web functions

  # Landing page: keeps keywords in the session, applies any add/delete
  # request from the query string, then fetches one article series per
  # keyword to draw the trend chart.
  get_root = lambda do
    session[:keywords] ||= default_keywords(6)
    @added_word = params['added_word']
    @deleted_word = params['deleted_word']
    add_keyword(@added_word) if @added_word
    del_keyword(@deleted_word) if @deleted_word
    ## show trend line
    @data_count = []
    @categories = set_xaxis
    @tags = params['tags']
    @author = params['author']
    @title = params['title']
    # One API query per session keyword; @tags is reused as the current
    # keyword inside the loop. (each_with_index replaces the original
    # for-loop, which leaked its index variable into the scope.)
    session[:keywords].each_with_index do |keyword, i|
      @tags = keyword
      options = { headers: { 'Content-Type' => 'application/json' }, query: { :tags => @tags } }
      @article = HTTParty.get(api_url('article/filter?'), options)
      @data = count_article(@tags, @article)
      @data_count[i] = @data
    end
    slim :trend
  end

  # Article page: fetch the article for the given viewid and the
  # day-ranking card list.
  get_article = lambda do
    session[:keywords] ||= default_keywords(6)
    @viewid = params['viewid']
    options = { headers: { 'Content-Type' => 'application/json' }, query: { :viewid => @viewid } }
    @article = HTTParty.get(api_url('article'), options)
    @card = dayrank_article
    slim :article
  end

  # Static about page.
  get_about = lambda do
    session[:keywords] ||= default_keywords(6)
    slim :about
  end

  # Web App Views Routes
  get '/?', &get_root
  get '/article/?', &get_article
  get '/about/?', &get_about
end
|
#
# Cookbook Name:: foundation
# Recipe:: board
#
# Copyright 2014, OpenStreetMap Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
include_recipe "mediawiki"

passwords = data_bag_item("foundation", "passwords")

# Private mediawiki instance for the OSMF board.
mediawiki_site "board.osmfoundation.org" do
  sitename "OSMF Board Wiki"
  directory "/srv/board.osmfoundation.org"
  ssl_enabled true
  database_name "board-wiki"
  database_user "board-wikiuser"
  database_password passwords["board"]["database"]
  admin_password passwords["board"]["admin"]
  logo "/Wiki.png"
  email_contact "webmaster@openstreetmap.org"
  email_sender "webmaster@openstreetmap.org"
  email_sender_name "OSMF Board Wiki"
  private true
  recaptcha_public_key "6LflIQATAAAAAMXyDWpba-FgipVzE-aGF4HIR59N"
  recaptcha_private_key passwords["board"]["recaptcha"]
end

# Site logo, served from the wiki document root.
cookbook_file "/srv/board.osmfoundation.org/Wiki.png" do
  owner node[:mediawiki][:user]
  group node[:mediawiki][:group]
  # 0o644 (same value as 0644) for consistency with the other cookbooks
  # in this repository.
  mode 0o644
end
Fix OSMF board metaname
#
# Cookbook Name:: foundation
# Recipe:: board
#
# Copyright 2014, OpenStreetMap Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
include_recipe "mediawiki"

passwords = data_bag_item("foundation", "passwords")

# Private mediawiki instance for the OSMF board.
mediawiki_site "board.osmfoundation.org" do
  sitename "OSMF Board Wiki"
  metanamespace "OSMFBoard"
  directory "/srv/board.osmfoundation.org"
  ssl_enabled true
  database_name "board-wiki"
  database_user "board-wikiuser"
  database_password passwords["board"]["database"]
  admin_password passwords["board"]["admin"]
  logo "/Wiki.png"
  email_contact "webmaster@openstreetmap.org"
  email_sender "webmaster@openstreetmap.org"
  email_sender_name "OSMF Board Wiki"
  private true
  recaptcha_public_key "6LflIQATAAAAAMXyDWpba-FgipVzE-aGF4HIR59N"
  recaptcha_private_key passwords["board"]["recaptcha"]
end

# Site logo, served from the wiki document root.
cookbook_file "/srv/board.osmfoundation.org/Wiki.png" do
  owner node[:mediawiki][:user]
  group node[:mediawiki][:group]
  # 0o644 (same value as 0644) for consistency with the other cookbooks
  # in this repository.
  mode 0o644
end
|
#
# Cookbook Name:: hardware
# Recipe:: default
#
# Copyright 2012, OpenStreetMap Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
include_recipe "tools"
include_recipe "munin"

# Custom ohai plugin exposing the hardware details consumed below
# (node[:hardware][:disk], [:pci], [:grub], ...).
ohai_plugin "hardware" do
  template "ohai.rb.erb"
end

# CPU microcode update packages, per CPU vendor.
case node[:cpu][:"0"][:vendor_id]
when "GenuineIntel"
  package "intel-microcode"
end

case node[:cpu][:"0"][:vendor_id]
when "AuthenticAMD"
  # amd64-microcode is only packaged from Ubuntu 14.04 onwards.
  if node[:lsb][:release].to_f >= 14.04
    package "amd64-microcode"
  end
end

# Determine manufacturer/product from DMI, falling back to the base
# board when the system strings are literally "empty", or to "Unknown"
# when no DMI data is available at all.
if node[:dmi] && node[:dmi][:system]
  case node[:dmi][:system][:manufacturer]
  when "empty"
    manufacturer = node[:dmi][:base_board][:manufacturer]
    product = node[:dmi][:base_board][:product_name]
  else
    manufacturer = node[:dmi][:system][:manufacturer]
    product = node[:dmi][:system][:product_name]
  end
else
  manufacturer = "Unknown"
  product = "Unknown"
end
# Serial console units to enable, selected by hosting role and vendor.
units = []

if node[:roles].include?("bytemark") || node[:roles].include?("exonetric")
  units << "0"
end

# Vendor-specific management tooling and serial unit selection.
case manufacturer
when "HP"
  package "hponcfg"
  package "hp-health"
  units << "1"
when "TYAN"
  units << "0"
when "TYAN Computer Corporation"
  units << "0"
when "Supermicro"
  case product
  when "H8DGU", "X9SCD", "X7DBU", "X7DW3", "X9DR7/E-(J)LN4F", "X9DR3-F", "X9DRW", "SYS-2028U-TN24R4T+"
    units << "1"
  else
    units << "0"
  end
when "IBM"
  units << "0"
end

# Remove legacy HP G4 support which breaks modern hp-health 10.4.
# FIX: the third entry previously began with a stray "%"
# ("%/usr/lib/libhpasmintrfc.so.3"), which can never match a real
# path, so that library was never deleted.
if manufacturer == "HP"
  %w(/opt/hp/hp-health/bin/hpasmd /usr/lib/libhpasmintrfc.so.3.0 /usr/lib/libhpasmintrfc.so.3 /usr/lib/libhpasmintrfc.so).each do |filename|
    file filename do
      action :delete
    end
  end

  directory "/opt/hp/hp-legacy" do
    action :delete
    recursive true
  end
end
# Enable a login getty on each selected serial unit. On 16.04+ this is
# the systemd template unit; older releases get an upstart tty job.
units.sort.uniq.each do |unit|
  if node[:lsb][:release].to_f >= 16.04
    service "serial-getty@ttyS#{unit}" do
      action [:enable, :start]
    end
  else
    # Clean up a historical, misnamed upstart job before creating the
    # real one.
    file "/etc/init/ttySttyS#{unit}.conf" do
      action :delete
    end

    template "/etc/init/ttyS#{unit}.conf" do
      source "tty.conf.erb"
      owner "root"
      group "root"
      mode 0o644
      variables :unit => unit
    end

    service "ttyS#{unit}" do
      provider Chef::Provider::Service::Upstart
      action [:enable, :start]
      supports :status => true, :restart => true, :reload => false
      subscribes :restart, "template[/etc/init/ttyS#{unit}.conf]"
    end
  end
end

# if we need a different / special kernel version to make the hardware
# work (e.g: https://github.com/openstreetmap/operations/issues/45) then
# ensure that we have the package installed. the grub template will
# make sure that this is the default on boot.
if node[:hardware][:grub][:kernel]
  kernel_version = node[:hardware][:grub][:kernel]

  package "linux-image-#{kernel_version}-generic"
  package "linux-image-extra-#{kernel_version}-generic"
  package "linux-headers-#{kernel_version}-generic"

  # Resolve the UUID of the filesystem holding /boot so the exact grub
  # submenu entry for the pinned kernel can be named.
  boot_device = IO.popen(["df", "/boot"]).readlines.last.split.first
  boot_uuid = IO.popen(["blkid", "-o", "value", "-s", "UUID", boot_device]).readlines.first.chomp
  grub_entry = "gnulinux-advanced-#{boot_uuid}>gnulinux-#{kernel_version}-advanced-#{boot_uuid}"
else
  # "0" selects the first menu entry (the distribution default kernel).
  grub_entry = "0"
end

if File.exist?("/etc/default/grub")
  execute "update-grub" do
    action :nothing
    command "/usr/sbin/update-grub"
  end

  template "/etc/default/grub" do
    source "grub.erb"
    owner "root"
    group "root"
    mode 0o644
    variables :units => units, :entry => grub_entry
    notifies :run, "execute[update-grub]"
  end
end
# Rebuild the initramfs whenever the mdadm snippet below changes.
execute "update-initramfs" do
  action :nothing
  command "update-initramfs -u -k all"
  user "root"
  group "root"
end

template "/etc/initramfs-tools/conf.d/mdadm" do
  source "initramfs-mdadm.erb"
  owner "root"
  group "root"
  mode 0o644
  notifies :run, "execute[update-initramfs]"
end

# Userspace entropy daemon.
package "haveged"
service "haveged" do
  action [:enable, :start]
end

# ipmitool is only useful once the IPMI system-interface driver is
# loaded.
if node[:kernel][:modules].include?("ipmi_si")
  package "ipmitool"
end

# Interrupt balancing, on releases that ship irqbalance.
if node[:lsb][:release].to_f >= 12.10
  package "irqbalance"

  template "/etc/default/irqbalance" do
    source "irqbalance.erb"
    owner "root"
    group "root"
    mode 0o644
  end

  service "irqbalance" do
    action [:start, :enable]
    supports :status => false, :restart => true, :reload => false
    subscribes :restart, "template[/etc/default/irqbalance]"
  end
end

# Map loaded storage-controller kernel modules to the vendor CLI tools
# and the array-monitoring daemon ("*-status") packages they require.
# status_packages values collect the device names each daemon watches.
tools_packages = []
status_packages = {}

node[:kernel][:modules].each_key do |modname|
  case modname
  when "cciss"
    tools_packages << "hpssacli"
    status_packages["cciss-vol-status"] ||= []
  when "hpsa"
    tools_packages << "hpssacli"
    status_packages["cciss-vol-status"] ||= []
  when "mptsas"
    tools_packages << "lsiutil"
    # status_packages["mpt-status"] ||= []
  when "mpt2sas", "mpt3sas"
    tools_packages << "sas2ircu"
    status_packages["sas2ircu-status"] ||= []
  when "megaraid_mm"
    tools_packages << "megactl"
    status_packages["megaraid-status"] ||= []
  when "megaraid_sas"
    tools_packages << "megacli"
    status_packages["megaclisas-status"] ||= []
  when "aacraid"
    tools_packages << "arcconf"
    status_packages["aacraid-status"] ||= []
  when "arcmsr"
    tools_packages << "areca"
  end
end
# Register each HP logical volume device with cciss-vol-status.
node[:block_device].each do |name, attributes|
  next unless attributes[:vendor] == "HP" && attributes[:model] == "LOGICAL VOLUME"

  # NOTE(review): assumes status_packages["cciss-vol-status"] was
  # initialised by the cciss/hpsa branch above; `|=` on a missing key
  # would raise NoMethodError on nil. Confirm an HP logical volume
  # always implies one of those modules is loaded.
  if name =~ /^cciss!(c[0-9]+)d[0-9]+$/
    status_packages["cciss-vol-status"] |= ["cciss/#{Regexp.last_match[1]}d0"]
  else
    Dir.glob("/sys/block/#{name}/device/scsi_generic/*").each do |sg|
      status_packages["cciss-vol-status"] |= [File.basename(sg)]
    end
  end
end

# Install the controller CLI tools we need; purge the ones we don't.
%w(hpssacli lsiutil sas2ircu megactl megacli arcconf).each do |tools_package|
  if tools_packages.include?(tools_package)
    package tools_package
  else
    package tools_package do
      action :purge
    end
  end
end

# The Areca tool is not packaged; sync it from git instead.
if tools_packages.include?("areca")
  include_recipe "git"

  git "/opt/areca" do
    action :sync
    repository "git://chef.openstreetmap.org/areca.git"
    user "root"
    group "root"
  end
else
  directory "/opt/areca" do
    action :delete
    recursive true
  end
end

# For each wanted status daemon: install it, configure the devices it
# watches and run it; purge it and remove its config otherwise.
["cciss-vol-status", "mpt-status", "sas2ircu-status", "megaraid-status", "megaclisas-status", "aacraid-status"].each do |status_package|
  if status_packages.include?(status_package)
    package status_package

    template "/etc/default/#{status_package}d" do
      source "raid.default.erb"
      owner "root"
      group "root"
      mode 0o644
      variables :devices => status_packages[status_package]
    end

    service "#{status_package}d" do
      action [:start, :enable]
      supports :status => false, :restart => true, :reload => false
      subscribes :restart, "template[/etc/default/#{status_package}d]"
    end
  else
    package status_package do
      action :purge
    end

    file "/etc/default/#{status_package}d" do
      action :delete
    end
  end
end
# Physical disk list from the ohai hardware plugin.
disks = if node[:hardware][:disk]
          node[:hardware][:disk][:disks]
        else
          []
        end

intel_ssds = disks.select { |d| d[:vendor] == "INTEL" && d[:model] =~ /^SSD/ }

nvmes = if node[:hardware][:pci]
          node[:hardware][:pci].values.select { |pci| pci[:driver] == "nvme" }
        else
          []
        end

intel_nvmes = nvmes.select { |pci| pci[:vendor_name] == "Intel Corporation" }

# Machines with Intel SSD/NVMe drives get the Intel Data Center Tool,
# converted from the vendor RPM to a deb with alien. Both conversion
# steps are guarded so the download/convert only happens once.
if !intel_ssds.empty? || !intel_nvmes.empty?
  package "unzip"
  package "alien"

  remote_file "#{Chef::Config[:file_cache_path]}/DataCenterTool_3_0_0_Linux.zip" do
    source "https://downloadmirror.intel.com/23931/eng/DataCenterTool_3_0_0_Linux.zip"
  end

  execute "unzip-DataCenterTool" do
    command "unzip DataCenterTool_3_0_0_Linux.zip isdct-3.0.0.400-15.x86_64.rpm"
    cwd Chef::Config[:file_cache_path]
    user "root"
    group "root"
    not_if { File.exist?("#{Chef::Config[:file_cache_path]}/isdct-3.0.0.400-15.x86_64.rpm") }
  end

  execute "alien-isdct" do
    command "alien --to-deb isdct-3.0.0.400-15.x86_64.rpm"
    cwd Chef::Config[:file_cache_path]
    user "root"
    group "root"
    not_if { File.exist?("#{Chef::Config[:file_cache_path]}/isdct_3.0.0.400-16_amd64.deb") }
  end

  dpkg_package "isdct" do
    source "#{Chef::Config[:file_cache_path]}/isdct_3.0.0.400-16_amd64.deb"
  end
end

# Normalize every disk to the identifiers used by smartd and munin:
#   :device  - kernel device name (without /dev/ prefix)
#   :smart   - smartctl "-d" device argument when behind a controller
#   :munin   - name for the per-disk munin smart_ plugin
#   :hddtemp - munin name restricted to characters hddtemp accepts
# Spun-down disks map to nil and are compacted out further below.
disks = disks.map do |disk|
  next if disk[:state] == "spun_down"

  if disk[:smart_device]
    controller = node[:hardware][:disk][:controllers][disk[:controller]]
    device = controller[:device].sub("/dev/", "")
    smart = disk[:smart_device]

    if device.start_with?("cciss/") && smart =~ /^cciss,(\d+)$/
      array = node[:hardware][:disk][:arrays][disk[:arrays].first]
      munin = "cciss-3#{array[:wwn]}-#{Regexp.last_match(1)}"
    elsif smart =~ /^.*,(\d+)$/
      munin = "#{device}-#{Regexp.last_match(1)}"
    elsif smart =~ %r{^.*,(\d+)/(\d+)$}
      munin = "#{device}-#{Regexp.last_match(1)}:#{Regexp.last_match(2)}"
    end
  elsif disk[:device]
    device = disk[:device].sub("/dev/", "")
    munin = device
  end

  # Disks with neither a smart_device nor a device entry are dropped.
  next if device.nil?

  Hash[
    :device => device,
    :smart => smart,
    :munin => munin,
    :hddtemp => munin.tr("-:", "_")
  ]
end

# Ubuntu 16.04 renamed the smartmontools service unit.
smartd_service = if node[:lsb][:release].to_f >= 16.04
                   "smartd"
                 else
                   "smartmontools"
                 end
# Drop the nils produced by skipped disks above.
disks = disks.compact
if disks.count > 0
package "smartmontools"
# Mail wrapper invoked by smartd when a disk reports a problem.
template "/usr/local/bin/smartd-mailer" do
source "smartd-mailer.erb"
owner "root"
group "root"
mode 0o755
end
# One smartd directive per monitored disk.
template "/etc/smartd.conf" do
source "smartd.conf.erb"
owner "root"
group "root"
mode 0o644
variables :disks => disks
end
template "/etc/default/smartmontools" do
source "smartmontools.erb"
owner "root"
group "root"
mode 0o644
end
# Reload on config change; full restart only when the defaults change.
service smartd_service do
action [:enable, :start]
subscribes :reload, "template[/etc/smartd.conf]"
subscribes :restart, "template[/etc/default/smartmontools]"
end
# Don't try and do munin monitoring of disks behind
# an Areca controller as they only allow one thing to
# talk to the controller at a time and smartd will
# throw errors if it clashes with munin
disks = disks.reject { |disk| disk[:smart] && disk[:smart].start_with?("areca,") }
disks.each do |disk|
munin_plugin "smart_#{disk[:munin]}" do
target "smart_"
conf "munin.smart.erb"
conf_variables :disk => disk
end
end
else
# No monitorable disks: make sure the daemon is not running.
service smartd_service do
action [:stop, :disable]
end
end
if disks.count > 0
munin_plugin "hddtemp_smartctl" do
conf "munin.hddtemp.erb"
conf_variables :disks => disks
end
else
munin_plugin "hddtemp_smartctl" do
action :delete
conf "munin.hddtemp.erb"
end
end
# Remove munin smart_* plugin links for disks that are no longer
# present: everything currently installed minus what we just configured.
plugins = Dir.glob("/etc/munin/plugins/smart_*").map { |p| File.basename(p) } -
disks.map { |d| "smart_#{d[:munin]}" }
plugins.each do |plugin|
munin_plugin plugin do
action :delete
end
end
if File.exist?("/etc/mdadm/mdadm.conf")
mdadm_conf = edit_file "/etc/mdadm/mdadm.conf" do |line|
line.gsub!(/^MAILADDR .*$/, "MAILADDR admins@openstreetmap.org")
line
end
file "/etc/mdadm/mdadm.conf" do
owner "root"
group "root"
mode 0o644
content mdadm_conf
end
service "mdadm" do
action :nothing
subscribes :restart, "file[/etc/mdadm/mdadm.conf]"
end
end
template "/etc/modules" do
source "modules.erb"
owner "root"
group "root"
mode 0o644
end
# Pick the service that (re)loads /etc/modules: "module-init-tools"
# (upstart) up to Ubuntu 12.10, "kmod" afterwards — itself managed by
# systemd from 15.10 and by upstart before that.
if node[:lsb][:release].to_f <= 12.10
service "module-init-tools" do
provider Chef::Provider::Service::Upstart
action :nothing
subscribes :start, "template[/etc/modules]"
end
else
service "kmod" do
if node[:lsb][:release].to_f >= 15.10
provider Chef::Provider::Service::Systemd
else
provider Chef::Provider::Service::Upstart
end
action :nothing
subscribes :start, "template[/etc/modules]"
end
end
if node[:hardware][:watchdog]
package "watchdog"
template "/etc/default/watchdog" do
source "watchdog.erb"
owner "root"
group "root"
mode 0o644
variables :module => node[:hardware][:watchdog]
end
service "watchdog" do
action [:enable, :start]
end
end
unless Dir.glob("/sys/class/hwmon/hwmon*").empty?
package "lm-sensors"
Dir.glob("/sys/devices/platform/coretemp.*").each do |coretemp|
cpu = File.basename(coretemp).sub("coretemp.", "").to_i
chip = format("coretemp-isa-%04d", cpu)
temps = if File.exist?("#{coretemp}/name")
Dir.glob("#{coretemp}/temp*_input").map do |temp|
File.basename(temp).sub("temp", "").sub("_input", "").to_i
end.sort
else
Dir.glob("#{coretemp}/hwmon/hwmon*/temp*_input").map do |temp|
File.basename(temp).sub("temp", "").sub("_input", "").to_i
end.sort
end
if temps.first == 1
node.default[:hardware][:sensors][chip][:temps][:temp1][:label] = "CPU #{cpu}"
temps.shift
end
temps.each_with_index do |temp, index|
node.default[:hardware][:sensors][chip][:temps]["temp#{temp}"][:label] = "CPU #{cpu} Core #{index}"
end
end
execute "/etc/sensors.d/chef.conf" do
action :nothing
command "/usr/bin/sensors -s"
user "root"
group "root"
end
template "/etc/sensors.d/chef.conf" do
source "sensors.conf.erb"
owner "root"
group "root"
mode 0o644
notifies :run, "execute[/etc/sensors.d/chef.conf]"
end
end
# Downgrade hp-health package due to startup issues in latest release
#
# Cookbook Name:: hardware
# Recipe:: default
#
# Copyright 2012, OpenStreetMap Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
include_recipe "tools"
include_recipe "munin"
ohai_plugin "hardware" do
template "ohai.rb.erb"
end
# Install CPU microcode updates for the detected processor vendor.
# Merged the two consecutive case statements over the same value into a
# single case — behaviour is unchanged (the amd64-microcode package is
# only available from Ubuntu 14.04 onwards, hence the release guard).
# Later revisions of this recipe elsewhere in the file use the same
# merged form.
case node[:cpu][:"0"][:vendor_id]
when "GenuineIntel"
  package "intel-microcode"
when "AuthenticAMD"
  package "amd64-microcode" if node[:lsb][:release].to_f >= 14.04
end
if node[:dmi] && node[:dmi][:system]
case node[:dmi][:system][:manufacturer]
when "empty"
manufacturer = node[:dmi][:base_board][:manufacturer]
product = node[:dmi][:base_board][:product_name]
else
manufacturer = node[:dmi][:system][:manufacturer]
product = node[:dmi][:system][:product_name]
end
else
manufacturer = "Unknown"
product = "Unknown"
end
units = []
if node[:roles].include?("bytemark") || node[:roles].include?("exonetric")
units << "0"
end
# Manufacturer-specific management tooling, plus selection of which
# serial port unit(s) get a getty and appear in the grub config.
case manufacturer
when "HP"
package "hponcfg"
# Downgrade hp-health to 10.0.0.1.3-4. as 10.40-1815.49 has issues with reliable startup
# NOTE(review): the trailing "." in the version string below looks like
# sentence punctuation that leaked into the pin — confirm the exact
# package version string against the HP repository before relying on it.
package "hp-health" do
action :install
version "10.0.0.1.3-4."
notifies :restart, "service[hp-health]"
end
service "hp-health" do
action [:enable, :start]
supports :status => true, :restart => true, :reload => true
end
units << "1"
when "TYAN"
units << "0"
when "TYAN Computer Corporation"
units << "0"
when "Supermicro"
# Only some Supermicro boards route the serial console to ttyS1.
case product
when "H8DGU", "X9SCD", "X7DBU", "X7DW3", "X9DR7/E-(J)LN4F", "X9DR3-F", "X9DRW", "SYS-2028U-TN24R4T+"
units << "1"
else
units << "0"
end
when "IBM"
units << "0"
end
# Remove legacy HP G4 support which breaks modern hp-health 10.4
if manufacturer == "HP"
  # Fixed a stray "%" that had crept into the second library path: the
  # %w word "%/usr/lib/libhpasmintrfc.so.3" could never match a real
  # file, so that symlink was silently never removed.
  %w(
    /opt/hp/hp-health/bin/hpasmd
    /usr/lib/libhpasmintrfc.so.3.0
    /usr/lib/libhpasmintrfc.so.3
    /usr/lib/libhpasmintrfc.so
  ).each do |filename|
    file filename do
      action :delete
    end
  end
  directory "/opt/hp/hp-legacy" do
    action :delete
    recursive true
  end
end
units.sort.uniq.each do |unit|
if node[:lsb][:release].to_f >= 16.04
service "serial-getty@ttyS#{unit}" do
action [:enable, :start]
end
else
file "/etc/init/ttySttyS#{unit}.conf" do
action :delete
end
template "/etc/init/ttyS#{unit}.conf" do
source "tty.conf.erb"
owner "root"
group "root"
mode 0o644
variables :unit => unit
end
service "ttyS#{unit}" do
provider Chef::Provider::Service::Upstart
action [:enable, :start]
supports :status => true, :restart => true, :reload => false
subscribes :restart, "template[/etc/init/ttyS#{unit}.conf]"
end
end
end
# if we need a different / special kernel version to make the hardware
# work (e.g: https://github.com/openstreetmap/operations/issues/45) then
# ensure that we have the package installed. the grub template will
# make sure that this is the default on boot.
if node[:hardware][:grub][:kernel]
kernel_version = node[:hardware][:grub][:kernel]
package "linux-image-#{kernel_version}-generic"
package "linux-image-extra-#{kernel_version}-generic"
package "linux-headers-#{kernel_version}-generic"
boot_device = IO.popen(["df", "/boot"]).readlines.last.split.first
boot_uuid = IO.popen(["blkid", "-o", "value", "-s", "UUID", boot_device]).readlines.first.chomp
grub_entry = "gnulinux-advanced-#{boot_uuid}>gnulinux-#{kernel_version}-advanced-#{boot_uuid}"
else
grub_entry = "0"
end
if File.exist?("/etc/default/grub")
execute "update-grub" do
action :nothing
command "/usr/sbin/update-grub"
end
template "/etc/default/grub" do
source "grub.erb"
owner "root"
group "root"
mode 0o644
variables :units => units, :entry => grub_entry
notifies :run, "execute[update-grub]"
end
end
execute "update-initramfs" do
action :nothing
command "update-initramfs -u -k all"
user "root"
group "root"
end
template "/etc/initramfs-tools/conf.d/mdadm" do
source "initramfs-mdadm.erb"
owner "root"
group "root"
mode 0o644
notifies :run, "execute[update-initramfs]"
end
package "haveged"
service "haveged" do
action [:enable, :start]
end
if node[:kernel][:modules].include?("ipmi_si")
package "ipmitool"
end
if node[:lsb][:release].to_f >= 12.10
package "irqbalance"
template "/etc/default/irqbalance" do
source "irqbalance.erb"
owner "root"
group "root"
mode 0o644
end
service "irqbalance" do
action [:start, :enable]
supports :status => false, :restart => true, :reload => false
subscribes :restart, "template[/etc/default/irqbalance]"
end
end
tools_packages = []
status_packages = {}
node[:kernel][:modules].each_key do |modname|
case modname
when "cciss"
tools_packages << "hpssacli"
status_packages["cciss-vol-status"] ||= []
when "hpsa"
tools_packages << "hpssacli"
status_packages["cciss-vol-status"] ||= []
when "mptsas"
tools_packages << "lsiutil"
# status_packages["mpt-status"] ||= []
when "mpt2sas", "mpt3sas"
tools_packages << "sas2ircu"
status_packages["sas2ircu-status"] ||= []
when "megaraid_mm"
tools_packages << "megactl"
status_packages["megaraid-status"] ||= []
when "megaraid_sas"
tools_packages << "megacli"
status_packages["megaclisas-status"] ||= []
when "aacraid"
tools_packages << "arcconf"
status_packages["aacraid-status"] ||= []
when "arcmsr"
tools_packages << "areca"
end
end
node[:block_device].each do |name, attributes|
next unless attributes[:vendor] == "HP" && attributes[:model] == "LOGICAL VOLUME"
if name =~ /^cciss!(c[0-9]+)d[0-9]+$/
status_packages["cciss-vol-status"] |= ["cciss/#{Regexp.last_match[1]}d0"]
else
Dir.glob("/sys/block/#{name}/device/scsi_generic/*").each do |sg|
status_packages["cciss-vol-status"] |= [File.basename(sg)]
end
end
end
%w(hpssacli lsiutil sas2ircu megactl megacli arcconf).each do |tools_package|
if tools_packages.include?(tools_package)
package tools_package
else
package tools_package do
action :purge
end
end
end
if tools_packages.include?("areca")
include_recipe "git"
git "/opt/areca" do
action :sync
repository "git://chef.openstreetmap.org/areca.git"
user "root"
group "root"
end
else
directory "/opt/areca" do
action :delete
recursive true
end
end
["cciss-vol-status", "mpt-status", "sas2ircu-status", "megaraid-status", "megaclisas-status", "aacraid-status"].each do |status_package|
if status_packages.include?(status_package)
package status_package
template "/etc/default/#{status_package}d" do
source "raid.default.erb"
owner "root"
group "root"
mode 0o644
variables :devices => status_packages[status_package]
end
service "#{status_package}d" do
action [:start, :enable]
supports :status => false, :restart => true, :reload => false
subscribes :restart, "template[/etc/default/#{status_package}d]"
end
else
package status_package do
action :purge
end
file "/etc/default/#{status_package}d" do
action :delete
end
end
end
# Disk list discovered by the hardware ohai plugin; empty when the node
# reports no disk data at all.
hardware_disk = node[:hardware][:disk]
disks = hardware_disk ? hardware_disk[:disks] : []
intel_ssds = disks.select { |d| d[:vendor] == "INTEL" && d[:model] =~ /^SSD/ }
nvmes = if node[:hardware][:pci]
node[:hardware][:pci].values.select { |pci| pci[:driver] == "nvme" }
else
[]
end
intel_nvmes = nvmes.select { |pci| pci[:vendor_name] == "Intel Corporation" }
if !intel_ssds.empty? || !intel_nvmes.empty?
package "unzip"
package "alien"
remote_file "#{Chef::Config[:file_cache_path]}/DataCenterTool_3_0_0_Linux.zip" do
source "https://downloadmirror.intel.com/23931/eng/DataCenterTool_3_0_0_Linux.zip"
end
execute "unzip-DataCenterTool" do
command "unzip DataCenterTool_3_0_0_Linux.zip isdct-3.0.0.400-15.x86_64.rpm"
cwd Chef::Config[:file_cache_path]
user "root"
group "root"
not_if { File.exist?("#{Chef::Config[:file_cache_path]}/isdct-3.0.0.400-15.x86_64.rpm") }
end
execute "alien-isdct" do
command "alien --to-deb isdct-3.0.0.400-15.x86_64.rpm"
cwd Chef::Config[:file_cache_path]
user "root"
group "root"
not_if { File.exist?("#{Chef::Config[:file_cache_path]}/isdct_3.0.0.400-16_amd64.deb") }
end
dpkg_package "isdct" do
source "#{Chef::Config[:file_cache_path]}/isdct_3.0.0.400-16_amd64.deb"
end
end
disks = disks.map do |disk|
next if disk[:state] == "spun_down"
if disk[:smart_device]
controller = node[:hardware][:disk][:controllers][disk[:controller]]
device = controller[:device].sub("/dev/", "")
smart = disk[:smart_device]
if device.start_with?("cciss/") && smart =~ /^cciss,(\d+)$/
array = node[:hardware][:disk][:arrays][disk[:arrays].first]
munin = "cciss-3#{array[:wwn]}-#{Regexp.last_match(1)}"
elsif smart =~ /^.*,(\d+)$/
munin = "#{device}-#{Regexp.last_match(1)}"
elsif smart =~ %r{^.*,(\d+)/(\d+)$}
munin = "#{device}-#{Regexp.last_match(1)}:#{Regexp.last_match(2)}"
end
elsif disk[:device]
device = disk[:device].sub("/dev/", "")
munin = device
end
next if device.nil?
Hash[
:device => device,
:smart => smart,
:munin => munin,
:hddtemp => munin.tr("-:", "_")
]
end
smartd_service = if node[:lsb][:release].to_f >= 16.04
"smartd"
else
"smartmontools"
end
disks = disks.compact
if disks.count > 0
package "smartmontools"
template "/usr/local/bin/smartd-mailer" do
source "smartd-mailer.erb"
owner "root"
group "root"
mode 0o755
end
template "/etc/smartd.conf" do
source "smartd.conf.erb"
owner "root"
group "root"
mode 0o644
variables :disks => disks
end
template "/etc/default/smartmontools" do
source "smartmontools.erb"
owner "root"
group "root"
mode 0o644
end
service smartd_service do
action [:enable, :start]
subscribes :reload, "template[/etc/smartd.conf]"
subscribes :restart, "template[/etc/default/smartmontools]"
end
# Don't try and do munin monitoring of disks behind
# an Areca controller as they only allow one thing to
# talk to the controller at a time and smartd will
# throw errors if it clashes with munin
disks = disks.reject { |disk| disk[:smart] && disk[:smart].start_with?("areca,") }
disks.each do |disk|
munin_plugin "smart_#{disk[:munin]}" do
target "smart_"
conf "munin.smart.erb"
conf_variables :disk => disk
end
end
else
service smartd_service do
action [:stop, :disable]
end
end
if disks.count > 0
munin_plugin "hddtemp_smartctl" do
conf "munin.hddtemp.erb"
conf_variables :disks => disks
end
else
munin_plugin "hddtemp_smartctl" do
action :delete
conf "munin.hddtemp.erb"
end
end
plugins = Dir.glob("/etc/munin/plugins/smart_*").map { |p| File.basename(p) } -
disks.map { |d| "smart_#{d[:munin]}" }
plugins.each do |plugin|
munin_plugin plugin do
action :delete
end
end
if File.exist?("/etc/mdadm/mdadm.conf")
mdadm_conf = edit_file "/etc/mdadm/mdadm.conf" do |line|
line.gsub!(/^MAILADDR .*$/, "MAILADDR admins@openstreetmap.org")
line
end
file "/etc/mdadm/mdadm.conf" do
owner "root"
group "root"
mode 0o644
content mdadm_conf
end
service "mdadm" do
action :nothing
subscribes :restart, "file[/etc/mdadm/mdadm.conf]"
end
end
template "/etc/modules" do
source "modules.erb"
owner "root"
group "root"
mode 0o644
end
if node[:lsb][:release].to_f <= 12.10
service "module-init-tools" do
provider Chef::Provider::Service::Upstart
action :nothing
subscribes :start, "template[/etc/modules]"
end
else
service "kmod" do
if node[:lsb][:release].to_f >= 15.10
provider Chef::Provider::Service::Systemd
else
provider Chef::Provider::Service::Upstart
end
action :nothing
subscribes :start, "template[/etc/modules]"
end
end
if node[:hardware][:watchdog]
package "watchdog"
template "/etc/default/watchdog" do
source "watchdog.erb"
owner "root"
group "root"
mode 0o644
variables :module => node[:hardware][:watchdog]
end
service "watchdog" do
action [:enable, :start]
end
end
unless Dir.glob("/sys/class/hwmon/hwmon*").empty?
package "lm-sensors"
Dir.glob("/sys/devices/platform/coretemp.*").each do |coretemp|
cpu = File.basename(coretemp).sub("coretemp.", "").to_i
chip = format("coretemp-isa-%04d", cpu)
temps = if File.exist?("#{coretemp}/name")
Dir.glob("#{coretemp}/temp*_input").map do |temp|
File.basename(temp).sub("temp", "").sub("_input", "").to_i
end.sort
else
Dir.glob("#{coretemp}/hwmon/hwmon*/temp*_input").map do |temp|
File.basename(temp).sub("temp", "").sub("_input", "").to_i
end.sort
end
if temps.first == 1
node.default[:hardware][:sensors][chip][:temps][:temp1][:label] = "CPU #{cpu}"
temps.shift
end
temps.each_with_index do |temp, index|
node.default[:hardware][:sensors][chip][:temps]["temp#{temp}"][:label] = "CPU #{cpu} Core #{index}"
end
end
execute "/etc/sensors.d/chef.conf" do
action :nothing
command "/usr/bin/sensors -s"
user "root"
group "root"
end
template "/etc/sensors.d/chef.conf" do
source "sensors.conf.erb"
owner "root"
group "root"
mode 0o644
notifies :run, "execute[/etc/sensors.d/chef.conf]"
end
end
# | (file boundary marker between concatenated recipe revisions)
#
# Cookbook:: hardware
# Recipe:: default
#
# Copyright:: 2012, OpenStreetMap Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
include_recipe "apt"
include_recipe "git"
include_recipe "munin"
include_recipe "prometheus"
include_recipe "sysfs"
include_recipe "tools"
ohai_plugin "hardware" do
template "ohai.rb.erb"
end
case node[:cpu][:"0"][:vendor_id]
when "GenuineIntel"
package "intel-microcode"
when "AuthenticAMD"
package "amd64-microcode"
end
if node[:dmi] && node[:dmi][:system]
case node[:dmi][:system][:manufacturer]
when "empty"
manufacturer = node[:dmi][:base_board][:manufacturer]
product = node[:dmi][:base_board][:product_name]
else
manufacturer = node[:dmi][:system][:manufacturer]
product = node[:dmi][:system][:product_name]
end
else
manufacturer = "Unknown"
product = "Unknown"
end
units = []
if node[:roles].include?("bytemark") || node[:roles].include?("exonetric")
units << "0"
end
case manufacturer
when "HP"
package "hponcfg"
package "hp-health" do
action :install
notifies :restart, "service[hp-health]"
end
service "hp-health" do
action [:enable, :start]
supports :status => true, :restart => true
end
if product.end_with?("Gen8", "Gen9")
package "hp-ams" do
action :install
notifies :restart, "service[hp-ams]"
end
service "hp-ams" do
action [:enable, :start]
supports :status => true, :restart => true
end
end
units << "1"
when "TYAN"
units << "0"
when "TYAN Computer Corporation"
units << "0"
when "Supermicro"
units << "1"
when "IBM"
units << "0"
when "VMware, Inc."
package "open-vm-tools"
# Remove timeSync plugin completely
# https://github.com/vmware/open-vm-tools/issues/302
file "/usr/lib/open-vm-tools/plugins/vmsvc/libtimeSync.so" do
action :delete
notifies :restart, "service[open-vm-tools]"
end
# Attempt to tell Host we are not interested in timeSync
execute "vmware-toolbox-cmd-timesync-disable" do
command "/usr/bin/vmware-toolbox-cmd timesync disable"
ignore_failure true
end
service "open-vm-tools" do
action [:enable, :start]
supports :status => true, :restart => true
end
end
units.sort.uniq.each do |unit|
service "serial-getty@ttyS#{unit}" do
action [:enable, :start]
end
end
# if we need a different / special kernel version to make the hardware
# work (e.g: https://github.com/openstreetmap/operations/issues/45) then
# ensure that we have the package installed. the grub template will
# make sure that this is the default on boot.
if node[:hardware][:grub][:kernel]
kernel_version = node[:hardware][:grub][:kernel]
package "linux-image-#{kernel_version}-generic"
package "linux-image-extra-#{kernel_version}-generic"
package "linux-headers-#{kernel_version}-generic"
package "linux-tools-#{kernel_version}-generic"
boot_device = IO.popen(["df", "/boot"]).readlines.last.split.first
boot_uuid = IO.popen(["blkid", "-o", "value", "-s", "UUID", boot_device]).readlines.first.chomp
grub_entry = "gnulinux-advanced-#{boot_uuid}>gnulinux-#{kernel_version}-advanced-#{boot_uuid}"
else
grub_entry = "0"
end
if File.exist?("/etc/default/grub")
execute "update-grub" do
action :nothing
command "/usr/sbin/update-grub"
not_if { ENV["TEST_KITCHEN"] }
end
template "/etc/default/grub" do
source "grub.erb"
owner "root"
group "root"
mode "644"
variables :units => units, :entry => grub_entry
notifies :run, "execute[update-grub]"
end
end
execute "update-initramfs" do
action :nothing
command "update-initramfs -u -k all"
user "root"
group "root"
end
template "/etc/initramfs-tools/conf.d/mdadm" do
source "initramfs-mdadm.erb"
owner "root"
group "root"
mode "644"
notifies :run, "execute[update-initramfs]"
end
package "haveged"
service "haveged" do
action [:enable, :start]
end
package "ipmitool" if node[:kernel][:modules].include?("ipmi_si")
package "irqbalance"
service "irqbalance" do
action [:start, :enable]
supports :status => false, :restart => true, :reload => false
end
# Link Layer Discovery Protocol Daemon
package "lldpd"
service "lldpd" do
action [:start, :enable]
supports :status => true, :restart => true, :reload => true
end
tools_packages = []
status_packages = {}
if node[:virtualization][:role] != "guest" ||
(node[:virtualization][:system] != "lxc" &&
node[:virtualization][:system] != "lxd" &&
node[:virtualization][:system] != "openvz")
node[:kernel][:modules].each_key do |modname|
case modname
when "cciss"
tools_packages << "ssacli"
status_packages["cciss-vol-status"] ||= []
when "hpsa"
tools_packages << "ssacli"
status_packages["cciss-vol-status"] ||= []
when "mptsas"
tools_packages << "lsiutil"
status_packages["mpt-status"] ||= []
when "mpt2sas", "mpt3sas"
tools_packages << "sas2ircu"
status_packages["sas2ircu-status"] ||= []
when "megaraid_mm"
tools_packages << "megactl"
status_packages["megaraid-status"] ||= []
when "megaraid_sas"
tools_packages << "megacli"
status_packages["megaclisas-status"] ||= []
when "aacraid"
tools_packages << "arcconf"
status_packages["aacraid-status"] ||= []
when "arcmsr"
tools_packages << "areca"
end
end
node[:block_device].each do |name, attributes|
next unless attributes[:vendor] == "HP" && attributes[:model] == "LOGICAL VOLUME"
if name =~ /^cciss!(c[0-9]+)d[0-9]+$/
status_packages["cciss-vol-status"] |= ["cciss/#{Regexp.last_match[1]}d0"]
else
Dir.glob("/sys/block/#{name}/device/scsi_generic/*").each do |sg|
status_packages["cciss-vol-status"] |= [File.basename(sg)]
end
end
end
end
%w[ssacli lsiutil sas2ircu megactl megacli arcconf].each do |tools_package|
if tools_packages.include?(tools_package)
package tools_package
else
package tools_package do
action :purge
end
end
end
if tools_packages.include?("areca")
include_recipe "git"
git "/opt/areca" do
action :sync
repository "https://git.openstreetmap.org/private/areca.git"
depth 1
user "root"
group "root"
not_if { ENV["TEST_KITCHEN"] }
end
else
directory "/opt/areca" do
action :delete
recursive true
end
end
if status_packages.include?("cciss-vol-status")
template "/usr/local/bin/cciss-vol-statusd" do
source "cciss-vol-statusd.erb"
owner "root"
group "root"
mode "755"
notifies :restart, "service[cciss-vol-statusd]"
end
systemd_service "cciss-vol-statusd" do
description "Check cciss_vol_status values in the background"
exec_start "/usr/local/bin/cciss-vol-statusd"
private_tmp true
protect_system "full"
protect_home true
no_new_privileges true
notifies :restart, "service[cciss-vol-statusd]"
end
else
systemd_service "cciss-vol-statusd" do
action :delete
end
template "/usr/local/bin/cciss-vol-statusd" do
action :delete
end
end
%w[cciss-vol-status mpt-status sas2ircu-status megaraid-status megaclisas-status aacraid-status].each do |status_package|
if status_packages.include?(status_package)
package status_package
template "/etc/default/#{status_package}d" do
source "raid.default.erb"
owner "root"
group "root"
mode "644"
variables :devices => status_packages[status_package]
end
service "#{status_package}d" do
action [:start, :enable]
supports :status => false, :restart => true, :reload => false
subscribes :restart, "template[/etc/default/#{status_package}d]"
end
else
package status_package do
action :purge
end
file "/etc/default/#{status_package}d" do
action :delete
end
end
end
disks = if node[:hardware][:disk]
node[:hardware][:disk][:disks]
else
[]
end
intel_ssds = disks.select { |d| d[:vendor] == "INTEL" && d[:model] =~ /^SSD/ }
nvmes = if node[:hardware][:pci]
node[:hardware][:pci].values.select { |pci| pci[:driver] == "nvme" }
else
[]
end
intel_nvmes = nvmes.select { |pci| pci[:vendor_name] == "Intel Corporation" }
if !intel_ssds.empty? || !intel_nvmes.empty?
package "unzip"
intel_ssd_tool_version = "3.0.26"
remote_file "#{Chef::Config[:file_cache_path]}/Intel_SSD_Data_Center_Tool_#{intel_ssd_tool_version}_Linux.zip" do
source "https://downloadmirror.intel.com/29720/eng/Intel_SSD_DCT_#{intel_ssd_tool_version}_Linux.zip"
end
execute "#{Chef::Config[:file_cache_path]}/Intel_SSD_Data_Center_Tool_#{intel_ssd_tool_version}_Linux.zip" do
command "unzip Intel_SSD_Data_Center_Tool_#{intel_ssd_tool_version}_Linux.zip isdct_#{intel_ssd_tool_version}.400-1_amd64.deb"
cwd Chef::Config[:file_cache_path]
user "root"
group "root"
not_if { ::File.exist?("#{Chef::Config[:file_cache_path]}/isdct_#{intel_ssd_tool_version}.400-1_amd64.deb") }
end
dpkg_package "isdct" do
version "#{intel_ssd_tool_version}-1"
source "#{Chef::Config[:file_cache_path]}/isdct_#{intel_ssd_tool_version}.400-1_amd64.deb"
end
end
disks = disks.map do |disk|
next if disk[:state] == "spun_down" || %w[unconfigured failed].any?(disk[:status])
if disk[:smart_device]
controller = node[:hardware][:disk][:controllers][disk[:controller]]
if controller && controller[:device]
device = controller[:device].sub("/dev/", "")
smart = disk[:smart_device]
if device.start_with?("cciss/") && smart =~ /^cciss,(\d+)$/
array = node[:hardware][:disk][:arrays][disk[:arrays].first]
munin = "cciss-3#{array[:wwn]}-#{Regexp.last_match(1)}"
elsif smart =~ /^.*,(\d+)$/
munin = "#{device}-#{Regexp.last_match(1)}"
elsif smart =~ %r{^.*,(\d+)/(\d+)$}
munin = "#{device}-#{Regexp.last_match(1)}:#{Regexp.last_match(2)}"
end
elsif disk[:device]
device = disk[:device].sub("/dev/", "")
smart = disk[:smart_device]
if smart =~ /^.*,(\d+),(\d+),(\d+)$/
munin = "#{device}-#{Regexp.last_match(1)}:#{Regexp.last_match(2)}:#{Regexp.last_match(3)}"
end
end
elsif disk[:device] =~ %r{^/dev/(nvme\d+)n\d+$}
device = Regexp.last_match(1)
munin = device
elsif disk[:device]
device = disk[:device].sub("/dev/", "")
munin = device
end
next if device.nil?
Hash[
:device => device,
:smart => smart,
:munin => munin,
:hddtemp => munin.tr("-:", "_")
]
end
disks = disks.compact.uniq
if disks.count.positive?
package "smartmontools"
template "/etc/cron.daily/update-smart-drivedb" do
source "update-smart-drivedb.erb"
owner "root"
group "root"
mode "755"
end
template "/usr/local/bin/smartd-mailer" do
source "smartd-mailer.erb"
owner "root"
group "root"
mode "755"
end
template "/etc/smartd.conf" do
source "smartd.conf.erb"
owner "root"
group "root"
mode "644"
variables :disks => disks
end
template "/etc/default/smartmontools" do
source "smartmontools.erb"
owner "root"
group "root"
mode "644"
end
service "smartmontools" do
action [:enable, :start]
subscribes :reload, "template[/etc/smartd.conf]"
subscribes :restart, "template[/etc/default/smartmontools]"
end
# Don't try and do munin monitoring of disks behind
# an Areca controller as they only allow one thing to
# talk to the controller at a time and smartd will
# throw errors if it clashes with munin
disks = disks.reject { |disk| disk[:smart]&.start_with?("areca,") }
disks.each do |disk|
munin_plugin "smart_#{disk[:munin]}" do
target "smart_"
conf "munin.smart.erb"
conf_variables :disk => disk
end
end
template "/etc/prometheus/collectors/smart.devices" do
source "smart.devices.erb"
owner "root"
group "root"
mode "644"
variables :disks => disks
end
prometheus_collector "smart" do
interval "15m"
end
else
service "smartd" do
action [:stop, :disable]
end
end
if disks.count.positive?
munin_plugin "hddtemp_smartctl" do
conf "munin.hddtemp.erb"
conf_variables :disks => disks
end
else
munin_plugin "hddtemp_smartctl" do
action :delete
conf "munin.hddtemp.erb"
end
end
plugins = Dir.glob("/etc/munin/plugins/smart_*").map { |p| File.basename(p) } -
disks.map { |d| "smart_#{d[:munin]}" }
plugins.each do |plugin|
munin_plugin plugin do
action :delete
conf "munin.smart.erb"
end
end
if File.exist?("/etc/mdadm/mdadm.conf")
mdadm_conf = edit_file "/etc/mdadm/mdadm.conf" do |line|
line.gsub!(/^MAILADDR .*$/, "MAILADDR admins@openstreetmap.org")
line
end
file "/etc/mdadm/mdadm.conf" do
owner "root"
group "root"
mode "644"
content mdadm_conf
end
service "mdadm" do
action :nothing
subscribes :restart, "file[/etc/mdadm/mdadm.conf]"
end
end
template "/etc/modules" do
source "modules.erb"
owner "root"
group "root"
mode "644"
end
service "kmod" do
action :nothing
subscribes :start, "template[/etc/modules]"
end
if node[:hardware][:watchdog]
package "watchdog"
template "/etc/default/watchdog" do
source "watchdog.erb"
owner "root"
group "root"
mode "644"
variables :module => node[:hardware][:watchdog]
end
service "watchdog" do
action [:enable, :start]
end
end
unless Dir.glob("/sys/class/hwmon/hwmon*").empty?
package "lm-sensors"
Dir.glob("/sys/devices/platform/coretemp.*").each do |coretemp|
cpu = File.basename(coretemp).sub("coretemp.", "").to_i
chip = format("coretemp-isa-%04d", cpu)
temps = if File.exist?("#{coretemp}/name")
Dir.glob("#{coretemp}/temp*_input").map do |temp|
File.basename(temp).sub("temp", "").sub("_input", "").to_i
end.sort
else
Dir.glob("#{coretemp}/hwmon/hwmon*/temp*_input").map do |temp|
File.basename(temp).sub("temp", "").sub("_input", "").to_i
end.sort
end
if temps.first == 1
node.default[:hardware][:sensors][chip][:temps][:temp1][:label] = "CPU #{cpu}"
temps.shift
end
temps.each_with_index do |temp, index|
node.default[:hardware][:sensors][chip][:temps]["temp#{temp}"][:label] = "CPU #{cpu} Core #{index}"
end
end
execute "/etc/sensors.d/chef.conf" do
action :nothing
command "/usr/bin/sensors -s"
user "root"
group "root"
end
template "/etc/sensors.d/chef.conf" do
source "sensors.conf.erb"
owner "root"
group "root"
mode "644"
notifies :run, "execute[/etc/sensors.d/chef.conf]"
end
end
# Optionally size /dev/shm via fstab and remount immediately so the new
# size takes effect without a reboot.
if node[:hardware][:shm_size]
execute "remount-dev-shm" do
action :nothing
command "/bin/mount -o remount /dev/shm"
user "root"
group "root"
end
# :enable writes the fstab entry; the notify applies it right away.
mount "/dev/shm" do
action :enable
device "tmpfs"
fstype "tmpfs"
options "rw,nosuid,nodev,size=#{node[:hardware][:shm_size]}"
notifies :run, "execute[remount-dev-shm]"
end
end
# Configure ipmi exporter for prometheus
#
# Cookbook:: hardware
# Recipe:: default
#
# Copyright:: 2012, OpenStreetMap Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
include_recipe "apt"
include_recipe "git"
include_recipe "munin"
include_recipe "prometheus"
include_recipe "sysfs"
include_recipe "tools"
ohai_plugin "hardware" do
template "ohai.rb.erb"
end
# Install the microcode update package matching the CPU vendor; vendors
# outside the table get no package.
microcode_package = {
  "GenuineIntel" => "intel-microcode",
  "AuthenticAMD" => "amd64-microcode",
}[node[:cpu][:"0"][:vendor_id]]
package microcode_package if microcode_package
if node[:dmi] && node[:dmi][:system]
case node[:dmi][:system][:manufacturer]
when "empty"
manufacturer = node[:dmi][:base_board][:manufacturer]
product = node[:dmi][:base_board][:product_name]
else
manufacturer = node[:dmi][:system][:manufacturer]
product = node[:dmi][:system][:product_name]
end
else
manufacturer = "Unknown"
product = "Unknown"
end
units = []
if node[:roles].include?("bytemark") || node[:roles].include?("exonetric")
units << "0"
end
# Manufacturer-specific management tooling, plus the serial console
# unit each vendor exposes.
case manufacturer
when "HP"
  package "hponcfg"

  package "hp-health" do
    action :install
    notifies :restart, "service[hp-health]"
  end

  service "hp-health" do
    action [:enable, :start]
    supports :status => true, :restart => true
  end

  # Gen8/Gen9 machines additionally need the Agentless Management Service.
  if product.end_with?("Gen8", "Gen9")
    package "hp-ams" do
      action :install
      notifies :restart, "service[hp-ams]"
    end

    service "hp-ams" do
      action [:enable, :start]
      supports :status => true, :restart => true
    end
  end

  units << "1"
when "TYAN"
  units << "0"
when "TYAN Computer Corporation"
  units << "0"
when "Supermicro"
  units << "1"
when "IBM"
  units << "0"
when "VMware, Inc."
  package "open-vm-tools"

  # Remove timeSync plugin completely
  # https://github.com/vmware/open-vm-tools/issues/302
  file "/usr/lib/open-vm-tools/plugins/vmsvc/libtimeSync.so" do
    action :delete
    notifies :restart, "service[open-vm-tools]"
  end

  # Attempt to tell Host we are not interested in timeSync
  execute "vmware-toolbox-cmd-timesync-disable" do
    command "/usr/bin/vmware-toolbox-cmd timesync disable"
    ignore_failure true
  end

  service "open-vm-tools" do
    action [:enable, :start]
    supports :status => true, :restart => true
  end
end

# Run a login getty on every serial console unit we decided exists.
units.sort.uniq.each do |unit|
  service "serial-getty@ttyS#{unit}" do
    action [:enable, :start]
  end
end
# if we need a different / special kernel version to make the hardware
# work (e.g: https://github.com/openstreetmap/operations/issues/45) then
# ensure that we have the package installed. the grub template will
# make sure that this is the default on boot.
if node[:hardware][:grub][:kernel]
  kernel_version = node[:hardware][:grub][:kernel]

  package "linux-image-#{kernel_version}-generic"
  package "linux-image-extra-#{kernel_version}-generic"
  package "linux-headers-#{kernel_version}-generic"
  package "linux-tools-#{kernel_version}-generic"

  # Build the grub submenu entry id for the pinned kernel from the UUID
  # of the filesystem that /boot lives on.
  boot_device = IO.popen(["df", "/boot"]).readlines.last.split.first
  boot_uuid = IO.popen(["blkid", "-o", "value", "-s", "UUID", boot_device]).readlines.first.chomp
  grub_entry = "gnulinux-advanced-#{boot_uuid}>gnulinux-#{kernel_version}-advanced-#{boot_uuid}"
else
  grub_entry = "0"
end

# Render /etc/default/grub (serial console units + default menu entry)
# and regenerate the grub configuration when it changes.
if File.exist?("/etc/default/grub")
  execute "update-grub" do
    action :nothing
    command "/usr/sbin/update-grub"
    not_if { ENV["TEST_KITCHEN"] }
  end

  template "/etc/default/grub" do
    source "grub.erb"
    owner "root"
    group "root"
    mode "644"
    variables :units => units, :entry => grub_entry
    notifies :run, "execute[update-grub]"
  end
end
# Rebuild all initramfs images whenever the mdadm initramfs config changes.
execute "update-initramfs" do
  action :nothing
  command "update-initramfs -u -k all"
  user "root"
  group "root"
end

template "/etc/initramfs-tools/conf.d/mdadm" do
  source "initramfs-mdadm.erb"
  owner "root"
  group "root"
  mode "644"
  notifies :run, "execute[update-initramfs]"
end

# Entropy-gathering daemon.
package "haveged"
service "haveged" do
  action [:enable, :start]
end

# IPMI tooling and the prometheus IPMI exporter, only where a BMC
# system interface driver is loaded.
if node[:kernel][:modules].include?("ipmi_si")
  package "ipmitool"
  package "freeipmi-tools"

  prometheus_exporter "ipmi" do
    port 9290
  end
end

package "irqbalance"

service "irqbalance" do
  action [:start, :enable]
  supports :status => false, :restart => true, :reload => false
end

# Link Layer Discovery Protocol Daemon
package "lldpd"
service "lldpd" do
  action [:start, :enable]
  supports :status => true, :restart => true, :reload => true
end
# Work out which RAID controller CLI tools and status-monitoring
# daemons this machine needs, keyed off the loaded kernel modules.
# Container guests (lxc/lxd/openvz) see the host's hardware but must
# not try to manage it, so detection is skipped for them.
tools_packages = []
status_packages = {}

if node[:virtualization][:role] != "guest" ||
   (node[:virtualization][:system] != "lxc" &&
    node[:virtualization][:system] != "lxd" &&
    node[:virtualization][:system] != "openvz")
  node[:kernel][:modules].each_key do |modname|
    case modname
    when "cciss"
      tools_packages << "ssacli"
      status_packages["cciss-vol-status"] ||= []
    when "hpsa"
      tools_packages << "ssacli"
      status_packages["cciss-vol-status"] ||= []
    when "mptsas"
      tools_packages << "lsiutil"
      status_packages["mpt-status"] ||= []
    when "mpt2sas", "mpt3sas"
      tools_packages << "sas2ircu"
      status_packages["sas2ircu-status"] ||= []
    when "megaraid_mm"
      tools_packages << "megactl"
      status_packages["megaraid-status"] ||= []
    when "megaraid_sas"
      tools_packages << "megacli"
      status_packages["megaclisas-status"] ||= []
    when "aacraid"
      tools_packages << "arcconf"
      status_packages["aacraid-status"] ||= []
    when "arcmsr"
      tools_packages << "areca"
    end
  end

  # Collect the device names cciss_vol_status should poll for each HP
  # logical volume.
  node[:block_device].each do |name, attributes|
    next unless attributes[:vendor] == "HP" && attributes[:model] == "LOGICAL VOLUME"

    # Initialise the list if the cciss/hpsa module was not enumerated
    # above; previously a missing key made the |= below raise
    # NoMethodError on nil.
    status_packages["cciss-vol-status"] ||= []

    if name =~ /^cciss!(c[0-9]+)d[0-9]+$/
      status_packages["cciss-vol-status"] |= ["cciss/#{Regexp.last_match[1]}d0"]
    else
      Dir.glob("/sys/block/#{name}/device/scsi_generic/*").each do |sg|
        status_packages["cciss-vol-status"] |= [File.basename(sg)]
      end
    end
  end
end
# Install the controller tools we decided we need; purge the rest so
# hardware swaps do not leave stale tooling behind.
%w[ssacli lsiutil sas2ircu megactl megacli arcconf].each do |tools_package|
  if tools_packages.include?(tools_package)
    package tools_package
  else
    package tools_package do
      action :purge
    end
  end
end

# The Areca CLI is distributed from a private git repository rather
# than a distro package.
if tools_packages.include?("areca")
  include_recipe "git"

  git "/opt/areca" do
    action :sync
    repository "https://git.openstreetmap.org/private/areca.git"
    depth 1
    user "root"
    group "root"
    not_if { ENV["TEST_KITCHEN"] }
  end
else
  directory "/opt/areca" do
    action :delete
    recursive true
  end
end

# cciss_vol_status ships no daemon of its own, so install a wrapper
# script plus a systemd unit to poll it in the background.
if status_packages.include?("cciss-vol-status")
  template "/usr/local/bin/cciss-vol-statusd" do
    source "cciss-vol-statusd.erb"
    owner "root"
    group "root"
    mode "755"
    notifies :restart, "service[cciss-vol-statusd]"
  end

  systemd_service "cciss-vol-statusd" do
    description "Check cciss_vol_status values in the background"
    exec_start "/usr/local/bin/cciss-vol-statusd"
    private_tmp true
    protect_system "full"
    protect_home true
    no_new_privileges true
    notifies :restart, "service[cciss-vol-statusd]"
  end
else
  systemd_service "cciss-vol-statusd" do
    action :delete
  end

  template "/usr/local/bin/cciss-vol-statusd" do
    action :delete
  end
end
# For each known RAID status daemon: install and configure it when the
# matching controller was detected above, otherwise purge it and its
# defaults file.
%w[cciss-vol-status mpt-status sas2ircu-status megaraid-status megaclisas-status aacraid-status].each do |status_package|
  if status_packages.include?(status_package)
    package status_package

    template "/etc/default/#{status_package}d" do
      source "raid.default.erb"
      owner "root"
      group "root"
      mode "644"
      variables :devices => status_packages[status_package]
    end

    service "#{status_package}d" do
      action [:start, :enable]
      supports :status => false, :restart => true, :reload => false
      subscribes :restart, "template[/etc/default/#{status_package}d]"
    end
  else
    package status_package do
      action :purge
    end

    file "/etc/default/#{status_package}d" do
      action :delete
    end
  end
end
# Disk inventory as reported by the hardware ohai plugin.
disks = if node[:hardware][:disk]
          node[:hardware][:disk][:disks]
        else
          []
        end

intel_ssds = disks.select { |d| d[:vendor] == "INTEL" && d[:model] =~ /^SSD/ }

nvmes = if node[:hardware][:pci]
          node[:hardware][:pci].values.select { |pci| pci[:driver] == "nvme" }
        else
          []
        end

intel_nvmes = nvmes.select { |pci| pci[:vendor_name] == "Intel Corporation" }

# Intel's SSD Data Center Tool is shipped as a deb inside a zip, so
# download, unpack and install it by hand when any Intel SSD or NVMe
# device is present.
if !intel_ssds.empty? || !intel_nvmes.empty?
  package "unzip"

  intel_ssd_tool_version = "3.0.26"

  remote_file "#{Chef::Config[:file_cache_path]}/Intel_SSD_Data_Center_Tool_#{intel_ssd_tool_version}_Linux.zip" do
    source "https://downloadmirror.intel.com/29720/eng/Intel_SSD_DCT_#{intel_ssd_tool_version}_Linux.zip"
  end

  execute "#{Chef::Config[:file_cache_path]}/Intel_SSD_Data_Center_Tool_#{intel_ssd_tool_version}_Linux.zip" do
    command "unzip Intel_SSD_Data_Center_Tool_#{intel_ssd_tool_version}_Linux.zip isdct_#{intel_ssd_tool_version}.400-1_amd64.deb"
    cwd Chef::Config[:file_cache_path]
    user "root"
    group "root"
    not_if { ::File.exist?("#{Chef::Config[:file_cache_path]}/isdct_#{intel_ssd_tool_version}.400-1_amd64.deb") }
  end

  dpkg_package "isdct" do
    version "#{intel_ssd_tool_version}-1"
    source "#{Chef::Config[:file_cache_path]}/isdct_#{intel_ssd_tool_version}.400-1_amd64.deb"
  end
end
# Normalise each disk record into :device (kernel name), :smart
# (smartctl -d selector), :munin (plugin name suffix) and :hddtemp
# names. Spun-down, unconfigured and failed disks are dropped.
disks = disks.map do |disk|
  next if disk[:state] == "spun_down" || %w[unconfigured failed].any?(disk[:status])

  if disk[:smart_device]
    # Disk sits behind a RAID controller: address it via the
    # controller device plus the smart_device selector.
    controller = node[:hardware][:disk][:controllers][disk[:controller]]

    if controller && controller[:device]
      device = controller[:device].sub("/dev/", "")
      smart = disk[:smart_device]

      if device.start_with?("cciss/") && smart =~ /^cciss,(\d+)$/
        array = node[:hardware][:disk][:arrays][disk[:arrays].first]
        munin = "cciss-3#{array[:wwn]}-#{Regexp.last_match(1)}"
      elsif smart =~ /^.*,(\d+)$/
        munin = "#{device}-#{Regexp.last_match(1)}"
      elsif smart =~ %r{^.*,(\d+)/(\d+)$}
        munin = "#{device}-#{Regexp.last_match(1)}:#{Regexp.last_match(2)}"
      end
    elsif disk[:device]
      device = disk[:device].sub("/dev/", "")
      smart = disk[:smart_device]

      if smart =~ /^.*,(\d+),(\d+),(\d+)$/
        munin = "#{device}-#{Regexp.last_match(1)}:#{Regexp.last_match(2)}:#{Regexp.last_match(3)}"
      end
    end
  elsif disk[:device] =~ %r{^/dev/(nvme\d+)n\d+$}
    # NVMe: monitor the controller device, not the namespace.
    device = Regexp.last_match(1)
    munin = device
  elsif disk[:device]
    device = disk[:device].sub("/dev/", "")
    munin = device
  end

  next if device.nil?

  # NOTE(review): only device is guarded above; if a smart_device
  # string matches none of the regexes, munin stays nil and the tr
  # call below would raise — confirm whether that can occur.
  Hash[
    :device => device,
    :smart => smart,
    :munin => munin,
    :hddtemp => munin.tr("-:", "_")
  ]
end

disks = disks.compact.uniq
# Configure smartmontools plus per-disk munin and prometheus
# monitoring when usable disks were found; otherwise ensure smartd is
# stopped.
if disks.count.positive?
  package "smartmontools"

  template "/etc/cron.daily/update-smart-drivedb" do
    source "update-smart-drivedb.erb"
    owner "root"
    group "root"
    mode "755"
  end

  template "/usr/local/bin/smartd-mailer" do
    source "smartd-mailer.erb"
    owner "root"
    group "root"
    mode "755"
  end

  template "/etc/smartd.conf" do
    source "smartd.conf.erb"
    owner "root"
    group "root"
    mode "644"
    variables :disks => disks
  end

  template "/etc/default/smartmontools" do
    source "smartmontools.erb"
    owner "root"
    group "root"
    mode "644"
  end

  service "smartmontools" do
    action [:enable, :start]
    subscribes :reload, "template[/etc/smartd.conf]"
    subscribes :restart, "template[/etc/default/smartmontools]"
  end

  # Don't try and do munin monitoring of disks behind
  # an Areca controller as they only allow one thing to
  # talk to the controller at a time and smartd will
  # throw errors if it clashes with munin
  disks = disks.reject { |disk| disk[:smart]&.start_with?("areca,") }

  disks.each do |disk|
    munin_plugin "smart_#{disk[:munin]}" do
      target "smart_"
      conf "munin.smart.erb"
      conf_variables :disk => disk
    end
  end

  template "/etc/prometheus/collectors/smart.devices" do
    source "smart.devices.erb"
    owner "root"
    group "root"
    mode "644"
    variables :disks => disks
  end

  prometheus_collector "smart" do
    interval "15m"
  end
else
  service "smartd" do
    action [:stop, :disable]
  end
end

if disks.count.positive?
  munin_plugin "hddtemp_smartctl" do
    conf "munin.hddtemp.erb"
    conf_variables :disks => disks
  end
else
  munin_plugin "hddtemp_smartctl" do
    action :delete
    conf "munin.hddtemp.erb"
  end
end

# Remove munin smart_* plugins for disks that no longer exist.
plugins = Dir.glob("/etc/munin/plugins/smart_*").map { |p| File.basename(p) } -
          disks.map { |d| "smart_#{d[:munin]}" }

plugins.each do |plugin|
  munin_plugin plugin do
    action :delete
    conf "munin.smart.erb"
  end
end
# Force mdadm failure alerts to go to the admins list.
if File.exist?("/etc/mdadm/mdadm.conf")
  mdadm_conf = edit_file "/etc/mdadm/mdadm.conf" do |line|
    line.gsub!(/^MAILADDR .*$/, "MAILADDR admins@openstreetmap.org")

    line
  end

  file "/etc/mdadm/mdadm.conf" do
    owner "root"
    group "root"
    mode "644"
    content mdadm_conf
  end

  service "mdadm" do
    action :nothing
    subscribes :restart, "file[/etc/mdadm/mdadm.conf]"
  end
end

# Kernel modules to load at boot; kmod is started to apply changes.
template "/etc/modules" do
  source "modules.erb"
  owner "root"
  group "root"
  mode "644"
end

service "kmod" do
  action :nothing
  subscribes :start, "template[/etc/modules]"
end

# Hardware watchdog, where the node declares a watchdog module.
if node[:hardware][:watchdog]
  package "watchdog"

  template "/etc/default/watchdog" do
    source "watchdog.erb"
    owner "root"
    group "root"
    mode "644"
    variables :module => node[:hardware][:watchdog]
  end

  service "watchdog" do
    action [:enable, :start]
  end
end
# lm-sensors: label each coretemp input with its CPU/core number so
# the generated sensors configuration is readable.
unless Dir.glob("/sys/class/hwmon/hwmon*").empty?
  package "lm-sensors"

  Dir.glob("/sys/devices/platform/coretemp.*").each do |coretemp|
    cpu = File.basename(coretemp).sub("coretemp.", "").to_i
    chip = format("coretemp-isa-%04d", cpu)

    # Older kernels expose temp*_input directly under the coretemp
    # device; newer ones nest them under hwmon/hwmon*.
    temps = if File.exist?("#{coretemp}/name")
              Dir.glob("#{coretemp}/temp*_input").map do |temp|
                File.basename(temp).sub("temp", "").sub("_input", "").to_i
              end.sort
            else
              Dir.glob("#{coretemp}/hwmon/hwmon*/temp*_input").map do |temp|
                File.basename(temp).sub("temp", "").sub("_input", "").to_i
              end.sort
            end

    # temp1 is the package sensor; the rest are per-core sensors.
    if temps.first == 1
      node.default[:hardware][:sensors][chip][:temps][:temp1][:label] = "CPU #{cpu}"
      temps.shift
    end

    temps.each_with_index do |temp, index|
      node.default[:hardware][:sensors][chip][:temps]["temp#{temp}"][:label] = "CPU #{cpu} Core #{index}"
    end
  end

  execute "/etc/sensors.d/chef.conf" do
    action :nothing
    command "/usr/bin/sensors -s"
    user "root"
    group "root"
  end

  template "/etc/sensors.d/chef.conf" do
    source "sensors.conf.erb"
    owner "root"
    group "root"
    mode "644"
    notifies :run, "execute[/etc/sensors.d/chef.conf]"
  end
end

# Resize /dev/shm where the node requests a specific size, remounting
# immediately so the change takes effect without a reboot.
if node[:hardware][:shm_size]
  execute "remount-dev-shm" do
    action :nothing
    command "/bin/mount -o remount /dev/shm"
    user "root"
    group "root"
  end

  mount "/dev/shm" do
    action :enable
    device "tmpfs"
    fstype "tmpfs"
    options "rw,nosuid,nodev,size=#{node[:hardware][:shm_size]}"
    notifies :run, "execute[remount-dev-shm]"
  end
end
# ---------------------------------------------------------------------
#
# Cookbook Name:: mediawiki
# Resource:: mediawiki_site
#
# Copyright 2015, OpenStreetMap Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
default_action :create

# Identity and layout: the resource name is the site's domain name.
property :site, :kind_of => String, :name_attribute => true
property :aliases, :kind_of => [String, Array]
property :directory, :kind_of => String
property :version, :kind_of => String, :default => "1.31"
# MySQL connection details for the wiki database.
property :database_name, :kind_of => String, :required => true
property :database_user, :kind_of => String, :required => true
property :database_password, :kind_of => String, :required => true
# Wiki identity settings rendered into LocalSettings.php.
property :sitename, :kind_of => String, :default => "OpenStreetMap Wiki"
property :metanamespace, :kind_of => String, :default => "OpenStreetMap"
property :logo, :kind_of => String, :default => "$wgStylePath/common/images/wiki.png"
property :email_contact, :kind_of => String, :default => ""
property :email_sender, :kind_of => String, :default => ""
property :email_sender_name, :kind_of => String, :default => "MediaWiki Mail"
property :commons, :kind_of => [TrueClass, FalseClass], :default => true
property :skin, :kind_of => String, :default => "vector"
property :site_notice, :kind_of => [String, TrueClass, FalseClass], :default => false
property :site_readonly, :kind_of => [String, TrueClass, FalseClass], :default => false
# Initial administrator account created by the MediaWiki installer.
property :admin_user, :kind_of => String, :default => "Admin"
property :admin_password, :kind_of => String, :required => true
# Privacy controls and ReCaptcha keys for the ConfirmEdit extension.
property :private_accounts, :kind_of => [TrueClass, FalseClass], :default => false
property :private_site, :kind_of => [TrueClass, FalseClass], :default => false
property :recaptcha_public_key, :kind_of => String
property :recaptcha_private_key, :kind_of => String
property :extra_file_extensions, :kind_of => [String, Array], :default => []
property :reload_apache, :kind_of => [TrueClass, FalseClass], :default => true
action :create do
  # Record this site's details in node attributes so other recipes can
  # discover it; generate the secret key once and then keep it stable.
  node.normal_unless[:mediawiki][:sites][new_resource.site] = {}
  node.normal[:mediawiki][:sites][new_resource.site][:directory] = site_directory
  node.normal[:mediawiki][:sites][new_resource.site][:version] = new_resource.version
  node.normal_unless[:mediawiki][:sites][new_resource.site][:wgSecretKey] = SecureRandom.base64(48)

  # Database account and schema for this wiki.
  mysql_user "#{new_resource.database_user}@localhost" do
    password new_resource.database_password
  end

  mysql_database new_resource.database_name do
    permissions "#{new_resource.database_user}@localhost" => :all
  end

  mediawiki_directory = "#{site_directory}/w"

  # The installer writes its own LocalSettings.php; rename it aside so
  # the templated one below is authoritative. The renamed file doubles
  # as the "already installed" sentinel for install.php's not_if.
  ruby_block "rename-installer-localsettings" do
    action :nothing
    block do
      ::File.rename("#{mediawiki_directory}/LocalSettings.php", "#{mediawiki_directory}/LocalSettings-install.php")
    end
  end

  execute "#{mediawiki_directory}/maintenance/install.php" do
    action :nothing
    # Use metanamespace as Site Name to ensure correct set namespace
    command "php maintenance/install.php --server '#{name}' --dbtype 'mysql' --dbname '#{new_resource.database_name}' --dbuser '#{new_resource.database_user}' --dbpass '#{new_resource.database_password}' --dbserver 'localhost' --scriptpath /w --pass '#{new_resource.admin_password}' '#{new_resource.metanamespace}' '#{new_resource.admin_user}'"
    cwd mediawiki_directory
    user node[:mediawiki][:user]
    group node[:mediawiki][:group]
    not_if do
      ::File.exist?("#{mediawiki_directory}/LocalSettings-install.php")
    end
    notifies :run, "ruby_block[rename-installer-localsettings]", :immediately
  end

  # Schema updater, triggered whenever the code or config changes.
  execute "#{mediawiki_directory}/maintenance/update.php" do
    action :nothing
    command "php maintenance/update.php --quick"
    cwd mediawiki_directory
    user node[:mediawiki][:user]
    group node[:mediawiki][:group]
  end

  declare_resource :directory, site_directory do
    owner node[:mediawiki][:user]
    group node[:mediawiki][:group]
    mode 0o775
  end

  declare_resource :directory, mediawiki_directory do
    owner node[:mediawiki][:user]
    group node[:mediawiki][:group]
    mode 0o775
  end

  # MediaWiki release branches are named RELx_y.
  mediawiki_reference = "REL#{new_resource.version}".tr(".", "_")

  git "#{mediawiki_directory}/vendor" do
    action :nothing
    repository "https://gerrit.wikimedia.org/r/p/mediawiki/vendor.git"
    revision mediawiki_reference
    user node[:mediawiki][:user]
    group node[:mediawiki][:group]
  end

  git mediawiki_directory do
    action :sync
    repository "https://gerrit.wikimedia.org/r/p/mediawiki/core.git"
    revision mediawiki_reference
    user node[:mediawiki][:user]
    group node[:mediawiki][:group]
    notifies :sync, "git[#{mediawiki_directory}/vendor]", :immediately
    notifies :run, "execute[#{mediawiki_directory}/maintenance/install.php]", :immediately
    notifies :run, "execute[#{mediawiki_directory}/maintenance/update.php]"
  end

  # Safety catch if git doesn't update but install.php hasn't run
  ruby_block "catch-installer-localsettings-run" do
    action :run
    block do
    end
    not_if do
      ::File.exist?("#{mediawiki_directory}/LocalSettings-install.php")
    end
    notifies :run, "execute[#{mediawiki_directory}/maintenance/install.php]", :immediately
  end

  # images and cache must be writable by the web server user.
  declare_resource :directory, "#{mediawiki_directory}/images" do
    owner "www-data"
    group node[:mediawiki][:group]
    mode 0o775
  end

  declare_resource :directory, "#{mediawiki_directory}/cache" do
    owner "www-data"
    group node[:mediawiki][:group]
    mode 0o775
  end

  declare_resource :directory, "#{mediawiki_directory}/LocalSettings.d" do
    user node[:mediawiki][:user]
    group node[:mediawiki][:group]
    mode 0o775
  end

  template "#{mediawiki_directory}/LocalSettings.php" do
    cookbook "mediawiki"
    source "LocalSettings.php.erb"
    owner node[:mediawiki][:user]
    group node[:mediawiki][:group]
    mode 0o664
    variables :name => new_resource.site,
              :directory => mediawiki_directory,
              :database_params => database_params,
              :mediawiki => mediawiki_params
    notifies :run, "execute[#{mediawiki_directory}/maintenance/update.php]"
  end

  # Periodic maintenance jobs and a daily database backup.
  template "/etc/cron.d/mediawiki-#{cron_name}" do
    cookbook "mediawiki"
    source "mediawiki.cron.erb"
    owner "root"
    group "root"
    mode 0o644
    variables :name => new_resource.site, :directory => site_directory,
              :user => node[:mediawiki][:user]
  end

  template "/etc/cron.daily/mediawiki-#{cron_name}-backup" do
    cookbook "mediawiki"
    source "mediawiki-backup.cron.erb"
    owner "root"
    group "root"
    mode 0o700
    variables :name => new_resource.site,
              :directory => site_directory,
              :database_params => database_params
  end

  # MobileFrontend extension is required by MinervaNeue skin
  mediawiki_extension "MobileFrontend" do # ~FC005
    site new_resource.site
    template "mw-ext-MobileFrontend.inc.php.erb"
  end

  # MobileFrontend extension is required by MinervaNeue skin
  mediawiki_skin "MinervaNeue" do
    site new_resource.site
    update_site false
  end

  mediawiki_skin "CologneBlue" do
    site new_resource.site
    update_site false
  end

  mediawiki_skin "Modern" do
    site new_resource.site
    update_site false
  end

  mediawiki_skin "MonoBook" do
    site new_resource.site
    update_site false
  end

  mediawiki_skin "Vector" do
    site new_resource.site
    update_site false
  end

  mediawiki_extension "Cite" do
    site new_resource.site
    update_site false
  end

  mediawiki_extension "CiteThisPage" do
    site new_resource.site
    update_site false
  end

  # ConfirmEdit (captcha) is pointless on private sites where only
  # trusted accounts can edit, so it is removed there.
  if new_resource.private_accounts || new_resource.private_site
    mediawiki_extension "ConfirmEdit" do
      site new_resource.site
      update_site false
      action :delete
    end
  else
    mediawiki_extension "ConfirmEdit" do
      site new_resource.site
      template "mw-ext-ConfirmEdit.inc.php.erb"
      variables :public_key => new_resource.recaptcha_public_key,
                :private_key => new_resource.recaptcha_private_key
      update_site false
    end
  end

  mediawiki_extension "Gadgets" do
    site new_resource.site
    update_site false
  end

  mediawiki_extension "ImageMap" do
    site new_resource.site
    update_site false
  end

  mediawiki_extension "InputBox" do
    site new_resource.site
    update_site false
  end

  mediawiki_extension "Interwiki" do
    site new_resource.site
    update_site false
  end

  mediawiki_extension "Nuke" do
    site new_resource.site
    update_site false
  end

  mediawiki_extension "ParserFunctions" do
    site new_resource.site
    template "mw-ext-ParserFunctions.inc.php.erb"
    update_site false
  end

  mediawiki_extension "PdfHandler" do
    site new_resource.site
    template "mw-ext-PdfHandler.inc.php.erb"
    update_site false
  end

  mediawiki_extension "Poem" do
    site new_resource.site
    update_site false
  end

  mediawiki_extension "Renameuser" do
    site new_resource.site
    update_site false
  end

  # SimpleAntiSpam was merged into MediaWiki core; remove the old copy.
  mediawiki_extension "SimpleAntiSpam" do
    site new_resource.site
    update_site false
    action :delete
  end

  mediawiki_extension "SpamBlacklist" do
    site new_resource.site
    template "mw-ext-SpamBlacklist.inc.php.erb"
    update_site false
  end

  mediawiki_extension "SyntaxHighlight_GeSHi" do
    site new_resource.site
    template "mw-ext-SyntaxHighlight.inc.php.erb"
    update_site false
  end

  mediawiki_extension "TitleBlacklist" do
    site new_resource.site
    template "mw-ext-TitleBlacklist.inc.php.erb"
    update_site false
  end

  mediawiki_extension "WikiEditor" do
    site new_resource.site
    update_site false
  end

  mediawiki_extension "Babel" do
    site new_resource.site
    template "mw-ext-Babel.inc.php.erb"
    update_site false
  end

  mediawiki_extension "cldr" do
    site new_resource.site
    template "mw-ext-cldr.inc.php.erb"
    update_site false
  end

  mediawiki_extension "CleanChanges" do
    site new_resource.site
    template "mw-ext-CleanChanges.inc.php.erb"
    update_site false
  end

  mediawiki_extension "LocalisationUpdate" do
    site new_resource.site
    template "mw-ext-LocalisationUpdate.inc.php.erb"
    update_site false
  end

  # LocalisationUpdate Update Cron
  # template "/etc/cron.d/mediawiki-#{name}-LocalisationUpdate" do
  #   cookbook "mediawiki"
  #   source "mediawiki-LocalisationUpdate.cron.erb"
  #   owner "root"
  #   group "root"
  #   mode 0755
  #   variables :name => name, :directory => site_directory, :user => node[:mediawiki][:user]
  # end

  # mediawiki_extension "Translate" do
  #   site new_resource.site
  #   template "mw-ext-Translate.inc.php.erb"
  #   update_site false
  # end

  mediawiki_extension "UniversalLanguageSelector" do
    site new_resource.site
    template "mw-ext-UniversalLanguageSelector.inc.php.erb"
    update_site false
  end

  mediawiki_extension "AntiSpoof" do
    site new_resource.site
    template "mw-ext-AntiSpoof.inc.php.erb"
    compose true
    update_site false
  end

  mediawiki_extension "AbuseFilter" do
    site new_resource.site
    template "mw-ext-AbuseFilter.inc.php.erb"
    compose true
    update_site false
  end

  mediawiki_extension "CheckUser" do
    site new_resource.site
    template "mw-ext-CheckUser.inc.php.erb"
    update_site false
  end

  mediawiki_extension "DismissableSiteNotice" do
    site new_resource.site
    update_site false
  end

  mediawiki_extension "Elastica" do
    site new_resource.site
    compose true
    update_site false
  end

  mediawiki_extension "CirrusSearch" do
    site new_resource.site
    compose true
    template "mw-ext-CirrusSearch.inc.php.erb"
    update_site false
  end

  # OSM-specific extensions come from their own repositories.
  mediawiki_extension "osmtaginfo" do
    site new_resource.site
    template "mw-ext-osmtaginfo.inc.php.erb"
    repository "git://github.com/Firefishy/osmtaginfo.git"
    tag "live"
    update_site false
  end

  mediawiki_extension "SimpleMap" do
    site new_resource.site
    template "mw-ext-SimpleMap.inc.php.erb"
    repository "git://github.com/Firefishy/SimpleMap.git"
    tag "live"
    update_site false
  end

  mediawiki_extension "SlippyMap" do
    site new_resource.site
    template "mw-ext-SlippyMap.inc.php.erb"
    repository "git://github.com/Firefishy/SlippyMap.git"
    tag "live"
    update_site false
  end

  mediawiki_extension "Mantle" do
    site new_resource.site
    update_site false
    action :delete
  end

  mediawiki_extension "DisableAccount" do
    site new_resource.site
    template "mw-ext-DisableAccount.inc.php.erb"
    update_site false
  end

  mediawiki_extension "VisualEditor" do
    site new_resource.site
    template "mw-ext-VisualEditor.inc.php.erb"
    update_site false
  end

  mediawiki_extension "TemplateData" do
    site new_resource.site
    update_site false
  end

  # Static assets and search-engine verification files served from the
  # site root.
  cookbook_file "#{site_directory}/cc-wiki.png" do
    cookbook "mediawiki"
    owner node[:mediawiki][:user]
    group node[:mediawiki][:group]
    mode 0o644
    backup false
  end

  cookbook_file "#{site_directory}/googled06a989d1ccc8364.html" do
    cookbook "mediawiki"
    owner node[:mediawiki][:user]
    group node[:mediawiki][:group]
    mode 0o644
    backup false
  end

  cookbook_file "#{site_directory}/googlefac54c35e800caab.html" do
    cookbook "mediawiki"
    owner node[:mediawiki][:user]
    group node[:mediawiki][:group]
    mode 0o644
    backup false
  end

  ssl_certificate new_resource.site do
    domains [new_resource.site] + Array(new_resource.aliases)
  end

  apache_site new_resource.site do
    cookbook "mediawiki"
    template "apache.erb"
    directory site_directory
    variables :aliases => Array(new_resource.aliases),
              :private_site => new_resource.private_site
    reload_apache false
  end

  # FIXME: needs to run once
  execute "#{mediawiki_directory}/extensions/CirrusSearch/maintenance/updateSearchIndexConfig.php" do
    action :nothing
    command "php extensions/CirrusSearch/maintenance/updateSearchIndexConfig.php"
    cwd mediawiki_directory
    user node[:mediawiki][:user]
    group node[:mediawiki][:group]
  end
end
action :update do
  mediawiki_directory = "#{site_directory}/w"

  # Re-render LocalSettings.php; the schema updater below always runs.
  template "#{mediawiki_directory}/LocalSettings.php" do
    cookbook "mediawiki"
    source "LocalSettings.php.erb"
    owner node[:mediawiki][:user]
    group node[:mediawiki][:group]
    mode 0o664
    variables :name => new_resource.site,
              :directory => mediawiki_directory,
              :database_params => database_params,
              :mediawiki => mediawiki_params
    notifies :run, "execute[#{mediawiki_directory}/maintenance/update.php]"
  end

  execute "#{mediawiki_directory}/maintenance/update.php" do
    action :run
    command "php maintenance/update.php --quick"
    cwd mediawiki_directory
    user node[:mediawiki][:user]
    group node[:mediawiki][:group]
  end
end
action :delete do
  # Tear down the apache vhost, the site files, then the database and
  # its user.
  apache_site new_resource.site do
    action :delete
    reload_apache false
  end

  declare_resource :directory, site_directory do
    action :delete
    recursive true
  end

  mysql_database new_resource.database_name do
    action :drop
  end

  mysql_user "#{new_resource.database_user}@localhost" do
    action :drop
  end
end
action_class do
  # Root directory for the site; defaults to /srv/<site domain>.
  def site_directory
    new_resource.directory || "/srv/#{new_resource.site}"
  end

  # Site name with dots replaced so it is safe in cron file names.
  def cron_name
    new_resource.site.tr(".", "_")
  end

  # Database connection details passed to templates and the backup job.
  def database_params
    {
      :host => "localhost",
      :name => new_resource.database_name,
      :username => new_resource.database_user,
      :password => new_resource.database_password
    }
  end

  # Wiki configuration values rendered into LocalSettings.php.
  def mediawiki_params
    {
      :sitename => new_resource.sitename,
      :metanamespace => new_resource.metanamespace,
      :logo => new_resource.logo,
      :email_contact => new_resource.email_contact,
      :email_sender => new_resource.email_sender,
      :email_sender_name => new_resource.email_sender_name,
      :commons => new_resource.commons,
      :skin => new_resource.skin,
      :site_notice => new_resource.site_notice,
      :site_readonly => new_resource.site_readonly,
      :extra_file_extensions => new_resource.extra_file_extensions,
      :private_accounts => new_resource.private_accounts,
      :private_site => new_resource.private_site
    }
  end
end
# Declared-time hook: wire an apache reload notification onto this
# resource unless the caller opted out with reload_apache false.
def after_created
  notifies :reload, "service[apache2]" if reload_apache
end
# ~FC005: suppress foodcritic warning
#
# Cookbook Name:: mediawiki
# Resource:: mediawiki_site
#
# Copyright 2015, OpenStreetMap Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
default_action :create

# Identity and layout: the resource name is the site's domain name.
property :site, :kind_of => String, :name_attribute => true
property :aliases, :kind_of => [String, Array]
property :directory, :kind_of => String
property :version, :kind_of => String, :default => "1.31"
# MySQL connection details for the wiki database.
property :database_name, :kind_of => String, :required => true
property :database_user, :kind_of => String, :required => true
property :database_password, :kind_of => String, :required => true
# Wiki identity settings rendered into LocalSettings.php.
property :sitename, :kind_of => String, :default => "OpenStreetMap Wiki"
property :metanamespace, :kind_of => String, :default => "OpenStreetMap"
property :logo, :kind_of => String, :default => "$wgStylePath/common/images/wiki.png"
property :email_contact, :kind_of => String, :default => ""
property :email_sender, :kind_of => String, :default => ""
property :email_sender_name, :kind_of => String, :default => "MediaWiki Mail"
property :commons, :kind_of => [TrueClass, FalseClass], :default => true
property :skin, :kind_of => String, :default => "vector"
property :site_notice, :kind_of => [String, TrueClass, FalseClass], :default => false
property :site_readonly, :kind_of => [String, TrueClass, FalseClass], :default => false
# Initial administrator account created by the MediaWiki installer.
property :admin_user, :kind_of => String, :default => "Admin"
property :admin_password, :kind_of => String, :required => true
# Privacy controls and ReCaptcha keys for the ConfirmEdit extension.
property :private_accounts, :kind_of => [TrueClass, FalseClass], :default => false
property :private_site, :kind_of => [TrueClass, FalseClass], :default => false
property :recaptcha_public_key, :kind_of => String
property :recaptcha_private_key, :kind_of => String
property :extra_file_extensions, :kind_of => [String, Array], :default => []
property :reload_apache, :kind_of => [TrueClass, FalseClass], :default => true
action :create do
node.normal_unless[:mediawiki][:sites][new_resource.site] = {}
node.normal[:mediawiki][:sites][new_resource.site][:directory] = site_directory
node.normal[:mediawiki][:sites][new_resource.site][:version] = new_resource.version
node.normal_unless[:mediawiki][:sites][new_resource.site][:wgSecretKey] = SecureRandom.base64(48)
mysql_user "#{new_resource.database_user}@localhost" do
password new_resource.database_password
end
mysql_database new_resource.database_name do
permissions "#{new_resource.database_user}@localhost" => :all
end
mediawiki_directory = "#{site_directory}/w"
ruby_block "rename-installer-localsettings" do
action :nothing
block do
::File.rename("#{mediawiki_directory}/LocalSettings.php", "#{mediawiki_directory}/LocalSettings-install.php")
end
end
execute "#{mediawiki_directory}/maintenance/install.php" do
action :nothing
# Use metanamespace as Site Name to ensure correct set namespace
command "php maintenance/install.php --server '#{name}' --dbtype 'mysql' --dbname '#{new_resource.database_name}' --dbuser '#{new_resource.database_user}' --dbpass '#{new_resource.database_password}' --dbserver 'localhost' --scriptpath /w --pass '#{new_resource.admin_password}' '#{new_resource.metanamespace}' '#{new_resource.admin_user}'"
cwd mediawiki_directory
user node[:mediawiki][:user]
group node[:mediawiki][:group]
not_if do
::File.exist?("#{mediawiki_directory}/LocalSettings-install.php")
end
notifies :run, "ruby_block[rename-installer-localsettings]", :immediately
end
execute "#{mediawiki_directory}/maintenance/update.php" do
action :nothing
command "php maintenance/update.php --quick"
cwd mediawiki_directory
user node[:mediawiki][:user]
group node[:mediawiki][:group]
end
declare_resource :directory, site_directory do
owner node[:mediawiki][:user]
group node[:mediawiki][:group]
mode 0o775
end
declare_resource :directory, mediawiki_directory do
owner node[:mediawiki][:user]
group node[:mediawiki][:group]
mode 0o775
end
mediawiki_reference = "REL#{new_resource.version}".tr(".", "_")
git "#{mediawiki_directory}/vendor" do
action :nothing
repository "https://gerrit.wikimedia.org/r/p/mediawiki/vendor.git"
revision mediawiki_reference
user node[:mediawiki][:user]
group node[:mediawiki][:group]
end
git mediawiki_directory do
action :sync
repository "https://gerrit.wikimedia.org/r/p/mediawiki/core.git"
revision mediawiki_reference
user node[:mediawiki][:user]
group node[:mediawiki][:group]
notifies :sync, "git[#{mediawiki_directory}/vendor]", :immediately
notifies :run, "execute[#{mediawiki_directory}/maintenance/install.php]", :immediately
notifies :run, "execute[#{mediawiki_directory}/maintenance/update.php]"
end
# Safety catch if git doesn't update but install.php hasn't run
ruby_block "catch-installer-localsettings-run" do
action :run
block do
end
not_if do
::File.exist?("#{mediawiki_directory}/LocalSettings-install.php")
end
notifies :run, "execute[#{mediawiki_directory}/maintenance/install.php]", :immediately
end
declare_resource :directory, "#{mediawiki_directory}/images" do
owner "www-data"
group node[:mediawiki][:group]
mode 0o775
end
declare_resource :directory, "#{mediawiki_directory}/cache" do
owner "www-data"
group node[:mediawiki][:group]
mode 0o775
end
declare_resource :directory, "#{mediawiki_directory}/LocalSettings.d" do
user node[:mediawiki][:user]
group node[:mediawiki][:group]
mode 0o775
end
# Main site configuration; any change triggers a (delayed) schema update.
template "#{mediawiki_directory}/LocalSettings.php" do
cookbook "mediawiki"
source "LocalSettings.php.erb"
owner node[:mediawiki][:user]
group node[:mediawiki][:group]
mode 0o664
variables :name => new_resource.site,
:directory => mediawiki_directory,
:database_params => database_params,
:mediawiki => mediawiki_params
notifies :run, "execute[#{mediawiki_directory}/maintenance/update.php]"
end
# Periodic maintenance jobs (cron_name is the site with dots replaced).
template "/etc/cron.d/mediawiki-#{cron_name}" do
cookbook "mediawiki"
source "mediawiki.cron.erb"
owner "root"
group "root"
mode 0o644
variables :name => new_resource.site, :directory => site_directory,
:user => node[:mediawiki][:user]
end
# Daily database backup; 0700 because the script embeds DB credentials.
template "/etc/cron.daily/mediawiki-#{cron_name}-backup" do
cookbook "mediawiki"
source "mediawiki-backup.cron.erb"
owner "root"
group "root"
mode 0o700
variables :name => new_resource.site,
:directory => site_directory,
:database_params => database_params
end
# --- Skins and extensions ---------------------------------------------
# MobileFrontend extension is required by MinervaNeue skin
mediawiki_extension "MobileFrontend" do # ~FC005
site new_resource.site
template "mw-ext-MobileFrontend.inc.php.erb"
end
# MinervaNeue skin (depends on MobileFrontend above)
mediawiki_skin "MinervaNeue" do # ~FC005
site new_resource.site
update_site false
end
# Additional bundled skins, no extra configuration needed.
mediawiki_skin "CologneBlue" do
site new_resource.site
update_site false
end
mediawiki_skin "Modern" do
site new_resource.site
update_site false
end
mediawiki_skin "MonoBook" do
site new_resource.site
update_site false
end
mediawiki_skin "Vector" do
site new_resource.site
update_site false
end
# Core editing/citation extensions.
mediawiki_extension "Cite" do
site new_resource.site
update_site false
end
mediawiki_extension "CiteThisPage" do
site new_resource.site
update_site false
end
# CAPTCHA is pointless on sites without public signups/edits: remove it
# there, install it with reCAPTCHA keys otherwise.
if new_resource.private_accounts || new_resource.private_site
mediawiki_extension "ConfirmEdit" do
site new_resource.site
update_site false
action :delete
end
else
mediawiki_extension "ConfirmEdit" do
site new_resource.site
template "mw-ext-ConfirmEdit.inc.php.erb"
variables :public_key => new_resource.recaptcha_public_key,
:private_key => new_resource.recaptcha_private_key
update_site false
end
end
mediawiki_extension "Gadgets" do
site new_resource.site
update_site false
end
mediawiki_extension "ImageMap" do
site new_resource.site
update_site false
end
mediawiki_extension "InputBox" do
site new_resource.site
update_site false
end
mediawiki_extension "Interwiki" do
site new_resource.site
update_site false
end
mediawiki_extension "Nuke" do
site new_resource.site
update_site false
end
mediawiki_extension "ParserFunctions" do
site new_resource.site
template "mw-ext-ParserFunctions.inc.php.erb"
update_site false
end
mediawiki_extension "PdfHandler" do
site new_resource.site
template "mw-ext-PdfHandler.inc.php.erb"
update_site false
end
mediawiki_extension "Poem" do
site new_resource.site
update_site false
end
mediawiki_extension "Renameuser" do
site new_resource.site
update_site false
end
# No longer wanted: ensure it is removed if previously installed.
mediawiki_extension "SimpleAntiSpam" do
site new_resource.site
update_site false
action :delete
end
mediawiki_extension "SpamBlacklist" do
site new_resource.site
template "mw-ext-SpamBlacklist.inc.php.erb"
update_site false
end
mediawiki_extension "SyntaxHighlight_GeSHi" do
site new_resource.site
template "mw-ext-SyntaxHighlight.inc.php.erb"
update_site false
end
mediawiki_extension "TitleBlacklist" do
site new_resource.site
template "mw-ext-TitleBlacklist.inc.php.erb"
update_site false
end
mediawiki_extension "WikiEditor" do
site new_resource.site
update_site false
end
# Internationalisation extensions.
mediawiki_extension "Babel" do
site new_resource.site
template "mw-ext-Babel.inc.php.erb"
update_site false
end
mediawiki_extension "cldr" do
site new_resource.site
template "mw-ext-cldr.inc.php.erb"
update_site false
end
mediawiki_extension "CleanChanges" do
site new_resource.site
template "mw-ext-CleanChanges.inc.php.erb"
update_site false
end
mediawiki_extension "LocalisationUpdate" do
site new_resource.site
template "mw-ext-LocalisationUpdate.inc.php.erb"
update_site false
end
# LocalisationUpdate Update Cron (currently disabled).
# template "/etc/cron.d/mediawiki-#{name}-LocalisationUpdate" do
# cookbook "mediawiki"
# source "mediawiki-LocalisationUpdate.cron.erb"
# owner "root"
# group "root"
# mode 0755
# variables :name => name, :directory => site_directory, :user => node[:mediawiki][:user]
# end
# mediawiki_extension "Translate" do
# site new_resource.site
# template "mw-ext-Translate.inc.php.erb"
# update_site false
# end
mediawiki_extension "UniversalLanguageSelector" do
site new_resource.site
template "mw-ext-UniversalLanguageSelector.inc.php.erb"
update_site false
end
# Anti-abuse extensions (`compose true` ones pull deps via composer).
mediawiki_extension "AntiSpoof" do
site new_resource.site
template "mw-ext-AntiSpoof.inc.php.erb"
compose true
update_site false
end
mediawiki_extension "AbuseFilter" do
site new_resource.site
template "mw-ext-AbuseFilter.inc.php.erb"
compose true
update_site false
end
mediawiki_extension "CheckUser" do
site new_resource.site
template "mw-ext-CheckUser.inc.php.erb"
update_site false
end
mediawiki_extension "DismissableSiteNotice" do
site new_resource.site
update_site false
end
# Elasticsearch-backed search: Elastica client library + CirrusSearch.
mediawiki_extension "Elastica" do
site new_resource.site
compose true
update_site false
end
mediawiki_extension "CirrusSearch" do
site new_resource.site
compose true
template "mw-ext-CirrusSearch.inc.php.erb"
update_site false
end
# OSM-specific extensions fetched from external repositories ("live" tag).
mediawiki_extension "osmtaginfo" do
site new_resource.site
template "mw-ext-osmtaginfo.inc.php.erb"
repository "git://github.com/Firefishy/osmtaginfo.git"
tag "live"
update_site false
end
mediawiki_extension "SimpleMap" do
site new_resource.site
template "mw-ext-SimpleMap.inc.php.erb"
repository "git://github.com/Firefishy/SimpleMap.git"
tag "live"
update_site false
end
mediawiki_extension "SlippyMap" do
site new_resource.site
template "mw-ext-SlippyMap.inc.php.erb"
repository "git://github.com/Firefishy/SlippyMap.git"
tag "live"
update_site false
end
# No longer wanted: ensure it is removed if previously installed.
mediawiki_extension "Mantle" do
site new_resource.site
update_site false
action :delete
end
mediawiki_extension "DisableAccount" do
site new_resource.site
template "mw-ext-DisableAccount.inc.php.erb"
update_site false
end
mediawiki_extension "VisualEditor" do
site new_resource.site
template "mw-ext-VisualEditor.inc.php.erb"
update_site false
end
mediawiki_extension "TemplateData" do
site new_resource.site
update_site false
end
# Static assets served from the site root (license badge and Google
# site-verification files).
cookbook_file "#{site_directory}/cc-wiki.png" do
cookbook "mediawiki"
owner node[:mediawiki][:user]
group node[:mediawiki][:group]
mode 0o644
backup false
end
cookbook_file "#{site_directory}/googled06a989d1ccc8364.html" do
cookbook "mediawiki"
owner node[:mediawiki][:user]
group node[:mediawiki][:group]
mode 0o644
backup false
end
cookbook_file "#{site_directory}/googlefac54c35e800caab.html" do
cookbook "mediawiki"
owner node[:mediawiki][:user]
group node[:mediawiki][:group]
mode 0o644
backup false
end
# TLS certificate covering the site and all of its aliases.
ssl_certificate new_resource.site do
domains [new_resource.site] + Array(new_resource.aliases)
end
apache_site new_resource.site do
cookbook "mediawiki"
template "apache.erb"
directory site_directory
variables :aliases => Array(new_resource.aliases),
:private_site => new_resource.private_site
reload_apache false
end
# FIXME: needs to run once — `action :nothing` and nothing notifies it,
# so this currently has to be triggered manually.
execute "#{mediawiki_directory}/extensions/CirrusSearch/maintenance/updateSearchIndexConfig.php" do
action :nothing
command "php extensions/CirrusSearch/maintenance/updateSearchIndexConfig.php"
cwd mediawiki_directory
user node[:mediawiki][:user]
group node[:mediawiki][:group]
end
end
# Re-render LocalSettings.php and unconditionally run schema updates.
action :update do
mediawiki_directory = "#{site_directory}/w"
template "#{mediawiki_directory}/LocalSettings.php" do
cookbook "mediawiki"
source "LocalSettings.php.erb"
owner node[:mediawiki][:user]
group node[:mediawiki][:group]
mode 0o664
variables :name => new_resource.site,
:directory => mediawiki_directory,
:database_params => database_params,
:mediawiki => mediawiki_params
notifies :run, "execute[#{mediawiki_directory}/maintenance/update.php]"
end
# `action :run` (unlike :create's :nothing) — always apply migrations here.
execute "#{mediawiki_directory}/maintenance/update.php" do
action :run
command "php maintenance/update.php --quick"
cwd mediawiki_directory
user node[:mediawiki][:user]
group node[:mediawiki][:group]
end
end
# Tear the site down: apache vhost, files, database and database user.
action :delete do
apache_site new_resource.site do
action :delete
reload_apache false
end
declare_resource :directory, site_directory do
action :delete
recursive true
end
mysql_database new_resource.database_name do
action :drop
end
mysql_user "#{new_resource.database_user}@localhost" do
action :drop
end
end
action_class do
  # Root directory for the site; defaults to /srv/<site> when the
  # resource does not specify an explicit directory.
  def site_directory
    new_resource.directory || "/srv/#{new_resource.site}"
  end

  # Site name made safe for use in cron file names (dots replaced).
  def cron_name
    new_resource.site.gsub(".", "_")
  end

  # Database connection details passed to the LocalSettings.php template
  # and the backup cron job.
  def database_params
    {
      host: "localhost",
      name: new_resource.database_name,
      username: new_resource.database_user,
      password: new_resource.database_password
    }
  end

  # Site-level settings passed to the LocalSettings.php template.
  def mediawiki_params
    {
      sitename: new_resource.sitename,
      metanamespace: new_resource.metanamespace,
      logo: new_resource.logo,
      email_contact: new_resource.email_contact,
      email_sender: new_resource.email_sender,
      email_sender_name: new_resource.email_sender_name,
      commons: new_resource.commons,
      skin: new_resource.skin,
      site_notice: new_resource.site_notice,
      site_readonly: new_resource.site_readonly,
      extra_file_extensions: new_resource.extra_file_extensions,
      private_accounts: new_resource.private_accounts,
      private_site: new_resource.private_site
    }
  end
end
# Resource hook: once constructed, queue an apache reload — unless the
# caller opted out via the reload_apache property.
def after_created
notifies :reload, "service[apache2]" if reload_apache
end
|
module Beatr
  # Gem release version.
  VERSION = "0.0.7"
end
bump version
module Beatr
  # Gem release version.
  VERSION = "0.0.8"
end
|
# Piwik release to deploy.
default[:piwik][:version] = "2.16.0"
# Plugins enabled on this install (names must match Piwik plugin dirs).
default[:piwik][:plugins] = %w(
Actions API BulkTracking Contents CoreAdminHome CoreConsole CoreHome
CorePluginsAdmin CoreUpdater CoreVisualizations CustomVariables
Dashboard DevicesDetection DevicePlugins DoNotTrack Events Feedback Goals
Heartbeat ImageGraph Installation LanguagesManager Live Login Morpheus
MultiSites Overlay PrivacyManager Provider Proxy Referrers Resolution
SegmentEditor SEO SitesManager Transitions UserCountry UserCountryMap
UserLanguage UsersManager Widgetize VisitFrequency VisitorInterest
VisitsSummary VisitTime
)
Update piwik to 2.16.1
# Piwik release to deploy.
default[:piwik][:version] = "2.16.1"
# Plugins enabled on this install (names must match Piwik plugin dirs).
default[:piwik][:plugins] = %w(
Actions API BulkTracking Contents CoreAdminHome CoreConsole CoreHome
CorePluginsAdmin CoreUpdater CoreVisualizations CustomVariables
Dashboard DevicesDetection DevicePlugins DoNotTrack Events Feedback Goals
Heartbeat ImageGraph Installation LanguagesManager Live Login Morpheus
MultiSites Overlay PrivacyManager Provider Proxy Referrers Resolution
SegmentEditor SEO SitesManager Transitions UserCountry UserCountryMap
UserLanguage UsersManager Widgetize VisitFrequency VisitorInterest
VisitsSummary VisitTime
)
|
# Piwik/Matomo release to deploy.
default[:piwik][:version] = "3.9.0"
# Plugins enabled on this install (names must match plugin directories).
default[:piwik][:plugins] = %w[
Actions Annotations API BulkTracking Contents CoreAdminHome CoreConsole
CoreHome CorePluginsAdmin CoreUpdater CoreVisualizations CustomPiwikJs
CustomVariables Dashboard DevicePlugins DevicesDetection Diagnostics Ecommerce
Events Feedback GeoIp2 Goals Heartbeat ImageGraph Insights Installation Intl
LanguagesManager Live Login Marketplace MobileMessaging Monolog Morpheus
MultiSites Overlay PrivacyManager ProfessionalServices Provider Proxy
Referrers Resolution RssWidget ScheduledReports SegmentEditor SEO SitesManager
Transitions UserCountry UserCountryMap UserId UserLanguage UsersManager
VisitFrequency VisitorInterest VisitsSummary VisitTime WebsiteMeasurable
Widgetize
]
Update piwik to 3.10.0
# Piwik/Matomo release to deploy.
default[:piwik][:version] = "3.10.0"
# Plugins enabled on this install (names must match plugin directories).
default[:piwik][:plugins] = %w[
Actions Annotations API BulkTracking Contents CoreAdminHome CoreConsole
CoreHome CorePluginsAdmin CoreUpdater CoreVisualizations CustomPiwikJs
CustomVariables Dashboard DevicePlugins DevicesDetection Diagnostics Ecommerce
Events Feedback GeoIp2 Goals Heartbeat ImageGraph Insights Installation Intl
LanguagesManager Live Login Marketplace MobileMessaging Monolog Morpheus
MultiSites Overlay PrivacyManager ProfessionalServices Provider Proxy
Referrers Resolution RssWidget ScheduledReports SegmentEditor SEO SitesManager
Transitions UserCountry UserCountryMap UserId UserLanguage UsersManager
VisitFrequency VisitorInterest VisitsSummary VisitTime WebsiteMeasurable
Widgetize
]
|
# Copyright 2011 Ryan J. Geyer
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# libxml2/libxslt development headers: RPM package names on CentOS,
# Debian-style names on every other platform.
default[:skeme][:install_packages] = value_for_platform("centos" => { "default" => ["libxml2-devel", "libxslt-devel"] }, "default" => ["libxml2-dev", "libxslt1-dev"])
Replace value_for_platform with an explicit either/or case statement per platform
# Copyright 2011 Ryan J. Geyer
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# libxml2/libxslt development headers per platform.
case node[:platform]
when "centos"
default[:skeme][:install_packages] = ["libxml2-devel", "libxslt-devel"]
when "ubuntu"
default[:skeme][:install_packages] = ["libxml2-dev", "libxslt1-dev"]
end
# NOTE(review): no else branch — platforms other than centos/ubuntu leave
# install_packages unset, whereas the earlier value_for_platform version
# had a Debian-style default. Confirm the narrowing is intentional.
|
module Bjork
  # Gem release version.
  VERSION = '0.1.0'
end
Bump version to 0.1.1
module Bjork
  # Gem release version.
  VERSION = '0.1.1'
end
|
#!/usr/bin/env ruby -rubygems
# -*- encoding: utf-8 -*-
# Gem specification for the trinity web framework. Version/date come from
# the VERSION file so releases only touch that one file.
GEMSPEC = Gem::Specification.new do |gem|
gem.version = File.read('VERSION').chomp
gem.date = File.mtime('VERSION').strftime('%Y-%m-%d')
gem.name = 'trinity'
gem.homepage = 'http://trinity.datagraph.org/'
gem.license = 'Public Domain' if gem.respond_to?(:license=)
gem.summary = 'A minimalistic web framework for publishing Linked Data.'
gem.description = 'Trinity is a minimalistic web framework for publishing Linked Data.'
gem.rubyforge_project = nil
gem.authors = ['Arto Bendiken', 'Ben Lavender', 'Josh Huckabee']
gem.email = 'arto.bendiken@gmail.com'
gem.platform = Gem::Platform::RUBY
gem.files = %w(AUTHORS README UNLICENSE VERSION) + Dir.glob('lib/**/*.rb')
gem.bindir = %q(bin)
gem.executables = %w(trinity)
# NOTE(review): rubyforge_project, default_executable and has_rdoc are
# deprecated in modern RubyGems; kept here for old-RubyGems compatibility.
gem.default_executable = gem.executables.first
gem.require_paths = %w(lib)
gem.extensions = %w()
gem.test_files = %w()
gem.has_rdoc = false
gem.required_ruby_version = '>= 1.8.2'
gem.requirements = []
gem.add_development_dependency 'rspec', '>= 1.2.9'
gem.add_development_dependency 'yard' , '>= 0.5.2'
gem.add_runtime_dependency 'rdf', '>= 0.0.8'
gem.add_runtime_dependency 'addressable', '>= 2.1.1'
gem.add_runtime_dependency 'mime-types', '>= 1.16'
gem.add_runtime_dependency 'rack', '>= 1.1.0'
gem.add_runtime_dependency 'thin', '>= 1.2.5'
gem.post_install_message = nil
end
Updated gemspec.
#!/usr/bin/env ruby -rubygems
# -*- encoding: utf-8 -*-
# Gem specification for the trinity web framework. Version/date come from
# the VERSION file so releases only touch that one file.
GEMSPEC = Gem::Specification.new do |gem|
gem.version = File.read('VERSION').chomp
gem.date = File.mtime('VERSION').strftime('%Y-%m-%d')
gem.name = 'trinity'
gem.homepage = 'http://trinity.datagraph.org/'
gem.license = 'Public Domain' if gem.respond_to?(:license=)
gem.summary = 'A minimalistic web framework for publishing Linked Data.'
gem.description = 'Trinity is a minimalistic web framework for publishing Linked Data.'
gem.rubyforge_project = nil
gem.authors = ['Arto Bendiken', 'Ben Lavender', 'Josh Huckabee']
gem.email = 'arto.bendiken@gmail.com'
gem.platform = Gem::Platform::RUBY
gem.files = %w(AUTHORS README UNLICENSE VERSION etc/localhost.nt) + Dir.glob('lib/**/*.rb')
gem.bindir = %q(bin)
gem.executables = %w(trinity)
# NOTE(review): rubyforge_project, default_executable and has_rdoc are
# deprecated in modern RubyGems; kept here for old-RubyGems compatibility.
gem.default_executable = gem.executables.first
gem.require_paths = %w(lib)
gem.extensions = %w()
gem.test_files = %w()
gem.has_rdoc = false
gem.required_ruby_version = '>= 1.8.2'
gem.requirements = []
gem.add_development_dependency 'rspec', '>= 1.2.9'
gem.add_development_dependency 'yard' , '>= 0.5.2'
gem.add_runtime_dependency 'rdf', '>= 0.0.8'
gem.add_runtime_dependency 'addressable', '>= 2.1.1'
gem.add_runtime_dependency 'mime-types', '>= 1.16'
gem.add_runtime_dependency 'rack', '>= 1.1.0'
gem.add_runtime_dependency 'thin', '>= 1.2.5'
gem.post_install_message = nil
end
|
module BooJS
  # Gem release version.
  VERSION = '0.0.25'
end
gem 0.0.26
module BooJS
  # Gem release version.
  VERSION = '0.0.26'
end
|
#!/usr/bin/env ruby -rubygems
# -*- encoding: utf-8 -*-
# Gem specification for rdf-mongo (MongoDB storage adapter for rdf.rb).
# Version/date come from the VERSION file.
Gem::Specification.new do |gem|
gem.version = File.read('VERSION').chomp
gem.date = File.mtime('VERSION').strftime('%Y-%m-%d')
gem.name = 'rdf-mongo'
gem.homepage = 'http://ruby-rdf.github.com/rdf-mongo'
gem.license = 'MIT License' if gem.respond_to?(:license=)
gem.summary = 'A storage adapter for integrating MongoDB and rdf.rb, a Ruby library for working with Resource Description Framework (RDF) data.'
gem.description = 'rdf-mongo is a storage adapter for integrating MongoDB and rdf.rb, a Ruby library for working with Resource Description Framework (RDF) data.'
gem.authors = ['Pius Uzamere']
gem.email = 'pius@alum.mit.edu'
gem.platform = Gem::Platform::RUBY
gem.files = %w(LICENSE VERSION README.md) + Dir.glob('lib/**/*.rb')
gem.require_paths = %w(lib)
gem.extensions = %w()
gem.test_files = Dir.glob('spec/*.spec')
# NOTE(review): has_rdoc is deprecated in modern RubyGems.
gem.has_rdoc = false
gem.required_ruby_version = '>= 1.8.7'
gem.requirements = []
gem.add_runtime_dependency 'rdf', '>= 1.0'
gem.add_runtime_dependency 'mongo', '>= 1.5.1'
gem.add_development_dependency 'rdf-spec', '>= 1.0'
gem.add_development_dependency 'rspec', '>= 2.12.0'
gem.add_development_dependency 'yard' , '>= 0.8.3'
gem.add_runtime_dependency 'addressable', '>= 2.3.2'
gem.post_install_message = "Have fun! :)"
end
Add bson_ext as development dependency.
#!/usr/bin/env ruby -rubygems
# -*- encoding: utf-8 -*-
# Gem specification for rdf-mongo (MongoDB storage adapter for rdf.rb).
# Version/date come from the VERSION file. Runtime deps are listed first,
# then development deps (bson_ext speeds up the mongo driver in tests).
Gem::Specification.new do |gem|
gem.version = File.read('VERSION').chomp
gem.date = File.mtime('VERSION').strftime('%Y-%m-%d')
gem.name = 'rdf-mongo'
gem.homepage = 'http://ruby-rdf.github.com/rdf-mongo'
gem.license = 'MIT License' if gem.respond_to?(:license=)
gem.summary = 'A storage adapter for integrating MongoDB and rdf.rb, a Ruby library for working with Resource Description Framework (RDF) data.'
gem.description = 'rdf-mongo is a storage adapter for integrating MongoDB and rdf.rb, a Ruby library for working with Resource Description Framework (RDF) data.'
gem.authors = ['Pius Uzamere']
gem.email = 'pius@alum.mit.edu'
gem.platform = Gem::Platform::RUBY
gem.files = %w(LICENSE VERSION README.md) + Dir.glob('lib/**/*.rb')
gem.require_paths = %w(lib)
gem.extensions = %w()
gem.test_files = Dir.glob('spec/*.spec')
# NOTE(review): has_rdoc is deprecated in modern RubyGems.
gem.has_rdoc = false
gem.required_ruby_version = '>= 1.8.7'
gem.requirements = []
gem.add_runtime_dependency 'rdf', '>= 1.0'
gem.add_runtime_dependency 'mongo', '>= 1.5.1'
gem.add_runtime_dependency 'addressable', '>= 2.3.2'
gem.add_development_dependency 'rdf-spec', '>= 1.0'
gem.add_development_dependency 'rspec', '>= 2.12.0'
gem.add_development_dependency 'yard' , '>= 0.8.3'
gem.add_development_dependency 'bson_ext'
gem.post_install_message = "Have fun! :)"
end
|
module Braid
  # Gem release version (frozen to avoid per-call string allocation).
  VERSION = "1.0.20".freeze
end
Bump the version for release
module Braid
  # Gem release version (frozen to avoid per-call string allocation).
  VERSION = "1.0.21".freeze
end
|
#!/usr/bin/env ruby -rubygems
# -*- encoding: utf-8 -*-
# Gem specification for RDF.rb. Version/date come from the VERSION file.
Gem::Specification.new do |gem|
gem.version = File.read('VERSION').chomp
gem.date = File.mtime('VERSION').strftime('%Y-%m-%d')
gem.name = 'rdf'
gem.homepage = 'http://rdf.rubyforge.org/'
gem.license = 'Public Domain' if gem.respond_to?(:license=)
gem.summary = 'A Ruby library for working with Resource Description Framework (RDF) data.'
gem.description = 'RDF.rb is a pure-Ruby library for working with Resource Description Framework (RDF) data.'
gem.rubyforge_project = 'rdf'
gem.authors = ['Arto Bendiken', 'Ben Lavender', 'Gregg Kellogg']
gem.email = 'public-rdf-ruby@w3.org'
gem.platform = Gem::Platform::RUBY
gem.files = %w(AUTHORS CREDITS README UNLICENSE VERSION bin/rdf etc/doap.nt) + Dir.glob('lib/**/*.rb')
gem.bindir = %q(bin)
gem.executables = %w(rdf)
# NOTE(review): rubyforge_project, default_executable and has_rdoc are
# deprecated in modern RubyGems; kept here for old-RubyGems compatibility.
gem.default_executable = gem.executables.first
gem.require_paths = %w(lib)
gem.extensions = %w()
gem.test_files = %w()
gem.has_rdoc = false
gem.required_ruby_version = '>= 1.8.1'
gem.requirements = []
gem.add_runtime_dependency 'addressable', '>= 2.2.6'
gem.add_development_dependency 'yard', '>= 0.7.5'
gem.add_development_dependency 'rdf-spec', '~> 0.3.5'
gem.add_development_dependency 'rspec', '>= 2.8.0'
gem.post_install_message = nil
end
Update homepage.
#!/usr/bin/env ruby -rubygems
# -*- encoding: utf-8 -*-
# Gem specification for RDF.rb. Version/date come from the VERSION file.
Gem::Specification.new do |gem|
gem.version = File.read('VERSION').chomp
gem.date = File.mtime('VERSION').strftime('%Y-%m-%d')
gem.name = 'rdf'
gem.homepage = 'http://github.com/ruby-rdf/rdf'
gem.license = 'Public Domain' if gem.respond_to?(:license=)
gem.summary = 'A Ruby library for working with Resource Description Framework (RDF) data.'
gem.description = 'RDF.rb is a pure-Ruby library for working with Resource Description Framework (RDF) data.'
gem.rubyforge_project = 'rdf'
gem.authors = ['Arto Bendiken', 'Ben Lavender', 'Gregg Kellogg']
gem.email = 'public-rdf-ruby@w3.org'
gem.platform = Gem::Platform::RUBY
gem.files = %w(AUTHORS CREDITS README UNLICENSE VERSION bin/rdf etc/doap.nt) + Dir.glob('lib/**/*.rb')
gem.bindir = %q(bin)
gem.executables = %w(rdf)
# NOTE(review): rubyforge_project, default_executable and has_rdoc are
# deprecated in modern RubyGems; kept here for old-RubyGems compatibility.
gem.default_executable = gem.executables.first
gem.require_paths = %w(lib)
gem.extensions = %w()
gem.test_files = %w()
gem.has_rdoc = false
gem.required_ruby_version = '>= 1.8.1'
gem.requirements = []
gem.add_runtime_dependency 'addressable', '>= 2.2.6'
gem.add_development_dependency 'yard', '>= 0.7.5'
gem.add_development_dependency 'rdf-spec', '~> 0.3.5'
gem.add_development_dependency 'rspec', '>= 2.8.0'
gem.post_install_message = nil
end
|
# -*- encoding: utf-8 -*-
# Gem specification for ripl; version is read from lib/ripl/version.rb.
require 'rubygems' unless Object.const_defined?(:Gem)
require File.dirname(__FILE__) + "/lib/ripl/version"
Gem::Specification.new do |s|
s.name = "ripl"
s.version = Ripl::VERSION
s.authors = ["Gabriel Horner"]
s.email = "gabriel.horner@gmail.com"
s.homepage = "http://github.com/cldwalker/ripl"
s.summary = "ruby interactive print loop - A light, modular alternative to irb and a shell framework"
s.description = "ripl is a light, modular alternative to irb. Like irb, it loads ~/.irbrc, has autocompletion and keeps history in ~/.irb_history. Unlike irb, it is highly customizable via plugins and supports commands i.e. ripl-play. This customizability makes it easy to build custom shells (i.e. for a gem or application) and complex shells (i.e. for the web). In other words, ripl is also a shell framework. Works on ruby 1.8.7 and greater."
s.required_rubygems_version = ">= 1.3.6"
s.executables = %w(ripl)
s.add_dependency 'bond', '~> 0.4.0'
s.add_development_dependency 'bacon', '>= 1.1.0'
s.add_development_dependency 'rr', '>= 1.0.4'
s.add_development_dependency 'bacon-bits'
s.add_development_dependency 'bacon-rr'
s.add_development_dependency 'rake', '0.9.2.2'
# Packaged files: code, tests, docs, man pages plus dotfiles that Dir.glob
# would otherwise miss (.travis.yml, test/.riplrc).
s.files = Dir.glob(%w[{lib,test}/**/*.rb bin/* [A-Z]*.{txt,rdoc} ext/**/*.{rb,c} **/deps.rip]) + %w{Rakefile .gemspec}
s.files += Dir.glob(['man/*', '*.gemspec']) + %w{.travis.yml test/.riplrc}
s.extra_rdoc_files = ["README.rdoc", "LICENSE.txt"]
s.license = 'MIT'
end
tweak gemspec
# -*- encoding: utf-8 -*-
# Gem specification for ripl; version is read from lib/ripl/version.rb.
require 'rubygems' unless Object.const_defined?(:Gem)
require File.dirname(__FILE__) + "/lib/ripl/version"
Gem::Specification.new do |s|
s.name = "ripl"
s.version = Ripl::VERSION
s.authors = ["Gabriel Horner"]
s.email = "gabriel.horner@gmail.com"
s.homepage = "http://github.com/cldwalker/ripl"
s.summary = "ruby interactive print loop - A light, modular alternative to irb and a shell framework"
s.description = "ripl is a light, modular alternative to irb. Like irb, it loads ~/.irbrc, has autocompletion and keeps history in ~/.irb_history. Unlike irb, it is highly customizable via plugins and supports commands i.e. ripl-play. This customizability makes it easy to build custom shells (i.e. for a gem or application) and complex shells (i.e. for the web). In other words, ripl is also a shell framework. Works on ruby 1.8.7 and greater."
s.required_rubygems_version = ">= 1.3.6"
s.executables = %w(ripl)
s.add_dependency 'bond', '~> 0.4.0'
s.add_development_dependency 'bacon', '>= 1.1.0'
s.add_development_dependency 'rr', '>= 1.0.4'
s.add_development_dependency 'bacon-bits'
s.add_development_dependency 'bacon-rr'
s.add_development_dependency 'rake', '0.9.2.2'
# Packaged files: code, tests, docs, man pages plus dotfiles that Dir.glob
# would otherwise miss (.travis.yml, test/.riplrc).
s.files = Dir.glob(%w[{lib,test}/**/*.rb bin/* [A-Z]*.{txt,rdoc} ext/**/*.{rb,c} **/deps.rip]) + %w{Rakefile .gemspec .travis.yml}
s.files += Dir.glob(['man/*', '*.gemspec']) + %w{test/.riplrc}
s.extra_rdoc_files = ["README.rdoc", "LICENSE.txt"]
s.license = 'MIT'
end
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.