CombinedText
stringlengths
4
3.42M
require_relative '../spec_helper' include Hawkular::Inventory describe 'Inventory' do let(:resource_hash) do { 'id' => 'root', 'type' => {}, 'children' => [ { 'id' => 'child-01', 'type' => { 'id' => 'type-01' }, 'children' => [] }, { 'id' => 'child-02', 'type' => { 'id' => 'type-02' }, 'children' => [ { 'id' => 'child-03', 'type' => { 'id' => 'type-02' }, 'children' => [] }, { 'id' => 'child-04', 'type' => { 'id' => 'type-02' }, 'children' => [] }, { 'id' => 'child-05', 'type' => { 'id' => 'type-03' }, 'children' => [] } ] }, { 'id' => 'child-06', 'type' => { 'id' => 'type-01' }, 'children' => [] } ] } end let(:resource) do Resource.new(resource_hash) end describe '#children' do it 'returns direct children' do expect(resource.children.size).to eq(3) end it 'returns all children' do expect(resource.children(true).size).to eq(6) end end describe '#children_by_type' do it 'returns direct children' do expect(resource.children_by_type('type-02').size).to eq(1) end it 'returns 0 when direct children type is not found' do expect(resource.children_by_type('type-03').size).to eq(0) end it 'works recursive' do expect(resource.children_by_type('type-02', true).size).to eq(3) end it 'works recursive and does not matter if the type is not on top' do expect(resource.children_by_type('type-03', true).size).to eq(1) end end describe 'ResultFetcher' do batches = [[1, 2], [3, 4], [5]] page_size = 2 it 'fetches two first pages' do calls_count = 0 fetch_func = lambda do |offset| calls_count += 1 { 'startOffset' => offset, 'resultSize' => 5, 'results' => batches[offset / page_size] } end result_fetcher = Hawkular::Inventory::ResultFetcher.new(fetch_func) # Take first three items, expecting 2 calls values = result_fetcher.take(3) expect(values).to eq([1, 2, 3]) expect(calls_count).to eq(2) end it 'fetches all pages while asking more' do calls_count = 0 fetch_func = lambda do |offset| calls_count += 1 { 'startOffset' => offset, 'resultSize' => 5, 'results' => batches[offset / page_size] 
} end result_fetcher = Hawkular::Inventory::ResultFetcher.new(fetch_func) # Take more, expecting 3 calls values = result_fetcher.take(10) expect(values).to eq([1, 2, 3, 4, 5]) expect(calls_count).to eq(3) end it 'fetches all pages' do calls_count = 0 fetch_func = lambda do |offset| calls_count += 1 { 'startOffset' => offset, 'resultSize' => 5, 'results' => batches[offset / page_size] } end result_fetcher = Hawkular::Inventory::ResultFetcher.new(fetch_func) expect(result_fetcher.collect { |i| i }).to eq([1, 2, 3, 4, 5]) expect(calls_count).to eq(3) end end end declare result_fetcher in let block require_relative '../spec_helper' include Hawkular::Inventory describe 'Inventory' do let(:resource_hash) do { 'id' => 'root', 'type' => {}, 'children' => [ { 'id' => 'child-01', 'type' => { 'id' => 'type-01' }, 'children' => [] }, { 'id' => 'child-02', 'type' => { 'id' => 'type-02' }, 'children' => [ { 'id' => 'child-03', 'type' => { 'id' => 'type-02' }, 'children' => [] }, { 'id' => 'child-04', 'type' => { 'id' => 'type-02' }, 'children' => [] }, { 'id' => 'child-05', 'type' => { 'id' => 'type-03' }, 'children' => [] } ] }, { 'id' => 'child-06', 'type' => { 'id' => 'type-01' }, 'children' => [] } ] } end let(:resource) do Resource.new(resource_hash) end describe '#children' do it 'returns direct children' do expect(resource.children.size).to eq(3) end it 'returns all children' do expect(resource.children(true).size).to eq(6) end end describe '#children_by_type' do it 'returns direct children' do expect(resource.children_by_type('type-02').size).to eq(1) end it 'returns 0 when direct children type is not found' do expect(resource.children_by_type('type-03').size).to eq(0) end it 'works recursive' do expect(resource.children_by_type('type-02', true).size).to eq(3) end it 'works recursive and does not matter if the type is not on top' do expect(resource.children_by_type('type-03', true).size).to eq(1) end end describe 'ResultFetcher' do batches = [[1, 2], [3, 4], [5]] 
page_size = 2 calls_count = 0 let(:result_fetcher) do calls_count = 0 fetch_func = lambda do |offset| calls_count += 1 { 'startOffset' => offset, 'resultSize' => 5, 'results' => batches[offset / page_size] } end Hawkular::Inventory::ResultFetcher.new(fetch_func) end it 'fetches two first pages' do # Take first three items, expecting 2 calls values = result_fetcher.take(3) expect(values).to eq([1, 2, 3]) expect(calls_count).to eq(2) end it 'fetches all pages while asking more' do # Take more, expecting 3 calls values = result_fetcher.take(10) expect(values).to eq([1, 2, 3, 4, 5]) expect(calls_count).to eq(3) end it 'fetches all pages' do expect(result_fetcher.collect { |i| i }).to eq([1, 2, 3, 4, 5]) expect(calls_count).to eq(3) end end end
require 'sfn' require 'sparkle_formation' require 'pathname' module Sfn module CommandModule # Template handling helper methods module Template # cloudformation directories that should be ignored TEMPLATE_IGNORE_DIRECTORIES = %w(components dynamics registry) # maximum number of attempts to get valid parameter value MAX_PARAMETER_ATTEMPTS = 5 module InstanceMethods # Extract template content based on type # # @param thing [SparkleFormation, Hash] # @param scrub [Truthy, Falsey] scrub nested templates # @return [Hash] def template_content(thing, scrub=false) if(thing.is_a?(SparkleFormation)) if(scrub) dump_stack_for_storage(thing) else config[:sparkle_dump] ? thing.sparkle_dump : thing.dump end else thing end end # Request compile time parameter value # # @param p_name [String, Symbol] name of parameter # @param p_config [Hash] parameter meta information # @param cur_val [Object, NilClass] current value assigned to parameter # @param nested [TrueClass, FalseClass] template is nested # @option p_config [String, Symbol] :type # @option p_config [String, Symbol] :default # @option p_config [String, Symbol] :description # @option p_config [String, Symbol] :multiple # @return [Object] def request_compile_parameter(p_name, p_config, cur_val, nested=false) result = nil attempts = 0 unless(cur_val || p_config[:default].nil?) cur_val = p_config[:default] end if(cur_val.is_a?(Array)) cur_val = cur_val.map(&:to_s).join(',') end until(result && (!result.respond_to?(:empty?) || !result.empty?)) attempts += 1 if(config[:interactive_parameters] && (!nested || !p_config.key?(:prompt_when_nested) || p_config[:prompt_when_nested] == true)) result = ui.ask_question( p_name.to_s.split('_').map(&:capitalize).join, :default => cur_val.to_s.empty? ? 
nil : cur_val.to_s ) else result = cur_val.to_s end case p_config.fetch(:type, 'string').to_s.downcase.to_sym when :string if(p_config[:multiple]) result = result.split(',').map(&:strip) end when :number if(p_config[:multiple]) result = result.split(',').map(&:strip) new_result = result.map do |item| new_item = item.to_i new_item if new_item.to_s == item end result = new_result.size == result.size ? new_result : [] else new_result = result.to_i result = new_result.to_s == result ? new_result : nil end else raise ArgumentError.new "Unknown compile time parameter type provided: `#{p_config[:type].inspect}` (Parameter: #{p_name})" end valid = validate_parameter(result, p_config.to_smash) unless(valid == true) result = nil valid.each do |invalid_msg| ui.error invalid_msg.last end end if(result.nil? || (result.respond_to?(:empty?) && result.empty?)) if(attempts > MAX_PARAMETER_ATTEMPTS) ui.fatal "Failed to receive allowed parameter! (Parameter: #{p_name})" exit 1 end end end result end # @return [Array<SparkleFormation::SparklePack>] def sparkle_packs memoize(:sparkle_packs) do [config.fetch(:sparkle_pack, [])].flatten.compact.map do |sparkle_name| begin require sparkle_name rescue LoadError ui.fatal "Failed to locate sparkle pack `#{sparkle_name}` for loading!" raise end begin SparkleFormation::Sparkle.new(:name => sparkle_name) rescue ArgumentError ui.fatal "Failed to properly setup sparkle pack `#{sparkle_name}`. Check implementation." 
raise end end end end # @return [SparkleFormation::SparkleCollection] def sparkle_collection memoize(:sparkle_collection) do collection = SparkleFormation::SparkleCollection.new( :provider => config.get(:credentials, :provider) ) begin if(config[:base_directory]) root_pack = SparkleFormation::SparklePack.new( :root => config[:base_directory], :provider => config.get(:credentials, :provider) ) else root_pack = SparkleFormation::SparklePack.new( :provider => config.get(:credentials, :provider) ) end collection.set_root(root_pack) rescue Errno::ENOENT ui.warn 'No local SparkleFormation files detected' end sparkle_packs.each do |pack| collection.add_sparkle(pack) end collection end end # Load the template file # # @param args [Symbol] options (:allow_missing) # @return [Hash] loaded template def load_template_file(*args) c_stack = (args.detect{|i| i.is_a?(Hash)} || {})[:stack] unless(config[:template]) set_paths_and_discover_file! unless(config[:file]) unless(args.include?(:allow_missing)) ui.fatal "Invalid formation file path provided: #{config[:file]}" raise IOError.new "Failed to locate file: #{config[:file]}" end end end if(config[:template]) config[:template] elsif(config[:file]) if(config[:processing]) compile_state = merge_compile_time_parameters sf = SparkleFormation.compile(config[:file], :sparkle) if(name_args.first) sf.name = name_args.first end sf.compile_time_parameter_setter do |formation| f_name = formation.root_path.map(&:name).map(&:to_s) pathed_name = f_name.join(' > ') f_name = f_name.join('__') if(formation.root? && compile_state[f_name].nil?) current_state = compile_state else current_state = compile_state.fetch(f_name, Smash.new) end # NOTE: Prevent nesting stack compile state within stack compile state current_state.delete("#{f_name}__#{f_name}") if(formation.compile_state) current_state = current_state.merge(formation.compile_state) end unless(formation.parameters.empty?) 
ui.info "#{ui.color('Compile time parameters:', :bold)} - template: #{ui.color(pathed_name, :green, :bold)}" unless config[:print_only] formation.parameters.each do |k,v| valid_keys = [ "#{f_name}__#{k}", Bogo::Utility.camel("#{f_name}__#{k}").downcase, k, Bogo::Utility.camel(k).downcase ] current_value = valid_keys.map do |key| current_state[key] end.compact.first primary_key, secondary_key = ["#{f_name}__#{k}", k] current_state[k] = request_compile_parameter(k, v, current_value, !!formation.parent ) end formation.compile_state = current_state end end sf.sparkle.apply sparkle_collection custom_stack_types.each do |s_type| unless(sf.stack_resource_types.include?(s_type)) sf.stack_resource_types.push(s_type) end end run_callbacks_for(:template, :stack_name => arguments.first, :sparkle_stack => sf) if(sf.nested? && config[:apply_nesting]) validate_nesting_bucket! if(config[:apply_nesting] == true) config[:apply_nesting] = :deep end case config[:apply_nesting].to_sym when :deep process_nested_stack_deep(sf, c_stack) when :shallow process_nested_stack_shallow(sf, c_stack) when :none sf else raise ArgumentError.new "Unknown nesting style requested: #{config[:apply_nesting].inspect}!" end sf else sf end else template = _from_json(File.read(config[:file])) run_callbacks_for(:template, :stack_name => arguments.first, :hash_stack => template) template end else raise ArgumentError.new 'Failed to locate template for processing!' 
end end # Merge parameters provided directly via configuration into # core parameter set def merge_compile_time_parameters compile_state = config.fetch(:compile_parameters, Smash.new) ui.debug "Initial compile parameters - #{compile_state}" compile_state.keys.each do |cs_key| unless(cs_key.to_s.start_with?("#{arguments.first}__")) named_cs_key = "#{arguments.first}__#{cs_key}" non_named = compile_state.delete(cs_key) if(non_named && !compile_state.key?(named_cs_key)) ui.debug "Setting non-named compile parameter `#{cs_key}` into `#{named_cs_key}`" compile_state[named_cs_key] = non_named else ui.debug "Discarding non-named compile parameter due to set named - `#{cs_key}` </> `#{named_cs_key}`" end end end ui.debug "Merged compile parameters - #{compile_state}" compile_state end # Force user friendly error if nesting bucket is not set within configuration def validate_nesting_bucket! if(config[:nesting_bucket].to_s.empty?) ui.error 'Missing required configuration value for `nesting_bucket`. Cannot generated nested templates!' raise ArgumentError.new 'Required configuration value for `nesting_bucket` not provided.' end end # Processes template using the original shallow workflow # # @param sf [SparkleFormation] stack formation # @param c_stack [Miasma::Models::Orchestration::Stack] existing stack # @return [Hash] dumped stack def process_nested_stack_shallow(sf, c_stack=nil) sf.apply_nesting(:shallow) do |stack_name, stack, resource| run_callbacks_for(:template, :stack_name => stack_name, :sparkle_stack => stack) bucket = provider.connection.api_for(:storage).buckets.get( config[:nesting_bucket] ) if(config[:print_only]) template_url = "http://example.com/bucket/#{name_args.first}_#{stack_name}.json" else stack_definition = dump_stack_for_storage(stack) unless(bucket) raise "Failed to locate configured bucket for stack template storage (#{bucket})!" 
end file = bucket.files.build file.name = "#{name_args.first}_#{stack_name}.json" file.content_type = 'text/json' file.body = MultiJson.dump(parameter_scrub!(stack_definition)) file.save url = URI.parse(file.url) template_url = "#{url.scheme}://#{url.host}#{url.path}" end resource.properties.set!('TemplateURL', template_url) end end # Processes template using new deep workflow # # @param sf [SparkleFormation] stack # @param c_stack [Miasma::Models::Orchestration::Stack] existing stack # @return [SparkleFormation::SparkleStruct] compiled structure def process_nested_stack_deep(sf, c_stack=nil) sf.apply_nesting(:deep) do |stack_name, stack, resource| run_callbacks_for(:template, :stack_name => stack_name, :sparkle_stack => stack) stack_resource = resource._dump current_stack = c_stack ? c_stack.nested_stacks.detect{|s| s.data[:logical_id] == stack_name} : nil current_parameters = extract_current_nested_template_parameters(stack, stack_name, current_stack) if(current_stack && current_stack.data[:parent_stack]) current_parameters.merge!( current_stack.data[:parent_stack].template.fetch( 'Resources', stack_name, 'Properties', 'Parameters', current_stack.data[:parent_stack].template.fetch( 'resources', stack_name, 'properties', 'parameters', Smash.new ) ) ) end full_stack_name = [ config[:nesting_prefix], stack.root_path.map(&:name).map(&:to_s).join('_') ].compact.join('/') unless(config[:print_only]) result = Smash.new( :parameters => populate_parameters!(stack, :stack => current_stack, :current_parameters => current_parameters ) ) store_template(full_stack_name, stack, result) else result = Smash.new( :url => "http://example.com/bucket/#{full_stack_name}.json" ) end format_nested_stack_results(resource._self.provider, result).each do |k,v| resource.properties.set!(k, v) end end end # Extract currently defined parameters for nested template # # @param template [SparkleFormation] # @param stack_name [String] # @param c_stack [Miasma::Models::Orchestration::Stack] # 
@return [Hash] def extract_current_nested_template_parameters(template, stack_name, c_stack=nil) if(template.parent) current_parameters = template.parent.compile.resources.set!(stack_name).properties.parameters current_parameters.nil? ? Smash.new : current_parameters._dump else Smash.new end end # Store template in remote bucket and update given result hash # # @param full_stack_name [String] unique resource name for template # @param template [SparkleFormation, Hash] template instance # @param result [Hash] # @return [Hash] def store_template(full_stack_name, template, result) stack_definition = template.is_a?(SparkleFormation) ? dump_stack_for_storage(template) : template bucket = provider.connection.api_for(:storage).buckets.get( config[:nesting_bucket] ) unless(bucket) raise "Failed to locate configured bucket for stack template storage (#{config[:nesting_bucket]})!" end file = bucket.files.build file.name = "#{full_stack_name}.json" file.content_type = 'text/json' file.body = MultiJson.dump(parameter_scrub!(stack_definition)) file.save result.merge!( :url => file.url ) end # Remove internally used `Stack` property from Stack resources and # and generate compiled Hash # # @param template [SparkleFormation] # @return [Hash] def dump_stack_for_storage(template) nested_stacks = template.nested_stacks(:with_resource, :with_name).map do |nested_stack, nested_resource, nested_name| [nested_name, nested_resource, nested_resource.properties.delete!(:stack)] end stack_definition = template.dump if(config[:plan]) nested_stacks.each do |nested_name, nested_resource, nested_data| nested_resource.properties.set!(:stack, nested_data) end end stack_definition end # Scrub sparkle/sfn customizations from the stack resource data # # @param template [Hash] # @return [Hash] def scrub_template(template) template = parameter_scrub!(template) (template['Resources'] || {}).each do |r_name, r_content| if(valid_stack_types.include?(r_content['Type'])) result = (r_content['Properties'] 
|| {}).delete('Stack') end end template end # Update the nested stack information for specific provider # # @param provider [Symbol] # @param results [Hash] # @return [Hash] def format_nested_stack_results(provider, results) case provider when :aws if(results[:parameters]) results['Parameters'] = results.delete(:parameters) end if(results[:url]) url = URI.parse(results.delete(:url)) results['TemplateURL'] = "#{url.scheme}://#{url.host}#{url.path}" end results when :heat, :rackspace results[:template] = results.delete(:url) results when :azure if(results[:parameters]) results[:parameters] = Smash[ results[:parameters].map do |key, value| [key, value.is_a?(Hash) ? value : Smash.new(:value => value)] end ] end if(results[:url]) results[:templateLink] = Smash.new( :uri => results.delete(:url), :contentVersion => '1.0.0.0' ) end results[:mode] = 'Incremental' results else raise "Unknown stack provider value given! `#{provider}`" end end # Apply template translation # # @param template [Hash] # @return [Hash] def translate_template(template) if(klass_name = config[:translate]) klass = SparkleFormation::Translation.const_get(camel(klass_name)) args = { :parameters => config.fetch(:options, :parameters, Smash.new) } if(chunk_size = config[:translate_chunk_size]) args.merge!( :options => { :serialization_chunk_size => chunk_size } ) end translator = klass.new(template, args) translator.translate! template = translator.translated ui.info "#{ui.color('Translation applied:', :bold)} #{ui.color(klass_name, :yellow)}" end template end # Set SparkleFormation paths and locate tempate # # @return [TrueClass] def set_paths_and_discover_file! 
if(config[:processing]) if(!config[:file] && config[:file_path_prompt]) config[:file] = prompt_for_template else file_lookup_path = File.expand_path(config[:file]) unless(File.exists?(file_lookup_path)) file_lookup_path = config[:file] end config[:file] = sparkle_collection.get( :template, file_lookup_path )[:path] end else if(config[:file]) unless(File.exists?(config[:file])) raise Errno::ENOENT.new("No such file - #{config[:file]}") end else raise "Template processing is disabled. Path to serialized template via `--file` required!" end end true end # Prompt user for template selection # # @param prefix [String] prefix filter for names # @return [String] path to template def prompt_for_template(prefix=nil) if(prefix) collection_name = prefix.split('__').map do |c_name| c_name.split('_').map(&:capitalize).join(' ') end.join(' / ') ui.info "Viewing collection: #{ui.color(collection_name, :bold)}" template_names = sparkle_collection.templates.fetch(provider.connection.provider, {}).keys.find_all do |t_name| t_name.to_s.start_with?(prefix.to_s) end else template_names = sparkle_collection.templates.fetch(provider.connection.provider, {}).keys end collections = template_names.map do |t_name| t_name = t_name.to_s.sub(/^#{Regexp.escape(prefix.to_s)}/, '') if(t_name.include?('__')) c_name = t_name.split('__').first [[prefix, c_name].compact.join('') + '__', c_name] end end.compact.uniq(&:first) templates = template_names.map do |t_name| t_name = t_name.to_s.sub(/^#{Regexp.escape(prefix.to_s)}/, '') unless(t_name.include?('__')) [[prefix, t_name].compact.join(''), t_name] end end.compact if(collections.empty? && templates.empty?) ui.error 'Failed to locate any templates!' return nil end ui.info "Please select an entry#{ '(or collection to list)' unless collections.empty?}:" output = [] idx = 1 valid = {} unless(collections.empty?) 
output << ui.color('Collections:', :bold) collections.each do |full_name, part_name| valid[idx] = {:name => full_name, :type => :collection} output << [idx, part_name.split('_').map(&:capitalize).join(' ')] idx += 1 end end unless(templates.empty?) output << ui.color('Templates:', :bold) templates.each do |full_name, part_name| valid[idx] = {:name => full_name, :type => :template} output << [idx, part_name.split('_').map(&:capitalize).join(' ')] idx += 1 end end max = idx.to_s.length output.map! do |line| if(line.is_a?(Array)) " #{line.first}.#{' ' * (max - line.first.to_s.length)} #{line.last}" else line end end ui.puts "#{output.join("\n")}\n" response = nil until(valid[response]) response = ui.ask_question('Enter selection').to_i end entry = valid[response] if(entry[:type] == :collection) prompt_for_template(entry[:name]) else sparkle_collection.get(:template, entry[:name])[:path] end end end module ClassMethods end # Load methods into class and define options # # @param klass [Class] def self.included(klass) klass.class_eval do extend Sfn::CommandModule::Template::ClassMethods include Sfn::CommandModule::Template::InstanceMethods include Sfn::Utils::PathSelector include Sfn::Utils::StackParameterScrubber end end end end end Include validator module with template module require 'sfn' require 'sparkle_formation' require 'pathname' module Sfn module CommandModule # Template handling helper methods module Template # cloudformation directories that should be ignored TEMPLATE_IGNORE_DIRECTORIES = %w(components dynamics registry) # maximum number of attempts to get valid parameter value MAX_PARAMETER_ATTEMPTS = 5 module InstanceMethods # Extract template content based on type # # @param thing [SparkleFormation, Hash] # @param scrub [Truthy, Falsey] scrub nested templates # @return [Hash] def template_content(thing, scrub=false) if(thing.is_a?(SparkleFormation)) if(scrub) dump_stack_for_storage(thing) else config[:sparkle_dump] ? 
thing.sparkle_dump : thing.dump end else thing end end # Request compile time parameter value # # @param p_name [String, Symbol] name of parameter # @param p_config [Hash] parameter meta information # @param cur_val [Object, NilClass] current value assigned to parameter # @param nested [TrueClass, FalseClass] template is nested # @option p_config [String, Symbol] :type # @option p_config [String, Symbol] :default # @option p_config [String, Symbol] :description # @option p_config [String, Symbol] :multiple # @return [Object] def request_compile_parameter(p_name, p_config, cur_val, nested=false) result = nil attempts = 0 unless(cur_val || p_config[:default].nil?) cur_val = p_config[:default] end if(cur_val.is_a?(Array)) cur_val = cur_val.map(&:to_s).join(',') end until(result && (!result.respond_to?(:empty?) || !result.empty?)) attempts += 1 if(config[:interactive_parameters] && (!nested || !p_config.key?(:prompt_when_nested) || p_config[:prompt_when_nested] == true)) result = ui.ask_question( p_name.to_s.split('_').map(&:capitalize).join, :default => cur_val.to_s.empty? ? nil : cur_val.to_s ) else result = cur_val.to_s end case p_config.fetch(:type, 'string').to_s.downcase.to_sym when :string if(p_config[:multiple]) result = result.split(',').map(&:strip) end when :number if(p_config[:multiple]) result = result.split(',').map(&:strip) new_result = result.map do |item| new_item = item.to_i new_item if new_item.to_s == item end result = new_result.size == result.size ? new_result : [] else new_result = result.to_i result = new_result.to_s == result ? new_result : nil end else raise ArgumentError.new "Unknown compile time parameter type provided: `#{p_config[:type].inspect}` (Parameter: #{p_name})" end valid = validate_parameter(result, p_config.to_smash) unless(valid == true) result = nil valid.each do |invalid_msg| ui.error invalid_msg.last end end if(result.nil? || (result.respond_to?(:empty?) 
&& result.empty?)) if(attempts > MAX_PARAMETER_ATTEMPTS) ui.fatal "Failed to receive allowed parameter! (Parameter: #{p_name})" exit 1 end end end result end # @return [Array<SparkleFormation::SparklePack>] def sparkle_packs memoize(:sparkle_packs) do [config.fetch(:sparkle_pack, [])].flatten.compact.map do |sparkle_name| begin require sparkle_name rescue LoadError ui.fatal "Failed to locate sparkle pack `#{sparkle_name}` for loading!" raise end begin SparkleFormation::Sparkle.new(:name => sparkle_name) rescue ArgumentError ui.fatal "Failed to properly setup sparkle pack `#{sparkle_name}`. Check implementation." raise end end end end # @return [SparkleFormation::SparkleCollection] def sparkle_collection memoize(:sparkle_collection) do collection = SparkleFormation::SparkleCollection.new( :provider => config.get(:credentials, :provider) ) begin if(config[:base_directory]) root_pack = SparkleFormation::SparklePack.new( :root => config[:base_directory], :provider => config.get(:credentials, :provider) ) else root_pack = SparkleFormation::SparklePack.new( :provider => config.get(:credentials, :provider) ) end collection.set_root(root_pack) rescue Errno::ENOENT ui.warn 'No local SparkleFormation files detected' end sparkle_packs.each do |pack| collection.add_sparkle(pack) end collection end end # Load the template file # # @param args [Symbol] options (:allow_missing) # @return [Hash] loaded template def load_template_file(*args) c_stack = (args.detect{|i| i.is_a?(Hash)} || {})[:stack] unless(config[:template]) set_paths_and_discover_file! 
unless(config[:file]) unless(args.include?(:allow_missing)) ui.fatal "Invalid formation file path provided: #{config[:file]}" raise IOError.new "Failed to locate file: #{config[:file]}" end end end if(config[:template]) config[:template] elsif(config[:file]) if(config[:processing]) compile_state = merge_compile_time_parameters sf = SparkleFormation.compile(config[:file], :sparkle) if(name_args.first) sf.name = name_args.first end sf.compile_time_parameter_setter do |formation| f_name = formation.root_path.map(&:name).map(&:to_s) pathed_name = f_name.join(' > ') f_name = f_name.join('__') if(formation.root? && compile_state[f_name].nil?) current_state = compile_state else current_state = compile_state.fetch(f_name, Smash.new) end # NOTE: Prevent nesting stack compile state within stack compile state current_state.delete("#{f_name}__#{f_name}") if(formation.compile_state) current_state = current_state.merge(formation.compile_state) end unless(formation.parameters.empty?) ui.info "#{ui.color('Compile time parameters:', :bold)} - template: #{ui.color(pathed_name, :green, :bold)}" unless config[:print_only] formation.parameters.each do |k,v| valid_keys = [ "#{f_name}__#{k}", Bogo::Utility.camel("#{f_name}__#{k}").downcase, k, Bogo::Utility.camel(k).downcase ] current_value = valid_keys.map do |key| current_state[key] end.compact.first primary_key, secondary_key = ["#{f_name}__#{k}", k] current_state[k] = request_compile_parameter(k, v, current_value, !!formation.parent ) end formation.compile_state = current_state end end sf.sparkle.apply sparkle_collection custom_stack_types.each do |s_type| unless(sf.stack_resource_types.include?(s_type)) sf.stack_resource_types.push(s_type) end end run_callbacks_for(:template, :stack_name => arguments.first, :sparkle_stack => sf) if(sf.nested? && config[:apply_nesting]) validate_nesting_bucket! 
if(config[:apply_nesting] == true) config[:apply_nesting] = :deep end case config[:apply_nesting].to_sym when :deep process_nested_stack_deep(sf, c_stack) when :shallow process_nested_stack_shallow(sf, c_stack) when :none sf else raise ArgumentError.new "Unknown nesting style requested: #{config[:apply_nesting].inspect}!" end sf else sf end else template = _from_json(File.read(config[:file])) run_callbacks_for(:template, :stack_name => arguments.first, :hash_stack => template) template end else raise ArgumentError.new 'Failed to locate template for processing!' end end # Merge parameters provided directly via configuration into # core parameter set def merge_compile_time_parameters compile_state = config.fetch(:compile_parameters, Smash.new) ui.debug "Initial compile parameters - #{compile_state}" compile_state.keys.each do |cs_key| unless(cs_key.to_s.start_with?("#{arguments.first}__")) named_cs_key = "#{arguments.first}__#{cs_key}" non_named = compile_state.delete(cs_key) if(non_named && !compile_state.key?(named_cs_key)) ui.debug "Setting non-named compile parameter `#{cs_key}` into `#{named_cs_key}`" compile_state[named_cs_key] = non_named else ui.debug "Discarding non-named compile parameter due to set named - `#{cs_key}` </> `#{named_cs_key}`" end end end ui.debug "Merged compile parameters - #{compile_state}" compile_state end # Force user friendly error if nesting bucket is not set within configuration def validate_nesting_bucket! if(config[:nesting_bucket].to_s.empty?) ui.error 'Missing required configuration value for `nesting_bucket`. Cannot generated nested templates!' raise ArgumentError.new 'Required configuration value for `nesting_bucket` not provided.' 
end end # Processes template using the original shallow workflow # # @param sf [SparkleFormation] stack formation # @param c_stack [Miasma::Models::Orchestration::Stack] existing stack # @return [Hash] dumped stack def process_nested_stack_shallow(sf, c_stack=nil) sf.apply_nesting(:shallow) do |stack_name, stack, resource| run_callbacks_for(:template, :stack_name => stack_name, :sparkle_stack => stack) bucket = provider.connection.api_for(:storage).buckets.get( config[:nesting_bucket] ) if(config[:print_only]) template_url = "http://example.com/bucket/#{name_args.first}_#{stack_name}.json" else stack_definition = dump_stack_for_storage(stack) unless(bucket) raise "Failed to locate configured bucket for stack template storage (#{bucket})!" end file = bucket.files.build file.name = "#{name_args.first}_#{stack_name}.json" file.content_type = 'text/json' file.body = MultiJson.dump(parameter_scrub!(stack_definition)) file.save url = URI.parse(file.url) template_url = "#{url.scheme}://#{url.host}#{url.path}" end resource.properties.set!('TemplateURL', template_url) end end # Processes template using new deep workflow # # @param sf [SparkleFormation] stack # @param c_stack [Miasma::Models::Orchestration::Stack] existing stack # @return [SparkleFormation::SparkleStruct] compiled structure def process_nested_stack_deep(sf, c_stack=nil) sf.apply_nesting(:deep) do |stack_name, stack, resource| run_callbacks_for(:template, :stack_name => stack_name, :sparkle_stack => stack) stack_resource = resource._dump current_stack = c_stack ? 
c_stack.nested_stacks.detect{|s| s.data[:logical_id] == stack_name} : nil current_parameters = extract_current_nested_template_parameters(stack, stack_name, current_stack) if(current_stack && current_stack.data[:parent_stack]) current_parameters.merge!( current_stack.data[:parent_stack].template.fetch( 'Resources', stack_name, 'Properties', 'Parameters', current_stack.data[:parent_stack].template.fetch( 'resources', stack_name, 'properties', 'parameters', Smash.new ) ) ) end full_stack_name = [ config[:nesting_prefix], stack.root_path.map(&:name).map(&:to_s).join('_') ].compact.join('/') unless(config[:print_only]) result = Smash.new( :parameters => populate_parameters!(stack, :stack => current_stack, :current_parameters => current_parameters ) ) store_template(full_stack_name, stack, result) else result = Smash.new( :url => "http://example.com/bucket/#{full_stack_name}.json" ) end format_nested_stack_results(resource._self.provider, result).each do |k,v| resource.properties.set!(k, v) end end end # Extract currently defined parameters for nested template # # @param template [SparkleFormation] # @param stack_name [String] # @param c_stack [Miasma::Models::Orchestration::Stack] # @return [Hash] def extract_current_nested_template_parameters(template, stack_name, c_stack=nil) if(template.parent) current_parameters = template.parent.compile.resources.set!(stack_name).properties.parameters current_parameters.nil? ? Smash.new : current_parameters._dump else Smash.new end end # Store template in remote bucket and update given result hash # # @param full_stack_name [String] unique resource name for template # @param template [SparkleFormation, Hash] template instance # @param result [Hash] # @return [Hash] def store_template(full_stack_name, template, result) stack_definition = template.is_a?(SparkleFormation) ? 
dump_stack_for_storage(template) : template bucket = provider.connection.api_for(:storage).buckets.get( config[:nesting_bucket] ) unless(bucket) raise "Failed to locate configured bucket for stack template storage (#{config[:nesting_bucket]})!" end file = bucket.files.build file.name = "#{full_stack_name}.json" file.content_type = 'text/json' file.body = MultiJson.dump(parameter_scrub!(stack_definition)) file.save result.merge!( :url => file.url ) end # Remove internally used `Stack` property from Stack resources and # and generate compiled Hash # # @param template [SparkleFormation] # @return [Hash] def dump_stack_for_storage(template) nested_stacks = template.nested_stacks(:with_resource, :with_name).map do |nested_stack, nested_resource, nested_name| [nested_name, nested_resource, nested_resource.properties.delete!(:stack)] end stack_definition = template.dump if(config[:plan]) nested_stacks.each do |nested_name, nested_resource, nested_data| nested_resource.properties.set!(:stack, nested_data) end end stack_definition end # Scrub sparkle/sfn customizations from the stack resource data # # @param template [Hash] # @return [Hash] def scrub_template(template) template = parameter_scrub!(template) (template['Resources'] || {}).each do |r_name, r_content| if(valid_stack_types.include?(r_content['Type'])) result = (r_content['Properties'] || {}).delete('Stack') end end template end # Update the nested stack information for specific provider # # @param provider [Symbol] # @param results [Hash] # @return [Hash] def format_nested_stack_results(provider, results) case provider when :aws if(results[:parameters]) results['Parameters'] = results.delete(:parameters) end if(results[:url]) url = URI.parse(results.delete(:url)) results['TemplateURL'] = "#{url.scheme}://#{url.host}#{url.path}" end results when :heat, :rackspace results[:template] = results.delete(:url) results when :azure if(results[:parameters]) results[:parameters] = Smash[ results[:parameters].map do |key, 
value| [key, value.is_a?(Hash) ? value : Smash.new(:value => value)] end ] end if(results[:url]) results[:templateLink] = Smash.new( :uri => results.delete(:url), :contentVersion => '1.0.0.0' ) end results[:mode] = 'Incremental' results else raise "Unknown stack provider value given! `#{provider}`" end end # Apply template translation # # @param template [Hash] # @return [Hash] def translate_template(template) if(klass_name = config[:translate]) klass = SparkleFormation::Translation.const_get(camel(klass_name)) args = { :parameters => config.fetch(:options, :parameters, Smash.new) } if(chunk_size = config[:translate_chunk_size]) args.merge!( :options => { :serialization_chunk_size => chunk_size } ) end translator = klass.new(template, args) translator.translate! template = translator.translated ui.info "#{ui.color('Translation applied:', :bold)} #{ui.color(klass_name, :yellow)}" end template end # Set SparkleFormation paths and locate tempate # # @return [TrueClass] def set_paths_and_discover_file! if(config[:processing]) if(!config[:file] && config[:file_path_prompt]) config[:file] = prompt_for_template else file_lookup_path = File.expand_path(config[:file]) unless(File.exists?(file_lookup_path)) file_lookup_path = config[:file] end config[:file] = sparkle_collection.get( :template, file_lookup_path )[:path] end else if(config[:file]) unless(File.exists?(config[:file])) raise Errno::ENOENT.new("No such file - #{config[:file]}") end else raise "Template processing is disabled. Path to serialized template via `--file` required!" 
end end true end # Prompt user for template selection # # @param prefix [String] prefix filter for names # @return [String] path to template def prompt_for_template(prefix=nil) if(prefix) collection_name = prefix.split('__').map do |c_name| c_name.split('_').map(&:capitalize).join(' ') end.join(' / ') ui.info "Viewing collection: #{ui.color(collection_name, :bold)}" template_names = sparkle_collection.templates.fetch(provider.connection.provider, {}).keys.find_all do |t_name| t_name.to_s.start_with?(prefix.to_s) end else template_names = sparkle_collection.templates.fetch(provider.connection.provider, {}).keys end collections = template_names.map do |t_name| t_name = t_name.to_s.sub(/^#{Regexp.escape(prefix.to_s)}/, '') if(t_name.include?('__')) c_name = t_name.split('__').first [[prefix, c_name].compact.join('') + '__', c_name] end end.compact.uniq(&:first) templates = template_names.map do |t_name| t_name = t_name.to_s.sub(/^#{Regexp.escape(prefix.to_s)}/, '') unless(t_name.include?('__')) [[prefix, t_name].compact.join(''), t_name] end end.compact if(collections.empty? && templates.empty?) ui.error 'Failed to locate any templates!' return nil end ui.info "Please select an entry#{ '(or collection to list)' unless collections.empty?}:" output = [] idx = 1 valid = {} unless(collections.empty?) output << ui.color('Collections:', :bold) collections.each do |full_name, part_name| valid[idx] = {:name => full_name, :type => :collection} output << [idx, part_name.split('_').map(&:capitalize).join(' ')] idx += 1 end end unless(templates.empty?) output << ui.color('Templates:', :bold) templates.each do |full_name, part_name| valid[idx] = {:name => full_name, :type => :template} output << [idx, part_name.split('_').map(&:capitalize).join(' ')] idx += 1 end end max = idx.to_s.length output.map! 
do |line| if(line.is_a?(Array)) " #{line.first}.#{' ' * (max - line.first.to_s.length)} #{line.last}" else line end end ui.puts "#{output.join("\n")}\n" response = nil until(valid[response]) response = ui.ask_question('Enter selection').to_i end entry = valid[response] if(entry[:type] == :collection) prompt_for_template(entry[:name]) else sparkle_collection.get(:template, entry[:name])[:path] end end end module ClassMethods end # Load methods into class and define options # # @param klass [Class] def self.included(klass) klass.class_eval do extend Sfn::CommandModule::Template::ClassMethods include Sfn::CommandModule::Template::InstanceMethods include Sfn::Utils::PathSelector include Sfn::Utils::StackParameterScrubber include Sfn::Utils::StackParameterValidator end end end end end
# encoding: utf-8 require File.expand_path("../spec_helper", __FILE__) describe "Area" do before :each do browser.goto(WatirSpec.files + "/images.html") end # Exists method describe "#exist?" do it "returns true if the area exists" do browser.area(:id, "NCE").should exist browser.area(:id, /NCE/).should exist browser.area(:title, "Tables").should exist browser.area(:title, /Tables/).should exist not_compliant_on [:webdriver, :ie] do browser.area(:href, "tables.html").should exist end browser.area(:href, /tables/).should exist browser.area(:index, 0).should exist browser.area(:xpath, "//area[@id='NCE']").should exist end it "returns the first area if given no args" do browser.area.should exist end it "returns false if the area doesn't exist" do browser.area(:id, "no_such_id").should_not exist browser.area(:id, /no_such_id/).should_not exist browser.area(:title, "no_such_title").should_not exist browser.area(:title, /no_such_title/).should_not exist browser.area(:href, "no-tables.html").should_not exist browser.area(:href, /no-tables/).should_not exist browser.area(:index, 1337).should_not exist browser.area(:xpath, "//area[@id='no_such_id']").should_not exist end it "raises TypeError when 'what' argument is invalid" do lambda { browser.area(:id, 3.14).exists? }.should raise_error(TypeError) end it "raises MissingWayOfFindingObjectException when 'how' argument is invalid" do lambda { browser.area(:no_such_how, 'some_value').exists? 
}.should raise_error(MissingWayOfFindingObjectException) end end # Attribute methods describe "#id" do it "returns the id attribute" do browser.area(:index, 0).id.should == "NCE" end it "returns an empty string if the element exists and the attribute doesn't" do browser.area(:index, 2).id.should == '' end it "raises UnknownObjectException if the area doesn't exist" do lambda { browser.area(:id, "no_such_id").id }.should raise_error(UnknownObjectException) lambda { browser.area(:index, 1337).id }.should raise_error(UnknownObjectException) end end describe "#respond_to?" do it "returns true for all attribute methods" do browser.area(:index, 0).should respond_to(:id) end end end add guard for watir # encoding: utf-8 require File.expand_path("../spec_helper", __FILE__) describe "Area" do before :each do browser.goto(WatirSpec.files + "/images.html") end # Exists method describe "#exist?" do it "returns true if the area exists" do browser.area(:id, "NCE").should exist browser.area(:id, /NCE/).should exist browser.area(:title, "Tables").should exist browser.area(:title, /Tables/).should exist not_compliant_on [:webdriver, :ie], :watir do browser.area(:href, "tables.html").should exist end browser.area(:href, /tables/).should exist browser.area(:index, 0).should exist browser.area(:xpath, "//area[@id='NCE']").should exist end it "returns the first area if given no args" do browser.area.should exist end it "returns false if the area doesn't exist" do browser.area(:id, "no_such_id").should_not exist browser.area(:id, /no_such_id/).should_not exist browser.area(:title, "no_such_title").should_not exist browser.area(:title, /no_such_title/).should_not exist browser.area(:href, "no-tables.html").should_not exist browser.area(:href, /no-tables/).should_not exist browser.area(:index, 1337).should_not exist browser.area(:xpath, "//area[@id='no_such_id']").should_not exist end it "raises TypeError when 'what' argument is invalid" do lambda { browser.area(:id, 3.14).exists? 
}.should raise_error(TypeError) end it "raises MissingWayOfFindingObjectException when 'how' argument is invalid" do lambda { browser.area(:no_such_how, 'some_value').exists? }.should raise_error(MissingWayOfFindingObjectException) end end # Attribute methods describe "#id" do it "returns the id attribute" do browser.area(:index, 0).id.should == "NCE" end it "returns an empty string if the element exists and the attribute doesn't" do browser.area(:index, 2).id.should == '' end it "raises UnknownObjectException if the area doesn't exist" do lambda { browser.area(:id, "no_such_id").id }.should raise_error(UnknownObjectException) lambda { browser.area(:index, 1337).id }.should raise_error(UnknownObjectException) end end describe "#respond_to?" do it "returns true for all attribute methods" do browser.area(:index, 0).should respond_to(:id) end end end
Add Zipstream::Body spec require 'spec_helper' describe Zipstream::Body do it "should stream as a body" do stage = 1 body = Zipstream::Body.new do |zip| stage = 2 zip.write "README", "This is a README!" stage = 3 zip.write "LICENSE", "Copyright (c) 2012 Me" stage = 4 end.to_enum buffer = "" stage.should == 1 buffer += body.next until stage > 1 stage.should == 2 buffer += body.next until stage > 2 stage.should == 3 loop { buffer += body.next } stage.should == 4 end end
require 'standarized_deploy/common' configuration = Capistrano::Configuration.respond_to?(:instance) ? Capistrano::Configuration.instance(:must_exist) : Capistrano.configuration(:must_exist) configuration.load do _cset(:app_name) do abort "Please specify :app_name corresponding to your server, set :app_name, 'foo'" end _cset(:app_domain) do abort "Please specify :app_domain corresponding to your server, set :app_domain, 'foo.bar.com'" end _cset(:staging_restart_workers) { false } _cset(:staging_clear_redis_cache) { false } ssh_options[:forward_agent] = true set :rvm_ruby_string, 'default' set :scm, :git set :scm_verbose, true set :git_enable_submodules, 1 set :deploy_via, :remote_cache set :use_sudo, false set :application, app_domain role :app, app_domain role :web, app_domain role :db, app_domain, :primary => true default_environment['LANG'] = 'en_US.UTF-8' default_environment['LANGUAGE'] = 'en_US' default_environment['LC_ALL'] = 'en_US.UTF-8' set :rvm_ruby_string, 'default' set :rvm_type, :system set :user, app_name set :deploy_to, "/home/#{user}" set :branch do current_branch = %x[ git branch | grep ^* | awk {'print $2'} ].chomp branch = Capistrano::CLI.ui.ask "Branch to deploy (make sure to push the branch first): [#{current_branch}] " branch = current_branch if branch.empty? 
branch end namespace :deploy do task :start do run "#{deploy_to}/bin/start" end task :restart do run "#{deploy_to}/bin/restart" end end %w(start stop restart).each do |action| task "#{action}_workers" do run "cd #{current_path}; #{deploy_to}/bin/workers #{action}" end end desc "Symlinks the database.yml" task :symlink_db, :roles => :app do run "ln -nfs #{deploy_to}/shared/config/database.yml #{release_path}/config/database.yml" end if staging_restart_workers after 'deploy', 'restart_workers' end if staging_clear_redis_cache after 'deploy', 'redis_cache:clear' end after 'bundle:install', 'symlink_db' end remove staging_ prefix require 'standarized_deploy/common' configuration = Capistrano::Configuration.respond_to?(:instance) ? Capistrano::Configuration.instance(:must_exist) : Capistrano.configuration(:must_exist) configuration.load do _cset(:app_name) do abort "Please specify :app_name corresponding to your server, set :app_name, 'foo'" end _cset(:app_domain) do abort "Please specify :app_domain corresponding to your server, set :app_domain, 'foo.bar.com'" end _cset(:restart_workers) { false } _cset(:clear_redis_cache) { false } ssh_options[:forward_agent] = true set :rvm_ruby_string, 'default' set :scm, :git set :scm_verbose, true set :git_enable_submodules, 1 set :deploy_via, :remote_cache set :use_sudo, false set :application, app_domain role :app, app_domain role :web, app_domain role :db, app_domain, :primary => true default_environment['LANG'] = 'en_US.UTF-8' default_environment['LANGUAGE'] = 'en_US' default_environment['LC_ALL'] = 'en_US.UTF-8' set :rvm_ruby_string, 'default' set :rvm_type, :system set :user, app_name set :deploy_to, "/home/#{user}" set :branch do current_branch = %x[ git branch | grep ^* | awk {'print $2'} ].chomp branch = Capistrano::CLI.ui.ask "Branch to deploy (make sure to push the branch first): [#{current_branch}] " branch = current_branch if branch.empty? 
branch end namespace :deploy do task :start do run "#{deploy_to}/bin/start" end task :restart do run "#{deploy_to}/bin/restart" end end %w(start stop restart).each do |action| task "#{action}_workers" do run "cd #{current_path}; #{deploy_to}/bin/workers #{action}" end end desc "Symlinks the database.yml" task :symlink_db, :roles => :app do run "ln -nfs #{deploy_to}/shared/config/database.yml #{release_path}/config/database.yml" end if restart_workers after 'deploy', 'restart_workers' end if clear_redis_cache after 'deploy', 'redis_cache:clear' end after 'bundle:install', 'symlink_db' end
module Stash module Harvester class Application def initialize(from_time: nil, until_time: nil, config_file: nil) puts "from_time:\t#{from_time}" puts "until_time:\t#{until_time}" puts "config_file:\t#{config_file}" end def start end private def ensure_config_file(config_file) config_file ||= default_config_file raise ArgumentError, "No configuration file provided, and none found in default locations #{config_file_defaults.join(' or ')}" unless config_file config_file end def default_config_file config_file_defaults.each do |cf| return cf if File.exist?(cf) end end def config_file_defaults [File.expand_path('stash-harvester.yml', Dir.pwd), File.expand_path('.stash-harvester.yml', Dir.home)] end end end end Improve code style module Stash module Harvester class Application def initialize(from_time: nil, until_time: nil, config_file: nil) puts "from_time:\t#{from_time}" puts "until_time:\t#{until_time}" puts "config_file:\t#{config_file}" end def start end private def ensure_config_file(config_file) config_file ||= default_config_file fail ArgumentError, "No configuration file provided, and none found in default locations #{config_file_defaults.join(' or ')}" unless config_file config_file end def default_config_file config_file_defaults.each do |cf| return cf if File.exist?(cf) end end def config_file_defaults [File.expand_path('stash-harvester.yml', Dir.pwd), File.expand_path('.stash-harvester.yml', Dir.home)] end end end end
class Thor module ZshCompletion VERSION = "0.1.9".freeze end end v0.1.10 class Thor module ZshCompletion VERSION = "0.1.10".freeze end end
module Travis module Build class Script class NodeJs < Script DEFAULT_VERSION = '0.10' YARN_REQUIRED_NODE_VERSION = '4' NPM_QUIET_TREE_VERSION = '5' NPM_CI_CMD_VERSION = '5.8.0' def export super if node_js_given_in_config? sh.export 'TRAVIS_NODE_VERSION', version, echo: false end end def setup super prepend_path './node_modules/.bin' convert_legacy_nodejs_config update_nvm nvm_install npm_disable_prefix npm_disable_spinner npm_disable_progress npm_disable_strict_ssl unless npm_strict_ssl? setup_npm_cache if use_npm_cache? install_yarn end def announce super if iojs_3_plus? sh.cmd 'echo -e "#include <array>\nstd::array<int, 1> arr = {0}; int main() {return 0;}" > /tmp/foo-$$.cpp', echo: false sh.raw "if ! ($CXX -std=c++11 -o /dev/null /tmp/foo-$$.cpp >&/dev/null || g++ -std=c++11 -o /dev/null /tmp/foo-$$.cpp >&/dev/null); then" sh.echo "Starting with io.js 3 and Node.js 4, building native extensions requires C++11-compatible compiler, which seems unavailable on this VM. Please read https://docs.travis-ci.com/user/languages/javascript-with-nodejs#Node.js-v4-(or-io.js-v3)-compiler-requirements.", ansi: :yellow sh.raw "fi" sh.cmd 'rm -f /tmp/foo-$$.cpp', echo: false end sh.cmd 'node --version' sh.cmd 'npm --version' sh.cmd 'nvm --version' sh.if "-f yarn.lock" do sh.cmd 'yarn --version' sh.cmd 'hash -d yarn', echo: false end end def install sh.if '-f package.json' do sh.if "-f yarn.lock" do sh.if yarn_req_not_met do npm_install config[:npm_args] end sh.else do sh.cmd "yarn", retry: true, fold: 'install' end end sh.else do npm_install config[:npm_args] end end end def script sh.if '-f package.json' do sh.if "-f yarn.lock" do sh.if yarn_req_not_met do sh.cmd 'npm test' end sh.else do sh.cmd 'yarn test' end end sh.else do sh.cmd 'npm test' end end sh.else do sh.cmd 'make test' end end def cache_slug super << '--node-' << version end def setup_cache if data.cache?(:yarn) sh.fold 'cache.yarn' do sh.echo '' directory_cache.add '$HOME/.cache/yarn' end end end def 
use_directory_cache? super || data.cache?(:yarn) end private def convert_legacy_nodejs_config # TODO deprecate :nodejs # some old projects use language: nodejs. MK. if config[:nodejs] && !config[:node_js] config[:node_js] = config[:nodejs] end end def node_js_given_in_config? !!config[:node_js] end def version @version ||= begin version = Array(config[:node_js]).first version == 0.1 ? '0.10' : version.to_s end end def nvm_install if node_js_given_in_config? use_nvm_version else use_nvm_default end end def use_nvm_default sh.if '-f .nvmrc' do sh.echo "Using nodejs version from .nvmrc", ansi: :yellow install_version '$(< .nvmrc)' end sh.else do install_version DEFAULT_VERSION end end def use_nvm_version install_version version end def install_version(ver) sh.fold "nvm.install" do sh.cmd "nvm install #{ver}", assert: false, timing: true sh.if '$? -ne 0' do sh.echo "Failed to install #{ver}. Remote repository may not be reachable.", ansi: :red sh.echo "Using locally available version #{ver}, if applicable." sh.cmd "nvm use #{ver}", assert: false, timing: false sh.if '$? -ne 0' do sh.echo "Unable to use #{ver}", ansi: :red sh.cmd "false", assert: true, echo: false, timing: false end end sh.export 'TRAVIS_NODE_VERSION', ver, echo: false end end def update_nvm return if app_host.empty? 
sh.echo "Updating nvm", ansi: :yellow, timing: false nvm_dir = "$HOME/.nvm" sh.raw "mkdir -p #{nvm_dir}" sh.raw "curl -s -o #{nvm_dir}/nvm.sh https://#{app_host}/files/nvm.sh".untaint, assert: false sh.raw "curl -s -o #{nvm_dir}/nvm-exec https://#{app_host}/files/nvm-exec".untaint, assert: false sh.raw "chmod 0755 #{nvm_dir}/nvm.sh #{nvm_dir}/nvm-exec", assert: true sh.raw "source #{nvm_dir}/nvm.sh", assert: false end def npm_disable_prefix sh.if "$(command -v sw_vers) && -f $HOME/.npmrc" do sh.cmd "npm config delete prefix" end end def npm_disable_spinner sh.cmd 'npm config set spin false', echo: false, timing: false end def npm_disable_strict_ssl # sh.echo '### Disabling strict SSL ###', ansi: :red sh.cmd 'echo "### Disabling strict SSL ###"' sh.cmd 'npm conf set strict-ssl false', echo: true end def npm_disable_progress sh.cmd "npm config set progress false", echo: false, timing: false end def npm_strict_ssl? !node_0_6? && !node_0_8? && !node_0_9? end def node_0_6? (config[:node_js] || '').to_s.split('.')[0..1] == %w(0 6) end def node_0_8? (config[:node_js] || '').to_s.split('.')[0..1] == %w(0 8) end def node_0_9? (config[:node_js] || '').to_s.split('.')[0..1] == %w(0 9) end def use_npm_cache? Array(config[:cache]).include?('npm') end def setup_npm_cache if data.hosts && data.hosts[:npm_cache] sh.cmd 'npm config set registry http://registry.npmjs.org/', timing: false sh.cmd "npm config set proxy #{data.hosts[:npm_cache]}", timing: false end end def iojs_3_plus? 
(config[:node_js] || '').to_s.split('.')[0].to_i >= 3 end def npm_install(args) sh.fold "install.npm" do sh.if "$(vers2int `npm -v`) -gt $(vers2int #{NPM_CI_CMD_VERSION})" do sh.if "-f npm-shrinkwrap.json -o -f package-lock.json" do sh.cmd "npm ci #{args}", retry: true else sh.cmd "npm install #{args}", retry: true end end sh.else do sh.cmd "npm install #{args}", retry: true end sh.if "$(vers2int `npm -v`) -gt $(vers2int #{NPM_QUIET_TREE_VERSION})" do sh.cmd "npm ls", echo: true, assert: false end end end def install_yarn sh.if "-f yarn.lock" do sh.if yarn_req_not_met do sh.echo "Node.js version $(node --version) does not meet requirement for yarn." \ " Please use Node.js #{YARN_REQUIRED_NODE_VERSION} or later.", ansi: :red npm_install config[:npm_args] end sh.else do sh.fold "install.yarn" do sh.if "-z \"$(command -v yarn)\"" do sh.if "-z \"$(command -v gpg)\"" do sh.export "YARN_GPG", "no" end sh.echo "Installing yarn", ansi: :green sh.cmd "curl -o- -L https://yarnpkg.com/install.sh | bash", echo: true, timing: true sh.echo "Setting up \\$PATH", ansi: :green sh.export "PATH", "$HOME/.yarn/bin:$PATH" end end end end end def prepend_path(path) sh.if "$(echo :$PATH: | grep -v :#{path}:)" do sh.export "PATH", "#{path}:$PATH", echo: true end end def yarn_req_not_met "$(vers2int $(echo `node --version` | tr -d 'v')) -lt $(vers2int #{YARN_REQUIRED_NODE_VERSION})" end end end end end change greater than to greater or equal to module Travis module Build class Script class NodeJs < Script DEFAULT_VERSION = '0.10' YARN_REQUIRED_NODE_VERSION = '4' NPM_QUIET_TREE_VERSION = '5' NPM_CI_CMD_VERSION = '5.8.0' def export super if node_js_given_in_config? sh.export 'TRAVIS_NODE_VERSION', version, echo: false end end def setup super prepend_path './node_modules/.bin' convert_legacy_nodejs_config update_nvm nvm_install npm_disable_prefix npm_disable_spinner npm_disable_progress npm_disable_strict_ssl unless npm_strict_ssl? setup_npm_cache if use_npm_cache? 
install_yarn end def announce super if iojs_3_plus? sh.cmd 'echo -e "#include <array>\nstd::array<int, 1> arr = {0}; int main() {return 0;}" > /tmp/foo-$$.cpp', echo: false sh.raw "if ! ($CXX -std=c++11 -o /dev/null /tmp/foo-$$.cpp >&/dev/null || g++ -std=c++11 -o /dev/null /tmp/foo-$$.cpp >&/dev/null); then" sh.echo "Starting with io.js 3 and Node.js 4, building native extensions requires C++11-compatible compiler, which seems unavailable on this VM. Please read https://docs.travis-ci.com/user/languages/javascript-with-nodejs#Node.js-v4-(or-io.js-v3)-compiler-requirements.", ansi: :yellow sh.raw "fi" sh.cmd 'rm -f /tmp/foo-$$.cpp', echo: false end sh.cmd 'node --version' sh.cmd 'npm --version' sh.cmd 'nvm --version' sh.if "-f yarn.lock" do sh.cmd 'yarn --version' sh.cmd 'hash -d yarn', echo: false end end def install sh.if '-f package.json' do sh.if "-f yarn.lock" do sh.if yarn_req_not_met do npm_install config[:npm_args] end sh.else do sh.cmd "yarn", retry: true, fold: 'install' end end sh.else do npm_install config[:npm_args] end end end def script sh.if '-f package.json' do sh.if "-f yarn.lock" do sh.if yarn_req_not_met do sh.cmd 'npm test' end sh.else do sh.cmd 'yarn test' end end sh.else do sh.cmd 'npm test' end end sh.else do sh.cmd 'make test' end end def cache_slug super << '--node-' << version end def setup_cache if data.cache?(:yarn) sh.fold 'cache.yarn' do sh.echo '' directory_cache.add '$HOME/.cache/yarn' end end end def use_directory_cache? super || data.cache?(:yarn) end private def convert_legacy_nodejs_config # TODO deprecate :nodejs # some old projects use language: nodejs. MK. if config[:nodejs] && !config[:node_js] config[:node_js] = config[:nodejs] end end def node_js_given_in_config? !!config[:node_js] end def version @version ||= begin version = Array(config[:node_js]).first version == 0.1 ? '0.10' : version.to_s end end def nvm_install if node_js_given_in_config? 
use_nvm_version else use_nvm_default end end def use_nvm_default sh.if '-f .nvmrc' do sh.echo "Using nodejs version from .nvmrc", ansi: :yellow install_version '$(< .nvmrc)' end sh.else do install_version DEFAULT_VERSION end end def use_nvm_version install_version version end def install_version(ver) sh.fold "nvm.install" do sh.cmd "nvm install #{ver}", assert: false, timing: true sh.if '$? -ne 0' do sh.echo "Failed to install #{ver}. Remote repository may not be reachable.", ansi: :red sh.echo "Using locally available version #{ver}, if applicable." sh.cmd "nvm use #{ver}", assert: false, timing: false sh.if '$? -ne 0' do sh.echo "Unable to use #{ver}", ansi: :red sh.cmd "false", assert: true, echo: false, timing: false end end sh.export 'TRAVIS_NODE_VERSION', ver, echo: false end end def update_nvm return if app_host.empty? sh.echo "Updating nvm", ansi: :yellow, timing: false nvm_dir = "$HOME/.nvm" sh.raw "mkdir -p #{nvm_dir}" sh.raw "curl -s -o #{nvm_dir}/nvm.sh https://#{app_host}/files/nvm.sh".untaint, assert: false sh.raw "curl -s -o #{nvm_dir}/nvm-exec https://#{app_host}/files/nvm-exec".untaint, assert: false sh.raw "chmod 0755 #{nvm_dir}/nvm.sh #{nvm_dir}/nvm-exec", assert: true sh.raw "source #{nvm_dir}/nvm.sh", assert: false end def npm_disable_prefix sh.if "$(command -v sw_vers) && -f $HOME/.npmrc" do sh.cmd "npm config delete prefix" end end def npm_disable_spinner sh.cmd 'npm config set spin false', echo: false, timing: false end def npm_disable_strict_ssl # sh.echo '### Disabling strict SSL ###', ansi: :red sh.cmd 'echo "### Disabling strict SSL ###"' sh.cmd 'npm conf set strict-ssl false', echo: true end def npm_disable_progress sh.cmd "npm config set progress false", echo: false, timing: false end def npm_strict_ssl? !node_0_6? && !node_0_8? && !node_0_9? end def node_0_6? (config[:node_js] || '').to_s.split('.')[0..1] == %w(0 6) end def node_0_8? (config[:node_js] || '').to_s.split('.')[0..1] == %w(0 8) end def node_0_9? 
(config[:node_js] || '').to_s.split('.')[0..1] == %w(0 9) end def use_npm_cache? Array(config[:cache]).include?('npm') end def setup_npm_cache if data.hosts && data.hosts[:npm_cache] sh.cmd 'npm config set registry http://registry.npmjs.org/', timing: false sh.cmd "npm config set proxy #{data.hosts[:npm_cache]}", timing: false end end def iojs_3_plus? (config[:node_js] || '').to_s.split('.')[0].to_i >= 3 end def npm_install(args) sh.fold "install.npm" do sh.if "$(vers2int `npm -v`) -ge $(vers2int #{NPM_CI_CMD_VERSION})" do sh.if "-f npm-shrinkwrap.json -o -f package-lock.json" do sh.cmd "npm ci #{args}", retry: true else sh.cmd "npm install #{args}", retry: true end end sh.else do sh.cmd "npm install #{args}", retry: true end sh.if "$(vers2int `npm -v`) -gt $(vers2int #{NPM_QUIET_TREE_VERSION})" do sh.cmd "npm ls", echo: true, assert: false end end end def install_yarn sh.if "-f yarn.lock" do sh.if yarn_req_not_met do sh.echo "Node.js version $(node --version) does not meet requirement for yarn." \ " Please use Node.js #{YARN_REQUIRED_NODE_VERSION} or later.", ansi: :red npm_install config[:npm_args] end sh.else do sh.fold "install.yarn" do sh.if "-z \"$(command -v yarn)\"" do sh.if "-z \"$(command -v gpg)\"" do sh.export "YARN_GPG", "no" end sh.echo "Installing yarn", ansi: :green sh.cmd "curl -o- -L https://yarnpkg.com/install.sh | bash", echo: true, timing: true sh.echo "Setting up \\$PATH", ansi: :green sh.export "PATH", "$HOME/.yarn/bin:$PATH" end end end end end def prepend_path(path) sh.if "$(echo :$PATH: | grep -v :#{path}:)" do sh.export "PATH", "#{path}:$PATH", echo: true end end def yarn_req_not_met "$(vers2int $(echo `node --version` | tr -d 'v')) -lt $(vers2int #{YARN_REQUIRED_NODE_VERSION})" end end end end end
module TubeGetter module Crawler class Drtuber < Base def initialize(url) super @agent.user_agent = "Mozilla/5.0 (iPhone; CPU iPhone OS 5_1 like Mac OS X) AppleWebKit/534.46 (KHTML, like Gecko) Version/5.1 Mobile/9B179 Safari/7534.48.3" end def crawl uri.subdomain = 'm' doc = self.get(uri.to_s) puts "\n" + (doc / 'title').inner_text + "\n\n" video_url = uri.normalized_host + doc.at('.links3_big/a[data-link_type="mp4"]')['href'] puts video_url wget(video_url, temp_filename) puts `#{TubeGetter::Config.ffmpeg_path} #{TubeGetter::Config.ffmpeg_default_options} -i "#{temp_filename}" -vcodec copy -acodec copy "#{target_filename}"` if File.exist?(target_filename) && File.size(target_filename) > 0 `rm "#{temp_filename}"` end end def title @doc ||= self.get() end # ------------------------------------------------------------------------------------------------------------ def self.slug "drtuber" end def self.get_id_from_url(url) url.gsub(/.*\/video\/(\d+)\/.*/, "\\1") end end end end fixed drtuber getter module TubeGetter module Crawler class Drtuber < Base def crawl # uri.subdomain = 'm' doc = self.get(uri.to_s) puts "\n" + (doc / 'title').inner_text + "\n\n" video_url = doc.at('#hidden_html5_block/video/source')['src'] wget(video_url + '&play', target_filename) # puts `#{TubeGetter::Config.ffmpeg_path} #{TubeGetter::Config.ffmpeg_default_options} -i "#{temp_filename}" -vcodec copy -acodec copy "#{target_filename}"` # # if File.exist?(target_filename) && File.size(target_filename) > 0 # `rm "#{temp_filename}"` # end end def title @doc ||= self.get() end # ------------------------------------------------------------------------------------------------------------ def self.slug "drtuber" end def self.get_id_from_url(url) url.gsub(/.*\/video\/(\d+)\/.*/, "\\1") end end end end
require 'ui_bibz/ui/core/modal/components/modal_header'
require 'ui_bibz/ui/core/modal/components/modal_body'
require 'ui_bibz/ui/core/modal/components/modal_footer'
module UiBibz::Ui::Core

  # Create a modal
  #
  # This element is an extend of UiBibz::Ui::Core::Component.
  #
  # ==== Attributes
  #
  # * +content+ - Content of element
  # * +options+ - Options of element
  # * +html_options+ - Html Options of element
  #
  # ==== Options
  #
  # You can add HTML attributes using the +html_options+.
  # You can pass arguments in options attribute:
  # * +size+ - (+:sm+, +:lg+)
  # * +effect+ - CSS effect class added to the modal wrapper
  #
  # ==== Signatures
  #
  #   UiBibz::Ui::Core::Modal.new(options = nil, html_options = nil) do |m|
  #     m.header content, options, html_options, &block
  #     m.body content, options, html_options, &block
  #     m.footer content, options, html_options, &block
  #   end
  #
  # ==== Examples
  #
  #   UiBibz::Ui::Core::Modal.new({ glyph: { name: 'eye', size: 3 } }, { class: 'test' }) do |m|
  #     m.header 'Title'
  #     m.body 'Content'
  #     m.footer do
  #       button_link 'Ok', '#', class: :success
  #     end
  #   end.render
  #
  # ==== Helper
  #
  #   modal(options = {}, html_options = {}) do |m|
  #     m.header do
  #       'Title'
  #     end
  #     m.body do
  #       'Content'
  #     end
  #     m.footer do
  #       'Footer'
  #     end
  #   end
  #
  class Modal < Component

    # See UiBibz::Ui::Core::Component.initialize
    def initialize content = nil, options = nil, html_options = nil, &block
      super
    end

    # Render html tag
    def render
      content_tag :div, class_and_html_options(['modal', effect]) do
        # Join the classes explicitly so no trailing space is emitted when
        # the size option is absent (the old "modal-dialog #{ size }"
        # interpolation left one behind).
        content_tag :div, class: ['modal-dialog', size].compact.join(' '), role: 'document' do
          content_tag :div, class: 'modal-content' do
            concat @header
            concat @body
            concat @footer
          end
        end
      end
    end

    # Set the modal header component
    def header content = nil, options = nil, html_options = nil, &block
      @header = ModalHeader.new(content, options, html_options, &block).render
    end

    # Set the modal footer component
    def footer content = nil, options = nil, html_options = nil, &block
      @footer = ModalFooter.new(content, options, html_options, &block).render
    end

    # Set the modal body component
    def body content = nil, options = nil, html_options = nil, &block
      @body = ModalBody.new(content, options, html_options, &block).render
    end

    private

    # :lg or :sm mapped to Bootstrap's "modal-<size>" class; nil when unset
    def size
      "modal-#{@options[:size]}" if @options[:size]
    end

    # Optional CSS effect class; nil when unset. (The previous
    # `unless @options[:effect].nil?` guard was redundant — indexing a
    # missing key already yields nil.)
    def effect
      @options[:effect]
    end
  end
end
module Vcloud
  module Core
    # Orchestrates post-clone configuration of a single VM inside a vApp.
    class VmOrchestrator
      # Wrap the raw +vm+ entity belonging to +vapp+ in a Core::Vm.
      def initialize vm, vapp
        @vm = Vm.new(vm, vapp)
      end

      # Apply the supplied configuration hash to the wrapped VM:
      # name, networking, storage profile, hardware, extra disks,
      # metadata and the guest customization section.
      def customize(vm_config)
        # The VM takes the same name as its enclosing vApp.
        @vm.name = @vm.vapp_name

        @vm.configure_network_interfaces vm_config[:network_connections]

        storage_profile = vm_config[:storage_profile]
        @vm.update_storage_profile(storage_profile) if storage_profile

        hardware = vm_config[:hardware_config]
        if hardware
          @vm.update_cpu_count(hardware[:cpu])
          @vm.update_memory_size_in_mb(hardware[:memory])
        end

        @vm.add_extra_disks(vm_config[:extra_disks])
        @vm.update_metadata(vm_config[:metadata])
        @vm.configure_guest_customization_section(
          @vm.vapp_name,
          vm_config[:bootstrap],
          vm_config[:extra_disks]
        )
      end
    end
  end
end
#
# Jekyll Asset Bundler
#
# Author : Colin Kennedy
# Repo : http://github.com/moshen/jekyll-asset_bundler
# Version: 0.12
# License: MIT, see LICENSE file
#

require 'yaml'
require 'digest/md5'
require 'net/http'
require 'uri'
require 'fileutils' # used by Bundle#cache_dir and Bundle#write; do not rely on Jekyll loading it

module Jekyll

  # Liquid block tag: parses its body as a YAML array of asset paths/URLs
  # and renders markup for one Bundle per supported asset type.
  class BundleTag < Liquid::Block
    def initialize(tag_name, text, tokens)
      super
      @text  = text
      @files = {}
    end

    def render(context)
      src        = context.registers[:site].source
      raw_markup = super(context)

      begin
        # Some ugliness to work around the Block returning an array
        # in liquid <2.4.0
        # Note: Jekyll 1.0.x only require liquid 2.3
        @assets = YAML::load(raw_markup.kind_of?(Array) ? raw_markup.first : raw_markup)
      rescue
        puts <<-END
Asset Bundler - Error: Problem parsing a YAML bundle
#{raw_markup}
#{$!}
        END
      end

      if !@assets.kind_of?(Array)
        puts "Asset Bundler - Error: YAML bundle is not an Array\n#{raw_markup}"
        @assets = []
      end

      add_files_from_list(src, @assets)

      markup = ""
      @files.each {|k, v|
        markup.concat(Bundle.new(v, k, context).markup())
      }

      markup
    end

    # Queue every entry of +list+ that is an existing local file or a
    # remote URL; warn about anything else.
    def add_files_from_list(src, list)
      list.each {|a|
        path = File.join(src, a)
        if (File.basename(a) !~ /^\.+/ and File.file?(path)) or a =~ /^(https?:)?\/\//i
          add_file_by_type(a)
        else
          puts "Asset Bundler Error - File: #{path} not found, ignoring..."
        end
      }
    end

    # Group +file+ into @files keyed by its (supported) extension.
    def add_file_by_type(file)
      if file =~ /\.([^\.]+)$/
        type = $1.downcase()
        return if Bundle.supported_types.index(type).nil?
        if !@files.key?(type)
          @files[type] = []
        end
        @files[type].push(file)
      end
    end
  end

  # Variant of BundleTag whose list entries are shell-style globs
  # resolved relative to the site source.
  class BundleGlobTag < BundleTag
    def add_files_from_list(src, list)
      list.each {|a|
        Dir.glob(File.join(src, a)) {|f|
          if f !~ /^\.+/ and File.file?(f)
            add_file_by_type(f.sub(src, ''))
          end
        }
      }
    end
  end

  # Emits per-file asset markup only when dev mode is on; renders
  # nothing otherwise.
  class DevAssetsTag < BundleTag
    def render(context)
      if Bundle.config(context)['dev']
        super(context)
      else
        ''
      end
    end

    def add_files_from_list(src, list)
      list.each {|a|
        add_file_by_type(a)
      }
    end
  end

  class Bundle
    @@bundles = {}
    @@default_config = {
      'compile'  => { 'coffee' => false, 'less' => false },
      'compress' => { 'js' => false, 'css' => false },
      'base_path'      => '/bundles/',
      'server_url'     => '',
      'remove_bundled' => false,
      'dev'            => false,
      'markup_templates' => {
        'js'     => Liquid::Template.parse("<script type='text/javascript' src='{{url}}'></script>\n"),
        'coffee' => Liquid::Template.parse("<script type='text/coffeescript' src='{{url}}'></script>\n"),
        'css'    => Liquid::Template.parse("<link rel='stylesheet' type='text/css' href='{{url}}' />\n"),
        'less'   => Liquid::Template.parse("<link rel='stylesheet/less' type='text/css' href='{{url}}' />\n")
      }
    }
    @@current_config  = nil
    @@supported_types = ['js', 'css']

    attr_reader :content, :hash, :filename, :base

    def initialize(files, type, context)
      @files    = files
      @type     = type
      @context  = context
      @content  = ''
      @hash     = ''
      @filename = ''

      @config = Bundle.config(@context)
      @base   = @config['base_path']

      @filename_hash = Digest::MD5.hexdigest(@files.join())

      # Re-use an already-built bundle for an identical file list.
      if @@bundles.key?(@filename_hash)
        @filename = @@bundles[@filename_hash].filename
        @base     = @@bundles[@filename_hash].base
      else
        load_content()
      end
    end

    # Merge site configuration over the defaults; memoized in
    # @@current_config for the lifetime of the process.
    def self.config(context)
      if @@current_config.nil?
        ret_config = nil
        if context.registers[:site].config.key?("asset_bundler")
          ret_config = Utils.deep_merge_hashes(@@default_config, context.registers[:site].config["asset_bundler"])
          ret_config['markup_templates'].keys.each {|k|
            if !ret_config['markup_templates'][k].instance_of?(Liquid::Template)
              if ret_config['markup_templates'][k].instance_of?(String)
                ret_config['markup_templates'][k] = Liquid::Template.parse(ret_config['markup_templates'][k])
              else
                puts <<-END
Asset Bundler - Error: Problem parsing _config.yml
The value for configuration option: asset_bundler => markup_templates => #{k}
Is not recognized as a String for use as a valid template.
Reverting to the default template.
                END
                ret_config['markup_templates'][k] = @@default_config['markup_templates'][k]
              end
            end
          }
          if context.registers[:site].config['asset_bundler'].key?('cdn') and ret_config['server_url'].empty?
            ret_config['server_url'] = context.registers[:site].config['asset_bundler']['cdn']
          end
        else
          ret_config = @@default_config
        end

        # Check to make sure the base_path begins with a slash
        # This is to make sure that the path works with a potential base CDN url
        if ret_config['base_path'] !~ /^\//
          ret_config['base_path'].insert(0, '/')
        end

        if context.registers[:site].config.key?("dev")
          ret_config['dev'] = context.registers[:site].config["dev"] ? true : false
        end

        # Let's assume that when flag 'watch' or 'serving' is enabled, we want dev mode
        if context.registers[:site].config['serving'] || context.registers[:site].config['watch']
          ret_config['dev'] = true
        end

        @@current_config = ret_config
      end
      @@current_config
    end

    def self.supported_types
      @@supported_types
    end

    # Concatenate (and Liquid-render) all source files, compress if
    # configured, cache the result, and register self as a
    # pseudo-StaticFile on the site so Jekyll writes it out.
    def load_content()
      if @config['dev']
        @@bundles[@filename_hash] = self
        return
      end

      src = @context.registers[:site].source
      @files.each {|f|
        if f =~ /^(https?:)?\/\//i
          # Make all requests via http
          f = "http:#{f}" if !$1
          f.sub!( /^https/i, "http" ) if $1 =~ /^https/i
          @content.concat(remote_asset_cache(URI(f)))
        else
          # Load file from path and render it if it contains tags
          # Extract the path parts
          f = File.split(f)
          # Render the page path file
          page = Page.new(@context.registers[:site], src, f[0], f[1])
          page.render(@context.registers[:site].layouts, @context.registers[:site].site_payload())
          @content.concat(page.output)
        end
      }

      @hash     = Digest::MD5.hexdigest(@content)
      @filename = "#{@hash}.#{@type}"

      cache_file = File.join(cache_dir(), @filename)
      if File.readable?(cache_file) and @config['compress'][@type]
        @content = File.read(cache_file)
      elsif @config['compress'][@type]
        # TODO: Compilation of Less and CoffeeScript would go here
        compress()
        File.open(cache_file, "w") {|f|
          f.write(@content)
        }
      end

      @context.registers[:site].static_files.push(self)
      remove_bundled() if @config['remove_bundled']
      @@bundles[@filename_hash] = self
    end

    # Plugin-relative cache directory, created on first use.
    def cache_dir()
      cache_dir = File.expand_path(
        "../_asset_bundler_cache",
        @context.registers[:site].plugins.first
      )
      if( !File.directory?(cache_dir) )
        FileUtils.mkdir_p(cache_dir)
      end
      cache_dir
    end

    # Fetch +uri+ over HTTP, caching the response body on disk keyed by
    # the URL's MD5.
    def remote_asset_cache(uri)
      cache_file = File.join(cache_dir(), "remote.#{Digest::MD5.hexdigest(uri.to_s)}.#{@type}")
      content = ""
      if File.readable?(cache_file)
        content = File.read(cache_file)
      else
        begin
          puts "Asset Bundler - Downloading: #{uri.to_s}"
          content = Net::HTTP.get(uri)
          File.open(cache_file, "w") {|f|
            f.write( content )
          }
        rescue
          # Bug fix: this message previously interpolated `f`, which is not
          # defined in this method (block-locals don't escape), so the
          # rescue itself raised NameError. Report the URI instead.
          puts "Asset Bundler - Error: There was a problem downloading #{uri}\n #{$!}"
        end
      end
      return content
    end

    # Removes StaticFiles from the _site if they are bundled
    # and the remove_bundled option is true
    # which... it isn't by default
    def remove_bundled()
      src = @context.registers[:site].source
      @files.each {|f|
        @context.registers[:site].static_files.select! {|s|
          if s.instance_of?(StaticFile)
            s.path != File.join(src, f)
          else
            true
          end
        }
      }
    end

    def compress()
      return if @config['dev']
      case @config['compress'][@type]
      when 'yui'
        compress_yui()
      when 'closure'
        compress_closure()
      else
        compress_command()
      end
    end

    # Run a user-supplied external compressor. ":infile"/":outfile"
    # placeholders in the command are replaced with temp-file paths;
    # otherwise content is piped through stdin/stdout.
    def compress_command()
      temp_path  = cache_dir()
      command    = String.new(@config['compress'][@type])
      infile     = false
      outfile    = false
      used_files = []

      if command =~ /:infile/
        # (Fixed: the old `File.open(..., mode="w")` needlessly assigned a
        # throwaway local; the plain mode string is equivalent.)
        File.open(File.join(temp_path, "infile.#{@filename_hash}.#{@type}"), "w") {|f|
          f.write(@content)
          used_files.push( f.path )
          infile = f.path
        }
        command.sub!( /:infile/, "\"#{infile.gsub(File::SEPARATOR, File::ALT_SEPARATOR || File::SEPARATOR)}\"")
      end

      if command =~ /:outfile/
        outfile = File.join(temp_path, "outfile.#{@filename_hash}.#{@type}")
        used_files.push( outfile )
        command.sub!( /:outfile/, "\"#{outfile.gsub(File::SEPARATOR, File::ALT_SEPARATOR || File::SEPARATOR)}\"")
      end

      if infile and outfile
        `#{command}`
      else
        mode = "r"
        mode = "r+" if !infile
        IO.popen(command, mode) {|i|
          if !infile
            i.puts(@content)
            i.close_write()
          end
          if !outfile
            @content = ""
            i.each {|line|
              @content << line
            }
          end
        }
      end

      if outfile
        @content = File.read( outfile )
      end

      used_files.each {|f| File.unlink( f ) }
    end

    def compress_yui()
      require 'yui/compressor'
      case @type
      when 'js'
        @content = YUI::JavaScriptCompressor.new.compress(@content)
      when 'css'
        @content = YUI::CssCompressor.new.compress(@content)
      end
    end

    def compress_closure()
      require 'closure-compiler'
      case @type
      when 'js'
        @content = Closure::Compiler.new.compile(@content)
      end
    end

    def markup()
      return dev_markup() if @config['dev']
      @config['markup_templates'][@type].render(
        'url' => "#{@config['server_url']}#{@base}#{@filename}"
      )
    end

    def dev_markup()
      output = ''
      @files.each {|f|
        output.concat( @config['markup_templates'][@type].render('url' => "#{f}") )
      }
      return output
    end

    # Methods required by Jekyll::Site to write out the bundle
    # This is where we give Jekyll::Bundle a Jekyll::StaticFile
    # duck call and send it on its way.
    def destination(dest)
      File.join(dest, @base, @filename)
    end

    def write(dest)
      dest_path = destination(dest)
      # File.exist? replaces the deprecated File.exists? (removed in Ruby 3.2).
      return false if File.exist?(dest_path)

      FileUtils.mkdir_p(File.dirname(dest_path))
      File.open(dest_path, "w") {|o|
        o.write(@content)
      }

      true
    end
    # End o' the duck call
  end
end

Liquid::Template.register_tag('bundle'     , Jekyll::BundleTag    )
Liquid::Template.register_tag('bundle_glob', Jekyll::BundleGlobTag)
Liquid::Template.register_tag('dev_assets' , Jekyll::DevAssetsTag )
# Generated by jeweler
# DO NOT EDIT THIS FILE DIRECTLY
# Instead, edit Jeweler::Tasks in Rakefile, and run 'rake gemspec'
# -*- encoding: utf-8 -*-

Gem::Specification.new do |s|
  s.name = "kookaburra"
  s.version = "0.22.2"

  s.required_rubygems_version = Gem::Requirement.new(">= 0") if s.respond_to? :required_rubygems_version=
  s.authors = ["John Wilger", "Sam Livingston-Gray", "Ravi Gadad"]
  s.date = "2012-03-26"
  s.description = "Cucumber + Capybara = Kookaburra? It made sense at the time."
  s.email = "johnwilger@gmail.com"
  s.extra_rdoc_files = [
    "LICENSE.txt",
    "README.markdown"
  ]
  s.files = [
    ".document",
    ".rspec",
    ".rvmrc",
    ".yardopts",
    "Gemfile",
    "Gemfile.lock",
    "LICENSE.txt",
    "README.markdown",
    "Rakefile",
    "VERSION",
    "kookaburra.gemspec",
    "lib/kookaburra.rb",
    "lib/kookaburra/api_driver.rb",
    "lib/kookaburra/assertion.rb",
    "lib/kookaburra/configuration.rb",
    "lib/kookaburra/dependency_accessor.rb",
    "lib/kookaburra/exceptions.rb",
    "lib/kookaburra/given_driver.rb",
    "lib/kookaburra/json_api_driver.rb",
    "lib/kookaburra/mental_model.rb",
    "lib/kookaburra/test_helpers.rb",
    "lib/kookaburra/ui_driver.rb",
    "lib/kookaburra/ui_driver/ui_component.rb",
    "lib/kookaburra/ui_driver/ui_component/address_bar.rb",
    "spec/integration/test_a_rack_application_spec.rb",
    "spec/kookaburra/api_driver_spec.rb",
    "spec/kookaburra/configuration_spec.rb",
    "spec/kookaburra/json_api_driver_spec.rb",
    "spec/kookaburra/mental_model_spec.rb",
    "spec/kookaburra/ui_driver/ui_component/address_bar_spec.rb",
    "spec/kookaburra/ui_driver/ui_component_spec.rb",
    "spec/kookaburra/ui_driver_spec.rb",
    "spec/kookaburra_spec.rb",
    "spec/support/shared_examples/it_can_make_assertions.rb",
    "spec/support/shared_examples/it_has_a_dependency_accessor.rb"
  ]
  s.homepage = "http://github.com/projectdx/kookaburra"
  s.licenses = ["MIT"]
  s.require_paths = ["lib"]
  s.rubygems_version = "1.8.17"
  s.summary = "WindowDriver testing pattern for Ruby apps"

  # Declare each dependency exactly once; the legacy branches below only
  # differ in which add_* API the running RubyGems supports, so iterating
  # the same lists removes the previous triplicated declarations.
  runtime_deps = [
    ["i18n", ">= 0"],
    ["activesupport", ">= 3.0"],
    ["patron", ">= 0"]
  ]
  development_deps = [
    ["rspec", ">= 0"],
    ["capybara", ">= 0"],
    ["yard", ">= 0"],
    ["redcarpet", "~> 1.0"],
    ["jeweler", ">= 0"],
    ["rcov", ">= 0"],
    ["reek", ">= 0"],
    ["sinatra", ">= 0"]
  ]

  if s.respond_to? :specification_version then
    s.specification_version = 3

    if Gem::Version.new(Gem::VERSION) >= Gem::Version.new('1.2.0') then
      runtime_deps.each {|name, req| s.add_runtime_dependency(name, [req]) }
      development_deps.each {|name, req| s.add_development_dependency(name, [req]) }
    else
      # RubyGems < 1.2 has no runtime/development distinction.
      (runtime_deps + development_deps).each {|name, req| s.add_dependency(name, [req]) }
    end
  else
    (runtime_deps + development_deps).each {|name, req| s.add_dependency(name, [req]) }
  end
end
module Zeamays
  class Corn
    class Fruiter
      # Length-prefix helpers: each method returns +s+ preceded by its
      # byte length packed as a fixed-width unsigned big-endian integer.
      module String
        # Prefix +s+ with its byte length as an unsigned 8-bit integer.
        # Bug fix: the original computed `2.bytesize`, but Integer has no
        # #bytesize in plain Ruby (NoMethodError); the intended value is
        # the payload's byte length.
        def self.fruit_short_string(s)
          [s.bytesize].pack("C") + s
        end

        # Prefix +s+ with its byte length as a 16-bit big-endian integer.
        def self.fruit_medium_string(s)
          [s.bytesize].pack("n") + s
        end

        # Prefix +s+ with its byte length as a 32-bit big-endian integer.
        def self.fruit_large_string(s)
          [s.bytesize].pack("N") + s
        end
      end
    end
  end
end
# Copyright 2017 Google, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

require_relative "../video_samples"
require "rspec"
require "tempfile"
require "net/http"
require "uri"

describe "Google Cloud Video API sample" do
  let(:faces_file)       { "demomaker/google_gmail.mp4" }
  let(:labels_file)      { "demomaker/cat.mp4" }
  let(:shots_file)       { "demomaker/gbikes_dinosaur.mp4" }
  let(:safe_search_file) { "demomaker/pizza.mp4" }

  it "can analyze labels from a gcs file" do
    expect {
      analyze_labels_gcs path: "gs://#{labels_file}"
    }.to output(/Label description: Animal/).to_stdout
  end

  it "can analyze labels from a local file" do
    local_tempfile = Tempfile.new "temp_video"
    begin
      # Download the fixture video into the tempfile before analyzing it.
      File.open local_tempfile.path, "w" do |file|
        file.write Net::HTTP.get(URI("http://storage.googleapis.com/#{labels_file}"))
        file.flush
      end
      expect {
        analyze_labels_local path: local_tempfile.path
      }.to output(/Label description: Animal/).to_stdout
    ensure
      local_tempfile.close
      local_tempfile.unlink
    end
  end

  it "can analyze faces from a gcs file" do
    # NOTE(review): analyzes labels_file rather than faces_file, matching
    # the original spec — confirm whether that was intended.
    expect {
      analyze_faces path: "gs://#{labels_file}"
    }.to output(/Thumbnail size:/).to_stdout
  end

  it "can analyze safe search from a gcs file" do
    expect {
      analyze_safe_search path: "gs://#{safe_search_file}"
    }.to output(/adult: VERY_UNLIKELY/).to_stdout
  end

  it "can analyze shots from a gcs file" do
    expect {
      analyze_shots path: "gs://#{shots_file}"
    }.to output(/0.0 through 5/).to_stdout
  end
end
require 'chef/provider/lwrp_base' require_relative 'helpers' class Chef::Provider class SystemdUnit < Chef::Provider::LWRPBase use_inline_resources def whyrun_supported? true end provides :systemd_unit Systemd::Helpers.unit_types.each do |unit_type| provides "systemd_#{unit_type}".to_sym end action :create do unit_path = Systemd::Helpers.unit_path(new_resource) directory Systemd::Helpers.drop_in_root(new_resource) do only_if { new_resource.drop_in } end execute 'reload-sytemd' do command 'systemctl daemon-reload' action :nothing subscribes :run, "file[#{unit_path}]", :immediately end file unit_path do content Systemd::Helpers.ini_config(new_resource.to_hash) action :create end end action :delete do unit_path = Systemd::Helpers.unit_path(new_resource) execute 'reload-sytemd' do command 'systemctl daemon-reload' action :nothing subscribes :run, "file[#{unit_path}]", :immediately end file unit_path do action :delete end end end end enforce setting an override if unit is a drop-in require 'chef/provider/lwrp_base' require_relative 'helpers' class Chef::Provider class SystemdUnit < Chef::Provider::LWRPBase use_inline_resources def whyrun_supported? true end provides :systemd_unit Systemd::Helpers.unit_types.each do |unit_type| provides "systemd_#{unit_type}".to_sym end action :create do unit_path = Systemd::Helpers.unit_path(new_resource) if new_resource.drop_in && new_resource.override.nil? fail( Chef::Exceptions::ValidationFailed, 'Required argument override is missing!' 
) end directory Systemd::Helpers.drop_in_root(new_resource) do only_if { new_resource.drop_in } end execute 'reload-sytemd' do command 'systemctl daemon-reload' action :nothing subscribes :run, "file[#{unit_path}]", :immediately end file unit_path do content Systemd::Helpers.ini_config(new_resource.to_hash) action :create end end action :delete do unit_path = Systemd::Helpers.unit_path(new_resource) execute 'reload-sytemd' do command 'systemctl daemon-reload' action :nothing subscribes :run, "file[#{unit_path}]", :immediately end file unit_path do action :delete end end end end
require 'spec_helper' require 'stringio' describe RSpec::Mate::Runner do before(:each) do # TODO: long path @first_failing_spec = /fixtures\/example_failing_spec\.rb&line=3/n @second_failing_spec = /fixtures\/example_failing_spec\.rb&line=7/n set_env load File.expand_path( # TODO: long path "#{File.dirname(__FILE__)}/../../../lib/rspec/mate.rb" ) @spec_mate = RSpec::Mate::Runner.new @test_runner_io = StringIO.new end after(:each) do set_env $".delete_if do |path| path =~ /example_failing_spec\.rb/ end end describe "#run" do it "shows a nicely formatted error when there's an uncaught exception" do ENV['TM_FILEPATH'] = fixtures_path('example_syntax_error_spec.rb') @spec_mate.run_file(@test_runner_io) @test_runner_io.rewind html = @test_runner_io.read html.should =~ /Uncaught Exception/ html.should_not =~ /^ .%<.*$/ end it "shows standard error output nicely in a PRE block" do ENV['TM_FILEPATH'] = fixtures_path('example_stderr_spec.rb') @spec_mate.run_file(@test_runner_io) @test_runner_io.rewind html = @test_runner_io.read html.should =~ /#{Regexp.escape("<h2>stderr:</h2><pre>2 + 2 = 4\n4 &lt; 8\n</pre>")}/ end end describe "#run_file" do it "runs whole file when only file specified" do ENV['TM_FILEPATH'] = fixtures_path('example_failing_spec.rb') @spec_mate.run_file(@test_runner_io) @test_runner_io.rewind html = @test_runner_io.read html.should =~ @first_failing_spec html.should =~ @second_failing_spec end end describe "#run_files" do it "runs all selected files" do fixtures = [ 'example_failing_spec.rb', 'example_passing_spec.rb' ] # TODO: adjust fixtures_path to take an array ENV['TM_SELECTED_FILES'] = fixtures.map do |fixture| "'#{fixtures_path(fixture)}'" end.join(" ") @spec_mate.run_files(@test_runner_io) @test_runner_io.rewind html = @test_runner_io.read html.should =~ @first_failing_spec html.should =~ @second_failing_spec html.should =~ /should pass/ html.should =~ /should pass too/ end end describe "#run_last_remembered_file" do it "runs all of the selected 
files" do @spec_mate.save_as_last_remembered_file fixtures_path('example_failing_spec.rb') @spec_mate.run_last_remembered_file(@test_runner_io) @test_runner_io.rewind html = @test_runner_io.read html.should =~ @first_failing_spec end end describe "#run_focused" do it "runs first spec when file and line 4 specified" do ENV['TM_FILEPATH'] = fixtures_path('example_failing_spec.rb') ENV['TM_LINE_NUMBER'] = '4' @spec_mate.run_focussed(@test_runner_io) @test_runner_io.rewind html = @test_runner_io.read html.should =~ @first_failing_spec html.should_not =~ @second_failing_spec end it "runs first spec when file and line 8 specified" do ENV['TM_FILEPATH'] = fixtures_path('example_failing_spec.rb') ENV['TM_LINE_NUMBER'] = '8' @spec_mate.run_focussed(@test_runner_io) @test_runner_io.rewind html = @test_runner_io.read html.should_not =~ @first_failing_spec html.should =~ @second_failing_spec end end describe "error cases" do it "raises an exception when TM_PROJECT_DIRECTORY points to bad location" do ENV['TM_PROJECT_DIRECTORY'] = __FILE__ # bad on purpose lambda do # TODO: long path load File.dirname(__FILE__) + '/../../../lib/rspec/mate.rb' end.should_not raise_error end it "raises an exception when TM_RSPEC_HOME points to bad location" do ENV['TM_RSPEC_HOME'] = __FILE__ # bad on purpose lambda do load File.dirname(__FILE__) + '/../lib/rspec_mate.rb' end.should raise_error end end describe "alternative formatter" do it "adds a custom formatter to the command if TM_RSPEC_FORMATTER is set" do ENV['TM_RSPEC_FORMATTER'] = 'RSpec::Core::Formatters::BaseTextFormatter' ENV['TM_FILEPATH'] = fixtures_path('example_failing_spec.rb') @spec_mate.run_file(@test_runner_io) @test_runner_io.rewind text = @test_runner_io.read text.should =~ /1\) An example failing spec should fail/ text.should =~ /2\) An example failing spec should also fail/ end end private def fixtures_path(fixture) # TODO: long path fixtures_path = File.expand_path( File.dirname(__FILE__) ) + '/../../../fixtures' 
File.join(fixtures_path, fixture) end def set_env # TODO: long path root = File.expand_path('../../../../../../rspec-core', __FILE__) ENV['TM_FILEPATH'] = nil ENV['TM_LINE_NUMBER'] = nil ENV['TM_PROJECT_DIRECTORY'] = File.expand_path(File.dirname(__FILE__)) ENV['TM_RSPEC_HOME'] = "#{root}" end end Fix spec description. require 'spec_helper' require 'stringio' describe RSpec::Mate::Runner do before(:each) do # TODO: long path @first_failing_spec = /fixtures\/example_failing_spec\.rb&line=3/n @second_failing_spec = /fixtures\/example_failing_spec\.rb&line=7/n set_env load File.expand_path( # TODO: long path "#{File.dirname(__FILE__)}/../../../lib/rspec/mate.rb" ) @spec_mate = RSpec::Mate::Runner.new @test_runner_io = StringIO.new end after(:each) do set_env $".delete_if do |path| path =~ /example_failing_spec\.rb/ end end describe "#run" do it "shows a nicely formatted error when there's an uncaught exception" do ENV['TM_FILEPATH'] = fixtures_path('example_syntax_error_spec.rb') @spec_mate.run_file(@test_runner_io) @test_runner_io.rewind html = @test_runner_io.read html.should =~ /Uncaught Exception/ html.should_not =~ /^ .%<.*$/ end it "shows standard error output nicely in a PRE block" do ENV['TM_FILEPATH'] = fixtures_path('example_stderr_spec.rb') @spec_mate.run_file(@test_runner_io) @test_runner_io.rewind html = @test_runner_io.read html.should =~ /#{Regexp.escape("<h2>stderr:</h2><pre>2 + 2 = 4\n4 &lt; 8\n</pre>")}/ end end describe "#run_file" do it "runs whole file when only file specified" do ENV['TM_FILEPATH'] = fixtures_path('example_failing_spec.rb') @spec_mate.run_file(@test_runner_io) @test_runner_io.rewind html = @test_runner_io.read html.should =~ @first_failing_spec html.should =~ @second_failing_spec end end describe "#run_files" do it "runs all selected files" do fixtures = [ 'example_failing_spec.rb', 'example_passing_spec.rb' ] # TODO: adjust fixtures_path to take an array ENV['TM_SELECTED_FILES'] = fixtures.map do |fixture| 
"'#{fixtures_path(fixture)}'" end.join(" ") @spec_mate.run_files(@test_runner_io) @test_runner_io.rewind html = @test_runner_io.read html.should =~ @first_failing_spec html.should =~ @second_failing_spec html.should =~ /should pass/ html.should =~ /should pass too/ end end describe "#run_last_remembered_file" do it "runs all of the selected files" do @spec_mate.save_as_last_remembered_file fixtures_path('example_failing_spec.rb') @spec_mate.run_last_remembered_file(@test_runner_io) @test_runner_io.rewind html = @test_runner_io.read html.should =~ @first_failing_spec end end describe "#run_focused" do it "runs first spec when file and line 4 specified" do ENV['TM_FILEPATH'] = fixtures_path('example_failing_spec.rb') ENV['TM_LINE_NUMBER'] = '4' @spec_mate.run_focussed(@test_runner_io) @test_runner_io.rewind html = @test_runner_io.read html.should =~ @first_failing_spec html.should_not =~ @second_failing_spec end it "runs second spec when file and line 8 specified" do ENV['TM_FILEPATH'] = fixtures_path('example_failing_spec.rb') ENV['TM_LINE_NUMBER'] = '8' @spec_mate.run_focussed(@test_runner_io) @test_runner_io.rewind html = @test_runner_io.read html.should_not =~ @first_failing_spec html.should =~ @second_failing_spec end end describe "error cases" do it "raises an exception when TM_PROJECT_DIRECTORY points to bad location" do ENV['TM_PROJECT_DIRECTORY'] = __FILE__ # bad on purpose lambda do # TODO: long path load File.dirname(__FILE__) + '/../../../lib/rspec/mate.rb' end.should_not raise_error end it "raises an exception when TM_RSPEC_HOME points to bad location" do ENV['TM_RSPEC_HOME'] = __FILE__ # bad on purpose lambda do load File.dirname(__FILE__) + '/../lib/rspec_mate.rb' end.should raise_error end end describe "alternative formatter" do it "adds a custom formatter to the command if TM_RSPEC_FORMATTER is set" do ENV['TM_RSPEC_FORMATTER'] = 'RSpec::Core::Formatters::BaseTextFormatter' ENV['TM_FILEPATH'] = fixtures_path('example_failing_spec.rb') 
@spec_mate.run_file(@test_runner_io) @test_runner_io.rewind text = @test_runner_io.read text.should =~ /1\) An example failing spec should fail/ text.should =~ /2\) An example failing spec should also fail/ end end private def fixtures_path(fixture) # TODO: long path fixtures_path = File.expand_path( File.dirname(__FILE__) ) + '/../../../fixtures' File.join(fixtures_path, fixture) end def set_env # TODO: long path root = File.expand_path('../../../../../../rspec-core', __FILE__) ENV['TM_FILEPATH'] = nil ENV['TM_LINE_NUMBER'] = nil ENV['TM_PROJECT_DIRECTORY'] = File.expand_path(File.dirname(__FILE__)) ENV['TM_RSPEC_HOME'] = "#{root}" end end
Vmdb::Application.configure do # Settings specified here will take precedence over those in config/application.rb # The test environment is used exclusively to run your application's # test suite. You never need to work with it otherwise. Remember that # your test database is "scratch space" for the test suite and is wiped # and recreated between test runs. Don't rely on the data there! config.cache_classes = true config.eager_load = false # Configure static asset server for tests with Cache-Control for performance config.serve_static_files = true config.static_cache_control = "public, max-age=3600" # Log error messages when you accidentally call methods on nil config.whiny_nils = true # Show full error reports and disable caching config.consider_all_requests_local = true config.action_controller.perform_caching = false # Raise exceptions instead of rendering exception templates config.action_dispatch.show_exceptions = true # Disable request forgery protection in test environment config.action_controller.allow_forgery_protection = false # Tell Action Mailer not to deliver emails to the real world. # The :test delivery method accumulates sent emails in the # ActionMailer::Base.deliveries array. config.action_mailer.delivery_method = :test # Raise exception on mass assignment protection for Active Record models # TODO: Fix our code to abide by Rails mass_assignment protection: # http://jonathanleighton.com/articles/2011/mass-assignment-security-shouldnt-happen-in-the-model/ # config.active_record.mass_assignment_sanitizer = :strict # Print deprecation notices to the stderr config.active_support.deprecation = :stderr # Customize any additional options below... 
# Do not include all helpers for all views config.action_controller.include_all_helpers = false config.secret_key_base = SecureRandom.random_bytes(32) config.secret_token = SecureRandom.random_bytes(32) end require "minitest" require "shoulda-matchers" require "factory_girl" require "timecop" require "vcr" require "webmock/rspec" require "capybara" Ignore a particular deprecation message from rspec-rails We know about it, and we'll make sure to upgrade rspec-rails before we try to move to Rails 5.0. But that's a substantial update; nagging tests aren't going to make it happen any faster. Vmdb::Application.configure do # Settings specified here will take precedence over those in config/application.rb # The test environment is used exclusively to run your application's # test suite. You never need to work with it otherwise. Remember that # your test database is "scratch space" for the test suite and is wiped # and recreated between test runs. Don't rely on the data there! config.cache_classes = true config.eager_load = false # Configure static asset server for tests with Cache-Control for performance config.serve_static_files = true config.static_cache_control = "public, max-age=3600" # Log error messages when you accidentally call methods on nil config.whiny_nils = true # Show full error reports and disable caching config.consider_all_requests_local = true config.action_controller.perform_caching = false # Raise exceptions instead of rendering exception templates config.action_dispatch.show_exceptions = true # Disable request forgery protection in test environment config.action_controller.allow_forgery_protection = false # Tell Action Mailer not to deliver emails to the real world. # The :test delivery method accumulates sent emails in the # ActionMailer::Base.deliveries array. 
config.action_mailer.delivery_method = :test # Raise exception on mass assignment protection for Active Record models # TODO: Fix our code to abide by Rails mass_assignment protection: # http://jonathanleighton.com/articles/2011/mass-assignment-security-shouldnt-happen-in-the-model/ # config.active_record.mass_assignment_sanitizer = :strict # Print deprecation notices to the stderr config.active_support.deprecation = lambda do |message, callstack| unless message =~ /named_routes\.helpers.*rspec-rails.*controller_example_group/ ActiveSupport::Deprecation::DEFAULT_BEHAVIORS[:stderr].call(message, callstack) end end # Customize any additional options below... # Do not include all helpers for all views config.action_controller.include_all_helpers = false config.secret_key_base = SecureRandom.random_bytes(32) config.secret_token = SecureRandom.random_bytes(32) end require "minitest" require "shoulda-matchers" require "factory_girl" require "timecop" require "vcr" require "webmock/rspec" require "capybara"
# Try to include the rails initializer. If this isn't in a gem, this will fail. if Refinery.is_a_gem begin require 'initializer' rescue LoadError => load_error # we don't need to do anything. puts "*** RefineryCMS gem load failed, attempting to load traditionally... ***" end end module Refinery class Configuration < Rails::Configuration def default_plugin_paths paths = super.push(Refinery.root.join("vendor", "plugins").to_s).uniq end end class PluginLoader < Rails::Plugin::Loader def add_plugin_load_paths # call Rails' add_plugin_load_paths super # add plugin lib paths to the $LOAD_PATH so that rake tasks etc. can be run when using a gem for refinery or gems for plugins. search_for = Regexp.new(Refinery.root.join("vendor", "plugins", ".+?", "lib").to_s) # find all the plugin paths paths = plugins.collect{ |plugin| plugin.load_paths }.flatten # just use lib paths from Refinery engines paths = paths.reject{|path| path.scan(search_for).empty? or path.include?('/rails-') } # reject Refinery lib paths if they're already included in this app. paths = paths.reject{ |path| path.include?(Refinery.root.to_s) } unless Refinery.is_a_gem paths.uniq! ($refinery_gem_plugin_lib_paths = paths).each do |path| $LOAD_PATH.unshift path end $LOAD_PATH.uniq! end end class Initializer < Rails::Initializer def self.run(command = :process, configuration = Configuration.new) # Set up configuration that is rather specific to Refinery. (some plugins require on other more 'core' plugins). # We do make sure we check that things haven't already been set in the application. configuration.plugin_loader = Refinery::PluginLoader unless configuration.plugin_loader != Rails::Plugin::Loader configuration.plugins = [ :acts_as_indexed, :authlogic, :friendly_id, :will_paginate :all ] if configuration.plugins.nil? # Pass our configuration along to Rails. Rails.configuration = configuration # call Rails' run super # Create deprecations for variables that we've stopped using (possibly remove in 1.0?) 
require 'refinery/deprecations' end end end fix missing comma, oops. # Try to include the rails initializer. If this isn't in a gem, this will fail. if Refinery.is_a_gem begin require 'initializer' rescue LoadError => load_error # we don't need to do anything. puts "*** RefineryCMS gem load failed, attempting to load traditionally... ***" end end module Refinery class Configuration < Rails::Configuration def default_plugin_paths paths = super.push(Refinery.root.join("vendor", "plugins").to_s).uniq end end class PluginLoader < Rails::Plugin::Loader def add_plugin_load_paths # call Rails' add_plugin_load_paths super # add plugin lib paths to the $LOAD_PATH so that rake tasks etc. can be run when using a gem for refinery or gems for plugins. search_for = Regexp.new(Refinery.root.join("vendor", "plugins", ".+?", "lib").to_s) # find all the plugin paths paths = plugins.collect{ |plugin| plugin.load_paths }.flatten # just use lib paths from Refinery engines paths = paths.reject{|path| path.scan(search_for).empty? or path.include?('/rails-') } # reject Refinery lib paths if they're already included in this app. paths = paths.reject{ |path| path.include?(Refinery.root.to_s) } unless Refinery.is_a_gem paths.uniq! ($refinery_gem_plugin_lib_paths = paths).each do |path| $LOAD_PATH.unshift path end $LOAD_PATH.uniq! end end class Initializer < Rails::Initializer def self.run(command = :process, configuration = Configuration.new) # Set up configuration that is rather specific to Refinery. (some plugins require on other more 'core' plugins). # We do make sure we check that things haven't already been set in the application. configuration.plugin_loader = Refinery::PluginLoader unless configuration.plugin_loader != Rails::Plugin::Loader configuration.plugins = [ :acts_as_indexed, :authlogic, :friendly_id, :will_paginate, :all ] if configuration.plugins.nil? # Pass our configuration along to Rails. 
Rails.configuration = configuration # call Rails' run super # Create deprecations for variables that we've stopped using (possibly remove in 1.0?) require 'refinery/deprecations' end end end
# coding: utf-8 lib = File.expand_path("lib", __dir__) $LOAD_PATH.unshift(lib) unless $LOAD_PATH.include?(lib) require "jekyll-import/version" Gem::Specification.new do |s| s.specification_version = 2 if s.respond_to? :specification_version= s.required_rubygems_version = Gem::Requirement.new(">= 0") if s.respond_to? :required_rubygems_version= s.rubygems_version = "2.2.2" s.required_ruby_version = ">= 2.1" s.name = "jekyll-import" s.version = JekyllImport::VERSION s.license = "MIT" s.summary = "Import command for Jekyll (static site generator)." s.description = "Provides the Import command for Jekyll." s.authors = ["Tom Preston-Werner"] s.email = "tom@mojombo.com" s.homepage = "http://github.com/jekyll/jekyll-import" s.files = `git ls-files`.split($INPUT_RECORD_SEPARATOR).grep(%r!^lib/!) s.require_paths = %w(lib) s.rdoc_options = ["--charset=UTF-8"] s.extra_rdoc_files = %w(README.markdown LICENSE) # runtime dependencies s.add_runtime_dependency("jekyll", ENV["JEKYLL_VERSION"] ? "~> #{ENV["JEKYLL_VERSION"]}" : ">= 1.4") s.add_runtime_dependency("fastercsv") s.add_runtime_dependency("nokogiri") # development dependencies s.add_development_dependency("rake", "~> 10.1.0") s.add_development_dependency("rdoc", "~> 4.0.0") s.add_development_dependency("activesupport", "~> 4.2") # test dependencies: s.add_development_dependency("redgreen", "~> 1.2") s.add_development_dependency("shoulda", "~> 3.5") s.add_development_dependency("rr", "~> 1.0") s.add_development_dependency("simplecov", "~> 0.7") s.add_development_dependency("simplecov-gem-adapter", "~> 1.0.1") s.add_development_dependency("rubocop", "0.51") # migrator dependencies: s.add_development_dependency("sequel", "~> 3.42") s.add_development_dependency("htmlentities", "~> 4.3") s.add_development_dependency("hpricot", "~> 0.8") s.add_development_dependency("pg", "~> 0.12") s.add_development_dependency("mysql2", "~> 0.3") s.add_development_dependency("sqlite3", "~> 1.3.13") s.add_development_dependency("behance", "~> 
0.3") s.add_development_dependency("unidecode") s.add_development_dependency("open_uri_redirections") s.add_development_dependency("reverse_markdown") # site dependencies: s.add_development_dependency("launchy", "~> 2.4") end Lint with Rubocop # frozen_string_literal: true lib = File.expand_path("lib", __dir__) $LOAD_PATH.unshift(lib) unless $LOAD_PATH.include?(lib) require "jekyll-import/version" Gem::Specification.new do |s| s.specification_version = 2 if s.respond_to? :specification_version= s.required_rubygems_version = Gem::Requirement.new(">= 0") if s.respond_to? :required_rubygems_version= s.rubygems_version = "2.2.2" s.required_ruby_version = ">= 2.1" s.name = "jekyll-import" s.version = JekyllImport::VERSION s.license = "MIT" s.summary = "Import command for Jekyll (static site generator)." s.description = "Provides the Import command for Jekyll." s.authors = ["Tom Preston-Werner"] s.email = "tom@mojombo.com" s.homepage = "http://github.com/jekyll/jekyll-import" s.files = `git ls-files`.split($INPUT_RECORD_SEPARATOR).grep(%r!^lib/!) s.require_paths = %w(lib) s.rdoc_options = ["--charset=UTF-8"] s.extra_rdoc_files = %w(README.markdown LICENSE) # runtime dependencies s.add_runtime_dependency("fastercsv") s.add_runtime_dependency("jekyll", ENV["JEKYLL_VERSION"] ? 
"~> #{ENV["JEKYLL_VERSION"]}" : ">= 1.4") s.add_runtime_dependency("nokogiri") # development dependencies s.add_development_dependency("activesupport", "~> 4.2") s.add_development_dependency("rake", "~> 10.1.0") s.add_development_dependency("rake", "~> 10.1.0") s.add_development_dependency("rdoc", "~> 4.0.0") # test dependencies: s.add_development_dependency("redgreen", "~> 1.2") s.add_development_dependency("rr", "~> 1.0") s.add_development_dependency("rubocop", "0.51") s.add_development_dependency("shoulda", "~> 3.5") s.add_development_dependency("simplecov", "~> 0.7") s.add_development_dependency("simplecov-gem-adapter", "~> 1.0.1") # migrator dependencies: s.add_development_dependency("behance", "~> 0.3") s.add_development_dependency("hpricot", "~> 0.8") s.add_development_dependency("htmlentities", "~> 4.3") s.add_development_dependency("mysql2", "~> 0.3") s.add_development_dependency("open_uri_redirections") s.add_development_dependency("pg", "~> 0.12") s.add_development_dependency("reverse_markdown") s.add_development_dependency("sequel", "~> 3.42") s.add_development_dependency("sqlite3", "~> 1.3.13") s.add_development_dependency("unidecode") # site dependencies: s.add_development_dependency("launchy", "~> 2.4") end
require 'active_record/connection_adapters/abstract_adapter' require 'active_support/core_ext/object/blank' require 'active_record/connection_adapters/statement_pool' # Make sure we're using pg high enough for PGResult#values gem 'pg', '~> 0.11' require 'pg' module ActiveRecord module ConnectionHandling # Establishes a connection to the database that's used by all Active Record objects def postgresql_connection(config) # :nodoc: config = config.symbolize_keys # Forward any unused config params to PGconn.connect. conn_params = config.except(:statement_limit, :encoding, :min_messages, :schema_search_path, :schema_order, :adapter, :pool, :wait_timeout) # Map ActiveRecords param names to PGs. conn_params[:user] = conn_params.delete(:username) if conn_params[:username] conn_params[:dbname] = conn_params.delete(:database) if conn_params[:database] # The postgres drivers don't allow the creation of an unconnected PGconn object, # so just pass a nil connection object for the time being. ConnectionAdapters::PostgreSQLAdapter.new(nil, logger, conn_params, config) end end module ConnectionAdapters # PostgreSQL-specific extensions to column definitions in a table. class PostgreSQLColumn < Column #:nodoc: # Instantiates a new PostgreSQL column definition in a table. 
def initialize(name, default, sql_type = nil, null = true) super(name, self.class.extract_value_from_default(default), sql_type, null) end # :stopdoc: class << self attr_accessor :money_precision def string_to_time(string) return string unless String === string case string when 'infinity' then 1.0 / 0.0 when '-infinity' then -1.0 / 0.0 else super end end def cast_hstore(object) if Hash === object object.map { |k,v| "#{escape_hstore(k)}=>#{escape_hstore(v)}" }.join ', ' else kvs = object.scan(/(?<!\\)".*?(?<!\\)"/).map { |o| unescape_hstore(o[1...-1]) } Hash[kvs.each_slice(2).to_a] end end private HSTORE_ESCAPE = { ' ' => '\\ ', '\\' => '\\\\', '"' => '\\"', '=' => '\\=', } HSTORE_ESCAPE_RE = Regexp.union(HSTORE_ESCAPE.keys) HSTORE_UNESCAPE = HSTORE_ESCAPE.invert HSTORE_UNESCAPE_RE = Regexp.union(HSTORE_UNESCAPE.keys) def unescape_hstore(value) value.gsub(HSTORE_UNESCAPE_RE) do |match| HSTORE_UNESCAPE[match] end end def escape_hstore(value) value.gsub(HSTORE_ESCAPE_RE) do |match| HSTORE_ESCAPE[match] end end end # :startdoc: private def extract_limit(sql_type) case sql_type when /^bigint/i; 8 when /^smallint/i; 2 else super end end # Extracts the scale from PostgreSQL-specific data types. def extract_scale(sql_type) # Money type has a fixed scale of 2. sql_type =~ /^money/ ? 2 : super end # Extracts the precision from PostgreSQL-specific data types. def extract_precision(sql_type) if sql_type == 'money' self.class.money_precision else super end end # Maps PostgreSQL-specific data types to logical Rails types. def simplified_type(field_type) case field_type # Numeric and monetary types when /^(?:real|double precision)$/ :float # Monetary types when 'money' :decimal when 'hstore' :hstore # Character types when /^(?:character varying|bpchar)(?:\(\d+\))?$/ :string # Binary data types when 'bytea' :binary # Date/time types when /^timestamp with(?:out)? 
time zone$/ :datetime when 'interval' :string # Geometric types when /^(?:point|line|lseg|box|"?path"?|polygon|circle)$/ :string # Network address types when /^(?:cidr|inet|macaddr)$/ :string # Bit strings when /^bit(?: varying)?(?:\(\d+\))?$/ :string # XML type when 'xml' :xml # tsvector type when 'tsvector' :tsvector # Arrays when /^\D+\[\]$/ :string # Object identifier types when 'oid' :integer # UUID type when 'uuid' :string # Small and big integer types when /^(?:small|big)int$/ :integer # Pass through all types that are not specific to PostgreSQL. else super end end # Extracts the value from a PostgreSQL column default definition. def self.extract_value_from_default(default) case default # This is a performance optimization for Ruby 1.9.2 in development. # If the value is nil, we return nil straight away without checking # the regular expressions. If we check each regular expression, # Regexp#=== will call NilClass#to_str, which will trigger # method_missing (defined by whiny nil in ActiveSupport) which # makes this method very very slow. when NilClass nil # Numeric types when /\A\(?(-?\d+(\.\d*)?\)?)\z/ $1 # Character types when /\A'(.*)'::(?:character varying|bpchar|text)\z/m $1 # Character types (8.1 formatting) when /\AE'(.*)'::(?:character varying|bpchar|text)\z/m $1.gsub(/\\(\d\d\d)/) { $1.oct.chr } # Binary data types when /\A'(.*)'::bytea\z/m $1 # Date/time types when /\A'(.+)'::(?:time(?:stamp)? with(?:out)? 
time zone|date)\z/ $1 when /\A'(.*)'::interval\z/ $1 # Boolean type when 'true' true when 'false' false # Geometric types when /\A'(.*)'::(?:point|line|lseg|box|"?path"?|polygon|circle)\z/ $1 # Network address types when /\A'(.*)'::(?:cidr|inet|macaddr)\z/ $1 # Bit string types when /\AB'(.*)'::"?bit(?: varying)?"?\z/ $1 # XML type when /\A'(.*)'::xml\z/m $1 # Arrays when /\A'(.*)'::"?\D+"?\[\]\z/ $1 # Object identifier types when /\A-?\d+\z/ $1 else # Anything else is blank, some user type, or some function # and we can't know the value of that, so return nil. nil end end end # The PostgreSQL adapter works with the native C (https://bitbucket.org/ged/ruby-pg) driver. # # Options: # # * <tt>:host</tt> - Defaults to a Unix-domain socket in /tmp. On machines without Unix-domain sockets, # the default is to connect to localhost. # * <tt>:port</tt> - Defaults to 5432. # * <tt>:username</tt> - Defaults to be the same as the operating system name of the user running the application. # * <tt>:password</tt> - Password to be used if the server demands password authentication. # * <tt>:database</tt> - Defaults to be the same as the user name. # * <tt>:schema_search_path</tt> - An optional schema search path for the connection given # as a string of comma-separated schema names. This is backward-compatible with the <tt>:schema_order</tt> option. # * <tt>:encoding</tt> - An optional client encoding that is used in a <tt>SET client_encoding TO # <encoding></tt> call on the connection. # * <tt>:min_messages</tt> - An optional client min messages that is used in a # <tt>SET client_min_messages TO <min_messages></tt> call on the connection. # # Any further options are used as connection parameters to libpq. See # http://www.postgresql.org/docs/9.1/static/libpq-connect.html for the # list of parameters. # # In addition, default connection parameters of libpq can be set per environment variables. # See http://www.postgresql.org/docs/9.1/static/libpq-envars.html . 
class PostgreSQLAdapter < AbstractAdapter class TableDefinition < ActiveRecord::ConnectionAdapters::TableDefinition def xml(*args) options = args.extract_options! column(args[0], 'xml', options) end def tsvector(*args) options = args.extract_options! column(args[0], 'tsvector', options) end def hstore(name, options = {}) column(name, 'hstore', options) end end ADAPTER_NAME = 'PostgreSQL' NATIVE_DATABASE_TYPES = { :primary_key => "serial primary key", :string => { :name => "character varying", :limit => 255 }, :text => { :name => "text" }, :integer => { :name => "integer" }, :float => { :name => "float" }, :decimal => { :name => "decimal" }, :datetime => { :name => "timestamp" }, :timestamp => { :name => "timestamp" }, :time => { :name => "time" }, :date => { :name => "date" }, :binary => { :name => "bytea" }, :boolean => { :name => "boolean" }, :xml => { :name => "xml" }, :tsvector => { :name => "tsvector" } } # Returns 'PostgreSQL' as adapter name for identification purposes. def adapter_name ADAPTER_NAME end # Returns +true+, since this connection adapter supports prepared statement # caching. def supports_statement_cache? true end def supports_index_sort_order? true end class StatementPool < ConnectionAdapters::StatementPool def initialize(connection, max) super @counter = 0 @cache = Hash.new { |h,pid| h[pid] = {} } end def each(&block); cache.each(&block); end def key?(key); cache.key?(key); end def [](key); cache[key]; end def length; cache.length; end def next_key "a#{@counter + 1}" end def []=(sql, key) while @max <= cache.size dealloc(cache.shift.last) end @counter += 1 cache[sql] = key end def clear cache.each_value do |stmt_key| dealloc stmt_key end cache.clear end def delete(sql_key) dealloc cache[sql_key] cache.delete sql_key end private def cache @cache[$$] end def dealloc(key) @connection.query "DEALLOCATE #{key}" if connection_active? end def connection_active? 
@connection.status == PGconn::CONNECTION_OK rescue PGError false end end # Initializes and connects a PostgreSQL adapter. def initialize(connection, logger, connection_parameters, config) super(connection, logger) @connection_parameters, @config = connection_parameters, config @visitor = Arel::Visitors::PostgreSQL.new self # @local_tz is initialized as nil to avoid warnings when connect tries to use it @local_tz = nil @table_alias_length = nil connect @statements = StatementPool.new @connection, config.fetch(:statement_limit) { 1000 } if postgresql_version < 80200 raise "Your version of PostgreSQL (#{postgresql_version}) is too old, please upgrade!" end @local_tz = execute('SHOW TIME ZONE', 'SCHEMA').first["TimeZone"] end # Clears the prepared statements cache. def clear_cache! @statements.clear end # Is this connection alive and ready for queries? def active? @connection.status == PGconn::CONNECTION_OK rescue PGError false end # Close then reopen the connection. def reconnect! clear_cache! @connection.reset configure_connection end def reset! clear_cache! super end # Disconnects from the database if already connected. Otherwise, this # method does nothing. def disconnect! clear_cache! @connection.close rescue nil end def native_database_types #:nodoc: NATIVE_DATABASE_TYPES end # Returns true, since this connection adapter supports migrations. def supports_migrations? true end # Does PostgreSQL support finding primary key on non-Active Record tables? def supports_primary_key? #:nodoc: true end # Enable standard-conforming strings if available. def set_standard_conforming_strings old, self.client_min_messages = client_min_messages, 'panic' execute('SET standard_conforming_strings = on', 'SCHEMA') rescue nil ensure self.client_min_messages = old end def supports_insert_with_returning? true end def supports_ddl_transactions? true end # Returns true, since this connection adapter supports savepoints. def supports_savepoints? true end # Returns true. 
def supports_explain? true end # Returns the configured supported identifier length supported by PostgreSQL def table_alias_length @table_alias_length ||= query('SHOW max_identifier_length')[0][0].to_i end # QUOTING ================================================== # Escapes binary strings for bytea input to the database. def escape_bytea(value) @connection.escape_bytea(value) if value end # Unescapes bytea output from a database to the binary string it represents. # NOTE: This is NOT an inverse of escape_bytea! This is only to be used # on escaped binary output from database drive. def unescape_bytea(value) @connection.unescape_bytea(value) if value end # Quotes PostgreSQL-specific data types for SQL input. def quote(value, column = nil) #:nodoc: return super unless column case value when Float return super unless value.infinite? && column.type == :datetime "'#{value.to_s.downcase}'" when Numeric return super unless column.sql_type == 'money' # Not truly string input, so doesn't require (or allow) escape string syntax. "'#{value}'" when String case column.sql_type when 'bytea' then "'#{escape_bytea(value)}'" when 'xml' then "xml '#{quote_string(value)}'" when /^bit/ case value when /^[01]*$/ then "B'#{value}'" # Bit-string notation when /^[0-9A-F]*$/i then "X'#{value}'" # Hexadecimal notation end else super end else super end end def type_cast(value, column) return super unless column case value when String return super unless 'bytea' == column.sql_type { :value => value, :format => 1 } else super end end # Quotes strings for use in SQL input. 
def quote_string(s) #:nodoc: @connection.escape(s) end # Checks the following cases: # # - table_name # - "table.name" # - schema_name.table_name # - schema_name."table.name" # - "schema.name".table_name # - "schema.name"."table.name" def quote_table_name(name) schema, name_part = extract_pg_identifier_from_name(name.to_s) unless name_part quote_column_name(schema) else table_name, name_part = extract_pg_identifier_from_name(name_part) "#{quote_column_name(schema)}.#{quote_column_name(table_name)}" end end # Quotes column names for use in SQL queries. def quote_column_name(name) #:nodoc: PGconn.quote_ident(name.to_s) end # Quote date/time values for use in SQL input. Includes microseconds # if the value is a Time responding to usec. def quoted_date(value) #:nodoc: if value.acts_like?(:time) && value.respond_to?(:usec) "#{super}.#{sprintf("%06d", value.usec)}" else super end end # Set the authorized user for this session def session_auth=(user) clear_cache! exec_query "SET SESSION AUTHORIZATION #{user}" end # REFERENTIAL INTEGRITY ==================================== def supports_disable_referential_integrity? #:nodoc: true end def disable_referential_integrity #:nodoc: if supports_disable_referential_integrity? then execute(tables.collect { |name| "ALTER TABLE #{quote_table_name(name)} DISABLE TRIGGER ALL" }.join(";")) end yield ensure if supports_disable_referential_integrity? 
then execute(tables.collect { |name| "ALTER TABLE #{quote_table_name(name)} ENABLE TRIGGER ALL" }.join(";")) end end # DATABASE STATEMENTS ====================================== def explain(arel, binds = []) sql = "EXPLAIN #{to_sql(arel)}" ExplainPrettyPrinter.new.pp(exec_query(sql, 'EXPLAIN', binds)) end class ExplainPrettyPrinter # :nodoc: # Pretty prints the result of a EXPLAIN in a way that resembles the output of the # PostgreSQL shell: # # QUERY PLAN # ------------------------------------------------------------------------------ # Nested Loop Left Join (cost=0.00..37.24 rows=8 width=0) # Join Filter: (posts.user_id = users.id) # -> Index Scan using users_pkey on users (cost=0.00..8.27 rows=1 width=4) # Index Cond: (id = 1) # -> Seq Scan on posts (cost=0.00..28.88 rows=8 width=4) # Filter: (posts.user_id = 1) # (6 rows) # def pp(result) header = result.columns.first lines = result.rows.map(&:first) # We add 2 because there's one char of padding at both sides, note # the extra hyphens in the example above. width = [header, *lines].map(&:length).max + 2 pp = [] pp << header.center(width).rstrip pp << '-' * width pp += lines.map {|line| " #{line}"} nrows = result.rows.length rows_label = nrows == 1 ? 'row' : 'rows' pp << "(#{nrows} #{rows_label})" pp.join("\n") + "\n" end end # Executes a SELECT query and returns an array of rows. Each row is an # array of field values. def select_rows(sql, name = nil) select_raw(sql, name).last end # Executes an INSERT query and returns the new record's ID def insert_sql(sql, name = nil, pk = nil, id_value = nil, sequence_name = nil) unless pk # Extract the table from the insert sql. Yuck. 
table_ref = extract_table_ref_from_insert_sql(sql) pk = primary_key(table_ref) if table_ref end if pk select_value("#{sql} RETURNING #{quote_column_name(pk)}") else super end end alias :create :insert # create a 2D array representing the result set def result_as_array(res) #:nodoc: # check if we have any binary column and if they need escaping ftypes = Array.new(res.nfields) do |i| [i, res.ftype(i)] end rows = res.values return rows unless ftypes.any? { |_, x| x == BYTEA_COLUMN_TYPE_OID || x == MONEY_COLUMN_TYPE_OID } typehash = ftypes.group_by { |_, type| type } binaries = typehash[BYTEA_COLUMN_TYPE_OID] || [] monies = typehash[MONEY_COLUMN_TYPE_OID] || [] rows.each do |row| # unescape string passed BYTEA field (OID == 17) binaries.each do |index, _| row[index] = unescape_bytea(row[index]) end # If this is a money type column and there are any currency symbols, # then strip them off. Indeed it would be prettier to do this in # PostgreSQLColumn.string_to_decimal but would break form input # fields that call value_before_type_cast. monies.each do |index, _| data = row[index] # Because money output is formatted according to the locale, there are two # cases to consider (note the decimal separators): # (1) $12,345,678.12 # (2) $12.345.678,12 case data when /^-?\D+[\d,]+\.\d{2}$/ # (1) data.gsub!(/[^-\d.]/, '') when /^-?\D+[\d.]+,\d{2}$/ # (2) data.gsub!(/[^-\d,]/, '').sub!(/,/, '.') end end end end # Queries the database and returns the results in an Array-like object def query(sql, name = nil) #:nodoc: log(sql, name) do result_as_array @connection.async_exec(sql) end end # Executes an SQL statement, returning a PGresult object on success # or raising a PGError exception otherwise. def execute(sql, name = nil) log(sql, name) do @connection.async_exec(sql) end end def substitute_at(column, index) Arel.sql("$#{index + 1}") end def exec_query(sql, name = 'SQL', binds = []) log(sql, name, binds) do result = binds.empty? ? 
exec_no_cache(sql, binds) : exec_cache(sql, binds) ret = ActiveRecord::Result.new(result.fields, result_as_array(result)) result.clear return ret end end def exec_delete(sql, name = 'SQL', binds = []) log(sql, name, binds) do result = binds.empty? ? exec_no_cache(sql, binds) : exec_cache(sql, binds) affected = result.cmd_tuples result.clear affected end end alias :exec_update :exec_delete def sql_for_insert(sql, pk, id_value, sequence_name, binds) unless pk # Extract the table from the insert sql. Yuck. table_ref = extract_table_ref_from_insert_sql(sql) pk = primary_key(table_ref) if table_ref end sql = "#{sql} RETURNING #{quote_column_name(pk)}" if pk [sql, binds] end # Executes an UPDATE query and returns the number of affected tuples. def update_sql(sql, name = nil) super.cmd_tuples end # Begins a transaction. def begin_db_transaction execute "BEGIN" end # Commits a transaction. def commit_db_transaction execute "COMMIT" end # Aborts a transaction. def rollback_db_transaction execute "ROLLBACK" end def outside_transaction? @connection.transaction_status == PGconn::PQTRANS_IDLE end def create_savepoint execute("SAVEPOINT #{current_savepoint_name}") end def rollback_to_savepoint execute("ROLLBACK TO SAVEPOINT #{current_savepoint_name}") end def release_savepoint execute("RELEASE SAVEPOINT #{current_savepoint_name}") end # SCHEMA STATEMENTS ======================================== # Drops the database specified on the +name+ attribute # and creates it again using the provided +options+. def recreate_database(name, options = {}) #:nodoc: drop_database(name) create_database(name, options) end # Create a new PostgreSQL database. Options include <tt>:owner</tt>, <tt>:template</tt>, # <tt>:encoding</tt>, <tt>:tablespace</tt>, and <tt>:connection_limit</tt> (note that MySQL uses # <tt>:charset</tt> while PostgreSQL uses <tt>:encoding</tt>). 
# # Example: # create_database config[:database], config # create_database 'foo_development', :encoding => 'unicode' def create_database(name, options = {}) options = options.reverse_merge(:encoding => "utf8") option_string = options.symbolize_keys.sum do |key, value| case key when :owner " OWNER = \"#{value}\"" when :template " TEMPLATE = \"#{value}\"" when :encoding " ENCODING = '#{value}'" when :tablespace " TABLESPACE = \"#{value}\"" when :connection_limit " CONNECTION LIMIT = #{value}" else "" end end execute "CREATE DATABASE #{quote_table_name(name)}#{option_string}" end # Drops a PostgreSQL database. # # Example: # drop_database 'matt_development' def drop_database(name) #:nodoc: execute "DROP DATABASE IF EXISTS #{quote_table_name(name)}" end # Returns the list of all tables in the schema search path or a specified schema. def tables(name = nil) query(<<-SQL, 'SCHEMA').map { |row| row[0] } SELECT tablename FROM pg_tables WHERE schemaname = ANY (current_schemas(false)) SQL end # Returns true if table exists. # If the schema is not specified as part of +name+ then it will only find tables within # the current schema search path (regardless of permissions to access tables in other schemas) def table_exists?(name) schema, table = Utils.extract_schema_and_table(name.to_s) return false unless table binds = [[nil, table]] binds << [nil, schema] if schema exec_query(<<-SQL, 'SCHEMA', binds).rows.first[0].to_i > 0 SELECT COUNT(*) FROM pg_class c LEFT JOIN pg_namespace n ON n.oid = c.relnamespace WHERE c.relkind in ('v','r') AND c.relname = $1 AND n.nspname = #{schema ? '$2' : 'ANY (current_schemas(false))'} SQL end # Returns true if schema exists. def schema_exists?(name) exec_query(<<-SQL, 'SCHEMA', [[nil, name]]).rows.first[0].to_i > 0 SELECT COUNT(*) FROM pg_namespace WHERE nspname = $1 SQL end # Returns an array of indexes for the given table. 
def indexes(table_name, name = nil) result = query(<<-SQL, name) SELECT distinct i.relname, d.indisunique, d.indkey, pg_get_indexdef(d.indexrelid), t.oid FROM pg_class t INNER JOIN pg_index d ON t.oid = d.indrelid INNER JOIN pg_class i ON d.indexrelid = i.oid WHERE i.relkind = 'i' AND d.indisprimary = 'f' AND t.relname = '#{table_name}' AND i.relnamespace IN (SELECT oid FROM pg_namespace WHERE nspname = ANY (current_schemas(false)) ) ORDER BY i.relname SQL result.map do |row| index_name = row[0] unique = row[1] == 't' indkey = row[2].split(" ") inddef = row[3] oid = row[4] columns = Hash[query(<<-SQL, "Columns for index #{row[0]} on #{table_name}")] SELECT a.attnum, a.attname FROM pg_attribute a WHERE a.attrelid = #{oid} AND a.attnum IN (#{indkey.join(",")}) SQL column_names = columns.values_at(*indkey).compact # add info on sort order for columns (only desc order is explicitly specified, asc is the default) desc_order_columns = inddef.scan(/(\w+) DESC/).flatten orders = desc_order_columns.any? ? Hash[desc_order_columns.map {|order_column| [order_column, :desc]}] : {} column_names.empty? ? nil : IndexDefinition.new(table_name, index_name, unique, column_names, [], orders) end.compact end # Returns the list of all column definitions for a table. def columns(table_name, name = nil) # Limit, precision, and scale are all handled by the superclass. column_definitions(table_name).collect do |column_name, type, default, notnull| PostgreSQLColumn.new(column_name, default, type, notnull == 'f') end end # Returns the current database name. def current_database query('select current_database()')[0][0] end # Returns the current schema name. def current_schema query('SELECT current_schema', 'SCHEMA')[0][0] end # Returns the current database encoding format. 
def encoding query(<<-end_sql)[0][0] SELECT pg_encoding_to_char(pg_database.encoding) FROM pg_database WHERE pg_database.datname LIKE '#{current_database}' end_sql end # Sets the schema search path to a string of comma-separated schema names. # Names beginning with $ have to be quoted (e.g. $user => '$user'). # See: http://www.postgresql.org/docs/current/static/ddl-schemas.html # # This should be not be called manually but set in database.yml. def schema_search_path=(schema_csv) if schema_csv execute "SET search_path TO #{schema_csv}" @schema_search_path = schema_csv end end # Returns the active schema search path. def schema_search_path @schema_search_path ||= query('SHOW search_path', 'SCHEMA')[0][0] end # Returns the current client message level. def client_min_messages query('SHOW client_min_messages', 'SCHEMA')[0][0] end # Set the client message level. def client_min_messages=(level) execute("SET client_min_messages TO '#{level}'", 'SCHEMA') end # Returns the sequence name for a table's primary key or some other specified key. def default_sequence_name(table_name, pk = nil) #:nodoc: serial_sequence(table_name, pk || 'id').split('.').last rescue ActiveRecord::StatementInvalid "#{table_name}_#{pk || 'id'}_seq" end def serial_sequence(table, column) result = exec_query(<<-eosql, 'SCHEMA', [[nil, table], [nil, column]]) SELECT pg_get_serial_sequence($1, $2) eosql result.rows.first.first end # Resets the sequence of a table's primary key to the maximum value. 
def reset_pk_sequence!(table, pk = nil, sequence = nil) #:nodoc: unless pk and sequence default_pk, default_sequence = pk_and_sequence_for(table) pk ||= default_pk sequence ||= default_sequence end if @logger && pk && !sequence @logger.warn "#{table} has primary key #{pk} with no default sequence" end if pk && sequence quoted_sequence = quote_table_name(sequence) select_value <<-end_sql, 'Reset sequence' SELECT setval('#{quoted_sequence}', (SELECT COALESCE(MAX(#{quote_column_name pk})+(SELECT increment_by FROM #{quoted_sequence}), (SELECT min_value FROM #{quoted_sequence})) FROM #{quote_table_name(table)}), false) end_sql end end # Returns a table's primary key and belonging sequence. def pk_and_sequence_for(table) #:nodoc: # First try looking for a sequence with a dependency on the # given table's primary key. result = exec_query(<<-end_sql, 'SCHEMA').rows.first SELECT attr.attname, ns.nspname, seq.relname FROM pg_class seq INNER JOIN pg_depend dep ON seq.oid = dep.objid INNER JOIN pg_attribute attr ON attr.attrelid = dep.refobjid AND attr.attnum = dep.refobjsubid INNER JOIN pg_constraint cons ON attr.attrelid = cons.conrelid AND attr.attnum = cons.conkey[1] INNER JOIN pg_namespace ns ON seq.relnamespace = ns.oid WHERE seq.relkind = 'S' AND cons.contype = 'p' AND dep.refobjid = '#{quote_table_name(table)}'::regclass end_sql # [primary_key, sequence] if result.second == 'public' then sequence = result.last else sequence = result.second+'.'+result.last end [result.first, sequence] rescue nil end # Returns just a table's primary key def primary_key(table) row = exec_query(<<-end_sql, 'SCHEMA', [[nil, table]]).rows.first SELECT DISTINCT(attr.attname) FROM pg_attribute attr INNER JOIN pg_depend dep ON attr.attrelid = dep.refobjid AND attr.attnum = dep.refobjsubid INNER JOIN pg_constraint cons ON attr.attrelid = cons.conrelid AND attr.attnum = cons.conkey[1] WHERE cons.contype = 'p' AND dep.refobjid = $1::regclass end_sql row && row.first end # Renames a table. 
# # Example: # rename_table('octopuses', 'octopi') def rename_table(name, new_name) clear_cache! execute "ALTER TABLE #{quote_table_name(name)} RENAME TO #{quote_table_name(new_name)}" end # Adds a new column to the named table. # See TableDefinition#column for details of the options you can use. def add_column(table_name, column_name, type, options = {}) clear_cache! add_column_sql = "ALTER TABLE #{quote_table_name(table_name)} ADD COLUMN #{quote_column_name(column_name)} #{type_to_sql(type, options[:limit], options[:precision], options[:scale])}" add_column_options!(add_column_sql, options) execute add_column_sql end # Changes the column of a table. def change_column(table_name, column_name, type, options = {}) clear_cache! quoted_table_name = quote_table_name(table_name) execute "ALTER TABLE #{quoted_table_name} ALTER COLUMN #{quote_column_name(column_name)} TYPE #{type_to_sql(type, options[:limit], options[:precision], options[:scale])}" change_column_default(table_name, column_name, options[:default]) if options_include_default?(options) change_column_null(table_name, column_name, options[:null], options[:default]) if options.key?(:null) end # Changes the default value of a table column. def change_column_default(table_name, column_name, default) clear_cache! execute "ALTER TABLE #{quote_table_name(table_name)} ALTER COLUMN #{quote_column_name(column_name)} SET DEFAULT #{quote(default)}" end def change_column_null(table_name, column_name, null, default = nil) clear_cache! unless null || default.nil? execute("UPDATE #{quote_table_name(table_name)} SET #{quote_column_name(column_name)}=#{quote(default)} WHERE #{quote_column_name(column_name)} IS NULL") end execute("ALTER TABLE #{quote_table_name(table_name)} ALTER #{quote_column_name(column_name)} #{null ? 'DROP' : 'SET'} NOT NULL") end # Renames a column in a table. def rename_column(table_name, column_name, new_column_name) clear_cache! 
execute "ALTER TABLE #{quote_table_name(table_name)} RENAME COLUMN #{quote_column_name(column_name)} TO #{quote_column_name(new_column_name)}" end def remove_index!(table_name, index_name) #:nodoc: execute "DROP INDEX #{quote_table_name(index_name)}" end def rename_index(table_name, old_name, new_name) execute "ALTER INDEX #{quote_column_name(old_name)} RENAME TO #{quote_table_name(new_name)}" end def index_name_length 63 end # Maps logical Rails types to PostgreSQL-specific data types. def type_to_sql(type, limit = nil, precision = nil, scale = nil) return super unless type.to_s == 'integer' return 'integer' unless limit case limit when 1, 2; 'smallint' when 3, 4; 'integer' when 5..8; 'bigint' else raise(ActiveRecordError, "No integer type has byte size #{limit}. Use a numeric with precision 0 instead.") end end # Returns a SELECT DISTINCT clause for a given set of columns and a given ORDER BY clause. # # PostgreSQL requires the ORDER BY columns in the select list for distinct queries, and # requires that the ORDER BY include the distinct column. # # distinct("posts.id", "posts.created_at desc") def distinct(columns, orders) #:nodoc: return "DISTINCT #{columns}" if orders.empty? # Construct a clean list of column names from the ORDER BY clause, removing # any ASC/DESC modifiers order_columns = orders.collect { |s| s.gsub(/\s+(ASC|DESC)\s*/i, '') } order_columns.delete_if { |c| c.blank? } order_columns = order_columns.zip((0...order_columns.size).to_a).map { |s,i| "#{s} AS alias_#{i}" } "DISTINCT #{columns}, #{order_columns * ', '}" end module Utils extend self # Returns an array of <tt>[schema_name, table_name]</tt> extracted from +name+. # +schema_name+ is nil if not specified in +name+. 
# +schema_name+ and +table_name+ exclude surrounding quotes (regardless of whether provided in +name+) # +name+ supports the range of schema/table references understood by PostgreSQL, for example: # # * <tt>table_name</tt> # * <tt>"table.name"</tt> # * <tt>schema_name.table_name</tt> # * <tt>schema_name."table.name"</tt> # * <tt>"schema.name"."table name"</tt> def extract_schema_and_table(name) table, schema = name.scan(/[^".\s]+|"[^"]*"/)[0..1].collect{|m| m.gsub(/(^"|"$)/,'') }.reverse [schema, table] end end protected # Returns the version of the connected PostgreSQL server. def postgresql_version @connection.server_version end def translate_exception(exception, message) case exception.message when /duplicate key value violates unique constraint/ RecordNotUnique.new(message, exception) when /violates foreign key constraint/ InvalidForeignKey.new(message, exception) else super end end private FEATURE_NOT_SUPPORTED = "0A000" # :nodoc: def exec_no_cache(sql, binds) @connection.async_exec(sql) end def exec_cache(sql, binds) begin stmt_key = prepare_statement sql # Clear the queue @connection.get_last_result @connection.send_query_prepared(stmt_key, binds.map { |col, val| type_cast(val, col) }) @connection.block @connection.get_last_result rescue PGError => e # Get the PG code for the failure. Annoyingly, the code for # prepared statements whose return value may have changed is # FEATURE_NOT_SUPPORTED. Check here for more details: # http://git.postgresql.org/gitweb/?p=postgresql.git;a=blob;f=src/backend/utils/cache/plancache.c#l573 code = e.result.result_error_field(PGresult::PG_DIAG_SQLSTATE) if FEATURE_NOT_SUPPORTED == code @statements.delete sql_key(sql) retry else raise e end end end # Returns the statement identifier for the client side cache # of statements def sql_key(sql) "#{schema_search_path}-#{sql}" end # Prepare the statement if it hasn't been prepared, return # the statement key. def prepare_statement(sql) sql_key = sql_key(sql) unless @statements.key? 
sql_key nextkey = @statements.next_key @connection.prepare nextkey, sql @statements[sql_key] = nextkey end @statements[sql_key] end # The internal PostgreSQL identifier of the money data type. MONEY_COLUMN_TYPE_OID = 790 #:nodoc: # The internal PostgreSQL identifier of the BYTEA data type. BYTEA_COLUMN_TYPE_OID = 17 #:nodoc: # Connects to a PostgreSQL server and sets up the adapter depending on the # connected server's characteristics. def connect @connection = PGconn.connect(@connection_parameters) # Money type has a fixed precision of 10 in PostgreSQL 8.2 and below, and as of # PostgreSQL 8.3 it has a fixed precision of 19. PostgreSQLColumn.extract_precision # should know about this but can't detect it there, so deal with it here. PostgreSQLColumn.money_precision = (postgresql_version >= 80300) ? 19 : 10 configure_connection end # Configures the encoding, verbosity, schema search path, and time zone of the connection. # This is called by #connect and should not be called manually. def configure_connection if @config[:encoding] @connection.set_client_encoding(@config[:encoding]) end self.client_min_messages = @config[:min_messages] if @config[:min_messages] self.schema_search_path = @config[:schema_search_path] || @config[:schema_order] # Use standard-conforming strings if available so we don't have to do the E'...' dance. set_standard_conforming_strings # If using Active Record's time zone support configure the connection to return # TIMESTAMP WITH ZONE types in UTC. if ActiveRecord::Base.default_timezone == :utc execute("SET time zone 'UTC'", 'SCHEMA') elsif @local_tz execute("SET time zone '#{@local_tz}'", 'SCHEMA') end end # Returns the current ID of a table's sequence. 
def last_insert_id(sequence_name) #:nodoc: r = exec_query("SELECT currval($1)", 'SQL', [[nil, sequence_name]]) Integer(r.rows.first.first) end # Executes a SELECT query and returns the results, performing any data type # conversions that are required to be performed here instead of in PostgreSQLColumn. def select(sql, name = nil, binds = []) exec_query(sql, name, binds).to_a end def select_raw(sql, name = nil) res = execute(sql, name) results = result_as_array(res) fields = res.fields res.clear return fields, results end # Returns the list of a table's column names, data types, and default values. # # The underlying query is roughly: # SELECT column.name, column.type, default.value # FROM column LEFT JOIN default # ON column.table_id = default.table_id # AND column.num = default.column_num # WHERE column.table_id = get_table_id('table_name') # AND column.num > 0 # AND NOT column.is_dropped # ORDER BY column.num # # If the table name is not prefixed with a schema, the database will # take the first match from the schema search path. # # Query implementation notes: # - format_type includes the column size constraint, e.g. varchar(50) # - ::regclass is a function that gives the id for a table name def column_definitions(table_name) #:nodoc: exec_query(<<-end_sql, 'SCHEMA').rows SELECT a.attname, format_type(a.atttypid, a.atttypmod), d.adsrc, a.attnotnull FROM pg_attribute a LEFT JOIN pg_attrdef d ON a.attrelid = d.adrelid AND a.attnum = d.adnum WHERE a.attrelid = '#{quote_table_name(table_name)}'::regclass AND a.attnum > 0 AND NOT a.attisdropped ORDER BY a.attnum end_sql end def extract_pg_identifier_from_name(name) match_data = name.start_with?('"') ? name.match(/\"([^\"]+)\"/) : name.match(/([^\.]+)/) if match_data rest = name[match_data[0].length, name.length] rest = rest[1, rest.length] if rest.start_with? "." [match_data[1], (rest.length > 0 ? 
rest : nil)] end end def extract_table_ref_from_insert_sql(sql) sql[/into\s+([^\(]*).*values\s*\(/i] $1.strip if $1 end def table_definition TableDefinition.new(self) end end end end Postgresql: restore previous behaviour regarding to nil values in connection specification Hash require 'active_record/connection_adapters/abstract_adapter' require 'active_support/core_ext/object/blank' require 'active_record/connection_adapters/statement_pool' # Make sure we're using pg high enough for PGResult#values gem 'pg', '~> 0.11' require 'pg' module ActiveRecord module ConnectionHandling # Establishes a connection to the database that's used by all Active Record objects def postgresql_connection(config) # :nodoc: config = config.symbolize_keys # Forward any unused config params to PGconn.connect. conn_params = config.except(:statement_limit, :encoding, :min_messages, :schema_search_path, :schema_order, :adapter, :pool, :wait_timeout) conn_params.delete_if { |k,v| v.nil? } # Map ActiveRecords param names to PGs. conn_params[:user] = conn_params.delete(:username) if conn_params[:username] conn_params[:dbname] = conn_params.delete(:database) if conn_params[:database] # The postgres drivers don't allow the creation of an unconnected PGconn object, # so just pass a nil connection object for the time being. ConnectionAdapters::PostgreSQLAdapter.new(nil, logger, conn_params, config) end end module ConnectionAdapters # PostgreSQL-specific extensions to column definitions in a table. class PostgreSQLColumn < Column #:nodoc: # Instantiates a new PostgreSQL column definition in a table. 
def initialize(name, default, sql_type = nil, null = true) super(name, self.class.extract_value_from_default(default), sql_type, null) end # :stopdoc: class << self attr_accessor :money_precision def string_to_time(string) return string unless String === string case string when 'infinity' then 1.0 / 0.0 when '-infinity' then -1.0 / 0.0 else super end end def cast_hstore(object) if Hash === object object.map { |k,v| "#{escape_hstore(k)}=>#{escape_hstore(v)}" }.join ', ' else kvs = object.scan(/(?<!\\)".*?(?<!\\)"/).map { |o| unescape_hstore(o[1...-1]) } Hash[kvs.each_slice(2).to_a] end end private HSTORE_ESCAPE = { ' ' => '\\ ', '\\' => '\\\\', '"' => '\\"', '=' => '\\=', } HSTORE_ESCAPE_RE = Regexp.union(HSTORE_ESCAPE.keys) HSTORE_UNESCAPE = HSTORE_ESCAPE.invert HSTORE_UNESCAPE_RE = Regexp.union(HSTORE_UNESCAPE.keys) def unescape_hstore(value) value.gsub(HSTORE_UNESCAPE_RE) do |match| HSTORE_UNESCAPE[match] end end def escape_hstore(value) value.gsub(HSTORE_ESCAPE_RE) do |match| HSTORE_ESCAPE[match] end end end # :startdoc: private def extract_limit(sql_type) case sql_type when /^bigint/i; 8 when /^smallint/i; 2 else super end end # Extracts the scale from PostgreSQL-specific data types. def extract_scale(sql_type) # Money type has a fixed scale of 2. sql_type =~ /^money/ ? 2 : super end # Extracts the precision from PostgreSQL-specific data types. def extract_precision(sql_type) if sql_type == 'money' self.class.money_precision else super end end # Maps PostgreSQL-specific data types to logical Rails types. def simplified_type(field_type) case field_type # Numeric and monetary types when /^(?:real|double precision)$/ :float # Monetary types when 'money' :decimal when 'hstore' :hstore # Character types when /^(?:character varying|bpchar)(?:\(\d+\))?$/ :string # Binary data types when 'bytea' :binary # Date/time types when /^timestamp with(?:out)? 
time zone$/ :datetime when 'interval' :string # Geometric types when /^(?:point|line|lseg|box|"?path"?|polygon|circle)$/ :string # Network address types when /^(?:cidr|inet|macaddr)$/ :string # Bit strings when /^bit(?: varying)?(?:\(\d+\))?$/ :string # XML type when 'xml' :xml # tsvector type when 'tsvector' :tsvector # Arrays when /^\D+\[\]$/ :string # Object identifier types when 'oid' :integer # UUID type when 'uuid' :string # Small and big integer types when /^(?:small|big)int$/ :integer # Pass through all types that are not specific to PostgreSQL. else super end end # Extracts the value from a PostgreSQL column default definition. def self.extract_value_from_default(default) case default # This is a performance optimization for Ruby 1.9.2 in development. # If the value is nil, we return nil straight away without checking # the regular expressions. If we check each regular expression, # Regexp#=== will call NilClass#to_str, which will trigger # method_missing (defined by whiny nil in ActiveSupport) which # makes this method very very slow. when NilClass nil # Numeric types when /\A\(?(-?\d+(\.\d*)?\)?)\z/ $1 # Character types when /\A'(.*)'::(?:character varying|bpchar|text)\z/m $1 # Character types (8.1 formatting) when /\AE'(.*)'::(?:character varying|bpchar|text)\z/m $1.gsub(/\\(\d\d\d)/) { $1.oct.chr } # Binary data types when /\A'(.*)'::bytea\z/m $1 # Date/time types when /\A'(.+)'::(?:time(?:stamp)? with(?:out)? 
time zone|date)\z/ $1 when /\A'(.*)'::interval\z/ $1 # Boolean type when 'true' true when 'false' false # Geometric types when /\A'(.*)'::(?:point|line|lseg|box|"?path"?|polygon|circle)\z/ $1 # Network address types when /\A'(.*)'::(?:cidr|inet|macaddr)\z/ $1 # Bit string types when /\AB'(.*)'::"?bit(?: varying)?"?\z/ $1 # XML type when /\A'(.*)'::xml\z/m $1 # Arrays when /\A'(.*)'::"?\D+"?\[\]\z/ $1 # Object identifier types when /\A-?\d+\z/ $1 else # Anything else is blank, some user type, or some function # and we can't know the value of that, so return nil. nil end end end # The PostgreSQL adapter works with the native C (https://bitbucket.org/ged/ruby-pg) driver. # # Options: # # * <tt>:host</tt> - Defaults to a Unix-domain socket in /tmp. On machines without Unix-domain sockets, # the default is to connect to localhost. # * <tt>:port</tt> - Defaults to 5432. # * <tt>:username</tt> - Defaults to be the same as the operating system name of the user running the application. # * <tt>:password</tt> - Password to be used if the server demands password authentication. # * <tt>:database</tt> - Defaults to be the same as the user name. # * <tt>:schema_search_path</tt> - An optional schema search path for the connection given # as a string of comma-separated schema names. This is backward-compatible with the <tt>:schema_order</tt> option. # * <tt>:encoding</tt> - An optional client encoding that is used in a <tt>SET client_encoding TO # <encoding></tt> call on the connection. # * <tt>:min_messages</tt> - An optional client min messages that is used in a # <tt>SET client_min_messages TO <min_messages></tt> call on the connection. # # Any further options are used as connection parameters to libpq. See # http://www.postgresql.org/docs/9.1/static/libpq-connect.html for the # list of parameters. # # In addition, default connection parameters of libpq can be set per environment variables. # See http://www.postgresql.org/docs/9.1/static/libpq-envars.html . 
class PostgreSQLAdapter < AbstractAdapter
      # Extends the generic table definition DSL with PostgreSQL-only column
      # types (xml, tsvector, hstore).
      class TableDefinition < ActiveRecord::ConnectionAdapters::TableDefinition
        def xml(*args)
          options = args.extract_options!
          column(args[0], 'xml', options)
        end

        def tsvector(*args)
          options = args.extract_options!
          column(args[0], 'tsvector', options)
        end

        def hstore(name, options = {})
          column(name, 'hstore', options)
        end
      end

      ADAPTER_NAME = 'PostgreSQL'

      NATIVE_DATABASE_TYPES = {
        :primary_key => "serial primary key",
        :string      => { :name => "character varying", :limit => 255 },
        :text        => { :name => "text" },
        :integer     => { :name => "integer" },
        :float       => { :name => "float" },
        :decimal     => { :name => "decimal" },
        :datetime    => { :name => "timestamp" },
        :timestamp   => { :name => "timestamp" },
        :time        => { :name => "time" },
        :date        => { :name => "date" },
        :binary      => { :name => "bytea" },
        :boolean     => { :name => "boolean" },
        :xml         => { :name => "xml" },
        :tsvector    => { :name => "tsvector" }
      }

      # Returns 'PostgreSQL' as adapter name for identification purposes.
      def adapter_name
        ADAPTER_NAME
      end

      # Returns +true+, since this connection adapter supports prepared statement
      # caching.
      def supports_statement_cache?
        true
      end

      def supports_index_sort_order?
        true
      end

      # Cache of prepared statements, keyed per process ($$) so a forked child
      # never reuses a parent's server-side statement names. Once +max+ entries
      # are cached the oldest is evicted and DEALLOCATEd server-side.
      class StatementPool < ConnectionAdapters::StatementPool
        def initialize(connection, max)
          super
          @counter = 0
          @cache   = Hash.new { |h,pid| h[pid] = {} }
        end

        def each(&block); cache.each(&block); end
        def key?(key);    cache.key?(key); end
        def [](key);      cache[key]; end
        def length;       cache.length; end

        # Statement names are generated, never derived from SQL, to stay valid
        # PostgreSQL identifiers.
        def next_key
          "a#{@counter + 1}"
        end

        def []=(sql, key)
          while @max <= cache.size
            dealloc(cache.shift.last)
          end
          @counter += 1
          cache[sql] = key
        end

        def clear
          cache.each_value do |stmt_key|
            dealloc stmt_key
          end
          cache.clear
        end

        def delete(sql_key)
          dealloc cache[sql_key]
          cache.delete sql_key
        end

        private

          # The per-process slice of the cache.
          def cache
            @cache[$$]
          end

          def dealloc(key)
            @connection.query "DEALLOCATE #{key}" if connection_active?
          end

          def connection_active?
            @connection.status == PGconn::CONNECTION_OK
          rescue PGError
            false
          end
      end

      # Initializes and connects a PostgreSQL adapter.
      def initialize(connection, logger, connection_parameters, config)
        super(connection, logger)
        @connection_parameters, @config = connection_parameters, config
        @visitor = Arel::Visitors::PostgreSQL.new self

        # @local_tz is initialized as nil to avoid warnings when connect tries to use it
        @local_tz = nil
        @table_alias_length = nil

        connect
        @statements = StatementPool.new @connection,
                                        config.fetch(:statement_limit) { 1000 }

        if postgresql_version < 80200
          raise "Your version of PostgreSQL (#{postgresql_version}) is too old, please upgrade!"
        end

        @local_tz = execute('SHOW TIME ZONE', 'SCHEMA').first["TimeZone"]
      end

      # Clears the prepared statements cache.
      def clear_cache!
        @statements.clear
      end

      # Is this connection alive and ready for queries?
      def active?
        @connection.status == PGconn::CONNECTION_OK
      rescue PGError
        false
      end

      # Close then reopen the connection.
      def reconnect!
        clear_cache!
        @connection.reset
        configure_connection
      end

      def reset!
        clear_cache!
        super
      end

      # Disconnects from the database if already connected. Otherwise, this
      # method does nothing.
      def disconnect!
        clear_cache!
        @connection.close rescue nil
      end

      def native_database_types #:nodoc:
        NATIVE_DATABASE_TYPES
      end

      # Returns true, since this connection adapter supports migrations.
      def supports_migrations?
        true
      end

      # Does PostgreSQL support finding primary key on non-Active Record tables?
      def supports_primary_key? #:nodoc:
        true
      end

      # Enable standard-conforming strings if available.
      # client_min_messages is raised to 'panic' around the SET so servers that
      # don't know the setting stay quiet; the rescue swallows their error.
      def set_standard_conforming_strings
        old, self.client_min_messages = client_min_messages, 'panic'
        execute('SET standard_conforming_strings = on', 'SCHEMA') rescue nil
      ensure
        self.client_min_messages = old
      end

      def supports_insert_with_returning?
        true
      end

      def supports_ddl_transactions?
        true
      end

      # Returns true, since this connection adapter supports savepoints.
      def supports_savepoints?
        true
      end

      # Returns true.
def supports_explain?
        true
      end

      # Returns the configured supported identifier length supported by PostgreSQL
      def table_alias_length
        @table_alias_length ||= query('SHOW max_identifier_length')[0][0].to_i
      end

      # QUOTING ==================================================

      # Escapes binary strings for bytea input to the database.
      def escape_bytea(value)
        @connection.escape_bytea(value) if value
      end

      # Unescapes bytea output from a database to the binary string it represents.
      # NOTE: This is NOT an inverse of escape_bytea! This is only to be used
      # on escaped binary output from the database driver.
      def unescape_bytea(value)
        @connection.unescape_bytea(value) if value
      end

      # Quotes PostgreSQL-specific data types for SQL input; anything that is
      # not PostgreSQL-specific is delegated to the generic quoting in super.
      def quote(value, column = nil) #:nodoc:
        return super unless column

        case value
        when Float
          # Only infinite floats bound for timestamp columns need the special
          # 'infinity' / '-infinity' literal form.
          return super unless value.infinite? && column.type == :datetime
          "'#{value.to_s.downcase}'"
        when Numeric
          return super unless column.sql_type == 'money'
          # Not truly string input, so doesn't require (or allow) escape string syntax.
          "'#{value}'"
        when String
          case column.sql_type
          when 'bytea' then "'#{escape_bytea(value)}'"
          when 'xml'   then "xml '#{quote_string(value)}'"
          when /^bit/
            case value
            when /^[01]*$/      then "B'#{value}'" # Bit-string notation
            when /^[0-9A-F]*$/i then "X'#{value}'" # Hexadecimal notation
            end
          else
            super
          end
        else
          super
        end
      end

      # Casts a value for transmission over the wire. bytea values are sent in
      # binary format (format => 1) instead of as an escaped text literal.
      def type_cast(value, column)
        return super unless column

        case value
        when String
          return super unless 'bytea' == column.sql_type
          { :value => value, :format => 1 }
        else
          super
        end
      end

      # Quotes strings for use in SQL input.
def quote_string(s) #:nodoc:
        @connection.escape(s)
      end

      # Checks the following cases:
      #
      # - table_name
      # - "table.name"
      # - schema_name.table_name
      # - schema_name."table.name"
      # - "schema.name".table_name
      # - "schema.name"."table.name"
      def quote_table_name(name)
        schema, name_part = extract_pg_identifier_from_name(name.to_s)

        unless name_part
          quote_column_name(schema)
        else
          table_name, name_part = extract_pg_identifier_from_name(name_part)
          "#{quote_column_name(schema)}.#{quote_column_name(table_name)}"
        end
      end

      # Quotes column names for use in SQL queries.
      def quote_column_name(name) #:nodoc:
        PGconn.quote_ident(name.to_s)
      end

      # Quote date/time values for use in SQL input. Includes microseconds
      # if the value is a Time responding to usec.
      def quoted_date(value) #:nodoc:
        if value.acts_like?(:time) && value.respond_to?(:usec)
          "#{super}.#{sprintf("%06d", value.usec)}"
        else
          super
        end
      end

      # Set the authorized user for this session
      def session_auth=(user)
        clear_cache!
        exec_query "SET SESSION AUTHORIZATION #{user}"
      end

      # REFERENTIAL INTEGRITY ====================================

      def supports_disable_referential_integrity? #:nodoc:
        true
      end

      # Disables all triggers on every table for the duration of the block and
      # re-enables them afterwards (even if the block raises).
      def disable_referential_integrity #:nodoc:
        if supports_disable_referential_integrity? then
          execute(tables.collect { |name| "ALTER TABLE #{quote_table_name(name)} DISABLE TRIGGER ALL" }.join(";"))
        end
        yield
      ensure
        if supports_disable_referential_integrity? then
          execute(tables.collect { |name| "ALTER TABLE #{quote_table_name(name)} ENABLE TRIGGER ALL" }.join(";"))
        end
      end

      # DATABASE STATEMENTS ======================================

      def explain(arel, binds = [])
        sql = "EXPLAIN #{to_sql(arel)}"
        ExplainPrettyPrinter.new.pp(exec_query(sql, 'EXPLAIN', binds))
      end

      class ExplainPrettyPrinter # :nodoc:
        # Pretty prints the result of a EXPLAIN in a way that resembles the output of the
        # PostgreSQL shell:
        #
        #                                     QUERY PLAN
        #   ------------------------------------------------------------------------------
        #    Nested Loop Left Join  (cost=0.00..37.24 rows=8 width=0)
        #      Join Filter: (posts.user_id = users.id)
        #      ->  Index Scan using users_pkey on users  (cost=0.00..8.27 rows=1 width=4)
        #            Index Cond: (id = 1)
        #      ->  Seq Scan on posts  (cost=0.00..28.88 rows=8 width=4)
        #            Filter: (posts.user_id = 1)
        #   (6 rows)
        #
        def pp(result)
          header = result.columns.first
          lines  = result.rows.map(&:first)

          # We add 2 because there's one char of padding at both sides, note
          # the extra hyphens in the example above.
          width = [header, *lines].map(&:length).max + 2

          pp = []

          pp << header.center(width).rstrip
          pp << '-' * width

          pp += lines.map {|line| " #{line}"}

          nrows = result.rows.length
          rows_label = nrows == 1 ? 'row' : 'rows'
          pp << "(#{nrows} #{rows_label})"

          pp.join("\n") + "\n"
        end
      end

      # Executes a SELECT query and returns an array of rows. Each row is an
      # array of field values.
      def select_rows(sql, name = nil)
        select_raw(sql, name).last
      end

      # Executes an INSERT query and returns the new record's ID
      def insert_sql(sql, name = nil, pk = nil, id_value = nil, sequence_name = nil)
        unless pk
          # Extract the table from the insert sql. Yuck.
          table_ref = extract_table_ref_from_insert_sql(sql)
          pk = primary_key(table_ref) if table_ref
        end

        if pk
          # Use RETURNING so the new id comes back with the INSERT itself.
          select_value("#{sql} RETURNING #{quote_column_name(pk)}")
        else
          super
        end
      end
      alias :create :insert

      # create a 2D array representing the result set
      def result_as_array(res) #:nodoc:
        # check if we have any binary column and if they need escaping
        ftypes = Array.new(res.nfields) do |i|
          [i, res.ftype(i)]
        end

        rows = res.values
        return rows unless ftypes.any? { |_, x|
          x == BYTEA_COLUMN_TYPE_OID || x == MONEY_COLUMN_TYPE_OID
        }

        typehash = ftypes.group_by { |_, type| type }
        binaries = typehash[BYTEA_COLUMN_TYPE_OID] || []
        monies   = typehash[MONEY_COLUMN_TYPE_OID] || []

        rows.each do |row|
          # unescape string passed BYTEA field (OID == 17)
          binaries.each do |index, _|
            row[index] = unescape_bytea(row[index])
          end

          # If this is a money type column and there are any currency symbols,
          # then strip them off. Indeed it would be prettier to do this in
          # PostgreSQLColumn.string_to_decimal but would break form input
          # fields that call value_before_type_cast.
          monies.each do |index, _|
            data = row[index]
            # Because money output is formatted according to the locale, there are two
            # cases to consider (note the decimal separators):
            #  (1) $12,345,678.12
            #  (2) $12.345.678,12
            case data
            when /^-?\D+[\d,]+\.\d{2}$/  # (1)
              data.gsub!(/[^-\d.]/, '')
            when /^-?\D+[\d.]+,\d{2}$/  # (2)
              data.gsub!(/[^-\d,]/, '').sub!(/,/, '.')
            end
          end
        end
      end

      # Queries the database and returns the results in an Array-like object
      def query(sql, name = nil) #:nodoc:
        log(sql, name) do
          result_as_array @connection.async_exec(sql)
        end
      end

      # Executes an SQL statement, returning a PGresult object on success
      # or raising a PGError exception otherwise.
      def execute(sql, name = nil)
        log(sql, name) do
          @connection.async_exec(sql)
        end
      end

      # Bind placeholders are positional in PostgreSQL: $1, $2, ...
      def substitute_at(column, index)
        Arel.sql("$#{index + 1}")
      end

      def exec_query(sql, name = 'SQL', binds = [])
        log(sql, name, binds) do
          result = binds.empty? ? exec_no_cache(sql, binds) :
                                  exec_cache(sql, binds)

          ret = ActiveRecord::Result.new(result.fields, result_as_array(result))
          result.clear
          return ret
        end
      end

      def exec_delete(sql, name = 'SQL', binds = [])
        log(sql, name, binds) do
          result = binds.empty? ? exec_no_cache(sql, binds) :
                                  exec_cache(sql, binds)
          affected = result.cmd_tuples
          result.clear
          affected
        end
      end
      alias :exec_update :exec_delete

      def sql_for_insert(sql, pk, id_value, sequence_name, binds)
        unless pk
          # Extract the table from the insert sql. Yuck.
          table_ref = extract_table_ref_from_insert_sql(sql)
          pk = primary_key(table_ref) if table_ref
        end

        sql = "#{sql} RETURNING #{quote_column_name(pk)}" if pk

        [sql, binds]
      end

      # Executes an UPDATE query and returns the number of affected tuples.
      def update_sql(sql, name = nil)
        super.cmd_tuples
      end

      # Begins a transaction.
      def begin_db_transaction
        execute "BEGIN"
      end

      # Commits a transaction.
      def commit_db_transaction
        execute "COMMIT"
      end

      # Aborts a transaction.
      def rollback_db_transaction
        execute "ROLLBACK"
      end

      def outside_transaction?
        @connection.transaction_status == PGconn::PQTRANS_IDLE
      end

      def create_savepoint
        execute("SAVEPOINT #{current_savepoint_name}")
      end

      def rollback_to_savepoint
        execute("ROLLBACK TO SAVEPOINT #{current_savepoint_name}")
      end

      def release_savepoint
        execute("RELEASE SAVEPOINT #{current_savepoint_name}")
      end

      # SCHEMA STATEMENTS ========================================

      # Drops the database specified on the +name+ attribute
      # and creates it again using the provided +options+.
      def recreate_database(name, options = {}) #:nodoc:
        drop_database(name)
        create_database(name, options)
      end

      # Create a new PostgreSQL database. Options include <tt>:owner</tt>, <tt>:template</tt>,
      # <tt>:encoding</tt>, <tt>:tablespace</tt>, and <tt>:connection_limit</tt> (note that MySQL uses
      # <tt>:charset</tt> while PostgreSQL uses <tt>:encoding</tt>).
#
      # Example:
      #   create_database config[:database], config
      #   create_database 'foo_development', :encoding => 'unicode'
      def create_database(name, options = {})
        options = options.reverse_merge(:encoding => "utf8")

        # Translate each recognised option into its CREATE DATABASE clause;
        # unknown keys contribute nothing.
        fragments = options.symbolize_keys.map do |key, value|
          case key
          when :owner            then " OWNER = \"#{value}\""
          when :template         then " TEMPLATE = \"#{value}\""
          when :encoding         then " ENCODING = '#{value}'"
          when :tablespace       then " TABLESPACE = \"#{value}\""
          when :connection_limit then " CONNECTION LIMIT = #{value}"
          else ""
          end
        end

        execute "CREATE DATABASE #{quote_table_name(name)}#{fragments.join}"
      end

      # Drops a PostgreSQL database.
      #
      # Example:
      #   drop_database 'matt_development'
      def drop_database(name) #:nodoc:
        execute "DROP DATABASE IF EXISTS #{quote_table_name(name)}"
      end

      # Returns the list of all tables in the schema search path or a specified schema.
      def tables(name = nil)
        rows = query(<<-SQL, 'SCHEMA')
          SELECT tablename
          FROM pg_tables
          WHERE schemaname = ANY (current_schemas(false))
        SQL
        rows.collect { |row| row.first }
      end

      # Returns true if table exists.
      # If the schema is not specified as part of +name+ then it will only find tables within
      # the current schema search path (regardless of permissions to access tables in other schemas)
      def table_exists?(name)
        schema, table = Utils.extract_schema_and_table(name.to_s)
        return false unless table

        bind_values      = schema ? [[nil, table], [nil, schema]] : [[nil, table]]
        namespace_filter = schema ? '$2' : 'ANY (current_schemas(false))'

        exec_query(<<-SQL, 'SCHEMA', bind_values).rows.first[0].to_i > 0
          SELECT COUNT(*)
          FROM pg_class c
          LEFT JOIN pg_namespace n ON n.oid = c.relnamespace
          WHERE c.relkind in ('v','r')
          AND c.relname = $1
          AND n.nspname = #{namespace_filter}
        SQL
      end

      # Returns true if schema exists.
      def schema_exists?(name)
        count = exec_query(<<-SQL, 'SCHEMA', [[nil, name]]).rows.first[0]
          SELECT COUNT(*)
          FROM pg_namespace
          WHERE nspname = $1
        SQL
        count.to_i > 0
      end

      # Returns an array of indexes for the given table.
def indexes(table_name, name = nil)
        result = query(<<-SQL, name)
          SELECT distinct i.relname, d.indisunique, d.indkey, pg_get_indexdef(d.indexrelid), t.oid
          FROM pg_class t
          INNER JOIN pg_index d ON t.oid = d.indrelid
          INNER JOIN pg_class i ON d.indexrelid = i.oid
          WHERE i.relkind = 'i'
            AND d.indisprimary = 'f'
            AND t.relname = '#{table_name}'
            AND i.relnamespace IN (SELECT oid FROM pg_namespace WHERE nspname = ANY (current_schemas(false)) )
          ORDER BY i.relname
        SQL

        result.map do |row|
          index_name = row[0]
          unique = row[1] == 't'
          indkey = row[2].split(" ")  # indkey is a space-separated list of attnums
          inddef = row[3]
          oid = row[4]

          columns = Hash[query(<<-SQL, "Columns for index #{row[0]} on #{table_name}")]
          SELECT a.attnum, a.attname
          FROM pg_attribute a
          WHERE a.attrelid = #{oid}
          AND a.attnum IN (#{indkey.join(",")})
          SQL

          # values_at preserves the index's column order; expression columns
          # have no attname and drop out via compact.
          column_names = columns.values_at(*indkey).compact

          # add info on sort order for columns (only desc order is explicitly specified, asc is the default)
          desc_order_columns = inddef.scan(/(\w+) DESC/).flatten
          orders = desc_order_columns.any? ? Hash[desc_order_columns.map {|order_column| [order_column, :desc]}] : {}

          # Expression-only indexes yield no column names and are skipped.
          column_names.empty? ? nil : IndexDefinition.new(table_name, index_name, unique, column_names, [], orders)
        end.compact
      end

      # Returns the list of all column definitions for a table.
      def columns(table_name, name = nil)
        # Limit, precision, and scale are all handled by the superclass.
        column_definitions(table_name).collect do |column_name, type, default, notnull|
          PostgreSQLColumn.new(column_name, default, type, notnull == 'f')
        end
      end

      # Returns the current database name.
      def current_database
        query('select current_database()')[0][0]
      end

      # Returns the current schema name.
      def current_schema
        query('SELECT current_schema', 'SCHEMA')[0][0]
      end

      # Returns the current database encoding format.
def encoding
        # Ask the server for the human-readable encoding of this database.
        row = query(<<-end_sql)[0]
          SELECT pg_encoding_to_char(pg_database.encoding) FROM pg_database
          WHERE pg_database.datname LIKE '#{current_database}'
        end_sql
        row[0]
      end

      # Sets the schema search path to a string of comma-separated schema names.
      # Names beginning with $ have to be quoted (e.g. $user => '$user').
      # See: http://www.postgresql.org/docs/current/static/ddl-schemas.html
      #
      # This should be not be called manually but set in database.yml.
      def schema_search_path=(schema_csv)
        return unless schema_csv
        execute "SET search_path TO #{schema_csv}"
        @schema_search_path = schema_csv
      end

      # Returns the active schema search path.
      def schema_search_path
        @schema_search_path ||= query('SHOW search_path', 'SCHEMA')[0][0]
      end

      # Returns the current client message level.
      def client_min_messages
        query('SHOW client_min_messages', 'SCHEMA')[0][0]
      end

      # Set the client message level.
      def client_min_messages=(level)
        execute("SET client_min_messages TO '#{level}'", 'SCHEMA')
      end

      # Returns the sequence name for a table's primary key or some other specified key.
      # Falls back to the Rails naming convention when the server can't be asked.
      def default_sequence_name(table_name, pk = nil) #:nodoc:
        serial_sequence(table_name, pk || 'id').split('.').last
      rescue ActiveRecord::StatementInvalid
        "#{table_name}_#{pk || 'id'}_seq"
      end

      # Asks the server which sequence (if any) backs a serial column.
      def serial_sequence(table, column)
        exec_query(<<-end_sql, 'SCHEMA', [[nil, table], [nil, column]]).rows.first.first
          SELECT pg_get_serial_sequence($1, $2)
        end_sql
      end

      # Resets the sequence of a table's primary key to the maximum value.
def reset_pk_sequence!(table, pk = nil, sequence = nil) #:nodoc:
        unless pk and sequence
          default_pk, default_sequence = pk_and_sequence_for(table)

          pk ||= default_pk
          sequence ||= default_sequence
        end

        if @logger && pk && !sequence
          @logger.warn "#{table} has primary key #{pk} with no default sequence"
        end

        if pk && sequence
          quoted_sequence = quote_table_name(sequence)

          # setval(..., false) means the next nextval returns exactly this value.
          select_value <<-end_sql, 'Reset sequence'
            SELECT setval('#{quoted_sequence}', (SELECT COALESCE(MAX(#{quote_column_name pk})+(SELECT increment_by FROM #{quoted_sequence}), (SELECT min_value FROM #{quoted_sequence})) FROM #{quote_table_name(table)}), false)
          end_sql
        end
      end

      # Returns a table's primary key and belonging sequence.
      # Deliberately best-effort: any failure (no pk, no sequence, bad table
      # name) yields nil rather than raising.
      def pk_and_sequence_for(table) #:nodoc:
        # First try looking for a sequence with a dependency on the
        # given table's primary key.
        result = exec_query(<<-end_sql, 'SCHEMA').rows.first
          SELECT attr.attname, ns.nspname, seq.relname
          FROM pg_class seq
          INNER JOIN pg_depend dep ON seq.oid = dep.objid
          INNER JOIN pg_attribute attr ON attr.attrelid = dep.refobjid AND attr.attnum = dep.refobjsubid
          INNER JOIN pg_constraint cons ON attr.attrelid = cons.conrelid AND attr.attnum = cons.conkey[1]
          INNER JOIN pg_namespace ns ON seq.relnamespace = ns.oid
          WHERE seq.relkind = 'S'
            AND cons.contype = 'p'
            AND dep.refobjid = '#{quote_table_name(table)}'::regclass
        end_sql

        # [primary_key, sequence]
        if result.second == 'public' then
          sequence = result.last
        else
          # Qualify the sequence with its schema when it's not in public.
          sequence = result.second+'.'+result.last
        end

        [result.first, sequence]
      rescue
        nil
      end

      # Returns just a table's primary key
      def primary_key(table)
        row = exec_query(<<-end_sql, 'SCHEMA', [[nil, table]]).rows.first
          SELECT DISTINCT(attr.attname)
          FROM pg_attribute attr
          INNER JOIN pg_depend dep ON attr.attrelid = dep.refobjid AND attr.attnum = dep.refobjsubid
          INNER JOIN pg_constraint cons ON attr.attrelid = cons.conrelid AND attr.attnum = cons.conkey[1]
          WHERE cons.contype = 'p'
            AND dep.refobjid = $1::regclass
        end_sql

        row && row.first
      end

      # Renames a table.
#
      # Example:
      #   rename_table('octopuses', 'octopi')
      def rename_table(name, new_name)
        clear_cache!
        execute "ALTER TABLE #{quote_table_name(name)} RENAME TO #{quote_table_name(new_name)}"
      end

      # Adds a new column to the named table.
      # See TableDefinition#column for details of the options you can use.
      def add_column(table_name, column_name, type, options = {})
        clear_cache!
        add_column_sql = "ALTER TABLE #{quote_table_name(table_name)} ADD COLUMN #{quote_column_name(column_name)} #{type_to_sql(type, options[:limit], options[:precision], options[:scale])}"
        add_column_options!(add_column_sql, options)

        execute add_column_sql
      end

      # Changes the column of a table.
      def change_column(table_name, column_name, type, options = {})
        clear_cache!
        quoted_table_name = quote_table_name(table_name)

        execute "ALTER TABLE #{quoted_table_name} ALTER COLUMN #{quote_column_name(column_name)} TYPE #{type_to_sql(type, options[:limit], options[:precision], options[:scale])}"

        change_column_default(table_name, column_name, options[:default]) if options_include_default?(options)
        change_column_null(table_name, column_name, options[:null], options[:default]) if options.key?(:null)
      end

      # Changes the default value of a table column.
      def change_column_default(table_name, column_name, default)
        clear_cache!
        execute "ALTER TABLE #{quote_table_name(table_name)} ALTER COLUMN #{quote_column_name(column_name)} SET DEFAULT #{quote(default)}"
      end

      # Adds or drops a NOT NULL constraint. When making a column NOT NULL,
      # existing NULLs are first backfilled with +default+ (if given) so the
      # ALTER cannot fail on old rows.
      def change_column_null(table_name, column_name, null, default = nil)
        clear_cache!
        unless null || default.nil?
          execute("UPDATE #{quote_table_name(table_name)} SET #{quote_column_name(column_name)}=#{quote(default)} WHERE #{quote_column_name(column_name)} IS NULL")
        end
        execute("ALTER TABLE #{quote_table_name(table_name)} ALTER #{quote_column_name(column_name)} #{null ? 'DROP' : 'SET'} NOT NULL")
      end

      # Renames a column in a table.
      def rename_column(table_name, column_name, new_column_name)
        clear_cache!
        execute "ALTER TABLE #{quote_table_name(table_name)} RENAME COLUMN #{quote_column_name(column_name)} TO #{quote_column_name(new_column_name)}"
      end

      def remove_index!(table_name, index_name) #:nodoc:
        execute "DROP INDEX #{quote_table_name(index_name)}"
      end

      def rename_index(table_name, old_name, new_name)
        execute "ALTER INDEX #{quote_column_name(old_name)} RENAME TO #{quote_table_name(new_name)}"
      end

      def index_name_length
        63
      end

      # Maps logical Rails types to PostgreSQL-specific data types.
      def type_to_sql(type, limit = nil, precision = nil, scale = nil)
        return super unless type.to_s == 'integer'
        return 'integer' unless limit

        case limit
          when 1, 2; 'smallint'
          when 3, 4; 'integer'
          when 5..8; 'bigint'
          else raise(ActiveRecordError, "No integer type has byte size #{limit}. Use a numeric with precision 0 instead.")
        end
      end

      # Returns a SELECT DISTINCT clause for a given set of columns and a given ORDER BY clause.
      #
      # PostgreSQL requires the ORDER BY columns in the select list for distinct queries, and
      # requires that the ORDER BY include the distinct column.
      #
      #   distinct("posts.id", "posts.created_at desc")
      def distinct(columns, orders) #:nodoc:
        return "DISTINCT #{columns}" if orders.empty?

        # Construct a clean list of column names from the ORDER BY clause, removing
        # any ASC/DESC modifiers
        order_columns = orders.collect { |s| s.gsub(/\s+(ASC|DESC)\s*/i, '') }
        order_columns.delete_if { |c| c.blank? }
        order_columns = order_columns.zip((0...order_columns.size).to_a).map { |s,i| "#{s} AS alias_#{i}" }

        "DISTINCT #{columns}, #{order_columns * ', '}"
      end

      module Utils
        extend self

        # Returns an array of <tt>[schema_name, table_name]</tt> extracted from +name+.
        # +schema_name+ is nil if not specified in +name+.
# +schema_name+ and +table_name+ exclude surrounding quotes (regardless of whether provided in +name+)
        # +name+ supports the range of schema/table references understood by PostgreSQL, for example:
        #
        # * <tt>table_name</tt>
        # * <tt>"table.name"</tt>
        # * <tt>schema_name.table_name</tt>
        # * <tt>schema_name."table.name"</tt>
        # * <tt>"schema.name"."table name"</tt>
        def extract_schema_and_table(name)
          table, schema = name.scan(/[^".\s]+|"[^"]*"/)[0..1].collect{|m| m.gsub(/(^"|"$)/,'') }.reverse
          [schema, table]
        end
      end

      protected

        # Returns the version of the connected PostgreSQL server.
        def postgresql_version
          @connection.server_version
        end

        # Maps driver-level errors onto ActiveRecord's exception hierarchy by
        # matching on the server's error message text.
        def translate_exception(exception, message)
          case exception.message
          when /duplicate key value violates unique constraint/
            RecordNotUnique.new(message, exception)
          when /violates foreign key constraint/
            InvalidForeignKey.new(message, exception)
          else
            super
          end
        end

      private

        FEATURE_NOT_SUPPORTED = "0A000" # :nodoc:

        # NOTE(review): binds is accepted but unused — unbound statements are
        # sent as plain SQL text.
        def exec_no_cache(sql, binds)
          @connection.async_exec(sql)
        end

        def exec_cache(sql, binds)
          begin
            stmt_key = prepare_statement sql

            # Clear the queue
            @connection.get_last_result
            @connection.send_query_prepared(stmt_key, binds.map { |col, val|
              type_cast(val, col)
            })
            @connection.block
            @connection.get_last_result
          rescue PGError => e
            # Get the PG code for the failure. Annoyingly, the code for
            # prepared statements whose return value may have changed is
            # FEATURE_NOT_SUPPORTED. Check here for more details:
            # http://git.postgresql.org/gitweb/?p=postgresql.git;a=blob;f=src/backend/utils/cache/plancache.c#l573
            code = e.result.result_error_field(PGresult::PG_DIAG_SQLSTATE)
            if FEATURE_NOT_SUPPORTED == code
              # The cached plan is stale; drop it and re-prepare once.
              @statements.delete sql_key(sql)
              retry
            else
              raise e
            end
          end
        end

        # Returns the statement identifier for the client side cache
        # of statements
        def sql_key(sql)
          "#{schema_search_path}-#{sql}"
        end

        # Prepare the statement if it hasn't been prepared, return
        # the statement key.
        def prepare_statement(sql)
          sql_key = sql_key(sql)
          unless @statements.key? sql_key
            nextkey = @statements.next_key
            @connection.prepare nextkey, sql
            @statements[sql_key] = nextkey
          end
          @statements[sql_key]
        end

        # The internal PostgreSQL identifier of the money data type.
        MONEY_COLUMN_TYPE_OID = 790 #:nodoc:
        # The internal PostgreSQL identifier of the BYTEA data type.
        BYTEA_COLUMN_TYPE_OID = 17 #:nodoc:

        # Connects to a PostgreSQL server and sets up the adapter depending on the
        # connected server's characteristics.
        def connect
          @connection = PGconn.connect(@connection_parameters)

          # Money type has a fixed precision of 10 in PostgreSQL 8.2 and below, and as of
          # PostgreSQL 8.3 it has a fixed precision of 19. PostgreSQLColumn.extract_precision
          # should know about this but can't detect it there, so deal with it here.
          PostgreSQLColumn.money_precision = (postgresql_version >= 80300) ? 19 : 10

          configure_connection
        end

        # Configures the encoding, verbosity, schema search path, and time zone of the connection.
        # This is called by #connect and should not be called manually.
        def configure_connection
          if @config[:encoding]
            @connection.set_client_encoding(@config[:encoding])
          end
          self.client_min_messages = @config[:min_messages] if @config[:min_messages]
          self.schema_search_path = @config[:schema_search_path] || @config[:schema_order]

          # Use standard-conforming strings if available so we don't have to do the E'...' dance.
          set_standard_conforming_strings

          # If using Active Record's time zone support configure the connection to return
          # TIMESTAMP WITH ZONE types in UTC.
          if ActiveRecord::Base.default_timezone == :utc
            execute("SET time zone 'UTC'", 'SCHEMA')
          elsif @local_tz
            execute("SET time zone '#{@local_tz}'", 'SCHEMA')
          end
        end

        # Returns the current ID of a table's sequence.
def last_insert_id(sequence_name) #:nodoc:
          r = exec_query("SELECT currval($1)", 'SQL', [[nil, sequence_name]])
          Integer(r.rows.first.first)
        end

        # Executes a SELECT query and returns the results, performing any data type
        # conversions that are required to be performed here instead of in PostgreSQLColumn.
        def select(sql, name = nil, binds = [])
          exec_query(sql, name, binds).to_a
        end

        # Runs +sql+ and returns [field_names, rows]; the PGresult is cleared
        # eagerly to release driver-held memory.
        def select_raw(sql, name = nil)
          res = execute(sql, name)
          results = result_as_array(res)
          fields = res.fields
          res.clear
          return fields, results
        end

        # Returns the list of a table's column names, data types, and default values.
        #
        # The underlying query is roughly:
        #  SELECT column.name, column.type, default.value
        #    FROM column LEFT JOIN default
        #      ON column.table_id = default.table_id
        #     AND column.num = default.column_num
        #   WHERE column.table_id = get_table_id('table_name')
        #     AND column.num > 0
        #     AND NOT column.is_dropped
        #   ORDER BY column.num
        #
        # If the table name is not prefixed with a schema, the database will
        # take the first match from the schema search path.
        #
        # Query implementation notes:
        #  - format_type includes the column size constraint, e.g. varchar(50)
        #  - ::regclass is a function that gives the id for a table name
        def column_definitions(table_name) #:nodoc:
          exec_query(<<-end_sql, 'SCHEMA').rows
            SELECT a.attname, format_type(a.atttypid, a.atttypmod), d.adsrc, a.attnotnull
              FROM pg_attribute a LEFT JOIN pg_attrdef d
                ON a.attrelid = d.adrelid AND a.attnum = d.adnum
             WHERE a.attrelid = '#{quote_table_name(table_name)}'::regclass
               AND a.attnum > 0 AND NOT a.attisdropped
             ORDER BY a.attnum
          end_sql
        end

        # Splits the leading identifier (quoted or bare) off +name+,
        # returning [identifier, remainder-or-nil].
        def extract_pg_identifier_from_name(name)
          match_data = name.start_with?('"') ? name.match(/\"([^\"]+)\"/) : name.match(/([^\.]+)/)

          if match_data
            rest = name[match_data[0].length, name.length]
            rest = rest[1, rest.length] if rest.start_with? "."
            [match_data[1], (rest.length > 0 ? rest : nil)]
          end
        end

        # Pulls the table reference out of an INSERT statement's text.
        def extract_table_ref_from_insert_sql(sql)
          sql[/into\s+([^\(]*).*values\s*\(/i]
          $1.strip if $1
        end

        def table_definition
          TableDefinition.new(self)
        end
    end
  end
end
require 'active_record/connection_adapters/abstract_adapter'
require 'active_support/core_ext/object/blank'

# Make sure we're using pg high enough for PGResult#values
gem 'pg', '~> 0.11'
require 'pg'

module ActiveRecord
  class Base
    # Establishes a connection to the database that's used by all Active Record objects
    def self.postgresql_connection(config) # :nodoc:
      config = config.symbolize_keys
      host     = config[:host]
      port     = config[:port] || 5432
      username = config[:username].to_s if config[:username]
      password = config[:password].to_s if config[:password]

      if config.key?(:database)
        database = config[:database]
      else
        raise ArgumentError, "No database specified. Missing argument: database."
      end

      # The postgres drivers don't allow the creation of an unconnected PGconn object,
      # so just pass a nil connection object for the time being.
      ConnectionAdapters::PostgreSQLAdapter.new(nil, logger, [host, port, nil, nil, database, username, password], config)
    end
  end

  module ConnectionAdapters
    # PostgreSQL-specific extensions to column definitions in a table.
    class PostgreSQLColumn < Column #:nodoc:
      # Instantiates a new PostgreSQL column definition in a table.
      def initialize(name, default, sql_type = nil, null = true)
        super(name, self.class.extract_value_from_default(default), sql_type, null)
      end

      # :stopdoc:
      class << self
        attr_accessor :money_precision

        # Maps PostgreSQL's 'infinity' / '-infinity' timestamps onto Ruby
        # floats; everything else falls through to the generic parser.
        def string_to_time(string)
          return string unless String === string

          case string
          when 'infinity'  then  1.0 / 0.0
          when '-infinity' then -1.0 / 0.0
          else
            super
          end
        end
      end
      # :startdoc:

      private
        def extract_limit(sql_type)
          case sql_type
          when /^bigint/i;   8
          when /^smallint/i; 2
          else super
          end
        end

        # Extracts the scale from PostgreSQL-specific data types.
        def extract_scale(sql_type)
          # Money type has a fixed scale of 2.
          sql_type =~ /^money/ ? 2 : super
        end

        # Extracts the precision from PostgreSQL-specific data types.
def extract_precision(sql_type) if sql_type == 'money' self.class.money_precision else super end end # Maps PostgreSQL-specific data types to logical Rails types. def simplified_type(field_type) case field_type # Numeric and monetary types when /^(?:real|double precision)$/ :float # Monetary types when 'money' :decimal # Character types when /^(?:character varying|bpchar)(?:\(\d+\))?$/ :string # Binary data types when 'bytea' :binary # Date/time types when /^timestamp with(?:out)? time zone$/ :datetime when 'interval' :string # Geometric types when /^(?:point|line|lseg|box|"?path"?|polygon|circle)$/ :string # Network address types when /^(?:cidr|inet|macaddr)$/ :string # Bit strings when /^bit(?: varying)?(?:\(\d+\))?$/ :string # XML type when 'xml' :xml # tsvector type when 'tsvector' :tsvector # Arrays when /^\D+\[\]$/ :string # Object identifier types when 'oid' :integer # UUID type when 'uuid' :string # Small and big integer types when /^(?:small|big)int$/ :integer # Pass through all types that are not specific to PostgreSQL. else super end end # Extracts the value from a PostgreSQL column default definition. def self.extract_value_from_default(default) case default # This is a performance optimization for Ruby 1.9.2 in development. # If the value is nil, we return nil straight away without checking # the regular expressions. If we check each regular expression, # Regexp#=== will call NilClass#to_str, which will trigger # method_missing (defined by whiny nil in ActiveSupport) which # makes this method very very slow. when NilClass nil # Numeric types when /\A\(?(-?\d+(\.\d*)?\)?)\z/ $1 # Character types when /\A'(.*)'::(?:character varying|bpchar|text)\z/m $1 # Character types (8.1 formatting) when /\AE'(.*)'::(?:character varying|bpchar|text)\z/m $1.gsub(/\\(\d\d\d)/) { $1.oct.chr } # Binary data types when /\A'(.*)'::bytea\z/m $1 # Date/time types when /\A'(.+)'::(?:time(?:stamp)? with(?:out)? 
time zone|date)\z/ $1 when /\A'(.*)'::interval\z/ $1 # Boolean type when 'true' true when 'false' false # Geometric types when /\A'(.*)'::(?:point|line|lseg|box|"?path"?|polygon|circle)\z/ $1 # Network address types when /\A'(.*)'::(?:cidr|inet|macaddr)\z/ $1 # Bit string types when /\AB'(.*)'::"?bit(?: varying)?"?\z/ $1 # XML type when /\A'(.*)'::xml\z/m $1 # Arrays when /\A'(.*)'::"?\D+"?\[\]\z/ $1 # Object identifier types when /\A-?\d+\z/ $1 else # Anything else is blank, some user type, or some function # and we can't know the value of that, so return nil. nil end end end # The PostgreSQL adapter works both with the native C (http://ruby.scripting.ca/postgres/) and the pure # Ruby (available both as gem and from http://rubyforge.org/frs/?group_id=234&release_id=1944) drivers. # # Options: # # * <tt>:host</tt> - Defaults to "localhost". # * <tt>:port</tt> - Defaults to 5432. # * <tt>:username</tt> - Defaults to nothing. # * <tt>:password</tt> - Defaults to nothing. # * <tt>:database</tt> - The name of the database. No default, must be provided. # * <tt>:schema_search_path</tt> - An optional schema search path for the connection given # as a string of comma-separated schema names. This is backward-compatible with the <tt>:schema_order</tt> option. # * <tt>:encoding</tt> - An optional client encoding that is used in a <tt>SET client_encoding TO # <encoding></tt> call on the connection. # * <tt>:min_messages</tt> - An optional client min messages that is used in a # <tt>SET client_min_messages TO <min_messages></tt> call on the connection. class PostgreSQLAdapter < AbstractAdapter class TableDefinition < ActiveRecord::ConnectionAdapters::TableDefinition def xml(*args) options = args.extract_options! column(args[0], 'xml', options) end def tsvector(*args) options = args.extract_options! 
column(args[0], 'tsvector', options) end end ADAPTER_NAME = 'PostgreSQL' NATIVE_DATABASE_TYPES = { :primary_key => "serial primary key", :string => { :name => "character varying", :limit => 255 }, :text => { :name => "text" }, :integer => { :name => "integer" }, :float => { :name => "float" }, :decimal => { :name => "decimal" }, :datetime => { :name => "timestamp" }, :timestamp => { :name => "timestamp" }, :time => { :name => "time" }, :date => { :name => "date" }, :binary => { :name => "bytea" }, :boolean => { :name => "boolean" }, :xml => { :name => "xml" }, :tsvector => { :name => "tsvector" } } # Returns 'PostgreSQL' as adapter name for identification purposes. def adapter_name ADAPTER_NAME end # Returns +true+, since this connection adapter supports prepared statement # caching. def supports_statement_cache? true end # Initializes and connects a PostgreSQL adapter. def initialize(connection, logger, connection_parameters, config) super(connection, logger) @connection_parameters, @config = connection_parameters, config # @local_tz is initialized as nil to avoid warnings when connect tries to use it @local_tz = nil @table_alias_length = nil @statements = {} connect if postgresql_version < 80200 raise "Your version of PostgreSQL (#{postgresql_version}) is too old, please upgrade!" end @local_tz = execute('SHOW TIME ZONE', 'SCHEMA').first["TimeZone"] end # Clears the prepared statements cache. def clear_cache! @statements.each_value do |value| @connection.query "DEALLOCATE #{value}" end @statements.clear end # Is this connection alive and ready for queries? def active? @connection.status == PGconn::CONNECTION_OK rescue PGError false end # Close then reopen the connection. def reconnect! clear_cache! @connection.reset configure_connection end def reset! clear_cache! super end # Disconnects from the database if already connected. Otherwise, this # method does nothing. def disconnect! clear_cache! 
@connection.close rescue nil end def native_database_types #:nodoc: NATIVE_DATABASE_TYPES end # Returns true, since this connection adapter supports migrations. def supports_migrations? true end # Does PostgreSQL support finding primary key on non-Active Record tables? def supports_primary_key? #:nodoc: true end # Enable standard-conforming strings if available. def set_standard_conforming_strings old, self.client_min_messages = client_min_messages, 'panic' execute('SET standard_conforming_strings = on', 'SCHEMA') rescue nil ensure self.client_min_messages = old end def supports_insert_with_returning? true end def supports_ddl_transactions? true end # Returns true, since this connection adapter supports savepoints. def supports_savepoints? true end # Returns the configured supported identifier length supported by PostgreSQL def table_alias_length @table_alias_length ||= query('SHOW max_identifier_length')[0][0].to_i end # QUOTING ================================================== # Escapes binary strings for bytea input to the database. def escape_bytea(value) @connection.escape_bytea(value) if value end # Unescapes bytea output from a database to the binary string it represents. # NOTE: This is NOT an inverse of escape_bytea! This is only to be used # on escaped binary output from database drive. def unescape_bytea(value) @connection.unescape_bytea(value) if value end # Quotes PostgreSQL-specific data types for SQL input. def quote(value, column = nil) #:nodoc: return super unless column case value when Float return super unless value.infinite? && column.type == :datetime "'#{value.to_s.downcase}'" when Numeric return super unless column.sql_type == 'money' # Not truly string input, so doesn't require (or allow) escape string syntax. 
"'#{value}'" when String case column.sql_type when 'bytea' then "'#{escape_bytea(value)}'" when 'xml' then "xml '#{quote_string(value)}'" when /^bit/ case value when /^[01]*$/ then "B'#{value}'" # Bit-string notation when /^[0-9A-F]*$/i then "X'#{value}'" # Hexadecimal notation end else super end else super end end def type_cast(value, column) return super unless column case value when String return super unless 'bytea' == column.sql_type { :value => value, :format => 1 } else super end end # Quotes strings for use in SQL input. def quote_string(s) #:nodoc: @connection.escape(s) end # Checks the following cases: # # - table_name # - "table.name" # - schema_name.table_name # - schema_name."table.name" # - "schema.name".table_name # - "schema.name"."table.name" def quote_table_name(name) schema, name_part = extract_pg_identifier_from_name(name.to_s) unless name_part quote_column_name(schema) else table_name, name_part = extract_pg_identifier_from_name(name_part) "#{quote_column_name(schema)}.#{quote_column_name(table_name)}" end end # Quotes column names for use in SQL queries. def quote_column_name(name) #:nodoc: PGconn.quote_ident(name.to_s) end # Quote date/time values for use in SQL input. Includes microseconds # if the value is a Time responding to usec. def quoted_date(value) #:nodoc: if value.acts_like?(:time) && value.respond_to?(:usec) "#{super}.#{sprintf("%06d", value.usec)}" else super end end # Set the authorized user for this session def session_auth=(user) clear_cache! exec_query "SET SESSION AUTHORIZATION #{user}" end # REFERENTIAL INTEGRITY ==================================== def supports_disable_referential_integrity? #:nodoc: true end def disable_referential_integrity #:nodoc: if supports_disable_referential_integrity? then execute(tables.collect { |name| "ALTER TABLE #{quote_table_name(name)} DISABLE TRIGGER ALL" }.join(";")) end yield ensure if supports_disable_referential_integrity? 
then execute(tables.collect { |name| "ALTER TABLE #{quote_table_name(name)} ENABLE TRIGGER ALL" }.join(";")) end end # DATABASE STATEMENTS ====================================== # Executes a SELECT query and returns an array of rows. Each row is an # array of field values. def select_rows(sql, name = nil) select_raw(sql, name).last end # Executes an INSERT query and returns the new record's ID def insert_sql(sql, name = nil, pk = nil, id_value = nil, sequence_name = nil) unless pk # Extract the table from the insert sql. Yuck. table_ref = extract_table_ref_from_insert_sql(sql) pk = primary_key(table_ref) if table_ref end if pk select_value("#{sql} RETURNING #{quote_column_name(pk)}") else super end end alias :create :insert # create a 2D array representing the result set def result_as_array(res) #:nodoc: # check if we have any binary column and if they need escaping ftypes = Array.new(res.nfields) do |i| [i, res.ftype(i)] end rows = res.values return rows unless ftypes.any? { |_, x| x == BYTEA_COLUMN_TYPE_OID || x == MONEY_COLUMN_TYPE_OID } typehash = ftypes.group_by { |_, type| type } binaries = typehash[BYTEA_COLUMN_TYPE_OID] || [] monies = typehash[MONEY_COLUMN_TYPE_OID] || [] rows.each do |row| # unescape string passed BYTEA field (OID == 17) binaries.each do |index, _| row[index] = unescape_bytea(row[index]) end # If this is a money type column and there are any currency symbols, # then strip them off. Indeed it would be prettier to do this in # PostgreSQLColumn.string_to_decimal but would break form input # fields that call value_before_type_cast. 
monies.each do |index, _| data = row[index] # Because money output is formatted according to the locale, there are two # cases to consider (note the decimal separators): # (1) $12,345,678.12 # (2) $12.345.678,12 case data when /^-?\D+[\d,]+\.\d{2}$/ # (1) data.gsub!(/[^-\d.]/, '') when /^-?\D+[\d.]+,\d{2}$/ # (2) data.gsub!(/[^-\d,]/, '').sub!(/,/, '.') end end end end # Queries the database and returns the results in an Array-like object def query(sql, name = nil) #:nodoc: log(sql, name) do result_as_array @connection.async_exec(sql) end end # Executes an SQL statement, returning a PGresult object on success # or raising a PGError exception otherwise. def execute(sql, name = nil) log(sql, name) do @connection.async_exec(sql) end end def substitute_at(column, index) Arel.sql("$#{index + 1}") end def exec_query(sql, name = 'SQL', binds = []) log(sql, name, binds) do result = binds.empty? ? exec_no_cache(sql, binds) : exec_cache(sql, binds) ret = ActiveRecord::Result.new(result.fields, result_as_array(result)) result.clear return ret end end def exec_delete(sql, name = 'SQL', binds = []) log(sql, name, binds) do result = binds.empty? ? exec_no_cache(sql, binds) : exec_cache(sql, binds) affected = result.cmd_tuples result.clear affected end end alias :exec_update :exec_delete def sql_for_insert(sql, pk, id_value, sequence_name, binds) unless pk # Extract the table from the insert sql. Yuck. table_ref = extract_table_ref_from_insert_sql(sql) pk = primary_key(table_ref) if table_ref end sql = "#{sql} RETURNING #{quote_column_name(pk)}" if pk [sql, binds] end # Executes an UPDATE query and returns the number of affected tuples. def update_sql(sql, name = nil) super.cmd_tuples end # Begins a transaction. def begin_db_transaction execute "BEGIN" end # Commits a transaction. def commit_db_transaction execute "COMMIT" end # Aborts a transaction. def rollback_db_transaction execute "ROLLBACK" end def outside_transaction? 
@connection.transaction_status == PGconn::PQTRANS_IDLE end def create_savepoint execute("SAVEPOINT #{current_savepoint_name}") end def rollback_to_savepoint execute("ROLLBACK TO SAVEPOINT #{current_savepoint_name}") end def release_savepoint execute("RELEASE SAVEPOINT #{current_savepoint_name}") end # SCHEMA STATEMENTS ======================================== def recreate_database(name) #:nodoc: drop_database(name) create_database(name) end # Create a new PostgreSQL database. Options include <tt>:owner</tt>, <tt>:template</tt>, # <tt>:encoding</tt>, <tt>:tablespace</tt>, and <tt>:connection_limit</tt> (note that MySQL uses # <tt>:charset</tt> while PostgreSQL uses <tt>:encoding</tt>). # # Example: # create_database config[:database], config # create_database 'foo_development', :encoding => 'unicode' def create_database(name, options = {}) options = options.reverse_merge(:encoding => "utf8") option_string = options.symbolize_keys.sum do |key, value| case key when :owner " OWNER = \"#{value}\"" when :template " TEMPLATE = \"#{value}\"" when :encoding " ENCODING = '#{value}'" when :tablespace " TABLESPACE = \"#{value}\"" when :connection_limit " CONNECTION LIMIT = #{value}" else "" end end execute "CREATE DATABASE #{quote_table_name(name)}#{option_string}" end # Drops a PostgreSQL database. # # Example: # drop_database 'matt_development' def drop_database(name) #:nodoc: execute "DROP DATABASE IF EXISTS #{quote_table_name(name)}" end # Returns the list of all tables in the schema search path or a specified schema. def tables(name = nil) query(<<-SQL, 'SCHEMA').map { |row| row[0] } SELECT tablename FROM pg_tables WHERE schemaname = ANY (current_schemas(false)) SQL end # Returns true if table exists. 
# If the schema is not specified as part of +name+ then it will only find tables within
      # the current schema search path (regardless of permissions to access tables in other schemas)
      def table_exists?(name)
        schema, table = Utils.extract_schema_and_table(name.to_s)
        return false unless table

        binds = [[nil, table]]
        binds << [nil, schema] if schema

        exec_query(<<-SQL, 'SCHEMA', binds).rows.first[0].to_i > 0
            SELECT COUNT(*)
            FROM pg_class c
            LEFT JOIN pg_namespace n ON n.oid = c.relnamespace
            WHERE c.relkind in ('v','r')
            AND c.relname = $1
            AND n.nspname = #{schema ? '$2' : 'ANY (current_schemas(false))'}
        SQL
      end

      # Returns true if schema exists.
      def schema_exists?(name)
        exec_query(<<-SQL, 'SCHEMA', [[nil, name]]).rows.first[0].to_i > 0
          SELECT COUNT(*)
          FROM pg_namespace
          WHERE nspname = $1
        SQL
      end

      # Returns an array of indexes for the given table.
      def indexes(table_name, name = nil)
        schemas = schema_search_path.split(/,/).map { |p| quote(p) }.join(',')
        result = query(<<-SQL, name)
          SELECT distinct i.relname, d.indisunique, d.indkey, t.oid
          FROM pg_class t
          INNER JOIN pg_index d ON t.oid = d.indrelid
          INNER JOIN pg_class i ON d.indexrelid = i.oid
          WHERE i.relkind = 'i'
            AND d.indisprimary = 'f'
            AND t.relname = '#{table_name}'
            AND i.relnamespace IN (SELECT oid FROM pg_namespace WHERE nspname IN (#{schemas}) )
          ORDER BY i.relname
        SQL

        result.map do |row|
          index_name = row[0]
          unique = row[1] == 't'
          indkey = row[2].split(" ")
          oid = row[3]

          # indkey holds attribute numbers; resolve them to column names.
          columns = Hash[query(<<-SQL, "Columns for index #{row[0]} on #{table_name}")]
          SELECT a.attnum, a.attname
          FROM pg_attribute a
          WHERE a.attrelid = #{oid}
          AND a.attnum IN (#{indkey.join(",")})
          SQL

          column_names = columns.values_at(*indkey).compact
          # Expression/functional indexes yield no plain columns and are skipped.
          column_names.empty? ? nil : IndexDefinition.new(table_name, index_name, unique, column_names)
        end.compact
      end

      # Returns the list of all column definitions for a table.
      def columns(table_name, name = nil)
        # Limit, precision, and scale are all handled by the superclass.
        column_definitions(table_name).collect do |column_name, type, default, notnull|
          PostgreSQLColumn.new(column_name, default, type, notnull == 'f')
        end
      end

      # Returns the current database name.
      def current_database
        query('select current_database()')[0][0]
      end

      # Returns the current schema name.
      def current_schema
        query('SELECT current_schema', 'SCHEMA')[0][0]
      end

      # Returns the current database encoding format.
      def encoding
        query(<<-end_sql)[0][0]
          SELECT pg_encoding_to_char(pg_database.encoding) FROM pg_database
          WHERE pg_database.datname LIKE '#{current_database}'
        end_sql
      end

      # Sets the schema search path to a string of comma-separated schema names.
      # Names beginning with $ have to be quoted (e.g. $user => '$user').
      # See: http://www.postgresql.org/docs/current/static/ddl-schemas.html
      #
      # This should be not be called manually but set in database.yml.
      def schema_search_path=(schema_csv)
        if schema_csv
          execute "SET search_path TO #{schema_csv}"
          @schema_search_path = schema_csv
        end
      end

      # Returns the active schema search path.
      def schema_search_path
        @schema_search_path ||= query('SHOW search_path')[0][0]
      end

      # Returns the current client message level.
      def client_min_messages
        query('SHOW client_min_messages', 'SCHEMA')[0][0]
      end

      # Set the client message level.
      def client_min_messages=(level)
        execute("SET client_min_messages TO '#{level}'", 'SCHEMA')
      end

      # Returns the sequence name for a table's primary key or some other specified key.
      def default_sequence_name(table_name, pk = nil) #:nodoc:
        serial_sequence(table_name, pk || 'id').split('.').last
      rescue ActiveRecord::StatementInvalid
        "#{table_name}_#{pk || 'id'}_seq"
      end

      # Asks the server which sequence backs the given serial column.
      def serial_sequence(table, column)
        result = exec_query(<<-eosql, 'SCHEMA', [[nil, table], [nil, column]])
          SELECT pg_get_serial_sequence($1, $2)
        eosql
        result.rows.first.first
      end

      # Resets the sequence of a table's primary key to the maximum value.
def reset_pk_sequence!(table, pk = nil, sequence = nil) #:nodoc:
        # Discover the table's own key/sequence pair when the caller did not
        # supply both explicitly.
        unless pk && sequence
          found_pk, found_sequence = pk_and_sequence_for(table)
          pk ||= found_pk
          sequence ||= found_sequence
        end

        @logger.warn "#{table} has primary key #{pk} with no default sequence" if @logger && pk && !sequence

        return unless pk && sequence

        quoted_sequence = quote_table_name(sequence)
        select_value <<-end_sql, 'Reset sequence'
          SELECT setval('#{quoted_sequence}', (SELECT COALESCE(MAX(#{quote_column_name pk})+(SELECT increment_by FROM #{quoted_sequence}), (SELECT min_value FROM #{quoted_sequence})) FROM #{quote_table_name(table)}), false)
        end_sql
      end

      # Returns a table's primary key and belonging sequence as a two-element
      # array, or nil when either cannot be determined.
      def pk_and_sequence_for(table) #:nodoc:
        # First try looking for a sequence with a dependency on the
        # given table's primary key.
        result = exec_query(<<-end_sql, 'SCHEMA').rows.first
          SELECT attr.attname, ns.nspname, seq.relname
          FROM pg_class seq
          INNER JOIN pg_depend dep ON seq.oid = dep.objid
          INNER JOIN pg_attribute attr ON attr.attrelid = dep.refobjid AND attr.attnum = dep.refobjsubid
          INNER JOIN pg_constraint cons ON attr.attrelid = cons.conrelid AND attr.attnum = cons.conkey[1]
          INNER JOIN pg_namespace ns ON seq.relnamespace = ns.oid
          WHERE seq.relkind = 'S'
          AND cons.contype = 'p'
          AND dep.refobjid = '#{quote_table_name(table)}'::regclass
        end_sql

        # Sequences living outside the public schema get namespace-qualified.
        sequence = result.second == 'public' ? result.last : "#{result.second}.#{result.last}"
        [result.first, sequence]
      rescue
        nil
      end

      # Returns just a table's primary key
      def primary_key(table)
        row = exec_query(<<-end_sql, 'SCHEMA', [[nil, table]]).rows.first
          SELECT DISTINCT(attr.attname)
          FROM pg_attribute attr
          INNER JOIN pg_depend dep ON attr.attrelid = dep.refobjid AND attr.attnum = dep.refobjsubid
          INNER JOIN pg_constraint cons ON attr.attrelid = cons.conrelid AND attr.attnum = cons.conkey[1]
          WHERE cons.contype = 'p'
          AND dep.refobjid = $1::regclass
        end_sql

        row.first unless row.nil?
      end

      # Renames a table.
#
      # Example:
      #   rename_table('octopuses', 'octopi')
      def rename_table(name, new_name)
        execute "ALTER TABLE #{quote_table_name(name)} RENAME TO #{quote_table_name(new_name)}"
      end

      # Adds a new column to the named table.
      # See TableDefinition#column for details of the options you can use.
      def add_column(table_name, column_name, type, options = {})
        sql = "ALTER TABLE #{quote_table_name(table_name)} ADD COLUMN #{quote_column_name(column_name)} #{type_to_sql(type, options[:limit], options[:precision], options[:scale])}"
        add_column_options!(sql, options)
        execute sql
      end

      # Changes the column of a table.
      def change_column(table_name, column_name, type, options = {})
        quoted_table_name = quote_table_name(table_name)
        quoted_column = quote_column_name(column_name)

        execute "ALTER TABLE #{quoted_table_name} ALTER COLUMN #{quoted_column} TYPE #{type_to_sql(type, options[:limit], options[:precision], options[:scale])}"

        change_column_default(table_name, column_name, options[:default]) if options_include_default?(options)
        change_column_null(table_name, column_name, options[:null], options[:default]) if options.key?(:null)
      end

      # Changes the default value of a table column.
      def change_column_default(table_name, column_name, default)
        quoted_table = quote_table_name(table_name)
        quoted_column = quote_column_name(column_name)
        execute "ALTER TABLE #{quoted_table} ALTER COLUMN #{quoted_column} SET DEFAULT #{quote(default)}"
      end

      # Adds or drops a NOT NULL constraint on a column.
      def change_column_null(table_name, column_name, null, default = nil)
        quoted_table = quote_table_name(table_name)
        quoted_column = quote_column_name(column_name)

        # Backfill existing NULLs with the default before tightening to
        # NOT NULL, otherwise the constraint would be violated immediately.
        if !null && !default.nil?
          execute("UPDATE #{quoted_table} SET #{quoted_column}=#{quote(default)} WHERE #{quoted_column} IS NULL")
        end
        execute("ALTER TABLE #{quoted_table} ALTER #{quoted_column} #{null ? 'DROP' : 'SET'} NOT NULL")
      end

      # Renames a column in a table.
def rename_column(table_name, column_name, new_column_name)
        execute "ALTER TABLE #{quote_table_name(table_name)} RENAME COLUMN #{quote_column_name(column_name)} TO #{quote_column_name(new_column_name)}"
      end

      # Drops an index without the safety checks of remove_index.
      def remove_index!(table_name, index_name) #:nodoc:
        execute "DROP INDEX #{quote_table_name(index_name)}"
      end

      # Renames an index.
      def rename_index(table_name, old_name, new_name)
        execute "ALTER INDEX #{quote_column_name(old_name)} RENAME TO #{quote_table_name(new_name)}"
      end

      # PostgreSQL identifiers may be at most 63 bytes long.
      def index_name_length
        63
      end

      # Maps logical Rails types to PostgreSQL-specific data types.
      def type_to_sql(type, limit = nil, precision = nil, scale = nil)
        return super unless type.to_s == 'integer'
        return 'integer' unless limit

        case limit
          when 1, 2; 'smallint'
          when 3, 4; 'integer'
          when 5..8; 'bigint'
          else raise(ActiveRecordError, "No integer type has byte size #{limit}. Use a numeric with precision 0 instead.")
        end
      end

      # Returns a SELECT DISTINCT clause for a given set of columns and a given ORDER BY clause.
      #
      # PostgreSQL requires the ORDER BY columns in the select list for distinct queries, and
      # requires that the ORDER BY include the distinct column.
      #
      #   distinct("posts.id", "posts.created_at desc")
      def distinct(columns, orders) #:nodoc:
        return "DISTINCT #{columns}" if orders.empty?

        # Construct a clean list of column names from the ORDER BY clause, removing
        # any ASC/DESC modifiers
        order_columns = orders.collect { |s| s.gsub(/\s+(ASC|DESC)\s*/i, '') }
        order_columns.delete_if { |c| c.blank? }
        order_columns = order_columns.zip((0...order_columns.size).to_a).map { |s,i| "#{s} AS alias_#{i}" }

        "DISTINCT #{columns}, #{order_columns * ', '}"
      end

      module Utils
        # Returns an array of <tt>[schema_name, table_name]</tt> extracted from +name+.
        # +schema_name+ is nil if not specified in +name+.
        # +schema_name+ and +table_name+ exclude surrounding quotes (regardless of whether provided in +name+)
        # +name+ supports the range of schema/table references understood by PostgreSQL, for example:
        #
        # * <tt>table_name</tt>
        # * <tt>"table.name"</tt>
        # * <tt>schema_name.table_name</tt>
        # * <tt>schema_name."table.name"</tt>
        # * <tt>"schema.name"."table name"</tt>
        def self.extract_schema_and_table(name)
          table, schema = name.scan(/[^".\s]+|"[^"]*"/)[0..1].collect{|m| m.gsub(/(^"|"$)/,'') }.reverse
          [schema, table]
        end
      end

      protected
        # Returns the version of the connected PostgreSQL server.
        def postgresql_version
          @connection.server_version
        end

        # Translates driver-level errors into the adapter-agnostic
        # ActiveRecord exception hierarchy by inspecting the message text.
        def translate_exception(exception, message)
          case exception.message
          when /duplicate key value violates unique constraint/
            RecordNotUnique.new(message, exception)
          when /violates foreign key constraint/
            InvalidForeignKey.new(message, exception)
          else
            super
          end
        end

      private
        # Runs the statement directly, bypassing the prepared-statement cache.
        def exec_no_cache(sql, binds)
          @connection.async_exec(sql)
        end

        # Prepares the statement on first use (cached under a generated key)
        # and executes it with the given binds.
        def exec_cache(sql, binds)
          unless @statements.key? sql
            nextkey = "a#{@statements.length + 1}"
            @connection.prepare nextkey, sql
            @statements[sql] = nextkey
          end
          key = @statements[sql]

          # Clear the queue
          @connection.get_last_result
          @connection.send_query_prepared(key, binds.map { |col, val|
            type_cast(val, col)
          })
          @connection.block
          @connection.get_last_result
        end

        # The internal PostgreSQL identifier of the money data type.
        MONEY_COLUMN_TYPE_OID = 790 #:nodoc:
        # The internal PostgreSQL identifier of the BYTEA data type.
        BYTEA_COLUMN_TYPE_OID = 17 #:nodoc:

        # Connects to a PostgreSQL server and sets up the adapter depending on the
        # connected server's characteristics.
        def connect
          @connection = PGconn.connect(*@connection_parameters)

          # Money type has a fixed precision of 10 in PostgreSQL 8.2 and below, and as of
          # PostgreSQL 8.3 it has a fixed precision of 19. PostgreSQLColumn.extract_precision
          # should know about this but can't detect it there, so deal with it here.
          PostgreSQLColumn.money_precision = (postgresql_version >= 80300) ? 19 : 10

          configure_connection
        end

        # Configures the encoding, verbosity, schema search path, and time zone of the connection.
        # This is called by #connect and should not be called manually.
        def configure_connection
          if @config[:encoding]
            @connection.set_client_encoding(@config[:encoding])
          end
          self.client_min_messages = @config[:min_messages] if @config[:min_messages]
          self.schema_search_path = @config[:schema_search_path] || @config[:schema_order]

          # Use standard-conforming strings if available so we don't have to do the E'...' dance.
          set_standard_conforming_strings

          # If using Active Record's time zone support configure the connection to return
          # TIMESTAMP WITH ZONE types in UTC.
          if ActiveRecord::Base.default_timezone == :utc
            execute("SET time zone 'UTC'", 'SCHEMA')
          elsif @local_tz
            execute("SET time zone '#{@local_tz}'", 'SCHEMA')
          end
        end

        # Returns the current ID of a table's sequence.
        def last_insert_id(sequence_name) #:nodoc:
          r = exec_query("SELECT currval($1)", 'SQL', [[nil, sequence_name]])
          Integer(r.rows.first.first)
        end

        # Executes a SELECT query and returns the results, performing any data type
        # conversions that are required to be performed here instead of in PostgreSQLColumn.
        def select(sql, name = nil, binds = [])
          exec_query(sql, name, binds).to_a
        end

        # Runs the query and returns [fields, rows], clearing the driver
        # result to free its memory eagerly.
        def select_raw(sql, name = nil)
          res = execute(sql, name)
          results = result_as_array(res)
          fields = res.fields
          res.clear
          return fields, results
        end

        # Returns the list of a table's column names, data types, and default values.
        #
        # The underlying query is roughly:
        #  SELECT column.name, column.type, default.value
        #    FROM column LEFT JOIN default
        #      ON column.table_id = default.table_id
        #     AND column.num = default.column_num
        #   WHERE column.table_id = get_table_id('table_name')
        #     AND column.num > 0
        #     AND NOT column.is_dropped
        #   ORDER BY column.num
        #
        # If the table name is not prefixed with a schema, the database will
        # take the first match from the schema search path.
        #
        # Query implementation notes:
        #  - format_type includes the column size constraint, e.g. varchar(50)
        #  - ::regclass is a function that gives the id for a table name
        def column_definitions(table_name) #:nodoc:
          exec_query(<<-end_sql, 'SCHEMA').rows
            SELECT a.attname, format_type(a.atttypid, a.atttypmod), d.adsrc, a.attnotnull
            FROM pg_attribute a LEFT JOIN pg_attrdef d
            ON a.attrelid = d.adrelid AND a.attnum = d.adnum
            WHERE a.attrelid = '#{quote_table_name(table_name)}'::regclass
            AND a.attnum > 0 AND NOT a.attisdropped
            ORDER BY a.attnum
          end_sql
        end

        # Splits a possibly-quoted identifier off the front of +name+,
        # returning the identifier and the unconsumed remainder (or nil).
        def extract_pg_identifier_from_name(name)
          match_data = name.start_with?('"') ? name.match(/\"([^\"]+)\"/) : name.match(/([^\.]+)/)

          if match_data
            rest = name[match_data[0].length, name.length]
            rest = rest[1, rest.length] if rest.start_with? "."
            [match_data[1], (rest.length > 0 ?
rest : nil)] end end def extract_table_ref_from_insert_sql(sql) sql[/into\s+([^\(]*).*values\s*\(/i] $1.strip if $1 end def table_definition TableDefinition.new(self) end end end end accept option for recreate db for postgres (same as mysql now) require 'active_record/connection_adapters/abstract_adapter' require 'active_support/core_ext/object/blank' # Make sure we're using pg high enough for PGResult#values gem 'pg', '~> 0.11' require 'pg' module ActiveRecord class Base # Establishes a connection to the database that's used by all Active Record objects def self.postgresql_connection(config) # :nodoc: config = config.symbolize_keys host = config[:host] port = config[:port] || 5432 username = config[:username].to_s if config[:username] password = config[:password].to_s if config[:password] if config.key?(:database) database = config[:database] else raise ArgumentError, "No database specified. Missing argument: database." end # The postgres drivers don't allow the creation of an unconnected PGconn object, # so just pass a nil connection object for the time being. ConnectionAdapters::PostgreSQLAdapter.new(nil, logger, [host, port, nil, nil, database, username, password], config) end end module ConnectionAdapters # PostgreSQL-specific extensions to column definitions in a table. class PostgreSQLColumn < Column #:nodoc: # Instantiates a new PostgreSQL column definition in a table. def initialize(name, default, sql_type = nil, null = true) super(name, self.class.extract_value_from_default(default), sql_type, null) end # :stopdoc: class << self attr_accessor :money_precision def string_to_time(string) return string unless String === string case string when 'infinity' then 1.0 / 0.0 when '-infinity' then -1.0 / 0.0 else super end end end # :startdoc: private def extract_limit(sql_type) case sql_type when /^bigint/i; 8 when /^smallint/i; 2 else super end end # Extracts the scale from PostgreSQL-specific data types. 
def extract_scale(sql_type) # Money type has a fixed scale of 2. sql_type =~ /^money/ ? 2 : super end # Extracts the precision from PostgreSQL-specific data types. def extract_precision(sql_type) if sql_type == 'money' self.class.money_precision else super end end # Maps PostgreSQL-specific data types to logical Rails types. def simplified_type(field_type) case field_type # Numeric and monetary types when /^(?:real|double precision)$/ :float # Monetary types when 'money' :decimal # Character types when /^(?:character varying|bpchar)(?:\(\d+\))?$/ :string # Binary data types when 'bytea' :binary # Date/time types when /^timestamp with(?:out)? time zone$/ :datetime when 'interval' :string # Geometric types when /^(?:point|line|lseg|box|"?path"?|polygon|circle)$/ :string # Network address types when /^(?:cidr|inet|macaddr)$/ :string # Bit strings when /^bit(?: varying)?(?:\(\d+\))?$/ :string # XML type when 'xml' :xml # tsvector type when 'tsvector' :tsvector # Arrays when /^\D+\[\]$/ :string # Object identifier types when 'oid' :integer # UUID type when 'uuid' :string # Small and big integer types when /^(?:small|big)int$/ :integer # Pass through all types that are not specific to PostgreSQL. else super end end # Extracts the value from a PostgreSQL column default definition. def self.extract_value_from_default(default) case default # This is a performance optimization for Ruby 1.9.2 in development. # If the value is nil, we return nil straight away without checking # the regular expressions. If we check each regular expression, # Regexp#=== will call NilClass#to_str, which will trigger # method_missing (defined by whiny nil in ActiveSupport) which # makes this method very very slow. 
when NilClass nil # Numeric types when /\A\(?(-?\d+(\.\d*)?\)?)\z/ $1 # Character types when /\A'(.*)'::(?:character varying|bpchar|text)\z/m $1 # Character types (8.1 formatting) when /\AE'(.*)'::(?:character varying|bpchar|text)\z/m $1.gsub(/\\(\d\d\d)/) { $1.oct.chr } # Binary data types when /\A'(.*)'::bytea\z/m $1 # Date/time types when /\A'(.+)'::(?:time(?:stamp)? with(?:out)? time zone|date)\z/ $1 when /\A'(.*)'::interval\z/ $1 # Boolean type when 'true' true when 'false' false # Geometric types when /\A'(.*)'::(?:point|line|lseg|box|"?path"?|polygon|circle)\z/ $1 # Network address types when /\A'(.*)'::(?:cidr|inet|macaddr)\z/ $1 # Bit string types when /\AB'(.*)'::"?bit(?: varying)?"?\z/ $1 # XML type when /\A'(.*)'::xml\z/m $1 # Arrays when /\A'(.*)'::"?\D+"?\[\]\z/ $1 # Object identifier types when /\A-?\d+\z/ $1 else # Anything else is blank, some user type, or some function # and we can't know the value of that, so return nil. nil end end end # The PostgreSQL adapter works both with the native C (http://ruby.scripting.ca/postgres/) and the pure # Ruby (available both as gem and from http://rubyforge.org/frs/?group_id=234&release_id=1944) drivers. # # Options: # # * <tt>:host</tt> - Defaults to "localhost". # * <tt>:port</tt> - Defaults to 5432. # * <tt>:username</tt> - Defaults to nothing. # * <tt>:password</tt> - Defaults to nothing. # * <tt>:database</tt> - The name of the database. No default, must be provided. # * <tt>:schema_search_path</tt> - An optional schema search path for the connection given # as a string of comma-separated schema names. This is backward-compatible with the <tt>:schema_order</tt> option. # * <tt>:encoding</tt> - An optional client encoding that is used in a <tt>SET client_encoding TO # <encoding></tt> call on the connection. # * <tt>:min_messages</tt> - An optional client min messages that is used in a # <tt>SET client_min_messages TO <min_messages></tt> call on the connection. 
class PostgreSQLAdapter < AbstractAdapter class TableDefinition < ActiveRecord::ConnectionAdapters::TableDefinition def xml(*args) options = args.extract_options! column(args[0], 'xml', options) end def tsvector(*args) options = args.extract_options! column(args[0], 'tsvector', options) end end ADAPTER_NAME = 'PostgreSQL' NATIVE_DATABASE_TYPES = { :primary_key => "serial primary key", :string => { :name => "character varying", :limit => 255 }, :text => { :name => "text" }, :integer => { :name => "integer" }, :float => { :name => "float" }, :decimal => { :name => "decimal" }, :datetime => { :name => "timestamp" }, :timestamp => { :name => "timestamp" }, :time => { :name => "time" }, :date => { :name => "date" }, :binary => { :name => "bytea" }, :boolean => { :name => "boolean" }, :xml => { :name => "xml" }, :tsvector => { :name => "tsvector" } } # Returns 'PostgreSQL' as adapter name for identification purposes. def adapter_name ADAPTER_NAME end # Returns +true+, since this connection adapter supports prepared statement # caching. def supports_statement_cache? true end # Initializes and connects a PostgreSQL adapter. def initialize(connection, logger, connection_parameters, config) super(connection, logger) @connection_parameters, @config = connection_parameters, config # @local_tz is initialized as nil to avoid warnings when connect tries to use it @local_tz = nil @table_alias_length = nil @statements = {} connect if postgresql_version < 80200 raise "Your version of PostgreSQL (#{postgresql_version}) is too old, please upgrade!" end @local_tz = execute('SHOW TIME ZONE', 'SCHEMA').first["TimeZone"] end # Clears the prepared statements cache. def clear_cache! @statements.each_value do |value| @connection.query "DEALLOCATE #{value}" end @statements.clear end # Is this connection alive and ready for queries? def active? @connection.status == PGconn::CONNECTION_OK rescue PGError false end # Close then reopen the connection. def reconnect! clear_cache! 
@connection.reset configure_connection end def reset! clear_cache! super end # Disconnects from the database if already connected. Otherwise, this # method does nothing. def disconnect! clear_cache! @connection.close rescue nil end def native_database_types #:nodoc: NATIVE_DATABASE_TYPES end # Returns true, since this connection adapter supports migrations. def supports_migrations? true end # Does PostgreSQL support finding primary key on non-Active Record tables? def supports_primary_key? #:nodoc: true end # Enable standard-conforming strings if available. def set_standard_conforming_strings old, self.client_min_messages = client_min_messages, 'panic' execute('SET standard_conforming_strings = on', 'SCHEMA') rescue nil ensure self.client_min_messages = old end def supports_insert_with_returning? true end def supports_ddl_transactions? true end # Returns true, since this connection adapter supports savepoints. def supports_savepoints? true end # Returns the configured supported identifier length supported by PostgreSQL def table_alias_length @table_alias_length ||= query('SHOW max_identifier_length')[0][0].to_i end # QUOTING ================================================== # Escapes binary strings for bytea input to the database. def escape_bytea(value) @connection.escape_bytea(value) if value end # Unescapes bytea output from a database to the binary string it represents. # NOTE: This is NOT an inverse of escape_bytea! This is only to be used # on escaped binary output from database drive. def unescape_bytea(value) @connection.unescape_bytea(value) if value end # Quotes PostgreSQL-specific data types for SQL input. def quote(value, column = nil) #:nodoc: return super unless column case value when Float return super unless value.infinite? && column.type == :datetime "'#{value.to_s.downcase}'" when Numeric return super unless column.sql_type == 'money' # Not truly string input, so doesn't require (or allow) escape string syntax. 
"'#{value}'" when String case column.sql_type when 'bytea' then "'#{escape_bytea(value)}'" when 'xml' then "xml '#{quote_string(value)}'" when /^bit/ case value when /^[01]*$/ then "B'#{value}'" # Bit-string notation when /^[0-9A-F]*$/i then "X'#{value}'" # Hexadecimal notation end else super end else super end end def type_cast(value, column) return super unless column case value when String return super unless 'bytea' == column.sql_type { :value => value, :format => 1 } else super end end # Quotes strings for use in SQL input. def quote_string(s) #:nodoc: @connection.escape(s) end # Checks the following cases: # # - table_name # - "table.name" # - schema_name.table_name # - schema_name."table.name" # - "schema.name".table_name # - "schema.name"."table.name" def quote_table_name(name) schema, name_part = extract_pg_identifier_from_name(name.to_s) unless name_part quote_column_name(schema) else table_name, name_part = extract_pg_identifier_from_name(name_part) "#{quote_column_name(schema)}.#{quote_column_name(table_name)}" end end # Quotes column names for use in SQL queries. def quote_column_name(name) #:nodoc: PGconn.quote_ident(name.to_s) end # Quote date/time values for use in SQL input. Includes microseconds # if the value is a Time responding to usec. def quoted_date(value) #:nodoc: if value.acts_like?(:time) && value.respond_to?(:usec) "#{super}.#{sprintf("%06d", value.usec)}" else super end end # Set the authorized user for this session def session_auth=(user) clear_cache! exec_query "SET SESSION AUTHORIZATION #{user}" end # REFERENTIAL INTEGRITY ==================================== def supports_disable_referential_integrity? #:nodoc: true end def disable_referential_integrity #:nodoc: if supports_disable_referential_integrity? then execute(tables.collect { |name| "ALTER TABLE #{quote_table_name(name)} DISABLE TRIGGER ALL" }.join(";")) end yield ensure if supports_disable_referential_integrity? 
then execute(tables.collect { |name| "ALTER TABLE #{quote_table_name(name)} ENABLE TRIGGER ALL" }.join(";")) end end # DATABASE STATEMENTS ====================================== # Executes a SELECT query and returns an array of rows. Each row is an # array of field values. def select_rows(sql, name = nil) select_raw(sql, name).last end # Executes an INSERT query and returns the new record's ID def insert_sql(sql, name = nil, pk = nil, id_value = nil, sequence_name = nil) unless pk # Extract the table from the insert sql. Yuck. table_ref = extract_table_ref_from_insert_sql(sql) pk = primary_key(table_ref) if table_ref end if pk select_value("#{sql} RETURNING #{quote_column_name(pk)}") else super end end alias :create :insert # create a 2D array representing the result set def result_as_array(res) #:nodoc: # check if we have any binary column and if they need escaping ftypes = Array.new(res.nfields) do |i| [i, res.ftype(i)] end rows = res.values return rows unless ftypes.any? { |_, x| x == BYTEA_COLUMN_TYPE_OID || x == MONEY_COLUMN_TYPE_OID } typehash = ftypes.group_by { |_, type| type } binaries = typehash[BYTEA_COLUMN_TYPE_OID] || [] monies = typehash[MONEY_COLUMN_TYPE_OID] || [] rows.each do |row| # unescape string passed BYTEA field (OID == 17) binaries.each do |index, _| row[index] = unescape_bytea(row[index]) end # If this is a money type column and there are any currency symbols, # then strip them off. Indeed it would be prettier to do this in # PostgreSQLColumn.string_to_decimal but would break form input # fields that call value_before_type_cast. 
monies.each do |index, _| data = row[index] # Because money output is formatted according to the locale, there are two # cases to consider (note the decimal separators): # (1) $12,345,678.12 # (2) $12.345.678,12 case data when /^-?\D+[\d,]+\.\d{2}$/ # (1) data.gsub!(/[^-\d.]/, '') when /^-?\D+[\d.]+,\d{2}$/ # (2) data.gsub!(/[^-\d,]/, '').sub!(/,/, '.') end end end end # Queries the database and returns the results in an Array-like object def query(sql, name = nil) #:nodoc: log(sql, name) do result_as_array @connection.async_exec(sql) end end # Executes an SQL statement, returning a PGresult object on success # or raising a PGError exception otherwise. def execute(sql, name = nil) log(sql, name) do @connection.async_exec(sql) end end def substitute_at(column, index) Arel.sql("$#{index + 1}") end def exec_query(sql, name = 'SQL', binds = []) log(sql, name, binds) do result = binds.empty? ? exec_no_cache(sql, binds) : exec_cache(sql, binds) ret = ActiveRecord::Result.new(result.fields, result_as_array(result)) result.clear return ret end end def exec_delete(sql, name = 'SQL', binds = []) log(sql, name, binds) do result = binds.empty? ? exec_no_cache(sql, binds) : exec_cache(sql, binds) affected = result.cmd_tuples result.clear affected end end alias :exec_update :exec_delete def sql_for_insert(sql, pk, id_value, sequence_name, binds) unless pk # Extract the table from the insert sql. Yuck. table_ref = extract_table_ref_from_insert_sql(sql) pk = primary_key(table_ref) if table_ref end sql = "#{sql} RETURNING #{quote_column_name(pk)}" if pk [sql, binds] end # Executes an UPDATE query and returns the number of affected tuples. def update_sql(sql, name = nil) super.cmd_tuples end # Begins a transaction. def begin_db_transaction execute "BEGIN" end # Commits a transaction. def commit_db_transaction execute "COMMIT" end # Aborts a transaction. def rollback_db_transaction execute "ROLLBACK" end def outside_transaction? 
@connection.transaction_status == PGconn::PQTRANS_IDLE end def create_savepoint execute("SAVEPOINT #{current_savepoint_name}") end def rollback_to_savepoint execute("ROLLBACK TO SAVEPOINT #{current_savepoint_name}") end def release_savepoint execute("RELEASE SAVEPOINT #{current_savepoint_name}") end # SCHEMA STATEMENTS ======================================== # Drops the database specified on the +name+ attribute # and creates it again using the provided +options+. def recreate_database(name, options = {}) #:nodoc: drop_database(name) create_database(name, options) end # Create a new PostgreSQL database. Options include <tt>:owner</tt>, <tt>:template</tt>, # <tt>:encoding</tt>, <tt>:tablespace</tt>, and <tt>:connection_limit</tt> (note that MySQL uses # <tt>:charset</tt> while PostgreSQL uses <tt>:encoding</tt>). # # Example: # create_database config[:database], config # create_database 'foo_development', :encoding => 'unicode' def create_database(name, options = {}) options = options.reverse_merge(:encoding => "utf8") option_string = options.symbolize_keys.sum do |key, value| case key when :owner " OWNER = \"#{value}\"" when :template " TEMPLATE = \"#{value}\"" when :encoding " ENCODING = '#{value}'" when :tablespace " TABLESPACE = \"#{value}\"" when :connection_limit " CONNECTION LIMIT = #{value}" else "" end end execute "CREATE DATABASE #{quote_table_name(name)}#{option_string}" end # Drops a PostgreSQL database. # # Example: # drop_database 'matt_development' def drop_database(name) #:nodoc: execute "DROP DATABASE IF EXISTS #{quote_table_name(name)}" end # Returns the list of all tables in the schema search path or a specified schema. def tables(name = nil) query(<<-SQL, 'SCHEMA').map { |row| row[0] } SELECT tablename FROM pg_tables WHERE schemaname = ANY (current_schemas(false)) SQL end # Returns true if table exists. 
# If the schema is not specified as part of +name+ then it will only find tables within # the current schema search path (regardless of permissions to access tables in other schemas) def table_exists?(name) schema, table = Utils.extract_schema_and_table(name.to_s) return false unless table binds = [[nil, table]] binds << [nil, schema] if schema exec_query(<<-SQL, 'SCHEMA', binds).rows.first[0].to_i > 0 SELECT COUNT(*) FROM pg_class c LEFT JOIN pg_namespace n ON n.oid = c.relnamespace WHERE c.relkind in ('v','r') AND c.relname = $1 AND n.nspname = #{schema ? '$2' : 'ANY (current_schemas(false))'} SQL end # Returns true if schema exists. def schema_exists?(name) exec_query(<<-SQL, 'SCHEMA', [[nil, name]]).rows.first[0].to_i > 0 SELECT COUNT(*) FROM pg_namespace WHERE nspname = $1 SQL end # Returns an array of indexes for the given table. def indexes(table_name, name = nil) schemas = schema_search_path.split(/,/).map { |p| quote(p) }.join(',') result = query(<<-SQL, name) SELECT distinct i.relname, d.indisunique, d.indkey, t.oid FROM pg_class t INNER JOIN pg_index d ON t.oid = d.indrelid INNER JOIN pg_class i ON d.indexrelid = i.oid WHERE i.relkind = 'i' AND d.indisprimary = 'f' AND t.relname = '#{table_name}' AND i.relnamespace IN (SELECT oid FROM pg_namespace WHERE nspname IN (#{schemas}) ) ORDER BY i.relname SQL result.map do |row| index_name = row[0] unique = row[1] == 't' indkey = row[2].split(" ") oid = row[3] columns = Hash[query(<<-SQL, "Columns for index #{row[0]} on #{table_name}")] SELECT a.attnum, a.attname FROM pg_attribute a WHERE a.attrelid = #{oid} AND a.attnum IN (#{indkey.join(",")}) SQL column_names = columns.values_at(*indkey).compact column_names.empty? ? nil : IndexDefinition.new(table_name, index_name, unique, column_names) end.compact end # Returns the list of all column definitions for a table. def columns(table_name, name = nil) # Limit, precision, and scale are all handled by the superclass. 
column_definitions(table_name).collect do |column_name, type, default, notnull| PostgreSQLColumn.new(column_name, default, type, notnull == 'f') end end # Returns the current database name. def current_database query('select current_database()')[0][0] end # Returns the current schema name. def current_schema query('SELECT current_schema', 'SCHEMA')[0][0] end # Returns the current database encoding format. def encoding query(<<-end_sql)[0][0] SELECT pg_encoding_to_char(pg_database.encoding) FROM pg_database WHERE pg_database.datname LIKE '#{current_database}' end_sql end # Sets the schema search path to a string of comma-separated schema names. # Names beginning with $ have to be quoted (e.g. $user => '$user'). # See: http://www.postgresql.org/docs/current/static/ddl-schemas.html # # This should be not be called manually but set in database.yml. def schema_search_path=(schema_csv) if schema_csv execute "SET search_path TO #{schema_csv}" @schema_search_path = schema_csv end end # Returns the active schema search path. def schema_search_path @schema_search_path ||= query('SHOW search_path')[0][0] end # Returns the current client message level. def client_min_messages query('SHOW client_min_messages', 'SCHEMA')[0][0] end # Set the client message level. def client_min_messages=(level) execute("SET client_min_messages TO '#{level}'", 'SCHEMA') end # Returns the sequence name for a table's primary key or some other specified key. def default_sequence_name(table_name, pk = nil) #:nodoc: serial_sequence(table_name, pk || 'id').split('.').last rescue ActiveRecord::StatementInvalid "#{table_name}_#{pk || 'id'}_seq" end def serial_sequence(table, column) result = exec_query(<<-eosql, 'SCHEMA', [[nil, table], [nil, column]]) SELECT pg_get_serial_sequence($1, $2) eosql result.rows.first.first end # Resets the sequence of a table's primary key to the maximum value. 
def reset_pk_sequence!(table, pk = nil, sequence = nil) #:nodoc: unless pk and sequence default_pk, default_sequence = pk_and_sequence_for(table) pk ||= default_pk sequence ||= default_sequence end if @logger && pk && !sequence @logger.warn "#{table} has primary key #{pk} with no default sequence" end if pk && sequence quoted_sequence = quote_table_name(sequence) select_value <<-end_sql, 'Reset sequence' SELECT setval('#{quoted_sequence}', (SELECT COALESCE(MAX(#{quote_column_name pk})+(SELECT increment_by FROM #{quoted_sequence}), (SELECT min_value FROM #{quoted_sequence})) FROM #{quote_table_name(table)}), false) end_sql end end # Returns a table's primary key and belonging sequence. def pk_and_sequence_for(table) #:nodoc: # First try looking for a sequence with a dependency on the # given table's primary key. result = exec_query(<<-end_sql, 'SCHEMA').rows.first SELECT attr.attname, ns.nspname, seq.relname FROM pg_class seq INNER JOIN pg_depend dep ON seq.oid = dep.objid INNER JOIN pg_attribute attr ON attr.attrelid = dep.refobjid AND attr.attnum = dep.refobjsubid INNER JOIN pg_constraint cons ON attr.attrelid = cons.conrelid AND attr.attnum = cons.conkey[1] INNER JOIN pg_namespace ns ON seq.relnamespace = ns.oid WHERE seq.relkind = 'S' AND cons.contype = 'p' AND dep.refobjid = '#{quote_table_name(table)}'::regclass end_sql # [primary_key, sequence] if result.second == 'public' then sequence = result.last else sequence = result.second+'.'+result.last end [result.first, sequence] rescue nil end # Returns just a table's primary key def primary_key(table) row = exec_query(<<-end_sql, 'SCHEMA', [[nil, table]]).rows.first SELECT DISTINCT(attr.attname) FROM pg_attribute attr INNER JOIN pg_depend dep ON attr.attrelid = dep.refobjid AND attr.attnum = dep.refobjsubid INNER JOIN pg_constraint cons ON attr.attrelid = cons.conrelid AND attr.attnum = cons.conkey[1] WHERE cons.contype = 'p' AND dep.refobjid = $1::regclass end_sql row && row.first end # Renames a table. 
# # Example: # rename_table('octopuses', 'octopi') def rename_table(name, new_name) execute "ALTER TABLE #{quote_table_name(name)} RENAME TO #{quote_table_name(new_name)}" end # Adds a new column to the named table. # See TableDefinition#column for details of the options you can use. def add_column(table_name, column_name, type, options = {}) add_column_sql = "ALTER TABLE #{quote_table_name(table_name)} ADD COLUMN #{quote_column_name(column_name)} #{type_to_sql(type, options[:limit], options[:precision], options[:scale])}" add_column_options!(add_column_sql, options) execute add_column_sql end # Changes the column of a table. def change_column(table_name, column_name, type, options = {}) quoted_table_name = quote_table_name(table_name) execute "ALTER TABLE #{quoted_table_name} ALTER COLUMN #{quote_column_name(column_name)} TYPE #{type_to_sql(type, options[:limit], options[:precision], options[:scale])}" change_column_default(table_name, column_name, options[:default]) if options_include_default?(options) change_column_null(table_name, column_name, options[:null], options[:default]) if options.key?(:null) end # Changes the default value of a table column. def change_column_default(table_name, column_name, default) execute "ALTER TABLE #{quote_table_name(table_name)} ALTER COLUMN #{quote_column_name(column_name)} SET DEFAULT #{quote(default)}" end def change_column_null(table_name, column_name, null, default = nil) unless null || default.nil? execute("UPDATE #{quote_table_name(table_name)} SET #{quote_column_name(column_name)}=#{quote(default)} WHERE #{quote_column_name(column_name)} IS NULL") end execute("ALTER TABLE #{quote_table_name(table_name)} ALTER #{quote_column_name(column_name)} #{null ? 'DROP' : 'SET'} NOT NULL") end # Renames a column in a table. 
def rename_column(table_name, column_name, new_column_name) execute "ALTER TABLE #{quote_table_name(table_name)} RENAME COLUMN #{quote_column_name(column_name)} TO #{quote_column_name(new_column_name)}" end def remove_index!(table_name, index_name) #:nodoc: execute "DROP INDEX #{quote_table_name(index_name)}" end def rename_index(table_name, old_name, new_name) execute "ALTER INDEX #{quote_column_name(old_name)} RENAME TO #{quote_table_name(new_name)}" end def index_name_length 63 end # Maps logical Rails types to PostgreSQL-specific data types. def type_to_sql(type, limit = nil, precision = nil, scale = nil) return super unless type.to_s == 'integer' return 'integer' unless limit case limit when 1, 2; 'smallint' when 3, 4; 'integer' when 5..8; 'bigint' else raise(ActiveRecordError, "No integer type has byte size #{limit}. Use a numeric with precision 0 instead.") end end # Returns a SELECT DISTINCT clause for a given set of columns and a given ORDER BY clause. # # PostgreSQL requires the ORDER BY columns in the select list for distinct queries, and # requires that the ORDER BY include the distinct column. # # distinct("posts.id", "posts.created_at desc") def distinct(columns, orders) #:nodoc: return "DISTINCT #{columns}" if orders.empty? # Construct a clean list of column names from the ORDER BY clause, removing # any ASC/DESC modifiers order_columns = orders.collect { |s| s.gsub(/\s+(ASC|DESC)\s*/i, '') } order_columns.delete_if { |c| c.blank? } order_columns = order_columns.zip((0...order_columns.size).to_a).map { |s,i| "#{s} AS alias_#{i}" } "DISTINCT #{columns}, #{order_columns * ', '}" end module Utils # Returns an array of <tt>[schema_name, table_name]</tt> extracted from +name+. # +schema_name+ is nil if not specified in +name+. 
# +schema_name+ and +table_name+ exclude surrounding quotes (regardless of whether provided in +name+) # +name+ supports the range of schema/table references understood by PostgreSQL, for example: # # * <tt>table_name</tt> # * <tt>"table.name"</tt> # * <tt>schema_name.table_name</tt> # * <tt>schema_name."table.name"</tt> # * <tt>"schema.name"."table name"</tt> def self.extract_schema_and_table(name) table, schema = name.scan(/[^".\s]+|"[^"]*"/)[0..1].collect{|m| m.gsub(/(^"|"$)/,'') }.reverse [schema, table] end end protected # Returns the version of the connected PostgreSQL server. def postgresql_version @connection.server_version end def translate_exception(exception, message) case exception.message when /duplicate key value violates unique constraint/ RecordNotUnique.new(message, exception) when /violates foreign key constraint/ InvalidForeignKey.new(message, exception) else super end end private def exec_no_cache(sql, binds) @connection.async_exec(sql) end def exec_cache(sql, binds) unless @statements.key? sql nextkey = "a#{@statements.length + 1}" @connection.prepare nextkey, sql @statements[sql] = nextkey end key = @statements[sql] # Clear the queue @connection.get_last_result @connection.send_query_prepared(key, binds.map { |col, val| type_cast(val, col) }) @connection.block @connection.get_last_result end # The internal PostgreSQL identifier of the money data type. MONEY_COLUMN_TYPE_OID = 790 #:nodoc: # The internal PostgreSQL identifier of the BYTEA data type. BYTEA_COLUMN_TYPE_OID = 17 #:nodoc: # Connects to a PostgreSQL server and sets up the adapter depending on the # connected server's characteristics. def connect @connection = PGconn.connect(*@connection_parameters) # Money type has a fixed precision of 10 in PostgreSQL 8.2 and below, and as of # PostgreSQL 8.3 it has a fixed precision of 19. PostgreSQLColumn.extract_precision # should know about this but can't detect it there, so deal with it here. 
PostgreSQLColumn.money_precision = (postgresql_version >= 80300) ? 19 : 10 configure_connection end # Configures the encoding, verbosity, schema search path, and time zone of the connection. # This is called by #connect and should not be called manually. def configure_connection if @config[:encoding] @connection.set_client_encoding(@config[:encoding]) end self.client_min_messages = @config[:min_messages] if @config[:min_messages] self.schema_search_path = @config[:schema_search_path] || @config[:schema_order] # Use standard-conforming strings if available so we don't have to do the E'...' dance. set_standard_conforming_strings # If using Active Record's time zone support configure the connection to return # TIMESTAMP WITH ZONE types in UTC. if ActiveRecord::Base.default_timezone == :utc execute("SET time zone 'UTC'", 'SCHEMA') elsif @local_tz execute("SET time zone '#{@local_tz}'", 'SCHEMA') end end # Returns the current ID of a table's sequence. def last_insert_id(sequence_name) #:nodoc: r = exec_query("SELECT currval($1)", 'SQL', [[nil, sequence_name]]) Integer(r.rows.first.first) end # Executes a SELECT query and returns the results, performing any data type # conversions that are required to be performed here instead of in PostgreSQLColumn. def select(sql, name = nil, binds = []) exec_query(sql, name, binds).to_a end def select_raw(sql, name = nil) res = execute(sql, name) results = result_as_array(res) fields = res.fields res.clear return fields, results end # Returns the list of a table's column names, data types, and default values. 
# # The underlying query is roughly: # SELECT column.name, column.type, default.value # FROM column LEFT JOIN default # ON column.table_id = default.table_id # AND column.num = default.column_num # WHERE column.table_id = get_table_id('table_name') # AND column.num > 0 # AND NOT column.is_dropped # ORDER BY column.num # # If the table name is not prefixed with a schema, the database will # take the first match from the schema search path. # # Query implementation notes: # - format_type includes the column size constraint, e.g. varchar(50) # - ::regclass is a function that gives the id for a table name def column_definitions(table_name) #:nodoc: exec_query(<<-end_sql, 'SCHEMA').rows SELECT a.attname, format_type(a.atttypid, a.atttypmod), d.adsrc, a.attnotnull FROM pg_attribute a LEFT JOIN pg_attrdef d ON a.attrelid = d.adrelid AND a.attnum = d.adnum WHERE a.attrelid = '#{quote_table_name(table_name)}'::regclass AND a.attnum > 0 AND NOT a.attisdropped ORDER BY a.attnum end_sql end def extract_pg_identifier_from_name(name) match_data = name.start_with?('"') ? name.match(/\"([^\"]+)\"/) : name.match(/([^\.]+)/) if match_data rest = name[match_data[0].length, name.length] rest = rest[1, rest.length] if rest.start_with? "." [match_data[1], (rest.length > 0 ? rest : nil)] end end def extract_table_ref_from_insert_sql(sql) sql[/into\s+([^\(]*).*values\s*\(/i] $1.strip if $1 end def table_definition TableDefinition.new(self) end end end end
cli sample for sheets api # Copyright 2016 Google Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. require 'google/apis/sheets_v4' require 'base_cli' module Samples # Examples for the Google Sheets API # # Sample usage: # # $ ./google-api-samples sheets get_values --spreadsheet_key='1BxiMVs0XRA5nFMdKvBdBZjgmUUqptlbs74OgvE2upms' --range='Class Data!A1:F' # # Student Name Gender Class Level Home State Major Extracurricular Activity # Alexandra Female 4. Senior CA English Drama Club # Andrew Male 1. Freshman SD Math Lacrosse # Anna Female 1. Freshman NC English Basketball # Becky Female 2. Sophomore SD Art Baseball # Benjamin Male 4. Senior WI English Basketball # Carl Male 3. Junior MD Art Debate # Carrie Female 3. Junior NE English Track & Field # Dorothy Female 4. Senior MD Math Lacrosse # Dylan Male 1. Freshman MA Math Baseball # Edward Male 3. Junior FL English Drama Club # Ellen Female 1. Freshman WI Physics Drama Club # Fiona Female 1. Freshman MA Art Debate # John Male 3. Junior CA Physics Basketball # Jonathan Male 2. 
Sophomore SC Math Debate class Sheets < BaseCli Sheets = Google::Apis::SheetsV4 desc 'get_values', 'Get values from Google Spreadseets ' method_option :spreadsheet_key, type: :string, required: true method_option :range, type: :string, required: true def get_values sheets = Sheets::SheetsService.new sheets.authorization = user_credentials_for(Sheets::AUTH_SPREADSHEETS_READONLY) result = sheets.get_spreadsheet_values(options[:spreadsheet_key], options[:range]) puts result.inspect data = [] rows = result.values.length rows.times.each do |k| data.push(result.values[k]) end print_table(data) end default_task :get_vlaues end end
module FFI::Platform::POSIX #-- # Internal class for accessing timevals #++ class TimeVal < FFI::Struct config 'rbx.platform.timeval', :tv_sec, :tv_usec end end class File < IO include Enumerable class FileError < Exception; end class NoFileError < FileError; end class UnableToStat < FileError; end class PermissionError < FileError; end module Constants F_GETFL = Rubinius::Config['rbx.platform.fcntl.F_GETFL'] F_SETFL = Rubinius::Config['rbx.platform.fcntl.F_SETFL'] # O_ACCMODE is /undocumented/ for fcntl() on some platforms ACCMODE = Rubinius::Config['rbx.platform.fcntl.O_ACCMODE'] RDONLY = Rubinius::Config['rbx.platform.file.O_RDONLY'] WRONLY = Rubinius::Config['rbx.platform.file.O_WRONLY'] RDWR = Rubinius::Config['rbx.platform.file.O_RDWR'] CREAT = Rubinius::Config['rbx.platform.file.O_CREAT'] EXCL = Rubinius::Config['rbx.platform.file.O_EXCL'] NOCTTY = Rubinius::Config['rbx.platform.file.O_NOCTTY'] TRUNC = Rubinius::Config['rbx.platform.file.O_TRUNC'] APPEND = Rubinius::Config['rbx.platform.file.O_APPEND'] NONBLOCK = Rubinius::Config['rbx.platform.file.O_NONBLOCK'] SYNC = Rubinius::Config['rbx.platform.file.O_SYNC'] # TODO: these flags should probably be imported from Platform LOCK_SH = 0x01 LOCK_EX = 0x02 LOCK_NB = 0x04 LOCK_UN = 0x08 BINARY = 0x04 # TODO: "OK" constants aren't in File::Constants in MRI F_OK = 0 # test for existence of file X_OK = 1 # test for execute or search permission W_OK = 2 # test for write permission R_OK = 4 # test for read permission FNM_NOESCAPE = 0x01 FNM_PATHNAME = 0x02 FNM_DOTMATCH = 0x04 FNM_CASEFOLD = 0x08 end SEPARATOR = FFI::Platform::File::SEPARATOR Separator = FFI::Platform::File::SEPARATOR ALT_SEPARATOR = FFI::Platform::File::ALT_SEPARATOR PATH_SEPARATOR = FFI::Platform::File::PATH_SEPARATOR POSIX = FFI::Platform::POSIX attr_reader :path def initialize(path_or_fd, mode = "r", perm = 0666) if path_or_fd.kind_of? 
Integer super(path_or_fd, mode) @path = nil else path = StringValue(path_or_fd) fd = IO.sysopen(path, mode, perm) if fd < 0 begin Errno.handle path rescue Errno::EMFILE # true means force to run, don't ignore it. GC.run(true) fd = IO.sysopen(path, mode, perm) Errno.handle if fd < 0 end end @path = path super(fd) end end private :initialize # The mode_t type is 2 bytes (ushort). Instead of getting whatever # value happens to be in the least significant 16 bits, just set # the value to 0 if it is greater than 0xffff. Also, negative values # don't make any sense here. def clamp_short(value) mode = Type.coerce_to value, Integer, :to_int mode < 0 || mode > 0xffff ? 0 : mode end module_function :clamp_short ## # Returns the last access time for the named file as a Time object). # # File.atime("testfile") #=> Wed Apr 09 08:51:48 CDT 2003 def self.atime(path) Stat.new(path).atime end ## # Returns the last component of the filename given # in file_name, which must be formed using forward # slashes (``/’’) regardless of the separator used # on the local file system. If suffix is given and # present at the end of file_name, it is removed. # # File.basename("/home/gumby/work/ruby.rb") #=> "ruby.rb" # File.basename("/home/gumby/work/ruby.rb", ".rb") #=> "ruby" def self.basename(path,ext=undefined) path = StringValue(path) slash = "/" ext_not_present = ext.equal?(undefined) if pos = path.find_string_reverse(slash, path.size) # special case. If the string ends with a /, ignore it. if pos == path.size - 1 # Find the first non-/ from the right data = path.data found = false pos.downto(0) do |i| if data[i] != ?/ path = path.substring(0, i+1) found = true break end end # edge case, it's all /'s, return "/" return slash unless found # Now that we've trimmed the /'s at the end, search again pos = path.find_string_reverse(slash, path.size) if ext_not_present and !pos # No /'s found and ext not present, return path. 
return path end end path = path.substring(pos + 1, path.size - pos) if pos end return path if ext_not_present # special case. if ext is ".*", remove any extension ext = StringValue(ext) if ext == ".*" if pos = path.rindex(?.) return path.substring(0, pos) end elsif pos = path.rindex(ext) # Check that ext is the last thing in the string if pos == path.size - ext.size return path.substring(0, pos) end end return path end ## # Returns true if the named file is a block device. def self.blockdev?(path) st = Stat.stat path st ? st.blockdev? : false end ## # Returns true if the named file is a character device. def self.chardev?(path) st = Stat.stat path st ? st.chardev? : false end ## # Changes permission bits on the named file(s) to # the bit pattern represented by mode_int. Actual # effects are operating system dependent (see the # beginning of this section). On Unix systems, see # chmod(2) for details. Returns the number of files processed. # # File.chmod(0644, "testfile", "out") #=> 2 def self.chmod(mode, *paths) mode = clamp_short mode paths.each do |path| path = Type.coerce_to(path, String, :to_str) unless path.is_a? String POSIX.chmod(path, mode) end paths.size end ## # Equivalent to File::chmod, but does not follow symbolic # links (so it will change the permissions associated with # the link, not the file referenced by the link). # Often not available. def self.lchmod(mode, *paths) mode = Type.coerce_to(mode, Integer, :to_int) unless mode.is_a? Integer paths.each do |path| path = Type.coerce_to(path, String, :to_str) unless path.is_a? String POSIX.lchmod(path, mode) end paths.size end ## # Changes the owner and group of the # named file(s) to the given numeric owner # and group id‘s. Only a process with superuser # privileges may change the owner of a file. The # current owner of a file may change the file‘s # group to any group to which the owner belongs. # A nil or -1 owner or group id is ignored. # Returns the number of files processed. 
# # File.chown(nil, 100, "testfile") def self.chown(owner_int, group_int, *paths) owner_int = -1 if owner_int == nil group_int = -1 if group_int == nil paths.each { |path| POSIX.chown(path, owner_int, group_int) } paths.size end ## # Equivalent to File::chown, but does not follow # symbolic links (so it will change the owner # associated with the link, not the file referenced # by the link). Often not available. Returns number # of files in the argument list. def self.lchown(owner_int, group_int, *paths) owner_int = -1 if owner_int == nil group_int = -1 if group_int == nil paths.each { |path| POSIX.lchown(path, owner_int, group_int) } paths.size end ## # Returns the change time for the named file (the # time at which directory information about the # file was changed, not the file itself). # # File.ctime("testfile") #=> Wed Apr 09 08:53:13 CDT 2003 def self.ctime(path) Stat.new(path).ctime end ## # Returns true if the named file is a directory, false otherwise. # # File.directory?(".") def self.directory?(path) st = Stat.stat path st ? st.directory? : false end def self.last_nonslash(path,start=nil) # Find the first non-/ from the right data = path.data idx = nil start ||= (path.size - 1) start.downto(0) do |i| if data[i] != ?/ return i end end return nil end ## # Returns all components of the filename given in # file_name except the last one. The filename must be # formed using forward slashes (``/’’) regardless of # the separator used on the local file system. # # File.dirname("/home/gumby/work/ruby.rb") #=> "/home/gumby/work" def self.dirname(path) path = StringValue(path) # edge case return "." if path.empty? slash = "/" # pull off any /'s at the end to ignore chunk_size = last_nonslash(path) return "/" unless chunk_size if pos = path.find_string_reverse(slash, chunk_size) return "/" if pos == 0 path = path.substring(0, pos) return "/" if path == "/" return path unless path.suffix? 
slash # prune any trailing /'s idx = last_nonslash(path, pos) # edge case, only /'s, return / return "/" unless idx return path.substring(0, idx - 1) end return "." end ## # Returns true if the named file is executable by the # effective user id of this process. def self.executable?(path) st = Stat.stat path st ? st.executable? : false end ## # Returns true if the named file is executable by # the real user id of this process. def self.executable_real?(path) st = Stat.stat path st ? st.executable_real? : false end ## # Return true if the named file exists. def self.exist?(path) path = StringValue path POSIX.stat(path, Stat::EXISTS_STRUCT.pointer) == 0 ? true : false end ## # Converts a pathname to an absolute pathname. Relative # paths are referenced from the current working directory # of the process unless dir_string is given, in which case # it will be used as the starting point. The given pathname # may start with a ``~’’, which expands to the process owner‘s # home directory (the environment variable HOME must be set # correctly). "~user" expands to the named user‘s home directory. # # File.expand_path("~oracle/bin") #=> "/home/oracle/bin" # File.expand_path("../../bin", "/tmp/x") #=> "/bin" def self.expand_path(path, dir=nil) path = StringValue(path) first = path[0] if first == ?~ case path[1] when ?/ path = ENV["HOME"] + path.substring(1, path.size - 1) when nil return ENV["HOME"] else unless length = path.index("/", 1) length = path.size end name = path.substring 1, length - 1 unless dir = Rubinius.get_user_home(name) raise ArgumentError, "user #{name} does not exist" end path = dir + path.substring(length, path.size - length) end elsif first != ?/ if dir dir = File.expand_path dir else dir = Dir.pwd end path = "#{dir}/#{path}" end items = [] start = 0 size = path.size while index = path.index("/", start) or (start < size and index = size) length = index - start if length > 0 item = path.substring start, length if item == ".." items.pop elsif item != "." 
items << item end end start = index + 1 end return "/" if items.empty? str = "" iter = items.to_iter while iter.next str.append "/#{iter.item}" end return str end ## # Returns the extension (the portion of file name in # path after the period). # # File.extname("test.rb") #=> ".rb" # File.extname("a/b/d/test.rb") #=> ".rb" # File.extname("test") #=> "" # File.extname(".profile") #=> "" def self.extname(path) path = StringValue(path) path_size = path.size dot_idx = path.find_string_reverse(".", path_size) # No dots at all return "" unless dot_idx slash_idx = path.find_string_reverse("/", path_size) # pretend there is / just to the left of the start of the string slash_idx ||= -1 # no . in the last component of the path return "" if dot_idx < slash_idx # last component starts with a . return "" if dot_idx == slash_idx + 1 # last component ends with a . return "" if dot_idx == path_size - 1 return path.substring(dot_idx, path_size - dot_idx) end ## # Returns true if the named file exists and is a regular file. def self.file?(path) st = Stat.stat path st ? st.file? : false end ## # Returns true if path matches against pattern The pattern # is not a regular expression; instead it follows rules # similar to shell filename globbing. It may contain the # following metacharacters: # # *: Matches any file. Can be restricted by other values in the glob. * will match all files; c* will match all files beginning with c; *c will match all files ending with c; and c will match all files that have c in them (including at the beginning or end). Equivalent to / .* /x in regexp. # **: Matches directories recursively or files expansively. # ?: Matches any one character. Equivalent to /.{1}/ in regexp. # [set]: Matches any one character in set. Behaves exactly like character sets in Regexp, including set negation ([^a-z]). # <code></code>: Escapes the next metacharacter. # flags is a bitwise OR of the FNM_xxx parameters. The same glob pattern and flags are used by Dir::glob. 
# # File.fnmatch('cat', 'cat') #=> true : match entire string # File.fnmatch('cat', 'category') #=> false : only match partial string # File.fnmatch('c{at,ub}s', 'cats') #=> false : { } isn't supported # # File.fnmatch('c?t', 'cat') #=> true : '?' match only 1 character # File.fnmatch('c??t', 'cat') #=> false : ditto # File.fnmatch('c*', 'cats') #=> true : '*' match 0 or more characters # File.fnmatch('c*t', 'c/a/b/t') #=> true : ditto # File.fnmatch('ca[a-z]', 'cat') #=> true : inclusive bracket expression # File.fnmatch('ca[^t]', 'cat') #=> false : exclusive bracket expression ('^' or '!') # # File.fnmatch('cat', 'CAT') #=> false : case sensitive # File.fnmatch('cat', 'CAT', File::FNM_CASEFOLD) #=> true : case insensitive # # File.fnmatch('?', '/', File::FNM_PATHNAME) #=> false : wildcard doesn't match '/' on FNM_PATHNAME # File.fnmatch('*', '/', File::FNM_PATHNAME) #=> false : ditto # File.fnmatch('[/]', '/', File::FNM_PATHNAME) #=> false : ditto # # File.fnmatch('\?', '?') #=> true : escaped wildcard becomes ordinary # File.fnmatch('\a', 'a') #=> true : escaped ordinary remains ordinary # File.fnmatch('\a', '\a', File::FNM_NOESCAPE) #=> true : FNM_NOESACPE makes '\' ordinary # File.fnmatch('[\?]', '?') #=> true : can escape inside bracket expression # # File.fnmatch('*', '.profile') #=> false : wildcard doesn't match leading # File.fnmatch('*', '.profile', File::FNM_DOTMATCH) #=> true period by default. # File.fnmatch('.*', '.profile') #=> true # # rbfiles = '**' '/' '*.rb' # you don't have to do like this. just write in single string. 
# File.fnmatch(rbfiles, 'main.rb') #=> false # File.fnmatch(rbfiles, './main.rb') #=> false # File.fnmatch(rbfiles, 'lib/song.rb') #=> true # File.fnmatch('**.rb', 'main.rb') #=> true # File.fnmatch('**.rb', './main.rb') #=> false # File.fnmatch('**.rb', 'lib/song.rb') #=> true # File.fnmatch('*', 'dave/.profile') #=> true # # pattern = '*' '/' '*' # File.fnmatch(pattern, 'dave/.profile', File::FNM_PATHNAME) #=> false # File.fnmatch(pattern, 'dave/.profile', File::FNM_PATHNAME | File::FNM_DOTMATCH) #=> true # # pattern = '**' '/' 'foo' # File.fnmatch(pattern, 'a/b/c/foo', File::FNM_PATHNAME) #=> true # File.fnmatch(pattern, '/a/b/c/foo', File::FNM_PATHNAME) #=> true # File.fnmatch(pattern, 'c:/a/b/c/foo', File::FNM_PATHNAME) #=> true # File.fnmatch(pattern, 'a/.b/c/foo', File::FNM_PATHNAME) #=> false # File.fnmatch(pattern, 'a/.b/c/foo', File::FNM_PATHNAME | File::FNM_DOTMATCH) #=> true def self.fnmatch(pattern, path, flags=0) pattern = StringValue(pattern) path = StringValue(path) flags = Type.coerce_to(flags, Fixnum, :to_int) super pattern, path, flags end ## # Identifies the type of the named file; the return string is # one of "file", "directory", "characterSpecial", # "blockSpecial", "fifo", "link", "socket", or "unknown". # # File.ftype("testfile") #=> "file" # File.ftype("/dev/tty") #=> "characterSpecial" # File.ftype("/tmp/.X11-unix/X0") #=> "socket" def self.ftype(path) lstat(path).ftype end ## # Returns true if the named file exists and the effective # group id of the calling process is the owner of the file. # Returns false on Windows. def self.grpowned?(path) begin lstat(path).grpowned? rescue false end end ## # Returns true if the named files are identical. 
# # open("a", "w") {} # p File.identical?("a", "a") #=> true # p File.identical?("a", "./a") #=> true # File.link("a", "b") # p File.identical?("a", "b") #=> true # File.symlink("a", "c") # p File.identical?("a", "c") #=> true # open("d", "w") {} # p File.identical?("a", "d") #=> false def self.identical?(orig, copy) st_o = stat(StringValue(orig)) st_c = stat(StringValue(copy)) return false unless st_o.ino == st_c.ino return false unless st_o.ftype == st_c.ftype return false unless POSIX.access(orig, Constants::R_OK) return false unless POSIX.access(copy, Constants::R_OK) true end ## # Returns a new string formed by joining the strings using File::SEPARATOR. # # File.join("usr", "mail", "gumby") #=> "usr/mail/gumby" def self.join(*args) return '' if args.empty? sep = SEPARATOR # The first one is unrolled out of the loop to remove a condition # from the loop. It seems needless, but you'd be surprised how much hinges # on the performance of File.join # first = args.shift case first when String first = first.dup when Array recursion = Thread.detect_recursion(first) do first = join(*first) end raise ArgumentError, "recursive array" if recursion else # We need to use dup here, since it's possible that # StringValue gives us a direct object we shouldn't mutate first = StringValue(first).dup end ret = first args.each do |el| value = nil case el when String value = el when Array recursion = Thread.detect_recursion(el) do value = join(*el) end raise ArgumentError, "recursive array" if recursion else value = StringValue(el) end if value.prefix? sep ret.gsub!(/#{SEPARATOR}+$/, '') elsif not ret.suffix? sep ret << sep end ret << value end ret end ## # Creates a new name for an existing file using a hard link. # Will not overwrite new_name if it already exists (raising # a subclass of SystemCallError). Not available on all platforms. 
# # File.link("testfile", ".testfile") #=> 0 # IO.readlines(".testfile")[0] #=> "This is line one\n" def self.link(from, to) to = StringValue(to) from = StringValue(from) n = POSIX.link(from, to) Errno.handle if n == -1 n end ## # Same as File::stat, but does not follow the last symbolic link. # Instead, reports on the link itself. # # File.symlink("testfile", "link2test") #=> 0 # File.stat("testfile").size #=> 66 # File.lstat("link2test").size #=> 8 # File.stat("link2test").size #=> 66 def self.lstat(path) Stat.lstat path end ## # Returns the modification time for the named file as a Time object. # # File.mtime("testfile") #=> Tue Apr 08 12:58:04 CDT 2003 def self.mtime(path) Stat.new(path).mtime end ## # Returns true if the named file is a pipe. def self.pipe?(path) st = Stat.stat path st ? st.pipe? : false end ## # Returns true if the named file is readable by the effective # user id of this process. def self.readable?(path) st = Stat.stat path st ? st.readable? : false end ## # Returns true if the named file is readable by the real user # id of this process. def self.readable_real?(path) st = Stat.stat path st ? st.readable_real? : false end ## # Returns the name of the file referenced by the given link. # Not available on all platforms. # # File.symlink("testfile", "link2test") #=> 0 # File.readlink("link2test") #=> "testfile" def self.readlink(path) StringValue(path) FFI::MemoryPointer.new(1024) do |ptr| n = POSIX.readlink(path, ptr, 1024) Errno.handle if n == -1 return ptr.read_string(n) end end ## # Renames the given file to the new name. Raises a SystemCallError # if the file cannot be renamed. # # File.rename("afile", "afile.bak") #=> 0 def self.rename(from, to) to = StringValue(to) from = StringValue(from) n = POSIX.rename(from, to) Errno.handle if n == -1 n end ## # Returns the size of file_name. def self.size(path) if path.is_a? 
File Stat.from_fd(path.fileno).size else stat(path).size end end ## # Returns nil if file_name doesn‘t exist or has zero size, # the size of the file otherwise. def self.size?(path_or_file) s = 0 if path_or_file.is_a? File s = Stat.from_fd(path_or_file.fileno).size else st = Stat.stat path_or_file s = st.size if st end s > 0 ? s : nil end ## # Returns true if the named file is a socket. def self.socket?(path) st = Stat.stat path st ? st.socket? : false end ## # Splits the given string into a directory and a file component and returns them in a two-element array. See also File::dirname and File::basename. # # File.split("/home/gumby/.profile") #=> ["/home/gumby", ".profile"] def self.split(path) p = StringValue(path) [dirname(p), basename(p)] end ## # Returns a File::Stat object for the named file (see File::Stat). # # File.stat("testfile").mtime #=> Tue Apr 08 12:58:04 CDT 2003 def self.stat(path) Stat.new path end ## # Creates a symbolic link called new_name for the # existing file old_name. Raises a NotImplemented # exception on platforms that do not support symbolic links. # # File.symlink("testfile", "link2test") #=> 0 def self.symlink(from, to) to = StringValue(to) from = StringValue(from) n = POSIX.symlink(from, to) Errno.handle if n == -1 n end ## # Returns true if the named file is a symbolic link. def self.symlink?(path) Stat.lstat(path).symlink? rescue Errno::ENOENT, Errno::ENODIR false end ## # Copies a file from to to. If to is a directory, copies from to to/from. def self.syscopy(from, to) out = File.directory?(to) ? to + File.basename(from) : to open(out, 'w') do |f| f.write read(from).read end end ## # Truncates the file file_name to be at most integer # bytes long. Not available on all platforms. 
# # f = File.new("out", "w") # f.write("1234567890") #=> 10 # f.close #=> nil # File.truncate("out", 5) #=> 0 # File.size("out") #=> 5 def self.truncate(path, length) unless self.exist?(path) raise Errno::ENOENT, path end unless length.respond_to?(:to_int) raise TypeError, "can't convert #{length.class} into Integer" end n = POSIX.truncate(path, length) Errno.handle if n == -1 n end ## # Returns the current umask value for this process. # If the optional argument is given, set the umask # to that value and return the previous value. Umask # values are subtracted from the default permissions, # so a umask of 0222 would make a file read-only for # everyone. # # File.umask(0006) #=> 18 # File.umask #=> 6 def self.umask(mask = nil) if mask POSIX.umask(clamp_short(mask)) else old_mask = POSIX.umask(0) POSIX.umask(old_mask) old_mask end end ## # Deletes the named files, returning the number of names # passed as arguments. Raises an exception on any error. # # See also Dir::rmdir. def self.unlink(*paths) paths.each do |path| path = StringValue(path) n = POSIX.unlink(path) Errno.handle if n == -1 end paths.size end ## # Sets the access and modification times of each named # file to the first two arguments. Returns the number # of file names in the argument list. # #=> Integer def self.utime(a_in, m_in, *paths) FFI::MemoryPointer.new(POSIX::TimeVal, 2) do |ptr| atime = POSIX::TimeVal.new ptr mtime = POSIX::TimeVal.new ptr[1] atime[:tv_sec] = a_in.to_i atime[:tv_usec] = 0 mtime[:tv_sec] = m_in.to_i mtime[:tv_usec] = 0 paths.each do |path| if POSIX.utimes(path, ptr) != 0 Errno.handle end end end end ## # Returns true if the named file is writable by the effective # user id of this process. def self.writable?(path) st = Stat.stat path st ? st.writable? : false end ## # Returns true if the named file is writable by the real user # id of this process. def self.writable_real?(path) st = Stat.stat path st ? st.writable_real? 
: false end ## # Returns true if the named file exists and has a zero size. def self.zero?(path) st = Stat.stat path st ? st.zero? : false end ## # Returns true if the named file exists and the effective # used id of the calling process is the owner of the file. # File.owned?(file_name) => true or false def self.owned?(file_name) Stat.new(file_name).owned? end ## # Returns true if the named file has the setgid bit set. def self.setgid?(file_name) Stat.new(file_name).setgid? rescue Errno::ENOENT return false end ## # Returns true if the named file has the setuid bit set. def self.setuid?(file_name) Stat.new(file_name).setuid? rescue Errno::ENOENT return false end ## # Returns true if the named file has the sticky bit set. def self.sticky?(file_name) Stat.new(file_name).sticky? rescue Errno::ENOENT return false end ## # Returns true if the named file exists and the effective # used id of the calling process is the owner of the file. def self.owned?(file_name) Stat.new(file_name).owned? end class << self alias_method :delete, :unlink alias_method :exists?, :exist? alias_method :fnmatch?, :fnmatch end def atime Stat.new(@path).atime end def reopen(other, mode = 'r+') rewind unless closed? super(other, mode) end def chmod(mode) POSIX.fchmod(@descriptor, clamp_short(mode)) end def chown(owner_int, group_int) POSIX.fchown(@descriptor, owner_int || -1, group_int || -1) end def ctime Stat.new(@path).ctime end def flock(locking_constant) result = POSIX.flock(@descriptor, locking_constant) return false if result == -1 result end def lstat Stat.lstat @path end def mtime Stat.new(@path).mtime end def stat Stat.from_fd @descriptor end def truncate(length) length = Type.coerce_to(length, Integer, :to_int) raise Errno::EINVAL, "Can't truncate a file to a negative length" if length < 0 raise IOError, "File is closed" if closed? 
n = POSIX.ftruncate(@descriptor, length) Errno.handle if n == -1 n end def inspect return_string = "#<#{self.class}:0x#{object_id.to_s(16)} path=#{@path}" return_string << " (closed)" if closed? return_string << ">" end end # File # Inject the constants into IO class IO include File::Constants end class File::Stat class Struct < FFI::Struct config "rbx.platform.stat", :st_dev, :st_ino, :st_mode, :st_nlink, :st_uid, :st_gid, :st_rdev, :st_size, :st_blksize, :st_blocks, :st_atime, :st_mtime, :st_ctime end EXISTS_STRUCT = Struct.new include Comparable S_IRUSR = Rubinius::Config['rbx.platform.file.S_IRUSR'] S_IWUSR = Rubinius::Config['rbx.platform.file.S_IWUSR'] S_IXUSR = Rubinius::Config['rbx.platform.file.S_IXUSR'] S_IRGRP = Rubinius::Config['rbx.platform.file.S_IRGRP'] S_IWGRP = Rubinius::Config['rbx.platform.file.S_IWGRP'] S_IXGRP = Rubinius::Config['rbx.platform.file.S_IXGRP'] S_IROTH = Rubinius::Config['rbx.platform.file.S_IROTH'] S_IWOTH = Rubinius::Config['rbx.platform.file.S_IWOTH'] S_IXOTH = Rubinius::Config['rbx.platform.file.S_IXOTH'] S_IFMT = Rubinius::Config['rbx.platform.file.S_IFMT'] S_IFIFO = Rubinius::Config['rbx.platform.file.S_IFIFO'] S_IFCHR = Rubinius::Config['rbx.platform.file.S_IFCHR'] S_IFDIR = Rubinius::Config['rbx.platform.file.S_IFDIR'] S_IFBLK = Rubinius::Config['rbx.platform.file.S_IFBLK'] S_IFREG = Rubinius::Config['rbx.platform.file.S_IFREG'] S_IFLNK = Rubinius::Config['rbx.platform.file.S_IFLNK'] S_IFSOCK = Rubinius::Config['rbx.platform.file.S_IFSOCK'] S_IFWHT = Rubinius::Config['rbx.platform.file.S_IFWHT'] S_ISUID = Rubinius::Config['rbx.platform.file.S_ISUID'] S_ISGID = Rubinius::Config['rbx.platform.file.S_ISGID'] S_ISVTX = Rubinius::Config['rbx.platform.file.S_ISVTX'] POSIX = FFI::Platform::POSIX attr_reader :path def self.create(path) path = StringValue path stat = allocate Rubinius.privately { stat.setup path, Struct.new } end def self.stat(path) stat = create path result = POSIX.stat stat.path, stat.pointer return nil unless 
result == 0 stat end # -- # Stat.lstat raises whereas Stat.stat does not because most things # that use Stat.stat do not expect exceptions but most things that # uses Stat.lstat do. # ++ def self.lstat(path) stat = create path result = POSIX.lstat stat.path, stat.pointer Errno.handle path unless result == 0 stat end ## # File::Stat#from_fd is used to support IO#stat which does not necessarily # have a path. def self.from_fd(descriptor) stat = allocate struct = Struct.new result = POSIX.fstat descriptor, struct.pointer Errno.handle "file descriptor #{descriptor}" unless result == 0 Rubinius.privately { stat.setup nil, struct } end def initialize(path) @path = StringValue path @stat = Struct.new result = POSIX.stat @path, @stat.pointer Errno.handle path unless result == 0 end private :initialize def setup(path, struct) @path = path @stat = struct self end private :setup def pointer @stat.pointer end def atime Time.at @stat[:st_atime] end def blksize @stat[:st_blksize] end def blocks @stat[:st_blocks] end def blockdev? @stat[:st_mode] & S_IFMT == S_IFBLK end def chardev? @stat[:st_mode] & S_IFMT == S_IFCHR end def ctime Time.at @stat[:st_ctime] end def dev @stat[:st_dev] end def dev_major major = POSIX.major @stat[:st_dev] major < 0 ? nil : major end def dev_minor minor = POSIX.major @stat[:st_dev] minor < 0 ? nil : minor end def directory? @stat[:st_mode] & S_IFMT == S_IFDIR end def executable? return true if superuser? return @stat[:st_mode] & S_IXUSR != 0 if owned? return @stat[:st_mode] & S_IXGRP != 0 if grpowned? return @stat[:st_mode] & S_IXOTH != 0 end def executable_real? return true if rsuperuser? return @stat[:st_mode] & S_IXUSR != 0 if rowned? return @stat[:st_mode] & S_IXGRP != 0 if rgrpowned? return @stat[:st_mode] & S_IXOTH != 0 end def file? @stat[:st_mode] & S_IFMT == S_IFREG end def ftype if file? "file" elsif directory? "directory" elsif chardev? "characterSpecial" elsif blockdev? "blockSpecial" elsif pipe? "fifo" elsif socket? 
"socket" elsif symlink? "link" else "unknown" end end def gid @stat[:st_gid] end def grpowned? @stat[:st_gid] == POSIX.getegid end def ino @stat[:st_ino] end def inspect "#<File::Stat dev=0x#{self.dev.to_s(16)}, ino=#{self.ino}, " \ "mode=#{sprintf("%07d", self.mode.to_s(8).to_i)}, nlink=#{self.nlink}, " \ "uid=#{self.uid}, gid=#{self.gid}, rdev=0x#{self.rdev.to_s(16)}, " \ "size=#{self.size}, blksize=#{self.blksize}, blocks=#{self.blocks}, " \ "atime=#{self.atime}, mtime=#{self.mtime}, ctime=#{self.ctime}>" end def nlink @stat[:st_nlink] end def mtime Time.at @stat[:st_mtime] end def mode @stat[:st_mode] end def owned? @stat[:st_uid] == POSIX.geteuid end def path @path end def pipe? @stat[:st_mode] & S_IFMT == S_IFIFO end def rdev @stat[:st_rdev] end def rdev_major major = POSIX.major @stat[:st_rdev] major < 0 ? nil : major end def rdev_minor minor = POSIX.minor @stat[:st_rdev] minor < 0 ? nil : minor end def readable? return true if superuser? return @stat[:st_mode] & S_IRUSR != 0 if owned? return @stat[:st_mode] & S_IRGRP != 0 if grpowned? return @stat[:st_mode] & S_IROTH != 0 end def readable_real? return true if rsuperuser? return @stat[:st_mode] & S_IRUSR != 0 if rowned? return @stat[:st_mode] & S_IRGRP != 0 if rgrpowned? return @stat[:st_mode] & S_IROTH != 0 end def setgid? @stat[:st_mode] & S_ISGID != 0 end def setuid? @stat[:st_mode] & S_ISUID != 0 end def sticky? @stat[:st_mode] & S_ISVTX != 0 end def size @stat[:st_size] end def size? size == 0 ? nil : size end def socket? @stat[:st_mode] & S_IFMT == S_IFSOCK end def symlink? @stat[:st_mode] & S_IFMT == S_IFLNK end def uid @stat[:st_uid] end def writable? return true if superuser? return @stat[:st_mode] & S_IWUSR != 0 if owned? return @stat[:st_mode] & S_IWGRP != 0 if grpowned? return @stat[:st_mode] & S_IWOTH != 0 end def writable_real? return true if rsuperuser? return @stat[:st_mode] & S_IWUSR != 0 if rowned? return @stat[:st_mode] & S_IWGRP != 0 if rgrpowned? 
return @stat[:st_mode] & S_IWOTH != 0 end def zero? @stat[:st_size] == 0 end def <=> (other) return nil unless other.is_a?(File::Stat) self.mtime <=> other.mtime end def rgrpowned? @stat[:st_gid] == POSIX.getgid end private :rgrpowned? def rowned? @stat[:st_uid] == POSIX.getuid end private :rowned? def rsuperuser? POSIX.getuid == 0 end private :rsuperuser? def superuser? POSIX.geteuid == 0 end private :superuser? end # File::Stat Fixed File.size and .size?. module FFI::Platform::POSIX #-- # Internal class for accessing timevals #++ class TimeVal < FFI::Struct config 'rbx.platform.timeval', :tv_sec, :tv_usec end end class File < IO include Enumerable class FileError < Exception; end class NoFileError < FileError; end class UnableToStat < FileError; end class PermissionError < FileError; end module Constants F_GETFL = Rubinius::Config['rbx.platform.fcntl.F_GETFL'] F_SETFL = Rubinius::Config['rbx.platform.fcntl.F_SETFL'] # O_ACCMODE is /undocumented/ for fcntl() on some platforms ACCMODE = Rubinius::Config['rbx.platform.fcntl.O_ACCMODE'] RDONLY = Rubinius::Config['rbx.platform.file.O_RDONLY'] WRONLY = Rubinius::Config['rbx.platform.file.O_WRONLY'] RDWR = Rubinius::Config['rbx.platform.file.O_RDWR'] CREAT = Rubinius::Config['rbx.platform.file.O_CREAT'] EXCL = Rubinius::Config['rbx.platform.file.O_EXCL'] NOCTTY = Rubinius::Config['rbx.platform.file.O_NOCTTY'] TRUNC = Rubinius::Config['rbx.platform.file.O_TRUNC'] APPEND = Rubinius::Config['rbx.platform.file.O_APPEND'] NONBLOCK = Rubinius::Config['rbx.platform.file.O_NONBLOCK'] SYNC = Rubinius::Config['rbx.platform.file.O_SYNC'] # TODO: these flags should probably be imported from Platform LOCK_SH = 0x01 LOCK_EX = 0x02 LOCK_NB = 0x04 LOCK_UN = 0x08 BINARY = 0x04 # TODO: "OK" constants aren't in File::Constants in MRI F_OK = 0 # test for existence of file X_OK = 1 # test for execute or search permission W_OK = 2 # test for write permission R_OK = 4 # test for read permission FNM_NOESCAPE = 0x01 FNM_PATHNAME = 0x02 
FNM_DOTMATCH = 0x04 FNM_CASEFOLD = 0x08 end SEPARATOR = FFI::Platform::File::SEPARATOR Separator = FFI::Platform::File::SEPARATOR ALT_SEPARATOR = FFI::Platform::File::ALT_SEPARATOR PATH_SEPARATOR = FFI::Platform::File::PATH_SEPARATOR POSIX = FFI::Platform::POSIX attr_reader :path def initialize(path_or_fd, mode = "r", perm = 0666) if path_or_fd.kind_of? Integer super(path_or_fd, mode) @path = nil else path = StringValue(path_or_fd) fd = IO.sysopen(path, mode, perm) if fd < 0 begin Errno.handle path rescue Errno::EMFILE # true means force to run, don't ignore it. GC.run(true) fd = IO.sysopen(path, mode, perm) Errno.handle if fd < 0 end end @path = path super(fd) end end private :initialize # The mode_t type is 2 bytes (ushort). Instead of getting whatever # value happens to be in the least significant 16 bits, just set # the value to 0 if it is greater than 0xffff. Also, negative values # don't make any sense here. def clamp_short(value) mode = Type.coerce_to value, Integer, :to_int mode < 0 || mode > 0xffff ? 0 : mode end module_function :clamp_short ## # Returns the last access time for the named file as a Time object). # # File.atime("testfile") #=> Wed Apr 09 08:51:48 CDT 2003 def self.atime(path) Stat.new(path).atime end ## # Returns the last component of the filename given # in file_name, which must be formed using forward # slashes (``/’’) regardless of the separator used # on the local file system. If suffix is given and # present at the end of file_name, it is removed. # # File.basename("/home/gumby/work/ruby.rb") #=> "ruby.rb" # File.basename("/home/gumby/work/ruby.rb", ".rb") #=> "ruby" def self.basename(path,ext=undefined) path = StringValue(path) slash = "/" ext_not_present = ext.equal?(undefined) if pos = path.find_string_reverse(slash, path.size) # special case. If the string ends with a /, ignore it. 
if pos == path.size - 1 # Find the first non-/ from the right data = path.data found = false pos.downto(0) do |i| if data[i] != ?/ path = path.substring(0, i+1) found = true break end end # edge case, it's all /'s, return "/" return slash unless found # Now that we've trimmed the /'s at the end, search again pos = path.find_string_reverse(slash, path.size) if ext_not_present and !pos # No /'s found and ext not present, return path. return path end end path = path.substring(pos + 1, path.size - pos) if pos end return path if ext_not_present # special case. if ext is ".*", remove any extension ext = StringValue(ext) if ext == ".*" if pos = path.rindex(?.) return path.substring(0, pos) end elsif pos = path.rindex(ext) # Check that ext is the last thing in the string if pos == path.size - ext.size return path.substring(0, pos) end end return path end ## # Returns true if the named file is a block device. def self.blockdev?(path) st = Stat.stat path st ? st.blockdev? : false end ## # Returns true if the named file is a character device. def self.chardev?(path) st = Stat.stat path st ? st.chardev? : false end ## # Changes permission bits on the named file(s) to # the bit pattern represented by mode_int. Actual # effects are operating system dependent (see the # beginning of this section). On Unix systems, see # chmod(2) for details. Returns the number of files processed. # # File.chmod(0644, "testfile", "out") #=> 2 def self.chmod(mode, *paths) mode = clamp_short mode paths.each do |path| path = Type.coerce_to(path, String, :to_str) unless path.is_a? String POSIX.chmod(path, mode) end paths.size end ## # Equivalent to File::chmod, but does not follow symbolic # links (so it will change the permissions associated with # the link, not the file referenced by the link). # Often not available. def self.lchmod(mode, *paths) mode = Type.coerce_to(mode, Integer, :to_int) unless mode.is_a? Integer paths.each do |path| path = Type.coerce_to(path, String, :to_str) unless path.is_a? 
String POSIX.lchmod(path, mode) end paths.size end ## # Changes the owner and group of the # named file(s) to the given numeric owner # and group id‘s. Only a process with superuser # privileges may change the owner of a file. The # current owner of a file may change the file‘s # group to any group to which the owner belongs. # A nil or -1 owner or group id is ignored. # Returns the number of files processed. # # File.chown(nil, 100, "testfile") def self.chown(owner_int, group_int, *paths) owner_int = -1 if owner_int == nil group_int = -1 if group_int == nil paths.each { |path| POSIX.chown(path, owner_int, group_int) } paths.size end ## # Equivalent to File::chown, but does not follow # symbolic links (so it will change the owner # associated with the link, not the file referenced # by the link). Often not available. Returns number # of files in the argument list. def self.lchown(owner_int, group_int, *paths) owner_int = -1 if owner_int == nil group_int = -1 if group_int == nil paths.each { |path| POSIX.lchown(path, owner_int, group_int) } paths.size end ## # Returns the change time for the named file (the # time at which directory information about the # file was changed, not the file itself). # # File.ctime("testfile") #=> Wed Apr 09 08:53:13 CDT 2003 def self.ctime(path) Stat.new(path).ctime end ## # Returns true if the named file is a directory, false otherwise. # # File.directory?(".") def self.directory?(path) st = Stat.stat path st ? st.directory? : false end def self.last_nonslash(path,start=nil) # Find the first non-/ from the right data = path.data idx = nil start ||= (path.size - 1) start.downto(0) do |i| if data[i] != ?/ return i end end return nil end ## # Returns all components of the filename given in # file_name except the last one. The filename must be # formed using forward slashes (``/’’) regardless of # the separator used on the local file system. 
# # File.dirname("/home/gumby/work/ruby.rb") #=> "/home/gumby/work" def self.dirname(path) path = StringValue(path) # edge case return "." if path.empty? slash = "/" # pull off any /'s at the end to ignore chunk_size = last_nonslash(path) return "/" unless chunk_size if pos = path.find_string_reverse(slash, chunk_size) return "/" if pos == 0 path = path.substring(0, pos) return "/" if path == "/" return path unless path.suffix? slash # prune any trailing /'s idx = last_nonslash(path, pos) # edge case, only /'s, return / return "/" unless idx return path.substring(0, idx - 1) end return "." end ## # Returns true if the named file is executable by the # effective user id of this process. def self.executable?(path) st = Stat.stat path st ? st.executable? : false end ## # Returns true if the named file is executable by # the real user id of this process. def self.executable_real?(path) st = Stat.stat path st ? st.executable_real? : false end ## # Return true if the named file exists. def self.exist?(path) path = StringValue path POSIX.stat(path, Stat::EXISTS_STRUCT.pointer) == 0 ? true : false end ## # Converts a pathname to an absolute pathname. Relative # paths are referenced from the current working directory # of the process unless dir_string is given, in which case # it will be used as the starting point. The given pathname # may start with a ``~’’, which expands to the process owner‘s # home directory (the environment variable HOME must be set # correctly). "~user" expands to the named user‘s home directory. 
# # File.expand_path("~oracle/bin") #=> "/home/oracle/bin" # File.expand_path("../../bin", "/tmp/x") #=> "/bin" def self.expand_path(path, dir=nil) path = StringValue(path) first = path[0] if first == ?~ case path[1] when ?/ path = ENV["HOME"] + path.substring(1, path.size - 1) when nil return ENV["HOME"] else unless length = path.index("/", 1) length = path.size end name = path.substring 1, length - 1 unless dir = Rubinius.get_user_home(name) raise ArgumentError, "user #{name} does not exist" end path = dir + path.substring(length, path.size - length) end elsif first != ?/ if dir dir = File.expand_path dir else dir = Dir.pwd end path = "#{dir}/#{path}" end items = [] start = 0 size = path.size while index = path.index("/", start) or (start < size and index = size) length = index - start if length > 0 item = path.substring start, length if item == ".." items.pop elsif item != "." items << item end end start = index + 1 end return "/" if items.empty? str = "" iter = items.to_iter while iter.next str.append "/#{iter.item}" end return str end ## # Returns the extension (the portion of file name in # path after the period). # # File.extname("test.rb") #=> ".rb" # File.extname("a/b/d/test.rb") #=> ".rb" # File.extname("test") #=> "" # File.extname(".profile") #=> "" def self.extname(path) path = StringValue(path) path_size = path.size dot_idx = path.find_string_reverse(".", path_size) # No dots at all return "" unless dot_idx slash_idx = path.find_string_reverse("/", path_size) # pretend there is / just to the left of the start of the string slash_idx ||= -1 # no . in the last component of the path return "" if dot_idx < slash_idx # last component starts with a . return "" if dot_idx == slash_idx + 1 # last component ends with a . return "" if dot_idx == path_size - 1 return path.substring(dot_idx, path_size - dot_idx) end ## # Returns true if the named file exists and is a regular file. def self.file?(path) st = Stat.stat path st ? st.file? 
: false end ## # Returns true if path matches against pattern The pattern # is not a regular expression; instead it follows rules # similar to shell filename globbing. It may contain the # following metacharacters: # # *: Matches any file. Can be restricted by other values in the glob. * will match all files; c* will match all files beginning with c; *c will match all files ending with c; and c will match all files that have c in them (including at the beginning or end). Equivalent to / .* /x in regexp. # **: Matches directories recursively or files expansively. # ?: Matches any one character. Equivalent to /.{1}/ in regexp. # [set]: Matches any one character in set. Behaves exactly like character sets in Regexp, including set negation ([^a-z]). # <code></code>: Escapes the next metacharacter. # flags is a bitwise OR of the FNM_xxx parameters. The same glob pattern and flags are used by Dir::glob. # # File.fnmatch('cat', 'cat') #=> true : match entire string # File.fnmatch('cat', 'category') #=> false : only match partial string # File.fnmatch('c{at,ub}s', 'cats') #=> false : { } isn't supported # # File.fnmatch('c?t', 'cat') #=> true : '?' 
match only 1 character # File.fnmatch('c??t', 'cat') #=> false : ditto # File.fnmatch('c*', 'cats') #=> true : '*' match 0 or more characters # File.fnmatch('c*t', 'c/a/b/t') #=> true : ditto # File.fnmatch('ca[a-z]', 'cat') #=> true : inclusive bracket expression # File.fnmatch('ca[^t]', 'cat') #=> false : exclusive bracket expression ('^' or '!') # # File.fnmatch('cat', 'CAT') #=> false : case sensitive # File.fnmatch('cat', 'CAT', File::FNM_CASEFOLD) #=> true : case insensitive # # File.fnmatch('?', '/', File::FNM_PATHNAME) #=> false : wildcard doesn't match '/' on FNM_PATHNAME # File.fnmatch('*', '/', File::FNM_PATHNAME) #=> false : ditto # File.fnmatch('[/]', '/', File::FNM_PATHNAME) #=> false : ditto # # File.fnmatch('\?', '?') #=> true : escaped wildcard becomes ordinary # File.fnmatch('\a', 'a') #=> true : escaped ordinary remains ordinary # File.fnmatch('\a', '\a', File::FNM_NOESCAPE) #=> true : FNM_NOESACPE makes '\' ordinary # File.fnmatch('[\?]', '?') #=> true : can escape inside bracket expression # # File.fnmatch('*', '.profile') #=> false : wildcard doesn't match leading # File.fnmatch('*', '.profile', File::FNM_DOTMATCH) #=> true period by default. # File.fnmatch('.*', '.profile') #=> true # # rbfiles = '**' '/' '*.rb' # you don't have to do like this. just write in single string. 
# File.fnmatch(rbfiles, 'main.rb') #=> false # File.fnmatch(rbfiles, './main.rb') #=> false # File.fnmatch(rbfiles, 'lib/song.rb') #=> true # File.fnmatch('**.rb', 'main.rb') #=> true # File.fnmatch('**.rb', './main.rb') #=> false # File.fnmatch('**.rb', 'lib/song.rb') #=> true # File.fnmatch('*', 'dave/.profile') #=> true # # pattern = '*' '/' '*' # File.fnmatch(pattern, 'dave/.profile', File::FNM_PATHNAME) #=> false # File.fnmatch(pattern, 'dave/.profile', File::FNM_PATHNAME | File::FNM_DOTMATCH) #=> true # # pattern = '**' '/' 'foo' # File.fnmatch(pattern, 'a/b/c/foo', File::FNM_PATHNAME) #=> true # File.fnmatch(pattern, '/a/b/c/foo', File::FNM_PATHNAME) #=> true # File.fnmatch(pattern, 'c:/a/b/c/foo', File::FNM_PATHNAME) #=> true # File.fnmatch(pattern, 'a/.b/c/foo', File::FNM_PATHNAME) #=> false # File.fnmatch(pattern, 'a/.b/c/foo', File::FNM_PATHNAME | File::FNM_DOTMATCH) #=> true def self.fnmatch(pattern, path, flags=0) pattern = StringValue(pattern) path = StringValue(path) flags = Type.coerce_to(flags, Fixnum, :to_int) super pattern, path, flags end ## # Identifies the type of the named file; the return string is # one of "file", "directory", "characterSpecial", # "blockSpecial", "fifo", "link", "socket", or "unknown". # # File.ftype("testfile") #=> "file" # File.ftype("/dev/tty") #=> "characterSpecial" # File.ftype("/tmp/.X11-unix/X0") #=> "socket" def self.ftype(path) lstat(path).ftype end ## # Returns true if the named file exists and the effective # group id of the calling process is the owner of the file. # Returns false on Windows. def self.grpowned?(path) begin lstat(path).grpowned? rescue false end end ## # Returns true if the named files are identical. 
# # open("a", "w") {} # p File.identical?("a", "a") #=> true # p File.identical?("a", "./a") #=> true # File.link("a", "b") # p File.identical?("a", "b") #=> true # File.symlink("a", "c") # p File.identical?("a", "c") #=> true # open("d", "w") {} # p File.identical?("a", "d") #=> false def self.identical?(orig, copy) st_o = stat(StringValue(orig)) st_c = stat(StringValue(copy)) return false unless st_o.ino == st_c.ino return false unless st_o.ftype == st_c.ftype return false unless POSIX.access(orig, Constants::R_OK) return false unless POSIX.access(copy, Constants::R_OK) true end ## # Returns a new string formed by joining the strings using File::SEPARATOR. # # File.join("usr", "mail", "gumby") #=> "usr/mail/gumby" def self.join(*args) return '' if args.empty? sep = SEPARATOR # The first one is unrolled out of the loop to remove a condition # from the loop. It seems needless, but you'd be surprised how much hinges # on the performance of File.join # first = args.shift case first when String first = first.dup when Array recursion = Thread.detect_recursion(first) do first = join(*first) end raise ArgumentError, "recursive array" if recursion else # We need to use dup here, since it's possible that # StringValue gives us a direct object we shouldn't mutate first = StringValue(first).dup end ret = first args.each do |el| value = nil case el when String value = el when Array recursion = Thread.detect_recursion(el) do value = join(*el) end raise ArgumentError, "recursive array" if recursion else value = StringValue(el) end if value.prefix? sep ret.gsub!(/#{SEPARATOR}+$/, '') elsif not ret.suffix? sep ret << sep end ret << value end ret end ## # Creates a new name for an existing file using a hard link. # Will not overwrite new_name if it already exists (raising # a subclass of SystemCallError). Not available on all platforms. 
# # File.link("testfile", ".testfile") #=> 0 # IO.readlines(".testfile")[0] #=> "This is line one\n" def self.link(from, to) to = StringValue(to) from = StringValue(from) n = POSIX.link(from, to) Errno.handle if n == -1 n end ## # Same as File::stat, but does not follow the last symbolic link. # Instead, reports on the link itself. # # File.symlink("testfile", "link2test") #=> 0 # File.stat("testfile").size #=> 66 # File.lstat("link2test").size #=> 8 # File.stat("link2test").size #=> 66 def self.lstat(path) Stat.lstat path end ## # Returns the modification time for the named file as a Time object. # # File.mtime("testfile") #=> Tue Apr 08 12:58:04 CDT 2003 def self.mtime(path) Stat.new(path).mtime end ## # Returns true if the named file is a pipe. def self.pipe?(path) st = Stat.stat path st ? st.pipe? : false end ## # Returns true if the named file is readable by the effective # user id of this process. def self.readable?(path) st = Stat.stat path st ? st.readable? : false end ## # Returns true if the named file is readable by the real user # id of this process. def self.readable_real?(path) st = Stat.stat path st ? st.readable_real? : false end ## # Returns the name of the file referenced by the given link. # Not available on all platforms. # # File.symlink("testfile", "link2test") #=> 0 # File.readlink("link2test") #=> "testfile" def self.readlink(path) StringValue(path) FFI::MemoryPointer.new(1024) do |ptr| n = POSIX.readlink(path, ptr, 1024) Errno.handle if n == -1 return ptr.read_string(n) end end ## # Renames the given file to the new name. Raises a SystemCallError # if the file cannot be renamed. # # File.rename("afile", "afile.bak") #=> 0 def self.rename(from, to) to = StringValue(to) from = StringValue(from) n = POSIX.rename(from, to) Errno.handle if n == -1 n end ## # Returns the size of file_name. def self.size(io_or_path) io = Type.convert_to io_or_path, IO, :to_io if io.is_a? 
IO Stat.from_fd(io.fileno).size else stat(io_or_path).size end end ## # Returns nil if file_name doesn‘t exist or has zero size, # the size of the file otherwise. def self.size?(io_or_path) s = 0 io = Type.convert_to io_or_path, IO, :to_io if io.is_a? IO s = Stat.from_fd(io.fileno).size else st = Stat.stat io_or_path s = st.size if st end s > 0 ? s : nil end ## # Returns true if the named file is a socket. def self.socket?(path) st = Stat.stat path st ? st.socket? : false end ## # Splits the given string into a directory and a file component and returns them in a two-element array. See also File::dirname and File::basename. # # File.split("/home/gumby/.profile") #=> ["/home/gumby", ".profile"] def self.split(path) p = StringValue(path) [dirname(p), basename(p)] end ## # Returns a File::Stat object for the named file (see File::Stat). # # File.stat("testfile").mtime #=> Tue Apr 08 12:58:04 CDT 2003 def self.stat(path) Stat.new path end ## # Creates a symbolic link called new_name for the # existing file old_name. Raises a NotImplemented # exception on platforms that do not support symbolic links. # # File.symlink("testfile", "link2test") #=> 0 def self.symlink(from, to) to = StringValue(to) from = StringValue(from) n = POSIX.symlink(from, to) Errno.handle if n == -1 n end ## # Returns true if the named file is a symbolic link. def self.symlink?(path) Stat.lstat(path).symlink? rescue Errno::ENOENT, Errno::ENODIR false end ## # Copies a file from to to. If to is a directory, copies from to to/from. def self.syscopy(from, to) out = File.directory?(to) ? to + File.basename(from) : to open(out, 'w') do |f| f.write read(from).read end end ## # Truncates the file file_name to be at most integer # bytes long. Not available on all platforms. 
# # f = File.new("out", "w") # f.write("1234567890") #=> 10 # f.close #=> nil # File.truncate("out", 5) #=> 0 # File.size("out") #=> 5 def self.truncate(path, length) unless self.exist?(path) raise Errno::ENOENT, path end unless length.respond_to?(:to_int) raise TypeError, "can't convert #{length.class} into Integer" end n = POSIX.truncate(path, length) Errno.handle if n == -1 n end ## # Returns the current umask value for this process. # If the optional argument is given, set the umask # to that value and return the previous value. Umask # values are subtracted from the default permissions, # so a umask of 0222 would make a file read-only for # everyone. # # File.umask(0006) #=> 18 # File.umask #=> 6 def self.umask(mask = nil) if mask POSIX.umask(clamp_short(mask)) else old_mask = POSIX.umask(0) POSIX.umask(old_mask) old_mask end end ## # Deletes the named files, returning the number of names # passed as arguments. Raises an exception on any error. # # See also Dir::rmdir. def self.unlink(*paths) paths.each do |path| path = StringValue(path) n = POSIX.unlink(path) Errno.handle if n == -1 end paths.size end ## # Sets the access and modification times of each named # file to the first two arguments. Returns the number # of file names in the argument list. # #=> Integer def self.utime(a_in, m_in, *paths) FFI::MemoryPointer.new(POSIX::TimeVal, 2) do |ptr| atime = POSIX::TimeVal.new ptr mtime = POSIX::TimeVal.new ptr[1] atime[:tv_sec] = a_in.to_i atime[:tv_usec] = 0 mtime[:tv_sec] = m_in.to_i mtime[:tv_usec] = 0 paths.each do |path| if POSIX.utimes(path, ptr) != 0 Errno.handle end end end end ## # Returns true if the named file is writable by the effective # user id of this process. def self.writable?(path) st = Stat.stat path st ? st.writable? : false end ## # Returns true if the named file is writable by the real user # id of this process. def self.writable_real?(path) st = Stat.stat path st ? st.writable_real? 
: false end ## # Returns true if the named file exists and has a zero size. def self.zero?(path) st = Stat.stat path st ? st.zero? : false end ## # Returns true if the named file exists and the effective # used id of the calling process is the owner of the file. # File.owned?(file_name) => true or false def self.owned?(file_name) Stat.new(file_name).owned? end ## # Returns true if the named file has the setgid bit set. def self.setgid?(file_name) Stat.new(file_name).setgid? rescue Errno::ENOENT return false end ## # Returns true if the named file has the setuid bit set. def self.setuid?(file_name) Stat.new(file_name).setuid? rescue Errno::ENOENT return false end ## # Returns true if the named file has the sticky bit set. def self.sticky?(file_name) Stat.new(file_name).sticky? rescue Errno::ENOENT return false end ## # Returns true if the named file exists and the effective # used id of the calling process is the owner of the file. def self.owned?(file_name) Stat.new(file_name).owned? end class << self alias_method :delete, :unlink alias_method :exists?, :exist? alias_method :fnmatch?, :fnmatch end def atime Stat.new(@path).atime end def reopen(other, mode = 'r+') rewind unless closed? super(other, mode) end def chmod(mode) POSIX.fchmod(@descriptor, clamp_short(mode)) end def chown(owner_int, group_int) POSIX.fchown(@descriptor, owner_int || -1, group_int || -1) end def ctime Stat.new(@path).ctime end def flock(locking_constant) result = POSIX.flock(@descriptor, locking_constant) return false if result == -1 result end def lstat Stat.lstat @path end def mtime Stat.new(@path).mtime end def stat Stat.from_fd @descriptor end def truncate(length) length = Type.coerce_to(length, Integer, :to_int) raise Errno::EINVAL, "Can't truncate a file to a negative length" if length < 0 raise IOError, "File is closed" if closed? 
n = POSIX.ftruncate(@descriptor, length) Errno.handle if n == -1 n end def inspect return_string = "#<#{self.class}:0x#{object_id.to_s(16)} path=#{@path}" return_string << " (closed)" if closed? return_string << ">" end end # File # Inject the constants into IO class IO include File::Constants end class File::Stat class Struct < FFI::Struct config "rbx.platform.stat", :st_dev, :st_ino, :st_mode, :st_nlink, :st_uid, :st_gid, :st_rdev, :st_size, :st_blksize, :st_blocks, :st_atime, :st_mtime, :st_ctime end EXISTS_STRUCT = Struct.new include Comparable S_IRUSR = Rubinius::Config['rbx.platform.file.S_IRUSR'] S_IWUSR = Rubinius::Config['rbx.platform.file.S_IWUSR'] S_IXUSR = Rubinius::Config['rbx.platform.file.S_IXUSR'] S_IRGRP = Rubinius::Config['rbx.platform.file.S_IRGRP'] S_IWGRP = Rubinius::Config['rbx.platform.file.S_IWGRP'] S_IXGRP = Rubinius::Config['rbx.platform.file.S_IXGRP'] S_IROTH = Rubinius::Config['rbx.platform.file.S_IROTH'] S_IWOTH = Rubinius::Config['rbx.platform.file.S_IWOTH'] S_IXOTH = Rubinius::Config['rbx.platform.file.S_IXOTH'] S_IFMT = Rubinius::Config['rbx.platform.file.S_IFMT'] S_IFIFO = Rubinius::Config['rbx.platform.file.S_IFIFO'] S_IFCHR = Rubinius::Config['rbx.platform.file.S_IFCHR'] S_IFDIR = Rubinius::Config['rbx.platform.file.S_IFDIR'] S_IFBLK = Rubinius::Config['rbx.platform.file.S_IFBLK'] S_IFREG = Rubinius::Config['rbx.platform.file.S_IFREG'] S_IFLNK = Rubinius::Config['rbx.platform.file.S_IFLNK'] S_IFSOCK = Rubinius::Config['rbx.platform.file.S_IFSOCK'] S_IFWHT = Rubinius::Config['rbx.platform.file.S_IFWHT'] S_ISUID = Rubinius::Config['rbx.platform.file.S_ISUID'] S_ISGID = Rubinius::Config['rbx.platform.file.S_ISGID'] S_ISVTX = Rubinius::Config['rbx.platform.file.S_ISVTX'] POSIX = FFI::Platform::POSIX attr_reader :path def self.create(path) path = StringValue path stat = allocate Rubinius.privately { stat.setup path, Struct.new } end def self.stat(path) stat = create path result = POSIX.stat stat.path, stat.pointer return nil unless 
result == 0 stat end # -- # Stat.lstat raises whereas Stat.stat does not because most things # that use Stat.stat do not expect exceptions but most things that # uses Stat.lstat do. # ++ def self.lstat(path) stat = create path result = POSIX.lstat stat.path, stat.pointer Errno.handle path unless result == 0 stat end ## # File::Stat#from_fd is used to support IO#stat which does not necessarily # have a path. def self.from_fd(descriptor) stat = allocate struct = Struct.new result = POSIX.fstat descriptor, struct.pointer Errno.handle "file descriptor #{descriptor}" unless result == 0 Rubinius.privately { stat.setup nil, struct } end def initialize(path) @path = StringValue path @stat = Struct.new result = POSIX.stat @path, @stat.pointer Errno.handle path unless result == 0 end private :initialize def setup(path, struct) @path = path @stat = struct self end private :setup def pointer @stat.pointer end def atime Time.at @stat[:st_atime] end def blksize @stat[:st_blksize] end def blocks @stat[:st_blocks] end def blockdev? @stat[:st_mode] & S_IFMT == S_IFBLK end def chardev? @stat[:st_mode] & S_IFMT == S_IFCHR end def ctime Time.at @stat[:st_ctime] end def dev @stat[:st_dev] end def dev_major major = POSIX.major @stat[:st_dev] major < 0 ? nil : major end def dev_minor minor = POSIX.major @stat[:st_dev] minor < 0 ? nil : minor end def directory? @stat[:st_mode] & S_IFMT == S_IFDIR end def executable? return true if superuser? return @stat[:st_mode] & S_IXUSR != 0 if owned? return @stat[:st_mode] & S_IXGRP != 0 if grpowned? return @stat[:st_mode] & S_IXOTH != 0 end def executable_real? return true if rsuperuser? return @stat[:st_mode] & S_IXUSR != 0 if rowned? return @stat[:st_mode] & S_IXGRP != 0 if rgrpowned? return @stat[:st_mode] & S_IXOTH != 0 end def file? @stat[:st_mode] & S_IFMT == S_IFREG end def ftype if file? "file" elsif directory? "directory" elsif chardev? "characterSpecial" elsif blockdev? "blockSpecial" elsif pipe? "fifo" elsif socket? 
"socket" elsif symlink? "link" else "unknown" end end def gid @stat[:st_gid] end def grpowned? @stat[:st_gid] == POSIX.getegid end def ino @stat[:st_ino] end def inspect "#<File::Stat dev=0x#{self.dev.to_s(16)}, ino=#{self.ino}, " \ "mode=#{sprintf("%07d", self.mode.to_s(8).to_i)}, nlink=#{self.nlink}, " \ "uid=#{self.uid}, gid=#{self.gid}, rdev=0x#{self.rdev.to_s(16)}, " \ "size=#{self.size}, blksize=#{self.blksize}, blocks=#{self.blocks}, " \ "atime=#{self.atime}, mtime=#{self.mtime}, ctime=#{self.ctime}>" end def nlink @stat[:st_nlink] end def mtime Time.at @stat[:st_mtime] end def mode @stat[:st_mode] end def owned? @stat[:st_uid] == POSIX.geteuid end def path @path end def pipe? @stat[:st_mode] & S_IFMT == S_IFIFO end def rdev @stat[:st_rdev] end def rdev_major major = POSIX.major @stat[:st_rdev] major < 0 ? nil : major end def rdev_minor minor = POSIX.minor @stat[:st_rdev] minor < 0 ? nil : minor end def readable? return true if superuser? return @stat[:st_mode] & S_IRUSR != 0 if owned? return @stat[:st_mode] & S_IRGRP != 0 if grpowned? return @stat[:st_mode] & S_IROTH != 0 end def readable_real? return true if rsuperuser? return @stat[:st_mode] & S_IRUSR != 0 if rowned? return @stat[:st_mode] & S_IRGRP != 0 if rgrpowned? return @stat[:st_mode] & S_IROTH != 0 end def setgid? @stat[:st_mode] & S_ISGID != 0 end def setuid? @stat[:st_mode] & S_ISUID != 0 end def sticky? @stat[:st_mode] & S_ISVTX != 0 end def size @stat[:st_size] end def size? size == 0 ? nil : size end def socket? @stat[:st_mode] & S_IFMT == S_IFSOCK end def symlink? @stat[:st_mode] & S_IFMT == S_IFLNK end def uid @stat[:st_uid] end def writable? return true if superuser? return @stat[:st_mode] & S_IWUSR != 0 if owned? return @stat[:st_mode] & S_IWGRP != 0 if grpowned? return @stat[:st_mode] & S_IWOTH != 0 end def writable_real? return true if rsuperuser? return @stat[:st_mode] & S_IWUSR != 0 if rowned? return @stat[:st_mode] & S_IWGRP != 0 if rgrpowned? 
return @stat[:st_mode] & S_IWOTH != 0 end def zero? @stat[:st_size] == 0 end def <=> (other) return nil unless other.is_a?(File::Stat) self.mtime <=> other.mtime end def rgrpowned? @stat[:st_gid] == POSIX.getgid end private :rgrpowned? def rowned? @stat[:st_uid] == POSIX.getuid end private :rowned? def rsuperuser? POSIX.getuid == 0 end private :rsuperuser? def superuser? POSIX.geteuid == 0 end private :superuser? end # File::Stat
unless Rubinius::Config['hash.hamt'] class Hash include Enumerable attr_reader :size attr_reader :capacity attr_reader :max_entries alias_method :length, :size Entries = Rubinius::Tuple # Initial size of Hash. MUST be a power of 2. MIN_SIZE = 16 # Allocate more storage when this full. This value grows with # the size of the Hash so that the max load factor is 0.75. MAX_ENTRIES = 12 class State attr_accessor :head attr_accessor :tail def self.from(state) new_state = new new_state.compare_by_identity if state and state.compare_by_identity? new_state end def initialize @compare_by_identity = false @head = nil @tail = nil end def compare_by_identity? @compare_by_identity end def compare_by_identity @compare_by_identity = true class << self def match?(this_key, this_hash, other_key, other_hash) Rubinius::Type.object_equal other_key, this_key end end self end def match?(this_key, this_hash, other_key, other_hash) other_hash == this_hash and (Rubinius::Type::object_equal(other_key, this_key) or other_key.eql?(this_key)) end end # Bucket stores key, value pairs in Hash. The key's hash # is also cached in the item and recalculated when the # Hash#rehash method is called. class Bucket attr_accessor :key attr_accessor :key_hash attr_accessor :value attr_accessor :link attr_accessor :previous attr_accessor :next attr_accessor :state def initialize(key, key_hash, value, state) @key = key @key_hash = key_hash @value = value @link = nil @state = state if tail = state.tail @previous = tail state.tail = tail.next = self else state.head = state.tail = self end end def delete(key, key_hash) if @state.match? @key, @key_hash, key, key_hash remove self end end def remove if @previous @previous.next = @next else @state.head = @next end if @next @next.previous = @previous else @state.tail = @previous end end end # An external iterator that returns entries in insertion order. 
While # somewhat following the API of Enumerator, it is named Iterator because it # does not provide <code>#each</code> and should not conflict with # +Enumerator+ in MRI 1.8.7+. Returned by <code>Hash#to_iter</code>. class Iterator def initialize(state) @state = state end # Returns the next object or +nil+. def next(item) if item return item if item = item.next else return @state.head end end end def self.new_from_literal(size) allocate end # Creates a fully-formed instance of Hash. def self.allocate hash = super() Rubinius.privately { hash.__setup__ } hash end def self.new_from_associate_array(associate_array) hash = new associate_array.each do |array| next unless array.respond_to? :to_ary array = array.to_ary unless (1..2).cover? array.size raise ArgumentError, "invalid number of elements (#{array.size} for 1..2)" end hash[array.at(0)] = array.at(1) end hash end private_class_method :new_from_associate_array def self.try_convert(obj) Rubinius::Type.try_convert obj, Hash, :to_hash end # #entries is a method provided by Enumerable which calls #to_a, # so we have to not collide with that. attr_reader_specific :entries, :__entries__ def self.[](*args) if args.size == 1 obj = args.first if hash = Rubinius::Type.check_convert_type(obj, Hash, :to_hash) new_hash = allocate.replace(hash) new_hash.default = nil return new_hash elsif associate_array = Rubinius::Type.check_convert_type(obj, Array, :to_ary) return new_from_associate_array(associate_array) end end return new if args.empty? if args.size & 1 == 1 raise ArgumentError, "Expected an even number, got #{args.length}" end hash = new i = 0 total = args.size while i < total hash[args[i]] = args[i+1] i += 2 end hash end def []=(key, value) Rubinius.check_frozen redistribute @entries if @size > @max_entries key_hash = Rubinius.privately { key.hash } index = key_hash & @mask item = @entries[index] unless item @entries[index] = new_bucket key, key_hash, value return value end if @state.match? 
item.key, item.key_hash, key, key_hash return item.value = value end last = item item = item.link while item if @state.match? item.key, item.key_hash, key, key_hash return item.value = value end last = item item = item.link end last.link = new_bucket key, key_hash, value value end alias_method :store, :[]= # Used internally to get around subclasses redefining #[]= alias_method :__store__, :[]= def ==(other) return true if self.equal? other unless other.kind_of? Hash return false unless other.respond_to? :to_hash return other == self end return false unless other.size == size Thread.detect_recursion self, other do each_item do |item| other_item = other.find_item(item.key) # Other doesn't even have this key return false unless other_item # Order of the comparison matters! We must compare our value with # the other Hash's value and not the other way around. unless Rubinius::Type::object_equal(item.value, other_item.value) or item.value == other_item.value return false end end end true end def assoc(key) each_item { |e| return e.key, e.value if key == e.key } end def compare_by_identity Rubinius.check_frozen @state = State.new unless @state @state.compare_by_identity self end def compare_by_identity? return false unless @state @state.compare_by_identity? end def default(key=undefined) if @default_proc and !undefined.equal?(key) @default_proc.call(self, key) else @default end end def default_proc @default_proc end # Sets the default proc to be executed on each key lookup def default_proc=(prc) Rubinius.check_frozen unless prc.nil? prc = Rubinius::Type.coerce_to prc, Proc, :to_proc if prc.lambda? 
and prc.arity != 2 raise TypeError, "default proc must have arity 2" end end @default = nil @default_proc = prc end def delete(key) Rubinius.check_frozen key_hash = Rubinius.privately { key.hash } index = key_index key_hash if item = @entries[index] if item.delete key, key_hash @entries[index] = item.link @size -= 1 return item.value end last = item while item = item.link if item.delete key, key_hash last.link = item.link @size -= 1 return item.value end last = item end end return yield(key) if block_given? end def dig(key, *remaining_keys) item = self[key] return item if remaining_keys.empty? || item.nil? raise TypeError, "#{item.class} does not have #dig method" unless item.respond_to?(:dig) item.dig(*remaining_keys) end def each_item return unless @state item = @state.head while item yield item item = item.next end end def each return to_enum(:each) { size } unless block_given? return unless @state item = @state.head while item yield [item.key, item.value] item = item.next end self end alias_method :each_pair, :each def fetch(key, default=undefined) if item = find_item(key) return item.value end return yield(key) if block_given? return default unless undefined.equal?(default) raise KeyError, "key #{key} not found" end def fetch_values(*keys, &block) keys.map { |key| fetch(key, &block) } end # Searches for an item matching +key+. Returns the item # if found. Otherwise returns +nil+. def find_item(key) key_hash = Rubinius.privately { key.hash } item = @entries[key_index(key_hash)] while item if @state.match? item.key, item.key_hash, key, key_hash return item end item = item.link end end def flatten(level=1) to_a.flatten(level) end def keep_if return to_enum(:keep_if) { size } unless block_given? 
Rubinius.check_frozen each_item { |e| delete e.key unless yield(e.key, e.value) } self end def initialize(default=undefined, &block) Rubinius.check_frozen if !undefined.equal?(default) and block raise ArgumentError, "Specify a default or a block, not both" end if block @default = nil @default_proc = block elsif !undefined.equal?(default) @default = default @default_proc = nil end self end private :initialize def merge!(other) Rubinius.check_frozen other = Rubinius::Type.coerce_to other, Hash, :to_hash if block_given? other.each_item do |item| key = item.key if key? key __store__ key, yield(key, self[key], item.value) else __store__ key, item.value end end else other.each_item do |item| __store__ item.key, item.value end end self end alias_method :update, :merge! # Returns a new +Bucket+ instance having +key+, +key_hash+, # and +value+. If +key+ is a kind of +String+, +key+ is # duped and frozen. def new_bucket(key, key_hash, value) if key.kind_of?(String) and !key.frozen? and !compare_by_identity? key = key.dup key.freeze end @size += 1 Bucket.new key, key_hash, value, @state end private :new_bucket # Adjusts the hash storage and redistributes the entries among # the new bins. Any Iterator instance will be invalid after a # call to #redistribute. Does not recalculate the cached key_hash # values. See +#rehash+. def redistribute(entries) capacity = @capacity # Rather than using __setup__, initialize the specific values we need to # change so we don't eg overwrite @state. 
@capacity = capacity * 2 @entries = Entries.new @capacity @mask = @capacity - 1 @max_entries = @max_entries * 2 i = -1 while (i += 1) < capacity next unless old = entries[i] while old old.link = nil if nxt = old.link index = key_index old.key_hash if item = @entries[index] old.link = item end @entries[index] = old old = nxt end end end def rassoc(value) each_item { |e| return e.key, e.value if value == e.value } end def replace(other) Rubinius.check_frozen other = Rubinius::Type.coerce_to other, Hash, :to_hash return self if self.equal? other # Normally this would be a call to __setup__, but that will create a new # unused Tuple that we would wind up replacing anyways. @capacity = other.capacity @entries = Entries.new @capacity @mask = @capacity - 1 @size = 0 @max_entries = other.max_entries @state = State.new @state.compare_by_identity if other.compare_by_identity? other.each_item do |item| __store__ item.key, item.value end @default = other.default @default_proc = other.default_proc self end alias_method :initialize_copy, :replace private :initialize_copy def select return to_enum(:select) { size } unless block_given? selected = Hash.allocate each_item do |item| if yield(item.key, item.value) selected[item.key] = item.value end end selected end def select! return to_enum(:select!) { size } unless block_given? Rubinius.check_frozen return nil if empty? size = @size each_item { |e| delete e.key unless yield(e.key, e.value) } return nil if size == @size self end def shift Rubinius.check_frozen return default(nil) if empty? item = @state.head delete item.key return item.key, item.value end # Sets the underlying data structures. # # @capacity is the maximum number of +@entries+. # @max_entries is the maximum number of entries before redistributing. # @size is the number of pairs, equivalent to <code>hsh.size</code>. # @entrien is the vector of storage for the item chains. 
def __setup__(capacity=MIN_SIZE, max=MAX_ENTRIES, size=0) @capacity = capacity @mask = capacity - 1 @max_entries = max @size = size @entries = Entries.new capacity @state = State.new end private :__setup__ def to_h if instance_of? Hash self else Hash.allocate.replace(to_hash) end end # Returns an external iterator for the bins. See +Iterator+ def to_iter Iterator.new @state end def eql?(other) # Just like ==, but uses eql? to compare values. return true if self.equal? other unless other.kind_of? Hash return false unless other.respond_to? :to_hash return other.eql?(self) end return false unless other.size == size Thread.detect_recursion self, other do each_item do |item| other_item = other.find_item(item.key) # Other doesn't even have this key return false unless other_item # Order of the comparison matters! We must compare our value with # the other Hash's value and not the other way around. unless Rubinius::Type::object_equal(item.value, other_item.value) or item.value.eql?(other_item.value) return false end end end true end def hash val = size Thread.detect_outermost_recursion self do each_item do |item| val ^= item.key.hash val ^= item.value.hash end end val end def [](key) if item = find_item(key) item.value else default key end end def clear Rubinius.check_frozen __setup__ self end def default=(value) @default_proc = nil @default = value end def delete_if(&block) return to_enum(:delete_if) { size } unless block_given? Rubinius.check_frozen select(&block).each { |k, v| delete k } self end def each_key return to_enum(:each_key) { size } unless block_given? each_item { |item| yield item.key } self end def each_value return to_enum(:each_value) { size } unless block_given? each_item { |item| yield item.value } self end # Returns true if there are no entries. def empty? 
@size == 0 end def index(value) each_item do |item| return item.key if item.value == value end end alias_method :key, :index def inspect out = [] return '{...}' if Thread.detect_recursion self do each_item do |item| str = item.key.inspect str << '=>' str << item.value.inspect out << str end end ret = "{#{out.join ', '}}" Rubinius::Type.infect(ret, self) unless empty? ret end alias_method :to_s, :inspect def invert inverted = {} each_item do |item| inverted[item.value] = item.key end inverted end def key?(key) find_item(key) != nil end alias_method :has_key?, :key? alias_method :include?, :key? alias_method :member?, :key? # Calculates the +@entries+ slot given a key_hash value. def key_index(key_hash) key_hash & @mask end private :key_index def keys ary = [] each_item do |item| ary << item.key end ary end def merge(other, &block) dup.merge!(other, &block) end # Recalculates the cached key_hash values and reorders the entries # into a new +@entries+ vector. Does NOT change the size of the # hash. See +#redistribute+. def rehash capacity = @capacity entries = @entries @entries = Entries.new @capacity i = -1 while (i += 1) < capacity next unless old = entries[i] while old old.link = nil if nxt = old.link index = key_index(old.key_hash = old.key.hash) if item = @entries[index] old.link = item end @entries[index] = old old = nxt end end self end def reject(&block) return to_enum(:reject) { size } unless block_given? hsh = dup.delete_if(&block) hsh.taint if tainted? hsh end def reject!(&block) return to_enum(:reject!) { size } unless block_given? Rubinius.check_frozen unless empty? 
size = @size delete_if(&block) return self if size != @size end nil end def sort(&block) to_a.sort(&block) end def to_a ary = [] each_item do |item| ary << [item.key, item.value] end Rubinius::Type.infect ary, self ary end def to_hash self end def to_proc method(:[]).to_proc end def value?(value) each_item do |item| return true if item.value == value end false end alias_method :has_value?, :value? def values ary = [] each_item do |item| ary << item.value end ary end def values_at(*args) args.map do |key| if item = find_item(key) item.value else default key end end end alias_method :indices, :values_at alias_method :indexes, :values_at end end Implement Hash#{>, >=, <, <=} unless Rubinius::Config['hash.hamt'] class Hash include Enumerable attr_reader :size attr_reader :capacity attr_reader :max_entries alias_method :length, :size Entries = Rubinius::Tuple # Initial size of Hash. MUST be a power of 2. MIN_SIZE = 16 # Allocate more storage when this full. This value grows with # the size of the Hash so that the max load factor is 0.75. MAX_ENTRIES = 12 class State attr_accessor :head attr_accessor :tail def self.from(state) new_state = new new_state.compare_by_identity if state and state.compare_by_identity? new_state end def initialize @compare_by_identity = false @head = nil @tail = nil end def compare_by_identity? @compare_by_identity end def compare_by_identity @compare_by_identity = true class << self def match?(this_key, this_hash, other_key, other_hash) Rubinius::Type.object_equal other_key, this_key end end self end def match?(this_key, this_hash, other_key, other_hash) other_hash == this_hash and (Rubinius::Type::object_equal(other_key, this_key) or other_key.eql?(this_key)) end end # Bucket stores key, value pairs in Hash. The key's hash # is also cached in the item and recalculated when the # Hash#rehash method is called. 
class Bucket attr_accessor :key attr_accessor :key_hash attr_accessor :value attr_accessor :link attr_accessor :previous attr_accessor :next attr_accessor :state def initialize(key, key_hash, value, state) @key = key @key_hash = key_hash @value = value @link = nil @state = state if tail = state.tail @previous = tail state.tail = tail.next = self else state.head = state.tail = self end end def delete(key, key_hash) if @state.match? @key, @key_hash, key, key_hash remove self end end def remove if @previous @previous.next = @next else @state.head = @next end if @next @next.previous = @previous else @state.tail = @previous end end end # An external iterator that returns entries in insertion order. While # somewhat following the API of Enumerator, it is named Iterator because it # does not provide <code>#each</code> and should not conflict with # +Enumerator+ in MRI 1.8.7+. Returned by <code>Hash#to_iter</code>. class Iterator def initialize(state) @state = state end # Returns the next object or +nil+. def next(item) if item return item if item = item.next else return @state.head end end end def self.new_from_literal(size) allocate end # Creates a fully-formed instance of Hash. def self.allocate hash = super() Rubinius.privately { hash.__setup__ } hash end def self.new_from_associate_array(associate_array) hash = new associate_array.each do |array| next unless array.respond_to? :to_ary array = array.to_ary unless (1..2).cover? array.size raise ArgumentError, "invalid number of elements (#{array.size} for 1..2)" end hash[array.at(0)] = array.at(1) end hash end private_class_method :new_from_associate_array def self.try_convert(obj) Rubinius::Type.try_convert obj, Hash, :to_hash end # #entries is a method provided by Enumerable which calls #to_a, # so we have to not collide with that. 
attr_reader_specific :entries, :__entries__ def self.[](*args) if args.size == 1 obj = args.first if hash = Rubinius::Type.check_convert_type(obj, Hash, :to_hash) new_hash = allocate.replace(hash) new_hash.default = nil return new_hash elsif associate_array = Rubinius::Type.check_convert_type(obj, Array, :to_ary) return new_from_associate_array(associate_array) end end return new if args.empty? if args.size & 1 == 1 raise ArgumentError, "Expected an even number, got #{args.length}" end hash = new i = 0 total = args.size while i < total hash[args[i]] = args[i+1] i += 2 end hash end def []=(key, value) Rubinius.check_frozen redistribute @entries if @size > @max_entries key_hash = Rubinius.privately { key.hash } index = key_hash & @mask item = @entries[index] unless item @entries[index] = new_bucket key, key_hash, value return value end if @state.match? item.key, item.key_hash, key, key_hash return item.value = value end last = item item = item.link while item if @state.match? item.key, item.key_hash, key, key_hash return item.value = value end last = item item = item.link end last.link = new_bucket key, key_hash, value value end alias_method :store, :[]= # Used internally to get around subclasses redefining #[]= alias_method :__store__, :[]= def ==(other) return true if self.equal? other unless other.kind_of? Hash return false unless other.respond_to? :to_hash return other == self end return false unless other.size == size Thread.detect_recursion self, other do each_item do |item| other_item = other.find_item(item.key) # Other doesn't even have this key return false unless other_item # Order of the comparison matters! We must compare our value with # the other Hash's value and not the other way around. 
unless Rubinius::Type::object_equal(item.value, other_item.value) or item.value == other_item.value return false end end end true end def <(other) other = Rubinius::Type.coerce_to(other, Hash, :to_hash) other > self end def <=(other) other = Rubinius::Type.coerce_to(other, Hash, :to_hash) other >= self end def >(other) other = Rubinius::Type.coerce_to(other, Hash, :to_hash) return false if size <= other.size self >= other end def >=(other) other = Rubinius::Type.coerce_to(other, Hash, :to_hash) return false if size < other.size other.each do |other_key, other_val| val = fetch(other_key, undefined) return false if undefined.equal?(val) || val != other_val end true end def assoc(key) each_item { |e| return e.key, e.value if key == e.key } end def compare_by_identity Rubinius.check_frozen @state = State.new unless @state @state.compare_by_identity self end def compare_by_identity? return false unless @state @state.compare_by_identity? end def default(key=undefined) if @default_proc and !undefined.equal?(key) @default_proc.call(self, key) else @default end end def default_proc @default_proc end # Sets the default proc to be executed on each key lookup def default_proc=(prc) Rubinius.check_frozen unless prc.nil? prc = Rubinius::Type.coerce_to prc, Proc, :to_proc if prc.lambda? and prc.arity != 2 raise TypeError, "default proc must have arity 2" end end @default = nil @default_proc = prc end def delete(key) Rubinius.check_frozen key_hash = Rubinius.privately { key.hash } index = key_index key_hash if item = @entries[index] if item.delete key, key_hash @entries[index] = item.link @size -= 1 return item.value end last = item while item = item.link if item.delete key, key_hash last.link = item.link @size -= 1 return item.value end last = item end end return yield(key) if block_given? end def dig(key, *remaining_keys) item = self[key] return item if remaining_keys.empty? || item.nil? 
raise TypeError, "#{item.class} does not have #dig method" unless item.respond_to?(:dig) item.dig(*remaining_keys) end def each_item return unless @state item = @state.head while item yield item item = item.next end end def each return to_enum(:each) { size } unless block_given? return unless @state item = @state.head while item yield [item.key, item.value] item = item.next end self end alias_method :each_pair, :each def fetch(key, default=undefined) if item = find_item(key) return item.value end return yield(key) if block_given? return default unless undefined.equal?(default) raise KeyError, "key #{key} not found" end def fetch_values(*keys, &block) keys.map { |key| fetch(key, &block) } end # Searches for an item matching +key+. Returns the item # if found. Otherwise returns +nil+. def find_item(key) key_hash = Rubinius.privately { key.hash } item = @entries[key_index(key_hash)] while item if @state.match? item.key, item.key_hash, key, key_hash return item end item = item.link end end def flatten(level=1) to_a.flatten(level) end def keep_if return to_enum(:keep_if) { size } unless block_given? Rubinius.check_frozen each_item { |e| delete e.key unless yield(e.key, e.value) } self end def initialize(default=undefined, &block) Rubinius.check_frozen if !undefined.equal?(default) and block raise ArgumentError, "Specify a default or a block, not both" end if block @default = nil @default_proc = block elsif !undefined.equal?(default) @default = default @default_proc = nil end self end private :initialize def merge!(other) Rubinius.check_frozen other = Rubinius::Type.coerce_to other, Hash, :to_hash if block_given? other.each_item do |item| key = item.key if key? key __store__ key, yield(key, self[key], item.value) else __store__ key, item.value end end else other.each_item do |item| __store__ item.key, item.value end end self end alias_method :update, :merge! # Returns a new +Bucket+ instance having +key+, +key_hash+, # and +value+. 
If +key+ is a kind of +String+, +key+ is # duped and frozen. def new_bucket(key, key_hash, value) if key.kind_of?(String) and !key.frozen? and !compare_by_identity? key = key.dup key.freeze end @size += 1 Bucket.new key, key_hash, value, @state end private :new_bucket # Adjusts the hash storage and redistributes the entries among # the new bins. Any Iterator instance will be invalid after a # call to #redistribute. Does not recalculate the cached key_hash # values. See +#rehash+. def redistribute(entries) capacity = @capacity # Rather than using __setup__, initialize the specific values we need to # change so we don't eg overwrite @state. @capacity = capacity * 2 @entries = Entries.new @capacity @mask = @capacity - 1 @max_entries = @max_entries * 2 i = -1 while (i += 1) < capacity next unless old = entries[i] while old old.link = nil if nxt = old.link index = key_index old.key_hash if item = @entries[index] old.link = item end @entries[index] = old old = nxt end end end def rassoc(value) each_item { |e| return e.key, e.value if value == e.value } end def replace(other) Rubinius.check_frozen other = Rubinius::Type.coerce_to other, Hash, :to_hash return self if self.equal? other # Normally this would be a call to __setup__, but that will create a new # unused Tuple that we would wind up replacing anyways. @capacity = other.capacity @entries = Entries.new @capacity @mask = @capacity - 1 @size = 0 @max_entries = other.max_entries @state = State.new @state.compare_by_identity if other.compare_by_identity? other.each_item do |item| __store__ item.key, item.value end @default = other.default @default_proc = other.default_proc self end alias_method :initialize_copy, :replace private :initialize_copy def select return to_enum(:select) { size } unless block_given? selected = Hash.allocate each_item do |item| if yield(item.key, item.value) selected[item.key] = item.value end end selected end def select! return to_enum(:select!) { size } unless block_given? 
Rubinius.check_frozen return nil if empty? size = @size each_item { |e| delete e.key unless yield(e.key, e.value) } return nil if size == @size self end def shift Rubinius.check_frozen return default(nil) if empty? item = @state.head delete item.key return item.key, item.value end # Sets the underlying data structures. # # @capacity is the maximum number of +@entries+. # @max_entries is the maximum number of entries before redistributing. # @size is the number of pairs, equivalent to <code>hsh.size</code>. # @entrien is the vector of storage for the item chains. def __setup__(capacity=MIN_SIZE, max=MAX_ENTRIES, size=0) @capacity = capacity @mask = capacity - 1 @max_entries = max @size = size @entries = Entries.new capacity @state = State.new end private :__setup__ def to_h if instance_of? Hash self else Hash.allocate.replace(to_hash) end end # Returns an external iterator for the bins. See +Iterator+ def to_iter Iterator.new @state end def eql?(other) # Just like ==, but uses eql? to compare values. return true if self.equal? other unless other.kind_of? Hash return false unless other.respond_to? :to_hash return other.eql?(self) end return false unless other.size == size Thread.detect_recursion self, other do each_item do |item| other_item = other.find_item(item.key) # Other doesn't even have this key return false unless other_item # Order of the comparison matters! We must compare our value with # the other Hash's value and not the other way around. 
unless Rubinius::Type::object_equal(item.value, other_item.value) or item.value.eql?(other_item.value) return false end end end true end def hash val = size Thread.detect_outermost_recursion self do each_item do |item| val ^= item.key.hash val ^= item.value.hash end end val end def [](key) if item = find_item(key) item.value else default key end end def clear Rubinius.check_frozen __setup__ self end def default=(value) @default_proc = nil @default = value end def delete_if(&block) return to_enum(:delete_if) { size } unless block_given? Rubinius.check_frozen select(&block).each { |k, v| delete k } self end def each_key return to_enum(:each_key) { size } unless block_given? each_item { |item| yield item.key } self end def each_value return to_enum(:each_value) { size } unless block_given? each_item { |item| yield item.value } self end # Returns true if there are no entries. def empty? @size == 0 end def index(value) each_item do |item| return item.key if item.value == value end end alias_method :key, :index def inspect out = [] return '{...}' if Thread.detect_recursion self do each_item do |item| str = item.key.inspect str << '=>' str << item.value.inspect out << str end end ret = "{#{out.join ', '}}" Rubinius::Type.infect(ret, self) unless empty? ret end alias_method :to_s, :inspect def invert inverted = {} each_item do |item| inverted[item.value] = item.key end inverted end def key?(key) find_item(key) != nil end alias_method :has_key?, :key? alias_method :include?, :key? alias_method :member?, :key? # Calculates the +@entries+ slot given a key_hash value. def key_index(key_hash) key_hash & @mask end private :key_index def keys ary = [] each_item do |item| ary << item.key end ary end def merge(other, &block) dup.merge!(other, &block) end # Recalculates the cached key_hash values and reorders the entries # into a new +@entries+ vector. Does NOT change the size of the # hash. See +#redistribute+. 
def rehash capacity = @capacity entries = @entries @entries = Entries.new @capacity i = -1 while (i += 1) < capacity next unless old = entries[i] while old old.link = nil if nxt = old.link index = key_index(old.key_hash = old.key.hash) if item = @entries[index] old.link = item end @entries[index] = old old = nxt end end self end def reject(&block) return to_enum(:reject) { size } unless block_given? hsh = dup.delete_if(&block) hsh.taint if tainted? hsh end def reject!(&block) return to_enum(:reject!) { size } unless block_given? Rubinius.check_frozen unless empty? size = @size delete_if(&block) return self if size != @size end nil end def sort(&block) to_a.sort(&block) end def to_a ary = [] each_item do |item| ary << [item.key, item.value] end Rubinius::Type.infect ary, self ary end def to_hash self end def to_proc method(:[]).to_proc end def value?(value) each_item do |item| return true if item.value == value end false end alias_method :has_value?, :value? def values ary = [] each_item do |item| ary << item.value end ary end def values_at(*args) args.map do |key| if item = find_item(key) item.value else default key end end end alias_method :indices, :values_at alias_method :indexes, :values_at end end
## # Namespace for coercion functions between various ruby objects. module Rubinius module Type ## # Returns an object of given class. If given object already is one, it is # returned. Otherwise tries obj.meth and returns the result if it is of the # right kind. TypeErrors are raised if the conversion method fails or the # conversion result is wrong. # # Uses Rubinius::Type.object_kind_of to bypass type check overrides. # # Equivalent to MRI's rb_convert_type(). def self.coerce_to(obj, cls, meth) return obj if object_kind_of?(obj, cls) begin ret = obj.__send__(meth) rescue Exception => orig raise TypeError, "Coercion error: #{obj.inspect}.#{meth} => #{cls} failed", orig end return ret if object_kind_of?(ret, cls) msg = "Coercion error: obj.#{meth} did NOT return a #{cls} (was #{object_class(ret)})" raise TypeError, msg end ## # Same as coerce_to but returns nil if conversion fails. # Corresponds to MRI's rb_check_convert_type() # def self.try_convert(obj, cls, meth) return obj if object_kind_of?(obj, cls) return nil unless object_respond_to?(obj, meth) begin ret = obj.__send__(meth) rescue Exception return nil end return ret if ret.nil? 
|| object_kind_of?(ret, cls) msg = "Coercion error: obj.#{meth} did NOT return a #{cls} (was #{object_class(ret)})" raise TypeError, msg end def self.coerce_to_symbol(obj) if object_kind_of?(obj, Fixnum) raise ArgumentError, "Fixnums (#{obj}) cannot be used as symbols" end obj = obj.to_str if object_respond_to?(obj, :to_str) coerce_to(obj, Symbol, :to_sym) end def self.coerce_to_comparison(a, b) unless cmp = (a <=> b) raise ArgumentError, "comparison of #{a.inspect} with #{b.inspect} failed" end cmp end # Maps to rb_num2long in MRI def self.num2long(obj) if obj == nil raise TypeError, "no implicit conversion from nil to integer" else Integer(obj) end end def self.each_ancestor(mod) unless object_kind_of?(mod, Class) and singleton_class_object(mod) yield mod end sup = mod.direct_superclass() while sup if object_kind_of?(sup, IncludedModule) yield sup.module elsif object_kind_of?(sup, Class) yield sup unless singleton_class_object(sup) else yield sup end sup = sup.direct_superclass() end end end end Use object's #respond_to? in Type.try_convert. The Type predicates are used to protect critical parts of the Rubinius kernel from careless or stupid user code. In this case, since the (ridiculous) pattern of overriding both #respond_to? and #method_missing is somewhat engrained, we permit the potentially overridden #respond_to? to be called. If the user code behaves inconsistently, that's the user's problem. ## # Namespace for coercion functions between various ruby objects. module Rubinius module Type ## # Returns an object of given class. If given object already is one, it is # returned. Otherwise tries obj.meth and returns the result if it is of the # right kind. TypeErrors are raised if the conversion method fails or the # conversion result is wrong. # # Uses Rubinius::Type.object_kind_of to bypass type check overrides. # # Equivalent to MRI's rb_convert_type(). 
def self.coerce_to(obj, cls, meth) return obj if object_kind_of?(obj, cls) begin ret = obj.__send__(meth) rescue Exception => orig raise TypeError, "Coercion error: #{obj.inspect}.#{meth} => #{cls} failed", orig end return ret if object_kind_of?(ret, cls) msg = "Coercion error: obj.#{meth} did NOT return a #{cls} (was #{object_class(ret)})" raise TypeError, msg end ## # Same as coerce_to but returns nil if conversion fails. # Corresponds to MRI's rb_check_convert_type() # def self.try_convert(obj, cls, meth) return obj if object_kind_of?(obj, cls) return nil unless obj.respond_to?(meth) begin ret = obj.__send__(meth) rescue Exception return nil end return ret if ret.nil? || object_kind_of?(ret, cls) msg = "Coercion error: obj.#{meth} did NOT return a #{cls} (was #{object_class(ret)})" raise TypeError, msg end def self.coerce_to_symbol(obj) if object_kind_of?(obj, Fixnum) raise ArgumentError, "Fixnums (#{obj}) cannot be used as symbols" end obj = obj.to_str if obj.respond_to?(:to_str) coerce_to(obj, Symbol, :to_sym) end def self.coerce_to_comparison(a, b) unless cmp = (a <=> b) raise ArgumentError, "comparison of #{a.inspect} with #{b.inspect} failed" end cmp end # Maps to rb_num2long in MRI def self.num2long(obj) if obj == nil raise TypeError, "no implicit conversion from nil to integer" else Integer(obj) end end def self.each_ancestor(mod) unless object_kind_of?(mod, Class) and singleton_class_object(mod) yield mod end sup = mod.direct_superclass() while sup if object_kind_of?(sup, IncludedModule) yield sup.module elsif object_kind_of?(sup, Class) yield sup unless singleton_class_object(sup) else yield sup end sup = sup.direct_superclass() end end end end
require 'pathname' require 'CLIntegracon/diff' require 'CLIntegracon/formatter' module CLIntegracon class FileTreeSpec # @return [FileTreeSpecContext] # The context, which configures path and file behaviors attr_reader :context # @return [String] # The concrete spec folder attr_reader :spec_folder # @return [Pathname] # The concrete spec path def spec_path context.spec_path + spec_folder end # @return [Pathname] # The concrete before directory for this spec def before_path spec_path + context.before_dir end # @return [Pathname] # The concrete after directory for this spec def after_path spec_path + context.after_dir end # @return [Pathname] # The concrete temp directory for this spec def temp_path context.temp_path + spec_folder end # Init a spec with a given context # # @param [FileTreeSpecContext] context # The context, which configures path and file behaviors # # @param [String] spec_folder # The concrete spec folder # def initialize(context, spec_folder) @context = context @spec_folder = spec_folder end # Run this spec # # @param [Block<(FileTreeSpec)->()>] block # The block, which will be executed after chdir into the created temporary # directory. In this block you will likely run your modifications to the # file system and use the received FileTreeSpec instance to make asserts # with the test framework of your choice. # def run(&block) prepare! copy_files! Dir.chdir(temp_path) do block.call self end end # Compares the expected and produced directory by using the rules # defined in the context # # @param [Block<(Diff)->()>] diff_block # The block, where you will likely define a test for each file to compare. # It will receive a Diff of each of the expected and produced files. # def compare(&diff_block) transform_paths! glob_all(after_path).each do |relative_path| expected = after_path + relative_path next unless expected.file? 
block = special_behavior_for_path relative_path next if block == context.class.nop diff = diff_files(expected, relative_path) diff.preparator = block unless block.nil? diff_block.call diff end end # Compares the expected and produced directory by using the rules # defined in the context for unexpected files. # # This is separate because you probably don't want to define an extra # test case for each file, which wasn't expected at all. So you can # keep your test cases consistent. # # @param [Block<(Array)->()>] diff_block # The block, where you will likely define a test that no unexpected files exists. # It will receive an Array. # def check_unexpected_files(&block) expected_files = glob_all after_path produced_files = glob_all unexpected_files = produced_files - expected_files # Select only files unexpected_files.select! { |path| path.file? } # Filter ignored paths unexpected_files.reject! { |path| special_behavior_for_path(path) == context.class.nop } block.call unexpected_files end # Return a Formatter # # @return [Formatter] # def formatter @formatter ||= Formatter.new(self) end protected # Prepare the temporary directory # def prepare! context.prepare! temp_path.rmtree if temp_path.exist? temp_path.mkdir end # Copies the before subdirectory of the given tests folder in the temporary # directory. # def copy_files! source = before_path destination = temp_path FileUtils.cp_r("#{source}/.", destination) end # Applies the in the context configured transformations. # def transform_paths! context.transform_paths.each do |path, block| Dir.glob(path) do |produced_path| produced = Pathname(produced_path) block.call(produced) end end end # Searches recursively for all files and take care for including hidden files # if this is configured in the context. # # @param [String] path # The relative or absolute path to search in (optional) # # @return [Array<Pathname>] # def glob_all(path=nil) Dir.chdir path || '.' do Dir.glob("**/*", context.include_hidden_files? ? 
File::FNM_DOTMATCH : 0).sort.map { |path| Pathname(path) } end end # Find the special behavior for a given path # # @return [Block<(Pathname) -> to_s>] # This block takes the Pathname and transforms the file in a better comparable # state. If it returns nil, the file is ignored. # def special_behavior_for_path(path) context.special_paths.each do |key, block| matched = if key.is_a?(Regexp) path.to_s.match(key) else File.fnmatch(key, path) end next unless matched return block end return nil end # Compares two files to check if they are identical and produces a clear diff # to highlight the differences. # # @param [Pathname] expected # The file in the after directory # # @param [Pathname] relative_path # The file in the temp directory # # @return [Diff] # An object holding a diff # def diff_files(expected, relative_path) produced = temp_path + relative_path Diff.new(expected, produced, relative_path) end end end [Refactor] Don't shade method argument with block argument require 'pathname' require 'CLIntegracon/diff' require 'CLIntegracon/formatter' module CLIntegracon class FileTreeSpec # @return [FileTreeSpecContext] # The context, which configures path and file behaviors attr_reader :context # @return [String] # The concrete spec folder attr_reader :spec_folder # @return [Pathname] # The concrete spec path def spec_path context.spec_path + spec_folder end # @return [Pathname] # The concrete before directory for this spec def before_path spec_path + context.before_dir end # @return [Pathname] # The concrete after directory for this spec def after_path spec_path + context.after_dir end # @return [Pathname] # The concrete temp directory for this spec def temp_path context.temp_path + spec_folder end # Init a spec with a given context # # @param [FileTreeSpecContext] context # The context, which configures path and file behaviors # # @param [String] spec_folder # The concrete spec folder # def initialize(context, spec_folder) @context = context @spec_folder = 
spec_folder end # Run this spec # # @param [Block<(FileTreeSpec)->()>] block # The block, which will be executed after chdir into the created temporary # directory. In this block you will likely run your modifications to the # file system and use the received FileTreeSpec instance to make asserts # with the test framework of your choice. # def run(&block) prepare! copy_files! Dir.chdir(temp_path) do block.call self end end # Compares the expected and produced directory by using the rules # defined in the context # # @param [Block<(Diff)->()>] diff_block # The block, where you will likely define a test for each file to compare. # It will receive a Diff of each of the expected and produced files. # def compare(&diff_block) transform_paths! glob_all(after_path).each do |relative_path| expected = after_path + relative_path next unless expected.file? block = special_behavior_for_path relative_path next if block == context.class.nop diff = diff_files(expected, relative_path) diff.preparator = block unless block.nil? diff_block.call diff end end # Compares the expected and produced directory by using the rules # defined in the context for unexpected files. # # This is separate because you probably don't want to define an extra # test case for each file, which wasn't expected at all. So you can # keep your test cases consistent. # # @param [Block<(Array)->()>] diff_block # The block, where you will likely define a test that no unexpected files exists. # It will receive an Array. # def check_unexpected_files(&block) expected_files = glob_all after_path produced_files = glob_all unexpected_files = produced_files - expected_files # Select only files unexpected_files.select! { |path| path.file? } # Filter ignored paths unexpected_files.reject! 
{ |path| special_behavior_for_path(path) == context.class.nop } block.call unexpected_files end # Return a Formatter # # @return [Formatter] # def formatter @formatter ||= Formatter.new(self) end protected # Prepare the temporary directory # def prepare! context.prepare! temp_path.rmtree if temp_path.exist? temp_path.mkdir end # Copies the before subdirectory of the given tests folder in the temporary # directory. # def copy_files! source = before_path destination = temp_path FileUtils.cp_r("#{source}/.", destination) end # Applies the in the context configured transformations. # def transform_paths! context.transform_paths.each do |path, block| Dir.glob(path) do |produced_path| produced = Pathname(produced_path) block.call(produced) end end end # Searches recursively for all files and take care for including hidden files # if this is configured in the context. # # @param [String] path # The relative or absolute path to search in (optional) # # @return [Array<Pathname>] # def glob_all(path=nil) Dir.chdir path || '.' do Dir.glob("**/*", context.include_hidden_files? ? File::FNM_DOTMATCH : 0).sort.map do |p| Pathname(p) end end end # Find the special behavior for a given path # # @return [Block<(Pathname) -> to_s>] # This block takes the Pathname and transforms the file in a better comparable # state. If it returns nil, the file is ignored. # def special_behavior_for_path(path) context.special_paths.each do |key, block| matched = if key.is_a?(Regexp) path.to_s.match(key) else File.fnmatch(key, path) end next unless matched return block end return nil end # Compares two files to check if they are identical and produces a clear diff # to highlight the differences. 
# # @param [Pathname] expected # The file in the after directory # # @param [Pathname] relative_path # The file in the temp directory # # @return [Diff] # An object holding a diff # def diff_files(expected, relative_path) produced = temp_path + relative_path Diff.new(expected, produced, relative_path) end end end
require 'abstract_unit' require 'controller/fake_models' require 'active_support/core_ext/object/inclusion' class FormHelperTest < ActionView::TestCase include RenderERBUtils tests ActionView::Helpers::FormHelper def form_for(*) @output_buffer = super end def setup super # Create "label" locale for testing I18n label helpers I18n.backend.store_translations 'label', { :activemodel => { :attributes => { :post => { :cost => "Total cost" } } }, :helpers => { :label => { :post => { :body => "Write entire text here", :color => { :red => "Rojo" }, :comments => { :body => "Write body here" } }, :tag => { :value => "Tag" } } } } # Create "submit" locale for testing I18n submit helpers I18n.backend.store_translations 'submit', { :helpers => { :submit => { :create => 'Create %{model}', :update => 'Confirm %{model} changes', :submit => 'Save changes', :another_post => { :update => 'Update your %{model}' } } } } @post = Post.new @comment = Comment.new def @post.errors() Class.new{ def [](field); field == "author_name" ? ["can't be empty"] : [] end def empty?() false end def count() 1 end def full_messages() [ "Author name can't be empty" ] end }.new end def @post.id; 123; end def @post.id_before_type_cast; 123; end def @post.to_param; '123'; end @post.persisted = true @post.title = "Hello World" @post.author_name = "" @post.body = "Back to the hill and over it again!" @post.secret = 1 @post.written_on = Date.new(2004, 6, 15) @post.comments = [] @post.comments << @comment @post.tags = [] @post.tags << Tag.new @blog_post = Blog::Post.new("And his name will be forty and four.", 44) end Routes = ActionDispatch::Routing::RouteSet.new Routes.draw do resources :posts do resources :comments end namespace :admin do resources :posts do resources :comments end end match "/foo", :to => "controller#action" root :to => "main#index" end def _routes Routes end include Routes.url_helpers def url_for(object) @url_for_options = object if object.is_a?(Hash) && object[:use_route].blank? 
&& object[:controller].blank? object.merge!(:controller => "main", :action => "index") end super end def test_label assert_dom_equal('<label for="post_title">Title</label>', label("post", "title")) assert_dom_equal('<label for="post_title">The title goes here</label>', label("post", "title", "The title goes here")) assert_dom_equal( '<label class="title_label" for="post_title">Title</label>', label("post", "title", nil, :class => 'title_label') ) assert_dom_equal('<label for="post_secret">Secret?</label>', label("post", "secret?")) end def test_label_with_symbols assert_dom_equal('<label for="post_title">Title</label>', label(:post, :title)) assert_dom_equal('<label for="post_secret">Secret?</label>', label(:post, :secret?)) end def test_label_with_locales_strings old_locale, I18n.locale = I18n.locale, :label assert_dom_equal('<label for="post_body">Write entire text here</label>', label("post", "body")) ensure I18n.locale = old_locale end def test_label_with_human_attribute_name old_locale, I18n.locale = I18n.locale, :label assert_dom_equal('<label for="post_cost">Total cost</label>', label(:post, :cost)) ensure I18n.locale = old_locale end def test_label_with_locales_symbols old_locale, I18n.locale = I18n.locale, :label assert_dom_equal('<label for="post_body">Write entire text here</label>', label(:post, :body)) ensure I18n.locale = old_locale end def test_label_with_locales_and_options old_locale, I18n.locale = I18n.locale, :label assert_dom_equal('<label for="post_body" class="post_body">Write entire text here</label>', label(:post, :body, :class => 'post_body')) ensure I18n.locale = old_locale end def test_label_with_locales_and_value old_locale, I18n.locale = I18n.locale, :label assert_dom_equal('<label for="post_color_red">Rojo</label>', label(:post, :color, :value => "red")) ensure I18n.locale = old_locale end def test_label_with_locales_and_nested_attributes old_locale, I18n.locale = I18n.locale, :label form_for(@post, :html => { :id => 'create-post' }) 
do |f| f.fields_for(:comments) do |cf| concat cf.label(:body) end end expected = whole_form("/posts/123", "create-post" , "edit_post", :method => "put") do "<label for=\"post_comments_attributes_0_body\">Write body here</label>" end assert_dom_equal expected, output_buffer ensure I18n.locale = old_locale end def test_label_with_locales_fallback_and_nested_attributes old_locale, I18n.locale = I18n.locale, :label form_for(@post, :html => { :id => 'create-post' }) do |f| f.fields_for(:tags) do |cf| concat cf.label(:value) end end expected = whole_form("/posts/123", "create-post" , "edit_post", :method => "put") do "<label for=\"post_tags_attributes_0_value\">Tag</label>" end assert_dom_equal expected, output_buffer ensure I18n.locale = old_locale end def test_label_with_for_attribute_as_symbol assert_dom_equal('<label for="my_for">Title</label>', label(:post, :title, nil, :for => "my_for")) end def test_label_with_for_attribute_as_string assert_dom_equal('<label for="my_for">Title</label>', label(:post, :title, nil, "for" => "my_for")) end def test_label_with_id_attribute_as_symbol assert_dom_equal('<label for="post_title" id="my_id">Title</label>', label(:post, :title, nil, :id => "my_id")) end def test_label_with_id_attribute_as_string assert_dom_equal('<label for="post_title" id="my_id">Title</label>', label(:post, :title, nil, "id" => "my_id")) end def test_label_with_for_and_id_attributes_as_symbol assert_dom_equal('<label for="my_for" id="my_id">Title</label>', label(:post, :title, nil, :for => "my_for", :id => "my_id")) end def test_label_with_for_and_id_attributes_as_string assert_dom_equal('<label for="my_for" id="my_id">Title</label>', label(:post, :title, nil, "for" => "my_for", "id" => "my_id")) end def test_label_for_radio_buttons_with_value assert_dom_equal('<label for="post_title_great_title">The title goes here</label>', label("post", "title", "The title goes here", :value => "great_title")) assert_dom_equal('<label for="post_title_great_title">The 
title goes here</label>', label("post", "title", "The title goes here", :value => "great title")) end def test_label_with_block assert_dom_equal('<label for="post_title">The title, please:</label>', label(:post, :title) { "The title, please:" }) end def test_label_with_block_in_erb assert_equal "<label for=\"post_message\">\n Message\n <input id=\"post_message\" name=\"post[message]\" size=\"30\" type=\"text\" />\n</label>", view.render("test/label_with_block") end def test_text_field assert_dom_equal( '<input id="post_title" name="post[title]" size="30" type="text" value="Hello World" />', text_field("post", "title") ) assert_dom_equal( '<input id="post_title" name="post[title]" size="30" type="password" />', password_field("post", "title") ) assert_dom_equal( '<input id="post_title" name="post[title]" size="30" type="password" value="Hello World" />', password_field("post", "title", :value => @post.title) ) assert_dom_equal( '<input id="person_name" name="person[name]" size="30" type="password" />', password_field("person", "name") ) end def test_text_field_with_escapes @post.title = "<b>Hello World</b>" assert_dom_equal( '<input id="post_title" name="post[title]" size="30" type="text" value="&lt;b&gt;Hello World&lt;/b&gt;" />', text_field("post", "title") ) end def test_text_field_with_html_entities @post.title = "The HTML Entity for & is &amp;" assert_dom_equal( '<input id="post_title" name="post[title]" size="30" type="text" value="The HTML Entity for &amp; is &amp;amp;" />', text_field("post", "title") ) end def test_text_field_with_options expected = '<input id="post_title" name="post[title]" size="35" type="text" value="Hello World" />' assert_dom_equal expected, text_field("post", "title", "size" => 35) assert_dom_equal expected, text_field("post", "title", :size => 35) end def test_text_field_assuming_size expected = '<input id="post_title" maxlength="35" name="post[title]" size="35" type="text" value="Hello World" />' assert_dom_equal expected, 
text_field("post", "title", "maxlength" => 35) assert_dom_equal expected, text_field("post", "title", :maxlength => 35) end def test_text_field_removing_size expected = '<input id="post_title" maxlength="35" name="post[title]" type="text" value="Hello World" />' assert_dom_equal expected, text_field("post", "title", "maxlength" => 35, "size" => nil) assert_dom_equal expected, text_field("post", "title", :maxlength => 35, :size => nil) end def test_text_field_with_nil_value expected = '<input id="post_title" name="post[title]" size="30" type="text" />' assert_dom_equal expected, text_field("post", "title", :value => nil) end def test_text_field_doesnt_change_param_values object_name = 'post[]' expected = '<input id="post_123_title" name="post[123][title]" size="30" type="text" value="Hello World" />' assert_equal expected, text_field(object_name, "title") assert_equal object_name, "post[]" end def test_file_field_has_no_size expected = '<input id="user_avatar" name="user[avatar]" type="file" />' assert_dom_equal expected, file_field("user", "avatar") end def test_hidden_field assert_dom_equal '<input id="post_title" name="post[title]" type="hidden" value="Hello World" />', hidden_field("post", "title") assert_dom_equal '<input id="post_secret" name="post[secret]" type="hidden" value="1" />', hidden_field("post", "secret?") end def test_hidden_field_with_escapes @post.title = "<b>Hello World</b>" assert_dom_equal '<input id="post_title" name="post[title]" type="hidden" value="&lt;b&gt;Hello World&lt;/b&gt;" />', hidden_field("post", "title") end def test_hidden_field_with_nil_value expected = '<input id="post_title" name="post[title]" type="hidden" />' assert_dom_equal expected, hidden_field("post", "title", :value => nil) end def test_hidden_field_with_options assert_dom_equal '<input id="post_title" name="post[title]" type="hidden" value="Something Else" />', hidden_field("post", "title", :value => "Something Else") end def test_text_field_with_custom_type 
assert_dom_equal '<input id="user_email" size="30" name="user[email]" type="email" />', text_field("user", "email", :type => "email") end def test_check_box assert check_box("post", "secret").html_safe? assert_dom_equal( '<input name="post[secret]" type="hidden" value="0" /><input checked="checked" id="post_secret" name="post[secret]" type="checkbox" value="1" />', check_box("post", "secret") ) @post.secret = 0 assert_dom_equal( '<input name="post[secret]" type="hidden" value="0" /><input id="post_secret" name="post[secret]" type="checkbox" value="1" />', check_box("post", "secret") ) assert_dom_equal( '<input name="post[secret]" type="hidden" value="0" /><input checked="checked" id="post_secret" name="post[secret]" type="checkbox" value="1" />', check_box("post", "secret" ,{"checked"=>"checked"}) ) @post.secret = true assert_dom_equal( '<input name="post[secret]" type="hidden" value="0" /><input checked="checked" id="post_secret" name="post[secret]" type="checkbox" value="1" />', check_box("post", "secret") ) assert_dom_equal( '<input name="post[secret]" type="hidden" value="0" /><input checked="checked" id="post_secret" name="post[secret]" type="checkbox" value="1" />', check_box("post", "secret?") ) @post.secret = ['0'] assert_dom_equal( '<input name="post[secret]" type="hidden" value="0" /><input id="post_secret" name="post[secret]" type="checkbox" value="1" />', check_box("post", "secret") ) @post.secret = ['1'] assert_dom_equal( '<input name="post[secret]" type="hidden" value="0" /><input checked="checked" id="post_secret" name="post[secret]" type="checkbox" value="1" />', check_box("post", "secret") ) end def test_check_box_with_explicit_checked_and_unchecked_values @post.secret = "on" assert_dom_equal( '<input name="post[secret]" type="hidden" value="off" /><input checked="checked" id="post_secret" name="post[secret]" type="checkbox" value="on" />', check_box("post", "secret", {}, "on", "off") ) end def test_check_box_with_multiple_behavior 
@post.comment_ids = [2,3] assert_dom_equal( '<input name="post[comment_ids][]" type="hidden" value="0" /><input id="post_comment_ids_1" name="post[comment_ids][]" type="checkbox" value="1" />', check_box("post", "comment_ids", { :multiple => true }, 1) ) assert_dom_equal( '<input name="post[comment_ids][]" type="hidden" value="0" /><input checked="checked" id="post_comment_ids_3" name="post[comment_ids][]" type="checkbox" value="3" />', check_box("post", "comment_ids", { :multiple => true }, 3) ) end def test_checkbox_disabled_disables_hidden_field assert_dom_equal( '<input name="post[secret]" type="hidden" value="0" disabled="disabled"/><input checked="checked" disabled="disabled" id="post_secret" name="post[secret]" type="checkbox" value="1" />', check_box("post", "secret", { :disabled => true }) ) end def test_radio_button assert_dom_equal('<input checked="checked" id="post_title_hello_world" name="post[title]" type="radio" value="Hello World" />', radio_button("post", "title", "Hello World") ) assert_dom_equal('<input id="post_title_goodbye_world" name="post[title]" type="radio" value="Goodbye World" />', radio_button("post", "title", "Goodbye World") ) assert_dom_equal('<input id="item_subobject_title_inside_world" name="item[subobject][title]" type="radio" value="inside world"/>', radio_button("item[subobject]", "title", "inside world") ) end def test_radio_button_is_checked_with_integers assert_dom_equal('<input checked="checked" id="post_secret_1" name="post[secret]" type="radio" value="1" />', radio_button("post", "secret", "1") ) end def test_radio_button_with_negative_integer_value assert_dom_equal('<input id="post_secret_-1" name="post[secret]" type="radio" value="-1" />', radio_button("post", "secret", "-1")) end def test_radio_button_respects_passed_in_id assert_dom_equal('<input checked="checked" id="foo" name="post[secret]" type="radio" value="1" />', radio_button("post", "secret", "1", :id=>"foo") ) end def test_radio_button_with_booleans 
assert_dom_equal('<input id="post_secret_true" name="post[secret]" type="radio" value="true" />', radio_button("post", "secret", true) ) assert_dom_equal('<input id="post_secret_false" name="post[secret]" type="radio" value="false" />', radio_button("post", "secret", false) ) end def test_text_area assert_dom_equal( '<textarea cols="40" id="post_body" name="post[body]" rows="20">Back to the hill and over it again!</textarea>', text_area("post", "body") ) end def test_text_area_with_escapes @post.body = "Back to <i>the</i> hill and over it again!" assert_dom_equal( '<textarea cols="40" id="post_body" name="post[body]" rows="20">Back to &lt;i&gt;the&lt;/i&gt; hill and over it again!</textarea>', text_area("post", "body") ) end def test_text_area_with_alternate_value assert_dom_equal( '<textarea cols="40" id="post_body" name="post[body]" rows="20">Testing alternate values.</textarea>', text_area("post", "body", :value => 'Testing alternate values.') ) end def test_text_area_with_html_entities @post.body = "The HTML Entity for & is &amp;" assert_dom_equal( '<textarea cols="40" id="post_body" name="post[body]" rows="20">The HTML Entity for &amp; is &amp;amp;</textarea>', text_area("post", "body") ) end def test_text_area_with_size_option assert_dom_equal( '<textarea cols="183" id="post_body" name="post[body]" rows="820">Back to the hill and over it again!</textarea>', text_area("post", "body", :size => "183x820") ) end def test_search_field expected = %{<input id="contact_notes_query" size="30" name="contact[notes_query]" type="search" />} assert_dom_equal(expected, search_field("contact", "notes_query")) end def test_telephone_field expected = %{<input id="user_cell" size="30" name="user[cell]" type="tel" />} assert_dom_equal(expected, telephone_field("user", "cell")) end def test_url_field expected = %{<input id="user_homepage" size="30" name="user[homepage]" type="url" />} assert_dom_equal(expected, url_field("user", "homepage")) end def test_email_field expected = 
%{<input id="user_address" size="30" name="user[address]" type="email" />} assert_dom_equal(expected, email_field("user", "address")) end def test_number_field expected = %{<input name="order[quantity]" max="9" id="order_quantity" type="number" min="1" />} assert_dom_equal(expected, number_field("order", "quantity", :in => 1...10)) expected = %{<input name="order[quantity]" size="30" max="9" id="order_quantity" type="number" min="1" />} assert_dom_equal(expected, number_field("order", "quantity", :size => 30, :in => 1...10)) end def test_range_input expected = %{<input name="hifi[volume]" step="0.1" max="11" id="hifi_volume" type="range" min="0" />} assert_dom_equal(expected, range_field("hifi", "volume", :in => 0..11, :step => 0.1)) expected = %{<input name="hifi[volume]" step="0.1" size="30" max="11" id="hifi_volume" type="range" min="0" />} assert_dom_equal(expected, range_field("hifi", "volume", :size => 30, :in => 0..11, :step => 0.1)) end def test_explicit_name assert_dom_equal( '<input id="post_title" name="dont guess" size="30" type="text" value="Hello World" />', text_field("post", "title", "name" => "dont guess") ) assert_dom_equal( '<textarea cols="40" id="post_body" name="really!" 
rows="20">Back to the hill and over it again!</textarea>', text_area("post", "body", "name" => "really!") ) assert_dom_equal( '<input name="i mean it" type="hidden" value="0" /><input checked="checked" id="post_secret" name="i mean it" type="checkbox" value="1" />', check_box("post", "secret", "name" => "i mean it") ) assert_dom_equal text_field("post", "title", "name" => "dont guess"), text_field("post", "title", :name => "dont guess") assert_dom_equal text_area("post", "body", "name" => "really!"), text_area("post", "body", :name => "really!") assert_dom_equal check_box("post", "secret", "name" => "i mean it"), check_box("post", "secret", :name => "i mean it") end def test_explicit_id assert_dom_equal( '<input id="dont guess" name="post[title]" size="30" type="text" value="Hello World" />', text_field("post", "title", "id" => "dont guess") ) assert_dom_equal( '<textarea cols="40" id="really!" name="post[body]" rows="20">Back to the hill and over it again!</textarea>', text_area("post", "body", "id" => "really!") ) assert_dom_equal( '<input name="post[secret]" type="hidden" value="0" /><input checked="checked" id="i mean it" name="post[secret]" type="checkbox" value="1" />', check_box("post", "secret", "id" => "i mean it") ) assert_dom_equal text_field("post", "title", "id" => "dont guess"), text_field("post", "title", :id => "dont guess") assert_dom_equal text_area("post", "body", "id" => "really!"), text_area("post", "body", :id => "really!") assert_dom_equal check_box("post", "secret", "id" => "i mean it"), check_box("post", "secret", :id => "i mean it") end def test_nil_id assert_dom_equal( '<input name="post[title]" size="30" type="text" value="Hello World" />', text_field("post", "title", "id" => nil) ) assert_dom_equal( '<textarea cols="40" name="post[body]" rows="20">Back to the hill and over it again!</textarea>', text_area("post", "body", "id" => nil) ) assert_dom_equal( '<input name="post[secret]" type="hidden" value="0" /><input checked="checked" 
name="post[secret]" type="checkbox" value="1" />', check_box("post", "secret", "id" => nil) ) assert_dom_equal( '<input type="radio" name="post[secret]" value="0" />', radio_button("post", "secret", "0", "id" => nil) ) assert_dom_equal( '<select name="post[secret]"></select>', select("post", "secret", [], {}, "id" => nil) ) assert_dom_equal text_field("post", "title", "id" => nil), text_field("post", "title", :id => nil) assert_dom_equal text_area("post", "body", "id" => nil), text_area("post", "body", :id => nil) assert_dom_equal check_box("post", "secret", "id" => nil), check_box("post", "secret", :id => nil) assert_dom_equal radio_button("post", "secret", "0", "id" => nil), radio_button("post", "secret", "0", :id => nil) end def test_index assert_dom_equal( '<input name="post[5][title]" size="30" id="post_5_title" type="text" value="Hello World" />', text_field("post", "title", "index" => 5) ) assert_dom_equal( '<textarea cols="40" name="post[5][body]" id="post_5_body" rows="20">Back to the hill and over it again!</textarea>', text_area("post", "body", "index" => 5) ) assert_dom_equal( '<input name="post[5][secret]" type="hidden" value="0" /><input checked="checked" name="post[5][secret]" type="checkbox" value="1" id="post_5_secret" />', check_box("post", "secret", "index" => 5) ) assert_dom_equal( text_field("post", "title", "index" => 5), text_field("post", "title", "index" => 5) ) assert_dom_equal( text_area("post", "body", "index" => 5), text_area("post", "body", "index" => 5) ) assert_dom_equal( check_box("post", "secret", "index" => 5), check_box("post", "secret", "index" => 5) ) end def test_index_with_nil_id assert_dom_equal( '<input name="post[5][title]" size="30" type="text" value="Hello World" />', text_field("post", "title", "index" => 5, 'id' => nil) ) assert_dom_equal( '<textarea cols="40" name="post[5][body]" rows="20">Back to the hill and over it again!</textarea>', text_area("post", "body", "index" => 5, 'id' => nil) ) assert_dom_equal( '<input 
name="post[5][secret]" type="hidden" value="0" /><input checked="checked" name="post[5][secret]" type="checkbox" value="1" />', check_box("post", "secret", "index" => 5, 'id' => nil) ) assert_dom_equal( text_field("post", "title", "index" => 5, 'id' => nil), text_field("post", "title", :index => 5, :id => nil) ) assert_dom_equal( text_area("post", "body", "index" => 5, 'id' => nil), text_area("post", "body", :index => 5, :id => nil) ) assert_dom_equal( check_box("post", "secret", "index" => 5, 'id' => nil), check_box("post", "secret", :index => 5, :id => nil) ) end def test_auto_index pid = @post.id assert_dom_equal( "<label for=\"post_#{pid}_title\">Title</label>", label("post[]", "title") ) assert_dom_equal( "<input id=\"post_#{pid}_title\" name=\"post[#{pid}][title]\" size=\"30\" type=\"text\" value=\"Hello World\" />", text_field("post[]","title") ) assert_dom_equal( "<textarea cols=\"40\" id=\"post_#{pid}_body\" name=\"post[#{pid}][body]\" rows=\"20\">Back to the hill and over it again!</textarea>", text_area("post[]", "body") ) assert_dom_equal( "<input name=\"post[#{pid}][secret]\" type=\"hidden\" value=\"0\" /><input checked=\"checked\" id=\"post_#{pid}_secret\" name=\"post[#{pid}][secret]\" type=\"checkbox\" value=\"1\" />", check_box("post[]", "secret") ) assert_dom_equal( "<input checked=\"checked\" id=\"post_#{pid}_title_hello_world\" name=\"post[#{pid}][title]\" type=\"radio\" value=\"Hello World\" />", radio_button("post[]", "title", "Hello World") ) assert_dom_equal("<input id=\"post_#{pid}_title_goodbye_world\" name=\"post[#{pid}][title]\" type=\"radio\" value=\"Goodbye World\" />", radio_button("post[]", "title", "Goodbye World") ) end def test_auto_index_with_nil_id pid = @post.id assert_dom_equal( "<input name=\"post[#{pid}][title]\" size=\"30\" type=\"text\" value=\"Hello World\" />", text_field("post[]","title", :id => nil) ) assert_dom_equal( "<textarea cols=\"40\" name=\"post[#{pid}][body]\" rows=\"20\">Back to the hill and over it 
again!</textarea>", text_area("post[]", "body", :id => nil) ) assert_dom_equal( "<input name=\"post[#{pid}][secret]\" type=\"hidden\" value=\"0\" /><input checked=\"checked\" name=\"post[#{pid}][secret]\" type=\"checkbox\" value=\"1\" />", check_box("post[]", "secret", :id => nil) ) assert_dom_equal( "<input checked=\"checked\" name=\"post[#{pid}][title]\" type=\"radio\" value=\"Hello World\" />", radio_button("post[]", "title", "Hello World", :id => nil) ) assert_dom_equal("<input name=\"post[#{pid}][title]\" type=\"radio\" value=\"Goodbye World\" />", radio_button("post[]", "title", "Goodbye World", :id => nil) ) end def test_form_for_requires_block assert_raises(ArgumentError) do form_for(:post, @post, :html => { :id => 'create-post' }) end end def test_form_for form_for(@post, :html => { :id => 'create-post' }) do |f| concat f.label(:title) { "The Title" } concat f.text_field(:title) concat f.text_area(:body) concat f.check_box(:secret) concat f.submit('Create post') concat f.button('Create post') end expected = whole_form("/posts/123", "create-post" , "edit_post", :method => "put") do "<label for='post_title'>The Title</label>" + "<input name='post[title]' size='30' type='text' id='post_title' value='Hello World' />" + "<textarea name='post[body]' id='post_body' rows='20' cols='40'>Back to the hill and over it again!</textarea>" + "<input name='post[secret]' type='hidden' value='0' />" + "<input name='post[secret]' checked='checked' type='checkbox' id='post_secret' value='1' />" + "<input name='commit' type='submit' value='Create post' />" + "<button name='button' type='submit'>Create post</button>" end assert_dom_equal expected, output_buffer end def test_form_for_with_file_field_generate_multipart Post.send :attr_accessor, :file form_for(@post, :html => { :id => 'create-post' }) do |f| concat f.file_field(:file) end expected = whole_form("/posts/123", "create-post" , "edit_post", :method => "put", :multipart => true) do "<input name='post[file]' type='file' 
id='post_file' />" end assert_dom_equal expected, output_buffer end def test_fields_for_with_file_field_generate_multipart Comment.send :attr_accessor, :file form_for(@post) do |f| concat f.fields_for(:comment, @post) { |c| concat c.file_field(:file) } end expected = whole_form("/posts/123", "edit_post_123" , "edit_post", :method => "put", :multipart => true) do "<input name='post[comment][file]' type='file' id='post_comment_file' />" end assert_dom_equal expected, output_buffer end def test_form_for_with_format form_for(@post, :format => :json, :html => { :id => "edit_post_123", :class => "edit_post" }) do |f| concat f.label(:title) end expected = whole_form("/posts/123.json", "edit_post_123" , "edit_post", :method => "put") do "<label for='post_title'>Title</label>" end assert_dom_equal expected, output_buffer end def test_form_for_with_model_using_relative_model_naming form_for(@blog_post) do |f| concat f.text_field :title concat f.submit('Edit post') end expected = whole_form("/posts/44", "edit_post_44" , "edit_post", :method => "put") do "<input name='post[title]' size='30' type='text' id='post_title' value='And his name will be forty and four.' 
/>" + "<input name='commit' type='submit' value='Edit post' />" end assert_dom_equal expected, output_buffer end def test_form_for_with_symbol_object_name form_for(@post, :as => "other_name", :html => { :id => 'create-post' }) do |f| concat f.label(:title, :class => 'post_title') concat f.text_field(:title) concat f.text_area(:body) concat f.check_box(:secret) concat f.submit('Create post') end expected = whole_form("/posts/123", "create-post", "edit_other_name", :method => "put") do "<label for='other_name_title' class='post_title'>Title</label>" + "<input name='other_name[title]' size='30' id='other_name_title' value='Hello World' type='text' />" + "<textarea name='other_name[body]' id='other_name_body' rows='20' cols='40'>Back to the hill and over it again!</textarea>" + "<input name='other_name[secret]' value='0' type='hidden' />" + "<input name='other_name[secret]' checked='checked' id='other_name_secret' value='1' type='checkbox' />" + "<input name='commit' value='Create post' type='submit' />" end assert_dom_equal expected, output_buffer end def test_form_for_with_method_as_part_of_html_options form_for(@post, :url => '/', :html => { :id => 'create-post', :method => :delete }) do |f| concat f.text_field(:title) concat f.text_area(:body) concat f.check_box(:secret) end expected = whole_form("/", "create-post", "edit_post", "delete") do "<input name='post[title]' size='30' type='text' id='post_title' value='Hello World' />" + "<textarea name='post[body]' id='post_body' rows='20' cols='40'>Back to the hill and over it again!</textarea>" + "<input name='post[secret]' type='hidden' value='0' />" + "<input name='post[secret]' checked='checked' type='checkbox' id='post_secret' value='1' />" end assert_dom_equal expected, output_buffer end def test_form_for_with_method form_for(@post, :url => '/', :method => :delete, :html => { :id => 'create-post' }) do |f| concat f.text_field(:title) concat f.text_area(:body) concat f.check_box(:secret) end expected = 
whole_form("/", "create-post", "edit_post", "delete") do "<input name='post[title]' size='30' type='text' id='post_title' value='Hello World' />" + "<textarea name='post[body]' id='post_body' rows='20' cols='40'>Back to the hill and over it again!</textarea>" + "<input name='post[secret]' type='hidden' value='0' />" + "<input name='post[secret]' checked='checked' type='checkbox' id='post_secret' value='1' />" end assert_dom_equal expected, output_buffer end def test_form_for_with_search_field # Test case for bug which would emit an "object" attribute # when used with form_for using a search_field form helper form_for(Post.new, :url => "/search", :html => { :id => 'search-post', :method => :get}) do |f| concat f.search_field(:title) end expected = whole_form("/search", "search-post", "new_post", "get") do "<input name='post[title]' size='30' type='search' id='post_title' />" end assert_dom_equal expected, output_buffer end def test_form_for_with_remote form_for(@post, :url => '/', :remote => true, :html => { :id => 'create-post', :method => :put }) do |f| concat f.text_field(:title) concat f.text_area(:body) concat f.check_box(:secret) end expected = whole_form("/", "create-post", "edit_post", :method => "put", :remote => true) do "<input name='post[title]' size='30' type='text' id='post_title' value='Hello World' />" + "<textarea name='post[body]' id='post_body' rows='20' cols='40'>Back to the hill and over it again!</textarea>" + "<input name='post[secret]' type='hidden' value='0' />" + "<input name='post[secret]' checked='checked' type='checkbox' id='post_secret' value='1' />" end assert_dom_equal expected, output_buffer end def test_form_for_with_remote_in_html form_for(@post, :url => '/', :html => { :remote => true, :id => 'create-post', :method => :put }) do |f| concat f.text_field(:title) concat f.text_area(:body) concat f.check_box(:secret) end expected = whole_form("/", "create-post", "edit_post", :method => "put", :remote => true) do "<input 
name='post[title]' size='30' type='text' id='post_title' value='Hello World' />" + "<textarea name='post[body]' id='post_body' rows='20' cols='40'>Back to the hill and over it again!</textarea>" + "<input name='post[secret]' type='hidden' value='0' />" + "<input name='post[secret]' checked='checked' type='checkbox' id='post_secret' value='1' />" end assert_dom_equal expected, output_buffer end def test_form_for_with_remote_without_html @post.persisted = false def @post.id; nil; end form_for(@post, :remote => true) do |f| concat f.text_field(:title) concat f.text_area(:body) concat f.check_box(:secret) end expected = whole_form("/posts", 'new_post', 'new_post', :remote => true) do "<input name='post[title]' size='30' type='text' id='post_title' value='Hello World' />" + "<textarea name='post[body]' id='post_body' rows='20' cols='40'>Back to the hill and over it again!</textarea>" + "<input name='post[secret]' type='hidden' value='0' />" + "<input name='post[secret]' checked='checked' type='checkbox' id='post_secret' value='1' />" end assert_dom_equal expected, output_buffer end def test_form_for_without_object form_for(:post, :html => { :id => 'create-post' }) do |f| concat f.text_field(:title) concat f.text_area(:body) concat f.check_box(:secret) end expected = whole_form("/", "create-post") do "<input name='post[title]' size='30' type='text' id='post_title' value='Hello World' />" + "<textarea name='post[body]' id='post_body' rows='20' cols='40'>Back to the hill and over it again!</textarea>" + "<input name='post[secret]' type='hidden' value='0' />" + "<input name='post[secret]' checked='checked' type='checkbox' id='post_secret' value='1' />" end assert_dom_equal expected, output_buffer end def test_form_for_with_index form_for(@post, :as => "post[]") do |f| concat f.label(:title) concat f.text_field(:title) concat f.text_area(:body) concat f.check_box(:secret) end expected = whole_form('/posts/123', 'edit_post[]', 'edit_post[]', 'put') do "<label 
for='post_123_title'>Title</label>" + "<input name='post[123][title]' size='30' type='text' id='post_123_title' value='Hello World' />" + "<textarea name='post[123][body]' id='post_123_body' rows='20' cols='40'>Back to the hill and over it again!</textarea>" + "<input name='post[123][secret]' type='hidden' value='0' />" + "<input name='post[123][secret]' checked='checked' type='checkbox' id='post_123_secret' value='1' />" end assert_dom_equal expected, output_buffer end def test_form_for_with_nil_index_option_override form_for(@post, :as => "post[]", :index => nil) do |f| concat f.text_field(:title) concat f.text_area(:body) concat f.check_box(:secret) end expected = whole_form('/posts/123', 'edit_post[]', 'edit_post[]', 'put') do "<input name='post[][title]' size='30' type='text' id='post__title' value='Hello World' />" + "<textarea name='post[][body]' id='post__body' rows='20' cols='40'>Back to the hill and over it again!</textarea>" + "<input name='post[][secret]' type='hidden' value='0' />" + "<input name='post[][secret]' checked='checked' type='checkbox' id='post__secret' value='1' />" end assert_dom_equal expected, output_buffer end def test_form_for_with_namespace form_for(@post, :namespace => 'namespace') do |f| concat f.text_field(:title) concat f.text_area(:body) concat f.check_box(:secret) end expected = whole_form('/posts/123', 'namespace_edit_post_123', 'edit_post', 'put') do "<input name='post[title]' size='30' type='text' id='namespace_post_title' value='Hello World' />" + "<textarea name='post[body]' id='namespace_post_body' rows='20' cols='40'>Back to the hill and over it again!</textarea>" + "<input name='post[secret]' type='hidden' value='0' />" + "<input name='post[secret]' checked='checked' type='checkbox' id='namespace_post_secret' value='1' />" end assert_dom_equal expected, output_buffer end def test_form_for_with_namespace_with_label form_for(@post, :namespace => 'namespace') do |f| concat f.label(:title) concat f.text_field(:title) end 
expected = whole_form('/posts/123', 'namespace_edit_post_123', 'edit_post', 'put') do "<label for='namespace_post_title'>Title</label>" + "<input name='post[title]' size='30' type='text' id='namespace_post_title' value='Hello World' />" end assert_dom_equal expected, output_buffer end def test_two_form_for_with_namespace form_for(@post, :namespace => 'namespace_1') do |f| concat f.label(:title) concat f.text_field(:title) end expected_1 = whole_form('/posts/123', 'namespace_1_edit_post_123', 'edit_post', 'put') do "<label for='namespace_1_post_title'>Title</label>" + "<input name='post[title]' size='30' type='text' id='namespace_1_post_title' value='Hello World' />" end assert_dom_equal expected_1, output_buffer form_for(@post, :namespace => 'namespace_2') do |f| concat f.label(:title) concat f.text_field(:title) end expected_2 = whole_form('/posts/123', 'namespace_2_edit_post_123', 'edit_post', 'put') do "<label for='namespace_2_post_title'>Title</label>" + "<input name='post[title]' size='30' type='text' id='namespace_2_post_title' value='Hello World' />" end assert_dom_equal expected_2, output_buffer end def test_fields_for_with_namespace @comment.body = 'Hello World' form_for(@post, :namespace => 'namespace') do |f| concat f.text_field(:title) concat f.text_area(:body) concat f.fields_for(@comment) { |c| concat c.text_field(:body) } end expected = whole_form('/posts/123', 'namespace_edit_post_123', 'edit_post', 'put') do "<input name='post[title]' size='30' type='text' id='namespace_post_title' value='Hello World' />" + "<textarea name='post[body]' id='namespace_post_body' rows='20' cols='40'>Back to the hill and over it again!</textarea>" + "<input name='post[comment][body]' size='30' type='text' id='namespace_post_comment_body' value='Hello World' />" end assert_dom_equal expected, output_buffer end def test_submit_with_object_as_new_record_and_locale_strings old_locale, I18n.locale = I18n.locale, :submit @post.persisted = false def @post.id; nil; end 
form_for(@post) do |f| concat f.submit end expected = whole_form('/posts', 'new_post', 'new_post') do "<input name='commit' type='submit' value='Create Post' />" end assert_dom_equal expected, output_buffer ensure I18n.locale = old_locale end def test_submit_with_object_as_existing_record_and_locale_strings old_locale, I18n.locale = I18n.locale, :submit form_for(@post) do |f| concat f.submit end expected = whole_form('/posts/123', 'edit_post_123', 'edit_post', :method => 'put') do "<input name='commit' type='submit' value='Confirm Post changes' />" end assert_dom_equal expected, output_buffer ensure I18n.locale = old_locale end def test_submit_without_object_and_locale_strings old_locale, I18n.locale = I18n.locale, :submit form_for(:post) do |f| concat f.submit :class => "extra" end expected = whole_form do "<input name='commit' class='extra' type='submit' value='Save changes' />" end assert_dom_equal expected, output_buffer ensure I18n.locale = old_locale end def test_submit_with_object_and_nested_lookup old_locale, I18n.locale = I18n.locale, :submit form_for(@post, :as => :another_post) do |f| concat f.submit end expected = whole_form('/posts/123', 'edit_another_post', 'edit_another_post', :method => 'put') do "<input name='commit' type='submit' value='Update your Post' />" end assert_dom_equal expected, output_buffer ensure I18n.locale = old_locale end def test_nested_fields_for @comment.body = 'Hello World' form_for(@post) do |f| concat f.fields_for(@comment) { |c| concat c.text_field(:body) } end expected = whole_form('/posts/123', 'edit_post_123', 'edit_post', :method => 'put') do "<input name='post[comment][body]' size='30' type='text' id='post_comment_body' value='Hello World' />" end assert_dom_equal expected, output_buffer end def test_nested_fields_for_with_nested_collections form_for(@post, :as => 'post[]') do |f| concat f.text_field(:title) concat f.fields_for('comment[]', @comment) { |c| concat c.text_field(:name) } end expected = 
whole_form('/posts/123', 'edit_post[]', 'edit_post[]', 'put') do "<input name='post[123][title]' size='30' type='text' id='post_123_title' value='Hello World' />" + "<input name='post[123][comment][][name]' size='30' type='text' id='post_123_comment__name' value='new comment' />" end assert_dom_equal expected, output_buffer end def test_nested_fields_for_with_index_and_parent_fields form_for(@post, :index => 1) do |c| concat c.text_field(:title) concat c.fields_for('comment', @comment, :index => 1) { |r| concat r.text_field(:name) } end expected = whole_form('/posts/123', 'edit_post_123', 'edit_post', 'put') do "<input name='post[1][title]' size='30' type='text' id='post_1_title' value='Hello World' />" + "<input name='post[1][comment][1][name]' size='30' type='text' id='post_1_comment_1_name' value='new comment' />" end assert_dom_equal expected, output_buffer end def test_form_for_with_index_and_nested_fields_for output_buffer = form_for(@post, :index => 1) do |f| concat f.fields_for(:comment, @post) { |c| concat c.text_field(:title) } end expected = whole_form('/posts/123', 'edit_post_123', 'edit_post', 'put') do "<input name='post[1][comment][title]' size='30' type='text' id='post_1_comment_title' value='Hello World' />" end assert_dom_equal expected, output_buffer end def test_nested_fields_for_with_index_on_both form_for(@post, :index => 1) do |f| concat f.fields_for(:comment, @post, :index => 5) { |c| concat c.text_field(:title) } end expected = whole_form('/posts/123', 'edit_post_123', 'edit_post', 'put') do "<input name='post[1][comment][5][title]' size='30' type='text' id='post_1_comment_5_title' value='Hello World' />" end assert_dom_equal expected, output_buffer end def test_nested_fields_for_with_auto_index form_for(@post, :as => "post[]") do |f| concat f.fields_for(:comment, @post) { |c| concat c.text_field(:title) } end expected = whole_form('/posts/123', 'edit_post[]', 'edit_post[]', 'put') do "<input name='post[123][comment][title]' size='30' 
type='text' id='post_123_comment_title' value='Hello World' />" end assert_dom_equal expected, output_buffer end def test_nested_fields_for_with_index_radio_button form_for(@post) do |f| concat f.fields_for(:comment, @post, :index => 5) { |c| concat c.radio_button(:title, "hello") } end expected = whole_form('/posts/123', 'edit_post_123', 'edit_post', 'put') do "<input name='post[comment][5][title]' type='radio' id='post_comment_5_title_hello' value='hello' />" end assert_dom_equal expected, output_buffer end def test_nested_fields_for_with_auto_index_on_both form_for(@post, :as => "post[]") do |f| concat f.fields_for("comment[]", @post) { |c| concat c.text_field(:title) } end expected = whole_form('/posts/123', 'edit_post[]', 'edit_post[]', 'put') do "<input name='post[123][comment][123][title]' size='30' type='text' id='post_123_comment_123_title' value='Hello World' />" end assert_dom_equal expected, output_buffer end def test_nested_fields_for_with_index_and_auto_index output_buffer = form_for(@post, :as => "post[]") do |f| concat f.fields_for(:comment, @post, :index => 5) { |c| concat c.text_field(:title) } end output_buffer << form_for(@post, :as => :post, :index => 1) do |f| concat f.fields_for("comment[]", @post) { |c| concat c.text_field(:title) } end expected = whole_form('/posts/123', 'edit_post[]', 'edit_post[]', 'put') do "<input name='post[123][comment][5][title]' size='30' type='text' id='post_123_comment_5_title' value='Hello World' />" end + whole_form('/posts/123', 'edit_post', 'edit_post', 'put') do "<input name='post[1][comment][123][title]' size='30' type='text' id='post_1_comment_123_title' value='Hello World' />" end assert_dom_equal expected, output_buffer end def test_nested_fields_for_with_a_new_record_on_a_nested_attributes_one_to_one_association @post.author = Author.new form_for(@post) do |f| concat f.text_field(:title) concat f.fields_for(:author) { |af| concat af.text_field(:name) } end expected = whole_form('/posts/123', 
'edit_post_123', 'edit_post', :method => 'put') do '<input name="post[title]" size="30" type="text" id="post_title" value="Hello World" />' + '<input id="post_author_attributes_name" name="post[author_attributes][name]" size="30" type="text" value="new author" />' end assert_dom_equal expected, output_buffer end def test_nested_fields_for_with_explicitly_passed_object_on_a_nested_attributes_one_to_one_association form_for(@post) do |f| f.fields_for(:author, Author.new(123)) do |af| assert_not_nil af.object assert_equal 123, af.object.id end end end def test_nested_fields_for_with_an_existing_record_on_a_nested_attributes_one_to_one_association @post.author = Author.new(321) form_for(@post) do |f| concat f.text_field(:title) concat f.fields_for(:author) { |af| concat af.text_field(:name) } end expected = whole_form('/posts/123', 'edit_post_123', 'edit_post', :method => 'put') do '<input name="post[title]" size="30" type="text" id="post_title" value="Hello World" />' + '<input id="post_author_attributes_name" name="post[author_attributes][name]" size="30" type="text" value="author #321" />' + '<input id="post_author_attributes_id" name="post[author_attributes][id]" type="hidden" value="321" />' end assert_dom_equal expected, output_buffer end def test_nested_fields_for_with_an_existing_record_on_a_nested_attributes_one_to_one_association_using_erb_and_inline_block @post.author = Author.new(321) form_for(@post) do |f| concat f.text_field(:title) concat f.fields_for(:author) { |af| af.text_field(:name) } end expected = whole_form('/posts/123', 'edit_post_123', 'edit_post', :method => 'put') do '<input name="post[title]" size="30" type="text" id="post_title" value="Hello World" />' + '<input id="post_author_attributes_name" name="post[author_attributes][name]" size="30" type="text" value="author #321" />' + '<input id="post_author_attributes_id" name="post[author_attributes][id]" type="hidden" value="321" />' end assert_dom_equal expected, output_buffer end def 
test_nested_fields_for_with_an_existing_record_on_a_nested_attributes_one_to_one_association_with_disabled_hidden_id @post.author = Author.new(321) form_for(@post) do |f| concat f.text_field(:title) concat f.fields_for(:author, :include_id => false) { |af| af.text_field(:name) } end expected = whole_form('/posts/123', 'edit_post_123', 'edit_post', :method => 'put') do '<input name="post[title]" size="30" type="text" id="post_title" value="Hello World" />' + '<input id="post_author_attributes_name" name="post[author_attributes][name]" size="30" type="text" value="author #321" />' end assert_dom_equal expected, output_buffer end def test_nested_fields_for_with_an_existing_record_on_a_nested_attributes_one_to_one_association_with_disabled_hidden_id_inherited @post.author = Author.new(321) form_for(@post, :include_id => false) do |f| concat f.text_field(:title) concat f.fields_for(:author) { |af| af.text_field(:name) } end expected = whole_form('/posts/123', 'edit_post_123', 'edit_post', :method => 'put') do '<input name="post[title]" size="30" type="text" id="post_title" value="Hello World" />' + '<input id="post_author_attributes_name" name="post[author_attributes][name]" size="30" type="text" value="author #321" />' end assert_dom_equal expected, output_buffer end def test_nested_fields_for_with_an_existing_record_on_a_nested_attributes_one_to_one_association_with_disabled_hidden_id_override @post.author = Author.new(321) form_for(@post, :include_id => false) do |f| concat f.text_field(:title) concat f.fields_for(:author, :include_id => true) { |af| af.text_field(:name) } end expected = whole_form('/posts/123', 'edit_post_123', 'edit_post', :method => 'put') do '<input name="post[title]" size="30" type="text" id="post_title" value="Hello World" />' + '<input id="post_author_attributes_name" name="post[author_attributes][name]" size="30" type="text" value="author #321" />' + '<input id="post_author_attributes_id" name="post[author_attributes][id]" type="hidden" 
value="321" />' end assert_dom_equal expected, output_buffer end def test_nested_fields_for_with_existing_records_on_a_nested_attributes_one_to_one_association_with_explicit_hidden_field_placement @post.author = Author.new(321) form_for(@post) do |f| concat f.text_field(:title) concat f.fields_for(:author) { |af| concat af.hidden_field(:id) concat af.text_field(:name) } end expected = whole_form('/posts/123', 'edit_post_123', 'edit_post', :method => 'put') do '<input name="post[title]" size="30" type="text" id="post_title" value="Hello World" />' + '<input id="post_author_attributes_id" name="post[author_attributes][id]" type="hidden" value="321" />' + '<input id="post_author_attributes_name" name="post[author_attributes][name]" size="30" type="text" value="author #321" />' end assert_dom_equal expected, output_buffer end def test_nested_fields_for_with_existing_records_on_a_nested_attributes_collection_association @post.comments = Array.new(2) { |id| Comment.new(id + 1) } form_for(@post) do |f| concat f.text_field(:title) @post.comments.each do |comment| concat f.fields_for(:comments, comment) { |cf| concat cf.text_field(:name) } end end expected = whole_form('/posts/123', 'edit_post_123', 'edit_post', :method => 'put') do '<input name="post[title]" size="30" type="text" id="post_title" value="Hello World" />' + '<input id="post_comments_attributes_0_name" name="post[comments_attributes][0][name]" size="30" type="text" value="comment #1" />' + '<input id="post_comments_attributes_0_id" name="post[comments_attributes][0][id]" type="hidden" value="1" />' + '<input id="post_comments_attributes_1_name" name="post[comments_attributes][1][name]" size="30" type="text" value="comment #2" />' + '<input id="post_comments_attributes_1_id" name="post[comments_attributes][1][id]" type="hidden" value="2" />' end assert_dom_equal expected, output_buffer end def test_nested_fields_for_with_existing_records_on_a_nested_attributes_collection_association_with_disabled_hidden_id 
@post.comments = Array.new(2) { |id| Comment.new(id + 1) } @post.author = Author.new(321) form_for(@post) do |f| concat f.text_field(:title) concat f.fields_for(:author) { |af| concat af.text_field(:name) } @post.comments.each do |comment| concat f.fields_for(:comments, comment, :include_id => false) { |cf| concat cf.text_field(:name) } end end expected = whole_form('/posts/123', 'edit_post_123', 'edit_post', :method => 'put') do '<input name="post[title]" size="30" type="text" id="post_title" value="Hello World" />' + '<input id="post_author_attributes_name" name="post[author_attributes][name]" size="30" type="text" value="author #321" />' + '<input id="post_author_attributes_id" name="post[author_attributes][id]" type="hidden" value="321" />' + '<input id="post_comments_attributes_0_name" name="post[comments_attributes][0][name]" size="30" type="text" value="comment #1" />' + '<input id="post_comments_attributes_1_name" name="post[comments_attributes][1][name]" size="30" type="text" value="comment #2" />' end assert_dom_equal expected, output_buffer end def test_nested_fields_for_with_existing_records_on_a_nested_attributes_collection_association_with_disabled_hidden_id_inherited @post.comments = Array.new(2) { |id| Comment.new(id + 1) } @post.author = Author.new(321) form_for(@post, :include_id => false) do |f| concat f.text_field(:title) concat f.fields_for(:author) { |af| concat af.text_field(:name) } @post.comments.each do |comment| concat f.fields_for(:comments, comment) { |cf| concat cf.text_field(:name) } end end expected = whole_form('/posts/123', 'edit_post_123', 'edit_post', :method => 'put') do '<input name="post[title]" size="30" type="text" id="post_title" value="Hello World" />' + '<input id="post_author_attributes_name" name="post[author_attributes][name]" size="30" type="text" value="author #321" />' + '<input id="post_comments_attributes_0_name" name="post[comments_attributes][0][name]" size="30" type="text" value="comment #1" />' + '<input 
id="post_comments_attributes_1_name" name="post[comments_attributes][1][name]" size="30" type="text" value="comment #2" />' end assert_dom_equal expected, output_buffer end def test_nested_fields_for_with_existing_records_on_a_nested_attributes_collection_association_with_disabled_hidden_id_override @post.comments = Array.new(2) { |id| Comment.new(id + 1) } @post.author = Author.new(321) form_for(@post, :include_id => false) do |f| concat f.text_field(:title) concat f.fields_for(:author, :include_id => true) { |af| concat af.text_field(:name) } @post.comments.each do |comment| concat f.fields_for(:comments, comment) { |cf| concat cf.text_field(:name) } end end expected = whole_form('/posts/123', 'edit_post_123', 'edit_post', :method => 'put') do '<input name="post[title]" size="30" type="text" id="post_title" value="Hello World" />' + '<input id="post_author_attributes_name" name="post[author_attributes][name]" size="30" type="text" value="author #321" />' + '<input id="post_author_attributes_id" name="post[author_attributes][id]" type="hidden" value="321" />' + '<input id="post_comments_attributes_0_name" name="post[comments_attributes][0][name]" size="30" type="text" value="comment #1" />' + '<input id="post_comments_attributes_1_name" name="post[comments_attributes][1][name]" size="30" type="text" value="comment #2" />' end assert_dom_equal expected, output_buffer end def test_nested_fields_for_with_existing_records_on_a_nested_attributes_collection_association_using_erb_and_inline_block @post.comments = Array.new(2) { |id| Comment.new(id + 1) } form_for(@post) do |f| concat f.text_field(:title) @post.comments.each do |comment| concat f.fields_for(:comments, comment) { |cf| cf.text_field(:name) } end end expected = whole_form('/posts/123', 'edit_post_123', 'edit_post', :method => 'put') do '<input name="post[title]" size="30" type="text" id="post_title" value="Hello World" />' + '<input id="post_comments_attributes_0_name" 
name="post[comments_attributes][0][name]" size="30" type="text" value="comment #1" />' + '<input id="post_comments_attributes_0_id" name="post[comments_attributes][0][id]" type="hidden" value="1" />' + '<input id="post_comments_attributes_1_name" name="post[comments_attributes][1][name]" size="30" type="text" value="comment #2" />' + '<input id="post_comments_attributes_1_id" name="post[comments_attributes][1][id]" type="hidden" value="2" />' end assert_dom_equal expected, output_buffer end def test_nested_fields_for_with_existing_records_on_a_nested_attributes_collection_association_with_explicit_hidden_field_placement @post.comments = Array.new(2) { |id| Comment.new(id + 1) } form_for(@post) do |f| concat f.text_field(:title) @post.comments.each do |comment| concat f.fields_for(:comments, comment) { |cf| concat cf.hidden_field(:id) concat cf.text_field(:name) } end end expected = whole_form('/posts/123', 'edit_post_123', 'edit_post', :method => 'put') do '<input name="post[title]" size="30" type="text" id="post_title" value="Hello World" />' + '<input id="post_comments_attributes_0_id" name="post[comments_attributes][0][id]" type="hidden" value="1" />' + '<input id="post_comments_attributes_0_name" name="post[comments_attributes][0][name]" size="30" type="text" value="comment #1" />' + '<input id="post_comments_attributes_1_id" name="post[comments_attributes][1][id]" type="hidden" value="2" />' + '<input id="post_comments_attributes_1_name" name="post[comments_attributes][1][name]" size="30" type="text" value="comment #2" />' end assert_dom_equal expected, output_buffer end def test_nested_fields_for_with_new_records_on_a_nested_attributes_collection_association @post.comments = [Comment.new, Comment.new] form_for(@post) do |f| concat f.text_field(:title) @post.comments.each do |comment| concat f.fields_for(:comments, comment) { |cf| concat cf.text_field(:name) } end end expected = whole_form('/posts/123', 'edit_post_123', 'edit_post', :method => 'put') do 
'<input name="post[title]" size="30" type="text" id="post_title" value="Hello World" />' + '<input id="post_comments_attributes_0_name" name="post[comments_attributes][0][name]" size="30" type="text" value="new comment" />' + '<input id="post_comments_attributes_1_name" name="post[comments_attributes][1][name]" size="30" type="text" value="new comment" />' end assert_dom_equal expected, output_buffer end def test_nested_fields_for_with_existing_and_new_records_on_a_nested_attributes_collection_association @post.comments = [Comment.new(321), Comment.new] form_for(@post) do |f| concat f.text_field(:title) @post.comments.each do |comment| concat f.fields_for(:comments, comment) { |cf| concat cf.text_field(:name) } end end expected = whole_form('/posts/123', 'edit_post_123', 'edit_post', :method => 'put') do '<input name="post[title]" size="30" type="text" id="post_title" value="Hello World" />' + '<input id="post_comments_attributes_0_name" name="post[comments_attributes][0][name]" size="30" type="text" value="comment #321" />' + '<input id="post_comments_attributes_0_id" name="post[comments_attributes][0][id]" type="hidden" value="321" />' + '<input id="post_comments_attributes_1_name" name="post[comments_attributes][1][name]" size="30" type="text" value="new comment" />' end assert_dom_equal expected, output_buffer end def test_nested_fields_for_with_an_empty_supplied_attributes_collection form_for(@post) do |f| concat f.text_field(:title) f.fields_for(:comments, []) do |cf| concat cf.text_field(:name) end end expected = whole_form('/posts/123', 'edit_post_123', 'edit_post', :method => 'put') do '<input name="post[title]" size="30" type="text" id="post_title" value="Hello World" />' end assert_dom_equal expected, output_buffer end def test_nested_fields_for_with_existing_records_on_a_supplied_nested_attributes_collection @post.comments = Array.new(2) { |id| Comment.new(id + 1) } form_for(@post) do |f| concat f.text_field(:title) concat f.fields_for(:comments, 
@post.comments) { |cf| concat cf.text_field(:name) } end expected = whole_form('/posts/123', 'edit_post_123', 'edit_post', :method => 'put') do '<input name="post[title]" size="30" type="text" id="post_title" value="Hello World" />' + '<input id="post_comments_attributes_0_name" name="post[comments_attributes][0][name]" size="30" type="text" value="comment #1" />' + '<input id="post_comments_attributes_0_id" name="post[comments_attributes][0][id]" type="hidden" value="1" />' + '<input id="post_comments_attributes_1_name" name="post[comments_attributes][1][name]" size="30" type="text" value="comment #2" />' + '<input id="post_comments_attributes_1_id" name="post[comments_attributes][1][id]" type="hidden" value="2" />' end assert_dom_equal expected, output_buffer end def test_nested_fields_for_arel_like @post.comments = ArelLike.new form_for(@post) do |f| concat f.text_field(:title) concat f.fields_for(:comments, @post.comments) { |cf| concat cf.text_field(:name) } end expected = whole_form('/posts/123', 'edit_post_123', 'edit_post', :method => 'put') do '<input name="post[title]" size="30" type="text" id="post_title" value="Hello World" />' + '<input id="post_comments_attributes_0_name" name="post[comments_attributes][0][name]" size="30" type="text" value="comment #1" />' + '<input id="post_comments_attributes_0_id" name="post[comments_attributes][0][id]" type="hidden" value="1" />' + '<input id="post_comments_attributes_1_name" name="post[comments_attributes][1][name]" size="30" type="text" value="comment #2" />' + '<input id="post_comments_attributes_1_id" name="post[comments_attributes][1][id]" type="hidden" value="2" />' end assert_dom_equal expected, output_buffer end def test_nested_fields_for_with_existing_records_on_a_supplied_nested_attributes_collection_different_from_record_one comments = Array.new(2) { |id| Comment.new(id + 1) } @post.comments = [] form_for(@post) do |f| concat f.text_field(:title) concat f.fields_for(:comments, comments) { |cf| concat 
cf.text_field(:name) } end expected = whole_form('/posts/123', 'edit_post_123', 'edit_post', :method => 'put') do '<input name="post[title]" size="30" type="text" id="post_title" value="Hello World" />' + '<input id="post_comments_attributes_0_name" name="post[comments_attributes][0][name]" size="30" type="text" value="comment #1" />' + '<input id="post_comments_attributes_0_id" name="post[comments_attributes][0][id]" type="hidden" value="1" />' + '<input id="post_comments_attributes_1_name" name="post[comments_attributes][1][name]" size="30" type="text" value="comment #2" />' + '<input id="post_comments_attributes_1_id" name="post[comments_attributes][1][id]" type="hidden" value="2" />' end assert_dom_equal expected, output_buffer end def test_nested_fields_for_on_a_nested_attributes_collection_association_yields_only_builder @post.comments = [Comment.new(321), Comment.new] yielded_comments = [] form_for(@post) do |f| concat f.text_field(:title) concat f.fields_for(:comments) { |cf| concat cf.text_field(:name) yielded_comments << cf.object } end expected = whole_form('/posts/123', 'edit_post_123', 'edit_post', :method => 'put') do '<input name="post[title]" size="30" type="text" id="post_title" value="Hello World" />' + '<input id="post_comments_attributes_0_name" name="post[comments_attributes][0][name]" size="30" type="text" value="comment #321" />' + '<input id="post_comments_attributes_0_id" name="post[comments_attributes][0][id]" type="hidden" value="321" />' + '<input id="post_comments_attributes_1_name" name="post[comments_attributes][1][name]" size="30" type="text" value="new comment" />' end assert_dom_equal expected, output_buffer assert_equal yielded_comments, @post.comments end def test_nested_fields_for_with_child_index_option_override_on_a_nested_attributes_collection_association @post.comments = [] form_for(@post) do |f| concat f.fields_for(:comments, Comment.new(321), :child_index => 'abc') { |cf| concat cf.text_field(:name) } end expected = 
whole_form('/posts/123', 'edit_post_123', 'edit_post', :method => 'put') do '<input id="post_comments_attributes_abc_name" name="post[comments_attributes][abc][name]" size="30" type="text" value="comment #321" />' + '<input id="post_comments_attributes_abc_id" name="post[comments_attributes][abc][id]" type="hidden" value="321" />' end assert_dom_equal expected, output_buffer end def test_nested_fields_uses_unique_indices_for_different_collection_associations @post.comments = [Comment.new(321)] @post.tags = [Tag.new(123), Tag.new(456)] @post.comments[0].relevances = [] @post.tags[0].relevances = [] @post.tags[1].relevances = [] form_for(@post) do |f| concat f.fields_for(:comments, @post.comments[0]) { |cf| concat cf.text_field(:name) concat cf.fields_for(:relevances, CommentRelevance.new(314)) { |crf| concat crf.text_field(:value) } } concat f.fields_for(:tags, @post.tags[0]) { |tf| concat tf.text_field(:value) concat tf.fields_for(:relevances, TagRelevance.new(3141)) { |trf| concat trf.text_field(:value) } } concat f.fields_for('tags', @post.tags[1]) { |tf| concat tf.text_field(:value) concat tf.fields_for(:relevances, TagRelevance.new(31415)) { |trf| concat trf.text_field(:value) } } end expected = whole_form('/posts/123', 'edit_post_123', 'edit_post', :method => 'put') do '<input id="post_comments_attributes_0_name" name="post[comments_attributes][0][name]" size="30" type="text" value="comment #321" />' + '<input id="post_comments_attributes_0_relevances_attributes_0_value" name="post[comments_attributes][0][relevances_attributes][0][value]" size="30" type="text" value="commentrelevance #314" />' + '<input id="post_comments_attributes_0_relevances_attributes_0_id" name="post[comments_attributes][0][relevances_attributes][0][id]" type="hidden" value="314" />' + '<input id="post_comments_attributes_0_id" name="post[comments_attributes][0][id]" type="hidden" value="321" />' + '<input id="post_tags_attributes_0_value" name="post[tags_attributes][0][value]" size="30" 
type="text" value="tag #123" />' + '<input id="post_tags_attributes_0_relevances_attributes_0_value" name="post[tags_attributes][0][relevances_attributes][0][value]" size="30" type="text" value="tagrelevance #3141" />' + '<input id="post_tags_attributes_0_relevances_attributes_0_id" name="post[tags_attributes][0][relevances_attributes][0][id]" type="hidden" value="3141" />' + '<input id="post_tags_attributes_0_id" name="post[tags_attributes][0][id]" type="hidden" value="123" />' + '<input id="post_tags_attributes_1_value" name="post[tags_attributes][1][value]" size="30" type="text" value="tag #456" />' + '<input id="post_tags_attributes_1_relevances_attributes_0_value" name="post[tags_attributes][1][relevances_attributes][0][value]" size="30" type="text" value="tagrelevance #31415" />' + '<input id="post_tags_attributes_1_relevances_attributes_0_id" name="post[tags_attributes][1][relevances_attributes][0][id]" type="hidden" value="31415" />' + '<input id="post_tags_attributes_1_id" name="post[tags_attributes][1][id]" type="hidden" value="456" />' end assert_dom_equal expected, output_buffer end def test_nested_fields_for_with_hash_like_model @author = HashBackedAuthor.new form_for(@post) do |f| concat f.fields_for(:author, @author) { |af| concat af.text_field(:name) } end expected = whole_form('/posts/123', 'edit_post_123', 'edit_post', :method => 'put') do '<input id="post_author_attributes_name" name="post[author_attributes][name]" size="30" type="text" value="hash backed author" />' end assert_dom_equal expected, output_buffer end def test_fields_for output_buffer = fields_for(:post, @post) do |f| concat f.text_field(:title) concat f.text_area(:body) concat f.check_box(:secret) end expected = "<input name='post[title]' size='30' type='text' id='post_title' value='Hello World' />" + "<textarea name='post[body]' id='post_body' rows='20' cols='40'>Back to the hill and over it again!</textarea>" + "<input name='post[secret]' type='hidden' value='0' />" + "<input 
name='post[secret]' checked='checked' type='checkbox' id='post_secret' value='1' />" assert_dom_equal expected, output_buffer end def test_fields_for_with_index output_buffer = fields_for("post[]", @post) do |f| concat f.text_field(:title) concat f.text_area(:body) concat f.check_box(:secret) end expected = "<input name='post[123][title]' size='30' type='text' id='post_123_title' value='Hello World' />" + "<textarea name='post[123][body]' id='post_123_body' rows='20' cols='40'>Back to the hill and over it again!</textarea>" + "<input name='post[123][secret]' type='hidden' value='0' />" + "<input name='post[123][secret]' checked='checked' type='checkbox' id='post_123_secret' value='1' />" assert_dom_equal expected, output_buffer end def test_fields_for_with_nil_index_option_override output_buffer = fields_for("post[]", @post, :index => nil) do |f| concat f.text_field(:title) concat f.text_area(:body) concat f.check_box(:secret) end expected = "<input name='post[][title]' size='30' type='text' id='post__title' value='Hello World' />" + "<textarea name='post[][body]' id='post__body' rows='20' cols='40'>Back to the hill and over it again!</textarea>" + "<input name='post[][secret]' type='hidden' value='0' />" + "<input name='post[][secret]' checked='checked' type='checkbox' id='post__secret' value='1' />" assert_dom_equal expected, output_buffer end def test_fields_for_with_index_option_override output_buffer = fields_for("post[]", @post, :index => "abc") do |f| concat f.text_field(:title) concat f.text_area(:body) concat f.check_box(:secret) end expected = "<input name='post[abc][title]' size='30' type='text' id='post_abc_title' value='Hello World' />" + "<textarea name='post[abc][body]' id='post_abc_body' rows='20' cols='40'>Back to the hill and over it again!</textarea>" + "<input name='post[abc][secret]' type='hidden' value='0' />" + "<input name='post[abc][secret]' checked='checked' type='checkbox' id='post_abc_secret' value='1' />" assert_dom_equal expected, 
output_buffer end def test_fields_for_without_object output_buffer = fields_for(:post) do |f| concat f.text_field(:title) concat f.text_area(:body) concat f.check_box(:secret) end expected = "<input name='post[title]' size='30' type='text' id='post_title' value='Hello World' />" + "<textarea name='post[body]' id='post_body' rows='20' cols='40'>Back to the hill and over it again!</textarea>" + "<input name='post[secret]' type='hidden' value='0' />" + "<input name='post[secret]' checked='checked' type='checkbox' id='post_secret' value='1' />" assert_dom_equal expected, output_buffer end def test_fields_for_with_only_object output_buffer = fields_for(@post) do |f| concat f.text_field(:title) concat f.text_area(:body) concat f.check_box(:secret) end expected = "<input name='post[title]' size='30' type='text' id='post_title' value='Hello World' />" + "<textarea name='post[body]' id='post_body' rows='20' cols='40'>Back to the hill and over it again!</textarea>" + "<input name='post[secret]' type='hidden' value='0' />" + "<input name='post[secret]' checked='checked' type='checkbox' id='post_secret' value='1' />" assert_dom_equal expected, output_buffer end def test_fields_for_object_with_bracketed_name output_buffer = fields_for("author[post]", @post) do |f| concat f.label(:title) concat f.text_field(:title) end assert_dom_equal "<label for=\"author_post_title\">Title</label>" + "<input name='author[post][title]' size='30' type='text' id='author_post_title' value='Hello World' />", output_buffer end def test_fields_for_object_with_bracketed_name_and_index output_buffer = fields_for("author[post]", @post, :index => 1) do |f| concat f.label(:title) concat f.text_field(:title) end assert_dom_equal "<label for=\"author_post_1_title\">Title</label>" + "<input name='author[post][1][title]' size='30' type='text' id='author_post_1_title' value='Hello World' />", output_buffer end def test_form_builder_does_not_have_form_for_method assert ! 
ActionView::Helpers::FormBuilder.instance_methods.include?('form_for') end def test_form_for_and_fields_for form_for(@post, :as => :post, :html => { :id => 'create-post' }) do |post_form| concat post_form.text_field(:title) concat post_form.text_area(:body) concat fields_for(:parent_post, @post) { |parent_fields| concat parent_fields.check_box(:secret) } end expected = whole_form('/posts/123', 'create-post', 'edit_post', :method => 'put') do "<input name='post[title]' size='30' type='text' id='post_title' value='Hello World' />" + "<textarea name='post[body]' id='post_body' rows='20' cols='40'>Back to the hill and over it again!</textarea>" + "<input name='parent_post[secret]' type='hidden' value='0' />" + "<input name='parent_post[secret]' checked='checked' type='checkbox' id='parent_post_secret' value='1' />" end assert_dom_equal expected, output_buffer end def test_form_for_and_fields_for_with_object form_for(@post, :as => :post, :html => { :id => 'create-post' }) do |post_form| concat post_form.text_field(:title) concat post_form.text_area(:body) concat post_form.fields_for(@comment) { |comment_fields| concat comment_fields.text_field(:name) } end expected = whole_form('/posts/123', 'create-post', 'edit_post', :method => 'put') do "<input name='post[title]' size='30' type='text' id='post_title' value='Hello World' />" + "<textarea name='post[body]' id='post_body' rows='20' cols='40'>Back to the hill and over it again!</textarea>" + "<input name='post[comment][name]' type='text' id='post_comment_name' value='new comment' size='30' />" end assert_dom_equal expected, output_buffer end def test_form_for_and_fields_for_with_non_nested_association_and_without_object form_for(@post) do |f| concat f.fields_for(:category) { |c| concat c.text_field(:name) } end expected = whole_form('/posts/123', 'edit_post_123', 'edit_post', 'put') do "<input name='post[category][name]' type='text' size='30' id='post_category_name' />" end assert_dom_equal expected, output_buffer end 
# Custom FormBuilder used by the *_labelled_builder tests below. It redefines
# every field helper except hidden_field so the rendered input is wrapped as
# "<label for='FIELD'>Field:</label> INPUT<br/>". The outer #{selector} is
# interpolated at class_eval time; the \#{field} escapes survive into the
# generated method body and are interpolated per call.
class LabelledFormBuilder < ActionView::Helpers::FormBuilder
  (field_helpers - %w(hidden_field)).each do |selector|
    class_eval <<-RUBY_EVAL, __FILE__, __LINE__ + 1
      def #{selector}(field, *args, &proc)
        ("<label for='\#{field}'>\#{field.to_s.humanize}:</label> " + super + "<br/>").html_safe
      end
    RUBY_EVAL
  end
end

# form_for(:builder => LabelledFormBuilder) should route every field helper
# through the custom builder, producing the label-wrapped markup.
def test_form_for_with_labelled_builder
  form_for(@post, :builder => LabelledFormBuilder) do |f|
    concat f.text_field(:title)
    concat f.text_area(:body)
    concat f.check_box(:secret)
  end

  expected = whole_form('/posts/123', 'edit_post_123', 'edit_post', :method => 'put') do
    "<label for='title'>Title:</label> <input name='post[title]' size='30' type='text' id='post_title' value='Hello World' /><br/>" +
    "<label for='body'>Body:</label> <textarea name='post[body]' id='post_body' rows='20' cols='40'>Back to the hill and over it again!</textarea><br/>" +
    "<label for='secret'>Secret:</label> <input name='post[secret]' type='hidden' value='0' /><input name='post[secret]' checked='checked' type='checkbox' id='post_secret' value='1' /><br/>"
  end

  assert_dom_equal expected, output_buffer
end

# Builds the hidden div Rails emits at the top of every form: the utf8
# snowman input, plus a _method override input for verbs other than GET/POST.
def hidden_fields(method = nil)
  txt = %{<div style="margin:0;padding:0;display:inline">}
  txt << %{<input name="utf8" type="hidden" value="&#x2713;" />}
  if method && !method.to_s.in?(['get', 'post'])
    txt << %{<input name="_method" type="hidden" value="#{method}" />}
  end
  txt << %{</div>}
end

# Builds the opening <form> tag with the given attributes. Any method other
# than "get" collapses to "post" (the real verb travels in _method above).
def form_text(action = "/", id = nil, html_class = nil, remote = nil, multipart = nil, method = nil)
  txt = %{<form accept-charset="UTF-8" action="#{action}"}
  txt << %{ enctype="multipart/form-data"} if multipart
  txt << %{ data-remote="true"} if remote
  txt << %{ class="#{html_class}"} if html_class
  txt << %{ id="#{id}"} if id
  method = method.to_s == "get" ? "get" : "post"
  txt << %{ method="#{method}">}
end

# Assembles a complete expected form: opening tag, hidden div, the block's
# contents (if a block was given), and the closing tag. `options` may be a
# Hash (:method, :remote, :multipart) or a bare method value.
def whole_form(action = "/", id = nil, html_class = nil, options = nil)
  contents = block_given? ? yield : ""

  if options.is_a?(Hash)
    method, remote, multipart = options.values_at(:method, :remote, :multipart)
  else
    method = options
  end

  form_text(action, id, html_class, remote, multipart, method) + hidden_fields(method) + contents + "</form>"
end

# Swapping ActionView::Base.default_form_builder should make plain form_for
# use the custom builder; the ensure restores the global for later tests.
def test_default_form_builder
  old_default_form_builder, ActionView::Base.default_form_builder =
    ActionView::Base.default_form_builder, LabelledFormBuilder

  form_for(@post) do |f|
    concat f.text_field(:title)
    concat f.text_area(:body)
    concat f.check_box(:secret)
  end

  expected = whole_form('/posts/123', 'edit_post_123', 'edit_post', :method => 'put') do
    "<label for='title'>Title:</label> <input name='post[title]' size='30' type='text' id='post_title' value='Hello World' /><br/>" +
    "<label for='body'>Body:</label> <textarea name='post[body]' id='post_body' rows='20' cols='40'>Back to the hill and over it again!</textarea><br/>" +
    "<label for='secret'>Secret:</label> <input name='post[secret]' type='hidden' value='0' /><input name='post[secret]' checked='checked' type='checkbox' id='post_secret' value='1' /><br/>"
  end

  assert_dom_equal expected, output_buffer
ensure
  ActionView::Base.default_form_builder = old_default_form_builder
end

# fields_for should also honor an explicit :builder option.
def test_fields_for_with_labelled_builder
  output_buffer = fields_for(:post, @post, :builder => LabelledFormBuilder) do |f|
    concat f.text_field(:title)
    concat f.text_area(:body)
    concat f.check_box(:secret)
  end

  expected =
    "<label for='title'>Title:</label> <input name='post[title]' size='30' type='text' id='post_title' value='Hello World' /><br/>" +
    "<label for='body'>Body:</label> <textarea name='post[body]' id='post_body' rows='20' cols='40'>Back to the hill and over it again!</textarea><br/>" +
    "<label for='secret'>Secret:</label> <input name='post[secret]' type='hidden' value='0' /><input name='post[secret]' checked='checked' type='checkbox' id='post_secret' value='1' /><br/>"

  assert_dom_equal expected, output_buffer
end

# Nested fields_for (no options hash) should inherit the parent's builder.
def test_form_for_with_labelled_builder_with_nested_fields_for_without_options_hash
  klass = nil

  form_for(@post, :builder => LabelledFormBuilder) do |f|
    f.fields_for(:comments, Comment.new) do |nested_fields|
      klass = nested_fields.class
      ''
    end
  end

  assert_equal LabelledFormBuilder, klass
end

# Same inheritance when an options hash (without :builder) is passed.
def test_form_for_with_labelled_builder_with_nested_fields_for_with_options_hash
  klass = nil

  form_for(@post, :builder => LabelledFormBuilder) do |f|
    f.fields_for(:comments, Comment.new, :index => 'foo') do |nested_fields|
      klass = nested_fields.class
      ''
    end
  end

  assert_equal LabelledFormBuilder, klass
end

# The builder's partial path is derived from its class name
# (LabelledFormBuilder -> 'labelled_form').
def test_form_for_with_labelled_builder_path
  path = nil

  form_for(@post, :builder => LabelledFormBuilder) do |f|
    path = f.to_partial_path
    ''
  end

  assert_equal 'labelled_form', path
end

class LabelledFormBuilderSubclass < LabelledFormBuilder; end

# An explicit :builder on the nested fields_for overrides the parent's.
def test_form_for_with_labelled_builder_with_nested_fields_for_with_custom_builder
  klass = nil

  form_for(@post, :builder => LabelledFormBuilder) do |f|
    f.fields_for(:comments, Comment.new, :builder => LabelledFormBuilderSubclass) do |nested_fields|
      klass = nested_fields.class
      ''
    end
  end

  assert_equal LabelledFormBuilderSubclass, klass
end

# :html options (:id, :class) should land on the <form> tag itself.
def test_form_for_with_html_options_adds_options_to_form_tag
  form_for(@post, :html => {:id => 'some_form', :class => 'some_class'}) do |f| end
  expected = whole_form("/posts/123", "some_form", "some_class", 'put')

  assert_dom_equal expected, output_buffer
end

# A string :url is used verbatim as the form action.
def test_form_for_with_string_url_option
  form_for(@post, :url => 'http://www.otherdomain.com') do |f| end

  assert_equal whole_form("http://www.otherdomain.com", 'edit_post_123', 'edit_post', 'put'), output_buffer
end

# A hash :url is passed through to url_for (captured in @url_for_options by
# the url_for override defined on this test case).
def test_form_for_with_hash_url_option
  form_for(@post, :url => {:controller => 'controller', :action => 'action'}) do |f| end

  assert_equal 'controller', @url_for_options[:controller]
  assert_equal 'action', @url_for_options[:action]
end

# A record :url routes via the record's resource path.
def test_form_for_with_record_url_option
  form_for(@post, :url => @post) do |f| end

  expected = whole_form("/posts/123", 'edit_post_123', 'edit_post', 'put')
  assert_equal expected, output_buffer
end

# Persisted record: PUT form against the member path with edit_* id/class.
def test_form_for_with_existing_object
  form_for(@post) do |f| end

  expected = whole_form("/posts/123", "edit_post_123", "edit_post", "put")
  assert_equal expected, output_buffer
end

# Unpersisted record: POST form against the collection path with new_* id/class.
def test_form_for_with_new_object
  post = Post.new
  post.persisted = false
  def post.id() nil end

  form_for(post) do |f| end

  expected = whole_form("/posts", "new_post", "new_post")
  assert_equal expected, output_buffer
end

# [parent, child] array with a persisted child routes to the nested member path.
def test_form_for_with_existing_object_in_list
  @comment.save
  form_for([@post, @comment]) {}

  expected = whole_form(post_comment_path(@post, @comment), "edit_comment_1", "edit_comment", "put")
  assert_dom_equal expected, output_buffer
end

# [parent, child] array with a new child routes to the nested collection path.
def test_form_for_with_new_object_in_list
  form_for([@post, @comment]) {}

  expected = whole_form(post_comments_path(@post), "new_comment", "new_comment")
  assert_dom_equal expected, output_buffer
end

# A leading symbol in the array adds the route namespace (admin_*).
def test_form_for_with_existing_object_and_namespace_in_list
  @comment.save
  form_for([:admin, @post, @comment]) {}

  expected = whole_form(admin_post_comment_path(@post, @comment), "edit_comment_1", "edit_comment", "put")
  assert_dom_equal expected, output_buffer
end

def test_form_for_with_new_object_and_namespace_in_list
  form_for([:admin, @post, @comment]) {}

  expected = whole_form(admin_post_comments_path(@post), "new_comment", "new_comment")
  assert_dom_equal expected, output_buffer
end

# An explicit :url wins over the record's default path; the edit_* id/class
# still come from the (persisted) record.
def test_form_for_with_existing_object_and_custom_url
  form_for(@post, :url => "/super_posts") do |f| end

  expected = whole_form("/super_posts", "edit_post_123", "edit_post", "put")
  assert_equal expected, output_buffer
end

# fields_for (without a surrounding form) returns its block's result.
def test_fields_for_returns_block_result
  output = fields_for(Post.new) { |f| "fields" }
  assert_equal "fields", output
end

protected

# Disable CSRF protection so forms render without an authenticity token.
def protect_against_forgery?
false end end
# Do not stub id here
require 'abstract_unit' require 'controller/fake_models' require 'active_support/core_ext/object/inclusion' class FormHelperTest < ActionView::TestCase include RenderERBUtils tests ActionView::Helpers::FormHelper def form_for(*) @output_buffer = super end def setup super # Create "label" locale for testing I18n label helpers I18n.backend.store_translations 'label', { :activemodel => { :attributes => { :post => { :cost => "Total cost" } } }, :helpers => { :label => { :post => { :body => "Write entire text here", :color => { :red => "Rojo" }, :comments => { :body => "Write body here" } }, :tag => { :value => "Tag" } } } } # Create "submit" locale for testing I18n submit helpers I18n.backend.store_translations 'submit', { :helpers => { :submit => { :create => 'Create %{model}', :update => 'Confirm %{model} changes', :submit => 'Save changes', :another_post => { :update => 'Update your %{model}' } } } } @post = Post.new @comment = Comment.new def @post.errors() Class.new{ def [](field); field == "author_name" ? ["can't be empty"] : [] end def empty?() false end def count() 1 end def full_messages() [ "Author name can't be empty" ] end }.new end def @post.to_key; [123]; end def @post.id_before_type_cast; 123; end def @post.to_param; '123'; end @post.persisted = true @post.title = "Hello World" @post.author_name = "" @post.body = "Back to the hill and over it again!"
@post.secret = 1 @post.written_on = Date.new(2004, 6, 15) @post.comments = [] @post.comments << @comment @post.tags = [] @post.tags << Tag.new @blog_post = Blog::Post.new("And his name will be forty and four.", 44) end Routes = ActionDispatch::Routing::RouteSet.new Routes.draw do resources :posts do resources :comments end namespace :admin do resources :posts do resources :comments end end match "/foo", :to => "controller#action" root :to => "main#index" end def _routes Routes end include Routes.url_helpers def url_for(object) @url_for_options = object if object.is_a?(Hash) && object[:use_route].blank? && object[:controller].blank? object.merge!(:controller => "main", :action => "index") end super end def test_label assert_dom_equal('<label for="post_title">Title</label>', label("post", "title")) assert_dom_equal('<label for="post_title">The title goes here</label>', label("post", "title", "The title goes here")) assert_dom_equal( '<label class="title_label" for="post_title">Title</label>', label("post", "title", nil, :class => 'title_label') ) assert_dom_equal('<label for="post_secret">Secret?</label>', label("post", "secret?")) end def test_label_with_symbols assert_dom_equal('<label for="post_title">Title</label>', label(:post, :title)) assert_dom_equal('<label for="post_secret">Secret?</label>', label(:post, :secret?)) end def test_label_with_locales_strings old_locale, I18n.locale = I18n.locale, :label assert_dom_equal('<label for="post_body">Write entire text here</label>', label("post", "body")) ensure I18n.locale = old_locale end def test_label_with_human_attribute_name old_locale, I18n.locale = I18n.locale, :label assert_dom_equal('<label for="post_cost">Total cost</label>', label(:post, :cost)) ensure I18n.locale = old_locale end def test_label_with_locales_symbols old_locale, I18n.locale = I18n.locale, :label assert_dom_equal('<label for="post_body">Write entire text here</label>', label(:post, :body)) ensure I18n.locale = old_locale end def 
test_label_with_locales_and_options old_locale, I18n.locale = I18n.locale, :label assert_dom_equal('<label for="post_body" class="post_body">Write entire text here</label>', label(:post, :body, :class => 'post_body')) ensure I18n.locale = old_locale end def test_label_with_locales_and_value old_locale, I18n.locale = I18n.locale, :label assert_dom_equal('<label for="post_color_red">Rojo</label>', label(:post, :color, :value => "red")) ensure I18n.locale = old_locale end def test_label_with_locales_and_nested_attributes old_locale, I18n.locale = I18n.locale, :label form_for(@post, :html => { :id => 'create-post' }) do |f| f.fields_for(:comments) do |cf| concat cf.label(:body) end end expected = whole_form("/posts/123", "create-post" , "edit_post", :method => "put") do "<label for=\"post_comments_attributes_0_body\">Write body here</label>" end assert_dom_equal expected, output_buffer ensure I18n.locale = old_locale end def test_label_with_locales_fallback_and_nested_attributes old_locale, I18n.locale = I18n.locale, :label form_for(@post, :html => { :id => 'create-post' }) do |f| f.fields_for(:tags) do |cf| concat cf.label(:value) end end expected = whole_form("/posts/123", "create-post" , "edit_post", :method => "put") do "<label for=\"post_tags_attributes_0_value\">Tag</label>" end assert_dom_equal expected, output_buffer ensure I18n.locale = old_locale end def test_label_with_for_attribute_as_symbol assert_dom_equal('<label for="my_for">Title</label>', label(:post, :title, nil, :for => "my_for")) end def test_label_with_for_attribute_as_string assert_dom_equal('<label for="my_for">Title</label>', label(:post, :title, nil, "for" => "my_for")) end def test_label_with_id_attribute_as_symbol assert_dom_equal('<label for="post_title" id="my_id">Title</label>', label(:post, :title, nil, :id => "my_id")) end def test_label_with_id_attribute_as_string assert_dom_equal('<label for="post_title" id="my_id">Title</label>', label(:post, :title, nil, "id" => "my_id")) end def 
test_label_with_for_and_id_attributes_as_symbol assert_dom_equal('<label for="my_for" id="my_id">Title</label>', label(:post, :title, nil, :for => "my_for", :id => "my_id")) end def test_label_with_for_and_id_attributes_as_string assert_dom_equal('<label for="my_for" id="my_id">Title</label>', label(:post, :title, nil, "for" => "my_for", "id" => "my_id")) end def test_label_for_radio_buttons_with_value assert_dom_equal('<label for="post_title_great_title">The title goes here</label>', label("post", "title", "The title goes here", :value => "great_title")) assert_dom_equal('<label for="post_title_great_title">The title goes here</label>', label("post", "title", "The title goes here", :value => "great title")) end def test_label_with_block assert_dom_equal('<label for="post_title">The title, please:</label>', label(:post, :title) { "The title, please:" }) end def test_label_with_block_in_erb assert_equal "<label for=\"post_message\">\n Message\n <input id=\"post_message\" name=\"post[message]\" size=\"30\" type=\"text\" />\n</label>", view.render("test/label_with_block") end def test_text_field assert_dom_equal( '<input id="post_title" name="post[title]" size="30" type="text" value="Hello World" />', text_field("post", "title") ) assert_dom_equal( '<input id="post_title" name="post[title]" size="30" type="password" />', password_field("post", "title") ) assert_dom_equal( '<input id="post_title" name="post[title]" size="30" type="password" value="Hello World" />', password_field("post", "title", :value => @post.title) ) assert_dom_equal( '<input id="person_name" name="person[name]" size="30" type="password" />', password_field("person", "name") ) end def test_text_field_with_escapes @post.title = "<b>Hello World</b>" assert_dom_equal( '<input id="post_title" name="post[title]" size="30" type="text" value="&lt;b&gt;Hello World&lt;/b&gt;" />', text_field("post", "title") ) end def test_text_field_with_html_entities @post.title = "The HTML Entity for & is &amp;" 
assert_dom_equal( '<input id="post_title" name="post[title]" size="30" type="text" value="The HTML Entity for &amp; is &amp;amp;" />', text_field("post", "title") ) end def test_text_field_with_options expected = '<input id="post_title" name="post[title]" size="35" type="text" value="Hello World" />' assert_dom_equal expected, text_field("post", "title", "size" => 35) assert_dom_equal expected, text_field("post", "title", :size => 35) end def test_text_field_assuming_size expected = '<input id="post_title" maxlength="35" name="post[title]" size="35" type="text" value="Hello World" />' assert_dom_equal expected, text_field("post", "title", "maxlength" => 35) assert_dom_equal expected, text_field("post", "title", :maxlength => 35) end def test_text_field_removing_size expected = '<input id="post_title" maxlength="35" name="post[title]" type="text" value="Hello World" />' assert_dom_equal expected, text_field("post", "title", "maxlength" => 35, "size" => nil) assert_dom_equal expected, text_field("post", "title", :maxlength => 35, :size => nil) end def test_text_field_with_nil_value expected = '<input id="post_title" name="post[title]" size="30" type="text" />' assert_dom_equal expected, text_field("post", "title", :value => nil) end def test_text_field_doesnt_change_param_values object_name = 'post[]' expected = '<input id="post_123_title" name="post[123][title]" size="30" type="text" value="Hello World" />' assert_equal expected, text_field(object_name, "title") assert_equal object_name, "post[]" end def test_file_field_has_no_size expected = '<input id="user_avatar" name="user[avatar]" type="file" />' assert_dom_equal expected, file_field("user", "avatar") end def test_hidden_field assert_dom_equal '<input id="post_title" name="post[title]" type="hidden" value="Hello World" />', hidden_field("post", "title") assert_dom_equal '<input id="post_secret" name="post[secret]" type="hidden" value="1" />', hidden_field("post", "secret?") end def 
test_hidden_field_with_escapes @post.title = "<b>Hello World</b>" assert_dom_equal '<input id="post_title" name="post[title]" type="hidden" value="&lt;b&gt;Hello World&lt;/b&gt;" />', hidden_field("post", "title") end def test_hidden_field_with_nil_value expected = '<input id="post_title" name="post[title]" type="hidden" />' assert_dom_equal expected, hidden_field("post", "title", :value => nil) end def test_hidden_field_with_options assert_dom_equal '<input id="post_title" name="post[title]" type="hidden" value="Something Else" />', hidden_field("post", "title", :value => "Something Else") end def test_text_field_with_custom_type assert_dom_equal '<input id="user_email" size="30" name="user[email]" type="email" />', text_field("user", "email", :type => "email") end def test_check_box assert check_box("post", "secret").html_safe? assert_dom_equal( '<input name="post[secret]" type="hidden" value="0" /><input checked="checked" id="post_secret" name="post[secret]" type="checkbox" value="1" />', check_box("post", "secret") ) @post.secret = 0 assert_dom_equal( '<input name="post[secret]" type="hidden" value="0" /><input id="post_secret" name="post[secret]" type="checkbox" value="1" />', check_box("post", "secret") ) assert_dom_equal( '<input name="post[secret]" type="hidden" value="0" /><input checked="checked" id="post_secret" name="post[secret]" type="checkbox" value="1" />', check_box("post", "secret" ,{"checked"=>"checked"}) ) @post.secret = true assert_dom_equal( '<input name="post[secret]" type="hidden" value="0" /><input checked="checked" id="post_secret" name="post[secret]" type="checkbox" value="1" />', check_box("post", "secret") ) assert_dom_equal( '<input name="post[secret]" type="hidden" value="0" /><input checked="checked" id="post_secret" name="post[secret]" type="checkbox" value="1" />', check_box("post", "secret?") ) @post.secret = ['0'] assert_dom_equal( '<input name="post[secret]" type="hidden" value="0" /><input id="post_secret" name="post[secret]" 
type="checkbox" value="1" />', check_box("post", "secret") ) @post.secret = ['1'] assert_dom_equal( '<input name="post[secret]" type="hidden" value="0" /><input checked="checked" id="post_secret" name="post[secret]" type="checkbox" value="1" />', check_box("post", "secret") ) end def test_check_box_with_explicit_checked_and_unchecked_values @post.secret = "on" assert_dom_equal( '<input name="post[secret]" type="hidden" value="off" /><input checked="checked" id="post_secret" name="post[secret]" type="checkbox" value="on" />', check_box("post", "secret", {}, "on", "off") ) end def test_check_box_with_multiple_behavior @post.comment_ids = [2,3] assert_dom_equal( '<input name="post[comment_ids][]" type="hidden" value="0" /><input id="post_comment_ids_1" name="post[comment_ids][]" type="checkbox" value="1" />', check_box("post", "comment_ids", { :multiple => true }, 1) ) assert_dom_equal( '<input name="post[comment_ids][]" type="hidden" value="0" /><input checked="checked" id="post_comment_ids_3" name="post[comment_ids][]" type="checkbox" value="3" />', check_box("post", "comment_ids", { :multiple => true }, 3) ) end def test_checkbox_disabled_disables_hidden_field assert_dom_equal( '<input name="post[secret]" type="hidden" value="0" disabled="disabled"/><input checked="checked" disabled="disabled" id="post_secret" name="post[secret]" type="checkbox" value="1" />', check_box("post", "secret", { :disabled => true }) ) end def test_radio_button assert_dom_equal('<input checked="checked" id="post_title_hello_world" name="post[title]" type="radio" value="Hello World" />', radio_button("post", "title", "Hello World") ) assert_dom_equal('<input id="post_title_goodbye_world" name="post[title]" type="radio" value="Goodbye World" />', radio_button("post", "title", "Goodbye World") ) assert_dom_equal('<input id="item_subobject_title_inside_world" name="item[subobject][title]" type="radio" value="inside world"/>', radio_button("item[subobject]", "title", "inside world") ) end def 
test_radio_button_is_checked_with_integers assert_dom_equal('<input checked="checked" id="post_secret_1" name="post[secret]" type="radio" value="1" />', radio_button("post", "secret", "1") ) end def test_radio_button_with_negative_integer_value assert_dom_equal('<input id="post_secret_-1" name="post[secret]" type="radio" value="-1" />', radio_button("post", "secret", "-1")) end def test_radio_button_respects_passed_in_id assert_dom_equal('<input checked="checked" id="foo" name="post[secret]" type="radio" value="1" />', radio_button("post", "secret", "1", :id=>"foo") ) end def test_radio_button_with_booleans assert_dom_equal('<input id="post_secret_true" name="post[secret]" type="radio" value="true" />', radio_button("post", "secret", true) ) assert_dom_equal('<input id="post_secret_false" name="post[secret]" type="radio" value="false" />', radio_button("post", "secret", false) ) end def test_text_area assert_dom_equal( '<textarea cols="40" id="post_body" name="post[body]" rows="20">Back to the hill and over it again!</textarea>', text_area("post", "body") ) end def test_text_area_with_escapes @post.body = "Back to <i>the</i> hill and over it again!" 
assert_dom_equal( '<textarea cols="40" id="post_body" name="post[body]" rows="20">Back to &lt;i&gt;the&lt;/i&gt; hill and over it again!</textarea>', text_area("post", "body") ) end def test_text_area_with_alternate_value assert_dom_equal( '<textarea cols="40" id="post_body" name="post[body]" rows="20">Testing alternate values.</textarea>', text_area("post", "body", :value => 'Testing alternate values.') ) end def test_text_area_with_html_entities @post.body = "The HTML Entity for & is &amp;" assert_dom_equal( '<textarea cols="40" id="post_body" name="post[body]" rows="20">The HTML Entity for &amp; is &amp;amp;</textarea>', text_area("post", "body") ) end def test_text_area_with_size_option assert_dom_equal( '<textarea cols="183" id="post_body" name="post[body]" rows="820">Back to the hill and over it again!</textarea>', text_area("post", "body", :size => "183x820") ) end def test_search_field expected = %{<input id="contact_notes_query" size="30" name="contact[notes_query]" type="search" />} assert_dom_equal(expected, search_field("contact", "notes_query")) end def test_telephone_field expected = %{<input id="user_cell" size="30" name="user[cell]" type="tel" />} assert_dom_equal(expected, telephone_field("user", "cell")) end def test_url_field expected = %{<input id="user_homepage" size="30" name="user[homepage]" type="url" />} assert_dom_equal(expected, url_field("user", "homepage")) end def test_email_field expected = %{<input id="user_address" size="30" name="user[address]" type="email" />} assert_dom_equal(expected, email_field("user", "address")) end def test_number_field expected = %{<input name="order[quantity]" max="9" id="order_quantity" type="number" min="1" />} assert_dom_equal(expected, number_field("order", "quantity", :in => 1...10)) expected = %{<input name="order[quantity]" size="30" max="9" id="order_quantity" type="number" min="1" />} assert_dom_equal(expected, number_field("order", "quantity", :size => 30, :in => 1...10)) end def 
test_range_input expected = %{<input name="hifi[volume]" step="0.1" max="11" id="hifi_volume" type="range" min="0" />} assert_dom_equal(expected, range_field("hifi", "volume", :in => 0..11, :step => 0.1)) expected = %{<input name="hifi[volume]" step="0.1" size="30" max="11" id="hifi_volume" type="range" min="0" />} assert_dom_equal(expected, range_field("hifi", "volume", :size => 30, :in => 0..11, :step => 0.1)) end def test_explicit_name assert_dom_equal( '<input id="post_title" name="dont guess" size="30" type="text" value="Hello World" />', text_field("post", "title", "name" => "dont guess") ) assert_dom_equal( '<textarea cols="40" id="post_body" name="really!" rows="20">Back to the hill and over it again!</textarea>', text_area("post", "body", "name" => "really!") ) assert_dom_equal( '<input name="i mean it" type="hidden" value="0" /><input checked="checked" id="post_secret" name="i mean it" type="checkbox" value="1" />', check_box("post", "secret", "name" => "i mean it") ) assert_dom_equal text_field("post", "title", "name" => "dont guess"), text_field("post", "title", :name => "dont guess") assert_dom_equal text_area("post", "body", "name" => "really!"), text_area("post", "body", :name => "really!") assert_dom_equal check_box("post", "secret", "name" => "i mean it"), check_box("post", "secret", :name => "i mean it") end def test_explicit_id assert_dom_equal( '<input id="dont guess" name="post[title]" size="30" type="text" value="Hello World" />', text_field("post", "title", "id" => "dont guess") ) assert_dom_equal( '<textarea cols="40" id="really!" 
name="post[body]" rows="20">Back to the hill and over it again!</textarea>', text_area("post", "body", "id" => "really!") ) assert_dom_equal( '<input name="post[secret]" type="hidden" value="0" /><input checked="checked" id="i mean it" name="post[secret]" type="checkbox" value="1" />', check_box("post", "secret", "id" => "i mean it") ) assert_dom_equal text_field("post", "title", "id" => "dont guess"), text_field("post", "title", :id => "dont guess") assert_dom_equal text_area("post", "body", "id" => "really!"), text_area("post", "body", :id => "really!") assert_dom_equal check_box("post", "secret", "id" => "i mean it"), check_box("post", "secret", :id => "i mean it") end def test_nil_id assert_dom_equal( '<input name="post[title]" size="30" type="text" value="Hello World" />', text_field("post", "title", "id" => nil) ) assert_dom_equal( '<textarea cols="40" name="post[body]" rows="20">Back to the hill and over it again!</textarea>', text_area("post", "body", "id" => nil) ) assert_dom_equal( '<input name="post[secret]" type="hidden" value="0" /><input checked="checked" name="post[secret]" type="checkbox" value="1" />', check_box("post", "secret", "id" => nil) ) assert_dom_equal( '<input type="radio" name="post[secret]" value="0" />', radio_button("post", "secret", "0", "id" => nil) ) assert_dom_equal( '<select name="post[secret]"></select>', select("post", "secret", [], {}, "id" => nil) ) assert_dom_equal text_field("post", "title", "id" => nil), text_field("post", "title", :id => nil) assert_dom_equal text_area("post", "body", "id" => nil), text_area("post", "body", :id => nil) assert_dom_equal check_box("post", "secret", "id" => nil), check_box("post", "secret", :id => nil) assert_dom_equal radio_button("post", "secret", "0", "id" => nil), radio_button("post", "secret", "0", :id => nil) end def test_index assert_dom_equal( '<input name="post[5][title]" size="30" id="post_5_title" type="text" value="Hello World" />', text_field("post", "title", "index" => 5) ) 
assert_dom_equal( '<textarea cols="40" name="post[5][body]" id="post_5_body" rows="20">Back to the hill and over it again!</textarea>', text_area("post", "body", "index" => 5) ) assert_dom_equal( '<input name="post[5][secret]" type="hidden" value="0" /><input checked="checked" name="post[5][secret]" type="checkbox" value="1" id="post_5_secret" />', check_box("post", "secret", "index" => 5) ) assert_dom_equal( text_field("post", "title", "index" => 5), text_field("post", "title", "index" => 5) ) assert_dom_equal( text_area("post", "body", "index" => 5), text_area("post", "body", "index" => 5) ) assert_dom_equal( check_box("post", "secret", "index" => 5), check_box("post", "secret", "index" => 5) ) end def test_index_with_nil_id assert_dom_equal( '<input name="post[5][title]" size="30" type="text" value="Hello World" />', text_field("post", "title", "index" => 5, 'id' => nil) ) assert_dom_equal( '<textarea cols="40" name="post[5][body]" rows="20">Back to the hill and over it again!</textarea>', text_area("post", "body", "index" => 5, 'id' => nil) ) assert_dom_equal( '<input name="post[5][secret]" type="hidden" value="0" /><input checked="checked" name="post[5][secret]" type="checkbox" value="1" />', check_box("post", "secret", "index" => 5, 'id' => nil) ) assert_dom_equal( text_field("post", "title", "index" => 5, 'id' => nil), text_field("post", "title", :index => 5, :id => nil) ) assert_dom_equal( text_area("post", "body", "index" => 5, 'id' => nil), text_area("post", "body", :index => 5, :id => nil) ) assert_dom_equal( check_box("post", "secret", "index" => 5, 'id' => nil), check_box("post", "secret", :index => 5, :id => nil) ) end def test_auto_index pid = 123 assert_dom_equal( "<label for=\"post_#{pid}_title\">Title</label>", label("post[]", "title") ) assert_dom_equal( "<input id=\"post_#{pid}_title\" name=\"post[#{pid}][title]\" size=\"30\" type=\"text\" value=\"Hello World\" />", text_field("post[]","title") ) assert_dom_equal( "<textarea cols=\"40\" 
id=\"post_#{pid}_body\" name=\"post[#{pid}][body]\" rows=\"20\">Back to the hill and over it again!</textarea>", text_area("post[]", "body") ) assert_dom_equal( "<input name=\"post[#{pid}][secret]\" type=\"hidden\" value=\"0\" /><input checked=\"checked\" id=\"post_#{pid}_secret\" name=\"post[#{pid}][secret]\" type=\"checkbox\" value=\"1\" />", check_box("post[]", "secret") ) assert_dom_equal( "<input checked=\"checked\" id=\"post_#{pid}_title_hello_world\" name=\"post[#{pid}][title]\" type=\"radio\" value=\"Hello World\" />", radio_button("post[]", "title", "Hello World") ) assert_dom_equal("<input id=\"post_#{pid}_title_goodbye_world\" name=\"post[#{pid}][title]\" type=\"radio\" value=\"Goodbye World\" />", radio_button("post[]", "title", "Goodbye World") ) end def test_auto_index_with_nil_id pid = 123 assert_dom_equal( "<input name=\"post[#{pid}][title]\" size=\"30\" type=\"text\" value=\"Hello World\" />", text_field("post[]","title", :id => nil) ) assert_dom_equal( "<textarea cols=\"40\" name=\"post[#{pid}][body]\" rows=\"20\">Back to the hill and over it again!</textarea>", text_area("post[]", "body", :id => nil) ) assert_dom_equal( "<input name=\"post[#{pid}][secret]\" type=\"hidden\" value=\"0\" /><input checked=\"checked\" name=\"post[#{pid}][secret]\" type=\"checkbox\" value=\"1\" />", check_box("post[]", "secret", :id => nil) ) assert_dom_equal( "<input checked=\"checked\" name=\"post[#{pid}][title]\" type=\"radio\" value=\"Hello World\" />", radio_button("post[]", "title", "Hello World", :id => nil) ) assert_dom_equal("<input name=\"post[#{pid}][title]\" type=\"radio\" value=\"Goodbye World\" />", radio_button("post[]", "title", "Goodbye World", :id => nil) ) end def test_form_for_requires_block assert_raises(ArgumentError) do form_for(:post, @post, :html => { :id => 'create-post' }) end end def test_form_for form_for(@post, :html => { :id => 'create-post' }) do |f| concat f.label(:title) { "The Title" } concat f.text_field(:title) concat 
f.text_area(:body) concat f.check_box(:secret) concat f.submit('Create post') concat f.button('Create post') end expected = whole_form("/posts/123", "create-post" , "edit_post", :method => "put") do "<label for='post_title'>The Title</label>" + "<input name='post[title]' size='30' type='text' id='post_title' value='Hello World' />" + "<textarea name='post[body]' id='post_body' rows='20' cols='40'>Back to the hill and over it again!</textarea>" + "<input name='post[secret]' type='hidden' value='0' />" + "<input name='post[secret]' checked='checked' type='checkbox' id='post_secret' value='1' />" + "<input name='commit' type='submit' value='Create post' />" + "<button name='button' type='submit'>Create post</button>" end assert_dom_equal expected, output_buffer end def test_form_for_with_file_field_generate_multipart Post.send :attr_accessor, :file form_for(@post, :html => { :id => 'create-post' }) do |f| concat f.file_field(:file) end expected = whole_form("/posts/123", "create-post" , "edit_post", :method => "put", :multipart => true) do "<input name='post[file]' type='file' id='post_file' />" end assert_dom_equal expected, output_buffer end def test_fields_for_with_file_field_generate_multipart Comment.send :attr_accessor, :file form_for(@post) do |f| concat f.fields_for(:comment, @post) { |c| concat c.file_field(:file) } end expected = whole_form("/posts/123", "edit_post_123" , "edit_post", :method => "put", :multipart => true) do "<input name='post[comment][file]' type='file' id='post_comment_file' />" end assert_dom_equal expected, output_buffer end def test_form_for_with_format form_for(@post, :format => :json, :html => { :id => "edit_post_123", :class => "edit_post" }) do |f| concat f.label(:title) end expected = whole_form("/posts/123.json", "edit_post_123" , "edit_post", :method => "put") do "<label for='post_title'>Title</label>" end assert_dom_equal expected, output_buffer end def test_form_for_with_model_using_relative_model_naming form_for(@blog_post) do 
|f| concat f.text_field :title concat f.submit('Edit post') end expected = whole_form("/posts/44", "edit_post_44" , "edit_post", :method => "put") do "<input name='post[title]' size='30' type='text' id='post_title' value='And his name will be forty and four.' />" + "<input name='commit' type='submit' value='Edit post' />" end assert_dom_equal expected, output_buffer end def test_form_for_with_symbol_object_name form_for(@post, :as => "other_name", :html => { :id => 'create-post' }) do |f| concat f.label(:title, :class => 'post_title') concat f.text_field(:title) concat f.text_area(:body) concat f.check_box(:secret) concat f.submit('Create post') end expected = whole_form("/posts/123", "create-post", "edit_other_name", :method => "put") do "<label for='other_name_title' class='post_title'>Title</label>" + "<input name='other_name[title]' size='30' id='other_name_title' value='Hello World' type='text' />" + "<textarea name='other_name[body]' id='other_name_body' rows='20' cols='40'>Back to the hill and over it again!</textarea>" + "<input name='other_name[secret]' value='0' type='hidden' />" + "<input name='other_name[secret]' checked='checked' id='other_name_secret' value='1' type='checkbox' />" + "<input name='commit' value='Create post' type='submit' />" end assert_dom_equal expected, output_buffer end def test_form_for_with_method_as_part_of_html_options form_for(@post, :url => '/', :html => { :id => 'create-post', :method => :delete }) do |f| concat f.text_field(:title) concat f.text_area(:body) concat f.check_box(:secret) end expected = whole_form("/", "create-post", "edit_post", "delete") do "<input name='post[title]' size='30' type='text' id='post_title' value='Hello World' />" + "<textarea name='post[body]' id='post_body' rows='20' cols='40'>Back to the hill and over it again!</textarea>" + "<input name='post[secret]' type='hidden' value='0' />" + "<input name='post[secret]' checked='checked' type='checkbox' id='post_secret' value='1' />" end 
assert_dom_equal expected, output_buffer end def test_form_for_with_method form_for(@post, :url => '/', :method => :delete, :html => { :id => 'create-post' }) do |f| concat f.text_field(:title) concat f.text_area(:body) concat f.check_box(:secret) end expected = whole_form("/", "create-post", "edit_post", "delete") do "<input name='post[title]' size='30' type='text' id='post_title' value='Hello World' />" + "<textarea name='post[body]' id='post_body' rows='20' cols='40'>Back to the hill and over it again!</textarea>" + "<input name='post[secret]' type='hidden' value='0' />" + "<input name='post[secret]' checked='checked' type='checkbox' id='post_secret' value='1' />" end assert_dom_equal expected, output_buffer end def test_form_for_with_search_field # Test case for bug which would emit an "object" attribute # when used with form_for using a search_field form helper form_for(Post.new, :url => "/search", :html => { :id => 'search-post', :method => :get}) do |f| concat f.search_field(:title) end expected = whole_form("/search", "search-post", "new_post", "get") do "<input name='post[title]' size='30' type='search' id='post_title' />" end assert_dom_equal expected, output_buffer end def test_form_for_with_remote form_for(@post, :url => '/', :remote => true, :html => { :id => 'create-post', :method => :put }) do |f| concat f.text_field(:title) concat f.text_area(:body) concat f.check_box(:secret) end expected = whole_form("/", "create-post", "edit_post", :method => "put", :remote => true) do "<input name='post[title]' size='30' type='text' id='post_title' value='Hello World' />" + "<textarea name='post[body]' id='post_body' rows='20' cols='40'>Back to the hill and over it again!</textarea>" + "<input name='post[secret]' type='hidden' value='0' />" + "<input name='post[secret]' checked='checked' type='checkbox' id='post_secret' value='1' />" end assert_dom_equal expected, output_buffer end def test_form_for_with_remote_in_html form_for(@post, :url => '/', :html => { 
:remote => true, :id => 'create-post', :method => :put }) do |f| concat f.text_field(:title) concat f.text_area(:body) concat f.check_box(:secret) end expected = whole_form("/", "create-post", "edit_post", :method => "put", :remote => true) do "<input name='post[title]' size='30' type='text' id='post_title' value='Hello World' />" + "<textarea name='post[body]' id='post_body' rows='20' cols='40'>Back to the hill and over it again!</textarea>" + "<input name='post[secret]' type='hidden' value='0' />" + "<input name='post[secret]' checked='checked' type='checkbox' id='post_secret' value='1' />" end assert_dom_equal expected, output_buffer end def test_form_for_with_remote_without_html @post.persisted = false def @post.to_key; nil; end form_for(@post, :remote => true) do |f| concat f.text_field(:title) concat f.text_area(:body) concat f.check_box(:secret) end expected = whole_form("/posts", 'new_post', 'new_post', :remote => true) do "<input name='post[title]' size='30' type='text' id='post_title' value='Hello World' />" + "<textarea name='post[body]' id='post_body' rows='20' cols='40'>Back to the hill and over it again!</textarea>" + "<input name='post[secret]' type='hidden' value='0' />" + "<input name='post[secret]' checked='checked' type='checkbox' id='post_secret' value='1' />" end assert_dom_equal expected, output_buffer end def test_form_for_without_object form_for(:post, :html => { :id => 'create-post' }) do |f| concat f.text_field(:title) concat f.text_area(:body) concat f.check_box(:secret) end expected = whole_form("/", "create-post") do "<input name='post[title]' size='30' type='text' id='post_title' value='Hello World' />" + "<textarea name='post[body]' id='post_body' rows='20' cols='40'>Back to the hill and over it again!</textarea>" + "<input name='post[secret]' type='hidden' value='0' />" + "<input name='post[secret]' checked='checked' type='checkbox' id='post_secret' value='1' />" end assert_dom_equal expected, output_buffer end def 
test_form_for_with_index form_for(@post, :as => "post[]") do |f| concat f.label(:title) concat f.text_field(:title) concat f.text_area(:body) concat f.check_box(:secret) end expected = whole_form('/posts/123', 'edit_post[]', 'edit_post[]', 'put') do "<label for='post_123_title'>Title</label>" + "<input name='post[123][title]' size='30' type='text' id='post_123_title' value='Hello World' />" + "<textarea name='post[123][body]' id='post_123_body' rows='20' cols='40'>Back to the hill and over it again!</textarea>" + "<input name='post[123][secret]' type='hidden' value='0' />" + "<input name='post[123][secret]' checked='checked' type='checkbox' id='post_123_secret' value='1' />" end assert_dom_equal expected, output_buffer end def test_form_for_with_nil_index_option_override form_for(@post, :as => "post[]", :index => nil) do |f| concat f.text_field(:title) concat f.text_area(:body) concat f.check_box(:secret) end expected = whole_form('/posts/123', 'edit_post[]', 'edit_post[]', 'put') do "<input name='post[][title]' size='30' type='text' id='post__title' value='Hello World' />" + "<textarea name='post[][body]' id='post__body' rows='20' cols='40'>Back to the hill and over it again!</textarea>" + "<input name='post[][secret]' type='hidden' value='0' />" + "<input name='post[][secret]' checked='checked' type='checkbox' id='post__secret' value='1' />" end assert_dom_equal expected, output_buffer end def test_form_for_with_namespace form_for(@post, :namespace => 'namespace') do |f| concat f.text_field(:title) concat f.text_area(:body) concat f.check_box(:secret) end expected = whole_form('/posts/123', 'namespace_edit_post_123', 'edit_post', 'put') do "<input name='post[title]' size='30' type='text' id='namespace_post_title' value='Hello World' />" + "<textarea name='post[body]' id='namespace_post_body' rows='20' cols='40'>Back to the hill and over it again!</textarea>" + "<input name='post[secret]' type='hidden' value='0' />" + "<input name='post[secret]' checked='checked' 
type='checkbox' id='namespace_post_secret' value='1' />" end assert_dom_equal expected, output_buffer end def test_form_for_with_namespace_with_label form_for(@post, :namespace => 'namespace') do |f| concat f.label(:title) concat f.text_field(:title) end expected = whole_form('/posts/123', 'namespace_edit_post_123', 'edit_post', 'put') do "<label for='namespace_post_title'>Title</label>" + "<input name='post[title]' size='30' type='text' id='namespace_post_title' value='Hello World' />" end assert_dom_equal expected, output_buffer end def test_two_form_for_with_namespace form_for(@post, :namespace => 'namespace_1') do |f| concat f.label(:title) concat f.text_field(:title) end expected_1 = whole_form('/posts/123', 'namespace_1_edit_post_123', 'edit_post', 'put') do "<label for='namespace_1_post_title'>Title</label>" + "<input name='post[title]' size='30' type='text' id='namespace_1_post_title' value='Hello World' />" end assert_dom_equal expected_1, output_buffer form_for(@post, :namespace => 'namespace_2') do |f| concat f.label(:title) concat f.text_field(:title) end expected_2 = whole_form('/posts/123', 'namespace_2_edit_post_123', 'edit_post', 'put') do "<label for='namespace_2_post_title'>Title</label>" + "<input name='post[title]' size='30' type='text' id='namespace_2_post_title' value='Hello World' />" end assert_dom_equal expected_2, output_buffer end def test_fields_for_with_namespace @comment.body = 'Hello World' form_for(@post, :namespace => 'namespace') do |f| concat f.text_field(:title) concat f.text_area(:body) concat f.fields_for(@comment) { |c| concat c.text_field(:body) } end expected = whole_form('/posts/123', 'namespace_edit_post_123', 'edit_post', 'put') do "<input name='post[title]' size='30' type='text' id='namespace_post_title' value='Hello World' />" + "<textarea name='post[body]' id='namespace_post_body' rows='20' cols='40'>Back to the hill and over it again!</textarea>" + "<input name='post[comment][body]' size='30' type='text' 
id='namespace_post_comment_body' value='Hello World' />" end assert_dom_equal expected, output_buffer end def test_submit_with_object_as_new_record_and_locale_strings old_locale, I18n.locale = I18n.locale, :submit @post.persisted = false def @post.to_key; nil; end form_for(@post) do |f| concat f.submit end expected = whole_form('/posts', 'new_post', 'new_post') do "<input name='commit' type='submit' value='Create Post' />" end assert_dom_equal expected, output_buffer ensure I18n.locale = old_locale end def test_submit_with_object_as_existing_record_and_locale_strings old_locale, I18n.locale = I18n.locale, :submit form_for(@post) do |f| concat f.submit end expected = whole_form('/posts/123', 'edit_post_123', 'edit_post', :method => 'put') do "<input name='commit' type='submit' value='Confirm Post changes' />" end assert_dom_equal expected, output_buffer ensure I18n.locale = old_locale end def test_submit_without_object_and_locale_strings old_locale, I18n.locale = I18n.locale, :submit form_for(:post) do |f| concat f.submit :class => "extra" end expected = whole_form do "<input name='commit' class='extra' type='submit' value='Save changes' />" end assert_dom_equal expected, output_buffer ensure I18n.locale = old_locale end def test_submit_with_object_and_nested_lookup old_locale, I18n.locale = I18n.locale, :submit form_for(@post, :as => :another_post) do |f| concat f.submit end expected = whole_form('/posts/123', 'edit_another_post', 'edit_another_post', :method => 'put') do "<input name='commit' type='submit' value='Update your Post' />" end assert_dom_equal expected, output_buffer ensure I18n.locale = old_locale end def test_nested_fields_for @comment.body = 'Hello World' form_for(@post) do |f| concat f.fields_for(@comment) { |c| concat c.text_field(:body) } end expected = whole_form('/posts/123', 'edit_post_123', 'edit_post', :method => 'put') do "<input name='post[comment][body]' size='30' type='text' id='post_comment_body' value='Hello World' />" end 
assert_dom_equal expected, output_buffer end def test_nested_fields_for_with_nested_collections form_for(@post, :as => 'post[]') do |f| concat f.text_field(:title) concat f.fields_for('comment[]', @comment) { |c| concat c.text_field(:name) } end expected = whole_form('/posts/123', 'edit_post[]', 'edit_post[]', 'put') do "<input name='post[123][title]' size='30' type='text' id='post_123_title' value='Hello World' />" + "<input name='post[123][comment][][name]' size='30' type='text' id='post_123_comment__name' value='new comment' />" end assert_dom_equal expected, output_buffer end def test_nested_fields_for_with_index_and_parent_fields form_for(@post, :index => 1) do |c| concat c.text_field(:title) concat c.fields_for('comment', @comment, :index => 1) { |r| concat r.text_field(:name) } end expected = whole_form('/posts/123', 'edit_post_123', 'edit_post', 'put') do "<input name='post[1][title]' size='30' type='text' id='post_1_title' value='Hello World' />" + "<input name='post[1][comment][1][name]' size='30' type='text' id='post_1_comment_1_name' value='new comment' />" end assert_dom_equal expected, output_buffer end def test_form_for_with_index_and_nested_fields_for output_buffer = form_for(@post, :index => 1) do |f| concat f.fields_for(:comment, @post) { |c| concat c.text_field(:title) } end expected = whole_form('/posts/123', 'edit_post_123', 'edit_post', 'put') do "<input name='post[1][comment][title]' size='30' type='text' id='post_1_comment_title' value='Hello World' />" end assert_dom_equal expected, output_buffer end def test_nested_fields_for_with_index_on_both form_for(@post, :index => 1) do |f| concat f.fields_for(:comment, @post, :index => 5) { |c| concat c.text_field(:title) } end expected = whole_form('/posts/123', 'edit_post_123', 'edit_post', 'put') do "<input name='post[1][comment][5][title]' size='30' type='text' id='post_1_comment_5_title' value='Hello World' />" end assert_dom_equal expected, output_buffer end def 
test_nested_fields_for_with_auto_index form_for(@post, :as => "post[]") do |f| concat f.fields_for(:comment, @post) { |c| concat c.text_field(:title) } end expected = whole_form('/posts/123', 'edit_post[]', 'edit_post[]', 'put') do "<input name='post[123][comment][title]' size='30' type='text' id='post_123_comment_title' value='Hello World' />" end assert_dom_equal expected, output_buffer end def test_nested_fields_for_with_index_radio_button form_for(@post) do |f| concat f.fields_for(:comment, @post, :index => 5) { |c| concat c.radio_button(:title, "hello") } end expected = whole_form('/posts/123', 'edit_post_123', 'edit_post', 'put') do "<input name='post[comment][5][title]' type='radio' id='post_comment_5_title_hello' value='hello' />" end assert_dom_equal expected, output_buffer end def test_nested_fields_for_with_auto_index_on_both form_for(@post, :as => "post[]") do |f| concat f.fields_for("comment[]", @post) { |c| concat c.text_field(:title) } end expected = whole_form('/posts/123', 'edit_post[]', 'edit_post[]', 'put') do "<input name='post[123][comment][123][title]' size='30' type='text' id='post_123_comment_123_title' value='Hello World' />" end assert_dom_equal expected, output_buffer end def test_nested_fields_for_with_index_and_auto_index output_buffer = form_for(@post, :as => "post[]") do |f| concat f.fields_for(:comment, @post, :index => 5) { |c| concat c.text_field(:title) } end output_buffer << form_for(@post, :as => :post, :index => 1) do |f| concat f.fields_for("comment[]", @post) { |c| concat c.text_field(:title) } end expected = whole_form('/posts/123', 'edit_post[]', 'edit_post[]', 'put') do "<input name='post[123][comment][5][title]' size='30' type='text' id='post_123_comment_5_title' value='Hello World' />" end + whole_form('/posts/123', 'edit_post', 'edit_post', 'put') do "<input name='post[1][comment][123][title]' size='30' type='text' id='post_1_comment_123_title' value='Hello World' />" end assert_dom_equal expected, output_buffer end def 
test_nested_fields_for_with_a_new_record_on_a_nested_attributes_one_to_one_association @post.author = Author.new form_for(@post) do |f| concat f.text_field(:title) concat f.fields_for(:author) { |af| concat af.text_field(:name) } end expected = whole_form('/posts/123', 'edit_post_123', 'edit_post', :method => 'put') do '<input name="post[title]" size="30" type="text" id="post_title" value="Hello World" />' + '<input id="post_author_attributes_name" name="post[author_attributes][name]" size="30" type="text" value="new author" />' end assert_dom_equal expected, output_buffer end def test_nested_fields_for_with_explicitly_passed_object_on_a_nested_attributes_one_to_one_association form_for(@post) do |f| f.fields_for(:author, Author.new(123)) do |af| assert_not_nil af.object assert_equal 123, af.object.id end end end def test_nested_fields_for_with_an_existing_record_on_a_nested_attributes_one_to_one_association @post.author = Author.new(321) form_for(@post) do |f| concat f.text_field(:title) concat f.fields_for(:author) { |af| concat af.text_field(:name) } end expected = whole_form('/posts/123', 'edit_post_123', 'edit_post', :method => 'put') do '<input name="post[title]" size="30" type="text" id="post_title" value="Hello World" />' + '<input id="post_author_attributes_name" name="post[author_attributes][name]" size="30" type="text" value="author #321" />' + '<input id="post_author_attributes_id" name="post[author_attributes][id]" type="hidden" value="321" />' end assert_dom_equal expected, output_buffer end def test_nested_fields_for_with_an_existing_record_on_a_nested_attributes_one_to_one_association_using_erb_and_inline_block @post.author = Author.new(321) form_for(@post) do |f| concat f.text_field(:title) concat f.fields_for(:author) { |af| af.text_field(:name) } end expected = whole_form('/posts/123', 'edit_post_123', 'edit_post', :method => 'put') do '<input name="post[title]" size="30" type="text" id="post_title" value="Hello World" />' + '<input 
id="post_author_attributes_name" name="post[author_attributes][name]" size="30" type="text" value="author #321" />' + '<input id="post_author_attributes_id" name="post[author_attributes][id]" type="hidden" value="321" />' end assert_dom_equal expected, output_buffer end def test_nested_fields_for_with_an_existing_record_on_a_nested_attributes_one_to_one_association_with_disabled_hidden_id @post.author = Author.new(321) form_for(@post) do |f| concat f.text_field(:title) concat f.fields_for(:author, :include_id => false) { |af| af.text_field(:name) } end expected = whole_form('/posts/123', 'edit_post_123', 'edit_post', :method => 'put') do '<input name="post[title]" size="30" type="text" id="post_title" value="Hello World" />' + '<input id="post_author_attributes_name" name="post[author_attributes][name]" size="30" type="text" value="author #321" />' end assert_dom_equal expected, output_buffer end def test_nested_fields_for_with_an_existing_record_on_a_nested_attributes_one_to_one_association_with_disabled_hidden_id_inherited @post.author = Author.new(321) form_for(@post, :include_id => false) do |f| concat f.text_field(:title) concat f.fields_for(:author) { |af| af.text_field(:name) } end expected = whole_form('/posts/123', 'edit_post_123', 'edit_post', :method => 'put') do '<input name="post[title]" size="30" type="text" id="post_title" value="Hello World" />' + '<input id="post_author_attributes_name" name="post[author_attributes][name]" size="30" type="text" value="author #321" />' end assert_dom_equal expected, output_buffer end def test_nested_fields_for_with_an_existing_record_on_a_nested_attributes_one_to_one_association_with_disabled_hidden_id_override @post.author = Author.new(321) form_for(@post, :include_id => false) do |f| concat f.text_field(:title) concat f.fields_for(:author, :include_id => true) { |af| af.text_field(:name) } end expected = whole_form('/posts/123', 'edit_post_123', 'edit_post', :method => 'put') do '<input name="post[title]" 
size="30" type="text" id="post_title" value="Hello World" />' + '<input id="post_author_attributes_name" name="post[author_attributes][name]" size="30" type="text" value="author #321" />' + '<input id="post_author_attributes_id" name="post[author_attributes][id]" type="hidden" value="321" />' end assert_dom_equal expected, output_buffer end def test_nested_fields_for_with_existing_records_on_a_nested_attributes_one_to_one_association_with_explicit_hidden_field_placement @post.author = Author.new(321) form_for(@post) do |f| concat f.text_field(:title) concat f.fields_for(:author) { |af| concat af.hidden_field(:id) concat af.text_field(:name) } end expected = whole_form('/posts/123', 'edit_post_123', 'edit_post', :method => 'put') do '<input name="post[title]" size="30" type="text" id="post_title" value="Hello World" />' + '<input id="post_author_attributes_id" name="post[author_attributes][id]" type="hidden" value="321" />' + '<input id="post_author_attributes_name" name="post[author_attributes][name]" size="30" type="text" value="author #321" />' end assert_dom_equal expected, output_buffer end def test_nested_fields_for_with_existing_records_on_a_nested_attributes_collection_association @post.comments = Array.new(2) { |id| Comment.new(id + 1) } form_for(@post) do |f| concat f.text_field(:title) @post.comments.each do |comment| concat f.fields_for(:comments, comment) { |cf| concat cf.text_field(:name) } end end expected = whole_form('/posts/123', 'edit_post_123', 'edit_post', :method => 'put') do '<input name="post[title]" size="30" type="text" id="post_title" value="Hello World" />' + '<input id="post_comments_attributes_0_name" name="post[comments_attributes][0][name]" size="30" type="text" value="comment #1" />' + '<input id="post_comments_attributes_0_id" name="post[comments_attributes][0][id]" type="hidden" value="1" />' + '<input id="post_comments_attributes_1_name" name="post[comments_attributes][1][name]" size="30" type="text" value="comment #2" />' + 
'<input id="post_comments_attributes_1_id" name="post[comments_attributes][1][id]" type="hidden" value="2" />' end assert_dom_equal expected, output_buffer end def test_nested_fields_for_with_existing_records_on_a_nested_attributes_collection_association_with_disabled_hidden_id @post.comments = Array.new(2) { |id| Comment.new(id + 1) } @post.author = Author.new(321) form_for(@post) do |f| concat f.text_field(:title) concat f.fields_for(:author) { |af| concat af.text_field(:name) } @post.comments.each do |comment| concat f.fields_for(:comments, comment, :include_id => false) { |cf| concat cf.text_field(:name) } end end expected = whole_form('/posts/123', 'edit_post_123', 'edit_post', :method => 'put') do '<input name="post[title]" size="30" type="text" id="post_title" value="Hello World" />' + '<input id="post_author_attributes_name" name="post[author_attributes][name]" size="30" type="text" value="author #321" />' + '<input id="post_author_attributes_id" name="post[author_attributes][id]" type="hidden" value="321" />' + '<input id="post_comments_attributes_0_name" name="post[comments_attributes][0][name]" size="30" type="text" value="comment #1" />' + '<input id="post_comments_attributes_1_name" name="post[comments_attributes][1][name]" size="30" type="text" value="comment #2" />' end assert_dom_equal expected, output_buffer end def test_nested_fields_for_with_existing_records_on_a_nested_attributes_collection_association_with_disabled_hidden_id_inherited @post.comments = Array.new(2) { |id| Comment.new(id + 1) } @post.author = Author.new(321) form_for(@post, :include_id => false) do |f| concat f.text_field(:title) concat f.fields_for(:author) { |af| concat af.text_field(:name) } @post.comments.each do |comment| concat f.fields_for(:comments, comment) { |cf| concat cf.text_field(:name) } end end expected = whole_form('/posts/123', 'edit_post_123', 'edit_post', :method => 'put') do '<input name="post[title]" size="30" type="text" id="post_title" value="Hello World" 
/>' + '<input id="post_author_attributes_name" name="post[author_attributes][name]" size="30" type="text" value="author #321" />' + '<input id="post_comments_attributes_0_name" name="post[comments_attributes][0][name]" size="30" type="text" value="comment #1" />' + '<input id="post_comments_attributes_1_name" name="post[comments_attributes][1][name]" size="30" type="text" value="comment #2" />' end assert_dom_equal expected, output_buffer end def test_nested_fields_for_with_existing_records_on_a_nested_attributes_collection_association_with_disabled_hidden_id_override @post.comments = Array.new(2) { |id| Comment.new(id + 1) } @post.author = Author.new(321) form_for(@post, :include_id => false) do |f| concat f.text_field(:title) concat f.fields_for(:author, :include_id => true) { |af| concat af.text_field(:name) } @post.comments.each do |comment| concat f.fields_for(:comments, comment) { |cf| concat cf.text_field(:name) } end end expected = whole_form('/posts/123', 'edit_post_123', 'edit_post', :method => 'put') do '<input name="post[title]" size="30" type="text" id="post_title" value="Hello World" />' + '<input id="post_author_attributes_name" name="post[author_attributes][name]" size="30" type="text" value="author #321" />' + '<input id="post_author_attributes_id" name="post[author_attributes][id]" type="hidden" value="321" />' + '<input id="post_comments_attributes_0_name" name="post[comments_attributes][0][name]" size="30" type="text" value="comment #1" />' + '<input id="post_comments_attributes_1_name" name="post[comments_attributes][1][name]" size="30" type="text" value="comment #2" />' end assert_dom_equal expected, output_buffer end def test_nested_fields_for_with_existing_records_on_a_nested_attributes_collection_association_using_erb_and_inline_block @post.comments = Array.new(2) { |id| Comment.new(id + 1) } form_for(@post) do |f| concat f.text_field(:title) @post.comments.each do |comment| concat f.fields_for(:comments, comment) { |cf| 
cf.text_field(:name) } end end expected = whole_form('/posts/123', 'edit_post_123', 'edit_post', :method => 'put') do '<input name="post[title]" size="30" type="text" id="post_title" value="Hello World" />' + '<input id="post_comments_attributes_0_name" name="post[comments_attributes][0][name]" size="30" type="text" value="comment #1" />' + '<input id="post_comments_attributes_0_id" name="post[comments_attributes][0][id]" type="hidden" value="1" />' + '<input id="post_comments_attributes_1_name" name="post[comments_attributes][1][name]" size="30" type="text" value="comment #2" />' + '<input id="post_comments_attributes_1_id" name="post[comments_attributes][1][id]" type="hidden" value="2" />' end assert_dom_equal expected, output_buffer end def test_nested_fields_for_with_existing_records_on_a_nested_attributes_collection_association_with_explicit_hidden_field_placement @post.comments = Array.new(2) { |id| Comment.new(id + 1) } form_for(@post) do |f| concat f.text_field(:title) @post.comments.each do |comment| concat f.fields_for(:comments, comment) { |cf| concat cf.hidden_field(:id) concat cf.text_field(:name) } end end expected = whole_form('/posts/123', 'edit_post_123', 'edit_post', :method => 'put') do '<input name="post[title]" size="30" type="text" id="post_title" value="Hello World" />' + '<input id="post_comments_attributes_0_id" name="post[comments_attributes][0][id]" type="hidden" value="1" />' + '<input id="post_comments_attributes_0_name" name="post[comments_attributes][0][name]" size="30" type="text" value="comment #1" />' + '<input id="post_comments_attributes_1_id" name="post[comments_attributes][1][id]" type="hidden" value="2" />' + '<input id="post_comments_attributes_1_name" name="post[comments_attributes][1][name]" size="30" type="text" value="comment #2" />' end assert_dom_equal expected, output_buffer end def test_nested_fields_for_with_new_records_on_a_nested_attributes_collection_association @post.comments = [Comment.new, Comment.new] 
form_for(@post) do |f| concat f.text_field(:title) @post.comments.each do |comment| concat f.fields_for(:comments, comment) { |cf| concat cf.text_field(:name) } end end expected = whole_form('/posts/123', 'edit_post_123', 'edit_post', :method => 'put') do '<input name="post[title]" size="30" type="text" id="post_title" value="Hello World" />' + '<input id="post_comments_attributes_0_name" name="post[comments_attributes][0][name]" size="30" type="text" value="new comment" />' + '<input id="post_comments_attributes_1_name" name="post[comments_attributes][1][name]" size="30" type="text" value="new comment" />' end assert_dom_equal expected, output_buffer end def test_nested_fields_for_with_existing_and_new_records_on_a_nested_attributes_collection_association @post.comments = [Comment.new(321), Comment.new] form_for(@post) do |f| concat f.text_field(:title) @post.comments.each do |comment| concat f.fields_for(:comments, comment) { |cf| concat cf.text_field(:name) } end end expected = whole_form('/posts/123', 'edit_post_123', 'edit_post', :method => 'put') do '<input name="post[title]" size="30" type="text" id="post_title" value="Hello World" />' + '<input id="post_comments_attributes_0_name" name="post[comments_attributes][0][name]" size="30" type="text" value="comment #321" />' + '<input id="post_comments_attributes_0_id" name="post[comments_attributes][0][id]" type="hidden" value="321" />' + '<input id="post_comments_attributes_1_name" name="post[comments_attributes][1][name]" size="30" type="text" value="new comment" />' end assert_dom_equal expected, output_buffer end def test_nested_fields_for_with_an_empty_supplied_attributes_collection form_for(@post) do |f| concat f.text_field(:title) f.fields_for(:comments, []) do |cf| concat cf.text_field(:name) end end expected = whole_form('/posts/123', 'edit_post_123', 'edit_post', :method => 'put') do '<input name="post[title]" size="30" type="text" id="post_title" value="Hello World" />' end assert_dom_equal expected, 
output_buffer end def test_nested_fields_for_with_existing_records_on_a_supplied_nested_attributes_collection @post.comments = Array.new(2) { |id| Comment.new(id + 1) } form_for(@post) do |f| concat f.text_field(:title) concat f.fields_for(:comments, @post.comments) { |cf| concat cf.text_field(:name) } end expected = whole_form('/posts/123', 'edit_post_123', 'edit_post', :method => 'put') do '<input name="post[title]" size="30" type="text" id="post_title" value="Hello World" />' + '<input id="post_comments_attributes_0_name" name="post[comments_attributes][0][name]" size="30" type="text" value="comment #1" />' + '<input id="post_comments_attributes_0_id" name="post[comments_attributes][0][id]" type="hidden" value="1" />' + '<input id="post_comments_attributes_1_name" name="post[comments_attributes][1][name]" size="30" type="text" value="comment #2" />' + '<input id="post_comments_attributes_1_id" name="post[comments_attributes][1][id]" type="hidden" value="2" />' end assert_dom_equal expected, output_buffer end def test_nested_fields_for_arel_like @post.comments = ArelLike.new form_for(@post) do |f| concat f.text_field(:title) concat f.fields_for(:comments, @post.comments) { |cf| concat cf.text_field(:name) } end expected = whole_form('/posts/123', 'edit_post_123', 'edit_post', :method => 'put') do '<input name="post[title]" size="30" type="text" id="post_title" value="Hello World" />' + '<input id="post_comments_attributes_0_name" name="post[comments_attributes][0][name]" size="30" type="text" value="comment #1" />' + '<input id="post_comments_attributes_0_id" name="post[comments_attributes][0][id]" type="hidden" value="1" />' + '<input id="post_comments_attributes_1_name" name="post[comments_attributes][1][name]" size="30" type="text" value="comment #2" />' + '<input id="post_comments_attributes_1_id" name="post[comments_attributes][1][id]" type="hidden" value="2" />' end assert_dom_equal expected, output_buffer end def 
test_nested_fields_for_with_existing_records_on_a_supplied_nested_attributes_collection_different_from_record_one comments = Array.new(2) { |id| Comment.new(id + 1) } @post.comments = [] form_for(@post) do |f| concat f.text_field(:title) concat f.fields_for(:comments, comments) { |cf| concat cf.text_field(:name) } end expected = whole_form('/posts/123', 'edit_post_123', 'edit_post', :method => 'put') do '<input name="post[title]" size="30" type="text" id="post_title" value="Hello World" />' + '<input id="post_comments_attributes_0_name" name="post[comments_attributes][0][name]" size="30" type="text" value="comment #1" />' + '<input id="post_comments_attributes_0_id" name="post[comments_attributes][0][id]" type="hidden" value="1" />' + '<input id="post_comments_attributes_1_name" name="post[comments_attributes][1][name]" size="30" type="text" value="comment #2" />' + '<input id="post_comments_attributes_1_id" name="post[comments_attributes][1][id]" type="hidden" value="2" />' end assert_dom_equal expected, output_buffer end def test_nested_fields_for_on_a_nested_attributes_collection_association_yields_only_builder @post.comments = [Comment.new(321), Comment.new] yielded_comments = [] form_for(@post) do |f| concat f.text_field(:title) concat f.fields_for(:comments) { |cf| concat cf.text_field(:name) yielded_comments << cf.object } end expected = whole_form('/posts/123', 'edit_post_123', 'edit_post', :method => 'put') do '<input name="post[title]" size="30" type="text" id="post_title" value="Hello World" />' + '<input id="post_comments_attributes_0_name" name="post[comments_attributes][0][name]" size="30" type="text" value="comment #321" />' + '<input id="post_comments_attributes_0_id" name="post[comments_attributes][0][id]" type="hidden" value="321" />' + '<input id="post_comments_attributes_1_name" name="post[comments_attributes][1][name]" size="30" type="text" value="new comment" />' end assert_dom_equal expected, output_buffer assert_equal yielded_comments, 
@post.comments end def test_nested_fields_for_with_child_index_option_override_on_a_nested_attributes_collection_association @post.comments = [] form_for(@post) do |f| concat f.fields_for(:comments, Comment.new(321), :child_index => 'abc') { |cf| concat cf.text_field(:name) } end expected = whole_form('/posts/123', 'edit_post_123', 'edit_post', :method => 'put') do '<input id="post_comments_attributes_abc_name" name="post[comments_attributes][abc][name]" size="30" type="text" value="comment #321" />' + '<input id="post_comments_attributes_abc_id" name="post[comments_attributes][abc][id]" type="hidden" value="321" />' end assert_dom_equal expected, output_buffer end def test_nested_fields_uses_unique_indices_for_different_collection_associations @post.comments = [Comment.new(321)] @post.tags = [Tag.new(123), Tag.new(456)] @post.comments[0].relevances = [] @post.tags[0].relevances = [] @post.tags[1].relevances = [] form_for(@post) do |f| concat f.fields_for(:comments, @post.comments[0]) { |cf| concat cf.text_field(:name) concat cf.fields_for(:relevances, CommentRelevance.new(314)) { |crf| concat crf.text_field(:value) } } concat f.fields_for(:tags, @post.tags[0]) { |tf| concat tf.text_field(:value) concat tf.fields_for(:relevances, TagRelevance.new(3141)) { |trf| concat trf.text_field(:value) } } concat f.fields_for('tags', @post.tags[1]) { |tf| concat tf.text_field(:value) concat tf.fields_for(:relevances, TagRelevance.new(31415)) { |trf| concat trf.text_field(:value) } } end expected = whole_form('/posts/123', 'edit_post_123', 'edit_post', :method => 'put') do '<input id="post_comments_attributes_0_name" name="post[comments_attributes][0][name]" size="30" type="text" value="comment #321" />' + '<input id="post_comments_attributes_0_relevances_attributes_0_value" name="post[comments_attributes][0][relevances_attributes][0][value]" size="30" type="text" value="commentrelevance #314" />' + '<input id="post_comments_attributes_0_relevances_attributes_0_id" 
name="post[comments_attributes][0][relevances_attributes][0][id]" type="hidden" value="314" />' + '<input id="post_comments_attributes_0_id" name="post[comments_attributes][0][id]" type="hidden" value="321" />' + '<input id="post_tags_attributes_0_value" name="post[tags_attributes][0][value]" size="30" type="text" value="tag #123" />' + '<input id="post_tags_attributes_0_relevances_attributes_0_value" name="post[tags_attributes][0][relevances_attributes][0][value]" size="30" type="text" value="tagrelevance #3141" />' + '<input id="post_tags_attributes_0_relevances_attributes_0_id" name="post[tags_attributes][0][relevances_attributes][0][id]" type="hidden" value="3141" />' + '<input id="post_tags_attributes_0_id" name="post[tags_attributes][0][id]" type="hidden" value="123" />' + '<input id="post_tags_attributes_1_value" name="post[tags_attributes][1][value]" size="30" type="text" value="tag #456" />' + '<input id="post_tags_attributes_1_relevances_attributes_0_value" name="post[tags_attributes][1][relevances_attributes][0][value]" size="30" type="text" value="tagrelevance #31415" />' + '<input id="post_tags_attributes_1_relevances_attributes_0_id" name="post[tags_attributes][1][relevances_attributes][0][id]" type="hidden" value="31415" />' + '<input id="post_tags_attributes_1_id" name="post[tags_attributes][1][id]" type="hidden" value="456" />' end assert_dom_equal expected, output_buffer end def test_nested_fields_for_with_hash_like_model @author = HashBackedAuthor.new form_for(@post) do |f| concat f.fields_for(:author, @author) { |af| concat af.text_field(:name) } end expected = whole_form('/posts/123', 'edit_post_123', 'edit_post', :method => 'put') do '<input id="post_author_attributes_name" name="post[author_attributes][name]" size="30" type="text" value="hash backed author" />' end assert_dom_equal expected, output_buffer end def test_fields_for output_buffer = fields_for(:post, @post) do |f| concat f.text_field(:title) concat f.text_area(:body) concat 
f.check_box(:secret) end expected = "<input name='post[title]' size='30' type='text' id='post_title' value='Hello World' />" + "<textarea name='post[body]' id='post_body' rows='20' cols='40'>Back to the hill and over it again!</textarea>" + "<input name='post[secret]' type='hidden' value='0' />" + "<input name='post[secret]' checked='checked' type='checkbox' id='post_secret' value='1' />" assert_dom_equal expected, output_buffer end def test_fields_for_with_index output_buffer = fields_for("post[]", @post) do |f| concat f.text_field(:title) concat f.text_area(:body) concat f.check_box(:secret) end expected = "<input name='post[123][title]' size='30' type='text' id='post_123_title' value='Hello World' />" + "<textarea name='post[123][body]' id='post_123_body' rows='20' cols='40'>Back to the hill and over it again!</textarea>" + "<input name='post[123][secret]' type='hidden' value='0' />" + "<input name='post[123][secret]' checked='checked' type='checkbox' id='post_123_secret' value='1' />" assert_dom_equal expected, output_buffer end def test_fields_for_with_nil_index_option_override output_buffer = fields_for("post[]", @post, :index => nil) do |f| concat f.text_field(:title) concat f.text_area(:body) concat f.check_box(:secret) end expected = "<input name='post[][title]' size='30' type='text' id='post__title' value='Hello World' />" + "<textarea name='post[][body]' id='post__body' rows='20' cols='40'>Back to the hill and over it again!</textarea>" + "<input name='post[][secret]' type='hidden' value='0' />" + "<input name='post[][secret]' checked='checked' type='checkbox' id='post__secret' value='1' />" assert_dom_equal expected, output_buffer end def test_fields_for_with_index_option_override output_buffer = fields_for("post[]", @post, :index => "abc") do |f| concat f.text_field(:title) concat f.text_area(:body) concat f.check_box(:secret) end expected = "<input name='post[abc][title]' size='30' type='text' id='post_abc_title' value='Hello World' />" + "<textarea 
name='post[abc][body]' id='post_abc_body' rows='20' cols='40'>Back to the hill and over it again!</textarea>" + "<input name='post[abc][secret]' type='hidden' value='0' />" + "<input name='post[abc][secret]' checked='checked' type='checkbox' id='post_abc_secret' value='1' />" assert_dom_equal expected, output_buffer end def test_fields_for_without_object output_buffer = fields_for(:post) do |f| concat f.text_field(:title) concat f.text_area(:body) concat f.check_box(:secret) end expected = "<input name='post[title]' size='30' type='text' id='post_title' value='Hello World' />" + "<textarea name='post[body]' id='post_body' rows='20' cols='40'>Back to the hill and over it again!</textarea>" + "<input name='post[secret]' type='hidden' value='0' />" + "<input name='post[secret]' checked='checked' type='checkbox' id='post_secret' value='1' />" assert_dom_equal expected, output_buffer end def test_fields_for_with_only_object output_buffer = fields_for(@post) do |f| concat f.text_field(:title) concat f.text_area(:body) concat f.check_box(:secret) end expected = "<input name='post[title]' size='30' type='text' id='post_title' value='Hello World' />" + "<textarea name='post[body]' id='post_body' rows='20' cols='40'>Back to the hill and over it again!</textarea>" + "<input name='post[secret]' type='hidden' value='0' />" + "<input name='post[secret]' checked='checked' type='checkbox' id='post_secret' value='1' />" assert_dom_equal expected, output_buffer end def test_fields_for_object_with_bracketed_name output_buffer = fields_for("author[post]", @post) do |f| concat f.label(:title) concat f.text_field(:title) end assert_dom_equal "<label for=\"author_post_title\">Title</label>" + "<input name='author[post][title]' size='30' type='text' id='author_post_title' value='Hello World' />", output_buffer end def test_fields_for_object_with_bracketed_name_and_index output_buffer = fields_for("author[post]", @post, :index => 1) do |f| concat f.label(:title) concat f.text_field(:title) 
end assert_dom_equal "<label for=\"author_post_1_title\">Title</label>" + "<input name='author[post][1][title]' size='30' type='text' id='author_post_1_title' value='Hello World' />", output_buffer end def test_form_builder_does_not_have_form_for_method assert ! ActionView::Helpers::FormBuilder.instance_methods.include?('form_for') end def test_form_for_and_fields_for form_for(@post, :as => :post, :html => { :id => 'create-post' }) do |post_form| concat post_form.text_field(:title) concat post_form.text_area(:body) concat fields_for(:parent_post, @post) { |parent_fields| concat parent_fields.check_box(:secret) } end expected = whole_form('/posts/123', 'create-post', 'edit_post', :method => 'put') do "<input name='post[title]' size='30' type='text' id='post_title' value='Hello World' />" + "<textarea name='post[body]' id='post_body' rows='20' cols='40'>Back to the hill and over it again!</textarea>" + "<input name='parent_post[secret]' type='hidden' value='0' />" + "<input name='parent_post[secret]' checked='checked' type='checkbox' id='parent_post_secret' value='1' />" end assert_dom_equal expected, output_buffer end def test_form_for_and_fields_for_with_object form_for(@post, :as => :post, :html => { :id => 'create-post' }) do |post_form| concat post_form.text_field(:title) concat post_form.text_area(:body) concat post_form.fields_for(@comment) { |comment_fields| concat comment_fields.text_field(:name) } end expected = whole_form('/posts/123', 'create-post', 'edit_post', :method => 'put') do "<input name='post[title]' size='30' type='text' id='post_title' value='Hello World' />" + "<textarea name='post[body]' id='post_body' rows='20' cols='40'>Back to the hill and over it again!</textarea>" + "<input name='post[comment][name]' type='text' id='post_comment_name' value='new comment' size='30' />" end assert_dom_equal expected, output_buffer end def test_form_for_and_fields_for_with_non_nested_association_and_without_object form_for(@post) do |f| concat 
f.fields_for(:category) { |c| concat c.text_field(:name) } end expected = whole_form('/posts/123', 'edit_post_123', 'edit_post', 'put') do "<input name='post[category][name]' type='text' size='30' id='post_category_name' />" end assert_dom_equal expected, output_buffer end class LabelledFormBuilder < ActionView::Helpers::FormBuilder (field_helpers - %w(hidden_field)).each do |selector| class_eval <<-RUBY_EVAL, __FILE__, __LINE__ + 1 def #{selector}(field, *args, &proc) ("<label for='\#{field}'>\#{field.to_s.humanize}:</label> " + super + "<br/>").html_safe end RUBY_EVAL end end def test_form_for_with_labelled_builder form_for(@post, :builder => LabelledFormBuilder) do |f| concat f.text_field(:title) concat f.text_area(:body) concat f.check_box(:secret) end expected = whole_form('/posts/123', 'edit_post_123', 'edit_post', :method => 'put') do "<label for='title'>Title:</label> <input name='post[title]' size='30' type='text' id='post_title' value='Hello World' /><br/>" + "<label for='body'>Body:</label> <textarea name='post[body]' id='post_body' rows='20' cols='40'>Back to the hill and over it again!</textarea><br/>" + "<label for='secret'>Secret:</label> <input name='post[secret]' type='hidden' value='0' /><input name='post[secret]' checked='checked' type='checkbox' id='post_secret' value='1' /><br/>" end assert_dom_equal expected, output_buffer end def hidden_fields(method = nil) txt = %{<div style="margin:0;padding:0;display:inline">} txt << %{<input name="utf8" type="hidden" value="&#x2713;" />} if method && !method.to_s.in?(['get', 'post']) txt << %{<input name="_method" type="hidden" value="#{method}" />} end txt << %{</div>} end def form_text(action = "/", id = nil, html_class = nil, remote = nil, multipart = nil, method = nil) txt = %{<form accept-charset="UTF-8" action="#{action}"} txt << %{ enctype="multipart/form-data"} if multipart txt << %{ data-remote="true"} if remote txt << %{ class="#{html_class}"} if html_class txt << %{ id="#{id}"} if id method = 
method.to_s == "get" ? "get" : "post" txt << %{ method="#{method}">} end def whole_form(action = "/", id = nil, html_class = nil, options = nil) contents = block_given? ? yield : "" if options.is_a?(Hash) method, remote, multipart = options.values_at(:method, :remote, :multipart) else method = options end form_text(action, id, html_class, remote, multipart, method) + hidden_fields(method) + contents + "</form>" end def test_default_form_builder old_default_form_builder, ActionView::Base.default_form_builder = ActionView::Base.default_form_builder, LabelledFormBuilder form_for(@post) do |f| concat f.text_field(:title) concat f.text_area(:body) concat f.check_box(:secret) end expected = whole_form('/posts/123', 'edit_post_123', 'edit_post', :method => 'put') do "<label for='title'>Title:</label> <input name='post[title]' size='30' type='text' id='post_title' value='Hello World' /><br/>" + "<label for='body'>Body:</label> <textarea name='post[body]' id='post_body' rows='20' cols='40'>Back to the hill and over it again!</textarea><br/>" + "<label for='secret'>Secret:</label> <input name='post[secret]' type='hidden' value='0' /><input name='post[secret]' checked='checked' type='checkbox' id='post_secret' value='1' /><br/>" end assert_dom_equal expected, output_buffer ensure ActionView::Base.default_form_builder = old_default_form_builder end def test_fields_for_with_labelled_builder output_buffer = fields_for(:post, @post, :builder => LabelledFormBuilder) do |f| concat f.text_field(:title) concat f.text_area(:body) concat f.check_box(:secret) end expected = "<label for='title'>Title:</label> <input name='post[title]' size='30' type='text' id='post_title' value='Hello World' /><br/>" + "<label for='body'>Body:</label> <textarea name='post[body]' id='post_body' rows='20' cols='40'>Back to the hill and over it again!</textarea><br/>" + "<label for='secret'>Secret:</label> <input name='post[secret]' type='hidden' value='0' /><input name='post[secret]' checked='checked' 
type='checkbox' id='post_secret' value='1' /><br/>" assert_dom_equal expected, output_buffer end def test_form_for_with_labelled_builder_with_nested_fields_for_without_options_hash klass = nil form_for(@post, :builder => LabelledFormBuilder) do |f| f.fields_for(:comments, Comment.new) do |nested_fields| klass = nested_fields.class '' end end assert_equal LabelledFormBuilder, klass end def test_form_for_with_labelled_builder_with_nested_fields_for_with_options_hash klass = nil form_for(@post, :builder => LabelledFormBuilder) do |f| f.fields_for(:comments, Comment.new, :index => 'foo') do |nested_fields| klass = nested_fields.class '' end end assert_equal LabelledFormBuilder, klass end def test_form_for_with_labelled_builder_path path = nil form_for(@post, :builder => LabelledFormBuilder) do |f| path = f.to_partial_path '' end assert_equal 'labelled_form', path end class LabelledFormBuilderSubclass < LabelledFormBuilder; end def test_form_for_with_labelled_builder_with_nested_fields_for_with_custom_builder klass = nil form_for(@post, :builder => LabelledFormBuilder) do |f| f.fields_for(:comments, Comment.new, :builder => LabelledFormBuilderSubclass) do |nested_fields| klass = nested_fields.class '' end end assert_equal LabelledFormBuilderSubclass, klass end def test_form_for_with_html_options_adds_options_to_form_tag form_for(@post, :html => {:id => 'some_form', :class => 'some_class'}) do |f| end expected = whole_form("/posts/123", "some_form", "some_class", 'put') assert_dom_equal expected, output_buffer end def test_form_for_with_string_url_option form_for(@post, :url => 'http://www.otherdomain.com') do |f| end assert_equal whole_form("http://www.otherdomain.com", 'edit_post_123', 'edit_post', 'put'), output_buffer end def test_form_for_with_hash_url_option form_for(@post, :url => {:controller => 'controller', :action => 'action'}) do |f| end assert_equal 'controller', @url_for_options[:controller] assert_equal 'action', @url_for_options[:action] end def 
test_form_for_with_record_url_option form_for(@post, :url => @post) do |f| end expected = whole_form("/posts/123", 'edit_post_123', 'edit_post', 'put') assert_equal expected, output_buffer end def test_form_for_with_existing_object form_for(@post) do |f| end expected = whole_form("/posts/123", "edit_post_123", "edit_post", "put") assert_equal expected, output_buffer end def test_form_for_with_new_object post = Post.new post.persisted = false def post.to_key; nil; end form_for(post) do |f| end expected = whole_form("/posts", "new_post", "new_post") assert_equal expected, output_buffer end def test_form_for_with_existing_object_in_list @comment.save form_for([@post, @comment]) {} expected = whole_form(post_comment_path(@post, @comment), "edit_comment_1", "edit_comment", "put") assert_dom_equal expected, output_buffer end def test_form_for_with_new_object_in_list form_for([@post, @comment]) {} expected = whole_form(post_comments_path(@post), "new_comment", "new_comment") assert_dom_equal expected, output_buffer end def test_form_for_with_existing_object_and_namespace_in_list @comment.save form_for([:admin, @post, @comment]) {} expected = whole_form(admin_post_comment_path(@post, @comment), "edit_comment_1", "edit_comment", "put") assert_dom_equal expected, output_buffer end def test_form_for_with_new_object_and_namespace_in_list form_for([:admin, @post, @comment]) {} expected = whole_form(admin_post_comments_path(@post), "new_comment", "new_comment") assert_dom_equal expected, output_buffer end def test_form_for_with_existing_object_and_custom_url form_for(@post, :url => "/super_posts") do |f| end expected = whole_form("/super_posts", "edit_post_123", "edit_post", "put") assert_equal expected, output_buffer end def test_fields_for_returns_block_result output = fields_for(Post.new) { |f| "fields" } assert_equal "fields", output end protected def protect_against_forgery? false end end
# NOTE(review): the lines below are a whitespace-collapsed copy (newlines
# folded to spaces) of ActiveRecord's reflection.rb — the PRE-change version,
# still carrying "@api public"/"@api private" annotations — immediately
# followed, fused onto the final line, by a commit-message fragment
# ('[ci skip] Remove useless "@api public/private" ...'). As written this
# text is not parseable Ruby: each mid-line `#` marker now comments out all
# of the code that originally followed it on later source lines. The text is
# reproduced byte-for-byte. Before editing: restore the original line
# breaks, and move the trailing commit-message prose out of the source —
# it is documentation about the change, not code.
require 'thread' require 'active_support/core_ext/string/filters' module ActiveRecord # = Active Record Reflection module Reflection # :nodoc: extend ActiveSupport::Concern included do class_attribute :_reflections class_attribute :aggregate_reflections self._reflections = {} self.aggregate_reflections = {} end def self.create(macro, name, scope, options, ar) klass = case macro when :composed_of AggregateReflection when :has_many HasManyReflection when :has_one HasOneReflection when :belongs_to BelongsToReflection else raise "Unsupported Macro: #{macro}" end reflection = klass.new(name, scope, options, ar) options[:through] ? ThroughReflection.new(reflection) : reflection end def self.add_reflection(ar, name, reflection) ar.clear_reflections_cache ar._reflections = ar._reflections.merge(name.to_s => reflection) end def self.add_aggregate_reflection(ar, name, reflection) ar.aggregate_reflections = ar.aggregate_reflections.merge(name.to_s => reflection) end # \Reflection enables the ability to examine the associations and aggregations of # Active Record classes and objects. This information, for example, # can be used in a form builder that takes an Active Record object # and creates input fields for all of the attributes depending on their type # and displays the associations to other objects. # # MacroReflection class has info for AggregateReflection and AssociationReflection # classes. module ClassMethods # Returns an array of AggregateReflection objects for all the aggregations in the class. def reflect_on_all_aggregations aggregate_reflections.values end # Returns the AggregateReflection object for the named +aggregation+ (use the symbol). # # Account.reflect_on_aggregation(:balance) # => the balance AggregateReflection # def reflect_on_aggregation(aggregation) aggregate_reflections[aggregation.to_s] end # Returns a Hash of name of the reflection as the key and a AssociationReflection as the value. 
# # Account.reflections # => {"balance" => AggregateReflection} # # @api public def reflections @__reflections ||= begin ref = {} _reflections.each do |name, reflection| parent_reflection = reflection.parent_reflection if parent_reflection parent_name = parent_reflection.name ref[parent_name.to_s] = parent_reflection else ref[name] = reflection end end ref end end # Returns an array of AssociationReflection objects for all the # associations in the class. If you only want to reflect on a certain # association type, pass in the symbol (<tt>:has_many</tt>, <tt>:has_one</tt>, # <tt>:belongs_to</tt>) as the first parameter. # # Example: # # Account.reflect_on_all_associations # returns an array of all associations # Account.reflect_on_all_associations(:has_many) # returns an array of all has_many associations # # @api public def reflect_on_all_associations(macro = nil) association_reflections = reflections.values association_reflections.select! { |reflection| reflection.macro == macro } if macro association_reflections end # Returns the AssociationReflection object for the +association+ (use the symbol). # # Account.reflect_on_association(:owner) # returns the owner AssociationReflection # Invoice.reflect_on_association(:line_items).macro # returns :has_many # # @api public def reflect_on_association(association) reflections[association.to_s] end # @api private def _reflect_on_association(association) #:nodoc: _reflections[association.to_s] end # Returns an array of AssociationReflection objects for all associations which have <tt>:autosave</tt> enabled. 
# # @api public def reflect_on_all_autosave_associations reflections.values.select { |reflection| reflection.options[:autosave] } end def clear_reflections_cache #:nodoc: @__reflections = nil end end # Holds all the methods that are shared between MacroReflection, AssociationReflection # and ThroughReflection class AbstractReflection # :nodoc: def table_name klass.table_name end # Returns a new, unsaved instance of the associated class. +attributes+ will # be passed to the class's constructor. def build_association(attributes, &block) klass.new(attributes, &block) end def quoted_table_name klass.quoted_table_name end def primary_key_type klass.type_for_attribute(klass.primary_key) end # Returns the class name for the macro. # # <tt>composed_of :balance, class_name: 'Money'</tt> returns <tt>'Money'</tt> # <tt>has_many :clients</tt> returns <tt>'Client'</tt> def class_name @class_name ||= (options[:class_name] || derive_class_name).to_s end JoinKeys = Struct.new(:key, :foreign_key) # :nodoc: def join_keys(association_klass) JoinKeys.new(foreign_key, active_record_primary_key) end def constraints scope_chain.flatten end def counter_cache_column if belongs_to? if options[:counter_cache] == true "#{active_record.name.demodulize.underscore.pluralize}_count" elsif options[:counter_cache] options[:counter_cache].to_s end else options[:counter_cache] ? options[:counter_cache].to_s : "#{name}_count" end end # This shit is nasty. We need to avoid the following situation: # # * An associated record is deleted via record.destroy # * Hence the callbacks run, and they find a belongs_to on the record with a # :counter_cache options which points back at our owner. So they update the # counter cache. # * In which case, we must make sure to *not* update the counter cache, or else # it will be decremented twice. # # Hence this method. 
def inverse_which_updates_counter_cache return @inverse_which_updates_counter_cache if defined?(@inverse_which_updates_counter_cache) @inverse_which_updates_counter_cache = klass.reflect_on_all_associations(:belongs_to).find do |inverse| inverse.counter_cache_column == counter_cache_column end end alias inverse_updates_counter_cache? inverse_which_updates_counter_cache def inverse_updates_counter_in_memory? inverse_of && inverse_which_updates_counter_cache == inverse_of end # Returns whether a counter cache should be used for this association. # # The counter_cache option must be given on either the owner or inverse # association, and the column must be present on the owner. def has_cached_counter? options[:counter_cache] || inverse_which_updates_counter_cache && inverse_which_updates_counter_cache.options[:counter_cache] && !!active_record.columns_hash[counter_cache_column] end def counter_must_be_updated_by_has_many? !inverse_updates_counter_in_memory? && has_cached_counter? end def alias_candidate(name) "#{plural_name}_#{name}" end end # Base class for AggregateReflection and AssociationReflection. Objects of # AggregateReflection and AssociationReflection are returned by the Reflection::ClassMethods. # # MacroReflection # AggregateReflection # AssociationReflection # HasManyReflection # HasOneReflection # BelongsToReflection # ThroughReflection class MacroReflection < AbstractReflection # Returns the name of the macro. # # <tt>composed_of :balance, class_name: 'Money'</tt> returns <tt>:balance</tt> # <tt>has_many :clients</tt> returns <tt>:clients</tt> attr_reader :name attr_reader :scope # Returns the hash of options used for the macro. 
# # <tt>composed_of :balance, class_name: 'Money'</tt> returns <tt>{ class_name: "Money" }</tt> # <tt>has_many :clients</tt> returns <tt>{}</tt> attr_reader :options attr_reader :active_record attr_reader :plural_name # :nodoc: def initialize(name, scope, options, active_record) @name = name @scope = scope @options = options @active_record = active_record @klass = options[:anonymous_class] @plural_name = active_record.pluralize_table_names ? name.to_s.pluralize : name.to_s end def autosave=(autosave) @automatic_inverse_of = false @options[:autosave] = autosave parent_reflection = self.parent_reflection if parent_reflection parent_reflection.autosave = autosave end end # Returns the class for the macro. # # <tt>composed_of :balance, class_name: 'Money'</tt> returns the Money class # <tt>has_many :clients</tt> returns the Client class def klass @klass ||= compute_class(class_name) end def compute_class(name) name.constantize end # Returns +true+ if +self+ and +other_aggregation+ have the same +name+ attribute, +active_record+ attribute, # and +other_aggregation+ has an options hash assigned to it. def ==(other_aggregation) super || other_aggregation.kind_of?(self.class) && name == other_aggregation.name && !other_aggregation.options.nil? && active_record == other_aggregation.active_record end private def derive_class_name name.to_s.camelize end end # Holds all the meta-data about an aggregation as it was specified in the # Active Record class. class AggregateReflection < MacroReflection #:nodoc: def mapping mapping = options[:mapping] || [name, name] mapping.first.is_a?(Array) ? mapping : [mapping] end end # Holds all the meta-data about an association as it was specified in the # Active Record class. class AssociationReflection < MacroReflection #:nodoc: # Returns the target association's class. 
# # class Author < ActiveRecord::Base # has_many :books # end # # Author.reflect_on_association(:books).klass # # => Book # # <b>Note:</b> Do not call +klass.new+ or +klass.create+ to instantiate # a new association object. Use +build_association+ or +create_association+ # instead. This allows plugins to hook into association object creation. def klass @klass ||= compute_class(class_name) end def compute_class(name) active_record.send(:compute_type, name) end attr_reader :type, :foreign_type attr_accessor :parent_reflection # Reflection def initialize(name, scope, options, active_record) super @automatic_inverse_of = nil @type = options[:as] && (options[:foreign_type] || "#{options[:as]}_type") @foreign_type = options[:foreign_type] || "#{name}_type" @constructable = calculate_constructable(macro, options) @association_scope_cache = {} @scope_lock = Mutex.new end def association_scope_cache(conn, owner) key = conn.prepared_statements if polymorphic? key = [key, owner._read_attribute(@foreign_type)] end @association_scope_cache[key] ||= @scope_lock.synchronize { @association_scope_cache[key] ||= yield } end def constructable? # :nodoc: @constructable end def join_table @join_table ||= options[:join_table] || derive_join_table end def foreign_key @foreign_key ||= options[:foreign_key] || derive_foreign_key end def association_foreign_key @association_foreign_key ||= options[:association_foreign_key] || class_name.foreign_key end # klass option is necessary to support loading polymorphic associations def association_primary_key(klass = nil) options[:primary_key] || primary_key(klass || self.klass) end def active_record_primary_key @active_record_primary_key ||= options[:primary_key] || primary_key(active_record) end def check_validity! check_validity_of_inverse! end def check_validity_of_inverse! unless polymorphic? if has_inverse? && inverse_of.nil? raise InverseOfAssociationNotFoundError.new(self) end end end def check_preloadable! 
return unless scope if scope.arity > 0 raise ArgumentError, <<-MSG.squish The association scope '#{name}' is instance dependent (the scope block takes an argument). Preloading instance dependent scopes is not supported. MSG end end alias :check_eager_loadable! :check_preloadable! def join_id_for(owner) # :nodoc: owner[active_record_primary_key] end def through_reflection nil end def source_reflection self end # A chain of reflections from this one back to the owner. For more see the explanation in # ThroughReflection. def chain [self] end # This is for clearing cache on the reflection. Useful for tests that need to compare # SQL queries on associations. def clear_association_scope_cache # :nodoc: @association_scope_cache.clear end def nested? false end # An array of arrays of scopes. Each item in the outside array corresponds to a reflection # in the #chain. def scope_chain scope ? [[scope]] : [[]] end def has_inverse? inverse_name end def inverse_of return unless inverse_name @inverse_of ||= klass._reflect_on_association inverse_name end def polymorphic_inverse_of(associated_class) if has_inverse? if inverse_relationship = associated_class._reflect_on_association(options[:inverse_of]) inverse_relationship else raise InverseOfAssociationNotFoundError.new(self, associated_class) end end end # Returns the macro type. # # <tt>has_many :clients</tt> returns <tt>:has_many</tt> def macro; raise NotImplementedError; end # Returns whether or not this association reflection is for a collection # association. Returns +true+ if the +macro+ is either +has_many+ or # +has_and_belongs_to_many+, +false+ otherwise. def collection? false end # Returns whether or not the association should be validated as part of # the parent's validation. 
# # Unless you explicitly disable validation with # <tt>validate: false</tt>, validation will take place when: # # * you explicitly enable validation; <tt>validate: true</tt> # * you use autosave; <tt>autosave: true</tt> # * the association is a +has_many+ association def validate? !options[:validate].nil? ? options[:validate] : (options[:autosave] == true || collection?) end # Returns +true+ if +self+ is a +belongs_to+ reflection. def belongs_to?; false; end # Returns +true+ if +self+ is a +has_one+ reflection. def has_one?; false; end def association_class case macro when :belongs_to if polymorphic? Associations::BelongsToPolymorphicAssociation else Associations::BelongsToAssociation end when :has_many if options[:through] Associations::HasManyThroughAssociation else Associations::HasManyAssociation end when :has_one if options[:through] Associations::HasOneThroughAssociation else Associations::HasOneAssociation end end end def polymorphic? options[:polymorphic] end VALID_AUTOMATIC_INVERSE_MACROS = [:has_many, :has_one, :belongs_to] INVALID_AUTOMATIC_INVERSE_OPTIONS = [:conditions, :through, :polymorphic, :foreign_key] protected def actual_source_reflection # FIXME: this is a horrible name self end private def calculate_constructable(macro, options) case macro when :belongs_to !polymorphic? when :has_one !options[:through] else true end end # Attempts to find the inverse association name automatically. # If it cannot find a suitable inverse association name, it returns # nil. def inverse_name options.fetch(:inverse_of) do if @automatic_inverse_of == false nil else @automatic_inverse_of ||= automatic_inverse_of end end end # returns either nil or the inverse association name that it finds. 
def automatic_inverse_of if can_find_inverse_of_automatically?(self) inverse_name = ActiveSupport::Inflector.underscore(options[:as] || active_record.name.demodulize).to_sym begin reflection = klass._reflect_on_association(inverse_name) rescue NameError # Give up: we couldn't compute the klass type so we won't be able # to find any associations either. reflection = false end if valid_inverse_reflection?(reflection) return inverse_name end end false end # Checks if the inverse reflection that is returned from the # +automatic_inverse_of+ method is a valid reflection. We must # make sure that the reflection's active_record name matches up # with the current reflection's klass name. # # Note: klass will always be valid because when there's a NameError # from calling +klass+, +reflection+ will already be set to false. def valid_inverse_reflection?(reflection) reflection && klass.name == reflection.active_record.name && can_find_inverse_of_automatically?(reflection) end # Checks to see if the reflection doesn't have any options that prevent # us from being able to guess the inverse automatically. First, the # <tt>inverse_of</tt> option cannot be set to false. Second, we must # have <tt>has_many</tt>, <tt>has_one</tt>, <tt>belongs_to</tt> associations. # Third, we must not have options such as <tt>:polymorphic</tt> or # <tt>:foreign_key</tt> which prevent us from correctly guessing the # inverse association. # # Anything with a scope can additionally ruin our attempt at finding an # inverse, so we exclude reflections with scopes. def can_find_inverse_of_automatically?(reflection) reflection.options[:inverse_of] != false && VALID_AUTOMATIC_INVERSE_MACROS.include?(reflection.macro) && !INVALID_AUTOMATIC_INVERSE_OPTIONS.any? { |opt| reflection.options[opt] } && !reflection.scope end def derive_class_name class_name = name.to_s class_name = class_name.singularize if collection? class_name.camelize end def derive_foreign_key if belongs_to? 
"#{name}_id" elsif options[:as] "#{options[:as]}_id" else active_record.name.foreign_key end end def derive_join_table ModelSchema.derive_join_table_name active_record.table_name, klass.table_name end def primary_key(klass) klass.primary_key || raise(UnknownPrimaryKey.new(klass)) end end class HasManyReflection < AssociationReflection # :nodoc: def initialize(name, scope, options, active_record) super(name, scope, options, active_record) end def macro; :has_many; end def collection?; true; end end class HasOneReflection < AssociationReflection # :nodoc: def initialize(name, scope, options, active_record) super(name, scope, options, active_record) end def macro; :has_one; end def has_one?; true; end end class BelongsToReflection < AssociationReflection # :nodoc: def initialize(name, scope, options, active_record) super(name, scope, options, active_record) end def macro; :belongs_to; end def belongs_to?; true; end def join_keys(association_klass) key = polymorphic? ? association_primary_key(association_klass) : association_primary_key JoinKeys.new(key, foreign_key) end def join_id_for(owner) # :nodoc: owner[foreign_key] end end class HasAndBelongsToManyReflection < AssociationReflection # :nodoc: def initialize(name, scope, options, active_record) super end def macro; :has_and_belongs_to_many; end def collection? true end end # Holds all the meta-data about a :through association as it was specified # in the Active Record class. class ThroughReflection < AbstractReflection #:nodoc: attr_reader :delegate_reflection delegate :foreign_key, :foreign_type, :association_foreign_key, :active_record_primary_key, :type, :to => :source_reflection def initialize(delegate_reflection) @delegate_reflection = delegate_reflection @klass = delegate_reflection.options[:anonymous_class] @source_reflection_name = delegate_reflection.options[:source] end def klass @klass ||= delegate_reflection.compute_class(class_name) end # Returns the source of the through reflection. 
It checks both a singularized # and pluralized form for <tt>:belongs_to</tt> or <tt>:has_many</tt>. # # class Post < ActiveRecord::Base # has_many :taggings # has_many :tags, through: :taggings # end # # class Tagging < ActiveRecord::Base # belongs_to :post # belongs_to :tag # end # # tags_reflection = Post.reflect_on_association(:tags) # tags_reflection.source_reflection # # => <ActiveRecord::Reflection::BelongsToReflection: @name=:tag, @active_record=Tagging, @plural_name="tags"> # def source_reflection through_reflection.klass._reflect_on_association(source_reflection_name) end # Returns the AssociationReflection object specified in the <tt>:through</tt> option # of a HasManyThrough or HasOneThrough association. # # class Post < ActiveRecord::Base # has_many :taggings # has_many :tags, through: :taggings # end # # tags_reflection = Post.reflect_on_association(:tags) # tags_reflection.through_reflection # # => <ActiveRecord::Reflection::HasManyReflection: @name=:taggings, @active_record=Post, @plural_name="taggings"> # def through_reflection active_record._reflect_on_association(options[:through]) end # Returns an array of reflections which are involved in this association. Each item in the # array corresponds to a table which will be part of the query for this association. # # The chain is built by recursively calling #chain on the source reflection and the through # reflection. The base case for the recursion is a normal association, which just returns # [self] as its #chain. 
# # class Post < ActiveRecord::Base # has_many :taggings # has_many :tags, through: :taggings # end # # tags_reflection = Post.reflect_on_association(:tags) # tags_reflection.chain # # => [<ActiveRecord::Reflection::ThroughReflection: @delegate_reflection=#<ActiveRecord::Reflection::HasManyReflection: @name=:tags...>, # <ActiveRecord::Reflection::HasManyReflection: @name=:taggings, @options={}, @active_record=Post>] # def chain @chain ||= begin a = source_reflection.chain b = through_reflection.chain.map(&:dup) if options[:source_type] b[0] = PolymorphicReflection.new(b[0], self) end chain = a + b chain[0] = self # Use self so we don't lose the information from :source_type chain end end # This is for clearing cache on the reflection. Useful for tests that need to compare # SQL queries on associations. def clear_association_scope_cache # :nodoc: @chain = nil delegate_reflection.clear_association_scope_cache source_reflection.clear_association_scope_cache through_reflection.clear_association_scope_cache end # Consider the following example: # # class Person # has_many :articles # has_many :comment_tags, through: :articles # end # # class Article # has_many :comments # has_many :comment_tags, through: :comments, source: :tags # end # # class Comment # has_many :tags # end # # There may be scopes on Person.comment_tags, Article.comment_tags and/or Comment.tags, # but only Comment.tags will be represented in the #chain. So this method creates an array # of scopes corresponding to the chain. 
def scope_chain @scope_chain ||= begin scope_chain = source_reflection.scope_chain.map(&:dup) # Add to it the scope from this reflection (if any) scope_chain.first << scope if scope through_scope_chain = through_reflection.scope_chain.map(&:dup) if options[:source_type] type = foreign_type source_type = options[:source_type] through_scope_chain.first << lambda { |object| where(type => source_type) } end # Recursively fill out the rest of the array from the through reflection scope_chain + through_scope_chain end end def join_keys(association_klass) source_reflection.join_keys(association_klass) end # A through association is nested if there would be more than one join table def nested? chain.length > 2 end # We want to use the klass from this reflection, rather than just delegate straight to # the source_reflection, because the source_reflection may be polymorphic. We still # need to respect the source_reflection's :primary_key option, though. def association_primary_key(klass = nil) # Get the "actual" source reflection if the immediate source reflection has a # source reflection itself actual_source_reflection.options[:primary_key] || primary_key(klass || self.klass) end # Gets an array of possible <tt>:through</tt> source reflection names in both singular and plural form. # # class Post < ActiveRecord::Base # has_many :taggings # has_many :tags, through: :taggings # end # # tags_reflection = Post.reflect_on_association(:tags) # tags_reflection.source_reflection_names # # => [:tag, :tags] # def source_reflection_names options[:source] ? 
[options[:source]] : [name.to_s.singularize, name].uniq end def source_reflection_name # :nodoc: return @source_reflection_name if @source_reflection_name names = [name.to_s.singularize, name].collect(&:to_sym).uniq names = names.find_all { |n| through_reflection.klass._reflect_on_association(n) } if names.length > 1 example_options = options.dup example_options[:source] = source_reflection_names.first ActiveSupport::Deprecation.warn \ "Ambiguous source reflection for through association. Please " \ "specify a :source directive on your declaration like:\n" \ "\n" \ " class #{active_record.name} < ActiveRecord::Base\n" \ " #{macro} :#{name}, #{example_options}\n" \ " end" end @source_reflection_name = names.first end def source_options source_reflection.options end def through_options through_reflection.options end def join_id_for(owner) # :nodoc: source_reflection.join_id_for(owner) end def check_validity! if through_reflection.nil? raise HasManyThroughAssociationNotFoundError.new(active_record.name, self) end if through_reflection.polymorphic? if has_one? raise HasOneAssociationPolymorphicThroughError.new(active_record.name, self) else raise HasManyThroughAssociationPolymorphicThroughError.new(active_record.name, self) end end if source_reflection.nil? raise HasManyThroughSourceAssociationNotFoundError.new(self) end if options[:source_type] && !source_reflection.polymorphic? raise HasManyThroughAssociationPointlessSourceTypeError.new(active_record.name, self, source_reflection) end if source_reflection.polymorphic? && options[:source_type].nil? raise HasManyThroughAssociationPolymorphicSourceError.new(active_record.name, self, source_reflection) end if has_one? && through_reflection.collection? raise HasOneThroughCantAssociateThroughCollection.new(active_record.name, self, through_reflection) end check_validity_of_inverse! 
end def constraints scope_chain = source_reflection.constraints scope_chain << scope if scope scope_chain end protected def actual_source_reflection # FIXME: this is a horrible name source_reflection.send(:actual_source_reflection) end def primary_key(klass) klass.primary_key || raise(UnknownPrimaryKey.new(klass)) end private def derive_class_name # get the class_name of the belongs_to association of the through reflection options[:source_type] || source_reflection.class_name end delegate_methods = AssociationReflection.public_instance_methods - public_instance_methods delegate(*delegate_methods, to: :delegate_reflection) end class PolymorphicReflection < ThroughReflection # :nodoc: def initialize(reflection, previous_reflection) @reflection = reflection @previous_reflection = previous_reflection end def klass @reflection.klass end def scope @reflection.scope end def table_name @reflection.table_name end def plural_name @reflection.plural_name end def join_keys(association_klass) @reflection.join_keys(association_klass) end def type @reflection.type end def constraints [source_type_info] end def source_type_info type = @previous_reflection.foreign_type source_type = @previous_reflection.options[:source_type] lambda { |object| where(type => source_type) } end end class RuntimeReflection < PolymorphicReflection # :nodoc: attr_accessor :next def initialize(reflection, association) @reflection = reflection @association = association end def klass @association.klass end def table_name klass.table_name end def constraints @reflection.constraints end def source_type_info @reflection.source_type_info end def alias_candidate(name) "#{plural_name}_#{name}_join" end def alias_name Arel::Table.new(table_name) end def all_includes; yield; end end end end [ci skip] Remove useless "@api public/private" Other public APIs do not have these annotations. 
require 'thread' require 'active_support/core_ext/string/filters' module ActiveRecord # = Active Record Reflection module Reflection # :nodoc: extend ActiveSupport::Concern included do class_attribute :_reflections class_attribute :aggregate_reflections self._reflections = {} self.aggregate_reflections = {} end def self.create(macro, name, scope, options, ar) klass = case macro when :composed_of AggregateReflection when :has_many HasManyReflection when :has_one HasOneReflection when :belongs_to BelongsToReflection else raise "Unsupported Macro: #{macro}" end reflection = klass.new(name, scope, options, ar) options[:through] ? ThroughReflection.new(reflection) : reflection end def self.add_reflection(ar, name, reflection) ar.clear_reflections_cache ar._reflections = ar._reflections.merge(name.to_s => reflection) end def self.add_aggregate_reflection(ar, name, reflection) ar.aggregate_reflections = ar.aggregate_reflections.merge(name.to_s => reflection) end # \Reflection enables the ability to examine the associations and aggregations of # Active Record classes and objects. This information, for example, # can be used in a form builder that takes an Active Record object # and creates input fields for all of the attributes depending on their type # and displays the associations to other objects. # # MacroReflection class has info for AggregateReflection and AssociationReflection # classes. module ClassMethods # Returns an array of AggregateReflection objects for all the aggregations in the class. def reflect_on_all_aggregations aggregate_reflections.values end # Returns the AggregateReflection object for the named +aggregation+ (use the symbol). # # Account.reflect_on_aggregation(:balance) # => the balance AggregateReflection # def reflect_on_aggregation(aggregation) aggregate_reflections[aggregation.to_s] end # Returns a Hash of name of the reflection as the key and a AssociationReflection as the value. 
# # Account.reflections # => {"balance" => AggregateReflection} # def reflections @__reflections ||= begin ref = {} _reflections.each do |name, reflection| parent_reflection = reflection.parent_reflection if parent_reflection parent_name = parent_reflection.name ref[parent_name.to_s] = parent_reflection else ref[name] = reflection end end ref end end # Returns an array of AssociationReflection objects for all the # associations in the class. If you only want to reflect on a certain # association type, pass in the symbol (<tt>:has_many</tt>, <tt>:has_one</tt>, # <tt>:belongs_to</tt>) as the first parameter. # # Example: # # Account.reflect_on_all_associations # returns an array of all associations # Account.reflect_on_all_associations(:has_many) # returns an array of all has_many associations # def reflect_on_all_associations(macro = nil) association_reflections = reflections.values association_reflections.select! { |reflection| reflection.macro == macro } if macro association_reflections end # Returns the AssociationReflection object for the +association+ (use the symbol). # # Account.reflect_on_association(:owner) # returns the owner AssociationReflection # Invoice.reflect_on_association(:line_items).macro # returns :has_many # def reflect_on_association(association) reflections[association.to_s] end def _reflect_on_association(association) #:nodoc: _reflections[association.to_s] end # Returns an array of AssociationReflection objects for all associations which have <tt>:autosave</tt> enabled. def reflect_on_all_autosave_associations reflections.values.select { |reflection| reflection.options[:autosave] } end def clear_reflections_cache # :nodoc: @__reflections = nil end end # Holds all the methods that are shared between MacroReflection, AssociationReflection # and ThroughReflection class AbstractReflection # :nodoc: def table_name klass.table_name end # Returns a new, unsaved instance of the associated class. 
+attributes+ will # be passed to the class's constructor. def build_association(attributes, &block) klass.new(attributes, &block) end def quoted_table_name klass.quoted_table_name end def primary_key_type klass.type_for_attribute(klass.primary_key) end # Returns the class name for the macro. # # <tt>composed_of :balance, class_name: 'Money'</tt> returns <tt>'Money'</tt> # <tt>has_many :clients</tt> returns <tt>'Client'</tt> def class_name @class_name ||= (options[:class_name] || derive_class_name).to_s end JoinKeys = Struct.new(:key, :foreign_key) # :nodoc: def join_keys(association_klass) JoinKeys.new(foreign_key, active_record_primary_key) end def constraints scope_chain.flatten end def counter_cache_column if belongs_to? if options[:counter_cache] == true "#{active_record.name.demodulize.underscore.pluralize}_count" elsif options[:counter_cache] options[:counter_cache].to_s end else options[:counter_cache] ? options[:counter_cache].to_s : "#{name}_count" end end # This shit is nasty. We need to avoid the following situation: # # * An associated record is deleted via record.destroy # * Hence the callbacks run, and they find a belongs_to on the record with a # :counter_cache options which points back at our owner. So they update the # counter cache. # * In which case, we must make sure to *not* update the counter cache, or else # it will be decremented twice. # # Hence this method. def inverse_which_updates_counter_cache return @inverse_which_updates_counter_cache if defined?(@inverse_which_updates_counter_cache) @inverse_which_updates_counter_cache = klass.reflect_on_all_associations(:belongs_to).find do |inverse| inverse.counter_cache_column == counter_cache_column end end alias inverse_updates_counter_cache? inverse_which_updates_counter_cache def inverse_updates_counter_in_memory? inverse_of && inverse_which_updates_counter_cache == inverse_of end # Returns whether a counter cache should be used for this association. 
# # The counter_cache option must be given on either the owner or inverse # association, and the column must be present on the owner. def has_cached_counter? options[:counter_cache] || inverse_which_updates_counter_cache && inverse_which_updates_counter_cache.options[:counter_cache] && !!active_record.columns_hash[counter_cache_column] end def counter_must_be_updated_by_has_many? !inverse_updates_counter_in_memory? && has_cached_counter? end def alias_candidate(name) "#{plural_name}_#{name}" end end # Base class for AggregateReflection and AssociationReflection. Objects of # AggregateReflection and AssociationReflection are returned by the Reflection::ClassMethods. # # MacroReflection # AggregateReflection # AssociationReflection # HasManyReflection # HasOneReflection # BelongsToReflection # ThroughReflection class MacroReflection < AbstractReflection # Returns the name of the macro. # # <tt>composed_of :balance, class_name: 'Money'</tt> returns <tt>:balance</tt> # <tt>has_many :clients</tt> returns <tt>:clients</tt> attr_reader :name attr_reader :scope # Returns the hash of options used for the macro. # # <tt>composed_of :balance, class_name: 'Money'</tt> returns <tt>{ class_name: "Money" }</tt> # <tt>has_many :clients</tt> returns <tt>{}</tt> attr_reader :options attr_reader :active_record attr_reader :plural_name # :nodoc: def initialize(name, scope, options, active_record) @name = name @scope = scope @options = options @active_record = active_record @klass = options[:anonymous_class] @plural_name = active_record.pluralize_table_names ? name.to_s.pluralize : name.to_s end def autosave=(autosave) @automatic_inverse_of = false @options[:autosave] = autosave parent_reflection = self.parent_reflection if parent_reflection parent_reflection.autosave = autosave end end # Returns the class for the macro. 
# # <tt>composed_of :balance, class_name: 'Money'</tt> returns the Money class # <tt>has_many :clients</tt> returns the Client class def klass @klass ||= compute_class(class_name) end def compute_class(name) name.constantize end # Returns +true+ if +self+ and +other_aggregation+ have the same +name+ attribute, +active_record+ attribute, # and +other_aggregation+ has an options hash assigned to it. def ==(other_aggregation) super || other_aggregation.kind_of?(self.class) && name == other_aggregation.name && !other_aggregation.options.nil? && active_record == other_aggregation.active_record end private def derive_class_name name.to_s.camelize end end # Holds all the meta-data about an aggregation as it was specified in the # Active Record class. class AggregateReflection < MacroReflection #:nodoc: def mapping mapping = options[:mapping] || [name, name] mapping.first.is_a?(Array) ? mapping : [mapping] end end # Holds all the meta-data about an association as it was specified in the # Active Record class. class AssociationReflection < MacroReflection #:nodoc: # Returns the target association's class. # # class Author < ActiveRecord::Base # has_many :books # end # # Author.reflect_on_association(:books).klass # # => Book # # <b>Note:</b> Do not call +klass.new+ or +klass.create+ to instantiate # a new association object. Use +build_association+ or +create_association+ # instead. This allows plugins to hook into association object creation. 
# Returns the target association's class, memoized. Resolution is delegated
# to +compute_class+ so subclasses can change the lookup strategy.
def klass
  @klass ||= compute_class(class_name)
end

# Resolves +name+ to a constant via the owning model so that module
# nesting of the owner is honored during lookup.
def compute_class(name)
  active_record.send(:compute_type, name)
end

attr_reader :type, :foreign_type
attr_accessor :parent_reflection # Reflection

def initialize(name, scope, options, active_record)
  super
  @automatic_inverse_of = nil
  # For polymorphic :as associations, +@type+ is the owner-side "<as>_type" column.
  @type = options[:as] && (options[:foreign_type] || "#{options[:as]}_type")
  @foreign_type = options[:foreign_type] || "#{name}_type"
  @constructable = calculate_constructable(macro, options)
  @association_scope_cache = {}
  @scope_lock = Mutex.new
end

# Caches the association scope yielded by the block, keyed per connection
# configuration (and, for polymorphic associations, per target type read
# from the owner). The second +||=+ inside the mutex re-checks under the
# lock so concurrent callers do not build the scope twice.
def association_scope_cache(conn, owner)
  key = conn.prepared_statements
  if polymorphic?
    key = [key, owner._read_attribute(@foreign_type)]
  end
  @association_scope_cache[key] ||= @scope_lock.synchronize {
    @association_scope_cache[key] ||= yield
  }
end

def constructable? # :nodoc:
  @constructable
end

# Join table name for the association; taken from options or derived.
def join_table
  @join_table ||= options[:join_table] || derive_join_table
end

# Foreign key column name; taken from options or derived from the names.
def foreign_key
  @foreign_key ||= options[:foreign_key] || derive_foreign_key
end

def association_foreign_key
  @association_foreign_key ||= options[:association_foreign_key] || class_name.foreign_key
end

# klass option is necessary to support loading polymorphic associations
def association_primary_key(klass = nil)
  options[:primary_key] || primary_key(klass || self.klass)
end

def active_record_primary_key
  @active_record_primary_key ||= options[:primary_key] || primary_key(active_record)
end

def check_validity!
  check_validity_of_inverse!
end

# Raises when an explicit inverse is configured but the named association
# does not exist on the target class. Skipped for polymorphic associations,
# whose target class is not knowable here.
def check_validity_of_inverse!
  unless polymorphic?
    if has_inverse? && inverse_of.nil?
      raise InverseOfAssociationNotFoundError.new(self)
    end
  end
end

# Instance-dependent scopes (scope blocks that take an argument) cannot be
# preloaded, so reject them up front with an explanatory error.
def check_preloadable!
  return unless scope

  if scope.arity > 0
    raise ArgumentError, <<-MSG.squish
      The association scope '#{name}' is instance dependent (the scope
      block takes an argument). Preloading instance dependent scopes is
      not supported.
    MSG
  end
end
alias :check_eager_loadable! :check_preloadable!
def join_id_for(owner) # :nodoc:
  owner[active_record_primary_key]
end

# Non-through associations have no intermediate reflection.
def through_reflection
  nil
end

# A plain association is its own source.
def source_reflection
  self
end

# A chain of reflections from this one back to the owner. For more see the explanation in
# ThroughReflection.
def chain
  [self]
end

# This is for clearing cache on the reflection. Useful for tests that need to compare
# SQL queries on associations.
def clear_association_scope_cache # :nodoc:
  @association_scope_cache.clear
end

def nested?
  false
end

# An array of arrays of scopes. Each item in the outside array corresponds to a reflection
# in the #chain.
def scope_chain
  scope ? [[scope]] : [[]]
end

# Truthy when an inverse association name is configured or can be guessed.
def has_inverse?
  inverse_name
end

# The reflection on the other side of this association, if one resolves.
def inverse_of
  return unless inverse_name

  @inverse_of ||= klass._reflect_on_association inverse_name
end

# Resolves the configured :inverse_of against +associated_class+ (used for
# polymorphic associations, where the target class varies per record);
# raises when the declared inverse is missing on that class.
def polymorphic_inverse_of(associated_class)
  if has_inverse?
    if inverse_relationship = associated_class._reflect_on_association(options[:inverse_of])
      inverse_relationship
    else
      raise InverseOfAssociationNotFoundError.new(self, associated_class)
    end
  end
end

# Returns the macro type.
#
# <tt>has_many :clients</tt> returns <tt>:has_many</tt>
def macro; raise NotImplementedError; end

# Returns whether or not this association reflection is for a collection
# association. Returns +true+ if the +macro+ is either +has_many+ or
# +has_and_belongs_to_many+, +false+ otherwise.
def collection?
  false
end

# Returns whether or not the association should be validated as part of
# the parent's validation.
#
# Unless you explicitly disable validation with
# <tt>validate: false</tt>, validation will take place when:
#
# * you explicitly enable validation; <tt>validate: true</tt>
# * you use autosave; <tt>autosave: true</tt>
# * the association is a +has_many+ association
def validate?
  !options[:validate].nil? ? options[:validate] : (options[:autosave] == true || collection?)
end

# Returns +true+ if +self+ is a +belongs_to+ reflection.
def belongs_to?; false; end

# Returns +true+ if +self+ is a +has_one+ reflection.
def has_one?; false; end def association_class case macro when :belongs_to if polymorphic? Associations::BelongsToPolymorphicAssociation else Associations::BelongsToAssociation end when :has_many if options[:through] Associations::HasManyThroughAssociation else Associations::HasManyAssociation end when :has_one if options[:through] Associations::HasOneThroughAssociation else Associations::HasOneAssociation end end end def polymorphic? options[:polymorphic] end VALID_AUTOMATIC_INVERSE_MACROS = [:has_many, :has_one, :belongs_to] INVALID_AUTOMATIC_INVERSE_OPTIONS = [:conditions, :through, :polymorphic, :foreign_key] protected def actual_source_reflection # FIXME: this is a horrible name self end private def calculate_constructable(macro, options) case macro when :belongs_to !polymorphic? when :has_one !options[:through] else true end end # Attempts to find the inverse association name automatically. # If it cannot find a suitable inverse association name, it returns # nil. def inverse_name options.fetch(:inverse_of) do if @automatic_inverse_of == false nil else @automatic_inverse_of ||= automatic_inverse_of end end end # returns either nil or the inverse association name that it finds. def automatic_inverse_of if can_find_inverse_of_automatically?(self) inverse_name = ActiveSupport::Inflector.underscore(options[:as] || active_record.name.demodulize).to_sym begin reflection = klass._reflect_on_association(inverse_name) rescue NameError # Give up: we couldn't compute the klass type so we won't be able # to find any associations either. reflection = false end if valid_inverse_reflection?(reflection) return inverse_name end end false end # Checks if the inverse reflection that is returned from the # +automatic_inverse_of+ method is a valid reflection. We must # make sure that the reflection's active_record name matches up # with the current reflection's klass name. 
# # Note: klass will always be valid because when there's a NameError # from calling +klass+, +reflection+ will already be set to false. def valid_inverse_reflection?(reflection) reflection && klass.name == reflection.active_record.name && can_find_inverse_of_automatically?(reflection) end # Checks to see if the reflection doesn't have any options that prevent # us from being able to guess the inverse automatically. First, the # <tt>inverse_of</tt> option cannot be set to false. Second, we must # have <tt>has_many</tt>, <tt>has_one</tt>, <tt>belongs_to</tt> associations. # Third, we must not have options such as <tt>:polymorphic</tt> or # <tt>:foreign_key</tt> which prevent us from correctly guessing the # inverse association. # # Anything with a scope can additionally ruin our attempt at finding an # inverse, so we exclude reflections with scopes. def can_find_inverse_of_automatically?(reflection) reflection.options[:inverse_of] != false && VALID_AUTOMATIC_INVERSE_MACROS.include?(reflection.macro) && !INVALID_AUTOMATIC_INVERSE_OPTIONS.any? { |opt| reflection.options[opt] } && !reflection.scope end def derive_class_name class_name = name.to_s class_name = class_name.singularize if collection? class_name.camelize end def derive_foreign_key if belongs_to? 
"#{name}_id" elsif options[:as] "#{options[:as]}_id" else active_record.name.foreign_key end end def derive_join_table ModelSchema.derive_join_table_name active_record.table_name, klass.table_name end def primary_key(klass) klass.primary_key || raise(UnknownPrimaryKey.new(klass)) end end class HasManyReflection < AssociationReflection # :nodoc: def initialize(name, scope, options, active_record) super(name, scope, options, active_record) end def macro; :has_many; end def collection?; true; end end class HasOneReflection < AssociationReflection # :nodoc: def initialize(name, scope, options, active_record) super(name, scope, options, active_record) end def macro; :has_one; end def has_one?; true; end end class BelongsToReflection < AssociationReflection # :nodoc: def initialize(name, scope, options, active_record) super(name, scope, options, active_record) end def macro; :belongs_to; end def belongs_to?; true; end def join_keys(association_klass) key = polymorphic? ? association_primary_key(association_klass) : association_primary_key JoinKeys.new(key, foreign_key) end def join_id_for(owner) # :nodoc: owner[foreign_key] end end class HasAndBelongsToManyReflection < AssociationReflection # :nodoc: def initialize(name, scope, options, active_record) super end def macro; :has_and_belongs_to_many; end def collection? true end end # Holds all the meta-data about a :through association as it was specified # in the Active Record class. class ThroughReflection < AbstractReflection #:nodoc: attr_reader :delegate_reflection delegate :foreign_key, :foreign_type, :association_foreign_key, :active_record_primary_key, :type, :to => :source_reflection def initialize(delegate_reflection) @delegate_reflection = delegate_reflection @klass = delegate_reflection.options[:anonymous_class] @source_reflection_name = delegate_reflection.options[:source] end def klass @klass ||= delegate_reflection.compute_class(class_name) end # Returns the source of the through reflection. 
It checks both a singularized # and pluralized form for <tt>:belongs_to</tt> or <tt>:has_many</tt>. # # class Post < ActiveRecord::Base # has_many :taggings # has_many :tags, through: :taggings # end # # class Tagging < ActiveRecord::Base # belongs_to :post # belongs_to :tag # end # # tags_reflection = Post.reflect_on_association(:tags) # tags_reflection.source_reflection # # => <ActiveRecord::Reflection::BelongsToReflection: @name=:tag, @active_record=Tagging, @plural_name="tags"> # def source_reflection through_reflection.klass._reflect_on_association(source_reflection_name) end # Returns the AssociationReflection object specified in the <tt>:through</tt> option # of a HasManyThrough or HasOneThrough association. # # class Post < ActiveRecord::Base # has_many :taggings # has_many :tags, through: :taggings # end # # tags_reflection = Post.reflect_on_association(:tags) # tags_reflection.through_reflection # # => <ActiveRecord::Reflection::HasManyReflection: @name=:taggings, @active_record=Post, @plural_name="taggings"> # def through_reflection active_record._reflect_on_association(options[:through]) end # Returns an array of reflections which are involved in this association. Each item in the # array corresponds to a table which will be part of the query for this association. # # The chain is built by recursively calling #chain on the source reflection and the through # reflection. The base case for the recursion is a normal association, which just returns # [self] as its #chain. 
# # class Post < ActiveRecord::Base # has_many :taggings # has_many :tags, through: :taggings # end # # tags_reflection = Post.reflect_on_association(:tags) # tags_reflection.chain # # => [<ActiveRecord::Reflection::ThroughReflection: @delegate_reflection=#<ActiveRecord::Reflection::HasManyReflection: @name=:tags...>, # <ActiveRecord::Reflection::HasManyReflection: @name=:taggings, @options={}, @active_record=Post>] # def chain @chain ||= begin a = source_reflection.chain b = through_reflection.chain.map(&:dup) if options[:source_type] b[0] = PolymorphicReflection.new(b[0], self) end chain = a + b chain[0] = self # Use self so we don't lose the information from :source_type chain end end # This is for clearing cache on the reflection. Useful for tests that need to compare # SQL queries on associations. def clear_association_scope_cache # :nodoc: @chain = nil delegate_reflection.clear_association_scope_cache source_reflection.clear_association_scope_cache through_reflection.clear_association_scope_cache end # Consider the following example: # # class Person # has_many :articles # has_many :comment_tags, through: :articles # end # # class Article # has_many :comments # has_many :comment_tags, through: :comments, source: :tags # end # # class Comment # has_many :tags # end # # There may be scopes on Person.comment_tags, Article.comment_tags and/or Comment.tags, # but only Comment.tags will be represented in the #chain. So this method creates an array # of scopes corresponding to the chain. 
def scope_chain @scope_chain ||= begin scope_chain = source_reflection.scope_chain.map(&:dup) # Add to it the scope from this reflection (if any) scope_chain.first << scope if scope through_scope_chain = through_reflection.scope_chain.map(&:dup) if options[:source_type] type = foreign_type source_type = options[:source_type] through_scope_chain.first << lambda { |object| where(type => source_type) } end # Recursively fill out the rest of the array from the through reflection scope_chain + through_scope_chain end end def join_keys(association_klass) source_reflection.join_keys(association_klass) end # A through association is nested if there would be more than one join table def nested? chain.length > 2 end # We want to use the klass from this reflection, rather than just delegate straight to # the source_reflection, because the source_reflection may be polymorphic. We still # need to respect the source_reflection's :primary_key option, though. def association_primary_key(klass = nil) # Get the "actual" source reflection if the immediate source reflection has a # source reflection itself actual_source_reflection.options[:primary_key] || primary_key(klass || self.klass) end # Gets an array of possible <tt>:through</tt> source reflection names in both singular and plural form. # # class Post < ActiveRecord::Base # has_many :taggings # has_many :tags, through: :taggings # end # # tags_reflection = Post.reflect_on_association(:tags) # tags_reflection.source_reflection_names # # => [:tag, :tags] # def source_reflection_names options[:source] ? 
[options[:source]] : [name.to_s.singularize, name].uniq end def source_reflection_name # :nodoc: return @source_reflection_name if @source_reflection_name names = [name.to_s.singularize, name].collect(&:to_sym).uniq names = names.find_all { |n| through_reflection.klass._reflect_on_association(n) } if names.length > 1 example_options = options.dup example_options[:source] = source_reflection_names.first ActiveSupport::Deprecation.warn \ "Ambiguous source reflection for through association. Please " \ "specify a :source directive on your declaration like:\n" \ "\n" \ " class #{active_record.name} < ActiveRecord::Base\n" \ " #{macro} :#{name}, #{example_options}\n" \ " end" end @source_reflection_name = names.first end def source_options source_reflection.options end def through_options through_reflection.options end def join_id_for(owner) # :nodoc: source_reflection.join_id_for(owner) end def check_validity! if through_reflection.nil? raise HasManyThroughAssociationNotFoundError.new(active_record.name, self) end if through_reflection.polymorphic? if has_one? raise HasOneAssociationPolymorphicThroughError.new(active_record.name, self) else raise HasManyThroughAssociationPolymorphicThroughError.new(active_record.name, self) end end if source_reflection.nil? raise HasManyThroughSourceAssociationNotFoundError.new(self) end if options[:source_type] && !source_reflection.polymorphic? raise HasManyThroughAssociationPointlessSourceTypeError.new(active_record.name, self, source_reflection) end if source_reflection.polymorphic? && options[:source_type].nil? raise HasManyThroughAssociationPolymorphicSourceError.new(active_record.name, self, source_reflection) end if has_one? && through_reflection.collection? raise HasOneThroughCantAssociateThroughCollection.new(active_record.name, self, through_reflection) end check_validity_of_inverse! 
end

# Scope lambdas contributed by the source reflection, plus this
# reflection's own scope when present.
def constraints
  scope_chain = source_reflection.constraints
  scope_chain << scope if scope
  scope_chain
end

protected

  def actual_source_reflection # FIXME: this is a horrible name
    source_reflection.send(:actual_source_reflection)
  end

  def primary_key(klass)
    klass.primary_key || raise(UnknownPrimaryKey.new(klass))
  end

private

  def derive_class_name
    # get the class_name of the belongs_to association of the through reflection
    options[:source_type] || source_reflection.class_name
  end

# Forward every public AssociationReflection method we do not override to
# the underlying delegate reflection.
delegate_methods = AssociationReflection.public_instance_methods -
                   public_instance_methods

delegate(*delegate_methods, to: :delegate_reflection)
end

# Wraps the first reflection of a through chain when :source_type is given
# (see ThroughReflection#chain), narrowing a polymorphic source to a single
# type; most queries pass straight through to the wrapped reflection.
class PolymorphicReflection < ThroughReflection # :nodoc:
  def initialize(reflection, previous_reflection)
    @reflection = reflection
    @previous_reflection = previous_reflection
  end

  def klass
    @reflection.klass
  end

  def scope
    @reflection.scope
  end

  def table_name
    @reflection.table_name
  end

  def plural_name
    @reflection.plural_name
  end

  def join_keys(association_klass)
    @reflection.join_keys(association_klass)
  end

  def type
    @reflection.type
  end

  def constraints
    [source_type_info]
  end

  # Scope restricting rows to the configured :source_type via the previous
  # reflection's polymorphic type column.
  def source_type_info
    type = @previous_reflection.foreign_type
    source_type = @previous_reflection.options[:source_type]
    lambda { |object| where(type => source_type) }
  end
end

# Reflection backed by a live association object: the class and table come
# from +@association+ rather than from statically configured options.
class RuntimeReflection < PolymorphicReflection # :nodoc:
  attr_accessor :next

  def initialize(reflection, association)
    @reflection = reflection
    @association = association
  end

  def klass
    @association.klass
  end

  def table_name
    klass.table_name
  end

  def constraints
    @reflection.constraints
  end

  def source_type_info
    @reflection.source_type_info
  end

  def alias_candidate(name)
    "#{plural_name}_#{name}_join"
  end

  def alias_name
    Arel::Table.new(table_name)
  end

  def all_includes; yield; end
end
end
end
require "cases/helper" require 'models/topic' require 'models/reply' require 'models/subscriber' require 'models/movie' require 'models/keyboard' require 'models/mixed_case_monkey' class PrimaryKeysTest < ActiveRecord::TestCase fixtures :topics, :subscribers, :movies, :mixed_case_monkeys def test_to_key_with_default_primary_key topic = Topic.new assert topic.to_key.nil? topic = Topic.find(1) assert_equal topic.to_key, [1] end def test_to_key_with_customized_primary_key keyboard = Keyboard.new assert_nil keyboard.to_key keyboard.save assert_equal keyboard.to_key, [keyboard.id] end def test_to_key_with_primary_key_after_destroy topic = Topic.find(1) topic.destroy assert_equal topic.to_key, [1] end def test_integer_key topic = Topic.find(1) assert_equal(topics(:first).author_name, topic.author_name) topic = Topic.find(2) assert_equal(topics(:second).author_name, topic.author_name) topic = Topic.new topic.title = "New Topic" assert_nil topic.id assert_nothing_raised { topic.save! } id = topic.id topicReloaded = Topic.find(id) assert_equal("New Topic", topicReloaded.title) end def test_customized_primary_key_auto_assigns_on_save Keyboard.delete_all keyboard = Keyboard.new(:name => 'HHKB') assert_nothing_raised { keyboard.save! 
} assert_equal keyboard.id, Keyboard.find_by_name('HHKB').id end def test_customized_primary_key_can_be_get_before_saving keyboard = Keyboard.new assert_nil keyboard.id assert_nothing_raised { assert_nil keyboard.key_number } end def test_customized_string_primary_key_settable_before_save subscriber = Subscriber.new assert_nothing_raised { subscriber.id = 'webster123' } assert_equal 'webster123', subscriber.id assert_equal 'webster123', subscriber.nick end def test_string_key subscriber = Subscriber.find(subscribers(:first).nick) assert_equal(subscribers(:first).name, subscriber.name) subscriber = Subscriber.find(subscribers(:second).nick) assert_equal(subscribers(:second).name, subscriber.name) subscriber = Subscriber.new subscriber.id = "jdoe" assert_equal("jdoe", subscriber.id) subscriber.name = "John Doe" assert_nothing_raised { subscriber.save! } assert_equal("jdoe", subscriber.id) subscriberReloaded = Subscriber.find("jdoe") assert_equal("John Doe", subscriberReloaded.name) end def test_find_with_more_than_one_string_key assert_equal 2, Subscriber.find(subscribers(:first).nick, subscribers(:second).nick).length end def test_primary_key_prefix ActiveRecord::Base.primary_key_prefix_type = :table_name Topic.reset_primary_key assert_equal "topicid", Topic.primary_key ActiveRecord::Base.primary_key_prefix_type = :table_name_with_underscore Topic.reset_primary_key assert_equal "topic_id", Topic.primary_key ActiveRecord::Base.primary_key_prefix_type = nil Topic.reset_primary_key assert_equal "id", Topic.primary_key end def test_delete_should_quote_pkey assert_nothing_raised { MixedCaseMonkey.delete(1) } end def test_update_counters_should_quote_pkey_and_quote_counter_columns assert_nothing_raised { MixedCaseMonkey.update_counters(1, :fleaCount => 99) } end def test_find_with_one_id_should_quote_pkey assert_nothing_raised { MixedCaseMonkey.find(1) } end def test_find_with_multiple_ids_should_quote_pkey assert_nothing_raised { MixedCaseMonkey.find([1,2]) } end def 
test_instance_update_should_quote_pkey assert_nothing_raised { MixedCaseMonkey.find(1).save } end def test_instance_destroy_should_quote_pkey assert_nothing_raised { MixedCaseMonkey.find(1).destroy } end def test_supports_primary_key assert_nothing_raised NoMethodError do ActiveRecord::Base.connection.supports_primary_key? end end def test_primary_key_returns_value_if_it_exists if ActiveRecord::Base.connection.supports_primary_key? assert_equal 'id', ActiveRecord::Base.connection.primary_key('developers') end end def test_primary_key_returns_nil_if_it_does_not_exist if ActiveRecord::Base.connection.supports_primary_key? assert_nil ActiveRecord::Base.connection.primary_key('developers_projects') end end end expected value should come first in assert_equal Signed-off-by: José Valim <0c2436ea76ed86e37cc05f66dea18d48ef390882@gmail.com> require "cases/helper" require 'models/topic' require 'models/reply' require 'models/subscriber' require 'models/movie' require 'models/keyboard' require 'models/mixed_case_monkey' class PrimaryKeysTest < ActiveRecord::TestCase fixtures :topics, :subscribers, :movies, :mixed_case_monkeys def test_to_key_with_default_primary_key topic = Topic.new assert topic.to_key.nil? topic = Topic.find(1) assert_equal [1], topic.to_key end def test_to_key_with_customized_primary_key keyboard = Keyboard.new assert_nil keyboard.to_key keyboard.save assert_equal keyboard.to_key, [keyboard.id] end def test_to_key_with_primary_key_after_destroy topic = Topic.find(1) topic.destroy assert_equal [1], topic.to_key end def test_integer_key topic = Topic.find(1) assert_equal(topics(:first).author_name, topic.author_name) topic = Topic.find(2) assert_equal(topics(:second).author_name, topic.author_name) topic = Topic.new topic.title = "New Topic" assert_nil topic.id assert_nothing_raised { topic.save! 
} id = topic.id topicReloaded = Topic.find(id) assert_equal("New Topic", topicReloaded.title) end def test_customized_primary_key_auto_assigns_on_save Keyboard.delete_all keyboard = Keyboard.new(:name => 'HHKB') assert_nothing_raised { keyboard.save! } assert_equal keyboard.id, Keyboard.find_by_name('HHKB').id end def test_customized_primary_key_can_be_get_before_saving keyboard = Keyboard.new assert_nil keyboard.id assert_nothing_raised { assert_nil keyboard.key_number } end def test_customized_string_primary_key_settable_before_save subscriber = Subscriber.new assert_nothing_raised { subscriber.id = 'webster123' } assert_equal 'webster123', subscriber.id assert_equal 'webster123', subscriber.nick end def test_string_key subscriber = Subscriber.find(subscribers(:first).nick) assert_equal(subscribers(:first).name, subscriber.name) subscriber = Subscriber.find(subscribers(:second).nick) assert_equal(subscribers(:second).name, subscriber.name) subscriber = Subscriber.new subscriber.id = "jdoe" assert_equal("jdoe", subscriber.id) subscriber.name = "John Doe" assert_nothing_raised { subscriber.save! 
} assert_equal("jdoe", subscriber.id) subscriberReloaded = Subscriber.find("jdoe") assert_equal("John Doe", subscriberReloaded.name) end def test_find_with_more_than_one_string_key assert_equal 2, Subscriber.find(subscribers(:first).nick, subscribers(:second).nick).length end def test_primary_key_prefix ActiveRecord::Base.primary_key_prefix_type = :table_name Topic.reset_primary_key assert_equal "topicid", Topic.primary_key ActiveRecord::Base.primary_key_prefix_type = :table_name_with_underscore Topic.reset_primary_key assert_equal "topic_id", Topic.primary_key ActiveRecord::Base.primary_key_prefix_type = nil Topic.reset_primary_key assert_equal "id", Topic.primary_key end def test_delete_should_quote_pkey assert_nothing_raised { MixedCaseMonkey.delete(1) } end def test_update_counters_should_quote_pkey_and_quote_counter_columns assert_nothing_raised { MixedCaseMonkey.update_counters(1, :fleaCount => 99) } end def test_find_with_one_id_should_quote_pkey assert_nothing_raised { MixedCaseMonkey.find(1) } end def test_find_with_multiple_ids_should_quote_pkey assert_nothing_raised { MixedCaseMonkey.find([1,2]) } end def test_instance_update_should_quote_pkey assert_nothing_raised { MixedCaseMonkey.find(1).save } end def test_instance_destroy_should_quote_pkey assert_nothing_raised { MixedCaseMonkey.find(1).destroy } end def test_supports_primary_key assert_nothing_raised NoMethodError do ActiveRecord::Base.connection.supports_primary_key? end end def test_primary_key_returns_value_if_it_exists if ActiveRecord::Base.connection.supports_primary_key? assert_equal 'id', ActiveRecord::Base.connection.primary_key('developers') end end def test_primary_key_returns_nil_if_it_does_not_exist if ActiveRecord::Base.connection.supports_primary_key? assert_nil ActiveRecord::Base.connection.primary_key('developers_projects') end end end
lib = File.expand_path('lib', __dir__) $LOAD_PATH.unshift(lib) unless $LOAD_PATH.include?(lib) Gem::Specification.new do |gem| gem.name = 'administrate-field-belongs_to_search' gem.version = '0.7.0'.freeze gem.authors = ['Klas Eskilson'] gem.email = ['klas.eskilson@gmail.com'] gem.homepage = 'https://github.com/fishbrain/administrate-field-belongs_to_search' gem.summary = 'Plugin that adds search capabilities to belongs_to associations for Administrate' gem.license = 'MIT' gem.require_paths = %w[lib] gem.files = `git ls-files -z`.split("\x0").reject { |f| f.match(%r{^(spec)/}) } gem.test_files = `git ls-files -z -- {spec}/*`.split("\x0") gem.add_dependency 'administrate', '>= 0.3', '< 1.0' gem.add_dependency 'jbuilder', '~> 2' gem.add_dependency 'rails', '>= 4.2', '< 7.0' gem.add_dependency 'selectize-rails', '~> 0.6' gem.add_development_dependency 'coveralls', '~> 0' gem.add_development_dependency 'factory_girl', '~> 4.8' gem.add_development_dependency 'rake', '~> 13.0' gem.add_development_dependency 'rspec', '~> 3.4' gem.add_development_dependency 'rubocop', '~> 0.75.0' gem.add_development_dependency 'simplecov', '~> 0' gem.add_development_dependency 'sqlite3', '~> 1.3' gem.description = <<-DESCRIPTION Add support to search through (potentially large) belongs_to associations in your Administrate dashboards. DESCRIPTION end Update rubocop requirement from ~> 0.75.0 to ~> 0.76.0 Updates the requirements on [rubocop](https://github.com/rubocop-hq/rubocop) to permit the latest version. 
- [Release notes](https://github.com/rubocop-hq/rubocop/releases) - [Changelog](https://github.com/rubocop-hq/rubocop/blob/master/CHANGELOG.md) - [Commits](https://github.com/rubocop-hq/rubocop/compare/v0.75.0...v0.76.0) Signed-off-by: dependabot-preview[bot] <5bdcd3c0d4d24ae3e71b3b452a024c6324c7e4bb@dependabot.com> lib = File.expand_path('lib', __dir__) $LOAD_PATH.unshift(lib) unless $LOAD_PATH.include?(lib) Gem::Specification.new do |gem| gem.name = 'administrate-field-belongs_to_search' gem.version = '0.7.0'.freeze gem.authors = ['Klas Eskilson'] gem.email = ['klas.eskilson@gmail.com'] gem.homepage = 'https://github.com/fishbrain/administrate-field-belongs_to_search' gem.summary = 'Plugin that adds search capabilities to belongs_to associations for Administrate' gem.license = 'MIT' gem.require_paths = %w[lib] gem.files = `git ls-files -z`.split("\x0").reject { |f| f.match(%r{^(spec)/}) } gem.test_files = `git ls-files -z -- {spec}/*`.split("\x0") gem.add_dependency 'administrate', '>= 0.3', '< 1.0' gem.add_dependency 'jbuilder', '~> 2' gem.add_dependency 'rails', '>= 4.2', '< 7.0' gem.add_dependency 'selectize-rails', '~> 0.6' gem.add_development_dependency 'coveralls', '~> 0' gem.add_development_dependency 'factory_girl', '~> 4.8' gem.add_development_dependency 'rake', '~> 13.0' gem.add_development_dependency 'rspec', '~> 3.4' gem.add_development_dependency 'rubocop', '~> 0.76.0' gem.add_development_dependency 'simplecov', '~> 0' gem.add_development_dependency 'sqlite3', '~> 1.3' gem.description = <<-DESCRIPTION Add support to search through (potentially large) belongs_to associations in your Administrate dashboards. DESCRIPTION end
# frozen_string_literal: true # Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Auto-generated by gapic-generator-ruby. DO NOT EDIT! module Google module Cloud module Speech module V1 # The top-level message sent by the client for the `Recognize` method. # @!attribute [rw] config # @return [::Google::Cloud::Speech::V1::RecognitionConfig] # Required. Provides information to the recognizer that specifies how to # process the request. # @!attribute [rw] audio # @return [::Google::Cloud::Speech::V1::RecognitionAudio] # Required. The audio data to be recognized. class RecognizeRequest include ::Google::Protobuf::MessageExts extend ::Google::Protobuf::MessageExts::ClassMethods end # The top-level message sent by the client for the `LongRunningRecognize` # method. # @!attribute [rw] config # @return [::Google::Cloud::Speech::V1::RecognitionConfig] # Required. Provides information to the recognizer that specifies how to # process the request. # @!attribute [rw] audio # @return [::Google::Cloud::Speech::V1::RecognitionAudio] # Required. The audio data to be recognized. # @!attribute [rw] output_config # @return [::Google::Cloud::Speech::V1::TranscriptOutputConfig] # Optional. Specifies an optional destination for the recognition results. class LongRunningRecognizeRequest include ::Google::Protobuf::MessageExts extend ::Google::Protobuf::MessageExts::ClassMethods end # Specifies an optional destination for the recognition results. 
# @!attribute [rw] gcs_uri # @return [::String] # Specifies a Cloud Storage URI for the recognition results. Must be # specified in the format: `gs://bucket_name/object_name`, and the bucket # must already exist. class TranscriptOutputConfig include ::Google::Protobuf::MessageExts extend ::Google::Protobuf::MessageExts::ClassMethods end # The top-level message sent by the client for the `StreamingRecognize` method. # Multiple `StreamingRecognizeRequest` messages are sent. The first message # must contain a `streaming_config` message and must not contain # `audio_content`. All subsequent messages must contain `audio_content` and # must not contain a `streaming_config` message. # @!attribute [rw] streaming_config # @return [::Google::Cloud::Speech::V1::StreamingRecognitionConfig] # Provides information to the recognizer that specifies how to process the # request. The first `StreamingRecognizeRequest` message must contain a # `streaming_config` message. # @!attribute [rw] audio_content # @return [::String] # The audio data to be recognized. Sequential chunks of audio data are sent # in sequential `StreamingRecognizeRequest` messages. The first # `StreamingRecognizeRequest` message must not contain `audio_content` data # and all subsequent `StreamingRecognizeRequest` messages must contain # `audio_content` data. The audio bytes must be encoded as specified in # `RecognitionConfig`. Note: as with all bytes fields, proto buffers use a # pure binary representation (not base64). See # [content limits](https://cloud.google.com/speech-to-text/quotas#content). class StreamingRecognizeRequest include ::Google::Protobuf::MessageExts extend ::Google::Protobuf::MessageExts::ClassMethods end # Provides information to the recognizer that specifies how to process the # request. # @!attribute [rw] config # @return [::Google::Cloud::Speech::V1::RecognitionConfig] # Required. Provides information to the recognizer that specifies how to # process the request. 
# @!attribute [rw] single_utterance # @return [::Boolean] # If `false` or omitted, the recognizer will perform continuous # recognition (continuing to wait for and process audio even if the user # pauses speaking) until the client closes the input stream (gRPC API) or # until the maximum time limit has been reached. May return multiple # `StreamingRecognitionResult`s with the `is_final` flag set to `true`. # # If `true`, the recognizer will detect a single spoken utterance. When it # detects that the user has paused or stopped speaking, it will return an # `END_OF_SINGLE_UTTERANCE` event and cease recognition. It will return no # more than one `StreamingRecognitionResult` with the `is_final` flag set to # `true`. # # The `single_utterance` field can only be used with specified models, # otherwise an error is thrown. The `model` field in [`RecognitionConfig`][] # must be set to: # # * `command_and_search` # * `phone_call` AND additional field `useEnhanced`=`true` # * The `model` field is left undefined. In this case the API auto-selects # a model based on any other parameters that you set in # `RecognitionConfig`. # @!attribute [rw] interim_results # @return [::Boolean] # If `true`, interim results (tentative hypotheses) may be # returned as they become available (these interim results are indicated with # the `is_final=false` flag). # If `false` or omitted, only `is_final=true` result(s) are returned. class StreamingRecognitionConfig include ::Google::Protobuf::MessageExts extend ::Google::Protobuf::MessageExts::ClassMethods end # Provides information to the recognizer that specifies how to process the # request. # @!attribute [rw] encoding # @return [::Google::Cloud::Speech::V1::RecognitionConfig::AudioEncoding] # Encoding of audio data sent in all `RecognitionAudio` messages. # This field is optional for `FLAC` and `WAV` audio files and required # for all other audio formats. 
For details, see {::Google::Cloud::Speech::V1::RecognitionConfig::AudioEncoding AudioEncoding}. # @!attribute [rw] sample_rate_hertz # @return [::Integer] # Sample rate in Hertz of the audio data sent in all # `RecognitionAudio` messages. Valid values are: 8000-48000. # 16000 is optimal. For best results, set the sampling rate of the audio # source to 16000 Hz. If that's not possible, use the native sample rate of # the audio source (instead of re-sampling). # This field is optional for FLAC and WAV audio files, but is # required for all other audio formats. For details, see {::Google::Cloud::Speech::V1::RecognitionConfig::AudioEncoding AudioEncoding}. # @!attribute [rw] audio_channel_count # @return [::Integer] # The number of channels in the input audio data. # ONLY set this for MULTI-CHANNEL recognition. # Valid values for LINEAR16 and FLAC are `1`-`8`. # Valid values for OGG_OPUS are '1'-'254'. # Valid value for MULAW, AMR, AMR_WB and SPEEX_WITH_HEADER_BYTE is only `1`. # If `0` or omitted, defaults to one channel (mono). # Note: We only recognize the first channel by default. # To perform independent recognition on each channel set # `enable_separate_recognition_per_channel` to 'true'. # @!attribute [rw] enable_separate_recognition_per_channel # @return [::Boolean] # This needs to be set to `true` explicitly and `audio_channel_count` > 1 # to get each channel recognized separately. The recognition result will # contain a `channel_tag` field to state which channel that result belongs # to. If this is not true, we will only recognize the first channel. The # request is billed cumulatively for all channels recognized: # `audio_channel_count` multiplied by the length of the audio. # @!attribute [rw] language_code # @return [::String] # Required. The language of the supplied audio as a # [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag. # Example: "en-US". 
# See [Language # Support](https://cloud.google.com/speech-to-text/docs/languages) for a list # of the currently supported language codes. # @!attribute [rw] alternative_language_codes # @return [::Array<::String>] # A list of up to 3 additional # [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tags, # listing possible alternative languages of the supplied audio. # See [Language # Support](https://cloud.google.com/speech-to-text/docs/languages) for a list # of the currently supported language codes. If alternative languages are # listed, recognition result will contain recognition in the most likely # language detected including the main language_code. The recognition result # will include the language tag of the language detected in the audio. Note: # This feature is only supported for Voice Command and Voice Search use cases # and performance may vary for other use cases (e.g., phone call # transcription). # @!attribute [rw] max_alternatives # @return [::Integer] # Maximum number of recognition hypotheses to be returned. # Specifically, the maximum number of `SpeechRecognitionAlternative` messages # within each `SpeechRecognitionResult`. # The server may return fewer than `max_alternatives`. # Valid values are `0`-`30`. A value of `0` or `1` will return a maximum of # one. If omitted, will return a maximum of one. # @!attribute [rw] profanity_filter # @return [::Boolean] # If set to `true`, the server will attempt to filter out # profanities, replacing all but the initial character in each filtered word # with asterisks, e.g. "f***". If set to `false` or omitted, profanities # won't be filtered out. # @!attribute [rw] adaptation # @return [::Google::Cloud::Speech::V1::SpeechAdaptation] # Speech adaptation configuration improves the accuracy of speech # recognition. For more information, see the [speech # adaptation](https://cloud.google.com/speech-to-text/docs/adaptation) # documentation. 
# When speech adaptation is set it supersedes the `speech_contexts` field. # @!attribute [rw] speech_contexts # @return [::Array<::Google::Cloud::Speech::V1::SpeechContext>] # Array of {::Google::Cloud::Speech::V1::SpeechContext SpeechContext}. # A means to provide context to assist the speech recognition. For more # information, see # [speech # adaptation](https://cloud.google.com/speech-to-text/docs/adaptation). # @!attribute [rw] enable_word_time_offsets # @return [::Boolean] # If `true`, the top result includes a list of words and # the start and end time offsets (timestamps) for those words. If # `false`, no word-level time offset information is returned. The default is # `false`. # @!attribute [rw] enable_word_confidence # @return [::Boolean] # If `true`, the top result includes a list of words and the # confidence for those words. If `false`, no word-level confidence # information is returned. The default is `false`. # @!attribute [rw] enable_automatic_punctuation # @return [::Boolean] # If 'true', adds punctuation to recognition result hypotheses. # This feature is only available in select languages. Setting this for # requests in other languages has no effect at all. # The default 'false' value does not add punctuation to result hypotheses. # @!attribute [rw] enable_spoken_punctuation # @return [::Google::Protobuf::BoolValue] # The spoken punctuation behavior for the call # If not set, uses default behavior based on model of choice # e.g. command_and_search will enable spoken punctuation by default # If 'true', replaces spoken punctuation with the corresponding symbols in # the request. For example, "how are you question mark" becomes "how are # you?". See https://cloud.google.com/speech-to-text/docs/spoken-punctuation # for support. If 'false', spoken punctuation is not replaced. 
# @!attribute [rw] enable_spoken_emojis # @return [::Google::Protobuf::BoolValue] # The spoken emoji behavior for the call # If not set, uses default behavior based on model of choice # If 'true', adds spoken emoji formatting for the request. This will replace # spoken emojis with the corresponding Unicode symbols in the final # transcript. If 'false', spoken emojis are not replaced. # @!attribute [rw] diarization_config # @return [::Google::Cloud::Speech::V1::SpeakerDiarizationConfig] # Config to enable speaker diarization and set additional # parameters to make diarization better suited for your application. # Note: When this is enabled, we send all the words from the beginning of the # audio for the top alternative in every consecutive STREAMING responses. # This is done in order to improve our speaker tags as our models learn to # identify the speakers in the conversation over time. # For non-streaming requests, the diarization results will be provided only # in the top alternative of the FINAL SpeechRecognitionResult. # @!attribute [rw] metadata # @return [::Google::Cloud::Speech::V1::RecognitionMetadata] # Metadata regarding this request. # @!attribute [rw] model # @return [::String] # Which model to select for the given request. Select the model # best suited to your domain to get best results. If a model is not # explicitly specified, then we auto-select a model based on the parameters # in the RecognitionConfig. # <table> # <tr> # <td><b>Model</b></td> # <td><b>Description</b></td> # </tr> # <tr> # <td><code>command_and_search</code></td> # <td>Best for short queries such as voice commands or voice search.</td> # </tr> # <tr> # <td><code>phone_call</code></td> # <td>Best for audio that originated from a phone call (typically # recorded at an 8khz sampling rate).</td> # </tr> # <tr> # <td><code>video</code></td> # <td>Best for audio that originated from video or includes multiple # speakers. 
Ideally the audio is recorded at a 16khz or greater # sampling rate. This is a premium model that costs more than the # standard rate.</td> # </tr> # <tr> # <td><code>default</code></td> # <td>Best for audio that is not one of the specific audio models. # For example, long-form audio. Ideally the audio is high-fidelity, # recorded at a 16khz or greater sampling rate.</td> # </tr> # </table> # @!attribute [rw] use_enhanced # @return [::Boolean] # Set to true to use an enhanced model for speech recognition. # If `use_enhanced` is set to true and the `model` field is not set, then # an appropriate enhanced model is chosen if an enhanced model exists for # the audio. # # If `use_enhanced` is true and an enhanced version of the specified model # does not exist, then the speech is recognized using the standard version # of the specified model. class RecognitionConfig include ::Google::Protobuf::MessageExts extend ::Google::Protobuf::MessageExts::ClassMethods # The encoding of the audio data sent in the request. # # All encodings support only 1 channel (mono) audio, unless the # `audio_channel_count` and `enable_separate_recognition_per_channel` fields # are set. # # For best results, the audio source should be captured and transmitted using # a lossless encoding (`FLAC` or `LINEAR16`). The accuracy of the speech # recognition can be reduced if lossy codecs are used to capture or transmit # audio, particularly if background noise is present. Lossy codecs include # `MULAW`, `AMR`, `AMR_WB`, `OGG_OPUS`, `SPEEX_WITH_HEADER_BYTE`, `MP3`, # and `WEBM_OPUS`. # # The `FLAC` and `WAV` audio file formats include a header that describes the # included audio content. You can request recognition for `WAV` files that # contain either `LINEAR16` or `MULAW` encoded audio. # If you send `FLAC` or `WAV` audio file format in # your request, you do not need to specify an `AudioEncoding`; the audio # encoding format is determined from the file header. 
If you specify # an `AudioEncoding` when you send `FLAC` or `WAV` audio, the # encoding configuration must match the encoding described in the audio # header; otherwise the request returns an # [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT] error code. module AudioEncoding # Not specified. ENCODING_UNSPECIFIED = 0 # Uncompressed 16-bit signed little-endian samples (Linear PCM). LINEAR16 = 1 # `FLAC` (Free Lossless Audio # Codec) is the recommended encoding because it is # lossless--therefore recognition is not compromised--and # requires only about half the bandwidth of `LINEAR16`. `FLAC` stream # encoding supports 16-bit and 24-bit samples, however, not all fields in # `STREAMINFO` are supported. FLAC = 2 # 8-bit samples that compand 14-bit audio samples using G.711 PCMU/mu-law. MULAW = 3 # Adaptive Multi-Rate Narrowband codec. `sample_rate_hertz` must be 8000. AMR = 4 # Adaptive Multi-Rate Wideband codec. `sample_rate_hertz` must be 16000. AMR_WB = 5 # Opus encoded audio frames in Ogg container # ([OggOpus](https://wiki.xiph.org/OggOpus)). # `sample_rate_hertz` must be one of 8000, 12000, 16000, 24000, or 48000. OGG_OPUS = 6 # Although the use of lossy encodings is not recommended, if a very low # bitrate encoding is required, `OGG_OPUS` is highly preferred over # Speex encoding. The [Speex](https://speex.org/) encoding supported by # Cloud Speech API has a header byte in each block, as in MIME type # `audio/x-speex-with-header-byte`. # It is a variant of the RTP Speex encoding defined in # [RFC 5574](https://tools.ietf.org/html/rfc5574). # The stream is a sequence of blocks, one block per RTP packet. Each block # starts with a byte containing the length of the block, in bytes, followed # by one or more frames of Speex data, padded to an integral number of # bytes (octets) as specified in RFC 5574. In other words, each RTP header # is replaced with a single byte containing the block length. Only Speex # wideband is supported.
`sample_rate_hertz` must be 16000. SPEEX_WITH_HEADER_BYTE = 7 # Opus encoded audio frames in WebM container # ([WebM](https://www.webmproject.org/docs/container/)). `sample_rate_hertz` must be # one of 8000, 12000, 16000, 24000, or 48000. WEBM_OPUS = 9 end end # Config to enable speaker diarization. # @!attribute [rw] enable_speaker_diarization # @return [::Boolean] # If 'true', enables speaker detection for each recognized word in # the top alternative of the recognition result using a speaker_tag provided # in the WordInfo. # @!attribute [rw] min_speaker_count # @return [::Integer] # Minimum number of speakers in the conversation. This range gives you more # flexibility by allowing the system to automatically determine the correct # number of speakers. If not set, the default value is 2. # @!attribute [rw] max_speaker_count # @return [::Integer] # Maximum number of speakers in the conversation. This range gives you more # flexibility by allowing the system to automatically determine the correct # number of speakers. If not set, the default value is 6. # @!attribute [r] speaker_tag # @return [::Integer] # Output only. Unused. class SpeakerDiarizationConfig include ::Google::Protobuf::MessageExts extend ::Google::Protobuf::MessageExts::ClassMethods end # Description of audio data to be recognized. # @!attribute [rw] interaction_type # @return [::Google::Cloud::Speech::V1::RecognitionMetadata::InteractionType] # The use case most closely describing the audio content to be recognized. # @!attribute [rw] industry_naics_code_of_audio # @return [::Integer] # The industry vertical to which this speech recognition request most # closely applies. This is most indicative of the topics contained # in the audio. Use the 6-digit NAICS code to identify the industry # vertical - see https://www.naics.com/search/.
# @!attribute [rw] microphone_distance # @return [::Google::Cloud::Speech::V1::RecognitionMetadata::MicrophoneDistance] # The audio type that most closely describes the audio being recognized. # @!attribute [rw] original_media_type # @return [::Google::Cloud::Speech::V1::RecognitionMetadata::OriginalMediaType] # The original media the speech was recorded on. # @!attribute [rw] recording_device_type # @return [::Google::Cloud::Speech::V1::RecognitionMetadata::RecordingDeviceType] # The type of device the speech was recorded with. # @!attribute [rw] recording_device_name # @return [::String] # The device used to make the recording. Examples 'Nexus 5X' or # 'Polycom SoundStation IP 6000' or 'POTS' or 'VoIP' or # 'Cardioid Microphone'. # @!attribute [rw] original_mime_type # @return [::String] # Mime type of the original audio file. For example `audio/m4a`, # `audio/x-alaw-basic`, `audio/mp3`, `audio/3gpp`. # A list of possible audio mime types is maintained at # http://www.iana.org/assignments/media-types/media-types.xhtml#audio # @!attribute [rw] audio_topic # @return [::String] # Description of the content. Eg. "Recordings of federal supreme court # hearings from 2012". class RecognitionMetadata include ::Google::Protobuf::MessageExts extend ::Google::Protobuf::MessageExts::ClassMethods # Use case categories that the audio recognition request can be described # by. module InteractionType # Use case is either unknown or is something other than one of the other # values below. INTERACTION_TYPE_UNSPECIFIED = 0 # Multiple people in a conversation or discussion. For example in a # meeting with two or more people actively participating. Typically # all the primary people speaking would be in the same room (if not, # see PHONE_CALL) DISCUSSION = 1 # One or more persons lecturing or presenting to others, mostly # uninterrupted. PRESENTATION = 2 # A phone-call or video-conference in which two or more people, who are # not in the same room, are actively participating. 
PHONE_CALL = 3 # A recorded message intended for another person to listen to. VOICEMAIL = 4 # Professionally produced audio (eg. TV Show, Podcast). PROFESSIONALLY_PRODUCED = 5 # Transcribe spoken questions and queries into text. VOICE_SEARCH = 6 # Transcribe voice commands, such as for controlling a device. VOICE_COMMAND = 7 # Transcribe speech to text to create a written document, such as a # text-message, email or report. DICTATION = 8 end # Enumerates the types of capture settings describing an audio file. module MicrophoneDistance # Audio type is not known. MICROPHONE_DISTANCE_UNSPECIFIED = 0 # The audio was captured from a closely placed microphone. Eg. phone, # dictaphone, or handheld microphone. Generally if the speaker is within # 1 meter of the microphone. NEARFIELD = 1 # The speaker is within 3 meters of the microphone. MIDFIELD = 2 # The speaker is more than 3 meters away from the microphone. FARFIELD = 3 end # The original media the speech was recorded on. module OriginalMediaType # Unknown original media type. ORIGINAL_MEDIA_TYPE_UNSPECIFIED = 0 # The speech data is an audio recording. AUDIO = 1 # The speech data originally recorded on a video. VIDEO = 2 end # The type of device the speech was recorded with. module RecordingDeviceType # The recording device is unknown. RECORDING_DEVICE_TYPE_UNSPECIFIED = 0 # Speech was recorded on a smartphone. SMARTPHONE = 1 # Speech was recorded using a personal computer or tablet. PC = 2 # Speech was recorded over a phone line. PHONE_LINE = 3 # Speech was recorded in a vehicle. VEHICLE = 4 # Speech was recorded outdoors. OTHER_OUTDOOR_DEVICE = 5 # Speech was recorded indoors. OTHER_INDOOR_DEVICE = 6 end end # Provides "hints" to the speech recognizer to favor specific words and phrases # in the results. # @!attribute [rw] phrases # @return [::Array<::String>] # A list of strings containing words and phrases "hints" so that # the speech recognition is more likely to recognize them.
This can be used # to improve the accuracy for specific words and phrases, for example, if # specific commands are typically spoken by the user. This can also be used # to add additional words to the vocabulary of the recognizer. See # [usage limits](https://cloud.google.com/speech-to-text/quotas#content). # # List items can also be set to classes for groups of words that represent # common concepts that occur in natural language. For example, rather than # providing phrase hints for every month of the year, using the $MONTH class # improves the likelihood of correctly transcribing audio that includes # months. # @!attribute [rw] boost # @return [::Float] # Hint Boost. Positive value will increase the probability that a specific # phrase will be recognized over other similar sounding phrases. The higher # the boost, the higher the chance of false positive recognition as well. # Negative boost values would correspond to anti-biasing. Anti-biasing is not # enabled, so negative boost will simply be ignored. Though `boost` can # accept a wide range of positive values, most use cases are best served with # values between 0 and 20. We recommend using a binary search approach to # finding the optimal value for your use case. class SpeechContext include ::Google::Protobuf::MessageExts extend ::Google::Protobuf::MessageExts::ClassMethods end # Contains audio data in the encoding specified in the `RecognitionConfig`. # Either `content` or `uri` must be supplied. Supplying both or neither # returns [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]. See # [content limits](https://cloud.google.com/speech-to-text/quotas#content). # @!attribute [rw] content # @return [::String] # The audio data bytes encoded as specified in # `RecognitionConfig`. Note: as with all bytes fields, proto buffers use a # pure binary representation, whereas JSON representations use base64. 
# @!attribute [rw] uri # @return [::String] # URI that points to a file that contains audio data bytes as specified in # `RecognitionConfig`. The file must not be compressed (for example, gzip). # Currently, only Google Cloud Storage URIs are # supported, which must be specified in the following format: # `gs://bucket_name/object_name` (other URI formats return # [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]). For more information, see # [Request URIs](https://cloud.google.com/storage/docs/reference-uris). class RecognitionAudio include ::Google::Protobuf::MessageExts extend ::Google::Protobuf::MessageExts::ClassMethods end # The only message returned to the client by the `Recognize` method. It # contains the result as zero or more sequential `SpeechRecognitionResult` # messages. # @!attribute [rw] results # @return [::Array<::Google::Cloud::Speech::V1::SpeechRecognitionResult>] # Sequential list of transcription results corresponding to # sequential portions of audio. # @!attribute [rw] total_billed_time # @return [::Google::Protobuf::Duration] # When available, billed audio seconds for the corresponding request. class RecognizeResponse include ::Google::Protobuf::MessageExts extend ::Google::Protobuf::MessageExts::ClassMethods end # The only message returned to the client by the `LongRunningRecognize` method. # It contains the result as zero or more sequential `SpeechRecognitionResult` # messages. It is included in the `result.response` field of the `Operation` # returned by the `GetOperation` call of the `google::longrunning::Operations` # service. # @!attribute [rw] results # @return [::Array<::Google::Cloud::Speech::V1::SpeechRecognitionResult>] # Sequential list of transcription results corresponding to # sequential portions of audio. # @!attribute [rw] total_billed_time # @return [::Google::Protobuf::Duration] # When available, billed audio seconds for the corresponding request. 
# @!attribute [rw] output_config # @return [::Google::Cloud::Speech::V1::TranscriptOutputConfig] # Original output config if present in the request. # @!attribute [rw] output_error # @return [::Google::Rpc::Status] # If the transcript output fails this field contains the relevant error. class LongRunningRecognizeResponse include ::Google::Protobuf::MessageExts extend ::Google::Protobuf::MessageExts::ClassMethods end # Describes the progress of a long-running `LongRunningRecognize` call. It is # included in the `metadata` field of the `Operation` returned by the # `GetOperation` call of the `google::longrunning::Operations` service. # @!attribute [rw] progress_percent # @return [::Integer] # Approximate percentage of audio processed thus far. Guaranteed to be 100 # when the audio is fully processed and the results are available. # @!attribute [rw] start_time # @return [::Google::Protobuf::Timestamp] # Time when the request was received. # @!attribute [rw] last_update_time # @return [::Google::Protobuf::Timestamp] # Time of the most recent processing update. # @!attribute [r] uri # @return [::String] # Output only. The URI of the audio file being transcribed. Empty if the audio was sent # as byte content. class LongRunningRecognizeMetadata include ::Google::Protobuf::MessageExts extend ::Google::Protobuf::MessageExts::ClassMethods end # `StreamingRecognizeResponse` is the only message returned to the client by # `StreamingRecognize`. A series of zero or more `StreamingRecognizeResponse` # messages are streamed back to the client. If there is no recognizable # audio, and `single_utterance` is set to false, then no messages are streamed # back to the client. # # Here's an example of a series of `StreamingRecognizeResponse`s that might be # returned while processing audio: # # 1. results { alternatives { transcript: "tube" } stability: 0.01 } # # 2. results { alternatives { transcript: "to be a" } stability: 0.01 } # # 3. 
results { alternatives { transcript: "to be" } stability: 0.9 } # results { alternatives { transcript: " or not to be" } stability: 0.01 } # # 4. results { alternatives { transcript: "to be or not to be" # confidence: 0.92 } # alternatives { transcript: "to bee or not to bee" } # is_final: true } # # 5. results { alternatives { transcript: " that's" } stability: 0.01 } # # 6. results { alternatives { transcript: " that is" } stability: 0.9 } # results { alternatives { transcript: " the question" } stability: 0.01 } # # 7. results { alternatives { transcript: " that is the question" # confidence: 0.98 } # alternatives { transcript: " that was the question" } # is_final: true } # # Notes: # # - Only two of the above responses #4 and #7 contain final results; they are # indicated by `is_final: true`. Concatenating these together generates the # full transcript: "to be or not to be that is the question". # # - The others contain interim `results`. #3 and #6 contain two interim # `results`: the first portion has a high stability and is less likely to # change; the second portion has a low stability and is very likely to # change. A UI designer might choose to show only high stability `results`. # # - The specific `stability` and `confidence` values shown above are only for # illustrative purposes. Actual values may vary. # # - In each response, only one of these fields will be set: # `error`, # `speech_event_type`, or # one or more (repeated) `results`. # @!attribute [rw] error # @return [::Google::Rpc::Status] # If set, returns a {::Google::Rpc::Status google.rpc.Status} message that # specifies the error for the operation. # @!attribute [rw] results # @return [::Array<::Google::Cloud::Speech::V1::StreamingRecognitionResult>] # This repeated list contains zero or more results that # correspond to consecutive portions of the audio currently being processed. 
# It contains zero or one `is_final=true` result (the newly settled portion), # followed by zero or more `is_final=false` results (the interim results). # @!attribute [rw] speech_event_type # @return [::Google::Cloud::Speech::V1::StreamingRecognizeResponse::SpeechEventType] # Indicates the type of speech event. # @!attribute [rw] total_billed_time # @return [::Google::Protobuf::Duration] # When available, billed audio seconds for the stream. # Set only if this is the last response in the stream. class StreamingRecognizeResponse include ::Google::Protobuf::MessageExts extend ::Google::Protobuf::MessageExts::ClassMethods # Indicates the type of speech event. module SpeechEventType # No speech event specified. SPEECH_EVENT_UNSPECIFIED = 0 # This event indicates that the server has detected the end of the user's # speech utterance and expects no additional speech. Therefore, the server # will not process additional audio (although it may subsequently return # additional results). The client should stop sending additional audio # data, half-close the gRPC connection, and wait for any additional results # until the server closes the gRPC connection. This event is only sent if # `single_utterance` was set to `true`, and is not used otherwise. END_OF_SINGLE_UTTERANCE = 1 end end # A streaming speech recognition result corresponding to a portion of the audio # that is currently being processed. # @!attribute [rw] alternatives # @return [::Array<::Google::Cloud::Speech::V1::SpeechRecognitionAlternative>] # May contain one or more recognition hypotheses (up to the # maximum specified in `max_alternatives`). # These alternatives are ordered in terms of accuracy, with the top (first) # alternative being the most probable, as ranked by the recognizer. # @!attribute [rw] is_final # @return [::Boolean] # If `false`, this `StreamingRecognitionResult` represents an # interim result that may change. 
If `true`, this is the final time the # speech service will return this particular `StreamingRecognitionResult`, # the recognizer will not return any further hypotheses for this portion of # the transcript and corresponding audio. # @!attribute [rw] stability # @return [::Float] # An estimate of the likelihood that the recognizer will not # change its guess about this interim result. Values range from 0.0 # (completely unstable) to 1.0 (completely stable). # This field is only provided for interim results (`is_final=false`). # The default of 0.0 is a sentinel value indicating `stability` was not set. # @!attribute [rw] result_end_time # @return [::Google::Protobuf::Duration] # Time offset of the end of this result relative to the # beginning of the audio. # @!attribute [rw] channel_tag # @return [::Integer] # For multi-channel audio, this is the channel number corresponding to the # recognized result for the audio from that channel. # For audio_channel_count = N, its output values can range from '1' to 'N'. # @!attribute [r] language_code # @return [::String] # Output only. The [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag # of the language in this result. This language code was detected to have # the most likelihood of being spoken in the audio. class StreamingRecognitionResult include ::Google::Protobuf::MessageExts extend ::Google::Protobuf::MessageExts::ClassMethods end # A speech recognition result corresponding to a portion of the audio. # @!attribute [rw] alternatives # @return [::Array<::Google::Cloud::Speech::V1::SpeechRecognitionAlternative>] # May contain one or more recognition hypotheses (up to the # maximum specified in `max_alternatives`). # These alternatives are ordered in terms of accuracy, with the top (first) # alternative being the most probable, as ranked by the recognizer. 
# @!attribute [rw] channel_tag # @return [::Integer] # For multi-channel audio, this is the channel number corresponding to the # recognized result for the audio from that channel. # For audio_channel_count = N, its output values can range from '1' to 'N'. # @!attribute [rw] result_end_time # @return [::Google::Protobuf::Duration] # Time offset of the end of this result relative to the # beginning of the audio. # @!attribute [r] language_code # @return [::String] # Output only. The [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag # of the language in this result. This language code was detected to have # the most likelihood of being spoken in the audio. class SpeechRecognitionResult include ::Google::Protobuf::MessageExts extend ::Google::Protobuf::MessageExts::ClassMethods end # Alternative hypotheses (a.k.a. n-best list). # @!attribute [rw] transcript # @return [::String] # Transcript text representing the words that the user spoke. # @!attribute [rw] confidence # @return [::Float] # The confidence estimate between 0.0 and 1.0. A higher number # indicates an estimated greater likelihood that the recognized words are # correct. This field is set only for the top alternative of a non-streaming # result or, of a streaming result where `is_final=true`. # This field is not guaranteed to be accurate and users should not rely on it # to be always provided. # The default of 0.0 is a sentinel value indicating `confidence` was not set. # @!attribute [rw] words # @return [::Array<::Google::Cloud::Speech::V1::WordInfo>] # A list of word-specific information for each recognized word. # Note: When `enable_speaker_diarization` is true, you will see all the words # from the beginning of the audio. class SpeechRecognitionAlternative include ::Google::Protobuf::MessageExts extend ::Google::Protobuf::MessageExts::ClassMethods end # Word-specific information for recognized words. 
# @!attribute [rw] start_time # @return [::Google::Protobuf::Duration] # Time offset relative to the beginning of the audio, # and corresponding to the start of the spoken word. # This field is only set if `enable_word_time_offsets=true` and only # in the top hypothesis. # This is an experimental feature and the accuracy of the time offset can # vary. # @!attribute [rw] end_time # @return [::Google::Protobuf::Duration] # Time offset relative to the beginning of the audio, # and corresponding to the end of the spoken word. # This field is only set if `enable_word_time_offsets=true` and only # in the top hypothesis. # This is an experimental feature and the accuracy of the time offset can # vary. # @!attribute [rw] word # @return [::String] # The word corresponding to this set of information. # @!attribute [rw] confidence # @return [::Float] # The confidence estimate between 0.0 and 1.0. A higher number # indicates an estimated greater likelihood that the recognized words are # correct. This field is set only for the top alternative of a non-streaming # result or, of a streaming result where `is_final=true`. # This field is not guaranteed to be accurate and users should not rely on it # to be always provided. # The default of 0.0 is a sentinel value indicating `confidence` was not set. # @!attribute [r] speaker_tag # @return [::Integer] # Output only. A distinct integer value is assigned for every speaker within # the audio. This field specifies which one of those speakers was detected to # have spoken this word. Value ranges from '1' to diarization_speaker_count. # speaker_tag is set if enable_speaker_diarization = 'true' and only in the # top alternative. 
class WordInfo include ::Google::Protobuf::MessageExts extend ::Google::Protobuf::MessageExts::ClassMethods end end end end end docs(speech-v1): Document the latest_long and latest_short recognition models # frozen_string_literal: true # Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Auto-generated by gapic-generator-ruby. DO NOT EDIT! module Google module Cloud module Speech module V1 # The top-level message sent by the client for the `Recognize` method. # @!attribute [rw] config # @return [::Google::Cloud::Speech::V1::RecognitionConfig] # Required. Provides information to the recognizer that specifies how to # process the request. # @!attribute [rw] audio # @return [::Google::Cloud::Speech::V1::RecognitionAudio] # Required. The audio data to be recognized. class RecognizeRequest include ::Google::Protobuf::MessageExts extend ::Google::Protobuf::MessageExts::ClassMethods end # The top-level message sent by the client for the `LongRunningRecognize` # method. # @!attribute [rw] config # @return [::Google::Cloud::Speech::V1::RecognitionConfig] # Required. Provides information to the recognizer that specifies how to # process the request. # @!attribute [rw] audio # @return [::Google::Cloud::Speech::V1::RecognitionAudio] # Required. The audio data to be recognized. # @!attribute [rw] output_config # @return [::Google::Cloud::Speech::V1::TranscriptOutputConfig] # Optional. Specifies an optional destination for the recognition results. 
class LongRunningRecognizeRequest include ::Google::Protobuf::MessageExts extend ::Google::Protobuf::MessageExts::ClassMethods end # Specifies an optional destination for the recognition results. # @!attribute [rw] gcs_uri # @return [::String] # Specifies a Cloud Storage URI for the recognition results. Must be # specified in the format: `gs://bucket_name/object_name`, and the bucket # must already exist. class TranscriptOutputConfig include ::Google::Protobuf::MessageExts extend ::Google::Protobuf::MessageExts::ClassMethods end # The top-level message sent by the client for the `StreamingRecognize` method. # Multiple `StreamingRecognizeRequest` messages are sent. The first message # must contain a `streaming_config` message and must not contain # `audio_content`. All subsequent messages must contain `audio_content` and # must not contain a `streaming_config` message. # @!attribute [rw] streaming_config # @return [::Google::Cloud::Speech::V1::StreamingRecognitionConfig] # Provides information to the recognizer that specifies how to process the # request. The first `StreamingRecognizeRequest` message must contain a # `streaming_config` message. # @!attribute [rw] audio_content # @return [::String] # The audio data to be recognized. Sequential chunks of audio data are sent # in sequential `StreamingRecognizeRequest` messages. The first # `StreamingRecognizeRequest` message must not contain `audio_content` data # and all subsequent `StreamingRecognizeRequest` messages must contain # `audio_content` data. The audio bytes must be encoded as specified in # `RecognitionConfig`. Note: as with all bytes fields, proto buffers use a # pure binary representation (not base64). See # [content limits](https://cloud.google.com/speech-to-text/quotas#content). class StreamingRecognizeRequest include ::Google::Protobuf::MessageExts extend ::Google::Protobuf::MessageExts::ClassMethods end # Provides information to the recognizer that specifies how to process the # request. 
# @!attribute [rw] config # @return [::Google::Cloud::Speech::V1::RecognitionConfig] # Required. Provides information to the recognizer that specifies how to # process the request. # @!attribute [rw] single_utterance # @return [::Boolean] # If `false` or omitted, the recognizer will perform continuous # recognition (continuing to wait for and process audio even if the user # pauses speaking) until the client closes the input stream (gRPC API) or # until the maximum time limit has been reached. May return multiple # `StreamingRecognitionResult`s with the `is_final` flag set to `true`. # # If `true`, the recognizer will detect a single spoken utterance. When it # detects that the user has paused or stopped speaking, it will return an # `END_OF_SINGLE_UTTERANCE` event and cease recognition. It will return no # more than one `StreamingRecognitionResult` with the `is_final` flag set to # `true`. # # The `single_utterance` field can only be used with specified models, # otherwise an error is thrown. The `model` field in [`RecognitionConfig`][] # must be set to: # # * `command_and_search` # * `phone_call` AND additional field `useEnhanced`=`true` # * The `model` field is left undefined. In this case the API auto-selects # a model based on any other parameters that you set in # `RecognitionConfig`. # @!attribute [rw] interim_results # @return [::Boolean] # If `true`, interim results (tentative hypotheses) may be # returned as they become available (these interim results are indicated with # the `is_final=false` flag). # If `false` or omitted, only `is_final=true` result(s) are returned. class StreamingRecognitionConfig include ::Google::Protobuf::MessageExts extend ::Google::Protobuf::MessageExts::ClassMethods end # Provides information to the recognizer that specifies how to process the # request. # @!attribute [rw] encoding # @return [::Google::Cloud::Speech::V1::RecognitionConfig::AudioEncoding] # Encoding of audio data sent in all `RecognitionAudio` messages. 
# This field is optional for `FLAC` and `WAV` audio files and required # for all other audio formats. For details, see # {::Google::Cloud::Speech::V1::RecognitionConfig::AudioEncoding AudioEncoding}. # @!attribute [rw] sample_rate_hertz # @return [::Integer] # Sample rate in Hertz of the audio data sent in all # `RecognitionAudio` messages. Valid values are: 8000-48000. # 16000 is optimal. For best results, set the sampling rate of the audio # source to 16000 Hz. If that's not possible, use the native sample rate of # the audio source (instead of re-sampling). # This field is optional for FLAC and WAV audio files, but is # required for all other audio formats. For details, see # {::Google::Cloud::Speech::V1::RecognitionConfig::AudioEncoding AudioEncoding}. # @!attribute [rw] audio_channel_count # @return [::Integer] # The number of channels in the input audio data. # ONLY set this for MULTI-CHANNEL recognition. # Valid values for LINEAR16 and FLAC are `1`-`8`. # Valid values for OGG_OPUS are '1'-'254'. # Valid value for MULAW, AMR, AMR_WB and SPEEX_WITH_HEADER_BYTE is only `1`. # If `0` or omitted, defaults to one channel (mono). # Note: We only recognize the first channel by default. # To perform independent recognition on each channel set # `enable_separate_recognition_per_channel` to 'true'. # @!attribute [rw] enable_separate_recognition_per_channel # @return [::Boolean] # This needs to be set to `true` explicitly and `audio_channel_count` > 1 # to get each channel recognized separately. The recognition result will # contain a `channel_tag` field to state which channel that result belongs # to. If this is not true, we will only recognize the first channel. The # request is billed cumulatively for all channels recognized: # `audio_channel_count` multiplied by the length of the audio. # @!attribute [rw] language_code # @return [::String] # Required. The language of the supplied audio as a # [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag. 
# Example: "en-US". # See [Language # Support](https://cloud.google.com/speech-to-text/docs/languages) for a list # of the currently supported language codes. # @!attribute [rw] alternative_language_codes # @return [::Array<::String>] # A list of up to 3 additional # [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tags, # listing possible alternative languages of the supplied audio. # See [Language # Support](https://cloud.google.com/speech-to-text/docs/languages) for a list # of the currently supported language codes. If alternative languages are # listed, recognition result will contain recognition in the most likely # language detected including the main language_code. The recognition result # will include the language tag of the language detected in the audio. Note: # This feature is only supported for Voice Command and Voice Search use cases # and performance may vary for other use cases (e.g., phone call # transcription). # @!attribute [rw] max_alternatives # @return [::Integer] # Maximum number of recognition hypotheses to be returned. # Specifically, the maximum number of `SpeechRecognitionAlternative` messages # within each `SpeechRecognitionResult`. # The server may return fewer than `max_alternatives`. # Valid values are `0`-`30`. A value of `0` or `1` will return a maximum of # one. If omitted, will return a maximum of one. # @!attribute [rw] profanity_filter # @return [::Boolean] # If set to `true`, the server will attempt to filter out # profanities, replacing all but the initial character in each filtered word # with asterisks, e.g. "f***". If set to `false` or omitted, profanities # won't be filtered out. # @!attribute [rw] adaptation # @return [::Google::Cloud::Speech::V1::SpeechAdaptation] # Speech adaptation configuration improves the accuracy of speech # recognition. For more information, see the [speech # adaptation](https://cloud.google.com/speech-to-text/docs/adaptation) # documentation. 
# When speech adaptation is set it supersedes the `speech_contexts` field. # @!attribute [rw] speech_contexts # @return [::Array<::Google::Cloud::Speech::V1::SpeechContext>] # Array of {::Google::Cloud::Speech::V1::SpeechContext SpeechContext}. # A means to provide context to assist the speech recognition. For more # information, see # [speech # adaptation](https://cloud.google.com/speech-to-text/docs/adaptation). # @!attribute [rw] enable_word_time_offsets # @return [::Boolean] # If `true`, the top result includes a list of words and # the start and end time offsets (timestamps) for those words. If # `false`, no word-level time offset information is returned. The default is # `false`. # @!attribute [rw] enable_word_confidence # @return [::Boolean] # If `true`, the top result includes a list of words and the # confidence for those words. If `false`, no word-level confidence # information is returned. The default is `false`. # @!attribute [rw] enable_automatic_punctuation # @return [::Boolean] # If 'true', adds punctuation to recognition result hypotheses. # This feature is only available in select languages. Setting this for # requests in other languages has no effect at all. # The default 'false' value does not add punctuation to result hypotheses. # @!attribute [rw] enable_spoken_punctuation # @return [::Google::Protobuf::BoolValue] # The spoken punctuation behavior for the call # If not set, uses default behavior based on model of choice # e.g. command_and_search will enable spoken punctuation by default # If 'true', replaces spoken punctuation with the corresponding symbols in # the request. For example, "how are you question mark" becomes "how are # you?". See https://cloud.google.com/speech-to-text/docs/spoken-punctuation # for support. If 'false', spoken punctuation is not replaced. 
# @!attribute [rw] enable_spoken_emojis # @return [::Google::Protobuf::BoolValue] # The spoken emoji behavior for the call # If not set, uses default behavior based on model of choice # If 'true', adds spoken emoji formatting for the request. This will replace # spoken emojis with the corresponding Unicode symbols in the final # transcript. If 'false', spoken emojis are not replaced. # @!attribute [rw] diarization_config # @return [::Google::Cloud::Speech::V1::SpeakerDiarizationConfig] # Config to enable speaker diarization and set additional # parameters to make diarization better suited for your application. # Note: When this is enabled, we send all the words from the beginning of the # audio for the top alternative in every consecutive STREAMING responses. # This is done in order to improve our speaker tags as our models learn to # identify the speakers in the conversation over time. # For non-streaming requests, the diarization results will be provided only # in the top alternative of the FINAL SpeechRecognitionResult. # @!attribute [rw] metadata # @return [::Google::Cloud::Speech::V1::RecognitionMetadata] # Metadata regarding this request. # @!attribute [rw] model # @return [::String] # Which model to select for the given request. Select the model # best suited to your domain to get best results. If a model is not # explicitly specified, then we auto-select a model based on the parameters # in the RecognitionConfig. 
# <table> # <tr> # <td><b>Model</b></td> # <td><b>Description</b></td> # </tr> # <tr> # <td><code>latest_long</code></td> # <td>Best for long form content like media or conversation.</td> # </tr> # <tr> # <td><code>latest_short</code></td> # <td>Best for short form content like commands or single shot directed # speech.</td> # </tr> # <tr> # <td><code>command_and_search</code></td> # <td>Best for short queries such as voice commands or voice search.</td> # </tr> # <tr> # <td><code>command_and_search</code></td> # <td>Best for short queries such as voice commands or voice search.</td> # </tr> # <tr> # <td><code>phone_call</code></td> # <td>Best for audio that originated from a phone call (typically # recorded at an 8khz sampling rate).</td> # </tr> # <tr> # <td><code>video</code></td> # <td>Best for audio that originated from video or includes multiple # speakers. Ideally the audio is recorded at a 16khz or greater # sampling rate. This is a premium model that costs more than the # standard rate.</td> # </tr> # <tr> # <td><code>default</code></td> # <td>Best for audio that is not one of the specific audio models. # For example, long-form audio. Ideally the audio is high-fidelity, # recorded at a 16khz or greater sampling rate.</td> # </tr> # </table> # @!attribute [rw] use_enhanced # @return [::Boolean] # Set to true to use an enhanced model for speech recognition. # If `use_enhanced` is set to true and the `model` field is not set, then # an appropriate enhanced model is chosen if an enhanced model exists for # the audio. # # If `use_enhanced` is true and an enhanced version of the specified model # does not exist, then the speech is recognized using the standard version # of the specified model. class RecognitionConfig include ::Google::Protobuf::MessageExts extend ::Google::Protobuf::MessageExts::ClassMethods # The encoding of the audio data sent in the request. 
# # All encodings support only 1 channel (mono) audio, unless the # `audio_channel_count` and `enable_separate_recognition_per_channel` fields # are set. # # For best results, the audio source should be captured and transmitted using # a lossless encoding (`FLAC` or `LINEAR16`). The accuracy of the speech # recognition can be reduced if lossy codecs are used to capture or transmit # audio, particularly if background noise is present. Lossy codecs include # `MULAW`, `AMR`, `AMR_WB`, `OGG_OPUS`, `SPEEX_WITH_HEADER_BYTE`, `MP3`, # and `WEBM_OPUS`. # # The `FLAC` and `WAV` audio file formats include a header that describes the # included audio content. You can request recognition for `WAV` files that # contain either `LINEAR16` or `MULAW` encoded audio. # If you send `FLAC` or `WAV` audio file format in # your request, you do not need to specify an `AudioEncoding`; the audio # encoding format is determined from the file header. If you specify # an `AudioEncoding` when you send send `FLAC` or `WAV` audio, the # encoding configuration must match the encoding described in the audio # header; otherwise the request returns an # [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT] error # code. module AudioEncoding # Not specified. ENCODING_UNSPECIFIED = 0 # Uncompressed 16-bit signed little-endian samples (Linear PCM). LINEAR16 = 1 # `FLAC` (Free Lossless Audio # Codec) is the recommended encoding because it is # lossless--therefore recognition is not compromised--and # requires only about half the bandwidth of `LINEAR16`. `FLAC` stream # encoding supports 16-bit and 24-bit samples, however, not all fields in # `STREAMINFO` are supported. FLAC = 2 # 8-bit samples that compand 14-bit audio samples using G.711 PCMU/mu-law. MULAW = 3 # Adaptive Multi-Rate Narrowband codec. `sample_rate_hertz` must be 8000. AMR = 4 # Adaptive Multi-Rate Wideband codec. `sample_rate_hertz` must be 16000. 
AMR_WB = 5 # Opus encoded audio frames in Ogg container # ([OggOpus](https://wiki.xiph.org/OggOpus)). # `sample_rate_hertz` must be one of 8000, 12000, 16000, 24000, or 48000. OGG_OPUS = 6 # Although the use of lossy encodings is not recommended, if a very low # bitrate encoding is required, `OGG_OPUS` is highly preferred over # Speex encoding. The [Speex](https://speex.org/) encoding supported by # Cloud Speech API has a header byte in each block, as in MIME type # `audio/x-speex-with-header-byte`. # It is a variant of the RTP Speex encoding defined in # [RFC 5574](https://tools.ietf.org/html/rfc5574). # The stream is a sequence of blocks, one block per RTP packet. Each block # starts with a byte containing the length of the block, in bytes, followed # by one or more frames of Speex data, padded to an integral number of # bytes (octets) as specified in RFC 5574. In other words, each RTP header # is replaced with a single byte containing the block length. Only Speex # wideband is supported. `sample_rate_hertz` must be 16000. SPEEX_WITH_HEADER_BYTE = 7 # Opus encoded audio frames in WebM container # ([OggOpus](https://wiki.xiph.org/OggOpus)). `sample_rate_hertz` must be # one of 8000, 12000, 16000, 24000, or 48000. WEBM_OPUS = 9 end end # Config to enable speaker diarization. # @!attribute [rw] enable_speaker_diarization # @return [::Boolean] # If 'true', enables speaker detection for each recognized word in # the top alternative of the recognition result using a speaker_tag provided # in the WordInfo. # @!attribute [rw] min_speaker_count # @return [::Integer] # Minimum number of speakers in the conversation. This range gives you more # flexibility by allowing the system to automatically determine the correct # number of speakers. If not set, the default value is 2. # @!attribute [rw] max_speaker_count # @return [::Integer] # Maximum number of speakers in the conversation. 
This range gives you more # flexibility by allowing the system to automatically determine the correct # number of speakers. If not set, the default value is 6. # @!attribute [r] speaker_tag # @return [::Integer] # Output only. Unused. class SpeakerDiarizationConfig include ::Google::Protobuf::MessageExts extend ::Google::Protobuf::MessageExts::ClassMethods end # Description of audio data to be recognized. # @!attribute [rw] interaction_type # @return [::Google::Cloud::Speech::V1::RecognitionMetadata::InteractionType] # The use case most closely describing the audio content to be recognized. # @!attribute [rw] industry_naics_code_of_audio # @return [::Integer] # The industry vertical to which this speech recognition request most # closely applies. This is most indicative of the topics contained # in the audio. Use the 6-digit NAICS code to identify the industry # vertical - see https://www.naics.com/search/. # @!attribute [rw] microphone_distance # @return [::Google::Cloud::Speech::V1::RecognitionMetadata::MicrophoneDistance] # The audio type that most closely describes the audio being recognized. # @!attribute [rw] original_media_type # @return [::Google::Cloud::Speech::V1::RecognitionMetadata::OriginalMediaType] # The original media the speech was recorded on. # @!attribute [rw] recording_device_type # @return [::Google::Cloud::Speech::V1::RecognitionMetadata::RecordingDeviceType] # The type of device the speech was recorded with. # @!attribute [rw] recording_device_name # @return [::String] # The device used to make the recording. Examples 'Nexus 5X' or # 'Polycom SoundStation IP 6000' or 'POTS' or 'VoIP' or # 'Cardioid Microphone'. # @!attribute [rw] original_mime_type # @return [::String] # Mime type of the original audio file. For example `audio/m4a`, # `audio/x-alaw-basic`, `audio/mp3`, `audio/3gpp`. 
# A list of possible audio mime types is maintained at # http://www.iana.org/assignments/media-types/media-types.xhtml#audio # @!attribute [rw] audio_topic # @return [::String] # Description of the content. Eg. "Recordings of federal supreme court # hearings from 2012". class RecognitionMetadata include ::Google::Protobuf::MessageExts extend ::Google::Protobuf::MessageExts::ClassMethods # Use case categories that the audio recognition request can be described # by. module InteractionType # Use case is either unknown or is something other than one of the other # values below. INTERACTION_TYPE_UNSPECIFIED = 0 # Multiple people in a conversation or discussion. For example in a # meeting with two or more people actively participating. Typically # all the primary people speaking would be in the same room (if not, # see PHONE_CALL) DISCUSSION = 1 # One or more persons lecturing or presenting to others, mostly # uninterrupted. PRESENTATION = 2 # A phone-call or video-conference in which two or more people, who are # not in the same room, are actively participating. PHONE_CALL = 3 # A recorded message intended for another person to listen to. VOICEMAIL = 4 # Professionally produced audio (eg. TV Show, Podcast). PROFESSIONALLY_PRODUCED = 5 # Transcribe spoken questions and queries into text. VOICE_SEARCH = 6 # Transcribe voice commands, such as for controlling a device. VOICE_COMMAND = 7 # Transcribe speech to text to create a written document, such as a # text-message, email or report. DICTATION = 8 end # Enumerates the types of capture settings describing an audio file. module MicrophoneDistance # Audio type is not known. MICROPHONE_DISTANCE_UNSPECIFIED = 0 # The audio was captured from a closely placed microphone. Eg. phone, # dictaphone, or handheld microphone. Generally if there speaker is within # 1 meter of the microphone. NEARFIELD = 1 # The speaker if within 3 meters of the microphone. MIDFIELD = 2 # The speaker is more than 3 meters away from the microphone. 
FARFIELD = 3 end # The original media the speech was recorded on. module OriginalMediaType # Unknown original media type. ORIGINAL_MEDIA_TYPE_UNSPECIFIED = 0 # The speech data is an audio recording. AUDIO = 1 # The speech data originally recorded on a video. VIDEO = 2 end # The type of device the speech was recorded with. module RecordingDeviceType # The recording device is unknown. RECORDING_DEVICE_TYPE_UNSPECIFIED = 0 # Speech was recorded on a smartphone. SMARTPHONE = 1 # Speech was recorded using a personal computer or tablet. PC = 2 # Speech was recorded over a phone line. PHONE_LINE = 3 # Speech was recorded in a vehicle. VEHICLE = 4 # Speech was recorded outdoors. OTHER_OUTDOOR_DEVICE = 5 # Speech was recorded indoors. OTHER_INDOOR_DEVICE = 6 end end # Provides "hints" to the speech recognizer to favor specific words and phrases # in the results. # @!attribute [rw] phrases # @return [::Array<::String>] # A list of strings containing words and phrases "hints" so that # the speech recognition is more likely to recognize them. This can be used # to improve the accuracy for specific words and phrases, for example, if # specific commands are typically spoken by the user. This can also be used # to add additional words to the vocabulary of the recognizer. See # [usage limits](https://cloud.google.com/speech-to-text/quotas#content). # # List items can also be set to classes for groups of words that represent # common concepts that occur in natural language. For example, rather than # providing phrase hints for every month of the year, using the $MONTH class # improves the likelihood of correctly transcribing audio that includes # months. # @!attribute [rw] boost # @return [::Float] # Hint Boost. Positive value will increase the probability that a specific # phrase will be recognized over other similar sounding phrases. The higher # the boost, the higher the chance of false positive recognition as well. # Negative boost values would correspond to anti-biasing. 
Anti-biasing is not # enabled, so negative boost will simply be ignored. Though `boost` can # accept a wide range of positive values, most use cases are best served with # values between 0 and 20. We recommend using a binary search approach to # finding the optimal value for your use case. class SpeechContext include ::Google::Protobuf::MessageExts extend ::Google::Protobuf::MessageExts::ClassMethods end # Contains audio data in the encoding specified in the `RecognitionConfig`. # Either `content` or `uri` must be supplied. Supplying both or neither # returns [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]. # See [content limits](https://cloud.google.com/speech-to-text/quotas#content). # @!attribute [rw] content # @return [::String] # The audio data bytes encoded as specified in # `RecognitionConfig`. Note: as with all bytes fields, proto buffers use a # pure binary representation, whereas JSON representations use base64. # @!attribute [rw] uri # @return [::String] # URI that points to a file that contains audio data bytes as specified in # `RecognitionConfig`. The file must not be compressed (for example, gzip). # Currently, only Google Cloud Storage URIs are # supported, which must be specified in the following format: # `gs://bucket_name/object_name` (other URI formats return # [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]). # For more information, see [Request # URIs](https://cloud.google.com/storage/docs/reference-uris). class RecognitionAudio include ::Google::Protobuf::MessageExts extend ::Google::Protobuf::MessageExts::ClassMethods end # The only message returned to the client by the `Recognize` method. It # contains the result as zero or more sequential `SpeechRecognitionResult` # messages. # @!attribute [rw] results # @return [::Array<::Google::Cloud::Speech::V1::SpeechRecognitionResult>] # Sequential list of transcription results corresponding to # sequential portions of audio. 
# @!attribute [rw] total_billed_time # @return [::Google::Protobuf::Duration] # When available, billed audio seconds for the corresponding request. class RecognizeResponse include ::Google::Protobuf::MessageExts extend ::Google::Protobuf::MessageExts::ClassMethods end # The only message returned to the client by the `LongRunningRecognize` method. # It contains the result as zero or more sequential `SpeechRecognitionResult` # messages. It is included in the `result.response` field of the `Operation` # returned by the `GetOperation` call of the `google::longrunning::Operations` # service. # @!attribute [rw] results # @return [::Array<::Google::Cloud::Speech::V1::SpeechRecognitionResult>] # Sequential list of transcription results corresponding to # sequential portions of audio. # @!attribute [rw] total_billed_time # @return [::Google::Protobuf::Duration] # When available, billed audio seconds for the corresponding request. # @!attribute [rw] output_config # @return [::Google::Cloud::Speech::V1::TranscriptOutputConfig] # Original output config if present in the request. # @!attribute [rw] output_error # @return [::Google::Rpc::Status] # If the transcript output fails this field contains the relevant error. class LongRunningRecognizeResponse include ::Google::Protobuf::MessageExts extend ::Google::Protobuf::MessageExts::ClassMethods end # Describes the progress of a long-running `LongRunningRecognize` call. It is # included in the `metadata` field of the `Operation` returned by the # `GetOperation` call of the `google::longrunning::Operations` service. # @!attribute [rw] progress_percent # @return [::Integer] # Approximate percentage of audio processed thus far. Guaranteed to be 100 # when the audio is fully processed and the results are available. # @!attribute [rw] start_time # @return [::Google::Protobuf::Timestamp] # Time when the request was received. 
# @!attribute [rw] last_update_time # @return [::Google::Protobuf::Timestamp] # Time of the most recent processing update. # @!attribute [r] uri # @return [::String] # Output only. The URI of the audio file being transcribed. Empty if the # audio was sent as byte content. class LongRunningRecognizeMetadata include ::Google::Protobuf::MessageExts extend ::Google::Protobuf::MessageExts::ClassMethods end # `StreamingRecognizeResponse` is the only message returned to the client by # `StreamingRecognize`. A series of zero or more `StreamingRecognizeResponse` # messages are streamed back to the client. If there is no recognizable # audio, and `single_utterance` is set to false, then no messages are streamed # back to the client. # # Here's an example of a series of `StreamingRecognizeResponse`s that might be # returned while processing audio: # # 1. results { alternatives { transcript: "tube" } stability: 0.01 } # # 2. results { alternatives { transcript: "to be a" } stability: 0.01 } # # 3. results { alternatives { transcript: "to be" } stability: 0.9 } # results { alternatives { transcript: " or not to be" } stability: 0.01 } # # 4. results { alternatives { transcript: "to be or not to be" # confidence: 0.92 } # alternatives { transcript: "to bee or not to bee" } # is_final: true } # # 5. results { alternatives { transcript: " that's" } stability: 0.01 } # # 6. results { alternatives { transcript: " that is" } stability: 0.9 } # results { alternatives { transcript: " the question" } stability: 0.01 } # # 7. results { alternatives { transcript: " that is the question" # confidence: 0.98 } # alternatives { transcript: " that was the question" } # is_final: true } # # Notes: # # - Only two of the above responses #4 and #7 contain final results; they are # indicated by `is_final: true`. Concatenating these together generates the # full transcript: "to be or not to be that is the question". # # - The others contain interim `results`. 
#3 and #6 contain two interim # `results`: the first portion has a high stability and is less likely to # change; the second portion has a low stability and is very likely to # change. A UI designer might choose to show only high stability `results`. # # - The specific `stability` and `confidence` values shown above are only for # illustrative purposes. Actual values may vary. # # - In each response, only one of these fields will be set: # `error`, # `speech_event_type`, or # one or more (repeated) `results`. # @!attribute [rw] error # @return [::Google::Rpc::Status] # If set, returns a {::Google::Rpc::Status google.rpc.Status} message that # specifies the error for the operation. # @!attribute [rw] results # @return [::Array<::Google::Cloud::Speech::V1::StreamingRecognitionResult>] # This repeated list contains zero or more results that # correspond to consecutive portions of the audio currently being processed. # It contains zero or one `is_final=true` result (the newly settled portion), # followed by zero or more `is_final=false` results (the interim results). # @!attribute [rw] speech_event_type # @return [::Google::Cloud::Speech::V1::StreamingRecognizeResponse::SpeechEventType] # Indicates the type of speech event. # @!attribute [rw] total_billed_time # @return [::Google::Protobuf::Duration] # When available, billed audio seconds for the stream. # Set only if this is the last response in the stream. class StreamingRecognizeResponse include ::Google::Protobuf::MessageExts extend ::Google::Protobuf::MessageExts::ClassMethods # Indicates the type of speech event. module SpeechEventType # No speech event specified. SPEECH_EVENT_UNSPECIFIED = 0 # This event indicates that the server has detected the end of the user's # speech utterance and expects no additional speech. Therefore, the server # will not process additional audio (although it may subsequently return # additional results). 
The client should stop sending additional audio # data, half-close the gRPC connection, and wait for any additional results # until the server closes the gRPC connection. This event is only sent if # `single_utterance` was set to `true`, and is not used otherwise. END_OF_SINGLE_UTTERANCE = 1 end end # A streaming speech recognition result corresponding to a portion of the audio # that is currently being processed. # @!attribute [rw] alternatives # @return [::Array<::Google::Cloud::Speech::V1::SpeechRecognitionAlternative>] # May contain one or more recognition hypotheses (up to the # maximum specified in `max_alternatives`). # These alternatives are ordered in terms of accuracy, with the top (first) # alternative being the most probable, as ranked by the recognizer. # @!attribute [rw] is_final # @return [::Boolean] # If `false`, this `StreamingRecognitionResult` represents an # interim result that may change. If `true`, this is the final time the # speech service will return this particular `StreamingRecognitionResult`, # the recognizer will not return any further hypotheses for this portion of # the transcript and corresponding audio. # @!attribute [rw] stability # @return [::Float] # An estimate of the likelihood that the recognizer will not # change its guess about this interim result. Values range from 0.0 # (completely unstable) to 1.0 (completely stable). # This field is only provided for interim results (`is_final=false`). # The default of 0.0 is a sentinel value indicating `stability` was not set. # @!attribute [rw] result_end_time # @return [::Google::Protobuf::Duration] # Time offset of the end of this result relative to the # beginning of the audio. # @!attribute [rw] channel_tag # @return [::Integer] # For multi-channel audio, this is the channel number corresponding to the # recognized result for the audio from that channel. # For audio_channel_count = N, its output values can range from '1' to 'N'. 
# @!attribute [r] language_code # @return [::String] # Output only. The [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) # language tag of the language in this result. This language code was # detected to have the most likelihood of being spoken in the audio. class StreamingRecognitionResult include ::Google::Protobuf::MessageExts extend ::Google::Protobuf::MessageExts::ClassMethods end # A speech recognition result corresponding to a portion of the audio. # @!attribute [rw] alternatives # @return [::Array<::Google::Cloud::Speech::V1::SpeechRecognitionAlternative>] # May contain one or more recognition hypotheses (up to the # maximum specified in `max_alternatives`). # These alternatives are ordered in terms of accuracy, with the top (first) # alternative being the most probable, as ranked by the recognizer. # @!attribute [rw] channel_tag # @return [::Integer] # For multi-channel audio, this is the channel number corresponding to the # recognized result for the audio from that channel. # For audio_channel_count = N, its output values can range from '1' to 'N'. # @!attribute [rw] result_end_time # @return [::Google::Protobuf::Duration] # Time offset of the end of this result relative to the # beginning of the audio. # @!attribute [r] language_code # @return [::String] # Output only. The [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) # language tag of the language in this result. This language code was # detected to have the most likelihood of being spoken in the audio. class SpeechRecognitionResult include ::Google::Protobuf::MessageExts extend ::Google::Protobuf::MessageExts::ClassMethods end # Alternative hypotheses (a.k.a. n-best list). # @!attribute [rw] transcript # @return [::String] # Transcript text representing the words that the user spoke. # @!attribute [rw] confidence # @return [::Float] # The confidence estimate between 0.0 and 1.0. A higher number # indicates an estimated greater likelihood that the recognized words are # correct. 
This field is set only for the top alternative of a non-streaming # result or, of a streaming result where `is_final=true`. # This field is not guaranteed to be accurate and users should not rely on it # to be always provided. # The default of 0.0 is a sentinel value indicating `confidence` was not set. # @!attribute [rw] words # @return [::Array<::Google::Cloud::Speech::V1::WordInfo>] # A list of word-specific information for each recognized word. # Note: When `enable_speaker_diarization` is true, you will see all the words # from the beginning of the audio. class SpeechRecognitionAlternative include ::Google::Protobuf::MessageExts extend ::Google::Protobuf::MessageExts::ClassMethods end # Word-specific information for recognized words. # @!attribute [rw] start_time # @return [::Google::Protobuf::Duration] # Time offset relative to the beginning of the audio, # and corresponding to the start of the spoken word. # This field is only set if `enable_word_time_offsets=true` and only # in the top hypothesis. # This is an experimental feature and the accuracy of the time offset can # vary. # @!attribute [rw] end_time # @return [::Google::Protobuf::Duration] # Time offset relative to the beginning of the audio, # and corresponding to the end of the spoken word. # This field is only set if `enable_word_time_offsets=true` and only # in the top hypothesis. # This is an experimental feature and the accuracy of the time offset can # vary. # @!attribute [rw] word # @return [::String] # The word corresponding to this set of information. # @!attribute [rw] confidence # @return [::Float] # The confidence estimate between 0.0 and 1.0. A higher number # indicates an estimated greater likelihood that the recognized words are # correct. This field is set only for the top alternative of a non-streaming # result or, of a streaming result where `is_final=true`. # This field is not guaranteed to be accurate and users should not rely on it # to be always provided. 
# The default of 0.0 is a sentinel value indicating `confidence` was not set. # @!attribute [r] speaker_tag # @return [::Integer] # Output only. A distinct integer value is assigned for every speaker within # the audio. This field specifies which one of those speakers was detected to # have spoken this word. Value ranges from '1' to diarization_speaker_count. # speaker_tag is set if enable_speaker_diarization = 'true' and only in the # top alternative. class WordInfo include ::Google::Protobuf::MessageExts extend ::Google::Protobuf::MessageExts::ClassMethods end end end end end
require File.expand_path('../../../lib/rlint', __FILE__)

# This file benchmarks the memory increase after parsing a particular Rlint
# file and performing code analysis on the resulting tokens.
#
# For each iteration (the amount is set in the "AMOUNT" environment variable)
# the increase is measured. Once finished the average increase is displayed as
# well as the total memory usage at the end of the script.

# Returns the resident set size of the current process in kilobytes, as
# reported by ps(1).
def memory_usage
  return `ps -o rss= #{Process.pid}`.strip.to_i
end

# Runs the supplied block and returns the difference (in kilobytes) between
# the process' memory usage before and after the block executed.
def benchmark_memory
  start_memory = memory_usage

  yield

  return memory_usage - start_memory
end

memory_kb = 0.0
amount    = ENV['AMOUNT'] ? ENV['AMOUNT'].to_i : 100
# Guard against AMOUNT=0 (or a non-numeric value, which String#to_i turns
# into 0): dividing by zero below would otherwise produce NaN averages.
amount    = 1 if amount < 1
path      = File.expand_path('../../../lib/rlint/scope.rb', __FILE__)
code      = File.read(path, File.size(path))

amount.times do
  memory_kb += benchmark_memory do
    tokens   = Rlint::Parser.new(code, path).parse
    iterator = Rlint::Iterator.new

    iterator.bind(Rlint::Analyze::CodingStyle)
    iterator.bind(Rlint::Analyze::Definitions)

    iterator.iterate(tokens)
  end
end

memory_kb /= amount
memory_mb  = memory_kb / 1024

memory_end_kb = memory_usage
# Use a Float divisor: memory_end_kb is an Integer, so `/ 1024` would be
# integer division and truncate the value before round(2) could show any
# decimals.
memory_end_mb = memory_end_kb / 1024.0

puts "Average memory increase for each iteration (total of #{amount})"
puts
puts "Kilobytes: #{memory_kb.round}"
puts "Megabytes: #{memory_mb.round(2)}"
puts
puts 'End memory usage'
puts
puts "Kilobytes: #{memory_end_kb.round}"
puts "Megabytes: #{memory_end_mb.round(2)}"

# Benchmark parsing parser.rb instead of scope.rb.
#
# Signed-off-by: Yorick Peterse <82349cb6397bb932b4bf3561b4ea2fad50571f50@gmail.com>

require File.expand_path('../../../lib/rlint', __FILE__)

# This file benchmarks the memory increase after parsing a particular Rlint
# file and performing code analysis on the resulting tokens.
#
# For each iteration (the amount is set in the "AMOUNT" environment variable)
# the increase is measured. Once finished the average increase is displayed as
# well as the total memory usage at the end of the script.

# Returns the resident set size of the current process in kilobytes, as
# reported by ps(1).
def memory_usage
  return `ps -o rss= #{Process.pid}`.strip.to_i
end

# Runs the supplied block and returns the difference (in kilobytes) between
# the process' memory usage before and after the block executed.
def benchmark_memory
  start_memory = memory_usage

  yield

  return memory_usage - start_memory
end

memory_kb = 0.0
amount    = ENV['AMOUNT'] ? ENV['AMOUNT'].to_i : 100
# Guard against AMOUNT=0 (or a non-numeric value, which String#to_i turns
# into 0): dividing by zero below would otherwise produce NaN averages.
amount    = 1 if amount < 1
path      = File.expand_path('../../../lib/rlint/parser.rb', __FILE__)
code      = File.read(path, File.size(path))

amount.times do
  memory_kb += benchmark_memory do
    tokens   = Rlint::Parser.new(code, path).parse
    iterator = Rlint::Iterator.new

    iterator.bind(Rlint::Analyze::CodingStyle)
    iterator.bind(Rlint::Analyze::Definitions)

    iterator.iterate(tokens)
  end
end

memory_kb /= amount
memory_mb  = memory_kb / 1024

memory_end_kb = memory_usage
# Float divisor for the same reason as above: avoid Integer truncation
# before round(2).
memory_end_mb = memory_end_kb / 1024.0

puts "Average memory increase for each iteration (total of #{amount})"
puts
puts "Kilobytes: #{memory_kb.round}"
puts "Megabytes: #{memory_mb.round(2)}"
puts
puts 'End memory usage'
puts
puts "Kilobytes: #{memory_end_kb.round}"
puts "Megabytes: #{memory_end_mb.round(2)}"
require 'bibliografia/bibliografia.rb' require 'spec_helper' describe Libro do before(:all) do @b1 = Libro::Bibliografia.new({:autor =>['Dave Thomas', 'Andy Hunt', 'Chad Fowler'],:titulo => 'Programming Ruby 1.9 & 2.0: The Pragmatic Programmers’ Guide',:serie => 'The Facets of Ruby',:editorial => 'Pragmatic Bookshelf',:edicion => '4 edition',:fecha => 'July 7, 2013',:isbn =>['ISBN-13: 978-1937785499','ISBN-10: 1937785491']}) puts @b1.to_s @nodo = Libro::Node.new(@b1,nil) end describe 'Expectativas clase Bibliografia' do it ' Expectativas instancia Bibliografia' do expect(@b1).to be_instance_of(Libro::Bibliografia) end it ' Expectativa Debe existir uno o más autores' do expect{b2 = Libro::Bibliografia.new({:autor => nil,:titulo => 'Programming Ruby 1.9 & 2.0: The Pragmatic Programmers’ Guide',:serie => 'The Facets of Ruby',:editorial => 'Pragmatic Bookshelf',:edicion => '4 edition',:fecha => 'July 7, 2013',:isbn => ['ISBN-13: 978-1937785499','ISBN-10: 1937785491']})}.to raise_error(ArgumentError) end it ' Expectativa Debe existir un titulo ' do expect{b3 = Libro::Bibliografia.new({:autor =>['Dave Thomas', 'Andy Hunt', 'Chad Fowler'],:titulo => nil,:serie => 'The Facets of Ruby',:editorial => 'Pragmatic Bookshelf',:edicion => '4 edition',:fecha => 'July 7, 2013',:isbn => ['ISBN-13: 978-1937785499','ISBN-10: 1937785491']})}.to raise_error(ArgumentError) expect{b3 = Libro::Bibliografia.new({:autor =>['Dave Thomas', 'Andy Hunt', 'Chad Fowler'],:titulo => ['T1','T2'],:serie => 'The Facets of Ruby',:editorial => 'Pragmatic Bookshelf',:edicion => '4 edition',:fecha => 'July 7, 2013',:isbn => ['ISBN-13: 978-1937785499','ISBN-10: 1937785491']})}.to raise_error(ArgumentError) end it ' Expectativa Debe existir o no una serie ' do expect{b4 = Libro::Bibliografia.new({:autor => ['Dave Thomas', 'Andy Hunt', 'Chad Fowler'],:titulo => 'Programming Ruby 1.9 & 2.0: The Pragmatic Programmers',:serie => nil,:editorial => 'Pragmatic Bookshelf',:edicion => '4 edition',:fecha => 'July 7, 
2013',:isbn => ['ISBN-13: 978-1937785499','ISBN-10: 1937785491']})}.not_to raise_error(ArgumentError) expect{b4 = Libro::Bibliografia.new({:autor => ['Dave Thomas', 'Andy Hunt', 'Chad Fowler'],:titulo => 'Programming Ruby 1.9 & 2.0: The Pragmatic Programmers',:serie => ['SERIE1','SERIE2'],:editorial => 'Pragmatic Bookshelf',:edicion => '4 edition',:fecha => 'July 7, 2013',:isbn => ['ISBN-13: 978-1937785499','ISBN-10: 1937785491']})}.to raise_error(ArgumentError) end it ' Expectativa Debe existir una editorial ' do expect{b5 = Libro::Bibliografia.new({:autor => ['Dave Thomas', 'Andy Hunt', 'Chad Fowler'],:titulo => 'Programming Ruby 1.9 & 2.0: The Pragmatic Programmers’ Guide',:serie => 'The Facets of Ruby',:editorial => nil,:edicion => '4 edition',:fecha => 'July 7, 2013',:isbn => ['ISBN-13: 978-1937785499','ISBN-10: 1937785491']})}.to raise_error(ArgumentError) expect{b5 = Libro::Bibliografia.new({:autor => ['Dave Thomas', 'Andy Hunt', 'Chad Fowler'],:titulo => 'Programming Ruby 1.9 & 2.0: The Pragmatic Programmers’ Guide',:serie => 'The Facets of Ruby',:editorial => ['EDITORIAL1','EDITORIAL2'],:edicion => '4 edition',:fecha => 'July 7, 2013',:isbn => ['ISBN-13: 978-1937785499','ISBN-10: 1937785491']})}.to raise_error(ArgumentError) end it ' Expectativa Debe existir un número de edición ' do expect{b6 = Libro::Bibliografia.new({:autor => ['Dave Thomas', 'Andy Hunt', 'Chad Fowler'],:titulo => 'Programming Ruby 1.9 & 2.0: The Pragmatic Programmers’ Guide',:serie => 'The Facets of Ruby',:editorial => 'Pragmatic Bookshelf',:edicion => nil,:fecha => 'July 7, 2013',:isbn => ['ISBN-13: 978-1937785499','ISBN-10: 1937785491']})}.to raise_error(ArgumentError) expect{b6 = Libro::Bibliografia.new({:autor => ['Dave Thomas', 'Andy Hunt', 'Chad Fowler'],:titulo => 'Programming Ruby 1.9 & 2.0: The Pragmatic Programmers’ Guide',:serie => 'The Facets of Ruby',:editorial => 'Pragmatic Bookshelf',:edicion => ['EDICION1','EDITION2'],:fecha => 'July 7, 2013',:isbn => ['ISBN-13: 
978-1937785499','ISBN-10: 1937785491']})}.to raise_error(ArgumentError) end it ' Expectativa Debe existir una fecha de publicación ' do expect{ b7 = Libro::Bibliografia.new({:autor => ['Dave Thomas', 'Andy Hunt', 'Chad Fowler'],:titulo => 'Programming Ruby 1.9 & 2.0: The Pragmatic Programmers’ Guide',:serie => 'The Facets of Ruby',:editorial => 'Pragmatic Bookshelf',:edicion => '4 edition',:fecha => nil,:isbn => ['ISBN-13: 978-1937785499','ISBN-10: 1937785491']})}.to raise_error(ArgumentError) expect{ b7 = Libro::Bibliografia.new({:autor => ['Dave Thomas', 'Andy Hunt', 'Chad Fowler'],:titulo => 'Programming Ruby 1.9 & 2.0: The Pragmatic Programmers’ Guide',:serie => 'The Facets of Ruby',:editorial => 'Pragmatic Bookshelf',:edicion => '4 edition',:fecha => ['FECHA1','FECHA2'],:isbn => ['ISBN-13: 978-1937785499','ISBN-10: 1937785491']})}.to raise_error(ArgumentError) end it ' Expectativa Debe existir uno o más números ISBN ' do expect{b8 = Libro::Bibliografia.new({:autor => ['Dave Thomas', 'Andy Hunt', 'Chad Fowler'],:titulo => 'Programming Ruby 1.9 & 2.0: The Pragmatic Programmers’ Guide',:serie => 'The Facets of Ruby',:editorial => 'Pragmatic Bookshelf',:edicion => '4 edition',:fecha => 'July 7, 2013',:isbn => nil})}.to raise_error(ArgumentError) end it ' Expectativa Existe un método para obtener el listado de autores' do expect(@b1).to respond_to(:autor) expect(@b1.autor).to contain_exactly('Dave Thomas', 'Andy Hunt', 'Chad Fowler') end it ' Expectativa Existe un método para obtener el título' do expect(@b1).to respond_to(:titulo) expect(@b1.titulo).to eq('Programming Ruby 1.9 & 2.0: The Pragmatic Programmers’ Guide') end it ' Expectativa Existe un método para obtener la serie' do expect(@b1).to respond_to(:serie) expect(@b1.serie).to eq('The Facets of Ruby') end it ' Expectativa Existe un método para obtener la editorial' do expect(@b1).to respond_to(:editorial) expect(@b1.editorial).to eq('Pragmatic Bookshelf') end it ' Expectativa Existe un método para obtener 
el número de edición' do expect(@b1).to respond_to(:edicion) expect(@b1.edicion).to eq('4 edition') end it ' Expectativa Existe un método para obtener la fecha de publicación ' do expect(@b1).to respond_to(:fecha) expect(@b1.fecha).to eq('July 7, 2013') end it ' Expectativa Existe un método para obtener el listado de ISBN ' do expect(@b1).to respond_to(:isbn) expect(@b1.isbn).to contain_exactly('ISBN-13: 978-1937785499','ISBN-10: 1937785491') end it ' Expectativa Existe un método para obtener la referencia formateada ' do expect(@b1).to respond_to(:to_s) expect(@b1.to_s).to eq('Programming Ruby 1.9 & 2.0: The Pragmatic Programmers’ GuideDave ThomasAndy HuntChad FowlerThe Facets of RubyPragmatic Bookshelf4 editionJuly 7, 2013ISBN-13: 978-1937785499ISBN-10: 1937785491') end end describe 'Expectativas clase Lista de una Bibliografia' do it ' Expectativa Debe existir un Nodo de la lista con sus datos y su siguiente' do expect(@nodo.value).to eql(@b1) expect(@nodo.next).to eql(nil) end it ' Expectativa Debe existir una Lista con su cabeza' do expect{l1 = Libro::Lista.new(nil)}.to raise_error(ArgumentError) l1 = Libro::Lista.new(@nodo) #Se utiliza una expectativa con la comparción por identidad del objeto expect(l1).to respond_to(:head) expect(l1).to respond_to(:tail) expect(l1.head).to be(@nodo) end end end Corrección Añadiendo el enlace prev a la clase Libro::Node require 'bibliografia/bibliografia.rb' require 'spec_helper' describe Libro do before(:all) do @b1 = Libro::Bibliografia.new({:autor =>['Dave Thomas', 'Andy Hunt', 'Chad Fowler'],:titulo => 'Programming Ruby 1.9 & 2.0: The Pragmatic Programmers’ Guide',:serie => 'The Facets of Ruby',:editorial => 'Pragmatic Bookshelf',:edicion => '4 edition',:fecha => 'July 7, 2013',:isbn =>['ISBN-13: 978-1937785499','ISBN-10: 1937785491']}) puts @b1.to_s @nodo = Libro::Node.new(@b1,nil,nil) end describe 'Expectativas clase Bibliografia' do it ' Expectativas instancia Bibliografia' do expect(@b1).to 
be_instance_of(Libro::Bibliografia) end it ' Expectativa Debe existir uno o más autores' do expect{b2 = Libro::Bibliografia.new({:autor => nil,:titulo => 'Programming Ruby 1.9 & 2.0: The Pragmatic Programmers’ Guide',:serie => 'The Facets of Ruby',:editorial => 'Pragmatic Bookshelf',:edicion => '4 edition',:fecha => 'July 7, 2013',:isbn => ['ISBN-13: 978-1937785499','ISBN-10: 1937785491']})}.to raise_error(ArgumentError) end it ' Expectativa Debe existir un titulo ' do expect{b3 = Libro::Bibliografia.new({:autor =>['Dave Thomas', 'Andy Hunt', 'Chad Fowler'],:titulo => nil,:serie => 'The Facets of Ruby',:editorial => 'Pragmatic Bookshelf',:edicion => '4 edition',:fecha => 'July 7, 2013',:isbn => ['ISBN-13: 978-1937785499','ISBN-10: 1937785491']})}.to raise_error(ArgumentError) expect{b3 = Libro::Bibliografia.new({:autor =>['Dave Thomas', 'Andy Hunt', 'Chad Fowler'],:titulo => ['T1','T2'],:serie => 'The Facets of Ruby',:editorial => 'Pragmatic Bookshelf',:edicion => '4 edition',:fecha => 'July 7, 2013',:isbn => ['ISBN-13: 978-1937785499','ISBN-10: 1937785491']})}.to raise_error(ArgumentError) end it ' Expectativa Debe existir o no una serie ' do expect{b4 = Libro::Bibliografia.new({:autor => ['Dave Thomas', 'Andy Hunt', 'Chad Fowler'],:titulo => 'Programming Ruby 1.9 & 2.0: The Pragmatic Programmers',:serie => nil,:editorial => 'Pragmatic Bookshelf',:edicion => '4 edition',:fecha => 'July 7, 2013',:isbn => ['ISBN-13: 978-1937785499','ISBN-10: 1937785491']})}.not_to raise_error(ArgumentError) expect{b4 = Libro::Bibliografia.new({:autor => ['Dave Thomas', 'Andy Hunt', 'Chad Fowler'],:titulo => 'Programming Ruby 1.9 & 2.0: The Pragmatic Programmers',:serie => ['SERIE1','SERIE2'],:editorial => 'Pragmatic Bookshelf',:edicion => '4 edition',:fecha => 'July 7, 2013',:isbn => ['ISBN-13: 978-1937785499','ISBN-10: 1937785491']})}.to raise_error(ArgumentError) end it ' Expectativa Debe existir una editorial ' do expect{b5 = Libro::Bibliografia.new({:autor => ['Dave Thomas', 
'Andy Hunt', 'Chad Fowler'],:titulo => 'Programming Ruby 1.9 & 2.0: The Pragmatic Programmers’ Guide',:serie => 'The Facets of Ruby',:editorial => nil,:edicion => '4 edition',:fecha => 'July 7, 2013',:isbn => ['ISBN-13: 978-1937785499','ISBN-10: 1937785491']})}.to raise_error(ArgumentError) expect{b5 = Libro::Bibliografia.new({:autor => ['Dave Thomas', 'Andy Hunt', 'Chad Fowler'],:titulo => 'Programming Ruby 1.9 & 2.0: The Pragmatic Programmers’ Guide',:serie => 'The Facets of Ruby',:editorial => ['EDITORIAL1','EDITORIAL2'],:edicion => '4 edition',:fecha => 'July 7, 2013',:isbn => ['ISBN-13: 978-1937785499','ISBN-10: 1937785491']})}.to raise_error(ArgumentError) end it ' Expectativa Debe existir un número de edición ' do expect{b6 = Libro::Bibliografia.new({:autor => ['Dave Thomas', 'Andy Hunt', 'Chad Fowler'],:titulo => 'Programming Ruby 1.9 & 2.0: The Pragmatic Programmers’ Guide',:serie => 'The Facets of Ruby',:editorial => 'Pragmatic Bookshelf',:edicion => nil,:fecha => 'July 7, 2013',:isbn => ['ISBN-13: 978-1937785499','ISBN-10: 1937785491']})}.to raise_error(ArgumentError) expect{b6 = Libro::Bibliografia.new({:autor => ['Dave Thomas', 'Andy Hunt', 'Chad Fowler'],:titulo => 'Programming Ruby 1.9 & 2.0: The Pragmatic Programmers’ Guide',:serie => 'The Facets of Ruby',:editorial => 'Pragmatic Bookshelf',:edicion => ['EDICION1','EDITION2'],:fecha => 'July 7, 2013',:isbn => ['ISBN-13: 978-1937785499','ISBN-10: 1937785491']})}.to raise_error(ArgumentError) end it ' Expectativa Debe existir una fecha de publicación ' do expect{ b7 = Libro::Bibliografia.new({:autor => ['Dave Thomas', 'Andy Hunt', 'Chad Fowler'],:titulo => 'Programming Ruby 1.9 & 2.0: The Pragmatic Programmers’ Guide',:serie => 'The Facets of Ruby',:editorial => 'Pragmatic Bookshelf',:edicion => '4 edition',:fecha => nil,:isbn => ['ISBN-13: 978-1937785499','ISBN-10: 1937785491']})}.to raise_error(ArgumentError) expect{ b7 = Libro::Bibliografia.new({:autor => ['Dave Thomas', 'Andy Hunt', 'Chad 
Fowler'],:titulo => 'Programming Ruby 1.9 & 2.0: The Pragmatic Programmers’ Guide',:serie => 'The Facets of Ruby',:editorial => 'Pragmatic Bookshelf',:edicion => '4 edition',:fecha => ['FECHA1','FECHA2'],:isbn => ['ISBN-13: 978-1937785499','ISBN-10: 1937785491']})}.to raise_error(ArgumentError) end it ' Expectativa Debe existir uno o más números ISBN ' do expect{b8 = Libro::Bibliografia.new({:autor => ['Dave Thomas', 'Andy Hunt', 'Chad Fowler'],:titulo => 'Programming Ruby 1.9 & 2.0: The Pragmatic Programmers’ Guide',:serie => 'The Facets of Ruby',:editorial => 'Pragmatic Bookshelf',:edicion => '4 edition',:fecha => 'July 7, 2013',:isbn => nil})}.to raise_error(ArgumentError) end it ' Expectativa Existe un método para obtener el listado de autores' do expect(@b1).to respond_to(:autor) expect(@b1.autor).to contain_exactly('Dave Thomas', 'Andy Hunt', 'Chad Fowler') end it ' Expectativa Existe un método para obtener el título' do expect(@b1).to respond_to(:titulo) expect(@b1.titulo).to eq('Programming Ruby 1.9 & 2.0: The Pragmatic Programmers’ Guide') end it ' Expectativa Existe un método para obtener la serie' do expect(@b1).to respond_to(:serie) expect(@b1.serie).to eq('The Facets of Ruby') end it ' Expectativa Existe un método para obtener la editorial' do expect(@b1).to respond_to(:editorial) expect(@b1.editorial).to eq('Pragmatic Bookshelf') end it ' Expectativa Existe un método para obtener el número de edición' do expect(@b1).to respond_to(:edicion) expect(@b1.edicion).to eq('4 edition') end it ' Expectativa Existe un método para obtener la fecha de publicación ' do expect(@b1).to respond_to(:fecha) expect(@b1.fecha).to eq('July 7, 2013') end it ' Expectativa Existe un método para obtener el listado de ISBN ' do expect(@b1).to respond_to(:isbn) expect(@b1.isbn).to contain_exactly('ISBN-13: 978-1937785499','ISBN-10: 1937785491') end it ' Expectativa Existe un método para obtener la referencia formateada ' do expect(@b1).to respond_to(:to_s) expect(@b1.to_s).to 
eq('Programming Ruby 1.9 & 2.0: The Pragmatic Programmers’ GuideDave ThomasAndy HuntChad FowlerThe Facets of RubyPragmatic Bookshelf4 editionJuly 7, 2013ISBN-13: 978-1937785499ISBN-10: 1937785491') end end describe 'Expectativas clase Lista de una Bibliografia' do it ' Expectativa Debe existir un Nodo de la lista con sus datos y su siguiente' do expect(@nodo.value).to eql(@b1) expect(@nodo.next).to eql(nil) end it ' Expectativa Debe existir una Lista con su cabeza' do expect{l1 = Libro::Lista.new(nil)}.to raise_error(ArgumentError) l1 = Libro::Lista.new(@nodo) #Se utiliza una expectativa con la comparción por identidad del objeto expect(l1).to respond_to(:head) expect(l1).to respond_to(:tail) expect(l1.head).to be(@nodo) end end end
require 'spec_helper' require 'bibliografia/bibliog' require 'bibliografia/lista' describe Bibliog do before :all do @b1 = Bibliog.new(['Dave Thomas','Andy Hunt','Chad Fowler'], 'Programming Ruby 1.9 & 2.0: The Pragmatic Programmers’ Guide', 'Pragmatic Bookshelf', 4, 'July 7', 2013, ['9781937785499', '1937785491'], 'The Facets of Ruby') @b2 = Bibliog.new(['Dave Thomas','Andy Hunt','Chad Fowler'], 'Programming Ruby 1.9 & 2.0: The Pragmatic Programmers’ Guide', 'Pragmatic Bookshelf', 4, 'July 7', 2013, ['9781937785499', '1937785491']) end describe "# almacenamiento de autores" do it "Debe existir uno o mas autores" do @b1.autores.should eq(['Dave Thomas', 'Andy Hunt','Chad Fowler']) end end describe "# almacenamiento de titulo" do it "Debe existir un titulo" do @b1.titulo.should eq('Programming Ruby 1.9 & 2.0: The Pragmatic Programmers’ Guide') end end describe "# almacenamiento de serie" do it "Debe existir una serie" do @b1.serie.should eq('The Facets of Ruby') end it "No debe existir una serie" do @b2.serie.should eq('none') end end describe "# almacenamiento de editorial" do it "Debe existir una editorial" do @b1.editorial.should eq('Pragmatic Bookshelf') end end describe "# almacenamiento de edicion" do it "Debe existir un numero de edicion" do @b1.edicion.should eq(4) end end describe "# almacenamiento de fecha" do it "Debe existir un dia y un mes" do @b1.mes.should eq('July 7') end it "Debe existir un año" do @b1.anno.should eq(2013) end end describe "# almacenamiento de uno o mas ISBN" do it "Debe existir uno o mas ISBN" do @b1.isbn.should eq(['9781937785499', '1937785491']) end end describe "# metodo para obtener los autores" do it "Debe existir un metodo para obtener la lista de autores" do @b1.get_autores.should eq("Dave Thomas, Andy Hunt, Chad Fowler") end end describe "# metodo para obtener el titulo" do it "Debe existir un metodo para obtener el titulo" do @b1.get_titulo.should eq("Programming Ruby 1.9 & 2.0: The Pragmatic Programmers’ Guide") end end 
describe "# metodo para obtener la serie" do it "Debe existir un metodo para obtener la serie" do @b1.get_serie.should eq("The Facets of Ruby") end end describe "# metodo para obtener la editorial" do it "Debe existir un metodo para obtener la editorial" do @b1.get_editorial.should eq("Pragmatic Bookshelf") end end describe "# metodo para obtener el numero de edicion" do it "Debe existir un metodo para obtener el numero de edicion" do @b1.get_edicion.should eq("4") end end describe "# metodo para obtener la fecha" do it "Debe existir unmetodo para obtener la fecha" do @b1.get_fecha.should eq("July 7, 2013") end end describe "# metodo para obtener los ISBN" do it "Debe existir un metodo para obtener los ISBN" do @b1.get_isbn.should eq("ISBN-13: 978-1937785499\nISBN-10: 1937785491") end end describe "# formateo de la referencia" do it "Debe existir un metodo que formatee la referencia" do @b1.to_s.should eq("Dave Thomas, Andy Hunt, Chad Fowler.\nProgramming Ruby 1.9 & 2.0: The Pragmatic Programmers’ Guide\n(The Facets of Ruby)\nPragmatic Bookshelf; 4 edition (July 7, 2013)\nISBN-13: 978-1937785499\nISBN-10: 1937785491") end end end describe Nodo do before :all do @nodo1 = Nodo.new(1,2,3); end describe "# almacenamiento de un valor" do it "Debe existir un valor" do @nodo1.value.should eq(1) end end describe "# almacenamiento de un siguiente" do it "Debe existir un siguiente" do @nodo1.next.should eq(2) end end describe "# almacenamiento de un anterior" do it "Debe existir un anterior" do @nodo1.prev.should eq(3) end end end describe Lista do before :each do @b1 = Bibliog.new(['Dave Thomas','Andy Hunt','Chad Fowler'], 'Programming Ruby 1.9 & 2.0: The Pragmatic Programmers’ Guide', 'Pragmatic Bookshelf', 4, 'July 7', 2013, ['9781937785499', '1937785491'], 'The Facets of Ruby') @b2 = Bibliog.new('Scott Chacon', 'Pro Git 2009th Edition', 'Apress', 2009, 'August 27', 2009, ['9781430218333','1430218339'], 'Pro') @b3 = Bibliog.new(['David Flanagan','Yukihiro Matsumoto'], 
'The Ruby Programming Language', 'O’Reilly Media', 1, 'February 4', 2008, ['0596516177','9780596516178']) @b4 = Bibliog.new(['David Chelimsky','Dave Astels','Bryan Helmkamp','Dan North','Zach Dennis','Aslak Hellesoy'], 'The RSpecBook: Behaviour Driven Development with RSpec, Cucumber, and Friends', 'Pragmatic Bookshelf', 1, 'December 25', 2010, ['1934356379','9781934356371'], 'The Facets of Ruby') @b5 = Bibliog.new('Richard E. Silverman','Git Pocket Guide', 'O’Reilly Media', 1, 'August 2', 2013, ['1449325866','9781449325862']) @lista = Lista.new() @lista2 = Lista.new() @lista3 = Lista.new() end describe "# creacion de una lista" do it "Debe existir un nodo inicio nulo" do @lista.inicio.should eq(nil) end it "Debe existir un nodo final nulo" do @lista.final.should eq(nil) end end describe "# metodo para ver si esta vacia" do it "Debe existir un metodo para ver si esta vacia" do @lista.vacia.should eq(true) end end describe "# metodo para insertar y metodo para extraer un elemento" do it "Debe existir un metodo para insertar un elemento y otro para extraerlo" do @lista2.insertar(@b1).should eq(true) @lista2.extraer.should eq(@b1) end end describe "# insertar y extraer varios elementos" do it "Se puede insertar y extraer varios elementos" do @lista3.insertar(@b1).should eq(true) @lista3.insertar(@b2).should eq(true) @lista3.insertar(@b3).should eq(true) @lista3.insertar(@b4).should eq(true) @lista3.insertar(@b5).should eq(true) @lista3.extraer.should eq(@b1) @lista3.extraer.should eq(@b2) @lista3.extraer.should eq(@b3) @lista3.extraer.should eq(@b4) @lista3.extraer.should eq(@b5) end end describe "# insertar(inicio) y extraer(final)" do it "Se puede insertar por el inicio y extraer por el final" do @lista3.insertar(@b2).should eq(true) @lista3.insertar_inicio(@b1).should eq(true) @lista3.extraer_final.should eq(@b2) end end end describe Libro do before :all do @l1 = Libro.new end describe "# comprobar la instancia del objeto" do it "Es una la instancia de Libro" do 
(@l1.instance_of?Libro).should eq(true) end end describe " comprobar la jerarquia de clases del objeto" do it "Pertenece a la jerarquia de clase de Bibliog" do (@l1.is_a?Bibliog).should eq(true) end it "Pertenece a la jerarquia de clase de Object" do (@l1.is_a?Object).should eq(true) end it "Pertenece a la jerarquia de clase de BasicObject" do (@l1.is_a?BasicObject).should eq(true) end end describe "# comprobar la instancia del objeto con su madre" do it "No es una instancia de Bibliog" do (@l1.instance_of?Bibliog).should eq(false) end end describe "# comprobar si responde a un metodo de su madre" do it "Debe responder a un metodo de su madre" do @l1.respond_to?(:autores).should eq(true) end end end describe Revista do before :all do @r1 = Revista.new end describe "# comprobar la instancia del objeto" do it "Es una instancia de Revista" do (@r1.instance_of?Revista).should eq(true) end end describe " comprobar la jerarquia de clases del objeto" do it "Pertenece a la jerarquia de clase de Bibliog" do (@r1.is_a?Bibliog).should eq(true) end it "Pertenece a la jerarquia de clase de Object" do (@r1.is_a?Object).should eq(true) end it "Pertenece a la jerarquia de clase de BasicObject" do (@r1.is_a?BasicObject).should eq(true) end end describe "# comprobar la instancia del objeto con su madre" do it "No es una instancia de Bibliog" do (@r1.instance_of?Bibliog).should eq(false) end end describe "# comprobar si responde a un metodo de su madre" do it "Debe responder a un metodo de su madre" do @r1.respond_to?(:autores).should eq(true) end end end describe Periodico do end Prueba comprobar la instancia del objeto. 
# Superada
#
# NOTE(review): reconstructed from a line-collapsed dump. This span is one
# complete snapshot of the bibliografia spec file (Bibliog, Nodo, Lista, Libro,
# Revista, Periodico). Statement boundaries were restored; every string literal
# is kept verbatim (including description typos such as "unmetodo" and
# "una la instancia"). The legacy RSpec `should` syntax is kept on purpose —
# this snapshot predates the `expect` syntax.
require 'spec_helper'
require 'bibliografia/bibliog'
require 'bibliografia/lista'

# Bibliog: a bibliographic reference (authors, title, publisher, edition number,
# day/month, year, ISBN list and an optional series).
describe Bibliog do
  before :all do
    # @b1 passes the optional series argument; @b2 omits it so the 'none'
    # default can be exercised below.
    @b1 = Bibliog.new(['Dave Thomas', 'Andy Hunt', 'Chad Fowler'],
                      'Programming Ruby 1.9 & 2.0: The Pragmatic Programmers’ Guide',
                      'Pragmatic Bookshelf', 4, 'July 7', 2013,
                      ['9781937785499', '1937785491'], 'The Facets of Ruby')
    @b2 = Bibliog.new(['Dave Thomas', 'Andy Hunt', 'Chad Fowler'],
                      'Programming Ruby 1.9 & 2.0: The Pragmatic Programmers’ Guide',
                      'Pragmatic Bookshelf', 4, 'July 7', 2013,
                      ['9781937785499', '1937785491'])
  end

  describe "# almacenamiento de autores" do
    it "Debe existir uno o mas autores" do @b1.autores.should eq(['Dave Thomas', 'Andy Hunt', 'Chad Fowler']) end
  end

  describe "# almacenamiento de titulo" do
    it "Debe existir un titulo" do @b1.titulo.should eq('Programming Ruby 1.9 & 2.0: The Pragmatic Programmers’ Guide') end
  end

  describe "# almacenamiento de serie" do
    it "Debe existir una serie" do @b1.serie.should eq('The Facets of Ruby') end
    # When no series is given the attribute falls back to the literal 'none'.
    it "No debe existir una serie" do @b2.serie.should eq('none') end
  end

  describe "# almacenamiento de editorial" do
    it "Debe existir una editorial" do @b1.editorial.should eq('Pragmatic Bookshelf') end
  end

  describe "# almacenamiento de edicion" do
    it "Debe existir un numero de edicion" do @b1.edicion.should eq(4) end
  end

  describe "# almacenamiento de fecha" do
    it "Debe existir un dia y un mes" do @b1.mes.should eq('July 7') end
    it "Debe existir un año" do @b1.anno.should eq(2013) end
  end

  describe "# almacenamiento de uno o mas ISBN" do
    it "Debe existir uno o mas ISBN" do @b1.isbn.should eq(['9781937785499', '1937785491']) end
  end

  describe "# metodo para obtener los autores" do
    it "Debe existir un metodo para obtener la lista de autores" do @b1.get_autores.should eq("Dave Thomas, Andy Hunt, Chad Fowler") end
  end

  describe "# metodo para obtener el titulo" do
    it "Debe existir un metodo para obtener el titulo" do @b1.get_titulo.should eq("Programming Ruby 1.9 & 2.0: The Pragmatic Programmers’ Guide") end
  end

  describe "# metodo para obtener la serie" do
    it "Debe existir un metodo para obtener la serie" do @b1.get_serie.should eq("The Facets of Ruby") end
  end

  describe "# metodo para obtener la editorial" do
    it "Debe existir un metodo para obtener la editorial" do @b1.get_editorial.should eq("Pragmatic Bookshelf") end
  end

  describe "# metodo para obtener el numero de edicion" do
    # get_edicion returns the edition as a String, unlike the Integer attribute.
    it "Debe existir un metodo para obtener el numero de edicion" do @b1.get_edicion.should eq("4") end
  end

  describe "# metodo para obtener la fecha" do
    # NOTE(review): "unmetodo" is a typo in the snapshot's description; kept verbatim.
    it "Debe existir unmetodo para obtener la fecha" do @b1.get_fecha.should eq("July 7, 2013") end
  end

  describe "# metodo para obtener los ISBN" do
    it "Debe existir un metodo para obtener los ISBN" do @b1.get_isbn.should eq("ISBN-13: 978-1937785499\nISBN-10: 1937785491") end
  end

  describe "# formateo de la referencia" do
    it "Debe existir un metodo que formatee la referencia" do
      @b1.to_s.should eq("Dave Thomas, Andy Hunt, Chad Fowler.\nProgramming Ruby 1.9 & 2.0: The Pragmatic Programmers’ Guide\n(The Facets of Ruby)\nPragmatic Bookshelf; 4 edition (July 7, 2013)\nISBN-13: 978-1937785499\nISBN-10: 1937785491")
    end
  end
end

# Nodo: a doubly-linked-list node holding value / next / prev.
describe Nodo do
  before :all do
    @nodo1 = Nodo.new(1, 2, 3)
  end

  describe "# almacenamiento de un valor" do
    it "Debe existir un valor" do @nodo1.value.should eq(1) end
  end

  describe "# almacenamiento de un siguiente" do
    it "Debe existir un siguiente" do @nodo1.next.should eq(2) end
  end

  describe "# almacenamiento de un anterior" do
    it "Debe existir un anterior" do @nodo1.prev.should eq(3) end
  end
end

# Lista: linked list of references; insertar/extraer behave FIFO, with extra
# insertar_inicio / extraer_final operations for the double-ended cases.
describe Lista do
  before :each do
    @b1 = Bibliog.new(['Dave Thomas', 'Andy Hunt', 'Chad Fowler'],
                      'Programming Ruby 1.9 & 2.0: The Pragmatic Programmers’ Guide',
                      'Pragmatic Bookshelf', 4, 'July 7', 2013,
                      ['9781937785499', '1937785491'], 'The Facets of Ruby')
    # NOTE(review): @b2 passes 2009 in the edition slot (same as the year) —
    # looks like a fixture slip in the snapshot; kept verbatim since nothing
    # here asserts on it.
    @b2 = Bibliog.new('Scott Chacon', 'Pro Git 2009th Edition', 'Apress', 2009,
                      'August 27', 2009, ['9781430218333', '1430218339'], 'Pro')
    @b3 = Bibliog.new(['David Flanagan', 'Yukihiro Matsumoto'],
                      'The Ruby Programming Language', 'O’Reilly Media', 1,
                      'February 4', 2008, ['0596516177', '9780596516178'])
    @b4 = Bibliog.new(['David Chelimsky', 'Dave Astels', 'Bryan Helmkamp',
                       'Dan North', 'Zach Dennis', 'Aslak Hellesoy'],
                      'The RSpecBook: Behaviour Driven Development with RSpec, Cucumber, and Friends',
                      'Pragmatic Bookshelf', 1, 'December 25', 2010,
                      ['1934356379', '9781934356371'], 'The Facets of Ruby')
    @b5 = Bibliog.new('Richard E. Silverman', 'Git Pocket Guide',
                      'O’Reilly Media', 1, 'August 2', 2013,
                      ['1449325866', '9781449325862'])
    @lista = Lista.new
    @lista2 = Lista.new
    @lista3 = Lista.new
  end

  describe "# creacion de una lista" do
    it "Debe existir un nodo inicio nulo" do @lista.inicio.should eq(nil) end
    it "Debe existir un nodo final nulo" do @lista.final.should eq(nil) end
  end

  describe "# metodo para ver si esta vacia" do
    it "Debe existir un metodo para ver si esta vacia" do @lista.vacia.should eq(true) end
  end

  describe "# metodo para insertar y metodo para extraer un elemento" do
    it "Debe existir un metodo para insertar un elemento y otro para extraerlo" do
      @lista2.insertar(@b1).should eq(true)
      @lista2.extraer.should eq(@b1)
    end
  end

  describe "# insertar y extraer varios elementos" do
    it "Se puede insertar y extraer varios elementos" do
      @lista3.insertar(@b1).should eq(true)
      @lista3.insertar(@b2).should eq(true)
      @lista3.insertar(@b3).should eq(true)
      @lista3.insertar(@b4).should eq(true)
      @lista3.insertar(@b5).should eq(true)
      # Extraction returns elements in insertion order (FIFO).
      @lista3.extraer.should eq(@b1)
      @lista3.extraer.should eq(@b2)
      @lista3.extraer.should eq(@b3)
      @lista3.extraer.should eq(@b4)
      @lista3.extraer.should eq(@b5)
    end
  end

  describe "# insertar(inicio) y extraer(final)" do
    it "Se puede insertar por el inicio y extraer por el final" do
      @lista3.insertar(@b2).should eq(true)
      @lista3.insertar_inicio(@b1).should eq(true)
      @lista3.extraer_final.should eq(@b2)
    end
  end
end

# Libro: subclass of Bibliog (checked via class-hierarchy predicates below).
describe Libro do
  before :all do
    @l1 = Libro.new
  end

  describe "# comprobar la instancia del objeto" do
    it "Es una la instancia de Libro" do (@l1.instance_of? Libro).should eq(true) end
  end

  describe " comprobar la jerarquia de clases del objeto" do
    it "Pertenece a la jerarquia de clase de Bibliog" do (@l1.is_a? Bibliog).should eq(true) end
    it "Pertenece a la jerarquia de clase de Object" do (@l1.is_a? Object).should eq(true) end
    it "Pertenece a la jerarquia de clase de BasicObject" do (@l1.is_a? BasicObject).should eq(true) end
  end

  describe "# comprobar la instancia del objeto con su madre" do
    # instance_of? is exact-class only, so a Libro is not an instance of Bibliog.
    it "No es una instancia de Bibliog" do (@l1.instance_of? Bibliog).should eq(false) end
  end

  describe "# comprobar si responde a un metodo de su madre" do
    it "Debe responder a un metodo de su madre" do @l1.respond_to?(:autores).should eq(true) end
  end
end

# Revista: subclass of Bibliog; mirrors the Libro checks.
describe Revista do
  before :all do
    @r1 = Revista.new
  end

  describe "# comprobar la instancia del objeto" do
    it "Es una instancia de Revista" do (@r1.instance_of? Revista).should eq(true) end
  end

  describe " comprobar la jerarquia de clases del objeto" do
    it "Pertenece a la jerarquia de clase de Bibliog" do (@r1.is_a? Bibliog).should eq(true) end
    it "Pertenece a la jerarquia de clase de Object" do (@r1.is_a? Object).should eq(true) end
    it "Pertenece a la jerarquia de clase de BasicObject" do (@r1.is_a? BasicObject).should eq(true) end
  end

  describe "# comprobar la instancia del objeto con su madre" do
    it "No es una instancia de Bibliog" do (@r1.instance_of? Bibliog).should eq(false) end
  end

  describe "# comprobar si responde a un metodo de su madre" do
    it "Debe responder a un metodo de su madre" do @r1.respond_to?(:autores).should eq(true) end
  end
end

# Periodico: subclass of Bibliog.
describe Periodico do
  before :all do
    # FIX(review): the snapshot instantiated Revista here and asserted
    # instance_of?(Revista) — a copy-paste from the Revista block that never
    # exercised Periodico at all. Instantiate and assert the class under test.
    @p1 = Periodico.new
  end

  describe "# comprobar la instancia del objeto" do
    it "Es una instancia de Periodico" do (@p1.instance_of? Periodico).should eq(true) end
  end
end
require 'spec_helper' describe Bibliografia do before :each do @libro = Bibliografia::Referencia.new( ["Dave Thomas", "Andy Hunt", "Chad Fowler"], "Programming Ruby 1.9 & 2.0: The Pragmatic Programmers' Guide", "(The Facets of Ruby)" ) end it "deben de existir uno o más autores" do expect(@libro.autores).not_to be_empty end it "debe de existir un título" do expect(@libro.titulo).not_to be_nil end it "debe de existir una serie" do expect(@libro.serie).not_to be_nil end end Añadir expectativa editorial Añadida expectativa nueva editorial require 'spec_helper' describe Bibliografia do before :each do @libro = Bibliografia::Referencia.new( ["Dave Thomas", "Andy Hunt", "Chad Fowler"], "Programming Ruby 1.9 & 2.0: The Pragmatic Programmers' Guide", "(The Facets of Ruby)", "Pragmatic Bookshelf" ) end it "deben de existir uno o más autores" do expect(@libro.autores).not_to be_empty end it "debe de existir un título" do expect(@libro.titulo).not_to be_nil end it "debe de existir una serie" do expect(@libro.serie).not_to be_nil end it "debe existir una editorial" do expect(@libro.editorial).not_to be_nil end end
# NOTE(review): whitespace-mangled dump holding TWO consecutive snapshots of the
# bibliografia spec (contexts "Referencia bibliográfica", "Nodo", "Lista
# Enlazada", "Lista Doblemente Enlazada"). Snapshot 1 runs up to the commit note
# "Nueva expectativa: Se añaden 3 publicaciones distintas por el final";
# snapshot 2 restarts immediately after it (mid-line, further below) and adds
# Bibliografia::Publicacion fixtures (@pub1..@pub3) plus a final example that
# appends them (length 5 -> 8). The dump's line breaks fall inside string
# literals, so the payload is kept byte-for-byte and comments appear only here.
# NOTE(review): the list contexts build shared state in `before :all` and the
# examples depend on execution order (ins_* / extract_* drive length
# 5 -> 4 -> 3 -> 5 -> 8); confirm RSpec example ordering is :defined before
# changing anything here.
# NOTE(review): "ISBN-13: 968-1937785499" looks like a typo for 978-…, but it
# matches the fixture it is compared against, so the expectations still pass.
require 'spec_helper' describe Bibliografia do context "Referencia bibliográfica" do before :all do @libro = Bibliografia::Referencia.new( ["Dave Thomas", "Andy Hunt", "Chad Fowler"], "Programming Ruby 1.9 & 2.0: The Pragmatic Programmers' Guide", "(The Facets of Ruby)", "Pragmatic Bookshelf", "4 edition", "(July 7, 2013)", ["968-1937785499", "1937785491"] ) end it "deben de existir uno o más autores" do expect(@libro.autores).not_to be_empty end it "debe de existir un título" do expect(@libro.titulo).not_to be_nil end it "debe de existir una serie" do expect(@libro.serie).not_to be_nil end it "debe existir una editorial" do expect(@libro.editorial).not_to be_nil end it "debe existir un número de edición" do expect(@libro.num_edicion).not_to be_nil end it "debe existir una fecha de publicación" do expect(@libro.fecha_publicacion).not_to be_nil end it "debe existir uno o más números ISBN" do expect(@libro.num_isbns).not_to be_empty end it "debe existir un método para obtener el listado de autores" do expect(@libro.print_autor).to eq("Dave Thomas, Andy Hunt, Chad Fowler") end it "Existe método para obtener el titulo" do expect(@libro.titulo).to eq("Programming Ruby 1.9 & 2.0: The Pragmatic Programmers' Guide") end it "Existe un método para obtener la serie" do expect(@libro.serie).to eq("(The Facets of Ruby)") end it "Existe un método que devuelve la editorial" do expect(@libro.editorial).to eq("Pragmatic Bookshelf") end it "Existe un método para obtener el número de edición" do expect(@libro.num_edicion).to eq("4 edition") end it "Existe un método para obtener la fecha de publicación" do expect(@libro.fecha_publicacion).to eq("(July 7, 2013)") end it "Existe un método para obtener el listado ISBN" do expect(@libro.print_isbn).to eq("ISBN-13: 968-1937785499\nISBN-10: 1937785491\n") end it "Existe un método para obtener la referencia formateada" do expect(@libro.to_s).to eq ("Dave Thomas, Andy Hunt, Chad Fowler\nProgramming Ruby 1.9 & 2.0: The Pragmatic Programmers' 
Guide\n(The Facets of Ruby)\nPragmatic Bookshelf; 4 edition (July 7, 2013)\nISBN-13: 968-1937785499\nISBN-10: 1937785491\n") end end # context referencia context "Nodo" do before :all do @libro = Bibliografia::Referencia.new( ["Dave Thomas", "Andy Hunt", "Chad Fowler"], "Programming Ruby 1.9 & 2.0: The Pragmatic Programmers' Guide", "(The Facets of Ruby)", "Pragmatic Bookshelf", "4 edition", "(July 7, 2013)", ["968-1937785499", "1937785491"] ) @nudo = Node.new(@libro) end it "deben de existir un nodo conteniendo libro de '4 edition'" do expect(@nudo.value.num_edicion).to eq("4 edition") end it "Existe un método en Nodo para obtener la referencia formateada" do expect(@nudo.to_s).to eq("Dave Thomas, Andy Hunt, Chad Fowler\nProgramming Ruby 1.9 & 2.0: The Pragmatic Programmers' Guide\n(The Facets of Ruby)\nPragmatic Bookshelf; 4 edition (July 7, 2013)\nISBN-13: 968-1937785499\nISBN-10: 1937785491\n") end end # context nodo context "Lista Enlazada" do before :all do @libro1 = Bibliografia::Referencia.new( ["Dave Thomas", "Andy Hunt", "Chad Fowler"], "Programming Ruby 1.9 & 2.0: The Pragmatic Programmers' Guide", "(The Facets of Ruby)", "Pragmatic Bookshelf", "4 edition", "(July 7, 2013)", ["968-1937785499", "1937785491"] ) @libro2 = Bibliografia::Referencia.new( ["Scott Chacon"], "Pro Git 2009th Edition", "(Pro)", "Apress", "2009 edition", "(August 27, 2009)", ["978-1430218333", "1430218339"] ) @libro3 = Bibliografia::Referencia.new( ["David Flanagan", "Yukihiro Matsumoto"], "The Ruby Programming Language", "", "O’Reilly Media", "1 edition", "(February 4, 2008)", ["0596516177", "978-0596516178"] ) @libro4 = Bibliografia::Referencia.new( ["David Chelimsky", "Dave Astels", "Bryan Helmkamp", "Dan North", "Zach Dennis", "Aslak Hellesoy"], "The RSpec Book: Behaviour Driven Development with RSpec, Cucumber, and Friends", "(The Facets of Ruby)", "Pragmatic Bookshelf", "1 edition", "(December 25, 2010)", ["1934356379", "978-1934356371"] ) @libro5 = 
Bibliografia::Referencia.new( ["Richard E. Silverman"], "Git Pocket Guide", "", "O’Reilly Media", "1 edition", "(August 2, 2013)", ["1449325866", "978-1449325862"] ) @lista = List.new end # end before it "Se puede insertar un elemento por el inicio" do @lista.ins_start(@libro1) expect(@lista.head.is_a? Node).to eq(true) expect(@lista.head.value.is_a? Bibliografia::Referencia).to eq(true) expect(@lista.head.value.titulo).to eq("Programming Ruby 1.9 & 2.0: The Pragmatic Programmers' Guide") end it "Se pueden añadir varios elementos por el final" do @lista.ins_end(@libro2) @lista.ins_end(@libro3) @lista.ins_end(@libro4) @lista.ins_end(@libro5) expect(@lista.length).to eq(5) end it "Debe existir cada Nodo de la lista con sus datos y su siguiente" do e = @lista.head while e.next != nil expect(e.is_a? Node).to eq(true) expect(e.value.is_a? Bibliografia::Referencia).to eq(true) expect(e.next.is_a? Node).to eq(true) e = e.next end expect(e.is_a? Node).to eq(true) expect(e.value.is_a? Bibliografia::Referencia).to eq(true) end it "Cada elemento de la lista debe de ser un Nodo con un libro" do e = @lista.head while e.next != nil expect(e.value.is_a? 
Bibliografia::Referencia).to eq(true) e = e.next end end it "Cada elemento de la lista debe de ser un Nodo con un libro con Título" do e = @lista.head while e.next != nil expect(e.value.titulo).not_to be_empty e = e.next end expect(e.value.titulo).not_to be_empty end it "Cada elemento de la lista debe de ser un Nodo con un libro con Autor(es)" do e = @lista.head while e.next != nil expect(e.value.autores).not_to be_empty e = e.next end expect(e.value.autores).not_to be_empty end it "El segundo elemento de la lista es Libro2" do e = @lista.head e = @lista.head.next expect(e.value.to_s).to eq(@libro2.to_s) end it "Debe de existir una lista con su cabeza" do e = @lista.head expect(e.value.to_s).to eq(@libro1.to_s) end it "Se extrae el primer elemento de la lista" do e = @lista.extract_first expect(e.value.to_s).to eq(@libro1.to_s) expect(@lista.length).to eq(4) end end # context lista context "Lista Doblemente Enlazada" do before :all do @ref1 = Bibliografia::Referencia.new( ["Dave Thomas", "Andy Hunt", "Chad Fowler"], "Programming Ruby 1.9 & 2.0: The Pragmatic Programmers' Guide", "(The Facets of Ruby)", "Pragmatic Bookshelf", "4 edition", "(July 7, 2013)", ["968-1937785499", "1937785491"] ) @ref2 = Bibliografia::Referencia.new( ["Scott Chacon"], "Pro Git 2009th Edition", "(Pro)", "Apress", "2009 edition", "(August 27, 2009)", ["978-1430218333", "1430218339"] ) @ref3 = Bibliografia::Referencia.new( ["David Flanagan", "Yukihiro Matsumoto"], "The Ruby Programming Language", "", "O’Reilly Media", "1 edition", "(February 4, 2008)", ["0596516177", "978-0596516178"] ) @ref4 = Bibliografia::Referencia.new( ["David Chelimsky", "Dave Astels", "Bryan Helmkamp", "Dan North", "Zach Dennis", "Aslak Hellesoy"], "The RSpec Book: Behaviour Driven Development with RSpec, Cucumber, and Friends", "(The Facets of Ruby)", "Pragmatic Bookshelf", "1 edition", "(December 25, 2010)", ["1934356379", "978-1934356371"] ) @ref5 = Bibliografia::Referencia.new( ["Richard E. 
Silverman"], "Git Pocket Guide", "", "O’Reilly Media", "1 edition", "(August 2, 2013)", ["1449325866", "978-1449325862"] ) @libro1 = Bibliografia::Libro.new( ["Dave Thomas", "Andy Hunt", "Chad Fowler"], "Programming Ruby 1.9 & 2.0: The Pragmatic Programmers' Guide", "(The Facets of Ruby)", "Pragmatic Bookshelf", "4 edition", "(July 7, 2013)", ["968-1937785499", "1937785491"], :libro ) @libro2 = Bibliografia::Libro.new( ["Scott Chacon"], "Pro Git 2009th Edition", "(Pro)", "Apress", "2009 edition", "(August 27, 2009)", ["978-1430218333", "1430218339"], nil ) @lista = List2.new end # end before it "Se puede insertar un elemento por el inicio" do @lista.ins_start(@ref1) expect(@lista.head.is_a? Node).to eq(true) expect(@lista.head.value.is_a? Bibliografia::Referencia).to eq(true) expect(@lista.head.value.titulo).to eq("Programming Ruby 1.9 & 2.0: The Pragmatic Programmers' Guide") end it "Se pueden añadir varios elementos por el final" do @lista.ins_end(@ref2) @lista.ins_end(@ref3) @lista.ins_end(@ref4) @lista.ins_end(@ref5) expect(@lista.length).to eq(5) end it "Debe existir cada Nodo de la lista con sus datos y su siguiente" do e = @lista.head while e.next != nil expect(e.is_a? Node).to eq(true) expect(e.value.is_a? Bibliografia::Referencia).to eq(true) expect(e.next.is_a? Node).to eq(true) e = e.next end expect(e.is_a? Node).to eq(true) expect(e.value.is_a? Bibliografia::Referencia).to eq(true) end it "Debe existir cada Nodo de la lista con sus datos y su anterior" do e = @lista.tail while e.prev != nil expect(e.is_a? Node).to eq(true) expect(e.value.is_a? Bibliografia::Referencia).to eq(true) expect(e.prev.is_a? Node).to eq(true) e = e.prev end expect(e.is_a? Node).to eq(true) expect(e.value.is_a? Bibliografia::Referencia).to eq(true) end it "Cada elemento de la lista debe de ser un Nodo con un ref" do e = @lista.head while e.next != nil expect(e.value.is_a? 
Bibliografia::Referencia).to eq(true) e = e.next end end it "Cada elemento de la lista debe de ser un Nodo con un ref con Título" do e = @lista.head while e.next != nil expect(e.value.titulo).not_to be_empty e = e.next end expect(e.value.titulo).not_to be_empty end it "Cada elemento de la lista debe de ser un Nodo con un ref con Autor(es)" do e = @lista.head while e.next != nil expect(e.value.autores).not_to be_empty e = e.next end expect(e.value.autores).not_to be_empty end it "El segundo elemento de la lista es ref2" do e = @lista.head e = @lista.head.next expect(e.value.to_s).to eq(@ref2.to_s) end it "Debe de existir una lista con su cabeza" do e = @lista.head expect(e.value.to_s).to eq(@ref1.to_s) end it "Debe de existir una lista con su cola" do e = @lista.tail expect(e.value.to_s).to eq(@ref5.to_s) end it "Se extrae el primer elemento de la lista" do e = @lista.extract_first expect(e.value.to_s).to eq(@ref1.to_s) expect(@lista.length).to eq(4) end it "Se extrae el último elemento de la lista" do e = @lista.extract_last expect(e.value.to_s).to eq(@ref5.to_s) expect(@lista.length).to eq(3) end it "Se añaden 2 libros por el final" do @lista.ins_end(@libro1) @lista.ins_end(@libro2) expect(@lista.length).to eq(5) end it "El último elemento de la lista es un libro" do expect(@lista.tail.value.tipo).to eq(:libro) end end # context lista2 end # describe Nueva expectativa: Se añaden 3 publicaciones distintas por el final require 'spec_helper' describe Bibliografia do context "Referencia bibliográfica" do before :all do @libro = Bibliografia::Referencia.new( ["Dave Thomas", "Andy Hunt", "Chad Fowler"], "Programming Ruby 1.9 & 2.0: The Pragmatic Programmers' Guide", "(The Facets of Ruby)", "Pragmatic Bookshelf", "4 edition", "(July 7, 2013)", ["968-1937785499", "1937785491"] ) end it "deben de existir uno o más autores" do expect(@libro.autores).not_to be_empty end it "debe de existir un título" do expect(@libro.titulo).not_to be_nil end it "debe de existir una serie" 
do expect(@libro.serie).not_to be_nil end it "debe existir una editorial" do expect(@libro.editorial).not_to be_nil end it "debe existir un número de edición" do expect(@libro.num_edicion).not_to be_nil end it "debe existir una fecha de publicación" do expect(@libro.fecha_publicacion).not_to be_nil end it "debe existir uno o más números ISBN" do expect(@libro.num_isbns).not_to be_empty end it "debe existir un método para obtener el listado de autores" do expect(@libro.print_autor).to eq("Dave Thomas, Andy Hunt, Chad Fowler") end it "Existe método para obtener el titulo" do expect(@libro.titulo).to eq("Programming Ruby 1.9 & 2.0: The Pragmatic Programmers' Guide") end it "Existe un método para obtener la serie" do expect(@libro.serie).to eq("(The Facets of Ruby)") end it "Existe un método que devuelve la editorial" do expect(@libro.editorial).to eq("Pragmatic Bookshelf") end it "Existe un método para obtener el número de edición" do expect(@libro.num_edicion).to eq("4 edition") end it "Existe un método para obtener la fecha de publicación" do expect(@libro.fecha_publicacion).to eq("(July 7, 2013)") end it "Existe un método para obtener el listado ISBN" do expect(@libro.print_isbn).to eq("ISBN-13: 968-1937785499\nISBN-10: 1937785491\n") end it "Existe un método para obtener la referencia formateada" do expect(@libro.to_s).to eq ("Dave Thomas, Andy Hunt, Chad Fowler\nProgramming Ruby 1.9 & 2.0: The Pragmatic Programmers' Guide\n(The Facets of Ruby)\nPragmatic Bookshelf; 4 edition (July 7, 2013)\nISBN-13: 968-1937785499\nISBN-10: 1937785491\n") end end # context referencia context "Nodo" do before :all do @libro = Bibliografia::Referencia.new( ["Dave Thomas", "Andy Hunt", "Chad Fowler"], "Programming Ruby 1.9 & 2.0: The Pragmatic Programmers' Guide", "(The Facets of Ruby)", "Pragmatic Bookshelf", "4 edition", "(July 7, 2013)", ["968-1937785499", "1937785491"] ) @nudo = Node.new(@libro) end it "deben de existir un nodo conteniendo libro de '4 edition'" do 
expect(@nudo.value.num_edicion).to eq("4 edition") end it "Existe un método en Nodo para obtener la referencia formateada" do expect(@nudo.to_s).to eq("Dave Thomas, Andy Hunt, Chad Fowler\nProgramming Ruby 1.9 & 2.0: The Pragmatic Programmers' Guide\n(The Facets of Ruby)\nPragmatic Bookshelf; 4 edition (July 7, 2013)\nISBN-13: 968-1937785499\nISBN-10: 1937785491\n") end end # context nodo context "Lista Enlazada" do before :all do @libro1 = Bibliografia::Referencia.new( ["Dave Thomas", "Andy Hunt", "Chad Fowler"], "Programming Ruby 1.9 & 2.0: The Pragmatic Programmers' Guide", "(The Facets of Ruby)", "Pragmatic Bookshelf", "4 edition", "(July 7, 2013)", ["968-1937785499", "1937785491"] ) @libro2 = Bibliografia::Referencia.new( ["Scott Chacon"], "Pro Git 2009th Edition", "(Pro)", "Apress", "2009 edition", "(August 27, 2009)", ["978-1430218333", "1430218339"] ) @libro3 = Bibliografia::Referencia.new( ["David Flanagan", "Yukihiro Matsumoto"], "The Ruby Programming Language", "", "O’Reilly Media", "1 edition", "(February 4, 2008)", ["0596516177", "978-0596516178"] ) @libro4 = Bibliografia::Referencia.new( ["David Chelimsky", "Dave Astels", "Bryan Helmkamp", "Dan North", "Zach Dennis", "Aslak Hellesoy"], "The RSpec Book: Behaviour Driven Development with RSpec, Cucumber, and Friends", "(The Facets of Ruby)", "Pragmatic Bookshelf", "1 edition", "(December 25, 2010)", ["1934356379", "978-1934356371"] ) @libro5 = Bibliografia::Referencia.new( ["Richard E. Silverman"], "Git Pocket Guide", "", "O’Reilly Media", "1 edition", "(August 2, 2013)", ["1449325866", "978-1449325862"] ) @lista = List.new end # end before it "Se puede insertar un elemento por el inicio" do @lista.ins_start(@libro1) expect(@lista.head.is_a? Node).to eq(true) expect(@lista.head.value.is_a? 
Bibliografia::Referencia).to eq(true) expect(@lista.head.value.titulo).to eq("Programming Ruby 1.9 & 2.0: The Pragmatic Programmers' Guide") end it "Se pueden añadir varios elementos por el final" do @lista.ins_end(@libro2) @lista.ins_end(@libro3) @lista.ins_end(@libro4) @lista.ins_end(@libro5) expect(@lista.length).to eq(5) end it "Debe existir cada Nodo de la lista con sus datos y su siguiente" do e = @lista.head while e.next != nil expect(e.is_a? Node).to eq(true) expect(e.value.is_a? Bibliografia::Referencia).to eq(true) expect(e.next.is_a? Node).to eq(true) e = e.next end expect(e.is_a? Node).to eq(true) expect(e.value.is_a? Bibliografia::Referencia).to eq(true) end it "Cada elemento de la lista debe de ser un Nodo con un libro" do e = @lista.head while e.next != nil expect(e.value.is_a? Bibliografia::Referencia).to eq(true) e = e.next end end it "Cada elemento de la lista debe de ser un Nodo con un libro con Título" do e = @lista.head while e.next != nil expect(e.value.titulo).not_to be_empty e = e.next end expect(e.value.titulo).not_to be_empty end it "Cada elemento de la lista debe de ser un Nodo con un libro con Autor(es)" do e = @lista.head while e.next != nil expect(e.value.autores).not_to be_empty e = e.next end expect(e.value.autores).not_to be_empty end it "El segundo elemento de la lista es Libro2" do e = @lista.head e = @lista.head.next expect(e.value.to_s).to eq(@libro2.to_s) end it "Debe de existir una lista con su cabeza" do e = @lista.head expect(e.value.to_s).to eq(@libro1.to_s) end it "Se extrae el primer elemento de la lista" do e = @lista.extract_first expect(e.value.to_s).to eq(@libro1.to_s) expect(@lista.length).to eq(4) end end # context lista context "Lista Doblemente Enlazada" do before :all do @ref1 = Bibliografia::Referencia.new( ["Dave Thomas", "Andy Hunt", "Chad Fowler"], "Programming Ruby 1.9 & 2.0: The Pragmatic Programmers' Guide", "(The Facets of Ruby)", "Pragmatic Bookshelf", "4 edition", "(July 7, 2013)", ["968-1937785499", 
"1937785491"] ) @ref2 = Bibliografia::Referencia.new( ["Scott Chacon"], "Pro Git 2009th Edition", "(Pro)", "Apress", "2009 edition", "(August 27, 2009)", ["978-1430218333", "1430218339"] ) @ref3 = Bibliografia::Referencia.new( ["David Flanagan", "Yukihiro Matsumoto"], "The Ruby Programming Language", "", "O’Reilly Media", "1 edition", "(February 4, 2008)", ["0596516177", "978-0596516178"] ) @ref4 = Bibliografia::Referencia.new( ["David Chelimsky", "Dave Astels", "Bryan Helmkamp", "Dan North", "Zach Dennis", "Aslak Hellesoy"], "The RSpec Book: Behaviour Driven Development with RSpec, Cucumber, and Friends", "(The Facets of Ruby)", "Pragmatic Bookshelf", "1 edition", "(December 25, 2010)", ["1934356379", "978-1934356371"] ) @ref5 = Bibliografia::Referencia.new( ["Richard E. Silverman"], "Git Pocket Guide", "", "O’Reilly Media", "1 edition", "(August 2, 2013)", ["1449325866", "978-1449325862"] ) @libro1 = Bibliografia::Libro.new( ["Dave Thomas", "Andy Hunt", "Chad Fowler"], "Programming Ruby 1.9 & 2.0: The Pragmatic Programmers' Guide", "(The Facets of Ruby)", "Pragmatic Bookshelf", "4 edition", "(July 7, 2013)", ["968-1937785499", "1937785491"], :libro ) @libro2 = Bibliografia::Libro.new( ["Scott Chacon"], "Pro Git 2009th Edition", "(Pro)", "Apress", "2009 edition", "(August 27, 2009)", ["978-1430218333", "1430218339"], nil ) @pub1 = Bibliografia::Publicacion.new( ["Dave Thomas", "Andy Hunt", "Chad Fowler"], "Programming Ruby 1.9 & 2.0: The Pragmatic Programmers' Guide", "(The Facets of Ruby)", "Pragmatic Bookshelf", "4 edition", "(July 7, 2013)", ["968-1937785499", "1937785491"], :publicacion_periodica, :articulo_revista ) @pub2 = Bibliografia::Publicacion.new( ["Scott Chacon"], "Pro Git 2009th Edition", "(Pro)", "Apress", "2009 edition", "(August 27, 2009)", ["978-1430218333", "1430218339"], nil, :articulo_periodico ) @pub3 = Bibliografia::Publicacion.new( ["Scott Chacon"], "Pro Git 2009th Edition", "(Pro)", "Apress", "2009 edition", "(August 27, 2009)", 
["978-1430218333", "1430218339"], nil, :documento_electronico ) @lista = List2.new end # end before it "Se puede insertar un elemento por el inicio" do @lista.ins_start(@ref1) expect(@lista.head.is_a? Node).to eq(true) expect(@lista.head.value.is_a? Bibliografia::Referencia).to eq(true) expect(@lista.head.value.titulo).to eq("Programming Ruby 1.9 & 2.0: The Pragmatic Programmers' Guide") end it "Se pueden añadir varios elementos por el final" do @lista.ins_end(@ref2) @lista.ins_end(@ref3) @lista.ins_end(@ref4) @lista.ins_end(@ref5) expect(@lista.length).to eq(5) end it "Debe existir cada Nodo de la lista con sus datos y su siguiente" do e = @lista.head while e.next != nil expect(e.is_a? Node).to eq(true) expect(e.value.is_a? Bibliografia::Referencia).to eq(true) expect(e.next.is_a? Node).to eq(true) e = e.next end expect(e.is_a? Node).to eq(true) expect(e.value.is_a? Bibliografia::Referencia).to eq(true) end it "Debe existir cada Nodo de la lista con sus datos y su anterior" do e = @lista.tail while e.prev != nil expect(e.is_a? Node).to eq(true) expect(e.value.is_a? Bibliografia::Referencia).to eq(true) expect(e.prev.is_a? Node).to eq(true) e = e.prev end expect(e.is_a? Node).to eq(true) expect(e.value.is_a? Bibliografia::Referencia).to eq(true) end it "Cada elemento de la lista debe de ser un Nodo con un ref" do e = @lista.head while e.next != nil expect(e.value.is_a? 
Bibliografia::Referencia).to eq(true) e = e.next end end it "Cada elemento de la lista debe de ser un Nodo con un ref con Título" do e = @lista.head while e.next != nil expect(e.value.titulo).not_to be_empty e = e.next end expect(e.value.titulo).not_to be_empty end it "Cada elemento de la lista debe de ser un Nodo con un ref con Autor(es)" do e = @lista.head while e.next != nil expect(e.value.autores).not_to be_empty e = e.next end expect(e.value.autores).not_to be_empty end it "El segundo elemento de la lista es ref2" do e = @lista.head e = @lista.head.next expect(e.value.to_s).to eq(@ref2.to_s) end it "Debe de existir una lista con su cabeza" do e = @lista.head expect(e.value.to_s).to eq(@ref1.to_s) end it "Debe de existir una lista con su cola" do e = @lista.tail expect(e.value.to_s).to eq(@ref5.to_s) end it "Se extrae el primer elemento de la lista" do e = @lista.extract_first expect(e.value.to_s).to eq(@ref1.to_s) expect(@lista.length).to eq(4) end it "Se extrae el último elemento de la lista" do e = @lista.extract_last expect(e.value.to_s).to eq(@ref5.to_s) expect(@lista.length).to eq(3) end it "Se añaden 2 libros por el final" do @lista.ins_end(@libro1) @lista.ins_end(@libro2) expect(@lista.length).to eq(5) end it "El último elemento de la lista es un libro" do expect(@lista.tail.value.tipo).to eq(:libro) end it "Se añaden 3 publicaciones por el final" do @lista.ins_end(@pub1) @lista.ins_end(@pub2) @lista.ins_end(@pub3) expect(@lista.length).to eq(8) end end # context lista2 end # describe
require 'spec_helper' require_relative '../../support/mocks/observer' module Cb module Utils describe Api do let(:api) { Api.instance } let(:path) { '/moom' } let(:options) { {} } describe '#factory' do it 'returns a new instance of the api class' do expect(Cb::Utils::Api.instance).to be_a_kind_of(Cb::Utils::Api) end context 'when we have observers' do before{ Cb.configuration.stub(:observers).and_return(Array(Mocks::Observer)) } it 'returns an instance of Api with the observers attached' do Mocks::Observer.should_receive(:new) Cb::Utils::Api.any_instance.should_receive(:add_observer) expect(Cb::Utils::Api.instance).to be_a_kind_of(Cb::Utils::Api) end end end describe '#cb_get' do context 'when we have observers' do let(:response) { { success: 'yeah' } } before{ Cb.configuration.stub(:observers).and_return(Array(Mocks::Observer)) Api.stub(:get).with(path, options).and_return(response) } it 'will notify the observers' do api.should_receive(:notify_observers).twice.and_call_original Mocks::Observer.any_instance.should_receive(:update).at_most(2).times api.cb_get(path) end end it 'sends #post to HttParty' do Api.should_receive(:get).with(path, options) api.cb_get(path) end context 'When Cb base_uri is configured' do before { Cb.configuration.base_uri = 'http://www.applecat.com' Api.stub(:get).with(path, options) } it 'sets base_uri on Api' do api.cb_get(path) expect(Api.base_uri).to eq 'http://www.applecat.com' end end context 'When a response is returned' do let(:response) { { success: 'yeah' } } before { Api.stub(:get).with(path, options).and_return(response) } it 'sends #validate to ResponseValidator with the response' do ResponseValidator.should_receive(:validate).with(response).and_return(response) api.cb_get(path) end end end describe '#cb_post' do context 'when we have observers' do let(:response) { { success: 'yeah' } } before{ Cb.configuration.stub(:observers).and_return(Array(Mocks::Observer)) Api.stub(:post).with(path, options).and_return(response) } it 
'will notify the observers' do api.should_receive(:notify_observers).twice.and_call_original Mocks::Observer.any_instance.should_receive(:update).at_most(2).times api.cb_post(path) end end it 'sends #post to HttParty' do Api.should_receive(:post).with(path, options) api.cb_post(path) end context 'When Cb base_uri is configured' do before { Cb.configuration.base_uri = 'http://www.bananadog.org' Api.stub(:post).with(path, options) } it 'sets base_uri on Api' do api.cb_post(path) expect(Api.base_uri).to eq 'http://www.bananadog.org' end end context 'When a response is returned' do let(:response) { { success: 'yeah' } } before { Api.stub(:post).with(path, options).and_return(response) } it 'sends #validate to ResponseValidator with the response' do ResponseValidator.should_receive(:validate).with(response).and_return(response) api.cb_post(path) end end end describe '#cb_put' do context 'when we have observers' do let(:response) { { success: 'yeah' } } before{ Cb.configuration.stub(:observers).and_return(Array(Mocks::Observer)) Api.stub(:put).with(path, options).and_return(response) } it 'will notify the observers' do api.should_receive(:notify_observers).twice.and_call_original Mocks::Observer.any_instance.should_receive(:update).at_most(2).times api.cb_put(path) end end it 'sends #put to HttParty' do Api.should_receive(:put).with(path, options) api.cb_put(path) end context 'When Cb base_uri is configured' do before { Cb.configuration.base_uri = 'http://www.kylerox.org' Api.stub(:put).with(path, options) } it 'sets base_uri on Api' do api.cb_put(path) expect(Api.base_uri).to eq 'http://www.kylerox.org' end end context 'When a response is returned' do let(:response) { { success: 'yeah' } } before { Api.stub(:put).with(path, options).and_return(response) } it 'sends #validate to ResponseValidator with the response' do ResponseValidator.should_receive(:validate).with(response).and_return(response) api.cb_put(path) end end end describe '#cb_delete' do context 'when we have 
observers' do let(:response) { { success: 'yeah' } } before{ Cb.configuration.stub(:observers).and_return(Array(Mocks::Observer)) Api.stub(:delete).with(path, options).and_return(response) } it 'will notify the observers' do api.should_receive(:notify_observers).twice.and_call_original Mocks::Observer.any_instance.should_receive(:update).at_most(2).times api.cb_delete(path) end end it 'sends #delete to HttParty' do Api.should_receive(:delete).with(path, options) api.cb_delete(path) end context 'When Cb base_uri is configured' do before { Cb.configuration.base_uri = 'http://www.kylerox.org' Api.stub(:delete).with(path, options) } it 'sets base_uri on Api' do api.cb_delete(path) expect(Api.base_uri).to eq 'http://www.kylerox.org' end end context 'When a response is returned' do let(:response) { { success: 'yeah' } } before { Api.stub(:delete).with(path, options).and_return(response) } it 'sends #validate to ResponseValidator with the response' do ResponseValidator.should_receive(:validate).with(response).and_return(response) api.cb_delete(path) end end end describe '#execute_http_request' do context ':delete' do context 'when we have observers' do let(:response) { { success: 'yeah' } } before{ Cb.configuration.stub(:observers).and_return(Array(Mocks::Observer)) Api.stub(:delete).with(path, options).and_return(response) } it 'will notify the observers' do api.should_receive(:notify_observers).twice.and_call_original Mocks::Observer.any_instance.should_receive(:update).at_most(2).times api.execute_http_request(:delete, nil, path) end end it 'sends #delete to HttParty' do Api.should_receive(:delete).with(path, options) api.execute_http_request(:delete, nil, path) end context 'When Cb base_uri is configured' do before { Cb.configuration.base_uri = 'http://www.kylerox.org' Api.stub(:delete).with(path, options) } it 'sets base_uri on Api' do api.execute_http_request(:delete, nil, path) expect(Api.base_uri).to eq 'http://www.kylerox.org' end end context 'When a response is 
returned' do let(:response) { { success: 'yeah' } } before { Api.stub(:delete).with(path, options).and_return(response) } it 'sends #validate to ResponseValidator with the response' do ResponseValidator.should_receive(:validate).with(response).and_return(response) api.execute_http_request(:delete, nil, path) end end end end context 'base_uri gets the override' do let(:base_uri) { 'http://www.careerbuilder.com' } before { Cb.configuration.base_uri = 'http://www.kylerox.org' Api.stub(:delete).with(path, options) } it 'sets an override' do api.execute_http_request(:delete, base_uri, path) expect(Api.base_uri).to eq base_uri end it 'doesn\'t set an override' do api.execute_http_request(:delete, nil, path) expect(Api.base_uri).to eq 'http://www.kylerox.org' end end end end end fixing tests require 'spec_helper' require_relative '../../support/mocks/observer' module Cb module Utils describe Api do let(:api) { Api.instance } let(:path) { '/moom' } let(:options) { {} } describe '#factory' do it 'returns a new instance of the api class' do expect(Cb::Utils::Api.instance).to be_a_kind_of(Cb::Utils::Api) end context 'when we have observers' do before{ Cb.configuration.stub(:observers).and_return(Array(Mocks::Observer)) } it 'returns an instance of Api with the observers attached' do Mocks::Observer.should_receive(:new) Cb::Utils::Api.any_instance.should_receive(:add_observer) expect(Cb::Utils::Api.instance).to be_a_kind_of(Cb::Utils::Api) end end end describe '#cb_get' do context 'when we have observers' do let(:response) { { success: 'yeah' } } before{ Cb.configuration.stub(:observers).and_return(Array(Mocks::Observer)) Api.stub(:get).with(path, options).and_return(response) } it 'will notify the observers' do api.should_receive(:notify_observers).twice.and_call_original Mocks::Observer.any_instance.should_receive(:update).at_most(2).times api.cb_get(path) end end it 'sends #post to HttParty' do Api.should_receive(:get).with(path, options) api.cb_get(path) end context 'When 
Cb base_uri is configured' do before { Cb.configuration.base_uri = 'http://www.applecat.com' Api.stub(:get).with(path, options) } it 'sets base_uri on Api' do api.cb_get(path) expect(Api.base_uri).to eq 'http://www.applecat.com' end end context 'When a response is returned' do let(:response) { { success: 'yeah' } } before { Api.stub(:get).with(path, options).and_return(response) } it 'sends #validate to ResponseValidator with the response' do ResponseValidator.should_receive(:validate).with(response).and_return(response) api.cb_get(path) end end end describe '#cb_post' do context 'when we have observers' do let(:response) { { success: 'yeah' } } before{ Cb.configuration.stub(:observers).and_return(Array(Mocks::Observer)) Api.stub(:post).with(path, options).and_return(response) } it 'will notify the observers' do api.should_receive(:notify_observers).twice.and_call_original Mocks::Observer.any_instance.should_receive(:update).at_most(2).times api.cb_post(path) end end it 'sends #post to HttParty' do Api.should_receive(:post).with(path, options) api.cb_post(path) end context 'When Cb base_uri is configured' do before { Cb.configuration.base_uri = 'http://www.bananadog.org' Api.stub(:post).with(path, options) } it 'sets base_uri on Api' do api.cb_post(path) expect(Api.base_uri).to eq 'http://www.bananadog.org' end end context 'When a response is returned' do let(:response) { { success: 'yeah' } } before { Api.stub(:post).with(path, options).and_return(response) } it 'sends #validate to ResponseValidator with the response' do ResponseValidator.should_receive(:validate).with(response).and_return(response) api.cb_post(path) end end end describe '#cb_put' do context 'when we have observers' do let(:response) { { success: 'yeah' } } before{ Cb.configuration.stub(:observers).and_return(Array(Mocks::Observer)) Api.stub(:put).with(path, options).and_return(response) } it 'will notify the observers' do api.should_receive(:notify_observers).twice.and_call_original 
Mocks::Observer.any_instance.should_receive(:update).at_most(2).times api.cb_put(path) end end it 'sends #put to HttParty' do Api.should_receive(:put).with(path, options) api.cb_put(path) end context 'When Cb base_uri is configured' do before { Cb.configuration.base_uri = 'http://www.kylerox.org' Api.stub(:put).with(path, options) } it 'sets base_uri on Api' do api.cb_put(path) expect(Api.base_uri).to eq 'http://www.kylerox.org' end end context 'When a response is returned' do let(:response) { { success: 'yeah' } } before { Api.stub(:put).with(path, options).and_return(response) } it 'sends #validate to ResponseValidator with the response' do ResponseValidator.should_receive(:validate).with(response).and_return(response) api.cb_put(path) end end end describe '#cb_delete' do context 'when we have observers' do let(:response) { { success: 'yeah' } } before{ Cb.configuration.stub(:observers).and_return(Array(Mocks::Observer)) Api.stub(:delete).with(path, options).and_return(response) } it 'will notify the observers' do api.should_receive(:notify_observers).twice.and_call_original Mocks::Observer.any_instance.should_receive(:update).at_most(2).times api.cb_delete(path) end end it 'sends #delete to HttParty' do Api.should_receive(:delete).with(path, options) api.cb_delete(path) end context 'When Cb base_uri is configured' do before { Cb.configuration.base_uri = 'http://www.kylerox.org' Api.stub(:delete).with(path, options) } it 'sets base_uri on Api' do api.cb_delete(path) expect(Api.base_uri).to eq 'http://www.kylerox.org' end end context 'When a response is returned' do let(:response) { { success: 'yeah' } } before { Api.stub(:delete).with(path, options).and_return(response) } it 'sends #validate to ResponseValidator with the response' do ResponseValidator.should_receive(:validate).with(response).and_return(response) api.cb_delete(path) end end end describe '#execute_http_request' do context ':delete' do context 'when we have observers' do let(:response) { { success: 
'yeah' } } before{ Cb.configuration.stub(:observers).and_return(Array(Mocks::Observer)) Api.stub(:delete).with(path, options).and_return(response) } it 'will notify the observers' do api.should_receive(:notify_observers).twice.and_call_original Mocks::Observer.any_instance.should_receive(:update).at_most(2).times api.execute_http_request(:delete, nil, path) end end it 'sends #delete to HttParty' do Api.should_receive(:delete).with(path, options) api.execute_http_request(:delete, nil, path) end context 'When Cb base_uri is configured' do before { Cb.configuration.base_uri = 'http://www.kylerox.org' Api.stub(:delete).with(path, options) } it 'sets base_uri on Api' do api.execute_http_request(:delete, nil, path) expect(Api.base_uri).to eq 'http://www.kylerox.org' end end context 'When a response is returned' do let(:response) { { success: 'yeah' } } before { Api.stub(:delete).with(path, options).and_return(response) } it 'sends #validate to ResponseValidator with the response' do ResponseValidator.should_receive(:validate).with(response).and_return(response) api.execute_http_request(:delete, nil, path) end end end end describe '#base_uri' do let(:base_uri) { 'http://www.careerbuilder.com' } let(:default_uri) { 'http://www.kylerox.org' } before { Cb.configuration.base_uri = default_uri Api.stub(:delete).with(path, options) } context 'passes a base uri' do before { api.execute_http_request(:delete, base_uri, path) } it 'sets an override' do expect(Api.base_uri).to eq base_uri end end context 'passes nil' do before { api.execute_http_request(:delete, nil, path) } it 'doesn\'t set an override' do expect(Api.base_uri).to eq default_uri end end end end end end
require 'spec_helper' describe 'gitlab', type: :class do on_supported_os.each do |os, facts| context "on #{os}" do let(:facts) do facts end context 'with default params' do it { is_expected.to contain_class('gitlab::params') } it { is_expected.to contain_class('gitlab::install').that_comes_before('Class[gitlab::config]') } it { is_expected.to contain_class('gitlab::config') } it { is_expected.to contain_class('gitlab::service').that_subscribes_to('Class[gitlab::config]') } it { is_expected.to contain_exec('gitlab_reconfigure') } it { is_expected.to contain_file('/etc/gitlab/gitlab.rb') } it { is_expected.to contain_service('gitlab-runsvdir') } it { is_expected.to contain_package('gitlab-ce').with_ensure('installed') } it { is_expected.to contain_class('gitlab') } case facts[:osfamily] when 'Debian' it { is_expected.to contain_apt__source('gitlab_official_ce') } when 'RedHat' it { is_expected.to contain_yumrepo('gitlab_official_ce') } end end context 'with class specific parameters' do describe 'edition = ee' do let(:params) { { edition: 'ee' } } it { is_expected.to contain_package('gitlab-ee').with_ensure('installed') } case facts[:osfamily] when 'Debian' it { is_expected.to contain_apt__source('gitlab_official_ee') } when 'RedHat' it { is_expected.to contain_yumrepo('gitlab_official_ee') } end end describe 'external_url' do let(:params) { { external_url: 'http://gitlab.mycompany.com/' } } it { is_expected.to contain_file('/etc/gitlab/gitlab.rb'). \ with_content(%r{^\s*external_url 'http:\/\/gitlab\.mycompany\.com\/'$}) } end describe 'external_port' do let(:params) { { external_port: 9654 } } it { is_expected.to contain_file('/etc/gitlab/gitlab.rb'). \ with_content(%r{^\s*external_port '9654'$}) } end describe 'nginx' do let(:params) do { nginx: { 'enable' => true, 'listen_port' => 80 } } end it { is_expected.to contain_file('/etc/gitlab/gitlab.rb'). \ with_content(%r{^\s*nginx\['enable'\] = true$}). 
with_content(%r{^\s*nginx\['listen_port'\] = ('|)80('|)$}) } end describe 'letsencrypt' do let(:params) do { letsencrypt: { 'enable' => true, 'contact_emails' => ['test@example.com'] } } end it { is_expected.to contain_file('/etc/gitlab/gitlab.rb'). \ with_content(%r{^\s*letsencrypt\['enable'\] = true$}). with_content(%r{^\s*letsencrypt\['contact_emails'\] = \["test@example.com"\]$}) } end describe 'secrets' do let(:params) do { secrets: { 'gitlab_shell' => { 'secret_token' => 'mysupersecrettoken1' }, 'gitlab_rails' => { 'secret_token' => 'mysupersecrettoken2' }, 'gitlab_ci' => { 'secret_token' => 'null', 'secret_key_base' => 'mysupersecrettoken3', 'db_key_base' => 'mysupersecrettoken4' } } } end it { is_expected.to contain_file('/etc/gitlab/gitlab-secrets.json'). \ with_content(%r{{\n \"gitlab_shell\": {\n \"secret_token\": \"mysupersecrettoken1\"\n },\n \"gitlab_rails\": {\n \"secret_token\": \"mysupersecrettoken2\"\n },\n \"gitlab_ci\": {\n \"secret_token\": \"null\",\n \"secret_key_base\": \"mysupersecrettoken3\",\n \"db_key_base\": \"mysupersecrettoken4\"\n }\n}\n}m) } end describe 'gitlab_rails with hash value' do let(:params) do { gitlab_rails: { 'ldap_enabled' => true, 'ldap_servers' => { 'main' => { 'label' => 'LDAP', 'host' => '_your_ldap_server', 'port' => 389, 'uid' => 'sAMAccountName', 'method' => 'plain', 'bind_dn' => '_the_full_dn_of_the_user_you_will_bind_with', 'password' => '_the_password_of_the_bind_user', 'active_directory' => true, 'allow_username_or_email_login' => false, 'block_auto_created_users' => false, 'base' => '', 'user_filter' => '' } }, 'omniauth_providers' => [ { 'name' => 'google_oauth2', 'app_id' => 'YOUR APP ID', 'app_secret' => 'YOUR APP SECRET', 'args' => { 'access_type' => 'offline', 'approval_prompt' => '' } } ] } } end let(:expected_content) do { gitlab_rb__ldap_servers: %(gitlab_rails['ldap_servers'] = {\"main\"=>{\"active_directory\"=>true, \"allow_username_or_email_login\"=>false, \"base\"=>\"\", 
\"bind_dn\"=>\"_the_full_dn_of_the_user_you_will_bind_with\", \"block_auto_created_users\"=>false, \"host\"=>\"_your_ldap_server\", \"label\"=>\"LDAP\", \"method\"=>\"plain\", \"password\"=>\"_the_password_of_the_bind_user\", \"port\"=>389, \"uid\"=>\"sAMAccountName\", \"user_filter\"=>\"\"}}\n) } end it { is_expected.to contain_file('/etc/gitlab/gitlab.rb'). \ with_content(%r{^\s*gitlab_rails\['ldap_enabled'\] = true$}). with_content(%r{\s*#{Regexp.quote(expected_content[:gitlab_rb__ldap_servers])}}m). with_content(%r{^\s*gitlab_rails\['omniauth_providers'\] = \[{\"app_id\"=>\"YOUR APP ID\", \"app_secret\"=>\"YOUR APP SECRET\", \"args\"=>{\"access_type\"=>\"offline\", \"approval_prompt\"=>\"\"}, \"name\"=>\"google_oauth2\"}\]$}) } end describe 'gitlab_git_http_server with hash value' do let(:params) do { gitlab_git_http_server: { 'enable' => true } } end it { is_expected.to contain_file('/etc/gitlab/gitlab.rb'). \ with_content(%r{^\s*gitlab_git_http_server\['enable'\] = true$}) } end describe 'gitlab_rails with string value' do let(:params) do { gitlab_rails: { 'backup_path' => '/opt/gitlab_backup' } } end it { is_expected.to contain_file('/etc/gitlab/gitlab.rb'). \ with_content(%r{^\s*gitlab_rails\['backup_path'\] = "\/opt\/gitlab_backup"$}) } end describe 'rack_attack_git_basic_auth with Numbers and Strings' do let(:params) do { gitlab_rails: { 'rack_attack_git_basic_auth' => { 'enable' => true, 'ip_whitelist' => ['127.0.0.1', '10.0.0.0'], 'maxretry' => 10, 'findtime' => 60, 'bantime' => 3600 } } } end it { is_expected.to contain_file('/etc/gitlab/gitlab.rb'). \ with_content(%r{^\s*gitlab_rails\['rack_attack_git_basic_auth'\] = {\"bantime\"=>3600, \"enable\"=>true, \"findtime\"=>60, \"ip_whitelist\"=>\[\"127.0.0.1\", \"10.0.0.0\"\], \"maxretry\"=>10}$}) } end describe 'mattermost external URL' do let(:params) { { mattermost_external_url: 'https://mattermost.myserver.tld' } } it { is_expected.to contain_file('/etc/gitlab/gitlab.rb'). 
\ with_content(%r{^\s*mattermost_external_url 'https:\/\/mattermost\.myserver\.tld'$}) } end describe 'mattermost with hash value' do let(:params) do { mattermost: { 'enable' => true } } end it { is_expected.to contain_file('/etc/gitlab/gitlab.rb'). \ with_content(%r{^\s*mattermost\['enable'\] = true$}) } end describe 'with manage_package => false' do let(:params) { { manage_package: false } } it { is_expected.not_to contain_package('gitlab-ce') } it { is_expected.not_to contain_package('gitlab-ee') } end describe 'with roles' do let(:params) do { 'roles' => ['redis_sentinel_role', 'redis_master_role'] } end let(:expected_content) do { roles: %(roles [\"redis_sentinel_role\", \"redis_master_role\"]) } end it { is_expected.to contain_file('/etc/gitlab/gitlab.rb') \ .with_content(%r{\s*#{Regexp.quote(expected_content[:roles])}}m) } end describe 'with data_dirs' do let(:params) do { 'git_data_dirs' => { 'default' => { 'path' => '/git-data/data' } } } end let(:expected_content) do { datadirs: %(git_data_dirs({\"default\"=>{\"path\"=>\"/git-data/data\"}})\n) } end it do is_expected.to contain_file('/etc/gitlab/gitlab.rb'). 
with_content(%r{\s*#{Regexp.quote(expected_content[:datadirs])}}m) end end describe 'with store_git_keys_in_db' do let(:params) { { store_git_keys_in_db: true } } it do is_expected.to contain_file('/opt/gitlab-shell/authorized_keys') end end end end end context 'on usupported os' do let(:facts) do { osfamily: 'Solaris', operatingsystem: 'Nexenta' } end describe 'gitlab class without any parameters on Solaris/Nexenta' do it { is_expected.to compile.and_raise_error(%r{is not supported}) } end end end style corrections in specs require 'spec_helper' describe 'gitlab', type: :class do on_supported_os.each do |os, facts| context "on #{os}" do let(:facts) do facts end context 'with default params' do it { is_expected.to contain_class('gitlab::params') } it { is_expected.to contain_class('gitlab::install').that_comes_before('Class[gitlab::config]') } it { is_expected.to contain_class('gitlab::config') } it { is_expected.to contain_class('gitlab::service').that_subscribes_to('Class[gitlab::config]') } it { is_expected.to contain_exec('gitlab_reconfigure') } it { is_expected.to contain_file('/etc/gitlab/gitlab.rb') } it { is_expected.to contain_service('gitlab-runsvdir') } it { is_expected.to contain_package('gitlab-ce').with_ensure('installed') } it { is_expected.to contain_class('gitlab') } case facts[:osfamily] when 'Debian' it { is_expected.to contain_apt__source('gitlab_official_ce') } when 'RedHat' it { is_expected.to contain_yumrepo('gitlab_official_ce') } end end context 'with class specific parameters' do describe 'edition = ee' do let(:params) { { edition: 'ee' } } it { is_expected.to contain_package('gitlab-ee').with_ensure('installed') } case facts[:osfamily] when 'Debian' it { is_expected.to contain_apt__source('gitlab_official_ee') } when 'RedHat' it { is_expected.to contain_yumrepo('gitlab_official_ee') } end end describe 'external_url' do let(:params) { { external_url: 'http://gitlab.mycompany.com/' } } it { is_expected.to 
contain_file('/etc/gitlab/gitlab.rb'). \ with_content(%r{^\s*external_url 'http:\/\/gitlab\.mycompany\.com\/'$}) } end describe 'external_port' do let(:params) { { external_port: 9654 } } it { is_expected.to contain_file('/etc/gitlab/gitlab.rb'). \ with_content(%r{^\s*external_port '9654'$}) } end describe 'nginx' do let(:params) do { nginx: { 'enable' => true, 'listen_port' => 80 } } end it { is_expected.to contain_file('/etc/gitlab/gitlab.rb'). \ with_content(%r{^\s*nginx\['enable'\] = true$}). with_content(%r{^\s*nginx\['listen_port'\] = ('|)80('|)$}) } end describe 'letsencrypt' do let(:params) do { letsencrypt: { 'enable' => true, 'contact_emails' => ['test@example.com'] } } end it { is_expected.to contain_file('/etc/gitlab/gitlab.rb'). \ with_content(%r{^\s*letsencrypt\['enable'\] = true$}). with_content(%r{^\s*letsencrypt\['contact_emails'\] = \["test@example.com"\]$}) } end describe 'secrets' do let(:params) do { secrets: { 'gitlab_shell' => { 'secret_token' => 'mysupersecrettoken1' }, 'gitlab_rails' => { 'secret_token' => 'mysupersecrettoken2' }, 'gitlab_ci' => { 'secret_token' => 'null', 'secret_key_base' => 'mysupersecrettoken3', 'db_key_base' => 'mysupersecrettoken4' } } } end it { is_expected.to contain_file('/etc/gitlab/gitlab-secrets.json'). 
\ with_content(%r{{\n \"gitlab_shell\": {\n \"secret_token\": \"mysupersecrettoken1\"\n },\n \"gitlab_rails\": {\n \"secret_token\": \"mysupersecrettoken2\"\n },\n \"gitlab_ci\": {\n \"secret_token\": \"null\",\n \"secret_key_base\": \"mysupersecrettoken3\",\n \"db_key_base\": \"mysupersecrettoken4\"\n }\n}\n}m) } end describe 'gitlab_rails with hash value' do let(:params) do { gitlab_rails: { 'ldap_enabled' => true, 'ldap_servers' => { 'main' => { 'label' => 'LDAP', 'host' => '_your_ldap_server', 'port' => 389, 'uid' => 'sAMAccountName', 'method' => 'plain', 'bind_dn' => '_the_full_dn_of_the_user_you_will_bind_with', 'password' => '_the_password_of_the_bind_user', 'active_directory' => true, 'allow_username_or_email_login' => false, 'block_auto_created_users' => false, 'base' => '', 'user_filter' => '' } }, 'omniauth_providers' => [ { 'name' => 'google_oauth2', 'app_id' => 'YOUR APP ID', 'app_secret' => 'YOUR APP SECRET', 'args' => { 'access_type' => 'offline', 'approval_prompt' => '' } } ] } } end let(:expected_content) do { gitlab_rb__ldap_servers: %(gitlab_rails['ldap_servers'] = {\"main\"=>{\"active_directory\"=>true, \"allow_username_or_email_login\"=>false, \"base\"=>\"\", \"bind_dn\"=>\"_the_full_dn_of_the_user_you_will_bind_with\", \"block_auto_created_users\"=>false, \"host\"=>\"_your_ldap_server\", \"label\"=>\"LDAP\", \"method\"=>\"plain\", \"password\"=>\"_the_password_of_the_bind_user\", \"port\"=>389, \"uid\"=>\"sAMAccountName\", \"user_filter\"=>\"\"}}\n) } end it { is_expected.to contain_file('/etc/gitlab/gitlab.rb'). \ with_content(%r{^\s*gitlab_rails\['ldap_enabled'\] = true$}). with_content(%r{\s*#{Regexp.quote(expected_content[:gitlab_rb__ldap_servers])}}m). 
with_content(%r{^\s*gitlab_rails\['omniauth_providers'\] = \[{\"app_id\"=>\"YOUR APP ID\", \"app_secret\"=>\"YOUR APP SECRET\", \"args\"=>{\"access_type\"=>\"offline\", \"approval_prompt\"=>\"\"}, \"name\"=>\"google_oauth2\"}\]$}) } end describe 'gitlab_git_http_server with hash value' do let(:params) do { gitlab_git_http_server: { 'enable' => true } } end it { is_expected.to contain_file('/etc/gitlab/gitlab.rb'). \ with_content(%r{^\s*gitlab_git_http_server\['enable'\] = true$}) } end describe 'gitlab_rails with string value' do let(:params) do { gitlab_rails: { 'backup_path' => '/opt/gitlab_backup' } } end it { is_expected.to contain_file('/etc/gitlab/gitlab.rb'). \ with_content(%r{^\s*gitlab_rails\['backup_path'\] = "\/opt\/gitlab_backup"$}) } end describe 'rack_attack_git_basic_auth with Numbers and Strings' do let(:params) do { gitlab_rails: { 'rack_attack_git_basic_auth' => { 'enable' => true, 'ip_whitelist' => ['127.0.0.1', '10.0.0.0'], 'maxretry' => 10, 'findtime' => 60, 'bantime' => 3600 } } } end it { is_expected.to contain_file('/etc/gitlab/gitlab.rb'). \ with_content(%r{^\s*gitlab_rails\['rack_attack_git_basic_auth'\] = {\"bantime\"=>3600, \"enable\"=>true, \"findtime\"=>60, \"ip_whitelist\"=>\[\"127.0.0.1\", \"10.0.0.0\"\], \"maxretry\"=>10}$}) } end describe 'mattermost external URL' do let(:params) { { mattermost_external_url: 'https://mattermost.myserver.tld' } } it { is_expected.to contain_file('/etc/gitlab/gitlab.rb'). \ with_content(%r{^\s*mattermost_external_url 'https:\/\/mattermost\.myserver\.tld'$}) } end describe 'mattermost with hash value' do let(:params) do { mattermost: { 'enable' => true } } end it { is_expected.to contain_file('/etc/gitlab/gitlab.rb'). 
\ with_content(%r{^\s*mattermost\['enable'\] = true$}) } end describe 'with manage_package => false' do let(:params) { { manage_package: false } } it { is_expected.not_to contain_package('gitlab-ce') } it { is_expected.not_to contain_package('gitlab-ee') } end describe 'with roles' do let(:params) do { 'roles' => %w['redis_sentinel_role' 'redis_master_role'] } end let(:expected_content) do { roles: %(roles [\"redis_sentinel_role\", \"redis_master_role\"]) } end it { is_expected.to contain_file('/etc/gitlab/gitlab.rb'). with_content(%r{\s*#{Regexp.quote(expected_content[:roles])}}m) } end describe 'with data_dirs' do let(:params) do { 'git_data_dirs' => { 'default' => { 'path' => '/git-data/data' } } } end let(:expected_content) do { datadirs: %(git_data_dirs({\"default\"=>{\"path\"=>\"/git-data/data\"}})\n) } end it do is_expected.to contain_file('/etc/gitlab/gitlab.rb'). with_content(%r{\s*#{Regexp.quote(expected_content[:datadirs])}}m) end end describe 'with store_git_keys_in_db' do let(:params) { { store_git_keys_in_db: true } } it do is_expected.to contain_file('/opt/gitlab-shell/authorized_keys') end end end end end context 'on usupported os' do let(:facts) do { osfamily: 'Solaris', operatingsystem: 'Nexenta' } end describe 'gitlab class without any parameters on Solaris/Nexenta' do it { is_expected.to compile.and_raise_error(%r{is not supported}) } end end end
require File.dirname(__FILE__) + '/base' describe Heroku::CommandLine do context "credentials" do before do @wrapper = Heroku::CommandLine.new @wrapper.stub!(:display) end it "reads credentials from the credentials file" do sandbox = "/tmp/wrapper_spec_#{Process.pid}" File.open(sandbox, "w") { |f| f.write "user\npass\n" } @wrapper.stub!(:credentials_file).and_return(sandbox) @wrapper.get_credentials.should == %w(user pass) end it "takes the user from the first line and the password from the second line" do @wrapper.stub!(:get_credentials).and_return(%w(user pass)) @wrapper.user.should == 'user' @wrapper.password.should == 'pass' end it "asks for credentials when the file doesn't exist" do sandbox = "/tmp/wrapper_spec_#{Process.pid}" FileUtils.rm_rf(sandbox) @wrapper.stub!(:credentials_file).and_return(sandbox) @wrapper.should_receive(:ask_for_credentials).and_return([ 'u', 'p']) @wrapper.should_receive(:save_credentials).with('u', 'p') @wrapper.get_credentials.should == [ 'u', 'p' ] end it "writes the credentials to a file" do sandbox = "/tmp/wrapper_spec_#{Process.pid}" FileUtils.rm_rf(sandbox) @wrapper.stub!(:credentials_file).and_return(sandbox) @wrapper.write_credentials('one', 'two') File.read(sandbox).should == "one\ntwo\n" end it "writes credentials and uploads authkey when credentials are saved" do @wrapper.should_receive(:write_credentials).with('a', 'b') @wrapper.should_receive(:upload_authkey) @wrapper.save_credentials('a', 'b') end it "save_credentials deletes the credentials when the upload authkey is unauthorized" do @wrapper.stub!(:write_credentials) @wrapper.should_receive(:upload_authkey).and_raise(Heroku::Client::Unauthorized) @wrapper.should_receive(:delete_credentials) lambda { @wrapper.save_credentials('a', 'b') }.should raise_error(Heroku::Client::Unauthorized) end it "deletes the credentials file" do FileUtils.should_receive(:rm_f).with(@wrapper.credentials_file) @wrapper.delete_credentials end it "uploads the ssh authkey" do 
@wrapper.should_receive(:authkey).and_return('my key') heroku = mock("heroku client") @wrapper.should_receive(:init_heroku).and_return(heroku) heroku.should_receive(:upload_authkey).with('my key') @wrapper.upload_authkey end end context "actions" do before do @wrapper = Heroku::CommandLine.new @wrapper.stub!(:display) @wrapper.stub!(:get_credentials).and_return(%w(user pass)) end it "creates without a name" do @wrapper.heroku.should_receive(:create).with(nil).and_return("untitled-123") @wrapper.create([]) end it "creates with a name" do @wrapper.heroku.should_receive(:create).with('myapp').and_return("myapp") @wrapper.create([ 'myapp' ]) end end end added spec for command_line authkey method require File.dirname(__FILE__) + '/base' describe Heroku::CommandLine do context "credentials" do before do @wrapper = Heroku::CommandLine.new @wrapper.stub!(:display) end it "reads credentials from the credentials file" do sandbox = "/tmp/wrapper_spec_#{Process.pid}" File.open(sandbox, "w") { |f| f.write "user\npass\n" } @wrapper.stub!(:credentials_file).and_return(sandbox) @wrapper.get_credentials.should == %w(user pass) end it "takes the user from the first line and the password from the second line" do @wrapper.stub!(:get_credentials).and_return(%w(user pass)) @wrapper.user.should == 'user' @wrapper.password.should == 'pass' end it "asks for credentials when the file doesn't exist" do sandbox = "/tmp/wrapper_spec_#{Process.pid}" FileUtils.rm_rf(sandbox) @wrapper.stub!(:credentials_file).and_return(sandbox) @wrapper.should_receive(:ask_for_credentials).and_return([ 'u', 'p']) @wrapper.should_receive(:save_credentials).with('u', 'p') @wrapper.get_credentials.should == [ 'u', 'p' ] end it "writes the credentials to a file" do sandbox = "/tmp/wrapper_spec_#{Process.pid}" FileUtils.rm_rf(sandbox) @wrapper.stub!(:credentials_file).and_return(sandbox) @wrapper.write_credentials('one', 'two') File.read(sandbox).should == "one\ntwo\n" end it "writes credentials and uploads authkey 
when credentials are saved" do @wrapper.should_receive(:write_credentials).with('a', 'b') @wrapper.should_receive(:upload_authkey) @wrapper.save_credentials('a', 'b') end it "save_credentials deletes the credentials when the upload authkey is unauthorized" do @wrapper.stub!(:write_credentials) @wrapper.should_receive(:upload_authkey).and_raise(Heroku::Client::Unauthorized) @wrapper.should_receive(:delete_credentials) lambda { @wrapper.save_credentials('a', 'b') }.should raise_error(Heroku::Client::Unauthorized) end it "deletes the credentials file" do FileUtils.should_receive(:rm_f).with(@wrapper.credentials_file) @wrapper.delete_credentials end it "gets the rsa key from the user's home directory" do ENV.should_receive(:[]).with('HOME').and_return('/Users/joe') File.should_receive(:read).with('/Users/joe/.ssh/id_rsa.pub').and_return('ssh-rsa somehexkey') @wrapper.authkey.should == 'ssh-rsa somehexkey' end it "uploads the ssh authkey" do @wrapper.should_receive(:authkey).and_return('my key') heroku = mock("heroku client") @wrapper.should_receive(:init_heroku).and_return(heroku) heroku.should_receive(:upload_authkey).with('my key') @wrapper.upload_authkey end end context "actions" do before do @wrapper = Heroku::CommandLine.new @wrapper.stub!(:display) @wrapper.stub!(:get_credentials).and_return(%w(user pass)) end it "creates without a name" do @wrapper.heroku.should_receive(:create).with(nil).and_return("untitled-123") @wrapper.create([]) end it "creates with a name" do @wrapper.heroku.should_receive(:create).with('myapp').and_return("myapp") @wrapper.create([ 'myapp' ]) end end end
# Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with this # work for additional information regarding copyright ownership. The ASF # licenses this file to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations under # the License. require File.expand_path(File.join(File.dirname(__FILE__), '..', 'spec_helpers')) describe Buildr::Console do describe 'console_dimensions' do it 'should return a value' do Buildr::Console.console_dimensions.should_not be_nil end if $stdout.isatty && !ENV["TRAVIS"] end describe 'color' do describe 'when use_color is true' do before do Buildr::Console.use_color = true end it 'should emit red code when asked' do Buildr::Console.color('message', :red).should eql("\e[31mmessage\e[0m") end it 'should emit green code when asked' do Buildr::Console.color('message', :green).should eql("\e[32mmessage\e[0m") end it 'should emit blue code when asked' do Buildr::Console.color('message', :blue).should eql("\e[34mmessage\e[0m") end end if $stdout.isatty describe ' use_color is false' do before do Buildr::Console.use_color = false end it 'should not emit red code when asked' do Buildr::Console.color('message', :red).should eql("message") end it 'should not emit green code when asked' do Buildr::Console.color('message', :green).should eql("message") end it 'should not emit blue code when asked' do Buildr::Console.color('message', :blue).should eql("message") end end end end Add guard so that tests do not run under 
windows MRI git-svn-id: d8f3215415546ce936cf3b822120ca56e5ebeaa0@1535912 13f79535-47bb-0310-9956-ffa450edef68 # Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with this # work for additional information regarding copyright ownership. The ASF # licenses this file to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations under # the License. require File.expand_path(File.join(File.dirname(__FILE__), '..', 'spec_helpers')) describe Buildr::Console do describe 'console_dimensions' do it 'should return a value' do Buildr::Console.console_dimensions.should_not be_nil end if $stdout.isatty && !ENV["TRAVIS"] && (!Buildr::Util.win_os? || Buildr::Util.java_platform?) end describe 'color' do describe 'when use_color is true' do before do Buildr::Console.use_color = true end it 'should emit red code when asked' do Buildr::Console.color('message', :red).should eql("\e[31mmessage\e[0m") end it 'should emit green code when asked' do Buildr::Console.color('message', :green).should eql("\e[32mmessage\e[0m") end it 'should emit blue code when asked' do Buildr::Console.color('message', :blue).should eql("\e[34mmessage\e[0m") end end if $stdout.isatty && (!Buildr::Util.win_os? || Buildr::Util.java_platform?) 
describe ' use_color is false' do before do Buildr::Console.use_color = false end it 'should not emit red code when asked' do Buildr::Console.color('message', :red).should eql("message") end it 'should not emit green code when asked' do Buildr::Console.color('message', :green).should eql("message") end it 'should not emit blue code when asked' do Buildr::Console.color('message', :blue).should eql("message") end end end end
add spec for serf installation require 'spec_helper' describe command('serf --version') do it { should return_stdout /Serf v[[0-9]+.?]+/ } end describe command('which serf') do it { should return_stdout "/usr/bin/serf" } end
# encoding: utf-8 # vim: ft=ruby expandtab shiftwidth=2 tabstop=2 require 'spec_helper' require 'yaml' require 'shellwords' describe file('/usr/local/share/wp-i18n/makepot.php') do let(:disable_sudo) { true } it { should be_file } end describe command('grunt --version') do let(:disable_sudo) { true } its(:exit_status) { should eq 0 } end describe command('grunt-init --version') do let(:disable_sudo) { true } its(:exit_status) { should eq 0 } end describe file('/home/vagrant/.grunt-init/hatamoto/README.md') do let(:disable_sudo) { true } it { should be_file } end describe file('/home/vagrant/.grunt-init/iemoto/README.md') do let(:disable_sudo) { true } it { should be_file } end describe command('/usr/local/bin/phpunit --version') do let(:disable_sudo) { true } its(:exit_status) { should eq 0 } end describe file('/tmp/wordpress/license.txt') do let(:disable_sudo) { true } it { should be_file } end describe command('/usr/local/bin/composer --version') do let(:disable_sudo) { true } its(:exit_status) { should eq 0 } end describe command('~/.composer/vendor/bin/phpcs --version') do let(:disable_sudo) { true } its(:exit_status) { should eq 0 } end describe file('/vagrant/Movefile') do let(:disable_sudo) { true } it { should be_file } end describe command('wordmove help') do let(:disable_sudo) { true } its(:exit_status) { should eq 0 } end # describe command('wpcs --version') do # let(:disable_sudo) { true } # let(:pre_command) { 'source ~/.bash_profile' } # its(:exit_status) { should eq 0 } # end add tests for wordpress env for phpunit # encoding: utf-8 # vim: ft=ruby expandtab shiftwidth=2 tabstop=2 require 'spec_helper' require 'yaml' require 'shellwords' describe file('/usr/local/share/wp-i18n/makepot.php') do let(:disable_sudo) { true } it { should be_file } end describe command('grunt --version') do let(:disable_sudo) { true } its(:exit_status) { should eq 0 } end describe command('grunt-init --version') do let(:disable_sudo) { true } its(:exit_status) { should eq 0 
} end describe file('/home/vagrant/.grunt-init/hatamoto/README.md') do let(:disable_sudo) { true } it { should be_file } end describe file('/home/vagrant/.grunt-init/iemoto/README.md') do let(:disable_sudo) { true } it { should be_file } end describe command('/usr/local/bin/phpunit --version') do let(:disable_sudo) { true } its(:exit_status) { should eq 0 } end describe file('/tmp/wordpress') do it { should be_directory } it { should be_owned_by 'vagrant' } it { should be_writable.by_user('vagrant') } end describe file('/tmp/wordpress-tests-lib') do it { should be_directory } it { should be_owned_by 'vagrant' } it { should be_writable.by_user('vagrant') } end describe file('/tmp/wordpress.tar.gz') do it { should be_file } it { should be_owned_by 'vagrant' } it { should be_writable.by_user('vagrant') } end describe file('/tmp/wordpress/license.txt') do let(:disable_sudo) { true } it { should be_file } end describe command('/usr/local/bin/composer --version') do let(:disable_sudo) { true } its(:exit_status) { should eq 0 } end describe command('~/.composer/vendor/bin/phpcs --version') do let(:disable_sudo) { true } its(:exit_status) { should eq 0 } end describe file('/vagrant/Movefile') do let(:disable_sudo) { true } it { should be_file } end describe command('wordmove help') do let(:disable_sudo) { true } its(:exit_status) { should eq 0 } end # describe command('wpcs --version') do # let(:disable_sudo) { true } # let(:pre_command) { 'source ~/.bash_profile' } # its(:exit_status) { should eq 0 } # end
# frozen_string_literal: true FactoryGirl.define do TEST_RSA_KEYS = {}.freeze factory :rsa_key, class: OpenSSL::PKey::RSA do transient { bits 2048 } initialize_with do TEST_RSA_KEYS[bits.to_i] ||= OpenSSL::PKey::RSA.new(bits) end skip_create end factory :certificate, class: OpenSSL::X509::Certificate do transient do rsa_key { create(:rsa_key) } subject_dn { "CN=#{SecureRandom.urlsafe_base64}" } issuer_dn { subject_dn } digest_class OpenSSL::Digest::SHA256 end public_key { rsa_key.public_key } issuer { OpenSSL::X509::Name.parse(issuer_dn) } subject { OpenSSL::X509::Name.parse(subject_dn) } not_before { Time.now } not_after { 1.hour.from_now } serial 0 version 2 after(:build) do |cert, attr| cert.sign(attr.rsa_key, attr.digest_class.new) end skip_create end factory :keypair do transient do rsa_key { create(:rsa_key) } x509_certificate { create(:certificate, rsa_key: rsa_key) } end fingerprint do OpenSSL::Digest::SHA1.new(x509_certificate.to_der).to_s.downcase end certificate { x509_certificate.to_pem } key { rsa_key.to_pem } end end Roll back overzealous freeze in keypair factory # frozen_string_literal: true FactoryGirl.define do TEST_RSA_KEYS = {} # rubocop:disable Style/MutableConstant factory :rsa_key, class: OpenSSL::PKey::RSA do transient { bits 2048 } initialize_with do TEST_RSA_KEYS[bits.to_i] ||= OpenSSL::PKey::RSA.new(bits) end skip_create end factory :certificate, class: OpenSSL::X509::Certificate do transient do rsa_key { create(:rsa_key) } subject_dn { "CN=#{SecureRandom.urlsafe_base64}" } issuer_dn { subject_dn } digest_class OpenSSL::Digest::SHA256 end public_key { rsa_key.public_key } issuer { OpenSSL::X509::Name.parse(issuer_dn) } subject { OpenSSL::X509::Name.parse(subject_dn) } not_before { Time.now } not_after { 1.hour.from_now } serial 0 version 2 after(:build) do |cert, attr| cert.sign(attr.rsa_key, attr.digest_class.new) end skip_create end factory :keypair do transient do rsa_key { create(:rsa_key) } x509_certificate { create(:certificate, 
rsa_key: rsa_key) } end fingerprint do OpenSSL::Digest::SHA1.new(x509_certificate.to_der).to_s.downcase end certificate { x509_certificate.to_pem } key { rsa_key.to_pem } end end
require_relative 'spec_helper' require_relative '../lib/gitlab_shell' require_relative '../lib/gitlab_access_status' describe GitlabShell do before do FileUtils.mkdir_p(tmp_repos_path) end after do FileUtils.rm_rf(tmp_repos_path) end subject do ARGV[0] = key_id GitlabShell.new.tap do |shell| shell.stub(exec_cmd: :exec_called) shell.stub(api: api) end end let(:api) do double(GitlabNet).tap do |api| api.stub(discover: { 'name' => 'John Doe' }) api.stub(check_access: GitAccessStatus.new(true)) end end let(:key_id) { "key-#{rand(100) + 100}" } let(:tmp_repos_path) { File.join(ROOT_PATH, 'tmp', 'repositories') } before do GitlabConfig.any_instance.stub(repos_path: tmp_repos_path, audit_usernames: false) end describe :initialize do before { ssh_cmd 'git-receive-pack' } its(:key_id) { should == key_id } its(:repos_path) { should == tmp_repos_path } end describe :parse_cmd do describe 'git' do context 'w/o namespace' do before do ssh_cmd 'git-upload-pack gitlab-ci.git' subject.send :parse_cmd end its(:repo_name) { should == 'gitlab-ci.git' } its(:git_cmd) { should == 'git-upload-pack' } end context 'namespace' do before do ssh_cmd 'git-upload-pack dmitriy.zaporozhets/gitlab-ci.git' subject.send :parse_cmd end its(:repo_name) { should == 'dmitriy.zaporozhets/gitlab-ci.git' } its(:git_cmd) { should == 'git-upload-pack' } end context 'with an invalid number of arguments' do before { ssh_cmd 'foobar' } it "should raise an DisallowedCommandError" do expect { subject.send :parse_cmd }.to raise_error(GitlabShell::DisallowedCommandError) end end end describe 'git-annex' do let(:repo_path) { File.join(tmp_repos_path, 'dzaporozhets/gitlab.git') } before do GitlabConfig.any_instance.stub(git_annex_enabled?: true) # Create existing project FileUtils.mkdir_p(repo_path) cmd = %W(git --git-dir=#{repo_path} init --bare) system(*cmd) ssh_cmd 'git-annex-shell inannex /~/dzaporozhets/gitlab.git SHA256E' subject.send :parse_cmd end its(:repo_name) { should == 'dzaporozhets/gitlab.git' } 
its(:git_cmd) { should == 'git-annex-shell' } it 'should init git-annex' do File.exists?(File.join(tmp_repos_path, 'dzaporozhets/gitlab.git/annex')).should be_true end end end describe :exec do context 'git-upload-pack' do before { ssh_cmd 'git-upload-pack gitlab-ci.git' } after { subject.exec } it "should process the command" do subject.should_receive(:process_cmd).with() end it "should execute the command" do subject.should_receive(:exec_cmd).with("git-upload-pack", File.join(tmp_repos_path, 'gitlab-ci.git')) end it "should set the GL_ID environment variable" do ENV.should_receive("[]=").with("GL_ID", key_id) end it "should log the command execution" do message = "gitlab-shell: executing git command " message << "<git-upload-pack #{File.join(tmp_repos_path, 'gitlab-ci.git')}> " message << "for user with key #{key_id}." $logger.should_receive(:info).with(message) end it "should use usernames if configured to do so" do GitlabConfig.any_instance.stub(audit_usernames: true) $logger.should_receive(:info) { |msg| msg.should =~ /for John Doe/ } end end context 'git-receive-pack' do before { ssh_cmd 'git-receive-pack gitlab-ci.git' } after { subject.exec } it "should process the command" do subject.should_receive(:process_cmd).with() end it "should execute the command" do subject.should_receive(:exec_cmd).with("git-receive-pack", File.join(tmp_repos_path, 'gitlab-ci.git')) end it "should log the command execution" do message = "gitlab-shell: executing git command " message << "<git-receive-pack #{File.join(tmp_repos_path, 'gitlab-ci.git')}> " message << "for user with key #{key_id}." 
$logger.should_receive(:info).with(message) end end context 'arbitrary command' do before { ssh_cmd 'arbitrary command' } after { subject.exec } it "should not process the command" do subject.should_not_receive(:process_cmd) end it "should not execute the command" do subject.should_not_receive(:exec_cmd) end it "should log the attempt" do message = "gitlab-shell: Attempt to execute disallowed command <arbitrary command> by user with key #{key_id}." $logger.should_receive(:warn).with(message) end end context 'no command' do before { ssh_cmd nil } after { subject.exec } it "should call api.discover" do api.should_receive(:discover).with(key_id) end end context "failed connection" do before { ssh_cmd 'git-upload-pack gitlab-ci.git' api.stub(:check_access).and_raise(GitlabNet::ApiUnreachableError) } after { subject.exec } it "should not process the command" do subject.should_not_receive(:process_cmd) end it "should not execute the command" do subject.should_not_receive(:exec_cmd) end end describe 'git-annex' do before do GitlabConfig.any_instance.stub(git_annex_enabled?: true) ssh_cmd 'git-annex-shell commit /~/gitlab-ci.git SHA256' end after { subject.exec } it "should execute the command" do subject.should_receive(:exec_cmd).with("git-annex-shell", "commit", File.join(tmp_repos_path, 'gitlab-ci.git'), "SHA256") end end end describe :validate_access do before { ssh_cmd 'git-upload-pack gitlab-ci.git' } after { subject.exec } it "should call api.check_access" do api.should_receive(:check_access). with('git-upload-pack', 'gitlab-ci.git', key_id, '_any') end it "should disallow access and log the attempt if check_access returns false status" do api.stub(check_access: GitAccessStatus.new(false)) message = "gitlab-shell: Access denied for git command <git-upload-pack gitlab-ci.git> " message << "by user with key #{key_id}." 
$logger.should_receive(:warn).with(message) end end describe :exec_cmd do let(:shell) { GitlabShell.new } before { Kernel.stub!(:exec) } it "uses Kernel::exec method" do Kernel.should_receive(:exec).with(kind_of(Hash), 1, unsetenv_others: true).once shell.send :exec_cmd, 1 end end describe :api do let(:shell) { GitlabShell.new } subject { shell.send :api } it { should be_a(GitlabNet) } end describe :escape_path do let(:shell) { GitlabShell.new } before { File.stub(:absolute_path) { 'y' } } subject { -> { shell.send(:escape_path, 'z') } } it { should raise_error(GitlabShell::InvalidRepositoryPathError) } end def ssh_cmd(cmd) ENV['SSH_ORIGINAL_COMMAND'] = cmd end end Update GitlabShell tests. require_relative 'spec_helper' require_relative '../lib/gitlab_shell' require_relative '../lib/gitlab_access_status' describe GitlabShell do before do FileUtils.mkdir_p(tmp_repos_path) end after do FileUtils.rm_rf(tmp_repos_path) end subject do ARGV[0] = key_id GitlabShell.new(key_id, ssh_cmd).tap do |shell| shell.stub(exec_cmd: :exec_called) shell.stub(api: api) end end let(:api) do double(GitlabNet).tap do |api| api.stub(discover: { 'name' => 'John Doe' }) api.stub(check_access: GitAccessStatus.new(true)) end end let(:key_id) { "key-#{rand(100) + 100}" } let(:ssh_cmd) { nil } let(:tmp_repos_path) { File.join(ROOT_PATH, 'tmp', 'repositories') } before do GitlabConfig.any_instance.stub(repos_path: tmp_repos_path, audit_usernames: false) end describe :initialize do let(:ssh_cmd) { 'git-receive-pack' } its(:key_id) { should == key_id } its(:repos_path) { should == tmp_repos_path } end describe :parse_cmd do describe 'git' do context 'w/o namespace' do let(:ssh_cmd) { 'git-upload-pack gitlab-ci.git' } before do subject.send :parse_cmd end its(:repo_name) { should == 'gitlab-ci.git' } its(:git_cmd) { should == 'git-upload-pack' } end context 'namespace' do let(:ssh_cmd) { 'git-upload-pack dmitriy.zaporozhets/gitlab-ci.git' } before do subject.send :parse_cmd end its(:repo_name) { 
should == 'dmitriy.zaporozhets/gitlab-ci.git' } its(:git_cmd) { should == 'git-upload-pack' } end context 'with an invalid number of arguments' do let(:ssh_cmd) { 'foobar' } it "should raise an DisallowedCommandError" do expect { subject.send :parse_cmd }.to raise_error(GitlabShell::DisallowedCommandError) end end end describe 'git-annex' do let(:repo_path) { File.join(tmp_repos_path, 'dzaporozhets/gitlab.git') } let(:ssh_cmd) { 'git-annex-shell inannex /~/dzaporozhets/gitlab.git SHA256E' } before do GitlabConfig.any_instance.stub(git_annex_enabled?: true) # Create existing project FileUtils.mkdir_p(repo_path) cmd = %W(git --git-dir=#{repo_path} init --bare) system(*cmd) subject.send :parse_cmd end its(:repo_name) { should == 'dzaporozhets/gitlab.git' } its(:git_cmd) { should == 'git-annex-shell' } it 'should init git-annex' do File.exists?(File.join(tmp_repos_path, 'dzaporozhets/gitlab.git/annex')).should be_true end end end describe :exec do context 'git-upload-pack' do let(:ssh_cmd) { 'git-upload-pack gitlab-ci.git' } after { subject.exec } it "should process the command" do subject.should_receive(:process_cmd).with() end it "should execute the command" do subject.should_receive(:exec_cmd).with("git-upload-pack", File.join(tmp_repos_path, 'gitlab-ci.git')) end it "should log the command execution" do message = "gitlab-shell: executing git command " message << "<git-upload-pack #{File.join(tmp_repos_path, 'gitlab-ci.git')}> " message << "for user with key #{key_id}." 
$logger.should_receive(:info).with(message) end it "should use usernames if configured to do so" do GitlabConfig.any_instance.stub(audit_usernames: true) $logger.should_receive(:info) { |msg| msg.should =~ /for John Doe/ } end end context 'git-receive-pack' do let(:ssh_cmd) { 'git-receive-pack gitlab-ci.git' } after { subject.exec } it "should process the command" do subject.should_receive(:process_cmd).with() end it "should execute the command" do subject.should_receive(:exec_cmd).with("git-receive-pack", File.join(tmp_repos_path, 'gitlab-ci.git')) end it "should log the command execution" do message = "gitlab-shell: executing git command " message << "<git-receive-pack #{File.join(tmp_repos_path, 'gitlab-ci.git')}> " message << "for user with key #{key_id}." $logger.should_receive(:info).with(message) end end context 'arbitrary command' do let(:ssh_cmd) { 'arbitrary command' } after { subject.exec } it "should not process the command" do subject.should_not_receive(:process_cmd) end it "should not execute the command" do subject.should_not_receive(:exec_cmd) end it "should log the attempt" do message = "gitlab-shell: Attempt to execute disallowed command <arbitrary command> by user with key #{key_id}." 
$logger.should_receive(:warn).with(message) end end context 'no command' do after { subject.exec } it "should call api.discover" do api.should_receive(:discover).with(key_id) end end context "failed connection" do let(:ssh_cmd) { 'git-upload-pack gitlab-ci.git' } before { api.stub(:check_access).and_raise(GitlabNet::ApiUnreachableError) } after { subject.exec } it "should not process the command" do subject.should_not_receive(:process_cmd) end it "should not execute the command" do subject.should_not_receive(:exec_cmd) end end describe 'git-annex' do let(:ssh_cmd) { 'git-annex-shell commit /~/gitlab-ci.git SHA256' } before do GitlabConfig.any_instance.stub(git_annex_enabled?: true) end after { subject.exec } it "should execute the command" do subject.should_receive(:exec_cmd).with("git-annex-shell", "commit", File.join(tmp_repos_path, 'gitlab-ci.git'), "SHA256") end end end describe :validate_access do let(:ssh_cmd) { 'git-upload-pack gitlab-ci.git' } after { subject.exec } it "should call api.check_access" do api.should_receive(:check_access). with('git-upload-pack', 'gitlab-ci.git', key_id, '_any') end it "should disallow access and log the attempt if check_access returns false status" do api.stub(check_access: GitAccessStatus.new(false)) message = "gitlab-shell: Access denied for git command <git-upload-pack gitlab-ci.git> " message << "by user with key #{key_id}." 
$logger.should_receive(:warn).with(message) end end describe :exec_cmd do let(:shell) { GitlabShell.new(key_id, ssh_cmd) } before { Kernel.stub!(:exec) } it "uses Kernel::exec method" do Kernel.should_receive(:exec).with(kind_of(Hash), 1, unsetenv_others: true).once shell.send :exec_cmd, 1 end end describe :api do let(:shell) { GitlabShell.new(key_id, ssh_cmd) } subject { shell.send :api } it { should be_a(GitlabNet) } end describe :escape_path do let(:shell) { GitlabShell.new(key_id, ssh_cmd) } before { File.stub(:absolute_path) { 'y' } } subject { -> { shell.send(:escape_path, 'z') } } it { should raise_error(GitlabShell::InvalidRepositoryPathError) } end end
require 'spec_helper' RSpec.describe HTTParty::Request do context "SSL certificate verification" do before do WebMock.disable! end after do WebMock.enable! end it "should fail when no trusted CA list is specified, by default" do expect do ssl_verify_test(nil, nil, "selfsigned.crt") end.to raise_error OpenSSL::SSL::SSLError end it "should work when no trusted CA list is specified, when the verify option is set to false" do expect(ssl_verify_test(nil, nil, "selfsigned.crt", verify: false).parsed_response).to eq({'success' => true}) end it "should fail when no trusted CA list is specified, with a bogus hostname, by default" do expect do ssl_verify_test(nil, nil, "bogushost.crt") end.to raise_error OpenSSL::SSL::SSLError end it "should work when no trusted CA list is specified, even with a bogus hostname, when the verify option is set to true" do expect(ssl_verify_test(nil, nil, "bogushost.crt", verify: false).parsed_response).to eq({'success' => true}) end it "should work when using ssl_ca_file with a self-signed CA" do expect(ssl_verify_test(:ssl_ca_file, "selfsigned.crt", "selfsigned.crt").parsed_response).to eq({'success' => true}) end it "should work when using ssl_ca_file with a certificate authority" do expect(ssl_verify_test(:ssl_ca_file, "ca.crt", "server.crt").parsed_response).to eq({'success' => true}) end it "should work when using ssl_ca_path with a certificate authority" do http = Net::HTTP.new('www.google.com', 443) response = double(Net::HTTPResponse, :[] => '', body: '', to_hash: {}) allow(http).to receive(:request).and_return(response) expect(Net::HTTP).to receive(:new).with('www.google.com', 443).and_return(http) expect(http).to receive(:ca_path=).with('/foo/bar') HTTParty.get('https://www.google.com', ssl_ca_path: '/foo/bar') end it "should fail when using ssl_ca_file and the server uses an unrecognized certificate authority" do expect do ssl_verify_test(:ssl_ca_file, "ca.crt", "selfsigned.crt") end.to raise_error(OpenSSL::SSL::SSLError) end it 
"should fail when using ssl_ca_path and the server uses an unrecognized certificate authority" do expect do ssl_verify_test(:ssl_ca_path, ".", "selfsigned.crt") end.to raise_error(OpenSSL::SSL::SSLError) end it "should fail when using ssl_ca_file and the server uses a bogus hostname" do expect do ssl_verify_test(:ssl_ca_file, "ca.crt", "bogushost.crt") end.to raise_error(OpenSSL::SSL::SSLError) end it "should fail when using ssl_ca_path and the server uses a bogus hostname" do expect do ssl_verify_test(:ssl_ca_path, ".", "bogushost.crt") end.to raise_error(OpenSSL::SSL::SSLError) end it "should provide the certificate used by the server via peer_cert" do peer_cert = nil ssl_verify_test(:ssl_ca_file, "ca.crt", "server.crt") do |response| peer_cert ||= response.connection.peer_cert end expect(peer_cert).to be_a OpenSSL::X509::Certificate end end end Fix ssl_spec Fix issue with mock require 'spec_helper' RSpec.describe HTTParty::Request do context "SSL certificate verification" do before do WebMock.disable! end after do WebMock.enable! 
end it "should fail when no trusted CA list is specified, by default" do expect do ssl_verify_test(nil, nil, "selfsigned.crt") end.to raise_error OpenSSL::SSL::SSLError end it "should work when no trusted CA list is specified, when the verify option is set to false" do expect(ssl_verify_test(nil, nil, "selfsigned.crt", verify: false).parsed_response).to eq({'success' => true}) end it "should fail when no trusted CA list is specified, with a bogus hostname, by default" do expect do ssl_verify_test(nil, nil, "bogushost.crt") end.to raise_error OpenSSL::SSL::SSLError end it "should work when no trusted CA list is specified, even with a bogus hostname, when the verify option is set to true" do expect(ssl_verify_test(nil, nil, "bogushost.crt", verify: false).parsed_response).to eq({'success' => true}) end it "should work when using ssl_ca_file with a self-signed CA" do expect(ssl_verify_test(:ssl_ca_file, "selfsigned.crt", "selfsigned.crt").parsed_response).to eq({'success' => true}) end it "should work when using ssl_ca_file with a certificate authority" do expect(ssl_verify_test(:ssl_ca_file, "ca.crt", "server.crt").parsed_response).to eq({'success' => true}) end it "should work when using ssl_ca_path with a certificate authority" do http = Net::HTTP.new('www.google.com', 443) response = double(Net::HTTPResponse, :[] => '', body: '', to_hash: {}, delete: nil) allow(http).to receive(:request).and_return(response) expect(Net::HTTP).to receive(:new).with('www.google.com', 443).and_return(http) expect(http).to receive(:ca_path=).with('/foo/bar') HTTParty.get('https://www.google.com', ssl_ca_path: '/foo/bar') end it "should fail when using ssl_ca_file and the server uses an unrecognized certificate authority" do expect do ssl_verify_test(:ssl_ca_file, "ca.crt", "selfsigned.crt") end.to raise_error(OpenSSL::SSL::SSLError) end it "should fail when using ssl_ca_path and the server uses an unrecognized certificate authority" do expect do ssl_verify_test(:ssl_ca_path, ".", 
"selfsigned.crt") end.to raise_error(OpenSSL::SSL::SSLError) end it "should fail when using ssl_ca_file and the server uses a bogus hostname" do expect do ssl_verify_test(:ssl_ca_file, "ca.crt", "bogushost.crt") end.to raise_error(OpenSSL::SSL::SSLError) end it "should fail when using ssl_ca_path and the server uses a bogus hostname" do expect do ssl_verify_test(:ssl_ca_path, ".", "bogushost.crt") end.to raise_error(OpenSSL::SSL::SSLError) end it "should provide the certificate used by the server via peer_cert" do peer_cert = nil ssl_verify_test(:ssl_ca_file, "ca.crt", "server.crt") do |response| peer_cert ||= response.connection.peer_cert end expect(peer_cert).to be_a OpenSSL::X509::Certificate end end end
require 'spec_helper' describe Gitlab::Gitolite do let(:project) { double('Project', path: 'diaspora') } let(:gitolite_config) { double('Gitlab::GitoliteConfig') } let(:gitolite) { Gitlab::Gitolite.new } before do gitolite.stub(config: gitolite_config) end it { should respond_to :set_key } it { should respond_to :remove_key } it { should respond_to :update_repository } it { should respond_to :create_repository } it { should respond_to :remove_repository } it { gitolite.url_to_repo('diaspora').should == Gitlab.config.gitolite.ssh_path_prefix + "diaspora.git" } it "should call config update" do gitolite_config.should_receive(:update_project!) gitolite.update_repository(project.id) end end fix tests require 'spec_helper' describe Gitlab::Gitolite do let(:project) { double('Project', id: 7, path: 'diaspora') } let(:gitolite_config) { double('Gitlab::GitoliteConfig') } let(:gitolite) { Gitlab::Gitolite.new } before do gitolite.stub(config: gitolite_config) Project.stub(find: project) end it { should respond_to :set_key } it { should respond_to :remove_key } it { should respond_to :update_repository } it { should respond_to :create_repository } it { should respond_to :remove_repository } it { gitolite.url_to_repo('diaspora').should == Gitlab.config.gitolite.ssh_path_prefix + "diaspora.git" } it "should call config update" do gitolite_config.should_receive(:update_project!) gitolite.update_repository(project.id) end end
require 'spec_helper' describe Asset do it "should have content" do a = Asset.create a.errors.messages.should == { :name => ["can't be blank"], :content_type => ["can't be blank"]} a = Asset.create(:content => Url.create!(:url => "http://example.com")) a.errors.messages.should == { :name => ["can't be blank"]} end it "should have a name" do a = Asset.create a.errors.messages.should == {:name => ["can't be blank"], :content_type => ["can't be blank"]} a = Asset.create(:name => "Testname") a.errors.messages.should == {:content_type => ["can't be blank"]} end it "should not fail without a user assigned" do a = Asset.create(:content => Url.create!(:url => "http://example.com"), :name => "Testname") a.save! end end add specs for classifications in Asset nothing changed, just more test coverage require 'spec_helper' describe Asset do it "should have content" do a = Asset.create a.errors.messages.should == { :name => ["can't be blank"], :content_type => ["can't be blank"]} a = Asset.create(:content => Url.create!(:url => "http://example.com")) a.errors.messages.should == { :name => ["can't be blank"]} end it "should have a name" do a = Asset.create a.errors.messages.should == {:name => ["can't be blank"], :content_type => ["can't be blank"]} a = Asset.create(:name => "Testname") a.errors.messages.should == {:content_type => ["can't be blank"]} end it "should not fail without a user assigned" do a = Asset.create(:content => Url.create!(:url => "http://example.com"), :name => "Testname") a.save! 
end context "with classifications" do before(:each) do a = Asset.create!(:content => Url.create!(:url => "http://example.com"), :name => "Testasset") a.classifications.should == [] c1 = Classification.create!(:asset => a) c2 = Classification.create!(:asset => a) @asset_id = a.id end it "should have a list of associated classifications" do a = Asset.find @asset_id a.classifications.should == Classification.all end it "should destroy classifications when object is destroyed" do c_all = Classification.all a = Asset.find @asset_id a.destroy c_all.should_not == Classification.all end end end
require 'rails_helper' RSpec.describe Drama, type: :model do before(:all) do DatabaseCleaner.strategy = :transaction end before(:each) do DatabaseCleaner.start end after(:each) do DatabaseCleaner.clean end let!(:drama) { FactoryGirl.create(:drama, poster_file_name: 'not_a_real_poster.png') } it "fetches a drama" do expect(Drama.fetch.count).to eq(1) end it "returns array of its rating weights" do expect(Drama.fetch.count).to eq(1) end end finished test for all ratings require 'rails_helper' RSpec.describe Drama, type: :model do before(:all) do DatabaseCleaner.strategy = :transaction end before(:each) do DatabaseCleaner.start end after(:each) do DatabaseCleaner.clean end let!(:drama) { FactoryGirl.create(:drama, poster_file_name: 'not_a_real_poster.png') } let!(:rating_one) { FactoryGirl.create(:rating, drama: drama) } let!(:rating_two) { FactoryGirl.create(:rating, drama: drama, weight: 1) } it "fetches a drama" do expect(Drama.fetch.count).to eq(1) end it "returns array of its rating weights" do expect(drama.all_ratings).to eq([5, 1]) end end
require File.dirname(__FILE__) + '/../spec_helper' describe Event do before(:each) do @event = Event.new end describe "in general" do it "should be valid" do event = Event.new(:title => "Event title", :start_time => Time.parse('2008.04.12')) event.should be_valid end it "should add a http:// prefix to urls without one" do event = Event.new(:title => "Event title", :start_time => Time.parse('2008.04.12'), :url => 'google.com') event.should be_valid end end describe "when checking time status" do fixtures :events it "should be old if event ended before today" do events(:old_event).should be_old end it "should be current if event is happening today" do events(:tomorrow).should be_current end it "should be ongoing if it began before today but ends today or later" do events(:ongoing_event).should be_ongoing end it "should be considered a multi-day event if it spans multiple days" do events(:ongoing_event).should be_multiday end it "should be considered a multi-day event if it crosses a day boundry and is longer than the minimum duration (#{Event::MIN_MULTIDAY_DURATION.inspect})" do Event.new(:start_time => Date.today - 1.second, :end_time => Date.today + Event::MIN_MULTIDAY_DURATION).should be_multiday end it "should not be considered a multi-day event if it crosses a day boundry, but is not longer than the minimum duration (#{Event::MIN_MULTIDAY_DURATION.inspect})" do Event.new(:start_time => Date.today - 1.second, :end_time => Date.today - 1.second + Event::MIN_MULTIDAY_DURATION).should_not be_multiday end end describe "dealing with tags" do before(:each) do @tags = "some, tags" @event.title = "Tagging Day" @event.start_time = Time.now end it "should be taggable" do Tag # need to reference Tag class in order to load it. @event.tag_list.should == "" end it "should tag itself if it is an extant record" do # On next line, please retain the space between the "?" and ")"; # it solves a fold issue in the SciTE text editor @event.stub!(:new_record? 
).and_return(false) @event.should_receive(:tag_with).with(@tags).and_return(@event) @event.tag_list = @tags end it "should just cache tagging if it is a new record" do @event.should_not_receive(:save) @event.should_not_receive(:tag_with) @event.new_record?.should == true @event.tag_list = @tags @event.tag_list.should == @tags end it "should tag itself when saved for the first time if there are cached tags" do @event.new_record?.should == true @event.should_receive(:tag_with).with(@tags).and_return(@event) @event.tag_list = @tags @event.save end it "should use tags with punctuation" do tags = [".net", "foo-bar"] @event.tag_list = tags.join(", ") @event.save @event.reload @event.tags.map(&:name).should == tags end it "should not interpret numeric tags as IDs" do tag = "123" @event.tag_list = tag @event.save @event.reload @event.tags.first.name.should == "123" end end describe "when parsing" do before(:each) do @basic_hcal = read_sample('hcal_basic.xml') @basic_venue = mock_model(Venue, :title => 'Argent Hotel, San Francisco, CA') @basic_event = Event.new( :title => 'Web 2.0 Conference', :url => 'http://www.web2con.com/', :start_time => Time.parse('2007-10-05'), :end_time => nil, :venue => @basic_venue) end it "should parse an AbstractEvent into an Event" do event = Event.new(:title => "EventTitle", :description => "EventDescription", :start_time => Time.parse("2008-05-20"), :end_time => Time.parse("2008-05-22")) Event.should_receive(:new).and_return(event) abstract_event = SourceParser::AbstractEvent.new("EventTitle", "EventDescription", Time.parse("2008-05-20"), Time.parse("2008-05-22")) Event.from_abstract_event(abstract_event).should == event end it "should parse an Event into an hCalendar" do actual_hcal = @basic_event.to_hcal actual_hcal.should =~ Regexp.new(@basic_hcal.gsub(/\s+/, '\s+')) # Ignore spacing changes end it "should parse an Event into an iCalendar" do actual_ical = @basic_event.to_ical abstract_events = SourceParser.to_abstract_events(:content => 
actual_ical, :skip_old => false) abstract_events.size.should == 1 abstract_event = abstract_events.first abstract_event.title.should == @basic_event.title abstract_event.url.should == @basic_event.url # TODO implement venue generation #abstract_event.location.title.should == @basic_event.venue.title abstract_event.location.should be_nil end it "should parse an Event into an iCalendar without a URL and generate it" do generated_url = "http://foo.bar/" @basic_event.url = nil actual_ical = @basic_event.to_ical(:url_helper => lambda{|event| generated_url}) abstract_events = SourceParser.to_abstract_events(:content => actual_ical, :skip_old => false) abstract_events.size.should == 1 abstract_event = abstract_events.first abstract_event.title.should == @basic_event.title abstract_event.url.should == generated_url # TODO implement venue generation #abstract_event.location.title.should == @basic_event.venue.title abstract_event.location.should be_nil end end describe "when processing date" do before(:each) do @event = Event.new(:title => "MyEvent") end # TODO: write integration specs for the following 2 tests it "should find all events with duplicate titles" do Event.should_receive(:find_by_sql).with("SELECT DISTINCT a.* from events a, events b WHERE a.id <> b.id AND ( a.title = b.title ) ORDER BY a.title") Event.find_duplicates_by(:title) end it "should find all events with duplicate titles and urls" do Event.should_receive(:find_by_sql).with("SELECT DISTINCT a.* from events a, events b WHERE a.id <> b.id AND ( a.title = b.title AND a.url = b.url ) ORDER BY a.title,a.url") Event.find_duplicates_by([:title,:url]) end it "should fail to validate if end_time is earlier than start time " do @event.start_time = Time.now @event.end_time = @event.start_time - 2.hours @event.save.should be_false @event.should have(1).error_on(:end_time) end it "should fail to validate if start time is set to invalid value" do @event.start_time = "0/0/0" @event.should_not be_valid @event.should 
have(1).error_on(:start_time) end end describe "time_for" do before(:each) do @date = "2009-01-02" @time = "03:45" @date_time = "#{@date} #{@time}" @value = Time.parse(@date_time) end it "should return nil for a NilClass" do Event.time_for(nil).should be_nil end it "should return time for a String" do Event.time_for(@date_time).should == @value end it "should return time for an Array of Strings" do Event.time_for([@date, @time]).should == @value end it "should return time for a Time" do Event.time_for(@value).should == @value end it "should return exception for an invalid date expressed as a String" do Event.time_for("0/0/0").should be_a_kind_of(Exception) end it "should raise exception for an invalid type" do lambda { Event.time_for(Event) }.should raise_error(TypeError) end end describe "when finding by dates" do before(:all) do @today_midnight = Time.today @yesterday = @today_midnight.yesterday @tomorrow = @today_midnight.tomorrow @this_venue = Venue.create!(:title => "This venue") @started_before_today_and_ends_after_today = Event.create!( :title => "Event in progress", :start_time => @yesterday, :end_time => @tomorrow, :venue_id => @this_venue.id) @started_midnight_and_continuing_after = Event.create!( :title => "Midnight start", :start_time => @today_midnight, :end_time => @tomorrow, :venue_id => @this_venue.id) @started_and_ended_yesterday = Event.create!( :title => "Yesterday start", :start_time => @yesterday, :end_time => @yesterday.end_of_day, :venue_id => @this_venue.id) @started_today_and_no_end_time = Event.create!( :title => "nil end time", :start_time => @today_midnight, :end_time => nil, :venue_id => @this_venue.id) @starts_and_ends_tomorrow = Event.create!( :title => "starts and ends tomorrow", :start_time => @tomorrow, :end_time => @tomorrow.end_of_day, :venue_id => @this_venue.id) @starts_after_tomorrow = Event.create!( :title => "Starting after tomorrow", :start_time => @tomorrow + 1.day, :venue_id => @this_venue.id) 
@future_events_for_this_venue = @this_venue.find_future_events end describe "for overview" do # TODO: consider writing the following specs as view specs # either in addition to, or instead of, model specs before(:all) do @overview = Event.select_for_overview end describe "events today" do it "should include events that started before today and end after today" do @overview[:today].should include(@started_before_today_and_ends_after_today) end it "should include events that started earlier today" do @overview[:today].should include(@started_midnight_and_continuing_after) end it "should not include events that ended before today" do @overview[:today].should_not include(@started_and_ended_yesterday) end it "should not include events that start tomorrow" do @overview[:today].should_not include(@starts_and_ends_tomorrow) end end describe "events tomorrow" do it "should not include events that start after tomorrow" do @overview[:tomorrow].should_not include(@starts_after_tomorrow) end end describe "determining if we should show the more link" do it "should set :more? to true if there are events past the future cutoff" do Event.should_receive(:count).with(:conditions => ["start_time > ?", Time.today + 2.weeks]).and_return(10) Event.select_for_overview[:more?].should be_true end it "should set :more? 
to false if there are not events past the future cutoff" do Event.should_receive(:count).with(:conditions => ["start_time > ?", Time.today + 2.weeks]).and_return(0) Event.select_for_overview[:more?].should be_false end end end describe "for future events" do before(:all) do @future_events = Event.find_future_events end it "should include events that started earlier today" do @future_events.should include(@started_midnight_and_continuing_after) end it "should include events with no end time that started today" do @future_events.should include(@started_today_and_no_end_time) end it "should include events that started before today and ended after today" do events = Event.find_future_events("start_time") events.should include(@started_before_today_and_ends_after_today) end it "should include events with no end time that started today" do @future_events.should include(@started_today_and_no_end_time) end it "should not include events that ended before today" do @future_events.should_not include(@started_and_ended_yesterday) end end describe "for future events with venue" do before(:all) do @another_venue = Venue.create!(:title => "Another venue") @future_event_another_venue = Event.create!( :title => "Starting after tomorrow", :start_time => @tomorrow + 1.day, :venue_id => @another_venue.id) @future_event_no_venue = Event.create!( :title => "Starting after tomorrow", :start_time => @tomorrow + 1.day) end # TODO Consider moving these examples elsewhere because they don't appear to relate to this scope. This comment applies to the examples from here... 
it "should include events that started earlier today" do @future_events_for_this_venue.should include(@started_midnight_and_continuing_after) end it "should include events with no end time that started today" do @future_events_for_this_venue.should include(@started_today_and_no_end_time) end it "should include events that started before today and ended after today" do @future_events_for_this_venue.should include(@started_before_today_and_ends_after_today) end it "should not include events that ended before today" do @future_events_for_this_venue.should_not include(@started_and_ended_yesterday) end # TODO ...to here. it "should not include events for another venue" do @future_events_for_this_venue.should_not include(@future_event_another_venue) end it "should not include events with no venue" do @future_events_for_this_venue.should_not include(@future_event_no_venue) end end describe "for date range" do it "should include events that started earlier today" do events = Event.find_by_dates(@today_midnight, @tomorrow, order = "start_time") events.should include(@started_midnight_and_continuing_after) end it "should include events that started before today and end after today" do events = Event.find_by_dates(@today_midnight, @tomorrow, order = "start_time") events.should include(@started_before_today_and_ends_after_today) end it "should not include past events" do events = Event.find_by_dates(@today_midnight, @tomorrow, order = "start_time") events.should_not include(@started_and_ended_yesterday) end it "should exclude events that start after the end of the range" do events = Event.find_by_dates(@tomorrow, @tomorrow, order = "start_time") events.should_not include(@started_today_and_no_end_time) end end end describe "when searching" do it "should find events" do Event.should_receive(:find_with_solr).and_return([]) Event.search("myquery").should be_empty end it "should find events and group them" do current_event = mock_model(Event, :current? 
=> true, :duplicate_of_id => nil) past_event = mock_model(Event, :current? => false, :duplicate_of_id => nil) Event.should_receive(:find_with_solr).and_return([current_event, past_event]) Event.search_grouped_by_currentness("myquery").should == { :current => [current_event], :past => [past_event], } end it "should find events and sort them by event name" do event_Z = Event.new(:title => "Zipadeedoodah", :start_time => (Time.now + 1.week)) event_A = Event.new(:title => "Antidisestablishmentarism", :start_time => (Time.now + 2.weeks)) event_O = Event.new(:title => "Ooooooo! Oooooooooooooo!", :start_time => (Time.now + 3.weeks)) event_o = Event.new(:title => "ommmmmmmmmmm...", :start_time => (Time.now + 4.weeks)) Event.should_receive(:find_with_solr).and_return([event_A, event_Z, event_O, event_o]) Event.search_grouped_by_currentness("myquery", :order => 'name').should == { :current => [event_A, event_o, event_O, event_Z], :past => [] } end end describe "when associating with venues" do fixtures :venues before(:each) do @venue = venues(:cubespace) end it "should not change a venue to a nil venue" do @event.associate_with_venue(nil).should be_nil end it "should associate a venue if one wasn't set before" do @event.associate_with_venue(@venue).should == @venue end it "should change an existing venue to a different one" do @event.venue = venues(:duplicate_venue) @event.associate_with_venue(@venue).should == @venue end it "should clear an existing venue if given a nil venue" do @event.venue = @venue @event.associate_with_venue(nil).should be_nil @event.venue.should be_nil end it "should associate venue by title" do Venue.should_receive(:find_or_initialize_by_title).and_return(@venue) @event.associate_with_venue(@venue.title).should == @venue end it "should associate venue by id" do @event.associate_with_venue(@venue.id).should == @venue end it "should raise an exception if there's a loop in the duplicates chain" do venue1 = stub_model(Venue, :id => 123) venue2 = 
stub_model(Venue, :id => 321, :duplicate_of => venue1) venue1.stub!(:duplicate_of => venue2) Venue.should_receive(:find).and_return do |key| case key when 123 then venue1 when 321 then venue2 else raise ArgumentError, "Unknown key: #{key.inspect}" end end lambda { @event.associate_with_venue(venue1.id) }.should raise_error(DuplicateCheckingError) end it "should raise an exception if associated with an unknown type" do lambda { @event.associate_with_venue(mock_model(SourceParser)) }.should raise_error(TypeError) end describe "and searching" do it "should find events and sort them by venue name" do event_A = Event.new(:title => "Zipadeedoodah", :start_time => (Time.now + 1.week)) event_o = Event.new(:title => "Antidisestablishmentarism", :start_time => (Time.now + 2.weeks)) event_O = Event.new(:title => "Ooooooo! Oooooooooooooo!", :start_time => (Time.now + 3.weeks)) event_Z = Event.new(:title => "ommmmmmmmmmm...", :start_time => (Time.now + 4.weeks)) event_A.venue = Venue.new(:title => "Acme Hotel") event_o.venue = Venue.new(:title => "opbmusic Studios") event_O.venue = Venue.new(:title => "Oz") event_Z.venue = Venue.new(:title => "Zippers and Things") Event.should_receive(:find_with_solr).and_return([event_A, event_Z, event_O, event_o]) Event.search_grouped_by_currentness("myquery", :order => 'venue').should == { :current => [event_A, event_o, event_O, event_Z], :past => [] } end end end describe "with finding duplicates" do it "should find all events with duplicate titles" do Event.should_receive(:find_by_sql).with("SELECT DISTINCT a.* from events a, events b WHERE a.id <> b.id AND ( a.title = b.title ) ORDER BY a.title") Event.find(:duplicates, :by => :title ) end it "should find all events with duplicate titles and urls" do Event.should_receive(:find_by_sql).with("SELECT DISTINCT a.* from events a, events b WHERE a.id <> b.id AND ( a.title = b.title AND a.url = b.url ) ORDER BY a.title,a.url") Event.find(:duplicates, :by => [:title,:url]) end it "should find all 
events that have not been marked as duplicate" do Event.should_receive(:find_without_duplicate_support).with(:all, {}) Event.find(:non_duplicates) end it "should find all events that have been marked as duplicate" do Event.should_receive(:find_without_duplicate_support).with(:all, {}) Event.find(:marked_duplicates) end end describe "with finding duplicates (integration test)" do fixtures :events before(:each) do @event = events(:calagator_codesprint) end # Find duplicates, create another event with the given attributes, and find duplicates again # TODO Refactor #find_duplicates_create_a_clone_and_find_again and its uses into something simpler, like #assert_duplicate_count. def find_duplicates_create_a_clone_and_find_again(find_duplicates_arguments, clone_attributes, create_class = Event) before_results = create_class.find(:duplicates, :by => find_duplicates_arguments) clone = create_class.create!(clone_attributes) after_results = Event.find(:duplicates, :by => find_duplicates_arguments) return [before_results.sort_by(&:created_at), after_results.sort_by(&:created_at)] end it "should find duplicate title by title" do pre, post = find_duplicates_create_a_clone_and_find_again(:title, {:title => @event.title, :start_time => @event.start_time} ) post.size.should == pre.size + 2 end it "should find duplicate title by any" do # TODO figure out why the #find_duplicates_create_a_clone_and_find_again isn't giving expected results and a workaround was needed. 
#pre, post = find_duplicates_create_a_clone_and_find_again(:any, {:title => @event.title, :start_time => @event.start_time} ) #post.size.should == pre.size + 2 dup_title = Event.create!({:title => @event.title, :start_time => @event.start_time + 1.minute}) Event.find(:duplicates, :by => :any).should include(dup_title) end it "should not find duplicate title by url" do pre, post = find_duplicates_create_a_clone_and_find_again(:url, {:title => @event.title, :start_time => @event.start_time} ) post.size.should == pre.size end it "should find complete duplicates by all" do pre, post = find_duplicates_create_a_clone_and_find_again(:all, @event.attributes) post.size.should == pre.size + 2 end it "should not find incomplete duplicates by all" do pre, post = find_duplicates_create_a_clone_and_find_again(:all, @event.attributes.merge(:title => "SpaceCube", :start_time => @event.start_time )) post.size.should == pre.size end it "should find duplicate for matching multiple fields" do pre, post = find_duplicates_create_a_clone_and_find_again([:title, :start_time], {:title => @event.title, :start_time => @event.start_time }) post.size.should == pre.size + 2 end it "should not find duplicates for mismatching multiple fields" do pre, post = find_duplicates_create_a_clone_and_find_again([:title, :start_time], {:title => "SpaceCube", :start_time => @event.start_time }) post.size.should == pre.size end end describe "when squashing duplicates (integration test)" do fixtures :events before(:each) do @event = events(:calagator_codesprint) end it "should consolidate associations, and merge tags" do @event.tag_list = "first, second" # master event contains one duplicate tag, and one unique tag clone = Event.create!(@event.attributes) clone.tag_list = "second, third" # duplicate event also contains one duplicate tag, and one unique tag clone.save! 
clone.reload clone.should_not be_duplicate Event.squash(:master => @event, :duplicates => clone) @event.tag_list.should == "first, second, third" # master now contains all three tags clone.duplicate_of.should == @event end end describe "when checking for squashing" do before(:all) do @today = Time.today @master = Event.create!(:title => "Master", :start_time => @today) @slave1 = Event.create!(:title => "1st slave", :start_time => @today, :duplicate_of_id => @master.id) @slave2 = Event.create!(:title => "2nd slave", :start_time => @today, :duplicate_of_id => @slave1.id) @orphan = Event.create!(:title => "orphan", :start_time => @today, :duplicate_of_id => 999999) end it "should recognize a master" do @master.should be_a_master end it "should recognize a slave" do @slave1.should be_a_slave end it "should not think that a slave is a master" do @slave2.should_not be_a_master end it "should not think that a master is a slave" do @master.should_not be_a_slave end it "should return the progenitor of a child" do @slave1.progenitor.should == @master end it "should return the progenitor of a grandchild" do @slave2.progenitor.should == @master end it "should return a master as its own progenitor" do @master.progenitor.should == @master end it "should return a marked duplicate as progenitor if it is orphaned" do @orphan.progenitor.should == @orphan end it "should return the progenitor if an imported event has an exact duplicate" do @abstract_event = SourceParser::AbstractEvent.new @abstract_event.title = @slave2.title @abstract_event.start_time = @slave2.start_time.to_s Event.from_abstract_event(@abstract_event).should == @master end end describe "when versioning" do it "should have versions" do Event.new.versions.should == [] end it "should create a new version after updating" do event = Event.create!(:title => "Event title", :start_time => Time.parse('2008.04.12')) event.versions.count.should == 1 event.title = "New Title" event.save! 
event.versions.count.should == 2 end end describe "when normalizing line-endings in the description" do before(:each) do @event = Event.new end it "should not molest contents without carriage-returns" do @event.description = "foo\nbar" @event.description.should == "foo\nbar" end it "should replace CRLF with LF" do @event.description = "foo\r\nbar" @event.description.should == "foo\nbar" end it "should replace stand-alone CR with LF" do @event.description = "foo\rbar" @event.description.should == "foo\nbar" end end describe "when cloning" do fixtures :events, :venues before(:each) do @original = events(:calagator_codesprint) @clone = @original.to_clone end it "should be a new record" do @clone.should be_a_new_record end it "should not have an id" do @clone.id.should be_nil end it "should not have start or end time" do @clone.start_time.should be_nil @clone.end_time.should be_nil end it "should duplicate title, description, venue, url and tag_list" do @clone.title.should == @original.title @clone.description.should == @original.description @clone.url.should == @original.url @clone.venue.should == @original.venue @clone.tag_list.should == @original.tag_list end end end Added specs for converting Events to iCalendar format. 
require File.dirname(__FILE__) + '/../spec_helper' describe Event do def valid_event_attributes { :start_time => Time.now, :title => "A newfangled event" } end before(:each) do @event = Event.new end describe "in general" do it "should be valid" do event = Event.new(:title => "Event title", :start_time => Time.parse('2008.04.12')) event.should be_valid end it "should add a http:// prefix to urls without one" do event = Event.new(:title => "Event title", :start_time => Time.parse('2008.04.12'), :url => 'google.com') event.should be_valid end end describe "when checking time status" do fixtures :events it "should be old if event ended before today" do events(:old_event).should be_old end it "should be current if event is happening today" do events(:tomorrow).should be_current end it "should be ongoing if it began before today but ends today or later" do events(:ongoing_event).should be_ongoing end it "should be considered a multi-day event if it spans multiple days" do events(:ongoing_event).should be_multiday end it "should be considered a multi-day event if it crosses a day boundry and is longer than the minimum duration (#{Event::MIN_MULTIDAY_DURATION.inspect})" do Event.new(:start_time => Date.today - 1.second, :end_time => Date.today + Event::MIN_MULTIDAY_DURATION).should be_multiday end it "should not be considered a multi-day event if it crosses a day boundry, but is not longer than the minimum duration (#{Event::MIN_MULTIDAY_DURATION.inspect})" do Event.new(:start_time => Date.today - 1.second, :end_time => Date.today - 1.second + Event::MIN_MULTIDAY_DURATION).should_not be_multiday end end describe "dealing with tags" do before(:each) do @tags = "some, tags" @event.title = "Tagging Day" @event.start_time = Time.now end it "should be taggable" do Tag # need to reference Tag class in order to load it. @event.tag_list.should == "" end it "should tag itself if it is an extant record" do # On next line, please retain the space between the "?" 
and ")"; # it solves a fold issue in the SciTE text editor @event.stub!(:new_record? ).and_return(false) @event.should_receive(:tag_with).with(@tags).and_return(@event) @event.tag_list = @tags end it "should just cache tagging if it is a new record" do @event.should_not_receive(:save) @event.should_not_receive(:tag_with) @event.new_record?.should == true @event.tag_list = @tags @event.tag_list.should == @tags end it "should tag itself when saved for the first time if there are cached tags" do @event.new_record?.should == true @event.should_receive(:tag_with).with(@tags).and_return(@event) @event.tag_list = @tags @event.save end it "should use tags with punctuation" do tags = [".net", "foo-bar"] @event.tag_list = tags.join(", ") @event.save @event.reload @event.tags.map(&:name).should == tags end it "should not interpret numeric tags as IDs" do tag = "123" @event.tag_list = tag @event.save @event.reload @event.tags.first.name.should == "123" end end describe "when parsing" do before(:each) do @basic_hcal = read_sample('hcal_basic.xml') @basic_venue = mock_model(Venue, :title => 'Argent Hotel, San Francisco, CA') @basic_event = Event.new( :title => 'Web 2.0 Conference', :url => 'http://www.web2con.com/', :start_time => Time.parse('2007-10-05'), :end_time => nil, :venue => @basic_venue) end it "should parse an AbstractEvent into an Event" do event = Event.new(:title => "EventTitle", :description => "EventDescription", :start_time => Time.parse("2008-05-20"), :end_time => Time.parse("2008-05-22")) Event.should_receive(:new).and_return(event) abstract_event = SourceParser::AbstractEvent.new("EventTitle", "EventDescription", Time.parse("2008-05-20"), Time.parse("2008-05-22")) Event.from_abstract_event(abstract_event).should == event end it "should parse an Event into an hCalendar" do actual_hcal = @basic_event.to_hcal actual_hcal.should =~ Regexp.new(@basic_hcal.gsub(/\s+/, '\s+')) # Ignore spacing changes end it "should parse an Event into an iCalendar" do actual_ical = 
@basic_event.to_ical abstract_events = SourceParser.to_abstract_events(:content => actual_ical, :skip_old => false) abstract_events.size.should == 1 abstract_event = abstract_events.first abstract_event.title.should == @basic_event.title abstract_event.url.should == @basic_event.url # TODO implement venue generation #abstract_event.location.title.should == @basic_event.venue.title abstract_event.location.should be_nil end it "should parse an Event into an iCalendar without a URL and generate it" do generated_url = "http://foo.bar/" @basic_event.url = nil actual_ical = @basic_event.to_ical(:url_helper => lambda{|event| generated_url}) abstract_events = SourceParser.to_abstract_events(:content => actual_ical, :skip_old => false) abstract_events.size.should == 1 abstract_event = abstract_events.first abstract_event.title.should == @basic_event.title abstract_event.url.should == generated_url # TODO implement venue generation #abstract_event.location.title.should == @basic_event.venue.title abstract_event.location.should be_nil end end describe "when processing date" do before(:each) do @event = Event.new(:title => "MyEvent") end # TODO: write integration specs for the following 2 tests it "should find all events with duplicate titles" do Event.should_receive(:find_by_sql).with("SELECT DISTINCT a.* from events a, events b WHERE a.id <> b.id AND ( a.title = b.title ) ORDER BY a.title") Event.find_duplicates_by(:title) end it "should find all events with duplicate titles and urls" do Event.should_receive(:find_by_sql).with("SELECT DISTINCT a.* from events a, events b WHERE a.id <> b.id AND ( a.title = b.title AND a.url = b.url ) ORDER BY a.title,a.url") Event.find_duplicates_by([:title,:url]) end it "should fail to validate if end_time is earlier than start time " do @event.start_time = Time.now @event.end_time = @event.start_time - 2.hours @event.save.should be_false @event.should have(1).error_on(:end_time) end it "should fail to validate if start time is set to 
invalid value" do @event.start_time = "0/0/0" @event.should_not be_valid @event.should have(1).error_on(:start_time) end end describe "time_for" do before(:each) do @date = "2009-01-02" @time = "03:45" @date_time = "#{@date} #{@time}" @value = Time.parse(@date_time) end it "should return nil for a NilClass" do Event.time_for(nil).should be_nil end it "should return time for a String" do Event.time_for(@date_time).should == @value end it "should return time for an Array of Strings" do Event.time_for([@date, @time]).should == @value end it "should return time for a Time" do Event.time_for(@value).should == @value end it "should return exception for an invalid date expressed as a String" do Event.time_for("0/0/0").should be_a_kind_of(Exception) end it "should raise exception for an invalid type" do lambda { Event.time_for(Event) }.should raise_error(TypeError) end end describe "when finding by dates" do before(:all) do @today_midnight = Time.today @yesterday = @today_midnight.yesterday @tomorrow = @today_midnight.tomorrow @this_venue = Venue.create!(:title => "This venue") @started_before_today_and_ends_after_today = Event.create!( :title => "Event in progress", :start_time => @yesterday, :end_time => @tomorrow, :venue_id => @this_venue.id) @started_midnight_and_continuing_after = Event.create!( :title => "Midnight start", :start_time => @today_midnight, :end_time => @tomorrow, :venue_id => @this_venue.id) @started_and_ended_yesterday = Event.create!( :title => "Yesterday start", :start_time => @yesterday, :end_time => @yesterday.end_of_day, :venue_id => @this_venue.id) @started_today_and_no_end_time = Event.create!( :title => "nil end time", :start_time => @today_midnight, :end_time => nil, :venue_id => @this_venue.id) @starts_and_ends_tomorrow = Event.create!( :title => "starts and ends tomorrow", :start_time => @tomorrow, :end_time => @tomorrow.end_of_day, :venue_id => @this_venue.id) @starts_after_tomorrow = Event.create!( :title => "Starting after tomorrow", 
:start_time => @tomorrow + 1.day, :venue_id => @this_venue.id) @future_events_for_this_venue = @this_venue.find_future_events end describe "for overview" do # TODO: consider writing the following specs as view specs # either in addition to, or instead of, model specs before(:all) do @overview = Event.select_for_overview end describe "events today" do it "should include events that started before today and end after today" do @overview[:today].should include(@started_before_today_and_ends_after_today) end it "should include events that started earlier today" do @overview[:today].should include(@started_midnight_and_continuing_after) end it "should not include events that ended before today" do @overview[:today].should_not include(@started_and_ended_yesterday) end it "should not include events that start tomorrow" do @overview[:today].should_not include(@starts_and_ends_tomorrow) end end describe "events tomorrow" do it "should not include events that start after tomorrow" do @overview[:tomorrow].should_not include(@starts_after_tomorrow) end end describe "determining if we should show the more link" do it "should set :more? to true if there are events past the future cutoff" do Event.should_receive(:count).with(:conditions => ["start_time > ?", Time.today + 2.weeks]).and_return(10) Event.select_for_overview[:more?].should be_true end it "should set :more? 
to false if there are not events past the future cutoff" do Event.should_receive(:count).with(:conditions => ["start_time > ?", Time.today + 2.weeks]).and_return(0) Event.select_for_overview[:more?].should be_false end end end describe "for future events" do before(:all) do @future_events = Event.find_future_events end it "should include events that started earlier today" do @future_events.should include(@started_midnight_and_continuing_after) end it "should include events with no end time that started today" do @future_events.should include(@started_today_and_no_end_time) end it "should include events that started before today and ended after today" do events = Event.find_future_events("start_time") events.should include(@started_before_today_and_ends_after_today) end it "should include events with no end time that started today" do @future_events.should include(@started_today_and_no_end_time) end it "should not include events that ended before today" do @future_events.should_not include(@started_and_ended_yesterday) end end describe "for future events with venue" do before(:all) do @another_venue = Venue.create!(:title => "Another venue") @future_event_another_venue = Event.create!( :title => "Starting after tomorrow", :start_time => @tomorrow + 1.day, :venue_id => @another_venue.id) @future_event_no_venue = Event.create!( :title => "Starting after tomorrow", :start_time => @tomorrow + 1.day) end # TODO Consider moving these examples elsewhere because they don't appear to relate to this scope. This comment applies to the examples from here... 
it "should include events that started earlier today" do @future_events_for_this_venue.should include(@started_midnight_and_continuing_after) end it "should include events with no end time that started today" do @future_events_for_this_venue.should include(@started_today_and_no_end_time) end it "should include events that started before today and ended after today" do @future_events_for_this_venue.should include(@started_before_today_and_ends_after_today) end it "should not include events that ended before today" do @future_events_for_this_venue.should_not include(@started_and_ended_yesterday) end # TODO ...to here. it "should not include events for another venue" do @future_events_for_this_venue.should_not include(@future_event_another_venue) end it "should not include events with no venue" do @future_events_for_this_venue.should_not include(@future_event_no_venue) end end describe "for date range" do it "should include events that started earlier today" do events = Event.find_by_dates(@today_midnight, @tomorrow, order = "start_time") events.should include(@started_midnight_and_continuing_after) end it "should include events that started before today and end after today" do events = Event.find_by_dates(@today_midnight, @tomorrow, order = "start_time") events.should include(@started_before_today_and_ends_after_today) end it "should not include past events" do events = Event.find_by_dates(@today_midnight, @tomorrow, order = "start_time") events.should_not include(@started_and_ended_yesterday) end it "should exclude events that start after the end of the range" do events = Event.find_by_dates(@tomorrow, @tomorrow, order = "start_time") events.should_not include(@started_today_and_no_end_time) end end end describe "when searching" do it "should find events" do Event.should_receive(:find_with_solr).and_return([]) Event.search("myquery").should be_empty end it "should find events and group them" do current_event = mock_model(Event, :current? 
=> true, :duplicate_of_id => nil) past_event = mock_model(Event, :current? => false, :duplicate_of_id => nil) Event.should_receive(:find_with_solr).and_return([current_event, past_event]) Event.search_grouped_by_currentness("myquery").should == { :current => [current_event], :past => [past_event], } end it "should find events and sort them by event name" do event_Z = Event.new(:title => "Zipadeedoodah", :start_time => (Time.now + 1.week)) event_A = Event.new(:title => "Antidisestablishmentarism", :start_time => (Time.now + 2.weeks)) event_O = Event.new(:title => "Ooooooo! Oooooooooooooo!", :start_time => (Time.now + 3.weeks)) event_o = Event.new(:title => "ommmmmmmmmmm...", :start_time => (Time.now + 4.weeks)) Event.should_receive(:find_with_solr).and_return([event_A, event_Z, event_O, event_o]) Event.search_grouped_by_currentness("myquery", :order => 'name').should == { :current => [event_A, event_o, event_O, event_Z], :past => [] } end end describe "when associating with venues" do fixtures :venues before(:each) do @venue = venues(:cubespace) end it "should not change a venue to a nil venue" do @event.associate_with_venue(nil).should be_nil end it "should associate a venue if one wasn't set before" do @event.associate_with_venue(@venue).should == @venue end it "should change an existing venue to a different one" do @event.venue = venues(:duplicate_venue) @event.associate_with_venue(@venue).should == @venue end it "should clear an existing venue if given a nil venue" do @event.venue = @venue @event.associate_with_venue(nil).should be_nil @event.venue.should be_nil end it "should associate venue by title" do Venue.should_receive(:find_or_initialize_by_title).and_return(@venue) @event.associate_with_venue(@venue.title).should == @venue end it "should associate venue by id" do @event.associate_with_venue(@venue.id).should == @venue end it "should raise an exception if there's a loop in the duplicates chain" do venue1 = stub_model(Venue, :id => 123) venue2 = 
stub_model(Venue, :id => 321, :duplicate_of => venue1) venue1.stub!(:duplicate_of => venue2) Venue.should_receive(:find).and_return do |key| case key when 123 then venue1 when 321 then venue2 else raise ArgumentError, "Unknown key: #{key.inspect}" end end lambda { @event.associate_with_venue(venue1.id) }.should raise_error(DuplicateCheckingError) end it "should raise an exception if associated with an unknown type" do lambda { @event.associate_with_venue(mock_model(SourceParser)) }.should raise_error(TypeError) end describe "and searching" do it "should find events and sort them by venue name" do event_A = Event.new(:title => "Zipadeedoodah", :start_time => (Time.now + 1.week)) event_o = Event.new(:title => "Antidisestablishmentarism", :start_time => (Time.now + 2.weeks)) event_O = Event.new(:title => "Ooooooo! Oooooooooooooo!", :start_time => (Time.now + 3.weeks)) event_Z = Event.new(:title => "ommmmmmmmmmm...", :start_time => (Time.now + 4.weeks)) event_A.venue = Venue.new(:title => "Acme Hotel") event_o.venue = Venue.new(:title => "opbmusic Studios") event_O.venue = Venue.new(:title => "Oz") event_Z.venue = Venue.new(:title => "Zippers and Things") Event.should_receive(:find_with_solr).and_return([event_A, event_Z, event_O, event_o]) Event.search_grouped_by_currentness("myquery", :order => 'venue').should == { :current => [event_A, event_o, event_O, event_Z], :past => [] } end end end describe "with finding duplicates" do it "should find all events with duplicate titles" do Event.should_receive(:find_by_sql).with("SELECT DISTINCT a.* from events a, events b WHERE a.id <> b.id AND ( a.title = b.title ) ORDER BY a.title") Event.find(:duplicates, :by => :title ) end it "should find all events with duplicate titles and urls" do Event.should_receive(:find_by_sql).with("SELECT DISTINCT a.* from events a, events b WHERE a.id <> b.id AND ( a.title = b.title AND a.url = b.url ) ORDER BY a.title,a.url") Event.find(:duplicates, :by => [:title,:url]) end it "should find all 
events that have not been marked as duplicate" do Event.should_receive(:find_without_duplicate_support).with(:all, {}) Event.find(:non_duplicates) end it "should find all events that have been marked as duplicate" do Event.should_receive(:find_without_duplicate_support).with(:all, {}) Event.find(:marked_duplicates) end end describe "with finding duplicates (integration test)" do fixtures :events before(:each) do @event = events(:calagator_codesprint) end # Find duplicates, create another event with the given attributes, and find duplicates again # TODO Refactor #find_duplicates_create_a_clone_and_find_again and its uses into something simpler, like #assert_duplicate_count. def find_duplicates_create_a_clone_and_find_again(find_duplicates_arguments, clone_attributes, create_class = Event) before_results = create_class.find(:duplicates, :by => find_duplicates_arguments) clone = create_class.create!(clone_attributes) after_results = Event.find(:duplicates, :by => find_duplicates_arguments) return [before_results.sort_by(&:created_at), after_results.sort_by(&:created_at)] end it "should find duplicate title by title" do pre, post = find_duplicates_create_a_clone_and_find_again(:title, {:title => @event.title, :start_time => @event.start_time} ) post.size.should == pre.size + 2 end it "should find duplicate title by any" do # TODO figure out why the #find_duplicates_create_a_clone_and_find_again isn't giving expected results and a workaround was needed. 
#pre, post = find_duplicates_create_a_clone_and_find_again(:any, {:title => @event.title, :start_time => @event.start_time} ) #post.size.should == pre.size + 2 dup_title = Event.create!({:title => @event.title, :start_time => @event.start_time + 1.minute}) Event.find(:duplicates, :by => :any).should include(dup_title) end it "should not find duplicate title by url" do pre, post = find_duplicates_create_a_clone_and_find_again(:url, {:title => @event.title, :start_time => @event.start_time} ) post.size.should == pre.size end it "should find complete duplicates by all" do pre, post = find_duplicates_create_a_clone_and_find_again(:all, @event.attributes) post.size.should == pre.size + 2 end it "should not find incomplete duplicates by all" do pre, post = find_duplicates_create_a_clone_and_find_again(:all, @event.attributes.merge(:title => "SpaceCube", :start_time => @event.start_time )) post.size.should == pre.size end it "should find duplicate for matching multiple fields" do pre, post = find_duplicates_create_a_clone_and_find_again([:title, :start_time], {:title => @event.title, :start_time => @event.start_time }) post.size.should == pre.size + 2 end it "should not find duplicates for mismatching multiple fields" do pre, post = find_duplicates_create_a_clone_and_find_again([:title, :start_time], {:title => "SpaceCube", :start_time => @event.start_time }) post.size.should == pre.size end end describe "when squashing duplicates (integration test)" do fixtures :events before(:each) do @event = events(:calagator_codesprint) end it "should consolidate associations, and merge tags" do @event.tag_list = "first, second" # master event contains one duplicate tag, and one unique tag clone = Event.create!(@event.attributes) clone.tag_list = "second, third" # duplicate event also contains one duplicate tag, and one unique tag clone.save! 
clone.reload clone.should_not be_duplicate Event.squash(:master => @event, :duplicates => clone) @event.tag_list.should == "first, second, third" # master now contains all three tags clone.duplicate_of.should == @event end end describe "when checking for squashing" do before(:all) do @today = Time.today @master = Event.create!(:title => "Master", :start_time => @today) @slave1 = Event.create!(:title => "1st slave", :start_time => @today, :duplicate_of_id => @master.id) @slave2 = Event.create!(:title => "2nd slave", :start_time => @today, :duplicate_of_id => @slave1.id) @orphan = Event.create!(:title => "orphan", :start_time => @today, :duplicate_of_id => 999999) end it "should recognize a master" do @master.should be_a_master end it "should recognize a slave" do @slave1.should be_a_slave end it "should not think that a slave is a master" do @slave2.should_not be_a_master end it "should not think that a master is a slave" do @master.should_not be_a_slave end it "should return the progenitor of a child" do @slave1.progenitor.should == @master end it "should return the progenitor of a grandchild" do @slave2.progenitor.should == @master end it "should return a master as its own progenitor" do @master.progenitor.should == @master end it "should return a marked duplicate as progenitor if it is orphaned" do @orphan.progenitor.should == @orphan end it "should return the progenitor if an imported event has an exact duplicate" do @abstract_event = SourceParser::AbstractEvent.new @abstract_event.title = @slave2.title @abstract_event.start_time = @slave2.start_time.to_s Event.from_abstract_event(@abstract_event).should == @master end end describe "when versioning" do it "should have versions" do Event.new.versions.should == [] end it "should create a new version after updating" do event = Event.create!(:title => "Event title", :start_time => Time.parse('2008.04.12')) event.versions.count.should == 1 event.title = "New Title" event.save! 
event.versions.count.should == 2 end end describe "when normalizing line-endings in the description" do before(:each) do @event = Event.new end it "should not molest contents without carriage-returns" do @event.description = "foo\nbar" @event.description.should == "foo\nbar" end it "should replace CRLF with LF" do @event.description = "foo\r\nbar" @event.description.should == "foo\nbar" end it "should replace stand-alone CR with LF" do @event.description = "foo\rbar" @event.description.should == "foo\nbar" end end describe "when cloning" do fixtures :events, :venues before(:each) do @original = events(:calagator_codesprint) @clone = @original.to_clone end it "should be a new record" do @clone.should be_a_new_record end it "should not have an id" do @clone.id.should be_nil end it "should not have start or end time" do @clone.start_time.should be_nil @clone.end_time.should be_nil end it "should duplicate title, description, venue, url and tag_list" do @clone.title.should == @original.title @clone.description.should == @original.description @clone.url.should == @original.url @clone.venue.should == @original.venue @clone.tag_list.should == @original.tag_list end end describe "when converting to iCal" do fixtures :events def ical_roundtrip(events, opts = {}) parsed_events = Vpim::Icalendar.decode( Event.to_ical(events, opts) ).first.events if events.is_a?(Event) parsed_events.first else parsed_events end end it "should produce parsable iCal output" do lambda { ical_roundtrip( events(:tomorrow) ) }.should_not raise_error end it "should represent an event without an end time as a 1-hour block" do ical_roundtrip( events(:tomorrow) ).duration.should == 1.hours end it "should set the appropriate end time if one is given" do event = Event.new(valid_event_attributes) event.end_time = event.start_time + 2.hours ical_roundtrip( event ).duration.should == 2.hours end { :summary => :title, :created => :created_at, :lastmod => :updated_at, :description => :description, :url => 
:url, :dtstart => :start_time, :dtstamp => :created_at }.each do |ical_attribute, model_attribute| it "should map the Event's #{model_attribute} attribute to '#{ical_attribute}' in the iCalendar output" do events(:tomorrow).send(model_attribute).should == ical_roundtrip( events(:tomorrow) ).send(ical_attribute) end end it "should call the URL helper to generate a UID" do ical_roundtrip( Event.new(valid_event_attributes), :url_helper => lambda {|e| "UID'D!" }).uid.should == "UID'D!" end it "should strip HTML from the description" do ical_roundtrip( Event.new(valid_event_attributes.merge( :description => "<blink>OMFG HTML IS TEH AWESOME</blink>") ) ).description.should_not include "<blink>" end it "should include tags in the description" do event = events(:tomorrow) event.tag_list = "tags, folksonomy, categorization" ical_roundtrip(event).description.should include event.tag_list end it "should use the event's URL on Calagator if no URL is provided (and a url helper is given)" do ical_roundtrip( Event.create( valid_event_attributes ), :url_helper => lambda{|e| "FAKE"} ).url.should == "FAKE" end it "should create multi-day entries for multi-day events" do event = Event.create( valid_event_attributes.merge(:end_time => valid_event_attributes[:start_time] + 4.days) ) parsed_event = ical_roundtrip( event ) # UTC is used here because we're currently outputting _all_ iCalendar times as UTC. # We really need to make it so that isn't happening. # # FIXME: Time zone data should be included in iCalendar output. Really. start_time = Time.today.utc + Time.today.gmtoff parsed_event.dtstart.should == start_time parsed_event.dtend.should == start_time + 5.days end end end
require 'spec_helper' describe Image do it { should have_db_column(:album_id).of_type(:integer) } it { should have_db_column(:asset).of_type(:string) } it { should have_db_column(:title).of_type(:string) } it { should have_db_column(:title_ua).of_type(:string) } it { should have_db_column(:desc).of_type(:text) } it { should have_db_column(:desc_ua).of_type(:text) } it { should have_db_column(:place).of_type(:string) } it { should have_db_column(:place_ua).of_type(:string) } it { should have_db_column(:date).of_type(:date) } it { should have_db_column(:published_at).of_type(:datetime) } it { should have_db_column(:tags_cache).of_type(:string) } it { should have_db_column(:flickr_photo_id).of_type(:string).with_options(limit: 11) } it { should have_db_column(:deviantart_link).of_type(:string) } it { should have_db_column(:istockphoto_link).of_type(:string) } it { should have_db_column(:shutterstock_link).of_type(:string) } it { should have_db_column(:flickr_comment_time).of_type(:integer).with_options(default: 0) } it { should have_db_column(:is_for_sale).of_type(:boolean).with_options(default: false) } it { should have_db_column(:image_width).of_type(:integer) } it { should have_db_column(:image_height).of_type(:integer) } it { should have_db_index(:album_id) } it { should have_db_index(:published_at) } it { should validate_presence_of(:album) } it { should validate_presence_of(:title) } it { should validate_numericality_of(:flickr_photo_id) } describe 'generators' do before :each do @image = FactoryGirl.create(:image) end it 'should be valid' do expect(@image).to be_valid end end describe 'scopes' do describe '.published' do pending end describe '.from_published_album' do pending end end describe 'instance methods' do before :each do @image = FactoryGirl.build(:image) end it 'should return to_param' do expect(@image.to_param).to eq("#{@image.id}-#{@image.title.parameterize}") end it 'should have published_at_checkbox' do expect(@image.published_at_checkbox).to eq 
@image.published_at.present? end describe 'published_at' do describe 'should be updated' do before :each do @image.published_at = nil end it 'when value is nil' do @image.published_at_checkbox = '1' expect(@image.published_at).to_not eq nil end end describe 'should be preserved' do it 'when value is not nil initially' do published_at_cached = @image.published_at @image.published_at_checkbox = '1' expect(@image.published_at).to eq published_at_cached end end it 'should set published_at to nil' do @image.published_at_checkbox = '0' expect(@image.published_at).to eq nil end end describe 'tags_resolved' do before do @image = FactoryGirl.create(:image) @image.tags_resolved = 'apple, banana, cucumber' end it 'should return well-formatted tags' do expect(@image.reload.tags_resolved).to eq 'apple, banana, cucumber' expect(Tag.count).to eq 3 expect(ImageTag.count).to eq 3 end end end end added pending image scope specs require 'spec_helper' describe Image do it { should have_db_column(:album_id).of_type(:integer) } it { should have_db_column(:asset).of_type(:string) } it { should have_db_column(:title).of_type(:string) } it { should have_db_column(:title_ua).of_type(:string) } it { should have_db_column(:desc).of_type(:text) } it { should have_db_column(:desc_ua).of_type(:text) } it { should have_db_column(:place).of_type(:string) } it { should have_db_column(:place_ua).of_type(:string) } it { should have_db_column(:date).of_type(:date) } it { should have_db_column(:published_at).of_type(:datetime) } it { should have_db_column(:tags_cache).of_type(:string) } it { should have_db_column(:flickr_photo_id).of_type(:string).with_options(limit: 11) } it { should have_db_column(:deviantart_link).of_type(:string) } it { should have_db_column(:istockphoto_link).of_type(:string) } it { should have_db_column(:shutterstock_link).of_type(:string) } it { should have_db_column(:flickr_comment_time).of_type(:integer).with_options(default: 0) } it { should 
have_db_column(:is_for_sale).of_type(:boolean).with_options(default: false) } it { should have_db_column(:image_width).of_type(:integer) } it { should have_db_column(:image_height).of_type(:integer) } it { should have_db_index(:album_id) } it { should have_db_index(:published_at) } it { should validate_presence_of(:album) } it { should validate_presence_of(:title) } it { should validate_numericality_of(:flickr_photo_id) } describe 'generators' do before :each do @image = FactoryGirl.create(:image) end it 'should be valid' do expect(@image).to be_valid end end describe 'scopes' do describe '.published' do before do @image_1 = FactoryGirl.create(:image, published_at: 1.minute.ago) @image_2 = FactoryGirl.create(:image, published_at: 1.minutes.ago) image_3 = FactoryGirl.create(:image, published_at: nil) end it 'returns published images' do expect(Image.published.map(&:id)).to match_array([@image_1.id, @image_2.id]) end end describe '.from_published_albums' do before do album_1 = FactoryGirl.create(:album, is_published: true, is_upload_to_stock: true) @image_1 = FactoryGirl.create(:image, album: album_1) album_2 = FactoryGirl.create(:album, is_published: true, is_upload_to_stock: true) @image_2 = FactoryGirl.create(:image, album: album_2) album_3 = FactoryGirl.create(:album, is_published: true, is_upload_to_stock: false) image_3 = FactoryGirl.create(:image, album: album_3) album_4 = FactoryGirl.create(:album, is_published: false, is_upload_to_stock: true) image_4 = FactoryGirl.create(:image, album: album_4) end it 'returns images from published Albums' do expect(Image.from_published_albums.map(&:id)).to match_array([@image_1.id, @image_2.id]) end end end describe 'instance methods' do before :each do @image = FactoryGirl.build(:image) end it 'should return to_param' do expect(@image.to_param).to eq("#{@image.id}-#{@image.title.parameterize}") end it 'should have published_at_checkbox' do expect(@image.published_at_checkbox).to eq @image.published_at.present? 
end describe 'published_at' do describe 'should be updated' do before :each do @image.published_at = nil end it 'when value is nil' do @image.published_at_checkbox = '1' expect(@image.published_at).to_not eq nil end end describe 'should be preserved' do it 'when value is not nil initially' do published_at_cached = @image.published_at @image.published_at_checkbox = '1' expect(@image.published_at).to eq published_at_cached end end it 'should set published_at to nil' do @image.published_at_checkbox = '0' expect(@image.published_at).to eq nil end end describe 'tags_resolved' do before do @image = FactoryGirl.create(:image) @image.tags_resolved = 'apple, banana, cucumber' end it 'should return well-formatted tags' do expect(@image.reload.tags_resolved).to eq 'apple, banana, cucumber' expect(Tag.count).to eq 3 expect(ImageTag.count).to eq 3 end end end end
require 'spec_helper' describe Image do let(:valid_attributes) do extend ActionDispatch::TestProcess { :user_id => 1, :file => fixture_file_upload('chicken_rice.jpg') } end it 'will have a key' do image = Image.create! valid_attributes Image.all.should have(1).item Image.first.key.should match(/\S{6}/) end end describe 'never show recent uploads from non-featured profiles' do let(:featured_valid_attributes) do extend ActionDispatch::TestProcess { :user_id => 1, :file => fixture_file_upload('chicken_rice.jpg') } end let(:valid_attributes) do extend ActionDispatch::TestProcess { :user_id => 2, :file => fixture_file_upload('chicken_rice.jpg') } end it 'will never get recent images from non-featured profiles' do show_on_homepage = Image.create! featured_valid_attributes cannot_show_on_homepage = Image.create! valid_attributes featured_profile = create(:user) # User ID 1. regular_profile = create(:user) # User ID 2. featured_profile.featured = true featured_profile.save Image.recently_uploaded.should eq([show_on_homepage]) end end Testing behaviour of public images in this block require 'spec_helper' describe Image do let(:valid_attributes) do extend ActionDispatch::TestProcess { :user_id => 1, :file => fixture_file_upload('chicken_rice.jpg') } end it 'will have a key' do image = Image.create! valid_attributes Image.all.should have(1).item Image.first.key.should match(/\S{6}/) end end describe 'never show recent uploads from non-featured profiles' do let(:featured_valid_attributes) do extend ActionDispatch::TestProcess { :user_id => 1, :file => fixture_file_upload('chicken_rice.jpg'), :public => true } end let(:valid_attributes) do extend ActionDispatch::TestProcess { :user_id => 2, :file => fixture_file_upload('chicken_rice.jpg'), :public => true } end it 'will never get recent images from non-featured profiles' do show_on_homepage = Image.create! featured_valid_attributes cannot_show_on_homepage = Image.create! 
valid_attributes featured_profile = create(:user) # User ID 1. regular_profile = create(:user) # User ID 2. featured_profile.featured = true featured_profile.save Image.recently_uploaded.should eq([show_on_homepage]) end end
require 'rails_helper' RSpec.describe Paper, type: :model do pending "add some examples to (or delete) #{__FILE__}" it "Should not validate without title" do @paper = Paper.new(venue: "mind 49: 433-460", year: 1950) expect(@paper).to_not be_valid end end Failing test for #31 require 'rails_helper' RSpec.describe Paper, type: :model do pending "add some examples to (or delete) #{__FILE__}" it "Should not validate without title" do @paper = Paper.new(venue: "mind 49: 433-460", year: 1950) expect(@paper).to_not be_valid end it "Should not validate without venue" do @paper = Paper.new(title: "Add interesting title here", year: 1950) expect(@paper).to_not be_valid end it "Should not validate without year" do @paper = Paper.new(title: "Add interesting title here", venue: "mind 49: 433-460") expect(@paper).to_not be_valid end end
# Model specs for Reply: icon availability, author notification mailing,
# author bookkeeping, and reply ordering.
#
# NOTE(review): this span contained two pasted revisions of the spec separated
# by the raw commit message "Split another test", which is not valid Ruby and
# breaks parsing. Consolidated here to the later revision, in which the
# combined "sends to all other active authors" example was split into separate
# focused examples (notifications off, emailless users, opted-out-of-owed).
RSpec.describe Reply do
  describe "#has_icons?" do
    let(:user) { create(:user) }

    context "without character" do
      let(:reply) { create(:reply, user: user) }

      it "is true with avatar" do
        icon = create(:icon, user: user)
        user.update!(avatar: icon)
        user.reload
        expect(reply.character).to be_nil
        expect(reply.has_icons?).to eq(true)
      end

      it "is false without avatar" do
        expect(reply.character).to be_nil
        expect(reply.has_icons?).not_to eq(true)
      end
    end

    context "with character" do
      let(:character) { create(:character, user: user) }
      let(:reply) { create(:reply, user: user, character: character) }

      it "is true with default icon" do
        icon = create(:icon, user: user)
        character.update!(default_icon: icon)
        expect(reply.has_icons?).to eq(true)
      end

      it "is false without galleries" do
        expect(reply.has_icons?).not_to eq(true)
      end

      it "is true with icons in galleries" do
        gallery = create(:gallery, user: user)
        gallery.icons << create(:icon, user: user)
        character.galleries << gallery
        expect(reply.has_icons?).to eq(true)
      end

      it "is false without icons in galleries" do
        character.galleries << create(:gallery, user: user)
        expect(reply.has_icons?).not_to eq(true)
      end
    end
  end

  describe "#notify_other_authors" do
    before(:each) do
      # Clear the resque_spec queue so have_queue_size_of counts only this example's jobs.
      ResqueSpec.reset!
    end

    it "does nothing if skip_notify is set" do
      notified_user = create(:user, email_notifications: true)
      post = create(:post, user: notified_user)
      create(:reply, post: post, skip_notify: true)
      expect(UserMailer).to have_queue_size_of(0)
    end

    it "does nothing if the previous reply was yours" do
      notified_user = create(:user, email_notifications: true)
      post = create(:post, user: notified_user)
      reply = create(:reply, post: post, skip_notify: true)
      create(:reply, post: post, user: reply.user)
      expect(UserMailer).to have_queue_size_of(0)
    end

    it "does nothing if the post was yours on the first reply" do
      notified_user = create(:user, email_notifications: true)
      post = create(:post, user: notified_user)
      create(:reply, post: post, user: notified_user)
      expect(UserMailer).to have_queue_size_of(0)
    end

    it "does not send to authors with notifications off" do
      post = create(:post)
      expect(post.user.email_notifications).not_to eq(true)
      create(:reply, post: post)
      expect(UserMailer).to have_queue_size_of(0)
    end

    it "does not send to emailless users" do
      user = create(:user)
      user.update_columns(email: nil) # rubocop:disable Rails/SkipsModelValidations
      post = create(:post, user: user)
      create(:reply, post: post)
      expect(UserMailer).to have_queue_size_of(0)
    end

    it "does not send to users who have opted out of owed" do
      user = create(:user, email_notifications: true)
      post = create(:post, user: user)
      post.opt_out_of_owed(user)
      create(:reply, post: post)
      expect(UserMailer).to have_queue_size_of(0)
    end

    it "sends to all other active authors if previous reply wasn't yours" do
      notified_user = create(:user, email_notifications: true)
      post = create(:post, user: notified_user)
      another_notified_user = create(:user, email_notifications: true)
      create(:reply, user: another_notified_user, post: post, skip_notify: true)
      reply = create(:reply, post: post)
      expect(UserMailer).to have_queue_size_of(2)
      expect(UserMailer).to have_queued(:post_has_new_reply, [notified_user.id, reply.id])
      expect(UserMailer).to have_queued(:post_has_new_reply, [another_notified_user.id, reply.id])
    end

    it "sends if the post was yours but previous reply wasn't" do
      notified_user = create(:user, email_notifications: true)
      post = create(:post, user: notified_user)
      another_notified_user = create(:user, email_notifications: true)
      create(:reply, user: another_notified_user, post: post, skip_notify: true)
      reply = create(:reply, post: post, user: notified_user)
      expect(UserMailer).to have_queue_size_of(1)
      expect(UserMailer).to have_queued(:post_has_new_reply, [another_notified_user.id, reply.id])
    end
  end

  describe "authors interactions" do
    it "does not update can_owe upon creating a reply" do
      post = create(:post)
      reply = create(:reply, post: post)
      expect(post.author_for(reply.user).can_owe).to be(true)
      create(:reply, user: reply.user, post: post)
      expect(post.author_for(reply.user).can_owe).to be(true)

      # Flipping can_owe off by hand must survive subsequent replies.
      author = post.author_for(reply.user)
      author.can_owe = false
      author.save!
      expect(post.author_for(reply.user).can_owe).to be(false)
      create(:reply, user: reply.user, post: post)
      expect(post.author_for(reply.user).can_owe).to be(false)
    end
  end

  describe ".ordered" do
    let(:post) { create(:post) }

    it "orders replies" do
      first_reply = create(:reply, post: post)
      second_reply = create(:reply, post: post)
      third_reply = create(:reply, post: post)
      expect(post.replies.ordered).to eq([first_reply, second_reply, third_reply])
    end

    it "orders replies by reply_order, not created_at" do
      first_reply = Timecop.freeze(post.created_at + 1.second) { create(:reply, post: post) }
      second_reply = Timecop.freeze(first_reply.created_at - 5.seconds) { create(:reply, post: post) }
      third_reply = Timecop.freeze(first_reply.created_at - 3.seconds) { create(:reply, post: post) }
      expect(post.replies.ordered).not_to eq(post.replies.order(:created_at))
      expect(post.replies.order(:created_at)).to eq([second_reply, third_reply, first_reply])
      expect(post.replies.ordered).to eq([first_reply, second_reply, third_reply])
    end

    it "orders replies by reply order not ID" do
      first_reply = create(:reply, post: post)
      second_reply = create(:reply, post: post)
      third_reply = create(:reply, post: post)
      second_reply.update_columns(reply_order: 2) # rubocop:disable Rails/SkipsModelValidations
      third_reply.update_columns(reply_order: 1) # rubocop:disable Rails/SkipsModelValidations
      expect(post.replies.ordered).to eq([first_reply, third_reply, second_reply])
    end
  end
end
require File.dirname(__FILE__) + '/../spec_helper.rb' describe Taxon do elastic_models( Observation, Taxon ) before(:all) do load_test_taxa @taxon = @Calypte_anna end it "should have a working #grafted method" do expect(@taxon).to respond_to(:grafted?) expect(@taxon.grafted?).to be(true) ungrafted = Taxon.create( :name => 'Pseudacris crucifer', # Spring Peeper :rank => 'species' ) expect(ungrafted.grafted?).to be(false) expect(@Animalia.grafted?).to be(true) end it "species_or_lower? should be false for Animalia" do expect(@Animalia.species_or_lower?).to be(false) end it "species_or_lower? should be true for Pseudacris regilla" do expect(@Pseudacris_regilla.species_or_lower?).to be(true) end it "has rank levels for stateofmatter and root" do expect( Taxon::STATEOFMATTER_LEVEL ).to eq 100 expect( Taxon::ROOT_LEVEL ).to eq 100 expect( Taxon::ROOT_LEVEL ).to eq Taxon::STATEOFMATTER_LEVEL end end describe Taxon, "creation" do elastic_models( Observation, Taxon ) it "should set an iconic taxon if this taxon was grafted" do load_test_taxa taxon = Taxon.make!( name: "Pseudacris imaginarius", rank: Taxon::SPECIES ) taxon.parent = @Pseudacris taxon.save! expect( taxon ).to be_grafted taxon.reload expect( taxon.iconic_taxon ).to eq @Amphibia end it "should create a taxon name with the same name after save" do t = Taxon.make! expect( t.taxon_names ).not_to be_empty expect( t.taxon_names.map(&:name) ).to include( t.name ) end it "should create a taxon name with the same name after save even if invalid on source_identifier" do source_identifier = "foo" source = Source.make! 
existing = TaxonName.make!(:source => source, :source_identifier => source_identifier) t = Taxon.make!(:source => source, :source_identifier => source_identifier) expect(t.taxon_names.map(&:name)).to include(t.name) end it "should capitalize its name" do taxon = Taxon.new(:name => 'balderdash', :rank => 'genus') taxon.save expect(taxon.name).to eq 'Balderdash' end it "should capitalize genushybrids with leading x correclty" do taxon = Taxon.make!( name: "× chitalpa", rank: Taxon::GENUSHYBRID ) expect( taxon.name ).to eq "× Chitalpa" taxon = Taxon.make!( name: "× Chitalpa", rank: Taxon::GENUSHYBRID ) expect( taxon.name ).to eq "× Chitalpa" end it "should capitalize Foo x Bar style genushybrids correctly" do taxon = Taxon.make!( name: "foo × bar", rank: Taxon::GENUSHYBRID ) expect( taxon.name ).to eq "Foo × Bar" taxon = Taxon.make!( name: "Foo × Bar", rank: Taxon::GENUSHYBRID ) expect( taxon.name ).to eq "Foo × Bar" end it "should capitalize hybrid species in genushybrids correctly" do taxon = Taxon.make!( name: "Foo bar × Baz roq", rank: Taxon::HYBRID ) expect( taxon.name ).to eq "Foo bar × Baz roq" end it "should not fail on poorly-formatted hybrid names" do [ "Carex × leutzii pseudofulva", "Calystegia sepium roseata × c tuguriorum" ].each do |name| taxon = Taxon.make!( name: name, rank: Taxon::HYBRID ) expect( taxon ).to be_valid end end it "should capitalize hybrid names of the form Genus species1 x species2" do taxon = Taxon.make!( name: "genusone speciesone × speciestwo", rank: Taxon::HYBRID ) expect( taxon.name ).to eq "Genusone speciesone × speciestwo" end it "should set the rank_level based on the rank" do t = Taxon.make! expect( t.rank_level ).to eq Taxon::RANK_LEVELS[t.rank] end it "should remove leading rank from the name" do t = Taxon.make!( name: "Gen Pseudacris" ) expect( t.name ).to eq "Pseudacris" end it "should remove internal 'var' from name" do t = Taxon.make!( name: "Quercus agrifolia var. 
agrifolia" ) expect( t.name ).to eq "Quercus agrifolia agrifolia" end it "should remove internal 'ssp' from name" do t = Taxon.make!( name: "Quercus agrifolia ssp. agrifolia" ) expect( t.name ).to eq "Quercus agrifolia agrifolia" end it "should remove internal 'subsp' from name" do t = Taxon.make!( name: "Quercus agrifolia subsp. agrifolia" ) expect( t.name ).to eq "Quercus agrifolia agrifolia" end it "should allow fo as a specific epithet" do name = "Mahafalytenus fo" t = Taxon.make!( name: name ) expect( t.name ).to eq name end it "should create TaxonAncestors" do parent = Taxon.make!( rank: Taxon::GENUS ) t = Taxon.make!( rank: Taxon::SPECIES, parent: parent ) t.reload expect( t.taxon_ancestors ).not_to be_blank end it "should strip trailing space" do expect( Taxon.make!( name: "Trailing space " ).name ).to eq "Trailing space" end it "should strip leading space" do expect( Taxon.make!( name: " Leading space" ).name ).to eq "Leading space" end it "should prevent creating a taxon with a rank coarser than the parent" do parent = Taxon.make!( rank: Taxon::GENUS ) taxon = Taxon.new(name: 'balderdash', rank: Taxon::FAMILY, parent: parent ) taxon.save taxon.valid? expect(taxon.errors).not_to be_blank end it "should prevent creating an active taxon with an inactive parent" do parent = Taxon.make!( rank: Taxon::GENUS, is_active: false ) taxon = Taxon.new(name: 'balderdash', rank: Taxon::SPECIES, parent: parent ) taxon.save expect(taxon.errors).not_to be_blank end it "should allow creating an active taxon with an inactive parent if output of draft taxon change" do input_taxon = Taxon.make!( rank: Taxon::GENUS, is_active: true ) output_taxon = Taxon.make!( rank: Taxon::GENUS, is_active: false ) swap = TaxonSwap.make swap.add_input_taxon(input_taxon) swap.add_output_taxon(output_taxon) swap.save! taxon = Taxon.new(name: 'balderdash', rank: Taxon::SPECIES, parent: output_taxon ) taxon.save taxon.valid? 
expect(taxon.errors).to be_blank end it "should prevent grafting an active taxon to an inactive parent" do parent = Taxon.make!( rank: Taxon::GENUS, is_active: false ) taxon = Taxon.make!(name: 'balderdash', rank: Taxon::SPECIES) expect(taxon.parent_id).not_to be(parent.id) taxon.parent = parent taxon.save taxon.reload expect(taxon.parent_id).not_to be(parent.id) end it "should allow grafting an active taxon to an inactive parent if output of draft taxon change" do input_taxon = Taxon.make!( rank: Taxon::GENUS, is_active: true ) output_taxon = Taxon.make!( rank: Taxon::GENUS, is_active: false ) swap = TaxonSwap.make swap.add_input_taxon(input_taxon) swap.add_output_taxon(output_taxon) swap.save! taxon = Taxon.make!(name: 'balderdash', rank: Taxon::SPECIES) expect(taxon.parent_id).not_to be(output_taxon.id) taxon.parent = output_taxon taxon.save taxon.reload expect(taxon.parent_id).to be(output_taxon.id) end end describe Taxon, "updating" do elastic_models( Observation, Taxon ) it "should update the ancestry col of all associated listed_taxa" it "should not destroy photos that have observations" do t = Taxon.make! o = Observation.make! p = Photo.make! t.photos << p make_observation_photo( observation: o, photo: p ) t.photos = [Photo.make!] o.reload expect(o.photos).not_to be_blank end it "should strip trailing space" do t = Taxon.make!( name: "No trailing space" ) t.update_attributes( name: "Trailing space " ) expect( t.name ).to eq "Trailing space" end it "should strip leading space" do t = Taxon.make!( name: "No leading space" ) t.update_attributes( name: " Leading space" ) expect( t.name ).to eq "Leading space" end it "should prevent updating a taxon rank to be coarser than the parent" do parent = Taxon.make!( rank: Taxon::GENUS ) taxon = Taxon.new(name: 'balderdash', rank: Taxon::SPECIES, parent: parent ) taxon.save taxon.valid? 
expect(taxon.errors).to be_blank taxon.update_attributes( rank: Taxon::FAMILY ) expect(taxon.errors).not_to be_blank end it "should prevent updating a taxon rank to be same rank as child" do parent = Taxon.make!( rank: Taxon::GENUS ) taxon = Taxon.new(name: 'balderdash', rank: Taxon::SPECIES, parent: parent ) taxon.save taxon.valid? expect(taxon.errors).to be_blank parent.update_attributes( rank: Taxon::SPECIES ) expect(parent.errors).not_to be_blank end it "should prevent updating a taxon to be inactive if it has active children" do taxon = Taxon.make!(name: 'balderdash', rank: Taxon::GENUS ) child = Taxon.make!(name: 'balderdash foo', rank: Taxon::SPECIES, parent: taxon ) taxon.valid? expect(taxon.errors).to be_blank taxon.update_attributes( is_active: false ) expect(taxon.errors).not_to be_blank end it "should allow updating a taxon to be inactive if it has active children but move children is checked" do taxon = Taxon.make!(name: 'balderdash', rank: Taxon::GENUS ) child = Taxon.make!(name: 'balderdash foo', rank: Taxon::SPECIES, parent: taxon ) taxon.valid? expect(taxon.errors).to be_blank taxon.update_attributes( is_active: false, skip_only_inactive_children_if_inactive: true ) expect(taxon.errors).to be_blank end it "should prevent updating a taxon to be active if it has an inactive parent" do parent = Taxon.make!(name: 'balderdash', rank: Taxon::GENUS, is_active: false ) taxon = Taxon.make!(name: 'balderdash foo', rank: Taxon::SPECIES, parent: parent, is_active: false ) taxon.valid? expect(taxon.errors).to be_blank taxon.update_attributes( is_active: true ) expect(taxon.errors).not_to be_blank end it "should allow updating a taxon to be active if it has an inactive parent if output of draft taxon change" do input_taxon = Taxon.make!( rank: Taxon::GENUS, is_active: true ) output_taxon = Taxon.make!(name: 'balderdash', rank: Taxon::GENUS, is_active: false ) swap = TaxonSwap.make swap.add_input_taxon(input_taxon) swap.add_output_taxon(output_taxon) swap.save! 
taxon = Taxon.make!(name: 'balderdash foo', rank: Taxon::SPECIES, parent: output_taxon, is_active: false ) taxon.valid? expect(taxon.errors).to be_blank taxon.update_attributes( is_active: true ) expect(taxon.errors).to be_blank end describe "auto_description" do it "should remove the wikipedia_summary when it changes to false" do t = Taxon.make!( wikipedia_summary: "foo" ) expect( t.wikipedia_summary ).not_to be_blank t.update_attributes( auto_description: false ) t.reload expect( t.wikipedia_summary ).to be_blank end end it "should assign the updater if explicitly assigned" do creator = make_curator updater = make_curator t = Taxon.make!( creator: creator, updater: creator, rank: Taxon::FAMILY ) expect( t.updater ).to eq creator t.reload t.update_attributes( rank: Taxon::GENUS, updater: updater ) t.reload expect( t.updater ).to eq updater end it "should nilify the updater if not explicitly assigned" do creator = make_curator updater = make_curator t = Taxon.make!( creator: creator, updater: creator, rank: Taxon::FAMILY ) expect( t.updater ).to eq creator t = Taxon.find_by_id( t.id ) t.update_attributes( rank: Taxon::GENUS ) t.reload expect( t.updater ).to be_blank end describe "reindexing identifications" do elastic_models( Identification ) it "should happen when the rank_level changes" do t = Taxon.make!( rank: Taxon::SUBCLASS ) i = Identification.make!( taxon: t ) Delayed::Worker.new.work_off t.reload expect( t.rank_level ).to eq Taxon::SUBCLASS_LEVEL i_es = Identification.elastic_search( where: { id: i.id } ).results.results.first expect( i_es.taxon.rank_level ).to eq t.rank_level t.update_attributes( rank: Taxon::CLASS ) Delayed::Worker.new.work_off t.reload expect( t.rank_level ).to eq Taxon::CLASS_LEVEL i_es = Identification.elastic_search( where: { id: i.id } ).results.results.first expect( i_es.taxon.rank_level ).to eq t.rank_level end end end describe Taxon, "destruction" do elastic_models( Observation, Taxon ) it "should work" do Taxon.make!.destroy end 
it "should queue a job to destroy descendants if orphaned" do load_test_taxa Delayed::Job.delete_all stamp = Time.now @Apodiformes.destroy jobs = Delayed::Job.where("created_at >= ?", stamp) expect(jobs.select{|j| j.handler =~ /apply_orphan_strategy/m}).not_to be_blank end end describe Taxon, "orphan descendant destruction" do elastic_models( Observation, Taxon ) before(:each) do load_test_taxa end it "should work" do child_ancestry_was = @Apodiformes.child_ancestry @Apodiformes.update_attributes(:parent => nil) Taxon.update_descendants_with_new_ancestry(@Apodiformes.id, child_ancestry_was) expect(@Apodiformes.descendants).to include(@Calypte_anna) child_ancestry_was = @Apodiformes.child_ancestry @Apodiformes.destroy Taxon.apply_orphan_strategy(child_ancestry_was) expect(Taxon.find_by_id(@Calypte_anna.id)).to be_blank end end describe Taxon, "making iconic" do before(:each) do load_test_taxa end it "should set the iconic taxa of descendant taxa to this taxon" do expect(@Calypte_anna.iconic_taxon_id).to be(@Aves.id) @Apodiformes.update_attributes(:is_iconic => true) @Calypte_anna.reload expect(@Calypte_anna.iconic_taxon_id).to be(@Apodiformes.id) end it "should queue a job to change the iconic taxon of descendent observations" do expect { @Apodiformes.update_attributes(:is_iconic => true) }.to change(Delayed::Job, :count).by_at_least(1) end it "should NOT set the iconic taxa of descendant taxa if they descend from a lower iconic taxon" do expect(@Aves).to be_is_iconic expect(@Chordata).not_to be_is_iconic expect(@Calypte_anna.iconic_taxon_id).to be(@Aves.id) @Chordata.update_attributes(:is_iconic => true) @Calypte_anna.reload expect(@Calypte_anna.iconic_taxon_id).to be(@Aves.id) end end describe "Updating iconic taxon" do before(:each) do load_test_taxa end it "should set the iconic taxa of descendant taxa" do expect(@Calypte_anna.iconic_taxon_id).to be(@Aves.id) @Calypte.update_attributes(:iconic_taxon => @Apodiformes) @Calypte_anna.reload 
expect(@Calypte_anna.iconic_taxon_id).to be(@Apodiformes.id) end it "should queue a job to change the iconic taxon of descendent observations" do expect { @Calypte.update_attributes(:iconic_taxon => @Apodiformes) }.to change(Delayed::Job, :count).by_at_least(1) end it "should NOT set the iconic taxa of descendant taxa if they descend from a lower iconic taxon" do expect(@Aves).to be_is_iconic expect(@Chordata).not_to be_is_iconic expect(@Calypte_anna.iconic_taxon_id).to be(@Aves.id) @Chordata.update_attributes(:iconic_taxon => @Plantae) @Calypte_anna.reload expect(@Calypte_anna.iconic_taxon_id).to be(@Aves.id) end end describe Taxon, "set_iconic_taxon_for_observations_of" do elastic_models( Observation, Taxon ) before(:each) do load_test_taxa end it "should set the iconic taxon for observations of descendant taxa" do obs = without_delay { Observation.make!(:taxon => @Calypte_anna) } expect(@Calypte_anna.iconic_taxon.name).to eq @Aves.name expect(obs.iconic_taxon.name).to eq @Calypte_anna.iconic_taxon.name @Calypte.update_attributes(:iconic_taxon => @Amphibia) expect(@Calypte.iconic_taxon.name).to eq @Amphibia.name @Calypte_anna.reload expect(@Calypte_anna.iconic_taxon.name).to eq @Amphibia.name Taxon.set_iconic_taxon_for_observations_of(@Calypte) obs.reload expect(obs.iconic_taxon.name).to eq @Amphibia.name end it "should not change the iconc taxon for observations of other taxa" do bird_obs = Observation.make!(:taxon => @Calypte_anna) frog_obs = Observation.make!(:taxon => @Pseudacris_regilla) expect(bird_obs.iconic_taxon).to eq @Aves expect(frog_obs.iconic_taxon).to eq @Amphibia @Pseudacris.update_attributes(:iconic_taxon => @Plantae) Taxon.set_iconic_taxon_for_observations_of(@Pseudacris) frog_obs.reload expect(frog_obs.iconic_taxon).to eq @Plantae bird_obs.reload expect(bird_obs.iconic_taxon).to eq @Aves end it "should NOT set the iconic taxa of observations of descendant taxa if they descend from a lower iconic taxon" do expect(@Aves).to be_is_iconic 
expect(@Chordata).not_to be_is_iconic expect(@Calypte_anna.iconic_taxon_id).to be(@Aves.id) expect(@Calypte_anna.ancestor_ids).to include(@Aves.id) expect(@Calypte_anna.ancestor_ids).to include(@Chordata.id) obs = Observation.make!(:taxon => @Calypte_anna) expect(obs.iconic_taxon).to eq @Aves @Chordata.update_attributes(:iconic_taxon => @Plantae) Taxon.set_iconic_taxon_for_observations_of(@Chordata) @Calypte_anna.reload expect(@Calypte_anna.iconic_taxon).to eq @Aves obs.reload expect(obs.iconic_taxon).to eq @Aves end end describe Taxon, "normalize_rank" do it "should normalize weird ranks" do expect(Taxon.normalize_rank('sp')).to eq 'species' expect(Taxon.normalize_rank('ssp')).to eq 'subspecies' expect(Taxon.normalize_rank('Gen')).to eq 'genus' end it "should normalize ranks with punctuation" do expect(Taxon.normalize_rank('super-order')).to eq 'superorder' end end describe Taxon, "unique name" do it "should be the default_name by default" do taxon = Taxon.make!(:name => "I am galactus") expect(taxon.unique_name).to eq taxon.default_name.name.downcase end it "should be the scientific name if the common name is already another taxon's unique name" do taxon = Taxon.make! common_name = TaxonName.make!(:name => "Most Awesome Radicalbird", :taxon => taxon, :lexicon => TaxonName::LEXICONS[:ENGLISH]) taxon.save taxon.reload expect(taxon.unique_name).to eq taxon.common_name.name.downcase new_taxon = Taxon.make!(:name => "Ballywickia purhiensis", :rank => 'species') new_taxon.taxon_names << TaxonName.make!( :name => taxon.common_name.name, :lexicon => TaxonName::LEXICONS[:ENGLISH] ) new_taxon.reload expect(new_taxon.unique_name).to eq new_taxon.name.downcase end it "should be nil if all else fails" do taxon = Taxon.make! 
# unique name should be the common name common_name = TaxonName.make!( :taxon => taxon, :lexicon => TaxonName::LEXICONS[:ENGLISH]) other_taxon = new_taxon = Taxon.make!(:name => taxon.name) # unique name should be the sciname new_taxon = Taxon.make!(:name => taxon.name) new_common_name = TaxonName.make!(:name => common_name.name, :taxon => new_taxon, :lexicon => TaxonName::LEXICONS[:ENGLISH]) new_taxon.reload new_taxon.taxon_names.each do |tn| puts "#{tn} was invalid: " + tn.errors.full_messages.join(', ') unless tn.valid? end puts "new_taxon was invalid: " + new_taxon.errors.full_messages.join(', ') unless new_taxon.valid? expect(new_taxon.unique_name).to be_nil end it "should work if there are synonyms in different lexicons" do taxon = Taxon.make! name1 = TaxonName.make!(:taxon => taxon, :name => "foo", :lexicon => TaxonName::LEXICONS[:ENGLISH]) name2 = TaxonName.make!(:taxon => taxon, :name => "Foo", :lexicon => TaxonName::LEXICONS[:SPANISH]) taxon.reload expect(taxon.unique_name).not_to be_blank expect(taxon.unique_name).to eq "foo" end it "should not contain punctuation" do taxon = Taxon.make! TaxonName.make!(:taxon => taxon, :name => "St. Gerome's Radical Snake", :lexicon => TaxonName::LEXICONS[:ENGLISH]) taxon.reload expect(taxon.unique_name).not_to match(/[\.\'\?\!\\\/]/) end end describe Taxon, "common_name" do it "should default to English if present" do t = Taxon.make! tn_en = TaxonName.make!(:taxon => t, :name => "Red Devil", :lexicon => TaxonName::LEXICONS[:ENGLISH]) tn_es = TaxonName.make!(:taxon => t, :name => "Diablo Rojo", :lexicon => TaxonName::LEXICONS[:SPANISH]) tn_un = TaxonName.make!(:taxon => t, :name => "run away!", :lexicon => 'unspecified') expect(t.common_name).to eq(tn_en) end it "should not default to first common if no English or unknown" do t = Taxon.make! 
tn_es = TaxonName.make!(:taxon => t, :name => "Diablo Rojo", :lexicon => TaxonName::LEXICONS[:SPANISH]) expect(t.common_name).to be_blank end end describe Taxon, "tags_to_taxa" do it "should find Animalia and Mollusca" do animalia = Taxon.make!( rank: Taxon::PHYLUM, name: "Animalia" ) aves = Taxon.make!( rank: Taxon::CLASS, name: "Aves", parent: animalia ) taxa = Taxon.tags_to_taxa( ["Animalia", "Aves"] ) expect( taxa ).to include( animalia ) expect( taxa ).to include( aves ) end it "should work on taxonomic machine tags" do animalia = Taxon.make!( rank: Taxon::PHYLUM, name: "Animalia" ) aves = Taxon.make!( rank: Taxon::CLASS, name: "Aves", parent: animalia ) calypte_anna = Taxon.make!( rank: Taxon::SPECIES, name: "Calypte anna" ) taxa = Taxon.tags_to_taxa( [ "taxonomy:kingdom=Animalia", "taxonomy:class=Aves", "taxonomy:binomial=Calypte anna" ] ) expect( taxa ).to include( animalia ) expect( taxa ).to include( aves ) expect( taxa ).to include( calypte_anna ) end it "should not find inactive taxa" do active_taxon = Taxon.make! inactive_taxon = Taxon.make!(:name => active_taxon.name, :is_active => false) taxa = Taxon.tags_to_taxa([active_taxon.name]) expect(taxa).to include(active_taxon) expect(taxa).not_to include(inactive_taxon) end it "should work for sp" do taxon = Taxon.make!( rank: Taxon::GENUS, name: "Mycena" ) taxa = Taxon.tags_to_taxa( ["#{taxon.name} sp"] ) expect( taxa ).to include( taxon ) end it "should work for sp." 
do taxon = Taxon.make!( rank: Taxon::GENUS, name: "Mycena" ) taxa = Taxon.tags_to_taxa( ["#{taxon.name} sp."] ) expect( taxa ).to include( taxon ) end it "should not strip out sp from Spizella" do t = Taxon.make!(:name => 'Spizella') taxa = Taxon.tags_to_taxa(['Spizella']) expect(taxa).to include(t) end it "should choose names before codes" do code_name = TaxonName.make!(:name => "HOME", :lexicon => "AOU Codes") name_name = TaxonName.make!(:name => "Golden-crowned Sparrow", :lexicon => "AOU Codes") taxa = Taxon.tags_to_taxa([code_name.name, name_name.name]) expect(taxa.first).to eq name_name.taxon end it "should not match a code if it's not an exact match" do code_name = TaxonName.make!(:name => "HOME", :lexicon => "AOU Codes") taxa = Taxon.tags_to_taxa([code_name.name.downcase]) expect(taxa).to be_blank end it "should favor longer names" do short_name = TaxonName.make!(:name => "bork", :lexicon => "English") long_name = TaxonName.make!(:name => "Giant Dour-Crested Mopple Hopper", :lexicon => "English") taxa = Taxon.tags_to_taxa([short_name.name, long_name.name]) expect(taxa.first).to eq long_name.taxon end it "should work there are inexact matches" do t = Taxon.make! TaxonName.make!(:name => "Nutria", :taxon => t, :lexicon => "English") TaxonName.make!(:name => "nutria", :taxon => t, :lexicon => "French") expect(Taxon.tags_to_taxa(%w(Nutria))).to include t end it "should not match problematic names" do Taxon::PROBLEM_NAMES.each do |name| t = Taxon.make(:name => name.capitalize) if t.valid? 
expect( Taxon.tags_to_taxa( [name, name.capitalize] ) ).to be_blank end end end it "should not match scientifc names that are 2 letters or less" do %w(Aa Io La).each do |name| t = Taxon.make!( name: name, rank: Taxon::GENUS ) expect( Taxon.tags_to_taxa( [name, name.downcase ] ) ).to be_blank end end it "should not match abbreviated month names" do %w(Mar May Jun Nov).each do |name| t = Taxon.make!( name: name, rank: Taxon::GENUS ) expect( Taxon.tags_to_taxa( [name, name.downcase ] ) ).to be_blank end end end describe Taxon, "merging" do elastic_models( Observation, Taxon ) before(:all) { load_test_taxa } before(:each) do # load_test_taxa @keeper = Taxon.make!( name: "Calypte keeper", rank: Taxon::SPECIES, parent: @Calypte ) @reject = Taxon.make!( :name => "Calypte reject", rank: Taxon::SPECIES, parent: @Calypte ) @has_many_assocs = Taxon.reflections.select{|k,v| v.macro == :has_many}.map{|k,v| k} @has_many_assocs.each {|assoc| @reject.send(assoc, :force_reload => true)} end it "should move the reject's children to the keeper" do child = Taxon.make!( name: "Calypte reject rejectus", parent: @reject, rank: Taxon::SUBSPECIES ) rejected_children = @reject.children expect(rejected_children).not_to be_empty @keeper.merge( @reject ) rejected_children.each do |c| c.reload expect( c.parent_id ).to eq @keeper.parent_id end end it "should move the reject's taxon_names to the keeper" do rejected_taxon_names = @reject.taxon_names expect(rejected_taxon_names).not_to be_empty @keeper.merge(@reject) rejected_taxon_names.each do |taxon_name| taxon_name.reload expect(taxon_name.taxon_id).to be(@keeper.id) end end it "should move the reject's taxon_names to the keeper even if they don't have a lexicon" do @reject.taxon_names << TaxonName.new(:name => "something") rejected_taxon_names = @reject.taxon_names expect(rejected_taxon_names).not_to be_empty @keeper.merge(@reject) rejected_taxon_names.each do |taxon_name| taxon_name.reload expect(taxon_name.taxon_id).to be(@keeper.id) end end 
  # Each of the following specs checks that Taxon#merge re-points one
  # has_many association from @reject (defined in this describe's before
  # block) to @keeper.
  it "should move the reject's observations to the keeper" do
    2.times do
      Observation.make!(:taxon => @reject)
    end
    rejected_observations = @reject.observations.all
    expect(rejected_observations).not_to be_empty
    @keeper.merge(@reject)
    rejected_observations.each do |observation|
      observation.reload
      expect(observation.taxon_id).to be(@keeper.id)
    end
  end

  it "should move the reject's listed_taxa to the keeper" do
    3.times do
      ListedTaxon.make!(:taxon => @reject)
    end
    rejected_listed_taxa = @reject.listed_taxa.all
    expect(rejected_listed_taxa).not_to be_empty
    @keeper.merge(@reject)
    rejected_listed_taxa.each do |listed_taxon|
      listed_taxon.reload
      expect(listed_taxon.taxon_id).to be(@keeper.id)
    end
  end

  it "should move the reject's list_rules to the keeper" do
    # Uses a local keeper/reject pair here (not the before-block ones)
    # because the rule operand must be @Amphibia from load_test_taxa.
    rule = ListRule.make!(:operand => @Amphibia, :operator => "in_taxon?")
    reject = rule.operand
    keeper = Taxon.make
    keeper.name = "Amphibiatwo"
    keeper.unique_name = "Amphibiatwo"
    keeper.save
    # NOTE(review): update_attributes is the pre-Rails-6.1 API, used
    # consistently throughout this spec.
    keeper.update_attributes(:parent => reject.parent)
    keeper.merge(reject)
    rule.reload
    expect(rule.operand_id).to be(keeper.id)
  end

  it "should move the reject's identifications to the keeper" do
    3.times do
      Identification.make!(:taxon => @reject)
    end
    rejected_identifications = @reject.identifications.all
    expect(rejected_identifications).not_to be_empty
    @keeper.merge(@reject)
    rejected_identifications.each do |identification|
      identification.reload
      expect(identification.taxon_id).to be(@keeper.id)
    end
  end

  it "should move the reject's taxon_links to the keeper" do
    3.times do
      TaxonLink.make!(:taxon => @reject)
    end
    rejected_taxon_links = @reject.taxon_links.all
    expect(rejected_taxon_links).not_to be_empty
    @keeper.merge(@reject)
    rejected_taxon_links.each do |taxon_link|
      taxon_link.reload
      expect(taxon_link.taxon_id).to be(@keeper.id)
    end
  end

  it "should move the reject's taxon_photos to the keeper" do
    3.times do
      TaxonPhoto.make!(:taxon => @reject)
    end
    rejected_taxon_photos = @reject.taxon_photos.all
    expect(rejected_taxon_photos).not_to be_empty
    @keeper.merge(@reject)
    rejected_taxon_photos.each do |taxon_photo|
      taxon_photo.reload
      expect(taxon_photo.taxon_id).to be(@keeper.id)
    end
  end

  # Scientific names carried over from the reject that differ from the
  # keeper's accepted name should be kept but flagged invalid (synonyms).
  it "should mark scinames not matching the keeper as invalid" do
    old_sciname = @reject.scientific_name
    expect(old_sciname).to be_is_valid
    @keeper.merge(@reject)
    old_sciname.reload
    expect(old_sciname).not_to be_is_valid
  end

  it "should delete duplicate taxon_names from the reject" do
    old_sciname = @reject.scientific_name
    synonym = old_sciname.dup
    synonym.is_valid = false
    @keeper.taxon_names << synonym
    @keeper.merge(@reject)
    expect(TaxonName.find_by_id(old_sciname.id)).to be_nil
  end

  # Pending spec (no body yet).
  it "should delete listed_taxa from the reject that are invalid"

  it "should destroy the reject" do
    @keeper.merge(@reject)
    expect(Taxon.find_by_id(@reject.id)).to be_nil
  end

  it "should not create duplicate listed taxa" do
    lt1 = ListedTaxon.make!(:taxon => @keeper)
    lt2 = ListedTaxon.make!(:taxon => @reject, :list => lt1.list)
    @keeper.merge(@reject)
    expect(lt1.list.listed_taxa.where(taxon_id: @keeper.id).count).to eq 1
  end

  it "should set iconic taxa on children" do
    reject = Taxon.make!(rank: "species")
    child = Taxon.make!(parent: reject, rank: "subspecies")
    expect(child.iconic_taxon_id).not_to eq @keeper.iconic_taxon_id
    expect(child.iconic_taxon_id).to eq reject.iconic_taxon_id
    @keeper.merge(reject)
    child.reload
    expect(child.iconic_taxon_id).to eq @keeper.iconic_taxon_id
  end

  it "should set iconic taxa on descendants" do
    expect(@Calypte_anna.iconic_taxon_id).not_to eq @Pseudacris.iconic_taxon_id
    @Pseudacris.merge(@Calypte)
    @Calypte_anna.reload
    expect(@Calypte_anna.iconic_taxon_id).to eq @Pseudacris.iconic_taxon_id
  end

  it "should queue a job to set iconic taxon on observations of descendants" do
    Delayed::Job.delete_all
    stamp = Time.now
    @Pseudacris.merge(@Calypte)
    jobs = Delayed::Job.where("created_at >= ?", stamp)
    # Match on the serialized job handler to find the specific queued method.
    expect(jobs.select{|j| j.handler =~ /set_iconic_taxon_for_observations_of/m}).not_to be_blank
  end

  it "should remove duplicate schemes" do
    ts =
TaxonScheme.make! t1 = Taxon.make! t1.taxon_schemes << ts t2 = Taxon.make! t2.taxon_schemes << ts t1.merge(t2) t1.reload expect(t1.taxon_schemes.size).to eq(1) end it "should set iconic taxon for observations of reject" do reject = Taxon.make! o = without_delay {Observation.make!(:taxon => reject)} expect(o.iconic_taxon).to be_blank without_delay {@keeper.merge(reject)} o.reload expect(o.iconic_taxon).to eq(@keeper.iconic_taxon) end it "should update subscriptions" do s = Subscription.make!(:resource => @reject) @keeper.merge(@reject) s.reload expect(s.resource).to eq @keeper end it "should not alter with subscriptions to other classess" do reject = Taxon.make!(:id => 888) keeper = Taxon.make!(:id => 999) o = Observation.make!(:id => 888) s = Subscription.make!(:resource => o) keeper.merge(reject) s.reload expect(s.resource).to eq(o) end it "should work with denormalized ancestries" do AncestryDenormalizer.truncate expect(TaxonAncestor.count).to eq 0 AncestryDenormalizer.denormalize expect { @keeper.merge(@reject) }.not_to raise_error end end describe Taxon, "moving" do elastic_models( Observation, Taxon, Identification ) before(:all) do load_test_taxa end let(:obs) do t = Taxon.make!( name: "Calypte test", rank: Taxon::SPECIES, parent: @Calypte ) obs = Observation.make!( taxon: t ) end let(:hummer_genus) { Taxon.make!( rank: Taxon::GENUS, parent: @Trochilidae ) } it "should update the iconic taxon of observations" do old_iconic_id = obs.iconic_taxon_id taxon = obs.taxon taxon.move_to_child_of(@Amphibia) taxon.reload obs.reload expect(obs.iconic_taxon_id).not_to be(old_iconic_id) expect(obs.iconic_taxon_id).to be(taxon.iconic_taxon_id) end it "should queue a job to set iconic taxon on observations of descendants" do old_iconic_id = obs.iconic_taxon_id taxon = obs.taxon Delayed::Job.delete_all stamp = Time.now taxon.parent.move_to_child_of(@Amphibia) jobs = Delayed::Job.where("created_at >= ?", stamp) expect(jobs.select{|j| j.handler =~ 
/set_iconic_taxon_for_observations_of/m}).not_to be_blank end it "should set iconic taxon on observations of descendants" do old_iconic_id = obs.iconic_taxon_id taxon = obs.taxon without_delay do taxon.parent.move_to_child_of(@Amphibia) end obs.reload expect(obs.iconic_taxon).to eq(@Amphibia) end it "should set iconic taxon on observations of descendants if grafting for the first time" do parent = Taxon.make!(rank: Taxon::GENUS) taxon = Taxon.make!(parent: parent, rank: Taxon::SPECIES) o = without_delay { Observation.make!(:taxon => taxon) } expect(o.iconic_taxon).to be_blank without_delay do parent.move_to_child_of(@Amphibia) end o.reload expect(o.iconic_taxon).to eq(@Amphibia) end it "should not raise an exception if the new parent doesn't exist" do taxon = Taxon.make! bad_id = Taxon.last.id + 1 expect { taxon.parent_id = bad_id }.not_to raise_error end # this is something we override from the ancestry gem it "should queue a job to update descendant ancestries" do Delayed::Job.delete_all stamp = Time.now hummer_genus.update_attributes( parent: @Hylidae ) jobs = Delayed::Job.where("created_at >= ?", stamp) expect(jobs.select{|j| j.handler =~ /update_descendants_with_new_ancestry/m}).not_to be_blank end it "should not queue a job to update descendant ancetries if skip_after_move set" do Delayed::Job.delete_all stamp = Time.now hummer_genus.update_attributes(:parent => @Hylidae, :skip_after_move => true) jobs = Delayed::Job.where("created_at >= ?", stamp) expect(jobs.select{|j| j.handler =~ /update_descendants_with_new_ancestry/m}).not_to be_blank end it "should queue a job to update observation stats if there are observations" do Delayed::Job.delete_all stamp = Time.now o = Observation.make!( taxon: hummer_genus ) expect( Observation.of( hummer_genus ).count ).to eq 1 hummer_genus.update_attributes( parent: @Hylidae ) jobs = Delayed::Job.where( "created_at >= ?", stamp ) expect( jobs.select{|j| j.handler =~ /update_stats_for_observations_of/m} ).not_to be_blank end 
  # Moving a genus under the subfamily should let the community taxon
  # tighten from the family to the species.
  it "should update community taxa" do
    fam = Taxon.make!( name: "Familyone", rank: "family")
    subfam = Taxon.make!( name: "Subfamilyone", rank: "subfamily", parent: fam )
    gen = Taxon.make!( name: "Genusone", rank: "genus", parent: fam )
    sp = Taxon.make!( name: "Species one", rank: "species", parent: gen )
    o = Observation.make!
    i1 = Identification.make!(:observation => o, :taxon => subfam)
    i2 = Identification.make!(:observation => o, :taxon => sp)
    expect(Identification.of(gen).exists?).to be true
    o.reload
    # Before the move, subfam and sp only agree at the family level.
    expect(o.taxon).to eq fam
    Delayed::Worker.new.work_off
    without_delay do
      gen.update_attributes(:parent => subfam)
    end
    o.reload
    expect(o.taxon).to eq sp
  end

  it "should create TaxonAncestors" do
    t = Taxon.make!( rank: Taxon::SPECIES, name: "Ronica vestrit" )
    expect( t.taxon_ancestors.count ).to eq 1 # should always make one for itself
    t.move_to_child_of( @Calypte )
    t.reload
    expect( t.taxon_ancestors.count ).to be > 1
    expect( t.taxon_ancestors.detect{ |ta| ta.ancestor_taxon_id == @Calypte.id } ).not_to be_blank
  end

  it "should remove existing TaxonAncestors" do
    t = Taxon.make!( rank: Taxon::SPECIES, parent: @Calypte )
    expect( TaxonAncestor.where( taxon_id: t.id, ancestor_taxon_id: @Calypte.id ).count ).to eq 1
    t.move_to_child_of( @Pseudacris )
    expect( TaxonAncestor.where( taxon_id: t.id, ancestor_taxon_id: @Calypte.id ).count ).to eq 0
  end

  # Moving a genus must propagate new ancestor_ids into the ES index of its
  # descendant species (jobs drained with work_off).
  it "should reindex descendants" do
    g = Taxon.make!( rank: Taxon::GENUS, parent: @Trochilidae )
    s = Taxon.make!( rank: Taxon::SPECIES, parent: g )
    Delayed::Worker.new.work_off
    s.reload
    es_response = Taxon.elastic_search( where: { id: s.id } ).results.results.first
    expect( es_response.ancestor_ids ).to include @Trochilidae.id
    g.move_to_child_of( @Hylidae )
    Delayed::Worker.new.work_off
    s.reload
    es_response = Taxon.elastic_search( where: { id: s.id } ).results.results.first
    expect( es_response.ancestor_ids ).to include @Hylidae.id
  end

  it "should reindex identifications of the taxon" do
    g = Taxon.make!( rank: Taxon::GENUS, parent: @Trochilidae )
    s = Taxon.make!( rank: Taxon::SPECIES, parent: g )
    g_ident = Identification.make!( taxon: g )
    s_ident = Identification.make!( taxon: s )
    Delayed::Worker.new.work_off
    s.reload
    g_ident_es = Identification.elastic_search( where: { id: g_ident.id } ).results.results.first
    s_ident_es = Identification.elastic_search( where: { id: s_ident.id } ).results.results.first
    expect( g_ident_es.taxon.ancestor_ids ).to include @Trochilidae.id
    expect( s_ident_es.taxon.ancestor_ids ).to include @Trochilidae.id
    expect( s_ident_es.taxon.rank_level ).to eq s.rank_level
    g.move_to_child_of( @Hylidae )
    Delayed::Worker.new.work_off
    s.reload
    g_ident_es = Identification.elastic_search( where: { id: g_ident.id } ).results.results.first
    s_ident_es = Identification.elastic_search( where: { id: s_ident.id } ).results.results.first
    expect( g_ident_es.taxon.ancestor_ids ).to include @Hylidae.id
    expect( s_ident_es.taxon.ancestor_ids ).to include @Hylidae.id
    expect( s_ident_es.taxon.rank_level ).to eq s.rank_level
    g_obs_es = Observation.elastic_search( where: { id: g_ident.observation_id } ).results.results.first
    s_obs_es = Observation.elastic_search( where: { id: s_ident.observation_id } ).results.results.first
    expect( g_obs_es.taxon.ancestor_ids ).to include @Hylidae.id
    # TODO: there seems to be a data inconsistency here -
    # the obs index for descendants of the moved taxon don't have updated ancestries
    # expect( s_obs_es.taxon.ancestor_ids ).to include @Hylidae.id
  end

  # This is a sanity spec written while trying to investigate claims that adding
  # a complex alters the previous_observation_taxon on identicications. It
  # doesn't seem to, at least under these conditions. ~~~kueda 20201216
  # it "should not interfere with previous_observation_taxon on identifications when the previous_observation_taxon gets moved into an interstitial taxon" do
  #   g = Taxon.make!( rank: Taxon::GENUS, parent: @Trochilidae )
  #   s = Taxon.make!( rank: Taxon::SPECIES, parent: g )
  #   o = Observation.make!( taxon: s )
  #   Delayed::Worker.new.work_off
  #   i = Identification.make!( observation: o, taxon: @Trochilidae, disagreement: true )
  #   Delayed::Worker.new.work_off
  #   i.reload
  #   expect( i.previous_observation_taxon ).to eq s
  #   c = Taxon.make!( rank: Taxon::COMPLEX, parent: g )
  #   Delayed::Worker.new.work_off
  #   s.update_attributes( parent_id: c.id )
  #   Delayed::Worker.new.work_off
  #   i.reload
  #   expect( i.previous_observation_taxon ).to eq s
  # end
end

# Class-level helper that rewrites descendant ancestry strings after a parent
# change (overridden behavior from the ancestry gem).
describe Taxon, "update_descendants_with_new_ancestry" do
  before(:each) do
    load_test_taxa
  end
  it "should update the ancestry of descendants" do
    @Calypte.parent = @Hylidae
    # Capture the pre-save ancestry key the descendants were stored under.
    child_ancestry_was = @Calypte.child_ancestry
    @Calypte.save
    Taxon.update_descendants_with_new_ancestry(@Calypte.id, child_ancestry_was)
    @Calypte_anna.reload
    expect(@Calypte_anna.ancestry).to be =~ /^#{@Hylidae.ancestry}/
    expect(@Calypte_anna.ancestry).to be =~ /^#{@Calypte.ancestry}/
  end
end

describe Taxon do
  describe "featuring" do
    # A taxon cannot be featured without at least one photo.
    it "should fail if no photos" do
      taxon = Taxon.make!
      taxon.featured_at = Time.now
      expect(taxon.photos).to be_blank
      taxon.valid?
      expect(taxon.errors[:featured_at]).not_to be_blank
    end
  end

  describe "conservation status" do
    # Status constants generate predicate methods like #iucn_vulnerable?.
    it "should define boolean methods" do
      taxon = Taxon.make!(:conservation_status => Taxon::IUCN_VULNERABLE)
      expect(taxon).to be_iucn_vulnerable
      expect(taxon).not_to be_iucn_extinct
    end
  end

  describe "locking" do
    it "should cause grafting descendents to fail" do
      taxon = Taxon.make!(:locked => true)
      child = Taxon.make!
expect(child.parent).not_to be(taxon)
      # Grafting under a locked taxon must be rejected.
      child.update_attribute(:parent, taxon)
      expect(child.parent).not_to be(taxon)
    end
    # Pending spec (no body yet).
    it "should prevent new scientific taxon names of descendents"
  end
end

# Grafting: attaching a previously root-level taxon to the tree.
describe Taxon, "grafting" do
  elastic_models( Observation, Taxon )
  before(:each) do
    load_test_taxa
    @graftee = Taxon.make!(:rank => "species")
  end

  it "should set iconic taxa on children" do
    expect(@graftee.iconic_taxon_id).not_to eq @Pseudacris.iconic_taxon_id
    @graftee.update_attributes(:parent => @Pseudacris)
    @graftee.reload
    expect(@graftee.iconic_taxon_id).to eq @Pseudacris.iconic_taxon_id
  end

  it "should set iconic taxa on descendants" do
    taxon = Taxon.make!(rank: "subspecies", name: "Craptaculous", parent: @graftee)
    @graftee.update_attributes(:parent => @Pseudacris)
    taxon.reload
    expect(taxon.iconic_taxon_id).to eq @Pseudacris.iconic_taxon_id
  end

  it "should queue a job to set iconic taxon on observations of descendants" do
    Delayed::Job.delete_all
    stamp = Time.now
    @graftee.update_attributes(:parent => @Pseudacris)
    jobs = Delayed::Job.where("created_at >= ?", stamp)
    expect(jobs.select{|j| j.handler =~ /set_iconic_taxon_for_observations_of/m}).not_to be_blank
  end

  # #graft infers the parent genus from the first word of a binomial.
  it "should set the parent of a species based on the polynom genus" do
    t = Taxon.make!(:name => "Pseudacris foo")
    t.graft
    expect(t.parent).to eq(@Pseudacris)
  end

  it "should update the ancestry of children" do
    f = Taxon.make!( rank: Taxon::FAMILY, name: "Familyone" )
    g = Taxon.make!( rank: Taxon::GENUS, name: "Genusone" )
    s = Taxon.make!( rank: Taxon::SPECIES, name: "Genusone speciesone", parent: g )
    expect( g ).not_to be_grafted
    expect( s.ancestor_ids ).to include g.id
    expect( s.ancestor_ids ).not_to include f.id
    g.update_attributes( parent: f )
    Delayed::Worker.new.work_off
    g.reload
    s.reload
    expect( s.ancestor_ids ).to include g.id
    expect( s.ancestor_ids ).to include f.id
  end
end

# Taxon.single_taxon_for_name: resolve a name string to exactly one taxon,
# or nil when it is ambiguous.
describe Taxon, "single_taxon_for_name" do
  it "should find varieties" do
    name = "Abies magnifica var. magnifica"
    t = Taxon.make!(:name => name, :rank => Taxon::VARIETY)
    expect(t).to be_variety
    expect(t.name).to eq("Abies magnifica magnifica")
    expect(Taxon.single_taxon_for_name(name)).to eq(t)
  end

  it "should not choke on parens" do
    t = Taxon.make!(:name => "Foo")
    expect {
      expect(Taxon.single_taxon_for_name("(Foo")).to eq(t)
    }.not_to raise_error
  end

  it "should find a valid name, not invalid synonyms within the same parent" do
    name = "Foo bar"
    parent = Taxon.make!(rank: Taxon::GENUS)
    valid = Taxon.make!(name: name, parent: parent, rank: Taxon::SPECIES)
    invalid = Taxon.make!(parent: parent, rank: Taxon::SPECIES)
    invalid.taxon_names.create(:name => name, :is_valid => false, :lexicon => TaxonName::SCIENTIFIC_NAMES)
    expect(Taxon.single_taxon_for_name(name)).to eq(valid)
  end

  it "should find a single valid name among invalid synonyms" do
    valid = Taxon.make!(parent: Taxon.make!(rank: Taxon::GENUS), rank: Taxon::SPECIES)
    invalid = Taxon.make!(parent: Taxon.make!(rank: Taxon::GENUS), rank: Taxon::SPECIES)
    tn = TaxonName.create!(taxon: invalid, name: valid.name, is_valid: false, lexicon: TaxonName::SCIENTIFIC_NAMES)
    all_names = [valid.taxon_names.map(&:name), invalid.reload.taxon_names.map(&:name)].flatten.uniq
    expect( all_names.size ).to eq 2
    expect( tn.is_valid? ).to eq false
    expect(Taxon.single_taxon_for_name(valid.name)).to eq(valid)
  end

  # Two active taxa sharing a common name is ambiguous — expect nil.
  it "should not choose one active taxon among several active synonyms" do
    parent = Taxon.make!( rank: "genus" )
    valid1 = Taxon.make!( :species, parent: parent )
    valid2 = Taxon.make!( :species, parent: parent )
    [valid1, valid2].each do |t|
      TaxonName.make!( taxon: t, name: "Black Oystercatcher", lexicon: TaxonName::ENGLISH )
    end
    expect( Taxon.single_taxon_for_name( "Black Oystercatcher" ) ).to be_nil
  end
end

describe Taxon, "threatened?" do
  elastic_models( Observation, Taxon )
  # Pending spec (no body yet).
  it "should work for a place"
  it "should work for lat/lon" do
    p = make_place_with_geom
    cs = ConservationStatus.make!(:place => p)
    expect(p.contains_lat_lng?(p.latitude, p.longitude)).to be true
    t = cs.taxon
    expect(t.threatened?(:latitude => p.latitude, :longitude => p.longitude)).to be true
  end
end

describe Taxon, "geoprivacy" do
  elastic_models( Observation, Taxon )
  # With a PRIVATE place-specific status and an (open) global status, the
  # stricter value wins at that location.
  it "should choose the maximum privacy relevant to the location" do
    t = Taxon.make!(:rank => Taxon::SPECIES)
    p = make_place_with_geom
    cs_place = ConservationStatus.make!(:taxon => t, :place => p, :geoprivacy => Observation::PRIVATE)
    cs_global = ConservationStatus.make!(:taxon => t)
    expect( t.geoprivacy(latitude: p.latitude, longitude: p.longitude) ).to eq Observation::PRIVATE
  end

  it "should be open if conservation statuses exist but all are open" do
    t = Taxon.make!(rank: Taxon::SPECIES)
    p = make_place_with_geom
    cs_place = ConservationStatus.make!(taxon: t, place: p, geoprivacy: Observation::OPEN)
    cs_global = ConservationStatus.make!(taxon: t, geoprivacy: Observation::OPEN)
    expect( t.geoprivacy(latitude: p.latitude, longitude: p.longitude) ).to eq Observation::OPEN
  end
end

# Taxon.max_geoprivacy: strictest geoprivacy across a set of taxon ids,
# including statuses inherited from ancestors.
describe Taxon, "max_geoprivacy" do
  let(:t1) { Taxon.make!(rank: Taxon::SPECIES) }
  let(:t2) { Taxon.make!(rank: Taxon::SPECIES) }
  let(:taxon_ids) { [t1.id, t2.id] }
  elastic_models( Observation, Identification )

  it "should be private if one of the taxa has a private global status" do
    cs_global = ConservationStatus.make!( taxon: t1, geoprivacy: Observation::PRIVATE )
    expect( Taxon.max_geoprivacy( taxon_ids ) ).to eq Observation::PRIVATE
  end

  it "should be private if one of the ancestor taxa has a private global status" do
    parent = Taxon.make!( rank: Taxon::GENUS )
    cs_global = ConservationStatus.make!( taxon: parent, geoprivacy: Observation::PRIVATE )
    without_delay do
      t1.update_attributes( parent: parent )
    end
    expect( t1.ancestor_ids ).to include parent.id
    expect( Taxon.max_geoprivacy( taxon_ids ) ).to eq Observation::PRIVATE
  end

  it "should be nil if none of the taxa have global status" do
    expect( Taxon.max_geoprivacy( taxon_ids ) ).to be_nil
  end
end

# to_styled_s: HTML-ish display name (italics for genus and below, rank
# prefix for genus and above, common name prepended when present).
describe Taxon, "to_styled_s" do
  it "should return normal names untouched" do
    expect(Taxon.new(:name => "Tom", :rank => nil).to_styled_s).to eq "Tom"
  end

  it "should italicize genera and below" do
    expect(Taxon.new(:name => "Tom", :rank => "genus").to_styled_s).to eq "Genus <i>Tom</i>"
    expect(Taxon.new(:name => "Tom", :rank => "species").to_styled_s).to eq "<i>Tom</i>"
    expect(Taxon.new(:name => "Tom", :rank => "infraspecies").to_styled_s).to eq "<i>Tom</i>"
  end

  it "should add ranks to genera and above" do
    expect(Taxon.new(:name => "Tom", :rank => "genus").to_styled_s).to eq "Genus <i>Tom</i>"
    expect(Taxon.new(:name => "Tom", :rank => "family").to_styled_s).to eq "Family Tom"
    expect(Taxon.new(:name => "Tom", :rank => "kingdom").to_styled_s).to eq "Kingdom Tom"
  end

  it "should add common name when available" do
    taxon = Taxon.new(:name => "Tom", :rank => "genus")
    common_name = TaxonName.make!(:name => "Common", :taxon => taxon, :lexicon => TaxonName::LEXICONS[:ENGLISH])
    taxon.reload
    expect(taxon.to_styled_s).to eq "Common (Genus <i>Tom</i>)"
  end
end

describe Taxon, "leading_name" do
  it "returns the scientific name if that's all there is" do
    expect(Taxon.make!(name: "Tom").leading_name).to eq "Tom"
  end

  it "returns the common name when available" do
    taxon = Taxon.make!(name: "Tom")
    TaxonName.make!(name: "Common", taxon: taxon, lexicon: TaxonName::LEXICONS[:ENGLISH])
    expect(taxon.leading_name).to eq "Common"
  end
end

# editable_by?: admins can edit anything; curators only below order rank;
# taxon curators are scoped by their TaxonFramework.
describe Taxon, "editable_by?" do
  let(:admin) { make_admin }
  let(:curator) { make_curator }

  it "should be editable by admins if class" do
    expect( Taxon.make!( rank: Taxon::CLASS ) ).to be_editable_by( admin )
  end

  it "should be editable by curators if below order" do
    taxon = Taxon.make!( rank: Taxon::FAMILY )
    expect( taxon ).to be_editable_by( curator )
  end

  it "should not be editable by curators if order or above" do
    expect( Taxon.make!( rank: Taxon::CLASS ) ).not_to be_editable_by( curator )
  end

  describe "when taxon framework" do
    let(:second_curator) { make_curator }

    it "should be editable by taxon curators of that taxon" do
      family = Taxon.make!( rank: Taxon::FAMILY )
      genus = Taxon.make!( rank: Taxon::GENUS, parent: family )
      species = Taxon.make!( rank: Taxon::SPECIES, parent: genus )
      tf = TaxonFramework.make!( taxon: family, rank_level: Taxon::RANK_LEVELS[Taxon::SPECIES] )
      tc = TaxonCurator.make!( taxon_framework: tf, user: second_curator )
      expect( species ).to be_editable_by( second_curator )
    end

    it "should be editable by other site curators" do
      family = Taxon.make!( rank: Taxon::FAMILY )
      genus = Taxon.make!( rank: Taxon::GENUS, parent: family )
      species = Taxon.make!( rank: Taxon::SPECIES, parent: genus )
      tf = TaxonFramework.make!( taxon: family, rank_level: Taxon::RANK_LEVELS[Taxon::SPECIES] )
      tc = TaxonCurator.make!( taxon_framework: tf, user: second_curator )
      expect( species ).to be_editable_by( curator )
    end
  end
end

# get_gbif_id: looks up the taxon in GBIF and records a TaxonSchemeTaxon on
# a name match. NOTE(review): these hit an external service/stubbed client —
# exact behavior depends on the GBIF response fixtures; confirm in CI setup.
describe Taxon, "get_gbif_id" do
  it "should work" do
    a = Taxon.make!( name: "Chordata", rank: "phylum" )
    t = Taxon.make!( name: "Pseudacris", rank: "genus", parent: a )
    expect( t.get_gbif_id ).not_to be_blank
    expect( t.taxon_scheme_taxa ).not_to be_blank
  end

  it "should not create a TaxonSchemeTaxon for responses that don't match the taxon's name" do
    a = Taxon.make!( name: "Chordata", rank: "phylum" )
    t = Taxon.make!( name: "Sorberacea", rank: "class", parent: a )
    expect( t.get_gbif_id ).to be_blank
    expect( t.taxon_scheme_taxa ).to be_blank
  end

  it "should not error and return GBIF ID is there is no valid scientific name" do
    a = Taxon.make!( name: "Chordata", rank: "phylum" )
    t = Taxon.make!( name: "Dugongidae", rank: "family", parent: a )
    t.taxon_names.update_all(is_valid: false)
    expect { t.get_gbif_id }.not_to raise_error
    expect( t.get_gbif_id ).to_not be_blank
    expect( t.taxon_scheme_taxa ).to be_blank
  end
end

describe "rank helpers" do
  # Taxon#species walks up (or returns self) to the species-rank taxon.
  describe "find_species" do
    it "should return self of the taxon is a species" do
      t = Taxon.make!( rank: Taxon::SPECIES )
      expect( t.species ).to eq t
    end

    it "should return the parent if the taxon is a subspecies" do
      species = Taxon.make!( rank: Taxon::SPECIES )
      subspecies = Taxon.make!( rank: Taxon::SUBSPECIES, parent: species )
      expect( subspecies.species ).to eq species
    end

    it "should return nil if the taxon is a hybrid" do
      hybrid = Taxon.make!( name: "Viola × palmata", rank: Taxon::HYBRID )
      expect( hybrid.species ).to be_nil
    end
  end
end

# Taxon framework coverage: root family carries a framework down to species
# rank; "tip" (species) is covered, ranks below it are not.
describe "taxon" do
  let(:root) { Taxon.make!( rank: Taxon::FAMILY ) }
  let(:internode) { Taxon.make!( rank: Taxon::GENUS, parent: root ) }
  let!(:tip) { Taxon.make!( rank: Taxon::SPECIES, parent: internode ) }
  let!(:taxon_framework) { TaxonFramework.make!( taxon: root, rank_level: Taxon::RANK_LEVELS[Taxon::SPECIES] ) }
  let!(:taxon_curator) { TaxonCurator.make!( taxon_framework: taxon_framework ) }

  it "should recognize that its covered by a taxon framework" do
    expect( tip.upstream_taxon_framework ).not_to be_blank
  end

  it "should recognize that its not covered by a taxon framework" do
    ssp = Taxon.make!( rank: Taxon::SUBSPECIES, parent: tip )
    expect( ssp.upstream_taxon_framework ).to be_blank
  end

  describe "when current_user" do
    describe "is curator" do
      let(:curator) { make_curator }

      # Ordinary curators may not add active taxa inside a framework.
      it "should prevent grafting to root" do
        t = Taxon.make( rank: Taxon::GENUS, parent: root, current_user: curator )
        expect( t ).not_to be_valid
      end

      it "should allow grafting to root when inactive" do
        t = Taxon.make( rank: Taxon::GENUS, parent: root, current_user: curator, is_active: false )
        expect(
t ).to be_valid
        t.save
        t.reload
        # Editing other attributes of an inactive taxon stays allowed …
        t.update_attributes( rank: Taxon::SUBGENUS, current_user: curator )
        expect( t ).to be_valid
        t.reload
        # … but activating it inside the framework is not.
        t.update_attributes( is_active: true, current_user: curator )
        expect( t ).not_to be_valid
      end

      it "should prevent grafting to internode" do
        t = Taxon.make( rank: Taxon::SPECIES, parent: internode, current_user: curator )
        expect( t ).not_to be_valid
      end

      # Tips sit at the framework's rank_level boundary, so grafting below
      # them is outside the framework's coverage.
      it "should allow grafting to tip" do
        t = Taxon.make( rank: Taxon::SUBSPECIES, parent: tip, current_user: curator )
        expect( t ).to be_valid
      end

      it "should prevent editing is_active on root" do
        root.update_attributes( is_active: false, current_user: curator )
        expect( root ).not_to be_valid
      end

      it "should allow moving root" do
        other_root = Taxon.make!( rank: Taxon::SUPERFAMILY )
        root.update_attributes( parent: other_root, current_user: curator )
        expect( root ).to be_valid
      end

      it "should prevent moving internode" do
        expect( internode.upstream_taxon_framework ).not_to be_blank
        other_root = Taxon.make!( rank: Taxon::FAMILY )
        expect( internode.parent ).to eq root
        internode.update_attributes( parent: other_root, current_user: curator )
        expect( internode ).not_to be_valid
        # NOTE(review): the in-memory record keeps the attempted parent even
        # though the record is invalid (update_attributes assigned it).
        expect( internode.parent ).to eq other_root
      end

      it "should prevent moving tip" do
        other_root = Taxon.make!( rank: Taxon::FAMILY )
        tip.update_attributes( parent: other_root, current_user: curator )
        expect( tip ).not_to be_valid
      end
    end

    # A TaxonCurator of the covering framework may do what plain curators
    # may not.
    describe "is taxon curator" do
      it "should alow grafting to root" do
        t = Taxon.make( rank: Taxon::GENUS, parent: root, current_user: taxon_curator.user )
        expect( t ).to be_valid
      end

      it "should allow grafting to internode" do
        t = Taxon.make( rank: Taxon::SPECIES, parent: internode, current_user: taxon_curator.user )
        expect( t ).to be_valid
      end

      it "should allow grafting to tip" do
        t = Taxon.make( rank: Taxon::SUBSPECIES, parent: tip, current_user: taxon_curator.user )
        expect( t ).to be_valid
      end

      # When a downstream framework overlaps, its own curators take over:
      # the upstream framework's curator loses edit rights under it.
      it "should prevent taxon_curator from grafting to node covered by a overlapping downstream taxon framework" do
        deeper_internode = Taxon.make!( rank: Taxon::SUBGENUS, parent: internode, current_user: taxon_curator.user )
        deepertip = Taxon.make!( rank: Taxon::SPECIES, parent: deeper_internode, current_user: taxon_curator.user )
        overlapping_downstream_taxon_framework = TaxonFramework.make!( taxon: internode, rank_level: Taxon::RANK_LEVELS[Taxon::SPECIES] )
        overlapping_downstream_taxon_framework_taxon_curator = TaxonCurator.make!( taxon_framework: overlapping_downstream_taxon_framework )
        t = Taxon.make( rank: Taxon::SPECIES, parent: deeper_internode, current_user: taxon_curator.user )
        expect( t ).not_to be_valid
      end

      it "should allow taxon_curator to grafting to node with an overlapping upstream taxon framework" do
        deeper_internode = Taxon.make!( rank: Taxon::SUBGENUS, parent: internode, current_user: taxon_curator.user )
        deepertip = Taxon.make!( rank: Taxon::SPECIES, parent: deeper_internode, current_user: taxon_curator.user )
        overlapping_downstream_taxon_framework = TaxonFramework.make!( taxon: internode, rank_level: Taxon::RANK_LEVELS[Taxon::SPECIES] )
        overlapping_downstream_taxon_framework_taxon_curator = TaxonCurator.make!( taxon_framework: overlapping_downstream_taxon_framework )
        t = Taxon.make( rank: Taxon::SPECIES, parent: deeper_internode, current_user: overlapping_downstream_taxon_framework_taxon_curator.user )
        expect( t ).to be_valid
      end

      it "should allow moving internode" do
        other_root = Taxon.make!( rank: Taxon::FAMILY )
        internode.update_attributes( parent: other_root, current_user: taxon_curator.user )
        expect( internode ).to be_valid
      end

      it "should allow moving tip" do
        other_root = Taxon.make!( rank: Taxon::FAMILY )
        tip.update_attributes( parent: other_root, current_user: taxon_curator.user )
        expect( tip ).to be_valid
      end

      it "should prevent taxon_curator from moving tip covered by a overlapping downstream taxon framework" do
        other_root = Taxon.make!( rank: Taxon::FAMILY )
        deeper_internode = Taxon.make!( rank: Taxon::SUBGENUS, parent: internode, current_user: taxon_curator.user )
        deepertip = Taxon.make!( rank: Taxon::SPECIES, parent: deeper_internode, current_user: taxon_curator.user )
        overlapping_downstream_taxon_framework = TaxonFramework.make!( taxon: internode, rank_level: Taxon::RANK_LEVELS[Taxon::SPECIES] )
        overlapping_downstream_taxon_framework_taxon_curator = TaxonCurator.make!( taxon_framework: overlapping_downstream_taxon_framework )
        deepertip.update_attributes( parent: other_root, current_user: taxon_curator.user )
        expect( deepertip ).not_to be_valid
      end

      it "should allow taxon_curator to move tip with overlapping upstream taxon framework" do
        other_root = Taxon.make!( rank: Taxon::FAMILY )
        deeper_internode = Taxon.make!( rank: Taxon::SUBGENUS, parent: internode, current_user: taxon_curator.user )
        deepertip = Taxon.make!( rank: Taxon::SPECIES, parent: deeper_internode, current_user: taxon_curator.user )
        overlapping_downstream_taxon_framework = TaxonFramework.make!( taxon: internode, rank_level: Taxon::RANK_LEVELS[Taxon::SPECIES] )
        overlapping_downstream_taxon_framework_taxon_curator = TaxonCurator.make!( taxon_framework: overlapping_downstream_taxon_framework )
        deepertip.update_attributes( parent: other_root, current_user: overlapping_downstream_taxon_framework_taxon_curator.user )
        expect( deepertip ).to be_valid
      end
    end
  end
end

# complete_species_count: only meaningful under a framework marked complete.
describe "complete_species_count" do
  it "should be nil if no complete taxon framework" do
    t = Taxon.make!
    expect( t.complete_species_count ).to be_nil
  end

  it "should be set if complete taxon framework exists" do
    ancestor = Taxon.make!( rank: Taxon::FAMILY )
    taxon_framework = TaxonFramework.make!( taxon: ancestor, rank_level: Taxon::RANK_LEVELS[Taxon::SPECIES], complete: true)
    taxon_curator = TaxonCurator.make!( taxon_framework: taxon_framework )
    t = Taxon.make!( parent: ancestor, rank: Taxon::GENUS, current_user: taxon_curator.user )
    expect( t.complete_species_count ).not_to be_nil
    expect( t.complete_species_count ).to eq 0
  end

  # A framework complete only down to GENUS does not claim complete species
  # coverage, so the count stays nil.
  it "should be nil if complete ancestor exists but it is complete at a higher rank" do
    superfamily = Taxon.make!( rank: Taxon::SUPERFAMILY )
    taxon_framework = TaxonFramework.make!( taxon: superfamily, rank_level: Taxon::RANK_LEVELS[Taxon::GENUS], complete: true)
    taxon_curator = TaxonCurator.make!( taxon_framework: taxon_framework )
    family = Taxon.make!( rank: Taxon::FAMILY, parent: superfamily, current_user: taxon_curator.user )
    genus = Taxon.make!( rank: Taxon::GENUS, parent: family, current_user: taxon_curator.user )
    species = Taxon.make!( rank: Taxon::SPECIES, parent: genus, current_user: taxon_curator.user )
    expect( genus.complete_species_count ).to be_nil
  end

  describe "when complete taxon framework" do
    let(:taxon) { Taxon.make!( rank: Taxon::FAMILY ) }
    let(:taxon_framework) { TaxonFramework.make!( complete: true, taxon: taxon) }
    let(:taxon_curator) { TaxonCurator.make!( taxon_framework: taxon_framework ) }

    it "should count species" do
      species = Taxon.make!( rank: Taxon::SPECIES, parent: taxon, current_user: taxon_curator.user )
      expect( taxon.complete_species_count ).to eq 1
    end

    it "should not count genera" do
      genus = Taxon.make!( rank: Taxon::GENUS, parent: taxon, current_user: taxon_curator.user )
      expect( taxon.complete_species_count ).to eq 0
    end

    it "should not count hybrids" do
      hybrid = Taxon.make!( rank: Taxon::HYBRID, parent: taxon, current_user: taxon_curator.user )
      expect( taxon.complete_species_count ).to eq 0
    end

    # Globally extinct species (IUCN EX, no place) are excluded …
    it "should not count extinct species" do
      extinct_species = Taxon.make!( rank: Taxon::SPECIES, parent: taxon, current_user: taxon_curator.user )
      ConservationStatus.make!( taxon: extinct_species, iucn: Taxon::IUCN_EXTINCT, status: "extinct" )
      extinct_species.reload
      expect( extinct_species.conservation_statuses.first.iucn ).to eq Taxon::IUCN_EXTINCT
      expect( extinct_species.conservation_statuses.first.place ).to be_blank
      expect( taxon.complete_species_count ).to eq 0
    end

    # … but merely threatened species still count.
    it "should count species with place-specific non-extinct conservation statuses" do
      cs_species = Taxon.make!( rank: Taxon::SPECIES, parent: taxon, current_user: taxon_curator.user )
      ConservationStatus.make!( taxon: cs_species, iucn: Taxon::IUCN_VULNERABLE, status: "VU" )
      cs_species.reload
      expect( cs_species.conservation_statuses.first.iucn ).to eq Taxon::IUCN_VULNERABLE
      expect( cs_species.conservation_statuses.first.place ).to be_blank
      expect( taxon.complete_species_count ).to eq 1
    end

    it "should not count inactive taxa" do
      species = Taxon.make!( rank: Taxon::SPECIES, parent: taxon, is_active: false, current_user: taxon_curator.user )
      expect( taxon.complete_species_count ).to eq 0
    end
  end
end

# current_synonymous_taxa: follow committed taxon changes (swaps/splits)
# forward to the currently active replacement taxa.
describe "current_synonymous_taxa" do
  let(:curator) { make_curator }

  it "should be the outputs of a split if the split's input was swapped" do
    swap = make_taxon_swap( committer: curator )
    swap.commit
    Delayed::Worker.new.work_off
    split = make_taxon_split( input_taxon: swap.output_taxon, committer: curator )
    split.commit
    Delayed::Worker.new.work_off
    expect( swap.input_taxon.current_synonymous_taxa.map(&:id).sort ).to eq split.output_taxa.map(&:id).sort
  end

  it "should follow splits past subsequent changes" do
    split1 = make_taxon_split( committer: curator )
    split1.commit
    Delayed::Worker.new.work_off
    swap = make_taxon_swap( committer: curator, input_taxon: split1.output_taxa[0] )
    swap.commit
    Delayed::Worker.new.work_off
    split2 = make_taxon_split( committer: curator, input_taxon: split1.output_taxa[1] )
    split2.commit
    Delayed::Worker.new.work_off
    split3 = make_taxon_split( committer: curator, input_taxon: split2.output_taxa[0] )
    split3.commit
    Delayed::Worker.new.work_off
    # The transitive frontier: the swap output, the un-split branch of
    # split2, and all outputs of split3.
    expect( split1.input_taxon.current_synonymous_taxa.map(&:id).sort ).to eq [
      swap.output_taxon.id,
      split2.output_taxa[1].id,
      split3.output_taxa.map(&:id)
    ].flatten.sort
  end
end

# current_synonymous_taxon: single-replacement variant; must be nil/blank
# whenever the answer is ambiguous or cyclic.
describe "current_synonymous_taxon" do
  let(:curator) { make_curator }

  it "should be the output of a first-order swap" do
    swap = make_taxon_swap( committer: curator )
    swap.commit
    expect( swap.input_taxon.current_synonymous_taxon ).to eq swap.output_taxon
  end

  it "should be the output of a second-order swap" do
    swap1 = make_taxon_swap( committer: curator )
    swap1.commit
    swap2 = make_taxon_swap( input_taxon: swap1.output_taxon, committer: curator )
    swap2.commit
    expect( swap1.input_taxon.current_synonymous_taxon ).to eq swap2.output_taxon
  end

  it "should not get stuck in a 1-hop loop" do
    swap1 = make_taxon_swap( committer: curator )
    swap1.commit
    swap2 = make_taxon_swap( input_taxon: swap1.output_taxon, output_taxon: swap1.input_taxon, committer: curator )
    swap2.commit
    expect( swap1.input_taxon.current_synonymous_taxon ).to be_nil
    expect( swap1.output_taxon.current_synonymous_taxon ).to eq swap1.input_taxon
  end

  it "should not get stuck in a 2-hop loop" do
    swap1 = make_taxon_swap( committer: curator )
    swap1.commit
    swap2 = make_taxon_swap( input_taxon: swap1.output_taxon, committer: curator )
    swap2.commit
    swap3 = make_taxon_swap( input_taxon: swap2.output_taxon, output_taxon: swap1.input_taxon, committer: curator )
    swap3.commit
    expect( swap1.input_taxon.current_synonymous_taxon ).to be_nil
    expect( swap1.output_taxon.current_synonymous_taxon ).to eq swap1.input_taxon
  end

  it "should not get stuck in a loop if the taxon has been the input in multiple splits due to reversion" do
    split1 = make_taxon_split( committer: curator )
    split1.commit
    split2 = make_taxon_split( committer: curator, input_taxon: split1.input_taxon )
    split2.commit
    # Only the most recent split's outputs count as current synonyms.
    split1.output_taxa.each do |output_taxon|
      expect( split1.input_taxon.current_synonymous_taxa ).not_to include output_taxon
    end
    split2.output_taxa.each do |output_taxon|
      expect( split2.input_taxon.current_synonymous_taxa ).to include output_taxon
    end
    expect( split1.input_taxon.current_synonymous_taxon ).to be_blank
  end

  it "should not get stuck in a no-hop loop" do
    swap1 = make_taxon_swap( committer: curator )
    swap1.commit
    # creating a case that shouldnt be possible with current code
    # but is possible with older data created before curent validations
    swap2 = make_taxon_swap(
      input_taxon: swap1.output_taxon,
      output_taxon: swap1.output_taxon,
      committer: curator,
      validate: false
    )
    swap2.commit
    swap1.input_taxon.update_attributes(is_active: false)
    swap1.output_taxon.update_attributes(is_active: false)
    expect( swap1.input_taxon.current_synonymous_taxon ).to be_nil
    expect( swap1.output_taxon.current_synonymous_taxon ).to be_nil
  end

  it "should be blank if swapped and then split" do
    swap = make_taxon_swap( committer: curator )
    swap.commit
    split = make_taxon_split( committer: curator, input_taxon: swap.output_taxon )
    split.commit
    expect( swap.input_taxon.current_synonymous_taxon ).to be_blank
  end
end

describe Taxon, "set_photo_from_observations" do
  elastic_models( Observation, Taxon )
  # Regression: a nil ObservationPhoto#position must not break photo choice.
  it "does not throw an error if observation photo positions are nil" do
    t = Taxon.make!( rank: "species" )
    o = make_research_grade_observation( taxon: t )
    ObservationPhoto.make!( observation: o, position: 0, photo: Photo.make!( user: o.user ) )
    ObservationPhoto.make!( observation: o, position: nil, photo: Photo.make!( user: o.user ) )
    expect{ t.set_photo_from_observations }.to_not raise_error
  end
end

describe "taxon_framework_relationship" do
  describe "when taxon has a taxon framework relationship" do
    it "should update taxon framework relationship relationship when taxon name changes" do
      genus = Taxon.make!( name: "Taricha", rank: Taxon::GENUS )
      species = Taxon.make!( name: "Taricha torosa",
rank: Taxon::SPECIES, parent: genus ) tf = TaxonFramework.make!( taxon: genus ) tfr = TaxonFrameworkRelationship.make! species.save species.update_attributes( taxon_framework_relationship_id: tfr.id ) species.reload et = ExternalTaxon.new( name: species.name, rank: "species", parent_name: species.parent.name, parent_rank: species.parent.rank, taxon_framework_relationship_id: tfr.id ) et.save tfr.reload expect(tfr.relationship).to eq "match" species.update_attributes( name: "Taricha granulosa" ) tfr.reload expect( tfr.relationship ).to eq "one_to_one" end end end Add complex taxon name validation spec require File.dirname(__FILE__) + '/../spec_helper.rb' describe Taxon do elastic_models( Observation, Taxon ) before(:all) do load_test_taxa @taxon = @Calypte_anna end it "should have a working #grafted method" do expect(@taxon).to respond_to(:grafted?) expect(@taxon.grafted?).to be(true) ungrafted = Taxon.create( :name => 'Pseudacris crucifer', # Spring Peeper :rank => 'species' ) expect(ungrafted.grafted?).to be(false) expect(@Animalia.grafted?).to be(true) end it "species_or_lower? should be false for Animalia" do expect(@Animalia.species_or_lower?).to be(false) end it "species_or_lower? should be true for Pseudacris regilla" do expect(@Pseudacris_regilla.species_or_lower?).to be(true) end it "has rank levels for stateofmatter and root" do expect( Taxon::STATEOFMATTER_LEVEL ).to eq 100 expect( Taxon::ROOT_LEVEL ).to eq 100 expect( Taxon::ROOT_LEVEL ).to eq Taxon::STATEOFMATTER_LEVEL end end describe Taxon, "creation" do elastic_models( Observation, Taxon ) it "should set an iconic taxon if this taxon was grafted" do load_test_taxa taxon = Taxon.make!( name: "Pseudacris imaginarius", rank: Taxon::SPECIES ) taxon.parent = @Pseudacris taxon.save! expect( taxon ).to be_grafted taxon.reload expect( taxon.iconic_taxon ).to eq @Amphibia end it "should create a taxon name with the same name after save" do t = Taxon.make! 
expect( t.taxon_names ).not_to be_empty expect( t.taxon_names.map(&:name) ).to include( t.name ) end it "should create a taxon name with the same name after save even if invalid on source_identifier" do source_identifier = "foo" source = Source.make! existing = TaxonName.make!(:source => source, :source_identifier => source_identifier) t = Taxon.make!(:source => source, :source_identifier => source_identifier) expect(t.taxon_names.map(&:name)).to include(t.name) end it "should capitalize its name" do taxon = Taxon.new(:name => 'balderdash', :rank => 'genus') taxon.save expect(taxon.name).to eq 'Balderdash' end it "should capitalize genushybrids with leading x correclty" do taxon = Taxon.make!( name: "× chitalpa", rank: Taxon::GENUSHYBRID ) expect( taxon.name ).to eq "× Chitalpa" taxon = Taxon.make!( name: "× Chitalpa", rank: Taxon::GENUSHYBRID ) expect( taxon.name ).to eq "× Chitalpa" end it "should capitalize Foo x Bar style genushybrids correctly" do taxon = Taxon.make!( name: "foo × bar", rank: Taxon::GENUSHYBRID ) expect( taxon.name ).to eq "Foo × Bar" taxon = Taxon.make!( name: "Foo × Bar", rank: Taxon::GENUSHYBRID ) expect( taxon.name ).to eq "Foo × Bar" end it "should capitalize hybrid species in genushybrids correctly" do taxon = Taxon.make!( name: "Foo bar × Baz roq", rank: Taxon::HYBRID ) expect( taxon.name ).to eq "Foo bar × Baz roq" end it "should not fail on poorly-formatted hybrid names" do [ "Carex × leutzii pseudofulva", "Calystegia sepium roseata × c tuguriorum" ].each do |name| taxon = Taxon.make!( name: name, rank: Taxon::HYBRID ) expect( taxon ).to be_valid end end it "should capitalize hybrid names of the form Genus species1 x species2" do taxon = Taxon.make!( name: "genusone speciesone × speciestwo", rank: Taxon::HYBRID ) expect( taxon.name ).to eq "Genusone speciesone × speciestwo" end it "should set the rank_level based on the rank" do t = Taxon.make! 
expect( t.rank_level ).to eq Taxon::RANK_LEVELS[t.rank] end it "should remove leading rank from the name" do t = Taxon.make!( name: "Gen Pseudacris" ) expect( t.name ).to eq "Pseudacris" end it "should remove internal 'var' from name" do t = Taxon.make!( name: "Quercus agrifolia var. agrifolia" ) expect( t.name ).to eq "Quercus agrifolia agrifolia" end it "should remove internal 'ssp' from name" do t = Taxon.make!( name: "Quercus agrifolia ssp. agrifolia" ) expect( t.name ).to eq "Quercus agrifolia agrifolia" end it "should remove internal 'subsp' from name" do t = Taxon.make!( name: "Quercus agrifolia subsp. agrifolia" ) expect( t.name ).to eq "Quercus agrifolia agrifolia" end it "should allow fo as a specific epithet" do name = "Mahafalytenus fo" t = Taxon.make!( name: name ) expect( t.name ).to eq name end it "should create TaxonAncestors" do parent = Taxon.make!( rank: Taxon::GENUS ) t = Taxon.make!( rank: Taxon::SPECIES, parent: parent ) t.reload expect( t.taxon_ancestors ).not_to be_blank end it "should strip trailing space" do expect( Taxon.make!( name: "Trailing space " ).name ).to eq "Trailing space" end it "should strip leading space" do expect( Taxon.make!( name: " Leading space" ).name ).to eq "Leading space" end it "should prevent creating a taxon with a rank coarser than the parent" do parent = Taxon.make!( rank: Taxon::GENUS ) taxon = Taxon.new(name: 'balderdash', rank: Taxon::FAMILY, parent: parent ) taxon.save taxon.valid? 
expect(taxon.errors).not_to be_blank end it "should prevent creating an active taxon with an inactive parent" do parent = Taxon.make!( rank: Taxon::GENUS, is_active: false ) taxon = Taxon.new(name: 'balderdash', rank: Taxon::SPECIES, parent: parent ) taxon.save expect(taxon.errors).not_to be_blank end it "should allow creating an active taxon with an inactive parent if output of draft taxon change" do input_taxon = Taxon.make!( rank: Taxon::GENUS, is_active: true ) output_taxon = Taxon.make!( rank: Taxon::GENUS, is_active: false ) swap = TaxonSwap.make swap.add_input_taxon(input_taxon) swap.add_output_taxon(output_taxon) swap.save! taxon = Taxon.new(name: 'balderdash', rank: Taxon::SPECIES, parent: output_taxon ) taxon.save taxon.valid? expect(taxon.errors).to be_blank end it "should prevent grafting an active taxon to an inactive parent" do parent = Taxon.make!( rank: Taxon::GENUS, is_active: false ) taxon = Taxon.make!(name: 'balderdash', rank: Taxon::SPECIES) expect(taxon.parent_id).not_to be(parent.id) taxon.parent = parent taxon.save taxon.reload expect(taxon.parent_id).not_to be(parent.id) end it "should allow grafting an active taxon to an inactive parent if output of draft taxon change" do input_taxon = Taxon.make!( rank: Taxon::GENUS, is_active: true ) output_taxon = Taxon.make!( rank: Taxon::GENUS, is_active: false ) swap = TaxonSwap.make swap.add_input_taxon(input_taxon) swap.add_output_taxon(output_taxon) swap.save! taxon = Taxon.make!(name: 'balderdash', rank: Taxon::SPECIES) expect(taxon.parent_id).not_to be(output_taxon.id) taxon.parent = output_taxon taxon.save taxon.reload expect(taxon.parent_id).to be(output_taxon.id) end context "for complex" do before(:all) { load_test_taxa } let(:child_species) { Taxon.make! name: "Lontra canadensis", rank: "species", parent: @Hylidae } let(:child_complex) { Taxon.make! name: "Lontra canadensis", rank: "complex", parent: @Hylidae } let(:child_complex2) { Taxon.make! 
name: "Lontra canadensis", rank: "complex", parent: @Hylidae } it "should validate unique name scoped to ancestry and rank" do child_complex expect{ child_complex2 }.to raise_exception ActiveRecord::RecordInvalid, "Validation failed: Name already used as a child complex of this taxon's parent" end it "should allow sibling if no other complex shares name" do child_species expect{ child_complex }.not_to raise_exception end end end describe Taxon, "updating" do elastic_models( Observation, Taxon ) it "should update the ancestry col of all associated listed_taxa" it "should not destroy photos that have observations" do t = Taxon.make! o = Observation.make! p = Photo.make! t.photos << p make_observation_photo( observation: o, photo: p ) t.photos = [Photo.make!] o.reload expect(o.photos).not_to be_blank end it "should strip trailing space" do t = Taxon.make!( name: "No trailing space" ) t.update_attributes( name: "Trailing space " ) expect( t.name ).to eq "Trailing space" end it "should strip leading space" do t = Taxon.make!( name: "No leading space" ) t.update_attributes( name: " Leading space" ) expect( t.name ).to eq "Leading space" end it "should prevent updating a taxon rank to be coarser than the parent" do parent = Taxon.make!( rank: Taxon::GENUS ) taxon = Taxon.new(name: 'balderdash', rank: Taxon::SPECIES, parent: parent ) taxon.save taxon.valid? expect(taxon.errors).to be_blank taxon.update_attributes( rank: Taxon::FAMILY ) expect(taxon.errors).not_to be_blank end it "should prevent updating a taxon rank to be same rank as child" do parent = Taxon.make!( rank: Taxon::GENUS ) taxon = Taxon.new(name: 'balderdash', rank: Taxon::SPECIES, parent: parent ) taxon.save taxon.valid? 
expect(taxon.errors).to be_blank parent.update_attributes( rank: Taxon::SPECIES ) expect(parent.errors).not_to be_blank end it "should prevent updating a taxon to be inactive if it has active children" do taxon = Taxon.make!(name: 'balderdash', rank: Taxon::GENUS ) child = Taxon.make!(name: 'balderdash foo', rank: Taxon::SPECIES, parent: taxon ) taxon.valid? expect(taxon.errors).to be_blank taxon.update_attributes( is_active: false ) expect(taxon.errors).not_to be_blank end it "should allow updating a taxon to be inactive if it has active children but move children is checked" do taxon = Taxon.make!(name: 'balderdash', rank: Taxon::GENUS ) child = Taxon.make!(name: 'balderdash foo', rank: Taxon::SPECIES, parent: taxon ) taxon.valid? expect(taxon.errors).to be_blank taxon.update_attributes( is_active: false, skip_only_inactive_children_if_inactive: true ) expect(taxon.errors).to be_blank end it "should prevent updating a taxon to be active if it has an inactive parent" do parent = Taxon.make!(name: 'balderdash', rank: Taxon::GENUS, is_active: false ) taxon = Taxon.make!(name: 'balderdash foo', rank: Taxon::SPECIES, parent: parent, is_active: false ) taxon.valid? expect(taxon.errors).to be_blank taxon.update_attributes( is_active: true ) expect(taxon.errors).not_to be_blank end it "should allow updating a taxon to be active if it has an inactive parent if output of draft taxon change" do input_taxon = Taxon.make!( rank: Taxon::GENUS, is_active: true ) output_taxon = Taxon.make!(name: 'balderdash', rank: Taxon::GENUS, is_active: false ) swap = TaxonSwap.make swap.add_input_taxon(input_taxon) swap.add_output_taxon(output_taxon) swap.save! taxon = Taxon.make!(name: 'balderdash foo', rank: Taxon::SPECIES, parent: output_taxon, is_active: false ) taxon.valid? 
expect(taxon.errors).to be_blank taxon.update_attributes( is_active: true ) expect(taxon.errors).to be_blank end describe "auto_description" do it "should remove the wikipedia_summary when it changes to false" do t = Taxon.make!( wikipedia_summary: "foo" ) expect( t.wikipedia_summary ).not_to be_blank t.update_attributes( auto_description: false ) t.reload expect( t.wikipedia_summary ).to be_blank end end it "should assign the updater if explicitly assigned" do creator = make_curator updater = make_curator t = Taxon.make!( creator: creator, updater: creator, rank: Taxon::FAMILY ) expect( t.updater ).to eq creator t.reload t.update_attributes( rank: Taxon::GENUS, updater: updater ) t.reload expect( t.updater ).to eq updater end it "should nilify the updater if not explicitly assigned" do creator = make_curator updater = make_curator t = Taxon.make!( creator: creator, updater: creator, rank: Taxon::FAMILY ) expect( t.updater ).to eq creator t = Taxon.find_by_id( t.id ) t.update_attributes( rank: Taxon::GENUS ) t.reload expect( t.updater ).to be_blank end describe "reindexing identifications" do elastic_models( Identification ) it "should happen when the rank_level changes" do t = Taxon.make!( rank: Taxon::SUBCLASS ) i = Identification.make!( taxon: t ) Delayed::Worker.new.work_off t.reload expect( t.rank_level ).to eq Taxon::SUBCLASS_LEVEL i_es = Identification.elastic_search( where: { id: i.id } ).results.results.first expect( i_es.taxon.rank_level ).to eq t.rank_level t.update_attributes( rank: Taxon::CLASS ) Delayed::Worker.new.work_off t.reload expect( t.rank_level ).to eq Taxon::CLASS_LEVEL i_es = Identification.elastic_search( where: { id: i.id } ).results.results.first expect( i_es.taxon.rank_level ).to eq t.rank_level end end end describe Taxon, "destruction" do elastic_models( Observation, Taxon ) it "should work" do Taxon.make!.destroy end it "should queue a job to destroy descendants if orphaned" do load_test_taxa Delayed::Job.delete_all stamp = Time.now 
@Apodiformes.destroy jobs = Delayed::Job.where("created_at >= ?", stamp) expect(jobs.select{|j| j.handler =~ /apply_orphan_strategy/m}).not_to be_blank end end describe Taxon, "orphan descendant destruction" do elastic_models( Observation, Taxon ) before(:each) do load_test_taxa end it "should work" do child_ancestry_was = @Apodiformes.child_ancestry @Apodiformes.update_attributes(:parent => nil) Taxon.update_descendants_with_new_ancestry(@Apodiformes.id, child_ancestry_was) expect(@Apodiformes.descendants).to include(@Calypte_anna) child_ancestry_was = @Apodiformes.child_ancestry @Apodiformes.destroy Taxon.apply_orphan_strategy(child_ancestry_was) expect(Taxon.find_by_id(@Calypte_anna.id)).to be_blank end end describe Taxon, "making iconic" do before(:each) do load_test_taxa end it "should set the iconic taxa of descendant taxa to this taxon" do expect(@Calypte_anna.iconic_taxon_id).to be(@Aves.id) @Apodiformes.update_attributes(:is_iconic => true) @Calypte_anna.reload expect(@Calypte_anna.iconic_taxon_id).to be(@Apodiformes.id) end it "should queue a job to change the iconic taxon of descendent observations" do expect { @Apodiformes.update_attributes(:is_iconic => true) }.to change(Delayed::Job, :count).by_at_least(1) end it "should NOT set the iconic taxa of descendant taxa if they descend from a lower iconic taxon" do expect(@Aves).to be_is_iconic expect(@Chordata).not_to be_is_iconic expect(@Calypte_anna.iconic_taxon_id).to be(@Aves.id) @Chordata.update_attributes(:is_iconic => true) @Calypte_anna.reload expect(@Calypte_anna.iconic_taxon_id).to be(@Aves.id) end end describe "Updating iconic taxon" do before(:each) do load_test_taxa end it "should set the iconic taxa of descendant taxa" do expect(@Calypte_anna.iconic_taxon_id).to be(@Aves.id) @Calypte.update_attributes(:iconic_taxon => @Apodiformes) @Calypte_anna.reload expect(@Calypte_anna.iconic_taxon_id).to be(@Apodiformes.id) end it "should queue a job to change the iconic taxon of descendent observations" 
do expect { @Calypte.update_attributes(:iconic_taxon => @Apodiformes) }.to change(Delayed::Job, :count).by_at_least(1) end it "should NOT set the iconic taxa of descendant taxa if they descend from a lower iconic taxon" do expect(@Aves).to be_is_iconic expect(@Chordata).not_to be_is_iconic expect(@Calypte_anna.iconic_taxon_id).to be(@Aves.id) @Chordata.update_attributes(:iconic_taxon => @Plantae) @Calypte_anna.reload expect(@Calypte_anna.iconic_taxon_id).to be(@Aves.id) end end describe Taxon, "set_iconic_taxon_for_observations_of" do elastic_models( Observation, Taxon ) before(:each) do load_test_taxa end it "should set the iconic taxon for observations of descendant taxa" do obs = without_delay { Observation.make!(:taxon => @Calypte_anna) } expect(@Calypte_anna.iconic_taxon.name).to eq @Aves.name expect(obs.iconic_taxon.name).to eq @Calypte_anna.iconic_taxon.name @Calypte.update_attributes(:iconic_taxon => @Amphibia) expect(@Calypte.iconic_taxon.name).to eq @Amphibia.name @Calypte_anna.reload expect(@Calypte_anna.iconic_taxon.name).to eq @Amphibia.name Taxon.set_iconic_taxon_for_observations_of(@Calypte) obs.reload expect(obs.iconic_taxon.name).to eq @Amphibia.name end it "should not change the iconc taxon for observations of other taxa" do bird_obs = Observation.make!(:taxon => @Calypte_anna) frog_obs = Observation.make!(:taxon => @Pseudacris_regilla) expect(bird_obs.iconic_taxon).to eq @Aves expect(frog_obs.iconic_taxon).to eq @Amphibia @Pseudacris.update_attributes(:iconic_taxon => @Plantae) Taxon.set_iconic_taxon_for_observations_of(@Pseudacris) frog_obs.reload expect(frog_obs.iconic_taxon).to eq @Plantae bird_obs.reload expect(bird_obs.iconic_taxon).to eq @Aves end it "should NOT set the iconic taxa of observations of descendant taxa if they descend from a lower iconic taxon" do expect(@Aves).to be_is_iconic expect(@Chordata).not_to be_is_iconic expect(@Calypte_anna.iconic_taxon_id).to be(@Aves.id) expect(@Calypte_anna.ancestor_ids).to include(@Aves.id) 
expect(@Calypte_anna.ancestor_ids).to include(@Chordata.id) obs = Observation.make!(:taxon => @Calypte_anna) expect(obs.iconic_taxon).to eq @Aves @Chordata.update_attributes(:iconic_taxon => @Plantae) Taxon.set_iconic_taxon_for_observations_of(@Chordata) @Calypte_anna.reload expect(@Calypte_anna.iconic_taxon).to eq @Aves obs.reload expect(obs.iconic_taxon).to eq @Aves end end describe Taxon, "normalize_rank" do it "should normalize weird ranks" do expect(Taxon.normalize_rank('sp')).to eq 'species' expect(Taxon.normalize_rank('ssp')).to eq 'subspecies' expect(Taxon.normalize_rank('Gen')).to eq 'genus' end it "should normalize ranks with punctuation" do expect(Taxon.normalize_rank('super-order')).to eq 'superorder' end end describe Taxon, "unique name" do it "should be the default_name by default" do taxon = Taxon.make!(:name => "I am galactus") expect(taxon.unique_name).to eq taxon.default_name.name.downcase end it "should be the scientific name if the common name is already another taxon's unique name" do taxon = Taxon.make! common_name = TaxonName.make!(:name => "Most Awesome Radicalbird", :taxon => taxon, :lexicon => TaxonName::LEXICONS[:ENGLISH]) taxon.save taxon.reload expect(taxon.unique_name).to eq taxon.common_name.name.downcase new_taxon = Taxon.make!(:name => "Ballywickia purhiensis", :rank => 'species') new_taxon.taxon_names << TaxonName.make!( :name => taxon.common_name.name, :lexicon => TaxonName::LEXICONS[:ENGLISH] ) new_taxon.reload expect(new_taxon.unique_name).to eq new_taxon.name.downcase end it "should be nil if all else fails" do taxon = Taxon.make! 
# unique name should be the common name common_name = TaxonName.make!( :taxon => taxon, :lexicon => TaxonName::LEXICONS[:ENGLISH]) other_taxon = new_taxon = Taxon.make!(:name => taxon.name) # unique name should be the sciname new_taxon = Taxon.make!(:name => taxon.name) new_common_name = TaxonName.make!(:name => common_name.name, :taxon => new_taxon, :lexicon => TaxonName::LEXICONS[:ENGLISH]) new_taxon.reload new_taxon.taxon_names.each do |tn| puts "#{tn} was invalid: " + tn.errors.full_messages.join(', ') unless tn.valid? end puts "new_taxon was invalid: " + new_taxon.errors.full_messages.join(', ') unless new_taxon.valid? expect(new_taxon.unique_name).to be_nil end it "should work if there are synonyms in different lexicons" do taxon = Taxon.make! name1 = TaxonName.make!(:taxon => taxon, :name => "foo", :lexicon => TaxonName::LEXICONS[:ENGLISH]) name2 = TaxonName.make!(:taxon => taxon, :name => "Foo", :lexicon => TaxonName::LEXICONS[:SPANISH]) taxon.reload expect(taxon.unique_name).not_to be_blank expect(taxon.unique_name).to eq "foo" end it "should not contain punctuation" do taxon = Taxon.make! TaxonName.make!(:taxon => taxon, :name => "St. Gerome's Radical Snake", :lexicon => TaxonName::LEXICONS[:ENGLISH]) taxon.reload expect(taxon.unique_name).not_to match(/[\.\'\?\!\\\/]/) end end describe Taxon, "common_name" do it "should default to English if present" do t = Taxon.make! tn_en = TaxonName.make!(:taxon => t, :name => "Red Devil", :lexicon => TaxonName::LEXICONS[:ENGLISH]) tn_es = TaxonName.make!(:taxon => t, :name => "Diablo Rojo", :lexicon => TaxonName::LEXICONS[:SPANISH]) tn_un = TaxonName.make!(:taxon => t, :name => "run away!", :lexicon => 'unspecified') expect(t.common_name).to eq(tn_en) end it "should not default to first common if no English or unknown" do t = Taxon.make! 
tn_es = TaxonName.make!(:taxon => t, :name => "Diablo Rojo", :lexicon => TaxonName::LEXICONS[:SPANISH]) expect(t.common_name).to be_blank end end describe Taxon, "tags_to_taxa" do it "should find Animalia and Mollusca" do animalia = Taxon.make!( rank: Taxon::PHYLUM, name: "Animalia" ) aves = Taxon.make!( rank: Taxon::CLASS, name: "Aves", parent: animalia ) taxa = Taxon.tags_to_taxa( ["Animalia", "Aves"] ) expect( taxa ).to include( animalia ) expect( taxa ).to include( aves ) end it "should work on taxonomic machine tags" do animalia = Taxon.make!( rank: Taxon::PHYLUM, name: "Animalia" ) aves = Taxon.make!( rank: Taxon::CLASS, name: "Aves", parent: animalia ) calypte_anna = Taxon.make!( rank: Taxon::SPECIES, name: "Calypte anna" ) taxa = Taxon.tags_to_taxa( [ "taxonomy:kingdom=Animalia", "taxonomy:class=Aves", "taxonomy:binomial=Calypte anna" ] ) expect( taxa ).to include( animalia ) expect( taxa ).to include( aves ) expect( taxa ).to include( calypte_anna ) end it "should not find inactive taxa" do active_taxon = Taxon.make! inactive_taxon = Taxon.make!(:name => active_taxon.name, :is_active => false) taxa = Taxon.tags_to_taxa([active_taxon.name]) expect(taxa).to include(active_taxon) expect(taxa).not_to include(inactive_taxon) end it "should work for sp" do taxon = Taxon.make!( rank: Taxon::GENUS, name: "Mycena" ) taxa = Taxon.tags_to_taxa( ["#{taxon.name} sp"] ) expect( taxa ).to include( taxon ) end it "should work for sp." 
do taxon = Taxon.make!( rank: Taxon::GENUS, name: "Mycena" ) taxa = Taxon.tags_to_taxa( ["#{taxon.name} sp."] ) expect( taxa ).to include( taxon ) end it "should not strip out sp from Spizella" do t = Taxon.make!(:name => 'Spizella') taxa = Taxon.tags_to_taxa(['Spizella']) expect(taxa).to include(t) end it "should choose names before codes" do code_name = TaxonName.make!(:name => "HOME", :lexicon => "AOU Codes") name_name = TaxonName.make!(:name => "Golden-crowned Sparrow", :lexicon => "AOU Codes") taxa = Taxon.tags_to_taxa([code_name.name, name_name.name]) expect(taxa.first).to eq name_name.taxon end it "should not match a code if it's not an exact match" do code_name = TaxonName.make!(:name => "HOME", :lexicon => "AOU Codes") taxa = Taxon.tags_to_taxa([code_name.name.downcase]) expect(taxa).to be_blank end it "should favor longer names" do short_name = TaxonName.make!(:name => "bork", :lexicon => "English") long_name = TaxonName.make!(:name => "Giant Dour-Crested Mopple Hopper", :lexicon => "English") taxa = Taxon.tags_to_taxa([short_name.name, long_name.name]) expect(taxa.first).to eq long_name.taxon end it "should work there are inexact matches" do t = Taxon.make! TaxonName.make!(:name => "Nutria", :taxon => t, :lexicon => "English") TaxonName.make!(:name => "nutria", :taxon => t, :lexicon => "French") expect(Taxon.tags_to_taxa(%w(Nutria))).to include t end it "should not match problematic names" do Taxon::PROBLEM_NAMES.each do |name| t = Taxon.make(:name => name.capitalize) if t.valid? 
expect( Taxon.tags_to_taxa( [name, name.capitalize] ) ).to be_blank end end end it "should not match scientifc names that are 2 letters or less" do %w(Aa Io La).each do |name| t = Taxon.make!( name: name, rank: Taxon::GENUS ) expect( Taxon.tags_to_taxa( [name, name.downcase ] ) ).to be_blank end end it "should not match abbreviated month names" do %w(Mar May Jun Nov).each do |name| t = Taxon.make!( name: name, rank: Taxon::GENUS ) expect( Taxon.tags_to_taxa( [name, name.downcase ] ) ).to be_blank end end end describe Taxon, "merging" do elastic_models( Observation, Taxon ) before(:all) { load_test_taxa } before(:each) do # load_test_taxa @keeper = Taxon.make!( name: "Calypte keeper", rank: Taxon::SPECIES, parent: @Calypte ) @reject = Taxon.make!( :name => "Calypte reject", rank: Taxon::SPECIES, parent: @Calypte ) @has_many_assocs = Taxon.reflections.select{|k,v| v.macro == :has_many}.map{|k,v| k} @has_many_assocs.each {|assoc| @reject.send(assoc, :force_reload => true)} end it "should move the reject's children to the keeper" do child = Taxon.make!( name: "Calypte reject rejectus", parent: @reject, rank: Taxon::SUBSPECIES ) rejected_children = @reject.children expect(rejected_children).not_to be_empty @keeper.merge( @reject ) rejected_children.each do |c| c.reload expect( c.parent_id ).to eq @keeper.parent_id end end it "should move the reject's taxon_names to the keeper" do rejected_taxon_names = @reject.taxon_names expect(rejected_taxon_names).not_to be_empty @keeper.merge(@reject) rejected_taxon_names.each do |taxon_name| taxon_name.reload expect(taxon_name.taxon_id).to be(@keeper.id) end end it "should move the reject's taxon_names to the keeper even if they don't have a lexicon" do @reject.taxon_names << TaxonName.new(:name => "something") rejected_taxon_names = @reject.taxon_names expect(rejected_taxon_names).not_to be_empty @keeper.merge(@reject) rejected_taxon_names.each do |taxon_name| taxon_name.reload expect(taxon_name.taxon_id).to be(@keeper.id) end end 
it "should move the reject's observations to the keeper" do 2.times do Observation.make!(:taxon => @reject) end rejected_observations = @reject.observations.all expect(rejected_observations).not_to be_empty @keeper.merge(@reject) rejected_observations.each do |observation| observation.reload expect(observation.taxon_id).to be(@keeper.id) end end it "should move the reject's listed_taxa to the keeper" do 3.times do ListedTaxon.make!(:taxon => @reject) end rejected_listed_taxa = @reject.listed_taxa.all expect(rejected_listed_taxa).not_to be_empty @keeper.merge(@reject) rejected_listed_taxa.each do |listed_taxon| listed_taxon.reload expect(listed_taxon.taxon_id).to be(@keeper.id) end end it "should move the reject's list_rules to the keeper" do rule = ListRule.make!(:operand => @Amphibia, :operator => "in_taxon?") reject = rule.operand keeper = Taxon.make keeper.name = "Amphibiatwo" keeper.unique_name = "Amphibiatwo" keeper.save keeper.update_attributes(:parent => reject.parent) keeper.merge(reject) rule.reload expect(rule.operand_id).to be(keeper.id) end it "should move the reject's identifications to the keeper" do 3.times do Identification.make!(:taxon => @reject) end rejected_identifications = @reject.identifications.all expect(rejected_identifications).not_to be_empty @keeper.merge(@reject) rejected_identifications.each do |identification| identification.reload expect(identification.taxon_id).to be(@keeper.id) end end it "should move the reject's taxon_links to the keeper" do 3.times do TaxonLink.make!(:taxon => @reject) end rejected_taxon_links = @reject.taxon_links.all expect(rejected_taxon_links).not_to be_empty @keeper.merge(@reject) rejected_taxon_links.each do |taxon_link| taxon_link.reload expect(taxon_link.taxon_id).to be(@keeper.id) end end it "should move the reject's taxon_photos to the keeper" do 3.times do TaxonPhoto.make!(:taxon => @reject) end rejected_taxon_photos = @reject.taxon_photos.all expect(rejected_taxon_photos).not_to be_empty 
@keeper.merge(@reject) rejected_taxon_photos.each do |taxon_photo| taxon_photo.reload expect(taxon_photo.taxon_id).to be(@keeper.id) end end it "should mark scinames not matching the keeper as invalid" do old_sciname = @reject.scientific_name expect(old_sciname).to be_is_valid @keeper.merge(@reject) old_sciname.reload expect(old_sciname).not_to be_is_valid end it "should delete duplicate taxon_names from the reject" do old_sciname = @reject.scientific_name synonym = old_sciname.dup synonym.is_valid = false @keeper.taxon_names << synonym @keeper.merge(@reject) expect(TaxonName.find_by_id(old_sciname.id)).to be_nil end it "should delete listed_taxa from the reject that are invalid" it "should destroy the reject" do @keeper.merge(@reject) expect(Taxon.find_by_id(@reject.id)).to be_nil end it "should not create duplicate listed taxa" do lt1 = ListedTaxon.make!(:taxon => @keeper) lt2 = ListedTaxon.make!(:taxon => @reject, :list => lt1.list) @keeper.merge(@reject) expect(lt1.list.listed_taxa.where(taxon_id: @keeper.id).count).to eq 1 end it "should set iconic taxa on children" do reject = Taxon.make!(rank: "species") child = Taxon.make!(parent: reject, rank: "subspecies") expect(child.iconic_taxon_id).not_to eq @keeper.iconic_taxon_id expect(child.iconic_taxon_id).to eq reject.iconic_taxon_id @keeper.merge(reject) child.reload expect(child.iconic_taxon_id).to eq @keeper.iconic_taxon_id end it "should set iconic taxa on descendants" do expect(@Calypte_anna.iconic_taxon_id).not_to eq @Pseudacris.iconic_taxon_id @Pseudacris.merge(@Calypte) @Calypte_anna.reload expect(@Calypte_anna.iconic_taxon_id).to eq @Pseudacris.iconic_taxon_id end it "should queue a job to set iconic taxon on observations of descendants" do Delayed::Job.delete_all stamp = Time.now @Pseudacris.merge(@Calypte) jobs = Delayed::Job.where("created_at >= ?", stamp) expect(jobs.select{|j| j.handler =~ /set_iconic_taxon_for_observations_of/m}).not_to be_blank end it "should remove duplicate schemes" do ts = 
TaxonScheme.make! t1 = Taxon.make! t1.taxon_schemes << ts t2 = Taxon.make! t2.taxon_schemes << ts t1.merge(t2) t1.reload expect(t1.taxon_schemes.size).to eq(1) end it "should set iconic taxon for observations of reject" do reject = Taxon.make! o = without_delay {Observation.make!(:taxon => reject)} expect(o.iconic_taxon).to be_blank without_delay {@keeper.merge(reject)} o.reload expect(o.iconic_taxon).to eq(@keeper.iconic_taxon) end it "should update subscriptions" do s = Subscription.make!(:resource => @reject) @keeper.merge(@reject) s.reload expect(s.resource).to eq @keeper end it "should not alter with subscriptions to other classess" do reject = Taxon.make!(:id => 888) keeper = Taxon.make!(:id => 999) o = Observation.make!(:id => 888) s = Subscription.make!(:resource => o) keeper.merge(reject) s.reload expect(s.resource).to eq(o) end it "should work with denormalized ancestries" do AncestryDenormalizer.truncate expect(TaxonAncestor.count).to eq 0 AncestryDenormalizer.denormalize expect { @keeper.merge(@reject) }.not_to raise_error end end describe Taxon, "moving" do elastic_models( Observation, Taxon, Identification ) before(:all) do load_test_taxa end let(:obs) do t = Taxon.make!( name: "Calypte test", rank: Taxon::SPECIES, parent: @Calypte ) obs = Observation.make!( taxon: t ) end let(:hummer_genus) { Taxon.make!( rank: Taxon::GENUS, parent: @Trochilidae ) } it "should update the iconic taxon of observations" do old_iconic_id = obs.iconic_taxon_id taxon = obs.taxon taxon.move_to_child_of(@Amphibia) taxon.reload obs.reload expect(obs.iconic_taxon_id).not_to be(old_iconic_id) expect(obs.iconic_taxon_id).to be(taxon.iconic_taxon_id) end it "should queue a job to set iconic taxon on observations of descendants" do old_iconic_id = obs.iconic_taxon_id taxon = obs.taxon Delayed::Job.delete_all stamp = Time.now taxon.parent.move_to_child_of(@Amphibia) jobs = Delayed::Job.where("created_at >= ?", stamp) expect(jobs.select{|j| j.handler =~ 
/set_iconic_taxon_for_observations_of/m}).not_to be_blank end it "should set iconic taxon on observations of descendants" do old_iconic_id = obs.iconic_taxon_id taxon = obs.taxon without_delay do taxon.parent.move_to_child_of(@Amphibia) end obs.reload expect(obs.iconic_taxon).to eq(@Amphibia) end it "should set iconic taxon on observations of descendants if grafting for the first time" do parent = Taxon.make!(rank: Taxon::GENUS) taxon = Taxon.make!(parent: parent, rank: Taxon::SPECIES) o = without_delay { Observation.make!(:taxon => taxon) } expect(o.iconic_taxon).to be_blank without_delay do parent.move_to_child_of(@Amphibia) end o.reload expect(o.iconic_taxon).to eq(@Amphibia) end it "should not raise an exception if the new parent doesn't exist" do taxon = Taxon.make! bad_id = Taxon.last.id + 1 expect { taxon.parent_id = bad_id }.not_to raise_error end # this is something we override from the ancestry gem it "should queue a job to update descendant ancestries" do Delayed::Job.delete_all stamp = Time.now hummer_genus.update_attributes( parent: @Hylidae ) jobs = Delayed::Job.where("created_at >= ?", stamp) expect(jobs.select{|j| j.handler =~ /update_descendants_with_new_ancestry/m}).not_to be_blank end it "should not queue a job to update descendant ancetries if skip_after_move set" do Delayed::Job.delete_all stamp = Time.now hummer_genus.update_attributes(:parent => @Hylidae, :skip_after_move => true) jobs = Delayed::Job.where("created_at >= ?", stamp) expect(jobs.select{|j| j.handler =~ /update_descendants_with_new_ancestry/m}).not_to be_blank end it "should queue a job to update observation stats if there are observations" do Delayed::Job.delete_all stamp = Time.now o = Observation.make!( taxon: hummer_genus ) expect( Observation.of( hummer_genus ).count ).to eq 1 hummer_genus.update_attributes( parent: @Hylidae ) jobs = Delayed::Job.where( "created_at >= ?", stamp ) expect( jobs.select{|j| j.handler =~ /update_stats_for_observations_of/m} ).not_to be_blank end 
  it "should update community taxa" do
    fam = Taxon.make!( name: "Familyone", rank: "family")
    subfam = Taxon.make!( name: "Subfamilyone", rank: "subfamily", parent: fam )
    gen = Taxon.make!( name: "Genusone", rank: "genus", parent: fam )
    sp = Taxon.make!( name: "Species one", rank: "species", parent: gen )
    o = Observation.make!
    i1 = Identification.make!(:observation => o, :taxon => subfam)
    i2 = Identification.make!(:observation => o, :taxon => sp)
    expect(Identification.of(gen).exists?).to be true
    o.reload
    # With disagreeing idents (subfamily vs. species) the community taxon is
    # their common ancestor, the family...
    expect(o.taxon).to eq fam
    Delayed::Worker.new.work_off
    without_delay do
      gen.update_attributes(:parent => subfam)
    end
    o.reload
    # ...but once the genus nests under the subfamily, the idents agree down
    # to species.
    expect(o.taxon).to eq sp
  end

  it "should create TaxonAncestors" do
    t = Taxon.make!( rank: Taxon::SPECIES, name: "Ronica vestrit" )
    expect( t.taxon_ancestors.count ).to eq 1 # should always make one for itself
    t.move_to_child_of( @Calypte )
    t.reload
    expect( t.taxon_ancestors.count ).to be > 1
    expect( t.taxon_ancestors.detect{ |ta| ta.ancestor_taxon_id == @Calypte.id } ).not_to be_blank
  end

  it "should remove existing TaxonAncestors" do
    t = Taxon.make!( rank: Taxon::SPECIES, parent: @Calypte )
    expect( TaxonAncestor.where( taxon_id: t.id, ancestor_taxon_id: @Calypte.id ).count ).to eq 1
    t.move_to_child_of( @Pseudacris )
    expect( TaxonAncestor.where( taxon_id: t.id, ancestor_taxon_id: @Calypte.id ).count ).to eq 0
  end

  it "should reindex descendants" do
    g = Taxon.make!( rank: Taxon::GENUS, parent: @Trochilidae )
    s = Taxon.make!( rank: Taxon::SPECIES, parent: g )
    Delayed::Worker.new.work_off
    s.reload
    es_response = Taxon.elastic_search( where: { id: s.id } ).results.results.first
    expect( es_response.ancestor_ids ).to include @Trochilidae.id
    g.move_to_child_of( @Hylidae )
    Delayed::Worker.new.work_off
    s.reload
    es_response = Taxon.elastic_search( where: { id: s.id } ).results.results.first
    expect( es_response.ancestor_ids ).to include @Hylidae.id
  end

  it "should reindex identifications of the taxon" do
    g = Taxon.make!( rank: Taxon::GENUS, parent: @Trochilidae )
    s = Taxon.make!( rank: Taxon::SPECIES, parent: g )
    g_ident = Identification.make!( taxon: g )
    s_ident = Identification.make!( taxon: s )
    Delayed::Worker.new.work_off
    s.reload
    g_ident_es = Identification.elastic_search( where: { id: g_ident.id } ).results.results.first
    s_ident_es = Identification.elastic_search( where: { id: s_ident.id } ).results.results.first
    expect( g_ident_es.taxon.ancestor_ids ).to include @Trochilidae.id
    expect( s_ident_es.taxon.ancestor_ids ).to include @Trochilidae.id
    expect( s_ident_es.taxon.rank_level ).to eq s.rank_level
    g.move_to_child_of( @Hylidae )
    Delayed::Worker.new.work_off
    s.reload
    g_ident_es = Identification.elastic_search( where: { id: g_ident.id } ).results.results.first
    s_ident_es = Identification.elastic_search( where: { id: s_ident.id } ).results.results.first
    expect( g_ident_es.taxon.ancestor_ids ).to include @Hylidae.id
    expect( s_ident_es.taxon.ancestor_ids ).to include @Hylidae.id
    expect( s_ident_es.taxon.rank_level ).to eq s.rank_level
    g_obs_es = Observation.elastic_search( where: { id: g_ident.observation_id } ).results.results.first
    s_obs_es = Observation.elastic_search( where: { id: s_ident.observation_id } ).results.results.first
    expect( g_obs_es.taxon.ancestor_ids ).to include @Hylidae.id
    # TODO: there seems to be a data inconsistency here -
    # the obs index for descendants of the moved taxon don't have updated ancestries
    # expect( s_obs_es.taxon.ancestor_ids ).to include @Hylidae.id
  end

  # This is a sanity spec written while trying to investigate claims that adding
  # a complex alters the previous_observation_taxon on identifications. It
  # doesn't seem to, at least under these conditions. ~~~kueda 20201216
  # it "should not interfere with previous_observation_taxon on identifications when the previous_observation_taxon gets moved into an interstitial taxon" do
  #   g = Taxon.make!( rank: Taxon::GENUS, parent: @Trochilidae )
  #   s = Taxon.make!( rank: Taxon::SPECIES, parent: g )
  #   o = Observation.make!( taxon: s )
  #   Delayed::Worker.new.work_off
  #   i = Identification.make!( observation: o, taxon: @Trochilidae, disagreement: true )
  #   Delayed::Worker.new.work_off
  #   i.reload
  #   expect( i.previous_observation_taxon ).to eq s
  #   c = Taxon.make!( rank: Taxon::COMPLEX, parent: g )
  #   Delayed::Worker.new.work_off
  #   s.update_attributes( parent_id: c.id )
  #   Delayed::Worker.new.work_off
  #   i.reload
  #   expect( i.previous_observation_taxon ).to eq s
  # end
end

# Class-level bulk ancestry rewrite used when a taxon's parent changes.
describe Taxon, "update_descendants_with_new_ancestry" do
  before(:each) do
    load_test_taxa
  end
  it "should update the ancestry of descendants" do
    @Calypte.parent = @Hylidae
    child_ancestry_was = @Calypte.child_ancestry
    @Calypte.save
    Taxon.update_descendants_with_new_ancestry(@Calypte.id, child_ancestry_was)
    @Calypte_anna.reload
    expect(@Calypte_anna.ancestry).to be =~ /^#{@Hylidae.ancestry}/
    expect(@Calypte_anna.ancestry).to be =~ /^#{@Calypte.ancestry}/
  end
end

describe Taxon do
  describe "featuring" do
    it "should fail if no photos" do
      taxon = Taxon.make!
      taxon.featured_at = Time.now
      expect(taxon.photos).to be_blank
      taxon.valid?
      expect(taxon.errors[:featured_at]).not_to be_blank
    end
  end

  describe "conservation status" do
    it "should define boolean methods" do
      taxon = Taxon.make!(:conservation_status => Taxon::IUCN_VULNERABLE)
      expect(taxon).to be_iucn_vulnerable
      expect(taxon).not_to be_iucn_extinct
    end
  end

  describe "locking" do
    it "should cause grafting descendents to fail" do
      taxon = Taxon.make!(:locked => true)
      child = Taxon.make!
      # NOTE(review): update_attribute normally skips validations; presumably
      # grafting under a locked taxon is blocked by a callback — confirm.
      expect(child.parent).not_to be(taxon)
      child.update_attribute(:parent, taxon)
      expect(child.parent).not_to be(taxon)
    end

    # pending
    it "should prevent new scientific taxon names of descendents"
  end
end

# Grafting: attaching a previously rootless taxon to the tree.
describe Taxon, "grafting" do
  elastic_models( Observation, Taxon )
  before(:each) do
    load_test_taxa
    @graftee = Taxon.make!(:rank => "species")
  end

  it "should set iconic taxa on children" do
    expect(@graftee.iconic_taxon_id).not_to eq @Pseudacris.iconic_taxon_id
    @graftee.update_attributes(:parent => @Pseudacris)
    @graftee.reload
    expect(@graftee.iconic_taxon_id).to eq @Pseudacris.iconic_taxon_id
  end

  it "should set iconic taxa on descendants" do
    taxon = Taxon.make!(rank: "subspecies", name: "Craptaculous", parent: @graftee)
    @graftee.update_attributes(:parent => @Pseudacris)
    taxon.reload
    expect(taxon.iconic_taxon_id).to eq @Pseudacris.iconic_taxon_id
  end

  it "should queue a job to set iconic taxon on observations of descendants" do
    Delayed::Job.delete_all
    stamp = Time.now
    @graftee.update_attributes(:parent => @Pseudacris)
    jobs = Delayed::Job.where("created_at >= ?", stamp)
    expect(jobs.select{|j| j.handler =~ /set_iconic_taxon_for_observations_of/m}).not_to be_blank
  end

  it "should set the parent of a species based on the polynom genus" do
    t = Taxon.make!(:name => "Pseudacris foo")
    t.graft
    expect(t.parent).to eq(@Pseudacris)
  end

  it "should update the ancestry of children" do
    f = Taxon.make!( rank: Taxon::FAMILY, name: "Familyone" )
    g = Taxon.make!( rank: Taxon::GENUS, name: "Genusone" )
    s = Taxon.make!( rank: Taxon::SPECIES, name: "Genusone speciesone", parent: g )
    expect( g ).not_to be_grafted
    expect( s.ancestor_ids ).to include g.id
    expect( s.ancestor_ids ).not_to include f.id
    g.update_attributes( parent: f )
    Delayed::Worker.new.work_off
    g.reload
    s.reload
    expect( s.ancestor_ids ).to include g.id
    expect( s.ancestor_ids ).to include f.id
  end
end

describe Taxon, "single_taxon_for_name" do
  it "should find varieties" do
    name = "Abies magnifica var. magnifica"
    t = Taxon.make!(:name => name, :rank => Taxon::VARIETY)
    expect(t).to be_variety
    # The stored name drops the rank marker.
    expect(t.name).to eq("Abies magnifica magnifica")
    expect(Taxon.single_taxon_for_name(name)).to eq(t)
  end

  it "should not choke on parens" do
    t = Taxon.make!(:name => "Foo")
    expect {
      expect(Taxon.single_taxon_for_name("(Foo")).to eq(t)
    }.not_to raise_error
  end

  it "should find a valid name, not invalid synonyms within the same parent" do
    name = "Foo bar"
    parent = Taxon.make!(rank: Taxon::GENUS)
    valid = Taxon.make!(name: name, parent: parent, rank: Taxon::SPECIES)
    invalid = Taxon.make!(parent: parent, rank: Taxon::SPECIES)
    invalid.taxon_names.create(:name => name, :is_valid => false, :lexicon => TaxonName::SCIENTIFIC_NAMES)
    expect(Taxon.single_taxon_for_name(name)).to eq(valid)
  end

  it "should find a single valid name among invalid synonyms" do
    valid = Taxon.make!(parent: Taxon.make!(rank: Taxon::GENUS), rank: Taxon::SPECIES)
    invalid = Taxon.make!(parent: Taxon.make!(rank: Taxon::GENUS), rank: Taxon::SPECIES)
    tn = TaxonName.create!(taxon: invalid, name: valid.name, is_valid: false, lexicon: TaxonName::SCIENTIFIC_NAMES)
    all_names = [valid.taxon_names.map(&:name), invalid.reload.taxon_names.map(&:name)].flatten.uniq
    expect( all_names.size ).to eq 2
    expect( tn.is_valid? ).to eq false
    expect(Taxon.single_taxon_for_name(valid.name)).to eq(valid)
  end

  it "should not choose one active taxon among several active synonyms" do
    parent = Taxon.make!( rank: "genus" )
    valid1 = Taxon.make!( :species, parent: parent )
    valid2 = Taxon.make!( :species, parent: parent )
    [valid1, valid2].each do |t|
      TaxonName.make!( taxon: t, name: "Black Oystercatcher", lexicon: TaxonName::ENGLISH )
    end
    expect( Taxon.single_taxon_for_name( "Black Oystercatcher" ) ).to be_nil
  end
end

describe Taxon, "threatened?" do
  elastic_models( Observation, Taxon )

  # pending
  it "should work for a place"

  it "should work for lat/lon" do
    p = make_place_with_geom
    cs = ConservationStatus.make!(:place => p)
    expect(p.contains_lat_lng?(p.latitude, p.longitude)).to be true
    t = cs.taxon
    expect(t.threatened?(:latitude => p.latitude, :longitude => p.longitude)).to be true
  end
end

describe Taxon, "geoprivacy" do
  elastic_models( Observation, Taxon )

  it "should choose the maximum privacy relevant to the location" do
    t = Taxon.make!(:rank => Taxon::SPECIES)
    p = make_place_with_geom
    cs_place = ConservationStatus.make!(:taxon => t, :place => p, :geoprivacy => Observation::PRIVATE)
    cs_global = ConservationStatus.make!(:taxon => t)
    expect( t.geoprivacy(latitude: p.latitude, longitude: p.longitude) ).to eq Observation::PRIVATE
  end

  it "should be open if conservation statuses exist but all are open" do
    t = Taxon.make!(rank: Taxon::SPECIES)
    p = make_place_with_geom
    cs_place = ConservationStatus.make!(taxon: t, place: p, geoprivacy: Observation::OPEN)
    cs_global = ConservationStatus.make!(taxon: t, geoprivacy: Observation::OPEN)
    expect( t.geoprivacy(latitude: p.latitude, longitude: p.longitude) ).to eq Observation::OPEN
  end
end

describe Taxon, "max_geoprivacy" do
  let(:t1) { Taxon.make!(rank: Taxon::SPECIES) }
  let(:t2) { Taxon.make!(rank: Taxon::SPECIES) }
  let(:taxon_ids) { [t1.id, t2.id] }
  elastic_models( Observation, Identification )

  it "should be private if one of the taxa has a private global status" do
    cs_global = ConservationStatus.make!( taxon: t1, geoprivacy: Observation::PRIVATE )
    expect( Taxon.max_geoprivacy( taxon_ids ) ).to eq Observation::PRIVATE
  end

  it "should be private if one of the ancestor taxa has a private global status" do
    parent = Taxon.make!( rank: Taxon::GENUS )
    cs_global = ConservationStatus.make!( taxon: parent, geoprivacy: Observation::PRIVATE )
    without_delay do
      t1.update_attributes( parent: parent )
    end
    expect( t1.ancestor_ids ).to include parent.id
    expect( Taxon.max_geoprivacy( taxon_ids ) ).to eq Observation::PRIVATE
  end

  it "should be nil if none of the taxa have global status" do
    expect( Taxon.max_geoprivacy( taxon_ids ) ).to be_nil
  end
end

# HTML-formatted display name: italics for genus and below, rank prefix for
# genus and above, common name prepended when present.
describe Taxon, "to_styled_s" do
  it "should return normal names untouched" do
    expect(Taxon.new(:name => "Tom", :rank => nil).to_styled_s).to eq "Tom"
  end

  it "should italicize genera and below" do
    expect(Taxon.new(:name => "Tom", :rank => "genus").to_styled_s).to eq "Genus <i>Tom</i>"
    expect(Taxon.new(:name => "Tom", :rank => "species").to_styled_s).to eq "<i>Tom</i>"
    expect(Taxon.new(:name => "Tom", :rank => "infraspecies").to_styled_s).to eq "<i>Tom</i>"
  end

  it "should add ranks to genera and above" do
    expect(Taxon.new(:name => "Tom", :rank => "genus").to_styled_s).to eq "Genus <i>Tom</i>"
    expect(Taxon.new(:name => "Tom", :rank => "family").to_styled_s).to eq "Family Tom"
    expect(Taxon.new(:name => "Tom", :rank => "kingdom").to_styled_s).to eq "Kingdom Tom"
  end

  it "should add common name when available" do
    taxon = Taxon.new(:name => "Tom", :rank => "genus")
    common_name = TaxonName.make!(:name => "Common", :taxon => taxon, :lexicon => TaxonName::LEXICONS[:ENGLISH])
    taxon.reload
    expect(taxon.to_styled_s).to eq "Common (Genus <i>Tom</i>)"
  end
end

describe Taxon, "leading_name" do
  it "returns the scientific name if that's all there is" do
    expect(Taxon.make!(name: "Tom").leading_name).to eq "Tom"
  end

  it "returns the common name when available" do
    taxon = Taxon.make!(name: "Tom")
    TaxonName.make!(name: "Common", taxon: taxon, lexicon: TaxonName::LEXICONS[:ENGLISH])
    expect(taxon.leading_name).to eq "Common"
  end
end

describe Taxon, "editable_by?" do
  let(:admin) { make_admin }
  let(:curator) { make_curator }

  it "should be editable by admins if class" do
    expect( Taxon.make!( rank: Taxon::CLASS ) ).to be_editable_by( admin )
  end
  it "should be editable by curators if below order" do
    taxon = Taxon.make!( rank: Taxon::FAMILY )
    expect( taxon ).to be_editable_by( curator )
  end
  it "should not be editable by curators if order or above" do
    expect( Taxon.make!( rank: Taxon::CLASS ) ).not_to be_editable_by( curator )
  end

  describe "when taxon framework" do
    let(:second_curator) { make_curator }
    it "should be editable by taxon curators of that taxon" do
      family = Taxon.make!( rank: Taxon::FAMILY )
      genus = Taxon.make!( rank: Taxon::GENUS, parent: family )
      species = Taxon.make!( rank: Taxon::SPECIES, parent: genus )
      tf = TaxonFramework.make!( taxon: family, rank_level: Taxon::RANK_LEVELS[Taxon::SPECIES] )
      tc = TaxonCurator.make!( taxon_framework: tf, user: second_curator )
      expect( species ).to be_editable_by( second_curator )
    end
    it "should be editable by other site curators" do
      family = Taxon.make!( rank: Taxon::FAMILY )
      genus = Taxon.make!( rank: Taxon::GENUS, parent: family )
      species = Taxon.make!( rank: Taxon::SPECIES, parent: genus )
      tf = TaxonFramework.make!( taxon: family, rank_level: Taxon::RANK_LEVELS[Taxon::SPECIES] )
      tc = TaxonCurator.make!( taxon_framework: tf, user: second_curator )
      expect( species ).to be_editable_by( curator )
    end
  end
end

# GBIF lookup: matching responses create a TaxonSchemeTaxon; mismatches don't.
describe Taxon, "get_gbif_id" do
  it "should work" do
    a = Taxon.make!( name: "Chordata", rank: "phylum" )
    t = Taxon.make!( name: "Pseudacris", rank: "genus", parent: a )
    expect( t.get_gbif_id ).not_to be_blank
    expect( t.taxon_scheme_taxa ).not_to be_blank
  end

  it "should not create a TaxonSchemeTaxon for responses that don't match the taxon's name" do
    a = Taxon.make!( name: "Chordata", rank: "phylum" )
    t = Taxon.make!( name: "Sorberacea", rank: "class", parent: a )
    expect( t.get_gbif_id ).to be_blank
    expect( t.taxon_scheme_taxa ).to be_blank
  end

  it "should not error and return GBIF ID is there is no valid scientific name" do
    a = Taxon.make!( name: "Chordata", rank: "phylum" )
    t = Taxon.make!( name: "Dugongidae", rank: "family", parent: a )
    t.taxon_names.update_all(is_valid: false)
    expect { t.get_gbif_id }.not_to raise_error
    expect( t.get_gbif_id ).to_not be_blank
    expect( t.taxon_scheme_taxa ).to be_blank
  end
end

describe "rank helpers" do
  describe "find_species" do
    it "should return self of the taxon is a species" do
      t = Taxon.make!( rank: Taxon::SPECIES )
      expect( t.species ).to eq t
    end
    it "should return the parent if the taxon is a subspecies" do
      species = Taxon.make!( rank: Taxon::SPECIES )
      subspecies = Taxon.make!( rank: Taxon::SUBSPECIES, parent: species )
      expect( subspecies.species ).to eq species
    end
    it "should return nil if the taxon is a hybrid" do
      hybrid = Taxon.make!( name: "Viola × palmata", rank: Taxon::HYBRID )
      expect( hybrid.species ).to be_nil
    end
  end
end

# Taxon-framework permissions: root family covered by a framework down to
# species level; `internode` (genus) and `tip` (species) sit inside it.
describe "taxon" do
  let(:root) { Taxon.make!( rank: Taxon::FAMILY ) }
  let(:internode) { Taxon.make!( rank: Taxon::GENUS, parent: root ) }
  let!(:tip) { Taxon.make!( rank: Taxon::SPECIES, parent: internode ) }
  let!(:taxon_framework) { TaxonFramework.make!( taxon: root, rank_level: Taxon::RANK_LEVELS[Taxon::SPECIES] ) }
  let!(:taxon_curator) { TaxonCurator.make!( taxon_framework: taxon_framework ) }

  it "should recognize that its covered by a taxon framework" do
    expect( tip.upstream_taxon_framework ).not_to be_blank
  end

  it "should recognize that its not covered by a taxon framework" do
    # Subspecies is below the framework's species rank_level, so not covered.
    ssp = Taxon.make!( rank: Taxon::SUBSPECIES, parent: tip )
    expect( ssp.upstream_taxon_framework ).to be_blank
  end

  describe "when current_user" do
    describe "is curator" do
      let(:curator) { make_curator }
      it "should prevent grafting to root" do
        t = Taxon.make( rank: Taxon::GENUS, parent: root, current_user: curator )
        expect( t ).not_to be_valid
      end
      it "should allow grafting to root when inactive" do
        t = Taxon.make( rank: Taxon::GENUS, parent: root, current_user: curator, is_active: false )
        expect( t ).to be_valid
        t.save
        t.reload
        t.update_attributes( rank: Taxon::SUBGENUS, current_user: curator )
        expect( t ).to be_valid
        t.reload
        # Activating it brings it under the framework, which a plain curator
        # may not do.
        t.update_attributes( is_active: true, current_user: curator )
        expect( t ).not_to be_valid
      end
      it "should prevent grafting to internode" do
        t = Taxon.make( rank: Taxon::SPECIES, parent: internode, current_user: curator )
        expect( t ).not_to be_valid
      end
      it "should allow grafting to tip" do
        t = Taxon.make( rank: Taxon::SUBSPECIES, parent: tip, current_user: curator )
        expect( t ).to be_valid
      end
      it "should prevent editing is_active on root" do
        root.update_attributes( is_active: false, current_user: curator )
        expect( root ).not_to be_valid
      end
      it "should allow moving root" do
        other_root = Taxon.make!( rank: Taxon::SUPERFAMILY )
        root.update_attributes( parent: other_root, current_user: curator )
        expect( root ).to be_valid
      end
      it "should prevent moving internode" do
        expect( internode.upstream_taxon_framework ).not_to be_blank
        other_root = Taxon.make!( rank: Taxon::FAMILY )
        expect( internode.parent ).to eq root
        internode.update_attributes( parent: other_root, current_user: curator )
        expect( internode ).not_to be_valid
        # The in-memory association still reflects the attempted assignment
        # even though validation failed.
        expect( internode.parent ).to eq other_root
      end
      it "should prevent moving tip" do
        other_root = Taxon.make!( rank: Taxon::FAMILY )
        tip.update_attributes( parent: other_root, current_user: curator )
        expect( tip ).not_to be_valid
      end
    end

    describe "is taxon curator" do
      it "should alow grafting to root" do
        t = Taxon.make( rank: Taxon::GENUS, parent: root, current_user: taxon_curator.user )
        expect( t ).to be_valid
      end
      it "should allow grafting to internode" do
        t = Taxon.make( rank: Taxon::SPECIES, parent: internode, current_user: taxon_curator.user )
        expect( t ).to be_valid
      end
      it "should allow grafting to tip" do
        t = Taxon.make( rank: Taxon::SUBSPECIES, parent: tip, current_user: taxon_curator.user )
        expect( t ).to be_valid
      end
      it "should prevent taxon_curator from grafting to node covered by a overlapping downstream taxon framework" do
        deeper_internode = Taxon.make!( rank: Taxon::SUBGENUS, parent: internode, current_user: taxon_curator.user )
        deepertip = Taxon.make!( rank: Taxon::SPECIES, parent: deeper_internode, current_user: taxon_curator.user )
        overlapping_downstream_taxon_framework = TaxonFramework.make!( taxon: internode, rank_level: Taxon::RANK_LEVELS[Taxon::SPECIES] )
        overlapping_downstream_taxon_framework_taxon_curator = TaxonCurator.make!( taxon_framework: overlapping_downstream_taxon_framework )
        t = Taxon.make( rank: Taxon::SPECIES, parent: deeper_internode, current_user: taxon_curator.user )
        expect( t ).not_to be_valid
      end
      it "should allow taxon_curator to grafting to node with an overlapping upstream taxon framework" do
        deeper_internode = Taxon.make!( rank: Taxon::SUBGENUS, parent: internode, current_user: taxon_curator.user )
        deepertip = Taxon.make!( rank: Taxon::SPECIES, parent: deeper_internode, current_user: taxon_curator.user )
        overlapping_downstream_taxon_framework = TaxonFramework.make!( taxon: internode, rank_level: Taxon::RANK_LEVELS[Taxon::SPECIES] )
        overlapping_downstream_taxon_framework_taxon_curator = TaxonCurator.make!( taxon_framework: overlapping_downstream_taxon_framework )
        t = Taxon.make( rank: Taxon::SPECIES, parent: deeper_internode, current_user: overlapping_downstream_taxon_framework_taxon_curator.user )
        expect( t ).to be_valid
      end
      it "should allow moving internode" do
        other_root = Taxon.make!( rank: Taxon::FAMILY )
        internode.update_attributes( parent: other_root, current_user: taxon_curator.user )
        expect( internode ).to be_valid
      end
      it "should allow moving tip" do
        other_root = Taxon.make!( rank: Taxon::FAMILY )
        tip.update_attributes( parent: other_root, current_user: taxon_curator.user )
        expect( tip ).to be_valid
      end
      it "should prevent taxon_curator from moving tip covered by a overlapping downstream taxon framework" do
        other_root = Taxon.make!( rank: Taxon::FAMILY )
        deeper_internode = Taxon.make!( rank: Taxon::SUBGENUS, parent: internode, current_user: taxon_curator.user )
        deepertip = Taxon.make!( rank: Taxon::SPECIES, parent: deeper_internode, current_user: taxon_curator.user )
        overlapping_downstream_taxon_framework = TaxonFramework.make!( taxon: internode, rank_level: Taxon::RANK_LEVELS[Taxon::SPECIES] )
        overlapping_downstream_taxon_framework_taxon_curator = TaxonCurator.make!( taxon_framework: overlapping_downstream_taxon_framework )
        deepertip.update_attributes( parent: other_root, current_user: taxon_curator.user )
        expect( deepertip ).not_to be_valid
      end
      it "should allow taxon_curator to move tip with overlapping upstream taxon framework" do
        other_root = Taxon.make!( rank: Taxon::FAMILY )
        deeper_internode = Taxon.make!( rank: Taxon::SUBGENUS, parent: internode, current_user: taxon_curator.user )
        deepertip = Taxon.make!( rank: Taxon::SPECIES, parent: deeper_internode, current_user: taxon_curator.user )
        overlapping_downstream_taxon_framework = TaxonFramework.make!( taxon: internode, rank_level: Taxon::RANK_LEVELS[Taxon::SPECIES] )
        overlapping_downstream_taxon_framework_taxon_curator = TaxonCurator.make!( taxon_framework: overlapping_downstream_taxon_framework )
        deepertip.update_attributes( parent: other_root, current_user: overlapping_downstream_taxon_framework_taxon_curator.user )
        expect( deepertip ).to be_valid
      end
    end
  end
end

describe "complete_species_count" do
  it "should be nil if no complete taxon framework" do
    t = Taxon.make!
    expect( t.complete_species_count ).to be_nil
  end

  it "should be set if complete taxon framework exists" do
    ancestor = Taxon.make!( rank: Taxon::FAMILY )
    taxon_framework = TaxonFramework.make!( taxon: ancestor, rank_level: Taxon::RANK_LEVELS[Taxon::SPECIES], complete: true)
    taxon_curator = TaxonCurator.make!( taxon_framework: taxon_framework )
    t = Taxon.make!( parent: ancestor, rank: Taxon::GENUS, current_user: taxon_curator.user )
    expect( t.complete_species_count ).not_to be_nil
    expect( t.complete_species_count ).to eq 0
  end

  it "should be nil if complete ancestor exists but it is complete at a higher rank" do
    superfamily = Taxon.make!( rank: Taxon::SUPERFAMILY )
    # Framework is only complete down to genus, so species counts are unknown.
    taxon_framework = TaxonFramework.make!( taxon: superfamily, rank_level: Taxon::RANK_LEVELS[Taxon::GENUS], complete: true)
    taxon_curator = TaxonCurator.make!( taxon_framework: taxon_framework )
    family = Taxon.make!( rank: Taxon::FAMILY, parent: superfamily, current_user: taxon_curator.user )
    genus = Taxon.make!( rank: Taxon::GENUS, parent: family, current_user: taxon_curator.user )
    species = Taxon.make!( rank: Taxon::SPECIES, parent: genus, current_user: taxon_curator.user )
    expect( genus.complete_species_count ).to be_nil
  end

  describe "when complete taxon framework" do
    let(:taxon) { Taxon.make!( rank: Taxon::FAMILY ) }
    let(:taxon_framework) { TaxonFramework.make!( complete: true, taxon: taxon) }
    let(:taxon_curator) { TaxonCurator.make!( taxon_framework: taxon_framework ) }

    it "should count species" do
      species = Taxon.make!( rank: Taxon::SPECIES, parent: taxon, current_user: taxon_curator.user )
      expect( taxon.complete_species_count ).to eq 1
    end
    it "should not count genera" do
      genus = Taxon.make!( rank: Taxon::GENUS, parent: taxon, current_user: taxon_curator.user )
      expect( taxon.complete_species_count ).to eq 0
    end
    it "should not count hybrids" do
      hybrid = Taxon.make!( rank: Taxon::HYBRID, parent: taxon, current_user: taxon_curator.user )
      expect( taxon.complete_species_count ).to eq 0
    end
    it "should not count extinct species" do
      extinct_species = Taxon.make!( rank: Taxon::SPECIES, parent: taxon, current_user: taxon_curator.user )
      ConservationStatus.make!( taxon: extinct_species, iucn: Taxon::IUCN_EXTINCT, status: "extinct" )
      extinct_species.reload
      expect( extinct_species.conservation_statuses.first.iucn ).to eq Taxon::IUCN_EXTINCT
      expect( extinct_species.conservation_statuses.first.place ).to be_blank
      expect( taxon.complete_species_count ).to eq 0
    end
    it "should count species with place-specific non-extinct conservation statuses" do
      cs_species = Taxon.make!( rank: Taxon::SPECIES, parent: taxon, current_user: taxon_curator.user )
      ConservationStatus.make!( taxon: cs_species, iucn: Taxon::IUCN_VULNERABLE, status: "VU" )
      cs_species.reload
      expect( cs_species.conservation_statuses.first.iucn ).to eq Taxon::IUCN_VULNERABLE
      expect( cs_species.conservation_statuses.first.place ).to be_blank
      expect( taxon.complete_species_count ).to eq 1
    end
    it "should not count inactive taxa" do
      species = Taxon.make!( rank: Taxon::SPECIES, parent: taxon, is_active: false, current_user: taxon_curator.user )
      expect( taxon.complete_species_count ).to eq 0
    end
  end
end

# Following taxon-change chains (swaps/splits) to the currently active synonyms.
describe "current_synonymous_taxa" do
  let(:curator) { make_curator }

  it "should be the outputs of a split if the split's input was swapped" do
    swap = make_taxon_swap( committer: curator )
    swap.commit
    Delayed::Worker.new.work_off
    split = make_taxon_split( input_taxon: swap.output_taxon, committer: curator )
    split.commit
    Delayed::Worker.new.work_off
    expect( swap.input_taxon.current_synonymous_taxa.map(&:id).sort ).to eq split.output_taxa.map(&:id).sort
  end

  it "should follow splits past subsequent changes" do
    split1 = make_taxon_split( committer: curator )
    split1.commit
    Delayed::Worker.new.work_off
    swap = make_taxon_swap( committer: curator, input_taxon: split1.output_taxa[0] )
    swap.commit
    Delayed::Worker.new.work_off
    split2 = make_taxon_split( committer: curator, input_taxon: split1.output_taxa[1] )
    split2.commit
Delayed::Worker.new.work_off split3 = make_taxon_split( committer: curator, input_taxon: split2.output_taxa[0] ) split3.commit Delayed::Worker.new.work_off expect( split1.input_taxon.current_synonymous_taxa.map(&:id).sort ).to eq [ swap.output_taxon.id, split2.output_taxa[1].id, split3.output_taxa.map(&:id) ].flatten.sort end end describe "current_synonymous_taxon" do let(:curator) { make_curator } it "should be the output of a first-order swap" do swap = make_taxon_swap( committer: curator ) swap.commit expect( swap.input_taxon.current_synonymous_taxon ).to eq swap.output_taxon end it "should be the output of a second-order swap" do swap1 = make_taxon_swap( committer: curator ) swap1.commit swap2 = make_taxon_swap( input_taxon: swap1.output_taxon, committer: curator ) swap2.commit expect( swap1.input_taxon.current_synonymous_taxon ).to eq swap2.output_taxon end it "should not get stuck in a 1-hop loop" do swap1 = make_taxon_swap( committer: curator ) swap1.commit swap2 = make_taxon_swap( input_taxon: swap1.output_taxon, output_taxon: swap1.input_taxon, committer: curator ) swap2.commit expect( swap1.input_taxon.current_synonymous_taxon ).to be_nil expect( swap1.output_taxon.current_synonymous_taxon ).to eq swap1.input_taxon end it "should not get stuck in a 2-hop loop" do swap1 = make_taxon_swap( committer: curator ) swap1.commit swap2 = make_taxon_swap( input_taxon: swap1.output_taxon, committer: curator ) swap2.commit swap3 = make_taxon_swap( input_taxon: swap2.output_taxon, output_taxon: swap1.input_taxon, committer: curator ) swap3.commit expect( swap1.input_taxon.current_synonymous_taxon ).to be_nil expect( swap1.output_taxon.current_synonymous_taxon ).to eq swap1.input_taxon end it "should not get stuck in a loop if the taxon has been the input in multiple splits due to reversion" do split1 = make_taxon_split( committer: curator ) split1.commit split2 = make_taxon_split( committer: curator, input_taxon: split1.input_taxon ) split2.commit 
split1.output_taxa.each do |output_taxon| expect( split1.input_taxon.current_synonymous_taxa ).not_to include output_taxon end split2.output_taxa.each do |output_taxon| expect( split2.input_taxon.current_synonymous_taxa ).to include output_taxon end expect( split1.input_taxon.current_synonymous_taxon ).to be_blank end it "should not get stuck in a no-hop loop" do swap1 = make_taxon_swap( committer: curator ) swap1.commit # creating a case that shouldnt be possible with current code # but is possible with older data created before curent validations swap2 = make_taxon_swap( input_taxon: swap1.output_taxon, output_taxon: swap1.output_taxon, committer: curator, validate: false ) swap2.commit swap1.input_taxon.update_attributes(is_active: false) swap1.output_taxon.update_attributes(is_active: false) expect( swap1.input_taxon.current_synonymous_taxon ).to be_nil expect( swap1.output_taxon.current_synonymous_taxon ).to be_nil end it "should be blank if swapped and then split" do swap = make_taxon_swap( committer: curator ) swap.commit split = make_taxon_split( committer: curator, input_taxon: swap.output_taxon ) split.commit expect( swap.input_taxon.current_synonymous_taxon ).to be_blank end end describe Taxon, "set_photo_from_observations" do elastic_models( Observation, Taxon ) it "does not throw an error if observation photo positions are nil" do t = Taxon.make!( rank: "species" ) o = make_research_grade_observation( taxon: t ) ObservationPhoto.make!( observation: o, position: 0, photo: Photo.make!( user: o.user ) ) ObservationPhoto.make!( observation: o, position: nil, photo: Photo.make!( user: o.user ) ) expect{ t.set_photo_from_observations }.to_not raise_error end end describe "taxon_framework_relationship" do describe "when taxon has a taxon framework relationship" do it "should update taxon framework relationship relationship when taxon name changes" do genus = Taxon.make!( name: "Taricha", rank: Taxon::GENUS ) species = Taxon.make!( name: "Taricha torosa", 
rank: Taxon::SPECIES, parent: genus ) tf = TaxonFramework.make!( taxon: genus ) tfr = TaxonFrameworkRelationship.make! species.save species.update_attributes( taxon_framework_relationship_id: tfr.id ) species.reload et = ExternalTaxon.new( name: species.name, rank: "species", parent_name: species.parent.name, parent_rank: species.parent.rank, taxon_framework_relationship_id: tfr.id ) et.save tfr.reload expect(tfr.relationship).to eq "match" species.update_attributes( name: "Taricha granulosa" ) tfr.reload expect( tfr.relationship ).to eq "one_to_one" end end end
require 'rails_helper'

# Model specs for user privileges and notifications.
# NOTE(review): a broken earlier draft of this spec (dangling `it 'returns '`
# pending example) and a stray prose line ("rspec test cases for ... done",
# which is not valid Ruby) preceded the complete version; only the complete
# version of each example group is kept below.
RSpec.describe Users, type: :model do
  let(:user) { FactoryGirl.create(:user) }
  # Fixture data shared by the notification examples.
  let(:content) { 'a test notification' }
  # NOTE(review): malformed URL ("https//" missing the colon) — confirm this
  # is intentional test data.
  let(:link) { 'https//idontknow/howthis/works.com' }

  describe '#has_privilege?(name)' do
    let(:user) { FactoryGirl.create(:user_with_privileges) }

    it 'does not have the privilege' do
      expect(user.has_privilege?('edit')).to be(false)
    end

    it 'does have the privilege' do
      expect(user.has_privilege?('create')).to be(true)
    end
  end

  describe '#has_post_privilege?(name, post)' do
    let(:user) { FactoryGirl.create(:user_with_questions) }
    let(:user_with_privileges) { FactoryGirl.create(:user_with_privileges) }
    let(:question) { FactoryGirl.create(:question) }

    it 'returns true if the user owns the post' do
      expect(user.has_post_privilege?('Edit', user.questions.first)).to be(true)
    end

    it 'returns true if the user has the privilege' do
      expect(user_with_privileges.has_post_privilege?('create', question)).to be(true)
    end

    it 'return false if the user either do not own the post of have the privilege' do
      expect(user.has_post_privilege?('Delete', question)).to be(false)
    end
  end

  describe 'create_notification(content, link)' do
    it 'creates a new notification' do
      expect { user.create_notification(content, link) }.to change { Notification.count }.by(1)
    end

    it 'saves the notification in the array notifications' do
      expect(user.create_notification(content, link).size).to be(1)
    end
  end

  describe 'unread_notifications' do
    # Seed three unread notifications for each example.
    before(:each) do
      3.times { user.create_notification(content, link) }
    end

    it 'returns the unread notifications' do
      expect(user.unread_notifications).to be(3)
    end

    it 'returns the correct number of unread notifications' do
      # Mark two of the three as read; one unread should remain.
      notifications = user.notifications.where(:is_read => false).limit(2)
      notifications.each do |notification|
        notification.is_read = true
        notification.save
      end
      expect(user.unread_notifications).to be(1)
    end
  end
end
# Specs for the Mongoid::UUID concern as mixed into the Dummy model.
# NOTE(review): the file previously contained two near-identical copies of
# this spec separated by the junk marker "[➠] Improved specs."; only the
# improved copy (which extracts the shared `dummy` fixture into a `let`) is
# kept below.
RSpec.describe Mongoid::UUID, type: :model do
  describe 'concerns' do
    subject { Dummy.new }

    describe 'fields' do
      it { is_expected.to have_field(:uuid).of_type BSON::Binary }
    end

    describe 'validations' do
      it { is_expected.to validate_uniqueness_of :uuid }
    end

    describe 'indexes' do
      it { is_expected.to have_index_for(uuid: 1).with_options name: 'uuid_index' }
    end
  end

  describe 'creation' do
    it 'generates an UUID' do
      dummy = Dummy.create! name: 'Dummy'
      uuid = dummy.uuid.data
      expect(uuid.length).to eq 36
      expect(::UUID.validate(uuid)).to be true
    end

    it 'allows the UUID to be passed into' do
      uuid = BSON::Binary.new ::UUID.new.generate, :uuid
      dummy = Dummy.create! name: 'Dummy', uuid: uuid
      expect(dummy.uuid).to eq uuid
    end

    it 'validates the given UUID' do
      dummy = Dummy.new name: 'Dummy', uuid: 'baz-baz1'
      expect(dummy.valid?).to be false
      expect { dummy.save! }.to raise_error Mongoid::Errors::Validations

      # A raw String is rejected even when it is a well-formed UUID; the
      # field requires a BSON::Binary value.
      dummy = Dummy.new name: 'Dummy', uuid: ::UUID.new.generate
      expect(dummy.valid?).to be false
      expect { dummy.save! }.to raise_error Mongoid::Errors::Validations
    end
  end

  describe 'UUID is a read only attribute' do
    let(:dummy) { Dummy.create! name: 'Dummy' }

    context 'update' do
      it 'raises an error' do
        expect { dummy.update_attribute :uuid, 'baz-baz1' }.to raise_error Mongoid::Errors::ReadonlyAttribute
      end
    end

    context 'remove' do
      it 'raises an error' do
        expect { dummy.remove_attribute :uuid }.to raise_error Mongoid::Errors::ReadonlyAttribute
      end
    end
  end
end
require "spec_helper"
require_relative "../../lib/octopolo/git"

# Specs for Octopolo::Git, a thin wrapper around git shell commands.
# NOTE(review): the file previously contained two near-identical copies of
# this spec separated by the stray text "annnd tests" (bare words that would
# raise NameError at load). Only the later copy is kept: it additionally
# asserts that Git.if_clean exits via SystemExit when the index is dirty and
# the user declines, which the earlier copy omitted.
module Octopolo
  describe Git do
    let(:cli) { stub(:CLI) }

    context ".perform(subcommand)" do
      let(:command) { "status" }
      before { Git.cli = cli }

      it "performs the given subcommand" do
        cli.should_receive(:perform).with("git #{command}", true, false)
        Git.perform command
      end
    end

    context ".perform_quietly(subcommand)" do
      let(:command) { "status" }
      before { Git.cli = cli }

      it "performs the given subcommand quietly" do
        cli.should_receive(:perform_quietly).with("git #{command}")
        Git.perform_quietly command
      end
    end

    context ".current_branch" do
      let(:name) { "foo" }
      let(:output) { "#{name}\n" }
      let(:nobranch_output) { "#{Git::NO_BRANCH}\n" }
      before { Git.cli = cli }

      it "performs a command to filter current branch from list of branches" do
        cli.should_receive(:perform_quietly).with("git branch | grep '^* ' | cut -c 3-") { output }
        Git.current_branch.should == name
      end

      it "raises NotOnBranch if not on a branch" do
        cli.should_receive(:perform_quietly) { nobranch_output }
        expect { Git.current_branch }.to raise_error(Git::NotOnBranch, "Not currently checked out to a particular branch")
      end

      it "staging and deploy should be reserved branches" do
        Git.stub(:current_branch).and_return "staging.05.12"
        Git.reserved_branch?.should be_true
        Git.stub(:current_branch).and_return "deployable.05.12"
        Git.reserved_branch?.should be_true
        Git.stub(:current_branch).and_return "qaready.05.12"
        Git.reserved_branch?.should be_true
      end

      it "other branches should not be reserved branches" do
        Git.stub(:current_branch).and_return "not_staging.05.12"
        Git.reserved_branch?.should_not be_true
        Git.stub(:current_branch).and_return "not_deployable.05.12"
        Git.reserved_branch?.should_not be_true
        Git.stub(:current_branch).and_return "not_qaready.05.12"
        Git.reserved_branch?.should_not be_true
      end
    end

    context ".check_out(branch_name)" do
      let(:name) { "foo" }

      it "checks out the given branch name" do
        Git.should_receive(:fetch)
        Git.should_receive(:perform).with("checkout #{name}")
        Git.should_receive(:pull)
        Git.should_receive(:current_branch) { name }
        Git.check_out name
      end

      it "checks out the given branch name without after pull" do
        Git.should_receive(:fetch)
        Git.should_receive(:perform).with("checkout #{name}")
        Git.should_not_receive(:pull)
        Git.should_receive(:current_branch) { name }
        Git.check_out(name, false)
      end

      it "raises an exception if the current branch is not the requested branch afterward" do
        Git.should_receive(:fetch)
        Git.should_receive(:perform)
        Git.should_receive(:pull)
        Git.should_receive(:current_branch) { "other" }
        expect { Git.check_out name }.to raise_error(Git::CheckoutFailed, "Failed to check out '#{name}'")
      end
    end

    context ".clean?" do
      let(:cmd) { "git status --short" }
      before { Git.cli = cli }

      it "returns true if everything is checked in" do
        cli.should_receive(:perform_quietly).with(cmd) { "" }
        Git.should be_clean
      end

      it "returns false if the index has untracked files" do
        cli.should_receive(:perform_quietly).with(cmd) { "?? foo.txt" }
        Git.should_not be_clean
      end

      it "returns false if the index has missing files" do
        cli.should_receive(:perform_quietly).with(cmd) { "D foo.txt" }
        Git.should_not be_clean
      end

      it "returns false if the index has changed files" do
        cli.should_receive(:perform_quietly).with(cmd) { "M foo.txt" }
        Git.should_not be_clean
      end
    end

    context ".if_clean" do
      let(:custom_message) { "Some other message" }
      before { Git.cli = cli }

      it "performs the block if the git index is clean" do
        Git.should_receive(:clean?) { true }
        Math.should_receive(:log).with(1)
        Git.if_clean do
          Math.log 1
        end
      end

      it "performs the block if the git index is not clean and user responds yes" do
        Git.should_receive(:clean?) { false }
        cli.should_receive(:ask_boolean).with(Git::DIRTY_CONFIRM_MESSAGE) { true }
        Math.should_receive(:log).with(1)
        Git.if_clean do
          Math.log 1
        end
      end

      it "does not perform the block if the git index is not clean and user responds no" do
        Git.should_receive(:clean?) { false }
        cli.should_receive(:ask_boolean).with(Git::DIRTY_CONFIRM_MESSAGE) { false }
        Math.should_not_receive(:log)
        Git.should_receive(:alert_dirty_index).with(Git::DEFAULT_DIRTY_MESSAGE)
        # Declining on a dirty index aborts the process.
        expect do
          Git.if_clean do
            Math.log 1
          end
        end.to raise_error(SystemExit)
      end

      it "prints a custom message if git index is not clean and user responds no" do
        Git.should_receive(:clean?) { false }
        cli.should_receive(:ask_boolean).with(Git::DIRTY_CONFIRM_MESSAGE) { false }
        Math.should_not_receive(:log)
        Git.should_receive(:alert_dirty_index).with(custom_message)
        expect do
          Git.if_clean custom_message do
            Math.log 1
          end
        end.to raise_error(SystemExit)
      end
    end

    context ".alert_dirty_index(message)" do
      let(:message) { "Some message" }
      before { Git.cli = cli }

      it "prints the given message and shows the git status" do
        cli.should_receive(:say).with(" ")
        cli.should_receive(:say).with(message)
        cli.should_receive(:say).with(" ")
        Git.should_receive(:perform).with("status")
        expect { Git.alert_dirty_index message }.to raise_error
      end
    end

    context ".merge(branch_name)" do
      let(:branch_name) { "foo" }

      it "fetches the latest code and merges the given branch name" do
        Git.should_receive(:if_clean).and_yield
        Git.should_receive(:fetch)
        Git.should_receive(:perform).with("merge --no-ff origin/#{branch_name}", :ignore_non_zero => true)
        Git.should_receive(:clean?) { true }
        Git.should_receive(:push)
        Git.merge branch_name
      end

      it "does not push and raises MergeFailed if the merge failed" do
        Git.should_receive(:if_clean).and_yield
        Git.should_receive(:fetch)
        Git.should_receive(:perform).with("merge --no-ff origin/#{branch_name}", :ignore_non_zero => true)
        Git.should_receive(:clean?) { false }
        Git.should_not_receive(:push)
        expect { Git.merge branch_name }.to raise_error(Git::MergeFailed)
      end
    end

    context ".fetch" do
      it "fetches and prunes remote branches" do
        Git.should_receive(:perform_quietly).with("fetch --prune")
        Git.fetch
      end
    end

    context ".push" do
      let(:branch) { "current_branch" }

      it "pushes the current branch" do
        Git.stub(current_branch: branch)
        Git.should_receive(:if_clean).and_yield
        Git.should_receive(:perform).with("push origin #{branch}")
        Git.push
      end
    end

    context ".pull" do
      it "performs a pull if the index is clean" do
        Git.should_receive(:if_clean).and_yield
        Git.should_receive(:perform).with("pull")
        Git.pull
      end
    end

    context ".remote_branches" do
      let(:raw_output) { raw_names.join("\n ") }
      let(:raw_names) { %w(origin/foo origin/bar) }
      let(:cleaned_names) { %w(foo bar) }

      it "prunes the remote branch list and grabs all the branch names" do
        Git.should_receive(:fetch)
        Git.should_receive(:perform_quietly).with("branch --remote") { raw_output }
        Git.remote_branches.should == cleaned_names.sort
      end
    end

    context ".branches_for branch_type" do
      let(:remote_branches) { [depl1, rando, stage1, depl2].sort }
      let(:depl1) { "deployable.12.20" }
      let(:depl2) { "deployable.11.05" }
      let(:stage1) { "staging.04.05" }
      let(:rando) { "something-else" }

      before do
        Git.should_receive(:remote_branches) { remote_branches }
      end

      it "can find deployable branches" do
        deployables = Git.branches_for(Git::DEPLOYABLE_PREFIX)
        deployables.should include depl1
        deployables.should include depl2
        deployables.should == [depl1, depl2].sort
        deployables.count.should == 2
      end

      it "can find staging branches" do
        stagings = Git.branches_for(Git::STAGING_PREFIX)
        stagings.should include stage1
        stagings.count.should == 1
      end
    end

    context ".deployable_branch" do
      let(:depl1) { "deployable.12.05" }
      let(:depl2) { "deployable.12.25" }

      it "returns the last deployable branch" do
        Git.should_receive(:branches_for).with(Git::DEPLOYABLE_PREFIX) { [depl1, depl2] }
        Git.deployable_branch.should == depl2
      end

      it "raises an exception if none exist" do
        Git.should_receive(:branches_for).with(Git::DEPLOYABLE_PREFIX) { [] }
        expect { Git.deployable_branch.should }.to raise_error(Git::NoBranchOfType, "No #{Git::DEPLOYABLE_PREFIX} branch")
      end
    end

    context ".staging_branch" do
      let(:stage1) { "stage1" }
      let(:stage2) { "stage2" }

      it "returns the last staging branch" do
        Git.should_receive(:branches_for).with(Git::STAGING_PREFIX) { [stage1, stage2] }
        Git.staging_branch.should == stage2
      end

      it "raises an exception if none exist" do
        Git.should_receive(:branches_for).with(Git::STAGING_PREFIX) { [] }
        expect { Git.staging_branch }.to raise_error(Git::NoBranchOfType, "No #{Git::STAGING_PREFIX} branch")
      end
    end

    context ".qaready_branch" do
      let(:qaready1) { "qaready1" }
      let(:qaready2) { "qaready2" }

      it "returns the last qaready branch" do
        Git.should_receive(:branches_for).with(Git::QAREADY_PREFIX) { [qaready1, qaready2] }
        Git.qaready_branch.should == qaready2
      end

      it "raises an exception if none exist" do
        Git.should_receive(:branches_for).with(Git::QAREADY_PREFIX) { [] }
        expect { Git.qaready_branch }.to raise_error(Git::NoBranchOfType, "No #{Git::QAREADY_PREFIX} branch")
      end
    end

    context ".release_tags" do
      let(:valid1) { "2012.02.28" }
      let(:valid2) { "2012.11.10" }
      let(:invalid) { "foothing" }
      let(:tags) { [valid1, invalid, valid2].join("\n") }

      it "returns all the tags for releases" do
        Git.should_receive(:perform_quietly).with("tag") { tags }
        release_tags = Git.release_tags
        release_tags.should_not include invalid
        release_tags.should include valid1
        release_tags.should include valid2
      end
    end

    context ".recent_release_tags" do
      let(:long_list) { Array.new(100, "sometag#{rand(1000)}") } # big-ass list

      it "returns the last #{Git::RECENT_TAG_LIMIT} tags" do
        Git.should_receive(:release_tags) { long_list }
        tags = Git.recent_release_tags
        tags.count.should == Git::RECENT_TAG_LIMIT
        tags.should == long_list.last(Git::RECENT_TAG_LIMIT)
      end
    end

    context ".semver_tags" do
      let(:valid1) { "0.0.1" }
      let(:valid2) { "v0.0.3" }
      let(:invalid) { "foothing" }
      let(:tags) { [valid1, invalid, valid2].join("\n") }

      it "returns all the tags set as a sematic version" do
        Git.should_receive(:perform_quietly).with("tag") { tags }
        release_tags = Git.semver_tags
        release_tags.should_not include invalid
        release_tags.should include valid1
        release_tags.should include valid2
      end
    end

    context ".new_branch(new_branch_name, source_branch_name)" do
      let(:new_branch_name) { "foo" }
      let(:source_branch_name) { "bar" }

      it "creates and pushes a new branch from the source branch" do
        Git.should_receive(:fetch)
        Git.should_receive(:perform).with("branch --no-track #{new_branch_name} origin/#{source_branch_name}")
        Git.should_receive(:check_out).with(new_branch_name, false)
        Git.should_receive(:perform).with("push --set-upstream origin #{new_branch_name}")
        Git.new_branch(new_branch_name, source_branch_name)
      end
    end

    context ".new_tag(tag_name)" do
      let(:tag) { "asdf" }

      it "creates a new tag with the given name and pushes it" do
        Git.should_receive(:perform).with("tag #{tag}")
        Git.should_receive(:push)
        Git.should_receive(:perform).with("push --tag")
        Git.new_tag(tag)
      end
    end

    context ".stale_branches(destination_branch, branches_to_ignore)" do
      let(:ignored) { %w(foo bar) }
      let(:branch_name) { "master" }
      let(:sha) { "asdf123" }
      # NOTE(review): the newlines inside this %Q string were lost when the
      # file was reflowed onto single lines; the multi-line form below matches
      # the expected parse into %w(bing bang) — confirm against the original
      # source.
      let(:raw_result) do
        %Q(
          origin/bing
          origin/bang
        )
      end

      it "checks for stale branches for the given branch, less branches to ignore" do
        Git.should_receive(:fetch)
        Git.should_receive(:stale_branches_to_ignore).with(ignored) { ignored }
        Git.should_receive(:recent_sha).with(branch_name) { sha }
        Git.should_receive(:perform_quietly).with("branch --remote --merged #{sha} | grep -E -v '(foo|bar)'") { raw_result }
        expect(Git.stale_branches(branch_name, ignored)).to eq(%w(bing bang))
      end

      it "defaults to master branch and no extra branches to ignore" do
        Git.should_receive(:fetch)
        Git.should_receive(:stale_branches_to_ignore).with([]) { ignored }
        Git.should_receive(:recent_sha).with("master") { sha }
        Git.should_receive(:perform_quietly).with("branch --remote --merged #{sha} | grep -E -v '(foo|bar)'") { raw_result }
        Git.stale_branches
      end
    end

    context "#branches_to_ignore(custom_branch_list)" do
      it "ignores some branches by default" do
        expect(Git.send(:stale_branches_to_ignore)).to include "HEAD"
        expect(Git.send(:stale_branches_to_ignore)).to include "master"
        expect(Git.send(:stale_branches_to_ignore)).to include "staging"
        expect(Git.send(:stale_branches_to_ignore)).to include "deployable"
      end

      it "accepts an optional list of additional branches to ignore" do
        expect(Git.send(:stale_branches_to_ignore, ["foo"])).to include "HEAD"
        expect(Git.send(:stale_branches_to_ignore, ["foo"])).to include "master"
        expect(Git.send(:stale_branches_to_ignore, ["foo"])).to include "staging"
        expect(Git.send(:stale_branches_to_ignore, ["foo"])).to include "deployable"
        expect(Git.send(:stale_branches_to_ignore, ["foo"])).to include "foo"
      end
    end

    context "#recent_sha(branch_name)" do
      let(:branch_name) { "foo" }
      let(:raw_sha) { "asdf123\n" }

      it "grabs the SHA of the given branch from 1 day ago" do
        Git.should_receive(:perform_quietly).with("rev-list `git rev-parse remotes/origin/#{branch_name} --before=1.day.ago` --max-count=1") { raw_sha }
        expect(Git.send(:recent_sha, branch_name)).to eq("asdf123")
      end
    end

    context ".delete_branch(branch_name)" do
      let(:branch_name) { "foo" }

      it "leverages git-extra's delete-branch command" do
        Git.should_receive(:perform).with("push origin :#{branch_name}")
        Git.should_receive(:perform).with("branch -D #{branch_name}", :ignore_non_zero => true)
        Git.delete_branch branch_name
      end
    end
  end
end
# -*- encoding: utf-8 -*- # vim: sw=2 ts=2 require 'spec_helper' require 'qiniu/auth' require 'qiniu/storage' require 'digest/sha1' module Qiniu module Storage shared_examples "Upload Specs" do before :all do Config.settings[:multi_region] = true @key = Digest::SHA1.hexdigest((Time.now.to_i+rand(100)).to_s) @key = make_unique_key_in_bucket(@key) puts "key=#{@key}" @localfile_5m = "5M.txt" File.open(@localfile_5m, "w"){|f| 5242888.times{ f.write(rand(9).to_s) }} @key_5m = Digest::SHA1.hexdigest(@localfile_5m+Time.now.to_s) @key_5m = make_unique_key_in_bucket(@key_5m) puts "key_5m=#{@key_5m}" @localfile_4m = "4M.txt" File.open(@localfile_4m, "w"){|f| (1 << 22).times{ f.write(rand(9).to_s) }} @key_4m = Digest::SHA1.hexdigest(@localfile_4m+Time.now.to_s) @key_4m = make_unique_key_in_bucket(@key_4m) puts "key_4m=#{@key_4m}" @localfile_8m = "8M.txt" File.open(@localfile_8m, "w"){|f| (1 << 23).times{ f.write(rand(9).to_s) }} @key_8m = Digest::SHA1.hexdigest(@localfile_8m+Time.now.to_s) @key_8m = make_unique_key_in_bucket(@key_8m) puts "key_8m=#{@key_8m}" @localfile_1m = "1M.txt" File.open(@localfile_1m, "w"){|f| (1 << 20).times{ f.write(rand(9).to_s) }} @key_1m = Digest::SHA1.hexdigest(@localfile_1m+Time.now.to_s) @key_1m = make_unique_key_in_bucket(@key_1m) puts "key_1m=#{@key_1m}" end after :all do ### 清除本地临时文件 File.unlink(@localfile_5m) if File.exists?(@localfile_5m) File.unlink(@localfile_4m) if File.exists?(@localfile_4m) File.unlink(@localfile_8m) if File.exists?(@localfile_8m) File.unlink(@localfile_1m) if File.exists?(@localfile_1m) end ### 测试单文件直传 context ".upload_with_token" do before do Qiniu::Storage.delete(@bucket, @key) end after do code, data = Qiniu::Storage.delete(@bucket, @key) puts data.inspect code.should == 200 end it "should works" do upopts = {:scope => @bucket, :expires_in => 3600, :customer => "why404@gmail.com"} uptoken = Qiniu.generate_upload_token(upopts) code, data, raw_headers = Qiniu::Storage.upload_with_token( uptoken, __FILE__, @bucket, 
@key, nil, nil, nil, true ) code.should == 200 puts data.inspect puts raw_headers.inspect code, data = Qiniu::Storage.stat(@bucket, @key) puts data.inspect code.should == 200 end end context ".upload_with_token_2" do before do Qiniu::Storage.delete(@bucket, @key) end after do code, data = Qiniu::Storage.delete(@bucket, @key) puts data.inspect code.should == 200 end it "should works" do @bucket = "z0-bucket" upopts = {:scope => @bucket, :expires_in => 3600, :endUser => "why404@gmail.com"} uptoken = Qiniu.generate_upload_token(upopts) code, data, raw_headers = Qiniu::Storage.upload_with_token_2( uptoken, __FILE__, @key, nil, bucket: @bucket ) code.should == 200 puts data.inspect puts raw_headers.inspect code, data = Qiniu::Storage.stat(@bucket, @key) puts data.inspect code.should == 200 end end # .upload_with_token_2 context ".upload_with_put_policy" do before do Qiniu::Storage.delete(@bucket, @key) end after do code, data = Qiniu::Storage.delete(@bucket, @key) puts data.inspect code.should == 200 end it "should works" do pp = Qiniu::Auth::PutPolicy.new(@bucket, @key) pp.end_user = "why404@gmail.com" puts 'put_policy=' + pp.to_json code, data, raw_headers = Qiniu::Storage.upload_with_put_policy( pp, __FILE__, @key + '-not-equal', nil, bucket: @bucket ) code.should_not == 200 puts data.inspect puts raw_headers.inspect code, data, raw_headers = Qiniu::Storage.upload_with_put_policy( pp, __FILE__, @key, nil, bucket: @bucket ) code.should == 200 puts data.inspect puts raw_headers.inspect code, data = Qiniu::Storage.stat(@bucket, @key) puts data.inspect code.should == 200 end end # .upload_with_put_policy context ".upload_buffer_with_put_policy" do before do Qiniu::Storage.delete(@bucket, @key) end after do code, data = Qiniu::Storage.delete(@bucket, @key) puts data.inspect code.should == 200 end it "should works" do pp = Qiniu::Auth::PutPolicy.new(@bucket, @key) pp.end_user = "amethyst.black@gmail.com" puts 'put_policy=' + pp.to_json test_line = 'This is a test line for 
testing put_buffer function.' code, data, raw_headers = Qiniu::Storage.upload_buffer_with_put_policy( pp, test_line, @key, nil, bucket: @bucket ) code.should == 200 puts data.inspect puts raw_headers.inspect code, data = Qiniu::Storage.stat(@bucket, @key) puts data.inspect code.should == 200 end end # .upload_buffer_with_put_policy ### 测试断点续上传 # context ".resumable_upload_with_token" do # before do # Qiniu::Storage.delete(@bucket, @key_5m) # end # # after do # code, data = Qiniu::Storage.delete(@bucket, @key_5m) # puts data.inspect # code.should == 200 # end # # it "should works" do # upopts = {:scope => @bucket, :expires_in => 3600, :customer => "why404@gmail.com"} # uptoken = Qiniu.generate_upload_token(upopts) # code, data, raw_headers = Qiniu::Storage.resumable_upload_with_token( # uptoken, # @localfile_5m, # @bucket, # @key_5m, # nil, # nil, # nil, # nil, # nil, # nil, # 'v1', # 4 * 1024 * 1024 # ) # (code/100).should == 2 # puts data.inspect # puts raw_headers.inspect # puts "key_5m=#{@key_5m}" # # code, data = Qiniu::Storage.stat(@bucket, @key_5m) # puts data.inspect # code.should == 200 # end # end # # context ".resumable_upload_with_token2" do # before do # Qiniu::Storage.delete(@bucket, @key_4m) # end # # after do # code, data = Qiniu::Storage.delete(@bucket, @key_4m) # puts data.inspect # code.should == 200 # end # # it "should works" do # upopts = {:scope => @bucket, :expires_in => 3600, :customer => "why404@gmail.com"} # uptoken = Qiniu.generate_upload_token(upopts) # code, data, raw_headers = Qiniu::Storage.resumable_upload_with_token( # uptoken, # @localfile_4m, # @bucket, # @key_4m # ) # (code/100).should == 2 # puts data.inspect # puts raw_headers.inspect # puts "key_4m=#{@key_4m}" # # code, data = Qiniu::Storage.stat(@bucket, @key_4m) # puts data.inspect # code.should == 200 # end # end # # context ".resumable_upload_with_token3" do # before do # Qiniu::Storage.delete(@bucket, @key_8m) # end # # after do # code, data = 
Qiniu::Storage.delete(@bucket, @key_8m) # puts data.inspect # code.should == 200 # end # # it "should works" do # upopts = {:scope => @bucket, :expires_in => 3600, :customer => "why404@gmail.com"} # uptoken = Qiniu.generate_upload_token(upopts) # code, data, raw_headers = Qiniu::Storage.resumable_upload_with_token( # uptoken, # @localfile_8m, # @bucket, # @key_8m # ) # (code/100).should == 2 # puts data.inspect # puts raw_headers.inspect # puts "key_8m=#{@key_8m}" # # code, data = Qiniu::Storage.stat(@bucket, @key_8m) # puts data.inspect # code.should == 200 # end # end # # context ".resumable_upload_with_token4" do # before do # Qiniu::Storage.delete(@bucket, @key_1m) # end # # after do # code, data = Qiniu::Storage.delete(@bucket, @key_1m) # puts data.inspect # code.should == 200 # end # # it "should works" do # upopts = {:scope => @bucket, :expires_in => 3600, :customer => "why404@gmail.com"} # uptoken = Qiniu.generate_upload_token(upopts) # code, data, raw_headers = Qiniu::Storage.resumable_upload_with_token( # uptoken, # @localfile_1m, # @bucket, # @key_1m # ) # (code/100).should == 2 # puts data.inspect # puts raw_headers.inspect # puts "key_1m=#{@key_1m}" # # code, data = Qiniu::Storage.stat(@bucket, @key_1m) # puts data.inspect # code.should == 200 # end # end context ".resumable_upload_with_token_v2" do before do Qiniu::Storage.delete(@bucket, @key_1m) end after do code, data = Qiniu::Storage.delete(@bucket, @key_1m) puts data.inspect code.should == 200 end it "should works" do upopts = {:scope => @bucket, :expires_in => 3600, :customer => "why404@gmail.com"} uptoken = Qiniu.generate_upload_token(upopts) puts "uptoken is #{uptoken}" code, data, raw_headers = Qiniu::Storage.resumable_upload_with_token( uptoken, @localfile_1m, @bucket, @key_1m, nil, nil, nil, nil, nil, nil, 'v2', 4 * 1024 * 1024 ) (code/100).should == 2 puts data.inspect puts raw_headers.inspect puts "key_1m=#{@key_1m}" code, data = Qiniu::Storage.stat(@bucket, @key_1m) puts data.inspect 
code.should == 200 end end context ".resumable_upload_with_token2_v2" do before do Qiniu::Storage.delete(@bucket, @key_4m) end after do code, data = Qiniu::Storage.delete(@bucket, @key_4m) puts data.inspect code.should == 200 end it "should works" do upopts = {:scope => @bucket, :expires_in => 3600, :customer => "why404@gmail.com"} uptoken = Qiniu.generate_upload_token(upopts) code, data, raw_headers = Qiniu::Storage.resumable_upload_with_token( uptoken, @localfile_4m, @bucket, @key_5m, nil, nil, nil, nil, nil, nil, 'v2', 4 * 1024 * 1024 ) (code/100).should == 2 puts data.inspect puts raw_headers.inspect puts "key_4m=#{@key_4m}" code, data = Qiniu::Storage.stat(@bucket, @key_4m) puts data.inspect code.should == 200 end end context ".resumable_upload_with_token3_v2" do before do Qiniu::Storage.delete(@bucket, @key_8m) end after do code, data = Qiniu::Storage.delete(@bucket, @key_8m) puts data.inspect code.should == 200 end it "should works" do upopts = {:scope => @bucket, :expires_in => 3600, :customer => "why404@gmail.com"} uptoken = Qiniu.generate_upload_token(upopts) code, data, raw_headers = Qiniu::Storage.resumable_upload_with_token( uptoken, @localfile_8m, @bucket, @key_8m, nil, nil, nil, nil, nil, nil, 'v2', 4 * 1024 * 1024 ) (code/100).should == 2 puts data.inspect puts raw_headers.inspect puts "key_8m=#{@key_8m}" code, data = Qiniu::Storage.stat(@bucket, @key_8m) puts data.inspect code.should == 200 end end context ".resumable_upload_with_token4_v2" do before do Qiniu::Storage.delete(@bucket, @key_5m) end after do code, data = Qiniu::Storage.delete(@bucket, @key_5m) puts data.inspect code.should == 200 end it "should works" do upopts = {:scope => @bucket, :expires_in => 3600, :customer => "why404@gmail.com"} uptoken = Qiniu.generate_upload_token(upopts) code, data, raw_headers = Qiniu::Storage.resumable_upload_with_token( uptoken, @localfile_5m, @bucket, @key_5m, nil, nil, nil, nil, nil, nil, 'v2' ) (code/100).should == 2 puts data.inspect puts 
raw_headers.inspect puts "key_5m=#{@key_5m}" code, data = Qiniu::Storage.stat(@bucket, @key_5m) puts data.inspect code.should == 200 end end end describe 'for na0 bucket' do before :all do @bucket = 'rubysdk-na0' end include_examples 'Upload Specs' end describe 'for as0 bucket' do before :all do @bucket = 'rubysdk-as0' end include_examples 'Upload Specs' it 'should raise BucketIsMissing error' do upopts = {:scope => @bucket, :expires_in => 3600, :endUser => "why404@gmail.com"} uptoken = Qiniu.generate_upload_token(upopts) expect do Qiniu::Storage.upload_with_token_2( uptoken, __FILE__, @key, ) end.to raise_error('upload_with_token_2 requires :bucket option when multi_region is enabled') end end end # module Storage end # module Qiniu update test # -*- encoding: utf-8 -*- # vim: sw=2 ts=2 require 'spec_helper' require 'qiniu/auth' require 'qiniu/storage' require 'digest/sha1' module Qiniu module Storage shared_examples "Upload Specs" do before :all do Config.settings[:multi_region] = true @key = Digest::SHA1.hexdigest((Time.now.to_i+rand(100)).to_s) @key = make_unique_key_in_bucket(@key) puts "key=#{@key}" @localfile_5m = "5M.txt" File.open(@localfile_5m, "w"){|f| 5242888.times{ f.write(rand(9).to_s) }} @key_5m = Digest::SHA1.hexdigest(@localfile_5m+Time.now.to_s) @key_5m = make_unique_key_in_bucket(@key_5m) puts "key_5m=#{@key_5m}" @localfile_4m = "4M.txt" File.open(@localfile_4m, "w"){|f| (1 << 22).times{ f.write(rand(9).to_s) }} @key_4m = Digest::SHA1.hexdigest(@localfile_4m+Time.now.to_s) @key_4m = make_unique_key_in_bucket(@key_4m) puts "key_4m=#{@key_4m}" @localfile_8m = "8M.txt" File.open(@localfile_8m, "w"){|f| (1 << 23).times{ f.write(rand(9).to_s) }} @key_8m = Digest::SHA1.hexdigest(@localfile_8m+Time.now.to_s) @key_8m = make_unique_key_in_bucket(@key_8m) puts "key_8m=#{@key_8m}" @localfile_1m = "1M.txt" File.open(@localfile_1m, "w"){|f| (1 << 20).times{ f.write(rand(9).to_s) }} @key_1m = Digest::SHA1.hexdigest(@localfile_1m+Time.now.to_s) @key_1m = 
make_unique_key_in_bucket(@key_1m) puts "key_1m=#{@key_1m}" end after :all do ### 清除本地临时文件 File.unlink(@localfile_5m) if File.exists?(@localfile_5m) File.unlink(@localfile_4m) if File.exists?(@localfile_4m) File.unlink(@localfile_8m) if File.exists?(@localfile_8m) File.unlink(@localfile_1m) if File.exists?(@localfile_1m) end ### 测试单文件直传 context ".upload_with_token" do before do Qiniu::Storage.delete(@bucket, @key) end after do code, data = Qiniu::Storage.delete(@bucket, @key) puts data.inspect code.should == 200 end it "should works" do upopts = {:scope => @bucket, :expires_in => 3600, :customer => "why404@gmail.com"} uptoken = Qiniu.generate_upload_token(upopts) code, data, raw_headers = Qiniu::Storage.upload_with_token( uptoken, __FILE__, @bucket, @key, nil, nil, nil, true ) code.should == 200 puts data.inspect puts raw_headers.inspect code, data = Qiniu::Storage.stat(@bucket, @key) puts data.inspect code.should == 200 end end context ".upload_with_token_2" do before do Qiniu::Storage.delete(@bucket, @key) end after do code, data = Qiniu::Storage.delete(@bucket, @key) puts data.inspect code.should == 200 end it "should works" do upopts = {:scope => @bucket, :expires_in => 3600, :endUser => "why404@gmail.com"} uptoken = Qiniu.generate_upload_token(upopts) code, data, raw_headers = Qiniu::Storage.upload_with_token_2( uptoken, __FILE__, @key, nil, bucket: @bucket ) code.should == 200 puts data.inspect puts raw_headers.inspect code, data = Qiniu::Storage.stat(@bucket, @key) puts data.inspect code.should == 200 end end # .upload_with_token_2 context ".upload_with_put_policy" do before do Qiniu::Storage.delete(@bucket, @key) end after do code, data = Qiniu::Storage.delete(@bucket, @key) puts data.inspect code.should == 200 end it "should works" do pp = Qiniu::Auth::PutPolicy.new(@bucket, @key) pp.end_user = "why404@gmail.com" puts 'put_policy=' + pp.to_json code, data, raw_headers = Qiniu::Storage.upload_with_put_policy( pp, __FILE__, @key + '-not-equal', nil, bucket: 
@bucket ) code.should_not == 200 puts data.inspect puts raw_headers.inspect code, data, raw_headers = Qiniu::Storage.upload_with_put_policy( pp, __FILE__, @key, nil, bucket: @bucket ) code.should == 200 puts data.inspect puts raw_headers.inspect code, data = Qiniu::Storage.stat(@bucket, @key) puts data.inspect code.should == 200 end end # .upload_with_put_policy context ".upload_buffer_with_put_policy" do before do Qiniu::Storage.delete(@bucket, @key) end after do code, data = Qiniu::Storage.delete(@bucket, @key) puts data.inspect code.should == 200 end it "should works" do pp = Qiniu::Auth::PutPolicy.new(@bucket, @key) pp.end_user = "amethyst.black@gmail.com" puts 'put_policy=' + pp.to_json test_line = 'This is a test line for testing put_buffer function.' code, data, raw_headers = Qiniu::Storage.upload_buffer_with_put_policy( pp, test_line, @key, nil, bucket: @bucket ) code.should == 200 puts data.inspect puts raw_headers.inspect code, data = Qiniu::Storage.stat(@bucket, @key) puts data.inspect code.should == 200 end end # .upload_buffer_with_put_policy ### 测试断点续上传 # context ".resumable_upload_with_token" do # before do # Qiniu::Storage.delete(@bucket, @key_5m) # end # # after do # code, data = Qiniu::Storage.delete(@bucket, @key_5m) # puts data.inspect # code.should == 200 # end # # it "should works" do # upopts = {:scope => @bucket, :expires_in => 3600, :customer => "why404@gmail.com"} # uptoken = Qiniu.generate_upload_token(upopts) # code, data, raw_headers = Qiniu::Storage.resumable_upload_with_token( # uptoken, # @localfile_5m, # @bucket, # @key_5m, # nil, # nil, # nil, # nil, # nil, # nil, # 'v1', # 4 * 1024 * 1024 # ) # (code/100).should == 2 # puts data.inspect # puts raw_headers.inspect # puts "key_5m=#{@key_5m}" # # code, data = Qiniu::Storage.stat(@bucket, @key_5m) # puts data.inspect # code.should == 200 # end # end # # context ".resumable_upload_with_token2" do # before do # Qiniu::Storage.delete(@bucket, @key_4m) # end # # after do # code, data = 
Qiniu::Storage.delete(@bucket, @key_4m) # puts data.inspect # code.should == 200 # end # # it "should works" do # upopts = {:scope => @bucket, :expires_in => 3600, :customer => "why404@gmail.com"} # uptoken = Qiniu.generate_upload_token(upopts) # code, data, raw_headers = Qiniu::Storage.resumable_upload_with_token( # uptoken, # @localfile_4m, # @bucket, # @key_4m # ) # (code/100).should == 2 # puts data.inspect # puts raw_headers.inspect # puts "key_4m=#{@key_4m}" # # code, data = Qiniu::Storage.stat(@bucket, @key_4m) # puts data.inspect # code.should == 200 # end # end # # context ".resumable_upload_with_token3" do # before do # Qiniu::Storage.delete(@bucket, @key_8m) # end # # after do # code, data = Qiniu::Storage.delete(@bucket, @key_8m) # puts data.inspect # code.should == 200 # end # # it "should works" do # upopts = {:scope => @bucket, :expires_in => 3600, :customer => "why404@gmail.com"} # uptoken = Qiniu.generate_upload_token(upopts) # code, data, raw_headers = Qiniu::Storage.resumable_upload_with_token( # uptoken, # @localfile_8m, # @bucket, # @key_8m # ) # (code/100).should == 2 # puts data.inspect # puts raw_headers.inspect # puts "key_8m=#{@key_8m}" # # code, data = Qiniu::Storage.stat(@bucket, @key_8m) # puts data.inspect # code.should == 200 # end # end # # context ".resumable_upload_with_token4" do # before do # Qiniu::Storage.delete(@bucket, @key_1m) # end # # after do # code, data = Qiniu::Storage.delete(@bucket, @key_1m) # puts data.inspect # code.should == 200 # end # # it "should works" do # upopts = {:scope => @bucket, :expires_in => 3600, :customer => "why404@gmail.com"} # uptoken = Qiniu.generate_upload_token(upopts) # code, data, raw_headers = Qiniu::Storage.resumable_upload_with_token( # uptoken, # @localfile_1m, # @bucket, # @key_1m # ) # (code/100).should == 2 # puts data.inspect # puts raw_headers.inspect # puts "key_1m=#{@key_1m}" # # code, data = Qiniu::Storage.stat(@bucket, @key_1m) # puts data.inspect # code.should == 200 # end # 
end context ".resumable_upload_with_token_v2" do before do Qiniu::Storage.delete(@bucket, @key_1m) end after do code, data = Qiniu::Storage.delete(@bucket, @key_1m) puts data.inspect code.should == 200 end it "should works" do upopts = {:scope => @bucket, :expires_in => 3600, :customer => "why404@gmail.com"} uptoken = Qiniu.generate_upload_token(upopts) puts "uptoken is #{uptoken}" code, data, raw_headers = Qiniu::Storage.resumable_upload_with_token( uptoken, @localfile_1m, @bucket, @key_1m, nil, nil, nil, nil, nil, nil, 'v2', 4 * 1024 * 1024 ) (code/100).should == 2 puts data.inspect puts raw_headers.inspect puts "key_1m=#{@key_1m}" code, data = Qiniu::Storage.stat(@bucket, @key_1m) puts data.inspect code.should == 200 end end context ".resumable_upload_with_token2_v2" do before do Qiniu::Storage.delete(@bucket, @key_4m) end after do code, data = Qiniu::Storage.delete(@bucket, @key_4m) puts data.inspect code.should == 200 end it "should works" do upopts = {:scope => @bucket, :expires_in => 3600, :customer => "why404@gmail.com"} uptoken = Qiniu.generate_upload_token(upopts) code, data, raw_headers = Qiniu::Storage.resumable_upload_with_token( uptoken, @localfile_4m, @bucket, @key_5m, nil, nil, nil, nil, nil, nil, 'v2', 4 * 1024 * 1024 ) (code/100).should == 2 puts data.inspect puts raw_headers.inspect puts "key_4m=#{@key_4m}" code, data = Qiniu::Storage.stat(@bucket, @key_4m) puts data.inspect code.should == 200 end end context ".resumable_upload_with_token3_v2" do before do Qiniu::Storage.delete(@bucket, @key_8m) end after do code, data = Qiniu::Storage.delete(@bucket, @key_8m) puts data.inspect code.should == 200 end it "should works" do upopts = {:scope => @bucket, :expires_in => 3600, :customer => "why404@gmail.com"} uptoken = Qiniu.generate_upload_token(upopts) code, data, raw_headers = Qiniu::Storage.resumable_upload_with_token( uptoken, @localfile_8m, @bucket, @key_8m, nil, nil, nil, nil, nil, nil, 'v2', 4 * 1024 * 1024 ) (code/100).should == 2 puts 
data.inspect puts raw_headers.inspect puts "key_8m=#{@key_8m}" code, data = Qiniu::Storage.stat(@bucket, @key_8m) puts data.inspect code.should == 200 end end context ".resumable_upload_with_token4_v2" do before do Qiniu::Storage.delete(@bucket, @key_5m) end after do code, data = Qiniu::Storage.delete(@bucket, @key_5m) puts data.inspect code.should == 200 end it "should works" do upopts = {:scope => @bucket, :expires_in => 3600, :customer => "why404@gmail.com"} uptoken = Qiniu.generate_upload_token(upopts) code, data, raw_headers = Qiniu::Storage.resumable_upload_with_token( uptoken, @localfile_5m, @bucket, @key_5m, nil, nil, nil, nil, nil, nil, 'v2' ) (code/100).should == 2 puts data.inspect puts raw_headers.inspect puts "key_5m=#{@key_5m}" code, data = Qiniu::Storage.stat(@bucket, @key_5m) puts data.inspect code.should == 200 end end end describe 'for na0 bucket' do before :all do @bucket = 'rubysdk-na0' end include_examples 'Upload Specs' end describe 'for as0 bucket' do before :all do @bucket = 'rubysdk-as0' end include_examples 'Upload Specs' it 'should raise BucketIsMissing error' do upopts = {:scope => @bucket, :expires_in => 3600, :endUser => "why404@gmail.com"} uptoken = Qiniu.generate_upload_token(upopts) expect do Qiniu::Storage.upload_with_token_2( uptoken, __FILE__, @key, ) end.to raise_error('upload_with_token_2 requires :bucket option when multi_region is enabled') end end end # module Storage end # module Qiniu
require 'spec_helper' describe QML::JSArray do let(:array_script) do <<-JS [1, 2, 3] JS end let(:array) { engine.evaluate(array_script) } describe '#each' do it 'enumerates each values' do expect(array.each.to_a).to eq [1,2,3] end end describe '#to_a' do it 'converts it to an array' do expect(array.to_a).to eq [1,2,3] end end describe '#length' do it 'returns length' do expect(array.length).to eq 3 end end end Fix JSArray spec require 'spec_helper' describe QML::JSArray do let(:array_script) do <<-JS [1, 2, 3] JS end let(:array) { QML.engine.evaluate(array_script) } describe '#each' do it 'enumerates each values' do expect(array.each.to_a).to eq [1,2,3] end end describe '#to_a' do it 'converts it to an array' do expect(array.to_a).to eq [1,2,3] end end describe '#length' do it 'returns length' do expect(array.length).to eq 3 end end end
require "spec_helper" describe Rchess::Board do include ChessGameStrings PIECE_NAMES = [ :rook, :knight, :bishop, :king, :queen, :bishop, :knight, :rook ] let(:board) { Rchess::Board.new } describe "initialization" do it "accepts an optional CSV string of past moves" do prepopulated_board = Rchess::Board.new(ambiguous_knights_csv) expect(prepopulated_board["f6"].name).to eq :knight expect(prepopulated_board["f6"].lettercase).to eq :uppercase expect(prepopulated_board["d4"].name).to eq :pawn expect(prepopulated_board["d4"].lettercase).to eq :lowercase end it "allows access to its 64 squares with chess notation" do expect(board["a8"].name).to eq :rook expect(board["d1"].name).to eq :king end it "distinguishes sides by case" do expect(board["d8"].lettercase).to eq :uppercase expect(board["d1"].lettercase).to eq :lowercase end it "has empty squares" do expect(board["d4"].name).to eq :empty end it "places all the pieces in their correct positions" do expect_pieces_are_placed_correctly_for_rank("8", :uppercase) expect_pawns_are_placed_correctly_for_rank("7", :uppercase) expect_pawns_are_placed_correctly_for_rank("2", :lowercase) expect_pieces_are_placed_correctly_for_rank("1", :lowercase) end def expect_pieces_are_placed_correctly_for_rank(rank, lettercase) ("a".."h").map { |file| file + rank } .zip(PIECE_NAMES).each do |pos, piece| expect(board[pos].name).to eq piece expect(board[pos].lettercase).to eq lettercase end end def expect_pawns_are_placed_correctly_for_rank(rank, lettercase) ("a".."h").map { |file| file + rank }.each do |pos| expect(board[pos].name).to eq :pawn expect(board[pos].lettercase).to eq lettercase end end end describe "parsing moves" do it "handles piece names and destinations" do expect(board.parse("ne2")).to eq ["n", :no_origin, "e2"] end it "handles piece names, origins, and destinations" do expect(board.parse("nge2")).to eq ["n", "g", "e2"] end end describe "moving pieces" do it "finds the position of a piece" do piece1 = board["a1"] piece2 = 
board["d2"] expect(board.find_file_and_rank(piece1)).to eq "a1" expect(board.find_file_and_rank(piece2)).to eq "d2" end it "moves pieces across the board" do board.commit_move("nc3") expect(board["b1"].name).to eq :empty expect(board["c3"].name).to eq :knight end xit "prevents a piece from moving if it is blocked by another piece" do expect(board.commit_move("ra3")).to eq :illegal_move expect(board["a3"].name).to eq :empty expect(board["a1"].name).to eq :rook end xit "allows pieces to capture other pieces" do before_capture = Rchess::Board.new(before_capture_csv) expect(board["h6"].lettercase).to eq :lowercase board.commit_move("Bh6") expect(board["h6"].lettercase).to eq :uppercase end describe "return values of a move" do it "returns :success if the move succeeded" do expect(board.commit_move "nc3" ).to eq :success end it "returns :illegal_move if no pieces were identified to be moved" do expect(board.commit_move "nd5" ).to eq :illegal_move end it "returns :ambiguous_move if more than one piece was identified" do ambiguous_board = Rchess::Board.new ambiguous_knights_csv expect(ambiguous_board.commit_move ambiguous_move ).to eq :ambiguous_move end end it "accepts an originating file in a move" do board = Rchess::Board.new(ambiguous_knights_csv) expect(board.commit_move(unambiguous_knight_move)).to eq :success expect(board["e2"].name).to eq :knight end it "finds an implied piece by its move" do board.commit_move("Pe6") expect(board["e7"].name).to eq :empty expect(board["e6"].name).to eq :pawn expect(board["e6"].lettercase).to eq :uppercase end it "supports castling" it "supports true chess notation" it "supports pawn promotion" end end Assert against correct variable in test require "spec_helper" describe Rchess::Board do include ChessGameStrings PIECE_NAMES = [ :rook, :knight, :bishop, :king, :queen, :bishop, :knight, :rook ] let(:board) { Rchess::Board.new } describe "initialization" do it "accepts an optional CSV string of past moves" do prepopulated_board = 
Rchess::Board.new(ambiguous_knights_csv) expect(prepopulated_board["f6"].name).to eq :knight expect(prepopulated_board["f6"].lettercase).to eq :uppercase expect(prepopulated_board["d4"].name).to eq :pawn expect(prepopulated_board["d4"].lettercase).to eq :lowercase end it "allows access to its 64 squares with chess notation" do expect(board["a8"].name).to eq :rook expect(board["d1"].name).to eq :king end it "distinguishes sides by case" do expect(board["d8"].lettercase).to eq :uppercase expect(board["d1"].lettercase).to eq :lowercase end it "has empty squares" do expect(board["d4"].name).to eq :empty end it "places all the pieces in their correct positions" do expect_pieces_are_placed_correctly_for_rank("8", :uppercase) expect_pawns_are_placed_correctly_for_rank("7", :uppercase) expect_pawns_are_placed_correctly_for_rank("2", :lowercase) expect_pieces_are_placed_correctly_for_rank("1", :lowercase) end def expect_pieces_are_placed_correctly_for_rank(rank, lettercase) ("a".."h").map { |file| file + rank } .zip(PIECE_NAMES).each do |pos, piece| expect(board[pos].name).to eq piece expect(board[pos].lettercase).to eq lettercase end end def expect_pawns_are_placed_correctly_for_rank(rank, lettercase) ("a".."h").map { |file| file + rank }.each do |pos| expect(board[pos].name).to eq :pawn expect(board[pos].lettercase).to eq lettercase end end end describe "parsing moves" do it "handles piece names and destinations" do expect(board.parse("ne2")).to eq ["n", :no_origin, "e2"] end it "handles piece names, origins, and destinations" do expect(board.parse("nge2")).to eq ["n", "g", "e2"] end end describe "moving pieces" do it "finds the position of a piece" do piece1 = board["a1"] piece2 = board["d2"] expect(board.find_file_and_rank(piece1)).to eq "a1" expect(board.find_file_and_rank(piece2)).to eq "d2" end it "moves pieces across the board" do board.commit_move("nc3") expect(board["b1"].name).to eq :empty expect(board["c3"].name).to eq :knight end xit "prevents a piece from 
moving if it is blocked by another piece" do expect(board.commit_move("ra3")).to eq :illegal_move expect(board["a3"].name).to eq :empty expect(board["a1"].name).to eq :rook end it "allows pieces to capture other pieces" do before_capture = Rchess::Board.new(before_capture_csv) expect(before_capture["h6"].lettercase).to eq :lowercase board.commit_move("Bh6") expect(board["h6"].lettercase).to eq :uppercase end describe "return values of a move" do it "returns :success if the move succeeded" do expect(board.commit_move "nc3" ).to eq :success end it "returns :illegal_move if no pieces were identified to be moved" do expect(board.commit_move "nd5" ).to eq :illegal_move end it "returns :ambiguous_move if more than one piece was identified" do ambiguous_board = Rchess::Board.new ambiguous_knights_csv expect(ambiguous_board.commit_move ambiguous_move ).to eq :ambiguous_move end end it "accepts an originating file in a move" do board = Rchess::Board.new(ambiguous_knights_csv) expect(board.commit_move(unambiguous_knight_move)).to eq :success expect(board["e2"].name).to eq :knight end it "finds an implied piece by its move" do board.commit_move("Pe6") expect(board["e7"].name).to eq :empty expect(board["e6"].name).to eq :pawn expect(board["e6"].lettercase).to eq :uppercase end it "supports castling" it "supports true chess notation" it "supports pawn promotion" end end
# -*- coding: utf-8 -*- require File.expand_path(File.join(File.dirname(__FILE__), '..', 'spec_helper')) require File.expand_path(File.join(File.dirname(__FILE__), '..', '..', 'lib', 'rwsc', 'version')) describe Rwsc::VERSION do subject { Rwsc::VERSION::STRING } it { should == "#{Rwsc::VERSION::MAJOR}." + "#{Rwsc::VERSION::MINOR}." + "#{Rwsc::VERSION::TINY}" } end modify indent . # -*- coding: utf-8 -*- require File.expand_path(File.join(File.dirname(__FILE__), '..', 'spec_helper')) require File.expand_path(File.join(File.dirname(__FILE__), '..', '..', 'lib', 'rwsc', 'version')) describe Rwsc::VERSION do subject { Rwsc::VERSION::STRING } it { should == "#{Rwsc::VERSION::MAJOR}." + "#{Rwsc::VERSION::MINOR}." + "#{Rwsc::VERSION::TINY}" } end
require 'date' require 'spec_helper' require 'bibliografia' describe Bibliografia do before :each do nombres = Array.new nombres = %w{ Autor1 Autor2 } isbn = { "isbn-10" => " 1937785491", "isbn-13" => " 978-1937785499" } @b1 = Bibliografia::Bibliografia.new(nombres, "Titulo", "Editorial", "Numero de edicion 4", Date.new(2015,10,31), isbn) #Clase Bibliografia del MODULO Bibliografia!!!!!!! #@b2 = Bibliografia::Bibliografia.new(nombres, "Titulo") end #before each context "#Autores" do it "Debe existir uno o mas autores" do @b1.autores.length.should >= 1 end end #context context "#Titulo" do it "Debe existir un titulo" do @b1.titulo.should eq("Titulo") end end #context # context "#Serie" do # it "Debe existir o no una serie" do # expect(defined? @b1.serie).to eq(serie) # expect(defined? @b2.serie).to eq(false) # end # end #context context "#Editorial" do it "Debe existir una editorial" do @b1.editorial.should eq("Editorial") end end context "#Numero de edicion" do it "Debe existir un numero de edicion" do @b1.n_edicion.should eq("Numero de edicion 4") end end #context context "#Fecha Publicacion" do it "Debe existir una fecha de publicacion" do @b1.fecha.strftime("%d/%m/%Y").should eq("31/10/2015") end end context "#ISBN" do it "Debe existir un ISBN o mas" do @b1.isbn.size.should >= 1 end end context "#Obtener Autores" do it "Debe poderse obtener los autores" do @b1.getAutores.should eq("Autor1","Autor2") end end end #describe C21: Falla prueba (Obtener ISBN) require 'date' require 'spec_helper' require 'bibliografia' describe Bibliografia do before :each do nombres = Array.new nombres = %w{ Autor1 Autor2 } isbn = { "isbn-10" => " 1937785491", "isbn-13" => " 978-1937785499" } @b1 = Bibliografia::Bibliografia.new(nombres, "Titulo", "Editorial", "Numero de edicion 4", Date.new(2015,10,31), isbn) #Clase Bibliografia del MODULO Bibliografia!!!!!!! 
#@b2 = Bibliografia::Bibliografia.new(nombres, "Titulo") end #before each context "#Autores" do it "Debe existir uno o mas autores" do @b1.autores.length.should >= 1 end end #context context "#Titulo" do it "Debe existir un titulo" do @b1.titulo.should eq("Titulo") end end #context # context "#Serie" do # it "Debe existir o no una serie" do # expect(defined? @b1.serie).to eq(serie) # expect(defined? @b2.serie).to eq(false) # end # end #context context "#Editorial" do it "Debe existir una editorial" do @b1.editorial.should eq("Editorial") end end context "#Numero de edicion" do it "Debe existir un numero de edicion" do @b1.n_edicion.should eq("Numero de edicion 4") end end #context context "#Fecha Publicacion" do it "Debe existir una fecha de publicacion" do @b1.fecha.strftime("%d/%m/%Y").should eq("31/10/2015") end end context "#ISBN" do it "Debe existir un ISBN o mas" do @b1.isbn.size.should >= 1 end end context "#Obtener Autores" do it "Debe poderse obtener los autores" do @b1.getAutores.should eq("Autor1","Autor2") end end context "#Obtener ISBN" do it "Debe poderse obtener los o el ISBN" do @b1.getISBN.should eq({ "isbn-10" => " 1937785491", "isbn-13" => " 978-1937785499" }) end end end #describe
RSpec.configure do |config| config.after :each do ObjectSpace.each_object(File) do |file| next if file.closed? next if file.path == '/dev/null' next if file.path == Rails.root.join('log/test.log').to_s fail "You have not closed #{file.path}" end end config.after :each do memory = GetProcessMem.new.mb.to_i fail "Memory is too high: #{memory} MB" if memory > 320 end config.after :suite do memory = GetProcessMem.new.mb.to_i puts "Test memory is #{memory} MB" end if ENV['COVERAGE'] config.after :suite do examples = config.reporter.examples.count duration = Time.zone.now - config.start_time average = duration / examples if duration > 2.minutes || average > 0.25 fail "Tests took too long: total=#{duration.to_i}s average=#{average.round(5)}s" end end end end Increase test memory limit to stop random failures RSpec.configure do |config| config.after :each do ObjectSpace.each_object(File) do |file| next if file.closed? next if file.path == '/dev/null' next if file.path == Rails.root.join('log/test.log').to_s fail "You have not closed #{file.path}" end end config.after :each do memory = GetProcessMem.new.mb.to_i fail "Memory is too high: #{memory} MB" if memory > 320 end config.after :suite do memory = GetProcessMem.new.mb.to_i puts "Test memory is #{memory} MB" end if ENV['COVERAGE'] config.after :suite do examples = config.reporter.examples.count duration = Time.zone.now - config.start_time average = duration / examples if duration > 2.minutes || average > 0.26 fail "Tests took too long: total=#{duration.to_i}s average=#{average.round(5)}s" end end end end
require_relative 'spec_helper' describe Asciidoctor::PDF::ThemeLoader do subject { described_class } context '#load' do it 'should not fail if theme data is empty' do theme = subject.new.load '' (expect theme).not_to be_nil (expect theme).to be_an OpenStruct (expect theme.to_h).to be_empty end it 'should not fail if theme data is false' do theme = subject.new.load false (expect theme).not_to be_nil (expect theme).to be_an OpenStruct (expect theme.to_h).to be_empty end it 'should store flattened keys in OpenStruct' do theme_data = SafeYAML.load <<~EOS page: size: A4 base: font: family: Times-Roman border_width: 0.5 admonition: label: font_style: bold EOS theme = subject.new.load theme_data (expect theme).to be_an OpenStruct (expect theme).to respond_to :page_size (expect theme).to respond_to :base_font_family (expect theme).to respond_to :base_border_width (expect theme).to respond_to :admonition_label_font_style end it 'should replace hyphens in key names with underscores' do theme_data = SafeYAML.load <<~EOS page-size: A4 base: font-family: Times-Roman abstract: title-font-size: 20 admonition: icon: tip: stroke-color: FFFF00 EOS theme = subject.new.load theme_data (expect theme).to be_an OpenStruct (expect theme).to respond_to :page_size (expect theme).to respond_to :base_font_family (expect theme).to respond_to :abstract_title_font_size (expect theme).to respond_to :admonition_icon_tip (expect theme.admonition_icon_tip).to have_key :stroke_color end it 'should not replace hyphens with underscores in role names' do theme_data = SafeYAML.load <<~EOS role: flaming-red: font-color: ff0000 so-very-blue: font: color: 0000ff EOS theme = subject.new.load theme_data (expect theme).to be_an OpenStruct (expect theme).to respond_to 'role_flaming-red_font_color' (expect theme['role_flaming-red_font_color']).to eql 'FF0000' (expect theme).to respond_to 'role_so-very-blue_font_color' (expect theme['role_so-very-blue_font_color']).to eql '0000FF' end it 'should convert keys that 
end in content to a string' do theme_data = SafeYAML.load <<~EOS menu: caret_content: - '>' ulist: marker: disc: content: 0 footer: recto: left: content: true EOS theme = subject.new.load theme_data (expect theme).to be_an OpenStruct (expect theme.menu_caret_content).to eql '[">"]' (expect theme.ulist_marker_disc_content).to eql '0' (expect theme.footer_recto_left_content).to eql 'true' end it 'should allow font catalog and font fallbacks to be defined as flat keys' do theme_data = SafeYAML.load <<~EOS font_catalog: Serif: normal: /path/to/serif-font.ttf Fallback: normal: /path/to/fallback-font.ttf font_fallbacks: - Fallback EOS theme = subject.new.load theme_data (expect theme.font_catalog).to be_a Hash (expect theme.font_catalog['Serif']).to be_a Hash (expect theme.font_catalog['Serif']['normal']).to eql '/path/to/serif-font.ttf' (expect theme.font_fallbacks).to be_a Array (expect theme.font_fallbacks).to eql ['Fallback'] end end context '.load_file' do it 'should not fail if theme file is empty' do theme = subject.load_file fixture_file 'empty-theme.yml' (expect theme).to be_an OpenStruct (expect theme).to eql subject.load_base_theme end it 'should fail if theme is indented using tabs' do expect { subject.load_file fixture_file 'tab-indentation-theme.yml' }.to raise_exception RuntimeError end it 'should load and extend themes specified by extends array' do input_file = fixture_file 'extended-custom-theme.yml' theme = subject.load_file input_file, nil, fixtures_dir (expect theme.base_align).to eql 'justify' (expect theme.base_font_family).to eql 'Times-Roman' (expect theme.base_font_color).to eql 'FF0000' end it 'should extend built-in default theme if value of extends entry is default' do input_file = fixture_file 'extended-red-theme.yml' theme = subject.load_file input_file, nil, fixtures_dir (expect theme.base_font_family).to eql 'Noto Serif' (expect theme.base_font_color).to eql '0000FF' end end context '.load_theme' do it 'should load base theme if theme 
name is base' do theme = subject.load_theme 'base' (expect theme).not_to be_nil (expect theme).to be_an OpenStruct (expect theme.base_font_family).to eql 'Helvetica' (expect theme.heading_font_family).to be_nil (expect theme).to eql subject.load_base_theme end it 'should load default theme if no arguments are given' do theme = subject.load_theme (expect theme).not_to be_nil (expect theme).to be_an OpenStruct (expect theme.heading_font_family).to eql 'Noto Serif' end it 'should not inherit from base theme when loading default theme' do theme = subject.load_theme # NOTE table_border_style is only set in the base theme (expect theme.table_border_style).to be_nil end it 'should inherit from base theme when loading custom theme' do theme = subject.load_theme fixture_file 'empty-theme.yml' (expect theme.table_border_style).to eql 'solid' end it 'should not inherit from base theme if custom theme extends nothing' do theme = subject.load_theme fixture_file 'extends-nil-empty-theme.yml' (expect theme.table_border_style).to be_nil end it 'should not inherit from base theme if custom theme extends default' do theme = subject.load_theme 'extended-default-theme.yml', fixtures_dir (expect theme.table_border_style).to be_nil end it 'should not inherit from base theme if custom theme extends nil' do theme = subject.load_theme 'extended-extends-nil-theme.yml', fixtures_dir (expect theme.base_font_family).to eql 'Times-Roman' (expect theme.heading_font_family).to eql 'Times-Roman' (expect theme.base_font_size).to be_nil end it 'should inherit from base theme if custom theme extends base' do base_theme = subject.load_base_theme theme = subject.load_theme fixture_file 'extended-base-theme.yml' (expect theme.base_font_family).not_to eql base_theme.base_font_family (expect theme.base_font_color).not_to eql base_theme.base_font_color (expect theme.base_font_size).to eql base_theme.base_font_size end it 'should look for file ending in -theme.yml when resolving custom theme' do theme = 
subject.load_theme 'custom', fixtures_dir (expect theme.base_font_family).to eql 'Times-Roman' (expect theme.__dir__).to eql fixtures_dir end it 'should set __dir__ to dirname of theme file if theme path not set' do theme = subject.load_theme fixture_file 'custom-theme.yml' (expect theme.base_font_family).to eql 'Times-Roman' (expect theme.__dir__).to eql fixtures_dir end it 'should load specified file ending with .yml if path is not given' do theme = subject.load_theme fixture_file 'custom-theme.yml' (expect theme.base_font_family).to eql 'Times-Roman' end it 'should load specified file ending with .yml from specified path' do theme = subject.load_theme 'custom-theme.yml', fixtures_dir (expect theme.base_font_family).to eql 'Times-Roman' end it 'should load extended themes relative to theme file when theme_path is not specified' do theme = subject.load_theme fixture_file 'extended-custom-theme.yml' (expect theme.__dir__).to eql fixtures_dir (expect theme.base_align).to eql 'justify' (expect theme.base_font_family).to eql 'Times-Roman' (expect theme.base_font_color).to eql 'FF0000' end it 'should ensure required keys are set' do theme = subject.load_theme 'extends-nil-empty-theme.yml', fixtures_dir (expect theme.__dir__).to eql fixtures_dir (expect theme.base_align).to eql 'left' (expect theme.base_line_height).to eql 1 (expect theme.base_font_color).to eql '000000' (expect theme.code_font_family).to eql 'Courier' (expect theme.conum_font_family).to eql 'Courier' (expect theme.to_h.keys).to have_size 6 end it 'should not overwrite required keys with default values if already set' do theme = subject.load_theme 'extended-default-theme.yml', fixtures_dir (expect theme.base_align).to eql 'justify' (expect theme.code_font_family).to eql 'M+ 1mn' (expect theme.conum_font_family).to eql 'M+ 1mn' end end context '.resolve_theme_file' do it 'should expand reference to home directory in theme dir when resolving theme file from name' do expected_path = File.join Dir.home, 
'.local/share/asciidoctor-pdf/custom-theme.yml' expected_dir = File.dirname expected_path theme_path, theme_dir = subject.resolve_theme_file 'custom', '~/.local/share/asciidoctor-pdf' (expect theme_path).to eql expected_path (expect theme_dir).to eql expected_dir end it 'should expand reference to home directory in theme dir when resolving theme file from filename' do expected_path = File.join Dir.home, '.local/share/asciidoctor-pdf/custom-theme.yml' expected_dir = File.dirname expected_path theme_path, theme_dir = subject.resolve_theme_file 'custom-theme.yml', '~/.local/share/asciidoctor-pdf' (expect theme_path).to eql expected_path (expect theme_dir).to eql expected_dir end it 'should expand reference to home directory in theme file when resolving theme file' do expected_path = File.join Dir.home, '.local/share/asciidoctor-pdf/custom-theme.yml' expected_dir = File.dirname expected_path theme_path, theme_dir = subject.resolve_theme_file '~/.local/share/asciidoctor-pdf/custom-theme.yml' (expect theme_path).to eql expected_path (expect theme_dir).to eql expected_dir end end context 'data types' do it 'should resolve null color value as nil' do theme_data = SafeYAML.load <<~EOS page: background_color: null EOS theme = subject.new.load theme_data (expect theme.page_background_color).to be_nil end end context 'interpolation' do it 'should resolve variable reference with underscores to previously defined key' do theme_data = SafeYAML.load <<~EOS brand: blue: '0000FF' base: font_color: $brand_blue heading: font_color: $base_font_color EOS theme = subject.new.load theme_data (expect theme.base_font_color).to eql '0000FF' (expect theme.heading_font_color).to eql theme.base_font_color end it 'should resolve variable reference with hyphens to previously defined key' do theme_data = SafeYAML.load <<~EOS brand: blue: '0000FF' base: font_color: $brand-blue heading: font_color: $base-font-color EOS theme = subject.new.load theme_data (expect theme.base_font_color).to eql 
'0000FF' (expect theme.heading_font_color).to eql theme.base_font_color end it 'should interpolate variables in value' do theme_data = SafeYAML.load <<~EOS brand: font_family_name: Noto font_family_variant: Serif base: font_family: $brand_font_family_name $brand_font_family_variant heading: font_family: $brand_font_family_name Sans EOS theme = subject.new.load theme_data (expect theme.base_font_family).to eql 'Noto Serif' (expect theme.heading_font_family).to eql 'Noto Sans' end it 'should interpolate computed value' do theme_data = SafeYAML.load <<~EOS base: font_size: 10 line_height_length: 12 line_height: $base_line_height_length / $base_font_size font_size_large: $base_font_size * 1.25 font_size_min: $base_font_size * 3 / 4 blockquote: border_width: 5 padding: [0, $base_line_height_length - 2, $base_line_height_length * -0.75, $base_line_height_length + $blockquote_border_width / 2] EOS theme = subject.new.load theme_data (expect theme.base_line_height).to eql 1.2 (expect theme.base_font_size_large).to eql 12.5 (expect theme.base_font_size_min).to eql 7.5 (expect theme.blockquote_padding).to eql [0, 10, -9, 14.5] end it 'should not compute value if operator is not surrounded by spaces on either side' do theme_data = SafeYAML.load <<~EOS brand: ten: 10 a_string: ten*10 another_string: ten-10 EOS theme = subject.new.load theme_data (expect theme.brand_ten).to eql 10 (expect theme.brand_a_string).to eql 'ten*10' (expect theme.brand_another_string).to eql 'ten-10' end it 'should apply precision functions to value' do theme_data = SafeYAML.load <<~EOS base: font_size: 10.5 heading: h1_font_size: ceil($base_font_size * 2.6) h2_font_size: floor($base_font_size * 2.1) h3_font_size: round($base_font_size * 1.5) EOS theme = subject.new.load theme_data (expect theme.heading_h1_font_size).to eql 28 (expect theme.heading_h2_font_size).to eql 22 (expect theme.heading_h3_font_size).to eql 16 end it 'should wrap cmyk color values in color type if key ends with _color' do 
theme_data = SafeYAML.load <<~EOS page: background_color: [0, 0, 0, 0] base: font_color: [100, 100, 100, 100] heading: font-color: [0, 0, 0, 0.92] link: font-color: [67.33%, 31.19%, 0, 20.78%] literal: font-color: [0%, 0%, 0%, 0.87] EOS theme = subject.new.load theme_data (expect theme.page_background_color).to eql 'FFFFFF' (expect theme.page_background_color).to be_a subject::HexColorValue (expect theme.base_font_color).to eql '000000' (expect theme.base_font_color).to be_a subject::HexColorValue (expect theme.heading_font_color).to eql [0, 0, 0, 92] (expect theme.heading_font_color).to be_a subject::CMYKColorValue (expect theme.link_font_color).to eql [67.33, 31.19, 0, 20.78] (expect theme.link_font_color).to be_a subject::CMYKColorValue (expect theme.literal_font_color).to eql [0, 0, 0, 87] (expect theme.literal_font_color).to be_a subject::CMYKColorValue end it 'should wrap hex color values in color type if key ends with _color' do theme_data = SafeYAML.load <<~EOS page: background_color: 'ffffff' base: font_color: '000000' heading: font-color: 333333 link: font-color: 428bca literal: font-color: 222 EOS theme = subject.new.load theme_data (expect theme.page_background_color).to eql 'FFFFFF' (expect theme.page_background_color).to be_a subject::HexColorValue (expect theme.base_font_color).to eql '000000' (expect theme.base_font_color).to be_a subject::HexColorValue # NOTE this assertion tests that the value can be an integer, not a string (expect theme.heading_font_color).to eql '333333' (expect theme.heading_font_color).to be_a subject::HexColorValue (expect theme.link_font_color).to eql '428BCA' (expect theme.link_font_color).to be_a subject::HexColorValue (expect theme.literal_font_color).to eql '222222' (expect theme.literal_font_color).to be_a subject::HexColorValue end it 'should coerce rgb color values to hex and wrap in color type if key ends with _color' do theme_data = SafeYAML.load <<~EOS page: background_color: [255, 255, 255] base: font_color: [0, 
0, 0] heading: font-color: [51, 51, 51] link: font-color: [66, 139, 202] literal: font-color: ['34', '34', '34'] EOS theme = subject.new.load theme_data (expect theme.page_background_color).to eql 'FFFFFF' (expect theme.page_background_color).to be_a subject::HexColorValue (expect theme.base_font_color).to eql '000000' (expect theme.base_font_color).to be_a subject::HexColorValue (expect theme.heading_font_color).to eql '333333' (expect theme.heading_font_color).to be_a subject::HexColorValue (expect theme.link_font_color).to eql '428BCA' (expect theme.link_font_color).to be_a subject::HexColorValue (expect theme.literal_font_color).to eql '222222' (expect theme.literal_font_color).to be_a subject::HexColorValue end it 'should not wrap value in color type if key does not end with _color' do theme_data = SafeYAML.load <<~EOS menu: caret: content: 4a4a4a EOS theme = subject.new.load theme_data (expect theme.menu_caret_content).to eql '4a4a4a' (expect theme.menu_caret_content).not_to be_a subject::HexColorValue end # NOTE this only works when the theme is read from a file it 'should allow hex color values to be prefixed with # for any key' do theme = subject.load_theme 'hex-color-shorthand', fixtures_dir (expect theme.base_font_color).to eql '222222' (expect theme.base_border_color).to eql 'DDDDDD' (expect theme.page_background_color).to eql 'FEFEFE' (expect theme.link_font_color).to eql '428BCA' (expect theme.literal_font_color).to eql 'AA0000' (expect theme.footer_font_color).to eql '000099' (expect theme.footer_background_color).to be_nil end # NOTE this is only relevant when the theme is read from a file it 'should not coerce color-like values to string if key does not end with color' do theme = subject.load_theme 'color-like-value', fixtures_dir (expect theme.footer_height).to eql 100 end it 'should coerce content key to a string' do theme_data = SafeYAML.load <<~EOS vars: foo: bar footer: recto: left: content: $vars_foo right: content: 10 EOS theme = 
subject.new.load theme_data (expect theme.footer_recto_left_content).to eql 'bar' (expect theme.footer_recto_right_content).to be_a String (expect theme.footer_recto_right_content).to eql '10' end it 'should resolve variable references in font catalog' do theme_data = SafeYAML.load <<~EOS vars: serif-font: /path/to/serif-font.ttf font: catalog: Serif: normal: $vars-serif-font EOS theme = subject.new.load theme_data (expect theme.font_catalog).to be_a Hash (expect theme.font_catalog['Serif']).to be_a Hash (expect theme.font_catalog['Serif']['normal']).to eql '/path/to/serif-font.ttf' end end end move color data type tests to data types context require_relative 'spec_helper' describe Asciidoctor::PDF::ThemeLoader do subject { described_class } context '#load' do it 'should not fail if theme data is empty' do theme = subject.new.load '' (expect theme).not_to be_nil (expect theme).to be_an OpenStruct (expect theme.to_h).to be_empty end it 'should not fail if theme data is false' do theme = subject.new.load false (expect theme).not_to be_nil (expect theme).to be_an OpenStruct (expect theme.to_h).to be_empty end it 'should store flattened keys in OpenStruct' do theme_data = SafeYAML.load <<~EOS page: size: A4 base: font: family: Times-Roman border_width: 0.5 admonition: label: font_style: bold EOS theme = subject.new.load theme_data (expect theme).to be_an OpenStruct (expect theme).to respond_to :page_size (expect theme).to respond_to :base_font_family (expect theme).to respond_to :base_border_width (expect theme).to respond_to :admonition_label_font_style end it 'should replace hyphens in key names with underscores' do theme_data = SafeYAML.load <<~EOS page-size: A4 base: font-family: Times-Roman abstract: title-font-size: 20 admonition: icon: tip: stroke-color: FFFF00 EOS theme = subject.new.load theme_data (expect theme).to be_an OpenStruct (expect theme).to respond_to :page_size (expect theme).to respond_to :base_font_family (expect theme).to respond_to 
:abstract_title_font_size (expect theme).to respond_to :admonition_icon_tip (expect theme.admonition_icon_tip).to have_key :stroke_color end it 'should not replace hyphens with underscores in role names' do theme_data = SafeYAML.load <<~EOS role: flaming-red: font-color: ff0000 so-very-blue: font: color: 0000ff EOS theme = subject.new.load theme_data (expect theme).to be_an OpenStruct (expect theme).to respond_to 'role_flaming-red_font_color' (expect theme['role_flaming-red_font_color']).to eql 'FF0000' (expect theme).to respond_to 'role_so-very-blue_font_color' (expect theme['role_so-very-blue_font_color']).to eql '0000FF' end it 'should convert keys that end in content to a string' do theme_data = SafeYAML.load <<~EOS menu: caret_content: - '>' ulist: marker: disc: content: 0 footer: recto: left: content: true EOS theme = subject.new.load theme_data (expect theme).to be_an OpenStruct (expect theme.menu_caret_content).to eql '[">"]' (expect theme.ulist_marker_disc_content).to eql '0' (expect theme.footer_recto_left_content).to eql 'true' end it 'should allow font catalog and font fallbacks to be defined as flat keys' do theme_data = SafeYAML.load <<~EOS font_catalog: Serif: normal: /path/to/serif-font.ttf Fallback: normal: /path/to/fallback-font.ttf font_fallbacks: - Fallback EOS theme = subject.new.load theme_data (expect theme.font_catalog).to be_a Hash (expect theme.font_catalog['Serif']).to be_a Hash (expect theme.font_catalog['Serif']['normal']).to eql '/path/to/serif-font.ttf' (expect theme.font_fallbacks).to be_a Array (expect theme.font_fallbacks).to eql ['Fallback'] end end context '.load_file' do it 'should not fail if theme file is empty' do theme = subject.load_file fixture_file 'empty-theme.yml' (expect theme).to be_an OpenStruct (expect theme).to eql subject.load_base_theme end it 'should fail if theme is indented using tabs' do expect { subject.load_file fixture_file 'tab-indentation-theme.yml' }.to raise_exception RuntimeError end it 'should load 
and extend themes specified by extends array' do input_file = fixture_file 'extended-custom-theme.yml' theme = subject.load_file input_file, nil, fixtures_dir (expect theme.base_align).to eql 'justify' (expect theme.base_font_family).to eql 'Times-Roman' (expect theme.base_font_color).to eql 'FF0000' end it 'should extend built-in default theme if value of extends entry is default' do input_file = fixture_file 'extended-red-theme.yml' theme = subject.load_file input_file, nil, fixtures_dir (expect theme.base_font_family).to eql 'Noto Serif' (expect theme.base_font_color).to eql '0000FF' end end context '.load_theme' do it 'should load base theme if theme name is base' do theme = subject.load_theme 'base' (expect theme).not_to be_nil (expect theme).to be_an OpenStruct (expect theme.base_font_family).to eql 'Helvetica' (expect theme.heading_font_family).to be_nil (expect theme).to eql subject.load_base_theme end it 'should load default theme if no arguments are given' do theme = subject.load_theme (expect theme).not_to be_nil (expect theme).to be_an OpenStruct (expect theme.heading_font_family).to eql 'Noto Serif' end it 'should not inherit from base theme when loading default theme' do theme = subject.load_theme # NOTE table_border_style is only set in the base theme (expect theme.table_border_style).to be_nil end it 'should inherit from base theme when loading custom theme' do theme = subject.load_theme fixture_file 'empty-theme.yml' (expect theme.table_border_style).to eql 'solid' end it 'should not inherit from base theme if custom theme extends nothing' do theme = subject.load_theme fixture_file 'extends-nil-empty-theme.yml' (expect theme.table_border_style).to be_nil end it 'should not inherit from base theme if custom theme extends default' do theme = subject.load_theme 'extended-default-theme.yml', fixtures_dir (expect theme.table_border_style).to be_nil end it 'should not inherit from base theme if custom theme extends nil' do theme = subject.load_theme 
'extended-extends-nil-theme.yml', fixtures_dir (expect theme.base_font_family).to eql 'Times-Roman' (expect theme.heading_font_family).to eql 'Times-Roman' (expect theme.base_font_size).to be_nil end it 'should inherit from base theme if custom theme extends base' do base_theme = subject.load_base_theme theme = subject.load_theme fixture_file 'extended-base-theme.yml' (expect theme.base_font_family).not_to eql base_theme.base_font_family (expect theme.base_font_color).not_to eql base_theme.base_font_color (expect theme.base_font_size).to eql base_theme.base_font_size end it 'should look for file ending in -theme.yml when resolving custom theme' do theme = subject.load_theme 'custom', fixtures_dir (expect theme.base_font_family).to eql 'Times-Roman' (expect theme.__dir__).to eql fixtures_dir end it 'should set __dir__ to dirname of theme file if theme path not set' do theme = subject.load_theme fixture_file 'custom-theme.yml' (expect theme.base_font_family).to eql 'Times-Roman' (expect theme.__dir__).to eql fixtures_dir end it 'should load specified file ending with .yml if path is not given' do theme = subject.load_theme fixture_file 'custom-theme.yml' (expect theme.base_font_family).to eql 'Times-Roman' end it 'should load specified file ending with .yml from specified path' do theme = subject.load_theme 'custom-theme.yml', fixtures_dir (expect theme.base_font_family).to eql 'Times-Roman' end it 'should load extended themes relative to theme file when theme_path is not specified' do theme = subject.load_theme fixture_file 'extended-custom-theme.yml' (expect theme.__dir__).to eql fixtures_dir (expect theme.base_align).to eql 'justify' (expect theme.base_font_family).to eql 'Times-Roman' (expect theme.base_font_color).to eql 'FF0000' end it 'should ensure required keys are set' do theme = subject.load_theme 'extends-nil-empty-theme.yml', fixtures_dir (expect theme.__dir__).to eql fixtures_dir (expect theme.base_align).to eql 'left' (expect theme.base_line_height).to 
eql 1 (expect theme.base_font_color).to eql '000000' (expect theme.code_font_family).to eql 'Courier' (expect theme.conum_font_family).to eql 'Courier' (expect theme.to_h.keys).to have_size 6 end it 'should not overwrite required keys with default values if already set' do theme = subject.load_theme 'extended-default-theme.yml', fixtures_dir (expect theme.base_align).to eql 'justify' (expect theme.code_font_family).to eql 'M+ 1mn' (expect theme.conum_font_family).to eql 'M+ 1mn' end end context '.resolve_theme_file' do it 'should expand reference to home directory in theme dir when resolving theme file from name' do expected_path = File.join Dir.home, '.local/share/asciidoctor-pdf/custom-theme.yml' expected_dir = File.dirname expected_path theme_path, theme_dir = subject.resolve_theme_file 'custom', '~/.local/share/asciidoctor-pdf' (expect theme_path).to eql expected_path (expect theme_dir).to eql expected_dir end it 'should expand reference to home directory in theme dir when resolving theme file from filename' do expected_path = File.join Dir.home, '.local/share/asciidoctor-pdf/custom-theme.yml' expected_dir = File.dirname expected_path theme_path, theme_dir = subject.resolve_theme_file 'custom-theme.yml', '~/.local/share/asciidoctor-pdf' (expect theme_path).to eql expected_path (expect theme_dir).to eql expected_dir end it 'should expand reference to home directory in theme file when resolving theme file' do expected_path = File.join Dir.home, '.local/share/asciidoctor-pdf/custom-theme.yml' expected_dir = File.dirname expected_path theme_path, theme_dir = subject.resolve_theme_file '~/.local/share/asciidoctor-pdf/custom-theme.yml' (expect theme_path).to eql expected_path (expect theme_dir).to eql expected_dir end end context 'data types' do it 'should resolve null color value as nil' do theme_data = SafeYAML.load <<~EOS page: background_color: null EOS theme = subject.new.load theme_data (expect theme.page_background_color).to be_nil end it 'should wrap cmyk 
color values in color type if key ends with _color' do theme_data = SafeYAML.load <<~EOS page: background_color: [0, 0, 0, 0] base: font_color: [100, 100, 100, 100] heading: font-color: [0, 0, 0, 0.92] link: font-color: [67.33%, 31.19%, 0, 20.78%] literal: font-color: [0%, 0%, 0%, 0.87] EOS theme = subject.new.load theme_data (expect theme.page_background_color).to eql 'FFFFFF' (expect theme.page_background_color).to be_a subject::HexColorValue (expect theme.base_font_color).to eql '000000' (expect theme.base_font_color).to be_a subject::HexColorValue (expect theme.heading_font_color).to eql [0, 0, 0, 92] (expect theme.heading_font_color).to be_a subject::CMYKColorValue (expect theme.link_font_color).to eql [67.33, 31.19, 0, 20.78] (expect theme.link_font_color).to be_a subject::CMYKColorValue (expect theme.literal_font_color).to eql [0, 0, 0, 87] (expect theme.literal_font_color).to be_a subject::CMYKColorValue end it 'should wrap hex color values in color type if key ends with _color' do theme_data = SafeYAML.load <<~EOS page: background_color: 'ffffff' base: font_color: '000000' heading: font-color: 333333 link: font-color: 428bca literal: font-color: 222 EOS theme = subject.new.load theme_data (expect theme.page_background_color).to eql 'FFFFFF' (expect theme.page_background_color).to be_a subject::HexColorValue (expect theme.base_font_color).to eql '000000' (expect theme.base_font_color).to be_a subject::HexColorValue # NOTE this assertion tests that the value can be an integer, not a string (expect theme.heading_font_color).to eql '333333' (expect theme.heading_font_color).to be_a subject::HexColorValue (expect theme.link_font_color).to eql '428BCA' (expect theme.link_font_color).to be_a subject::HexColorValue (expect theme.literal_font_color).to eql '222222' (expect theme.literal_font_color).to be_a subject::HexColorValue end it 'should coerce rgb color values to hex and wrap in color type if key ends with _color' do theme_data = SafeYAML.load <<~EOS page: 
background_color: [255, 255, 255] base: font_color: [0, 0, 0] heading: font-color: [51, 51, 51] link: font-color: [66, 139, 202] literal: font-color: ['34', '34', '34'] EOS theme = subject.new.load theme_data (expect theme.page_background_color).to eql 'FFFFFF' (expect theme.page_background_color).to be_a subject::HexColorValue (expect theme.base_font_color).to eql '000000' (expect theme.base_font_color).to be_a subject::HexColorValue (expect theme.heading_font_color).to eql '333333' (expect theme.heading_font_color).to be_a subject::HexColorValue (expect theme.link_font_color).to eql '428BCA' (expect theme.link_font_color).to be_a subject::HexColorValue (expect theme.literal_font_color).to eql '222222' (expect theme.literal_font_color).to be_a subject::HexColorValue end it 'should not wrap value in color type if key does not end with _color' do theme_data = SafeYAML.load <<~EOS menu: caret: content: 4a4a4a EOS theme = subject.new.load theme_data (expect theme.menu_caret_content).to eql '4a4a4a' (expect theme.menu_caret_content).not_to be_a subject::HexColorValue end # NOTE this only works when the theme is read from a file it 'should allow hex color values to be prefixed with # for any key' do theme = subject.load_theme 'hex-color-shorthand', fixtures_dir (expect theme.base_font_color).to eql '222222' (expect theme.base_border_color).to eql 'DDDDDD' (expect theme.page_background_color).to eql 'FEFEFE' (expect theme.link_font_color).to eql '428BCA' (expect theme.literal_font_color).to eql 'AA0000' (expect theme.footer_font_color).to eql '000099' (expect theme.footer_background_color).to be_nil end # NOTE this is only relevant when the theme is read from a file it 'should not coerce color-like values to string if key does not end with color' do theme = subject.load_theme 'color-like-value', fixtures_dir (expect theme.footer_height).to eql 100 end it 'should coerce content key to a string' do theme_data = SafeYAML.load <<~EOS vars: foo: bar footer: recto: left: 
content: $vars_foo right: content: 10 EOS theme = subject.new.load theme_data (expect theme.footer_recto_left_content).to eql 'bar' (expect theme.footer_recto_right_content).to be_a String (expect theme.footer_recto_right_content).to eql '10' end end context 'interpolation' do it 'should resolve variable reference with underscores to previously defined key' do theme_data = SafeYAML.load <<~EOS brand: blue: '0000FF' base: font_color: $brand_blue heading: font_color: $base_font_color EOS theme = subject.new.load theme_data (expect theme.base_font_color).to eql '0000FF' (expect theme.heading_font_color).to eql theme.base_font_color end it 'should resolve variable reference with hyphens to previously defined key' do theme_data = SafeYAML.load <<~EOS brand: blue: '0000FF' base: font_color: $brand-blue heading: font_color: $base-font-color EOS theme = subject.new.load theme_data (expect theme.base_font_color).to eql '0000FF' (expect theme.heading_font_color).to eql theme.base_font_color end it 'should interpolate variables in value' do theme_data = SafeYAML.load <<~EOS brand: font_family_name: Noto font_family_variant: Serif base: font_family: $brand_font_family_name $brand_font_family_variant heading: font_family: $brand_font_family_name Sans EOS theme = subject.new.load theme_data (expect theme.base_font_family).to eql 'Noto Serif' (expect theme.heading_font_family).to eql 'Noto Sans' end it 'should interpolate computed value' do theme_data = SafeYAML.load <<~EOS base: font_size: 10 line_height_length: 12 line_height: $base_line_height_length / $base_font_size font_size_large: $base_font_size * 1.25 font_size_min: $base_font_size * 3 / 4 blockquote: border_width: 5 padding: [0, $base_line_height_length - 2, $base_line_height_length * -0.75, $base_line_height_length + $blockquote_border_width / 2] EOS theme = subject.new.load theme_data (expect theme.base_line_height).to eql 1.2 (expect theme.base_font_size_large).to eql 12.5 (expect theme.base_font_size_min).to eql 7.5 
(expect theme.blockquote_padding).to eql [0, 10, -9, 14.5] end it 'should not compute value if operator is not surrounded by spaces on either side' do theme_data = SafeYAML.load <<~EOS brand: ten: 10 a_string: ten*10 another_string: ten-10 EOS theme = subject.new.load theme_data (expect theme.brand_ten).to eql 10 (expect theme.brand_a_string).to eql 'ten*10' (expect theme.brand_another_string).to eql 'ten-10' end it 'should apply precision functions to value' do theme_data = SafeYAML.load <<~EOS base: font_size: 10.5 heading: h1_font_size: ceil($base_font_size * 2.6) h2_font_size: floor($base_font_size * 2.1) h3_font_size: round($base_font_size * 1.5) EOS theme = subject.new.load theme_data (expect theme.heading_h1_font_size).to eql 28 (expect theme.heading_h2_font_size).to eql 22 (expect theme.heading_h3_font_size).to eql 16 end it 'should resolve variable references in font catalog' do theme_data = SafeYAML.load <<~EOS vars: serif-font: /path/to/serif-font.ttf font: catalog: Serif: normal: $vars-serif-font EOS theme = subject.new.load theme_data (expect theme.font_catalog).to be_a Hash (expect theme.font_catalog['Serif']).to be_a Hash (expect theme.font_catalog['Serif']['normal']).to eql '/path/to/serif-font.ttf' end end end
# coding: utf-8 require 'spec_helper' require 'transpec/cli' module Transpec describe CLI do include FileHelper subject(:cli) { CLI.new } describe '.run' do it 'invokes #run' do args = ['foo', 'bar'] CLI.any_instance.should_receive(:run).with(args) CLI.run(args) end end describe '#forced?' do subject { cli.forced? } context 'by default' do it { should be_false } end end describe '#run' do before do cli.stub(:puts) cli.stub(:warn) cli.stub(:target_files).and_return(args) end subject { cli.run(args) } let(:args) { ['some_file.rb'] } let(:rewriter) do rewriter = double('rewriter').as_null_object rewriter.stub(:invalid_context_errors).and_return([]) rewriter end shared_examples 'rewrites files' do it 'rewrites files' do rewriter.should_receive(:rewrite_file!) cli.run(args) end it 'returns true' do should be_true end end shared_context 'stubbed rewriter' do before do Rewriter.stub(:new).and_return(rewriter) end end context 'when git is available' do include_context 'stubbed rewriter' before { Git.stub(:command_available?).and_return(true) } context 'and inside of a repository' do before { Git.stub(:inside_of_repository?).and_return(true) } context 'and the repository is not clean' do before { Git.stub(:clean?).and_return(false) } context '#forced? is false' do before { cli.stub(:forced?).and_return(false) } it 'aborts processing' do rewriter.should_not_receive(:rewrite_file!) cli.run(args).should be_false end it 'warns to the user' do cli.should_receive(:warn) do |arg| arg.should include('clean') end cli.run(args) end end context '#forced? 
is true' do before { cli.stub(:forced?).and_return(true) } include_examples 'rewrites files' end end context 'and the repository is clean' do before { Git.stub(:clean?).and_return(true) } include_examples 'rewrites files' end end context 'and not inside of a repository' do before { Git.stub(:inside_of_repository?).and_return(false) } include_examples 'rewrites files' end end context 'when git is not available' do include_context 'stubbed rewriter' before { Git.stub(:command_available?).and_return(false) } include_examples 'rewrites files' end context 'when a syntax error is raised while processing files' do include_context 'isolated environment' let(:args) { [invalid_syntax_file_path, valid_syntax_file_path] } let(:invalid_syntax_file_path) { 'invalid_example.rb' } let(:valid_syntax_file_path) { 'valid_example.rb' } before do create_file(invalid_syntax_file_path, 'This is invalid syntax <') create_file(valid_syntax_file_path, 'this_is_valid_syntax') end it 'warns to the user' do cli.should_receive(:warn) do |message| message.should include('Syntax error') end cli.run(args) end it 'continues processing files' do cli.should_receive(:puts).with("Processing #{invalid_syntax_file_path}") cli.should_receive(:puts).with("Processing #{valid_syntax_file_path}") cli.run(args) end end context 'when any other error is raised while running' do include_context 'stubbed rewriter' before do cli.stub(:parse_options).and_raise(ArgumentError, 'No such file or directory - non-existent-file') end it 'return false' do should be_false end it 'prints message of the exception' do cli.should_receive(:warn).with('No such file or directory - non-existent-file') cli.run([]) end end context 'when no target paths are specified' do include_context 'isolated environment' include_context 'stubbed rewriter' let(:args) { [] } context 'and there is "spec" directory' do before { Dir.mkdir('spec') } it 'targets files in the "spec" directoy' do cli.should_receive(:target_files).with(['spec']) 
cli.run(args) end end context 'and there is not "spec" directory' do it 'aborts' do should be_false end end end end describe '#process_file' do include_context 'isolated environment' let(:file_path) { 'example.rb' } before do create_file(file_path, source) cli.stub(:puts) end context 'when the source has a monkey-patched expectation outside of example group context' do let(:source) do <<-END describe 'example group' do class SomeClass def some_method 1.should == 1 end end it 'is an example' do SomeClass.new.some_method end end END end it 'warns to user' do cli.should_receive(:warn) do |message| message.should =~ /cannot/i message.should =~ /context/i end cli.process_file(file_path) end end end describe '#parse_options' do subject { cli.parse_options(args) } let(:args) { ['some_file', '--negative-form', 'to_not', 'some_dir'] } it 'return non-option arguments' do should == ['some_file', 'some_dir'] end it 'does not mutate the passed array' do cli.parse_options(args) args.should == ['some_file', '--negative-form', 'to_not', 'some_dir'] end describe '-f/--force option' do let(:args) { ['--force'] } it 'sets #forced? true' do cli.parse_options(args) cli.should be_forced end end describe '-d/--disable option' do [ ['expect_to_matcher', :convert_to_expect_to_matcher?], ['expect_to_receive', :convert_to_expect_to_receive?], ['allow_to_receive', :convert_to_allow_to_receive?], ['deprecated', :replace_deprecated_method?] 
].each do |cli_type, config_attr| context "when #{cli_type.inspect} is specified" do let(:args) { ['--disable', cli_type] } it "sets Configuration##{config_attr} false" do cli.parse_options(args) cli.configuration.send(config_attr).should be_false end end end context 'when multiple types are specified with comma' do let(:args) { ['--disable', 'allow_to_receive,deprecated'] } it 'handles all of them' do cli.parse_options(args) cli.configuration.convert_to_allow_to_receive?.should be_false cli.configuration.replace_deprecated_method?.should be_false end end context 'when unknown type is specified' do let(:args) { ['--disable', 'unknown'] } it 'raises error' do -> { cli.parse_options(args) }.should raise_error(ArgumentError) { |error| error.message.should == 'Unknown conversion type "unknown"' } end end end describe '-n/--negative-form option' do ['not_to', 'to_not'].each do |form| context "when #{form.inspect} is specified" do let(:args) { ['--negative-form', form] } it "sets Configuration#negative_form_of_to? #{form.inspect}" do cli.parse_options(args) cli.configuration.negative_form_of_to.should == form end end end end describe '-p/--no-parentheses-matcher-arg option' do let(:args) { ['--no-parentheses-matcher-arg'] } it 'sets Configuration#parenthesize_matcher_arg? 
false' do cli.parse_options(args) cli.configuration.parenthesize_matcher_arg.should be_false end end describe '--no-color option' do before do Sickill::Rainbow.enabled = true end let(:args) { ['--no-color'] } it 'disables color in the output' do cli.parse_options(args) Sickill::Rainbow.enabled.should be_false end end describe '--version option' do before do cli.stub(:puts) cli.stub(:exit) end let(:args) { ['--version'] } it 'shows version' do cli.should_receive(:puts).with(Version.to_s) cli.parse_options(args) end it 'exits' do cli.should_receive(:exit) cli.parse_options(args) end end end describe '#target_files' do include_context 'isolated environment' before do ['file', 'file.rb', 'dir/file', 'dir/file.rb'].each do |path| create_file(path, '') end end subject(:target_files) { cli.target_files(paths) } context 'when no path is passed' do let(:paths) { [] } it 'returns empty array' do should be_empty end end context 'when a file path with .rb extension is passed' do let(:paths) { ['file.rb'] } it 'returns the path' do should == ['file.rb'] end end context 'when a file path without extension is passed' do let(:paths) { ['file'] } it 'returns the path' do should == ['file'] end end context 'when a non-existent path is passed' do let(:paths) { ['non-existent-file'] } it 'raises error' do -> { target_files }.should raise_error(ArgumentError) { |error| error.message.should == 'No such file or directory "non-existent-file"' } end end context 'when a directory path is passed' do let(:paths) { ['dir'] } it 'returns file paths with .rb extension in the directory recursively' do should == ['dir/file.rb'] end end end end end Refactor CLI spec # coding: utf-8 require 'spec_helper' require 'transpec/cli' module Transpec describe CLI do include FileHelper subject(:cli) { CLI.new } describe '.run' do it 'invokes #run' do args = ['foo', 'bar'] CLI.any_instance.should_receive(:run).with(args) CLI.run(args) end end describe '#forced?' do subject { cli.forced? 
} context 'by default' do it { should be_false } end end describe '#run' do include_context 'isolated environment' subject { cli.run(args) } let(:args) { [file_path] } let(:file_path) { 'spec/example_spec.rb' } before do cli.stub(:puts) cli.stub(:warn) create_file(file_path, <<-END describe 'something' do it 'is 1' do 1.should == 1 end end END ) end shared_examples 'rewrites files' do it 'rewrites files' do cli.should_receive(:process_file) cli.run(args) end it 'returns true' do should be_true end end context 'when git is available' do before { Git.stub(:command_available?).and_return(true) } context 'and inside of a repository' do before { Git.stub(:inside_of_repository?).and_return(true) } context 'and the repository is not clean' do before { Git.stub(:clean?).and_return(false) } context '#forced? is false' do before { cli.stub(:forced?).and_return(false) } it 'aborts processing' do cli.should_not_receive(:process_file) cli.run(args).should be_false end it 'warns to the user' do cli.should_receive(:warn) do |arg| arg.should include('clean') end cli.run(args) end end context '#forced? 
is true' do before { cli.stub(:forced?).and_return(true) } include_examples 'rewrites files' end end context 'and the repository is clean' do before { Git.stub(:clean?).and_return(true) } include_examples 'rewrites files' end end context 'and not inside of a repository' do before { Git.stub(:inside_of_repository?).and_return(false) } include_examples 'rewrites files' end end context 'when git is not available' do before { Git.stub(:command_available?).and_return(false) } include_examples 'rewrites files' end context 'when a syntax error is raised while processing files' do let(:args) { [invalid_syntax_file_path, valid_syntax_file_path] } let(:invalid_syntax_file_path) { 'invalid_example.rb' } let(:valid_syntax_file_path) { 'valid_example.rb' } before do create_file(invalid_syntax_file_path, 'This is invalid syntax <') create_file(valid_syntax_file_path, 'this_is_valid_syntax') end it 'warns to the user' do cli.should_receive(:warn) do |message| message.should include('Syntax error') end cli.run(args) end it 'continues processing files' do cli.should_receive(:puts).with("Processing #{invalid_syntax_file_path}") cli.should_receive(:puts).with("Processing #{valid_syntax_file_path}") cli.run(args) end end context 'when any other error is raised while running' do let(:args) { ['non-existent-file'] } it 'return false' do should be_false end it 'prints message of the exception' do cli.should_receive(:warn).with(/No such file or directory/) cli.run(args) end end context 'when no target paths are specified' do let(:args) { [] } context 'and there is "spec" directory' do let(:file_path) { 'spec/example_spec.rb' } it 'targets files in the "spec" directoy' do cli.should_receive(:target_files).with(['spec']) cli.run(args) end end context 'and there is not "spec" directory' do let(:file_path) { 'example_spec.rb' } it 'aborts' do should be_false end end end end describe '#process_file' do include_context 'isolated environment' let(:file_path) { 'example.rb' } before do 
create_file(file_path, source) cli.stub(:puts) end context 'when the source has a monkey-patched expectation outside of example group context' do let(:source) do <<-END describe 'example group' do class SomeClass def some_method 1.should == 1 end end it 'is an example' do SomeClass.new.some_method end end END end it 'warns to user' do cli.should_receive(:warn) do |message| message.should =~ /cannot/i message.should =~ /context/i end cli.process_file(file_path) end end end describe '#parse_options' do subject { cli.parse_options(args) } let(:args) { ['some_file', '--negative-form', 'to_not', 'some_dir'] } it 'return non-option arguments' do should == ['some_file', 'some_dir'] end it 'does not mutate the passed array' do cli.parse_options(args) args.should == ['some_file', '--negative-form', 'to_not', 'some_dir'] end describe '-f/--force option' do let(:args) { ['--force'] } it 'sets #forced? true' do cli.parse_options(args) cli.should be_forced end end describe '-d/--disable option' do [ ['expect_to_matcher', :convert_to_expect_to_matcher?], ['expect_to_receive', :convert_to_expect_to_receive?], ['allow_to_receive', :convert_to_allow_to_receive?], ['deprecated', :replace_deprecated_method?] 
].each do |cli_type, config_attr| context "when #{cli_type.inspect} is specified" do let(:args) { ['--disable', cli_type] } it "sets Configuration##{config_attr} false" do cli.parse_options(args) cli.configuration.send(config_attr).should be_false end end end context 'when multiple types are specified with comma' do let(:args) { ['--disable', 'allow_to_receive,deprecated'] } it 'handles all of them' do cli.parse_options(args) cli.configuration.convert_to_allow_to_receive?.should be_false cli.configuration.replace_deprecated_method?.should be_false end end context 'when unknown type is specified' do let(:args) { ['--disable', 'unknown'] } it 'raises error' do -> { cli.parse_options(args) }.should raise_error(ArgumentError) { |error| error.message.should == 'Unknown conversion type "unknown"' } end end end describe '-n/--negative-form option' do ['not_to', 'to_not'].each do |form| context "when #{form.inspect} is specified" do let(:args) { ['--negative-form', form] } it "sets Configuration#negative_form_of_to? #{form.inspect}" do cli.parse_options(args) cli.configuration.negative_form_of_to.should == form end end end end describe '-p/--no-parentheses-matcher-arg option' do let(:args) { ['--no-parentheses-matcher-arg'] } it 'sets Configuration#parenthesize_matcher_arg? 
false' do cli.parse_options(args) cli.configuration.parenthesize_matcher_arg.should be_false end end describe '--no-color option' do before do Sickill::Rainbow.enabled = true end let(:args) { ['--no-color'] } it 'disables color in the output' do cli.parse_options(args) Sickill::Rainbow.enabled.should be_false end end describe '--version option' do before do cli.stub(:puts) cli.stub(:exit) end let(:args) { ['--version'] } it 'shows version' do cli.should_receive(:puts).with(Version.to_s) cli.parse_options(args) end it 'exits' do cli.should_receive(:exit) cli.parse_options(args) end end end describe '#target_files' do include_context 'isolated environment' before do ['file', 'file.rb', 'dir/file', 'dir/file.rb'].each do |path| create_file(path, '') end end subject(:target_files) { cli.target_files(paths) } context 'when no path is passed' do let(:paths) { [] } it 'returns empty array' do should be_empty end end context 'when a file path with .rb extension is passed' do let(:paths) { ['file.rb'] } it 'returns the path' do should == ['file.rb'] end end context 'when a file path without extension is passed' do let(:paths) { ['file'] } it 'returns the path' do should == ['file'] end end context 'when a non-existent path is passed' do let(:paths) { ['non-existent-file'] } it 'raises error' do -> { target_files }.should raise_error(ArgumentError) { |error| error.message.should == 'No such file or directory "non-existent-file"' } end end context 'when a directory path is passed' do let(:paths) { ['dir'] } it 'returns file paths with .rb extension in the directory recursively' do should == ['dir/file.rb'] end end end end end
require 'spec_helper' module WebsocketRails describe Channel do subject { Channel.new :awesome_channel } let(:connection) { double('connection') } before do connection.stub!(:trigger) end it "should maintain a pool of subscribed connections" do subject.subscribers.should == [] end describe "#subscribe" do before do connection.stub(:user).and_return({}) WebsocketRails.config.stub(:broadcast_subscriber_events?).and_return(true) end it "should trigger an event when subscriber joins" do subject.should_receive(:trigger).with("subscriber_join", connection.user) subject.subscribe connection end it "should add the connection to the subscriber pool" do subject.subscribe connection subject.subscribers.include?(connection).should be_true end end describe "#unsubscribe" do before do connection.stub(:user).and_return({}) WebsocketRails.config.stub(:broadcast_subscriber_events?).and_return(true) end it "should remove connection from subscriber pool" do subject.subscribe connection subject.unsubscribe connection subject.subscribers.include?(connection).should be_false end it "should do nothing if connection is not subscribed to channel" do subject.unsubscribe connection subject.subscribers.include?(connection).should be_false end it "should trigger an event when subscriber parts" do subject.subscribers << connection subject.should_receive(:trigger).with('subscriber_part', connection.user) subject.unsubscribe connection end end describe "#trigger" do it "should create a new event and trigger it on all subscribers" do event = double('event').as_null_object Event.should_receive(:new) do |name,options| name.should == 'event' options[:data].should == 'data' event end connection.should_receive(:trigger).with(event) subject.subscribers << connection subject.trigger 'event', 'data' end end describe "#trigger_event" do it "should forward the event to the subscribers" do event = double('event').as_null_object subject.should_receive(:send_data).with(event) subject.trigger_event event end 
end context "private channels" do before do subject.subscribers << connection end it "should be public by default" do subject.instance_variable_get(:@private).should_not be_true end describe "#make_private" do it "should set the @private instance variable to true" do subject.make_private subject.instance_variable_get(:@private).should be_true end context "when Configuration#keep_subscribers_when_private? is false" do it "should clear any existing subscribers in the channel" do subject.subscribers.count.should == 1 subject.make_private subject.subscribers.count.should == 0 end end context "when Configuration#keep_subscribers_when_private? is true" do before do WebsocketRails.config.keep_subscribers_when_private = true end it "should leave the existing subscribers in the channel" do subject.subscribers.count.should == 1 subject.make_private subject.subscribers.count.should == 1 end end end describe "#is_private?" do it "should return true if the channel is private" do subject.instance_variable_set(:@private,true) subject.is_private?.should be_true end it "should return false if the channel is public" do subject.instance_variable_set(:@private,false) subject.is_private?.should_not be_true end end end end end updated channel spec for #trigger_event require 'spec_helper' module WebsocketRails describe Channel do subject { Channel.new :awesome_channel } let(:connection) { double('connection') } before do connection.stub!(:trigger) end it "should maintain a pool of subscribed connections" do subject.subscribers.should == [] end describe "#subscribe" do before do connection.stub(:user).and_return({}) WebsocketRails.config.stub(:broadcast_subscriber_events?).and_return(true) end it "should trigger an event when subscriber joins" do subject.should_receive(:trigger).with("subscriber_join", connection.user) subject.subscribe connection end it "should add the connection to the subscriber pool" do subject.subscribe connection subject.subscribers.include?(connection).should 
be_true end end describe "#unsubscribe" do before do connection.stub(:user).and_return({}) WebsocketRails.config.stub(:broadcast_subscriber_events?).and_return(true) end it "should remove connection from subscriber pool" do subject.subscribe connection subject.unsubscribe connection subject.subscribers.include?(connection).should be_false end it "should do nothing if connection is not subscribed to channel" do subject.unsubscribe connection subject.subscribers.include?(connection).should be_false end it "should trigger an event when subscriber parts" do subject.subscribers << connection subject.should_receive(:trigger).with('subscriber_part', connection.user) subject.unsubscribe connection end end describe "#trigger" do it "should create a new event and trigger it on all subscribers" do event = double('event').as_null_object Event.should_receive(:new) do |name,options| name.should == 'event' options[:data].should == 'data' event end connection.should_receive(:trigger).with(event) subject.subscribers << connection subject.trigger 'event', 'data' end end describe "#trigger_event" do it "should forward the event to subscribers if token matches" do event = Event.new 'awesome_event', {:channel => 'awesome_channel', :token => subject.token} subject.should_receive(:send_data).with(event) subject.trigger_event event end it "should ignore the event if the token is invalid" do event = Event.new 'invalid_event', {:channel => 'awesome_channel', :token => 'invalid_token'} subject.should_not_receive(:send_data).with(event) subject.trigger_event event end end context "private channels" do before do subject.subscribers << connection end it "should be public by default" do subject.instance_variable_get(:@private).should_not be_true end describe "#make_private" do it "should set the @private instance variable to true" do subject.make_private subject.instance_variable_get(:@private).should be_true end context "when Configuration#keep_subscribers_when_private? 
is false" do it "should clear any existing subscribers in the channel" do subject.subscribers.count.should == 1 subject.make_private subject.subscribers.count.should == 0 end end context "when Configuration#keep_subscribers_when_private? is true" do before do WebsocketRails.config.keep_subscribers_when_private = true end it "should leave the existing subscribers in the channel" do subject.subscribers.count.should == 1 subject.make_private subject.subscribers.count.should == 1 end end end describe "#is_private?" do it "should return true if the channel is private" do subject.instance_variable_set(:@private,true) subject.is_private?.should be_true end it "should return false if the channel is public" do subject.instance_variable_set(:@private,false) subject.is_private?.should_not be_true end end end end end
require 'helper' describe "EventMachine::WebSocket::Handler" do def handler(request, secure = false) connection = Object.new EM::WebSocket::HandlerFactory.build(connection, format_request(request), secure) end before :each do @request = { :port => 80, :method => "GET", :path => "/demo", :headers => { 'Host' => 'example.com', 'Connection' => 'Upgrade', 'Sec-WebSocket-Key2' => '12998 5 Y3 1 .P00', 'Sec-WebSocket-Protocol' => 'sample', 'Upgrade' => 'WebSocket', 'Sec-WebSocket-Key1' => '4 @1 46546xW%0l 1 5', 'Origin' => 'http://example.com' }, :body => '^n:ds[4U' } @secure_request = @request.merge(:port => 443) @response = { :headers => { "Upgrade" => "WebSocket", "Connection" => "Upgrade", "Sec-WebSocket-Location" => "ws://example.com/demo", "Sec-WebSocket-Origin" => "http://example.com", "Sec-WebSocket-Protocol" => "sample" }, :body => "8jKS\'y:G*Co,Wxa-" } @secure_response = @response.merge(:headers => @response[:headers].merge('Sec-WebSocket-Location' => "wss://example.com/demo")) end it "should handle good request" do handler(@request).should send_handshake(@response) end it "should handle good request to secure default port if secure mode is enabled" do handler(@secure_request, true).should send_handshake(@secure_response) end it "should not handle good request to secure default port if secure mode is disabled" do handler(@secure_request, false).should_not send_handshake(@secure_response) end it "should handle good request on nondefault port" do @request[:port] = 8081 @request[:headers]['Host'] = 'example.com:8081' @response[:headers]['Sec-WebSocket-Location'] = 'ws://example.com:8081/demo' handler(@request).should send_handshake(@response) end it "should handle good request to secure nondefault port" do @secure_request[:port] = 8081 @secure_request[:headers]['Host'] = 'example.com:8081' @secure_response[:headers]['Sec-WebSocket-Location'] = 'wss://example.com:8081/demo' handler(@secure_request, true).should send_handshake(@secure_response) end it "should handle 
good request with no protocol" do @request[:headers].delete('Sec-WebSocket-Protocol') @response[:headers].delete("Sec-WebSocket-Protocol") handler(@request).should send_handshake(@response) end it "should handle extra headers by simply ignoring them" do @request[:headers]['EmptyValue'] = "" @request[:headers]['AKey'] = "AValue" handler(@request).should send_handshake(@response) end it "should raise error on HTTP request" do @request[:headers] = { 'Host' => 'www.google.com', 'User-Agent' => 'Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10.5; en-US; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 GTB6 GTBA', 'Accept' => 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8', 'Accept-Language' => 'en-us,en;q=0.5', 'Accept-Encoding' => 'gzip,deflate', 'Accept-Charset' => 'ISO-8859-1,utf-8;q=0.7,*;q=0.7', 'Keep-Alive' => '300', 'Connection' => 'keep-alive', } lambda { handler(@request).handshake }.should raise_error(EM::WebSocket::HandshakeError) end it "should raise error on wrong method" do @request[:method] = 'POST' lambda { handler(@request).handshake }.should raise_error(EM::WebSocket::HandshakeError) end it "should raise error if upgrade header incorrect" do @request[:headers]['Upgrade'] = 'NonWebSocket' lambda { handler(@request).handshake }.should raise_error(EM::WebSocket::HandshakeError) end it "should raise error if Sec-WebSocket-Protocol is empty" do @request[:headers]['Sec-WebSocket-Protocol'] = '' lambda { handler(@request).handshake }.should raise_error(EM::WebSocket::HandshakeError) end %w[Sec-WebSocket-Key1 Sec-WebSocket-Key2].each do |header| it "should raise error if #{header} has zero spaces" do @request[:headers][header] = 'nospaces' lambda { handler(@request).handshake }.should raise_error(EM::WebSocket::HandshakeError, 'Websocket Key1 or Key2 does not contain spaces - this is a symptom of a cross-protocol attack') end end it "should raise error if spaces do not divide numbers in Sec-WebSocket-Key* " do @request[:headers]['Sec-WebSocket-Key2'] = 
'12998 5 Y3 1.P00' lambda { handler(@request).handshake }.should raise_error(EM::WebSocket::HandshakeError, 'Invalid Key "12998 5 Y3 1.P00"') end it "should leave request with incomplete header" do data = format_request(@request) # Sends only half of the request EM::WebSocket::HandlerFactory.build(mock(EM::WebSocket::Connection), data[0...(data.length / 2)]).should == nil end it "should leave request with incomplete third key" do data = format_request(@request) # Removes last two bytes of the third key EM::WebSocket::HandlerFactory.build(mock(EM::WebSocket::Connection), data[0...(data.length - 2)]).should == nil end end Added spec for empty header bug require 'helper' describe "EventMachine::WebSocket::Handler" do def handler(request, secure = false) connection = Object.new EM::WebSocket::HandlerFactory.build(connection, format_request(request), secure) end before :each do @request = { :port => 80, :method => "GET", :path => "/demo", :headers => { 'Host' => 'example.com', 'Connection' => 'Upgrade', 'Sec-WebSocket-Key2' => '12998 5 Y3 1 .P00', 'Sec-WebSocket-Protocol' => 'sample', 'Upgrade' => 'WebSocket', 'Sec-WebSocket-Key1' => '4 @1 46546xW%0l 1 5', 'Origin' => 'http://example.com' }, :body => '^n:ds[4U' } @secure_request = @request.merge(:port => 443) @response = { :headers => { "Upgrade" => "WebSocket", "Connection" => "Upgrade", "Sec-WebSocket-Location" => "ws://example.com/demo", "Sec-WebSocket-Origin" => "http://example.com", "Sec-WebSocket-Protocol" => "sample" }, :body => "8jKS\'y:G*Co,Wxa-" } @secure_response = @response.merge(:headers => @response[:headers].merge('Sec-WebSocket-Location' => "wss://example.com/demo")) end it "should handle good request" do handler(@request).should send_handshake(@response) end it "should handle good request to secure default port if secure mode is enabled" do handler(@secure_request, true).should send_handshake(@secure_response) end it "should not handle good request to secure default port if secure mode is disabled" do 
handler(@secure_request, false).should_not send_handshake(@secure_response) end it "should handle good request on nondefault port" do @request[:port] = 8081 @request[:headers]['Host'] = 'example.com:8081' @response[:headers]['Sec-WebSocket-Location'] = 'ws://example.com:8081/demo' handler(@request).should send_handshake(@response) end it "should handle good request to secure nondefault port" do @secure_request[:port] = 8081 @secure_request[:headers]['Host'] = 'example.com:8081' @secure_response[:headers]['Sec-WebSocket-Location'] = 'wss://example.com:8081/demo' handler(@secure_request, true).should send_handshake(@secure_response) end it "should handle good request with no protocol" do @request[:headers].delete('Sec-WebSocket-Protocol') @response[:headers].delete("Sec-WebSocket-Protocol") handler(@request).should send_handshake(@response) end it "should handle extra headers by simply ignoring them" do @request[:headers]['EmptyValue'] = "" @request[:headers]['AKey'] = "AValue" handler(@request).should send_handshake(@response) end it "should raise error on HTTP request" do @request[:headers] = { 'Host' => 'www.google.com', 'User-Agent' => 'Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10.5; en-US; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 GTB6 GTBA', 'Accept' => 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8', 'Accept-Language' => 'en-us,en;q=0.5', 'Accept-Encoding' => 'gzip,deflate', 'Accept-Charset' => 'ISO-8859-1,utf-8;q=0.7,*;q=0.7', 'Keep-Alive' => '300', 'Connection' => 'keep-alive', } lambda { handler(@request).handshake }.should raise_error(EM::WebSocket::HandshakeError) end it "should raise error on wrong method" do @request[:method] = 'POST' lambda { handler(@request).handshake }.should raise_error(EM::WebSocket::HandshakeError) end it "should raise error if upgrade header incorrect" do @request[:headers]['Upgrade'] = 'NonWebSocket' lambda { handler(@request).handshake }.should raise_error(EM::WebSocket::HandshakeError) end it "should raise 
error if Sec-WebSocket-Protocol is empty" do @request[:headers]['Sec-WebSocket-Protocol'] = '' lambda { handler(@request).handshake }.should raise_error(EM::WebSocket::HandshakeError) end %w[Sec-WebSocket-Key1 Sec-WebSocket-Key2].each do |header| it "should raise error if #{header} has zero spaces" do @request[:headers][header] = 'nospaces' lambda { handler(@request).handshake }.should raise_error(EM::WebSocket::HandshakeError, 'Websocket Key1 or Key2 does not contain spaces - this is a symptom of a cross-protocol attack') end end it "should raise error if spaces do not divide numbers in Sec-WebSocket-Key* " do @request[:headers]['Sec-WebSocket-Key2'] = '12998 5 Y3 1.P00' lambda { handler(@request).handshake }.should raise_error(EM::WebSocket::HandshakeError, 'Invalid Key "12998 5 Y3 1.P00"') end it "should raise error if the HTTP header is empty" do connection = Object.new lambda { EM::WebSocket::HandlerFactory.build(connection, "\r\n\r\nfoobar", false) }.should raise_error(EM::WebSocket::HandshakeError, "Empty HTTP header") end it "should leave request with incomplete header" do data = format_request(@request) # Sends only half of the request EM::WebSocket::HandlerFactory.build(mock(EM::WebSocket::Connection), data[0...(data.length / 2)]).should == nil end it "should leave request with incomplete third key" do data = format_request(@request) # Removes last two bytes of the third key EM::WebSocket::HandlerFactory.build(mock(EM::WebSocket::Connection), data[0...(data.length - 2)]).should == nil end end
require 'spec_helper' require 'ohai' module Omnibus describe Project do let(:project) { Project.load(project_path('sample')) } subject { project } shared_examples 'a cleanroom setter' do |id, value| it "for `#{id}'" do expect { subject.evaluate("#{id}(#{value.inspect})") } .to_not raise_error end end shared_examples 'a cleanroom getter' do |id| it "for `#{id}'" do expect { subject.evaluate("#{id}") }.to_not raise_error end end it_behaves_like 'a cleanroom setter', :name, 'chef' it_behaves_like 'a cleanroom setter', :friendly_name, 'Chef' it_behaves_like 'a cleanroom setter', :msi_parameters, { foo: 'bar' } it_behaves_like 'a cleanroom setter', :package_name, 'chef.package' it_behaves_like 'a cleanroom setter', :install_path, '/opt/chef' it_behaves_like 'a cleanroom setter', :maintainer, 'Chef Software, Inc' it_behaves_like 'a cleanroom setter', :homepage, 'https://getchef.com' it_behaves_like 'a cleanroom setter', :description, 'Installs the thing' it_behaves_like 'a cleanroom setter', :replaces, 'old-chef' it_behaves_like 'a cleanroom setter', :conflict, 'puppet' it_behaves_like 'a cleanroom setter', :build_version, '1.2.3' it_behaves_like 'a cleanroom setter', :build_iteration, 1 it_behaves_like 'a cleanroom setter', :mac_pkg_identifier, 'com.getchef' it_behaves_like 'a cleanroom setter', :package_user, 'chef' it_behaves_like 'a cleanroom setter', :package_group, 'chef' it_behaves_like 'a cleanroom setter', :override, 'foo' it_behaves_like 'a cleanroom setter', :resources_path, '/path' it_behaves_like 'a cleanroom setter', :dependency, 'libxslt-dev' it_behaves_like 'a cleanroom setter', :runtime_dependency, 'libxslt' it_behaves_like 'a cleanroom setter', :exclude, 'hamlet' it_behaves_like 'a cleanroom setter', :config_file, '/path/to/config.rb' it_behaves_like 'a cleanroom setter', :extra_package_file, '/path/to/asset' it_behaves_like 'a cleanroom getter', :files_path describe 'basics' do it 'should return a name' do expect(project.name).to eq('sample') end it 
'should return an install path' do expect(project.install_path).to eq('/sample') end it 'should return a maintainer' do expect(project.maintainer).to eq('Sample Devs') end it 'should return a homepage' do expect(project.homepage).to eq('http://example.com/') end it 'should return a build version' do expect(project.build_version).to eq('1.0') end it 'should return a build iteration' do expect(project.build_iteration).to eq('1') end it 'should return an array of files and dirs' do expect(project.extra_package_files).to eq(['/path/to/sample_dir', '/path/to/file.conf']) end it 'should return friendly_name' do expect(project.friendly_name).to eq('Sample Project') end it 'should return friendly_name' do expect(project.resources_path).to eq('sample/project/resources') end end describe '#dirty!' do it 'dirties the cache' do subject.instance_variable_set(:@dirty, nil) subject.dirty! expect(subject).to be_dirty end end describe '#dirty?' do it 'returns true by default' do subject.instance_variable_set(:@dirty, nil) expect(subject).to_not be_dirty end it 'returns true when the cache is dirty' do subject.instance_variable_set(:@dirty, true) expect(subject).to be_dirty end it 'returns false when the cache is not dirty' do subject.instance_variable_set(:@dirty, false) expect(subject).to_not be_dirty end end describe '#<=>' do it 'compares projects by name' do list = [ project, Omnibus::Project.load(project_path('chefdk')), ] expect(list.sort.map(&:name)).to eq(%w(chefdk sample)) end end describe '#iteration' do let(:fauxhai_options) { Hash.new } before do stub_ohai(Fauxhai.mock(fauxhai_options).data) end context 'when on RHEL' do let(:fauxhai_options) { { platform: 'redhat', version: '6.4' } } it 'should return a RHEL iteration' do expect(project.iteration).to eq('1.el6') end end context 'when on Debian' do let(:fauxhai_options) { { platform: 'debian', version: '7.2' } } it 'should return a Debian iteration' do expect(project.iteration).to eq('1') end end context 'when on 
FreeBSD' do let(:fauxhai_options) { { platform: 'freebsd', version: '9.1' } } it 'should return a FreeBSD iteration' do expect(project.iteration).to eq('1.freebsd.9.amd64') end end context 'when on Windows' do let(:fauxhai_options) { { platform: 'windows', version: '2008R2' } } before { stub_const('File::ALT_SEPARATOR', '\\') } it 'should return a Windows iteration' do expect(project.iteration).to eq('1.windows') end end context 'when on OS X' do let(:fauxhai_options) { { platform: 'mac_os_x', version: '10.8.2' } } it 'should return a generic iteration' do expect(project.iteration).to eq('1') end end end describe '#overrides' do let(:project) { Omnibus::Project.load(project_path('chefdk')) } it 'should set an override for the zlib version' do expect(project.overrides[:zlib][:version]).to eq('1.2.8') end it 'should access the zlib version through the #override method as well' do expect(project.override(:zlib)[:version]).to eq('1.2.8') end it 'should set all the things through #overrides' do project.overrides(thing: { version: '6.6.6' }) expect(project.override(:zlib)).to be_nil end it 'should retrieve the things set through #overrides' do project.overrides(thing: { version: '6.6.6' }) expect(project.override(:thing)[:version]).to eq('6.6.6') end it 'should not set other things through setting a single #override' do project.override(:thing, version: '6.6.6') expect(project.override(:zlib)[:version]).to eq('1.2.8') end it 'should retrieve the things set through #overrides' do project.override(:thing, version: '6.6.6') expect(project.override(:thing)[:version]).to eq('6.6.6') end end end end Cleanup Project specs require 'spec_helper' require 'ohai' module Omnibus describe Project do let(:project) { Project.load(project_path('sample')) } subject { project } shared_examples 'a cleanroom setter' do |id, value| it "for `#{id}'" do expect { subject.evaluate("#{id}(#{value.inspect})") } .to_not raise_error end end shared_examples 'a cleanroom getter' do |id| it "for 
`#{id}'" do expect { subject.evaluate("#{id}") }.to_not raise_error end end it_behaves_like 'a cleanroom setter', :name, 'chef' it_behaves_like 'a cleanroom setter', :friendly_name, 'Chef' it_behaves_like 'a cleanroom setter', :msi_parameters, { foo: 'bar' } it_behaves_like 'a cleanroom setter', :package_name, 'chef.package' it_behaves_like 'a cleanroom setter', :install_path, '/opt/chef' it_behaves_like 'a cleanroom setter', :maintainer, 'Chef Software, Inc' it_behaves_like 'a cleanroom setter', :homepage, 'https://getchef.com' it_behaves_like 'a cleanroom setter', :description, 'Installs the thing' it_behaves_like 'a cleanroom setter', :replaces, 'old-chef' it_behaves_like 'a cleanroom setter', :conflict, 'puppet' it_behaves_like 'a cleanroom setter', :build_version, '1.2.3' it_behaves_like 'a cleanroom setter', :build_iteration, 1 it_behaves_like 'a cleanroom setter', :mac_pkg_identifier, 'com.getchef' it_behaves_like 'a cleanroom setter', :package_user, 'chef' it_behaves_like 'a cleanroom setter', :package_group, 'chef' it_behaves_like 'a cleanroom setter', :override, 'foo' it_behaves_like 'a cleanroom setter', :resources_path, '/path' it_behaves_like 'a cleanroom setter', :dependency, 'libxslt-dev' it_behaves_like 'a cleanroom setter', :runtime_dependency, 'libxslt' it_behaves_like 'a cleanroom setter', :exclude, 'hamlet' it_behaves_like 'a cleanroom setter', :config_file, '/path/to/config.rb' it_behaves_like 'a cleanroom setter', :extra_package_file, '/path/to/asset' it_behaves_like 'a cleanroom getter', :files_path describe 'basics' do it 'should return a name' do expect(project.name).to eq('sample') end it 'should return an install path' do expect(project.install_path).to eq('/sample') end it 'should return a maintainer' do expect(project.maintainer).to eq('Sample Devs') end it 'should return a homepage' do expect(project.homepage).to eq('http://example.com/') end it 'should return a build version' do expect(project.build_version).to eq('1.0') end it 
'should return a build iteration' do expect(project.build_iteration).to eq('1') end it 'should return an array of files and dirs' do expect(project.extra_package_files).to eq(['/path/to/sample_dir', '/path/to/file.conf']) end it 'should return friendly_name' do expect(project.friendly_name).to eq('Sample Project') end it 'should return friendly_name' do expect(project.resources_path).to eq('sample/project/resources') end end describe '#dirty!' do it 'dirties the cache' do subject.instance_variable_set(:@dirty, nil) subject.dirty! expect(subject).to be_dirty end end describe '#dirty?' do it 'returns true by default' do subject.instance_variable_set(:@dirty, nil) expect(subject).to_not be_dirty end it 'returns true when the cache is dirty' do subject.instance_variable_set(:@dirty, true) expect(subject).to be_dirty end it 'returns false when the cache is not dirty' do subject.instance_variable_set(:@dirty, false) expect(subject).to_not be_dirty end end describe '#<=>' do it 'compares projects by name' do list = [ project, Project.load(project_path('chefdk')), ] expect(list.sort.map(&:name)).to eq(%w(chefdk sample)) end end describe '#iteration' do let(:fauxhai_options) { Hash.new } before do stub_ohai(Fauxhai.mock(fauxhai_options).data) end context 'when on RHEL' do let(:fauxhai_options) { { platform: 'redhat', version: '6.4' } } it 'should return a RHEL iteration' do expect(project.iteration).to eq('1.el6') end end context 'when on Debian' do let(:fauxhai_options) { { platform: 'debian', version: '7.2' } } it 'should return a Debian iteration' do expect(project.iteration).to eq('1') end end context 'when on FreeBSD' do let(:fauxhai_options) { { platform: 'freebsd', version: '9.1' } } it 'should return a FreeBSD iteration' do expect(project.iteration).to eq('1.freebsd.9.amd64') end end context 'when on Windows' do let(:fauxhai_options) { { platform: 'windows', version: '2008R2' } } before { stub_const('File::ALT_SEPARATOR', '\\') } it 'should return a Windows 
iteration' do expect(project.iteration).to eq('1.windows') end end context 'when on OS X' do let(:fauxhai_options) { { platform: 'mac_os_x', version: '10.8.2' } } it 'should return a generic iteration' do expect(project.iteration).to eq('1') end end end describe '#overrides' do let(:project) { Project.load(project_path('chefdk')) } before { project.overrides.clear } it 'should set all the things through #overrides' do project.override(:thing, version: '6.6.6') expect(project.override(:zlib)).to be_nil end it 'retrieves the things set through #overrides' do project.override(:thing, version: '6.6.6') expect(project.override(:thing)[:version]).to eq('6.6.6') end end end end
# # Author:: Prabhu Das (<prabhu.das@clogeny.com>) # Copyright:: Copyright (c) 2013-2014 Chef Software, Inc. # License:: Apache License, Version 2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. require File.expand_path(File.dirname(__FILE__) + '/../spec_helper') require 'chef/knife/cloud/service' describe Chef::Knife::Cloud::Service do let (:instance) { Chef::Knife::Cloud::Service.new } it { expect {instance}.to_not raise_error } it { expect {instance.connection}.to raise_error(Chef::Exceptions::Override, "You must override connection in #{instance.to_s}") } it { expect {instance.create_server}.to raise_error(Chef::Exceptions::Override, "You must override create_server in #{instance.to_s}") } it { expect {instance.delete_server(:server_name)}.to raise_error(Chef::Exceptions::Override, "You must override delete_server in #{instance.to_s}") } it { expect {instance.delete_server}.to raise_error(ArgumentError, "wrong number of arguments (given 0, expected 1)") } it { expect {instance.list_servers}.to raise_error(Chef::Exceptions::Override, "You must override list_servers in #{instance.to_s}") } it { expect {instance.list_images}.to raise_error(ArgumentError, "wrong number of arguments (given 0, expected 1)") } it { expect {instance.list_images(:image_filters)}.to raise_error(Chef::Exceptions::Override, "You must override list_images in #{instance.to_s}") } it { expect {instance.list_resource_configurations()}.to raise_error(Chef::Exceptions::Override, "You must override 
list_resource_configurations in #{instance.to_s}") } it { expect { Chef::Knife::Cloud::Service.new({:auth_params => {:provider => 'Any Cloud Provider'}}) }.to_not raise_error } end Make specs pass on Ruby 2.2 and 2.3 2.3 changed the arguments error string Signed-off-by: Tim Smith <764ef62106582a09ed09dfa0b6bff7c05fd7d1e4@chef.io> # # Author:: Prabhu Das (<prabhu.das@clogeny.com>) # Copyright:: Copyright (c) 2013-2014 Chef Software, Inc. # License:: Apache License, Version 2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
require File.expand_path(File.dirname(__FILE__) + '/../spec_helper') require 'chef/knife/cloud/service' describe Chef::Knife::Cloud::Service do let (:instance) { Chef::Knife::Cloud::Service.new } it { expect {instance}.to_not raise_error } it { expect {instance.connection}.to raise_error(Chef::Exceptions::Override, "You must override connection in #{instance.to_s}") } it { expect {instance.create_server}.to raise_error(Chef::Exceptions::Override, "You must override create_server in #{instance.to_s}") } it { expect {instance.delete_server(:server_name)}.to raise_error(Chef::Exceptions::Override, "You must override delete_server in #{instance.to_s}") } it { expect {instance.delete_server}.to raise_error(ArgumentError) } it { expect {instance.list_servers}.to raise_error(Chef::Exceptions::Override, "You must override list_servers in #{instance.to_s}") } it { expect {instance.list_images}.to raise_error(ArgumentError) } it { expect {instance.list_images(:image_filters)}.to raise_error(Chef::Exceptions::Override, "You must override list_images in #{instance.to_s}") } it { expect {instance.list_resource_configurations()}.to raise_error(Chef::Exceptions::Override, "You must override list_resource_configurations in #{instance.to_s}") } it { expect { Chef::Knife::Cloud::Service.new({:auth_params => {:provider => 'Any Cloud Provider'}}) }.to_not raise_error } end
require 'spec/spec_helper' describe ROXML, "encoding" do class TestResult include ROXML xml_accessor :message end context "when provided non-latin characters" do it "should output those characters as input via methods" do res = TestResult.new res.message = "sadfk одловыа jjklsd " #random russian and english charecters res.to_xml.at('message').inner_text.should == "sadfk одловыа jjklsd " end it "should output those characters as input via xml" do res = TestResult.from_xml("<test_result><message>sadfk одловыа jjklsd </message></test_result>") res.to_xml.at('message').inner_text.should == "sadfk одловыа jjklsd " end end end Expand on encoding spec a bit to accomodate libxml idosyncracies require 'spec/spec_helper' describe ROXML, "encoding" do class TestResult include ROXML xml_accessor :message end context "when provided non-latin characters" do it "should output those characters as input via methods" do res = TestResult.new res.message = "sadfk одловыа jjklsd " #random russian and english charecters doc = ROXML::XML::Document.new doc.root = res.to_xml if defined?(Nokogiri) doc.at('message').inner_text else doc.find_first('message').inner_xml end.should == "sadfk одловыа jjklsd " end it "should output those characters as input via xml" do res = TestResult.from_xml("<test_result><message>sadfk одловыа jjklsd </message></test_result>") doc = ROXML::XML::Document.new doc.root = res.to_xml if defined?(Nokogiri) doc.at('message').inner_text else doc.find_first('message').inner_xml end.should == "sadfk одловыа jjklsd " end end end
class Admin::MyconfigController < ApplicationController def global_lock_enable Myconfig.global_lock_set_true # If there is a failure an exception will be raised, not need to test it head :no_content end def global_lock_disable Myconfig.global_lock_set_false head :no_content end def global_lock_switch Myconfig.global_lock_switch head :no_content end end fixing rubocop errors class Admin::MyconfigController < ApplicationController def global_lock_enable Myconfig.global_lock_set_true # If there is a failure an exception will be raised, not need to test it head :no_content end def global_lock_disable Myconfig.global_lock_set_false head :no_content end def global_lock_switch Myconfig.global_lock_switch head :no_content end end
class Admin::ProductsController < Admin::BaseController def index @manufacturers_array = Rails.cache.read("all_manufacturers") if @manufacturers_array.nil? @manufacturers_array = Product.group("manufacturer").collect(&:manufacturer).compact.sort Rails.cache.write("all_manufacturers", @manufacturers_array) end conditions = {} if params[:supplier_id] conditions.merge!({:supplier_id => params[:supplier_id]}) end # Resource was accessed in nested form through /admin/categories/n/products if params[:category_id] @category = Category.find params[:category_id] # Which categories to display on top of the product list for deeper # navigation into the category tree. @categories = @category.children @products = @category.products.where(conditions).paginate(:per_page => Product.per_page, :page => params[:page]) else @categories ||= Category.roots keyword = nil keyword = params[:search][:keyword] unless params[:search].blank? or params[:search][:keyword].blank? conditions[:manufacturer] = params[:manufacturer] unless params[:manufacturer].blank? @products = Product.search(keyword, :conditions => conditions, :max_matches => 100000, :per_page => Product.per_page, :page => params[:page]) end end def create @product = Product.create(params[:product]) if @product.save flash[:notice] = "Product created." redirect_to edit_admin_product_path(@product) else flash[:error] = "Error creating product." 
render :action => 'new', :layout => 'admin_blank' end end def new @product = Product.new # Must add this, otherwise the product picture partial in # views/admin/products/new.html.erb fails #@product.product_pictures << ProductPicture.new render :layout => 'admin_blank' end def edit @product = Product.find(params[:id]) render :layout => 'admin_blank' end def show end def update @product = Product.find(params[:id]) @product.update_attributes(params[:product]) # The "add component" button was pressed -- refactor this into a separate # page inside a fancybox and a separate controller method once the component # functionality is more robust if params[:add_component] component = SupplyItem.find(params[:component_id].to_i) @product.add_component(component, params[:quantity].to_i) end if @product.save flash[:notice] = "Product updated." else flash[:error] = "Error updating product." end #render :action => 'edit', :layout => 'admin_blank' redirect_to :back end def destroy @product = Product.find(params[:id]) if @product.destroy flash[:notice] = "Product destroyed." else flash[:error] = "Product couldn't be destroyed!" end redirect_to admin_products_path end def new_from_supply_item @supply_item = SupplyItem.find(params[:supply_item_id]) @product = Product.new_from_supply_item(@supply_item) render :action => 'new', :layout => 'admin_blank' end def switch_to_supply_item @supply_item = SupplyItem.find(params[:supply_item_id]) @product = Product.find(params[:id]) @product.supply_item = @supply_item if @product.sync_from_supply_item flash[:notice] = "Product metamorphosis complete." else flash[:error] = "Sorry, could not metamorphosize product." end #redirect_to edit_admin_product_path(@product) redirect_to :back end end Filtering by supplier and only that supplier's category (at the same time), including caching. class Admin::ProductsController < Admin::BaseController def index @manufacturers_array = Rails.cache.read("all_manufacturers") if @manufacturers_array.nil? 
@manufacturers_array = Product.group("manufacturer").collect(&:manufacturer).compact.sort Rails.cache.write("all_manufacturers", @manufacturers_array) end conditions = {} if params[:supplier_id] conditions.merge!({:supplier_id => params[:supplier_id]}) @manufacturers_array = Rails.cache.read("supplier_#{params[:supplier_id]}_manufacturers") if @manufacturers_array.nil? @manufacturers_array = Product.where(:supplier_id => params[:supplier_id]).group("manufacturer").collect(&:manufacturer).compact.sort Rails.cache.write("supplier_#{params[:supplier_id]}_manufacturers", @manufacturers_array) end end # Resource was accessed in nested form through /admin/categories/n/products if params[:category_id] @category = Category.find params[:category_id] # Which categories to display on top of the product list for deeper # navigation into the category tree. @categories = @category.children @products = @category.products.where(conditions).paginate(:per_page => Product.per_page, :page => params[:page]) else @categories ||= Category.roots keyword = nil keyword = params[:search][:keyword] unless params[:search].blank? or params[:search][:keyword].blank? conditions[:manufacturer] = params[:manufacturer] unless params[:manufacturer].blank? @products = Product.search(keyword, :conditions => conditions, :max_matches => 100000, :per_page => Product.per_page, :page => params[:page]) end end def create @product = Product.create(params[:product]) if @product.save flash[:notice] = "Product created." redirect_to edit_admin_product_path(@product) else flash[:error] = "Error creating product." 
render :action => 'new', :layout => 'admin_blank' end end def new @product = Product.new # Must add this, otherwise the product picture partial in # views/admin/products/new.html.erb fails #@product.product_pictures << ProductPicture.new render :layout => 'admin_blank' end def edit @product = Product.find(params[:id]) render :layout => 'admin_blank' end def show end def update @product = Product.find(params[:id]) @product.update_attributes(params[:product]) # The "add component" button was pressed -- refactor this into a separate # page inside a fancybox and a separate controller method once the component # functionality is more robust if params[:add_component] component = SupplyItem.find(params[:component_id].to_i) @product.add_component(component, params[:quantity].to_i) end if @product.save flash[:notice] = "Product updated." else flash[:error] = "Error updating product." end #render :action => 'edit', :layout => 'admin_blank' redirect_to :back end def destroy @product = Product.find(params[:id]) if @product.destroy flash[:notice] = "Product destroyed." else flash[:error] = "Product couldn't be destroyed!" end redirect_to admin_products_path end def new_from_supply_item @supply_item = SupplyItem.find(params[:supply_item_id]) @product = Product.new_from_supply_item(@supply_item) render :action => 'new', :layout => 'admin_blank' end def switch_to_supply_item @supply_item = SupplyItem.find(params[:supply_item_id]) @product = Product.find(params[:id]) @product.supply_item = @supply_item if @product.sync_from_supply_item flash[:notice] = "Product metamorphosis complete." else flash[:error] = "Sorry, could not metamorphosize product." end #redirect_to edit_admin_product_path(@product) redirect_to :back end end
class Api::V1::PresetsController < Api::V1::ApiController before_filter :api_session_token_authenticate! def create @preset = Preset.new(params[:preset]) @preset.owner = current_user.id if @preset.save render :json => @preset, :code => :ok else render :json => {}, :code => :unprocessable_entity end end def show render :json => Preset.where(:owner => current_user.id) end def update if Preset.update(params[:id], params[:preset]) render :json => Preset.find_by_id(params[:id]), :status => :ok else render :json => {}, :code => :unprocessable_entity end end end fix typo class Api::V1::PresetsController < Api::V1::ApiController before_filter :api_session_token_authenticate! def create @preset = Preset.new(params[:preset]) @preset.user_id = current_user.id if @preset.save render :json => @preset, :code => :ok else render :json => {}, :code => :unprocessable_entity end end def show render :json => Preset.where(:user_id => current_user.id) end def update if Preset.update(params[:id], params[:preset]) render :json => Preset.find_by_id(params[:id]), :status => :ok else render :json => {}, :code => :unprocessable_entity end end end
class AuthenticationController < ApplicationController def index redirect_to :action=>:login end def retrieve retrieve_xil(params[:id],:key=>params[:id]) end def render_f render_xil(params[:id], :key=>1, :output=>'pdf') end def login @login = 'lf' if request.post? # raise Exception.new params[:screen_width].to_s+'/'+params[:screen_width].class.to_s session[:body_width] = params[:screen_width].to_i-50 if params[:screen_width] user = User.authenticate(params[:user][:name], params[:user][:password]) if user init_session(user) redirect_to :controller=>session[:last_controller]||:guide, :action=>session[:last_action]||:index unless session[:user_id].blank? else flash[:error] = lc :no_authenticated # 'User can not be authenticated. Please retry.' end session[:user_name] = params[:user][:name] end end def register if request.post? if session[:company_id].nil? @company = Company.new(params[:company]) else @company = Company.find(session[:company_id]) @company.attributes = params[:company] end if @company.save session[:company_id] = @company.id params[:user][:company_id] = @company.id @user = User.new(params[:user]) @user.role_id = @company.admin_role.id if @user.save init_session(@user) redirect_to :controller=>:guide, :action=>:welcome end end else session[:company_id] = nil end end def logout session[:user_id] = nil session[:last_controller] = nil session[:last_action] = nil reset_session redirect_to :action=>:login end protected def init_session(user) session[:user_id] = user.id session[:last_query] = Time.now.to_i session[:expiration] = 3600 end end git-svn-id: https://www.ekylibre.org/svn/trunk/ekylibre@72 67a09383-3dfa-4221-8551-890bac9c277c class AuthenticationController < ApplicationController def index redirect_to :action=>:login end def retrieve retrieve_xil(params[:id],:key=>params[:id]) end def render_f render_xil(params[:id].to_i, :key=>1, :output=>'pdf', :archive=>false) end def login @login = 'lf' if request.post? 
# raise Exception.new params[:screen_width].to_s+'/'+params[:screen_width].class.to_s session[:body_width] = params[:screen_width].to_i-50 if params[:screen_width] user = User.authenticate(params[:user][:name], params[:user][:password]) if user init_session(user) redirect_to :controller=>session[:last_controller]||:guide, :action=>session[:last_action]||:index unless session[:user_id].blank? else flash[:error] = lc :no_authenticated # 'User can not be authenticated. Please retry.' end session[:user_name] = params[:user][:name] end end def register if request.post? if session[:company_id].nil? @company = Company.new(params[:company]) else @company = Company.find(session[:company_id]) @company.attributes = params[:company] end if @company.save session[:company_id] = @company.id params[:user][:company_id] = @company.id @user = User.new(params[:user]) @user.role_id = @company.admin_role.id if @user.save init_session(@user) redirect_to :controller=>:guide, :action=>:welcome end end else session[:company_id] = nil end end def logout session[:user_id] = nil session[:last_controller] = nil session[:last_action] = nil reset_session redirect_to :action=>:login end protected def init_session(user) session[:user_id] = user.id session[:last_query] = Time.now.to_i session[:expiration] = 3600 end end
# NOTE(review): this span contained two concatenated copies of the controller
# (the commit message "make requested change" was fused between them). The
# later copy — whose #download_generate builds the path with File.join — wins
# under Ruby's open-class semantics and is kept here.
class ExamTemplatesController < ApplicationController
  # responders setup
  responders :flash, :http_cache
  respond_to :html

  before_filter :authorize_only_for_admin

  layout 'assignment_content'

  # Lists the assignment's exam templates, eager-loading their divisions.
  def index
    @assignment = Assignment.find(params[:assignment_id])
    @exam_templates = @assignment.exam_templates.includes(:template_divisions)
  end

  # Creates a new instance of the exam template from an uploaded PDF.
  def create
    assignment = Assignment.find(params[:assignment_id])
    new_uploaded_io = params[:create_template][:file_io]
    name = params[:create_template][:name]
    # error checking when new_uploaded_io is not pdf, nil, or when filename is not given
    if new_uploaded_io.nil? || new_uploaded_io.content_type != 'application/pdf'
      flash_message(:error, t('exam_templates.create.failure'))
    else
      filename = new_uploaded_io.original_filename
      new_template = ExamTemplate.new_with_file(new_uploaded_io.read,
                                                assignment_id: assignment.id,
                                                filename: filename,
                                                name: name)
      # sending flash message if saved
      if new_template.save
        flash_message(:success, t('exam_templates.create.success'))
      else
        flash_message(:error, t('exam_templates.create.failure'))
      end
    end
    redirect_to action: 'index'
  end

  # Streams the template's PDF back to the admin.
  def download
    assignment = Assignment.find(params[:assignment_id])
    # look up a specific exam template based on the params[:id]
    exam_template = assignment.exam_templates.find_by(id: params[:id])
    filename = exam_template.filename
    # FIX: the download filename was a garbled literal ("#(unknown)", most
    # likely "#{filename}" originally); send the template's own filename.
    send_file(File.join(exam_template.base_path, filename),
              filename: filename,
              type: 'application/pdf')
  end

  # Replaces the template PDF when a new file is uploaded; otherwise updates
  # the template's attributes (name / template divisions).
  def update
    assignment = Assignment.find(params[:assignment_id])
    old_exam_template = assignment.exam_templates.find_by(id: params[:id])
    # updating exam template file
    new_uploaded_io = params[:exam_template][:new_template]
    unless new_uploaded_io.nil?
      new_template_filename = new_uploaded_io.original_filename
      # error checking when new_uploaded_io is not pdf
      if new_uploaded_io.content_type != 'application/pdf'
        flash_message(:error, t('exam_templates.update.failure'))
      else
        old_template_filename = old_exam_template.filename
        old_exam_template.replace_with_file(new_uploaded_io.read,
                                            assignment_id: assignment.id,
                                            old_filename: old_template_filename,
                                            new_filename: new_template_filename)
        old_exam_template.update(exam_template_params)
        respond_with(old_exam_template, location: assignment_exam_templates_url)
        return
      end
    else
      # updating template division
      if old_exam_template.update(exam_template_params)
        flash_message(:success, t('exam_templates.update.success'))
      else
        flash_message(:error, t('exam_templates.update.failure'))
      end
    end
    redirect_to action: 'index'
  end

  # Kicks off a background job that generates `numCopies` numbered copies of
  # the template starting at `examTemplateIndex`, then renders the polling JS.
  def generate
    copies = params[:numCopies].to_i
    index = params[:examTemplateIndex].to_i
    assignment = Assignment.find(params[:assignment_id])
    exam_template = assignment.exam_templates.find(params[:id])
    flash_message(:success, t('exam_templates.generate.generate_job_started',
                              exam_name: exam_template.assignment.short_identifier))
    current_job = exam_template.generate_copies(copies, index)
    respond_to do |format|
      format.js {
        render 'exam_templates/_poll_generate_job.js.erb',
               locals: { file_name: "#{exam_template.name}-#{index}-#{index + copies - 1}.pdf",
                         exam_id: exam_template.id,
                         job_id: current_job.job_id }
      }
    end
  end

  # Downloads a previously generated batch PDF by file name.
  def download_generate
    assignment = Assignment.find(params[:assignment_id])
    exam_template = assignment.exam_templates.find(params[:id])
    send_file(File.join(exam_template.base_path, params[:file_name]),
              filename: params[:file_name],
              type: 'application/pdf')
  end

  # Splits an uploaded scanned-exam PDF into individual copies via a
  # background job, then redirects to the job log view.
  def split
    assignment = Assignment.find(params[:assignment_id])
    exam_template = assignment.exam_templates.find(params[:id])
    split_exam = params[:exam_template][:pdf_to_split]
    unless split_exam.nil?
      if split_exam.content_type != 'application/pdf'
        flash_message(:error, t('exam_templates.split.invalid'))
        redirect_to action: 'index'
      else
        current_job = exam_template.split_pdf(split_exam.path,
                                              split_exam.original_filename,
                                              @current_user)
        session[:job_id] = current_job.job_id
        redirect_to view_logs_assignment_exam_templates_path
      end
    else
      flash_message(:error, t('exam_templates.split.missing'))
      redirect_to action: 'index'
    end
  end

  # Deletes the exam template and reports success or failure.
  def destroy
    assignment = Assignment.find(params[:assignment_id])
    exam_template = assignment.exam_templates.find(params[:id])
    if exam_template.destroy
      flash_message(:success, t('exam_templates.delete.success'))
    else
      flash_message(:failure, t('exam_templates.delete.failure'))
    end
    redirect_to action: 'index'
  end

  # Shows the split-PDF job logs for this assignment's templates.
  def view_logs
    @assignment = Assignment.find(params[:assignment_id])
    @split_pdf_logs = SplitPdfLog.joins(exam_template: :assignment)
                                 .where(assignments: { id: @assignment.id })
                                 .includes(:exam_template)
                                 .includes(:user)
  end

  # Lists scanned pages that failed automatic matching (files in the
  # assignment's error/ directory), skipping "." and "..".
  def assign_errors
    @assignment = Assignment.find(params[:assignment_id])
    @error_files = []
    Dir.foreach(File.join(MarkusConfigurator.markus_exam_template_dir,
                          @assignment.short_identifier,
                          'error')) do |file|
      @error_files << file unless file =~ /^\.\.?$/
    end
    @error_files = @error_files.sort
  end

  # Streams a single error-page PDF from the error/ directory.
  def download_error_file
    @assignment = Assignment.find(params[:assignment_id])
    send_file(File.join(MarkusConfigurator.markus_exam_template_dir,
                        @assignment.short_identifier,
                        'error',
                        params[:file_name]),
              filename: params[:file_name],
              type: 'application/pdf')
  end

  # Returns (as plain text) the in-browser URL for an error file.
  def download_error_file_path
    render text: download_error_file_assignment_exam_templates_path(
      assignment_id: params[:assignment_id],
      file_name: params[:file_name],
      show_in_browser: true
    )
  end

  # Manually assigns an error page to a copy/page, optionally flipping it.
  def fix_error
    exam_template = ExamTemplate.find(params[:fix_error][:exam_template])
    copy_number = params[:fix_error][:copy_number]
    page_number = params[:fix_error][:page_number]
    filename = params[:fix_error][:filename]
    # because params[:fix_error][:upside_down] is passed as string
    upside_down = params[:fix_error][:upside_down] == 'true'
    exam_template.fix_error(filename, copy_number, page_number, upside_down)
    redirect_to action: 'assign_errors'
  end

  # Strong parameters for exam template updates.
  def exam_template_params
    params.require(:exam_template)
          .permit(
            :name,
            template_divisions_attributes: [:id, :start, :end, :label, :_destroy]
          )
  end
end
# NOTE(review): this span contained two concatenated copies of the class (the
# commit message "avatar typo" was fused between them). The later copy — which
# includes the correctly-named 'common/controllers/avatars' mixin instead of
# the misspelled '…/avatar' — wins under Ruby's open-class semantics and is
# kept here.
class Groups::AvatarsController < Groups::BaseController

  include_controllers 'common/controllers/avatars'
  before_filter :setup

  protected

  # always enable cache, even in dev mode.
  def self.perform_caching; true; end
  def perform_caching; true; end

  # Wires the shared avatar controller to this group: the avatar's owning
  # entity is the group, and success returns to the group's settings page.
  def setup
    @entity = @group
    @success_url = groups_settings_url(@group)
  end

end
# NOTE(review): this span contained two concatenated copies of the class (the
# commit message "Add index action to qualifications controller" was fused
# between them). Consolidated to the final copy, which defines #index.
class QualificationsController < ApplicationController
  # Lists every qualification for the index view.
  def index
    @qualifications = Qualification.all
  end
end
require 'bio'
require 'query_analysis/upload_cuffdiff.rb'
require 'query_analysis/upload_fasta_sequences.rb'
require 'query_analysis/upload_trinity_with_edger.rb'
require 'query_analysis/find_go_terms_for_dataset.rb'
require 'query_analysis/query_diff_exp_transcripts.rb'
require 'query_analysis/query_transcript_isoforms.rb'
require 'query_analysis/query_diff_exp_genes.rb'
require 'query_analysis/get_gene_fastas.rb'
require 'query_analysis/get_transcript_fasta.rb'
require 'query_analysis/query_using_blastn.rb'
require 'query_analysis/query_using_tblastn.rb'
require 'query_analysis/query_using_tblastx.rb'

# NOTE(review): this span contained two concatenated copies of the controller
# (the commit message "Fixed a small bug in the query analysis controller."
# was fused between them). The later copy wins under Ruby's open-class
# semantics and is kept here; its fix: #upload_cuffdiff's flash message reads
# @upload_cuffdiff.dataset_name instead of the nil @upload (NoMethodError).
class QueryAnalysisController < ApplicationController
  before_filter :authenticate_user!
  before_filter :confirm_datasets_available,
                :only => [
                  :query_diff_exp_transcripts,
                  :get_transcript_diff_exp_samples_for_dataset,
                  :query_diff_exp_genes,
                  :get_gene_diff_exp_samples_for_dataset,
                  :query_transcript_isoforms,
                  :get_transcript_isoforms_samples_for_dataset,
                  :get_transcript_fasta, :get_gene_fastas,
                  :query_using_blastn, :query_using_tblastn,
                  :query_using_tblastx,
                  :get_blastn_gap_costs_for_match_and_mismatch_scores,
                  :get_tblastn_gap_costs_for_matrix
                ]
  before_filter :confirm_transcript_isoform_datasets_available,
                :only => [
                  :query_transcript_isoforms,
                  :get_transcript_isoforms_samples_for_dataset
                ]
  before_filter :confirm_transcript_diff_exp_datasets_available,
                :only => [
                  :query_diff_exp_transcripts,
                  :get_transcript_diff_exp_samples_for_dataset
                ]
  before_filter :confirm_gene_diff_exp_datasets_available,
                :only => [
                  :query_diff_exp_genes,
                  :get_gene_diff_exp_samples_for_dataset
                ]
  before_filter :confirm_datasets_without_go_terms_available,
                :only => [:find_go_terms_for_dataset]

  # GET: shows the Cuffdiff upload form. POST: validates and enqueues the
  # upload, flashes a notice, then resets the form.
  def upload_cuffdiff
    if request.get?
      @upload_cuffdiff = UploadCuffdiff.new(current_user)
      @upload_cuffdiff.set_attributes_and_defaults()
    elsif request.post?
      @upload_cuffdiff = UploadCuffdiff.new(current_user)
      @upload_cuffdiff.set_attributes_and_defaults(params[:upload_cuffdiff])
      if (@upload_cuffdiff.valid?)
        SuckerPunch::Queue[:upload_cuffdiff_queue].async.perform(@upload_cuffdiff)
        # FIX (from the later copy): read the name from @upload_cuffdiff, not
        # the undefined @upload.
        flash[:notice] = I18n.t :added_to_upload_queue,
                                :name => @upload_cuffdiff.dataset_name
        #Reset the upload cuffdiff form
        @upload_cuffdiff = UploadCuffdiff.new(current_user)
        @upload_cuffdiff.set_attributes_and_defaults()
      end
    end
  end

  # GET: shows the FASTA upload form. POST: validates and enqueues the upload.
  def upload_fasta_sequences
    if request.get?
      @upload = UploadFastaSequences.new(current_user)
      @upload.set_attributes_and_defaults()
    elsif request.post?
      @upload = UploadFastaSequences.new(current_user)
      @upload.set_attributes_and_defaults(params[:upload_fasta_sequences])
      if (@upload.valid?)
        SuckerPunch::Queue[:upload_fasta_sequences_queue].async.perform(@upload)
        flash[:notice] = I18n.t :added_to_upload_queue,
                                :name => @upload.dataset_name
        #Reset the upload cuffdiff form
        @upload = UploadFastaSequences.new(current_user)
        @upload.set_attributes_and_defaults()
      end
    end
  end

  # GET: shows the Trinity/EdgeR upload form. POST: validates and enqueues.
  def upload_trinity_with_edger
    if (request.get?)
      @upload = UploadTrinityWithEdgeR.new(current_user)
      @upload.set_attributes_and_defaults()
    elsif (request.post?)
      @upload = UploadTrinityWithEdgeR.new(current_user)
      upload_params = params[:upload_trinity_with_edge_r]
      @upload.set_attributes_and_defaults(upload_params)
      if @upload.valid?
        queue_name = :upload_trinity_with_edger_queue
        SuckerPunch::Queue[queue_name].async.perform(@upload)
        flash[:notice] = I18n.t :added_to_upload_queue,
                                :name => @upload.dataset_name
        #Reset the upload form
        @upload = UploadTrinityWithEdgeR.new(current_user)
        @upload.set_attributes_and_defaults()
      end
    end
  end

  # Renders an extra sample-comparison row for the transcripts upload form.
  def add_sample_cmp_for_trinity_with_edger_transcripts
    render :partial => 'trinity_with_edger_transcripts_sample_cmp'
  end

  # Renders an extra sample-comparison row for the genes upload form.
  def add_sample_cmp_for_trinity_with_edger_genes
    render :partial => 'trinity_with_edger_genes_sample_cmp'
  end

  # GET: shows the GO-term finder form. POST: enqueues the job, marks the
  # dataset in-progress, and re-renders (or shows the "none left" view).
  def find_go_terms_for_dataset
    if request.get?
      @finder = FindGoTermsForDataset.new(current_user)
      @finder.set_attributes_and_defaults()
    elsif request.post?
      @finder = FindGoTermsForDataset.new(current_user)
      @finder.set_attributes_and_defaults(params[:find_go_terms_for_dataset])
      if @finder.valid?
        queue_name = :find_go_terms_for_dataset_queue
        SuckerPunch::Queue[queue_name].async.perform(@finder)
        #Make sure the dataset is set to in-progress before the page reloads
        @dataset = Dataset.find_by_id(@finder.dataset_id)
        @dataset.go_terms_status = 'in-progress'
        @dataset.save!
        #Make sure there are still datasets left to display
        if current_user.datasets.where(:go_terms_status => 'not-started').empty?
          @datasets_in_progress =
            current_user.datasets.where(:go_terms_status => 'in-progress')
          render :no_datasets_without_go_terms
        else
          #Reset the form
          @finder = FindGoTermsForDataset.new(current_user)
          @finder.set_attributes_and_defaults()
        end
        flash[:notice] = I18n.t :added_to_go_terms_queue, :name => @dataset.name
      end
    end
  end

  # GET: shows the query form. POST: runs the differentially-expressed
  # transcripts query and renders the result rows.
  def query_diff_exp_transcripts
    #Create the view model, giving the current user
    @qdet = QueryDiffExpTranscripts.new(current_user)
    #Which type of request was received?
    if request.get?
      @qdet.set_attributes_and_defaults()
    elsif request.post?
      #Fill in the inputs from the view
      @qdet.set_attributes_and_defaults(params[:query_diff_exp_transcripts])
      # If valid, query and return results; otherwise return failure
      @qdet.query() if @qdet.valid?
      render :partial => 'query_diff_exp_transcripts_table_rows',
             :locals => {:object => @qdet}
    end
  end

  # AJAX: renders the sample list for the chosen dataset (transcripts).
  def get_transcript_diff_exp_samples_for_dataset
    @qdet = QueryDiffExpTranscripts.new(current_user)
    dataset_id = params[:dataset_id]
    @qdet.set_attributes_and_defaults({:dataset_id => dataset_id})
    if @qdet.valid?
      render :partial => 'diff_exp_samples_for_dataset',
             :locals => {:object => @qdet}
    else
      render :no_datasets
    end
  end

  # AJAX: renders the result-table header row for the transcripts query.
  def get_query_diff_exp_transcripts_header_row
    @qdet = QueryDiffExpTranscripts.new(current_user)
    @qdet.set_attributes_and_defaults(params[:query_diff_exp_transcripts])
    if @qdet.valid?
      render :partial => 'query_diff_exp_transcripts_header_row',
             :locals => {:object => @qdet}
    end
  end

  # Returns a transcript's FASTA as plain text, or the validation errors.
  def get_transcript_fasta
    #Create/fill in the view model
    get_transcript_fasta = GetTranscriptFasta.new(current_user)
    get_transcript_fasta.set_attributes(params)
    #Output based on whether the view model is valid
    if get_transcript_fasta.valid?
      render :text => get_transcript_fasta.query, :content_type => 'text/plain'
    else
      error_messages_string = "Error(s) found:\n"
      get_transcript_fasta.errors.full_messages.each do |error_msg|
        error_messages_string += "#{error_msg}\n"
      end
      render :text => error_messages_string, :content_type => 'text/plain'
    end
  end

  # Returns a gene's FASTA sequences as plain text, or the validation errors.
  def get_gene_fastas
    #Create/fill in the view model
    get_gene_fastas = GetGeneFastas.new(current_user)
    get_gene_fastas.set_attributes(params)
    #Output based on whether the view model is valid
    if get_gene_fastas.valid?
      render :text => get_gene_fastas.query, :content_type => 'text/plain'
    else
      error_messages_string = "Error(s) found:\n"
      get_gene_fastas.errors.full_messages.each do |error_msg|
        error_messages_string += "#{error_msg}\n"
      end
      render :text => error_messages_string, :content_type => 'text/plain'
    end
  end

  # AJAX: plain-text true/false (or "not authorized") — whether the dataset's
  # GO terms have been found.
  def get_if_dataset_has_go_terms
    dataset = Dataset.find_by_id(params[:dataset_id])
    if dataset.user != current_user
      msg = "not authorized"
    else
      if dataset.go_terms_status == 'found'
        msg = true
      else
        msg = false
      end
    end
    render :text => msg, :content_type => 'text/plain'
  end

  # GET: shows the query form. POST: runs the differentially-expressed genes
  # query and renders the result rows.
  def query_diff_exp_genes
    #Create the view model, giving the current user
    @qdeg = QueryDiffExpGenes.new(current_user)
    #Which type of request was received?
    if request.get?
      @qdeg.set_attributes_and_defaults()
    elsif request.post?
      #Fill in the inputs from the view
      @qdeg.set_attributes_and_defaults(params[:query_diff_exp_genes])
      # If valid, query and return results; otherwise return failure
      @qdeg.query() if @qdeg.valid?
      render :partial => 'query_diff_exp_genes_table_rows',
             :locals => {:object => @qdeg}
    end
  end

  # AJAX: renders the sample list for the chosen dataset (genes).
  def get_gene_diff_exp_samples_for_dataset
    @qdeg = QueryDiffExpGenes.new(current_user)
    dataset_id = params[:dataset_id]
    @qdeg.set_attributes_and_defaults({:dataset_id => dataset_id})
    if @qdeg.valid?
      render :partial => 'diff_exp_samples_for_dataset',
             :locals => {:object => @qdeg}
    end
  end

  # AJAX: renders the result-table header row for the genes query.
  def get_query_diff_exp_genes_header_row
    @qdeg = QueryDiffExpGenes.new(current_user)
    @qdeg.set_attributes_and_defaults(params[:query_diff_exp_genes])
    if @qdeg.valid?
      render :partial => 'query_diff_exp_genes_header_row',
             :locals => {:object => @qdeg}
    end
  end

  # GET: shows the query form. POST: runs the transcript-isoforms query and
  # renders the result rows.
  def query_transcript_isoforms
    #Create the view model, giving the current user
    @qti = QueryTranscriptIsoforms.new(current_user)
    #Which type of request was received?
    if request.get?
      @qti.set_attributes_and_defaults()
    elsif request.post?
      #Fill in the inputs from the view
      @qti.set_attributes_and_defaults(params[:query_transcript_isoforms])
      # If valid, query and return results; otherwise return failure
      @qti.query() if @qti.valid?
      render :partial => 'query_transcript_isoforms_table_rows',
             :locals => {:object => @qti}
    end
  end

  # AJAX: renders the sample list for the chosen dataset (isoforms).
  def get_transcript_isoforms_samples_for_dataset
    @qti = QueryTranscriptIsoforms.new(current_user)
    dataset_id = params[:dataset_id]
    @qti.set_attributes_and_defaults(:dataset_id => dataset_id)
    if @qti.valid?
      render :partial => 'transcript_isoforms_samples_for_dataset',
             :locals => {:object => @qti}
    end
  end

  # AJAX: renders the result-table header row for the isoforms query.
  def get_query_transcript_isoforms_header_row
    @qti = QueryTranscriptIsoforms.new(current_user)
    @qti.set_attributes_and_defaults(params[:query_transcript_isoforms])
    if @qti.valid?
      render :partial => 'query_transcript_isoforms_header_row',
             :locals => {:object => @qti}
    end
  end

  # GET: shows the BLASTN form. POST: runs the query and renders the report.
  def query_using_blastn
    #Changed after architecture design
    @sequence_type = 'nucleic acid'
    if request.get?
      @query_using_blastn = QueryUsingBlastn.new(current_user)
      @query_using_blastn.set_attributes_and_defaults()
    elsif request.post?
      @query_using_blastn = QueryUsingBlastn.new(current_user)
      @query_using_blastn.set_attributes_and_defaults(params[:query_using_blastn])
      if @query_using_blastn.valid?
        @program = :blastn
        @blast_report = @query_using_blastn.blast
        #Send the result to the user
        @dataset = Dataset.find_by_id(@query_using_blastn.dataset_id)
        render :file => 'query_analysis/blast_results'
      end
    end
  end

  # AJAX: recomputes BLASTN gap costs from the chosen match/mismatch scores.
  def get_blastn_gap_costs_for_match_and_mismatch_scores
    #Calculate the new gap costs from the match and mismatch scores
    @query_using_blastn = QueryUsingBlastn.new(current_user)
    match_and_mismatch_scores = params[:match_and_mismatch_scores]
    @query_using_blastn.set_attributes_and_defaults(
      :match_and_mismatch_scores => match_and_mismatch_scores
    )
    #Render the new gap costs
    render :partial => 'gap_costs', :locals => {:object => @query_using_blastn}
  end

  # GET: shows the TBLASTN form. POST: runs the query and renders the report.
  def query_using_tblastn
    #changed after the architecture design
    @sequence_type = 'amino acid'
    if request.get?
      @query_using_tblastn = QueryUsingTblastn.new(current_user)
      @query_using_tblastn.set_attributes_and_defaults()
    elsif request.post?
      @query_using_tblastn = QueryUsingTblastn.new(current_user)
      @query_using_tblastn.set_attributes_and_defaults(params[:query_using_tblastn])
      if @query_using_tblastn.valid?
        @program = :tblastn
        #Run the blast query and get the file path of the result
        @blast_report = @query_using_tblastn.blast()
        #Send the result to the user
        @dataset = Dataset.find_by_id(@query_using_tblastn.dataset_id)
        render :file => 'query_analysis/blast_results'
      end
    end
  end

  # AJAX: recomputes TBLASTN gap costs for the chosen scoring matrix.
  def get_tblastn_gap_costs_for_matrix
    #Calculate the new gap costs from the match and mismatch scores
    @query_using_blastn = QueryUsingTblastn.new(current_user)
    matrix = params[:matrix]
    @query_using_blastn.set_attributes_and_defaults(:matrix => matrix)
    #Render the new gap costs
    render :partial => 'gap_costs', :locals => {:object => @query_using_blastn}
  end

  # GET: shows the TBLASTX form. POST: runs the query and renders the report.
  def query_using_tblastx
    #changed after the architecture design
    @sequence_type = 'nucleic acid'
    if request.get?
      @query_using_tblastx = QueryUsingTblastx.new(current_user)
      @query_using_tblastx.set_attributes_and_defaults()
    elsif request.post?
      @query_using_tblastx = QueryUsingTblastx.new(current_user)
      @query_using_tblastx.set_attributes_and_defaults(params[:query_using_tblastx])
      if @query_using_tblastx.valid?
        @program = :tblastx
        #Run the blast query and get the file path of the result
        @blast_report = @query_using_tblastx.blast()
        #Send the result to the user
        @dataset = Dataset.find_by_id(@query_using_tblastx.dataset_id)
        render :file => 'query_analysis/blast_results'
      end
    end
  end

  private

  # Renders the no-datasets view when the user has no finished uploads.
  def confirm_datasets_available
    if current_user.datasets.where(:finished_uploading => true).empty?
      render :no_datasets
    end
  end

  # Renders a notice when no finished dataset has transcript isoforms.
  def confirm_transcript_isoform_datasets_available
    if current_user.datasets.where(:has_transcript_isoforms => true,
                                   :finished_uploading => true).empty?
      render :no_transcript_isoforms
    end
  end

  # Renders a notice when no finished dataset has transcript diff-exp data.
  def confirm_transcript_diff_exp_datasets_available
    if current_user.datasets.where(:has_transcript_diff_exp => true,
                                   :finished_uploading => true).empty?
      render :no_diff_exp_transcripts
    end
  end

  # Renders a notice when no finished dataset has gene diff-exp data.
  def confirm_gene_diff_exp_datasets_available
    if current_user.datasets.where(:has_gene_diff_exp => true,
                                   :finished_uploading => true).empty?
      render :no_diff_exp_genes
    end
  end

  # Renders a notice when every finished dataset already has (or is getting)
  # its GO terms; also loads the in-progress list for display.
  def confirm_datasets_without_go_terms_available
    if current_user.datasets.where(:go_terms_status => 'not-started',
                                   :finished_uploading => true).empty?
      @datasets_in_progress =
        current_user.datasets.where(:go_terms_status => 'in-progress',
                                    :finished_uploading => true)
      render :no_datasets_without_go_terms
    end
  end
end
class QueryDatasetsController < ApplicationController before_action :logged_in_user, only: [:index, :destroy] before_action :correct_user_or_admin, only: [:show, :destroy, :result] def index # Find query datasets qd = list_query_datasets # Paginate cur_page = (params[:page] || 1).to_i per_page = (params[:per_page] || 30).to_i @query_datasets = WillPaginate::Collection.create(cur_page, per_page, qd.size) do |pager| start = (cur_page-1)*per_page pager.replace(qd[start, per_page]) end end def new @project ||= Project.find_by(id: params[:project_id]) if @project.nil? redirect_to projects_path else @query_dataset = QueryDataset.new end render "new" end def show @query_dataset = QueryDataset.find(params[:id]) @query_dataset.complete_seen! end def create @project = Project.find_by(id: query_dataset_params[:project_id]) params[:query_dataset][:name]+= "_"+SecureRandom.hex(4) if current_user.nil? if @project.nil? redirect_to root_url elsif params[:query_dataset][:name] =~ /[^A-Za-z0-9_]/ flash[:danger] = "Invalid name, please use only alphanumerics and " + "underscores." new elsif QueryDataset.by_user_and_project(current_user, @project). find_by(name: params[:query_dataset][:name]).nil? @query_dataset = @project.query_datasets.create(query_dataset_params) flash[:success] = "It's saved" if @query_dataset.save! if @query_dataset.save and not @query_dataset.miga.nil? [:description,:comments,:type].each do |k| @query_dataset.miga.metadata[k] = params[k] unless params[k].nil? or params[k].empty? end @query_dataset.miga.save flash[:success] = "Query dataset created." redirect_to @query_dataset else params[:project_id] = query_dataset_params[:project_id] flash[:danger] = "Query dataset couldn't be saved." new end else flash[:danger] = "Name already exists, please use a different name." new end end def destroy qd = QueryDataset.find(params[:id]) p = qd.project p.miga.unlink_dataset qd.miga.name qd.miga.remove! 
qd.destroy redirect_to p end def result qd = QueryDataset.find(params[:id]) m = qd.miga res = m.result(params[:result]) unless res.nil? abs_path = res.file_path(params[:file]) if Dir.exists?(abs_path) and params[:f] and not params[:f]=~/\// abs_path = File.expand_path(params[:f], abs_path) end if Dir.exists? abs_path @path = abs_path @file = File.basename abs_path @res = res render template: "shared/result_dir" else type = case File.extname(abs_path) when ".pdf" ; "application/pdf" when ".html" ; "text/html" else ; "raw/text" end send_file(abs_path, filename: File.basename(abs_path), disposition: "inline", type: type, x_sendfile: true) end return end render :nothing => true, :status => 200, :content_type => "text/html" end # Execute the MyTaxa Scan step upon request. def run_mytaxa_scan @query_dataset = QueryDataset.find(params[:id]) @query_dataset.run_mytaxa_scan! redirect_to(@query_dataset) end # Re-calculate the Distances step upon request. def run_distances @query_dataset = QueryDataset.find(params[:id]) @query_dataset.run_distances! redirect_to(@query_dataset) end private def query_dataset_params params.require(:query_dataset).permit( :name, :user_id, :project_id, :input_file, :input_file_2, :input_type) end def list_query_datasets if params[:project_id] @project = Project.find(params[:project_id]) qd = params[:all] ? @project.query_datasets.all : QueryDataset.by_user_and_project(current_user, @project) else @project = nil qd = params[:all] ? QueryDataset.all : current_user.query_datasets end if params[:complete_new] qd = qd.select{ |i| i.complete_new } end @all_qd = qd.count params[:ready] ||= false if params[:ready]=="yes" qd = qd.select{ |i| i.ready? } @ready_qd = qd.count @running_qd = @all_qd - @ready_qd elsif params[:ready]=="no" qd = qd.select{ |i| not i.ready? } @running_qd = qd.count @ready_qd = @all_qd - @running_qd else @ready_qd = qd.select{ |i| i.ready? 
}.count @running_qd = @all_qd - @ready_qd end qd end # Confirms the correct user def correct_user_or_admin @user = QueryDataset.find(params[:id]).user return true if current_user.nil? and @user.nil? redirect_to(root_url) if current_user.nil? or not( current_user?(@user) or current_user.admin? ) end end Minor permissions issue solved Close #11 class QueryDatasetsController < ApplicationController before_action :logged_in_user, only: [:index, :destroy] before_action :correct_user_or_admin, only: [:show, :destroy, :result] def index # Find query datasets qd = list_query_datasets # Paginate cur_page = (params[:page] || 1).to_i per_page = (params[:per_page] || 30).to_i @query_datasets = WillPaginate::Collection.create(cur_page, per_page, qd.size) do |pager| start = (cur_page-1)*per_page pager.replace(qd[start, per_page]) end end def new @project ||= Project.find_by(id: params[:project_id]) if @project.nil? redirect_to projects_path else @query_dataset = QueryDataset.new end render "new" end def show @query_dataset = QueryDataset.find(params[:id]) @query_dataset.complete_seen! end def create @project = Project.find_by(id: query_dataset_params[:project_id]) params[:query_dataset][:name]+= "_"+SecureRandom.hex(4) if current_user.nil? if @project.nil? redirect_to root_url elsif params[:query_dataset][:name] =~ /[^A-Za-z0-9_]/ flash[:danger] = "Invalid name, please use only alphanumerics and " + "underscores." new elsif QueryDataset.by_user_and_project(current_user, @project). find_by(name: params[:query_dataset][:name]).nil? @query_dataset = @project.query_datasets.create(query_dataset_params) flash[:success] = "It's saved" if @query_dataset.save! if @query_dataset.save and not @query_dataset.miga.nil? [:description,:comments,:type].each do |k| @query_dataset.miga.metadata[k] = params[k] unless params[k].nil? or params[k].empty? end @query_dataset.miga.save flash[:success] = "Query dataset created." 
redirect_to @query_dataset else params[:project_id] = query_dataset_params[:project_id] flash[:danger] = "Query dataset couldn't be saved." new end else flash[:danger] = "Name already exists, please use a different name." new end end def destroy qd = QueryDataset.find(params[:id]) p = qd.project p.miga.unlink_dataset qd.miga.name qd.miga.remove! qd.destroy redirect_to p end def result qd = QueryDataset.find(params[:id]) m = qd.miga res = m.result(params[:result]) unless res.nil? abs_path = res.file_path(params[:file]) if Dir.exists?(abs_path) and params[:f] and not params[:f]=~/\// abs_path = File.expand_path(params[:f], abs_path) end if Dir.exists? abs_path @path = abs_path @file = File.basename abs_path @res = res render template: "shared/result_dir" else type = case File.extname(abs_path) when ".pdf" ; "application/pdf" when ".html" ; "text/html" else ; "raw/text" end send_file(abs_path, filename: File.basename(abs_path), disposition: "inline", type: type, x_sendfile: true) end return end render :nothing => true, :status => 200, :content_type => "text/html" end # Execute the MyTaxa Scan step upon request. def run_mytaxa_scan @query_dataset = QueryDataset.find(params[:id]) @query_dataset.run_mytaxa_scan! redirect_to(@query_dataset) end # Re-calculate the Distances step upon request. def run_distances @query_dataset = QueryDataset.find(params[:id]) @query_dataset.run_distances! redirect_to(@query_dataset) end private def query_dataset_params params.require(:query_dataset).permit( :name, :user_id, :project_id, :input_file, :input_file_2, :input_type) end def list_query_datasets if params[:project_id] @project = Project.find(params[:project_id]) qd = params[:all] ? @project.query_datasets.all : QueryDataset.by_user_and_project(current_user, @project) else @project = nil qd = params[:all] ? 
QueryDataset.all : current_user.query_datasets end if params[:complete_new] qd = qd.select{ |i| i.complete_new } end @all_qd = qd.count params[:ready] ||= false if params[:ready]=="yes" qd = qd.select{ |i| i.ready? } @ready_qd = qd.count @running_qd = @all_qd - @ready_qd elsif params[:ready]=="no" qd = qd.select{ |i| not i.ready? } @running_qd = qd.count @ready_qd = @all_qd - @running_qd else @ready_qd = qd.select{ |i| i.ready? }.count @running_qd = @all_qd - @ready_qd end qd end # Confirms the correct user def correct_user_or_admin @user = QueryDataset.find(params[:id]).user return true if @user.nil? redirect_to(root_url) if current_user.nil? or not( current_user?(@user) or current_user.admin? ) end end
class QuestionnairesController < ApplicationController # Controller for Questionnaire objects # A Questionnaire can be of several types (QuestionnaireType) # Each Questionnaire contains zero or more questions (Question) # Generally a questionnaire is associated with an assignment (Assignment) before_action :authorize def action_allowed? ['Super-Administrator', 'Administrator', 'Instructor', 'Teaching Assistant', 'Student'].include? current_role_name end # Create a clone of the given questionnaire, copying all associated # questions. The name and creator are updated. def copy orig_questionnaire = Questionnaire.find(params[:id]) questions = Question.where(questionnaire_id: params[:id]) @questionnaire = orig_questionnaire.dup @questionnaire.instructor_id = session[:user].instructor_id ## Why was TA-specific code removed here? See Project E713. copy_questionnaire_details(questions, orig_questionnaire) end def view @questionnaire = Questionnaire.find(params[:id]) end def show @questionnaire = Questionnaire.find(params[:id]) end # Define a new questionnaire def new if Questionnaire::QUESTIONNAIRE_TYPES.include? params[:model] @questionnaire = Object.const_get(params[:model].split.join).new end end def create questionnaire_private = params[:questionnaire][:private] == "true" ? true : false display_type = params[:questionnaire][:type].split('Questionnaire')[0] if Questionnaire::QUESTIONNAIRE_TYPES.include? params[:model] @questionnaire = Object.const_get(params[:questionnaire][:type]).new end begin @questionnaire.private = questionnaire_private @questionnaire.name = params[:questionnaire][:name] @questionnaire.instructor_id = session[:user].id @questionnaire.min_question_score = params[:questionnaire][:min_question_score] @questionnaire.max_question_score = params[:questionnaire][:max_question_score] @questionnaire.type = params[:questionnaire][:type] # Zhewei: Right now, the display_type in 'questionnaires' table and name in 'tree_folders' table are not consistent. 
# In the future, we need to write migration files to make them consistency. case display_type when 'AuthorFeedback' display_type = 'Author%Feedback' when 'CourseEvaluation' display_type = 'Course%Evaluation' when 'TeammateReview' display_type = 'Teammate%Review' when 'GlobalSurvey' display_type = 'Global%Survey' end @questionnaire.display_type = display_type @questionnaire.instruction_loc = Questionnaire::DEFAULT_QUESTIONNAIRE_URL @questionnaire.save # Create node tree_folder = TreeFolder.where(['name like ?', @questionnaire.display_type]).first parent = FolderNode.find_by_node_object_id(tree_folder.id) QuestionnaireNode.create(parent_id: parent.id, node_object_id: @questionnaire.id, type: 'QuestionnaireNode') flash[:success] = 'You have successfully created a questionnaire!' rescue flash[:error] = $ERROR_INFO end redirect_to controller: 'questionnaires', action: 'edit', id: @questionnaire.id end def create_questionnaire @questionnaire = Object.const_get(params[:questionnaire][:type]).new(questionnaire_params) # TODO: check for Quiz Questionnaire? if @questionnaire.type == "QuizQuestionnaire" # checking if it is a quiz questionnaire participant_id = params[:pid] # creating a local variable to send as parameter to submitted content if it is a quiz questionnaire @questionnaire.min_question_score = 0 @questionnaire.max_question_score = 1 @assignment = Assignment.find(params[:aid]) author_team = AssignmentTeam.team(Participant.find(participant_id)) @questionnaire.instructor_id = author_team.id # for a team assignment, set the instructor id to the team_id @successful_create = true save save_choices @questionnaire.id if @successful_create == true flash[:note] = "The quiz was successfully created." 
end redirect_to controller: 'submitted_content', action: 'edit', id: participant_id else # if it is not a quiz questionnaire if session[:user].role.name == "Teaching Assistant" @questionnaire.instructor_id = Ta.get_my_instructor(session[:user].id) end save redirect_to controller: 'tree_display', action: 'list' end end # Edit a questionnaire def edit @questionnaire = Questionnaire.find(params[:id]) redirect_to Questionnaire if @questionnaire.nil? end def update @questionnaire = Questionnaire.find(params[:id]) begin @questionnaire.update_attributes(questionnaire_params) flash[:success] = 'The questionnaire has been successfully updated!' rescue flash[:error] = $ERROR_INFO end redirect_to edit_questionnaire_path(@questionnaire.id.to_s.to_sym) end # Remove a given questionnaire def delete @questionnaire = Questionnaire.find(params[:id]) if @questionnaire begin name = @questionnaire.name # if this rubric is used by some assignment, flash error @questionnaire.assignments.each do |assignment| raise "The assignment #{assignment.name} uses this questionnaire. Are sure you want to <A href='../assignment/delete/#{assignment.id}'>delete</A> the assignment?" end questions = @questionnaire.questions # if this rubric had some answers, flash error questions.each do |question| unless question.answers.empty? raise "There are responses based on this rubric, we suggest you do not delete it." end end questions.each do |question| advices = question.question_advices advices.each(&:delete) question.delete end questionnaire_node = @questionnaire.questionnaire_node questionnaire_node.delete @questionnaire.delete undo_link("The questionnaire \"#{name}\" has been successfully deleted.") rescue flash[:error] = $ERROR_INFO end end redirect_to action: 'list', controller: 'tree_display' end def edit_advice # #Code used to be in this class, was removed. I have not checked the other class. 
redirect_to controller: 'advice', action: 'edit_advice' end def save_advice begin for advice_key in params[:advice].keys QuestionAdvice.update(advice_key, params[:advice][advice_key]) end flash[:notice] = "The questionnaire's question advice was successfully saved." # redirect_to :action => 'list' redirect_to controller: 'advice', action: 'save_advice' rescue flash[:error] = $ERROR_INFO end end # Toggle the access permission for this assignment from public to private, or vice versa def toggle_access @questionnaire = Questionnaire.find(params[:id]) @questionnaire.private = !@questionnaire.private @questionnaire.save @access = @questionnaire.private == true ? "private" : "public" undo_link("teh questionnaire \"#{@questionnaire.name}\" has been successfully made #{@access}. ") redirect_to controller: 'tree_display', action: 'list' end # Zhewei: This method is used to add new questions when editing questionnaire. def add_new_questions questionnaire_id = params[:id] unless params[:id].nil? num_of_existed_questions = Questionnaire.find(questionnaire_id).questions.size ((num_of_existed_questions + 1)..(num_of_existed_questions + params[:question][:total_num].to_i)).each do |i| question = Object.const_get(params[:question][:type]).create(txt: '', questionnaire_id: questionnaire_id, seq: i, type: params[:question][:type], break_before: true) if question.is_a? ScoredQuestion question.weight = 1 question.max_label = 'Strong agree' question.min_label = 'Strong disagree' end question.size = '50,3' if question.is_a? Criterion question.alternatives = '0|1|2|3|4|5' if question.is_a? Dropdown question.size = '60,5' if question.is_a? TextResponse begin question.save rescue flash[:error] = $ERROR_INFO end end redirect_to edit_questionnaire_path(questionnaire_id.to_sym) end # Zhewei: This method is used to save all questions in current questionnaire. def save_all_questions questionnaire_id = params[:id] unless params[:id].nil? 
if params['save'] params[:question].each_pair do |k, v| @question = Question.find(k) # example of 'v' value # {"seq"=>"1.0", "txt"=>"WOW", "weight"=>"1", "size"=>"50,3", "max_label"=>"Strong agree", "min_label"=>"Not agree"} v.each_pair do |key, value| @question.send(key + '=', value) if @question.send(key) != value end begin @question.save flash[:success] = 'All questions has been successfully saved!' rescue flash[:error] = $ERROR_INFO end end end export if params['export'] import if params['import'] if params['view_advice'] redirect_to controller: 'advice', action: 'edit_advice', id: params[:id] else redirect_to edit_questionnaire_path(questionnaire_id.to_sym) end end #========================================================================================================= # Separate methods for quiz questionnaire #========================================================================================================= # View a quiz questionnaire def view_quiz @questionnaire = Questionnaire.find(params[:id]) @participant = Participant.find(params[:pid]) # creating an instance variable since it needs to be sent to submitted_content/edit render :view end # define a new quiz questionnaire # method invoked by the view def new_quiz valid_request = true @assignment_id = params[:aid] # creating an instance variable to hold the assignment id @participant_id = params[:pid] # creating an instance variable to hold the participant id assignment = Assignment.find(@assignment_id) if !assignment.require_quiz? # flash error if this assignment does not require quiz flash[:error] = "This assignment does not support the quizzing feature." valid_request = false else team = AssignmentParticipant.find(@participant_id).team if team.nil? # flash error if this current participant does not have a team flash[:error] = "You should create or join a team first." valid_request = false else if assignment.has_topics? && team.topic.nil? 
# flash error if this assignment has topic but current team does not have a topic flash[:error] = "Your team should have a topic." valid_request = false end end end if valid_request && Questionnaire::QUESTIONNAIRE_TYPES.include? params[:model] @questionnaire = Object.const_get(params[:model]).new @questionnaire.private = params[:private] @questionnaire.min_question_score = 0 @questionnaire.max_question_score = 1 render :new_quiz else redirect_to controller: 'submitted_content', action: 'view', id: params[:pid] end end # seperate method for creating a quiz questionnaire because of differences in permission def create_quiz_questionnaire valid = valid_quiz if valid.eql?("valid") create_questionnaire else flash[:error] = valid.to_s redirect_to :back end end # edit a quiz questionnaire def edit_quiz @questionnaire = Questionnaire.find(params[:id]) if !@questionnaire.taken_by_anyone? render :edit else flash[:error] = "Your quiz has been taken by some other students, editing cannot be done any more." redirect_to controller: 'submitted_content', action: 'view', id: params[:pid] end end # save an updated quiz questionnaire to the database def update_quiz @questionnaire = Questionnaire.find(params[:id]) redirect_to controller: 'submitted_content', action: 'view', id: params[:pid] if @questionnaire.nil? 
if params['save'] @questionnaire.update_attributes(questionnaire_params) for qid in params[:question].keys @question = Question.find(qid) @question.txt = params[:question][qid.to_sym][:txt] @question.save @quiz_question_choices = QuizQuestionChoice.where(question_id: qid) i = 1 for quiz_question_choice in @quiz_question_choices if @question.type == "MultipleChoiceCheckbox" if params[:quiz_question_choices][@question.id.to_s][@question.type][i.to_s] quiz_question_choice.update_attributes(iscorrect: params[:quiz_question_choices][@question.id.to_s][@question.type][i.to_s][:iscorrect], txt: params[:quiz_question_choices][@question.id.to_s][@question.type][i.to_s][:txt]) else quiz_question_choice.update_attributes(iscorrect: '0', txt: params[:quiz_question_choices][quiz_question_choice.id.to_s][:txt]) end end if @question.type == "MultipleChoiceRadio" if params[:quiz_question_choices][@question.id.to_s][@question.type][:correctindex] == i.to_s quiz_question_choice.update_attributes(iscorrect: '1', txt: params[:quiz_question_choices][@question.id.to_s][@question.type][i.to_s][:txt]) else quiz_question_choice.update_attributes(iscorrect: '0', txt: params[:quiz_question_choices][@question.id.to_s][@question.type][i.to_s][:txt]) end end if @question.type == "TrueFalse" if params[:quiz_question_choices][@question.id.to_s][@question.type][1.to_s][:iscorrect] == "True" # the statement is correct if quiz_question_choice.txt == "True" quiz_question_choice.update_attributes(iscorrect: '1') # the statement is correct so "True" is the right answer else quiz_question_choice.update_attributes(iscorrect: '0') end else # the statement is not correct if quiz_question_choice.txt == "True" quiz_question_choice.update_attributes(iscorrect: '0') else quiz_question_choice.update_attributes(iscorrect: '1') # the statement is not correct so "False" is the right answer end end end i += 1 end end end redirect_to controller: 'submitted_content', action: 'view', id: params[:pid] end def 
valid_quiz num_quiz_questions = Assignment.find(params[:aid]).num_quiz_questions valid = "valid" (1..num_quiz_questions).each do |i| if params[:new_question][i.to_s] == '' # One of the questions text is not filled out valid = "Please make sure all questions have text" break elsif !params.key?(:question_type) || !params[:question_type].key?(i.to_s) || params[:question_type][i.to_s][:type].nil? # A type isnt selected for a question valid = "Please select a type for each question" break elsif params[:questionnaire][:name] == "" # questionnaire name is not specified valid = "Please specify quiz name (please do not use your name or id)." break else type = params[:question_type][i.to_s][:type] if type == 'MultipleChoiceCheckbox' or type == 'MultipleChoiceRadio' correct_selected = false (1..4).each do |x| if params[:new_choices][i.to_s][type][x.to_s][:txt] == '' # Text isnt provided for an option valid = "Please make sure every question has text for all options" break elsif type == 'MultipleChoiceRadio' and !params[:new_choices][i.to_s][type][x.to_s][:iscorrect].nil? correct_selected = true elsif type == 'MultipleChoiceCheckbox' and params[:new_choices][i.to_s][type][x.to_s][:iscorrect] != 0.to_s correct_selected = true end end if valid == "valid" && !correct_selected # A correct option isnt selected for a check box or radio question valid = "Please select a correct answer for all questions" break end elsif type == 'TF' # TF is not disabled. We need to test TF later. if params[:new_choices][i.to_s]["TF"].nil? # A correct option isnt selected for a true/false question valid = "Please select a correct answer for all questions" break end end end end valid end private # save questionnaire object after create or edit def save @questionnaire.save! save_questions @questionnaire.id if !@questionnaire.id.nil? 
and @questionnaire.id > 0 # We do not create node for quiz questionnaires if @questionnaire.type != "QuizQuestionnaire" pFolder = TreeFolder.find_by_name(@questionnaire.display_type) parent = FolderNode.find_by_node_object_id(pFolder.id) create_new_node_if_necessary(parent) end undo_link("Questionnaire \"#{@questionnaire.name}\" has been updated successfully. ") end # save parameters for new questions def save_new_question_parameters(qid, q_num) q = QuestionType.new q.q_type = params[:question_type][q_num][:type] q.parameters = params[:question_type][q_num][:parameters] q.question_id = qid q.save end # save questions that have been added to a questionnaire def save_new_questions(questionnaire_id) if params[:new_question] # The new_question array contains all the new questions # that should be saved to the database for question_key in params[:new_question].keys q = Question.new q.txt = params[:new_question][question_key] q.questionnaire_id = questionnaire_id q.type = params[:question_type][question_key][:type] q.seq = question_key.to_i if @questionnaire.type == "QuizQuestionnaire" q.weight = 1 # setting the weight to 1 for quiz questionnaire since the model validates this field end q.save unless q.txt.strip.empty? end end end # delete questions from a questionnaire # @param [Object] questionnaire_id def delete_questions(questionnaire_id) # Deletes any questions that, as a result of the edit, are no longer in the questionnaire questions = Question.where("questionnaire_id = " + questionnaire_id.to_s) @deleted_questions = [] for question in questions should_delete = true unless question_params.nil? 
for question_key in params[:question].keys should_delete = false if question_key.to_s === question.id.to_s end end next unless should_delete for advice in question.question_advices advice.destroy end # keep track of the deleted questions @deleted_questions.push(question) question.destroy end end # Handles questions whose wording changed as a result of the edit # @param [Object] questionnaire_id def save_questions(questionnaire_id) delete_questions questionnaire_id save_new_questions questionnaire_id if params[:question] for question_key in params[:question].keys if params[:question][question_key][:txt].strip.empty? # question text is empty, delete the question Question.delete(question_key) else # Update existing question. question = Question.find(question_key) unless question.update_attributes(params[:question][question_key]) Rails.logger.info(question.errors.messages.inspect) end end end end end # method to save the choices associated with a question in a quiz to the database # only for quiz questionnaire def save_choices(questionnaire_id) if params[:new_question] and params[:new_choices] questions = Question.where(questionnaire_id: questionnaire_id) questionnum = 1 for question in questions q_type = params[:question_type][questionnum.to_s][:type] for choice_key in params[:new_choices][questionnum.to_s][q_type].keys score = if params[:new_choices][questionnum.to_s][q_type][choice_key]["weight"] == 1.to_s 1 else 0 end if q_type == "MultipleChoiceCheckbox" if params[:new_choices][questionnum.to_s][q_type][choice_key][:iscorrect] == 1.to_s q = QuizQuestionChoice.new(txt: params[:new_choices][questionnum.to_s][q_type][choice_key][:txt], iscorrect: "true", question_id: question.id) else q = QuizQuestionChoice.new(txt: params[:new_choices][questionnum.to_s][q_type][choice_key][:txt], iscorrect: "false", question_id: question.id) end q.save elsif q_type == "TrueFalse" if params[:new_choices][questionnum.to_s][q_type][1.to_s][:iscorrect] == choice_key q = 
QuizQuestionChoice.new(txt: "True", iscorrect: "true", question_id: question.id) q.save q = QuizQuestionChoice.new(txt: "False", iscorrect: "false", question_id: question.id) q.save else q = QuizQuestionChoice.new(txt: "True", iscorrect: "false", question_id: question.id) q.save q = QuizQuestionChoice.new(txt: "False", iscorrect: "true", question_id: question.id) q.save end else if params[:new_choices][questionnum.to_s][q_type][1.to_s][:iscorrect] == choice_key q = QuizQuestionChoice.new(txt: params[:new_choices][questionnum.to_s][q_type][choice_key][:txt], iscorrect: "true", question_id: question.id) else q = QuizQuestionChoice.new(txt: params[:new_choices][questionnum.to_s][q_type][choice_key][:txt], iscorrect: "false", question_id: question.id) end q.save end end questionnum += 1 question.weight = 1 end end end def questionnaire_params params.require(:questionnaire).permit(:name, :instructor_id, :private, :min_question_score, :max_question_score, :type, :display_type, :instruction_loc) end def question_params params.require(:question).permit(:txt, :weight, :questionnaire_id, :seq, :type, :size, :alternatives, :break_before, :max_label, :min_label) end # FIXME: These private methods belong in the Questionnaire model def export @questionnaire = Questionnaire.find(params[:id]) csv_data = QuestionnaireHelper.create_questionnaire_csv @questionnaire, session[:user].name send_data csv_data, type: 'text/csv; charset=iso-8859-1; header=present', disposition: "attachment; filename=questionnaires.csv" end def import @questionnaire = Questionnaire.find(params[:id]) file = params['csv'] @questionnaire.questions << QuestionnaireHelper.get_questions_from_csv(@questionnaire, file) end # clones the contents of a questionnaire, including the questions and associated advice def copy_questionnaire_details(questions, orig_questionnaire) assign_instructor_id @questionnaire.name = 'Copy of ' + orig_questionnaire.name begin @questionnaire.created_at = Time.now @questionnaire.save! 
questions.each do |question| new_question = question.dup new_question.questionnaire_id = @questionnaire.id if (new_question.is_a? Criterion or new_question.is_a? TextResponse) and new_question.size.nil? new_question.size = '50,3' end new_question.save! advice = QuestionAdvice.find_by_question_id(question.id) next unless advice new_advice = advice.dup new_advice.question_id = new_question.id new_advice.save! end pFolder = TreeFolder.find_by_name(@questionnaire.display_type) parent = FolderNode.find_by_node_object_id(pFolder.id) create_new_node_if_necessary(parent) undo_link("Copy of questionnaire #{orig_questionnaire.name} has been created successfully. ") redirect_to controller: 'questionnaires', action: 'view', id: @questionnaire.id rescue flash[:error] = 'The questionnaire was not able to be copied. Please check the original course for missing information.' + $ERROR_INFO redirect_to action: 'list', controller: 'tree_display' end end private def create_new_node_if_necessary(parent) unless QuestionnaireNode.exists?(parent_id: parent.id, node_object_id: @questionnaire.id) QuestionnaireNode.create(parent_id: parent.id, node_object_id: @questionnaire.id) end end def assign_instructor_id # if the user to copy the questionnaire is a TA, the instructor should be the owner instead of the TA @questionnaire.instructor_id = if session[:user].role.name != "Teaching Assistant" session[:user].id else # for TA we need to get his instructor id and by default add it to his course for which he is the TA Ta.get_my_instructor(session[:user].id) end end end Fix bug in eacae4cd6151b3066db26759dd31e3eff2ee1e16 class QuestionnairesController < ApplicationController # Controller for Questionnaire objects # A Questionnaire can be of several types (QuestionnaireType) # Each Questionnaire contains zero or more questions (Question) # Generally a questionnaire is associated with an assignment (Assignment) before_action :authorize def action_allowed? 
['Super-Administrator', 'Administrator', 'Instructor', 'Teaching Assistant', 'Student'].include? current_role_name end # Create a clone of the given questionnaire, copying all associated # questions. The name and creator are updated. def copy orig_questionnaire = Questionnaire.find(params[:id]) questions = Question.where(questionnaire_id: params[:id]) @questionnaire = orig_questionnaire.dup @questionnaire.instructor_id = session[:user].instructor_id ## Why was TA-specific code removed here? See Project E713. copy_questionnaire_details(questions, orig_questionnaire) end def view @questionnaire = Questionnaire.find(params[:id]) end def show @questionnaire = Questionnaire.find(params[:id]) end # Define a new questionnaire def new if Questionnaire::QUESTIONNAIRE_TYPES.include? params[:model] @questionnaire = Object.const_get(params[:model].split.join).new end end def create questionnaire_private = params[:questionnaire][:private] == "true" ? true : false display_type = params[:questionnaire][:type].split('Questionnaire')[0] if Questionnaire::QUESTIONNAIRE_TYPES.include? params[:questionnaire][:type] @questionnaire = Object.const_get(params[:questionnaire][:type]).new end begin @questionnaire.private = questionnaire_private @questionnaire.name = params[:questionnaire][:name] @questionnaire.instructor_id = session[:user].id @questionnaire.min_question_score = params[:questionnaire][:min_question_score] @questionnaire.max_question_score = params[:questionnaire][:max_question_score] @questionnaire.type = params[:questionnaire][:type] # Zhewei: Right now, the display_type in 'questionnaires' table and name in 'tree_folders' table are not consistent. # In the future, we need to write migration files to make them consistency. 
case display_type when 'AuthorFeedback' display_type = 'Author%Feedback' when 'CourseEvaluation' display_type = 'Course%Evaluation' when 'TeammateReview' display_type = 'Teammate%Review' when 'GlobalSurvey' display_type = 'Global%Survey' end @questionnaire.display_type = display_type @questionnaire.instruction_loc = Questionnaire::DEFAULT_QUESTIONNAIRE_URL @questionnaire.save # Create node tree_folder = TreeFolder.where(['name like ?', @questionnaire.display_type]).first parent = FolderNode.find_by_node_object_id(tree_folder.id) QuestionnaireNode.create(parent_id: parent.id, node_object_id: @questionnaire.id, type: 'QuestionnaireNode') flash[:success] = 'You have successfully created a questionnaire!' rescue flash[:error] = $ERROR_INFO end redirect_to controller: 'questionnaires', action: 'edit', id: @questionnaire.id end def create_questionnaire @questionnaire = Object.const_get(params[:questionnaire][:type]).new(questionnaire_params) # TODO: check for Quiz Questionnaire? if @questionnaire.type == "QuizQuestionnaire" # checking if it is a quiz questionnaire participant_id = params[:pid] # creating a local variable to send as parameter to submitted content if it is a quiz questionnaire @questionnaire.min_question_score = 0 @questionnaire.max_question_score = 1 @assignment = Assignment.find(params[:aid]) author_team = AssignmentTeam.team(Participant.find(participant_id)) @questionnaire.instructor_id = author_team.id # for a team assignment, set the instructor id to the team_id @successful_create = true save save_choices @questionnaire.id if @successful_create == true flash[:note] = "The quiz was successfully created." 
end redirect_to controller: 'submitted_content', action: 'edit', id: participant_id else # if it is not a quiz questionnaire if session[:user].role.name == "Teaching Assistant" @questionnaire.instructor_id = Ta.get_my_instructor(session[:user].id) end save redirect_to controller: 'tree_display', action: 'list' end end # Edit a questionnaire def edit @questionnaire = Questionnaire.find(params[:id]) redirect_to Questionnaire if @questionnaire.nil? end def update @questionnaire = Questionnaire.find(params[:id]) begin @questionnaire.update_attributes(questionnaire_params) flash[:success] = 'The questionnaire has been successfully updated!' rescue flash[:error] = $ERROR_INFO end redirect_to edit_questionnaire_path(@questionnaire.id.to_s.to_sym) end # Remove a given questionnaire def delete @questionnaire = Questionnaire.find(params[:id]) if @questionnaire begin name = @questionnaire.name # if this rubric is used by some assignment, flash error @questionnaire.assignments.each do |assignment| raise "The assignment #{assignment.name} uses this questionnaire. Are sure you want to <A href='../assignment/delete/#{assignment.id}'>delete</A> the assignment?" end questions = @questionnaire.questions # if this rubric had some answers, flash error questions.each do |question| unless question.answers.empty? raise "There are responses based on this rubric, we suggest you do not delete it." end end questions.each do |question| advices = question.question_advices advices.each(&:delete) question.delete end questionnaire_node = @questionnaire.questionnaire_node questionnaire_node.delete @questionnaire.delete undo_link("The questionnaire \"#{name}\" has been successfully deleted.") rescue flash[:error] = $ERROR_INFO end end redirect_to action: 'list', controller: 'tree_display' end def edit_advice # #Code used to be in this class, was removed. I have not checked the other class. 
redirect_to controller: 'advice', action: 'edit_advice' end def save_advice begin for advice_key in params[:advice].keys QuestionAdvice.update(advice_key, params[:advice][advice_key]) end flash[:notice] = "The questionnaire's question advice was successfully saved." # redirect_to :action => 'list' redirect_to controller: 'advice', action: 'save_advice' rescue flash[:error] = $ERROR_INFO end end # Toggle the access permission for this assignment from public to private, or vice versa def toggle_access @questionnaire = Questionnaire.find(params[:id]) @questionnaire.private = !@questionnaire.private @questionnaire.save @access = @questionnaire.private == true ? "private" : "public" undo_link("teh questionnaire \"#{@questionnaire.name}\" has been successfully made #{@access}. ") redirect_to controller: 'tree_display', action: 'list' end # Zhewei: This method is used to add new questions when editing questionnaire. def add_new_questions questionnaire_id = params[:id] unless params[:id].nil? num_of_existed_questions = Questionnaire.find(questionnaire_id).questions.size ((num_of_existed_questions + 1)..(num_of_existed_questions + params[:question][:total_num].to_i)).each do |i| question = Object.const_get(params[:question][:type]).create(txt: '', questionnaire_id: questionnaire_id, seq: i, type: params[:question][:type], break_before: true) if question.is_a? ScoredQuestion question.weight = 1 question.max_label = 'Strong agree' question.min_label = 'Strong disagree' end question.size = '50,3' if question.is_a? Criterion question.alternatives = '0|1|2|3|4|5' if question.is_a? Dropdown question.size = '60,5' if question.is_a? TextResponse begin question.save rescue flash[:error] = $ERROR_INFO end end redirect_to edit_questionnaire_path(questionnaire_id.to_sym) end # Zhewei: This method is used to save all questions in current questionnaire. def save_all_questions questionnaire_id = params[:id] unless params[:id].nil? 
if params['save'] params[:question].each_pair do |k, v| @question = Question.find(k) # example of 'v' value # {"seq"=>"1.0", "txt"=>"WOW", "weight"=>"1", "size"=>"50,3", "max_label"=>"Strong agree", "min_label"=>"Not agree"} v.each_pair do |key, value| @question.send(key + '=', value) if @question.send(key) != value end begin @question.save flash[:success] = 'All questions has been successfully saved!' rescue flash[:error] = $ERROR_INFO end end end export if params['export'] import if params['import'] if params['view_advice'] redirect_to controller: 'advice', action: 'edit_advice', id: params[:id] else redirect_to edit_questionnaire_path(questionnaire_id.to_sym) end end #========================================================================================================= # Separate methods for quiz questionnaire #========================================================================================================= # View a quiz questionnaire def view_quiz @questionnaire = Questionnaire.find(params[:id]) @participant = Participant.find(params[:pid]) # creating an instance variable since it needs to be sent to submitted_content/edit render :view end # define a new quiz questionnaire # method invoked by the view def new_quiz valid_request = true @assignment_id = params[:aid] # creating an instance variable to hold the assignment id @participant_id = params[:pid] # creating an instance variable to hold the participant id assignment = Assignment.find(@assignment_id) if !assignment.require_quiz? # flash error if this assignment does not require quiz flash[:error] = "This assignment does not support the quizzing feature." valid_request = false else team = AssignmentParticipant.find(@participant_id).team if team.nil? # flash error if this current participant does not have a team flash[:error] = "You should create or join a team first." valid_request = false else if assignment.has_topics? && team.topic.nil? 
# flash error if this assignment has topic but current team does not have a topic flash[:error] = "Your team should have a topic." valid_request = false end end end if valid_request && Questionnaire::QUESTIONNAIRE_TYPES.include?(params[:model]) @questionnaire = Object.const_get(params[:model]).new @questionnaire.private = params[:private] @questionnaire.min_question_score = 0 @questionnaire.max_question_score = 1 render :new_quiz else redirect_to controller: 'submitted_content', action: 'view', id: params[:pid] end end # seperate method for creating a quiz questionnaire because of differences in permission def create_quiz_questionnaire valid = valid_quiz if valid.eql?("valid") create_questionnaire else flash[:error] = valid.to_s redirect_to :back end end # edit a quiz questionnaire def edit_quiz @questionnaire = Questionnaire.find(params[:id]) if !@questionnaire.taken_by_anyone? render :edit else flash[:error] = "Your quiz has been taken by some other students, editing cannot be done any more." redirect_to controller: 'submitted_content', action: 'view', id: params[:pid] end end # save an updated quiz questionnaire to the database def update_quiz @questionnaire = Questionnaire.find(params[:id]) redirect_to controller: 'submitted_content', action: 'view', id: params[:pid] if @questionnaire.nil? 
if params['save'] @questionnaire.update_attributes(questionnaire_params) for qid in params[:question].keys @question = Question.find(qid) @question.txt = params[:question][qid.to_sym][:txt] @question.save @quiz_question_choices = QuizQuestionChoice.where(question_id: qid) i = 1 for quiz_question_choice in @quiz_question_choices if @question.type == "MultipleChoiceCheckbox" if params[:quiz_question_choices][@question.id.to_s][@question.type][i.to_s] quiz_question_choice.update_attributes(iscorrect: params[:quiz_question_choices][@question.id.to_s][@question.type][i.to_s][:iscorrect], txt: params[:quiz_question_choices][@question.id.to_s][@question.type][i.to_s][:txt]) else quiz_question_choice.update_attributes(iscorrect: '0', txt: params[:quiz_question_choices][quiz_question_choice.id.to_s][:txt]) end end if @question.type == "MultipleChoiceRadio" if params[:quiz_question_choices][@question.id.to_s][@question.type][:correctindex] == i.to_s quiz_question_choice.update_attributes(iscorrect: '1', txt: params[:quiz_question_choices][@question.id.to_s][@question.type][i.to_s][:txt]) else quiz_question_choice.update_attributes(iscorrect: '0', txt: params[:quiz_question_choices][@question.id.to_s][@question.type][i.to_s][:txt]) end end if @question.type == "TrueFalse" if params[:quiz_question_choices][@question.id.to_s][@question.type][1.to_s][:iscorrect] == "True" # the statement is correct if quiz_question_choice.txt == "True" quiz_question_choice.update_attributes(iscorrect: '1') # the statement is correct so "True" is the right answer else quiz_question_choice.update_attributes(iscorrect: '0') end else # the statement is not correct if quiz_question_choice.txt == "True" quiz_question_choice.update_attributes(iscorrect: '0') else quiz_question_choice.update_attributes(iscorrect: '1') # the statement is not correct so "False" is the right answer end end end i += 1 end end end redirect_to controller: 'submitted_content', action: 'view', id: params[:pid] end def 
valid_quiz num_quiz_questions = Assignment.find(params[:aid]).num_quiz_questions valid = "valid" (1..num_quiz_questions).each do |i| if params[:new_question][i.to_s] == '' # One of the questions text is not filled out valid = "Please make sure all questions have text" break elsif !params.key?(:question_type) || !params[:question_type].key?(i.to_s) || params[:question_type][i.to_s][:type].nil? # A type isnt selected for a question valid = "Please select a type for each question" break elsif params[:questionnaire][:name] == "" # questionnaire name is not specified valid = "Please specify quiz name (please do not use your name or id)." break else type = params[:question_type][i.to_s][:type] if type == 'MultipleChoiceCheckbox' or type == 'MultipleChoiceRadio' correct_selected = false (1..4).each do |x| if params[:new_choices][i.to_s][type][x.to_s][:txt] == '' # Text isnt provided for an option valid = "Please make sure every question has text for all options" break elsif type == 'MultipleChoiceRadio' and !params[:new_choices][i.to_s][type][x.to_s][:iscorrect].nil? correct_selected = true elsif type == 'MultipleChoiceCheckbox' and params[:new_choices][i.to_s][type][x.to_s][:iscorrect] != 0.to_s correct_selected = true end end if valid == "valid" && !correct_selected # A correct option isnt selected for a check box or radio question valid = "Please select a correct answer for all questions" break end elsif type == 'TF' # TF is not disabled. We need to test TF later. if params[:new_choices][i.to_s]["TF"].nil? # A correct option isnt selected for a true/false question valid = "Please select a correct answer for all questions" break end end end end valid end private # save questionnaire object after create or edit def save @questionnaire.save! save_questions @questionnaire.id if !@questionnaire.id.nil? 
and @questionnaire.id > 0 # We do not create node for quiz questionnaires if @questionnaire.type != "QuizQuestionnaire" pFolder = TreeFolder.find_by_name(@questionnaire.display_type) parent = FolderNode.find_by_node_object_id(pFolder.id) create_new_node_if_necessary(parent) end undo_link("Questionnaire \"#{@questionnaire.name}\" has been updated successfully. ") end # save parameters for new questions def save_new_question_parameters(qid, q_num) q = QuestionType.new q.q_type = params[:question_type][q_num][:type] q.parameters = params[:question_type][q_num][:parameters] q.question_id = qid q.save end # save questions that have been added to a questionnaire def save_new_questions(questionnaire_id) if params[:new_question] # The new_question array contains all the new questions # that should be saved to the database for question_key in params[:new_question].keys q = Question.new q.txt = params[:new_question][question_key] q.questionnaire_id = questionnaire_id q.type = params[:question_type][question_key][:type] q.seq = question_key.to_i if @questionnaire.type == "QuizQuestionnaire" q.weight = 1 # setting the weight to 1 for quiz questionnaire since the model validates this field end q.save unless q.txt.strip.empty? end end end # delete questions from a questionnaire # @param [Object] questionnaire_id def delete_questions(questionnaire_id) # Deletes any questions that, as a result of the edit, are no longer in the questionnaire questions = Question.where("questionnaire_id = " + questionnaire_id.to_s) @deleted_questions = [] for question in questions should_delete = true unless question_params.nil? 
for question_key in params[:question].keys should_delete = false if question_key.to_s === question.id.to_s end end next unless should_delete for advice in question.question_advices advice.destroy end # keep track of the deleted questions @deleted_questions.push(question) question.destroy end end # Handles questions whose wording changed as a result of the edit # @param [Object] questionnaire_id def save_questions(questionnaire_id) delete_questions questionnaire_id save_new_questions questionnaire_id if params[:question] for question_key in params[:question].keys if params[:question][question_key][:txt].strip.empty? # question text is empty, delete the question Question.delete(question_key) else # Update existing question. question = Question.find(question_key) unless question.update_attributes(params[:question][question_key]) Rails.logger.info(question.errors.messages.inspect) end end end end end # method to save the choices associated with a question in a quiz to the database # only for quiz questionnaire def save_choices(questionnaire_id) if params[:new_question] and params[:new_choices] questions = Question.where(questionnaire_id: questionnaire_id) questionnum = 1 for question in questions q_type = params[:question_type][questionnum.to_s][:type] for choice_key in params[:new_choices][questionnum.to_s][q_type].keys score = if params[:new_choices][questionnum.to_s][q_type][choice_key]["weight"] == 1.to_s 1 else 0 end if q_type == "MultipleChoiceCheckbox" if params[:new_choices][questionnum.to_s][q_type][choice_key][:iscorrect] == 1.to_s q = QuizQuestionChoice.new(txt: params[:new_choices][questionnum.to_s][q_type][choice_key][:txt], iscorrect: "true", question_id: question.id) else q = QuizQuestionChoice.new(txt: params[:new_choices][questionnum.to_s][q_type][choice_key][:txt], iscorrect: "false", question_id: question.id) end q.save elsif q_type == "TrueFalse" if params[:new_choices][questionnum.to_s][q_type][1.to_s][:iscorrect] == choice_key q = 
QuizQuestionChoice.new(txt: "True", iscorrect: "true", question_id: question.id) q.save q = QuizQuestionChoice.new(txt: "False", iscorrect: "false", question_id: question.id) q.save else q = QuizQuestionChoice.new(txt: "True", iscorrect: "false", question_id: question.id) q.save q = QuizQuestionChoice.new(txt: "False", iscorrect: "true", question_id: question.id) q.save end else if params[:new_choices][questionnum.to_s][q_type][1.to_s][:iscorrect] == choice_key q = QuizQuestionChoice.new(txt: params[:new_choices][questionnum.to_s][q_type][choice_key][:txt], iscorrect: "true", question_id: question.id) else q = QuizQuestionChoice.new(txt: params[:new_choices][questionnum.to_s][q_type][choice_key][:txt], iscorrect: "false", question_id: question.id) end q.save end end questionnum += 1 question.weight = 1 end end end def questionnaire_params params.require(:questionnaire).permit(:name, :instructor_id, :private, :min_question_score, :max_question_score, :type, :display_type, :instruction_loc) end def question_params params.require(:question).permit(:txt, :weight, :questionnaire_id, :seq, :type, :size, :alternatives, :break_before, :max_label, :min_label) end # FIXME: These private methods belong in the Questionnaire model def export @questionnaire = Questionnaire.find(params[:id]) csv_data = QuestionnaireHelper.create_questionnaire_csv @questionnaire, session[:user].name send_data csv_data, type: 'text/csv; charset=iso-8859-1; header=present', disposition: "attachment; filename=questionnaires.csv" end def import @questionnaire = Questionnaire.find(params[:id]) file = params['csv'] @questionnaire.questions << QuestionnaireHelper.get_questions_from_csv(@questionnaire, file) end # clones the contents of a questionnaire, including the questions and associated advice def copy_questionnaire_details(questions, orig_questionnaire) assign_instructor_id @questionnaire.name = 'Copy of ' + orig_questionnaire.name begin @questionnaire.created_at = Time.now @questionnaire.save! 
questions.each do |question| new_question = question.dup new_question.questionnaire_id = @questionnaire.id if (new_question.is_a? Criterion or new_question.is_a? TextResponse) and new_question.size.nil? new_question.size = '50,3' end new_question.save! advice = QuestionAdvice.find_by_question_id(question.id) next unless advice new_advice = advice.dup new_advice.question_id = new_question.id new_advice.save! end pFolder = TreeFolder.find_by_name(@questionnaire.display_type) parent = FolderNode.find_by_node_object_id(pFolder.id) create_new_node_if_necessary(parent) undo_link("Copy of questionnaire #{orig_questionnaire.name} has been created successfully. ") redirect_to controller: 'questionnaires', action: 'view', id: @questionnaire.id rescue flash[:error] = 'The questionnaire was not able to be copied. Please check the original course for missing information.' + $ERROR_INFO redirect_to action: 'list', controller: 'tree_display' end end private def create_new_node_if_necessary(parent) unless QuestionnaireNode.exists?(parent_id: parent.id, node_object_id: @questionnaire.id) QuestionnaireNode.create(parent_id: parent.id, node_object_id: @questionnaire.id) end end def assign_instructor_id # if the user to copy the questionnaire is a TA, the instructor should be the owner instead of the TA @questionnaire.instructor_id = if session[:user].role.name != "Teaching Assistant" session[:user].id else # for TA we need to get his instructor id and by default add it to his course for which he is the TA Ta.get_my_instructor(session[:user].id) end end end
# frozen_string_literal: true class RentersFriendController < ApplicationController layout "renters_friend" def index @images = { twitter: path_to_asset('twitter_logo') } end private def path_to_asset(asset) ApplicationController.helper.asset_path(asset) end end fix spelling error # frozen_string_literal: true class RentersFriendController < ApplicationController layout "renters_friend" def index @images = { twitter: path_to_asset('twitter_logo') } end private def path_to_asset(asset) ApplicationController.helpers.asset_path(asset) end end
class SingleSignOnController < ApplicationController after_filter :set_access_control_headers # before_action :authenticate_user! def sso return render nothing: true, status: 401 unless sign(params[:payload]) == params[:sig] return render nothing: true, status: 401 unless nonce = extract_nonce return render nothing: true, status: 403 unless current_user = extract_user user = Addressable::URI.new user.query_values = { nonce: nonce, team: ENV["LANDLINE_TEAM"], id: current_user.id, avatar_url: current_user.avatar.url.to_s, username: current_user.username, email: current_user.email, real_name: current_user.name, profile_url: user_url(current_user) } payload = Base64.encode64(user.query) sig = sign(payload) url = "#{ENV["LANDLINE_URL"]}/sessions/sso?payload=#{URI.escape(payload)}&sig=#{sig}" redirect_to url end private def decode_payload payload = params[:payload] raw = Base64.decode64(payload) uri = CGI.parse(raw) end def extract_nonce decode_payload["nonce"][0] end def extract_user User.find_by(authentication_token: decode_payload["uid"][0])) end def sign(payload) digest = OpenSSL::Digest.new('sha256') OpenSSL::HMAC.hexdigest(digest, ENV["LANDLINE_SECRET"], payload) end def set_access_control_headers headers['Access-Control-Allow-Origin'] = '*' headers['Access-Control-Allow-Methods'] = 'GET, POST, DELETE' headers['Access-Control-Request-Method'] = '*' headers['Access-Control-Allow-Headers'] = 'Origin, Content-Type, Accept' end end Remove offending parenthesis class SingleSignOnController < ApplicationController after_filter :set_access_control_headers # before_action :authenticate_user! 
def sso return render nothing: true, status: 401 unless sign(params[:payload]) == params[:sig] return render nothing: true, status: 401 unless nonce = extract_nonce return render nothing: true, status: 403 unless current_user = extract_user user = Addressable::URI.new user.query_values = { nonce: nonce, team: ENV["LANDLINE_TEAM"], id: current_user.id, avatar_url: current_user.avatar.url.to_s, username: current_user.username, email: current_user.email, real_name: current_user.name, profile_url: user_url(current_user) } payload = Base64.encode64(user.query) sig = sign(payload) url = "#{ENV["LANDLINE_URL"]}/sessions/sso?payload=#{URI.escape(payload)}&sig=#{sig}" redirect_to url end private def decode_payload payload = params[:payload] raw = Base64.decode64(payload) uri = CGI.parse(raw) end def extract_nonce decode_payload["nonce"][0] end def extract_user User.find_by(authentication_token: decode_payload["uid"][0]) end def sign(payload) digest = OpenSSL::Digest.new('sha256') OpenSSL::HMAC.hexdigest(digest, ENV["LANDLINE_SECRET"], payload) end def set_access_control_headers headers['Access-Control-Allow-Origin'] = '*' headers['Access-Control-Allow-Methods'] = 'GET, POST, DELETE' headers['Access-Control-Request-Method'] = '*' headers['Access-Control-Allow-Headers'] = 'Origin, Content-Type, Accept' end end
# frozen_string_literal: true require 'open_food_network/address_finder' class SplitCheckoutController < ::BaseController layout 'darkswarm' include OrderStockCheck include Spree::BaseHelper include CheckoutCallbacks include CablecarResponses helper 'terms_and_conditions' helper 'checkout' helper 'spree/orders' helper OrderHelper def edit redirect_to_step unless params[:step] end def update if confirm_order || update_order clear_invalid_payments advance_order_state redirect_to_step else flash.now[:error] = I18n.t('split_checkout.errors.global') render status: :unprocessable_entity, operations: cable_car. replace("#checkout", partial("split_checkout/checkout")). replace("#flashes", partial("shared/flashes", locals: { flashes: flash })) end end private def clear_invalid_payments @order.payments.with_state(:invalid).delete_all end def confirm_order return unless summary_step? && @order.confirmation? return unless validate_summary! && @order.errors.empty? @order.customer.touch :terms_and_conditions_accepted_at @order.confirm! end def update_order return if @order.errors.any? @order.select_shipping_method(params[:shipping_method_id]) @order.update(order_params) validate_current_step! @order.errors.empty? end def summary_step? params[:step] == "summary" end def advance_order_state return if @order.complete? OrderWorkflow.new(@order).advance_checkout(raw_params.slice(:shipping_method_id)) end def validate_current_step! step = ([params[:step]] & ["details", "payment", "summary"]).first send("validate_#{step}!") end def validate_details! return true if params[:shipping_method_id].present? @order.errors.add :shipping_method, I18n.t('split_checkout.errors.select_a_shipping_method') end def validate_payment! return true if params.dig(:order, :payments_attributes, 0, :payment_method_id).present? @order.errors.add :payment_method, I18n.t('split_checkout.errors.select_a_payment_method') end def validate_summary! 
return true if params[:accept_terms] return true unless TermsOfService.required?(@order.distributor) @order.errors.add(:terms_and_conditions, t("split_checkout.errors.terms_not_accepted")) end def order_params @order_params ||= Checkout::Params.new(@order, params).call end def redirect_to_step case @order.state when "cart", "address", "delivery" redirect_to checkout_step_path(:details) when "payment" redirect_to checkout_step_path(:payment) when "confirmation" redirect_to checkout_step_path(:summary) else redirect_to order_path(@order) end end end Use OrderCompletion in split checkout for #order_completion_reset # frozen_string_literal: true require 'open_food_network/address_finder' class SplitCheckoutController < ::BaseController layout 'darkswarm' include OrderStockCheck include Spree::BaseHelper include CheckoutCallbacks include OrderCompletion include CablecarResponses helper 'terms_and_conditions' helper 'checkout' helper 'spree/orders' helper OrderHelper def edit redirect_to_step unless params[:step] end def update if confirm_order || update_order clear_invalid_payments advance_order_state redirect_to_step else flash.now[:error] = I18n.t('split_checkout.errors.global') render status: :unprocessable_entity, operations: cable_car. replace("#checkout", partial("split_checkout/checkout")). replace("#flashes", partial("shared/flashes", locals: { flashes: flash })) end end private def clear_invalid_payments @order.payments.with_state(:invalid).delete_all end def confirm_order return unless summary_step? && @order.confirmation? return unless validate_summary! && @order.errors.empty? @order.customer.touch :terms_and_conditions_accepted_at @order.confirm! order_completion_reset @order end def update_order return if @order.errors.any? @order.select_shipping_method(params[:shipping_method_id]) @order.update(order_params) validate_current_step! @order.errors.empty? end def summary_step? params[:step] == "summary" end def advance_order_state return if @order.complete? 
OrderWorkflow.new(@order).advance_checkout(raw_params.slice(:shipping_method_id)) end def validate_current_step! step = ([params[:step]] & ["details", "payment", "summary"]).first send("validate_#{step}!") end def validate_details! return true if params[:shipping_method_id].present? @order.errors.add :shipping_method, I18n.t('split_checkout.errors.select_a_shipping_method') end def validate_payment! return true if params.dig(:order, :payments_attributes, 0, :payment_method_id).present? @order.errors.add :payment_method, I18n.t('split_checkout.errors.select_a_payment_method') end def validate_summary! return true if params[:accept_terms] return true unless TermsOfService.required?(@order.distributor) @order.errors.add(:terms_and_conditions, t("split_checkout.errors.terms_not_accepted")) end def order_params @order_params ||= Checkout::Params.new(@order, params).call end def redirect_to_step case @order.state when "cart", "address", "delivery" redirect_to checkout_step_path(:details) when "payment" redirect_to checkout_step_path(:payment) when "confirmation" redirect_to checkout_step_path(:summary) else redirect_to order_path(@order) end end end
class Spree::SettingsController < Spree::BaseController def new @valor = Spree::Setting.new end def create #@valor = Spree::Setting.new(params[:setting]) prueba = params.select{|k,v| k =~ /.x/} case prueba.keys.first when "0" session[:locale] = "es" when "1" session[:locale] = "en" end redirect_to('/', :notice => t('spree.settings.notices.success')) end def set_flag session[:locale] = params[:id] session[:vat] = 1.18 end end Added Search from for tires class Spree::SettingsController < Spree::BaseController def new @valor = Spree::Setting.new end def create #@valor = Spree::Setting.new(params[:setting]) prueba = params.select{|k,v| k =~ /.x/} case prueba.keys.first[0] when "0" session[:locale] = "es" when "1" session[:locale] = "en" end redirect_to('/', :notice => t('spree.settings.notices.success')) end def set_flag session[:locale] = params[:id] session[:vat] = 1.18 end end
class Teams::ProjectsController < Teams::ApplicationController skip_before_filter :authorize_manage_user_team!, only: [:index] def index @projects = user_team.projects @avaliable_projects = current_user.admin? ? Project.without_team(user_team) : (Project.personal(current_user) + current_user.projects).uniq end def new user_team @avaliable_projects = Project.scoped @avaliable_projects = @avaliable_projects.without_team(user_team) if user_team.projects.any? #@projects.reject!(&:empty_repo?) redirect_to team_projects_path(user_team), notice: "No avalible projects." unless @avaliable_projects.any? end def create unless params[:project_ids].blank? project_ids = params[:project_ids] access = params[:greatest_project_access] user_team.assign_to_projects(project_ids, access) end redirect_to team_projects_path(user_team), notice: 'Team of users was successfully assgned to projects.' end def edit team_project end def update if user_team.update_project_access(team_project, params[:greatest_project_access]) redirect_to team_projects_path(user_team), notice: 'Access was successfully updated.' else render :edit end end def destroy user_team.resign_from_project(team_project) redirect_to team_projects_path(user_team), notice: 'Team of users was successfully reassigned from project.' end private def team_project @project ||= user_team.projects.find_with_namespace(params[:id]) end end Dont allow to select a project you have no right to assign class Teams::ProjectsController < Teams::ApplicationController skip_before_filter :authorize_manage_user_team!, only: [:index] def index @projects = user_team.projects @avaliable_projects = current_user.admin? ? Project.without_team(user_team) : (Project.personal(current_user) + current_user.projects).uniq end def new user_team @avaliable_projects = current_user.owned_projects.scoped @avaliable_projects = @avaliable_projects.without_team(user_team) if user_team.projects.any? 
redirect_to team_projects_path(user_team), notice: "No avalible projects." unless @avaliable_projects.any? end def create unless params[:project_ids].blank? project_ids = params[:project_ids] access = params[:greatest_project_access] user_team.assign_to_projects(project_ids, access) end redirect_to team_projects_path(user_team), notice: 'Team of users was successfully assgned to projects.' end def edit team_project end def update if user_team.update_project_access(team_project, params[:greatest_project_access]) redirect_to team_projects_path(user_team), notice: 'Access was successfully updated.' else render :edit end end def destroy user_team.resign_from_project(team_project) redirect_to team_projects_path(user_team), notice: 'Team of users was successfully reassigned from project.' end private def team_project @project ||= user_team.projects.find_with_namespace(params[:id]) end end
module Users class DossiersController < UserController include Devise::StoreLocationExtension include DossierHelper layout 'procedure_context', only: [:identite, :update_identite, :siret, :update_siret] ACTIONS_ALLOWED_TO_ANY_USER = [:index, :recherche, :new] ACTIONS_ALLOWED_TO_OWNER_OR_INVITE = [:show, :demande, :messagerie, :brouillon, :update_brouillon, :modifier, :update, :create_commentaire] before_action :ensure_ownership!, except: ACTIONS_ALLOWED_TO_ANY_USER + ACTIONS_ALLOWED_TO_OWNER_OR_INVITE before_action :ensure_ownership_or_invitation!, only: ACTIONS_ALLOWED_TO_OWNER_OR_INVITE before_action :ensure_dossier_can_be_updated, only: [:update_identite, :update_brouillon, :modifier, :update] before_action :forbid_invite_submission!, only: [:update_brouillon] before_action :forbid_closed_submission!, only: [:update_brouillon] before_action :show_demarche_en_test_banner before_action :store_user_location!, only: :new def index @user_dossiers = current_user.dossiers.includes(:procedure).order_by_updated_at.page(page) @dossiers_invites = current_user.dossiers_invites.includes(:procedure).order_by_updated_at.page(page) @current_tab = current_tab(@user_dossiers.count, @dossiers_invites.count) @dossiers = case @current_tab when 'mes-dossiers' @user_dossiers when 'dossiers-invites' @dossiers_invites end end def show if dossier.brouillon? redirect_to brouillon_dossier_path(dossier) end @dossier = dossier end def demande @dossier = dossier end def messagerie @dossier = dossier @commentaire = Commentaire.new end def attestation if dossier.attestation.pdf_active_storage.attached? 
redirect_to url_for(dossier.attestation.pdf_active_storage) else send_data(dossier.attestation.pdf.read, filename: 'attestation.pdf', type: 'application/pdf') end end def identite @dossier = dossier @user = current_user end def update_identite @dossier = dossier if @dossier.individual.update(individual_params) @dossier.update!(autorisation_donnees: true) flash.notice = "Identité enregistrée" redirect_to brouillon_dossier_path(@dossier) else flash.now.alert = @dossier.individual.errors.full_messages render :identite end end def siret @dossier = dossier end def update_siret @dossier = dossier # We use the user as the holder model object for the siret value # (so that we can restore it on the form in case of error). # # This is the only remaining use of User#siret: it could be refactored away. # However some existing users have a siret but no associated etablissement, # so we would need to analyze the legacy data and decide what to do with it. current_user.siret = siret_params[:siret] siret_model = Siret.new(siret: siret_params[:siret]) if !siret_model.valid? return render_siret_error(siret_model.errors.full_messages) end sanitized_siret = siret_model.siret begin etablissement_attributes = ApiEntrepriseService.get_etablissement_params_for_siret(sanitized_siret, @dossier.procedure.id) rescue RestClient::RequestFailed return render_siret_error(t('errors.messages.siret_network_error')) end if etablissement_attributes.blank? return render_siret_error(t('errors.messages.siret_unknown')) end etablissement = @dossier.build_etablissement(etablissement_attributes) etablissement.save! 
current_user.update!(siret: sanitized_siret) @dossier.update!(autorisation_donnees: true) redirect_to etablissement_dossier_path end def etablissement @dossier = dossier # Redirect if the user attempts to access the page URL directly if !@dossier.etablissement flash.alert = 'Aucun établissement n’est associé à ce dossier' return redirect_to siret_dossier_path(@dossier) end end def brouillon @dossier = dossier_with_champs # TODO: remove when the champs are unifed if !@dossier.autorisation_donnees if dossier.procedure.for_individual redirect_to identite_dossier_path(@dossier) else redirect_to siret_dossier_path(@dossier) end end end # FIXME: # - delegate draft save logic to champ ? def update_brouillon @dossier = dossier_with_champs errors = update_dossier_and_compute_errors if errors.present? flash.now.alert = errors render :brouillon else if save_draft? flash.now.notice = 'Votre brouillon a bien été sauvegardé.' render :brouillon else @dossier.en_construction! NotificationMailer.send_initiated_notification(@dossier).deliver_later redirect_to merci_dossier_path(@dossier) end end end def modifier @dossier = dossier_with_champs end def update @dossier = dossier_with_champs errors = update_dossier_and_compute_errors if errors.present? flash.now.alert = errors render :modifier else redirect_to demande_dossier_path(@dossier) end end def merci @dossier = current_user.dossiers.includes(:procedure).find(params[:id]) end def create_commentaire @commentaire = CommentaireService.build(current_user, dossier, commentaire_params) if @commentaire.save flash.notice = "Votre message a bien été envoyé à l’instructeur en charge de votre dossier." redirect_to messagerie_dossier_path(dossier) else flash.now.alert = @commentaire.errors.full_messages render :messagerie end end def ask_deletion dossier = current_user.dossiers.includes(:user, procedure: :administrateurs).find(params[:id]) if dossier.can_be_deleted_by_user? 
dossier.delete_and_keep_track(current_user) flash.notice = 'Votre dossier a bien été supprimé.' redirect_to dossiers_path else flash.notice = "L'instruction de votre dossier a commencé, il n'est plus possible de supprimer votre dossier. Si vous souhaitez annuler l'instruction contactez votre administration par la messagerie de votre dossier." redirect_to dossier_path(dossier) end end def recherche @dossier_id = params[:dossier_id] dossier = current_user.dossiers.find_by(id: @dossier_id) if dossier redirect_to url_for_dossier(dossier) else flash.alert = "Vous n’avez pas de dossier avec le nº #{@dossier_id}." redirect_to dossiers_path end end def new erase_user_location! if params[:brouillon] procedure = Procedure.brouillon.find(params[:procedure_id]) else procedure = Procedure.publiees.find(params[:procedure_id]) end dossier = Dossier.create!(procedure: procedure, user: current_user, state: Dossier.states.fetch(:brouillon)) if dossier.procedure.for_individual if current_user.france_connect_information.present? dossier.update_with_france_connect(current_user.france_connect_information) end redirect_to identite_dossier_path(dossier) else redirect_to siret_dossier_path(id: dossier.id) end rescue ActiveRecord::RecordNotFound flash.alert = t('errors.messages.procedure_not_found') redirect_to url_for dossiers_path end def dossier_for_help dossier_id = params[:id] || params[:dossier_id] @dossier || (dossier_id.present? && Dossier.find_by(id: dossier_id.to_i)) end private def store_user_location! store_location_for(:user, request.fullpath) end def erase_user_location! clear_stored_location_for(:user) end def show_demarche_en_test_banner if @dossier.present? && @dossier.procedure.brouillon? flash.now.alert = "Ce dossier est déposé sur une démarche en test. Toute modification de la démarche par l'administrateur (ajout d'un champ, publication de la démarche...) entrainera sa suppression." end end def ensure_dossier_can_be_updated if !dossier.can_be_updated_by_user? 
flash.alert = 'Votre dossier ne peut plus être modifié' redirect_to dossiers_path end end def page [params[:page].to_i, 1].max end def current_tab(mes_dossiers_count, dossiers_invites_count) if dossiers_invites_count == 0 'mes-dossiers' elsif mes_dossiers_count == 0 'dossiers-invites' else params[:current_tab].presence || 'mes-dossiers' end end # FIXME: require(:dossier) when all the champs are united def champs_params params.permit(dossier: { champs_attributes: [ :id, :value, :primary_value, :secondary_value, :piece_justificative_file, value: [], champs_attributes: [:id, :_destroy, :value, :primary_value, :secondary_value, :piece_justificative_file, value: []] ] }) end def dossier @dossier ||= Dossier.find(params[:id] || params[:dossier_id]) end def dossier_with_champs Dossier.with_champs.find(params[:id]) end def update_dossier_and_compute_errors errors = [] if champs_params[:dossier] && !@dossier.update(champs_params[:dossier]) errors += @dossier.errors.full_messages end if !save_draft? errors += @dossier.check_mandatory_champs end errors end def ensure_ownership! if !current_user.owns?(dossier) forbidden! end end def ensure_ownership_or_invitation! if !current_user.owns_or_invite?(dossier) forbidden! end end def forbid_invite_submission! if passage_en_construction? && !current_user.owns?(dossier) forbidden! end end def forbid_closed_submission! if passage_en_construction? && !dossier.can_transition_to_en_construction? forbidden! end end def forbidden! flash[:alert] = "Vous n'avez pas accès à ce dossier" redirect_to root_path end def render_siret_error(error_message) flash.alert = error_message render :siret end def individual_params params.require(:individual).permit(:gender, :nom, :prenom, :birthdate) end def siret_params params.require(:user).permit(:siret) end def commentaire_params params.require(:commentaire).permit(:body, :piece_jointe) end def passage_en_construction? dossier.brouillon? && !save_draft? end def save_draft? dossier.brouillon? 
&& params[:save_draft] end end end User/DossierController: dossier are linked to default group by default module Users class DossiersController < UserController include Devise::StoreLocationExtension include DossierHelper layout 'procedure_context', only: [:identite, :update_identite, :siret, :update_siret] ACTIONS_ALLOWED_TO_ANY_USER = [:index, :recherche, :new] ACTIONS_ALLOWED_TO_OWNER_OR_INVITE = [:show, :demande, :messagerie, :brouillon, :update_brouillon, :modifier, :update, :create_commentaire] before_action :ensure_ownership!, except: ACTIONS_ALLOWED_TO_ANY_USER + ACTIONS_ALLOWED_TO_OWNER_OR_INVITE before_action :ensure_ownership_or_invitation!, only: ACTIONS_ALLOWED_TO_OWNER_OR_INVITE before_action :ensure_dossier_can_be_updated, only: [:update_identite, :update_brouillon, :modifier, :update] before_action :forbid_invite_submission!, only: [:update_brouillon] before_action :forbid_closed_submission!, only: [:update_brouillon] before_action :show_demarche_en_test_banner before_action :store_user_location!, only: :new def index @user_dossiers = current_user.dossiers.includes(:procedure).order_by_updated_at.page(page) @dossiers_invites = current_user.dossiers_invites.includes(:procedure).order_by_updated_at.page(page) @current_tab = current_tab(@user_dossiers.count, @dossiers_invites.count) @dossiers = case @current_tab when 'mes-dossiers' @user_dossiers when 'dossiers-invites' @dossiers_invites end end def show if dossier.brouillon? redirect_to brouillon_dossier_path(dossier) end @dossier = dossier end def demande @dossier = dossier end def messagerie @dossier = dossier @commentaire = Commentaire.new end def attestation if dossier.attestation.pdf_active_storage.attached? 
redirect_to url_for(dossier.attestation.pdf_active_storage) else send_data(dossier.attestation.pdf.read, filename: 'attestation.pdf', type: 'application/pdf') end end def identite @dossier = dossier @user = current_user end def update_identite @dossier = dossier if @dossier.individual.update(individual_params) @dossier.update!(autorisation_donnees: true) flash.notice = "Identité enregistrée" redirect_to brouillon_dossier_path(@dossier) else flash.now.alert = @dossier.individual.errors.full_messages render :identite end end def siret @dossier = dossier end def update_siret @dossier = dossier # We use the user as the holder model object for the siret value # (so that we can restore it on the form in case of error). # # This is the only remaining use of User#siret: it could be refactored away. # However some existing users have a siret but no associated etablissement, # so we would need to analyze the legacy data and decide what to do with it. current_user.siret = siret_params[:siret] siret_model = Siret.new(siret: siret_params[:siret]) if !siret_model.valid? return render_siret_error(siret_model.errors.full_messages) end sanitized_siret = siret_model.siret begin etablissement_attributes = ApiEntrepriseService.get_etablissement_params_for_siret(sanitized_siret, @dossier.procedure.id) rescue RestClient::RequestFailed return render_siret_error(t('errors.messages.siret_network_error')) end if etablissement_attributes.blank? return render_siret_error(t('errors.messages.siret_unknown')) end etablissement = @dossier.build_etablissement(etablissement_attributes) etablissement.save! 
current_user.update!(siret: sanitized_siret) @dossier.update!(autorisation_donnees: true) redirect_to etablissement_dossier_path end def etablissement @dossier = dossier # Redirect if the user attempts to access the page URL directly if !@dossier.etablissement flash.alert = 'Aucun établissement n’est associé à ce dossier' return redirect_to siret_dossier_path(@dossier) end end def brouillon @dossier = dossier_with_champs # TODO: remove when the champs are unifed if !@dossier.autorisation_donnees if dossier.procedure.for_individual redirect_to identite_dossier_path(@dossier) else redirect_to siret_dossier_path(@dossier) end end end # FIXME: # - delegate draft save logic to champ ? def update_brouillon @dossier = dossier_with_champs errors = update_dossier_and_compute_errors if errors.present? flash.now.alert = errors render :brouillon else if save_draft? flash.now.notice = 'Votre brouillon a bien été sauvegardé.' render :brouillon else @dossier.en_construction! NotificationMailer.send_initiated_notification(@dossier).deliver_later redirect_to merci_dossier_path(@dossier) end end end def modifier @dossier = dossier_with_champs end def update @dossier = dossier_with_champs errors = update_dossier_and_compute_errors if errors.present? flash.now.alert = errors render :modifier else redirect_to demande_dossier_path(@dossier) end end def merci @dossier = current_user.dossiers.includes(:procedure).find(params[:id]) end def create_commentaire @commentaire = CommentaireService.build(current_user, dossier, commentaire_params) if @commentaire.save flash.notice = "Votre message a bien été envoyé à l’instructeur en charge de votre dossier." redirect_to messagerie_dossier_path(dossier) else flash.now.alert = @commentaire.errors.full_messages render :messagerie end end def ask_deletion dossier = current_user.dossiers.includes(:user, procedure: :administrateurs).find(params[:id]) if dossier.can_be_deleted_by_user? 
dossier.delete_and_keep_track(current_user) flash.notice = 'Votre dossier a bien été supprimé.' redirect_to dossiers_path else flash.notice = "L'instruction de votre dossier a commencé, il n'est plus possible de supprimer votre dossier. Si vous souhaitez annuler l'instruction contactez votre administration par la messagerie de votre dossier." redirect_to dossier_path(dossier) end end def recherche @dossier_id = params[:dossier_id] dossier = current_user.dossiers.find_by(id: @dossier_id) if dossier redirect_to url_for_dossier(dossier) else flash.alert = "Vous n’avez pas de dossier avec le nº #{@dossier_id}." redirect_to dossiers_path end end def new erase_user_location! if params[:brouillon] procedure = Procedure.brouillon.find(params[:procedure_id]) else procedure = Procedure.publiees.find(params[:procedure_id]) end dossier = Dossier.create!(groupe_instructeur: procedure.defaut_groupe_instructeur, user: current_user, state: Dossier.states.fetch(:brouillon)) if dossier.procedure.for_individual if current_user.france_connect_information.present? dossier.update_with_france_connect(current_user.france_connect_information) end redirect_to identite_dossier_path(dossier) else redirect_to siret_dossier_path(id: dossier.id) end rescue ActiveRecord::RecordNotFound flash.alert = t('errors.messages.procedure_not_found') redirect_to url_for dossiers_path end def dossier_for_help dossier_id = params[:id] || params[:dossier_id] @dossier || (dossier_id.present? && Dossier.find_by(id: dossier_id.to_i)) end private def store_user_location! store_location_for(:user, request.fullpath) end def erase_user_location! clear_stored_location_for(:user) end def show_demarche_en_test_banner if @dossier.present? && @dossier.procedure.brouillon? flash.now.alert = "Ce dossier est déposé sur une démarche en test. Toute modification de la démarche par l'administrateur (ajout d'un champ, publication de la démarche...) entrainera sa suppression." 
end end def ensure_dossier_can_be_updated if !dossier.can_be_updated_by_user? flash.alert = 'Votre dossier ne peut plus être modifié' redirect_to dossiers_path end end def page [params[:page].to_i, 1].max end def current_tab(mes_dossiers_count, dossiers_invites_count) if dossiers_invites_count == 0 'mes-dossiers' elsif mes_dossiers_count == 0 'dossiers-invites' else params[:current_tab].presence || 'mes-dossiers' end end # FIXME: require(:dossier) when all the champs are united def champs_params params.permit(dossier: { champs_attributes: [ :id, :value, :primary_value, :secondary_value, :piece_justificative_file, value: [], champs_attributes: [:id, :_destroy, :value, :primary_value, :secondary_value, :piece_justificative_file, value: []] ] }) end def dossier @dossier ||= Dossier.find(params[:id] || params[:dossier_id]) end def dossier_with_champs Dossier.with_champs.find(params[:id]) end def update_dossier_and_compute_errors errors = [] if champs_params[:dossier] && !@dossier.update(champs_params[:dossier]) errors += @dossier.errors.full_messages end if !save_draft? errors += @dossier.check_mandatory_champs end errors end def ensure_ownership! if !current_user.owns?(dossier) forbidden! end end def ensure_ownership_or_invitation! if !current_user.owns_or_invite?(dossier) forbidden! end end def forbid_invite_submission! if passage_en_construction? && !current_user.owns?(dossier) forbidden! end end def forbid_closed_submission! if passage_en_construction? && !dossier.can_transition_to_en_construction? forbidden! end end def forbidden! flash[:alert] = "Vous n'avez pas accès à ce dossier" redirect_to root_path end def render_siret_error(error_message) flash.alert = error_message render :siret end def individual_params params.require(:individual).permit(:gender, :nom, :prenom, :birthdate) end def siret_params params.require(:user).permit(:siret) end def commentaire_params params.require(:commentaire).permit(:body, :piece_jointe) end def passage_en_construction? 
dossier.brouillon? && !save_draft? end def save_draft? dossier.brouillon? && params[:save_draft] end end end
class V1::LegislatorsController < V1::BaseController def index render json: Legislator.with_includes .order(:first_name, :last_name) .to_json( methods: [:name, :title, :state_name, :eligible], only: [:id, :with_us]) end def show legislator = Legislator.with_includes.find_by_bioguide_id(params[:id]) render json: legislator .to_json( methods: [:name, :title, :state_name, :eligible, :image_url, :state_abbrev], only: [:with_us, :party]) end def targeted render json: Legislator.with_includes.targeted end end end Legislators: add fake map API endpoint class V1::LegislatorsController < V1::BaseController def index render json: Legislator.with_includes .order(:first_name, :last_name) .to_json( methods: [:name, :title, :state_name, :eligible], only: [:id, :with_us]) end def show legislator = Legislator.with_includes.find_by_bioguide_id(params[:id]) render json: legislator .to_json( methods: [:name, :title, :state_name, :eligible, :image_url, :state_abbrev], only: [:with_us, :party]) end def targeted render json: Legislator.with_includes.targeted end def supporters # coordinates = (0..40).to_a.map{|x| (0..20).to_a.map{|y| [x,y]} }.flatten(1) coordinates = {"WA-02"=>[0, 0], "WA-01"=>[2, 0], "WA-SENIOR"=>[3, 0], "WA-05"=>[4, 0], "WA-06"=>[0, 1], "WA-07"=>[1, 1], "WA-09"=>[2, 1], "WA-08"=>[3, 1], "WA-JUNIOR"=>[4, 1], "ID-01"=>[5, 1], "OR-01"=>[0, 2], "WA-10"=>[1, 2], "WA-03"=>[2, 2], "WA-04"=>[3, 2], "OR-SENIOR"=>[4, 2], "ID-JUNIOR"=>[5, 2], "MT-00"=>[6, 2], "MT-JUNIOR"=>[7, 2], "OR-04"=>[0, 3], "OR-03"=>[1, 3], "OR-05"=>[2, 3], "OR-JUNIOR"=>[3, 3], "OR-02"=>[4, 3], "ID-SENIOR"=>[5, 3], "ID-02"=>[6, 3], "MT-SENIOR"=>[7, 3], "WY-00"=>[8, 3], "CA-05"=>[0, 4], "CA-03"=>[1, 4], "CA-02"=>[2, 4], "CA-01"=>[3, 4], "WY-SENIOR"=>[7, 4], "WY-JUNIOR"=>[8, 4], "CA-06"=>[0, 5], "CA-07"=>[1, 5], "CA-09"=>[2, 5], "CA-04"=>[3, 5], "CA-12"=>[0, 6], "CA-11"=>[1, 6], "CA-10"=>[2, 6], "CA-08"=>[3, 6], "CA-13"=>[0, 7], "CA-14"=>[1, 7], "CA-15"=>[2, 7], "CA-16"=>[3, 7], "ND-00"=>[11, 7], 
"ND-JUNIOR"=>[12, 7], "ND-SENIOR"=>[13, 7], "TN-08"=>[24, 7], "TN-06"=>[25, 7], "TN-03"=>[26, 7], "TN-02"=>[27, 7], "TN-01"=>[28, 7], "NC-05"=>[29, 7], "NC-06"=>[30, 7], "NC-01"=>[31, 7], "CA-17"=>[0, 8], "CA-18"=>[1, 8], "CA-19"=>[2, 8], "CA-20"=>[3, 8], "NV-02"=>[5, 8], "NV-04"=>[6, 8], "UT-01"=>[7, 8], "SD-00"=>[11, 8], "SD-JUNIOR"=>[12, 8], "SD-SENIOR"=>[13, 8], "TN-SENIOR"=>[24, 8], "TN-05"=>[25, 8], "TN-04"=>[26, 8], "NC-SENIOR"=>[27, 8], "NC-09"=>[28, 8], "NC-12"=>[29, 8], "NC-04"=>[30, 8], "NC-13"=>[31, 8], "CA-21"=>[0, 9], "CA-22"=>[1, 9], "CA-23"=>[2, 9], "CA-24"=>[3, 9], "NV-JUNIOR"=>[5, 9], "NV-SENIOR"=>[6, 9], "UT-SENIOR"=>[7, 9], "NE-03"=>[11, 9], "NE-01"=>[12, 9], "NE-02"=>[13, 9], "AR-03"=>[20, 9], "AR-JUNIOR"=>[21, 9], "AR-01"=>[22, 9], "TN-09"=>[23, 9], "TN-JUNIOR"=>[24, 9], "TN-07"=>[25, 9], "NC-11"=>[26, 9], "NC-10"=>[27, 9], "NC-JUNIOR"=>[28, 9], "NC-08"=>[29, 9], "NC-02"=>[30, 9], "NC-07"=>[31, 9], "CA-25"=>[0, 10], "CA-26"=>[1, 10], "CA-27"=>[2, 10], "CA-28"=>[3, 10], "CA-SENIOR"=>[4, 10], "NV-01"=>[6, 10], "UT-04"=>[7, 10], "UT-JUNIOR"=>[8, 10], "CO-02"=>[9, 10], "CO-01"=>[10, 10], "CO-07"=>[11, 10], "NE-SENIOR"=>[12, 10], "NE-JUNIOR"=>[13, 10], "TX-13"=>[15, 10], "TX-JUNIOR"=>[16, 10], "AR-04"=>[20, 10], "AR-02"=>[21, 10], "GA-14"=>[26, 10], "GA-11"=>[27, 10], "SC-03"=>[28, 10], "SC-04"=>[29, 10], "SC-05"=>[30, 10], "SC-07"=>[31, 10], "CA-JUNIOR"=>[1, 11], "CA-29"=>[2, 11], "CA-30"=>[3, 11], "CA-31"=>[4, 11], "CA-32"=>[5, 11], "NV-03"=>[6, 11], "UT-02"=>[7, 11], "UT-03"=>[8, 11], "CO-JUNIOR"=>[9, 11], "CO-SENIOR"=>[10, 11], "CO-06"=>[11, 11], "KS-01"=>[12, 11], "KS-02"=>[13, 11], "KS-03"=>[14, 11], "TX-19"=>[15, 11], "TX-SENIOR"=>[16, 11], "AR-SENIOR"=>[20, 11], "AL-04"=>[24, 11], "AL-05"=>[25, 11], "GA-13"=>[26, 11], "GA-06"=>[27, 11], "GA-09"=>[28, 11], "SC-02"=>[29, 11], "SC-SENIOR"=>[30, 11], "CA-33"=>[1, 12], "CA-34"=>[2, 12], "CA-35"=>[3, 12], "CA-37"=>[4, 12], "CA-38"=>[5, 12], "CA-39"=>[6, 12], "CO-03"=>[9, 12], "CO-05"=>[10, 12], 
"CO-04"=>[11, 12], "KS-SENIOR"=>[12, 12], "KS-04"=>[13, 12], "KS-JUNIOR"=>[14, 12], "TX-11"=>[15, 12], "TX-04"=>[16, 12], "MS-01"=>[23, 12], "AL-JUNIOR"=>[24, 12], "AL-SENIOR"=>[25, 12], "GA-05"=>[26, 12], "GA-04"=>[27, 12], "GA-07"=>[28, 12], "SC-06"=>[29, 12], "SC-01"=>[30, 12], "CA-40"=>[2, 13], "CA-43"=>[3, 13], "CA-44"=>[4, 13], "CA-45"=>[5, 13], "CA-46"=>[6, 13], "AZ-04"=>[7, 13], "AZ-06"=>[8, 13], "AZ-01"=>[9, 13], "NM-03"=>[10, 13], "NM-JUNIOR"=>[11, 13], "TX-16"=>[12, 13], "TX-23"=>[13, 13], "TX-12"=>[14, 13], "TX-26"=>[15, 13], "TX-03"=>[16, 13], "TX-01"=>[17, 13], "TX-36"=>[18, 13], "TX-14"=>[19, 13], "LA-04"=>[20, 13], "LA-05"=>[21, 13], "MS-02"=>[22, 13], "MS-JUNIOR"=>[23, 13], "AL-06"=>[24, 13], "AL-03"=>[25, 13], "GA-03"=>[26, 13], "GA-JUNIOR"=>[27, 13], "GA-SENIOR"=>[28, 13], "GA-10"=>[29, 13], "SC-JUNIOR"=>[30, 13], "CA-47"=>[3, 14], "CA-48"=>[4, 14], "CA-42"=>[5, 14], "CA-41"=>[6, 14], "AZ-08"=>[7, 14], "AZ-05"=>[8, 14], "AZ-JUNIOR"=>[9, 14], "NM-01"=>[10, 14], "NM-SENIOR"=>[11, 14], "TX-20"=>[12, 14], "TX-25"=>[13, 14], "TX-30"=>[14, 14], "TX-32"=>[15, 14], "TX-17"=>[16, 14], "TX-05"=>[17, 14], "TX-29"=>[18, 14], "TX-07"=>[19, 14], "LA-03"=>[20, 14], "LA-06"=>[21, 14], "MS-03"=>[22, 14], "MS-SENIOR"=>[23, 14], "AL-07"=>[24, 14], "AL-02"=>[25, 14], "GA-02"=>[26, 14], "GA-08"=>[27, 14], "GA-01"=>[28, 14], "GA-12"=>[29, 14], "CA-53"=>[4, 15], "CA-49"=>[5, 15], "CA-36"=>[6, 15], "AZ-07"=>[7, 15], "AZ-09"=>[8, 15], "AZ-SENIOR"=>[9, 15], "NM-02"=>[10, 15], "TX-28"=>[13, 15], "TX-22"=>[14, 15], "TX-06"=>[15, 15], "TX-33"=>[16, 15], "TX-18"=>[17, 15], "TX-27"=>[18, 15], "TX-02"=>[19, 15], "LA-JUNIOR"=>[20, 15], "LA-02"=>[21, 15], "LA-01"=>[22, 15], "MS-04"=>[23, 15], "AL-01"=>[24, 15], "FL-01"=>[25, 15], "FL-02"=>[26, 15], "FL-04"=>[27, 15], "FL-05"=>[28, 15], "FL-06"=>[29, 15], "FL-07"=>[30, 15], "CA-52"=>[4, 16], "CA-50"=>[5, 16], "CA-51"=>[6, 16], "AZ-03"=>[8, 16], "AZ-02"=>[9, 16], "TX-21"=>[13, 16], "TX-31"=>[15, 16], "TX-10"=>[16, 16], 
"TX-08"=>[17, 16], "TX-09"=>[18, 16], "LA-SENIOR"=>[22, 16], "FL-03"=>[26, 16], "FL-SENIOR"=>[28, 16], "FL-11"=>[29, 16], "FL-JUNIOR"=>[30, 16], "AK-00"=>[0, 17], "AK-JUNIOR"=>[1, 17], "TX-24"=>[13, 17], "TX-35"=>[16, 17], "TX-15"=>[17, 17], "FL-12"=>[28, 17], "FL-10"=>[29, 17], "FL-09"=>[30, 17], "FL-08"=>[31, 17], "AK-SENIOR"=>[1, 18], "TX-34"=>[16, 18], "FL-13"=>[28, 18], "FL-14"=>[29, 18], "FL-15"=>[30, 18], "FL-16"=>[31, 18], "HI-01"=>[4, 19], "FL-17"=>[29, 19], "FL-18"=>[30, 19], "FL-19"=>[31, 19], "HI-SENIOR"=>[5, 20], "HI-JUNIOR"=>[6, 20], "FL-20"=>[29, 20], "FL-21"=>[30, 20], "FL-22"=>[31, 20], "HI-02"=>[7, 21], "FL-23"=>[29, 21], "FL-24"=>[30, 21], "FL-25"=>[31, 21], "FL-26"=>[30, 22], "FL-27"=>[31, 22]} bill_ids = %w[hr20-114 hr424-114] support_levels = %w[cosponsored pledged] + ['',''] output = [] Legislator.with_includes.all.each_with_index do |l,i| next unless coordinates.include?(l.map_key) sponsorships = bill_ids.each_with_object({}){|b,h| h[b] = support_levels.sample} support_max = sponsorships.values.reject{|ob| ob.empty?}.sort[0] targeted = support_max == 'cosponsored' ? false : [true, false, false, false, false].sample output << { map_key: l.map_key, coordinates: coordinates[l.map_key], legislator: {name: l.name, title: l.title, party: l.party[0], support_max: support_max, targeted: targeted, image_url: l.image_url, sponsorships: sponsorships} } end end end
module Sprangular module ApplicationHelper def payment_methods Spree::PaymentMethod.available(:front_end).map do |method| { id: method.id, name: method.name } end end def js_environment config = ::Spree::Config store = Spree::Store.current templates = Hash[ Rails.application.assets.each_logical_path. select { |file| file.end_with?('html') }. map do |file| path = digest_assets? ? File.join('/assets', Rails.application.assets[file].digest_path) : asset_path(file) [file, path] end ] { env: Rails.env, config: { site_name: store.seo_title || store.name, logo: asset_path(config.logo), locale: I18n.locale, currency: Money::Currency.table[current_currency.downcase.to_sym], supported_locales: supported_locales, default_country_id: config.default_country_id, payment_methods: payment_methods, image_sizes: Spree::Image.attachment_definitions[:attachment][:styles].keys, product_page_size: Spree::Config.products_per_page }, translations: current_translations[:sprangular], templates: templates } end def supported_locales if Object.const_defined?('SpreeI18n') SpreeI18n::Config.supported_locales else [:en] end end def current_translations @translations ||= I18n.backend.send(:translations) @translations[I18n.locale].with_indifferent_access end def cached_templates Sprangular::Engine.config.cached_paths.inject({}) do |files, dir| cached_templates_for_dir(files, dir) end end def cached_templates_for_dir(files, dir) root = Sprangular::Engine.root files = Dir[root + "app/assets/templates/#{dir}/**"].inject(files) do |hash, path| asset_path = asset_path path.gsub(root.to_s + "/app/assets/templates/", "") local_path = "app/assets/templates/#{I18n.locale}/" + asset_path hash[asset_path.gsub(/.slim$/, '')] = Tilt.new(path).render.html_safe if !File.exists?(local_path) hash end Dir["app/assets/templates/#{dir}/**"].inject(files) do |hash, path| sprockets_path = path.gsub("app/assets/templates/", "") asset_path = asset_path(sprockets_path). gsub(/^\/app\/assets\/templates/, '/assets'). 
gsub(/.slim$/, '') hash[asset_path] = Rails.application.assets.find_asset(sprockets_path).body.html_safe hash end end end end Don't show additional locales if they are not avaliable. module Sprangular module ApplicationHelper def payment_methods Spree::PaymentMethod.available(:front_end).map do |method| { id: method.id, name: method.name } end end def js_environment config = ::Spree::Config store = Spree::Store.current templates = Hash[ Rails.application.assets.each_logical_path. select { |file| file.end_with?('html') }. map do |file| path = digest_assets? ? File.join('/assets', Rails.application.assets[file].digest_path) : asset_path(file) [file, path] end ] { env: Rails.env, config: { site_name: store.seo_title || store.name, logo: asset_path(config.logo), locale: I18n.locale, currency: Money::Currency.table[current_currency.downcase.to_sym], supported_locales: supported_locales, default_country_id: config.default_country_id, payment_methods: payment_methods, image_sizes: Spree::Image.attachment_definitions[:attachment][:styles].keys, product_page_size: Spree::Config.products_per_page }, translations: current_translations[:sprangular], templates: templates } end def supported_locales if Object.const_defined?('SpreeI18n') SpreeI18n::Config.supported_locales else # Use Default locale, and do not provide a front end selector. 
[] end end def current_translations @translations ||= I18n.backend.send(:translations) @translations[I18n.locale].with_indifferent_access end def cached_templates Sprangular::Engine.config.cached_paths.inject({}) do |files, dir| cached_templates_for_dir(files, dir) end end def cached_templates_for_dir(files, dir) root = Sprangular::Engine.root files = Dir[root + "app/assets/templates/#{dir}/**"].inject(files) do |hash, path| asset_path = asset_path path.gsub(root.to_s + "/app/assets/templates/", "") local_path = "app/assets/templates/#{I18n.locale}/" + asset_path hash[asset_path.gsub(/.slim$/, '')] = Tilt.new(path).render.html_safe if !File.exists?(local_path) hash end Dir["app/assets/templates/#{dir}/**"].inject(files) do |hash, path| sprockets_path = path.gsub("app/assets/templates/", "") asset_path = asset_path(sprockets_path). gsub(/^\/app\/assets\/templates/, '/assets'). gsub(/.slim$/, '') hash[asset_path] = Rails.application.assets.find_asset(sprockets_path).body.html_safe hash end end end end
module Sprangular module ApplicationHelper def payment_methods Spree::PaymentMethod.available(:front_end).map do |method| { id: method.id, name: method.name } end end def js_environment config = ::Spree::Config store = Spree::Store.current templates = Hash[ Rails.application.assets.each_logical_path. select { |file| file.end_with?('html') }. map do |file| path = digest_assets? ? File.join('/assets', Rails.application.assets[file].digest_path) : asset_path(file) [file, path] end ] { env: Rails.env, config: { site_name: store.seo_title || store.name, logo: asset_path(config.logo), locale: I18n.locale, currency: Money::Currency.table[current_currency.downcase.to_sym], supported_locales: supported_locales, default_country_id: config.default_country_id, payment_methods: payment_methods, image_sizes: Spree::Image.attachment_definitions[:attachment][:styles].keys, product_page_size: Spree::Config.products_per_page }, translations: current_sprangular_translations, templates: templates } end def supported_locales if Object.const_defined?("SpreeI18n") SpreeI18n::Config.supported_locales else # Use Default locale, and do not provide a front end selector. [] end end ## # Get relevant translations for front end. For both a simple, and # "Chainable" i18n Backend, which is used by spree i18n. 
def current_sprangular_translations if I18n.backend.class == I18n::Backend::Simple @translations ||= I18n.backend.send(:translations) else @translations ||= I18n.backend.backends.last.send(:translations) end # Return only sprangular keys for js environment @translations[I18n.locale][:sprangular] end def cached_templates Sprangular::Engine.config.cached_paths.inject({}) do |files, dir| cached_templates_for_dir(files, dir) end end def cached_templates_for_dir(files, dir) root = Sprangular::Engine.root files = Dir[root + "app/assets/templates/#{dir}/**"].inject(files) do |hash, path| asset_path = asset_path path.gsub(root.to_s + "/app/assets/templates/", "") local_path = "app/assets/templates/" + asset_path hash[asset_path.gsub(/.slim$/, '')] = Tilt.new(path).render.html_safe if !File.exists?(local_path) hash end Dir["app/assets/templates/#{dir}/**"].inject(files) do |hash, path| sprockets_path = path.gsub("app/assets/templates/", "") asset_path = asset_path(sprockets_path). gsub(/^\/app\/assets\/templates/, '/assets'). gsub(/.slim$/, '') hash[asset_path] = Rails.application.assets.find_asset(sprockets_path).body.html_safe hash end end end end Ensure i18n backend has loaded translations module Sprangular module ApplicationHelper def payment_methods Spree::PaymentMethod.available(:front_end).map do |method| { id: method.id, name: method.name } end end def js_environment config = ::Spree::Config store = Spree::Store.current templates = Hash[ Rails.application.assets.each_logical_path. select { |file| file.end_with?('html') }. map do |file| path = digest_assets? ? 
File.join('/assets', Rails.application.assets[file].digest_path) : asset_path(file) [file, path] end ] { env: Rails.env, config: { site_name: store.seo_title || store.name, logo: asset_path(config.logo), locale: I18n.locale, currency: Money::Currency.table[current_currency.downcase.to_sym], supported_locales: supported_locales, default_country_id: config.default_country_id, payment_methods: payment_methods, image_sizes: Spree::Image.attachment_definitions[:attachment][:styles].keys, product_page_size: Spree::Config.products_per_page }, translations: current_translations, templates: templates } end def supported_locales if Object.const_defined?("SpreeI18n") SpreeI18n::Config.supported_locales else # Use Default locale, and do not provide a front end selector. [] end end ## # Get relevant translations for front end. For both a simple, and # "Chainable" i18n Backend, which is used by spree i18n. def current_translations I18n.backend.load_translations if I18n.backend.class == I18n::Backend::Simple @translations ||= I18n.backend.send(:translations) else @translations ||= I18n.backend.backends.last.send(:translations) end # Return only sprangular keys for js environment @translations[I18n.locale][:sprangular] end def cached_templates Sprangular::Engine.config.cached_paths.inject({}) do |files, dir| cached_templates_for_dir(files, dir) end end def cached_templates_for_dir(files, dir) root = Sprangular::Engine.root files = Dir[root + "app/assets/templates/#{dir}/**"].inject(files) do |hash, path| asset_path = asset_path path.gsub(root.to_s + "/app/assets/templates/", "") local_path = "app/assets/templates/" + asset_path hash[asset_path.gsub(/.slim$/, '')] = Tilt.new(path).render.html_safe if !File.exists?(local_path) hash end Dir["app/assets/templates/#{dir}/**"].inject(files) do |hash, path| sprockets_path = path.gsub("app/assets/templates/", "") asset_path = asset_path(sprockets_path). gsub(/^\/app\/assets\/templates/, '/assets'). 
gsub(/.slim$/, '') hash[asset_path] = Rails.application.assets.find_asset(sprockets_path).body.html_safe hash end end end end
class UpdateCocoaPodFromGithubJob def run github_updater = GithubUpdater.new CocoaPod.all.each do |cocoa_pod| puts "sync github #{cocoa_pod.name}" github_updater.update cocoa_pod end end end Use batch find in job. class UpdateCocoaPodFromGithubJob def run github_updater = GithubUpdater.new CocoaPod.all.find_each do |cocoa_pod| puts "sync github #{cocoa_pod.name}" github_updater.update cocoa_pod end end end
amqp requestor module Amqp class UriDereferenceRequestor < Requestor end end
class ChargebackRateDetailMeasure < ApplicationRecord serialize :units, Array serialize :units_display, Array validates :name, :presence => true, :length => {:maximum => 100} validates :step, :presence => true, :numericality => {:greater_than => 0} validates :units, :presence => true, :length => {:minimum => 2} validates :units_display, :presence => true, :length => {:minimum => 2} validate :units_same_length def measures Hash[units_display.zip(units)] end def adjust(from_unit, to_unit) return 1 if from_unit == to_unit jumps = units.index(to_unit) - units.index(from_unit) BigDecimal.new(step)**jumps end private def units_same_length unless (units.count == units_display.count) errors.add("Units Problem", "Units_display length diferent that the units length") end end def self.seed fixture_file_measure = File.join(FIXTURE_DIR, "chargeback_rates_measures.yml") if File.exist?(fixture_file_measure) fixture = YAML.load_file(fixture_file_measure) fixture.each do |cbr| rec = ChargebackRateDetailMeasure.find_by(:name => cbr[:name]) if rec.nil? 
_log.info("Creating [#{cbr[:name]}] with units=[#{cbr[:units]}]") rec = ChargebackRateDetailMeasure.create(cbr) else fixture_mtime = File.mtime(fixture_file_measure).utc if fixture_mtime > rec.created_at _log.info("Updating [#{cbr[:name]}] with units=[#{cbr[:units]}]") rec.update!(cbr.merge(:created_at => fixture_mtime)) end end end end end end BigDecimal.new deprecated in 1.3.3 which is bundled with Ruby 2.5.0.rc1 class ChargebackRateDetailMeasure < ApplicationRecord serialize :units, Array serialize :units_display, Array validates :name, :presence => true, :length => {:maximum => 100} validates :step, :presence => true, :numericality => {:greater_than => 0} validates :units, :presence => true, :length => {:minimum => 2} validates :units_display, :presence => true, :length => {:minimum => 2} validate :units_same_length def measures Hash[units_display.zip(units)] end def adjust(from_unit, to_unit) return 1 if from_unit == to_unit jumps = units.index(to_unit) - units.index(from_unit) BigDecimal(step)**jumps end private def units_same_length unless (units.count == units_display.count) errors.add("Units Problem", "Units_display length diferent that the units length") end end def self.seed fixture_file_measure = File.join(FIXTURE_DIR, "chargeback_rates_measures.yml") if File.exist?(fixture_file_measure) fixture = YAML.load_file(fixture_file_measure) fixture.each do |cbr| rec = ChargebackRateDetailMeasure.find_by(:name => cbr[:name]) if rec.nil? _log.info("Creating [#{cbr[:name]}] with units=[#{cbr[:units]}]") rec = ChargebackRateDetailMeasure.create(cbr) else fixture_mtime = File.mtime(fixture_file_measure).utc if fixture_mtime > rec.created_at _log.info("Updating [#{cbr[:name]}] with units=[#{cbr[:units]}]") rec.update!(cbr.merge(:created_at => fixture_mtime)) end end end end end end
class ChecklistSourceDataCriteria include Mongoid::Document include Mongoid::Attributes::Dynamic include Mongoid::Timestamps embedded_in :checklist_test field :measure_id, type: String field :source_data_criteria, type: String # this is the name of the source_data_criteria field :recorded_result, type: String field :code, type: String field :attribute_code, type: String field :passed_qrda, type: Boolean field :code_complete, type: Boolean field :attribute_complete, type: Boolean field :result_complete, type: Boolean def validate_criteria self.passed_qrda = false result_completed? attribute_code_matches_valueset? code_matches_valueset? end def checklist_complete? if code.blank? && attribute_code.blank? && recorded_result.blank? nil else code_complete != false && attribute_complete != false && result_complete != false end end def complete? checklist_complete? && passed_qrda end def result_completed? if recorded_result self.result_complete = recorded_result == '' ? false : true end end def attribute_code_matches_valueset? # validate if an attribute_code is required and is correct if attribute_code measure = Measure.find_by(_id: measure_id) criteria = measure.hqmf_document[:data_criteria].select { |key| key == source_data_criteria }.values.first valueset = if criteria[:field_values] [criteria[:field_values].values[0].code_list_id] elsif criteria[:value] [criteria[:value].code_list_id] else [criteria.negation_code_list_id] end self.attribute_complete = code_in_valuesets(valueset, attribute_code) end end def code_matches_valueset? # validate if an code is required and is correct if code valuesets = get_all_valuesets_for_dc(measure_id) self.code_complete = code_in_valuesets(valuesets, code) end end def printable_name measure = Measure.find_by(_id: measure_id) sdc = measure.hqmf_document[:data_criteria].select { |key, value| key == source_data_criteria }.values.first sdc['status'] ? 
"#{measure.cms_id} - #{sdc['definition']}, #{sdc['status']}" : "#{measure.cms_id} - #{sdc['definition']}" end # goes through all data criteria in a measure to find valuesets that have the same type, status and field values def get_all_valuesets_for_dc(measure_id) measure = Measure.find_by(_id: measure_id) criteria = measure.hqmf_document[:data_criteria].select { |key| key == source_data_criteria }.values.first arr = [] # if criteria is a characteristic, only return a single valueset if criteria['type'] == 'characteristic' arr << criteria.code_list_id else valuesets = measure.all_data_criteria.map { |data_criteria| include_valueset(data_criteria, criteria) } valuesets.uniq.each do |valueset| arr << valueset unless valueset.nil? end end arr end # data_criteria is from the measure defintion, criteria is for the specific checklist test def include_valueset(data_criteria, criteria) include_vset = false if data_criteria.type.to_s == criteria['type'] && data_criteria.status == criteria['status'] # value set should not be included if there is a negation, and the negation doesn't match return nil if criteria.negation && criteria.negation_code_list_id != data_criteria.negation_code_list_id # if the criteria has a field_value, check it is the same as the data_criteria, else return true include_vset = criteria['field_values'] ? 
compare_field_values(data_criteria, criteria) : true end data_criteria.code_list_id if include_vset end # data_criteria is from the measure defintion, criteria is for the specific checklist test def compare_field_values(data_criteria, criteria) include_vset = false if data_criteria.field_values && criteria['field_values'].keys[0] == data_criteria.field_values.keys[0] if data_criteria.field_values.values[0].type == 'CD' if data_criteria.field_values.values[0].code_list_id == criteria['field_values'].values[0]['code_list_id'] include_vset = true end else include_vset = true end end include_vset end # searches an array of valuesets for a code def code_in_valuesets(valuesets, input_code) !HealthDataStandards::SVS::ValueSet.where('concepts.code' => input_code).in(oid: valuesets).empty? end end code/valueset check in manual entry form should also take bundle into account class ChecklistSourceDataCriteria include Mongoid::Document include Mongoid::Attributes::Dynamic include Mongoid::Timestamps embedded_in :checklist_test field :measure_id, type: String field :source_data_criteria, type: String # this is the name of the source_data_criteria field :recorded_result, type: String field :code, type: String field :attribute_code, type: String field :passed_qrda, type: Boolean field :code_complete, type: Boolean field :attribute_complete, type: Boolean field :result_complete, type: Boolean def validate_criteria self.passed_qrda = false result_completed? attribute_code_matches_valueset? code_matches_valueset? end def checklist_complete? if code.blank? && attribute_code.blank? && recorded_result.blank? nil else code_complete != false && attribute_complete != false && result_complete != false end end def complete? checklist_complete? && passed_qrda end def result_completed? if recorded_result self.result_complete = recorded_result == '' ? false : true end end def attribute_code_matches_valueset? 
# validate if an attribute_code is required and is correct if attribute_code measure = Measure.find_by(_id: measure_id) criteria = measure.hqmf_document[:data_criteria].select { |key| key == source_data_criteria }.values.first valueset = if criteria[:field_values] [criteria[:field_values].values[0].code_list_id] elsif criteria[:value] [criteria[:value].code_list_id] else [criteria.negation_code_list_id] end self.attribute_complete = code_in_valuesets(valueset, attribute_code, measure.bundle_id) end end def code_matches_valueset? # validate if an code is required and is correct if code valuesets = get_all_valuesets_for_dc(measure_id) self.code_complete = code_in_valuesets(valuesets, code, Measure.find_by(_id: measure_id).bundle_id) end end def printable_name measure = Measure.find_by(_id: measure_id) sdc = measure.hqmf_document[:data_criteria].select { |key, value| key == source_data_criteria }.values.first sdc['status'] ? "#{measure.cms_id} - #{sdc['definition']}, #{sdc['status']}" : "#{measure.cms_id} - #{sdc['definition']}" end # goes through all data criteria in a measure to find valuesets that have the same type, status and field values def get_all_valuesets_for_dc(measure_id) measure = Measure.find_by(_id: measure_id) criteria = measure.hqmf_document[:data_criteria].select { |key| key == source_data_criteria }.values.first arr = [] # if criteria is a characteristic, only return a single valueset if criteria['type'] == 'characteristic' arr << criteria.code_list_id else valuesets = measure.all_data_criteria.map { |data_criteria| include_valueset(data_criteria, criteria) } valuesets.uniq.each do |valueset| arr << valueset unless valueset.nil? 
end end arr end # data_criteria is from the measure defintion, criteria is for the specific checklist test def include_valueset(data_criteria, criteria) include_vset = false if data_criteria.type.to_s == criteria['type'] && data_criteria.status == criteria['status'] # value set should not be included if there is a negation, and the negation doesn't match return nil if criteria.negation && criteria.negation_code_list_id != data_criteria.negation_code_list_id # if the criteria has a field_value, check it is the same as the data_criteria, else return true include_vset = criteria['field_values'] ? compare_field_values(data_criteria, criteria) : true end data_criteria.code_list_id if include_vset end # data_criteria is from the measure defintion, criteria is for the specific checklist test def compare_field_values(data_criteria, criteria) include_vset = false if data_criteria.field_values && criteria['field_values'].keys[0] == data_criteria.field_values.keys[0] if data_criteria.field_values.values[0].type == 'CD' if data_criteria.field_values.values[0].code_list_id == criteria['field_values'].values[0]['code_list_id'] include_vset = true end else include_vset = true end end include_vset end # searches an array of valuesets for a code def code_in_valuesets(valuesets, input_code, bundle_id) !HealthDataStandards::SVS::ValueSet.where('concepts.code' => input_code).in(oid: valuesets, bundle_id: bundle_id).empty? end end
module Setup module NamespaceNamed extend ActiveSupport::Concern include DynamicValidators include CustomTitle included do field :namespace, type: String field :name, type: String validates_presence_of :name validates_uniqueness_of :name, scope: :namespace before_validation do self.namespace = if namespace.nil? '' else namespace.strip end self.name = name.to_s.strip # unless ::User.super_access? # errors.add(:namespace, 'is reserved') if Cenit.reserved_namespaces.include?(namespace.downcase) # end TODO Implements reserved namespaces errors.blank? end after_save do Setup::Optimizer.regist_ns(namespace) end end def scope_title namespace end def ns_slug namespace_ns.slug end def namespace_ns if @namespace_ns.nil? || @namespace_ns.name != namespace @namespace_ns = Setup::Namespace.find_or_create_by(name: namespace) end @namespace_ns end def namespace_ns=(namespace_ns) @namespace_ns = namespace_ns self.namespace = namespace_ns.name if namespace != namespace_ns.name end module ClassMethods def namespace_enum (Setup::Namespace.all.collect(&:name) + all.distinct(:namespace).flatten).uniq.sort end end end end Update | Default namespace label for build-in types module Setup module NamespaceNamed extend ActiveSupport::Concern include DynamicValidators include CustomTitle included do build_in_data_type.and_polymorphic(label: '{{namespace}} | {{name}}') field :namespace, type: String field :name, type: String validates_presence_of :name validates_uniqueness_of :name, scope: :namespace before_validation do self.namespace = if namespace.nil? '' else namespace.strip end self.name = name.to_s.strip # unless ::User.super_access? # errors.add(:namespace, 'is reserved') if Cenit.reserved_namespaces.include?(namespace.downcase) # end TODO Implements reserved namespaces errors.blank? end after_save do Setup::Optimizer.regist_ns(namespace) end end def scope_title namespace end def ns_slug namespace_ns.slug end def namespace_ns if @namespace_ns.nil? 
|| @namespace_ns.name != namespace @namespace_ns = Setup::Namespace.find_or_create_by(name: namespace) end @namespace_ns end def namespace_ns=(namespace_ns) @namespace_ns = namespace_ns self.namespace = namespace_ns.name if namespace != namespace_ns.name end module ClassMethods def namespace_enum (Setup::Namespace.all.collect(&:name) + all.distinct(:namespace).flatten).uniq.sort end end end end
require_dependency "gobierto_participation" module GobiertoParticipation class Process < ApplicationRecord include User::Subscribable include GobiertoCommon::Searchable include GobiertoAttachments::Attachable include GobiertoCommon::ActsAsCollectionContainer algoliasearch_gobierto do attribute :site_id, :updated_at, :title_en, :title_es, :title_ca, :body_en, :body_es, :body_ca searchableAttributes ['title_en', 'title_es', 'title_ca', 'body_en', 'body_es', 'body_ca'] attributesForFaceting [:site_id] add_attribute :resource_path, :class_name end translates :title, :body, :information_text belongs_to :site belongs_to :issue has_many :stages, -> { order(stage_type: :asc) }, dependent: :destroy, class_name: 'GobiertoParticipation::ProcessStage', autosave: true has_many :polls has_many :contribution_containers, dependent: :destroy, class_name: "GobiertoParticipation::ContributionContainer" enum visibility_level: { draft: 0, active: 1 } enum process_type: { process: 0, group_process: 1 } validates :site, :title, :body, presence: true validates :slug, uniqueness: { scope: :site } validates_associated :stages scope :sorted, -> { order(id: :desc) } accepts_nested_attributes_for :stages after_create :create_collections def self.open ids = GobiertoParticipation::Process.select(&:open?).map(&:id) where(id: ids) end def to_s title end def polls_stage? stages.exists?(stage_type: ProcessStage.stage_types[:polls]) end def information_stage? stages.exists?(stage_type: ProcessStage.stage_types[:information]) end def pages_collection GobiertoCommon::Collection.find_by(container: self, item_type: 'GobiertoCms::Page') end def events_collection GobiertoCommon::Collection.find_by(container: self, item_type: 'GobiertoCalendars::Event') end def attachments_collection GobiertoCommon::Collection.find_by(container: self, item_type: 'GobiertoAttachments::Attachment') end def current_stage if open? process_stages = stages.where("starts >= ? 
AND ends <= ?", Time.zone.now, Time.zone.now) process_stages.first.to_s end end def open? if stages.any? Time.zone.now.between?(stages.last.starts, stages.last.ends) else false end end private def create_collections # Events site.collections.create! container: self, item_type: 'GobiertoCalendars::Event', slug: "calendar-#{self.slug}", title: self.title # Attachments site.collections.create! container: self, item_type: 'GobiertoAttachments::Attachment', slug: "attachment-#{self.slug}", title: self.title # News / Pages site.collections.create! container: self, item_type: 'GobiertoCms::Page', slug: "news-#{self.slug}", title: self.title end end end Modify map with pluck require_dependency "gobierto_participation" module GobiertoParticipation class Process < ApplicationRecord include User::Subscribable include GobiertoCommon::Searchable include GobiertoAttachments::Attachable include GobiertoCommon::ActsAsCollectionContainer algoliasearch_gobierto do attribute :site_id, :updated_at, :title_en, :title_es, :title_ca, :body_en, :body_es, :body_ca searchableAttributes ['title_en', 'title_es', 'title_ca', 'body_en', 'body_es', 'body_ca'] attributesForFaceting [:site_id] add_attribute :resource_path, :class_name end translates :title, :body, :information_text belongs_to :site belongs_to :issue has_many :stages, -> { order(stage_type: :asc) }, dependent: :destroy, class_name: 'GobiertoParticipation::ProcessStage', autosave: true has_many :polls has_many :contribution_containers, dependent: :destroy, class_name: "GobiertoParticipation::ContributionContainer" enum visibility_level: { draft: 0, active: 1 } enum process_type: { process: 0, group_process: 1 } validates :site, :title, :body, presence: true validates :slug, uniqueness: { scope: :site } validates_associated :stages scope :sorted, -> { order(id: :desc) } accepts_nested_attributes_for :stages after_create :create_collections def self.open ids = GobiertoParticipation::Process.select(&:open?).pluck(:id) where(id: ids) 
end def to_s title end def polls_stage? stages.exists?(stage_type: ProcessStage.stage_types[:polls]) end def information_stage? stages.exists?(stage_type: ProcessStage.stage_types[:information]) end def pages_collection GobiertoCommon::Collection.find_by(container: self, item_type: 'GobiertoCms::Page') end def events_collection GobiertoCommon::Collection.find_by(container: self, item_type: 'GobiertoCalendars::Event') end def attachments_collection GobiertoCommon::Collection.find_by(container: self, item_type: 'GobiertoAttachments::Attachment') end def current_stage if open? process_stages = stages.where("starts >= ? AND ends <= ?", Time.zone.now, Time.zone.now) process_stages.first.to_s end end def open? if stages.any? Time.zone.now.between?(stages.last.starts, stages.last.ends) else false end end private def create_collections # Events site.collections.create! container: self, item_type: 'GobiertoCalendars::Event', slug: "calendar-#{self.slug}", title: self.title # Attachments site.collections.create! container: self, item_type: 'GobiertoAttachments::Attachment', slug: "attachment-#{self.slug}", title: self.title # News / Pages site.collections.create! container: self, item_type: 'GobiertoCms::Page', slug: "news-#{self.slug}", title: self.title end end end
module Protocols::Csv class CsvTransmission include Mongoid::Document field :batch_id, type: String field :file_name, type: String field :submitted_by, type: String belongs_to :carrier, :index => true has_many :csv_transactions, :class_name => "Protocols::Csv::CsvTransaction" def self.find_or_create_transmission(data) transmission = self.where({ :batch_id => data[:batch_id], :file_name => data[:file_name] }).first return transmission if transmission self.create!(data) end end end Fixing missing index. module Protocols::Csv class CsvTransmission include Mongoid::Document field :batch_id, type: String field :file_name, type: String field :submitted_by, type: String index({:batch_id => 1, :file_name => 1}) belongs_to :carrier, :index => true has_many :csv_transactions, :class_name => "Protocols::Csv::CsvTransaction" def self.find_or_create_transmission(data) transmission = self.where({ :batch_id => data[:batch_id], :file_name => data[:file_name] }).first return transmission if transmission self.create!(data) end end end
require_dependency "renalware/patients" module Renalware module Patients class WorryQuery attr_reader :query_params def initialize(query_params) @query_params = query_params @query_params[:s] = "date_time DESC" if @query_params[:s].blank? end def call search .result .includes(patient: { current_modality: [:description] }) .order(created_at: :asc) .page(query_params[:page]) .per(query_params[:per_page]) end def search @search ||= Worry.ransack(query_params) end end end end Resolve Worryboard list N+1 issues require_dependency "renalware/patients" module Renalware module Patients class WorryQuery attr_reader :query_params def initialize(query_params) @query_params = query_params @query_params[:s] = "date_time DESC" if @query_params[:s].blank? end def call search .result .includes(:created_by, patient: { current_modality: [:description] }) .order(created_at: :asc) .page(query_params[:page]) .per(query_params[:per_page]) end def search @search ||= Worry.ransack(query_params) end end end end
add start of portfolio evidence submission module Submission class PortfolioEvidence def logger Rails.logger end # # Combines image, code or documents files given to pdf. # Returns the tempfile that was generated. # # It is the caller's responsibility to delete this tempfile # once the method is finished. # def produce_student_work(files, student) # # Ensure that each file in files has the following attributes: # id, name, filename, type, tempfile # files.each do | file | error!({"error" => "Missing file data for '#{file.name}'"}, 403) if file.id.nil? || file.name.nil? || file.filename.nil? || file.type.nil? || file.tempfile.nil? end # # Confirm subtype categories using filemagic (exception handling # must be done outside multithreaded environment below...) # files.each do | file | logger.debug "checking file type for #{file.tempfile.path}" fm = FileMagic.new(FileMagic::MAGIC_MIME) mime = fm.file file.tempfile.path logger.debug "#{file.tempfile.path} is mime type: #{mime}" case file.type when 'image' accept = ["image/png", "image/gif", "image/bmp", "image/tiff", "image/jpeg"] when 'code' accept = ["text/x-pascal", "text/x-c", "text/x-c++", "text/plain"] when 'document' accept = [ # -- one day"application/vnd.openxmlformats-officedocument.wordprocessingml.document", # --"application/msword", "application/pdf" ] else error!({"error" => "Unknown type '#{file.type}' provided for '#{file.name}'"}, 403) end if not mime.start_with?(*accept) error!({"error" => "'#{file.name}' was not an #{file.type} file type"}, 403) end end # # Create student submission folder # Dir.mkdir(File.join( Dir.tmpdir, ".foo") # # Create cover pages for submission # files.each_with_index.map do | file, idx | # # Create dual output documents (coverpage and document itself) # coverp_file = Tempfile.new(["#{idx}.cover", ".pdf"]) output_file = Tempfile.new(["#{idx}.data", ".pdf"]) # # Make file coverpage # coverpage_data = { "Filename" => "<pre>#{file.filename}</pre>", "Document Type" => 
file.type.capitalize, "Upload Timestamp" => DateTime.now.strftime("%F %T"), "File Number" => "#{idx+1} of #{files.length}"} # Add student details if exists if not student.nil? coverpage_data["Student Name"] = student.name coverpage_data["Student ID"] = student.username end coverpage_body = "<h1>#{file.name}</h1>\n<dl>" coverpage_data.each do | key, value | coverpage_body << "<dt>#{key}</dt><dd>#{value}</dd>\n" end coverpage_body << "</dl><footer>Generated with Doubtfire</footer>" logger.debug "pre PDFKit" kit = PDFKit.new(coverpage_body, :page_size => 'A4', :margin_top => "30mm", :margin_right => "30mm", :margin_bottom => "30mm", :margin_left => "30mm") kit.stylesheets << "vendor/assets/stylesheets/doubtfire-coverpage.css" logger.debug "pre kit.to_file #{coverp_file.path}" kit.to_file coverp_file.path logger.debug "post PDFKit call" # # File -> PDF # case file.type # # img -> pdf # when 'image' img = Magick::Image.read(file.tempfile.path).first # resize the image if its too big (e.g., taken with a digital camera) if img.columns > 1000 || img.rows > 500 # resize such that it's 600px in width scale = 1000.0 / img.columns img = img.resize(scale) end img.write("pdf:#{output_file.path}") { self.quality = 75 } # # code -> html -> pdf # when 'code' # decide language syntax highlighting case File.extname(file.filename) when '.cpp', '.cs' lang = :cplusplus when '.c', '.h' lang = :c when '.java' lang = :java when '.pas' lang = :delphi else # should follow basic C syntax (if, else etc...) 
lang = :c end # code -> HTML html_body = CodeRay.scan_file(file.tempfile, lang).html(:wrap => :div, :tab_width => 2, :css => :class, :line_numbers => :table, :line_number_anchors => false) # HTML -> PDF kit = PDFKit.new(html_body, :page_size => 'A4', :header_left => file.filename, :header_right => "[page]/[toPage]", :margin_top => "10mm", :margin_right => "5mm", :margin_bottom => "5mm", :margin_left => "5mm") kit.stylesheets << "vendor/assets/stylesheets/coderay.css" kit.to_file output_file.path # # document -> pdf # when 'document' # if uploaded a PDF, then directly pass in if File.extname(file.filename) == '.pdf' # copy the file over (note we need to copy it into # output_file as file will be removed at the end of this block) FileUtils.cp file.tempfile.path, output_file.path else # TODO: convert word -> pdf error!({"error" => "Currently, word documents are not supported. Convert the document to PDF first."}, 403) end end # Insert (at appropriate index) the converted PDF and its coverpage to pdf_paths array (lock first!)... pdf_paths_mutex.synchronize do pdf_paths[idx] = [coverp_file.path, output_file.path] end end end.each { | thread | thread.join } pdf_paths = pdf_paths.flatten # # Aggregate each of the output PDFs # didCompile = system "pdftk #{pdf_paths.join ' '} cat output #{final_pdf.path}" if !didCompile error!({"error" => "PDF failed to compile. Please try again."}, 403) end # We don't need any of those pdf_paths files anymore after compiling the final_pdf! pdf_paths.each { | path | if File::exist?(path) then FileUtils::rm path end } files.each { | file | if File::exist?(file.tempfile.path) then file.tempfile.unlink end } # We need to do something with this... so we'll let the caller handle that. final_pdf end end end
# frozen_string_literal: true

module TimedTransitions
  # Applies automated, time-based state transitions to claims.
  #
  # Each eligible claim state is mapped in @@timed_transition_specifications to a
  # Specification holding the staleness period (in weeks) and the transition
  # method to invoke (:archive or :destroy_claim).
  #
  # Fix: removed the private `is_dummy?` method. Rubocop (Naming/PredicateName)
  # objected to the `is_` prefix, and as the method was a trivial wrapper around
  # `@dummy` it is removed entirely; callers read `@dummy` directly.
  class Transitioner
    attr_accessor :success

    @@timed_transition_specifications = {
      draft: Specification.new(:draft, Settings.timed_transition_stale_weeks, :destroy_claim),
      authorised: Specification.new(:authorised, Settings.timed_transition_stale_weeks, :archive),
      part_authorised: Specification.new(:part_authorised, Settings.timed_transition_stale_weeks, :archive),
      refused: Specification.new(:refused, Settings.timed_transition_stale_weeks, :archive),
      rejected: Specification.new(:rejected, Settings.timed_transition_stale_weeks, :archive),
      archived_pending_delete: Specification.new(:archived_pending_delete, Settings.timed_transition_pending_weeks, :destroy_claim)
    }

    # IDs of claims in a candidate state that have not been updated within the
    # configured stale period.
    def self.candidate_claims_ids
      Claim::BaseClaim.where(state: candidate_states)
                      .where('updated_at < ?', Settings.timed_transition_stale_weeks.weeks.ago).pluck(:id)
    end

    # IDs of claims soft-deleted longer ago than the configured retention window.
    def self.softly_deleted_ids
      Claim::BaseClaim.where('deleted_at < ?', Settings.timed_transition_soft_delete_weeks.weeks.ago).pluck(:id)
    end

    # States that qualify for a timed transition.
    def self.candidate_states
      @@timed_transition_specifications.keys
    end

    # @param claim [Claim::BaseClaim] the claim to transition
    # @param dummy [Boolean] when true, only log what would happen; do not
    #   mutate the claim
    def initialize(claim, dummy = false)
      @claim = claim
      @dummy = dummy
    end

    # Soft-deleted claims are destroyed; all others are checked against their
    # state's transition specification.
    def run
      @claim.softly_deleted? ? destroy_claim : process_stale_claim
    end

    def success?
      !success.nil?
    end

    private

    # Dummy runs log at :debug so real transitions stand out in the logs.
    def log_level
      @dummy ? :debug : :info
    end

    # Transition the claim only when its last state change (if any) is older
    # than the period its specification allows.
    def process_stale_claim
      specification = @@timed_transition_specifications[@claim.state.to_sym]
      last_transition = @claim.last_state_transition_time
      return unless last_transition.nil? || last_transition < specification.period_in_weeks.weeks.ago
      send(specification.method)
    end

    # Fire the archive event appropriate to the claim type, then record and
    # log whether the claim reached the expected archived state.
    def archive
      values = @claim.hardship? ? hardship_archive_checks : archive_checks
      @claim.send(values[:event], reason_code: ['timed_transition']) unless @dummy
      @claim.reload # not sure if needed
      log(log_level, action: 'archive', message: values[:message], succeeded: @claim.send(values[:check]))
      self.success = @claim.send(values[:check])
    rescue StandardError => e
      log(:error,
          action: 'archive',
          message: values[:error_message],
          succeeded: @claim.reload.send(values[:check]),
          error: e.message)
    end

    # Event/message/check set used for hardship claims.
    def hardship_archive_checks
      {
        event: :archive_pending_review!,
        message: 'Archiving claim pending review',
        error_message: 'Archiving claim pending review failed!',
        check: :archived_pending_review?
      }
    end

    # Event/message/check set used for all other claims.
    def archive_checks
      {
        event: :archive_pending_delete!,
        message: 'Archiving claim',
        error_message: 'Archiving claim failed!',
        check: :archived_pending_delete?
      }
    end

    # Export the claim to MI data, then destroy it (real runs only), logging
    # the outcome either way.
    def destroy_claim
      Stats::MIData.import(@claim) && @claim.destroy unless @dummy
      log(log_level, action: 'destroy', message: 'Destroying soft-deleted claim', succeeded: @claim.destroyed?)
      self.success = @claim.destroyed?
    rescue StandardError => e
      log(:error,
          action: 'destroy',
          message: 'Destroying soft-deleted claim failed!',
          succeeded: @claim.destroyed?,
          error: e.message)
    end

    # Structured log entry with the claim context attached.
    def log(level = :info, action:, message:, succeeded:, error: nil)
      LogStuff.send(
        level.to_sym,
        'TimedTransitions::Transitioner',
        action: action,
        claim_id: @claim.id,
        claim_state: @claim.state,
        softly_deleted_on: @claim.deleted_at,
        valid_until: @claim.valid_until,
        dummy_run: @dummy,
        error: error,
        succeeded: succeeded
      ) do
        message
      end
    end
  end
end

# Remove `is_dummy?` method
#
# Rubocop was objecting to the method name `is_dummy?`, suggesting `dummy?`
# instead. As it is just a wrapper for `@dummy` it can be removed completely.
# frozen_string_literal: true

module TimedTransitions
  # Moves stale claims into their next lifecycle state (archiving them, or
  # destroying them outright) and removes claims soft-deleted long ago.
  #
  # The per-state rules live in @@timed_transition_specifications: each entry
  # pairs a claim state with a Specification carrying the staleness period in
  # weeks and the method to call (:archive or :destroy_claim).
  class Transitioner
    attr_accessor :success

    @@timed_transition_specifications = {
      draft: Specification.new(:draft, Settings.timed_transition_stale_weeks, :destroy_claim),
      authorised: Specification.new(:authorised, Settings.timed_transition_stale_weeks, :archive),
      part_authorised: Specification.new(:part_authorised, Settings.timed_transition_stale_weeks, :archive),
      refused: Specification.new(:refused, Settings.timed_transition_stale_weeks, :archive),
      rejected: Specification.new(:rejected, Settings.timed_transition_stale_weeks, :archive),
      archived_pending_delete: Specification.new(:archived_pending_delete, Settings.timed_transition_pending_weeks, :destroy_claim)
    }

    # IDs of claims in a candidate state whose last update predates the
    # configured stale cutoff.
    def self.candidate_claims_ids
      stale_cutoff = Settings.timed_transition_stale_weeks.weeks.ago
      Claim::BaseClaim.where(state: candidate_states)
                      .where('updated_at < ?', stale_cutoff).pluck(:id)
    end

    # IDs of claims soft-deleted before the configured retention cutoff.
    def self.softly_deleted_ids
      deletion_cutoff = Settings.timed_transition_soft_delete_weeks.weeks.ago
      Claim::BaseClaim.where('deleted_at < ?', deletion_cutoff).pluck(:id)
    end

    # States eligible for a timed transition.
    def self.candidate_states
      @@timed_transition_specifications.keys
    end

    # @param claim [Claim::BaseClaim] the claim to transition
    # @param dummy [Boolean] when true, log only — never mutate the claim
    def initialize(claim, dummy = false)
      @claim = claim
      @dummy = dummy
    end

    # Soft-deleted claims are destroyed; everything else is checked against
    # its state's transition specification.
    def run
      if @claim.softly_deleted?
        destroy_claim
      else
        process_stale_claim
      end
    end

    def success?
      !success.nil?
    end

    private

    # Dummy runs are logged quietly at :debug; real runs at :info.
    def log_level
      if @dummy
        :debug
      else
        :info
      end
    end

    # Fire the specified transition only when the claim has either never
    # transitioned or last transitioned before the allowed period.
    def process_stale_claim
      spec = @@timed_transition_specifications[@claim.state.to_sym]
      transitioned_at = @claim.last_state_transition_time
      stale = transitioned_at.nil? || transitioned_at < spec.period_in_weeks.weeks.ago
      send(spec.method) if stale
    end

    # Trigger the archive event matching the claim type, then record and log
    # whether the claim ended up in the expected archived state.
    def archive
      checks = if @claim.hardship?
                 hardship_archive_checks
               else
                 archive_checks
               end
      @claim.send(checks[:event], reason_code: ['timed_transition']) unless @dummy
      @claim.reload # not sure if needed
      log(log_level, action: 'archive', message: checks[:message], succeeded: @claim.send(checks[:check]))
      self.success = @claim.send(checks[:check])
    rescue StandardError => e
      log(:error,
          action: 'archive',
          message: checks[:error_message],
          succeeded: @claim.reload.send(checks[:check]),
          error: e.message)
    end

    # Event/message/check bundle used for hardship claims.
    def hardship_archive_checks
      {
        event: :archive_pending_review!,
        message: 'Archiving claim pending review',
        error_message: 'Archiving claim pending review failed!',
        check: :archived_pending_review?
      }
    end

    # Event/message/check bundle used for all other claims.
    def archive_checks
      {
        event: :archive_pending_delete!,
        message: 'Archiving claim',
        error_message: 'Archiving claim failed!',
        check: :archived_pending_delete?
      }
    end

    # Export the claim to MI data and destroy it (skipped on dummy runs),
    # logging the outcome either way.
    def destroy_claim
      Stats::MIData.import(@claim) && @claim.destroy unless @dummy
      log(log_level, action: 'destroy', message: 'Destroying soft-deleted claim', succeeded: @claim.destroyed?)
      self.success = @claim.destroyed?
    rescue StandardError => e
      log(:error,
          action: 'destroy',
          message: 'Destroying soft-deleted claim failed!',
          succeeded: @claim.destroyed?,
          error: e.message)
    end

    # Emit a structured log entry carrying the full claim context.
    def log(level = :info, action:, message:, succeeded:, error: nil)
      context = {
        action: action,
        claim_id: @claim.id,
        claim_state: @claim.state,
        softly_deleted_on: @claim.deleted_at,
        valid_until: @claim.valid_until,
        dummy_run: @dummy,
        error: error,
        succeeded: succeeded
      }
      LogStuff.send(level.to_sym, 'TimedTransitions::Transitioner', **context) { message }
    end
  end
end