repo stringlengths 5 92 | file_url stringlengths 80 287 | file_path stringlengths 5 197 | content stringlengths 0 32.8k | language stringclasses 1
value | license stringclasses 7
values | commit_sha stringlengths 40 40 | retrieved_at stringdate 2026-01-04 15:37:27 2026-01-04 17:58:21 | truncated bool 2
classes |
|---|---|---|---|---|---|---|---|---|
github-linguist/linguist | https://github.com/github-linguist/linguist/blob/a7e40d31e271f6747fa1234df37b5fff3e6e2406/samples/Ruby/foo.rb | samples/Ruby/foo.rb | module Foo
end
| ruby | MIT | a7e40d31e271f6747fa1234df37b5fff3e6e2406 | 2026-01-04T15:37:32.851738Z | false |
github-linguist/linguist | https://github.com/github-linguist/linguist/blob/a7e40d31e271f6747fa1234df37b5fff3e6e2406/samples/Ruby/inflector.rb | samples/Ruby/inflector.rb | # encoding: utf-8
require 'active_support/inflector/inflections'
module ActiveSupport
# The Inflector transforms words from singular to plural, class names to table names, modularized class names to ones without,
# and class names to foreign keys. The default inflections for pluralization, singularization, and uncountable words are kept
# in inflections.rb.
#
# The Rails core team has stated patches for the inflections library will not be accepted
# in order to avoid breaking legacy applications which may be relying on errant inflections.
# If you discover an incorrect inflection and require it for your application, you'll need
# to correct it yourself (explained below).
module Inflector
extend self
# Returns the plural form of the word in the string.
#
# Delegates to +apply_inflections+ with the plural rule set registered on
# the current Inflections instance.
#
# "post".pluralize # => "posts"
# "octopus".pluralize # => "octopi"
# "sheep".pluralize # => "sheep"
# "words".pluralize # => "words"
# "CamelOctopus".pluralize # => "CamelOctopi"
def pluralize(word)
apply_inflections(word, inflections.plurals)
end
# The reverse of +pluralize+, returns the singular form of a word in a string.
#
# Delegates to +apply_inflections+ with the singular rule set registered on
# the current Inflections instance.
#
# "posts".singularize # => "post"
# "octopi".singularize # => "octopus"
# "sheep".singularize # => "sheep"
# "word".singularize # => "word"
# "CamelOctopi".singularize # => "CamelOctopus"
def singularize(word)
apply_inflections(word, inflections.singulars)
end
# By default, +camelize+ converts strings to UpperCamelCase. If the argument to +camelize+
# is set to <tt>:lower</tt> then +camelize+ produces lowerCamelCase.
#
# +camelize+ will also convert '/' to '::' which is useful for converting paths to namespaces.
#
# "active_model".camelize # => "ActiveModel"
# "active_model".camelize(:lower) # => "activeModel"
# "active_model/errors".camelize # => "ActiveModel::Errors"
# "active_model/errors".camelize(:lower) # => "activeModel::Errors"
#
# As a rule of thumb you can think of +camelize+ as the inverse of +underscore+,
# though there are cases where that does not hold:
#
# "SSLError".underscore.camelize # => "SslError"
def camelize(term, uppercase_first_letter = true)
string = term.to_s
if uppercase_first_letter
# Capitalize (or acronym-replace) the leading run of lowercase letters/digits.
string = string.sub(/^[a-z\d]*/) { inflections.acronyms[$&] || $&.capitalize }
else
# Downcase the first character, or a whole leading acronym when it is
# followed by a word boundary, an uppercase letter or an underscore.
string = string.sub(/^(?:#{inflections.acronym_regex}(?=\b|[A-Z_])|\w)/) { $&.downcase }
end
# Capitalize (or acronym-replace) each segment following '_' or '/', keeping
# captured '/' separators, then turn path separators into namespace ones.
string.gsub(/(?:_|(\/))([a-z\d]*)/i) { "#{$1}#{inflections.acronyms[$2] || $2.capitalize}" }.gsub('/', '::')
end
# Makes an underscored, lowercase form from the expression in the string.
#
# Changes '::' to '/' to convert namespaces to paths.
#
# "ActiveModel".underscore # => "active_model"
# "ActiveModel::Errors".underscore # => "active_model/errors"
#
# As a rule of thumb you can think of +underscore+ as the inverse of +camelize+,
# though there are cases where that does not hold:
#
# "SSLError".underscore.camelize # => "SslError"
def underscore(camel_cased_word)
word = camel_cased_word.to_s.dup
word.gsub!('::', '/')
# Split a registered acronym off from a preceding letter/digit with '_'
# ($1 is nil at the start of the string, so no leading '_' is inserted).
word.gsub!(/(?:([A-Za-z\d])|^)(#{inflections.acronym_regex})(?=\b|[^a-z])/) { "#{$1}#{$1 && '_'}#{$2.downcase}" }
# Break a run of capitals before a normal word: "HTMLParser" -> "HTML_Parser".
word.gsub!(/([A-Z\d]+)([A-Z][a-z])/,'\1_\2')
# Separate lower-to-upper transitions: "fooBar" -> "foo_Bar".
word.gsub!(/([a-z\d])([A-Z])/,'\1_\2')
word.tr!("-", "_")
word.downcase!
# The bang methods above return nil when nothing changed, so the mutated
# string must be returned explicitly.
word
end
# Capitalizes the first word and turns underscores into spaces and strips a
# trailing "_id", if any. Like +titleize+, this is meant for creating pretty output.
#
# "employee_salary" # => "Employee salary"
# "author_id" # => "Author"
def humanize(lower_case_and_underscored_word)
result = lower_case_and_underscored_word.to_s.dup
# Apply at most one human-readable replacement rule: sub! returns nil when
# the rule does not match, so iteration stops at the first rule that hits.
inflections.humans.each { |(rule, replacement)| break if result.sub!(rule, replacement) }
result.gsub!(/_id$/, "")
result.tr!('_', ' ')
# Downcase every word unless it is a registered acronym, then upcase the
# very first character of the result.
result.gsub(/([a-z\d]*)/i) { |match|
"#{inflections.acronyms[match] || match.downcase}"
}.gsub(/^\w/) { $&.upcase }
end
# Capitalizes all the words and replaces some characters in the string to create
# a nicer looking title. +titleize+ is meant for creating pretty output. It is not
# used in the Rails internals.
#
# +titleize+ is also aliased as +titlecase+.
#
# "man from the boondocks".titleize # => "Man From The Boondocks"
# "x-men: the last stand".titleize # => "X Men: The Last Stand"
# "TheManWithoutAPast".titleize # => "The Man Without A Past"
# "raiders_of_the_lost_ark".titleize # => "Raiders Of The Lost Ark"
def titleize(word)
# The negative lookbehind keeps the letter right after an apostrophe or
# backtick lowercase (e.g. the "t" in "don't").
humanize(underscore(word)).gsub(/\b(?<!['’`])[a-z]/) { $&.capitalize }
end
# Create the name of a table like Rails does for models to table names. This method
# uses the +pluralize+ method on the last word in the string.
#
# Equivalent to +underscore+ followed by +pluralize+.
#
# "RawScaledScorer".tableize # => "raw_scaled_scorers"
# "egg_and_ham".tableize # => "egg_and_hams"
# "fancyCategory".tableize # => "fancy_categories"
def tableize(class_name)
pluralize(underscore(class_name))
end
# Create a class name from a plural table name like Rails does for table names to models.
# Note that this returns a string and not a Class. (To convert to an actual class
# follow +classify+ with +constantize+.)
#
# "egg_and_hams".classify # => "EggAndHam"
# "posts".classify # => "Post"
#
# Singular names are not handled correctly:
# "business".classify # => "Busines"
def classify(table_name)
# strip out any leading schema name (everything up to and including the
# last '.' is discarded before singularizing and camelizing)
camelize(singularize(table_name.to_s.sub(/.*\./, '')))
end
# Replaces underscores with dashes in the string.
#
# Returns a new string; the argument is not mutated.
#
# "puni_puni".dasherize # => "puni-puni"
def dasherize(underscored_word)
underscored_word.tr('_', '-')
end
# Removes the module part from the expression in the string:
#
# "ActiveRecord::CoreExtensions::String::Inflections".demodulize # => "Inflections"
# "Inflections".demodulize # => "Inflections"
#
# See also +deconstantize+.
def demodulize(path)
  text = path.to_s
  separator = text.rindex('::')
  # With no '::' present the whole string is already the demodulized name.
  separator ? text[(separator + 2)..-1] : text
end
# Removes the rightmost segment from the constant expression in the string:
#
# "Net::HTTP".deconstantize # => "Net"
# "::Net::HTTP".deconstantize # => "::Net"
# "String".deconstantize # => ""
# "::String".deconstantize # => ""
# "".deconstantize # => ""
#
# See also +demodulize+.
def deconstantize(path)
  # Keep everything before the last '::'; when there is none, take a
  # zero-length prefix. (Implementation based on facets' Module#spacename.)
  cut_at = path.rindex('::') || 0
  path.to_s[0, cut_at]
end
# Creates a foreign key name from a class name.
# +separate_class_name_and_id_with_underscore+ sets whether
# the method should put '_' between the name and 'id'.
#
# "Message".foreign_key # => "message_id"
# "Message".foreign_key(false) # => "messageid"
# "Admin::Post".foreign_key # => "post_id"
def foreign_key(class_name, separate_class_name_and_id_with_underscore = true)
  suffix = separate_class_name_and_id_with_underscore ? "_id" : "id"
  underscore(demodulize(class_name)) + suffix
end
# Tries to find a constant with the name specified in the argument string:
#
# "Module".constantize # => Module
# "Test::Unit".constantize # => Test::Unit
#
# The name is assumed to be the one of a top-level constant, no matter whether
# it starts with "::" or not. No lexical context is taken into account:
#
# C = 'outside'
# module M
# C = 'inside'
# C # => 'inside'
# "C".constantize # => 'outside', same as ::C
# end
#
# NameError is raised when the name is not in CamelCase or the constant is
# unknown.
def constantize(camel_cased_word)
names = camel_cased_word.split('::')
# A leading '::' produces an empty first segment; drop it so the lookup
# starts at Object either way.
names.shift if names.empty? || names.first.empty?
names.inject(Object) do |constant, name|
if constant == Object
constant.const_get(name)
else
candidate = constant.const_get(name)
# The candidate is valid when it is defined directly on this constant.
next candidate if constant.const_defined?(name, false)
# It is also valid when no top-level constant exists that the lookup
# could have fallen back to instead.
next candidate unless Object.const_defined?(name)
# Go down the ancestors to check if it's owned
# directly before we reach Object or the end of ancestors.
constant = constant.ancestors.inject do |const, ancestor|
break const if ancestor == Object
break ancestor if ancestor.const_defined?(name, false)
const
end
# owner is in Object, so raise
constant.const_get(name, false)
end
end
end
# Tries to find a constant with the name specified in the argument string:
#
# "Module".safe_constantize # => Module
# "Test::Unit".safe_constantize # => Test::Unit
#
# The name is assumed to be the one of a top-level constant, no matter whether
# it starts with "::" or not. No lexical context is taken into account:
#
# C = 'outside'
# module M
# C = 'inside'
# C # => 'inside'
# "C".safe_constantize # => 'outside', same as ::C
# end
#
# nil is returned when the name is not in CamelCase or the constant (or part of it) is
# unknown.
#
# "blargle".safe_constantize # => nil
# "UnknownModule".safe_constantize # => nil
# "UnknownModule::Foo::Bar".safe_constantize # => nil
#
def safe_constantize(camel_cased_word)
  # A method body can carry rescue clauses directly; the previous
  # begin/end wrapper around the whole body was redundant.
  constantize(camel_cased_word)
rescue NameError => e
  # Swallow the error (returning nil) only when it refers to some part of
  # the requested constant path; unrelated NameErrors are re-raised.
  raise unless e.message =~ /(uninitialized constant|wrong constant name) #{const_regexp(camel_cased_word)}$/ ||
    e.name.to_s == camel_cased_word.to_s
rescue ArgumentError => e
  raise unless e.message =~ /not missing constant #{const_regexp(camel_cased_word)}\!$/
end
# Returns the suffix that should be added to a number to denote the position
# in an ordered sequence such as 1st, 2nd, 3rd, 4th.
#
# ordinal(1) # => "st"
# ordinal(2) # => "nd"
# ordinal(1002) # => "nd"
# ordinal(1003) # => "rd"
# ordinal(-11) # => "th"
# ordinal(-1021) # => "st"
def ordinal(number)
  magnitude = number.to_i.abs
  # 11, 12 and 13 are irregular: they always take "th".
  return "th" if (11..13).include?(magnitude % 100)
  case magnitude % 10
  when 1 then "st"
  when 2 then "nd"
  when 3 then "rd"
  else "th"
  end
end
# Turns a number into an ordinal string used to denote the position in an
# ordered sequence such as 1st, 2nd, 3rd, 4th.
#
# ordinalize(1) # => "1st"
# ordinalize(2) # => "2nd"
# ordinalize(1002) # => "1002nd"
# ordinalize(1003) # => "1003rd"
# ordinalize(-11) # => "-11th"
# ordinalize(-1021) # => "-1021st"
def ordinalize(number)
  number.to_s + ordinal(number)
end
private
# Mount a regular expression that will match part by part of the constant.
# For instance, Foo::Bar::Baz will generate Foo(::Bar(::Baz)?)?
def const_regexp(camel_cased_word) #:nodoc:
  segments = camel_cased_word.split("::")
  pattern = segments.pop
  segments.reverse_each do |segment|
    # A leading "::" yields an empty segment, which must be skipped.
    pattern = "#{segment}(::#{pattern})?" unless segment.empty?
  end
  pattern
end
# Applies inflection rules for +singularize+ and +pluralize+.
#
# apply_inflections("post", inflections.plurals) # => "posts"
# apply_inflections("posts", inflections.singulars) # => "post"
def apply_inflections(word, rules)
  result = word.to_s.dup
  # Leave empty input and uncountable words (matched against the last word,
  # case-insensitively) untouched.
  return result if word.empty? || inflections.uncountables.include?(result.downcase[/\b\w+\Z/])
  # The first rule that matches wins: sub! returns nil when it misses.
  rules.each do |(rule, replacement)|
    break if result.sub!(rule, replacement)
  end
  result
end
end
end
| ruby | MIT | a7e40d31e271f6747fa1234df37b5fff3e6e2406 | 2026-01-04T15:37:32.851738Z | false |
github-linguist/linguist | https://github.com/github-linguist/linguist/blob/a7e40d31e271f6747fa1234df37b5fff3e6e2406/samples/Ruby/racc.rb | samples/Ruby/racc.rb | #
# DO NOT MODIFY!!!!
# This file is automatically generated by Racc 1.4.7
# from Racc grammar file "".
#
require 'racc/parser.rb'
module RJSON
class Parser < Racc::Parser
require 'rjson/handler'
attr_reader :handler
# tokenizer must respond to #next_token (see next_token below); handler
# receives the parse events emitted by the _reduce_* actions.
def initialize tokenizer, handler = Handler.new
@tokenizer = tokenizer
@handler = handler
super()
end
# Racc callback: pulls the next token from the tokenizer.
def next_token
@tokenizer.next_token
end
# Runs the generated parser and returns the handler, which by then has
# received all parse events.
def parse
do_parse
handler
end
##### State transition tables begin ###
racc_action_table = [
9, 33, 9, 11, 13, 16, 19, 22, 9, 7,
23, 1, 9, 11, 13, 16, 19, 29, 30, 7,
21, 1, 9, 11, 13, 16, 19, 31, nil, 7,
21, 1, 23, 7, nil, 1 ]
racc_action_check = [
6, 27, 33, 33, 33, 33, 33, 3, 31, 33,
6, 33, 29, 29, 29, 29, 29, 12, 22, 29,
12, 29, 2, 2, 2, 2, 2, 25, nil, 2,
2, 2, 25, 0, nil, 0 ]
racc_action_pointer = [
24, nil, 20, 7, nil, nil, -2, nil, nil, nil,
nil, nil, 10, nil, nil, nil, nil, nil, nil, nil,
nil, nil, 18, nil, nil, 20, nil, -7, nil, 10,
nil, 6, nil, 0, nil, nil, nil ]
racc_action_default = [
-27, -12, -21, -27, -1, -2, -27, -10, -15, -26,
-8, -22, -27, -23, -17, -16, -24, -20, -18, -25,
-19, -11, -27, -13, -3, -27, -6, -27, -9, -21,
37, -27, -4, -21, -14, -5, -7 ]
racc_goto_table = [
8, 26, 24, 27, 10, 3, 25, 5, 4, 12,
nil, nil, nil, nil, 28, nil, nil, nil, nil, nil,
nil, 32, nil, nil, nil, nil, 35, 34, 27, nil,
nil, 36 ]
racc_goto_check = [
9, 7, 5, 8, 11, 1, 6, 3, 2, 12,
nil, nil, nil, nil, 11, nil, nil, nil, nil, nil,
nil, 5, nil, nil, nil, nil, 7, 9, 8, nil,
nil, 9 ]
racc_goto_pointer = [
nil, 5, 8, 7, nil, -4, 0, -5, -3, -2,
nil, 2, 7, nil, nil ]
racc_goto_default = [
nil, nil, 14, 18, 6, nil, nil, nil, 20, nil,
2, nil, nil, 15, 17 ]
racc_reduce_table = [
0, 0, :racc_error,
1, 14, :_reduce_none,
1, 14, :_reduce_none,
2, 15, :_reduce_none,
3, 15, :_reduce_none,
3, 19, :_reduce_none,
1, 19, :_reduce_none,
3, 20, :_reduce_none,
2, 16, :_reduce_none,
3, 16, :_reduce_none,
1, 23, :_reduce_10,
1, 24, :_reduce_11,
1, 17, :_reduce_12,
1, 18, :_reduce_13,
3, 25, :_reduce_none,
1, 25, :_reduce_none,
1, 22, :_reduce_none,
1, 22, :_reduce_none,
1, 22, :_reduce_none,
1, 26, :_reduce_none,
1, 26, :_reduce_20,
0, 27, :_reduce_none,
1, 27, :_reduce_22,
1, 27, :_reduce_23,
1, 27, :_reduce_24,
1, 27, :_reduce_25,
1, 21, :_reduce_26 ]
racc_reduce_n = 27
racc_shift_n = 37
racc_token_table = {
false => 0,
:error => 1,
:STRING => 2,
:NUMBER => 3,
:TRUE => 4,
:FALSE => 5,
:NULL => 6,
"," => 7,
":" => 8,
"[" => 9,
"]" => 10,
"{" => 11,
"}" => 12 }
racc_nt_base = 13
racc_use_result_var = true
Racc_arg = [
racc_action_table,
racc_action_check,
racc_action_default,
racc_action_pointer,
racc_goto_table,
racc_goto_check,
racc_goto_default,
racc_goto_pointer,
racc_nt_base,
racc_reduce_table,
racc_token_table,
racc_shift_n,
racc_reduce_n,
racc_use_result_var ]
Racc_token_to_s_table = [
"$end",
"error",
"STRING",
"NUMBER",
"TRUE",
"FALSE",
"NULL",
"\",\"",
"\":\"",
"\"[\"",
"\"]\"",
"\"{\"",
"\"}\"",
"$start",
"document",
"object",
"array",
"start_object",
"end_object",
"pairs",
"pair",
"string",
"value",
"start_array",
"end_array",
"values",
"scalar",
"literal" ]
Racc_debug_parser = false
##### State transition tables end #####
# Reduce actions generated by Racc. Each receives the rhs values (val),
# the value stack (_values) and the default result, and must return the
# semantic value for the reduced nonterminal. Do not edit by hand.
# reduce 0 omitted
# reduce 1 omitted
# reduce 2 omitted
# reduce 3 omitted
# reduce 4 omitted
# reduce 5 omitted
# reduce 6 omitted
# reduce 7 omitted
# reduce 8 omitted
# reduce 9 omitted
def _reduce_10(val, _values, result)
@handler.start_array
result
end
def _reduce_11(val, _values, result)
@handler.end_array
result
end
def _reduce_12(val, _values, result)
@handler.start_object
result
end
def _reduce_13(val, _values, result)
@handler.end_object
result
end
# reduce 14 omitted
# reduce 15 omitted
# reduce 16 omitted
# reduce 17 omitted
# reduce 18 omitted
# reduce 19 omitted
def _reduce_20(val, _values, result)
@handler.scalar val[0]
result
end
# reduce 21 omitted
# Number literal: a '.' in the token selects Float, otherwise Integer.
def _reduce_22(val, _values, result)
n = val[0]; result = n.count('.') > 0 ? n.to_f : n.to_i
result
end
def _reduce_23(val, _values, result)
result = true
result
end
def _reduce_24(val, _values, result)
result = false
result
end
def _reduce_25(val, _values, result)
result = nil
result
end
# String literal: strips the surrounding double quotes from the token.
def _reduce_26(val, _values, result)
@handler.scalar val[0].gsub(/^"|"$/, '')
result
end
def _reduce_none(val, _values, result)
val[0]
end
end # class Parser
end # module RJSON
| ruby | MIT | a7e40d31e271f6747fa1234df37b5fff3e6e2406 | 2026-01-04T15:37:32.851738Z | false |
github-linguist/linguist | https://github.com/github-linguist/linguist/blob/a7e40d31e271f6747fa1234df37b5fff3e6e2406/samples/Ruby/jekyll.rb | samples/Ruby/jekyll.rb | $:.unshift File.dirname(__FILE__) # For use/testing when no gem is installed
# Require all of the Ruby files in the given directory.
#
# path - The String relative path from here to the directory.
#
# Returns nothing.
def require_all(path)
  pattern = File.join(File.dirname(__FILE__), path, '*.rb')
  Dir[pattern].each { |ruby_file| require ruby_file }
end
# rubygems
require 'rubygems'
# stdlib
require 'fileutils'
require 'time'
require 'yaml'
require 'English'
# 3rd party
require 'liquid'
require 'maruku'
require 'albino'
# internal requires
require 'jekyll/core_ext'
require 'jekyll/site'
require 'jekyll/convertible'
require 'jekyll/layout'
require 'jekyll/page'
require 'jekyll/post'
require 'jekyll/filters'
require 'jekyll/static_file'
require 'jekyll/errors'
# extensions
require 'jekyll/plugin'
require 'jekyll/converter'
require 'jekyll/generator'
require_all 'jekyll/converters'
require_all 'jekyll/generators'
require_all 'jekyll/tags'
module Jekyll
VERSION = '0.11.2'
# Default options. Overriden by values in _config.yml or command-line opts.
# (Strings rather symbols used for compatability with YAML).
DEFAULTS = {
'safe' => false,
'auto' => false,
'server' => false,
'server_port' => 4000,
'source' => Dir.pwd,
'destination' => File.join(Dir.pwd, '_site'),
'plugins' => File.join(Dir.pwd, '_plugins'),
'future' => true,
'lsi' => false,
'pygments' => false,
'markdown' => 'maruku',
'permalink' => 'date',
'include' => ['.htaccess'],
'paginate_path' => 'page:num',
'markdown_ext' => 'markdown,mkd,mkdn,md',
'textile_ext' => 'textile',
'maruku' => {
'use_tex' => false,
'use_divs' => false,
'png_engine' => 'blahtex',
'png_dir' => 'images/latex',
'png_url' => '/images/latex'
},
'rdiscount' => {
'extensions' => []
},
'redcarpet' => {
'extensions' => []
},
'kramdown' => {
'auto_ids' => true,
'footnote_nr' => 1,
'entity_output' => 'as_char',
'toc_levels' => '1..6',
'smart_quotes' => 'lsquo,rsquo,ldquo,rdquo',
'use_coderay' => false,
'coderay' => {
'coderay_wrap' => 'div',
'coderay_line_numbers' => 'inline',
'coderay_line_number_start' => 1,
'coderay_tab_width' => 4,
'coderay_bold_every' => 10,
'coderay_css' => 'style'
}
},
'redcloth' => {
'hard_breaks' => true
}
}
# Public: Generate a Jekyll configuration Hash by merging the default
# options with anything in _config.yml, and adding the given options on top.
#
# override - A Hash of config directives that override any options in both
# the defaults and the config file. See Jekyll::DEFAULTS for a
# list of option names and their defaults.
#
# Returns the final configuration Hash.
def self.configuration(override)
# _config.yml may override default source location, but until
# then, we need to know where to look for _config.yml
source = override['source'] || Jekyll::DEFAULTS['source']
# Get configuration from <source>/_config.yml
config_file = File.join(source, '_config.yml')
begin
config = YAML.load_file(config_file)
# A non-Hash parse (empty or scalar YAML) is an error; the raise below is
# caught by the rescue clause, which then falls back to the defaults.
raise "Invalid configuration - #{config_file}" if !config.is_a?(Hash)
$stdout.puts "Configuration from #{config_file}"
rescue => err
$stderr.puts "WARNING: Could not read configuration. " +
"Using defaults (and options)."
$stderr.puts "\t" + err.to_s
config = {}
end
# Merge DEFAULTS < _config.yml < override
Jekyll::DEFAULTS.deep_merge(config).deep_merge(override)
end
end
| ruby | MIT | a7e40d31e271f6747fa1234df37b5fff3e6e2406 | 2026-01-04T15:37:32.851738Z | false |
github-linguist/linguist | https://github.com/github-linguist/linguist/blob/a7e40d31e271f6747fa1234df37b5fff3e6e2406/samples/Ruby/formula.rb | samples/Ruby/formula.rb | require 'download_strategy'
require 'dependencies'
require 'formula_support'
require 'hardware'
require 'bottles'
require 'extend/fileutils'
require 'patches'
require 'compilers'
# Derive and define at least @url, see Library/Formula for examples
class Formula
include FileUtils
attr_reader :name, :path, :url, :version, :homepage, :specs, :downloader
attr_reader :standard, :unstable, :head
attr_reader :bottle_version, :bottle_url, :bottle_sha1
# The build folder, usually in /tmp.
# Will only be non-nil during the stage method.
attr_reader :buildpath
# Homebrew determines the name
def initialize name='__UNKNOWN__', path=nil
set_instance_variable 'homepage'
set_instance_variable 'url'
set_instance_variable 'bottle_version'
set_instance_variable 'bottle_url'
set_instance_variable 'bottle_sha1'
set_instance_variable 'head'
set_instance_variable 'specs'
set_instance_variable 'standard'
set_instance_variable 'unstable'
if @head and (not @url or ARGV.build_head?)
@url = @head
@version = 'HEAD'
@spec_to_use = @unstable
else
if @standard.nil?
@spec_to_use = SoftwareSpecification.new(@url, @specs)
else
@spec_to_use = @standard
end
end
raise "No url provided for formula #{name}" if @url.nil?
@name = name
validate_variable :name
# If we got an explicit path, use that, else determine from the name
@path = path.nil? ? self.class.path(name) : Pathname.new(path)
# Use a provided version, if any
set_instance_variable 'version'
# Otherwise detect the version from the URL
@version ||= @spec_to_use.detect_version
# Only validate if a version was set; GitHubGistFormula needs to get
# the URL to determine the version
validate_variable :version if @version
CHECKSUM_TYPES.each { |type| set_instance_variable type }
@downloader = download_strategy.new @spec_to_use.url, name, version, @spec_to_use.specs
@bottle_url ||= bottle_base_url + bottle_filename(self) if @bottle_sha1
end
# if the dir is there, but it's empty we consider it not installed
def installed?
  installed_prefix.children.length > 0
rescue
  # A missing prefix raises (e.g. Errno::ENOENT) and also means "not installed".
  false
end
def explicitly_requested?
# `ARGV.formulae` will throw an exception if it comes up with an empty list.
# FIXME: `ARGV.formulae` shouldn't be throwing exceptions, see issue #8823
return false if ARGV.named.empty?
ARGV.formulae.include? self
end
def linked_keg
HOMEBREW_REPOSITORY/'Library/LinkedKegs'/@name
end
def installed_prefix
head_prefix = HOMEBREW_CELLAR+@name+'HEAD'
if @version == 'HEAD' || head_prefix.directory?
head_prefix
else
prefix
end
end
def prefix
validate_variable :name
validate_variable :version
HOMEBREW_CELLAR+@name+@version
end
def rack; prefix.parent end
def bin; prefix+'bin' end
def doc; prefix+'share/doc'+name end
def include; prefix+'include' end
def info; prefix+'share/info' end
def lib; prefix+'lib' end
def libexec; prefix+'libexec' end
def man; prefix+'share/man' end
def man1; man+'man1' end
def man2; man+'man2' end
def man3; man+'man3' end
def man4; man+'man4' end
def man5; man+'man5' end
def man6; man+'man6' end
def man7; man+'man7' end
def man8; man+'man8' end
def sbin; prefix+'sbin' end
def share; prefix+'share' end
# configuration needs to be preserved past upgrades
def etc; HOMEBREW_PREFIX+'etc' end
# generally we don't want var stuff inside the keg
def var; HOMEBREW_PREFIX+'var' end
# plist name, i.e. the name of the launchd service
def plist_name; 'homebrew.mxcl.'+name end
def plist_path; prefix+(plist_name+'.plist') end
# Use the @spec_to_use to detect the download strategy.
# Can be overriden to force a custom download strategy
def download_strategy
@spec_to_use.download_strategy
end
# Where the downloader caches the fetched archive on disk.
def cached_download
@downloader.cached_location
end
# tell the user about any caveats regarding this package, return a string
def caveats; nil end
# any e.g. configure options for this package
def options; [] end
# patches are automatically applied after extracting the tarball
# return an array of strings, or if you need a patch level other than -p1
# return a Hash eg.
# {
# :p0 => ['http://foo.com/patch1', 'http://foo.com/patch2'],
# :p1 => 'http://bar.com/patch2',
# :p2 => ['http://moo.com/patch5', 'http://moo.com/patch6']
# }
# The final option is to return DATA, then put a diff after __END__. You
# can still return a Hash with DATA as the value for a patch level key.
def patches; end
# rarely, you don't want your library symlinked into the main prefix
# see gettext.rb for an example
def keg_only?
self.class.keg_only_reason || false
end
def fails_with? cc
return false if self.class.cc_failures.nil?
cc = Compiler.new(cc) unless cc.is_a? Compiler
return self.class.cc_failures.find do |failure|
next unless failure.compiler == cc.name
failure.build.zero? or failure.build >= cc.build
end
end
# sometimes the clean process breaks things
# skip cleaning paths in a formula with a class method like this:
# skip_clean [bin+"foo", lib+"bar"]
# redefining skip_clean? now deprecated
def skip_clean? path
return true if self.class.skip_clean_all?
to_check = path.relative_path_from(prefix).to_s
self.class.skip_clean_paths.include? to_check
end
# yields self with current working directory set to the uncompressed tarball
def brew
validate_variable :name
validate_variable :version
stage do
begin
patch
# we allow formulas to do anything they want to the Ruby process
# so load any deps before this point! And exit asap afterwards
yield self
rescue Interrupt, RuntimeError, SystemCallError => e
puts if Interrupt === e # don't print next to the ^C
unless ARGV.debug?
%w(config.log CMakeCache.txt).select{|f| File.exist? f}.each do |f|
HOMEBREW_LOGS.install f
puts "#{f} was copied to #{HOMEBREW_LOGS}"
end
raise
end
onoe e.inspect
puts e.backtrace
ohai "Rescuing build..."
if (e.was_running_configure? rescue false) and File.exist? 'config.log'
puts "It looks like an autotools configure failed."
puts "Gist 'config.log' and any error output when reporting an issue."
puts
end
puts "When you exit this shell Homebrew will attempt to finalise the installation."
puts "If nothing is installed or the shell exits with a non-zero error code,"
puts "Homebrew will abort. The installation prefix is:"
puts prefix
interactive_shell self
end
end
end
def == b
name == b.name
end
def eql? b
self == b and self.class.equal? b.class
end
def hash
name.hash
end
def <=> b
name <=> b.name
end
def to_s
name
end
# Standard parameters for CMake builds.
# Using Build Type "None" tells cmake to use our CFLAGS,etc. settings.
# Setting it to Release would ignore our flags.
# Setting CMAKE_FIND_FRAMEWORK to "LAST" tells CMake to search for our
# libraries before trying to utilize Frameworks, many of which will be from
# 3rd party installs.
# Note: there isn't a std_autotools variant because autotools is a lot
# less consistent and the standard parameters are more memorable.
def std_cmake_args
%W[
-DCMAKE_INSTALL_PREFIX=#{prefix}
-DCMAKE_BUILD_TYPE=None
-DCMAKE_FIND_FRAMEWORK=LAST
-Wno-dev
]
end
# Maps a formula name to its expected class name: capitalize, then
# camelcase across '-', '_', '.' and whitespace separators, and finally
# translate '+' to 'x' (e.g. "foo-bar" => "FooBar", "libxml++" => "Libxmlxx").
def self.class_s name
  camel = name.capitalize.gsub(/[-_.\s]([a-zA-Z0-9])/) { $1.upcase }
  camel.gsub('+', 'x')
end
# an array of all Formula names
def self.names
Dir["#{HOMEBREW_REPOSITORY}/Library/Formula/*.rb"].map{ |f| File.basename f, '.rb' }.sort
end
# an array of all Formula, instantiated
def self.all
map{ |f| f }
end
def self.map
rv = []
each{ |f| rv << yield(f) }
rv
end
def self.each
names.each do |n|
begin
yield Formula.factory(n)
rescue
# Don't let one broken formula break commands. But do complain.
onoe "Formula #{n} will not import."
end
end
end
def inspect
name
end
def self.aliases
Dir["#{HOMEBREW_REPOSITORY}/Library/Aliases/*"].map{ |f| File.basename f }.sort
end
def self.canonical_name name
name = name.to_s if name.kind_of? Pathname
formula_with_that_name = HOMEBREW_REPOSITORY+"Library/Formula/#{name}.rb"
possible_alias = HOMEBREW_REPOSITORY+"Library/Aliases/#{name}"
possible_cached_formula = HOMEBREW_CACHE_FORMULA+"#{name}.rb"
if name.include? "/"
if name =~ %r{(.+)/(.+)/(.+)}
tapd = HOMEBREW_REPOSITORY/"Library/Taps"/"#$1-#$2".downcase
tapd.find_formula do |relative_pathname|
return "#{tapd}/#{relative_pathname}" if relative_pathname.stem.to_s == $3
end if tapd.directory?
end
# Otherwise don't resolve paths or URLs
name
elsif formula_with_that_name.file? and formula_with_that_name.readable?
name
elsif possible_alias.file?
possible_alias.realpath.basename('.rb').to_s
elsif possible_cached_formula.file?
possible_cached_formula.to_s
else
name
end
end
def self.factory name
# If an instance of Formula is passed, just return it
return name if name.kind_of? Formula
# Otherwise, convert to String in case a Pathname comes in
name = name.to_s
# If a URL is passed, download to the cache and install
if name =~ %r[(https?|ftp)://]
url = name
name = Pathname.new(name).basename
target_file = HOMEBREW_CACHE_FORMULA+name
name = name.basename(".rb").to_s
HOMEBREW_CACHE_FORMULA.mkpath
FileUtils.rm target_file, :force => true
curl url, '-o', target_file
require target_file
install_type = :from_url
else
name = Formula.canonical_name(name)
# If name was a path or mapped to a cached formula
if name.include? "/"
require name
# require allows filenames to drop the .rb extension, but everything else
# in our codebase will require an exact and fullpath.
name = "#{name}.rb" unless name =~ /\.rb$/
path = Pathname.new(name)
name = path.stem
install_type = :from_path
target_file = path.to_s
else
# For names, map to the path and then require
require Formula.path(name)
install_type = :from_name
end
end
begin
klass_name = self.class_s(name)
klass = Object.const_get klass_name
rescue NameError
# TODO really this text should be encoded into the exception
# and only shown if the UI deems it correct to show it
onoe "class \"#{klass_name}\" expected but not found in #{name}.rb"
puts "Double-check the name of the class in that formula."
raise LoadError
end
return klass.new(name) if install_type == :from_name
return klass.new(name, target_file)
rescue LoadError
raise FormulaUnavailableError.new(name)
end
def tap
if path.realpath.to_s =~ %r{#{HOMEBREW_REPOSITORY}/Library/Taps/(\w+)-(\w+)}
"#$1/#$2"
else
# remotely installed formula are not mxcl/master but this will do for now
"mxcl/master"
end
end
def self.path name
HOMEBREW_REPOSITORY+"Library/Formula/#{name.downcase}.rb"
end
def mirrors; self.class.mirrors or []; end
def deps; self.class.dependencies.deps; end
def external_deps; self.class.dependencies.external_deps; end
# deps are in an installable order
# which means if a depends on b then b will be ordered before a in this list
def recursive_deps
Formula.expand_deps(self).flatten.uniq
end
def self.expand_deps f
f.deps.map do |dep|
f_dep = Formula.factory dep.to_s
expand_deps(f_dep) << f_dep
end
end
protected
# Pretty titles the command and buffers stdout/stderr
# Throws if there's an error
def system cmd, *args
# remove "boring" arguments so that the important ones are more likely to
# be shown considering that we trim long ohai lines to the terminal width
pretty_args = args.dup
pretty_args.delete "--disable-dependency-tracking" if cmd == "./configure" and not ARGV.verbose?
ohai "#{cmd} #{pretty_args*' '}".strip
removed_ENV_variables = case if args.empty? then cmd.split(' ').first else cmd end
when "xcodebuild"
ENV.remove_cc_etc
end
if ARGV.verbose?
safe_system cmd, *args
else
rd, wr = IO.pipe
pid = fork do
rd.close
$stdout.reopen wr
$stderr.reopen wr
args.collect!{|arg| arg.to_s}
exec(cmd, *args) rescue nil
exit! 1 # never gets here unless exec threw or failed
end
wr.close
out = ''
out << rd.read until rd.eof?
Process.wait
unless $?.success?
puts out
raise
end
end
removed_ENV_variables.each do |key, value|
ENV[key] = value # ENV.kind_of? Hash # => false
end if removed_ENV_variables
rescue
raise BuildError.new(self, cmd, args, $?)
end
public
# For brew-fetch and others.
# Downloads the formula's archive (or bottle), retrying across mirrors on
# CurlDownloadStrategyError. Returns [fetched, downloader].
def fetch
if install_bottle? self
downloader = CurlBottleDownloadStrategy.new bottle_url, name, version, nil
mirror_list = []
else
downloader = @downloader
# Don't attempt mirrors if this install is not pointed at a "stable" URL.
# This can happen when options like `--HEAD` are invoked.
mirror_list = @spec_to_use == @standard ? mirrors : []
end
# Ensure the cache exists
HOMEBREW_CACHE.mkpath
begin
fetched = downloader.fetch
rescue CurlDownloadStrategyError => e
raise e if mirror_list.empty?
puts "Trying a mirror..."
# NOTE(review): mirrors returns the class-level array, so this shift
# mutates that shared list rather than a copy — confirm this is intended.
url, specs = mirror_list.shift.values_at :url, :specs
downloader = download_strategy.new url, name, version, specs
retry
end
return fetched, downloader
end
# Returns the first checksum symbol (:md5, :sha1 or :sha256, in that order)
# for which a matching instance variable is set, or nil when the formula
# declares no checksum at all.
def checksum_type
  CHECKSUM_TYPES.find do |candidate|
    instance_variable_defined?("@#{candidate}")
  end
end
# For FormulaInstaller.
#
# Verifies that the downloaded file +fn+ matches the expected checksum.
# With no extra args the checksum declared on the formula is used (falling
# back to MD5 when none is declared); callers may instead pass an explicit
# (supplied, type) pair.  Raises with a detailed message on mismatch; merely
# warns and prints the computed hash when no checksum was supplied.
def verify_download_integrity fn, *args
require 'digest'
if args.length != 2
# NOTE(review): assumes a formula declares at most one checksum type —
# checksum_type returns the first match in CHECKSUM_TYPES order.
type = checksum_type || :md5
supplied = instance_variable_get("@#{type}")
# Convert symbol to readable string
type = type.to_s.upcase
else
supplied, type = args
end
# e.g. "SHA256" -> Digest::SHA256
hasher = Digest.const_get(type)
hash = fn.incremental_hash(hasher)
if supplied and not supplied.empty?
message = <<-EOF
#{type} mismatch
Expected: #{supplied}
Got: #{hash}
Archive: #{fn}
(To retry an incomplete download, remove the file above.)
EOF
# Checksums are compared case-insensitively.
raise message unless supplied.upcase == hash.upcase
else
opoo "Cannot verify package integrity"
puts "The formula did not provide a download checksum"
puts "For your reference the #{type} is: #{hash}"
end
end
private
# Checksum types we can verify, in priority order.
CHECKSUM_TYPES=[:md5, :sha1, :sha256].freeze
# Downloads (via fetch), verifies and unpacks the source into a temporary
# directory, then yields with @buildpath pointing at that directory.
# @buildpath is cleared again once the block returns.
def stage
fetched, downloader = fetch
# Only curl-style strategies return a Pathname we can checksum; VCS
# strategies return something else and are skipped.
verify_download_integrity fetched if fetched.kind_of? Pathname
mktemp do
downloader.stage
# Set path after the downloader changes the working folder.
@buildpath = Pathname.pwd
yield
@buildpath = nil
end
end
# Applies the formula's patches, if any: downloads external patches first,
# then decompresses and applies each one with /usr/bin/patch.
def patch
patch_list = Patches.new(patches)
return if patch_list.empty?
if patch_list.external_patches?
ohai "Downloading patches"
patch_list.download!
end
ohai "Patching"
patch_list.each do |p|
# Decompress on disk first so patch(1) can read the file directly.
case p.compression
when :gzip then safe_system "/usr/bin/gunzip", p.compressed_filename
when :bzip2 then safe_system "/usr/bin/bunzip2", p.compressed_filename
end
# -f means don't prompt the user if there are errors; just exit with non-zero status
safe_system '/usr/bin/patch', '-f', *(p.patch_args)
end
end
# Ensures the instance variable named by +name+ holds a usable value:
# raises when it is missing/empty or contains any whitespace.
def validate_variable(name)
  value = instance_variable_get("@#{name}")
  raise "Invalid @#{name}" if value.to_s.empty? || value =~ /\s/
end
# Copies the class-level value for +type+ (e.g. :md5) into the matching
# instance variable, unless that instance variable already exists or the
# class provides no (truthy) value.
def set_instance_variable(type)
  ivar = "@#{type}"
  return if instance_variable_defined? ivar
  value = self.class.send(type)
  instance_variable_set(ivar, value) if value
end
# Hook: refuses any attempt to (re)define Formula#brew in a subclass.
def self.method_added method
  return unless method == :brew
  raise 'You cannot override Formula.brew'
end
class << self
# The methods below define the formula DSL.
attr_reader :standard, :unstable
# Metaprogrammed read-or-write accessors: calling the generated method with
# no argument reads the value, with an argument writes it.
def self.attr_rw(*attrs)
attrs.each do |attr|
class_eval %Q{
def #{attr}(val=nil)
val.nil? ? @#{attr} : @#{attr} = val
end
}
end
end
attr_rw :version, :homepage, :mirrors, :specs
attr_rw :keg_only_reason, :skip_clean_all, :cc_failures
attr_rw :bottle_version, :bottle_url, :bottle_sha1
attr_rw(*CHECKSUM_TYPES)
# DSL: declares the HEAD (VCS) URL and records it as the unstable spec.
def head val=nil, specs=nil
return @head if val.nil?
@unstable = SoftwareSpecification.new(val, specs)
@head = val
@specs = specs
end
# DSL: declares the stable download URL and records it as the standard spec.
def url val=nil, specs=nil
return @url if val.nil?
@standard = SoftwareSpecification.new(val, specs)
@url = val
@specs = specs
end
# DSL: evaluates the block as the stable release definition, skipped when a
# devel or HEAD build was requested on the command line.
def stable &block
raise "url and md5 must be specified in a block" unless block_given?
instance_eval(&block) unless ARGV.build_devel? or ARGV.build_head?
end
# DSL: evaluates the block as the development release definition, only when
# `--devel` was requested.
def devel &block
raise "url and md5 must be specified in a block" unless block_given?
if ARGV.build_devel?
@mirrors = nil # clear out mirrors from the stable release
instance_eval(&block)
end
end
# DSL: declares a pre-built bottle.  The block is evaluated against an
# anonymous collector class that records version/url/sha1 (sha1 may be keyed
# by OS release); the collected data is then copied onto the formula class.
def bottle url=nil, &block
return unless block_given?
bottle_block = Class.new do
def self.version version
@version = version
end
def self.url url
@url = url
end
def self.sha1 sha1
case sha1
when Hash
# Hash form maps checksum => OS "cat"; keep the one matching this system.
key, value = sha1.shift
@sha1 = key if value == MacOS.cat
when String
@sha1 = sha1 if MacOS.lion?
end
end
# Returns [version, url, sha1]; url may be nil, and nothing is returned at
# all when no sha1 applied to this system.
def self.data
@version = 0 unless @version
return @version, @url, @sha1 if @sha1 && @url
return @version, nil, @sha1 if @sha1
end
end
bottle_block.instance_eval(&block)
@bottle_version, @bottle_url, @bottle_sha1 = bottle_block.data
end
# DSL: registers an alternative download location for the stable URL.
def mirror val, specs=nil
@mirrors ||= []
@mirrors << {:url => val, :specs => specs}
# Added the uniq after some inspection with Pry---seems `mirror` gets
# called three times. The first two times only one copy of the input is
# left in `@mirrors`. On the final call, two copies are present. This
# happens with `@deps` as well. Odd.
@mirrors.uniq!
end
# Lazily-created collector for this formula's dependencies.
def dependencies
@dependencies ||= DependencyCollector.new
end
# DSL: declares a dependency (name, formula, hash with options, ...).
def depends_on dep
dependencies.add(dep)
end
# DSL: excludes the given path(s) from post-install cleaning; pass :all to
# skip cleaning entirely.
def skip_clean paths
if paths == :all
@skip_clean_all = true
return
end
@skip_clean_paths ||= []
[paths].flatten.each do |p|
@skip_clean_paths << p.to_s unless @skip_clean_paths.include? p.to_s
end
end
def skip_clean_all?
@skip_clean_all
end
def skip_clean_paths
@skip_clean_paths or []
end
# DSL: marks the formula keg-only (not symlinked into the prefix), with a
# reason and an optional longer explanation.
def keg_only reason, explanation=nil
@keg_only_reason = KegOnlyReason.new(reason, explanation.to_s.chomp)
end
# DSL: records that this formula fails to build with the given compiler; an
# optional block supplies details.
def fails_with compiler, &block
@cc_failures ||= CompilerFailures.new
@cc_failures << if block_given?
CompilerFailure.new(compiler, &block)
else
CompilerFailure.new(compiler)
end
end
end
end
require 'formula_specialties'
| ruby | MIT | a7e40d31e271f6747fa1234df37b5fff3e6e2406 | 2026-01-04T15:37:32.851738Z | false |
github-linguist/linguist | https://github.com/github-linguist/linguist/blob/a7e40d31e271f6747fa1234df37b5fff3e6e2406/samples/Ruby/grit.rb | samples/Ruby/grit.rb | module Grit
end
| ruby | MIT | a7e40d31e271f6747fa1234df37b5fff3e6e2406 | 2026-01-04T15:37:32.851738Z | false |
github-linguist/linguist | https://github.com/github-linguist/linguist/blob/a7e40d31e271f6747fa1234df37b5fff3e6e2406/test/test_repository.rb | test/test_repository.rb | require_relative "./helper"
# Exercises Linguist::Repository against this project's own git history,
# pinning language stats, incremental scans and .gitattributes overrides to
# known commit SHAs.
class TestRuggedRepository < Minitest::Test
# Default commit analysed by most tests.
def master_oid
'7dbcffcf982e766fc711e633322de848f2b60ba5'
end
def linguist_repo(oid = master_oid)
Linguist::Repository.new(source_repository, oid)
end
# Rugged handle onto this repository's own .git directory (memoized).
def source_repository
@rugged ||= Rugged::Repository.new(File.expand_path("../../.git", __FILE__))
end
def test_linguist_language
assert_equal 'Ruby', linguist_repo.language
end
def test_linguist_languages
assert linguist_repo.languages['Ruby'] > 10_000
end
def test_linguist_size
assert linguist_repo.size > 30_000
end
def test_linguist_breakdown
assert linguist_repo.breakdown_by_file.has_key?("Ruby")
assert linguist_repo.breakdown_by_file["Ruby"].include?("bin/github-linguist")
assert linguist_repo.breakdown_by_file["Ruby"].include?("lib/linguist/language.rb")
end
# Incremental analysis from an old commit must converge on the same cache as
# a fresh full analysis of master.
def test_incremental_stats
old_commit = '3d7364877d6794f6cc2a86b493e893968a597332'
old_repo = linguist_repo(old_commit)
assert old_repo.languages['Ruby'] > 10_000
assert old_repo.size > 30_000
new_repo = Linguist::Repository.incremental(source_repository, master_oid, old_commit, old_repo.cache)
assert new_repo.languages['Ruby'] > old_repo.languages['Ruby']
assert new_repo.size > old_repo.size
assert_equal linguist_repo.cache, new_repo.cache
end
def test_repo_git_attributes
# See https://github.com/github/linguist/blob/72a89fc9dcd3585250056ab591f9d7e2411d5fa1/.gitattributes
#
# It looks like this:
# Gemfile linguist-vendored=true
# lib/linguist.rb linguist-language=Java
# test/*.rb linguist-language=Java
# Rakefile linguist-generated
# test/fixtures/** linguist-vendored=false
# README.md linguist-documentation=false
# samples/Arduino/* linguist-documentation
# samples/Markdown/*.md linguist-detectable=true
# samples/HTML/*.html linguist-detectable=false
# samples/CSS/bootstrap.css -linguist-vendored
# samples/CSS/bootstrap.min.css -linguist-generated
# LICENSE -linguist-documentation
# samples/CoffeeScript/browser.coffee -linguist-detectable
attr_commit = '72a89fc9dcd3585250056ab591f9d7e2411d5fa1'
repo = linguist_repo(attr_commit)
assert repo.breakdown_by_file.has_key?("Java")
assert repo.breakdown_by_file["Java"].include?("lib/linguist.rb")
assert repo.breakdown_by_file.has_key?("Ruby")
assert !repo.breakdown_by_file["Ruby"].empty?
# Ensures the filename that contains unicode char is UTF-8 encoded and invalid chars scrubbed
assert repo.breakdown_by_file.has_key?("Raku")
assert repo.breakdown_by_file["Raku"].include?("test/fixtures/ba�r/file_ã.pl")
assert_equal "UTF-8", repo.breakdown_by_file["Raku"].first.encoding.to_s
assert repo.breakdown_by_file["Raku"].first.valid_encoding?
end
def test_commit_with_git_attributes_data
# Before we had any .gitattributes data
old_commit = '4a017d9033f91b2776eb85275463f9613cc371ef'
old_repo = linguist_repo(old_commit)
# With some .gitattributes data
attr_commit = '7ee006cbcb2d7261f9e648510a684ee9ac64126b'
# It's incremental but now is scanning more data and should bust the cache
new_repo = Linguist::Repository.incremental(source_repository, attr_commit, old_commit, old_repo.cache, 350_000)
assert new_repo.breakdown_by_file["Java"].include?("lib/linguist.rb")
end
def test_linguist_override_vendored?
attr_commit = '72a89fc9dcd3585250056ab591f9d7e2411d5fa1'
linguist_repo(attr_commit).repository.set_attribute_source(attr_commit)
override_vendored = Linguist::LazyBlob.new(source_repository, attr_commit, 'Gemfile')
# overridden .gitattributes
assert override_vendored.vendored?
end
def test_linguist_override_unvendored?
attr_commit = '01d6b9c637a7a6581fe456c600725b68f355b295'
linguist_repo(attr_commit).repository.set_attribute_source(attr_commit)
# lib/linguist/vendor.yml defines this as vendored.
override_unvendored = Linguist::LazyBlob.new(source_repository, attr_commit, 'test/fixtures/foo.rb')
# test -linguist-vendored attribute method
override_unvendored_minus = Linguist::LazyBlob.new(source_repository, attr_commit, 'samples/CSS/bootstrap.css')
# overridden .gitattributes
refute override_unvendored.vendored?
refute override_unvendored_minus.vendored?
end
def test_linguist_override_documentation?
attr_commit = "01d6b9c637a7a6581fe456c600725b68f355b295"
linguist_repo(attr_commit).repository.set_attribute_source(attr_commit)
readme = Linguist::LazyBlob.new(source_repository, attr_commit, "README.md")
arduino = Linguist::LazyBlob.new(source_repository, attr_commit, "samples/Arduino/hello.ino")
# test -linguist-documentation attribute method
minus = Linguist::LazyBlob.new(source_repository, attr_commit, "LICENSE")
# overridden by .gitattributes
refute_predicate readme, :documentation?
assert_predicate arduino, :documentation?
refute_predicate minus, :documentation?
end
def test_linguist_override_generated?
attr_commit = "01d6b9c637a7a6581fe456c600725b68f355b295"
linguist_repo(attr_commit).repository.set_attribute_source(attr_commit)
rakefile = Linguist::LazyBlob.new(source_repository, attr_commit, "Rakefile")
# test -linguist-generated attribute method
minus = Linguist::LazyBlob.new(source_repository, attr_commit, "samples/CSS/bootstrap.min.css")
# overridden .gitattributes
assert rakefile.generated?
refute minus.generated?
end
def test_linguist_override_detectable?
attr_commit = "01d6b9c637a7a6581fe456c600725b68f355b295"
linguist_repo(attr_commit).repository.set_attribute_source(attr_commit)
# markdown is overridden by .gitattributes to be detectable, html to not be detectable
markdown = Linguist::LazyBlob.new(source_repository, attr_commit, "samples/Markdown/tender.md")
html = Linguist::LazyBlob.new(source_repository, attr_commit, "samples/HTML/pages.html")
# test -linguist-detectable attribute method
minus = Linguist::LazyBlob.new(source_repository, attr_commit, "samples/CoffeeScript/browser.coffee")
assert_predicate markdown, :detectable?
refute_predicate html, :detectable?
refute_predicate minus, :detectable?
end
# read_index populates the rugged index; the resulting tree SHA is pinned.
def test_read_index
attr_commit = '72a89fc9dcd3585250056ab591f9d7e2411d5fa1'
repo = linguist_repo(attr_commit)
repo.read_index
expected_tree = '9dd86972f2d3caa295588b329f9f195bcb409204'
assert_equal expected_tree, @rugged.index.write_tree
end
def test_current_tree
repo = linguist_repo
expected_tree = 'f6cb65aeaee0b206b961746175ecaf4449f73c56'
assert_equal expected_tree, repo.current_tree.oid
end
end
################################################################################
# Exercises Linguist::Repository against a contentless repository stub
# (EmptyRepository, defined further down in this file): no language, zero
# size, and the index/tree operations raise NotImplementedError.
class TestEmptyRepository < Minitest::Test
def source_repository
@source ||= EmptyRepository.new
end
# The commit id is arbitrary; the stub ignores it.
def linguist_repo
Linguist::Repository.new(source_repository, "1234567890123456789012345678901234567890")
end
def test_linguist_language
assert_nil linguist_repo.language
end
def test_linguist_size
assert_equal 0, linguist_repo.size
end
def test_read_index_raises_error
assert_raises(NotImplementedError) { linguist_repo.read_index }
end
def test_current_tree_raises_error
assert_raises(NotImplementedError) { linguist_repo.current_tree }
end
end
# Minimal in-memory stub of Linguist::Source::Repository: no commits, no
# blobs, no attributes, empty diffs.  Lets TestEmptyRepository run without
# touching disk or git.
class EmptyRepository < Linguist::Source::Repository
class Diff < Linguist::Source::Diff
# An empty diff: yields no deltas at all.
def each_delta(&block)
[].each(&block)
end
end
# The tree is always empty regardless of commit or limit.
def get_tree_size(commit_id, limit)
0
end
# Nothing to do: there is no attribute source in an empty repository.
def set_attribute_source(commit_id)
end
# No .gitattributes data for any path.
def load_attributes_for_path(path, attr_names)
{}
end
# Every blob reads as empty content with zero size.
def load_blob(blob_id, max_size)
["", 0]
end
# Any two commits differ by nothing.
def diff(old_commit, new_commit)
Diff.new
end
end
| ruby | MIT | a7e40d31e271f6747fa1234df37b5fff3e6e2406 | 2026-01-04T15:37:32.851738Z | false |
github-linguist/linguist | https://github.com/github-linguist/linguist/blob/a7e40d31e271f6747fa1234df37b5fff3e6e2406/test/test_samples.rb | test/test_samples.rb | require_relative "./helper"
require "tempfile"
# Validates the serialized samples database (Samples.cache) against the
# samples/ tree: freshness, internal consistency, and parity between the
# sample files on disk and what languages.yml declares.
class TestSamples < Minitest::Test
include Linguist
# Warns (does not fail) when the checked-in samples database is stale.
def test_up_to_date
assert serialized = Samples.cache
assert latest = Samples.data
# Just warn, it shouldn't scare people off by breaking the build.
if serialized['sha256'] != latest['sha256']
warn "Samples database is out of date. Run `bundle exec rake samples`."
expected = Tempfile.new('expected.json')
expected.write Yajl.dump(serialized, :pretty => true)
expected.close
actual = Tempfile.new('actual.json')
actual.write Yajl.dump(latest, :pretty => true)
actual.close
expected.unlink
actual.unlink
end
end
# Basic sanity checks on the cached classifier data.
def test_verify
assert data = Samples.cache
assert !data["vocabulary"].empty?
assert !data["icf"].empty?
assert !data["centroids"].empty?
assert_equal data["icf"].size, data["vocabulary"].size
assert !data["extnames"].empty?
assert !data["interpreters"].empty?
assert !data["filenames"].empty?
end
# Every sample must be identifiable somehow: extension, filename or shebang.
def test_ext_or_shebang
Samples.each do |sample|
if sample[:extname].to_s.empty? && !sample[:filename]
assert sample[:interpreter], "#{sample[:path]} should have a file extension or a shebang, maybe it belongs in filenames/ subdir"
end
end
end
def test_filename_listed
Samples.each do |sample|
if sample[:filename]
listed_filenames = Language[sample[:language]].filenames
listed_filenames -= ["HOSTS"] if ["Hosts File", "INI"].include?(sample[:language])
assert_includes listed_filenames, sample[:filename], "#{sample[:path]} isn't listed as a filename for #{sample[:language]} in languages.yml"
end
end
end
# Check that there aren't samples with extensions or interpreters that
# aren't explicitly defined in languages.yml
languages_yml = File.expand_path("../../lib/linguist/languages.yml", __FILE__)
YAML.load_file(languages_yml).each do |name, options|
define_method "test_samples_have_parity_with_languages_yml_for_#{name}" do
options['extensions'] ||= []
if extnames = Samples.cache['extnames'][name]
extnames.each do |extname|
assert options['extensions'].index { |x| x.downcase.end_with? extname.downcase }, "#{name} has a sample with extension (#{extname.downcase}) that isn't explicitly defined in languages.yml"
end
end
options['interpreters'] ||= []
if interpreters = Samples.cache['interpreters'][name]
interpreters.each do |interpreter|
assert options['interpreters'].include?(interpreter),
"#{name} has a sample with an interpreter (#{interpreter}) that isn't explicitly defined in languages.yml"
end
end
end
end
# If a language extension isn't globally unique then make sure there are samples
Linguist::Language.all.each do |language|
define_method "test_#{language.name}_has_samples" do
language.extensions.each do |extension|
language_matches = Language.find_by_extension(extension)
# Check for samples if more than one language matches the given extension.
if language_matches.length > 1
language_matches.each do |match|
generic = Strategy::Extension.generic? extension
samples = generic ? "test/fixtures/Generic/#{extension.sub(/^\./, "")}/#{match.name}/*" : "samples/#{match.name}/*#{case_insensitive_glob(extension)}"
assert Dir.glob(samples).any?, "Missing samples in #{samples.inspect}. See https://github.com/github/linguist/blob/master/CONTRIBUTING.md"
end
end
end
language.filenames.each do |filename|
# Kludge for an unusual edge-case; see https://bit.ly/41EyUkU
next if ["Hosts File", "INI"].include?(language.name) && filename == "HOSTS"
# Check for samples if more than one language matches the given filename
if Language.find_by_filename(filename).size > 1
# FIX: restored the "#{filename}" interpolation -- it had been mangled
# into the literal text "#(unknown)", so this existence check was
# testing a bogus constant path instead of the sample for +filename+.
sample = "samples/#{language.name}/filenames/#{filename}"
assert File.exist?(sample),
"Missing sample in #{sample.inspect}. See https://github.com/github/linguist/blob/master/CONTRIBUTING.md"
end
end
end
end
# Builds a glob that matches +extension+ case-insensitively, e.g.
# ".rb" -> ".[rR][bB]".
def case_insensitive_glob(extension)
glob = ""
extension.each_char do |c|
glob += c.downcase != c.upcase ? "[#{c.downcase}#{c.upcase}]" : c
end
glob
end
end
| ruby | MIT | a7e40d31e271f6747fa1234df37b5fff3e6e2406 | 2026-01-04T15:37:32.851738Z | false |
github-linguist/linguist | https://github.com/github-linguist/linguist/blob/a7e40d31e271f6747fa1234df37b5fff3e6e2406/test/test_tokenizer.rb | test/test_tokenizer.rb | require_relative "./helper"
# Exercises Linguist::Tokenizer: the lexer that strips literals and comments
# and reduces source text to the token stream fed to the language classifier.
# Each test pins the exact token sequence produced for representative input.
class TestTokenizer < Minitest::Test
include Linguist
# Tokenizes a raw string, or — given a Symbol — the named sample file.
def tokenize(data)
data = File.read(File.join(samples_path, data.to_s)) if data.is_a?(Symbol)
Tokenizer.tokenize(data)
end
# String literal contents are dropped; surrounding punctuation survives.
def test_skip_string_literals
assert_equal %w(print), tokenize('print ""')
assert_equal %w(print), tokenize('print "Josh"')
assert_equal %w(print), tokenize("print 'Josh'")
assert_equal %w(print), tokenize('print "Hello \"Josh\""')
assert_equal %w(print), tokenize("print 'Hello \\'Josh\\''")
assert_equal %w(print ,), tokenize("print \"Hello\", \"Josh\"")
assert_equal %w(print ,), tokenize("print 'Hello', 'Josh'")
assert_equal %w(print , ,), tokenize("print \"Hello\", \"\", \"Josh\"")
assert_equal %w(print , ,), tokenize("print 'Hello', '', 'Josh'")
end
# Numeric literals are dropped, but identifiers containing digits survive.
def test_skip_number_literals
assert_equal %w(+), tokenize('1 + 1')
assert_equal %w(add \( , \)), tokenize('add(123, 456)')
assert_equal %w(|), tokenize('0x01 | 0x10')
assert_equal %w(*), tokenize('500.42 * 1.0')
assert_equal %w(), tokenize('1.23e-04')
assert_equal %w(), tokenize('1.0f')
assert_equal %w(), tokenize('1234ULL')
assert_equal %w(G1 X55 Y5 F2000), tokenize('G1 X55 Y5 F2000')
end
# Comments collapse to a marker token identifying the comment style
# (e.g. COMMENT#, COMMENT//, COMMENT/*); the comment body is discarded.
def test_comments
assert_equal %w(COMMENT#), tokenize("#\n")
assert_equal %w(COMMENT#), tokenize("##\n")
assert_equal %w(foo COMMENT#), tokenize("foo\n# Comment")
assert_equal %w(foo COMMENT#), tokenize("foo\n## Comment")
assert_equal %w(foo COMMENT# bar), tokenize("foo\n# Comment\nbar")
assert_equal %w(COMMENT//), tokenize("//\n")
assert_equal %w(COMMENT//), tokenize("///\n")
assert_equal %w(foo COMMENT//), tokenize("foo\n// Comment")
assert_equal %w(foo COMMENT//), tokenize("foo\n/// Comment")
assert_equal %w(foo COMMENT//!), tokenize("foo\n//! Comment")
assert_equal %w(COMMENT//), tokenize("//***\n")
assert_equal %w(COMMENT--), tokenize("--\n")
assert_equal %w(foo COMMENT--), tokenize("foo\n-- Comment")
assert_equal %w(COMMENT"), tokenize("\"\n")
assert_equal %w(foo COMMENT"), tokenize("foo\n\" Comment")
assert_equal %w(COMMENT;), tokenize(";\n")
assert_equal %w(COMMENT;), tokenize(";;\n")
assert_equal %w(foo COMMENT;), tokenize("foo\n; Comment")
assert_equal %w(foo COMMENT;), tokenize("foo\n;; Comment")
assert_equal %w(foo COMMENT/*), tokenize("foo /* Comment */")
assert_equal %w(foo COMMENT/*), tokenize("foo /*Comment*/")
assert_equal %w(foo COMMENT/*), tokenize("foo /* \nComment\n */")
assert_equal %w(foo COMMENT/**), tokenize("foo /** Comment */")
assert_equal %w(foo COMMENT/**), tokenize("foo /**Comment*/")
assert_equal %w(foo COMMENT/*!), tokenize("foo /*! Comment */")
assert_equal %w(foo COMMENT/*!), tokenize("foo /*!Comment*/")
assert_equal %w(COMMENT/*), tokenize("/**/")
assert_equal %w(COMMENT/*), tokenize("/*\n*\n*/")
assert_equal %w(COMMENT/**), tokenize("/***/")
assert_equal %w(COMMENT/**), tokenize("/****/")
assert_equal %w(COMMENT/*!), tokenize("/*!*/")
assert_equal %w(foo COMMENT<!--), tokenize("foo <!-- Comment -->")
assert_equal %w(foo COMMENT<!--), tokenize("foo <!--Comment-->")
assert_equal %w(foo COMMENT<!--), tokenize("foo<!--Comment-->")
assert_equal %w(foo COMMENT<!--), tokenize("foo<!---->")
assert_equal %w(foo COMMENT{-), tokenize("foo {- Comment -}")
assert_equal %w!foo COMMENT(*!, tokenize("foo (* Comment *)")
assert_equal %w(COMMENT%), tokenize("%\n")
assert_equal %w(COMMENT%), tokenize("%%\n")
assert_equal %w(% COMMENT%), tokenize("2 % 10\n% Comment")
assert_equal %w(foo COMMENT""" bar), tokenize("foo\n\"\"\"\nComment\n\"\"\"\nbar")
assert_equal %w(foo COMMENT''' bar), tokenize("foo\n'''\nComment\n'''\nbar")
# Lean comments
assert_equal %w(foo COMMENT/-), tokenize("foo /- Comment -/")
assert_equal %w(foo COMMENT/-), tokenize("foo /-Comment-/")
assert_equal %w(foo COMMENT/-), tokenize("foo /- \nComment\n -/")
assert_equal %w(foo COMMENT/-), tokenize("foo /-\nComment\n-/")
assert_equal %w(foo COMMENT/-), tokenize("foo /-- Comment -/")
assert_equal %w(foo COMMENT/-), tokenize("foo /--Comment-/")
assert_equal %w(foo COMMENT/-), tokenize("foo /--\nComment\n-/")
assert_equal %w(foo COMMENT/-), tokenize("foo /-! Comment -/")
assert_equal %w(foo COMMENT/-), tokenize("foo /-!Comment-/")
assert_equal %w(foo COMMENT/-), tokenize("foo /-!\nComment\n-/")
assert_equal %w(foo COMMENT/- bar), tokenize("foo /-\nComment\n-/ bar")
assert_equal %w(foo COMMENT/- bar), tokenize("foo /-\nComment\n-/\nbar")
assert_equal %w(foo COMMENT/- bar), tokenize("foo\n/-\nComment\n-/\nbar")
# Nested comments are not processed correctly as it's rarely used and adds unnecessary complexity
assert_equal %w(foo COMMENT/- comment), tokenize("foo /- Comment /- Still Comment /- And Still Comment -/ comment")
assert_equal %w(foo COMMENT/- comment3 - / bar), tokenize("foo /- comment1 /- comment2 -/ comment3 -/ bar")
assert_equal %w(COMMENT/-), tokenize("/-\n*\n-/")
assert_equal %w(COMMENT/-), tokenize("/-*-/")
assert_equal %w(COMMENT/-), tokenize("/-**-/")
assert_equal %w(COMMENT/-), tokenize("/-!-/")
assert_equal %w(COMMENT/-), tokenize("/--/ -/")
# Roff comments
assert_equal %w(COMMENT.\\" bar), tokenize(".\\\" foo\nbar")
assert_equal %w(COMMENT.\\" bar), tokenize(". \\\" foo\nbar")
assert_equal %w(COMMENT'\\" bar), tokenize("'\\\" foo\nbar")
assert_equal %w(COMMENT'\\" bar), tokenize("' \\\" foo\nbar")
assert_equal %w(COMMENT.ig), tokenize(".ig\nComment\n..")
# DIGITAL Command Language comments
assert_equal %w(COMMENT$!), tokenize("$! Foo")
# Easily mistaken with comment
assert_equal %w(* /), tokenize("1 */ 2")
end
# SGML/XML tags are split into punctuation and names; attribute values drop.
def test_sgml_tags
assert_equal %w(< html ></ html >), tokenize("<html></html>")
assert_equal %w(< div id ></ div >), tokenize("<div id></div>")
assert_equal %w(< div id = foo ></ div >), tokenize("<div id=foo></div>")
assert_equal %w(< div id class ></ div >), tokenize("<div id class></div>")
assert_equal %w(< div id = ></ div >), tokenize("<div id=\"foo bar\"></div>")
assert_equal %w(< div id = ></ div >), tokenize("<div id='foo bar'></div>")
assert_equal %w(<? xml version = ?>), tokenize("<?xml version=\"1.0\"?>")
assert_equal %w(<! DOCTYPE html >), tokenize("<!DOCTYPE html>")
assert_equal %w(< a >), tokenize("<a>")
end
def test_freemarker_tags
assert_equal %w(<# a > b </# a >), tokenize("<#a>b</#a>")
assert_equal %w(<@ a > b </@ a >), tokenize("<@a>b</@a>")
end
# Operators survive tokenization verbatim, with or without whitespace.
def test_operators
assert_equal %w(+), tokenize("1 + 1")
assert_equal %w(+), tokenize("1+1")
assert_equal %w(-), tokenize("1 - 1")
assert_equal %w(-), tokenize("1-1")
assert_equal %w(*), tokenize("1 * 1")
assert_equal %w(*), tokenize("1*1")
assert_equal %w(a ** b), tokenize("a ** b")
assert_equal %w(**), tokenize("1**2")
assert_equal %w(a ** b), tokenize("a**b")
assert_equal %w(/), tokenize("1 / 1")
assert_equal %w(/), tokenize("1/1")
assert_equal %w(//), tokenize("1 // 1")
assert_equal %w(//), tokenize("1//1")
assert_equal %w(%), tokenize("2 % 5")
assert_equal %w(%), tokenize("2%5")
assert_equal %w(&), tokenize("1 & 1")
assert_equal %w(&), tokenize("1&1")
assert_equal %w(&&), tokenize("1 && 1")
assert_equal %w(&&), tokenize("1&&1")
assert_equal %w(|), tokenize("1 | 1")
assert_equal %w(|), tokenize("1|1")
assert_equal %w(||), tokenize("1 || 1")
assert_equal %w(||), tokenize("1||1")
assert_equal %w(<), tokenize("1 < 0x01")
assert_equal %w(<), tokenize("1<0x01")
assert_equal %w(<<), tokenize("1 << 0x01")
assert_equal %w(<<), tokenize("1<<0x01")
assert_equal %w(<<<), tokenize("1 <<< 0x01")
assert_equal %w(<<<), tokenize("1<<<0x01")
assert_equal %w(>), tokenize("1 > 0x01")
assert_equal %w(>), tokenize("1>0x01")
assert_equal %w(>>), tokenize("1 >> 0x01")
assert_equal %w(>>), tokenize("1>>0x01")
assert_equal %w(>>>), tokenize("1 >>> 0x01")
assert_equal %w(>>>), tokenize("1>>>0x01")
assert_equal %w(a --), tokenize("a--")
assert_equal %w(a ++), tokenize("a++")
assert_equal %w(-- a), tokenize("--a")
assert_equal %w(++ a), tokenize("++a")
assert_equal %w(a -> b), tokenize("a -> b")
assert_equal %w(a -> b), tokenize("a->b")
assert_equal %w(a --> b), tokenize("a --> b")
assert_equal %w(a --> b), tokenize("a-->b")
assert_equal %w(a <- b), tokenize("a <- b")
assert_equal %w(a <- b), tokenize("a<-b")
assert_equal %w(a <-- b), tokenize("a <-- b")
assert_equal %w(a <-- b), tokenize("a<--b")
assert_equal %w(a = b), tokenize("a = b")
assert_equal %w(a = b), tokenize("a=b")
assert_equal %w(a == b), tokenize("a == b")
assert_equal %w(a == b), tokenize("a==b")
assert_equal %w(a === b), tokenize("a === b")
assert_equal %w(a === b), tokenize("a===b")
assert_equal %w(a !== b), tokenize("a !== b")
assert_equal %w(a !== b), tokenize("a!==b")
assert_equal %w(a >= b), tokenize("a>=b")
assert_equal %w(a <= b), tokenize("a<=b")
assert_equal %w(a <> b), tokenize("a<>b")
assert_equal %w(a ^ b), tokenize("a ^ b")
assert_equal %w(a ^ b), tokenize("a^b")
assert_equal %w(~ a), tokenize("~a")
assert_equal %w(a := b), tokenize("a:=b")
assert_equal %w(a :== b), tokenize("a:==b")
assert_equal %w(a += b), tokenize("a+=b")
assert_equal %w(a -= b), tokenize("a-=b")
assert_equal %w(a *= b), tokenize("a*=b")
assert_equal %w(a /= b), tokenize("a/=b")
assert_equal %w(a %= b), tokenize("a%=b")
assert_equal %w(a ^= b), tokenize("a^=b")
assert_equal %w(a &= b), tokenize("a&=b")
assert_equal %w(a |= b), tokenize("a|=b")
assert_equal %w(a ~= b), tokenize("a~=b")
assert_equal %w(a =~ b), tokenize("a=~b")
assert_equal %w(a !~ b), tokenize("a!~b")
# Regexps/Globs
assert_equal %w(.*), tokenize(".*")
assert_equal %w(.*?), tokenize(".*?")
assert_equal %w(.**), tokenize(".**")
assert_equal %w(.+), tokenize(".+")
assert_equal %w(.+?), tokenize(".+?")
assert_equal %w(.++), tokenize(".++")
assert_equal %w((?: a )), tokenize("(?:a)")
assert_equal %w([[ a ]]), tokenize("[[a]]")
assert_equal %w([[ a ]]), tokenize("[[ a ]]")
# Edge cases
assert_equal %w(- ! # $ % & * + , . : ; <=>), tokenize("-!#$%&*+,.:;<=>")
assert_equal %w(- ! # $ % & ? @ \\ ^ _ ` | ~), tokenize("-!#$%&?@\\^_`|~")
assert_equal %w(- ! # $ % & * + , . : ; <=>), tokenize("-!#$%&*+,.:;<=>")
assert_equal %w(- / ! # $ % & * + , . : ; <>), tokenize("-/!#$%&*+,.:;<>")
end
# The remaining tests pin token streams for real sample files per language.
def test_c_tokens
assert_equal %w(#ifndef HELLO_H #define HELLO_H void hello \(\) ; #endif), tokenize(:"C/hello.h")
assert_equal %w(#include < stdio .h > int main \(\) { printf \( \) ; return ; }), tokenize(:"C/hello.c")
end
def test_cpp_tokens
assert_equal %w(class Bar { protected : char * name ; public : void hello \(\) ; }), tokenize(:"C++/bar.h")
assert_equal %w(#include < iostream > using namespace std ; int main \(\) { cout << << endl ; }), tokenize(:"C++/hello.cpp")
end
def test_lua_tokens
assert_equal %w({...}), tokenize("{...}")
end
def test_objective_c_tokens
assert_equal %w(#import < Foundation / Foundation .h > @interface Foo : NSObject { } @end), tokenize(:"Objective-C/Foo.h")
assert_equal %w(#import @implementation Foo @end), tokenize(:"Objective-C/Foo.m")
assert_equal %w(#import < Cocoa / Cocoa .h > int main \( int argc , char * argv [] \) { NSLog \( @ \) ; return ; }), tokenize(:"Objective-C/hello.m")
end
def test_perl_tokens
assert_equal %w(COMMENT# COMMENT# COMMENT# package POSIX ; #line sub getchar { usage if @_ != ; CORE :: getc \( STDIN \) ; } COMMENT# ;), tokenize(:"Perl/getchar.al")
assert_equal %w(@_), tokenize("@_")
assert_equal %w($_), tokenize("$_")
end
def test_php_tokens
assert_equal %w(<? php echo ( ) ; ?>), tokenize("<?php echo('hello world'); ?>")
assert_equal %w(<? php COMMENT/* ?>), tokenize("<?php /* comment */ ?>")
end
def test_prolog_tokens
assert_equal %w(a ( A , B ) :- f .), tokenize("a(A, B) :- f.")
end
# Shebang lines become SHEBANG#!<interpreter> tokens.
def test_shebang
assert_equal "SHEBANG#!sh", tokenize(:"Shell/sh")[0]
assert_equal "SHEBANG#!bash", tokenize(:"Shell/bash")[0]
assert_equal "SHEBANG#!zsh", tokenize(:"Shell/zsh")[0]
assert_equal "SHEBANG#!perl", tokenize(:"Perl/perl")[0]
assert_equal "SHEBANG#!python", tokenize(:"Python/python")[0]
assert_equal "SHEBANG#!ruby", tokenize(:"Ruby/ruby")[0]
assert_equal "SHEBANG#!ruby", tokenize(:"Ruby/ruby2")[0]
assert_equal "SHEBANG#!node", tokenize(:"JavaScript/js")[0]
assert_equal "SHEBANG#!php", tokenize(:"PHP/php")[0]
assert_equal "SHEBANG#!escript", tokenize(:"Erlang/factorial")[0]
assert_equal "echo", tokenize(:"Shell/invalid-shebang.sh")[0]
end
def test_javascript_tokens
assert_equal %w( \( function \(\) { console .log \( \) ; } \) .call \( this \) ;), tokenize(:"JavaScript/hello.js")
end
def test_json_tokens
assert_equal %w( { : , : , : , : [ , ] , : { : , : } } ), tokenize(:"JSON/product.json")
end
def test_ruby_tokens
assert_equal %w(module Foo end), tokenize(:"Ruby/foo.rb")
assert_equal %w(task : default do puts end), tokenize(:"Ruby/filenames/Rakefile")
end
def test_shell_tokens
# Bash
assert_equal %w(&>), tokenize("&>")
assert_equal %w(|&), tokenize("|&")
assert_equal %w(<&), tokenize("<&")
assert_equal %w(>&), tokenize(">&")
assert_equal %w(${ a }), tokenize("${a}")
assert_equal %w($( a )), tokenize("$( a )")
assert_equal %w($(( + ))), tokenize("$(( 1+1 ))")
# Fish
assert_equal %w(<&-), tokenize("<&-")
assert_equal %w(&|), tokenize("&|")
end
# Individual tokens are truncated to 16 characters.
def test_truncate
assert_equal ['a'*16], tokenize('a'*100)
end
def test_long_token
assert_equal ["." * 16], tokenize("." * (32*1024+1))
end
# This is a terrible way to test this, but it does the job.
#
# If this test fails, it means you've introduced a regression in the tokenizer in the form of an action that uses
# REJECT or a rule with a trailing context which is effectively the same as REJECT. Both of these cause us problems
# because they introduce a fixed length buffer. This fixed buffer can cause the tokenizer to crash. This also has
# an impact on performance of the tokenizer.
#
# Please do not use rules with a trailing context or REJECT actions
#
def test_flex_no_reject
refute File.open("ext/linguist/lex.linguist_yy.c").grep(/#define REJECT reject_used_but_not_detected/).empty?, \
"Tokenizer should not use rules with a trailing context or REJECT actions"
end
end
| ruby | MIT | a7e40d31e271f6747fa1234df37b5fff3e6e2406 | 2026-01-04T15:37:32.851738Z | false |
github-linguist/linguist | https://github.com/github-linguist/linguist/blob/a7e40d31e271f6747fa1234df37b5fff3e6e2406/test/test_pedantic.rb | test/test_pedantic.rb | require_relative "./helper"
# Style/consistency checks over the YAML metadata files: languages,
# grammars, generics and heuristics must be kept sorted, heuristics must be
# fully tested, and languages.yml may only use known fields.
class TestPedantic < Minitest::Test
filename = File.expand_path("../../lib/linguist/languages.yml", __FILE__)
LANGUAGES = YAML.load(File.read(filename))
GRAMMARS = YAML.load(File.read(File.expand_path("../../grammars.yml", __FILE__)))
GENERICS = YAML.load_file(File.expand_path("../../lib/linguist/generic.yml", __FILE__))
HEURISTICS = YAML.load_file(File.expand_path("../../lib/linguist/heuristics.yml", __FILE__))
def test_language_names_are_sorted
assert_sorted LANGUAGES.keys
end
# The primary extension comes first and is exempt; the rest must be sorted.
def test_nonprimary_extensions_are_sorted
LANGUAGES.each do |name, language|
extensions = language['extensions']
assert_sorted extensions[1..-1].map(&:downcase) if extensions && extensions.size > 1
end
end
def test_filenames_are_sorted
LANGUAGES.each do |name, language|
assert_sorted language['filenames'] if language['filenames']
end
end
def test_generics_are_sorted
assert_sorted GENERICS.keys
end
def test_grammars_are_sorted
assert_sorted GRAMMARS.keys
end
def test_scopes_are_sorted
GRAMMARS.values.each do |scopes|
assert_sorted scopes
end
end
def test_heuristics_are_sorted
disambiguations = HEURISTICS['disambiguations']
assert_sorted disambiguations.map { |r| r['extensions'][0] }
assert_sorted HEURISTICS['named_patterns'].keys
end
# Greps the heuristics test file itself to enforce sorted test methods.
def test_heuristics_tests_are_sorted
file = File.expand_path("../test_heuristics.rb", __FILE__)
tests = File.open(file).each.grep(/^ *def test_[0-9a-z_]+_by_heuristics/)
assert_sorted tests
end
# Every disambiguated extension must have a matching heuristics test.
def test_heuristics_tests_are_exhaustive
file = File.expand_path("../test_heuristics.rb", __FILE__)
tests = File.open(file).each.grep(/^ *def test_[0-9a-z_]+_by_heuristics/)
tests = tests.map { |s| s.match(/test_(.*)_by_heuristic/).captures[0] }
extensions = HEURISTICS['disambiguations'].map { |r| r['extensions'] }
extensions = extensions.flatten(1).map { |e| e[1..-1] }
extensions.each do |ext|
assert tests.include?(ext), "Extension .#{ext} has no test in test_heuristics.rb"
end
end
# Delegates to the sort-submodules script in test (-t) mode.
def test_submodules_are_sorted
system(File.expand_path("../../script/sort-submodules", __FILE__) + " -t")
assert $?.success?
end
# Helper: flunks on the first adjacent out-of-order pair.
def assert_sorted(list)
list.each_cons(2) do |previous, item|
flunk "#{previous} should come after #{item}" if previous > item
end
end
def test_no_unknown_fields
known_fields = <<~END.split("\n")
ace_mode
aliases
codemirror_mime_type
codemirror_mode
color
extensions
filenames
fs_name
group
interpreters
language_id
searchable
tm_scope
type
wrap
END
LANGUAGES.each do |name, language|
unknown_fields = language.keys.reject { |key| known_fields.include?(key) }
message = "Language '#{name}' has the following unknown field(s):\n"
message << unknown_fields.map { |key| sprintf(" - %s: %s", key, language[key]) }.sort.join("\n")
assert unknown_fields.empty?, message
end
end
end
| ruby | MIT | a7e40d31e271f6747fa1234df37b5fff3e6e2406 | 2026-01-04T15:37:32.851738Z | false |
github-linguist/linguist | https://github.com/github-linguist/linguist/blob/a7e40d31e271f6747fa1234df37b5fff3e6e2406/test/test_blob.rb | test/test_blob.rb | require_relative "./helper"
# Exercises Linguist::BlobHelper through sample and fixture blobs:
# MIME/content-type and disposition, encoding detection, line counting,
# binary/text/image classification, generated- and vendored-file detection,
# and full language detection across the whole sample corpus.
class TestBlob < Minitest::Test
  include Linguist

  # Return the sample blob with its name overridden to a bare "script",
  # so detection cannot rely on a file extension.
  def script_blob(name)
    blob = sample_blob_memory(name)
    blob.instance_variable_set(:@name, 'script')
    blob
  end

  def test_name
    assert_equal "foo.rb", sample_blob_memory("Ruby/foo.rb").name
  end

  def test_mime_type
    assert_equal "application/pdf", fixture_blob_memory("Binary/octocat.ai").mime_type
    assert_equal "application/x-ruby", sample_blob_memory("Ruby/grit.rb").mime_type
    assert_equal "application/x-sh", sample_blob_memory("Shell/script.sh").mime_type
    assert_equal "text/plain", fixture_blob_memory("Data/README").mime_type
  end

  def test_content_type
    assert_equal "application/pdf", fixture_blob_memory("Binary/foo.pdf").content_type
    assert_equal "image/png", fixture_blob_memory("Binary/foo.png").content_type
    assert_equal "text/plain; charset=iso-8859-2", fixture_blob_memory("Data/README").content_type
  end

  # Binary blobs download as attachments; text and images render inline.
  def test_disposition
    assert_equal "attachment; filename=foo+bar.jar", fixture_blob_memory("Binary/foo bar.jar").disposition
    assert_equal "attachment; filename=foo.bin", fixture_blob_memory("Binary/foo.bin").disposition
    assert_equal "attachment; filename=linguist.gem", fixture_blob_memory("Binary/linguist.gem").disposition
    assert_equal "attachment; filename=octocat.ai", fixture_blob_memory("Binary/octocat.ai").disposition
    assert_equal "inline", fixture_blob_memory("Data/README").disposition
    assert_equal "inline", sample_blob_memory("Text/foo.txt").disposition
    assert_equal "inline", sample_blob_memory("Ruby/grit.rb").disposition
    assert_equal "inline", fixture_blob_memory("Binary/octocat.png").disposition
  end

  def test_data
    assert_equal "module Foo\nend\n", sample_blob_memory("Ruby/foo.rb").data
  end

  def test_lines
    assert_equal ["module Foo", "end"], sample_blob_memory("Ruby/foo.rb").lines
    # mac.txt uses CR line endings; splitting must still work.
    assert_equal ["line 1", "line 2"], sample_blob_memory("Text/mac.txt").lines
    assert_equal 474, sample_blob_memory("Emacs Lisp/ess-julia.el").lines.length
  end

  def test_size
    assert_equal 15, sample_blob_memory("Ruby/foo.rb").size
  end

  # loc/sloc must handle non-UTF-8 encodings (UTF-16LE, ISO-8859-8-I).
  def test_loc
    assert_equal 2, sample_blob_memory("Ruby/foo.rb").loc
    assert_equal 3, fixture_blob_memory("Data/utf16le-windows").loc
    assert_equal 3, fixture_blob_memory("Data/utf16le").loc
    assert_equal 1, fixture_blob_memory("Data/iso8859-8-i").loc
  end

  def test_sloc
    assert_equal 2, sample_blob_memory("Ruby/foo.rb").sloc
    assert_equal 3, fixture_blob_memory("Data/utf16le-windows").sloc
    assert_equal 3, fixture_blob_memory("Data/utf16le").sloc
    assert_equal 1, fixture_blob_memory("Data/iso8859-8-i").sloc
  end

  # encoding is the detected charset; ruby_encoding is what Ruby can use
  # (falls back to "binary" for charsets Ruby can't transcode directly).
  def test_encoding
    assert_equal "ISO-8859-2", fixture_blob_memory("Data/README").encoding
    assert_equal "ISO-8859-2", fixture_blob_memory("Data/README").ruby_encoding
    assert_equal "UTF-8", sample_blob_memory("Text/foo.txt").encoding
    assert_equal "UTF-8", sample_blob_memory("Text/foo.txt").ruby_encoding
    assert_equal "UTF-16LE", fixture_blob_memory("Data/utf16le").encoding
    assert_equal "UTF-16LE", fixture_blob_memory("Data/utf16le").ruby_encoding
    assert_equal "UTF-16LE", fixture_blob_memory("Data/utf16le-windows").encoding
    assert_equal "UTF-16LE", fixture_blob_memory("Data/utf16le-windows").ruby_encoding
    assert_equal "ISO-2022-KR", fixture_blob_memory("Text/ISO-2022-KR.txt").encoding
    assert_equal "binary", fixture_blob_memory("Text/ISO-2022-KR.txt").ruby_encoding
    assert_nil fixture_blob_memory("Binary/dog.o").encoding
  end

  def test_binary
    assert fixture_blob_memory("Binary/git.deb").binary?
    assert fixture_blob_memory("Binary/hello.pbc").binary?
    assert fixture_blob_memory("Binary/linguist.gem").binary?
    assert fixture_blob_memory("Binary/octocat.ai").binary?
    assert fixture_blob_memory("Binary/octocat.png").binary?
    assert fixture_blob_memory("Binary/zip").binary?
    assert !fixture_blob_memory("Data/README").binary?
    assert !sample_blob_memory("Ruby/foo.rb").binary?
    assert !sample_blob_memory("Perl/script.pl").binary?
  end

  # No sample in the corpus may be (or even look) binary.
  def test_all_binary
    Samples.each do |sample|
      blob = sample_blob_memory(sample[:path])
      assert ! (blob.likely_binary? || blob.binary?), "#{sample[:path]} is a binary file"
    end
  end

  def test_text
    assert fixture_blob_memory("Data/README").text?
    assert fixture_blob_memory("Data/md").text?
    assert sample_blob_memory("Shell/script.sh").text?
    assert fixture_blob_memory("Data/txt").text?
  end

  def test_image
    assert fixture_blob_memory("Binary/octocat.png").image?
    assert !fixture_blob_memory("Binary/octocat.ai").image?
    assert !fixture_blob_memory("Binary/octocat.psd").image?
  end

  def test_solid
    assert fixture_blob_memory("Binary/cube.stl").solid?
    assert fixture_blob_memory("Generic/stl/STL/cube2.stl").solid?
  end

  def test_csv
    assert sample_blob_memory("CSV/cars.csv").csv?
  end

  def test_pdf
    assert fixture_blob_memory("Binary/foo.pdf").pdf?
  end

  def test_viewable
    assert fixture_blob_memory("Data/README").viewable?
    assert sample_blob_memory("Ruby/foo.rb").viewable?
    assert sample_blob_memory("Perl/script.pl").viewable?
    assert !fixture_blob_memory("Binary/linguist.gem").viewable?
    assert !fixture_blob_memory("Binary/octocat.ai").viewable?
    assert !fixture_blob_memory("Binary/octocat.png").viewable?
  end

  # Generated-file detection across every supported generator/toolchain.
  def test_generated
    assert !fixture_blob_memory("Data/README").generated?
    # Catch generated checks that don't return a boolean when they don't match
    refute_nil fixture_blob_memory("Data/README").generated?
    # Generated .NET Docfiles
    assert sample_blob_memory("XML/net_docfile.xml").generated?
    # Long line
    assert !sample_blob_memory("JavaScript/uglify.js").generated?
    # Inlined JS, but mostly code
    assert !sample_blob_memory("JavaScript/json2_backbone.js").generated?
    # Minified JS
    assert !sample_blob_memory("JavaScript/jquery-1.6.1.js").generated?
    assert sample_blob_memory("JavaScript/jquery-1.6.1.min.js").generated?
    assert sample_blob_memory("JavaScript/jquery-1.4.2.min.js").generated?
    # Go lockfiles
    assert sample_blob_memory("TOML/filenames/Gopkg.lock").generated?
    assert sample_blob_memory("YAML/filenames/glide.lock").generated?
    # Cargo generated Cargo.lock file
    assert sample_blob_memory("TOML/filenames/Cargo.lock").generated?
    # Composer generated composer.lock file
    assert sample_blob_memory("JSON/filenames/composer.lock").generated?
    # Nix generated flake.lock file
    assert sample_blob_memory("JSON/filenames/flake.lock").generated?
    # Bazel generated bzlmod lockfile
    assert sample_blob_memory("JSON/filenames/MODULE.bazel.lock").generated?
    # Deno generated deno.lock file
    assert sample_blob_memory("JSON/filenames/deno.lock").generated?
    # pixi lockfile
    assert sample_blob_memory("YAML/filenames/pixi.lock").generated?
    # pnpm lockfile
    assert fixture_blob_memory("YAML/pnpm-lock.yaml").generated?
    # PEG.js-generated parsers
    assert sample_blob_memory("JavaScript/parser.js").generated?
    # Generated PostScript
    assert !sample_blob_memory("PostScript/sierpinski.ps").generated?
    # These examples are too basic to tell
    assert !sample_blob_memory("JavaScript/hello.js").generated?
    assert sample_blob_memory("JavaScript/intro-old.js").generated?
    assert sample_blob_memory("JavaScript/classes-old.js").generated?
    assert sample_blob_memory("JavaScript/intro.js").generated?
    assert sample_blob_memory("JavaScript/classes.js").generated?
    assert sample_blob_memory("JavaScript/ccalc-lex.js").generated?
    assert sample_blob_memory("JavaScript/ccalc-parse.js").generated?
    # Protocol Buffer generated code
    assert sample_blob_memory("C++/protocol-buffer.pb.h").generated?
    assert sample_blob_memory("C++/protocol-buffer.pb.cc").generated?
    assert sample_blob_memory("Java/ProtocolBuffer.java").generated?
    assert sample_blob_memory("Python/protocol_buffer_pb2.py").generated?
    assert sample_blob_memory("Go/api.pb.go").generated?
    assert sample_blob_memory("Go/embedded.go").generated?
    assert sample_blob_memory("Go/oapi-codegen.go").generated?
    assert sample_blob_memory("JavaScript/proto.js").generated?
    assert sample_blob_memory("TypeScript/proto.ts").generated?
    assert sample_blob_memory("PHP/ProtobufGenerated.php").generated?
    # Apache Thrift generated code
    assert sample_blob_memory("Python/gen-py-linguist-thrift.py").generated?
    assert sample_blob_memory("Go/gen-go-linguist-thrift.go").generated?
    assert sample_blob_memory("Java/gen-java-linguist-thrift.java").generated?
    assert sample_blob_memory("JavaScript/gen-js-linguist-thrift.js").generated?
    assert sample_blob_memory("Ruby/gen-rb-linguist-thrift.rb").generated?
    assert sample_blob_memory("Objective-C/gen-cocoa-linguist-thrift.m").generated?
    assert sample_blob_memory("PHP/ThriftGenerated.php").generated?
    # Generated JNI
    assert sample_blob_memory("C/jni_layer.h").generated?
    # Minified CSS
    assert !sample_blob_memory("CSS/bootstrap.css").generated?
    assert sample_blob_memory("CSS/bootstrap.min.css").generated?
    # Generated VCR
    assert sample_blob_memory("YAML/vcr_cassette.yml").generated?
    # Generated by Zephir
    assert !sample_blob_memory("Zephir/Router.zep").generated?
    # Go vendored dependencies
    refute sample_blob("vendor/vendor.json").generated?
    assert sample_blob("vendor/github.com/kr/s3/sign.go").generated?
    refute fixture_blob("go/food_vendor/candy.go").generated?
    # Cython-generated C/C++
    assert sample_blob_memory("C/sgd_fast.c").generated?
    assert sample_blob_memory("C++/wrapper_inner.cpp").generated?
    # Unity3D-generated metadata
    assert sample_blob_memory("Unity3D Asset/Tiles.meta").generated?
    # Racc-generated Ruby
    assert sample_blob_memory("Ruby/racc.rb").generated?
    # protobuf/grpc-plugin C++
    assert sample_blob_memory("C++/hello.grpc.pb.h").generated?
    assert sample_blob_memory("C++/grpc.pb.cc").generated?
    # Generated HTML
    assert sample_blob_memory("HTML/pkgdown.html").generated?
    assert sample_blob_memory("HTML/pages.html").generated?
    assert fixture_blob_memory("HTML/mandoc.html").generated?
    assert fixture_blob_memory("HTML/node78.html").generated?
    # Generated Pascal _TLB file
    assert sample_blob_memory("Pascal/lazcomlib_1_0_tlb.pas").generated?
    # Sorbet RBI generated by Tapioca
    assert sample_blob_memory("Ruby/rails@7.0.3.1.rbi").generated?
    assert sample_blob_memory("Ruby/rendering.rbi").generated?
    assert sample_blob_memory("Ruby/actionmailer.rbi").generated?
    # SQLx query files
    assert fixture_blob_memory("Rust/.sqlx/query-2b8b1aae3740a05cb7179be9c7d5af30e8362c3cba0b07bc18fa32ff1a2232cc.json").generated?
  end

  def test_vendored
    assert !fixture_blob_memory("Data/README").vendored?
    # Go fixtures
    assert sample_blob("Go/testdata/foo.yml").vendored?
  end

  # Every sample must detect as its directory's language; every fixture
  # likewise, except Data (no language), Generated, and Generic fixtures.
  def test_language
    allowed_failures = {
      "#{samples_path}/C/rpc.h" => ["C", "C++"],
    }
    Samples.each do |sample|
      blob = sample_blob_memory(sample[:path])
      assert blob.language, "No language for #{sample[:path]}"
      fs_name = blob.language.fs_name ? blob.language.fs_name : blob.language.name
      if allowed_failures.has_key? sample[:path]
        # Failures are reasonable when a file is fully valid in more than one language.
        assert allowed_failures[sample[:path]].include?(sample[:language]), blob.name
      else
        assert_equal sample[:language], fs_name, blob.name
      end
    end

    # Test language detection for files which shouldn't be used as samples
    root = File.expand_path('../fixtures', __FILE__)
    Dir.entries(root).each do |language|
      next if language == '.' || language == '..' || language == 'Binary' ||
              File.basename(language) == 'ace_modes.json'

      # Each directory contains test files of a language
      dirname = File.join(root, language)
      Dir.entries(dirname).each do |filename|
        # By default blob search the file in the samples;
        # thus, we need to give it the absolute path
        filepath = File.join(dirname, filename)
        next unless File.file?(filepath)

        blob = fixture_blob_memory(filepath)
        if language == 'Data'
          assert blob.language.nil?, "A language was found for #{filepath}"
        elsif language == 'Generated'
          assert blob.generated?, "#{filepath} is not a generated file"
        elsif language == 'Generic'
          assert !blob.language, "#{filepath} should not match a language"
        else
          fs_name = blob.language.fs_name ? blob.language.fs_name : blob.language.name
          if allowed_failures.has_key? filepath
            assert allowed_failures[filepath].include?(fs_name), filepath
          else
            assert blob.language, "No language for #{filepath}"
            assert_equal language, fs_name, filepath
          end
        end
      end
    end
  end

  def test_minified_files_not_safe_to_highlight
    assert !sample_blob_memory("JavaScript/jquery-1.6.1.min.js").safe_to_colorize?
  end

  # empty? is true only for nil or "" data — whitespace counts as content.
  def test_empty
    blob = Struct.new(:data) { include Linguist::BlobHelper }
    assert blob.new("").empty?
    assert blob.new(nil).empty?
    refute blob.new(" ").empty?
    refute blob.new("nope").empty?
  end

  # Stats inclusion: generated, data, prose, vendored and documentation
  # blobs are excluded, and per-blob detectable? overrides (as set via
  # .gitattributes) can flip prose in but never force excluded blobs in.
  def test_include_in_language_stats
    generated = sample_blob_memory("CSS/bootstrap.min.css")
    assert_predicate generated, :generated?
    refute_predicate generated, :include_in_language_stats?

    data = sample_blob_memory("Ant Build System/filenames/ant.xml")
    assert_equal :data, data.language.type
    refute_predicate data, :include_in_language_stats?

    prose = sample_blob_memory("Markdown/tender.md")
    assert_equal :prose, prose.language.type
    refute_predicate prose, :include_in_language_stats?

    included = sample_blob_memory("HTML/pages.html")
    refute_predicate included, :include_in_language_stats?

    # Test detectable override (i.e by .gitattributes)
    def prose.detectable?; true end
    assert_predicate prose, :include_in_language_stats?

    included_not_detectable = included.clone()
    def included_not_detectable.detectable?; false end
    refute_predicate included_not_detectable, :include_in_language_stats?

    # Test not included if vendored, documentation or generated overridden
    # even if detectable
    included_vendored = included.clone()
    def included_vendored.vendored?; true end
    refute_predicate included_vendored, :include_in_language_stats?
    def included_vendored.detectable?; true end
    refute_predicate included_vendored, :include_in_language_stats?

    included_documentation = included.clone()
    def included_documentation.documentation?; true end
    refute_predicate included_documentation, :include_in_language_stats?
    def included_documentation.detectable?; true end
    refute_predicate included_documentation, :include_in_language_stats?

    included_generated = included.clone()
    def included_generated.generated?; true end
    refute_predicate included_generated, :include_in_language_stats?
    def included_generated.detectable?; true end
    refute_predicate included_generated, :include_in_language_stats?
  end
end
| ruby | MIT | a7e40d31e271f6747fa1234df37b5fff3e6e2406 | 2026-01-04T15:37:32.851738Z | false |
github-linguist/linguist | https://github.com/github-linguist/linguist/blob/a7e40d31e271f6747fa1234df37b5fff3e6e2406/test/test_heuristics.rb | test/test_heuristics.rb | require_relative "./helper"
class TestHeuristics < Minitest::Test
include Linguist
# Read a sample file's raw contents from the samples directory.
def fixture(name)
  path = File.join(samples_path, name)
  File.read(path)
end
# Build a FileBlob for +name+ (taken as-is if the path exists, otherwise
# resolved relative to the samples directory). When +alt_name+ is given,
# the blob's @path is overridden so a heuristic keyed on file extension can
# be exercised with content from a differently named sample.
def file_blob(name, alt_name=nil)
  path = File.exist?(name) ? name : File.join(samples_path, name)
  blob = FileBlob.new(path)
  blob.instance_variable_set(:@path, alt_name) unless alt_name.nil?
  blob
end
# Collect every sample and fixture file for +language_name+ matching the
# +file+ glob, excluding the special "filenames" samples directory and any
# symlinks. Asserts that at least one file matched so a typo in a language
# or glob name fails loudly instead of silently skipping coverage.
def all_fixtures(language_name, file="*")
  matches = Dir.glob("#{samples_path}/#{language_name}/#{file}") +
            Dir.glob("#{fixtures_path}/#{language_name}/#{file}")
  matches -= ["#{samples_path}/#{language_name}/filenames"]
  matches = matches.reject { |f| File.symlink?(f) }
  assert !matches.empty?, "no fixtures for #{language_name} #{file}"
  matches
end
def test_no_match
  # With an empty candidate list, heuristics must return no result at all.
  blob = file_blob("JavaScript/namespace.js")
  assert_equal [], Heuristics.call(blob, [])
end
# A symlinked sample reads as empty content, so no heuristic should fire.
def test_symlink_empty
  assert_equal [], Heuristics.call(file_blob("Markdown/symlink.md"), [Language["Markdown"]])
end

# If a heuristic regexp times out (Regexp::TimeoutError, Ruby 3.2+), the
# error must be swallowed and treated as "no match" rather than raised.
def test_no_match_if_regexp_timeout
  skip("This test requires Ruby 3.2.0 or later") if Gem::Version.new(RUBY_VERSION) < Gem::Version.new('3.2.0')
  # Mocha stub: force every Regexp#match? call to raise the timeout.
  Regexp.any_instance.stubs(:match?).raises(Regexp::TimeoutError)
  assert_equal [], Heuristics.call(file_blob("#{fixtures_path}/Generic/stl/STL/cube1.stl"), [Language["STL"]])
end
# alt_name is a file name that will be used instead of the file name of the
# original sample. This is used to force a sample to go through a specific
# heuristic even if its extension doesn't match.
#
# +hash+ maps an expected outcome — a language name, an array of names, or
# nil for "no match" — to one or more blob paths; every listed blob must
# disambiguate to exactly that outcome against the full candidate set.
def assert_heuristics(hash, alt_name=nil)
  candidates = hash.keys.map { |l| Language[l] }
  hash.each do |language, blobs|
    expected =
      case language
      when nil   then []
      when Array then language.map { |l| Language[l] }
      else            [Language[language]]
      end
    blobs = Array(blobs)
    assert blobs.length >= 1, "Expected at least 1 blob for #{language}"
    blobs.each do |blob|
      actual = Heuristics.call(file_blob(blob, alt_name), candidates)
      assert_equal expected, actual, "Failed for #{blob}"
    end
  end
end
def test_detect_still_works_if_nothing_matches
  # hello.m triggers no decisive heuristic; full detection should still
  # resolve it to Objective-C.
  path = File.join(samples_path, "Objective-C/hello.m")
  blob = Linguist::FileBlob.new(path)
  assert_equal Language["Objective-C"], Linguist.detect(blob)
end
# Every extension referenced by a heuristic rule must also be registered in
# languages.yml for each of the rule's languages — either as an extension
# or as the suffix of one of its filename entries.
def test_all_extensions_are_listed
  # `each`, not `all?`: the assertions inside do the checking, and the
  # block's return value was never meaningful.
  Heuristics.all.each do |rule|
    rule.languages.each do |lang|
      unlisted = rule.extensions.reject do |ext|
        # BUG FIX: the original used `filenames.select { ... }`, which
        # returns an Array — truthy even when empty — so the filename
        # fallback always "matched" and this test could never fail.
        # `any?` performs the intended boolean check.
        lang.extensions.include?(ext) ||
          lang.filenames.any? { |n| n.downcase.end_with?(ext.downcase) }
      end
      assert_equal [], unlisted, (<<~EOF).chomp
        The extension '#{unlisted.first}' is not assigned to #{lang.name}.
        Add it to `languages.yml` or update the heuristic which uses it
      EOF
    end
  end
end
# Roff vs. Roff Manpage disambiguation for every man-section extension
# (.1 through .9 plus the suffixed variants like .1in, .3pm, .3qt).
# Numbered sections also have Generic/<n>/ fixtures including a nil
# ("no match") bucket.
def test_1_by_heuristics
  n = 1
  assert_heuristics({
    "Roff Manpage" => all_fixtures("Roff Manpage") + Dir.glob("#{fixtures_path}/Generic/#{n}/Roff Manpage/*"),
    "Roff" => all_fixtures("Roff") + Dir.glob("#{fixtures_path}/Generic/#{n}/Roff/*"),
    nil => Dir.glob("#{fixtures_path}/Generic/#{n}/nil/*")
  }, alt_name="man.#{n}")
end

def test_1in_by_heuristics
  assert_heuristics({
    "Roff Manpage" => all_fixtures("Roff Manpage"),
    "Roff" => all_fixtures("Roff")
  }, alt_name="man.1in")
end

def test_1m_by_heuristics
  assert_heuristics({
    "Roff Manpage" => all_fixtures("Roff Manpage"),
    "Roff" => all_fixtures("Roff")
  }, alt_name="man.1m")
end

def test_1x_by_heuristics
  assert_heuristics({
    "Roff Manpage" => all_fixtures("Roff Manpage"),
    "Roff" => all_fixtures("Roff")
  }, alt_name="man.1x")
end

def test_2_by_heuristics
  n = 2
  assert_heuristics({
    "Roff Manpage" => all_fixtures("Roff Manpage") + Dir.glob("#{fixtures_path}/Generic/#{n}/Roff Manpage/*"),
    "Roff" => all_fixtures("Roff") + Dir.glob("#{fixtures_path}/Generic/#{n}/Roff/*"),
    nil => Dir.glob("#{fixtures_path}/Generic/#{n}/nil/*")
  }, alt_name="man.#{n}")
end

def test_3_by_heuristics
  n = 3
  assert_heuristics({
    "Roff Manpage" => all_fixtures("Roff Manpage") + Dir.glob("#{fixtures_path}/Generic/#{n}/Roff Manpage/*"),
    "Roff" => all_fixtures("Roff") + Dir.glob("#{fixtures_path}/Generic/#{n}/Roff/*"),
    nil => Dir.glob("#{fixtures_path}/Generic/#{n}/nil/*")
  }, alt_name="man.#{n}")
end

def test_3in_by_heuristics
  assert_heuristics({
    "Roff Manpage" => all_fixtures("Roff Manpage"),
    "Roff" => all_fixtures("Roff")
  }, alt_name="man.3in")
end

def test_3m_by_heuristics
  assert_heuristics({
    "Roff Manpage" => all_fixtures("Roff Manpage"),
    "Roff" => all_fixtures("Roff")
  }, alt_name="man.3m")
end

def test_3p_by_heuristics
  assert_heuristics({
    "Roff Manpage" => all_fixtures("Roff Manpage"),
    "Roff" => all_fixtures("Roff")
  }, alt_name="man.3p")
end

def test_3pm_by_heuristics
  assert_heuristics({
    "Roff Manpage" => all_fixtures("Roff Manpage"),
    "Roff" => all_fixtures("Roff")
  }, alt_name="man.3pm")
end

def test_3qt_by_heuristics
  assert_heuristics({
    "Roff Manpage" => all_fixtures("Roff Manpage"),
    "Roff" => all_fixtures("Roff")
  }, alt_name="man.3qt")
end

def test_3x_by_heuristics
  assert_heuristics({
    "Roff Manpage" => all_fixtures("Roff Manpage"),
    "Roff" => all_fixtures("Roff")
  }, alt_name="man.3x")
end

def test_4_by_heuristics
  n = 4
  assert_heuristics({
    "Roff Manpage" => all_fixtures("Roff Manpage") + Dir.glob("#{fixtures_path}/Generic/#{n}/Roff Manpage/*"),
    "Roff" => all_fixtures("Roff") + Dir.glob("#{fixtures_path}/Generic/#{n}/Roff/*"),
    nil => Dir.glob("#{fixtures_path}/Generic/#{n}/nil/*")
  }, alt_name="man.#{n}")
end

def test_5_by_heuristics
  n = 5
  assert_heuristics({
    "Roff Manpage" => all_fixtures("Roff Manpage") + Dir.glob("#{fixtures_path}/Generic/#{n}/Roff Manpage/*"),
    "Roff" => all_fixtures("Roff") + Dir.glob("#{fixtures_path}/Generic/#{n}/Roff/*"),
    nil => Dir.glob("#{fixtures_path}/Generic/#{n}/nil/*")
  }, alt_name="man.#{n}")
end

def test_6_by_heuristics
  n = 6
  assert_heuristics({
    "Roff Manpage" => all_fixtures("Roff Manpage") + Dir.glob("#{fixtures_path}/Generic/#{n}/Roff Manpage/*"),
    "Roff" => all_fixtures("Roff") + Dir.glob("#{fixtures_path}/Generic/#{n}/Roff/*"),
    nil => Dir.glob("#{fixtures_path}/Generic/#{n}/nil/*")
  }, alt_name="man.#{n}")
end

def test_7_by_heuristics
  n = 7
  assert_heuristics({
    "Roff Manpage" => all_fixtures("Roff Manpage") + Dir.glob("#{fixtures_path}/Generic/#{n}/Roff Manpage/*"),
    "Roff" => all_fixtures("Roff") + Dir.glob("#{fixtures_path}/Generic/#{n}/Roff/*"),
    nil => Dir.glob("#{fixtures_path}/Generic/#{n}/nil/*")
  }, alt_name="man.#{n}")
end

def test_8_by_heuristics
  n = 8
  assert_heuristics({
    "Roff Manpage" => all_fixtures("Roff Manpage") + Dir.glob("#{fixtures_path}/Generic/#{n}/Roff Manpage/*"),
    "Roff" => all_fixtures("Roff") + Dir.glob("#{fixtures_path}/Generic/#{n}/Roff/*"),
    nil => Dir.glob("#{fixtures_path}/Generic/#{n}/nil/*")
  }, alt_name="man.#{n}")
end

def test_9_by_heuristics
  n = 9
  assert_heuristics({
    "Roff Manpage" => all_fixtures("Roff Manpage") + Dir.glob("#{fixtures_path}/Generic/#{n}/Roff Manpage/*"),
    "Roff" => all_fixtures("Roff") + Dir.glob("#{fixtures_path}/Generic/#{n}/Roff/*"),
    nil => Dir.glob("#{fixtures_path}/Generic/#{n}/nil/*")
  }, alt_name="man.#{n}")
end
# Per-extension disambiguation tests (.action through .csl). Each test
# lists every language claiming the extension; a nil key marks samples for
# which no heuristic currently matches (usually a missing heuristic).
def test_action_by_heuristics
  assert_heuristics({
    "ROS Interface" => all_fixtures("ROS Interface", "*.action"),
  })
end

def test_al_by_heuristics
  assert_heuristics({
    "AL" => all_fixtures("AL", "*.al"),
    "Perl" => all_fixtures("Perl", "*.al")
  })
end

def test_app_by_heuristics
  assert_heuristics({
    "Erlang" => Dir.glob("#{fixtures_path}/Generic/app/Erlang/*"),
    nil => Dir.glob("#{fixtures_path}/Generic/app/nil/*")
  })
end

def test_as_by_heuristics
  assert_heuristics({
    "ActionScript" => all_fixtures("ActionScript", "*.as"),
    nil => all_fixtures("AngelScript", "*.as")
  })
end

def test_asc_by_heuristics
  assert_heuristics({
    "AsciiDoc" => all_fixtures("AsciiDoc"),
    "AGS Script" => all_fixtures("AGS Script"),
    "Public Key" => all_fixtures("Public Key")
  }, "test.asc")
end

def test_asm_by_heuristics
  assert_heuristics({
    "Motorola 68K Assembly" => all_fixtures("Motorola 68K Assembly", "*.asm"),
    "Assembly" => all_fixtures("Assembly", "*.asm")
  })
end

def test_asy_by_heuristics
  assert_heuristics({
    "Asymptote" => all_fixtures("Asymptote", "*.asy"),
    "LTspice Symbol" => all_fixtures("LTspice Symbol", "*.asy")
  })
end

def test_bas_by_heuristics
  assert_heuristics({
    "B4X" => all_fixtures("B4X", "*.bas"),
    "FreeBASIC" => all_fixtures("FreeBASIC", "*.bas"),
    "BASIC" => all_fixtures("BASIC", "*.bas"),
    "VBA" => all_fixtures("VBA", "*.bas"),
    "Visual Basic 6.0" => all_fixtures("Visual Basic 6.0", "*.bas"),
    "QuickBASIC" => all_fixtures("QuickBASIC", "*.bas")
  })
end

def test_bb_by_heuristics
  assert_heuristics({
    "BitBake" => all_fixtures("BitBake", "*.bb"),
    "BlitzBasic" => all_fixtures("BlitzBasic", "*.bb")
  })
end

def test_bf_by_heuristics
  assert_heuristics({
    "Beef" => all_fixtures("Beef", "*.bf"),
    "Brainfuck" => all_fixtures("Brainfuck", "*.bf"),
    "HyPhy" => all_fixtures("HyPhy", "*.bf"),
    nil => all_fixtures("Befunge", "*.bf"),
  })
end

def test_bi_by_heuristics
  assert_heuristics({
    "FreeBASIC" => all_fixtures("FreeBASIC", "*.bi")
  })
end

def test_bs_by_heuristics
  assert_heuristics({
    "BrighterScript" => all_fixtures("BrighterScript", "*.bs"),
    "Bikeshed" => all_fixtures("Bikeshed", "*.bs")
  })
end

def test_bst_by_heuristics
  assert_heuristics({
    "BibTeX Style" => all_fixtures("BibTeX Style", "*.bst"),
    "BuildStream" => all_fixtures("BuildStream", "*.bst")
  })
end

def test_builds_by_heuristics
  assert_heuristics({
    nil => all_fixtures("Text"),
    "XML" => all_fixtures("XML", "*.builds")
  }, "test.builds")
end

def test_cairo_by_heuristics
  assert_heuristics({
    "Cairo Zero" => all_fixtures("Cairo Zero"),
    "Cairo" => all_fixtures("Cairo")
  })
end

def test_ch_by_heuristics
  assert_heuristics({
    "xBase" => all_fixtures("xBase", "*.ch"),
    # Missing heuristic for Charity
    nil => all_fixtures("Charity", "*.ch")
  })
end

def test_cl_by_heuristics
  assert_heuristics({
    "Common Lisp" => all_fixtures("Common Lisp", "*.cl"),
    "OpenCL" => all_fixtures("OpenCL", "*.cl")
  })
end

def test_cls_by_heuristics
  assert_heuristics({
    "Visual Basic 6.0" => all_fixtures("Visual Basic 6.0", "*.cls"),
    "VBA" => all_fixtures("VBA", "*.cls"),
    "TeX" => all_fixtures("TeX", "*.cls"),
    "ObjectScript" => all_fixtures("ObjectScript", "*.cls"),
    # Missing heuristics
    nil => all_fixtures("Apex", "*.cls") + all_fixtures("OpenEdge ABL", "*.cls"),
  })
end

def test_cmp_by_heuristics
  assert_heuristics({
    "Gerber Image" => all_fixtures("Gerber Image", "*"),
    nil => all_fixtures("Text", "*"),
  }, alt_name="test.cmp")
  assert_heuristics({
    "Gerber Image" => Dir.glob("#{fixtures_path}/Generic/cmp/Gerber Image/*"),
    nil => Dir.glob("#{fixtures_path}/Generic/cmp/nil/*"),
  })
end

def test_cs_by_heuristics
  assert_heuristics({
    "C#" => all_fixtures("C#", "*.cs"),
    "Smalltalk" => all_fixtures("Smalltalk", "*.cs")
  })
end

def test_csc_by_heuristics
  assert_heuristics({
    "GSC" => all_fixtures("GSC", "*.csc")
  })
end

def test_csl_by_heuristics
  assert_heuristics({
    "Kusto" => all_fixtures("Kusto", "*.csl"),
    "XML" => all_fixtures("XML", "*.csl")
  })
end
# Per-extension disambiguation tests (.d through .m4), same structure as
# the earlier group: expected language(s) (or nil for "no match") mapped to
# every sample/fixture sharing the ambiguous extension.
def test_d_by_heuristics
  assert_heuristics({
    "D" => all_fixtures("D", "*.d"),
    "DTrace" => all_fixtures("DTrace", "*.d"),
    "Makefile" => all_fixtures("Makefile", "*.d"),
  }, "test.d")
end

def test_dsp_by_heuristics
  assert_heuristics({
    "Faust" => all_fixtures("Faust", "*.dsp"),
    "Microsoft Developer Studio Project" => all_fixtures("Microsoft Developer Studio Project"),
  })
end

def test_e_by_heuristics
  assert_heuristics({
    "E" => all_fixtures("E", "*.E"),
    "Eiffel" => all_fixtures("Eiffel", "*.e"),
    "Euphoria" => all_fixtures("Euphoria", "*.e")
  })
end

def test_ecl_by_heuristics
  assert_heuristics({
    "ECL" => all_fixtures("ECL", "*.ecl"),
    "ECLiPSe" => all_fixtures("ECLiPSe", "*.ecl")
  })
end

def test_es_by_heuristics
  assert_heuristics({
    "Erlang" => all_fixtures("Erlang", "*.es"),
    "JavaScript" => all_fixtures("JavaScript", "*.es")
  })
end

def test_ex_by_heuristics
  assert_heuristics({
    "Elixir" => all_fixtures("Elixir", "*.ex"),
    "Euphoria" => all_fixtures("Euphoria", "*.ex")
  })
end

def test_f_by_heuristics
  assert_heuristics({
    "Fortran" => all_fixtures("Fortran", "*.f") + all_fixtures("Fortran", "*.for"),
    "Forth" => all_fixtures("Forth", "*.f") + all_fixtures("Forth", "*.for")
  }, alt_name="main.f")
end

def test_for_by_heuristics
  assert_heuristics({
    "Fortran" => all_fixtures("Fortran", "*.f") + all_fixtures("Fortran", "*.for"),
    "Forth" => all_fixtures("Forth", "*.f") + all_fixtures("Forth", "*.for"),
    nil => all_fixtures("Formatted", "*.for")
  }, alt_name="main.for")
end

def test_fr_by_heuristics
  assert_heuristics({
    "Frege" => all_fixtures("Frege", "*.fr"),
    "Forth" => all_fixtures("Forth", "*.fr"),
    "Text" => all_fixtures("Text", "*.fr")
  })
end

def test_frm_by_heuristics
  assert_heuristics({
    "VBA" => all_fixtures("VBA", "*.frm"),
    "Visual Basic 6.0" => all_fixtures("Visual Basic 6.0", "*.frm"),
    "INI" => all_fixtures("INI", "*.frm"),
  })
end

def test_fs_by_heuristics
  assert_heuristics({
    "F#" => all_fixtures("F#", "*.fs"),
    "Forth" => all_fixtures("Forth", "*.fs"),
    "GLSL" => all_fixtures("GLSL", "*.fs")
  })
end

def test_ftl_by_heuristics
  assert_heuristics({
    "Fluent" => all_fixtures("Fluent", "*.ftl"),
    "FreeMarker" => all_fixtures("FreeMarker", "*.ftl")
  }, alt_name="main.ftl")
end

def test_g_by_heuristics
  assert_heuristics({
    "GAP" => all_fixtures("GAP", "*.g*"),
    "G-code" => all_fixtures("G-code", "*.g")
  }, alt_name="test.g")
end

def test_gd_by_heuristics
  assert_heuristics({
    "GAP" => all_fixtures("GAP", "*.gd"),
    "GDScript" => all_fixtures("GDScript", "*.gd")
  })
end

def test_gml_by_heuristics
  assert_heuristics({
    "Game Maker Language" => all_fixtures("Game Maker Language", "*.gml"),
    "Graph Modeling Language" => all_fixtures("Graph Modeling Language", "*.gml"),
    "XML" => all_fixtures("XML", "*.gml")
  })
end

def test_gs_by_heuristics
  # These Genie samples are too ambiguous for the .gs heuristic to resolve.
  ambiguous = [
    "#{samples_path}/Genie/Class.gs",
    "#{samples_path}/Genie/Hello.gs",
  ]
  assert_heuristics({
    "GLSL" => all_fixtures("GLSL", "*.gs"),
    "Genie" => all_fixtures("Genie", "*.gs") - ambiguous,
    "Gosu" => all_fixtures("Gosu", "*.gs"),
  })
  assert_heuristics({
    nil => all_fixtures("JavaScript")
  }, alt_name="test.gs")
end

def test_gsc_by_heuristics
  assert_heuristics({
    "GSC" => all_fixtures("GSC", "*.gsc")
  })
end

def test_gsh_by_heuristics
  assert_heuristics({
    "GSC" => all_fixtures("GSC", "*.gsh")
  })
end

def test_gts_by_heuristics
  assert_heuristics({
    "Gerber Image" => all_fixtures("Gerber Image", "*.gts"),
    "Glimmer TS" => all_fixtures("Glimmer TS", "*.gts"),
  })
end

def test_h_by_heuristics
  assert_heuristics({
    "Objective-C" => all_fixtures("Objective-C", "*.h"),
    "C++" => all_fixtures("C++", "*.h"),
    # Default to C if the content is ambiguous
    "C" => all_fixtures("C", "*.h")
  })
end

def test_hh_by_heuristics
  assert_heuristics({
    "Hack" => all_fixtures("Hack", "*.hh"),
    nil => all_fixtures("C++", "*.hh")
  })
end

def test_html_by_heuristics
  assert_heuristics({
    "Ecmarkup" => all_fixtures("Ecmarkup", "*.html"),
    "HTML" => all_fixtures("HTML", "*.html")
  })
end

def test_i_by_heuristics
  assert_heuristics({
    "Motorola 68K Assembly" => all_fixtures("Motorola 68K Assembly", "*.i"),
    "SWIG" => all_fixtures("SWIG", "*.i"),
    "Assembly" => all_fixtures("Assembly", "*.i")
  })
end

def test_ice_by_heuristics
  assert_heuristics({
    "Slice" => all_fixtures("Slice", "*.ice"),
    "JSON" => all_fixtures("JSON", "*.ice")
  })
end

def test_inc_by_heuristics
  assert_heuristics({
    "Motorola 68K Assembly" => all_fixtures("Motorola 68K Assembly", "*.inc"),
    "NASL" => all_fixtures("NASL", "*.inc"),
    "Pascal" => all_fixtures("Pascal", "*.inc"),
    "PHP" => all_fixtures("PHP", "*.inc"),
    "POV-Ray SDL" => all_fixtures("POV-Ray SDL", "*.inc"),
    "SourcePawn" => all_fixtures("SourcePawn", "*.inc"),
    "Assembly" => all_fixtures("Assembly", "*.inc"),
    nil => all_fixtures("C++", "*.inc") +
           all_fixtures("HTML", "*.inc") +
           all_fixtures("Pawn", "*.inc") +
           all_fixtures("SQL", "*.inc")
  }, alt_name="foo.inc")
end

def test_json_by_heuristics
  assert_heuristics({
    "OASv2-json" => all_fixtures("OASv2-json", "*.json"),
    "OASv3-json" => all_fixtures("OASv3-json", "*.json"),
    "JSON" => all_fixtures("JSON", "*.json"),
  })
end

def test_k_by_heuristics
  assert_heuristics({
    "KCL" => all_fixtures("KCL", "*.k"),
    "KFramework" => all_fixtures("KFramework", "*.k")
  })
end

def test_l_by_heuristics
  assert_heuristics({
    "Common Lisp" => all_fixtures("Common Lisp", "*.l"),
    "Lex" => all_fixtures("Lex", "*.l"),
    "Roff" => all_fixtures("Roff", "*.l"),
    "PicoLisp" => all_fixtures("PicoLisp", "*.l")
  })
end

def test_lean_by_heuristics
  assert_heuristics({
    "Lean" => all_fixtures("Lean", "*.lean"),
    "Lean 4" => all_fixtures("Lean 4", "*.lean")
  })
end

def test_lisp_by_heuristics
  assert_heuristics({
    "Common Lisp" => all_fixtures("Common Lisp", "*.lisp") + all_fixtures("Common Lisp", "*.lsp"),
    "NewLisp" => all_fixtures("NewLisp", "*.lisp") + all_fixtures("NewLisp", "*.lsp")
  }, "main.lisp")
end

def test_lp_by_heuristics
  assert_heuristics({
    "Answer Set Programming" => all_fixtures("Answer Set Programming", "*.lp"),
    "Linear Programming" => all_fixtures("Linear Programming", "*.lp")
  })
end

def test_ls_by_heuristics
  assert_heuristics({
    "LiveScript" => all_fixtures("LiveScript", "*.ls"),
    "LoomScript" => all_fixtures("LoomScript", "*.ls")
  })
end

def test_lsp_by_heuristics
  assert_heuristics({
    "Common Lisp" => all_fixtures("Common Lisp", "*.lisp") + all_fixtures("Common Lisp", "*.lsp"),
    "NewLisp" => all_fixtures("NewLisp", "*.lisp") + all_fixtures("NewLisp", "*.lsp")
  }, "main.lsp")
end

def test_m4_by_heuristics
  assert_heuristics({
    "M4" => all_fixtures("M4", "*.m4"),
    "M4Sugar" => all_fixtures("M4Sugar", "*.m4")
  })
end
def test_m_by_heuristics
ambiguous = all_fixtures("Objective-C", "cocoa_monitor.m")
assert_heuristics({
"Objective-C" => all_fixtures("Objective-C", "*.m") - ambiguous,
"Mercury" => all_fixtures("Mercury", "*.m"),
"MUF" => all_fixtures("MUF", "*.m"),
"M" => all_fixtures("M", "MDB.m"),
"Wolfram Language" => all_fixtures("Wolfram Language", "*.m") - all_fixtures("Wolfram Language", "Problem12.m"),
"MATLAB" => all_fixtures("MATLAB", "create_ieee_paper_plots.m"),
"Limbo" => all_fixtures("Limbo", "*.m"),
nil => ambiguous
})
end
def test_man_by_heuristics
assert_heuristics({
"Roff Manpage" => all_fixtures("Roff Manpage"),
"Roff" => all_fixtures("Roff")
}, alt_name="man.man")
end
def test_mask_by_heuristics
assert_heuristics({
"Unity3D Asset" => all_fixtures("Unity3D Asset", "*.mask")
})
end
def test_mc_by_heuristics
assert_heuristics({
"Monkey C" => all_fixtures("Monkey C", "*.mc"),
"Win32 Message File" => all_fixtures("Win32 Message File", "*.mc")
})
end
def test_md_by_heuristics
assert_heuristics({
"Markdown" => all_fixtures("Markdown", "*.md"),
"GCC Machine Description" => all_fixtures("GCC Machine Description", "*.md")
})
end
def test_mdoc_by_heuristics
assert_heuristics({
"Roff Manpage" => all_fixtures("Roff Manpage"),
"Roff" => all_fixtures("Roff")
}, alt_name="man.mdoc")
end
def test_ml_by_heuristics
ambiguous = [
"#{samples_path}/OCaml/date.ml",
"#{samples_path}/OCaml/common.ml",
"#{samples_path}/OCaml/sigset.ml",
"#{samples_path}/Standard ML/Foo.sig",
]
assert_heuristics({
"OCaml" => all_fixtures("OCaml") - ambiguous,
"Standard ML" => all_fixtures("Standard ML") - ambiguous,
nil => ambiguous
}, "test.ml")
end
def test_mod_by_heuristics
assert_heuristics({
"Modula-2" => all_fixtures("Modula-2", "*.mod"),
"NMODL" => all_fixtures("NMODL", "*.mod"),
"XML" => all_fixtures("XML", "*.mod"),
["Linux Kernel Module", "AMPL"] => all_fixtures("Linux Kernel Module", "*.mod"),
["Linux Kernel Module", "AMPL"] => all_fixtures("AMPL", "*.mod"),
})
end
def test_mojo_by_heuristics
assert_heuristics({
"Mojo" => all_fixtures("Mojo", "*.mojo"),
"XML" => all_fixtures("XML", "*.mojo"),
})
end
def test_ms_by_heuristics
assert_heuristics({
"Roff" => all_fixtures("Roff", "*.ms"),
"Unix Assembly" => all_fixtures("Unix Assembly", "*.ms"),
"MAXScript" => all_fixtures("MAXScript", "*.ms")
})
end
def test_msg_by_heuristics
assert_heuristics({
"OMNeT++ MSG" => all_fixtures("OMNeT++ MSG", "*.msg"),
"ROS Interface" => all_fixtures("ROS Interface", "*.msg"),
})
end
def test_n_by_heuristics
assert_heuristics({
"Roff" => all_fixtures("Roff", "*.n"),
"Nemerle" => all_fixtures("Nemerle", "*.n")
})
end
def test_ncl_by_heuristics
assert_heuristics({
"Gerber Image" => all_fixtures("Gerber Image", "*"),
"XML" => all_fixtures("XML", "*.ncl"),
"Text" => all_fixtures("Text", "*.ncl"),
"Nickel" => all_fixtures("Nickel", "*.ncl"),
"NCL" => all_fixtures("NCL", "*.ncl")
}, alt_name="test.ncl")
end
def test_nl_by_heuristics
assert_heuristics({
"NewLisp" => all_fixtures("NewLisp", "*.nl"),
"NL" => all_fixtures("NL", "*.nl")
})
end
def test_nr_by_heuristics
assert_heuristics({
"Noir" => all_fixtures("Noir", "*.nr"),
"Roff" => all_fixtures("Roff", "*.nr")
})
end
def test_nu_by_heuristics
assert_heuristics({
"Nushell" => all_fixtures("Nushell", "*.nu"),
"Nu" => all_fixtures("Nu", "*.nu")
})
end
def test_odin_by_heuristics
assert_heuristics({
"Object Data Instance Notation" => all_fixtures("Object Data Instance Notation", "*.odin"),
"Odin" => all_fixtures("Odin", "*.odin")
})
end
def test_p_by_heuristics
assert_heuristics({
"Gnuplot" => all_fixtures("Gnuplot"),
"OpenEdge ABL" => all_fixtures("OpenEdge ABL")
}, alt_name="test.p")
end
def test_php_by_heuristics
assert_heuristics({
"Hack" => all_fixtures("Hack", "*.php"),
"PHP" => all_fixtures("PHP", "*.php")
})
end
def test_pkl_by_heuristics
assert_heuristics({
"Pkl" => all_fixtures("Pkl", "*.pkl"),
"Pickle" => all_fixtures("Pickle", "*.pkl")
})
end
def test_pl_by_heuristics
assert_heuristics({
"Prolog" => all_fixtures("Prolog", "*.pl"),
"Perl" => ["Perl/oo1.pl", "Perl/oo2.pl", "Perl/oo3.pl", "Perl/fib.pl", "Perl/use5.pl"],
"Raku" => all_fixtures("Raku", "*.pl")
})
end
def test_plist_by_heuristics
assert_heuristics({
"OpenStep Property List" => all_fixtures("OpenStep Property List", "*.plist"),
"XML Property List" => all_fixtures("XML Property List", "*.plist")
})
end
def test_plt_by_heuristics
assert_heuristics({
"Prolog" => all_fixtures("Prolog", "*.plt"),
# Gnuplot lacks a heuristic
nil => all_fixtures("Gnuplot", "*.plt")
})
end
def test_pm_by_heuristics
assert_heuristics({
"Perl" => all_fixtures("Perl", "*.pm"),
"Raku" => all_fixtures("Raku", "*.pm"),
"X PixMap" => all_fixtures("X PixMap")
}, "test.pm")
end
def test_pod_by_heuristics
assert_heuristics({
"Pod" => all_fixtures("Pod", "*.pod"),
"Pod 6" => all_fixtures("Pod 6", "*.pod")
})
end
def test_pp_by_heuristics
assert_heuristics({
"Pascal" => all_fixtures("Pascal", "*.pp"),
"Puppet" => all_fixtures("Puppet", "*.pp") - ["#{samples_path}/Puppet/stages-example.pp", "#{samples_path}/Puppet/hiera_include.pp"]
})
end
def test_pro_by_heuristics
assert_heuristics({
"Proguard" => all_fixtures("Proguard", "*.pro"),
"Prolog" => all_fixtures("Prolog", "*.pro"),
"IDL" => all_fixtures("IDL", "*.pro"),
"INI" => all_fixtures("INI", "*.pro"),
"QMake" => all_fixtures("QMake", "*.pro")
})
end
def test_properties_by_heuristics
assert_heuristics({
"INI" => all_fixtures("INI", "*.properties"),
"Java Properties" => all_fixtures("Java Properties", "*.properties")
})
end
def test_q_by_heuristics
assert_heuristics({
"q" => all_fixtures("q", "*.q"),
"HiveQL" => all_fixtures("HiveQL", "*.q")
})
end
def test_qs_by_heuristics
assert_heuristics({
"Q#" => all_fixtures("Q#", "*.qs"),
"Qt Script" => all_fixtures("Qt Script", "*.qs")
})
end
def test_r_by_heuristics
assert_heuristics({
"R" => all_fixtures("R", "*.r") + all_fixtures("R", "*.R"),
"Rebol" => all_fixtures("Rebol", "*.r")
})
end
def test_re_by_heuristics
assert_heuristics({
"C++" => all_fixtures("C++", "*.re"),
"Reason" => all_fixtures("Reason", "*.re")
})
end
def test_res_by_heuristics
assert_heuristics({
"ReScript" => all_fixtures("ReScript", "*.res"),
nil => all_fixtures("XML", "*.res")
})
end
def test_resource_by_heuristics
assert_heuristics({
"RobotFramework" => all_fixtures("RobotFramework", "*.resource")
})
end
def test_rno_by_heuristics
assert_heuristics({
"RUNOFF" => all_fixtures("RUNOFF", "*.rno"),
"Roff" => all_fixtures("Roff", "*.rno")
})
end
def test_rpy_by_heuristics
assert_heuristics({
"Python" => all_fixtures("Python", "*.rpy"),
"Ren'Py" => all_fixtures("Ren'Py", "*.rpy")
})
end
def test_rs_by_heuristics
assert_heuristics({
"Rust" => all_fixtures("Rust", "*.rs"),
"RenderScript" => all_fixtures("RenderScript", "*.rs")
})
end
def test_s_by_heuristics
assert_heuristics({
"Motorola 68K Assembly" => all_fixtures("Motorola 68K Assembly", "*.s"),
"Assembly" => all_fixtures("Assembly", "*.s"),
"Unix Assembly" => all_fixtures("Unix Assembly", "*.s")
})
end
def test_sc_by_heuristics
assert_heuristics({
"SuperCollider" => all_fixtures("SuperCollider", "*.sc"),
"Scala" => all_fixtures("Scala", "*.sc")
})
end
def test_scd_by_heuristics
assert_heuristics({
"SuperCollider" => all_fixtures("SuperCollider", "*"),
"Markdown" => all_fixtures("Markdown", "*.scd")
}, alt_name="test.scd")
end
def test_scm_by_heuristics
assert_heuristics({
"Scheme" => all_fixtures("Scheme", "*.scm"),
"Tree-sitter Query" => all_fixtures("Tree-sitter Query", "*.scm")
})
end
def test_sol_by_heuristics
assert_heuristics({
"Gerber Image" => Dir.glob("#{fixtures_path}/Generic/sol/Gerber Image/*"),
"Solidity" => Dir.glob("#{fixtures_path}/Generic/sol/Solidity/*"),
nil => Dir.glob("#{fixtures_path}/Generic/sol/nil/*")
})
end
def test_sql_by_heuristics
assert_heuristics({
"SQL" => ["SQL/create_stuff.sql", "SQL/db.sql", "SQL/dual.sql"],
"PLpgSQL" => all_fixtures("PLpgSQL", "*.sql"),
"SQLPL" => ["SQLPL/trigger.sql"],
"PLSQL" => all_fixtures("PLSQL", "*.sql")
})
end
def test_srt_by_heuristics
assert_heuristics({
"SubRip Text" => all_fixtures("SubRip Text", "*.srt")
})
end
def test_srv_by_heuristics
assert_heuristics({
"ROS Interface" => all_fixtures("ROS Interface", "*.srv"),
})
end
def test_st_by_heuristics
assert_heuristics({
"StringTemplate" => all_fixtures("StringTemplate", "*.st"),
"Smalltalk" => all_fixtures("Smalltalk", "*.st")
})
end
def test_star_by_heuristics
assert_heuristics({
"STAR" => all_fixtures("STAR", "*.star"),
"Starlark" => all_fixtures("Starlark", "*.star")
})
end
def test_stl_by_heuristics
assert_heuristics({
"STL" => Dir.glob("#{fixtures_path}/Generic/stl/STL/*"),
nil => Dir.glob("#{fixtures_path}/Generic/stl/nil/*")
})
end
def test_svx_by_heuristics
assert_heuristics({
"Survex data" => all_fixtures("Survex data", "*.svx"),
"mdsvex" => all_fixtures("mdsvex", "*.svx")
})
end
def test_sw_by_heuristics
assert_heuristics({
"Sway" => all_fixtures("Sway", "*.sw"),
"XML" => all_fixtures("XML", "*.sw")
})
end
def test_t_by_heuristics
# Turing not fully covered.
assert_heuristics({
"Turing" => all_fixtures("Turing", "*.t"),
"Perl" => all_fixtures("Perl", "*.t"),
"Raku" => ["Raku/01-dash-uppercase-i.t", "Raku/01-parse.t", "Raku/advent2009-day16.t",
"Raku/basic-open.t", "Raku/calendar.t", "Raku/for.t", "Raku/hash.t",
"Raku/listquote-whitespace.t"]
})
end
def test_tact_by_heuristics
assert_heuristics({
"Tact" => all_fixtures("Tact", "*.tact"),
"JSON" => all_fixtures("JSON", "*.tact"),
})
end
def test_tag_by_heuristics
assert_heuristics({
"Java Server Pages" => Dir.glob("#{fixtures_path}/Generic/tag/Java Server Pages/*"),
nil => Dir.glob("#{fixtures_path}/Generic/tag/nil/*")
})
end
def test_tl_by_heuristics
assert_heuristics({
"Teal" => all_fixtures("Teal", "*.tl"),
"Type Language" => all_fixtures("Type Language", "*.tl")
})
end
def test_tlv_by_heuristics
assert_heuristics({
"TL-Verilog" => all_fixtures("TL-Verilog", "*.tlv"),
})
end
def test_toc_by_heuristics
assert_heuristics({
"TeX" => all_fixtures("TeX", "*.toc"),
"World of Warcraft Addon Data" => all_fixtures("World of Warcraft Addon Data", "*.toc")
})
end
def test_tpl_by_heuristics
assert_heuristics({
"Go Template" => all_fixtures("Go Template", "*.tpl"),
"Smarty" => all_fixtures("Smarty", "*.tpl")
})
end
def test_ts_by_heuristics
assert_heuristics({
"TypeScript" => all_fixtures("TypeScript", "*.ts"),
"XML" => all_fixtures("XML", "*.ts")
})
end
def test_tsp_by_heuristics
assert_heuristics({
"TypeSpec" => all_fixtures("TypeSpec", "*.tsp"),
"TSPLIB data" => all_fixtures("TSPLIB data", "*.tsp")
})
end
def test_tst_by_heuristics
assert_heuristics({
"GAP" => all_fixtures("GAP", "*.tst"),
"Scilab" => all_fixtures("Scilab", "*.tst")
})
end
def test_tsx_by_heuristics
assert_heuristics({
"TSX" => all_fixtures("TSX", "*.tsx"),
"XML" => all_fixtures("XML", "*.tsx")
})
end
def test_txt_by_heuristics
assert_heuristics({
"Adblock Filter List" => all_fixtures("Adblock Filter List", "*.txt"),
"Vim Help File" => all_fixtures("Vim Help File", "*.txt"),
"Text" => all_fixtures("Text", "*.txt")
})
end
def test_typ_by_heuristics
assert_heuristics({
"Typst" => all_fixtures("Typst", "*.typ"),
"XML" => all_fixtures("XML", "*.typ")
})
end
def test_url_by_heuristics
| ruby | MIT | a7e40d31e271f6747fa1234df37b5fff3e6e2406 | 2026-01-04T15:37:32.851738Z | true |
github-linguist/linguist | https://github.com/github-linguist/linguist/blob/a7e40d31e271f6747fa1234df37b5fff3e6e2406/test/test_strategies.rb | test/test_strategies.rb | require_relative "./helper"
class TestStrategies < Minitest::Test
include Linguist
def assert_modeline(language, blob)
if language.nil?
assert_nil Linguist::Strategy::Modeline.call(blob).first
else
assert_equal language, Linguist::Strategy::Modeline.call(blob).first
end
end
def assert_interpreter(interpreter, body)
if interpreter.nil?
assert_nil Shebang.interpreter(body)
else
assert_equal interpreter, Shebang.interpreter(body)
end
end
def file_blob(name)
path = File.exist?(name) ? name : File.join(samples_path, name)
FileBlob.new(path)
end
def all_xml_fixtures(file="*")
fixs = Dir.glob("#{samples_path}/XML/#{file}") -
["#{samples_path}/XML/demo.hzp"] -
["#{samples_path}/XML/psd-data.xmp"] -
["#{samples_path}/XML/filenames"]
fixs.reject { |f| File.symlink?(f) }
end
def assert_manpage(blob)
languages = Linguist::Strategy::Manpage.call(blob)
assert_equal Language["Roff Manpage"], languages[0], "#{blob} not detected as manpage"
assert_equal Language["Roff"], languages[1], "#{blob} should include Roff as candidate language"
end
def assert_xml(blob)
language = Linguist::Strategy::XML.call(file_blob(blob)).first
assert_equal Language["XML"], language, "#{blob} not detected as XML"
end
def assert_all_xml(blobs)
Array(blobs).each do |blob|
assert_xml blob
end
end
def test_manpage_strategy
assert_manpage fixture_blob("Data/Manpages/bsdmalloc.3malloc")
assert_manpage fixture_blob("Data/Manpages/dirent.h.0p")
assert_manpage fixture_blob("Data/Manpages/linguist.1gh")
assert_manpage fixture_blob("Data/Manpages/test.1.in")
assert_manpage fixture_blob("Data/Manpages/test.2.in")
assert_manpage fixture_blob("Data/Manpages/test.3.in")
assert_manpage fixture_blob("Data/Manpages/test.4.in")
assert_manpage fixture_blob("Data/Manpages/test.5.in")
assert_manpage fixture_blob("Data/Manpages/test.6.in")
assert_manpage fixture_blob("Data/Manpages/test.7.in")
assert_manpage fixture_blob("Data/Manpages/test.8.in")
assert_manpage fixture_blob("Data/Manpages/test.9.in")
assert_manpage fixture_blob("Data/Manpages/test.man.in")
assert_manpage fixture_blob("Data/Manpages/test.mdoc.in")
end
def test_modeline_strategy
assert_modeline Language["Ruby"], fixture_blob("Data/Modelines/ruby")
assert_modeline Language["Ruby"], fixture_blob("Data/Modelines/ruby2")
assert_modeline Language["Ruby"], fixture_blob("Data/Modelines/ruby3")
assert_modeline Language["Ruby"], fixture_blob("Data/Modelines/ruby4")
assert_modeline Language["Ruby"], fixture_blob("Data/Modelines/ruby5")
assert_modeline Language["Ruby"], fixture_blob("Data/Modelines/ruby6")
assert_modeline Language["Ruby"], fixture_blob("Data/Modelines/ruby7")
assert_modeline Language["Ruby"], fixture_blob("Data/Modelines/ruby8")
assert_modeline Language["Ruby"], fixture_blob("Data/Modelines/ruby9")
assert_modeline Language["Ruby"], fixture_blob("Data/Modelines/ruby10")
assert_modeline Language["Ruby"], fixture_blob("Data/Modelines/ruby11")
assert_modeline Language["Ruby"], fixture_blob("Data/Modelines/ruby12")
assert_modeline Language["C++"], fixture_blob("Data/Modelines/seeplusplus")
assert_modeline Language["C++"], fixture_blob("Data/Modelines/seeplusplusEmacs1")
assert_modeline Language["C++"], fixture_blob("Data/Modelines/seeplusplusEmacs2")
assert_modeline Language["C++"], fixture_blob("Data/Modelines/seeplusplusEmacs3")
assert_modeline Language["C++"], fixture_blob("Data/Modelines/seeplusplusEmacs4")
assert_modeline Language["C++"], fixture_blob("Data/Modelines/seeplusplusEmacs5")
assert_modeline Language["C++"], fixture_blob("Data/Modelines/seeplusplusEmacs6")
assert_modeline Language["C++"], fixture_blob("Data/Modelines/seeplusplusEmacs7")
assert_modeline Language["C++"], fixture_blob("Data/Modelines/seeplusplusEmacs8")
assert_modeline Language["C++"], fixture_blob("Data/Modelines/seeplusplusEmacs9")
assert_modeline Language["C++"], fixture_blob("Data/Modelines/seeplusplusEmacs10")
assert_modeline Language["C++"], fixture_blob("Data/Modelines/seeplusplusEmacs11")
assert_modeline Language["C++"], fixture_blob("Data/Modelines/seeplusplusEmacs12")
assert_modeline Language["Text"], fixture_blob("Data/Modelines/fundamentalEmacs.c")
assert_modeline Language["Prolog"], fixture_blob("Data/Modelines/not_perl.pl")
assert_modeline Language["Smalltalk"], fixture_blob("Data/Modelines/example_smalltalk.md")
assert_modeline Language["JavaScript"], fixture_blob("Data/Modelines/iamjs.pl")
assert_modeline Language["JavaScript"], fixture_blob("Data/Modelines/iamjs2.pl")
assert_modeline Language["PHP"], fixture_blob("Data/Modelines/iamphp.inc")
assert_modeline nil, sample_blob("C++/runtime-compiler.cc")
end
def test_modeline_languages
assert_equal Language["Ruby"], fixture_blob("Data/Modelines/ruby").language
assert_equal Language["Ruby"], fixture_blob("Data/Modelines/ruby2").language
assert_equal Language["Ruby"], fixture_blob("Data/Modelines/ruby3").language
assert_equal Language["C++"], fixture_blob("Data/Modelines/seeplusplus").language
assert_equal Language["C++"], fixture_blob("Data/Modelines/seeplusplusEmacs1").language
assert_equal Language["C++"], fixture_blob("Data/Modelines/seeplusplusEmacs2").language
assert_equal Language["C++"], fixture_blob("Data/Modelines/seeplusplusEmacs3").language
assert_equal Language["C++"], fixture_blob("Data/Modelines/seeplusplusEmacs4").language
assert_equal Language["C++"], fixture_blob("Data/Modelines/seeplusplusEmacs5").language
assert_equal Language["C++"], fixture_blob("Data/Modelines/seeplusplusEmacs6").language
assert_equal Language["C++"], fixture_blob("Data/Modelines/seeplusplusEmacs7").language
assert_equal Language["C++"], fixture_blob("Data/Modelines/seeplusplusEmacs8").language
assert_equal Language["C++"], fixture_blob("Data/Modelines/seeplusplusEmacs9").language
assert_equal Language["C++"], fixture_blob("Data/Modelines/seeplusplusEmacs10").language
assert_equal Language["C++"], fixture_blob("Data/Modelines/seeplusplusEmacs11").language
assert_equal Language["C++"], fixture_blob("Data/Modelines/seeplusplusEmacs12").language
assert_equal Language["Text"], fixture_blob("Data/Modelines/fundamentalEmacs.c").language
assert_equal Language["Prolog"], fixture_blob("Data/Modelines/not_perl.pl").language
assert_equal Language["Smalltalk"], fixture_blob("Data/Modelines/example_smalltalk.md").language
assert_equal Language["JavaScript"], fixture_blob("Data/Modelines/iamjs.pl").language
assert_equal Language["JavaScript"], fixture_blob("Data/Modelines/iamjs2.pl").language
assert_equal Language["PHP"], fixture_blob("Data/Modelines/iamphp.inc").language
end
def test_shebangs
assert_interpreter nil, ""
assert_interpreter nil, "foo"
assert_interpreter nil, "#bar"
assert_interpreter nil, "#baz"
assert_interpreter nil, "///"
assert_interpreter nil, "\n\n\n\n\n"
assert_interpreter nil, " #!/usr/sbin/ruby"
assert_interpreter nil, "\n#!/usr/sbin/ruby"
assert_interpreter nil, "#!"
assert_interpreter nil, "#! "
assert_interpreter nil, "#!/usr/bin/env"
assert_interpreter nil, "#!/usr/bin/env osascript -l JavaScript"
assert_interpreter nil, "#!/usr/bin/env osascript -l AppleScript"
assert_interpreter nil, "#!/usr/bin/env osascript -l foobar"
assert_interpreter nil, "#!/usr/bin/osascript -l JavaScript"
assert_interpreter nil, "#!/usr/bin/osascript -l foobar"
assert_interpreter "ruby", "#!/usr/sbin/ruby\n# bar"
assert_interpreter "ruby", "#!/usr/bin/ruby\n# foo"
assert_interpreter "ruby", "#!/usr/sbin/ruby"
assert_interpreter "ruby", "#!/usr/sbin/ruby foo bar baz\n"
assert_interpreter "Rscript", "#!/usr/bin/env Rscript\n# example R script\n#\n"
assert_interpreter "crystal", "#!/usr/bin/env bin/crystal"
assert_interpreter "ruby", "#!/usr/bin/env ruby\n# baz"
assert_interpreter "bash", "#!/usr/bin/bash\n"
assert_interpreter "sh", "#!/bin/sh"
assert_interpreter "python", "#!/bin/python\n# foo\n# bar\n# baz"
assert_interpreter "python2", "#!/usr/bin/python2.7\n\n\n\n"
assert_interpreter "python3", "#!/usr/bin/python3\n\n\n\n"
assert_interpreter "sbcl", "#!/usr/bin/sbcl --script\n\n"
assert_interpreter "perl", "#! perl"
assert_interpreter "ruby", "#!/bin/sh\n\n\nexec ruby $0 $@"
assert_interpreter "sh", "#! /usr/bin/env A=003 B=149 C=150 D=xzd E=base64 F=tar G=gz H=head I=tail sh"
assert_interpreter "python", "#!/usr/bin/env foo=bar bar=foo python -cos=__import__(\"os\");"
assert_interpreter "osascript", "#!/usr/bin/env osascript"
assert_interpreter "osascript", "#!/usr/bin/osascript"
assert_interpreter "ruby", "#!/usr/bin/env -vS ruby -wKU\nputs ?t+?e+?s+?t"
assert_interpreter "sed", "#!/usr/bin/env --split-string sed -f\ny/a/A/"
assert_interpreter "deno", "#!/usr/bin/env -S GH_TOKEN=ghp_*** deno run --allow-net\nconsole.log(1);"
end
def test_xml
no_root_tag = [
"#{samples_path}/XML/libsomething.dll.config",
"#{samples_path}/XML/real-estate.mjml",
"#{samples_path}/XML/XmlIO.pluginspec",
"#{samples_path}/XML/MainView.ux",
"#{samples_path}/XML/MyApp.ux",
"#{samples_path}/XML/xhtml-struct-1.mod",
"#{samples_path}/XML/wixdemo.wixproj",
"#{samples_path}/XML/msbuild-example.proj",
"#{samples_path}/XML/sample.targets",
"#{samples_path}/XML/Default.props",
"#{samples_path}/XML/racoon.mjml",
"#{samples_path}/XML/route-gas-works-lake-union-loop.gpx",
"#{samples_path}/XML/some-ideas.mm",
"#{samples_path}/XML/GMOculus.project.gmx",
"#{samples_path}/XML/obj_control.object.gmx",
"#{samples_path}/XML/MainView.axaml",
"#{samples_path}/XML/Robots.slnx",
]
assert_all_xml all_xml_fixtures("*") - no_root_tag
assert_xml "test/fixtures/XML/app.config"
assert_xml "test/fixtures/XML/AssertionIDRequestOptionalAttributes.xml.svn-base"
end
end
| ruby | MIT | a7e40d31e271f6747fa1234df37b5fff3e6e2406 | 2026-01-04T15:37:32.851738Z | false |
github-linguist/linguist | https://github.com/github-linguist/linguist/blob/a7e40d31e271f6747fa1234df37b5fff3e6e2406/test/test_classifier.rb | test/test_classifier.rb | require_relative "./helper"
class TestClassifier < Minitest::Test
include Linguist
def fixture(name)
File.read(File.join(samples_path, name))
end
def test_classify
db = {}
Classifier.train! db, "Ruby", fixture("Ruby/foo.rb")
Classifier.train! db, "Objective-C", fixture("Objective-C/Foo.h")
Classifier.train! db, "Objective-C", fixture("Objective-C/Foo.m")
Classifier.finalize_train! db
results = Classifier.classify(db, fixture("Objective-C/hello.m"))
assert_equal "Objective-C", results.first[0]
tokens = Tokenizer.tokenize(fixture("Objective-C/hello.m"))
results = Classifier.classify(db, tokens)
assert_equal "Objective-C", results.first[0]
end
def test_restricted_classify
db = {}
Classifier.train! db, "Ruby", fixture("Ruby/foo.rb")
Classifier.train! db, "Objective-C", fixture("Objective-C/Foo.h")
Classifier.train! db, "Objective-C", fixture("Objective-C/Foo.m")
Classifier.finalize_train! db
results = Classifier.classify(db, fixture("Objective-C/hello.m"), ["Objective-C"])
assert_equal "Objective-C", results.first[0]
results = Classifier.classify(db, fixture("Objective-C/hello.m"), ["Ruby"])
assert results.empty?
end
def test_instance_classify_empty
results = Classifier.classify(Samples.cache, "")
assert results.empty?
end
def test_instance_classify_nil
assert_equal [], Classifier.classify(Samples.cache, nil)
end
def test_classify_ambiguous_languages
# Failures are reasonable in some cases, such as when a file is fully valid in more than one language.
allowed_failures = {
# Valid C and C++
"#{samples_path}/C/rpc.h" => ["C", "C++"],
# Tricky samples
"#{samples_path}/C/syscalldefs.h" => ["C", "C++"],
"#{samples_path}/C++/Types.h" => ["C", "C++"],
}
# Skip extensions with catch-all rule
skip_extensions = Set.new
Heuristics.all.each do |h|
rules = h.instance_variable_get(:@rules)
if rules[-1]['pattern'].is_a? AlwaysMatch
skip_extensions |= Set.new(h.extensions)
end
end
Samples.each do |sample|
next if skip_extensions.include? sample[:extname]
language = Linguist::Language.find_by_name(sample[:language])
languages = Language.find_by_filename(sample[:path]).map(&:name)
next if languages.length == 1
languages = Language.find_by_extension(sample[:path]).map(&:name)
next if languages.length <= 1
results = Classifier.classify(Samples.cache, File.read(sample[:path]), languages)
if results.empty?
assert false, "no results for #{sample[:path]}"
elsif allowed_failures.has_key? sample[:path]
assert allowed_failures[sample[:path]].include?(results.first[0]), "#{sample[:path]}\n#{results.inspect}"
else
assert_equal language.name, results.first[0], "#{sample[:path]}\n#{results.inspect}"
end
end
end
def test_classify_empty_languages
assert_equal [], Classifier.classify(Samples.cache, fixture("Ruby/foo.rb"), [])
end
end
| ruby | MIT | a7e40d31e271f6747fa1234df37b5fff3e6e2406 | 2026-01-04T15:37:32.851738Z | false |
github-linguist/linguist | https://github.com/github-linguist/linguist/blob/a7e40d31e271f6747fa1234df37b5fff3e6e2406/test/test_instrumentation.rb | test/test_instrumentation.rb | require_relative "./helper"
class TestInstrumentation < Minitest::Test
include Linguist
class LocalInstrumenter
Event = Struct.new(:name, :args)
attr_reader :events
def initialize
@events = []
end
def instrument(name, *args)
@events << Event.new(name, args)
yield if block_given?
end
end
def setup
Linguist.instrumenter = LocalInstrumenter.new
end
def teardown
Linguist.instrumenter = nil
end
def test_detection_instrumentation_with_binary_blob
binary_blob = fixture_blob("Binary/octocat.ai")
Linguist.detect(binary_blob)
# Shouldn't instrument this (as it's binary)
assert_equal 0, Linguist.instrumenter.events.size
end
def test_modeline_instrumentation
blob = fixture_blob("Data/Modelines/ruby")
Linguist.detect(blob)
detect_event = Linguist.instrumenter.events.last
detect_event_payload = detect_event[:args].first
assert_equal 3, Linguist.instrumenter.events.size
assert_equal "linguist.detected", detect_event.name
assert_equal Language['Ruby'], detect_event_payload[:language]
assert_equal blob, detect_event_payload[:blob]
assert_equal Linguist::Strategy::Modeline, detect_event_payload[:strategy]
end
end
| ruby | MIT | a7e40d31e271f6747fa1234df37b5fff3e6e2406 | 2026-01-04T15:37:32.851738Z | false |
github-linguist/linguist | https://github.com/github-linguist/linguist/blob/a7e40d31e271f6747fa1234df37b5fff3e6e2406/test/test_basic_instrumenter.rb | test/test_basic_instrumenter.rb | require_relative "./helper"
class TestBasicInstrumenter < Minitest::Test
include Linguist
def setup
@instrumenter = Linguist::BasicInstrumenter.new
Linguist.instrumenter = @instrumenter
end
def teardown
Linguist.instrumenter = nil
end
def test_tracks_extension_strategy
# Ruby file detected by extension
blob = fixture_blob("Ruby/foo.rb")
Linguist.detect(blob)
assert @instrumenter.detected_info.key?(blob.name)
assert_equal "Extension", @instrumenter.detected_info[blob.name][:strategy]
assert_equal "Ruby", @instrumenter.detected_info[blob.name][:language]
end
def test_tracks_modeline_strategy
# File with vim modeline
blob = fixture_blob("Data/Modelines/ruby")
Linguist.detect(blob)
assert @instrumenter.detected_info.key?(blob.name)
assert_equal "Modeline", @instrumenter.detected_info[blob.name][:strategy]
assert_equal "Ruby", @instrumenter.detected_info[blob.name][:language]
end
def test_tracks_shebang_strategy
# File with shebang
blob = fixture_blob("Shell/sh")
Linguist.detect(blob)
assert @instrumenter.detected_info.key?(blob.name)
assert_equal "Shebang", @instrumenter.detected_info[blob.name][:strategy]
assert_equal "Shell", @instrumenter.detected_info[blob.name][:language]
end
def test_tracks_multiple_files
# Track multiple files in sequence
ruby_blob = fixture_blob("Ruby/foo.rb")
shell_blob = fixture_blob("Shell/sh")
Linguist.detect(ruby_blob)
Linguist.detect(shell_blob)
assert_equal 2, @instrumenter.detected_info.size
assert @instrumenter.detected_info.key?(ruby_blob.name)
assert @instrumenter.detected_info.key?(shell_blob.name)
end
def test_no_tracking_for_binary_files
binary_blob = fixture_blob("Binary/octocat.ai")
Linguist.detect(binary_blob)
# Should not record info for binary files
assert_equal 0, @instrumenter.detected_info.size
end
def test_records_correct_strategy_for_heuristics
# .bas file that should be detected via heuristics
blob = fixture_blob("VBA/sample.bas")
Linguist.detect(blob)
assert @instrumenter.detected_info.key?(blob.name)
assert_equal "Heuristics", @instrumenter.detected_info[blob.name][:strategy]
end
def test_tracks_filename_strategy
# Dockerfile detected by filename
blob = fixture_blob("Dockerfile/Dockerfile")
Linguist.detect(blob)
assert @instrumenter.detected_info.key?(blob.name)
assert_equal "Filename", @instrumenter.detected_info[blob.name][:strategy]
assert_equal "Dockerfile", @instrumenter.detected_info[blob.name][:language]
end
end
| ruby | MIT | a7e40d31e271f6747fa1234df37b5fff3e6e2406 | 2026-01-04T15:37:32.851738Z | false |
github-linguist/linguist | https://github.com/github-linguist/linguist/blob/a7e40d31e271f6747fa1234df37b5fff3e6e2406/test/helper.rb | test/helper.rb | require "bundler/setup"
require "minitest/autorun"
require "mocha/minitest"
require "linguist"
require "linguist/blob"
require "licensee"
require "set"
def fixtures_path
File.expand_path("../fixtures", __FILE__)
end
def fixture_blob(name)
filepath = (name =~ /^\//)? name : File.join(fixtures_path, name)
Linguist::FileBlob.new(filepath, fixtures_path)
end
def fixture_blob_memory(name)
filepath = (name =~ /^\//)? name : File.join(fixtures_path, name)
content = File.read(filepath, :encoding => "ASCII-8BIT")
Linguist::Blob.new(name, content)
end
def samples_path
File.expand_path("../../samples", __FILE__)
end
def sample_blob(name)
filepath = (name =~ /^\//)? name : File.join(samples_path, name)
Linguist::FileBlob.new(filepath, samples_path)
end
def sample_blob_memory(name)
filepath = (name =~ /^\//)? name : File.join(samples_path, name)
content = File.read(filepath, :encoding => "ASCII-8BIT")
Linguist::Blob.new(name, content)
end
def silence_warnings
original_verbosity = $VERBOSE
$VERBOSE = nil
yield
ensure
$VERBOSE = original_verbosity
end
| ruby | MIT | a7e40d31e271f6747fa1234df37b5fff3e6e2406 | 2026-01-04T15:37:32.851738Z | false |
github-linguist/linguist | https://github.com/github-linguist/linguist/blob/a7e40d31e271f6747fa1234df37b5fff3e6e2406/test/test_language.rb | test/test_language.rb | require_relative "./helper"
# Unit tests for Linguist::Language: lookups by name, alias, extension,
# filename, interpreter and id; per-language metadata (type, group, color,
# Ace/CodeMirror modes); and repository-wide consistency checks over
# languages.yml, grammars.yml and popular.yml.
#
# Fix: test_codemirror_mime_declared_in_mode_file's failure messages
# contained the literal garbage "#(unknown)" where "#{filename}" should be
# interpolated, so a failing assertion reported nothing useful.
class TestLanguage < Minitest::Test
  include Linguist

  def test_find_by_alias
    assert_equal Language['ASP.NET'], Language.find_by_alias('aspx')
    assert_equal Language['ASP.NET'], Language.find_by_alias('aspx-vb')
    assert_equal Language['ActionScript'], Language.find_by_alias('as3')
    assert_equal Language['ApacheConf'], Language.find_by_alias('apache')
    assert_equal Language['Assembly'], Language.find_by_alias('nasm')
    assert_equal Language['Batchfile'], Language.find_by_alias('bat')
    assert_equal Language['C#'], Language.find_by_alias('c#')
    assert_equal Language['C#'], Language.find_by_alias('csharp')
    assert_equal Language['C'], Language.find_by_alias('c')
    assert_equal Language['C++'], Language.find_by_alias('c++')
    assert_equal Language['C++'], Language.find_by_alias('cpp')
    assert_equal Language['Chapel'], Language.find_by_alias('chpl')
    assert_equal Language['Classic ASP'], Language.find_by_alias('asp')
    assert_equal Language['CoffeeScript'], Language.find_by_alias('coffee')
    assert_equal Language['CoffeeScript'], Language.find_by_alias('coffee-script')
    assert_equal Language['ColdFusion'], Language.find_by_alias('cfm')
    assert_equal Language['Common Lisp'], Language.find_by_alias('common-lisp')
    assert_equal Language['Common Lisp'], Language.find_by_alias('lisp')
    assert_equal Language['Darcs Patch'], Language.find_by_alias('dpatch')
    assert_equal Language['Dart'], Language.find_by_alias('dart')
    assert_equal Language['EdgeQL'], Language.find_by_alias('esdl')
    assert_equal Language['Emacs Lisp'], Language.find_by_alias('elisp')
    assert_equal Language['Emacs Lisp'], Language.find_by_alias('emacs')
    assert_equal Language['Emacs Lisp'], Language.find_by_alias('emacs-lisp')
    assert_equal Language['Gettext Catalog'], Language.find_by_alias('pot')
    assert_equal Language['HTML'], Language.find_by_alias('html')
    assert_equal Language['HTML'], Language.find_by_alias('xhtml')
    assert_equal Language['HTML+ERB'], Language.find_by_alias('html+erb')
    assert_equal Language['HTML+ERB'], Language.find_by_alias('erb')
    assert_equal Language['IRC log'], Language.find_by_alias('irc')
    assert_equal Language['JSON'], Language.find_by_alias('json')
    assert_equal Language['Java Server Pages'], Language.find_by_alias('jsp')
    assert_equal Language['Java'], Language.find_by_alias('java')
    assert_equal Language['JavaScript'], Language.find_by_alias('javascript')
    assert_equal Language['JavaScript'], Language.find_by_alias('js')
    assert_equal Language['Literate Haskell'], Language.find_by_alias('lhs')
    assert_equal Language['Literate Haskell'], Language.find_by_alias('literate-haskell')
    assert_equal Language['Objective-C'], Language.find_by_alias('objc')
    assert_equal Language['OpenEdge ABL'], Language.find_by_alias('openedge')
    assert_equal Language['OpenEdge ABL'], Language.find_by_alias('progress')
    assert_equal Language['OpenEdge ABL'], Language.find_by_alias('abl')
    assert_equal Language['Parrot Internal Representation'], Language.find_by_alias('pir')
    assert_equal Language['PowerShell'], Language.find_by_alias('posh')
    assert_equal Language['Puppet'], Language.find_by_alias('puppet')
    assert_equal Language['Pure Data'], Language.find_by_alias('pure-data')
    assert_equal Language['Raw token data'], Language.find_by_alias('raw')
    assert_equal Language['Ruby'], Language.find_by_alias('rb')
    assert_equal Language['Ruby'], Language.find_by_alias('ruby')
    assert_equal Language['R'], Language.find_by_alias('r')
    assert_equal Language['Scheme'], Language.find_by_alias('scheme')
    assert_equal Language['Shell'], Language.find_by_alias('bash')
    assert_equal Language['Shell'], Language.find_by_alias('sh')
    assert_equal Language['Shell'], Language.find_by_alias('shell')
    assert_equal Language['Shell'], Language.find_by_alias('zsh')
    assert_equal Language['SuperCollider'], Language.find_by_alias('supercollider')
    assert_equal Language['TeX'], Language.find_by_alias('tex')
    assert_equal Language['Tree-sitter Query'], Language.find_by_alias('tsq')
    assert_equal Language['TypeScript'], Language.find_by_alias('ts')
    assert_equal Language['Vim Script'], Language.find_by_alias('vim')
    assert_equal Language['Vim Script'], Language.find_by_alias('viml')
    assert_equal Language['reStructuredText'], Language.find_by_alias('rst')
    assert_equal Language['X BitMap'], Language.find_by_alias('xbm')
    assert_equal Language['X PixMap'], Language.find_by_alias('xpm')
    assert_equal Language['YAML'], Language.find_by_alias('yml')
    assert_nil Language.find_by_alias(nil)
  end

  # Note these are set by `script/update-ids`. If these tests fail then someone
  # has changed the `language_id` fields set in languages.yml which is almost certainly
  # not what you want to happen (these fields are used in GitHub's search indexes)
  def test_language_ids
    assert_equal 4, Language['ANTLR'].language_id
    assert_equal 54, Language['Ceylon'].language_id
    assert_equal 326, Language['Ruby'].language_id
    assert_equal 421, Language['xBase'].language_id
  end

  def test_find_by_id
    assert_equal Language['Elixir'], Language.find_by_id(100)
    assert_equal Language['Ruby'], Language.find_by_id(326)
    assert_equal Language['xBase'], Language.find_by_id(421)
  end

  def test_groups
    # Test a couple identity cases
    assert_equal Language['Perl'], Language['Perl'].group
    assert_equal Language['Python'], Language['Python'].group
    assert_equal Language['Ruby'], Language['Ruby'].group

    # Test a few special groups
    assert_equal Language['Assembly'], Language['Unix Assembly'].group
    assert_equal Language['C'], Language['OpenCL'].group
    assert_equal Language['Haskell'], Language['Literate Haskell'].group
    assert_equal Language['Java'], Language['Java Server Pages'].group
    assert_equal Language['Python'], Language['NumPy'].group
    assert_equal Language['Shell'], Language['Gentoo Ebuild'].group
    assert_equal Language['Shell'], Language['Gentoo Eclass'].group
    assert_equal Language['Shell'], Language['Tcsh'].group

    # Ensure everyone has a group
    Language.all.each do |language|
      assert language.group, "#{language} has no group"
    end
  end

  def test_popular
    assert Language['Ruby'].popular?
    assert Language['Perl'].popular?
    assert Language['Python'].popular?
    assert Language['Assembly'].unpopular?
    assert Language['Brainfuck'].unpopular?
  end

  def test_programming
    assert_equal :programming, Language['JavaScript'].type
    assert_equal :programming, Language['LSL'].type
    assert_equal :programming, Language['Perl'].type
    assert_equal :programming, Language['PowerShell'].type
    assert_equal :programming, Language['Python'].type
    assert_equal :programming, Language['Ruby'].type
    assert_equal :programming, Language['TypeScript'].type
    assert_equal :programming, Language['Makefile'].type
    assert_equal :programming, Language['SuperCollider'].type
  end

  def test_markup
    assert_equal :markup, Language['HTML'].type
    assert_equal :markup, Language['SCSS'].type
  end

  def test_data
    assert_equal :data, Language['YAML'].type
  end

  def test_prose
    assert_equal :prose, Language['Markdown'].type
    assert_equal :prose, Language['Org'].type
  end

  def test_find_by_name
    assert_nil Language.find_by_name(nil)
    ruby = Language['Ruby']
    assert_equal ruby, Language.find_by_name('Ruby')
  end

  def test_find_all_by_name
    Language.all.each do |language|
      assert_equal language, Language.find_by_name(language.name)
      assert_equal language, Language[language.name]
    end
  end

  def test_find_all_by_alias
    Language.all.each do |language|
      language.aliases.each do |name|
        assert_equal language, Language.find_by_alias(name)
        assert_equal language, Language[name]
      end
    end
  end

  def test_find_by_extension
    assert_equal [], Language.find_by_extension('.factor-rc')
    assert_equal [Language['Limbo'], Language['M'], Language['MATLAB'], Language['MUF'], Language['Mercury'], Language['Objective-C'], Language['Wolfram Language']], Language.find_by_extension('foo.m')
    assert_equal [Language['Ruby']], Language.find_by_extension('foo.rb')
    assert_equal [Language['Ruby']], Language.find_by_extension('foo/bar.rb')
    assert_equal [Language['Ruby']], Language.find_by_extension('PKGBUILD.rb')
    assert_equal ['C', 'C++', 'Objective-C'], Language.find_by_extension('foo.h').map(&:name).sort
    assert_equal [], Language.find_by_extension('rb')
    assert_equal [], Language.find_by_extension('.null')
    assert_equal [Language['Jinja']], Language.find_by_extension('index.jinja')
    assert_equal [Language['Chapel']], Language.find_by_extension('examples/hello.chpl')
    assert_equal [], Language.find_by_filename('F.I.L.E.')
  end

  def test_find_all_by_extension
    Language.all.each do |language|
      language.extensions.each do |extension|
        assert_includes Language.find_by_extension(extension), language
      end
    end
  end

  def test_find_by_filename
    assert_equal [Language['Shell']], Language.find_by_filename('PKGBUILD')
    assert_equal [Language['Ruby']], Language.find_by_filename('Rakefile')
    assert_equal Language['ApacheConf'], Language.find_by_filename('httpd.conf').first
    assert_equal [Language['ApacheConf']], Language.find_by_filename('.htaccess')
    assert_equal Language['Nginx'], Language.find_by_filename('nginx.conf').first
    assert_equal [], Language.find_by_filename('foo.rb')
    assert_equal [], Language.find_by_filename('rb')
    assert_equal [], Language.find_by_filename('.null')
    assert_equal [Language['Shell']], Language.find_by_filename('.bashrc')
    assert_equal [Language['Shell']], Language.find_by_filename('bash_profile')
    assert_equal [Language['Shell']], Language.find_by_filename('.zshrc')
    assert_equal [Language['Clojure']], Language.find_by_filename('riemann.config')
  end

  def test_find_by_interpreter
    {
      "ruby" => "Ruby",
      "Rscript" => "R",
      "sh" => "Shell",
      "bash" => "Shell",
      "python" => "Python",
      "python2" => "Python",
      "python3" => "Python",
      "sbcl" => "Common Lisp",
      "sclang" => "SuperCollider"
    }.each do |interpreter, language|
      assert_equal [Language[language]], Language.find_by_interpreter(interpreter)
    end
    assert_equal [], Language.find_by_interpreter(nil)
  end

  def test_find
    assert_equal 'Ruby', Language['Ruby'].name
    assert_equal 'Ruby', Language['ruby'].name
    assert_equal 'C++', Language['C++'].name
    assert_equal 'C++', Language['c++'].name
    assert_equal 'C++', Language['cpp'].name
    assert_equal 'C#', Language['C#'].name
    assert_equal 'C#', Language['c#'].name
    assert_equal 'C#', Language['csharp'].name
    assert_nil Language['defunkt']
    assert_nil Language[nil]
  end

  def test_find_ignores_case
    assert_equal 'AGS Script', Language['ags script'].name
    assert_equal 'AGS Script', Language['ags sCRIPT'].name
  end

  def test_find_by_name_ignores_case
    assert_equal 'AGS Script', Language.find_by_name('ags script').name
    assert_equal 'AGS Script', Language.find_by_name('ags sCRIPT').name
  end

  def test_find_by_alias_ignores_case
    refute_includes Language['AGS Script'].aliases, 'AGS'
    assert_equal 'AGS Script', Language.find_by_alias('AGS').name
  end

  def test_find_ignores_comma
    assert_equal 'Rust', Language['rust,no_run'].name
  end

  def test_find_by_name_ignores_comma
    assert_equal Language['Rust'], Language.find_by_name('rust,no_run')
  end

  def test_find_by_alias_ignores_comma
    assert_equal Language['Rust'], Language.find_by_alias('rust,no_run')
  end

  def test_doesnt_blow_up_with_blank_lookup
    assert_nil Language.find_by_alias('')
    assert_nil Language.find_by_name(nil)
    assert_nil Language[""]
  end

  def test_does_not_blow_up_with_non_string_lookup
    assert_nil Language.find_by_alias(true)
    assert_nil Language.find_by_name(true)
    assert_nil Language[true]
  end

  def test_name
    assert_equal 'Perl', Language['Perl'].name
    assert_equal 'Python', Language['Python'].name
    assert_equal 'Ruby', Language['Ruby'].name
  end

  def test_escaped_name
    assert_equal 'C', Language['C'].escaped_name
    assert_equal 'C%23', Language['C#'].escaped_name
    assert_equal 'C%2B%2B', Language['C++'].escaped_name
    assert_equal 'Objective-C', Language['Objective-C'].escaped_name
    assert_equal 'Common%20Lisp', Language['Common Lisp'].escaped_name
  end

  def test_error_without_name
    assert_raises ArgumentError do
      Language.new :name => nil
    end
  end

  def test_color
    assert_equal '#701516', Language['Ruby'].color
    assert_equal '#3572A5', Language['Python'].color
    assert_equal '#f1e05a', Language['JavaScript'].color
    assert_equal '#3178c6', Language['TypeScript'].color
    assert_equal '#3d9970', Language['LSL'].color
  end

  def test_colors
    assert Language.colors.include?(Language['Ruby'])
    assert Language.colors.include?(Language['Python'])
  end

  def test_ace_mode
    assert_equal 'c_cpp', Language['C++'].ace_mode
    assert_equal 'coffee', Language['CoffeeScript'].ace_mode
    assert_equal 'csharp', Language['C#'].ace_mode
    assert_equal 'css', Language['CSS'].ace_mode
    assert_equal 'lsl', Language['LSL'].ace_mode
    assert_equal 'javascript', Language['JavaScript'].ace_mode
    assert_equal 'fortran', Language['FORTRAN'].ace_mode
  end

  def test_codemirror_mode
    assert_equal 'ruby', Language['Ruby'].codemirror_mode
    assert_equal 'javascript', Language['JavaScript'].codemirror_mode
    assert_equal 'clike', Language['C'].codemirror_mode
    assert_equal 'clike', Language['C++'].codemirror_mode
  end

  def test_codemirror_mime_type
    assert_equal 'text/x-ruby', Language['Ruby'].codemirror_mime_type
    assert_equal 'text/javascript', Language['JavaScript'].codemirror_mime_type
    assert_equal 'text/x-csrc', Language['C'].codemirror_mime_type
    assert_equal 'text/x-c++src', Language['C++'].codemirror_mime_type
  end

  def test_wrap
    assert_equal false, Language['C'].wrap
    assert_equal true, Language['Markdown'].wrap
  end

  def test_extensions
    assert Language['LSL'].extensions.include?('.lsl')
    assert Language['Perl'].extensions.include?('.pl')
    assert Language['Python'].extensions.include?('.py')
    assert Language['Ruby'].extensions.include?('.rb')
    assert Language['SuperCollider'].extensions.include?('.scd')
  end

  def test_eql
    assert Language['Ruby'].eql?(Language['Ruby'])
    assert !Language['Ruby'].eql?(Language['Python'])
  end

  def test_by_type
    assert !Language.by_type(:prose).nil?
  end

  def test_all_languages_have_grammars
    scopes = YAML.load(File.read(File.expand_path("../../grammars.yml", __FILE__))).values.flatten
    missing = Language.all.reject { |language| language.tm_scope == "none" || scopes.include?(language.tm_scope) }
    message = "The following languages' scopes are not listed in grammars.yml. Please add grammars for all new languages.\n"
    message += "If no grammar exists for a language, mark the language with `tm_scope: none` in lib/linguist/languages.yml.\n"
    width = missing.map { |language| language.name.length }.max
    message += missing.map { |language| sprintf("%-#{width}s %s", language.name, language.tm_scope) }.sort.join("\n")

    assert missing.empty?, message
  end

  def test_all_languages_have_scopes
    languages = YAML.load(File.read(File.expand_path("../../lib/linguist/languages.yml", __FILE__)))
    missing = languages.reject { |name,language| language.has_key?('tm_scope') }
    message = "The following languages do not have a `tm_scope` field defined. Use `tm_scope: none` if the language has no grammar.\n"
    message += missing.keys.sort.join("\n")

    assert missing.empty?, message
  end

  def test_all_languages_have_type
    missing = Language.all.select { |language| language.type.nil? }
    message = "The following languages do not have a type listed in grammars.yml. Please add types for all new languages.\n"
    width = missing.map { |language| language.name.length }.max
    message += missing.map { |language| sprintf("%-#{width}s", language.name) }.sort.join("\n")

    assert missing.empty?, message
  end

  def test_all_languages_have_a_language_id_set
    missing = Language.all.select { |language| language.language_id.nil? }
    message = "The following languages do not have a language_id listed in languages.yml. Please run `script/update-ids` as per the contribution guidelines.\n"
    missing.each { |language| message << "#{language.name}\n" }

    assert missing.empty?, message
  end

  def test_all_languages_have_a_valid_id
    deleted_language_ids = [21] # Prevent re-use of deleted language IDs
    invalid = Language.all.select { |language| language.language_id < 0 || deleted_language_ids.include?(language.language_id) || (language.language_id > 431 && language.language_id < 1024) || language.language_id >= (2**31 - 1) }
    message = "The following languages do not have a valid language_id. Please run `script/update-ids` as per the contribution guidelines.\n"
    invalid.each { |language| message << "#{language.name}\n" }

    assert invalid.empty?, message
  end

  def test_all_language_id_are_unique
    duplicates = Language.all.group_by{ |language| language.language_id }.select { |k, v| v.size > 1 }.map(&:first)
    message = "The following language_id are used several times in languages.yml. Please run `script/update-ids` as per the contribution guidelines.\n"
    duplicates.each { |language_id| message << "#{language_id}\n" }

    assert duplicates.empty?, message
  end

  def test_all_languages_have_a_valid_ace_mode
    ace_fixture_path = File.join('test', 'fixtures', 'ace_modes.json')
    skip("No ace_modes.json file") unless File.exist?(ace_fixture_path)
    ace_modes = Yajl.load(File.read(ace_fixture_path))
    ace_github_modes = ace_modes[0].concat(ace_modes[1])
    existing_ace_modes = ace_github_modes.map do |ace_github_mode|
      File.basename(ace_github_mode["name"], ".js") if ace_github_mode["name"] !~ /_highlight_rules|_test|_worker/
    end.compact.uniq.sort.map(&:downcase)

    missing = Language.all.reject { |language| language.ace_mode == "text" || existing_ace_modes.include?(language.ace_mode) }
    message = "The following languages do not have an Ace mode listed in languages.yml. Please add an Ace mode for all new languages.\n"
    message += "If no Ace mode exists for a language, mark the language with `ace_mode: text` in lib/linguist/languages.yml.\n"
    width = missing.map { |language| language.name.length }.max
    message += missing.map { |language| sprintf("%-#{width}s %s", language.name, language.ace_mode) }.sort.join("\n")

    assert missing.empty?, message
  end

  def test_codemirror_modes_present
    Language.all.each do |language|
      if language.codemirror_mode || language.codemirror_mime_type
        assert language.codemirror_mode, "#{language.inspect} missing CodeMirror mode"
        assert language.codemirror_mime_type, "#{language.inspect} missing CodeMirror MIME mode"
      end
    end
  end

  def test_valid_codemirror_mode
    Language.all.each do |language|
      if mode = language.codemirror_mode
        assert File.exist?(File.expand_path("../../vendor/CodeMirror/mode/#{mode}", __FILE__)), "#{mode} isn't a valid CodeMirror mode"
      end
    end
  end

  def test_codemirror_mode_and_mime_defined_by_meta_mapping
    meta = File.read(File.expand_path("../../vendor/CodeMirror/mode/meta.js", __FILE__))
    Language.all.each do |language|
      next unless language.codemirror_mode && language.codemirror_mime_type
      assert meta.match(/^.+#{Regexp.escape(language.codemirror_mime_type)}.+#{Regexp.escape(language.codemirror_mode)}.+$/), "#{language.inspect}: #{language.codemirror_mime_type} not defined under #{language.codemirror_mode}"
    end
  end

  def test_codemirror_mime_declared_in_mode_file
    Language.all.each do |language|
      next unless language.codemirror_mode && language.codemirror_mime_type
      filename = File.expand_path("../../vendor/CodeMirror/mode/#{language.codemirror_mode}/#{language.codemirror_mode}.js", __FILE__)
      # Interpolate the mode filename into the failure messages so a failing
      # run points at the exact missing/incomplete mode file.
      assert File.exist?(filename), "#{filename} does not exist"
      assert File.read(filename).match(Regexp.escape language.codemirror_mime_type), "#{language.inspect}: #{language.codemirror_mime_type} not defined in #{filename}"
    end
  end

  def test_all_popular_languages_exist
    popular = YAML.load(File.read(File.expand_path("../../lib/linguist/popular.yml", __FILE__)))
    missing = popular - Language.all.map(&:name)
    message = "The following languages are listed in lib/linguist/popular.yml but not in lib/linguist/languages.yml.\n"
    message += missing.sort.join("\n")

    assert missing.empty?, message
  end

  def test_non_crash_on_comma
    assert_nil Language[',']
    assert_nil Language.find_by_name(',')
    assert_nil Language.find_by_alias(',')
  end

  def test_detect_prefers_markdown_for_md
    blob = Linguist::FileBlob.new(File.join(samples_path, "Markdown/symlink.md"))
    match = Linguist.detect(blob)
    assert_equal Language["Markdown"], match
  end

  def test_fs_names
    Language.all.each do |language|
      next unless /[\\:*?"<>|]/.match(language.name)
      assert language.fs_name, "#{language.name} needs an fs_name for Windows' file system."
      assert !/[\\:*?"<>|]/.match(language.fs_name), "The fs_name for #{language.name} is invalid."
    end
  end
end
| ruby | MIT | a7e40d31e271f6747fa1234df37b5fff3e6e2406 | 2026-01-04T15:37:32.851738Z | false |
github-linguist/linguist | https://github.com/github-linguist/linguist/blob/a7e40d31e271f6747fa1234df37b5fff3e6e2406/test/test_file_blob.rb | test/test_file_blob.rb | require_relative "./helper"
class TestFileBlob < Minitest::Test
include Linguist
# Run the given block with Ruby warnings suppressed, restoring the previous
# $VERBOSE setting afterwards (even if the block raises).
# NOTE(review): duplicates the identically-named helper in test/helper.rb.
def silence_warnings
  saved, $VERBOSE = $VERBOSE, nil
  yield
ensure
  $VERBOSE = saved
end
# Load a sample blob but force its name to "script", so language detection
# cannot use the file extension and must rely on content (e.g. the shebang).
def script_blob(name)
  sample_blob(name).tap do |blob|
    blob.instance_variable_set(:@name, 'script')
  end
end
# extensions returns every trailing extension combination, longest first
# (".html.erb" before ".erb").
def test_extensions
  assert_equal [".gitignore"], Linguist::FileBlob.new(".gitignore").extensions
  assert_equal [".xml"], Linguist::FileBlob.new("build.xml").extensions
  assert_equal [".html.erb", ".erb"], Linguist::FileBlob.new("dotted.dir/index.html.erb").extensions
end

def test_name
  assert_equal "foo.rb", sample_blob("foo.rb").name
end

# mime_type is derived from the file name/contents; a blob with no name
# falls back to text/plain.
def test_mime_type
  assert_equal "application/pdf", fixture_blob("Binary/octocat.ai").mime_type
  assert_equal "application/x-ruby", sample_blob("Ruby/grit.rb").mime_type
  assert_equal "application/x-sh", sample_blob("Shell/script.sh").mime_type
  assert_equal "application/xml", sample_blob("XML/bar.xml").mime_type
  assert_equal "audio/ogg", fixture_blob("Binary/foo.ogg").mime_type
  assert_equal "text/plain", fixture_blob("Data/README").mime_type

  # GitHub doesn't use a filename when returning raw blobs
  blob = Struct.new(:name) { include Linguist::BlobHelper }
  assert_equal "text/plain", blob.new(nil).mime_type
end

# content_type additionally carries the detected charset for text files.
def test_content_type
  assert_equal "application/pdf", fixture_blob("Binary/foo.pdf").content_type
  assert_equal "audio/ogg", fixture_blob("Binary/foo.ogg").content_type
  assert_equal "image/png", fixture_blob("Binary/foo.png").content_type
  assert_equal "text/plain; charset=iso-8859-2", fixture_blob("Data/README").content_type
end

# Binary blobs are served as attachments (with URL-encoded filenames);
# viewable text and images are served inline.
def test_disposition
  assert_equal "attachment; filename=foo+bar.jar", fixture_blob("Binary/foo bar.jar").disposition
  assert_equal "attachment; filename=foo.bin", fixture_blob("Binary/foo.bin").disposition
  assert_equal "attachment; filename=linguist.gem", fixture_blob("Binary/linguist.gem").disposition
  assert_equal "attachment; filename=octocat.ai", fixture_blob("Binary/octocat.ai").disposition
  assert_equal "inline", fixture_blob("Data/README").disposition
  assert_equal "inline", sample_blob("Text/foo.txt").disposition
  assert_equal "inline", sample_blob("Ruby/grit.rb").disposition
  assert_equal "inline", fixture_blob("Binary/octocat.png").disposition
end

def test_data
  assert_equal "module Foo\nend\n", sample_blob("Ruby/foo.rb").data
end

def test_lines
  assert_equal ["module Foo", "end"], sample_blob("Ruby/foo.rb").lines
  assert_equal ["line 1", "line 2"], sample_blob("Text/mac.txt").lines
  assert_equal 474, sample_blob("Emacs Lisp/ess-julia.el").lines.length
end

# size is in bytes.
def test_size
  assert_equal 15, sample_blob("Ruby/foo.rb").size
end

# loc counts all lines; sloc counts non-blank source lines.
def test_loc
  assert_equal 2, sample_blob("Ruby/foo.rb").loc
end

def test_sloc
  assert_equal 2, sample_blob("Ruby/foo.rb").sloc
  assert_equal 3, fixture_blob("Data/utf16le-windows").sloc
  assert_equal 1, fixture_blob("Data/iso8859-8-i").sloc
end
# encoding is the charlock-detected charset; ruby_encoding is what Ruby can
# actually use (unsupported charsets fall back to "binary"); binary data
# has no encoding at all (nil).
def test_encoding
  assert_equal "ISO-8859-2", fixture_blob("Data/README").encoding
  assert_equal "ISO-8859-2", fixture_blob("Data/README").ruby_encoding
  assert_equal "UTF-8", sample_blob("Text/foo.txt").encoding
  assert_equal "UTF-8", sample_blob("Text/foo.txt").ruby_encoding
  assert_equal "UTF-16LE", fixture_blob("Data/utf16le").encoding
  assert_equal "UTF-16LE", fixture_blob("Data/utf16le").ruby_encoding
  assert_equal "UTF-16LE", fixture_blob("Data/utf16le-windows").encoding
  assert_equal "UTF-16LE", fixture_blob("Data/utf16le-windows").ruby_encoding
  assert_equal "ISO-2022-KR", fixture_blob("Text/ISO-2022-KR.txt").encoding
  assert_equal "binary", fixture_blob("Text/ISO-2022-KR.txt").ruby_encoding
  assert_nil fixture_blob("Binary/dog.o").encoding
end

def test_binary
  # Large blobs aren't loaded
  large_blob = sample_blob("git.exe")
  large_blob.instance_eval do
    def data; end
  end
  assert large_blob.binary?

  assert fixture_blob("Binary/git.deb").binary?
  assert fixture_blob("Binary/git.exe").binary?
  assert fixture_blob("Binary/hello.pbc").binary?
  assert fixture_blob("Binary/linguist.gem").binary?
  assert fixture_blob("Binary/octocat.ai").binary?
  assert fixture_blob("Binary/octocat.png").binary?
  assert fixture_blob("Binary/zip").binary?
  assert !fixture_blob("Data/README").binary?
  assert !sample_blob("Ruby/foo.rb").binary?
  assert !sample_blob("Perl/script.pl").binary?
end

# Every checked-in sample must be text — samples feed the classifier.
def test_all_binary
  Samples.each do |sample|
    blob = sample_blob(sample[:path])
    assert ! (blob.likely_binary? || blob.binary?), "#{sample[:path]} is a binary file"
  end
end

def test_text
  assert fixture_blob("Data/README").text?
  assert fixture_blob("Data/md").text?
  assert sample_blob("Shell/script.sh").text?
  assert fixture_blob("Data/txt").text?
end

def test_image
  assert fixture_blob("Binary/octocat.gif").image?
  assert fixture_blob("Binary/octocat.jpeg").image?
  assert fixture_blob("Binary/octocat.jpg").image?
  assert fixture_blob("Binary/octocat.png").image?
  assert !fixture_blob("Binary/octocat.ai").image?
  assert !fixture_blob("Binary/octocat.psd").image?
end

# solid? covers 3D model formats (STL).
def test_solid
  assert fixture_blob("Binary/cube.stl").solid?
  assert fixture_blob("Generic/stl/STL/cube2.stl").solid?
end

def test_csv
  assert sample_blob("CSV/cars.csv").csv?
end

def test_pdf
  assert fixture_blob("Binary/foo.pdf").pdf?
end

# viewable? means the blob can be rendered as text in the UI.
def test_viewable
  assert fixture_blob("Data/README").viewable?
  assert sample_blob("Ruby/foo.rb").viewable?
  assert sample_blob("Perl/script.pl").viewable?
  assert !fixture_blob("Binary/linguist.gem").viewable?
  assert !fixture_blob("Binary/octocat.ai").viewable?
  assert !fixture_blob("Binary/octocat.png").viewable?
end
# generated? flags machine-produced files (minified JS, protobuf output,
# lockfiles, vendored build artifacts, ...) so they are excluded from
# language statistics. Each group below pins one detection heuristic.
def test_generated
  assert !fixture_blob("Data/README").generated?

  # Xcode project files
  assert !sample_blob("XML/MainMenu.xib").generated?
  assert fixture_blob("Binary/MainMenu.nib").generated?
  assert !sample_blob("XML/project.pbxproj").generated?

  # Cocoapods
  assert sample_blob('Pods/blah').generated?
  assert !sample_blob('My-Pods/blah').generated?

  # Carthage
  assert sample_blob('Carthage/Build/blah').generated?
  assert !sample_blob('Carthage/blah').generated?
  assert !sample_blob('Carthage/Checkout/blah').generated?
  assert !sample_blob('My-Carthage/Build/blah').generated?
  assert !sample_blob('My-Carthage/Build/blah').generated?

  # Gemfile.lock is NOT generated
  assert !sample_blob("Gemfile.lock").generated?

  # Generated .NET Docfiles
  assert sample_blob("XML/net_docfile.xml").generated?

  # Long line
  assert !sample_blob("JavaScript/uglify.js").generated?

  # Inlined JS, but mostly code
  assert !sample_blob("JavaScript/json2_backbone.js").generated?

  # Minified JS
  assert !sample_blob("JavaScript/jquery-1.6.1.js").generated?
  assert sample_blob("JavaScript/jquery-1.6.1.min.js").generated?
  assert sample_blob("JavaScript/jquery-1.4.2.min.js").generated?

  # CoffeeScript-generated JS
  # TODO

  # TypeScript-generated JS
  # TODO

  # Composer generated composer.lock file
  assert sample_blob("JSON/composer.lock").generated?

  # PEG.js-generated parsers
  assert sample_blob("JavaScript/parser.js").generated?

  # Generated PostScript
  assert !sample_blob("PostScript/sierpinski.ps").generated?

  # These examples are too basic to tell
  assert !sample_blob("JavaScript/hello.js").generated?

  assert sample_blob("JavaScript/intro-old.js").generated?
  assert sample_blob("JavaScript/classes-old.js").generated?

  assert sample_blob("JavaScript/intro.js").generated?
  assert sample_blob("JavaScript/classes.js").generated?

  # Protocol Buffer generated code
  assert sample_blob("C++/protocol-buffer.pb.h").generated?
  assert sample_blob("C++/protocol-buffer.pb.cc").generated?
  assert sample_blob("Java/ProtocolBuffer.java").generated?
  assert sample_blob("Python/protocol_buffer_pb2.py").generated?
  assert sample_blob("Go/api.pb.go").generated?
  assert sample_blob("Go/embedded.go").generated?
  assert sample_blob("Go/oapi-codegen.go").generated?

  # Apache Thrift generated code
  assert sample_blob("Python/gen-py-linguist-thrift.py").generated?
  assert sample_blob("Go/gen-go-linguist-thrift.go").generated?
  assert sample_blob("Java/gen-java-linguist-thrift.java").generated?
  assert sample_blob("JavaScript/gen-js-linguist-thrift.js").generated?
  assert sample_blob("Ruby/gen-rb-linguist-thrift.rb").generated?
  assert sample_blob("Objective-C/gen-cocoa-linguist-thrift.m").generated?

  # Generated JNI
  assert sample_blob("C/jni_layer.h").generated?

  # Minified CSS
  assert !sample_blob("CSS/bootstrap.css").generated?
  assert sample_blob("CSS/bootstrap.min.css").generated?

  # Generated VCR
  assert sample_blob("YAML/vcr_cassette.yml").generated?

  # Generated by Zephir
  assert sample_blob("Zephir/filenames/exception.zep.c").generated?
  assert sample_blob("Zephir/filenames/exception.zep.h").generated?
  assert sample_blob("Zephir/filenames/exception.zep.php").generated?
  assert !sample_blob("Zephir/Router.zep").generated?

  assert sample_blob("node_modules/grunt/lib/grunt.js").generated?

  # Godep saved dependencies
  assert sample_blob("Godeps/Godeps.json").generated?
  assert sample_blob("Godeps/_workspace/src/github.com/kr/s3/sign.go").generated?

  # Cython-generated C/C++
  assert sample_blob("C/sgd_fast.c").generated?
  assert sample_blob("C++/wrapper_inner.cpp").generated?

  # Unity3D-generated metadata
  assert sample_blob("Unity3D Asset/Tiles.meta").generated?
end
# Exercises the vendored-path heuristics: paths matched by the vendor
# patterns (package managers, bundled JS/CSS libraries, wrappers, editor
# and CI settings, ...) must report vendored?, while near-miss paths must
# not. Grouped by the tool/library each pattern targets.
def test_vendored
assert !fixture_blob("Data/README").vendored?
assert !sample_blob("ext/extconf.rb").vendored?
# Dependencies
assert sample_blob("dependencies/windows/headers/GL/glext.h").vendored?
# Node dependencies
assert sample_blob("node_modules/coffee-script/lib/coffee-script.js").vendored?
# Bower Components
assert sample_blob("bower_components/custom/custom.js").vendored?
assert sample_blob("app/bower_components/custom/custom.js").vendored?
assert sample_blob("vendor/assets/bower_components/custom/custom.js").vendored?
# Go dependencies
assert !sample_blob("Godeps/Godeps.json").vendored?
assert sample_blob("Godeps/_workspace/src/github.com/kr/s3/sign.go").vendored?
assert sample_blob(".indent.pro").vendored?
# Rails vendor/
assert sample_blob("vendor/plugins/will_paginate/lib/will_paginate.rb").vendored?
# Vendor/
assert sample_blob("Vendor/my_great_file.h").vendored?
# 'thirdparty' directory
assert sample_blob("thirdparty/lib/main.c").vendored?
# 'extern(al)' directory
assert sample_blob("extern/util/__init__.py").vendored?
assert sample_blob("external/jquery.min.js").vendored?
assert sample_blob("externals/fmt/CMakeLists.txt").vendored?
assert sample_blob("External/imgui/imgui.h").vendored?
# C deps
assert sample_blob("deps/http_parser/http_parser.c").vendored?
assert sample_blob("deps/v8/src/v8.h").vendored?
# Chart.js
# NOTE: matching is case-sensitive here — only the capitalized filename
# is treated as the vendored Chart.js library.
assert sample_blob("some/vendored/path/Chart.js").vendored?
assert !sample_blob("some/vendored/path/chart.js").vendored?
# CodeMirror deps
assert sample_blob("codemirror/mode/blah.js").vendored?
assert sample_blob("codemirror/5.0/mode/blah.js").vendored?
# Debian packaging
assert sample_blob("debian/cron.d").vendored?
# Django env
assert sample_blob("env/foo.py").vendored?
# Erlang
assert sample_blob("rebar").vendored?
# git config files
assert_predicate fixture_blob("some/path/.gitattributes"), :vendored?
assert_predicate fixture_blob(".gitignore"), :vendored?
assert_predicate fixture_blob("special/path/.gitmodules"), :vendored?
# Minified JavaScript and CSS
assert sample_blob("foo.min.js").vendored?
assert sample_blob("foo.min.css").vendored?
assert sample_blob("foo-min.js").vendored?
assert sample_blob("foo-min.css").vendored?
assert !sample_blob("foomin.css").vendored?
assert !sample_blob("foo.min.txt").vendored?
#.osx
assert sample_blob(".osx").vendored?
# Prototype
assert !sample_blob("public/javascripts/application.js").vendored?
assert sample_blob("public/javascripts/prototype.js").vendored?
assert sample_blob("public/javascripts/effects.js").vendored?
assert sample_blob("public/javascripts/controls.js").vendored?
assert sample_blob("public/javascripts/dragdrop.js").vendored?
# jQuery
assert sample_blob("jquery.js").vendored?
assert sample_blob("public/javascripts/jquery.js").vendored?
assert sample_blob("public/javascripts/jquery.min.js").vendored?
assert sample_blob("public/javascripts/jquery-1.7.js").vendored?
assert sample_blob("public/javascripts/jquery-1.7.min.js").vendored?
assert sample_blob("public/javascripts/jquery-1.5.2.js").vendored?
assert sample_blob("public/javascripts/jquery-1.6.1.js").vendored?
assert sample_blob("public/javascripts/jquery-1.6.1.min.js").vendored?
assert sample_blob("public/javascripts/jquery-1.10.1.js").vendored?
assert sample_blob("public/javascripts/jquery-1.10.1.min.js").vendored?
# jQuery plugins with extra name segments are not the library itself.
assert !sample_blob("public/javascripts/jquery.github.menu.js").vendored?
# jQuery UI
assert sample_blob("themes/ui-lightness/jquery-ui.css").vendored?
assert sample_blob("themes/ui-lightness/jquery-ui-1.8.22.custom.css").vendored?
assert sample_blob("themes/ui-lightness/jquery.ui.accordion.css").vendored?
assert sample_blob("ui/i18n/jquery.ui.datepicker-ar.js").vendored?
assert sample_blob("ui/i18n/jquery-ui-i18n.js").vendored?
assert sample_blob("ui/jquery.effects.blind.js").vendored?
assert sample_blob("ui/jquery-ui-1.8.22.custom.js").vendored?
assert sample_blob("ui/jquery-ui-1.8.22.custom.min.js").vendored?
assert sample_blob("ui/jquery-ui-1.8.22.js").vendored?
assert sample_blob("ui/jquery-ui-1.8.js").vendored?
assert sample_blob("ui/jquery-ui.min.js").vendored?
assert sample_blob("ui/jquery.ui.accordion.js").vendored?
assert sample_blob("ui/minified/jquery.effects.blind.min.js").vendored?
assert sample_blob("ui/minified/jquery.ui.accordion.min.js").vendored?
# jQuery Gantt
assert sample_blob("web-app/jquery-gantt/js/jquery.fn.gantt.js").vendored?
# jQuery fancyBox
assert sample_blob("web-app/fancybox/jquery.fancybox.js").vendored?
# Fuel UX
assert sample_blob("web-app/fuelux/js/fuelux.js").vendored?
# jQuery File Upload
assert sample_blob("fileupload-9.0.0/jquery.fileupload-process.js").vendored?
# Slick
assert sample_blob("web-app/slickgrid/controls/slick.columnpicker.js").vendored?
# Leaflet plugins
assert sample_blob("leaflet-plugins/Leaflet.Coordinates-0.5.0.src.js").vendored?
assert sample_blob("leaflet-plugins/leaflet.draw-src.js").vendored?
assert sample_blob("leaflet-plugins/leaflet.spin.js").vendored?
# VSCode
assert sample_blob(".vscode/settings.json").vendored?
assert !sample_blob("testing.vscode-testing").vendored?
# MooTools
assert sample_blob("public/javascripts/mootools-core-1.3.2-full-compat.js").vendored?
assert sample_blob("public/javascripts/mootools-core-1.3.2-full-compat-yc.js").vendored?
# Dojo
assert sample_blob("public/javascripts/dojo.js").vendored?
# MochiKit
assert sample_blob("public/javascripts/MochiKit.js").vendored?
# YUI
assert sample_blob("public/javascripts/yahoo-dom-event.js").vendored?
assert sample_blob("public/javascripts/yahoo-min.js").vendored?
assert sample_blob("public/javascripts/yuiloader-dom-event.js").vendored?
# WYS editors
assert sample_blob("public/javascripts/ckeditor.js").vendored?
assert sample_blob("public/javascripts/tiny_mce.js").vendored?
assert sample_blob("public/javascripts/tiny_mce_popup.js").vendored?
assert sample_blob("public/javascripts/tiny_mce_src.js").vendored?
# Ace Editor
assert sample_blob("ace-builds/src/ace.js").vendored?
assert sample_blob("static/project/ace-builds/src/ace.js").vendored?
# Fontello CSS files
assert sample_blob("fontello.css").vendored?
assert sample_blob("fontello-ie7.css").vendored?
assert sample_blob("fontello-codes.css").vendored?
assert sample_blob("fontello-codes-ie7.css").vendored?
assert sample_blob("fontello-embedded.css").vendored?
assert sample_blob("assets/css/fontello.css").vendored?
assert sample_blob("assets/css/fontello-ie7.css").vendored?
assert sample_blob("assets/css/fontello-codes.css").vendored?
assert sample_blob("assets/css/fontello-codes-ie7.css").vendored?
assert sample_blob("assets/css/fontello-embedded.css").vendored?
# AngularJS
assert sample_blob("public/javascripts/angular.js").vendored?
assert sample_blob("public/javascripts/angular.min.js").vendored?
# D3.js
assert sample_blob("public/javascripts/d3.v3.js").vendored?
assert sample_blob("public/javascripts/d3.v3.min.js").vendored?
# Modernizr
assert sample_blob("public/javascripts/modernizr-2.7.1.js").vendored?
assert sample_blob("public/javascripts/modernizr.custom.01009.js").vendored?
# Fabric
assert sample_blob("fabfile.py").vendored?
# WAF
assert sample_blob("waf").vendored?
# Visual Studio IntelliSense
assert sample_blob("Scripts/jquery-1.7-vsdoc.js").vendored?
# Microsoft Ajax
assert sample_blob("Scripts/MicrosoftAjax.debug.js").vendored?
assert sample_blob("Scripts/MicrosoftAjax.js").vendored?
assert sample_blob("Scripts/MicrosoftMvcAjax.debug.js").vendored?
assert sample_blob("Scripts/MicrosoftMvcAjax.js").vendored?
assert sample_blob("Scripts/MicrosoftMvcValidation.debug.js").vendored?
assert sample_blob("Scripts/MicrosoftMvcValidation.js").vendored?
# jQuery validation plugin (MS bundles this with asp.net mvc)
assert sample_blob("Scripts/jquery.validate.js").vendored?
assert sample_blob("Scripts/jquery.validate.min.js").vendored?
assert sample_blob("Scripts/jquery.validate.unobtrusive.js").vendored?
assert sample_blob("Scripts/jquery.validate.unobtrusive.min.js").vendored?
assert sample_blob("Scripts/jquery.unobtrusive-ajax.js").vendored?
assert sample_blob("Scripts/jquery.unobtrusive-ajax.min.js").vendored?
# NuGet Packages
assert sample_blob("packages/Modernizr.2.0.6/Content/Scripts/modernizr-2.0.6-development-only.js").vendored?
# Font Awesome
assert sample_blob("some/asset/path/font-awesome.min.css").vendored?
assert sample_blob("some/asset/path/font-awesome.css").vendored?
# Normalize
assert sample_blob("some/asset/path/normalize.css").vendored?
# Carthage
# Only a directory literally named "Carthage" counts; prefixed names do not.
assert sample_blob('Carthage/blah').vendored?
assert sample_blob('iOS/Carthage/blah').vendored?
assert !sample_blob('My-Carthage/blah').vendored?
assert !sample_blob('iOS/My-Carthage/blah').vendored?
# Html5shiv
assert sample_blob("Scripts/html5shiv.js").vendored?
assert sample_blob("Scripts/html5shiv.min.js").vendored?
# Test fixtures
assert sample_blob("test/fixtures/random.rkt").vendored?
assert sample_blob("Test/fixtures/random.rkt").vendored?
assert sample_blob("tests/fixtures/random.rkt").vendored?
# Cordova/PhoneGap
assert sample_blob("cordova.js").vendored?
assert sample_blob("cordova.min.js").vendored?
assert sample_blob("cordova-2.1.0.js").vendored?
assert sample_blob("cordova-2.1.0.min.js").vendored?
# Foundation js
assert sample_blob("foundation.js").vendored?
assert sample_blob("foundation.min.js").vendored?
assert sample_blob("foundation.abide.js").vendored?
# Vagrant
assert sample_blob("Vagrantfile").vendored?
# Gradle
assert sample_blob("gradlew").vendored?
assert sample_blob("gradlew.bat").vendored?
assert sample_blob("gradle/wrapper/gradle-wrapper.properties").vendored?
assert sample_blob("subproject/gradlew").vendored?
assert sample_blob("subproject/gradlew.bat").vendored?
assert sample_blob("subproject/gradle/wrapper/gradle-wrapper.properties").vendored?
# Maven
assert sample_blob("mvnw").vendored?
assert sample_blob("mvnw.cmd").vendored?
assert sample_blob(".mvn/wrapper/maven-wrapper.properties").vendored?
assert sample_blob("subproject/mvnw").vendored?
assert sample_blob("subproject/mvnw.cmd").vendored?
assert sample_blob("subproject/.mvn/wrapper/maven-wrapper.properties").vendored?
# .DS_Store
assert sample_blob(".DS_Store").vendored?
assert sample_blob("another-dir/.DS_Store").vendored?
# Octicons
assert sample_blob("octicons.css").vendored?
assert sample_blob("public/octicons.min.css").vendored?
assert sample_blob("public/octicons/sprockets-octicons.scss").vendored?
# Typesafe Activator
assert sample_blob("activator").vendored?
assert sample_blob("activator.bat").vendored?
assert sample_blob("subproject/activator").vendored?
assert sample_blob("subproject/activator.bat").vendored?
assert_predicate fixture_blob(".google_apis/bar.jar"), :vendored?
assert_predicate fixture_blob("foo/.google_apis/bar.jar"), :vendored?
# Sphinx docs
assert sample_blob("docs/_build/asset.doc").vendored?
assert sample_blob("docs/theme/file.css").vendored?
# ProGuard
assert sample_blob("proguard.pro").vendored?
assert sample_blob("proguard-rules.pro").vendored?
# Vagrant
assert sample_blob("puphpet/file.pp").vendored?
# Fabric.io
assert sample_blob("Fabric.framework/Fabric.h").vendored?
# Crashlytics
assert sample_blob("Crashlytics.framework/Crashlytics.h").vendored?
assert sample_blob("myapp/My Template.xctemplate/___FILEBASENAME___.h").vendored?
assert sample_blob("myapp/My Images.xcassets/some/stuff.imageset/Contents.json").vendored?
assert !sample_blob("myapp/MyData.json").vendored?
# Jenkins
assert sample_blob("Jenkinsfile").vendored?
# Bootstrap
# Only versioned/dist-style Bootstrap paths are vendored; plain directories
# or files merely named "bootstrap*" are treated as project code.
assert !sample_blob("src/bootstraps/settings.js").vendored?
assert !sample_blob("bootstrap/misc/other/reset.css").vendored?
assert sample_blob("bootstrap-1.4/misc/other/reset.css").vendored?
assert sample_blob("bootstrap.10.4/misc/other/reset.css").vendored?
assert sample_blob("src/bootstrap-5.4.1-beta-dist/js/bundle.js").vendored?
assert sample_blob("src/bootstrap-custom.js").vendored?
assert sample_blob("src/bootstrap-1.4.js").vendored?
assert sample_blob("src/bootstrap-5.4.1-beta-dist/js/bootstrap.bundle.js").vendored?
assert sample_blob("src/bootstrap-5.4.1-beta-dist/js/bootstrap.esm.js").vendored?
assert sample_blob("src/bootstrap-5.4.1-beta-dist/css/bootstrap.rtl.css").vendored?
# GitHub.com
assert sample_blob(".github/CODEOWNERS").vendored?
assert sample_blob(".github/workflows/test.yml").vendored?
# obsidian.md settings
assert sample_blob(".obsidian/app.json").vendored?
assert sample_blob(".obsidian/plugins/templater-obsidian/main.js").vendored?
# teamcity ci configuration
assert sample_blob(".teamcity/Project_Name_CI/Project.kt").vendored?
assert sample_blob(".teamcity/Project_Name_CI/settings.kts").vendored?
assert sample_blob(".teamcity/Project_Name_CI/patches/projects/3b71d400-c5d6-4628-8164-c50b1254cf1d.kts").vendored?
end
# Documentation paths must be flagged documentation?: top-level doc/docs/man
# directories, Documentation/ and javadoc/ at any depth, README/CHANGE*/
# CONTRIBUTING/LICENSE/COPYING/INSTALL files (bare, .md, .txt, any dir),
# and example/sample directories. Project-nested doc/, docs/ and man/
# directories must NOT count.
#
# Fix: the original ended with a second, redundant assertion on
# fixture_blob("INSTALL.txt") — an exact duplicate of the one in the
# INSTALL group above — which has been removed.
def test_documentation
  assert_predicate fixture_blob("doc/foo.html"), :documentation?
  assert_predicate fixture_blob("docs/foo.html"), :documentation?
  refute_predicate fixture_blob("project/doc/foo.html"), :documentation?
  refute_predicate fixture_blob("project/docs/foo.html"), :documentation?
  assert_predicate fixture_blob("Documentation/foo.md"), :documentation?
  assert_predicate fixture_blob("documentation/foo.md"), :documentation?
  assert_predicate fixture_blob("project/Documentation/foo.md"), :documentation?
  assert_predicate fixture_blob("project/documentation/foo.md"), :documentation?
  assert_predicate fixture_blob("javadoc/foo.html"), :documentation?
  assert_predicate fixture_blob("project/javadoc/foo.html"), :documentation?
  assert_predicate fixture_blob("man/foo.html"), :documentation?
  refute_predicate fixture_blob("project/man/foo.html"), :documentation?
  assert_predicate fixture_blob("README"), :documentation?
  assert_predicate fixture_blob("README.md"), :documentation?
  assert_predicate fixture_blob("README.txt"), :documentation?
  assert_predicate fixture_blob("Readme"), :documentation?
  assert_predicate fixture_blob("readme"), :documentation?
  assert_predicate fixture_blob("foo/README"), :documentation?
  assert_predicate fixture_blob("CHANGE"), :documentation?
  assert_predicate fixture_blob("CHANGE.md"), :documentation?
  assert_predicate fixture_blob("CHANGE.txt"), :documentation?
  assert_predicate fixture_blob("foo/CHANGE"), :documentation?
  assert_predicate fixture_blob("CHANGELOG"), :documentation?
  assert_predicate fixture_blob("CHANGELOG.md"), :documentation?
  assert_predicate fixture_blob("CHANGELOG.txt"), :documentation?
  assert_predicate fixture_blob("foo/CHANGELOG"), :documentation?
  assert_predicate fixture_blob("CHANGES"), :documentation?
  assert_predicate fixture_blob("CHANGES.md"), :documentation?
  assert_predicate fixture_blob("CHANGES.txt"), :documentation?
  assert_predicate fixture_blob("foo/CHANGES"), :documentation?
  assert_predicate fixture_blob("CONTRIBUTING"), :documentation?
  assert_predicate fixture_blob("CONTRIBUTING.md"), :documentation?
  assert_predicate fixture_blob("CONTRIBUTING.txt"), :documentation?
  assert_predicate fixture_blob("foo/CONTRIBUTING"), :documentation?
  assert_predicate fixture_blob("examples/some-file.pl"), :documentation?
  assert_predicate fixture_blob("Examples/some-example-file.rb"), :documentation?
  assert_predicate fixture_blob("LICENSE"), :documentation?
  assert_predicate fixture_blob("LICENCE.md"), :documentation?
  assert_predicate fixture_blob("License.txt"), :documentation?
  assert_predicate fixture_blob("LICENSE.txt"), :documentation?
  assert_predicate fixture_blob("foo/LICENSE"), :documentation?
  assert_predicate fixture_blob("COPYING"), :documentation?
  assert_predicate fixture_blob("COPYING.md"), :documentation?
  assert_predicate fixture_blob("COPYING.txt"), :documentation?
  assert_predicate fixture_blob("foo/COPYING"), :documentation?
  assert_predicate fixture_blob("INSTALL"), :documentation?
  assert_predicate fixture_blob("INSTALL.md"), :documentation?
  assert_predicate fixture_blob("INSTALL.txt"), :documentation?
  assert_predicate fixture_blob("foo/INSTALL"), :documentation?
  # An arbitrary markdown file outside the documentation patterns does not count.
  refute_predicate fixture_blob("foo.md"), :documentation?
  # Samples
  assert sample_blob("Samples/Ruby/foo.rb").documentation?
end
# End-to-end language detection check. Every registered sample must resolve
# to the language of its directory, and every fixture under test/fixtures
# must behave according to its directory name: "Data" fixtures detect no
# language, "Generated" fixtures are generated?, "Generic" fixtures match
# nothing, and all other directories must match their language.
def test_language
# Failures are reasonable in some cases, such as when a file is fully valid in more than one language.
allowed_failures = {
"#{samples_path}/C/rpc.h" => ["C", "C++"],
}
Samples.each do |sample|
blob = sample_blob(sample[:path])
assert blob.language, "No language for #{sample[:path]}"
# Prefer the language's on-disk directory name (fs_name) when defined.
fs_name = blob.language.fs_name ? blob.language.fs_name : blob.language.name
if allowed_failures.has_key? sample[:path]
assert allowed_failures[sample[:path]].include?(sample[:language]), blob.name
else
assert_equal sample[:language], fs_name, blob.name
end
end
# Test language detection for files which shouldn't be used as samples
root = File.expand_path('../fixtures', __FILE__)
Dir.entries(root).each do |language|
next if language == '.' || language == '..' || language == 'Binary' ||
File.basename(language) == 'ace_modes.json'
# Each directory contains test files of a language
dirname = File.join(root, language)
Dir.entries(dirname).each do |filename|
# By default blob search the file in the samples;
# thus, we need to give it the absolute path
filepath = File.join(dirname, filename)
next unless File.file?(filepath)
blob = fixture_blob(filepath)
if language == 'Data'
assert blob.language.nil?, "A language was found for #{filepath}"
elsif language == 'Generated'
assert blob.generated?, "#{filepath} is not a generated file"
elsif language == 'Generic'
assert !blob.language, "#{filepath} should not match a language"
else
fs_name = blob.language.fs_name ? blob.language.fs_name : blob.language.name
if allowed_failures.has_key? filepath
assert allowed_failures[filepath].include?(fs_name), filepath
else
assert blob.language, "No language for #{filepath}"
assert_equal language, fs_name, filepath
end
end
end
end
end
# Minified assets are too dense to be worth syntax highlighting.
def test_minified_files_not_safe_to_highlight
  refute sample_blob("JavaScript/jquery-1.6.1.min.js").safe_to_colorize?
end
# empty? must treat "" and nil as empty, but not whitespace-only or other
# non-blank data. Uses a throwaway Struct mixing in BlobHelper so only the
# data attribute matters.
def test_empty
  stub_blob = Struct.new(:data) { include Linguist::BlobHelper }
  ["", nil].each { |data| assert_predicate stub_blob.new(data), :empty? }
  [" ", "nope"].each { |data| refute_predicate stub_blob.new(data), :empty? }
end
# A blob is excluded from language statistics when it is vendored,
# documentation, generated, or its language type is :data or :prose.
def test_include_in_language_stats
  vendored_blob = sample_blob("bower_components/custom/custom.js")
  assert_predicate vendored_blob, :vendored?
  refute_predicate vendored_blob, :include_in_language_stats?

  readme = fixture_blob("README")
  assert_predicate readme, :documentation?
  refute_predicate readme, :include_in_language_stats?

  minified = sample_blob("CSS/bootstrap.min.css")
  assert_predicate minified, :generated?
  refute_predicate minified, :include_in_language_stats?

  data_blob = sample_blob("Ant Build System/filenames/ant.xml")
  assert_equal :data, data_blob.language.type
  refute_predicate data_blob, :include_in_language_stats?

  prose_blob = sample_blob("Markdown/tender.md")
  assert_equal :prose, prose_blob.language.type
  refute_predicate prose_blob, :include_in_language_stats?

  refute_predicate sample_blob("HTML/pages.html"), :include_in_language_stats?
end
end
| ruby | MIT | a7e40d31e271f6747fa1234df37b5fff3e6e2406 | 2026-01-04T15:37:32.851738Z | false |
github-linguist/linguist | https://github.com/github-linguist/linguist/blob/a7e40d31e271f6747fa1234df37b5fff3e6e2406/test/test_grammars.rb | test/test_grammars.rb | require_relative "./helper"
# Sanity checks that grammars.yml, the vendored grammar submodules, and the
# generated vendor/README.md stay in sync with one another.
class TestGrammars < Minitest::Test
  ROOT = File.expand_path("../..", __FILE__)

  # Parse the grammar manifest fresh for each test.
  def setup
    @grammars = YAML.load(File.read(File.join(ROOT, "grammars.yml")))
  end

  # Each TextMate scope may be claimed by at most one grammar.
  def test_no_duplicate_scopes
    all_scopes = @grammars.values.flatten
    repeated = all_scopes.group_by { |scope| scope }.select { |_scope, seen| seen.length > 1 }.map(&:first)
    assert repeated.empty?, "The following scopes appear in grammars.yml more than once:\n#{repeated.sort.join("\n")}"
  end

  # Every submodule listed in grammars.yml must exist on disk, and vice versa.
  def test_submodules_are_in_sync
    # Strip off paths inside the submodule so that just the submodule path remains.
    declared = @grammars.keys.grep(/vendor\/grammars/).map { |source| source[%r{vendor/grammars/[^/]+}] }
    missing_on_disk = declared - submodule_paths
    undeclared = submodule_paths - declared
    message = ""
    unless missing_on_disk.empty?
      message << "The following submodules are listed in grammars.yml but don't seem to exist in the repository.\n"
      message << "Either add them using `git submodule add` or remove them from grammars.yml.\n"
      message << missing_on_disk.sort.join("\n")
    end
    unless undeclared.empty?
      message << "\n" unless message.empty?
      message << "The following submodules exist in the repository but aren't listed in grammars.yml.\n"
      message << "Either add them to grammars.yml or remove them from the repository using `git rm`.\n"
      message << undeclared.sort.join("\n")
    end
    assert missing_on_disk.empty? && undeclared.empty?, message.sub(/\.\Z/, "")
  end

  # vendor/README.md (past its leading HTML comment) must match the output
  # of script/list-grammars exactly.
  def test_readme_file_is_in_sync
    on_disk = File.read("#{ROOT}/vendor/README.md").to_s.sub(/\A.+?<!--.+?-->\n/mu, "")
    regenerated = `script/list-grammars --print`
    assert_equal on_disk, regenerated, "Grammar list is out-of-date. Run `script/list-grammars`"
  end

  # Submodule URLs must use HTTPS, not SSH (git@...) remotes.
  def test_submodules_use_https_links
    File.open(".gitmodules", "r") do |fh|
      # String#[] with a capture group extracts the git@ URL or nil per line.
      ssh_urls = fh.each_line.map { |line| line[/url = (git@.*)/, 1] }.compact
      msg = "The following submodules don't have an HTTPS link:\n* #{ssh_urls.join("\n* ")}"
      assert_equal [], ssh_urls, msg
    end
  end

  private

  # Submodule paths registered in .gitmodules, excluding CodeMirror.
  # Memoized because it shells out to `git config`.
  def submodule_paths
    @submodule_paths ||= `git config --list --file "#{File.join(ROOT, ".gitmodules")}"`.lines.grep(/\.path=/).map { |entry| entry.chomp.split("=", 2).last }.reject { |path| path =~ /CodeMirror/ }
  end
end
| ruby | MIT | a7e40d31e271f6747fa1234df37b5fff3e6e2406 | 2026-01-04T15:37:32.851738Z | false |
github-linguist/linguist | https://github.com/github-linguist/linguist/blob/a7e40d31e271f6747fa1234df37b5fff3e6e2406/test/test_generated.rb | test/test_generated.rb | require_relative "./helper"
# Verifies Generated.generated? in two modes: decisions that must be made
# from the path alone (the data loader must never fire), and decisions that
# require file contents (the loader must fire, then the verdict is checked
# against real data).
#
# Fix: generated_without_loading_data compared `expected == actual` while
# its sibling generated_loading_data compared `expected == !!actual`; a
# truthy non-boolean return (e.g. a MatchData) would spuriously fail the
# path-only helper. Both now coerce with `!!`.
class TestGenerated < Minitest::Test
  include Linguist

  # Raised by the sentinel loader to detect whether generated? read the data.
  class DataLoadedError < StandardError; end

  # Human-readable failure message; +negate+ flips the expectation.
  def error_message(blob, negate = false)
    if negate
      "#{blob} was incorrectly recognised as a generated file"
    else
      "#{blob} was not recognised as a generated file"
    end
  end

  # Asserts the path alone decides generated-ness: the loader raises if it
  # is ever invoked, which is converted into an assertion failure.
  def generated_without_loading_data(blob, negate = false)
    begin
      expected = !negate
      # Coerce to a boolean so truthy non-boolean returns still compare
      # equal, mirroring generated_loading_data below.
      actual = Generated.generated?(blob, lambda { raise DataLoadedError.new })
      assert(expected == !!actual, error_message(blob, negate))
    rescue DataLoadedError
      assert false, "Data was loaded when calling generated? on #{blob}"
    end
  end

  # Asserts the decision needs file contents: first proves the loader is
  # consulted, then re-runs with the real file data and checks the verdict.
  def generated_loading_data(blob, negate = false)
    assert_raises(DataLoadedError, "Data wasn't loaded when calling generated? on #{blob}") do
      Generated.generated?(blob, lambda { raise DataLoadedError.new })
    end
    expected = !negate
    actual = Generated.generated?(blob, lambda { File.read(blob) })
    assert(expected == !!actual, error_message(blob, negate))
  end

  # Convenience wrappers resolving names under test/fixtures or samples/.
  def generated_fixture_without_loading_data(name, negate = false)
    generated_without_loading_data(File.join(fixtures_path, name), negate)
  end

  def generated_fixture_loading_data(name, negate = false)
    generated_loading_data(File.join(fixtures_path, name), negate)
  end

  def generated_sample_without_loading_data(name, negate = false)
    generated_without_loading_data(File.join(samples_path, name), negate)
  end

  def generated_sample_loading_data(name, negate = false)
    generated_loading_data(File.join(samples_path, name), negate)
  end

  def test_check_generated
    # Xcode project files
    generated_sample_without_loading_data("Binary/MainMenu.nib")
    generated_sample_without_loading_data("Dummy/foo.xcworkspacedata")
    generated_sample_without_loading_data("Dummy/foo.xcuserstate")
    # Cocoapods
    generated_sample_without_loading_data("Pods/Pods.xcodeproj")
    generated_sample_without_loading_data("Pods/SwiftDependency/foo.swift")
    generated_sample_without_loading_data("Pods/ObjCDependency/foo.h")
    generated_sample_without_loading_data("Pods/ObjCDependency/foo.m")
    generated_sample_without_loading_data("Dummy/Pods/Pods.xcodeproj")
    generated_sample_without_loading_data("Dummy/Pods/SwiftDependency/foo.swift")
    generated_sample_without_loading_data("Dummy/Pods/ObjCDependency/foo.h")
    generated_sample_without_loading_data("Dummy/Pods/ObjCDependency/foo.m")
    # Carthage
    generated_sample_without_loading_data("Carthage/Build/.Dependency.version")
    generated_sample_without_loading_data("Carthage/Build/iOS/Dependency.framework")
    generated_sample_without_loading_data("Carthage/Build/Mac/Dependency.framework")
    generated_sample_without_loading_data("src/Carthage/Build/.Dependency.version")
    generated_sample_without_loading_data("src/Carthage/Build/iOS/Dependency.framework")
    generated_sample_without_loading_data("src/Carthage/Build/Mac/Dependency.framework")
    # Go-specific vendored paths
    generated_sample_without_loading_data("go/vendor/github.com/foo.go")
    generated_sample_without_loading_data("go/vendor/golang.org/src/foo.c")
    generated_sample_without_loading_data("go/vendor/gopkg.in/some/nested/path/foo.go")
    # .NET designer file
    generated_sample_without_loading_data("Dummy/foo.designer.cs")
    generated_sample_without_loading_data("Dummy/foo.Designer.cs")
    generated_sample_without_loading_data("Dummy/foo.designer.vb")
    generated_sample_without_loading_data("Dummy/foo.Designer.vb")
    # Composer generated composer.lock file
    generated_sample_without_loading_data("JSON/composer.lock")
    # Node modules
    generated_sample_without_loading_data("Dummy/node_modules/foo.js")
    # npm shrinkwrap file
    generated_sample_without_loading_data("Dummy/npm-shrinkwrap.json")
    generated_sample_without_loading_data("Dummy/package-lock.json")
    # pnpm lockfile
    generated_sample_without_loading_data("Dummy/pnpm-lock.yaml")
    # Bun lockfile
    generated_sample_without_loading_data("JSON/filenames/bun.lock")
    # Yarn Plug'n'Play file
    generated_sample_without_loading_data(".pnp.js")
    generated_sample_without_loading_data(".pnp.cjs")
    generated_sample_without_loading_data(".pnp.mjs")
    generated_sample_without_loading_data(".pnp.loader.mjs")
    # Godep saved dependencies
    generated_sample_without_loading_data("Godeps/Godeps.json")
    generated_sample_without_loading_data("Godeps/_workspace/src/github.com/kr/s3/sign.go")
    # Generated by Zephir
    generated_sample_without_loading_data("C/exception.zep.c")
    generated_sample_without_loading_data("C/exception.zep.h")
    generated_sample_without_loading_data("PHP/exception.zep.php")
    # Minified files
    generated_sample_loading_data("JavaScript/jquery-1.6.1.min.js")
    # MySQL View Definition Format (INI)
    generated_sample_loading_data("INI/metrics.frm")
    # JavaScript with source-maps
    generated_sample_loading_data("JavaScript/namespace.js")
    generated_fixture_loading_data("Generated/inline.js")
    # CSS with source-maps
    generated_fixture_loading_data("Generated/linked.css")
    generated_fixture_loading_data("Generated/inline.css")
    # Source-map
    generated_fixture_without_loading_data("Data/bootstrap.css.map")
    generated_fixture_without_loading_data("Generated/linked.css.map")
    generated_fixture_loading_data("Data/sourcemap.v3.map")
    generated_fixture_loading_data("Data/sourcemap.v1.map")
    # Specflow
    generated_fixture_without_loading_data("Features/BindingCulture.feature.cs")
    # JFlex
    generated_sample_loading_data("Java/JFlexLexer.java")
    # GrammarKit
    generated_sample_loading_data("Java/GrammarKit.java")
    # roxygen2
    generated_sample_loading_data("R/import.Rd")
    # PostScript
    generated_sample_loading_data("PostScript/lambda.pfa")
    # Perl ppport.h
    generated_fixture_loading_data("Generated/ppport.h")
    # Graphql Relay
    # NOTE(review): "Javascript" is cased differently from the "JavaScript"
    # samples directory used elsewhere — the path-based __generated__ check
    # still passes, but confirm the intended directory name.
    generated_sample_without_loading_data("Javascript/__generated__/App_user.graphql.js")
    # Game Maker Studio 2
    generated_sample_loading_data("JSON/GMS2_Project.yyp")
    generated_sample_loading_data("JSON/2ea73365-b6f1-4bd1-a454-d57a67e50684.yy")
    generated_sample_loading_data("JSON/VCT.yy")
    generated_fixture_loading_data("Generated/options_main.inherited.yy")
    # Pipenv
    generated_sample_without_loading_data("Dummy/Pipfile.lock")
    # HTML
    generated_fixture_loading_data("HTML/attr-swapped.html")
    generated_fixture_loading_data("HTML/extra-attr.html")
    generated_fixture_loading_data("HTML/extra-spaces.html")
    generated_fixture_loading_data("HTML/extra-tags.html")
    generated_fixture_loading_data("HTML/grohtml.html")
    generated_fixture_loading_data("HTML/grohtml.xhtml")
    generated_fixture_loading_data("HTML/makeinfo.html")
    generated_fixture_loading_data("HTML/mandoc.html")
    generated_fixture_loading_data("HTML/node78.html")
    generated_fixture_loading_data("HTML/org-mode.html")
    generated_fixture_loading_data("HTML/quotes-double.html")
    generated_fixture_loading_data("HTML/quotes-none.html")
    generated_fixture_loading_data("HTML/quotes-single.html")
    generated_fixture_loading_data("HTML/uppercase.html")
    generated_fixture_loading_data("HTML/ronn.html")
    generated_fixture_loading_data("HTML/unknown.html", true)
    generated_fixture_loading_data("HTML/no-content.html", true)
    generated_sample_loading_data("HTML/pages.html")
    # GIMP
    generated_fixture_loading_data("C/image.c")
    generated_fixture_loading_data("C/image.h")
    # Haxe
    generated_fixture_loading_data("Generated/Haxe/main.js")
    generated_fixture_loading_data("Generated/Haxe/main.py")
    generated_fixture_loading_data("Generated/Haxe/main.lua")
    generated_fixture_loading_data("Generated/Haxe/Main.cpp")
    generated_fixture_loading_data("Generated/Haxe/Main.h")
    generated_fixture_loading_data("Generated/Haxe/Main.java")
    generated_fixture_loading_data("Generated/Haxe/Main.cs")
    generated_fixture_loading_data("Generated/Haxe/Main.php")
    # Cargo
    generated_sample_without_loading_data("TOML/filenames/Cargo.toml.orig")
    # jOOQ
    generated_sample_loading_data("Java/generated-jooq-table.java")
    # Package.resolved
    generated_sample_without_loading_data("JSON/filenames/Package.resolved")
    # poetry
    generated_sample_without_loading_data("TOML/filenames/poetry.lock")
    # pdm
    generated_sample_without_loading_data("TOML/filenames/pdm.lock")
    # uv
    generated_sample_without_loading_data("TOML/filenames/uv.lock")
    # coverage.py `coverage html` output
    generated_sample_without_loading_data("htmlcov/index.html")
    generated_sample_without_loading_data("htmlcov/coverage_html.js")
    generated_sample_without_loading_data("htmlcov/style.css")
    generated_sample_without_loading_data("htmlcov/status.json")
    generated_sample_without_loading_data("Dummy/htmlcov/index.html")
    generated_sample_without_loading_data("Dummy/htmlcov/coverage_html.js")
    generated_sample_without_loading_data("Dummy/htmlcov/style.css")
    generated_sample_without_loading_data("Dummy/htmlcov/status.json")
    # Dart
    generated_sample_loading_data("Dart/point.dart", true)
    generated_sample_loading_data("Dart/equals.dart", true)
    generated_sample_loading_data("Dart/addressbook.pb.dart")
    generated_sample_loading_data("Dart/addressbook.pbenum.dart")
    generated_sample_loading_data("Dart/addressbook.pbjson.dart")
    generated_sample_loading_data("Dart/equals.freezed.dart")
    # Gradle Wrapper
    generated_sample_without_loading_data("Shell/filenames/gradlew")
    generated_sample_without_loading_data("Batchfile/filenames/gradlew.bat")
    # Maven Wrapper
    generated_sample_without_loading_data("Shell/filenames/mvnw")
    generated_sample_without_loading_data("Batchfile/filenames/mvnw.cmd")
  end

  # We've whitelisted these files on purpose, even though they're machine-generated.
  # Future contributors won't necessarily know that, so these checks are in-place to
  # catch PRs that mark these files as generated (and prevent a forgetful maintainer
  # from approving them).
  def test_check_not_generated
    # Jest snapshots (#3579)
    generated_sample_without_loading_data("Jest Snapshot/css.test.tsx.snap", true)
    # Yarn lockfiles (#4459)
    generated_sample_without_loading_data("YAML/filenames/yarn.lock", true)
  end
end
| ruby | MIT | a7e40d31e271f6747fa1234df37b5fff3e6e2406 | 2026-01-04T15:37:32.851738Z | false |
github-linguist/linguist | https://github.com/github-linguist/linguist/blob/a7e40d31e271f6747fa1234df37b5fff3e6e2406/test/test_sha256.rb | test/test_sha256.rb | require_relative "./helper"
class TestSHA256 < Minitest::Test
include Linguist
def test_hexdigest_string
assert_equal "15020d93a6f635366cb20229cb3931c3651992dc6df85cddecc743fa11e48a66", SHA256.hexdigest("foo")
assert_equal "3da77c2b08c7d29fe3d936b7918039941c0881065dde07d0af9d280d2d475d00", SHA256.hexdigest("bar")
end
def test_hexdigest_symbol
assert_equal "dea6712e86478d2ee22a35a8c5ac9627e7cbc5ce2407a7da7c645fea2434fe9b", SHA256.hexdigest(:foo)
assert_equal "9e414b095a78ef4c2ae6f74dc6898f653c6590554ff36719d4803733a6d910e3", SHA256.hexdigest(:bar)
refute_equal SHA256.hexdigest("foo"), SHA256.hexdigest(:foo)
end
def test_hexdigest_integer
# Ruby 2.4.0 merged Bignum and Fixnum into Integer which means we get different digests
if Gem::Version.new(RUBY_VERSION) >= Gem::Version.new("2.4.0")
assert_equal "e2e98cd680460c0cecd76714d94e601be9b629ad16ae45251fa30366f62007bb", SHA256.hexdigest(1)
assert_equal "88159e413c4a97194ee0d8b082ebfd54ce3f9985b58270d3bf59a6e6186f8da6", SHA256.hexdigest(2)
else
assert_equal "d9af1c504ce3dc080973d6de94d44884482fcee3ff04fd430c5aba2e4d1ba2cd", SHA256.hexdigest(1)
assert_equal "1a4dfbfcf67bc80f3dbade7083386362d8f40db225cbd8e41e61199599f334c3", SHA256.hexdigest(2)
end
refute_equal SHA256.hexdigest("1"), SHA256.hexdigest(1)
end
def test_hexdigest_boolean
assert_equal "92de503a8b413365fc38050c7dd4bacf28b0f705e744dacebcaa89f2032dcd67", SHA256.hexdigest(true)
assert_equal "bdfd64a7c8febcc3b0b8fb05d60c8e2a4cb6b8c081fcba20db1c9778e9beaf89", SHA256.hexdigest(false)
refute_equal SHA256.hexdigest("true"), SHA256.hexdigest(true)
refute_equal SHA256.hexdigest("false"), SHA256.hexdigest(false)
end
def test_hexdigest_nil
assert_equal "9bda381dac87b1c16b04f996abb623f43f1cdb89ce8be7dda3f67319dc440bc5", SHA256.hexdigest(nil)
refute_equal SHA256.hexdigest("nil"), SHA256.hexdigest(nil)
end
def test_hexdigest_array
assert_equal "f0cf39d0be3efbb6f86ac2404100ff7e055c17ded946a06808d66f89ca03a811", SHA256.hexdigest([])
# Ruby 2.4.0 merged Bignum and Fixnum into Integer which means we get different digests
if Gem::Version.new(RUBY_VERSION) >= Gem::Version.new("2.4.0")
assert_equal "e0ca0f6a274c6cc8e07fce6cf58cd2feef978928cad1c0760b6c3b8a865eafaf", SHA256.hexdigest([1])
assert_equal "46b40ee5dcee74020fd75532659329e2a01b8cd74727218966c40cd1b4a54dab", SHA256.hexdigest([1, 2])
assert_equal "0c950e57182b6830a3f4da3388fa7e3e5ec879277847c20b941f5a0144ba53a7", SHA256.hexdigest([1, 2, 3])
assert_equal "0ef88eec0c680d3860c0001c6beb4407559d61db85744e088436a21d72425b4f", SHA256.hexdigest([1, 2, [3]])
else
assert_equal "87d177e40c167e5ce164081297c788c36540aa8f9cc05a0d1d3abfa577b56886", SHA256.hexdigest([1])
assert_equal "c8189462e4785c786b96c8d29ecfa7c030010700dd1dfb194b84b37c50981feb", SHA256.hexdigest([1, 2])
assert_equal "7edd426189105eede4954333ce40d96ff850ca86aa48e15b58cd017aab52c712", SHA256.hexdigest([1, 2, 3])
assert_equal "b0f08d52b5efd5c44dc47965733ff8baac34db05abf2620a1f71c0fc0a7e42d2", SHA256.hexdigest([1, 2, [3]])
end
end
def test_hexdigest_hash
assert_equal "a91069147f9bd9245cdacaef8ead4c3578ed44f179d7eb6bd4690e62ba4658f2", SHA256.hexdigest({})
# Ruby 2.4.0 merged Bignum and Fixnum into Integer which means we get different digests
if Gem::Version.new(RUBY_VERSION) >= Gem::Version.new("2.4.0")
assert_equal "124c84e8bf1fe4be4d2c3e5d79522972f390dbf6031dd8ba7029793f2fec0ef7", SHA256.hexdigest({:a => 1})
assert_equal "8a1c987468032f979d4a7d4eeb5049bfe0aa04acea54a4456888e2898dd1c532", SHA256.hexdigest({:b => 2})
else
assert_equal "7ae6ddefc4ec3fca1ca84794904144cdc99ca24444fcb44c05c0f269af8c8840", SHA256.hexdigest({:a => 1})
assert_equal "484ca3e0efd43e7d650189150751512a3c985957f4c15b52da0bb12d72b3dcaa", SHA256.hexdigest({:b => 2})
end
refute_equal SHA256.hexdigest([:b, 2]), SHA256.hexdigest({:b => 2})
assert_equal SHA256.hexdigest({:b => 2, :a => 1}), SHA256.hexdigest({:a => 1, :b => 2})
assert_equal SHA256.hexdigest({:c => 3, :b => 2, :a => 1}), SHA256.hexdigest({:a => 1, :b => 2, :c => 3})
end
end
| ruby | MIT | a7e40d31e271f6747fa1234df37b5fff3e6e2406 | 2026-01-04T15:37:32.851738Z | false |
github-linguist/linguist | https://github.com/github-linguist/linguist/blob/a7e40d31e271f6747fa1234df37b5fff3e6e2406/test/fixtures/Ruby/foo.rb | test/fixtures/Ruby/foo.rb | # This file is used to test the Linguist language detection capabilities.
# It should be detected as Ruby based on its extension and content.
# The file is intentionally simple to ensure it does not contain complex logic.
# You can add more Ruby code here if needed for further testing.
# The purpose of this file is to serve as a fixture for testing the Linguist library.
# It should not be executed in a production environment.
# Ensure that this file is saved with the .rb extension to be recognized as Ruby code.
puts "This is a sample Ruby file for testing purposes."
| ruby | MIT | a7e40d31e271f6747fa1234df37b5fff3e6e2406 | 2026-01-04T15:37:32.851738Z | false |
github-linguist/linguist | https://github.com/github-linguist/linguist/blob/a7e40d31e271f6747fa1234df37b5fff3e6e2406/ext/linguist/extconf.rb | ext/linguist/extconf.rb | require 'mkmf'
dir_config('linguist')
create_makefile('linguist/linguist')
| ruby | MIT | a7e40d31e271f6747fa1234df37b5fff3e6e2406 | 2026-01-04T15:37:32.851738Z | false |
github-linguist/linguist | https://github.com/github-linguist/linguist/blob/a7e40d31e271f6747fa1234df37b5fff3e6e2406/lib/linguist.rb | lib/linguist.rb | require 'linguist/blob_helper'
require 'linguist/generated'
require 'linguist/grammars'
require 'linguist/heuristics'
require 'linguist/language'
require 'linguist/repository'
require 'linguist/samples'
require 'linguist/shebang'
require 'linguist/version'
require 'linguist/strategy/manpage'
require 'linguist/strategy/xml'
require 'linguist/instrumenter'
class << Linguist
# Public: Detects the Language of the blob.
#
# blob - an object that includes the Linguist `BlobHelper` interface;
# see Linguist::LazyBlob and Linguist::FileBlob for examples
#
# Returns Language or nil.
def detect(blob, allow_empty: false)
# Bail early if the blob is binary or empty.
return nil if blob.likely_binary? || blob.binary? || (!allow_empty && blob.empty?)
Linguist.instrument("linguist.detection", :blob => blob) do
# Call each strategy until one candidate is returned.
languages = []
returning_strategy = nil
STRATEGIES.each do |strategy|
returning_strategy = strategy
candidates = Linguist.instrument("linguist.strategy", :blob => blob, :strategy => strategy, :candidates => languages) do
strategy.call(blob, languages)
end
if candidates.size == 1
languages = candidates
break
elsif candidates.size > 1
# More than one candidate was found, pass them to the next strategy.
languages = candidates
else
# No candidates, try the next strategy
end
end
Linguist.instrument("linguist.detected", :blob => blob, :strategy => returning_strategy, :language => languages.first)
languages.first
end
end
# Internal: The strategies used to detect the language of a file.
#
# A strategy is an object that has a `.call` method that takes two arguments:
#
# blob - An object that quacks like a blob.
# languages - An Array of candidate Language objects that were returned by the
# previous strategy.
#
# A strategy should return an Array of Language candidates.
#
# Strategies are called in turn until a single Language is returned.
STRATEGIES = [
Linguist::Strategy::Modeline,
Linguist::Strategy::Filename,
Linguist::Shebang,
Linguist::Strategy::Extension,
Linguist::Strategy::XML,
Linguist::Strategy::Manpage,
Linguist::Heuristics,
Linguist::Classifier
]
# Public: Set an instrumenter.
#
# class CustomInstrumenter
# def instrument(name, payload = {})
# warn "Instrumenting #{name}: #{payload[:blob]}"
# end
# end
#
# Linguist.instrumenter = CustomInstrumenter.new
#
# The instrumenter must conform to the `ActiveSupport::Notifications`
# interface, which defines `#instrument` and accepts:
#
# name - the String name of the event (e.g. "linguist.detected")
# payload - a Hash of the exception context.
attr_accessor :instrumenter
# Internal: Perform instrumentation on a block
#
# Linguist.instrument("linguist.dosomething", :blob => blob) do
# # logic to instrument here.
# end
#
def instrument(*args, &bk)
if instrumenter
instrumenter.instrument(*args, &bk)
elsif block_given?
yield
end
end
end
| ruby | MIT | a7e40d31e271f6747fa1234df37b5fff3e6e2406 | 2026-01-04T15:37:32.851738Z | false |
github-linguist/linguist | https://github.com/github-linguist/linguist/blob/a7e40d31e271f6747fa1234df37b5fff3e6e2406/lib/linguist/language.rb | lib/linguist/language.rb | require 'cgi'
require 'yaml'
begin
require 'yajl'
rescue LoadError
require 'json'
end
require 'linguist/classifier'
require 'linguist/heuristics'
require 'linguist/samples'
require 'linguist/file_blob'
require 'linguist/blob_helper'
require 'linguist/strategy/filename'
require 'linguist/strategy/extension'
require 'linguist/strategy/modeline'
require 'linguist/shebang'
module Linguist
# Language names that are recognizable by GitHub. Defined languages
# can be highlighted, searched and listed under the Top Languages page.
#
# Languages are defined in `lib/linguist/languages.yml`.
class Language
@languages = []
@index = {}
@name_index = {}
@alias_index = {}
@language_id_index = {}
@extension_index = Hash.new { |h,k| h[k] = [] }
@interpreter_index = Hash.new { |h,k| h[k] = [] }
@filename_index = Hash.new { |h,k| h[k] = [] }
# Detect languages by a specific type
#
# type - A symbol that exists within TYPES
#
# Returns an array
def self.by_type(type)
all.select { |h| h.type == type }
end
# Internal: Create a new Language object
#
# attributes - A hash of attributes
#
# Returns a Language object
def self.create(attributes = {})
language = new(attributes)
@languages << language
# All Language names should be unique. Raise if there is a duplicate.
if @name_index.key?(language.name)
raise ArgumentError, "Duplicate language name: #{language.name}"
end
# Language name index
@index[language.name.downcase] = @name_index[language.name.downcase] = language
language.aliases.each do |name|
# All Language aliases should be unique. Raise if there is a duplicate.
if @alias_index.key?(name)
raise ArgumentError, "Duplicate alias: #{name}"
end
@index[name.downcase] = @alias_index[name.downcase] = language
end
language.extensions.each do |extension|
if extension !~ /^\./
raise ArgumentError, "Extension is missing a '.': #{extension.inspect}"
end
@extension_index[extension.downcase] << language
end
language.interpreters.each do |interpreter|
@interpreter_index[interpreter] << language
end
language.filenames.each do |filename|
@filename_index[filename] << language
end
@language_id_index[language.language_id] = language
language
end
# Public: Get all Languages
#
# Returns an Array of Languages
def self.all
@languages
end
# Public: Look up Language by its proper name.
#
# name - The String name of the Language
#
# Examples
#
# Language.find_by_name('Ruby')
# # => #<Language name="Ruby">
#
# Returns the Language or nil if none was found.
def self.find_by_name(name)
return nil if !name.is_a?(String) || name.to_s.empty?
name && (@name_index[name.downcase] || @name_index[name.split(',', 2).first.downcase])
end
# Public: Look up Language by one of its aliases.
#
# name - A String alias of the Language
#
# Examples
#
# Language.find_by_alias('cpp')
# # => #<Language name="C++">
#
# Returns the Language or nil if none was found.
def self.find_by_alias(name)
return nil if !name.is_a?(String) || name.to_s.empty?
name && (@alias_index[name.downcase] || @alias_index[name.split(',', 2).first.downcase])
end
# Public: Look up Languages by filename.
#
# The behaviour of this method recently changed.
# See the second example below.
#
# filename - The path String.
#
# Examples
#
# Language.find_by_filename('Cakefile')
# # => [#<Language name="CoffeeScript">]
# Language.find_by_filename('foo.rb')
# # => []
#
# Returns all matching Languages or [] if none were found.
def self.find_by_filename(filename)
basename = File.basename(filename)
@filename_index[basename]
end
# Public: Look up Languages by file extension.
#
# The behaviour of this method recently changed.
# See the second example below.
#
# filename - The path String.
#
# Examples
#
# Language.find_by_extension('dummy.rb')
# # => [#<Language name="Ruby">]
# Language.find_by_extension('rb')
# # => []
#
# Returns all matching Languages or [] if none were found.
def self.find_by_extension(filename)
# find the first extension with language definitions
extname = FileBlob.new(filename.downcase).extensions.detect do |e|
!@extension_index[e].empty?
end
@extension_index[extname]
end
# Public: Look up Languages by interpreter.
#
# interpreter - String of interpreter name
#
# Examples
#
# Language.find_by_interpreter("bash")
# # => [#<Language name="Bash">]
#
# Returns the matching Language
def self.find_by_interpreter(interpreter)
@interpreter_index[interpreter]
end
# Public: Look up Languages by its language_id.
#
# language_id - Integer of language_id
#
# Examples
#
# Language.find_by_id(100)
# # => [#<Language name="Elixir">]
#
# Returns the matching Language
def self.find_by_id(language_id)
@language_id_index[language_id.to_i]
end
# Public: Look up Language by its name.
#
# name - The String name of the Language
#
# Examples
#
# Language['Ruby']
# # => #<Language name="Ruby">
#
# Language['ruby']
# # => #<Language name="Ruby">
#
# Returns the Language or nil if none was found.
def self.[](name)
return nil if !name.is_a?(String) || name.to_s.empty?
lang = @index[name.downcase]
return lang if lang
@index[name.split(',', 2).first.downcase]
end
# Public: A List of popular languages
#
# Popular languages are sorted to the top of language chooser
# dropdowns.
#
# This list is configured in "popular.yml".
#
# Returns an Array of Languages.
def self.popular
@popular ||= all.select(&:popular?).sort_by { |lang| lang.name.downcase }
end
# Public: A List of non-popular languages
#
# Unpopular languages appear below popular ones in language
# chooser dropdowns.
#
# This list is created from all the languages not listed in "popular.yml".
#
# Returns an Array of Languages.
def self.unpopular
@unpopular ||= all.select(&:unpopular?).sort_by { |lang| lang.name.downcase }
end
# Public: A List of languages with assigned colors.
#
# Returns an Array of Languages.
def self.colors
@colors ||= all.select(&:color).sort_by { |lang| lang.name.downcase }
end
# Internal: Initialize a new Language
#
# attributes - A hash of attributes
def initialize(attributes = {})
# @name is required
@name = attributes[:name] || raise(ArgumentError, "missing name")
@fs_name = attributes[:fs_name]
# Set type
@type = attributes[:type] ? attributes[:type].to_sym : nil
if @type && !get_types.include?(@type)
raise ArgumentError, "invalid type: #{@type}"
end
@color = attributes[:color]
# Set aliases
@aliases = [default_alias] + (attributes[:aliases] || [])
@tm_scope = attributes[:tm_scope] || 'none'
@ace_mode = attributes[:ace_mode]
@codemirror_mode = attributes[:codemirror_mode]
@codemirror_mime_type = attributes[:codemirror_mime_type]
@wrap = attributes[:wrap] || false
# Set the language_id
@language_id = attributes[:language_id]
# Set extensions or default to [].
@extensions = attributes[:extensions] || []
@interpreters = attributes[:interpreters] || []
@filenames = attributes[:filenames] || []
# Set popular flag
@popular = attributes.key?(:popular) ? attributes[:popular] : false
# If group name is set, save the name so we can lazy load it later
if attributes[:group_name]
@group_name = attributes[:group_name]
# Otherwise we can set it to self now
else
@group_name = self.name
end
end
def get_types
# Valid Languages types
@types = [:data, :markup, :programming, :prose]
end
# Public: Get proper name
#
# Examples
#
# # => "Ruby"
# # => "Python"
# # => "Perl"
#
# Returns the name String
attr_reader :name
# Public:
#
attr_reader :fs_name
# Public: Get type.
#
# Returns a type Symbol or nil.
attr_reader :type
# Public: Get color.
#
# Returns a hex color String.
attr_reader :color
# Public: Get aliases
#
# Examples
#
# Language['C++'].aliases
# # => ["cpp"]
#
# Returns an Array of String names
attr_reader :aliases
# Public: Get language_id (used in GitHub search)
#
# Examples
#
# # => "1"
# # => "2"
# # => "3"
#
# Returns the integer language_id
attr_reader :language_id
# Public: Get the name of a TextMate-compatible scope
#
# Returns the scope
attr_reader :tm_scope
# Public: Get Ace mode
#
# Examples
#
# # => "text"
# # => "javascript"
# # => "c_cpp"
#
# Returns a String name or nil
attr_reader :ace_mode
# Public: Get CodeMirror mode
#
# Maps to a directory in the `mode/` source code.
# https://github.com/codemirror/CodeMirror/tree/master/mode
#
# Examples
#
# # => "nil"
# # => "javascript"
# # => "clike"
#
# Returns a String name or nil
attr_reader :codemirror_mode
# Public: Get CodeMirror MIME type mode
#
# Examples
#
# # => "nil"
# # => "text/x-javascript"
# # => "text/x-csrc"
#
# Returns a String name or nil
attr_reader :codemirror_mime_type
# Public: Should language lines be wrapped
#
# Returns true or false
attr_reader :wrap
# Public: Get extensions
#
# Examples
#
# # => ['.rb', '.rake', ...]
#
# Returns the extensions Array
attr_reader :extensions
# Public: Get interpreters
#
# Examples
#
# # => ['awk', 'gawk', 'mawk' ...]
#
# Returns the interpreters Array
attr_reader :interpreters
# Public: Get filenames
#
# Examples
#
# # => ['Rakefile', ...]
#
# Returns the extensions Array
attr_reader :filenames
# Public: Get URL escaped name.
#
# Examples
#
# "C%23"
# "C%2B%2B"
# "Common%20Lisp"
#
# Returns the escaped String.
def escaped_name
CGI.escape(name).gsub('+', '%20')
end
# Public: Get default alias name
#
# Returns the alias name String
def default_alias
name.downcase.gsub(/\s/, '-')
end
alias_method :default_alias_name, :default_alias
# Public: Get Language group
#
# Returns a Language
def group
@group ||= Language.find_by_name(@group_name)
end
# Public: Is it popular?
#
# Returns true or false
def popular?
@popular
end
# Public: Is it not popular?
#
# Returns true or false
def unpopular?
!popular?
end
# Public: Return name as String representation
def to_s
name
end
def ==(other)
eql?(other)
end
def eql?(other)
equal?(other)
end
def hash
name.hash
end
def inspect
"#<#{self.class} name=#{name}>"
end
end
samples = Samples.load_samples
extensions = samples['extnames']
interpreters = samples['interpreters']
popular = YAML.load_file(File.expand_path("../popular.yml", __FILE__))
languages_yml = File.expand_path("../languages.yml", __FILE__)
languages_json = File.expand_path("../languages.json", __FILE__)
if File.exist?(languages_json)
serializer = defined?(Yajl) ? Yajl : JSON
languages = serializer.load(File.read(languages_json))
else
languages = YAML.load_file(languages_yml)
end
languages.each do |name, options|
options['extensions'] ||= []
options['interpreters'] ||= []
options['filenames'] ||= []
if extnames = extensions[name]
extnames.each do |extname|
if !options['extensions'].index { |x| x.downcase.end_with? extname.downcase }
warn "#{name} has a sample with extension (#{extname.downcase}) that isn't explicitly defined in languages.yml"
options['extensions'] << extname
end
end
end
interpreters ||= {}
if interpreter_names = interpreters[name]
interpreter_names.each do |interpreter|
if !options['interpreters'].include?(interpreter)
options['interpreters'] << interpreter
end
end
end
Language.create(
:name => name,
:fs_name => options['fs_name'],
:color => options['color'],
:type => options['type'],
:aliases => options['aliases'],
:tm_scope => options['tm_scope'],
:ace_mode => options['ace_mode'],
:codemirror_mode => options['codemirror_mode'],
:codemirror_mime_type => options['codemirror_mime_type'],
:wrap => options['wrap'],
:group_name => options['group'],
:language_id => options['language_id'],
:extensions => Array(options['extensions']),
:interpreters => options['interpreters'].sort,
:filenames => options['filenames'],
:popular => popular.include?(name)
)
end
end
| ruby | MIT | a7e40d31e271f6747fa1234df37b5fff3e6e2406 | 2026-01-04T15:37:32.851738Z | false |
github-linguist/linguist | https://github.com/github-linguist/linguist/blob/a7e40d31e271f6747fa1234df37b5fff3e6e2406/lib/linguist/samples.rb | lib/linguist/samples.rb | begin
require 'yajl'
rescue LoadError
require 'json'
end
require 'linguist/sha256'
require 'linguist/classifier'
require 'linguist/shebang'
module Linguist
# Model for accessing classifier training data.
module Samples
# Path to samples root directory
ROOT = File.expand_path("../../../samples", __FILE__)
# Path for serialized samples db
PATH = File.expand_path('../samples.json', __FILE__)
# Hash of serialized samples object, cached in memory
def self.cache
@cache ||= load_samples
end
# Hash of serialized samples object, uncached
def self.load_samples
serializer = defined?(Yajl) ? Yajl : JSON
data = serializer.load(File.read(PATH, encoding: 'utf-8'))
# JSON serialization does not allow integer keys, we fix them here
for lang in data['centroids'].keys
fixed = data['centroids'][lang].to_a.map { |k,v| [k.to_i, v] }
data['centroids'][lang] = Hash[fixed]
end
data
end
# Public: Iterate over each sample.
#
# &block - Yields Sample to block
#
# Returns nothing.
def self.each(&block)
Dir.entries(ROOT).sort!.each do |category|
next if category == '.' || category == '..'
dirname = File.join(ROOT, category)
Dir.entries(dirname).each do |filename|
next if filename == '.' || filename == '..'
if filename == 'filenames'
Dir.entries(File.join(dirname, filename)).each do |subfilename|
next if subfilename == '.' || subfilename == '..'
yield({
:path => File.join(dirname, filename, subfilename),
:language => category,
:filename => subfilename
})
end
else
path = File.join(dirname, filename)
extname = File.extname(filename)
yield({
:path => path,
:language => category,
:interpreter => Shebang.interpreter(File.read(path)),
:extname => extname.empty? ? nil : extname
})
end
end
end
nil
end
# Public: Build Classifier from all samples.
#
# Returns trained Classifier.
def self.data
db = {}
db['extnames'] = {}
db['interpreters'] = {}
db['filenames'] = {}
each do |sample|
language_name = sample[:language]
if sample[:extname]
db['extnames'][language_name] ||= []
if !db['extnames'][language_name].include?(sample[:extname])
db['extnames'][language_name] << sample[:extname]
db['extnames'][language_name].sort!
end
end
if sample[:interpreter]
db['interpreters'][language_name] ||= []
if !db['interpreters'][language_name].include?(sample[:interpreter])
db['interpreters'][language_name] << sample[:interpreter]
db['interpreters'][language_name].sort!
end
end
if sample[:filename]
db['filenames'][language_name] ||= []
db['filenames'][language_name] << sample[:filename]
db['filenames'][language_name].sort!
end
data = File.read(sample[:path])
Classifier.train!(db, language_name, data)
end
Classifier.finalize_train! db
db['sha256'] = Linguist::SHA256.hexdigest(db)
db
end
end
end
| ruby | MIT | a7e40d31e271f6747fa1234df37b5fff3e6e2406 | 2026-01-04T15:37:32.851738Z | false |
github-linguist/linguist | https://github.com/github-linguist/linguist/blob/a7e40d31e271f6747fa1234df37b5fff3e6e2406/lib/linguist/repository.rb | lib/linguist/repository.rb | require 'linguist/lazy_blob'
require 'linguist/source/repository'
require 'linguist/source/rugged'
module Linguist
# A Repository is an abstraction of a Grit::Repo or a basic file
# system tree. It holds a list of paths pointing to Blobish objects.
#
# Its primary purpose is for gathering language statistics across
# the entire project.
class Repository
attr_reader :repository
MAX_TREE_SIZE = 100_000
# Public: Create a new Repository based on the stats of
# an existing one
def self.incremental(repo, commit_oid, old_commit_oid, old_stats, max_tree_size = MAX_TREE_SIZE)
repo = self.new(repo, commit_oid, max_tree_size)
repo.load_existing_stats(old_commit_oid, old_stats)
repo
end
# Public: Initialize a new Repository to be analyzed for language
# data
#
# repo - a Linguist::Source::Repository object
# commit_oid - the sha1 of the commit that will be analyzed;
# this is usually the master branch
# max_tree_size - the maximum tree size to consider for analysis (default: MAX_TREE_SIZE)
#
# Returns a Repository
def initialize(repo, commit_oid, max_tree_size = MAX_TREE_SIZE)
@repository = if repo.is_a? Linguist::Source::Repository
repo
else
# Allow this for backward-compatibility purposes
Linguist::Source::RuggedRepository.new(repo)
end
@commit_oid = commit_oid
@max_tree_size = max_tree_size
@old_commit_oid = nil
@old_stats = nil
raise TypeError, 'commit_oid must be a commit SHA1' unless commit_oid.is_a?(String)
end
# Public: Load the results of a previous analysis on this repository
# to speed up the new scan.
#
# The new analysis will be performed incrementally as to only take
# into account the file changes since the last time the repository
# was scanned
#
# old_commit_oid - the sha1 of the commit that was previously analyzed
# old_stats - the result of the previous analysis, obtained by calling
# Repository#cache on the old repository
#
# Returns nothing
def load_existing_stats(old_commit_oid, old_stats)
@old_commit_oid = old_commit_oid
@old_stats = old_stats
nil
end
# Public: Returns a breakdown of language stats.
#
# Examples
#
# # => { 'Ruby' => 46319,
# 'JavaScript' => 258 }
#
# Returns a Hash of language names and Integer size values.
def languages
@sizes ||= begin
sizes = Hash.new { 0 }
cache.each do |_, (language, size)|
sizes[language] += size
end
sizes
end
end
# Public: Get primary Language of repository.
#
# Returns a language name
def language
@language ||= begin
primary = languages.max_by { |(_, size)| size }
primary && primary[0]
end
end
# Public: Get the total size of the repository.
#
# Returns a byte size Integer
def size
@size ||= languages.inject(0) { |s,(_,v)| s + v }
end
# Public: Return the language breakdown of this repository by file
#
# Returns a map of language names => [filenames...]
def breakdown_by_file
@file_breakdown ||= begin
breakdown = Hash.new { |h,k| h[k] = Array.new }
cache.each do |filename, (language, _)|
breakdown[language] << filename.dup.force_encoding("UTF-8").scrub
end
breakdown
end
end
# Public: Return the cached results of the analysis
#
# This is a per-file breakdown that can be passed to other instances
# of Linguist::Repository to perform incremental scans
#
# Returns a map of filename => [language, size]
def cache
@cache ||= begin
if @old_commit_oid == @commit_oid
@old_stats
else
compute_stats(@old_commit_oid, @old_stats)
end
end
end
def read_index
raise NotImplementedError, "read_index is deprecated" unless repository.is_a? Linguist::Source::RuggedRepository
repository.set_attribute_source(@commit_oid)
end
def current_tree
raise NotImplementedError, "current_tree is deprecated" unless repository.is_a? Linguist::Source::RuggedRepository
repository.get_tree(@commit_oid)
end
protected
def compute_stats(old_commit_oid, cache = nil)
return {} if repository.get_tree_size(@commit_oid, @max_tree_size) >= @max_tree_size
repository.set_attribute_source(@commit_oid)
diff = repository.diff(old_commit_oid, @commit_oid)
# Clear file map and fetch full diff if any .gitattributes files are changed
if cache && diff.each_delta.any? { |delta| File.basename(delta.new_file[:path]) == ".gitattributes" }
diff = repository.diff(nil, @commit_oid)
file_map = {}
else
file_map = cache ? cache.dup : {}
end
diff.each_delta do |delta|
old = delta.old_file[:path]
new = delta.new_file[:path]
file_map.delete(old)
next if delta.binary?
if [:added, :modified].include? delta.status
# Skip submodules and symlinks
mode = delta.new_file[:mode]
mode_format = (mode & 0170000)
next if mode_format == 0120000 || mode_format == 040000 || mode_format == 0160000
blob = Linguist::LazyBlob.new(repository, delta.new_file[:oid], new, mode.to_s(8))
update_file_map(blob, file_map, new)
blob.cleanup!
end
end
file_map
end
def update_file_map(blob, file_map, key)
if blob.include_in_language_stats?
file_map[key] = [blob.language.group.name, blob.size]
end
end
end
end
| ruby | MIT | a7e40d31e271f6747fa1234df37b5fff3e6e2406 | 2026-01-04T15:37:32.851738Z | false |
github-linguist/linguist | https://github.com/github-linguist/linguist/blob/a7e40d31e271f6747fa1234df37b5fff3e6e2406/lib/linguist/version.rb | lib/linguist/version.rb | module Linguist
VERSION = File.read(File.expand_path("../VERSION", __FILE__)).strip
end
| ruby | MIT | a7e40d31e271f6747fa1234df37b5fff3e6e2406 | 2026-01-04T15:37:32.851738Z | false |
github-linguist/linguist | https://github.com/github-linguist/linguist/blob/a7e40d31e271f6747fa1234df37b5fff3e6e2406/lib/linguist/lazy_blob.rb | lib/linguist/lazy_blob.rb | require 'linguist/blob_helper'
require 'linguist/language'
require 'linguist/source/repository'
require 'linguist/source/rugged'
module Linguist
class LazyBlob
GIT_ATTR = ['linguist-documentation',
'linguist-language',
'linguist-vendored',
'linguist-generated',
'linguist-detectable']
# DEPRECATED: use Linguist::Source::RuggedRepository::GIT_ATTR_OPTS instead
GIT_ATTR_OPTS = Linguist::Source::RuggedRepository::GIT_ATTR_OPTS
# DEPRECATED: use Linguist::Source::RuggedRepository::GIT_ATTR_FLAGS instead
GIT_ATTR_FLAGS = Linguist::Source::RuggedRepository::GIT_ATTR_FLAGS
include BlobHelper
MAX_SIZE = 128 * 1024
attr_reader :repository
attr_reader :oid
attr_reader :path
attr_reader :mode
alias :name :path
def initialize(repo, oid, path, mode = nil)
@repository = if repo.is_a? Linguist::Source::Repository
repo
else
# Allow this for backward-compatibility purposes
Linguist::Source::RuggedRepository.new(repo)
end
@oid = oid
@path = path
@mode = mode
@data = nil
end
def git_attributes
@git_attributes ||= repository.load_attributes_for_path(name, GIT_ATTR)
end
def documentation?
if not git_attributes['linguist-documentation'].nil?
boolean_attribute(git_attributes['linguist-documentation'])
else
super
end
end
def generated?
if not git_attributes['linguist-generated'].nil?
boolean_attribute(git_attributes['linguist-generated'])
else
super
end
end
def vendored?
if not git_attributes['linguist-vendored'].nil?
boolean_attribute(git_attributes['linguist-vendored'])
else
super
end
end
def language
return @language if defined?(@language)
@language = if lang = git_attributes['linguist-language']
Language.find_by_alias(lang)
else
super
end
end
def detectable?
if not git_attributes['linguist-detectable'].nil?
boolean_attribute(git_attributes['linguist-detectable'])
else
nil
end
end
def data
load_blob!
@data
end
def size
load_blob!
@size
end
def symlink?
# We don't create LazyBlobs for symlinks.
false
end
def cleanup!
@data.clear if @data
end
protected
# Returns true if the attribute is present and not the string "false" and not the false boolean.
def boolean_attribute(attribute)
attribute != "false" && attribute != false
end
def load_blob!
@data, @size = repository.load_blob(oid, MAX_SIZE) if @data.nil?
end
end
end
| ruby | MIT | a7e40d31e271f6747fa1234df37b5fff3e6e2406 | 2026-01-04T15:37:32.851738Z | false |
github-linguist/linguist | https://github.com/github-linguist/linguist/blob/a7e40d31e271f6747fa1234df37b5fff3e6e2406/lib/linguist/shebang.rb | lib/linguist/shebang.rb | # frozen_string_literal: true
module Linguist
  class Shebang
    # Public: Use shebang to detect language of the blob.
    #
    # blob       - An object that quacks like a blob.
    # candidates - A list of candidate languages.
    #
    # Examples
    #
    #   Shebang.call(FileBlob.new("path/to/file"))
    #
    # Returns an array of languages from the candidate list for which the
    # blob's shebang is valid. Returns an empty list if there is no shebang.
    # If the candidate list is empty, any language is a valid candidate.
    def self.call(blob, candidates)
      return [] if blob.symlink?

      languages = Language.find_by_interpreter interpreter(blob.data)
      candidates.any? ? candidates & languages : languages
    end

    # Public: Get the interpreter from the shebang
    #
    # data - String blob contents; only the first line is inspected
    #        (plus up to five lines for the sh/exec hack below).
    #
    # Returns a String or nil
    def self.interpreter(data)
      # First line must start with #!
      return unless data.start_with?("#!")

      # Everything up to the first line separator is the shebang line.
      shebang = data[0, data.index($/) || data.length]
      s = StringScanner.new(shebang)

      # There was nothing after the #!
      return unless path = s.scan(/^#!\s*\S+/)

      # Keep going
      script = path.split('/').last

      # if /usr/bin/env type shebang then walk the string
      if script == 'env'
        s.scan(/\s+/)
        while s.scan(/((-[i0uCSv]*|--\S+)\s+)+/) || # skip over optional arguments e.g. -vS
              s.scan(/(\S+=\S+\s+)+/) # skip over variable arguments e.g. foo=bar
          # do nothing
        end
        script = s.scan(/\S+/)
      end

      # Interpreter was /usr/bin/env with no arguments
      return unless script

      # "python2.6" -> "python2"
      script.sub!(/(\.\d+)$/, '')

      # #! perl -> perl
      script.sub!(/^#!\s*/, '')

      # Check for multiline shebang hacks that call `exec`.
      # $1 set by the match inside the block is still visible after the
      # block: Ruby's special match variables are frame-local, and blocks
      # share the enclosing method's frame.
      if script == 'sh' &&
        data.lines.first(5).any? { |l| l.match(/exec (\w+)[\s"']+\$0[\s"']+\$@/) }
        script = $1
      end

      # osascript can be called with an optional `-l <language>` argument, which may not be a language with an interpreter.
      # In this case, return and rely on the subsequent strategies to determine the language.
      if script == 'osascript'
        return if s.scan_until(/\-l\s?/)
      end

      File.basename(script)
    end
  end
end
| ruby | MIT | a7e40d31e271f6747fa1234df37b5fff3e6e2406 | 2026-01-04T15:37:32.851738Z | false |
github-linguist/linguist | https://github.com/github-linguist/linguist/blob/a7e40d31e271f6747fa1234df37b5fff3e6e2406/lib/linguist/tokenizer.rb | lib/linguist/tokenizer.rb | require 'strscan'
require 'linguist/linguist'
module Linguist
  # Generic programming language tokenizer.
  #
  # Tokens are designed for use in the language bayes classifier.
  # It strips any data strings or comments and preserves significant
  # language symbols.
  class Tokenizer
    # Public: Extract tokens from data
    #
    # data - String to tokenize
    #
    # Returns Array of token Strings.
    def self.tokenize(data)
      # extract_tokens is provided by the native extension
      # (required via linguist/linguist).
      tokenizer = new
      tokenizer.extract_tokens(data)
    end
  end
end
| ruby | MIT | a7e40d31e271f6747fa1234df37b5fff3e6e2406 | 2026-01-04T15:37:32.851738Z | false |
github-linguist/linguist | https://github.com/github-linguist/linguist/blob/a7e40d31e271f6747fa1234df37b5fff3e6e2406/lib/linguist/blob_helper.rb | lib/linguist/blob_helper.rb | require 'linguist/generated'
require 'cgi'
require 'charlock_holmes'
require 'mini_mime'
require 'yaml'
module Linguist
# DEPRECATED Avoid mixing into Blob classes. Prefer functional interfaces
# like `Linguist.detect` over `Blob#language`. Functions are much easier to
# cache and compose.
#
# Avoid adding additional bloat to this module.
#
# BlobHelper is a mixin for Blobish classes that respond to "name",
# "data" and "size" such as Grit::Blob.
module BlobHelper
# Public: Get the extname of the path
#
# Examples
#
#   blob(name='foo.rb').extname
#   # => '.rb'
#
# Returns a String ("" when there is no extension).
def extname
  filename = name.to_s
  File.extname(filename)
end
# Internal: Lookup mime type for filename.
#
# Memoized with a defined? guard so a nil lookup result is cached too.
#
# Returns a MiniMime info object or nil.
def _mime_type
  return @_mime_type if defined?(@_mime_type)
  @_mime_type = MiniMime.lookup_by_filename(name.to_s)
end

# Public: Get the actual blob mime type
#
# Examples
#
#   # => 'text/plain'
#   # => 'text/html'
#
# Returns a mime type String; defaults to 'text/plain' when unknown.
def mime_type
  info = _mime_type
  info ? info.content_type : 'text/plain'
end

# Internal: Is the blob binary according to its mime type
#
# Return true or false (false when the mime type is unknown).
def binary_mime_type?
  info = _mime_type
  info ? info.binary? : false
end
# Internal: Is the blob binary according to its mime type,
# overriding it if we have better data from the languages.yml
# database.
#
# Return true or false
def likely_binary?
  return false unless binary_mime_type?
  !Language.find_by_filename(name)
end

# Public: Get the Content-Type header value
#
# This value is used when serving raw blobs.
#
# Examples
#
#   # => 'text/plain; charset=utf-8'
#   # => 'application/octet-stream'
#
# Returns a content type String (memoized).
def content_type
  @content_type ||=
    if binary_mime_type? || binary?
      mime_type
    elsif encoding
      "text/plain; charset=#{encoding.downcase}"
    else
      "text/plain"
    end
end
# Public: Get the Content-Disposition header value
#
# This value is used when serving raw blobs.
#
#   # => "attachment; filename=file.tar"
#   # => "inline"
#
# Returns a content disposition String.
def disposition
  return 'inline' if text? || image?
  return "attachment" if name.nil?
  "attachment; filename=#{CGI.escape(name)}"
end
# Public: Detected charset name, or nil when detection failed.
def encoding
  detected = detect_encoding
  detected[:encoding] if detected
end

# Public: Ruby-compatible encoding name, or nil when detection failed.
def ruby_encoding
  detected = detect_encoding
  detected[:ruby_encoding] if detected
end

# Try to guess the encoding.
#
# Returns a Hash with :encoding, :confidence, :type, or nil when there
# is no data or no valid encoding could be determined.
def detect_encoding
  return unless data
  @detect_encoding ||= CharlockHolmes::EncodingDetector.new.detect(data)
end
# Public: Is the blob binary?
#
# Return true or false
def binary?
  return true if data.nil?      # large blobs aren't even loaded into memory
  return false if data == ""    # treat blank files as text
  return true if encoding.nil?  # Charlock doesn't know what to think
  detect_encoding[:type] == :binary
end

# Public: Is the blob empty?
#
# Return true or false
def empty?
  contents = data
  contents.nil? || contents == ""
end

# Public: Is the blob text?
#
# Return true or false
def text?
  binary? ? false : true
end
# Public: Is the blob a supported image format?
#
# Return true or false
def image?
  %w[.png .jpg .jpeg .gif].include?(extname.downcase)
end

# Public: Is the blob a supported 3D model format?
#
# Return true or false
def solid?
  '.stl' == extname.downcase
end

# Public: Is this blob a CSV file?
#
# Return true or false
def csv?
  return false unless text?
  extname.downcase == '.csv'
end

# Public: Is the blob a PDF?
#
# Return true or false
def pdf?
  '.pdf' == extname.downcase
end
MEGABYTE = 1024 * 1024

# Public: Is the blob too big to load?
#
# Return true or false
def large?
  MEGABYTE < size.to_i
end

# Public: Is the blob safe to colorize?
#
# Return true or false
def safe_to_colorize?
  return false if large?
  text? && !high_ratio_of_long_lines?
end

# Internal: Does the blob have a high ratio of long lines
# (average of more than 5000 bytes per line)?
#
# Return true or false
def high_ratio_of_long_lines?
  line_count = loc
  return false if line_count == 0
  (size / line_count) > 5000
end

# Public: Is the blob viewable?
#
# Non-viewable blobs will just show a "View Raw" link
#
# Return true or false
def viewable?
  large? ? false : text?
end
# Compile the vendored-path conventions from vendor.yml into one regexp.
vendored_paths = YAML.load_file(File.expand_path("../vendor.yml", __FILE__))
VendoredRegexp = Regexp.new(vendored_paths.join('|'))

# Public: Is the blob in a vendored directory?
#
# Vendored files are ignored by language statistics.
#
# See "vendor.yml" for a list of vendored conventions that match
# this pattern.
#
# Return true or false
def vendored?
  path =~ VendoredRegexp ? true : false
end

# Compile the documentation-path conventions from documentation.yml.
documentation_paths = YAML.load_file(File.expand_path("../documentation.yml", __FILE__))
DocumentationRegexp = Regexp.new(documentation_paths.join('|'))

# Public: Is the blob in a documentation directory?
#
# Documentation files are ignored by language statistics.
#
# See "documentation.yml" for a list of documentation conventions that match
# this pattern.
#
# Return true or false
def documentation?
  path =~ DocumentationRegexp ? true : false
end
# Public: Get each line of data
#
# Requires Blob#data
#
# Returns an Array of lines (memoized; empty for non-viewable blobs)
def lines
  @lines ||=
    if viewable? && data
      # `data` is usually encoded as ASCII-8BIT even when the content has
      # been detected as a different encoding. However, we are not allowed
      # to change the encoding of `data` because we've made the implicit
      # guarantee that each entry in `lines` is encoded the same way as
      # `data`.
      #
      # Instead, we re-encode each possible newline sequence as the
      # detected encoding, then force them back to the encoding of `data`
      # (usually a binary encoding like ASCII-8BIT). This means that the
      # byte sequence will match how newlines are likely encoded in the
      # file, but we don't have to change the encoding of `data` as far as
      # Ruby is concerned. This allows us to correctly parse out each line
      # without changing the encoding of `data`, and
      # also--importantly--without having to duplicate many (potentially
      # large) strings.
      begin
        # `data` is split after having its last `\n` removed by
        # chomp (if any). This prevents the creation of an empty
        # element after the final `\n` character on POSIX files.
        data.chomp.split(encoded_newlines_re, -1)
      rescue Encoding::ConverterNotFoundError
        # The data is not splittable in the detected encoding. Assume it's
        # one big line.
        [data]
      end
    else
      []
    end
end

# Internal: Union regexp of "\r\n", "\r" and "\n" re-encoded to match the
# byte sequences used for newlines in `data` (memoized).
def encoded_newlines_re
  @encoded_newlines_re ||= Regexp.union(["\r\n", "\r", "\n"].
                                        map { |nl| nl.encode(ruby_encoding, "ASCII-8BIT").force_encoding(data.encoding) })
end
# Public: Get the first n lines without materializing the full `lines`
# array.
#
# n - Integer number of leading lines wanted.
#
# Returns an Array of lines.
def first_lines(n)
  return lines[0...n] if defined? @lines
  return [] unless viewable? && data

  i, c = 0, 0
  # Advance i just past each of the first n newline sequences.
  while c < n && j = data.index(encoded_newlines_re, i)
    i = j + $&.length
    c += 1
  end
  # NOTE(review): data[0...i] includes the n-th newline itself, so the
  # split with limit -1 appears to yield a trailing "" element whenever
  # the data has at least n newlines — confirm callers tolerate this.
  data[0...i].split(encoded_newlines_re, -1)
end
# Public: Get the last n lines of the blob's data.
#
# n - Integer number of trailing lines wanted.
#
# Returns an Array of lines (without trailing newline delimiters).
def last_lines(n)
  # Reuse the memoized line array when it exists. The previous code
  # computed this branch but fell through without returning it, so the
  # memoized lines were ignored and the tail was recomputed from `data`
  # (first_lines already returns early in the equivalent case).
  if defined? @lines
    return n >= @lines.length ? @lines : lines[-n..-1]
  end
  return [] unless viewable? && data

  no_eol = true
  i, c = data.length, 0
  k = i
  # Scan backwards over newline matches until n lines have been found.
  while c < n && j = data.rindex(encoded_newlines_re, i - 1)
    if c == 0 && j + $&.length == i
      # Data ends with a newline: that final delimiter does not start a
      # new line, so scan one newline further back and drop the empty
      # trailing element afterwards.
      no_eol = false
      n += 1
    end
    i = j
    k = j + $&.length
    c += 1
  end
  r = data[k..-1].split(encoded_newlines_re, -1)
  r.pop if !no_eol
  r
end
# Public: Get number of lines of code
#
# Requires Blob#data
#
# Returns Integer
def loc
  lines.length
end

# Public: Get number of source lines of code (lines containing at least
# one non-whitespace character)
#
# Requires Blob#data
#
# Returns Integer
def sloc
  lines.count { |line| line =~ /\S/ }
end
# Public: Is the blob a generated file?
#
# Generated source code is suppressed in diffs and is ignored by
# language statistics.
#
# May load Blob#data
#
# Return true or false
def generated?
  # Guard with defined? instead of ||= so that a false result is
  # memoized as well; previously Generated.generated? re-ran on every
  # call for non-generated blobs.
  return @_generated if defined?(@_generated)
  @_generated = Generated.generated?(path, lambda { data })
end
# Public: Detects the Language of the blob.
#
# May load Blob#data
#
# Returns a Language or nil if none is detected
def language
  # Guard with defined? instead of ||= so a nil result (no language
  # detected) is memoized too instead of re-running detection on every
  # call. This matches the memoization style used by LazyBlob#language.
  return @language if defined?(@language)
  @language = Linguist.detect(self)
end
# Internal: Get the TextMate compatible scope for the blob.
#
# Returns the detected language's tm_scope, or nil when no language
# was detected.
def tm_scope
  detected = language
  detected && detected.tm_scope
end
# Language types that count toward repository statistics by default.
DETECTABLE_TYPES = [:programming, :markup].freeze

# Internal: Should this blob be included in repository language statistics?
#
# A blob counts when it is not vendored, documentation or generated, has
# a detected language, and either carries an explicit detectable?
# override (defined?(detectable?) checks whether the method exists at
# all; a non-nil result wins) or its language type is in
# DETECTABLE_TYPES.
def include_in_language_stats?
  !vendored? &&
  !documentation? &&
  !generated? &&
  language && ( defined?(detectable?) && !detectable?.nil? ?
                detectable? :
                DETECTABLE_TYPES.include?(language.type)
              )
end
end
end
| ruby | MIT | a7e40d31e271f6747fa1234df37b5fff3e6e2406 | 2026-01-04T15:37:32.851738Z | false |
github-linguist/linguist | https://github.com/github-linguist/linguist/blob/a7e40d31e271f6747fa1234df37b5fff3e6e2406/lib/linguist/file_blob.rb | lib/linguist/file_blob.rb | require 'linguist/blob_helper'
require 'linguist/blob'
module Linguist
# A FileBlob is a wrapper around a File object to make it quack
# like a Grit::Blob. It provides the basic interface: `name`,
# `data`, `path` and `size`.
class FileBlob < Blob
include BlobHelper
# Public: Initialize a new FileBlob from a path
#
# path - A path String that exists on the file system.
# base_path - Optional base to relativize the path
#
# Returns a FileBlob.
def initialize(path, base_path = nil)
  @fullpath = path
  @path = path
  # NOTE(review): this strips the first occurrence of "#{base_path}/"
  # anywhere in the string, not only a leading prefix — confirm callers
  # always pass a true prefix.
  @path = path.sub("#{base_path}/", '') if base_path
end
# Public: Read file permissions
#
# Returns a String like '100644' (memoized)
def mode
  @mode ||= format("%o", File.stat(@fullpath).mode)
end

# Public: Is the file a symlink? Memoized; any stat error is treated
# as "not a symlink".
def symlink?
  return @symlink if defined?(@symlink)
  @symlink =
    begin
      File.symlink?(@fullpath)
    rescue StandardError
      false
    end
end
# Public: Read file contents as raw bytes (ASCII-8BIT), memoized.
#
# Returns a String.
def data
  @data ||= File.read(@fullpath, :encoding => Encoding::ASCII_8BIT.name)
end

# Public: Get byte size (memoized).
#
# Returns an Integer.
def size
  @size ||= File.stat(@fullpath).size
end
end
end
| ruby | MIT | a7e40d31e271f6747fa1234df37b5fff3e6e2406 | 2026-01-04T15:37:32.851738Z | false |
github-linguist/linguist | https://github.com/github-linguist/linguist/blob/a7e40d31e271f6747fa1234df37b5fff3e6e2406/lib/linguist/classifier.rb | lib/linguist/classifier.rb | require 'linguist/tokenizer'
require 'set'
module Linguist
# Language content classifier.
class Classifier
# Maximum number of bytes to consider for classification.
# This is only used at evaluation time. During training, full content of
# samples is used.
CLASSIFIER_CONSIDER_BYTES = 50 * 1024

# Public: Use the classifier to detect language of the blob.
#
# blob - An object that quacks like a blob.
# possible_languages - Array of Language objects
#
# Examples
#
#   Classifier.call(FileBlob.new("path/to/file"), [
#     Language["Ruby"], Language["Python"]
#   ])
#
# Returns an Array of Language objects, most probable first.
def self.call(blob, possible_languages)
  names = possible_languages.map(&:name)
  sample = blob.data[0...CLASSIFIER_CONSIDER_BYTES]
  ranked = classify(Samples.cache, sample, names)
  # classify yields [name, score] pairs; resolve back to Language objects.
  ranked.map { |language_name, _score| Language[language_name] }
end
# Public: Train classifier that data is a certain language.
#
# db - Hash classifier database object
# language - String language of data
# data - String contents of file or array of tokens.
#
# Examples
#
#   Classifier.train!(db, 'Ruby', "def hello; end")
#
# Returns nil.
#
# Set LINGUIST_DEBUG=1, =2 or =3 to print internal statistics.
def self.train!(db, language, data)
  tokens = data.is_a?(String) ? Tokenizer.tokenize(data) : data

  vocab = (db['vocabulary'] ||= {})
  # Autoincrement: first lookup of an unseen token assigns it the next
  # vocabulary index.
  if vocab.default_proc.nil?
    vocab.default_proc = proc do |hash, key|
      hash[key] = hash.length
    end
  end

  db['samples'] ||= {}
  language_samples = (db['samples'][language] ||= [])
  language_samples << to_vocabulary_index_termfreq(vocab, tokens)
  nil
end
# Public: Finalize training.
#
# db - Hash classifier database object
#
# Examples:
#   Classifier.finalize_train!(db)
#
# Returns nil.
#
# This method must be called after the last #train! call.
def self.finalize_train!(db)
  db['vocabulary'] ||= {}

  # Unset hash autoincrement
  db['vocabulary'].default_proc = nil

  db['samples'] ||= []
  # Drop rare terms, canonicalize vocabulary order, then precompute the
  # per-language centroid vectors used at classification time.
  filter_vocab_by_freq! db, MIN_DOCUMENT_FREQUENCY
  sort_vocab! db
  db['icf'] = inverse_class_freqs db
  normalize_samples! db
  db['centroids'] = get_centroids db
  # Raw samples are no longer needed once centroids exist.
  db.delete 'samples'
  nil
end
# Public: Guess language of data.
#
# db - Hash of classifier tokens database.
# tokens - Array of tokens or String data to analyze.
# languages - Array of language name Strings to restrict to
#             (defaults to every language with a centroid).
#
# Examples
#
#   Classifier.classify(db, "def hello; end")
#   # => [ 'Ruby', 0.90], ['Python', 0.2], ... ]
#
# Returns sorted Array of result pairs. Each pair contains the
# String language name and a Float score between 0.0 and 1.0.
def self.classify(db, tokens, languages = nil)
  restrict_to = languages || db['centroids'].keys
  new(db).classify(tokens, restrict_to)
end
# Internal: Initialize a Classifier.
#
# db - Hash with 'vocabulary', 'centroids' and 'icf' entries (any of
#      which may be absent, leaving the corresponding ivar nil).
def initialize(db = {})
  @vocabulary, @centroids, @icf = db.values_at('vocabulary', 'centroids', 'icf')
end
# Internal: Guess language of data
#
# tokens - Array of tokens or String data to analyze.
# languages - Array of language name Strings to restrict to.
#
# Returns sorted Array of result pairs. Each pair contains the
# String language name and a Float score between 0.0 and 1.0.
def classify(tokens, languages)
  return [] if tokens.nil? || languages.empty?
  tokens = Tokenizer.tokenize(tokens) if tokens.is_a?(String)
  debug_dump_tokens(tokens) if verbosity >= 3

  # Build a tf-icf weighted sparse vector over the known vocabulary.
  vec = Classifier.to_vocabulary_index_termfreq_gaps(@vocabulary, tokens)
  vec.each do |idx, freq|
    tf = 1.0 + Math.log(freq)
    vec[idx] = tf * @icf[idx]
  end
  return [] if vec.empty?
  Classifier.l2_normalize!(vec)

  # Score each candidate language by similarity to its centroid;
  # zero-similarity languages are dropped from the result.
  scores = {}
  languages.each do |language|
    centroid = @centroids[language]
    score = Classifier.similarity(vec, centroid)
    if score > 0.0
      scores[language] = score
    end
  end
  # Highest score first.
  scores = scores.sort_by { |x| -x[1] }
  debug_dump_all_tokens(tokens, scores) if verbosity >= 2
  debug_dump_scores(scores) if verbosity >= 1
  scores
end
private

# Minimum number of samples a term must appear in to survive
# filter_vocab_by_freq!.
MIN_DOCUMENT_FREQUENCY = 2

# Internal: Debug verbosity level from LINGUIST_DEBUG (0 when unset).
def verbosity
  @verbosity ||= (ENV['LINGUIST_DEBUG'] || 0).to_i
end
# Internal: Print the per-language scores as a formatted table.
def debug_dump_scores(scores)
  rows = scores.map { |language, score| [language, format("%.3f", score)] }
  dump_table(["Language", "Score"], rows)
end
# Internal: Print a table of the file's known tokens with their raw
# term frequency and tf-icf weights at each stage of normalization.
def debug_dump_tokens(tokens)
  # Count occurrences of tokens that exist in the vocabulary.
  counts = Hash.new(0)
  tokens.each do |tok|
    idx = @vocabulary[tok]
    if not idx.nil?
      counts[tok] += 1
    end
  end

  norm = Classifier.l2_norm(counts)
  rows = counts.map do |tok, tf|
    idx = @vocabulary[tok]
    log_tf = 1.0 + Math.log(tf)
    with_icf = log_tf * @icf[idx]
    normalized = with_icf / norm
    row = [tok, tf, "%.3f" % log_tf, "%.3f" % with_icf, "%.3f" % normalized]
    [normalized, row]
  end
  headers = ["Token", "TF", "log", "*ICF", "L2"]
  # Sort rows by descending normalized weight before printing.
  rows = rows.sort_by { |x| -x[0] }.map { |_, row| row }
  dump_table(headers, rows)
end
# Internal: show a table of probabilities for each <token,language> pair.
#
# The number in each table entry is the number of "points" that each
# token contributes toward the belief that the file under test is a
# particular language. Points are additive.
def debug_dump_all_tokens(tokens, scores)
  languages = scores.map { |l, _| l }

  # Count occurrences of tokens present in the vocabulary.
  counts = Hash.new(0)
  tokens.each do |tok|
    idx = @vocabulary[tok]
    if not idx.nil?
      counts[tok] += 1
    end
  end

  # Per-language contribution of each token: normalized tf-icf weight
  # multiplied by the language centroid's weight for that term.
  data = {}
  norm = Classifier.l2_norm(counts)
  languages.each do |language|
    data[language] = {}
    counts.each do |tok, tf|
      idx = @vocabulary[tok]
      log_tf = 1.0 + Math.log(tf)
      with_icf = log_tf * @icf[idx]
      normalized = with_icf / norm
      data[language][tok] = normalized * @centroids[language][idx].to_f
    end
  end

  norm = Classifier.l2_norm(counts)
  rows = counts.map do |tok, tf|
    idx = @vocabulary[tok]
    log_tf = 1.0 + Math.log(tf)
    with_icf = log_tf * @icf[idx]
    normalized = with_icf / norm

    scores = languages.map do |l, _|
      [l, data[l][tok].to_f]
    end
    max_score = scores.to_h.values.max
    # Mark the winning language's cell with a trailing '*'.
    row = [tok] + scores.map do |l, s|
      if s == max_score
        "%.4f*" % s
      elsif s > 0.0
        "%.4f" % s
      else
        "-"
      end
    end
    [normalized, row]
  end
  # Column headers are [0], [1], ... with a legend mapping them to names.
  headers = ["Token"] + (0..languages.length-1).map { |lidx| "[#{lidx}]" }
  rows = rows.sort_by { |x| -x[0] }.map { |_, row| row }
  legend = languages.each_with_index.map { |l, lidx| "[#{lidx}] = #{l}" }
  dump_table(headers, rows, legend)
end
# Internal: Print an ASCII table.
#
# header - Array of column header Strings.
# rows   - Array of row Arrays (cells are stringified).
# legend - Optional Array of footer lines printed below the table.
def dump_table(header, rows, legend = nil)
  n_cols = header.length
  rows = rows.map { |r| r.map { |c| c.to_s } }
  # Each column is as wide as its widest cell (or header).
  col_widths = (0..n_cols - 1).map do |j|
    ([header[j].length] + rows.map { |row| row[j].length }).max
  end
  sep_line = "| #{(0..n_cols-1).map { |j| "-" * col_widths[j] }.join(" | ")} |"
  content_width = sep_line.length - 4
  top_line = "| #{"-" * content_width} |"
  format_row = Proc.new do |row|
    cells = row.zip(col_widths).map do |cell, width|
      "%-#{width}s" % cell
    end
    "| %s |" % cells.join(" | ")
  end

  puts top_line
  puts format_row.call(header)
  puts sep_line
  rows.each do |row|
    puts format_row.call(row)
  end
  puts top_line
  if legend
    legend.each do |line|
      puts "| %-#{content_width}s |" % line
    end
    puts top_line
  end
end
# Internal: Map tokens to vocabulary indices and count occurrences.
# With an autoincrementing vocabulary (default proc), unseen tokens are
# added to the vocabulary as a side effect.
def self.to_vocabulary_index_termfreq(vocab, tokens)
  tokens.each_with_object(Hash.new(0)) do |token, counts|
    counts[vocab[token]] += 1
  end
end
# Internal: Like to_vocabulary_index_termfreq, but tokens absent from
# the (frozen) vocabulary are simply skipped instead of added.
def self.to_vocabulary_index_termfreq_gaps(vocab, tokens)
  counts = Hash.new(0)
  tokens.each do |token|
    next unless vocab.key?(token)
    counts[vocab[token]] += 1
  end
  counts
end
# Internal: Euclidean (L2) norm of a sparse vector's values.
def self.l2_norm(vec)
  Math.sqrt(vec.values.sum(0.0) { |component| component**2 })
end
# Internal: Scale a sparse vector in place to unit L2 length.
# (The norm computation is inlined so this method is self-contained.)
#
# Returns nil.
def self.l2_normalize!(vec)
  norm = Math.sqrt(vec.values.inject(0.0) { |sum, x| sum + x**2 })
  vec.transform_values! { |value| value.to_f / norm }
  nil
end
# Internal: Dot product of two sparse vectors (cosine similarity when
# both are L2-normalized).
def self.similarity(a, b)
  shared = a.keys.select { |idx| b.key?(idx) }
  shared.inject(0.0) { |sum, idx| sum + a[idx] * b[idx] }
end
# Filter vocabulary by minimum document frequency.
#
# Removes every term that appears in fewer than min_freq samples.
# Mutates db['vocabulary'] in place; returns nil.
def self.filter_vocab_by_freq!(db, min_freq)
  doc_counts = Array.new(db['vocabulary'].size, 0)
  db['samples'].each_value do |sample_list|
    sample_list.each do |termfreq|
      termfreq.each_key { |idx| doc_counts[idx] += 1 }
    end
  end

  db['vocabulary'].keep_if { |_term, idx| doc_counts[idx] >= min_freq }
  nil
end
# Sort vocabulary lexicographically.
#
# Reassigns each surviving term a new index in sorted-term order, then
# remaps every stored sample's term-frequency hash onto the new indices.
def self.sort_vocab!(db)
  # Autovivifying map old index -> new index: first lookup of an old
  # index allocates the next new index, in sorted-term iteration order.
  new_indices = Hash.new { |h,k| h[k] = h.length }
  db['vocabulary'].sort_by { |x| x[0] }.each do |term, idx|
    db['vocabulary'][term] = new_indices[idx]
  end
  new_indices.default_proc = nil

  db['samples'].transform_values! do |samples|
    samples.map do |sample|
      new_sample = {}
      sample.each do |idx, freq|
        new_idx = new_indices[idx]
        # Terms dropped by filter_vocab_by_freq! have no new index.
        if not new_idx.nil?
          new_sample[new_idx] = freq
        end
      end
      new_sample
    end
  end
end
# Compute inverse class frequency (ICF) for every term.
#
# ICF(term) = log(number_of_languages / languages_containing_term) + 1
#
# Returns an Array of Floats indexed by vocabulary index.
def self.inverse_class_freqs(db)
  class_counts = Array.new(db['vocabulary'].size, 0)
  db['samples'].each_value do |sample_list|
    # A term counts once per language, no matter how many samples use it.
    seen = sample_list.reduce(Set.new) { |acc, termfreq| acc | termfreq.keys }
    seen.each { |idx| class_counts[idx] += 1 }
  end

  total = db['samples'].size.to_f
  class_counts.map { |count| Math.log(total / count.to_f) + 1 }
end
# Internal: Replace each sample's raw term frequencies with
# L2-normalized tf-icf weights, in place. (The normalization step is
# inlined rather than calling l2_normalize!.)
def self.normalize_samples!(db)
  icf = db['icf']
  db['samples'].each_value do |sample_list|
    sample_list.each do |termfreq|
      termfreq.each_key do |idx|
        termfreq[idx] = (1.0 + Math.log(termfreq[idx])) * icf[idx]
      end
      norm = Math.sqrt(termfreq.values.inject(0.0) { |sum, v| sum + v**2 })
      termfreq.transform_values! { |v| v.to_f / norm }
    end
  end
end
# Internal: Compute one L2-normalized centroid vector per language by
# averaging that language's (already normalized) sample vectors.
#
# Returns a Hash of language name => sparse centroid Hash.
def self.get_centroids(db)
  db['samples'].each_with_object({}) do |(language, sample_list), centroids|
    centroid = Hash.new(0.0)
    sample_list.each do |termfreq|
      termfreq.each { |idx, val| centroid[idx] += val }
    end
    count = sample_list.length
    centroid.each_key { |idx| centroid[idx] = centroid[idx] / count }
    l2_normalize! centroid
    centroids[language] = centroid
  end
end
end
end
| ruby | MIT | a7e40d31e271f6747fa1234df37b5fff3e6e2406 | 2026-01-04T15:37:32.851738Z | false |
github-linguist/linguist | https://github.com/github-linguist/linguist/blob/a7e40d31e271f6747fa1234df37b5fff3e6e2406/lib/linguist/grammars.rb | lib/linguist/grammars.rb | module Linguist
module Grammars
  # Get the path to the directory containing the language grammar JSON files.
  #
  # Returns a String.
  def self.path
    File.expand_path(File.join("..", "..", "..", "grammars"), __FILE__)
  end
end
end
| ruby | MIT | a7e40d31e271f6747fa1234df37b5fff3e6e2406 | 2026-01-04T15:37:32.851738Z | false |
github-linguist/linguist | https://github.com/github-linguist/linguist/blob/a7e40d31e271f6747fa1234df37b5fff3e6e2406/lib/linguist/instrumenter.rb | lib/linguist/instrumenter.rb | module Linguist
class BasicInstrumenter
  # Hash of blob name => { strategy:, language: } captured from
  # "linguist.detected" instrumentation events.
  attr_reader :detected_info

  def initialize
    @detected_info = {}
  end

  # Record language-detection events; any other event just runs the
  # given block (if any) untouched.
  def instrument(name, payload = {})
    blob = payload[:blob]
    if name == "linguist.detected" && blob
      strategy_name = payload[:strategy].name.split("::").last
      language = payload[:language]
      @detected_info[blob.name] = {
        strategy: strategy_name,
        language: language && language.name
      }
    end
    yield if block_given?
  end
end
end
| ruby | MIT | a7e40d31e271f6747fa1234df37b5fff3e6e2406 | 2026-01-04T15:37:32.851738Z | false |
github-linguist/linguist | https://github.com/github-linguist/linguist/blob/a7e40d31e271f6747fa1234df37b5fff3e6e2406/lib/linguist/blob.rb | lib/linguist/blob.rb | require 'linguist/blob_helper'
module Linguist
# A Blob is a wrapper around the content of a file to make it quack
# like a Grit::Blob. It provides the basic interface: `name`,
# `data`, `path` and `size`.
class Blob
include BlobHelper
# Public: Initialize a new Blob.
#
# path    - A path String (does not necessarily exist on the file system).
# content - String content of the file.
# symlink - Boolean, whether the file is a symlink (default: false).
#
# Returns a Blob.
def initialize(path, content, symlink: false)
  @path, @content, @symlink = path, content, symlink
end
# Public: Filename
#
# Examples
#
#   Blob.new("/path/to/linguist/lib/linguist.rb", "").path
#   # => "/path/to/linguist/lib/linguist.rb"
#
# Returns a String
attr_reader :path

# Public: File name (the basename component of #path)
#
# Returns a String
def name
  File.basename(@path)
end
# Public: File contents, exactly as passed to #initialize.
#
# Returns a String.
def data
  @content
end

# Public: Get byte size of the content.
#
# Returns an Integer.
def size
  @content.bytesize
end
# Public: Get file extension.
#
# Returns a String ("" when the name has no dot).
def extension
  extensions.last || ""
end

# Public: Return an array of the file extensions
#
#   >> Linguist::Blob.new("app/views/things/index.html.erb").extensions
#   => [".html.erb", ".erb"]
#
# Returns an Array
def extensions
  segments = name.downcase.split(".", -1)
  segments.shift # discard the basename before the first dot
  segments.each_index.map { |i| "." + segments[i..-1].join(".") }
end
# Public: Is this a symlink?
#
# Returns the value supplied via the :symlink keyword (true or false).
def symlink?
  @symlink
end
end
end
| ruby | MIT | a7e40d31e271f6747fa1234df37b5fff3e6e2406 | 2026-01-04T15:37:32.851738Z | false |
github-linguist/linguist | https://github.com/github-linguist/linguist/blob/a7e40d31e271f6747fa1234df37b5fff3e6e2406/lib/linguist/heuristics.rb | lib/linguist/heuristics.rb | require 'yaml'
module Linguist
# A collection of simple heuristics that can be used to better analyze languages.
class Heuristics
# Maximum number of bytes of blob data the heuristic regexes inspect.
HEURISTICS_CONSIDER_BYTES = 50 * 1024

# Public: Use heuristics to detect language of the blob.
#
# blob - An object that quacks like a blob.
# candidates - Array of Language objects
#
# Examples
#
#   Heuristics.call(FileBlob.new("path/to/file"), [
#     Language["Ruby"], Language["Python"]
#   ])
#
# Returns an Array of languages, or empty if none matched or were inconclusive.
def self.call(blob, candidates)
  return [] if blob.symlink?

  self.load()
  data = blob.data[0...HEURISTICS_CONSIDER_BYTES]
  # The first heuristic whose extension list matches the filename wins.
  @heuristics.each do |heuristic|
    if heuristic.matches?(blob.name, candidates)
      return Array(heuristic.call(data))
    end
  end
  [] # No heuristics matched
rescue Regexp::TimeoutError
  # NOTE(review): Regexp::TimeoutError only exists on Ruby 3.2+; on older
  # rubies this rescue clause would itself raise NameError if reached.
  [] # Return nothing if we have a bad regexp which leads to a timeout enforced by Regexp.timeout in Ruby 3.2 or later
end
# Public: Get all heuristic definitions
#
# Loads them from heuristics.yml on first use.
#
# Returns an Array of heuristic objects.
def self.all
  self.load
  @heuristics
end
# Internal: Load heuristics from 'heuristics.yml'.
#
# Populates @heuristics once; subsequent calls are no-ops.
def self.load()
  if @heuristics.any?
    return
  end

  data = self.load_config
  # Named patterns can be shared between several disambiguation rules.
  named_patterns = data['named_patterns'].map { |k,v| [k, self.to_regex(v)] }.to_h

  data['disambiguations'].each do |disambiguation|
    exts = disambiguation['extensions']
    rules = disambiguation['rules']
    # Compile every rule's pattern spec into a matcher object up front.
    rules.map! do |rule|
      rule['pattern'] = self.parse_rule(named_patterns, rule)
      rule
    end
    @heuristics << new(exts, rules)
  end
end
# Internal: Parse heuristics.yml (located next to this file).
def self.load_config
  config_path = File.expand_path("../heuristics.yml", __FILE__)
  YAML.load_file(config_path)
end
# Internal: Compile one rule spec Hash into a matcher that responds to
# #match?. Recognizes 'and', 'pattern', 'negative_pattern' and
# 'named_pattern' keys; anything else becomes an AlwaysMatch.
def self.parse_rule(named_patterns, rule)
  if !rule['and'].nil?
    And.new(rule['and'].map { |sub_rule| self.parse_rule(named_patterns, sub_rule) })
  elsif !rule['pattern'].nil?
    self.to_regex(rule['pattern'])
  elsif !rule['negative_pattern'].nil?
    NegativePattern.new(self.to_regex(rule['negative_pattern']))
  elsif !rule['named_pattern'].nil?
    named_patterns[rule['named_pattern']]
  else
    AlwaysMatch.new
  end
end
# Internal: Converts a string or array of strings to regexp
#
# str: string or array of strings. If it is an array of strings,
#      the result is the Regexp.union of each compiled pattern.
def self.to_regex(str)
  if str.is_a?(Array)
    Regexp.union(str.map { |pattern| Regexp.new(pattern) })
  else
    Regexp.new(str)
  end
end
# Internal: Array of defined heuristics
@heuristics = []

# Internal
#
# exts  - Array of extension Strings this heuristic applies to.
# rules - Array of rule Hashes with compiled 'pattern' matchers and a
#         'language' name (String or Array of Strings).
def initialize(exts, rules)
  @exts = exts
  @rules = rules
end
# Internal: Return the heuristic's target extensions
def extensions
  @exts
end

# Internal: Return the heuristic's candidate languages
def languages
  @rules.flat_map do |rule|
    [rule['language']].flatten(2).map { |name| Language[name] }
  end.uniq
end
# Internal: Check if this heuristic applies to the given filename.
#
# filename   - String file name.
# candidates - Array of candidate Languages. Currently unused; kept for
#              interface compatibility with the other strategies.
#
# Returns true when any of this heuristic's extensions match.
def matches?(filename, candidates)
  # The candidates list used to be normalized (compact + map(&:name))
  # here, but the result was never used — the check is purely
  # extension-based, so the dead computation is removed.
  target = filename.downcase
  @exts.any? { |ext| target.end_with?(ext) }
end
# Internal: Perform the heuristic
#
# Returns the Language (or Array of Languages) of the first rule whose
# pattern matches data, or nil when no rule matches.
def call(data)
  winner = @rules.find { |rule| rule['pattern'].match?(data) }
  return if winner.nil?

  names = winner['language']
  if names.is_a?(Array)
    names.map { |n| Language[n] }
  else
    Language[names]
  end
end
end
# Matcher that succeeds only when every sub-pattern matches.
class And
  def initialize(pats)
    @pats = pats
  end

  def match?(input)
    @pats.all? { |pat| pat.match?(input) }
  end
end
# Fallback matcher that accepts any input.
class AlwaysMatch
  def match?(_input)
    true
  end
end
# Matcher that inverts the result of the wrapped pattern.
class NegativePattern
  def initialize(pat)
    @pat = pat
  end

  def match?(input)
    !@pat.match?(input)
  end
end
end
| ruby | MIT | a7e40d31e271f6747fa1234df37b5fff3e6e2406 | 2026-01-04T15:37:32.851738Z | false |
github-linguist/linguist | https://github.com/github-linguist/linguist/blob/a7e40d31e271f6747fa1234df37b5fff3e6e2406/lib/linguist/generated.rb | lib/linguist/generated.rb | module Linguist
class Generated
# Public: Is the blob a generated file?
#
# name - String filename
# data - String blob data. A block also may be passed in for lazy
#        loading. This behavior is deprecated and you should always
#        pass in a String.
#
# Return true or false
def self.generated?(name, data)
  instance = new(name, data)
  instance.generated?
end
# Internal: Initialize Generated instance
#
# name - String filename
# data - String blob data
def initialize(name, data)
@name = name
@extname = File.extname(name)
@_data = data
end
attr_reader :name, :extname
# Lazy load blob data if a callable was passed in.
#
# Awful, awful stuff happening here.
#
# Returns String data (memoized).
def data
  @data ||= (@_data.respond_to?(:call) ? @_data.call : @_data)
end
# Public: Get each line of data
#
# Split on "\n" with limit -1, so trailing empty lines are kept.
#
# Returns an Array of lines (memoized; [] when data is nil)
def lines
  # TODO: data should be required to be a String, no nils
  @lines ||= (data ? data.split("\n", -1) : [])
end
# Internal: Is the blob a generated file?
#
# Generated source code is suppressed in diffs and is ignored by
# language statistics.
#
# Please add additional test coverage to
# `test/test_blob.rb#test_generated` if you make any changes.
#
# Short-circuits on the first matching detector, so cheap name/extension
# checks come first and content-inspecting checks later.
#
# Return true or false
def generated?
  # IDE and dependency-manager artifacts:
  xcode_file? ||
  intellij_file? ||
  cocoapods? ||
  carthage_build? ||
  generated_graphql_relay? ||
  generated_net_designer_file? ||
  generated_net_specflow_feature_file? ||
  # Lockfiles and vendored dependency trees:
  composer_lock? ||
  cargo_lock? ||
  cargo_orig? ||
  deno_lock? ||
  flake_lock? ||
  bazel_lock? ||
  node_modules? ||
  go_vendor? ||
  go_lock? ||
  package_resolved? ||
  poetry_lock? ||
  pdm_lock? ||
  uv_lock? ||
  pixi_lock? ||
  esy_lock? ||
  npm_shrinkwrap_or_package_lock? ||
  pnpm_lock? ||
  bun_lock? ||
  terraform_lock? ||
  generated_yarn_plugnplay? ||
  godeps? ||
  generated_by_zephir? ||
  htmlcov? ||
  # Minified/compiled frontend assets:
  minified_files? ||
  has_source_map? ||
  source_map? ||
  compiled_coffeescript? ||
  # Compiler / code-generator output:
  generated_parser? ||
  generated_net_docfile? ||
  generated_postscript? ||
  compiled_cython_file? ||
  pipenv_lock? ||
  gradle_wrapper? ||
  maven_wrapper? ||
  generated_go? ||
  generated_protocol_buffer_from_go? ||
  generated_protocol_buffer? ||
  generated_javascript_protocol_buffer? ||
  generated_typescript_protocol_buffer? ||
  generated_apache_thrift? ||
  generated_jni_header? ||
  vcr_cassette? ||
  generated_antlr? ||
  generated_module? ||
  generated_unity3d_meta? ||
  generated_racc? ||
  generated_jflex? ||
  generated_grammarkit? ||
  generated_roxygen2? ||
  generated_html? ||
  generated_jison? ||
  generated_grpc_cpp? ||
  generated_dart? ||
  generated_perl_ppport_header? ||
  generated_gamemakerstudio? ||
  generated_gimp? ||
  generated_visualstudio6? ||
  generated_haxe? ||
  generated_jooq? ||
  generated_pascal_tlb? ||
  generated_sorbet_rbi? ||
  generated_mysql_view_definition_format? ||
  generated_sqlx_query?
end
# Internal: Is the blob an Xcode file?
#
# Generated if the file extension is an Xcode
# file extension.
#
# Returns true or false.
def xcode_file?
  %w[.nib .xcworkspacedata .xcuserstate].include?(extname)
end
# Internal: Is the blob an IntelliJ IDEA project file?
#
# JetBrains IDEs generate project files under an `.idea` directory
# that are sometimes checked into version control.
#
# Returns true or false.
def intellij_file?
  name.match?(/(?:^|\/)\.idea\//)
end
# Internal: Is the blob part of Pods/, which contains dependencies not meant for humans in pull requests.
#
# Returns true or false.
def cocoapods?
  name.match?(/(^Pods|\/Pods)\//)
end

# Internal: Is the blob part of Carthage/Build/, which contains dependencies not meant for humans in pull requests.
#
# Returns true or false.
def carthage_build?
  name.match?(/(^|\/)Carthage\/Build\//)
end
# Internal: Does extname indicate a filetype which is commonly minified?
#
# Returns true or false.
def maybe_minified?
  ext = extname.downcase
  ext == '.js' || ext == '.css'
end
# Internal: Is the blob a minified file?
#
# Consider a file minified if the average line length is
# greater than 110 characters.
#
# Currently, only JS and CSS files are detected by this method.
#
# Returns true or false.
def minified_files?
  return false unless maybe_minified? && lines.any?
  total = lines.sum { |line| line.length }
  (total / lines.length) > 110
end
# Internal: Does the blob contain a source-map reference?
#
# We assume that if one of the last 2 lines starts with a source-map
# reference, then the current file was generated from other files.
#
# We use the last 2 lines because the last line might be empty.
#
# Returns true or false.
def has_source_map?
  return false unless maybe_minified?
  tail = lines.last(2)
  tail.any? { |line| line =~ /^\/[*\/][\#@] source(?:Mapping)?URL|sourceURL=/ }
end
# Internal: Is the blob a generated source-map?
#
# Source-maps usually have .css.map or .js.map extensions. In case they
# are not following the name convention, detect them based on the content.
#
# Returns true or false.
def source_map?
return false unless extname.downcase == '.map'
return true if name =~ /(\.css|\.js)\.map$/i || # Name convention
lines[0] =~ /^{"version":\d+,/ || # Revision 2 and later begin with the version number
lines[0] =~ /^\/\*\* Begin line maps\. \*\*\/{/ # Revision 1 begins with a magic comment
false
end
# Internal: Is the blob of JS generated by CoffeeScript?
#
# CoffeeScript is meant to output JS that would be difficult to
# tell if it was generated or not. Look for a number of patterns
# output by the CS compiler.
#
# Return true or false
def compiled_coffeescript?
  return false unless extname == '.js'
  # CoffeeScript generated by > 1.2 include a comment on the first line
  if lines[0] =~ /^\/\/ Generated by /
    return true
  end
  if lines[0] == '(function() {' && # First line is module closure opening
      lines[-2] == '}).call(this);' && # Second to last line closes module closure
      lines[-1] == '' # Last line is blank
    score = 0
    lines.each do |line|
      if line =~ /var /
        # Underscored temp vars are likely to be Coffee
        # NOTE: gsub with a pattern and no replacement returns an
        # Enumerator, so `.count` here counts the matches per line.
        score += 1 * line.gsub(/(_fn|_i|_len|_ref|_results)/).count
        # bind and extend functions are very Coffee specific
        score += 3 * line.gsub(/(__bind|__extends|__hasProp|__indexOf|__slice)/).count
      end
    end
    # Require a score of 3. This is fairly arbitrary. Consider
    # tweaking later.
    score >= 3
  else
    false
  end
end
# Internal: Is this a generated documentation file for a .NET assembly?
#
# .NET developers often check in the XML Intellisense file along with an
# assembly - however, these don't have a special extension, so we have to
# dig into the contents to determine if it's a docfile. Luckily, these files
# are extremely structured, so recognizing them is easy.
#
# Returns true or false
def generated_net_docfile?
  return false unless extname.downcase == ".xml"
  return false unless lines.count > 3
  # .NET Docfiles always open with <doc> and their first tag is an
  # <assembly> tag
  lines[1].include?("<doc>") &&
    lines[2].include?("<assembly>") &&
    lines[-2].include?("</doc>")
end

# Internal: Is this a codegen file for a .NET project?
#
# Visual Studio often uses code generation to generate partial classes, and
# these files can be quite unwieldy. Let's hide them.
#
# Returns true or false
def generated_net_designer_file?
  # match? avoids allocating a MatchData we never use.
  name.match?(/\.designer\.(cs|vb)$/i)
end

# Internal: Is this a codegen file for Specflow feature file?
#
# Visual Studio's SpecFlow extension generates *.feature.cs files
# from *.feature files, they are not meant to be consumed by humans.
# Let's hide them.
#
# Returns true or false
def generated_net_specflow_feature_file?
  name.match?(/\.feature\.cs$/i)
end

# Internal: Is the blob of JS a parser generated by PEG.js?
#
# PEG.js-generated parsers are not meant to be consumed by humans.
#
# Return true or false
def generated_parser?
  return false unless extname == '.js'
  # PEG.js-generated parsers include a comment near the top of the file
  # that marks them as such.
  lines[0..4].join('').match?(/^(?:[^\/]|\/[^\*])*\/\*(?:[^\*]|\*[^\/])*Generated by PEG.js/)
end
# Internal: Is the blob of PostScript generated?
#
# PostScript files are often generated by other programs. If they tell us so,
# we can detect them.
#
# Returns true or false.
def generated_postscript?
  return false unless ['.ps', '.eps', '.pfa'].include? extname
  # Type 1 and Type 42 fonts converted to PostScript are stored as hex-encoded
  # byte streams; these streams are always preceded by the `eexec` operator
  # (if Type 1), or the `/sfnts` key (if Type 42).
  return true if data =~ /^\s*(?:currentfile eexec\s+|\/sfnts\s+\[\s<)/
  # We analyze the "%%Creator:" comment, which contains the author/generator
  # of the file. If there is one, it should be in one of the first few lines.
  creator = lines[0..9].find { |line| line =~ /^%%Creator: / }
  return false if creator.nil?
  # Most generators write their version number, while human authors' or
  # companies' names don't contain numbers. So look if the line contains
  # digits. Also look for some special cases without version numbers.
  return true if creator =~ /[0-9]|draw|mpage|ImageMagick|inkscape|MATLAB/ ||
                 creator =~ /PCBNEW|pnmtops|\(Unknown\)|Serif Affinity|Filterimage -tops/
  # EAGLE doesn't include a version number when it generates PostScript, but
  # it does prepend its name to the document's "%%Title" field. The original
  # `!!x.include? and lines.find {}` bound `!!` to the include? only and
  # returned a matching line object; `&&` + `any?` yields a proper boolean.
  creator.include?("EAGLE") &&
    lines[0..4].any? { |line| line =~ /^%%Title: EAGLE Drawing / }
end
# Internal: Is the blob Go source carrying the standard generated-code
# marker comment?
#
# Returns true or false.
def generated_go?
  return false unless extname == '.go'
  return false unless lines.count > 1
  lines.first(40).any? { |l| l.match?(%r{^// Code generated .*}) }
end

# Internal: Is the blob a protocol buffer file generated by the
# go-to-protobuf tool?
#
# Returns true or false
def generated_protocol_buffer_from_go?
  return false unless extname == '.proto'
  return false unless lines.count > 1
  lines.first(20).any? { |l| l.include? "This file was autogenerated by go-to-protobuf" }
end

# Extensions for every language the Protocol Buffer compiler emits; frozen
# because mutable constants can be modified from anywhere.
PROTOBUF_EXTENSIONS = ['.py', '.java', '.h', '.cc', '.cpp', '.m', '.rb', '.php'].freeze

# Internal: Is the blob a C++, Java or Python source file generated by the
# Protocol Buffer compiler?
#
# Returns true or false.
def generated_protocol_buffer?
  return false unless PROTOBUF_EXTENSIONS.include?(extname)
  return false unless lines.count > 1
  lines.first(3).any? { |l| l.include?("Generated by the protocol buffer compiler. DO NOT EDIT!") }
end
# Internal: Is the blob a Javascript source file generated by the
# Protocol Buffer compiler?
#
# Returns true or false.
def generated_javascript_protocol_buffer?
  return false unless extname == ".js"
  return false unless lines.count > 6
  # protoc places the marker on the sixth line of its JS output.
  lines[5].include?("GENERATED CODE -- DO NOT EDIT!")
end

# Internal: Is the blob a TypeScript source file generated by the
# Protocol Buffer compiler?
#
# Files generated by ts-proto typically start with something like this
# (though the versions lines are optional):
#
#   // Code generated by protoc-gen-ts_proto. DO NOT EDIT.
#   // versions:
#   //   protoc-gen-ts_proto  v1.181.2
#   //   protoc               v5.28.2
#   // source: hello.proto
#
#   /* eslint-disable */
#
# Returns true or false.
def generated_typescript_protocol_buffer?
  return false unless extname == ".ts"
  return false unless lines.count > 4
  lines[0].include?("Code generated by protoc-gen-ts_proto. DO NOT EDIT.")
end

# Extensions for every language the Apache Thrift compiler emits; frozen
# because mutable constants can be modified from anywhere.
APACHE_THRIFT_EXTENSIONS = ['.rb', '.py', '.go', '.js', '.m', '.java', '.h', '.cc', '.cpp', '.php'].freeze

# Internal: Is the blob generated by Apache Thrift compiler?
#
# Returns true or false
def generated_apache_thrift?
  return false unless APACHE_THRIFT_EXTENSIONS.include?(extname)
  lines.first(6).any? { |l| l.include?("Autogenerated by Thrift Compiler") }
end
# Internal: Is the blob a C/C++ header generated by the Java JNI tool javah?
#
# Returns true or false.
def generated_jni_header?
  return false unless extname == '.h'
  return false unless lines.count > 2
  lines[0].include?("/* DO NOT EDIT THIS FILE - it is machine generated */") &&
    lines[1].include?("#include <jni.h>")
end

# Internal: Is the blob part of node_modules/, which are not meant for humans in pull requests.
#
# Returns true or false.
def node_modules?
  name.match?(/node_modules\//)
end

# Internal: Is the blob part of the Go vendor/ tree,
# not meant for humans in pull requests.
#
# The pattern matches a vendor/ path whose next components look like a
# hostname (dash-separated labels followed by a known TLD).
#
# Returns true or false.
def go_vendor?
  name.match?(/vendor\/((?!-)[-0-9A-Za-z]+(?<!-)\.)+(com|edu|gov|in|me|net|org|fm|io)/)
end
# Internal: Is the blob a generated Go dep or glide lock file?
#
# Returns true or false.
def go_lock?
  # match? throughout these predicates avoids allocating a MatchData.
  name.match?(/(Gopkg|glide)\.lock/)
end

# Internal: Is the blob a generated Package.resolved?
#
# Returns true or false.
def package_resolved?
  name.match?(/Package\.resolved/)
end

# Internal: Is the blob a generated poetry.lock?
#
# Returns true or false.
def poetry_lock?
  name.match?(/poetry\.lock/)
end

# Internal: Is the blob a generated pdm.lock?
#
# Returns true or false.
def pdm_lock?
  name.match?(/pdm\.lock/)
end

# Internal: Is the blob a generated uv.lock?
#
# Returns true or false.
def uv_lock?
  name.match?(/uv\.lock/)
end

# Internal: Is the blob a generated pixi lock file?
#
# Returns true or false.
def pixi_lock?
  name.match?(/pixi\.lock/)
end

# Internal: Is the blob a generated esy lock file?
#
# NOTE(review): the dot in "esy.lock" is unescaped, so "esyXlock" would
# also match — kept as-is to preserve existing matching behavior.
#
# Returns true or false.
def esy_lock?
  name.match?(/(^|\/)(\w+\.)?esy.lock$/)
end

# Internal: Is the blob a generated deno lockfile, which are not meant for humans in pull requests.
#
# Returns true or false.
def deno_lock?
  name.match?(/deno\.lock/)
end

# Internal: Is the blob a generated npm shrinkwrap or package lock file?
#
# Returns true or false.
def npm_shrinkwrap_or_package_lock?
  name.match?(/npm-shrinkwrap\.json/) || name.match?(/package-lock\.json/)
end

# Internal: Is the blob a generated pnpm lockfile?
#
# Returns true or false.
def pnpm_lock?
  name.match?(/pnpm-lock\.yaml/)
end

# Internal: Is the blob a generated bun lockfile?
#
# Returns true or false.
def bun_lock?
  name.match?(/(?:^|\/)bun\.lockb?$/)
end

# Internal: Is the blob a generated Yarn Plug'n'Play?
#
# Returns true or false.
def generated_yarn_plugnplay?
  name.match?(/(^|\/)\.pnp\..*$/)
end
# Internal: Is the blob part of Godeps/,
# which are not meant for humans in pull requests.
#
# Returns true or false.
def godeps?
  # match? throughout these predicates avoids allocating a MatchData.
  name.match?(/Godeps\//)
end

# Internal: Is the blob a generated php composer lock file?
#
# Returns true or false.
def composer_lock?
  name.match?(/composer\.lock/)
end

# Internal: Is the blob generated by Zephir?
#
# Returns true or false.
def generated_by_zephir?
  name.match?(/.\.zep\.(?:c|h|php)$/)
end

# Internal: Is the blob a generated Rust Cargo lock file?
#
# Returns true or false.
def cargo_lock?
  name.match?(/Cargo\.lock/)
end

# Internal: Is the blob a generated Rust Cargo original file?
#
# Returns true or false.
def cargo_orig?
  name.match?(/Cargo\.toml\.orig/)
end

# Internal: Is the blob a generated Nix flakes lock file?
#
# Returns true or false
def flake_lock?
  name.match?(/(^|\/)flake\.lock$/)
end

# Internal: Is the blob a Bazel generated bzlmod lockfile?
#
# Returns true or false
def bazel_lock?
  name.match?(/(^|\/)MODULE\.bazel\.lock$/)
end

# Internal: Is the blob a generated gradle wrapper file?
#
# Returns true or false.
def gradle_wrapper?
  name.match?(/(?:^|\/)gradlew(?:\.bat)?$/i)
end

# Internal: Is the blob a generated maven wrapper file?
#
# Returns true or false.
def maven_wrapper?
  name.match?(/(?:^|\/)mvnw(?:\.cmd)?$/i)
end
# Is the blob a VCR Cassette file?
#
# VCR Cassettes carry "recorded_with: VCR" on the second-to-last line.
#
# Returns true or false
def vcr_cassette?
  extname == '.yml' &&
    lines.count > 2 &&
    lines[-2].include?("recorded_with: VCR")
end

# Is this a generated ANTLR file?
#
# Returns true or false
def generated_antlr?
  extname == '.g' &&
    lines.count > 2 &&
    lines[1].include?("generated by Xtest")
end

# Internal: Is this a compiled C/C++ file from Cython?
#
# Cython-compiled C/C++ files typically contain:
#   /* Generated by Cython x.x.x on ... */
# on the first line.
#
# Return true or false
def compiled_cython_file?
  ['.c', '.cpp'].include?(extname) &&
    lines.count > 1 &&
    lines[0].include?("Generated by Cython")
end
# Internal: Is this a Pipenv lock file?
#
# Returns true or false.
def pipenv_lock?
  # match? avoids allocating a MatchData we never use.
  name.match?(/Pipfile\.lock/)
end

# Internal: Is this a Terraform lock file?
#
# Returns true or false.
def terraform_lock?
  name.match?(/(?:^|\/)\.terraform\.lock\.hcl$/)
end
# Internal: Is it a KiCAD or GFortran module file?
#
# KiCAD module files contain:
#   PCBNEW-LibModule-V1  yyyy-mm-dd h:mm:ss XM
# on the first line.
#
# GFortran module files contain:
#   GFORTRAN module version 'x' created from
# on the first line.
#
# Return true or false
def generated_module?
  return false unless extname == '.mod'
  return false unless lines.count > 1
  first_line = lines[0]
  first_line.include?("PCBNEW-LibModule-V") ||
    first_line.include?("GFORTRAN module version '")
end

# Internal: Is this a metadata file from Unity3D?
#
# Unity3D Meta files start with:
#   fileFormatVersion: X
#   guid: XXXXXXXXXXXXXXX
#
# Return true or false
def generated_unity3d_meta?
  extname == '.meta' &&
    lines.count > 1 &&
    lines[0].include?("fileFormatVersion: ")
end
# Internal: Is this a Racc-generated file?
#
# A Racc-generated file contains:
#   # This file is automatically generated by Racc x.y.z
# on the third line.
#
# Return true or false
def generated_racc?
  extname == '.rb' &&
    lines.count > 2 &&
    lines[2].start_with?("# This file is automatically generated by Racc")
end

# Internal: Is this a JFlex-generated file?
#
# A JFlex-generated file contains:
#   /* The following code was generated by JFlex x.y.z on d/at/e ti:me */
# on the first line.
#
# Return true or false
def generated_jflex?
  extname == '.java' &&
    lines.count > 1 &&
    lines[0].start_with?("/* The following code was generated by JFlex ")
end

# Internal: Is this a GrammarKit-generated file?
#
# A GrammarKit-generated file typically contains:
#   // This is a generated file. Not intended for manual editing.
# on the first line. This is not always the case, as it's possible to
# customize the class header.
#
# Return true or false
def generated_grammarkit?
  extname == '.java' &&
    lines.count > 1 &&
    lines[0].start_with?("// This is a generated file. Not intended for manual editing.")
end

# Internal: Is this a roxygen2-generated file?
#
# A roxygen2-generated file typically contains:
#   % Generated by roxygen2: do not edit by hand
# on the first line.
#
# Return true or false
def generated_roxygen2?
  extname == '.Rd' &&
    lines.count > 1 &&
    lines[0].include?("% Generated by roxygen2: do not edit by hand")
end

# Internal: Is this a Jison-generated file?
#
# Jison-generated parsers typically contain:
#   /* parser generated by jison
# on the first line.
#
# Jison-generated lexers typically contain:
#   /* generated by jison-lex
# on the first line.
#
# Return true or false
def generated_jison?
  return false unless extname == '.js' && lines.count > 1
  first_line = lines[0]
  first_line.start_with?("/* parser generated by jison ") ||
    first_line.start_with?("/* generated by jison-lex ")
end
# Internal: Is this a protobuf/grpc-generated C++ file?
#
# A generated file contains:
#   // Generated by the gRPC C++ plugin.
# on the first line.
#
# Return true or false
def generated_grpc_cpp?
  return false unless ['.cpp', '.hpp', '.h', '.cc'].include?(extname)
  lines.count > 1 && lines[0].start_with?("// Generated by the gRPC")
end

# Internal: Is this a generated Dart file?
#
# A google/protoc-plugin generated file contains:
#   // Generated code. Do not modify.
# on the second line.
#
# A source_gen generated file may contain:
#   // GENERATED CODE - DO NOT MODIFY
# on the first, second, or third line.
#
# Return true or false
def generated_dart?
  return false unless extname == '.dart'
  return false unless lines.count > 1
  lines.first(3).any? do |line|
    line.downcase.match(/generated code\W{2,3}do not modify/)
  end
end
# Internal: Is the file a generated Perl/Pollution/Portability header file?
#
# Returns true or false.
def generated_perl_ppport_header?
  # match? avoids allocating a MatchData we never use.
  return false unless name.match?(/ppport\.h$/)
  return false unless lines.count > 10
  lines[8].include?("Automatically created by Devel::PPPort")
end

# Internal: Is this a relay-compiler generated graphql file?
#
# Return true or false
def generated_graphql_relay?
  name.match?(/__generated__\//)
end
# Internal: Is this a generated Game Maker Studio (2) metadata file?
#
# Return true or false
def generated_gamemakerstudio?
  return false unless ['.yy', '.yyp'].include?(extname)
  return false unless lines.count > 3
  # GMS2 emits JSON-ish documents; GMS1 prefixes a version header.
  header = lines.first(3).join('')
  return true if header.match?(/^\s*[\{\[]/)
  lines[0] =~ /^\d\.\d\.\d.+\|\{/
end

# Internal: Is this a generated GIMP C image file?
#
# GIMP saves C sources with one of two comment forms:
# * `/* GIMP RGB C-Source image dump (<filename>.c) */` (C source export)
# * `/* GIMP header image file format (RGB): <filename>.h */` (Header export)
#
# Return true or false
def generated_gimp?
  return false unless ['.c', '.h'].include?(extname)
  return false if lines.empty?
  first_line = lines[0]
  first_line.match(/^\/\* GIMP [a-zA-Z0-9\- ]+ C\-Source image dump \(.+?\.c\) \*\//) ||
    first_line.match(/^\/\* GIMP header image file format \([a-zA-Z0-9\- ]+\)\: .+?\.h \*\//)
end

# Internal: Is this a generated Microsoft Visual Studio 6.0 build file?
#
# Return true or false
def generated_visualstudio6?
  return false unless extname.downcase == '.dsp'
  lines.first(3).any? { |line| line.include?('# Microsoft Developer Studio Generated Build File') }
end
# Extensions for every target language Haxe transpiles to; frozen because
# mutable constants can be modified from anywhere.
HAXE_EXTENSIONS = ['.js', '.py', '.lua', '.cpp', '.h', '.java', '.cs', '.php'].freeze

# Internal: Is this a generated Haxe-generated source file?
#
# Return true or false
def generated_haxe?
  return false unless HAXE_EXTENSIONS.include?(extname)
  lines.first(3).any? { |l| l.include?("Generated by Haxe") }
end
# Internal: Is this a generated HTML file?
#
# HTML documents generated by authoring tools often include a
# a <meta> tag in the header of the form:
#
#   <meta name="generator" content="DocGen v5.0.1" />
#
# Checks, in order: pkgdown marker (first two lines), mandoc marker
# (third line), Doxygen marker (first 31 lines), and finally a
# <meta name="generator"> tag naming a known documentation generator.
#
# Return true or false
def generated_html?
  return false unless ['.html', '.htm', '.xhtml'].include? extname.downcase
  return false unless lines.count > 1
  # Pkgdown
  return true if lines[0..1].any? do |line|
    line.match(/<!-- Generated by pkgdown: do not edit by hand -->/)
  end
  # Mandoc
  return true if lines.count > 2 && lines[2].start_with?('<!-- This is an automatically generated file.')
  # Doxygen
  return true if lines[0..30].any? do |line|
    line.match(/<!--\s+Generated by Doxygen\s+[.0-9]+\s*-->/i)
  end
  # HTML tag: <meta name="generator" content="…" />
  # `[^>]++` is a possessive quantifier: no backtracking into the
  # attribute text, which keeps the scan linear.
  matches = lines[0..30].join(' ').scan(/<meta(\s+[^>]++)>/i)
  return false if matches.empty?
  # Accept either a content= or value= attribute naming one of the
  # generators below.
  return matches.map {|x| extract_html_meta(x) }.any? do |attr|
    attr["name"].to_s.downcase == 'generator' &&
    [attr["content"], attr["value"]].any? do |cv|
      !cv.nil? &&
      cv.match(/^
        ( org \s+ mode
        | j?latex2html
        | groff
        | makeinfo
        | texi2html
        | ronn
        ) \b
      /ix)
    end
  end
end
# Internal: Is this a generated jOOQ file?
#
# Return true or false
def generated_jooq?
  return false unless extname.downcase == '.java'
  lines.first(2).any? { |l| l.include? 'This file is generated by jOOQ.' }
end

# Internal: Is this a generated Delphi Interface file for a type library?
#
# Delphi Type Library Import tool generates *_TLB.pas files based on .ridl files.
# They are not meant to be altered by humans.
#
# Returns true or false
def generated_pascal_tlb?
  # match? avoids allocating a MatchData we never use.
  name.match?(/_tlb\.pas$/i)
end
# Internal: Is this a Sorbet RBI file generated by Tapioca?
#
# Tapioca generates non-human-editable .rbi files in several different
# ways:
#
# 1. `tapioca gem` uses reflection to generate generic .rbi for gems.
# 2. `tapioca dsl` uses DSL compilers to generate .rbi for modules/classes.
# 3. `tapioca annotations` pulls .rbi from remote sources.
#
# All are marked with similar wording.
#
# Returns true or false
def generated_sorbet_rbi?
  return false unless extname.downcase == '.rbi'
  return false unless lines.count >= 5
  markers = [
    lines[0].match?(/^# typed:/),
    lines[2].include?("DO NOT EDIT MANUALLY"),
    lines[4].match?(/^# Please (run|instead update this file by running) `bin\/tapioca/)
  ]
  markers.all?
end
# Internal: Is it MySQL View Definition Format?
#
# MySQL View Definition Format (INI) files are generated by MySQL 5.7 and earlier.
# They are not meant to be altered by humans.
#
# Returns true or false
def generated_mysql_view_definition_format?
  extname.downcase == '.frm' && lines[0].include?("TYPE=VIEW")
end
# Internal: Is this an HTML coverage report?
#
# Tools like coverage.py generate HTML reports under an `htmlcov` directory.
#
# Returns true or false.
def htmlcov?
  # match? avoids allocating a MatchData we never use.
  name.match?(/(?:^|\/)htmlcov\//)
end
# Internal: Extract a Hash of name/content pairs from an HTML <meta> tag.
#
# match - Array of capture groups from the <meta> scan; the last element
#         is the tag's raw attribute string.
#
# Returns a Hash mapping lowercased attribute names ("name", "content",
# "value") to their unquoted values.
def extract_html_meta(match)
  # Drop a trailing "/" from self-closing tags before scanning.
  attr_text = match.last.sub(/\/\Z/, "").strip
  attr_text.scan(/
    (?<=^|\s) # Check for preceding whitespace
    (name|content|value) # Attribute names we're interested in
    \s* = \s* # Key-value separator
    # Attribute value
    ( "[^"]+" # name="value"
    | '[^']+' # name='value'
    | [^\s"']+ # name=value
    )
  /ix).map do |key, value|
    # Destructuring the scan pair avoids shadowing the method's `match`
    # parameter (the original block reused the name `match`).
    [key.downcase, value.gsub(/\A["']|["']\Z/, '')]
  end.to_h
end
# Internal: Is this a generated SQLx query file?
#
# SQLx is a Rust SQL library which generates `**/.sqlx/query-*.json` files
# in offline mode (enabled by default).
#
# These are used to be able to compile a project without requiring
# the development database to be online.
#
# Returns true or false.
def generated_sqlx_query?
  # match? avoids allocating a MatchData we never use. The file stem is a
  # 64-hex-digit query hash.
  name.match?(/(?:^|\/)\.sqlx\/query-[a-f\d]{64}\.json$/)
end
end
end
| ruby | MIT | a7e40d31e271f6747fa1234df37b5fff3e6e2406 | 2026-01-04T15:37:32.851738Z | false |
github-linguist/linguist | https://github.com/github-linguist/linguist/blob/a7e40d31e271f6747fa1234df37b5fff3e6e2406/lib/linguist/sha256.rb | lib/linguist/sha256.rb | require 'digest/sha2'
module Linguist
  module SHA256
    # Public: Create deep nested digest of value object.
    #
    # Useful for object comparison.
    #
    # obj - Object to digest: String, Symbol, Integer, Float,
    #       true/false/nil, or an Array/Hash composed of the same.
    #
    # Returns String hex digest.
    #
    # Raises TypeError for any other type.
    def self.hexdigest(obj)
      digest = Digest::SHA256.new
      case obj
      when String, Symbol, Integer, Float
        # Mixing in the class keeps e.g. "1" (String) and 1 (Integer) distinct.
        digest.update "#{obj.class}"
        digest.update "#{obj}"
      when TrueClass, FalseClass, NilClass
        digest.update "#{obj.class}"
      when Array
        digest.update "#{obj.class}"
        # `each` instead of `for`: `for` leaks its loop variable into the
        # surrounding scope.
        obj.each do |e|
          digest.update(hexdigest(e))
        end
      when Hash
        digest.update "#{obj.class}"
        # Sorting the per-pair digests makes the result independent of
        # hash insertion order.
        obj.map { |k, v| hexdigest([k, v]) }.sort.each do |e|
          digest.update(e)
        end
      else
        raise TypeError, "can't convert #{obj.inspect} into String"
      end
      digest.hexdigest
    end
  end
end
| ruby | MIT | a7e40d31e271f6747fa1234df37b5fff3e6e2406 | 2026-01-04T15:37:32.851738Z | false |
github-linguist/linguist | https://github.com/github-linguist/linguist/blob/a7e40d31e271f6747fa1234df37b5fff3e6e2406/lib/linguist/source/repository.rb | lib/linguist/source/repository.rb | module Linguist
module Source
# Repository is an interface for providing direct access to functionality in
# a repository of files whose contents can be scanned for language
# information.
#
# NOTE: abstract interface — every method below raises NotImplementedError
# and must be overridden by a concrete backend (e.g. the Rugged-based
# implementation).
class Repository
  # Public: get the number of entries in the root tree of the given commit,
  # with an optional maximum value.
  #
  # commit_id - the string unique identifier of the commit to analyze.
  # limit     - (Optional) the integer maximum number of tree entries to
  #             count.
  #
  # Returns the number of entries in the tree or 'limit', whichever is
  # smaller.
  def get_tree_size(commit_id, limit = nil)
    raise NotImplementedError
  end

  # Public: set the commit whose .gitattributes file(s) should be used as
  # the source of attribute information in 'load_attributes_for_path'.
  #
  # commit_id - the string unique identifier of the attribute source commit.
  #
  # Returns nothing.
  def set_attribute_source(commit_id)
    raise NotImplementedError
  end

  # Public: read the data and size information for the specified file blob.
  #
  # blob_id  - the string unique identifier of the blob to read.
  # max_size - the integer maximum size in bytes to read from the blob.
  #
  # Returns the (possibly truncated) byte string of blob content and
  # the full, untruncated size of the blob.
  def load_blob(blob_id, max_size)
    raise NotImplementedError
  end

  # Public: look up the attribute values for a given path.
  #
  # path       - the path for which we want attribute values.
  # attr_names - the attributes to read for the given path.
  #
  # Returns a Hash mapping attribute names to their corresponding values.
  def load_attributes_for_path(path, attr_names)
    raise NotImplementedError
  end

  # Public: compute the diff between the given old and new commits.
  #
  # old_commit - the string unique identifier of the "before" state of the
  #              diff, or nil (representing an empty tree).
  # new_commit - the string unique identifier of the "after" state of the
  #              diff, or nil (representing an empty tree).
  #
  # Returns a Source::Diff.
  def diff(old_commit, new_commit)
    raise NotImplementedError
  end
end
end
end
| ruby | MIT | a7e40d31e271f6747fa1234df37b5fff3e6e2406 | 2026-01-04T15:37:32.851738Z | false |
github-linguist/linguist | https://github.com/github-linguist/linguist/blob/a7e40d31e271f6747fa1234df37b5fff3e6e2406/lib/linguist/source/diff.rb | lib/linguist/source/diff.rb | require 'linguist/generated'
require 'cgi'
require 'charlock_holmes'
require 'mini_mime'
require 'yaml'
module Linguist
module Source
# Diff is an interface representing a diff between two trees. It is composed
# of a collection of iterable deltas between before/after states of files.
#
# NOTE: abstract interface — both Diff#each_delta and every Delta accessor
# raise NotImplementedError and must be overridden by a concrete backend.
class Diff
  # A Delta represents a single file's before/after state in a diff.
  class Delta
    # Public: get the status of the file's "after" state as compared to
    # "before". Valid status values include:
    #
    # - :added
    # - :deleted
    # - :modified
    # - :renamed
    # - :copied
    # - :ignored
    # - :untracked
    # - :typechange
    #
    # Returns the status.
    def status
      raise NotImplementedError
    end

    # Public: determine whether the file delta is binary.
    #
    # Returns true if the delta is binary, false otherwise.
    def binary?
      raise NotImplementedError
    end

    # Public: get the metadata of the "before" file in the delta. The
    # metadata is represented as a Hash with the keys:
    #
    # - :path (string)
    # - :oid (string)
    # - :mode (integer)
    #
    # Returns the entry metadata hash.
    def old_file
      raise NotImplementedError
    end

    # Public: get the metadata of the "after" file in the delta. The
    # metadata is represented as a Hash with the keys:
    #
    # - :path (string)
    # - :oid (string)
    # - :mode (integer)
    #
    # Returns the entry metadata hash.
    def new_file
      raise NotImplementedError
    end
  end

  # Public: iterate through each delta of the given diff. Yields a single
  # delta to the given block.
  #
  # Returns nothing.
  def each_delta
    raise NotImplementedError
  end
end
end
end
| ruby | MIT | a7e40d31e271f6747fa1234df37b5fff3e6e2406 | 2026-01-04T15:37:32.851738Z | false |
github-linguist/linguist | https://github.com/github-linguist/linguist/blob/a7e40d31e271f6747fa1234df37b5fff3e6e2406/lib/linguist/source/rugged.rb | lib/linguist/source/rugged.rb | require 'rugged'
require 'linguist/source/diff'
module Linguist
module Source
# RuggedRepository is an implementation of the Source::Repository abstract
# class. It represents a Git repository that is accessed using the libgit2
# wrapper Rugged.
class RuggedRepository < Linguist::Source::Repository
# Adapter exposing a Rugged::Diff through the Source::Diff interface.
class Diff < Linguist::Source::Diff
  # Thin wrapper delegating the Delta accessors to a Rugged delta.
  class Delta < Linguist::Source::Diff::Delta
    def initialize(rugged_delta)
      @delta = rugged_delta
    end

    # Straight delegation onto the wrapped Rugged delta.
    def status; @delta.status; end
    def binary?; @delta.binary; end
    def old_file; @delta.old_file; end
    def new_file; @delta.new_file; end
  end

  def initialize(rugged_diff)
    @diff = rugged_diff
  end

  # Yields each Rugged delta wrapped in the Delta adapter above.
  def each_delta(&block)
    @diff.each_delta.map do |delta|
      Delta.new(delta)
    end.each(&block)
  end
end
# Attribute-lookup options: consult only the in-memory index (populated by
# set_attribute_source) and skip system-level gitattributes. Frozen because
# mutable constants can be modified from anywhere.
GIT_ATTR_OPTS = { :priority => [:index], :skip_system => true }.freeze
GIT_ATTR_FLAGS = Rugged::Repository::Attributes.parse_opts(GIT_ATTR_OPTS)

# Public: wrap a Rugged::Repository.
#
# rugged - the Rugged::Repository instance all calls delegate to.
def initialize(rugged)
  @rugged = rugged
  @tree_map = {}       # commit ID -> Rugged::Tree memo (see get_tree)
  @attr_source = nil   # tree currently loaded as the attribute source
end
# Public: count entries in the commit's root tree, stopping at 'limit'.
def get_tree_size(commit_id, limit)
  get_tree(commit_id).count_recursive(limit)
end

# Public: point attribute lookups at the given commit's tree by loading it
# into a fresh index on the underlying repository.
def set_attribute_source(commit_id)
  tree = get_tree(commit_id)
  # Skip the index rebuild when the source is unchanged.
  return if @attr_source == tree
  @attr_source = tree
  attr_index = Rugged::Index.new
  attr_index.read_tree(@attr_source)
  @rugged.index = attr_index
end

# Public: read .gitattributes values for 'path' from the attribute source
# set above (GIT_ATTR_FLAGS restricts the lookup to the index).
def load_attributes_for_path(path, attr_names)
  @rugged.fetch_attributes(path, attr_names, GIT_ATTR_FLAGS)
end

# Public: read at most 'max_size' bytes of the blob's content; also yields
# the blob's full size.
def load_blob(blob_id, max_size)
  Rugged::Blob.to_buffer(@rugged, blob_id, max_size)
end

# Public: diff two commits; a nil commit stands for the empty tree.
def diff(old_commit, new_commit)
  old_tree = old_commit.nil? ? nil : get_tree(old_commit)
  new_tree = new_commit.nil? ? nil : get_tree(new_commit)
  Diff.new(Rugged::Tree.diff(@rugged, old_tree, new_tree))
end
# Internal: get the Rugged::Tree associated with a given commit ID. This
# method should not be used outside of Linguist itself and is subject to
# change or be removed.
#
# commit_id - the object ID of the commit whose tree instance we want.
#
# Returns the Rugged::Tree of the specified commit.
def get_tree(commit_id)
  # `||=` collapses the original lookup-then-store into one memoized
  # expression (the looked-up tree is never nil/false).
  @tree_map[commit_id] ||= Rugged::Commit.lookup(@rugged, commit_id).tree
end

# Forward any unknown method to the underlying Rugged repository so this
# wrapper can stand in for one.
def method_missing(method_name, *args, &block)
  @rugged.send(method_name, *args, &block)
end

# Keep `respond_to?` consistent with the delegation in method_missing.
def respond_to_missing?(method_name, include_private = false)
  @rugged.respond_to?(method_name, include_private) || super
end
end
end
end
| ruby | MIT | a7e40d31e271f6747fa1234df37b5fff3e6e2406 | 2026-01-04T15:37:32.851738Z | false |
github-linguist/linguist | https://github.com/github-linguist/linguist/blob/a7e40d31e271f6747fa1234df37b5fff3e6e2406/lib/linguist/strategy/xml.rb | lib/linguist/strategy/xml.rb | module Linguist
module Strategy
# Detects XML files based on the XML declaration.
class XML
  # Scope of the search for the root tag
  # Number of lines to check at the beginning of the file
  SEARCH_SCOPE = 2

  # Public: Use the XML declaration to detect XML blobs, only if no other
  # candidates were previously identified.
  #
  # blob       - An object that quacks like a blob.
  # candidates - A list of candidate languages.
  #
  # Examples
  #
  #   XML.call(FileBlob.new("path/to/file"))
  #
  # Returns the list of candidates if it wasn't empty, an array with the
  # XML language as sole item if an XML declaration is detected, and an
  # empty Array otherwise.
  def self.call(blob, candidates = [])
    return candidates if candidates.any?
    header = blob.first_lines(SEARCH_SCOPE).join("\n")
    # The "?" must be escaped: the previous /<?xml version=/ treated "<"
    # as optional and therefore matched any occurrence of "xml version=".
    /<\?xml version=/.match?(header) ? [Language["XML"]] : []
  end
end
end
end
| ruby | MIT | a7e40d31e271f6747fa1234df37b5fff3e6e2406 | 2026-01-04T15:37:32.851738Z | false |
github-linguist/linguist | https://github.com/github-linguist/linguist/blob/a7e40d31e271f6747fa1234df37b5fff3e6e2406/lib/linguist/strategy/modeline.rb | lib/linguist/strategy/modeline.rb | module Linguist
module Strategy
class Modeline
EMACS_MODELINE = %r[
(?-m)
# Opening delimiter
-\*-
(?:
# Short form: `-*- ruby -*-`
[ \t]*
(?=
[^:;\s]+ # Name of mode
[ \t]* # Optional whitespace
-\*- # Closing delimiter
)
|
# Longer form: `-*- foo:bar; mode: ruby; -*-`
(?:
.*?[ \t;] # Preceding variables: `-*- foo:bar bar:baz;`
|
(?<=-\*-) # Not preceded by anything: `-*-mode:ruby-*-`
)
# Explicitly-named variable: `mode: ruby` or `mode : ruby`
[ \t]* mode [ \t]* : [ \t]*
)
# Name of major-mode, which corresponds to syntax or filetype
([^:;\s]+)
# Ensure the name is terminated correctly
(?=
# Followed by semicolon or whitespace
[ \t;]
|
# Touching the ending sequence: `ruby-*-`
(?<![-*]) # Don't allow stuff like `ruby--*-` to match; it'll invalidate the mode
-\*- # Emacs has no problems reading `ruby --*-`, however.
)
# If we've gotten this far, it means the modeline is valid.
# We gleefully skip past everything up until reaching "-*-"
.*?
# Closing delimiter
-\*-
]xi
# NOTE: When changing this regex, be sure to keep the Vim Help heuristic updated too (#5347)
VIM_MODELINE = %r[
(?-m)
# Start of modeline (syntax documented in E520)
(?:
# `vi:`, `vim:` or `Vim:`
(?:^|[ \t]) (?:vi|Vi(?=m))
# Check if specific Vim version(s) are requested (won't work in vi/ex)
(?:
# Versioned modeline. `vim<700:` targets Vim versions older than 7.0
m
[<=>]? # If comparison operator is omitted, *only* this version is targeted
[0-9]+ # Version argument = (MINOR_VERSION_NUMBER * 100) + MINOR_VERSION_NUMBER
|
# Unversioned modeline. `vim:` targets any version of Vim.
m
)?
|
# `ex:`, which requires leading whitespace to avoid matching stuff like "lex:"
[ \t] ex
)
# If the option-list begins with `set ` or `se `, it indicates an alternative
# modeline syntax partly-compatible with older versions of Vi. Here, the colon
# serves as a terminator for an option sequence, delimited by whitespace.
(?=
# So we have to ensure the modeline ends with a colon
: (?=[ \t]* set? [ \t] [^\r\n:]+ :) |
# Otherwise, it isn't valid syntax and should be ignored
: (?![ \t]* set? [ \t])
)
# Possible (unrelated) `option=value` pairs to skip past
(?:
# Option separator, either
(?:
# 1. A colon (possibly surrounded by whitespace)
[ \t]* : [ \t]* # vim: noai : ft=sh:noexpandtab
|
# 2. At least one (horizontal) whitespace character
[ \t] # vim: noai ft=sh noexpandtab
)
# Option's name. All recognised Vim options have an alphanumeric form.
\w*
# Possible value. Not every option takes an argument.
(?:
# Whitespace between name and value is allowed: `vim: ft =sh`
[ \t]*=
# Option's value. Might be blank; `vim: ft= ` means "use no filetype".
(?:
[^\\\s] # Beware of escaped characters: titlestring=\ ft=sh
| # will be read by Vim as { titlestring: " ft=sh" }.
\\.
)*
)?
)*
# The actual filetype declaration
[ \t:] (?:filetype|ft|syntax) [ \t]*=
# Language's name
(\w+)
# Ensure it's followed by a legal separator (including EOL)
(?=$|\s|:)
]x
# All modeline patterns to try, in order of precedence.
MODELINES = [EMACS_MODELINE, VIM_MODELINE]
# Scope of the search for modelines
# Number of lines to check at the beginning and at the end of the file
SEARCH_SCOPE = 5
# Public: Detects language based on Vim and Emacs modelines
#
# blob - An object that quacks like a blob.
#
# Examples
#
#   Modeline.call(FileBlob.new("path/to/file"))
#
# Returns an Array with one Language if the blob has a Vim or Emacs modeline
# that matches a Language name or alias. Returns an empty array if no match.
def self.call(blob, _ = nil)
  return [] if blob.symlink?
  # Modelines may sit in the first or the last SEARCH_SCOPE lines, so scan
  # both ends of the file.
  header = blob.first_lines(SEARCH_SCOPE).join("\n")
  # Return early for Vimball files as their modeline will not reflect their filetype.
  return [] if header.include?("UseVimball")
  footer = blob.last_lines(SEARCH_SCOPE).join("\n")
  Array(Language.find_by_alias(modeline(header + footer)))
end
# Public: Get the modeline from the first n-lines of the file
#
# Returns a String or nil
def self.modeline(data)
match = MODELINES.map { |regex| data.match(regex) }.reject(&:nil?).first
match[1] if match
end
end
end
end
| ruby | MIT | a7e40d31e271f6747fa1234df37b5fff3e6e2406 | 2026-01-04T15:37:32.851738Z | false |
github-linguist/linguist | https://github.com/github-linguist/linguist/blob/a7e40d31e271f6747fa1234df37b5fff3e6e2406/lib/linguist/strategy/manpage.rb | lib/linguist/strategy/manpage.rb | module Linguist
module Strategy
# Detects man pages based on numeric file extensions with group suffixes.
class Manpage
# Public: RegExp for matching conventional manpage extensions
#
# This is the same expression as that used by `github/markup`
MANPAGE_EXTS = /\.(?:[1-9](?![0-9])[a-z_0-9]*|0p|n|man|mdoc)(?:\.in)?$/i
# Public: Use the file extension to match a possible man page,
# only if no other candidates were previously identified.
#
# blob - An object that quacks like a blob.
# candidates - A list of candidate languages.
#
# Examples
#
# Manpage.call(FileBlob.new("path/to/file"))
#
# Returns:
# 1. The list of candidates if it wasn't empty
# 2. An array of ["Roff", "Roff Manpage"] if the file's
# extension matches a valid-looking man(1) section
# 3. An empty Array for anything else
#
def self.call(blob, candidates = [])
return candidates if candidates.any?
if blob.name =~ MANPAGE_EXTS
return [
Language["Roff Manpage"],
Language["Roff"],
# Language["Text"] TODO: Uncomment once #4258 gets merged
];
end
[]
end
end
end
end
| ruby | MIT | a7e40d31e271f6747fa1234df37b5fff3e6e2406 | 2026-01-04T15:37:32.851738Z | false |
github-linguist/linguist | https://github.com/github-linguist/linguist/blob/a7e40d31e271f6747fa1234df37b5fff3e6e2406/lib/linguist/strategy/filename.rb | lib/linguist/strategy/filename.rb | module Linguist
module Strategy
# Detects language based on filename
class Filename
# Public: Use the filename to detect the blob's language.
#
# blob - An object that quacks like a blob.
# candidates - A list of candidate languages.
#
# Examples
#
# Filename.call(FileBlob.new("path/to/file"))
#
# Returns an array of languages with a associated blob's filename.
# Selected languages must be in the candidate list, except if it's empty,
# in which case any language is a valid candidate.
def self.call(blob, candidates)
name = blob.name.to_s
languages = Language.find_by_filename(name)
candidates.any? ? candidates & languages : languages
end
end
end
end
| ruby | MIT | a7e40d31e271f6747fa1234df37b5fff3e6e2406 | 2026-01-04T15:37:32.851738Z | false |
github-linguist/linguist | https://github.com/github-linguist/linguist/blob/a7e40d31e271f6747fa1234df37b5fff3e6e2406/lib/linguist/strategy/extension.rb | lib/linguist/strategy/extension.rb | require 'yaml'
module Linguist
module Strategy
# Detects language based on extension
class Extension
# Public: Use the file extension to detect the blob's language.
#
# blob - An object that quacks like a blob.
# candidates - A list of candidate languages.
#
# Examples
#
# Extension.call(FileBlob.new("path/to/file"))
#
# Returns an array of languages associated with a blob's file extension.
# Selected languages must be in the candidate list, except if it's empty,
# in which case any language is a valid candidate.
def self.call(blob, candidates)
return candidates if generic? blob.name.to_s
languages = Language.find_by_extension(blob.name.to_s)
candidates.any? ? candidates & languages : languages
end
# Public: Return true if filename uses a generic extension.
def self.generic?(filename)
self.load
@generic.any? { |ext| filename.downcase.end_with? ext }
end
@generic = []
# Internal: Load the contents of `generic.yml`
def self.load()
return if @generic.any?
data = YAML.load_file(File.expand_path("../../generic.yml", __FILE__))
@generic = data['extensions']
end
end
end
end
| ruby | MIT | a7e40d31e271f6747fa1234df37b5fff3e6e2406 | 2026-01-04T15:37:32.851738Z | false |
sj26/mailcatcher | https://github.com/sj26/mailcatcher/blob/fbe811a53aeabe75b6e728b87f7f4c8fbf4b4843/spec/clear_spec.rb | spec/clear_spec.rb | # frozen_string_literal: true
require "spec_helper"
RSpec.describe "Clear", type: :feature do
it "clears all messages" do
# Delivering three emails ..
deliver_example("plainmail")
deliver_example("plainmail")
deliver_example("plainmail")
# .. should display three emails
expect(page).to have_selector("#messages table tbody tr", text: "Plain mail", count: 3)
# Clicking Clear but cancelling ..
dismiss_confirm do
click_on "Clear"
end
# .. should still display three emails
expect(page).to have_selector("#messages table tbody tr", text: "Plain mail", count: 3)
# Clicking clear and confirming ..
accept_confirm "Are you sure you want to clear all messages?" do
click_on "Clear"
end
# .. should display no emails
expect(page).not_to have_selector("#messages table tbody tr")
end
end
| ruby | MIT | fbe811a53aeabe75b6e728b87f7f4c8fbf4b4843 | 2026-01-04T15:40:11.381229Z | false |
sj26/mailcatcher | https://github.com/sj26/mailcatcher/blob/fbe811a53aeabe75b6e728b87f7f4c8fbf4b4843/spec/quit_spec.rb | spec/quit_spec.rb | # frozen_string_literal: true
require "spec_helper"
RSpec.describe "Quit", type: :feature do
it "quits cleanly via the Quit button" do
# Quitting and cancelling ..
dismiss_confirm do
click_on "Quit"
end
# .. should not exit the process
expect { Process.kill(0, @pid) }.not_to raise_error
# Reload the page to be sure
visit "/"
wait.until { page.evaluate_script("MailCatcher.websocket.readyState") == 1 rescue false }
# Quitting and confirming ..
accept_confirm "Are you sure you want to quit?" do
click_on "Quit"
end
# .. should exit the process ..
_, status = Process.wait2(@pid)
expect(status).to be_exited
expect(status).to be_success
# .. and navigate to the mailcatcher website
expect(page).to have_current_path "https://mailcatcher.me"
end
it "quits cleanly on Ctrl+C" do
# Sending a SIGINT (Ctrl+C) ...
Process.kill(:SIGINT, @pid)
# .. should cause the process to exit cleanly
_, status = Process.wait2(@pid)
expect(status).to be_exited
expect(status).to be_success
end
end
| ruby | MIT | fbe811a53aeabe75b6e728b87f7f4c8fbf4b4843 | 2026-01-04T15:40:11.381229Z | false |
sj26/mailcatcher | https://github.com/sj26/mailcatcher/blob/fbe811a53aeabe75b6e728b87f7f4c8fbf4b4843/spec/delivery_spec.rb | spec/delivery_spec.rb | # frozen_string_literal: true
require "spec_helper"
RSpec.describe MailCatcher, type: :feature do
def messages_element
page.find("#messages")
end
def message_row_element
messages_element.find(:xpath, ".//table/tbody/tr[1]")
end
def message_from_element
message_row_element.find(:xpath, ".//td[1]")
end
def message_to_element
message_row_element.find(:xpath, ".//td[2]")
end
def message_subject_element
message_row_element.find(:xpath, ".//td[3]")
end
def message_received_element
message_row_element.find(:xpath, ".//td[4]")
end
def html_tab_element
page.find("#message header .format.html a")
end
def plain_tab_element
page.find("#message header .format.plain a")
end
def source_tab_element
page.find("#message header .format.source a")
end
def attachment_header_element
page.find("#message header .metadata dt.attachments")
end
def attachment_contents_element
page.find("#message header .metadata dd.attachments")
end
def first_attachment_element
attachment_contents_element.find("ul li:first-of-type a")
end
def body_element
page.find("body")
end
it "catches and displays a plain text message as plain text and source" do
deliver_example("plainmail")
# Do not reload, make sure that the message appears via websockets
expect(page).to have_selector("#messages table tbody tr:first-of-type", text: "Plain mail")
expect(message_from_element).to have_text(DEFAULT_FROM)
expect(message_to_element).to have_text(DEFAULT_TO)
expect(message_subject_element).to have_text("Plain mail")
expect(Time.parse(message_received_element.text)).to be <= Time.now + 5
message_row_element.click
expect(source_tab_element).to be_visible
expect(plain_tab_element).to be_visible
expect(page).to have_no_selector("#message header .format.html a")
plain_tab_element.click
within_frame do
expect(body_element).to have_no_text("Subject: Plain mail")
expect(body_element).to have_text("Here's some text")
end
source_tab_element.click
within_frame do
expect(body_element.text).to include("Subject: Plain mail")
expect(body_element.text).to include("Here's some text")
end
end
it "catches and displays an html message as html and source" do
deliver_example("htmlmail")
# Do not reload, make sure that the message appears via websockets
expect(page).to have_selector("#messages table tbody tr:first-of-type", text: "Test HTML Mail")
expect(message_from_element).to have_text(DEFAULT_FROM)
expect(message_to_element).to have_text(DEFAULT_TO)
expect(message_subject_element).to have_text("Test HTML Mail")
expect(Time.parse(message_received_element.text)).to be <= Time.now + 5
message_row_element.click
expect(source_tab_element).to be_visible
expect(page).to have_no_selector("#message header .format.plain a")
expect(html_tab_element).to be_visible
html_tab_element.click
within_frame do
expect(page).to have_text("Yo, you slimey scoundrel.")
expect(page).to have_no_text("Content-Type: text/html")
expect(page).to have_no_text("Yo, you <em>slimey scoundrel</em>.")
end
source_tab_element.click
within_frame do
expect(page).to have_no_text("Yo, you slimey scoundrel.")
expect(page).to have_text("Content-Type: text/html")
expect(page).to have_text("Yo, you <em>slimey scoundrel</em>.")
end
end
it "catches and displays a multipart message as text, html and source" do
deliver_example("multipartmail")
# Do not reload, make sure that the message appears via websockets
expect(page).to have_selector("#messages table tbody tr:first-of-type", text: "Test Multipart Mail")
expect(message_from_element).to have_text(DEFAULT_FROM)
expect(message_to_element).to have_text(DEFAULT_TO)
expect(message_subject_element).to have_text("Test Multipart Mail")
expect(Time.parse(message_received_element.text)).to be <= Time.now + 5
message_row_element.click
expect(source_tab_element).to be_visible
expect(plain_tab_element).to be_visible
expect(html_tab_element).to be_visible
plain_tab_element.click
within_frame do
expect(page).to have_text "Plain text mail"
expect(page).to have_no_text "HTML mail"
expect(page).to have_no_text "Content-Type: multipart/alternative; boundary=BOUNDARY--198849662"
end
html_tab_element.click
within_frame do
expect(page).to have_no_text "Plain text mail"
expect(page).to have_text "HTML mail"
expect(page).to have_no_text "Content-Type: multipart/alternative; boundary=BOUNDARY--198849662"
end
source_tab_element.click
within_frame do
expect(page).to have_text "Content-Type: multipart/alternative; boundary=BOUNDARY--198849662"
expect(page).to have_text "Plain text mail"
expect(page).to have_text "<em>HTML</em> mail"
end
end
it "catches and displays a multipart UTF8 message as text, html and source" do
deliver_example("multipartmail-with-utf8")
# Do not reload, make sure that the message appears via websockets
expect(page).to have_selector("#messages table tbody tr:first-of-type", text: "Test Multipart UTF8 Mail")
expect(message_from_element).to have_text(DEFAULT_FROM)
expect(message_to_element).to have_text(DEFAULT_TO)
expect(message_subject_element).to have_text("Test Multipart UTF8 Mail")
expect(Time.parse(message_received_element.text)).to be <= Time.now + 5
message_row_element.click
expect(source_tab_element).to be_visible
expect(plain_tab_element).to be_visible
expect(html_tab_element).to be_visible
plain_tab_element.click
within_frame do
expect(page).to have_text "Plain text mail"
expect(page).to have_no_text "© HTML mail"
expect(page).to have_no_text "Content-Type: multipart/alternative; boundary=BOUNDARY--198849662"
end
html_tab_element.click
within_frame do
expect(page).to have_no_text "Plain text mail"
expect(page).to have_text "© HTML mail"
expect(page).to have_no_text "Content-Type: multipart/alternative; boundary=BOUNDARY--198849662"
end
source_tab_element.click
within_frame do
expect(page).to have_text "Content-Type: multipart/alternative; boundary=BOUNDARY--198849662"
expect(page).to have_text "Plain text mail"
expect(page).to have_text "<em>© HTML</em> mail"
end
end
it "catches and displays an unknown message as source" do
deliver_example("unknownmail")
# Do not reload, make sure that the message appears via websockets
skip
end
it "catches and displays a message with multipart attachments" do
deliver_example("attachmail")
# Do not reload, make sure that the message appears via websockets
expect(page).to have_selector("#messages table tbody tr:first-of-type", text: "Test Attachment Mail")
expect(message_from_element).to have_text(DEFAULT_FROM)
expect(message_to_element).to have_text(DEFAULT_TO)
expect(message_subject_element).to have_text("Test Attachment Mail")
expect(Time.parse(message_received_element.text)).to be <= Time.now + 5
message_row_element.click
expect(source_tab_element).to be_visible
expect(plain_tab_element).to be_visible
expect(attachment_header_element).to be_visible
plain_tab_element.click
within_frame do
expect(page).to have_text "This is plain text"
end
expect(first_attachment_element).to be_visible
expect(first_attachment_element).to have_text("attachment")
# Downloading via the browser is hard, so just grab from the URI directly
expect(Net::HTTP.get(URI.join(Capybara.app_host, first_attachment_element[:href]))).to eql("Hello, I am an attachment!\r\n")
source_tab_element.click
within_frame do
expect(page).to have_text "Content-Type: multipart/mixed"
expect(page).to have_text "This is plain text"
expect(page).to have_text "Content-Disposition: attachment"
# Too hard to add expectations on the transfer encoded attachment contents
end
end
it "doesn't choke on messages containing dots" do
deliver_example("dotmail")
# Do not reload, make sure that the message appears via websockets
skip
end
it "doesn't choke on messages containing quoted printables" do
deliver_example("quoted_printable_htmlmail")
# Do not reload, make sure that the message appears via websockets
skip
end
end
| ruby | MIT | fbe811a53aeabe75b6e728b87f7f4c8fbf4b4843 | 2026-01-04T15:40:11.381229Z | false |
sj26/mailcatcher | https://github.com/sj26/mailcatcher/blob/fbe811a53aeabe75b6e728b87f7f4c8fbf4b4843/spec/command_spec.rb | spec/command_spec.rb | require "spec_helper"
RSpec.describe "mailcatcher command" do
context "--version" do
it "shows a version then exits" do
expect { system %(mailcatcher --version) }
.to output(a_string_including("MailCatcher v#{MailCatcher::VERSION}"))
.to_stdout_from_any_process
end
end
context "--help" do
it "shows help then exits" do
expect { system %(mailcatcher --help) }
.to output(a_string_including("MailCatcher v#{MailCatcher::VERSION}") & a_string_including("--help") & a_string_including("Display this help information"))
.to_stdout_from_any_process
end
end
end
| ruby | MIT | fbe811a53aeabe75b6e728b87f7f4c8fbf4b4843 | 2026-01-04T15:40:11.381229Z | false |
sj26/mailcatcher | https://github.com/sj26/mailcatcher/blob/fbe811a53aeabe75b6e728b87f7f4c8fbf4b4843/spec/spec_helper.rb | spec/spec_helper.rb | # frozen_string_literal: true
ENV["MAILCATCHER_ENV"] ||= "test"
require "capybara/rspec"
require "capybara-screenshot/rspec"
require "selenium/webdriver"
require "net/smtp"
require "socket"
require "mail_catcher"
DEFAULT_FROM = "from@example.com"
DEFAULT_TO = "to@example.com"
LOCALHOST = "127.0.0.1"
SMTP_PORT = 20025
HTTP_PORT = 20080
# Use headless chrome by default
Capybara.default_driver = :selenium
Capybara.register_driver :selenium do |app|
opts = Selenium::WebDriver::Chrome::Options.new
opts.add_argument('disable-gpu')
opts.add_argument('force-device-scale-factor=1')
opts.add_argument('window-size=1400,900')
# Use NO_HEADLESS to open real chrome when debugging tests
unless ENV["NO_HEADLESS"]
opts.add_argument('headless=new')
end
Capybara::Selenium::Driver.new app, browser: :chrome,
service: Selenium::WebDriver::Service.chrome(log: File.expand_path("../tmp/chromedriver.log", __dir__)),
options: opts
end
Capybara.configure do |config|
# Don't start a rack server, connect to mailcatcher process
config.run_server = false
# Give a little more leeway for slow compute in CI
config.default_max_wait_time = 10 if ENV["CI"]
# Save into tmp directory
config.save_path = File.expand_path("../tmp/capybara", __dir__)
end
# Tell Capybara to talk to mailcatcher
Capybara.app_host = "http://#{LOCALHOST}:#{HTTP_PORT}"
RSpec.configure do |config|
# Helpers for delivering example email
def deliver(message, options={})
options = {:from => DEFAULT_FROM, :to => DEFAULT_TO}.merge(options)
Net::SMTP.start(LOCALHOST, SMTP_PORT) do |smtp|
smtp.send_message message, options[:from], options[:to]
end
end
def read_example(name)
File.read(File.expand_path("../../examples/#{name}", __FILE__))
end
def deliver_example(name, options={})
deliver(read_example(name), options)
end
# Teach RSpec to gather console errors from chrome when there are failures
config.after(:each, type: :feature) do |example|
# Did the example fail?
next unless example.exception # "failed"
# Do we have a browser?
next unless page.driver.browser
# Retrieve console logs if the browser/driver supports it
logs = page.driver.browser.manage.logs.get(:browser) rescue []
# Anything to report?
next if logs.empty?
# Add the log messages so they appear in failures
# This might already be a string, an array, or nothing
# Array(nil) => [], Array("a") => ["a"], Array(["a", "b"]) => ["a", "b"]
lines = example.metadata[:extra_failure_lines] = Array(example.metadata[:extra_failure_lines])
# Add a gap if there's anything there and it doesn't end with an empty line
lines << "" if lines.last
lines << "Browser console errors:"
lines << JSON.pretty_generate(logs.map { |log| log.as_json })
end
def wait
Selenium::WebDriver::Wait.new
end
config.before :each, type: :feature do
# Start MailCatcher
@pid = spawn "bundle", "exec", "mailcatcher", "--foreground", "--smtp-port", SMTP_PORT.to_s, "--http-port", HTTP_PORT.to_s
# Wait for it to boot
begin
Socket.tcp(LOCALHOST, SMTP_PORT, connect_timeout: 1) { |s| s.close }
Socket.tcp(LOCALHOST, HTTP_PORT, connect_timeout: 1) { |s| s.close }
rescue Errno::ECONNREFUSED, Errno::ETIMEDOUT
retry
end
# Open the web interface
visit "/"
# Wait for the websocket to be available to avoid race conditions
wait.until { page.evaluate_script("MailCatcher.websocket.readyState") == 1 rescue false }
end
config.after :each, type: :feature do
# Quit MailCatcher
Process.kill("TERM", @pid)
Process.wait
rescue Errno::ESRCH
# It's already gone
end
end
| ruby | MIT | fbe811a53aeabe75b6e728b87f7f4c8fbf4b4843 | 2026-01-04T15:40:11.381229Z | false |
sj26/mailcatcher | https://github.com/sj26/mailcatcher/blob/fbe811a53aeabe75b6e728b87f7f4c8fbf4b4843/lib/mailcatcher.rb | lib/mailcatcher.rb | # frozen_string_literal: true
require "mail_catcher"
Mailcatcher = MailCatcher
| ruby | MIT | fbe811a53aeabe75b6e728b87f7f4c8fbf4b4843 | 2026-01-04T15:40:11.381229Z | false |
sj26/mailcatcher | https://github.com/sj26/mailcatcher/blob/fbe811a53aeabe75b6e728b87f7f4c8fbf4b4843/lib/mail_catcher.rb | lib/mail_catcher.rb | # frozen_string_literal: true
require "open3"
require "optparse"
require "rbconfig"
require "eventmachine"
require "thin"
module EventMachine
# Monkey patch fix for 10deb4
# See https://github.com/eventmachine/eventmachine/issues/569
def self.reactor_running?
(@reactor_running || false)
end
end
require "mail_catcher/version"
module MailCatcher extend self
autoload :Bus, "mail_catcher/bus"
autoload :Mail, "mail_catcher/mail"
autoload :Smtp, "mail_catcher/smtp"
autoload :Web, "mail_catcher/web"
def env
ENV.fetch("MAILCATCHER_ENV", "production")
end
def development?
env == "development"
end
def which?(command)
ENV["PATH"].split(File::PATH_SEPARATOR).any? do |directory|
File.executable?(File.join(directory, command.to_s))
end
end
def windows?
RbConfig::CONFIG["host_os"].match?(/mswin|mingw/)
end
def browsable?
windows? or which? "open"
end
def browse url
if windows?
system "start", "/b", url
elsif which? "open"
system "open", url
end
end
def log_exception(message, context, exception)
gems_paths = (Gem.path | [Gem.default_dir]).map { |path| Regexp.escape(path) }
gems_regexp = %r{(?:#{gems_paths.join("|")})/gems/([^/]+)-([\w.]+)/(.*)}
gems_replace = '\1 (\2) \3'
puts "*** #{message}: #{context.inspect}"
puts " Exception: #{exception}"
puts " Backtrace:", *exception.backtrace.map { |line| " #{line.sub(gems_regexp, gems_replace)}" }
puts " Please submit this as an issue at https://github.com/sj26/mailcatcher/issues"
end
@@defaults = {
:smtp_ip => "127.0.0.1",
:smtp_port => "1025",
:http_ip => "127.0.0.1",
:http_port => "1080",
:http_path => "/",
:messages_limit => nil,
:verbose => false,
:daemon => !windows?,
:browse => false,
:quit => true,
}
def options
@@options
end
def quittable?
options[:quit]
end
def parse! arguments=ARGV, defaults=@defaults
@@defaults.dup.tap do |options|
OptionParser.new do |parser|
parser.banner = "Usage: mailcatcher [options]"
parser.version = VERSION
parser.separator ""
parser.separator "MailCatcher v#{VERSION}"
parser.separator ""
parser.on("--ip IP", "Set the ip address of both servers") do |ip|
options[:smtp_ip] = options[:http_ip] = ip
end
parser.on("--smtp-ip IP", "Set the ip address of the smtp server") do |ip|
options[:smtp_ip] = ip
end
parser.on("--smtp-port PORT", Integer, "Set the port of the smtp server") do |port|
options[:smtp_port] = port
end
parser.on("--http-ip IP", "Set the ip address of the http server") do |ip|
options[:http_ip] = ip
end
parser.on("--http-port PORT", Integer, "Set the port address of the http server") do |port|
options[:http_port] = port
end
parser.on("--messages-limit COUNT", Integer, "Only keep up to COUNT most recent messages") do |count|
options[:messages_limit] = count
end
parser.on("--http-path PATH", String, "Add a prefix to all HTTP paths") do |path|
clean_path = Rack::Utils.clean_path_info("/#{path}")
options[:http_path] = clean_path
end
parser.on("--no-quit", "Don't allow quitting the process") do
options[:quit] = false
end
unless windows?
parser.on("-f", "--foreground", "Run in the foreground") do
options[:daemon] = false
end
end
if browsable?
parser.on("-b", "--browse", "Open web browser") do
options[:browse] = true
end
end
parser.on("-v", "--verbose", "Be more verbose") do
options[:verbose] = true
end
parser.on_tail("-h", "--help", "Display this help information") do
puts parser
exit
end
parser.on_tail("--version", "Display the current version") do
puts "MailCatcher v#{VERSION}"
exit
end
end.parse!
end
end
def run! options=nil
# If we are passed options, fill in the blanks
options &&= @@defaults.merge options
# Otherwise, parse them from ARGV
options ||= parse!
# Stash them away for later
@@options = options
# If we're running in the foreground sync the output.
unless options[:daemon]
$stdout.sync = $stderr.sync = true
end
puts "Starting MailCatcher v#{VERSION}"
Thin::Logging.debug = development?
Thin::Logging.silent = !development?
# One EventMachine loop...
EventMachine.run do
# Set up an SMTP server to run within EventMachine
rescue_port options[:smtp_port] do
EventMachine.start_server options[:smtp_ip], options[:smtp_port], Smtp
puts "==> #{smtp_url}"
end
# Let Thin set itself up inside our EventMachine loop
# Faye connections are hijacked but continue to be supervised by thin
rescue_port options[:http_port] do
Thin::Server.start(options[:http_ip], options[:http_port], Web, signals: false)
puts "==> #{http_url}"
end
# Make sure we quit nicely when asked
# We need to handle outside the trap context, hence the timer
trap("INT") { EM.add_timer(0) { quit! } }
trap("TERM") { EM.add_timer(0) { quit! } }
trap("QUIT") { EM.add_timer(0) { quit! } } unless windows?
# Open the web browser before detaching console
if options[:browse]
EventMachine.next_tick do
browse http_url
end
end
# Daemonize, if we should, but only after the servers have started.
if options[:daemon]
EventMachine.next_tick do
if quittable?
puts "*** MailCatcher runs as a daemon by default. Go to the web interface to quit."
else
puts "*** MailCatcher is now running as a daemon that cannot be quit."
end
Process.daemon
end
end
end
end
def quit!
MailCatcher::Bus.push(type: "quit")
EventMachine.next_tick { EventMachine.stop_event_loop }
end
protected
def smtp_url
"smtp://#{@@options[:smtp_ip]}:#{@@options[:smtp_port]}"
end
def http_url
"http://#{@@options[:http_ip]}:#{@@options[:http_port]}#{@@options[:http_path]}".chomp("/")
end
def rescue_port port
begin
yield
# XXX: EventMachine only spits out RuntimeError with a string description
rescue RuntimeError
if $!.to_s =~ /\bno acceptor\b/
puts "~~> ERROR: Something's using port #{port}. Are you already running MailCatcher?"
puts "==> #{smtp_url}"
puts "==> #{http_url}"
exit -1
else
raise
end
end
end
end
| ruby | MIT | fbe811a53aeabe75b6e728b87f7f4c8fbf4b4843 | 2026-01-04T15:40:11.381229Z | false |
sj26/mailcatcher | https://github.com/sj26/mailcatcher/blob/fbe811a53aeabe75b6e728b87f7f4c8fbf4b4843/lib/mail_catcher/version.rb | lib/mail_catcher/version.rb | # frozen_string_literal: true
module MailCatcher
VERSION = "0.10.0"
end
| ruby | MIT | fbe811a53aeabe75b6e728b87f7f4c8fbf4b4843 | 2026-01-04T15:40:11.381229Z | false |
sj26/mailcatcher | https://github.com/sj26/mailcatcher/blob/fbe811a53aeabe75b6e728b87f7f4c8fbf4b4843/lib/mail_catcher/smtp.rb | lib/mail_catcher/smtp.rb | # frozen_string_literal: true
require "eventmachine"
require "mail_catcher/mail"
class MailCatcher::Smtp < EventMachine::Protocols::SmtpServer
# We override EM's mail from processing to allow multiple mail-from commands
# per [RFC 2821](https://tools.ietf.org/html/rfc2821#section-4.1.1.2)
def process_mail_from sender
if @state.include? :mail_from
@state -= [:mail_from, :rcpt, :data]
receive_reset
end
super
end
def current_message
@current_message ||= {}
end
def receive_reset
@current_message = nil
true
end
def receive_sender(sender)
# EventMachine SMTP advertises size extensions [https://tools.ietf.org/html/rfc1870]
# so strip potential " SIZE=..." suffixes from senders
sender = $` if sender =~ / SIZE=\d+\z/
current_message[:sender] = sender
true
end
def receive_recipient(recipient)
current_message[:recipients] ||= []
current_message[:recipients] << recipient
true
end
def receive_data_chunk(lines)
current_message[:source] ||= +""
lines.each do |line|
current_message[:source] << line << "\r\n"
end
true
end
def receive_message
MailCatcher::Mail.add_message current_message
MailCatcher::Mail.delete_older_messages!
puts "==> SMTP: Received message from '#{current_message[:sender]}' (#{current_message[:source].length} bytes)"
true
rescue => exception
MailCatcher.log_exception("Error receiving message", @current_message, exception)
false
ensure
@current_message = nil
end
end
| ruby | MIT | fbe811a53aeabe75b6e728b87f7f4c8fbf4b4843 | 2026-01-04T15:40:11.381229Z | false |
sj26/mailcatcher | https://github.com/sj26/mailcatcher/blob/fbe811a53aeabe75b6e728b87f7f4c8fbf4b4843/lib/mail_catcher/web.rb | lib/mail_catcher/web.rb | # frozen_string_literal: true
require "rack/builder"
require "mail_catcher/web/application"
module MailCatcher
module Web extend self
def app
@@app ||= Rack::Builder.new do
map(MailCatcher.options[:http_path]) do
if MailCatcher.development?
require "mail_catcher/web/assets"
map("/assets") { run Assets }
end
run Application
end
# This should only affect when http_path is anything but "/" above
run lambda { |env| [302, {"Location" => MailCatcher.options[:http_path]}, []] }
end
end
def call(env)
app.call(env)
end
end
end
| ruby | MIT | fbe811a53aeabe75b6e728b87f7f4c8fbf4b4843 | 2026-01-04T15:40:11.381229Z | false |
sj26/mailcatcher | https://github.com/sj26/mailcatcher/blob/fbe811a53aeabe75b6e728b87f7f4c8fbf4b4843/lib/mail_catcher/mail.rb | lib/mail_catcher/mail.rb | # frozen_string_literal: true
require "eventmachine"
require "json"
require "mail"
require "sqlite3"
module MailCatcher::Mail extend self
def db
@__db ||= begin
SQLite3::Database.new(":memory:", :type_translation => true).tap do |db|
db.execute(<<-SQL)
CREATE TABLE message (
id INTEGER PRIMARY KEY ASC,
sender TEXT,
recipients TEXT,
subject TEXT,
source BLOB,
size TEXT,
type TEXT,
created_at DATETIME DEFAULT CURRENT_DATETIME
)
SQL
db.execute(<<-SQL)
CREATE TABLE message_part (
id INTEGER PRIMARY KEY ASC,
message_id INTEGER NOT NULL,
cid TEXT,
type TEXT,
is_attachment INTEGER,
filename TEXT,
charset TEXT,
body BLOB,
size INTEGER,
created_at DATETIME DEFAULT CURRENT_DATETIME,
FOREIGN KEY (message_id) REFERENCES message (id) ON DELETE CASCADE
)
SQL
db.execute("PRAGMA foreign_keys = ON")
end
end
end
def add_message(message)
@add_message_query ||= db.prepare("INSERT INTO message (sender, recipients, subject, source, type, size, created_at) VALUES (?, ?, ?, ?, ?, ?, datetime('now'))")
mail = Mail.new(message[:source])
@add_message_query.execute(message[:sender], JSON.generate(message[:recipients]), mail.subject, message[:source], mail.mime_type || "text/plain", message[:source].length)
message_id = db.last_insert_row_id
parts = mail.all_parts
parts = [mail] if parts.empty?
parts.each do |part|
body = part.body.to_s
# Only parts have CIDs, not mail
cid = part.cid if part.respond_to? :cid
add_message_part(message_id, cid, part.mime_type || "text/plain", part.attachment? ? 1 : 0, part.filename, part.charset, body, body.length)
end
EventMachine.next_tick do
message = MailCatcher::Mail.message message_id
MailCatcher::Bus.push(type: "add", message: message)
end
end
def add_message_part(*args)
@add_message_part_query ||= db.prepare "INSERT INTO message_part (message_id, cid, type, is_attachment, filename, charset, body, size, created_at) VALUES (?, ?, ?, ?, ?, ?, ?, ?, datetime('now'))"
@add_message_part_query.execute(*args)
end
def latest_created_at
@latest_created_at_query ||= db.prepare "SELECT created_at FROM message ORDER BY created_at DESC LIMIT 1"
@latest_created_at_query.execute.next
end
def messages
@messages_query ||= db.prepare "SELECT id, sender, recipients, subject, size, created_at FROM message ORDER BY created_at, id ASC"
@messages_query.execute.map do |row|
Hash[row.fields.zip(row)].tap do |message|
message["recipients"] &&= JSON.parse(message["recipients"])
end
end
end
def message(id)
@message_query ||= db.prepare "SELECT id, sender, recipients, subject, size, type, created_at FROM message WHERE id = ? LIMIT 1"
row = @message_query.execute(id).next
row && Hash[row.fields.zip(row)].tap do |message|
message["recipients"] &&= JSON.parse(message["recipients"])
end
end
def message_source(id)
@message_source_query ||= db.prepare "SELECT source FROM message WHERE id = ? LIMIT 1"
row = @message_source_query.execute(id).next
row && row.first
end
def message_has_html?(id)
@message_has_html_query ||= db.prepare "SELECT 1 FROM message_part WHERE message_id = ? AND is_attachment = 0 AND type IN ('application/xhtml+xml', 'text/html') LIMIT 1"
(!!@message_has_html_query.execute(id).next) || ["text/html", "application/xhtml+xml"].include?(message(id)["type"])
end
def message_has_plain?(id)
@message_has_plain_query ||= db.prepare "SELECT 1 FROM message_part WHERE message_id = ? AND is_attachment = 0 AND type = 'text/plain' LIMIT 1"
(!!@message_has_plain_query.execute(id).next) || message(id)["type"] == "text/plain"
end
def message_parts(id)
@message_parts_query ||= db.prepare "SELECT cid, type, filename, size FROM message_part WHERE message_id = ? ORDER BY filename ASC"
@message_parts_query.execute(id).map do |row|
Hash[row.fields.zip(row)]
end
end
def message_attachments(id)
@message_parts_query ||= db.prepare "SELECT cid, type, filename, size FROM message_part WHERE message_id = ? AND is_attachment = 1 ORDER BY filename ASC"
@message_parts_query.execute(id).map do |row|
Hash[row.fields.zip(row)]
end
end
def message_part(message_id, part_id)
@message_part_query ||= db.prepare "SELECT * FROM message_part WHERE message_id = ? AND id = ? LIMIT 1"
row = @message_part_query.execute(message_id, part_id).next
row && Hash[row.fields.zip(row)]
end
def message_part_type(message_id, part_type)
@message_part_type_query ||= db.prepare "SELECT * FROM message_part WHERE message_id = ? AND type = ? AND is_attachment = 0 LIMIT 1"
row = @message_part_type_query.execute(message_id, part_type).next
row && Hash[row.fields.zip(row)]
end
def message_part_html(message_id)
part = message_part_type(message_id, "text/html")
part ||= message_part_type(message_id, "application/xhtml+xml")
part ||= begin
message = message(message_id)
message if message and ["text/html", "application/xhtml+xml"].include? message["type"]
end
end
def message_part_plain(message_id)
message_part_type message_id, "text/plain"
end
def message_part_cid(message_id, cid)
@message_part_cid_query ||= db.prepare "SELECT * FROM message_part WHERE message_id = ?"
@message_part_cid_query.execute(message_id).map do |row|
Hash[row.fields.zip(row)]
end.find do |part|
part["cid"] == cid
end
end
def delete!
@delete_all_messages_query ||= db.prepare "DELETE FROM message"
@delete_all_messages_query.execute
EventMachine.next_tick do
MailCatcher::Bus.push(type: "clear")
end
end
def delete_message!(message_id)
@delete_messages_query ||= db.prepare "DELETE FROM message WHERE id = ?"
@delete_messages_query.execute(message_id)
EventMachine.next_tick do
MailCatcher::Bus.push(type: "remove", id: message_id)
end
end
def delete_older_messages!(count = MailCatcher.options[:messages_limit])
return if count.nil?
@older_messages_query ||= db.prepare "SELECT id FROM message WHERE id NOT IN (SELECT id FROM message ORDER BY created_at DESC LIMIT ?)"
@older_messages_query.execute(count).map do |row|
Hash[row.fields.zip(row)]
end.each do |message|
delete_message!(message["id"])
end
end
end
| ruby | MIT | fbe811a53aeabe75b6e728b87f7f4c8fbf4b4843 | 2026-01-04T15:40:11.381229Z | false |
sj26/mailcatcher | https://github.com/sj26/mailcatcher/blob/fbe811a53aeabe75b6e728b87f7f4c8fbf4b4843/lib/mail_catcher/bus.rb | lib/mail_catcher/bus.rb | # frozen_string_literal: true
require "eventmachine"
module MailCatcher
Bus = EventMachine::Channel.new
end
| ruby | MIT | fbe811a53aeabe75b6e728b87f7f4c8fbf4b4843 | 2026-01-04T15:40:11.381229Z | false |
sj26/mailcatcher | https://github.com/sj26/mailcatcher/blob/fbe811a53aeabe75b6e728b87f7f4c8fbf4b4843/lib/mail_catcher/web/application.rb | lib/mail_catcher/web/application.rb | # frozen_string_literal: true
require "pathname"
require "net/http"
require "uri"
require "faye/websocket"
require "sinatra"
require "mail_catcher/bus"
require "mail_catcher/mail"
Faye::WebSocket.load_adapter("thin")
# Faye's adapter isn't smart enough to close websockets when thin is stopped,
# so we teach it to do so.
class Thin::Backends::Base
alias :thin_stop :stop
def stop
thin_stop
@connections.each_value do |connection|
if connection.socket_stream
connection.socket_stream.close_connection_after_writing
end
end
end
end
class Sinatra::Request
include Faye::WebSocket::Adapter
end
module MailCatcher
module Web
class Application < Sinatra::Base
set :environment, MailCatcher.env
set :prefix, MailCatcher.options[:http_path]
set :asset_prefix, File.join(prefix, "assets")
set :root, File.expand_path("#{__FILE__}/../../../..")
if development?
require "sprockets-helpers"
configure do
require "mail_catcher/web/assets"
Sprockets::Helpers.configure do |config|
config.environment = Assets
config.prefix = settings.asset_prefix
config.digest = false
config.public_path = public_folder
config.debug = true
end
end
helpers do
include Sprockets::Helpers
end
else
helpers do
def asset_path(filename)
File.join(settings.asset_prefix, filename)
end
end
end
get "/" do
erb :index
end
delete "/" do
if MailCatcher.quittable?
MailCatcher.quit!
status 204
else
status 403
end
end
get "/messages" do
if request.websocket?
bus_subscription = nil
ws = Faye::WebSocket.new(request.env)
ws.on(:open) do |_|
bus_subscription = MailCatcher::Bus.subscribe do |message|
begin
ws.send(JSON.generate(message))
rescue => exception
MailCatcher.log_exception("Error sending message through websocket", message, exception)
end
end
end
ws.on(:close) do |_|
MailCatcher::Bus.unsubscribe(bus_subscription) if bus_subscription
end
ws.rack_response
else
content_type :json
JSON.generate(Mail.messages)
end
end
delete "/messages" do
Mail.delete!
status 204
end
get "/messages/:id.json" do
id = params[:id].to_i
if message = Mail.message(id)
content_type :json
JSON.generate(message.merge({
"formats" => [
"source",
("html" if Mail.message_has_html? id),
("plain" if Mail.message_has_plain? id)
].compact,
"attachments" => Mail.message_attachments(id),
}))
else
not_found
end
end
get "/messages/:id.html" do
id = params[:id].to_i
if part = Mail.message_part_html(id)
content_type :html, :charset => (part["charset"] || "utf8")
body = part["body"]
# Rewrite body to link to embedded attachments served by cid
body.gsub! /cid:([^'"> ]+)/, "#{id}/parts/\\1"
body
else
not_found
end
end
get "/messages/:id.plain" do
id = params[:id].to_i
if part = Mail.message_part_plain(id)
content_type part["type"], :charset => (part["charset"] || "utf8")
part["body"]
else
not_found
end
end
get "/messages/:id.source" do
id = params[:id].to_i
if message_source = Mail.message_source(id)
content_type "text/plain"
message_source
else
not_found
end
end
get "/messages/:id.eml" do
id = params[:id].to_i
if message_source = Mail.message_source(id)
content_type "message/rfc822"
message_source
else
not_found
end
end
get "/messages/:id/parts/:cid" do
id = params[:id].to_i
if part = Mail.message_part_cid(id, params[:cid])
content_type part["type"], :charset => (part["charset"] || "utf8")
attachment part["filename"] if part["is_attachment"] == 1
body part["body"].to_s
else
not_found
end
end
delete "/messages/:id" do
id = params[:id].to_i
if Mail.message(id)
Mail.delete_message!(id)
status 204
else
not_found
end
end
not_found do
erb :"404"
end
end
end
end
| ruby | MIT | fbe811a53aeabe75b6e728b87f7f4c8fbf4b4843 | 2026-01-04T15:40:11.381229Z | false |
sj26/mailcatcher | https://github.com/sj26/mailcatcher/blob/fbe811a53aeabe75b6e728b87f7f4c8fbf4b4843/lib/mail_catcher/web/assets.rb | lib/mail_catcher/web/assets.rb | # frozen_string_literal: true
require "sprockets"
require "sprockets-sass"
require "compass"
module MailCatcher
module Web
Assets = Sprockets::Environment.new(File.expand_path("#{__FILE__}/../../../..")).tap do |sprockets|
Dir["#{sprockets.root}/{,vendor}/assets/*"].each do |path|
sprockets.append_path(path)
end
end
end
end
| ruby | MIT | fbe811a53aeabe75b6e728b87f7f4c8fbf4b4843 | 2026-01-04T15:40:11.381229Z | false |
kkuchta/css-only-chat | https://github.com/kkuchta/css-only-chat/blob/b5a2e3853803d0fb9b3f057b6f989a73e3f0e1b1/server.rb | server.rb | require 'redis'
require 'json'
require 'securerandom'
# Since some requests (intentionally) never complete, ctrl-c won't kill this
# server. Let's make sure it does.
Signal.trap(2) { exit }
# Misc redis keys
NEW_MESSAGE_CHANNEL = 'new_message_channel'.freeze
UPDATED_CLIENT_CHANNEL = 'updated_client_channel'.freeze
MESSAGE_LIST_KEY = 'message_list'.freeze
CLIENT_IDS_KEY = 'client_ids'.freeze
# Clear out any old messages when we boot up
redis = Redis.new(url: ENV['REDIS_URL'])
redis.del(MESSAGE_LIST_KEY)
redis.del(CLIENT_IDS_KEY)
class Server
def call(env)
request = Rack::Request.new(env)
response =
case request.path
when '/'
index
when '/style.css'
style
when %r{/img.*}
image(request.path)
when '/favicon.ico'
[404, {}, []]
end
response || [500, {}, ['oops']]
end
private
def redis
@redis ||= Redis.new(url: ENV['REDIS_URL'])
end
def index
# This endpoint streams forever. IndexStreamer implements an `each` that
# yields continuously (and blocks when there's nothing new to send)
[200, {}, IndexStreamer.new]
end
def style
[200, { 'Content-Type' => 'text/css'}, [File.read('style.css')]]
end
# Image names are `clientid_currentmessage_newbutton`, eg `bruce123_hellowor_l`
def decode_image_name(image_name)
client_id, current_message, new_letter = image_name.split('_')
{ client_id: client_id, current_message: current_message, new_letter: new_letter }
end
# Handle an image request. We don't actually serv any images - it's just a
# way for the client to send messages back to the server using the filename
# of the requested image.
def image(path)
image_name = path.split('/').last
button_press = decode_image_name(image_name)
puts "Decoded button_press to #{button_press}"
# `-` is our shorthand for a carriage return (needs to be a css-class-
# friendly character)
if button_press[:new_letter] == '-'
new_message = {
client_id: button_press[:client_id],
body: button_press[:current_message].split('-').last,
id: SecureRandom.uuid
}
# So we have a complete message now. Save it in the list of messages.
redis.lpush(MESSAGE_LIST_KEY, new_message.to_json)
# Let all clients know there's a new message to display
redis.publish(NEW_MESSAGE_CHANNEL, nil)
# Let the sending client know to update it's displayed "current message"
redis.publish(UPDATED_CLIENT_CHANNEL, {
client_id: button_press[:client_id],
new_string: button_press[:current_message] + button_press[:new_letter]
}.to_json)
else
# Got a new letter press. Tell the sending client to display an updated
# "current message."
redis.publish(UPDATED_CLIENT_CHANNEL, {
client_id: button_press[:client_id],
new_string: button_press[:current_message] + button_press[:new_letter]
}.to_json)
end
[200, {}, []]
end
end
# A class whose "each" method blocks while waiting for messages from redis. It
# yields new html to be streamed to a client and appended to the index.html
class IndexStreamer
def redis
@redis ||= Redis.new(url: ENV['REDIS_URL'])
end
def each(&each_block)
# Generate a random name to differentiate clients
# If a name already exists, the name is rerolled
begin
client_id = Faker::Name.first_name + rand(1000).to_s
end while redis.sismember(CLIENT_IDS_KEY, client_id)
redis.sadd(CLIENT_IDS_KEY, client_id)
puts "new client #{client_id}"
# Send the opening explanatory blurb and the initial onscreen keyboard.
each_block.call(intro_html(client_id))
each_block.call(keys_html('', client_id))
# Need a new redis connection here, since you can't make any requests to
# redis *after* a subscribe call on the same connection
Redis
.new(url: ENV['REDIS_URL'])
.subscribe(NEW_MESSAGE_CHANNEL, UPDATED_CLIENT_CHANNEL) do |on|
on.message do |channel, message|
message = JSON.parse(message) unless message.empty?
puts "#{client_id}: Just received message #{message} on channel #{channel}"
case channel
when NEW_MESSAGE_CHANNEL
each_block.call(messages_html)
when UPDATED_CLIENT_CHANNEL
puts "#{client_id}: got UPDATED_CLIENT_CHANNEL"
if message['client_id'] == client_id
puts "#{client_id}: it's for me. sending keys, #{message['new_string']}"
each_block.call(keys_html(message['new_string'], client_id))
end
end
end
end
# Should never really get here since the above stuff should block forever.
puts "#{client_id}: post-subscribe block?!"
end
def encode_image_name(client_id:, current_message:, new_letter:)
[client_id, current_message, new_letter].join('_')
end
def intro_html(client_id)
"<html><head><link rel='stylesheet' href='style.css'/></head><body>" +
"<h1>Welcome to CSS-only Chat!</h1>" +
"<p>This page uses no javascript whatsosever - only CSS and html. Blame @kkuchta for this.</p>" +
"<p>Your name is #{client_id}.</p>"
end
# The html that displays the list of previous messages (up to 100 of them)
def messages_html
messages = redis.lrange(MESSAGE_LIST_KEY, 0, 100)
puts "messages = #{messages}"
list_html = messages.map do |message|
message = JSON.parse(message)
"<p><b>#{message['client_id']}:</b> #{message['body']}</p>"
end.join
last_message = JSON.parse(messages[0])['id']
hide_previous_messages =
if messages.count >= 2
previous_last_message_id = JSON.parse(messages[1])['id']
previous_last_message_class = "messages_#{previous_last_message_id}"
"<style>.#{previous_last_message_class} { display: none; }</style>"
end
"<div class='messages messages_#{last_message}'>#{list_html}#{hide_previous_messages}</div>"
end
# The html that displays the keyboard keys. The keys, when ':active' (the css
# property of a button that's clicked), they'll get a background image assigned
# to them, which will only
def keys_html(previous_string, client_id)
previous_previous_string = previous_string[0..-2]
render_letter = ->(letter, label) {
image_name = encode_image_name(
client_id: client_id,
current_message: previous_string,
new_letter: letter
)
unique_class = 'insert_' + image_name
result = "<button class='letter_#{letter} #{unique_class}'>#{label}</button>"
result << "<style>.#{unique_class}:active { background-image: url('img/#{image_name}') }</style>"
# hide previous generation
unless previous_string == ''
previous_unique_class = 'insert_' + encode_image_name(
client_id: client_id,
current_message: previous_previous_string,
new_letter: letter
)
result << "<style>.#{previous_unique_class} { display: none; }</style>"
end
result
}
# Draw the keyboard
letters = ('a'..'z').to_a.map do |letter|
render_letter.call(letter, letter)
end.join(' ') + render_letter.call('-', 'submit')
clear_old_message = "<style>.message_#{previous_previous_string} { display: none }</style>"
message_content = previous_string.end_with?('-') ? '' : previous_string.split('-').last
message = "<div class='message_#{previous_string}'>Current Message: #{message_content || '...'}</div>"
"<div class='keys'>#{letters + clear_old_message + message}</div>"
end
end
| ruby | MIT | b5a2e3853803d0fb9b3f057b6f989a73e3f0e1b1 | 2026-01-04T15:40:18.447348Z | false |
ankane/searchkick | https://github.com/ankane/searchkick/blob/107d270a0d2d7e1935dc7fee1ea0305edb5e4b3b/benchmark/index.rb | benchmark/index.rb | require "bundler/setup"
Bundler.require(:default)
require "active_record"
require "active_job"
require "benchmark"
require "active_support/notifications"
ActiveSupport::Notifications.subscribe "request.searchkick" do |*args|
event = ActiveSupport::Notifications::Event.new(*args)
# puts "Import: #{event.duration.round}ms"
end
# ActiveJob::Base.queue_adapter = :sidekiq
class SearchSerializer
def dump(object)
JSON.generate(object)
end
end
# Elasticsearch::API.settings[:serializer] = SearchSerializer.new
# OpenSearch::API.settings[:serializer] = SearchSerializer.new
Searchkick.redis = Redis.new
ActiveRecord.default_timezone = :utc
ActiveRecord::Base.time_zone_aware_attributes = true
ActiveRecord::Base.establish_connection adapter: "sqlite3", database: "/tmp/searchkick"
# ActiveRecord::Base.establish_connection "postgresql://localhost/searchkick_bench"
# ActiveRecord::Base.logger = Logger.new(STDOUT)
ActiveJob::Base.logger = nil
class Product < ActiveRecord::Base
searchkick batch_size: 1000
def search_data
{
name: name,
color: color,
store_id: store_id
}
end
end
if ENV["SETUP"]
total_docs = 100000
ActiveRecord::Schema.define do
create_table :products, force: :cascade do |t|
t.string :name
t.string :color
t.integer :store_id
end
end
records = []
total_docs.times do |i|
records << {
name: "Product #{i}",
color: ["red", "blue"].sample,
store_id: rand(10)
}
end
Product.insert_all(records)
puts "Imported"
end
result = nil
report = nil
stats = nil
Product.searchkick_index.delete rescue nil
GC.start
GC.disable
start_mem = GetProcessMem.new.mb
time =
Benchmark.realtime do
# result = RubyProf::Profile.profile do
# report = MemoryProfiler.report do
# stats = AllocationStats.trace do
reindex = Product.reindex #(async: true)
# p reindex
# end
# 60.times do |i|
# if reindex.is_a?(Hash)
# docs = Searchkick::Index.new(reindex[:index_name]).total_docs
# else
# docs = Product.searchkick_index.total_docs
# end
# puts "#{i}: #{docs}"
# if docs == total_docs
# break
# end
# p Searchkick.reindex_status(reindex[:index_name]) if reindex.is_a?(Hash)
# sleep(1)
# # Product.searchkick_index.refresh
# end
end
puts "Time: #{time.round(1)}s"
if result
printer = RubyProf::GraphPrinter.new(result)
printer.print(STDOUT, min_percent: 5)
end
if report
puts report.pretty_print
end
if stats
puts result.allocations(alias_paths: true).group_by(:sourcefile, :class).to_text
end
| ruby | MIT | 107d270a0d2d7e1935dc7fee1ea0305edb5e4b3b | 2026-01-04T15:40:12.176737Z | false |
ankane/searchkick | https://github.com/ankane/searchkick/blob/107d270a0d2d7e1935dc7fee1ea0305edb5e4b3b/benchmark/search.rb | benchmark/search.rb | require "bundler/setup"
Bundler.require(:default)
require "active_record"
require "benchmark/ips"
ActiveRecord.default_timezone = :utc
ActiveRecord::Base.time_zone_aware_attributes = true
ActiveRecord::Base.establish_connection adapter: "sqlite3", database: "/tmp/searchkick"
class Product < ActiveRecord::Base
searchkick batch_size: 1000
def search_data
{
name: name,
color: color,
store_id: store_id
}
end
end
if ENV["SETUP"]
total_docs = 1000000
ActiveRecord::Schema.define do
create_table :products, force: :cascade do |t|
t.string :name
t.string :color
t.integer :store_id
end
end
records = []
total_docs.times do |i|
records << {
name: "Product #{i}",
color: ["red", "blue"].sample,
store_id: rand(10)
}
end
Product.insert_all(records)
puts "Imported"
Product.reindex
puts "Reindexed"
end
query = Product.search("product", fields: [:name], where: {color: "red", store_id: 5}, limit: 10000, load: false)
pp query.body.as_json
puts
Benchmark.ips do |x|
x.report { query.dup.load }
end
| ruby | MIT | 107d270a0d2d7e1935dc7fee1ea0305edb5e4b3b | 2026-01-04T15:40:12.176737Z | false |
ankane/searchkick | https://github.com/ankane/searchkick/blob/107d270a0d2d7e1935dc7fee1ea0305edb5e4b3b/test/geo_shape_test.rb | test/geo_shape_test.rb | require_relative "test_helper"
class GeoShapeTest < Minitest::Test
def setup
setup_region
store [
{
name: "Region A",
text: "The witch had a cat",
territory: {
type: "polygon",
coordinates: [[[30, 40], [35, 45], [40, 40], [40, 30], [30, 30], [30, 40]]]
}
},
{
name: "Region B",
text: "and a very tall hat",
territory: {
type: "polygon",
coordinates: [[[50, 60], [55, 65], [60, 60], [60, 50], [50, 50], [50, 60]]]
}
},
{
name: "Region C",
text: "and long ginger hair which she wore in a plait",
territory: {
type: "polygon",
coordinates: [[[10, 20], [15, 25], [20, 20], [20, 10], [10, 10], [10, 20]]]
}
}
]
end
def test_envelope
assert_search "*", ["Region A"], {
where: {
territory: {
geo_shape: {
type: "envelope",
coordinates: [[28, 42], [32, 38]]
}
}
}
}
end
def test_polygon
assert_search "*", ["Region A"], {
where: {
territory: {
geo_shape: {
type: "polygon",
coordinates: [[[38, 42], [42, 42], [42, 38], [38, 38], [38, 42]]]
}
}
}
}
end
def test_multipolygon
assert_search "*", ["Region A", "Region B"], {
where: {
territory: {
geo_shape: {
type: "multipolygon",
coordinates: [
[[[38, 42], [42, 42], [42, 38], [38, 38], [38, 42]]],
[[[58, 62], [62, 62], [62, 58], [58, 58], [58, 62]]]
]
}
}
}
}
end
def test_disjoint
assert_search "*", ["Region B", "Region C"], {
where: {
territory: {
geo_shape: {
type: "envelope",
relation: "disjoint",
coordinates: [[28, 42], [32, 38]]
}
}
}
}
end
def test_within
assert_search "*", ["Region A"], {
where: {
territory: {
geo_shape: {
type: "envelope",
relation: "within",
coordinates: [[20, 50], [50, 20]]
}
}
}
}
end
def test_search_match
assert_search "witch", ["Region A"], {
where: {
territory: {
geo_shape: {
type: "envelope",
coordinates: [[28, 42], [32, 38]]
}
}
}
}
end
def test_search_no_match
assert_search "ginger hair", [], {
where: {
territory: {
geo_shape: {
type: "envelope",
coordinates: [[28, 42], [32, 38]]
}
}
}
}
end
def test_latlon
assert_search "*", ["Region A"], {
where: {
territory: {
geo_shape: {
type: "envelope",
coordinates: [{lat: 42, lon: 28}, {lat: 38, lon: 32}]
}
}
}
}
end
def default_model
Region
end
end
| ruby | MIT | 107d270a0d2d7e1935dc7fee1ea0305edb5e4b3b | 2026-01-04T15:40:12.176737Z | false |
ankane/searchkick | https://github.com/ankane/searchkick/blob/107d270a0d2d7e1935dc7fee1ea0305edb5e4b3b/test/pagination_test.rb | test/pagination_test.rb | require_relative "test_helper"
class PaginationTest < Minitest::Test
def test_limit
store_names ["Product A", "Product B", "Product C", "Product D"]
assert_order "product", ["Product A", "Product B"], order: {name: :asc}, limit: 2
assert_order_relation ["Product A", "Product B"], Product.search("product").order(name: :asc).limit(2)
end
def test_no_limit
names = 20.times.map { |i| "Product #{i}" }
store_names names
assert_search "product", names
end
def test_offset
store_names ["Product A", "Product B", "Product C", "Product D"]
assert_order "product", ["Product C", "Product D"], order: {name: :asc}, offset: 2, limit: 100
assert_order_relation ["Product C", "Product D"], Product.search("product").order(name: :asc).offset(2).limit(100)
end
def test_pagination
store_names ["Product A", "Product B", "Product C", "Product D", "Product E", "Product F"]
products = Product.search("product", order: {name: :asc}, page: 2, per_page: 2, padding: 1)
assert_equal ["Product D", "Product E"], products.map(&:name)
assert_equal "product", products.entry_name
assert_equal 2, products.current_page
assert_equal 1, products.padding
assert_equal 2, products.per_page
assert_equal 2, products.size
assert_equal 2, products.length
assert_equal 3, products.total_pages
assert_equal 6, products.total_count
assert_equal 6, products.total_entries
assert_equal 2, products.limit_value
assert_equal 3, products.offset_value
assert_equal 3, products.offset
assert_equal 3, products.next_page
assert_equal 1, products.previous_page
assert_equal 1, products.prev_page
assert !products.first_page?
assert !products.last_page?
assert !products.empty?
assert !products.out_of_range?
assert products.any?
end
def test_relation
store_names ["Product A", "Product B", "Product C", "Product D", "Product E", "Product F"]
products = Product.search("product", padding: 1).order(name: :asc).page(2).per_page(2)
assert_equal ["Product D", "Product E"], products.map(&:name)
assert_equal "product", products.entry_name
assert_equal 2, products.current_page
assert_equal 1, products.padding
assert_equal 2, products.per_page
assert_equal 2, products.size
assert_equal 2, products.length
assert_equal 3, products.total_pages
assert_equal 6, products.total_count
assert_equal 6, products.total_entries
assert_equal 2, products.limit_value
assert_equal 3, products.offset_value
assert_equal 3, products.offset
assert_equal 3, products.next_page
assert_equal 1, products.previous_page
assert_equal 1, products.prev_page
assert !products.first_page?
assert !products.last_page?
assert !products.empty?
assert !products.out_of_range?
assert products.any?
end
def test_body
store_names ["Product A", "Product B", "Product C", "Product D", "Product E", "Product F"]
products = Product.search("product", body: {query: {match_all: {}}, sort: [{name: "asc"}]}, page: 2, per_page: 2, padding: 1)
assert_equal ["Product D", "Product E"], products.map(&:name)
assert_equal "product", products.entry_name
assert_equal 2, products.current_page
assert_equal 1, products.padding
assert_equal 2, products.per_page
assert_equal 2, products.size
assert_equal 2, products.length
assert_equal 3, products.total_pages
assert_equal 6, products.total_count
assert_equal 6, products.total_entries
assert_equal 2, products.limit_value
assert_equal 3, products.offset_value
assert_equal 3, products.offset
assert_equal 3, products.next_page
assert_equal 1, products.previous_page
assert_equal 1, products.prev_page
assert !products.first_page?
assert !products.last_page?
assert !products.empty?
assert !products.out_of_range?
assert products.any?
end
def test_nil_page
store_names ["Product A", "Product B", "Product C", "Product D", "Product E"]
products = Product.search("product", order: {name: :asc}, page: nil, per_page: 2)
assert_equal ["Product A", "Product B"], products.map(&:name)
assert_equal 1, products.current_page
assert products.first_page?
end
def test_strings
store_names ["Product A", "Product B", "Product C", "Product D", "Product E", "Product F"]
products = Product.search("product", order: {name: :asc}, page: "2", per_page: "2", padding: "1")
assert_equal ["Product D", "Product E"], products.map(&:name)
products = Product.search("product", order: {name: :asc}, limit: "2", offset: "3")
assert_equal ["Product D", "Product E"], products.map(&:name)
end
def test_total_entries
products = Product.search("product", total_entries: 4)
assert_equal 4, products.total_entries
end
def test_kaminari
require "action_view"
I18n.load_path = Dir["test/support/kaminari.yml"]
I18n.backend.load_translations
view = ActionView::Base.new(ActionView::LookupContext.new([]), [], nil)
store_names ["Product A"]
assert_equal "Displaying <b>1</b> product", view.page_entries_info(Product.search("product"))
store_names ["Product B"]
assert_equal "Displaying <b>all 2</b> products", view.page_entries_info(Product.search("product"))
store_names ["Product C"]
assert_equal "Displaying products <b>1 - 2</b> of <b>3</b> in total", view.page_entries_info(Product.search("product").per_page(2))
end
def test_deep_paging
with_options({deep_paging: true}, Song) do
assert_empty Song.search("*", offset: 10000, limit: 1).to_a
end
end
def test_no_deep_paging
Song.reindex
error = assert_raises(Searchkick::InvalidQueryError) do
Song.search("*", offset: 10000, limit: 1).to_a
end
assert_match "Result window is too large", error.message
end
def test_max_result_window
Song.delete_all
with_options({max_result_window: 10000}, Song) do
relation = Song.search("*", offset: 10000, limit: 1)
assert_empty relation.to_a
assert_equal 1, relation.per_page
assert_equal 0, relation.total_pages
end
end
def test_search_after
store_names ["Product A", "Product B", "Product C", "Product D"]
# ensure different created_at
store_names ["Product B"]
options = {order: {name: :asc, created_at: :asc}, per_page: 2}
products = Product.search("product", **options)
assert_equal ["Product A", "Product B"], products.map(&:name)
search_after = products.hits.last["sort"]
products = Product.search("product", body_options: {search_after: search_after}, **options)
assert_equal ["Product B", "Product C"], products.map(&:name)
search_after = products.hits.last["sort"]
products = Product.search("product", body_options: {search_after: search_after}, **options)
assert_equal ["Product D"], products.map(&:name)
end
def test_pit
skip unless pit_supported?
store_names ["Product A", "Product B", "Product D", "Product E", "Product G"]
pit_id =
if Searchkick.opensearch?
path = "#{CGI.escape(Product.searchkick_index.name)}/_search/point_in_time"
Searchkick.client.transport.perform_request("POST", path, {keep_alive: "5s"}).body["pit_id"]
else
Searchkick.client.open_point_in_time(index: Product.searchkick_index.name, keep_alive: "5s")["id"]
end
store_names ["Product C", "Product F"]
options = {
order: {name: :asc},
per_page: 2,
body_options: {pit: {id: pit_id}},
index_name: ""
}
products = Product.search("product", **options)
assert_equal ["Product A", "Product B"], products.map(&:name)
products = Product.search("product", page: 2, **options)
assert_equal ["Product D", "Product E"], products.map(&:name)
products = Product.search("product", page: 3, **options)
assert_equal ["Product G"], products.map(&:name)
products = Product.search("product", page: 4, **options)
assert_empty products.map(&:name)
if Searchkick.opensearch?
Searchkick.client.transport.perform_request("DELETE", "_search/point_in_time", {}, {pit_id: pit_id})
else
Searchkick.client.close_point_in_time(body: {id: pit_id})
end
error = assert_raises do
Product.search("product", **options).load
end
assert_match "No search context found for id", error.message
end
private
def pit_supported?
Searchkick.opensearch? ? !Searchkick.server_below?("2.4.0") : true
end
end
| ruby | MIT | 107d270a0d2d7e1935dc7fee1ea0305edb5e4b3b | 2026-01-04T15:40:12.176737Z | false |
ankane/searchkick | https://github.com/ankane/searchkick/blob/107d270a0d2d7e1935dc7fee1ea0305edb5e4b3b/test/log_subscriber_test.rb | test/log_subscriber_test.rb | require_relative "test_helper"
class LogSubscriberTest < Minitest::Test
def test_create
output = capture_logs do
Product.create!(name: "Product A")
end
assert_match "Product Store", output
end
def test_update
product = Product.create!(name: "Product A")
output = capture_logs do
product.reindex(:search_name)
end
assert_match "Product Update", output
end
def test_destroy
product = Product.create!(name: "Product A")
output = capture_logs do
product.destroy
end
assert_match "Product Remove", output
end
def test_bulk
output = capture_logs do
Searchkick.callbacks(:bulk) do
Product.create!(name: "Product A")
end
end
assert_match "Bulk", output
refute_match "Product Store", output
end
def test_reindex
create_products
output = capture_logs do
Product.reindex
end
assert_match "Product Import", output
assert_match '"count":3', output
end
def test_reindex_relation
products = create_products
output = capture_logs do
Product.where.not(id: products.last.id).reindex
end
assert_match "Product Import", output
assert_match '"count":2', output
end
def test_search
output = capture_logs do
Product.search("product").to_a
end
assert_match "Product Search", output
end
def test_multi_search
output = capture_logs do
Searchkick.multi_search([Product.search("product")])
end
assert_match "Multi Search", output
end
private
def create_products
Searchkick.callbacks(false) do
3.times.map do
Product.create!(name: "Product A")
end
end
end
def capture_logs
previous_logger = ActiveSupport::LogSubscriber.logger
io = StringIO.new
begin
ActiveSupport::LogSubscriber.logger = ActiveSupport::Logger.new(io)
yield
io.rewind
output = io.read
previous_logger.debug(output) if previous_logger
puts output if ENV["LOG_SUBSCRIBER"]
output
ensure
ActiveSupport::LogSubscriber.logger = previous_logger
end
end
end
| ruby | MIT | 107d270a0d2d7e1935dc7fee1ea0305edb5e4b3b | 2026-01-04T15:40:12.176737Z | false |
ankane/searchkick | https://github.com/ankane/searchkick/blob/107d270a0d2d7e1935dc7fee1ea0305edb5e4b3b/test/where_test.rb | test/where_test.rb | require_relative "test_helper"
class WhereTest < Minitest::Test
def test_where
now = Time.now
store [
{name: "Product A", store_id: 1, in_stock: true, backordered: true, created_at: now, orders_count: 4, user_ids: [1, 2, 3]},
{name: "Product B", store_id: 2, in_stock: true, backordered: false, created_at: now - 1, orders_count: 3, user_ids: [1]},
{name: "Product C", store_id: 3, in_stock: false, backordered: true, created_at: now - 2, orders_count: 2, user_ids: [1, 3]},
{name: "Product D", store_id: 4, in_stock: false, backordered: false, created_at: now - 3, orders_count: 1}
]
assert_search "product", ["Product A", "Product B"], where: {in_stock: true}
# arrays
assert_search "product", ["Product A"], where: {user_ids: 2}
assert_search "product", ["Product A", "Product C"], where: {user_ids: [2, 3]}
# date
assert_search "product", ["Product A"], where: {created_at: {gt: now - 1}}
assert_search "product", ["Product A", "Product B"], where: {created_at: {gte: now - 1}}
assert_search "product", ["Product D"], where: {created_at: {lt: now - 2}}
assert_search "product", ["Product C", "Product D"], where: {created_at: {lte: now - 2}}
# integer
assert_search "product", ["Product A"], where: {store_id: {lt: 2}}
assert_search "product", ["Product A", "Product B"], where: {store_id: {lte: 2}}
assert_search "product", ["Product D"], where: {store_id: {gt: 3}}
assert_search "product", ["Product C", "Product D"], where: {store_id: {gte: 3}}
# range
assert_search "product", ["Product A", "Product B"], where: {store_id: 1..2}
assert_search "product", ["Product A"], where: {store_id: 1...2}
assert_search "product", ["Product A", "Product B"], where: {store_id: [1, 2]}
assert_search "product", ["Product B", "Product C", "Product D"], where: {store_id: {not: 1}}
assert_search "product", ["Product B", "Product C", "Product D"], where: {store_id: {_not: 1}}
assert_search "product", ["Product C", "Product D"], where: {store_id: {not: [1, 2]}}
assert_search "product", ["Product C", "Product D"], where: {store_id: {_not: [1, 2]}}
assert_search "product", ["Product A"], where: {user_ids: {lte: 2, gte: 2}}
assert_search "product", ["Product A", "Product B", "Product C", "Product D"], where: {store_id: -Float::INFINITY..Float::INFINITY}
assert_search "product", ["Product C", "Product D"], where: {store_id: 3..Float::INFINITY}
assert_search "product", ["Product A", "Product B"], where: {store_id: -Float::INFINITY..2}
assert_search "product", ["Product C", "Product D"], where: {store_id: 3..}
assert_search "product", ["Product A", "Product B"], where: {store_id: ..2}
assert_search "product", ["Product A", "Product B"], where: {store_id: ...3}
# or
assert_search "product", ["Product A", "Product B", "Product C"], where: {or: [[{in_stock: true}, {store_id: 3}]]}
assert_search "product", ["Product A", "Product B", "Product C"], where: {or: [[{orders_count: [2, 4]}, {store_id: [1, 2]}]]}
assert_search "product", ["Product A", "Product D"], where: {or: [[{orders_count: 1}, {created_at: {gte: now - 1}, backordered: true}]]}
# _or
assert_search "product", ["Product A", "Product B", "Product C"], where: {_or: [{in_stock: true}, {store_id: 3}]}
assert_search "product", ["Product A", "Product B", "Product C"], where: {_or: [{orders_count: [2, 4]}, {store_id: [1, 2]}]}
assert_search "product", ["Product A", "Product D"], where: {_or: [{orders_count: 1}, {created_at: {gte: now - 1}, backordered: true}]}
# _and
assert_search "product", ["Product A"], where: {_and: [{in_stock: true}, {backordered: true}]}
# _not
assert_search "product", ["Product B", "Product C"], where: {_not: {_or: [{orders_count: 1}, {created_at: {gte: now - 1}, backordered: true}]}}
# all
assert_search "product", ["Product A", "Product C"], where: {user_ids: {all: [1, 3]}}
assert_search "product", [], where: {user_ids: {all: [1, 2, 3, 4]}}
# any / nested terms
assert_search "product", ["Product B", "Product C"], where: {user_ids: {not: [2], in: [1, 3]}}
assert_search "product", ["Product B", "Product C"], where: {user_ids: {_not: [2], in: [1, 3]}}
# not
assert_search "product", ["Product D"], where: {user_ids: nil}
assert_search "product", ["Product A", "Product B", "Product C"], where: {user_ids: {not: nil}}
assert_search "product", ["Product A", "Product B", "Product C"], where: {user_ids: {_not: nil}}
assert_search "product", ["Product A", "Product C", "Product D"], where: {user_ids: [3, nil]}
assert_search "product", ["Product B"], where: {user_ids: {not: [3, nil]}}
assert_search "product", ["Product B"], where: {user_ids: {_not: [3, nil]}}
end
def test_where_relation
now = Time.now
store [
{name: "Product A", store_id: 1, in_stock: true, backordered: true, created_at: now, orders_count: 4, user_ids: [1, 2, 3]},
{name: "Product B", store_id: 2, in_stock: true, backordered: false, created_at: now - 1, orders_count: 3, user_ids: [1]},
{name: "Product C", store_id: 3, in_stock: false, backordered: true, created_at: now - 2, orders_count: 2, user_ids: [1, 3]},
{name: "Product D", store_id: 4, in_stock: false, backordered: false, created_at: now - 3, orders_count: 1}
]
assert_search_relation ["Product A", "Product B"], Product.search("product").where(in_stock: true)
# multiple where
assert_search_relation ["Product A"], Product.search("product").where(in_stock: true).where(backordered: true)
# rewhere
assert_search_relation ["Product A", "Product C"], Product.search("product").where(in_stock: true).rewhere(backordered: true)
# not
assert_search_relation ["Product C", "Product D"], Product.search("product").where.not(in_stock: true)
assert_search_relation ["Product C"], Product.search("product").where.not(in_stock: true).where(backordered: true)
assert_search_relation ["Product A", "Product C"], Product.search("product").where.not(store_id: [2, 4])
# compound
assert_search_relation ["Product B", "Product C"], Product.search("product").where(_or: [{in_stock: true}, {backordered: true}]).where(_or: [{store_id: 2}, {orders_count: 2}])
end
def test_where_string_operators
error = assert_raises(ArgumentError) do
assert_search "product", [], where: {store_id: {"lt" => 2}}
end
assert_includes error.message, "Unknown where operator"
end
def test_unknown_operator
error = assert_raises(ArgumentError) do
assert_search "product", [], where: {store_id: {contains: "%2%"}}
end
assert_includes error.message, "Unknown where operator"
end
def test_regexp
store_names ["Product A"]
assert_search "*", ["Product A"], where: {name: /\APro.+\z/}
end
def test_alternate_regexp
store_names ["Product A", "Item B"]
assert_search "*", ["Product A"], where: {name: {regexp: "Pro.+"}}
end
def test_special_regexp
store_names ["Product <A>", "Item <B>"]
assert_search "*", ["Product <A>"], where: {name: /\APro.+<.+\z/}
end
def test_regexp_not_anchored
store_names ["abcde"]
assert_search "*", ["abcde"], where: {name: /abcd/}
assert_search "*", ["abcde"], where: {name: /bcde/}
assert_search "*", ["abcde"], where: {name: /abcde/}
assert_search "*", ["abcde"], where: {name: /.*bcd.*/}
end
def test_regexp_anchored
store_names ["abcde"]
assert_search "*", ["abcde"], where: {name: /\Aabcde\z/}
assert_search "*", ["abcde"], where: {name: /\Aabc/}
assert_search "*", ["abcde"], where: {name: /cde\z/}
assert_search "*", [], where: {name: /\Abcd/}
assert_search "*", [], where: {name: /bcd\z/}
end
def test_regexp_case
store_names ["abcde"]
assert_search "*", [], where: {name: /\AABCDE\z/}
assert_search "*", ["abcde"], where: {name: /\AABCDE\z/i}
end
def test_prefix
store_names ["Product A", "Product B", "Item C"]
assert_search "*", ["Product A", "Product B"], where: {name: {prefix: "Pro"}}
end
def test_exists
store [
{name: "Product A", user_ids: [1, 2]},
{name: "Product B"}
]
assert_search "product", ["Product A"], where: {user_ids: {exists: true}}
assert_search "product", ["Product B"], where: {user_ids: {exists: false}}
error = assert_raises(ArgumentError) do
assert_search "product", ["Product A"], where: {user_ids: {exists: nil}}
end
assert_equal "Passing a value other than true or false to exists is not supported", error.message
end
def test_like
store_names ["Product ABC", "Product DEF"]
assert_search "product", ["Product ABC"], where: {name: {like: "%ABC%"}}
assert_search "product", ["Product ABC"], where: {name: {like: "%ABC"}}
assert_search "product", [], where: {name: {like: "ABC"}}
assert_search "product", [], where: {name: {like: "ABC%"}}
assert_search "product", [], where: {name: {like: "ABC%"}}
assert_search "product", ["Product ABC"], where: {name: {like: "Product_ABC"}}
end
def test_like_escape
store_names ["Product 100%", "Product 1000"]
assert_search "product", ["Product 100%"], where: {name: {like: "% 100\\%"}}
end
def test_like_special_characters
store_names [
"Product ABC", "Product.ABC", "Product?ABC", "Product+ABC", "Product*ABC", "Product|ABC",
"Product{ABC}", "Product[ABC]", "Product(ABC)", "Product\"ABC\"", "Product\\ABC"
]
assert_search "*", ["Product.ABC"], where: {name: {like: "Product.A%"}}
assert_search "*", ["Product?ABC"], where: {name: {like: "Product?A%"}}
assert_search "*", ["Product+ABC"], where: {name: {like: "Product+A%"}}
assert_search "*", ["Product*ABC"], where: {name: {like: "Product*A%"}}
assert_search "*", ["Product|ABC"], where: {name: {like: "Product|A%"}}
assert_search "*", ["Product{ABC}"], where: {name: {like: "%{ABC}"}}
assert_search "*", ["Product[ABC]"], where: {name: {like: "%[ABC]"}}
assert_search "*", ["Product(ABC)"], where: {name: {like: "%(ABC)"}}
assert_search "*", ["Product\"ABC\""], where: {name: {like: "%\"ABC\""}}
assert_search "*", ["Product\\ABC"], where: {name: {like: "Product\\A%"}}
end
def test_like_optional_operators
store_names ["Product A&B", "Product B", "Product <3", "Product @Home"]
assert_search "product", ["Product A&B"], where: {name: {like: "%A&B"}}
assert_search "product", ["Product <3"], where: {name: {like: "%<%"}}
assert_search "product", ["Product @Home"], where: {name: {like: "%@Home%"}}
end
def test_ilike
store_names ["Product ABC", "Product DEF"]
assert_search "product", ["Product ABC"], where: {name: {ilike: "%abc%"}}
assert_search "product", ["Product ABC"], where: {name: {ilike: "%abc"}}
assert_search "product", [], where: {name: {ilike: "abc"}}
assert_search "product", [], where: {name: {ilike: "abc%"}}
assert_search "product", [], where: {name: {ilike: "abc%"}}
assert_search "product", ["Product ABC"], where: {name: {ilike: "Product_abc"}}
end
def test_ilike_escape
store_names ["Product 100%", "Product B"]
assert_search "product", ["Product 100%"], where: {name: {ilike: "% 100\\%"}}
end
def test_ilike_special_characters
store_names ["Product ABC\"", "Product B"]
assert_search "product", ["Product ABC\""], where: {name: {ilike: "%abc\""}}
end
def test_ilike_optional_operators
store_names ["Product A&B", "Product B", "Product <3", "Product @Home"]
assert_search "product", ["Product A&B"], where: {name: {ilike: "%a&b"}}
assert_search "product", ["Product <3"], where: {name: {ilike: "%<%"}}
assert_search "product", ["Product @Home"], where: {name: {ilike: "%@home%"}}
end
def test_script
store [
{name: "Product A", store_id: 1},
{name: "Product B", store_id: 10}
]
assert_search "product", ["Product A"], where: {_script: Searchkick.script("doc['store_id'].value < 10")}
assert_search "product", ["Product A"], where: {_script: Searchkick.script("doc['store_id'].value < 10", lang: "expression")}
assert_search "product", ["Product A"], where: {_script: Searchkick.script("doc['store_id'].value < params['value']", params: {value: 10})}
end
def test_script_string
error = assert_raises(TypeError) do
assert_search "product", ["Product A"], where: {_script: "doc['store_id'].value < 10"}
end
assert_equal "expected Searchkick::Script", error.message
end
def test_where_string
store [
{name: "Product A", color: "RED"}
]
assert_search "product", ["Product A"], where: {color: "RED"}
end
def test_where_nil
store [
{name: "Product A"},
{name: "Product B", color: "red"}
]
assert_search "product", ["Product A"], where: {color: nil}
end
def test_where_id
store_names ["Product A"]
product = Product.first
assert_search "product", ["Product A"], where: {id: product.id.to_s}
end
def test_where_empty
store_names ["Product A"]
assert_search "product", ["Product A"], where: {}
end
def test_where_empty_array
store_names ["Product A"]
assert_search "product", [], where: {store_id: []}
end
# https://discuss.elastic.co/t/numeric-range-quey-or-filter-in-an-array-field-possible-or-not/14053
# https://gist.github.com/jprante/7099463
def test_where_range_array
store [
{name: "Product A", user_ids: [11, 23, 13, 16, 17, 23]},
{name: "Product B", user_ids: [1, 2, 3, 4, 5, 6, 7, 8, 9]},
{name: "Product C", user_ids: [101, 230, 150, 200]}
]
assert_search "product", ["Product A"], where: {user_ids: {gt: 10, lt: 24}}
end
def test_where_range_array_again
store [
{name: "Product A", user_ids: [19, 32, 42]},
{name: "Product B", user_ids: [13, 40, 52]}
]
assert_search "product", ["Product A"], where: {user_ids: {gt: 26, lt: 36}}
end
def test_near
store [
{name: "San Francisco", latitude: 37.7833, longitude: -122.4167},
{name: "San Antonio", latitude: 29.4167, longitude: -98.5000}
]
assert_search "san", ["San Francisco"], where: {location: {near: [37.5, -122.5]}}
end
def test_near_hash
store [
{name: "San Francisco", latitude: 37.7833, longitude: -122.4167},
{name: "San Antonio", latitude: 29.4167, longitude: -98.5000}
]
assert_search "san", ["San Francisco"], where: {location: {near: {lat: 37.5, lon: -122.5}}}
end
def test_near_within
store [
{name: "San Francisco", latitude: 37.7833, longitude: -122.4167},
{name: "San Antonio", latitude: 29.4167, longitude: -98.5000},
{name: "San Marino", latitude: 43.9333, longitude: 12.4667}
]
assert_search "san", ["San Francisco", "San Antonio"], where: {location: {near: [37, -122], within: "2000mi"}}
end
def test_near_within_hash
store [
{name: "San Francisco", latitude: 37.7833, longitude: -122.4167},
{name: "San Antonio", latitude: 29.4167, longitude: -98.5000},
{name: "San Marino", latitude: 43.9333, longitude: 12.4667}
]
assert_search "san", ["San Francisco", "San Antonio"], where: {location: {near: {lat: 37, lon: -122}, within: "2000mi"}}
end
def test_geo_polygon
store [
{name: "San Francisco", latitude: 37.7833, longitude: -122.4167},
{name: "San Antonio", latitude: 29.4167, longitude: -98.5000},
{name: "San Marino", latitude: 43.9333, longitude: 12.4667}
]
polygon = [
{lat: 42.185695, lon: -125.496146},
{lat: 42.185695, lon: -94.125535},
{lat: 27.122789, lon: -94.125535},
{lat: 27.12278, lon: -125.496146}
]
assert_search "san", ["San Francisco", "San Antonio"], where: {location: {geo_polygon: {points: polygon}}}
polygon << polygon.first
assert_search "san", ["San Francisco", "San Antonio"], where: {location: {geo_shape: {type: "polygon", coordinates: [polygon]}}}
end
def test_top_left_bottom_right
store [
{name: "San Francisco", latitude: 37.7833, longitude: -122.4167},
{name: "San Antonio", latitude: 29.4167, longitude: -98.5000}
]
assert_search "san", ["San Francisco"], where: {location: {top_left: [38, -123], bottom_right: [37, -122]}}
end
def test_top_left_bottom_right_hash
store [
{name: "San Francisco", latitude: 37.7833, longitude: -122.4167},
{name: "San Antonio", latitude: 29.4167, longitude: -98.5000}
]
assert_search "san", ["San Francisco"], where: {location: {top_left: {lat: 38, lon: -123}, bottom_right: {lat: 37, lon: -122}}}
end
def test_top_right_bottom_left
store [
{name: "San Francisco", latitude: 37.7833, longitude: -122.4167},
{name: "San Antonio", latitude: 29.4167, longitude: -98.5000}
]
assert_search "san", ["San Francisco"], where: {location: {top_right: [38, -122], bottom_left: [37, -123]}}
end
def test_top_right_bottom_left_hash
store [
{name: "San Francisco", latitude: 37.7833, longitude: -122.4167},
{name: "San Antonio", latitude: 29.4167, longitude: -98.5000}
]
assert_search "san", ["San Francisco"], where: {location: {top_right: {lat: 38, lon: -122}, bottom_left: {lat: 37, lon: -123}}}
end
def test_multiple_locations
store [
{name: "San Francisco", latitude: 37.7833, longitude: -122.4167},
{name: "San Antonio", latitude: 29.4167, longitude: -98.5000}
]
assert_search "san", ["San Francisco"], where: {multiple_locations: {near: [37.5, -122.5]}}
end
def test_multiple_locations_with_term_filter
store [
{name: "San Francisco", latitude: 37.7833, longitude: -122.4167},
{name: "San Antonio", latitude: 29.4167, longitude: -98.5000}
]
assert_search "san", [], where: {multiple_locations: {near: [37.5, -122.5]}, name: "San Antonio"}
assert_search "san", ["San Francisco"], where: {multiple_locations: {near: [37.5, -122.5]}, name: "San Francisco"}
end
def test_multiple_locations_hash
store [
{name: "San Francisco", latitude: 37.7833, longitude: -122.4167},
{name: "San Antonio", latitude: 29.4167, longitude: -98.5000}
]
assert_search "san", ["San Francisco"], where: {multiple_locations: {near: {lat: 37.5, lon: -122.5}}}
end
def test_nested
store [
{name: "Product A", details: {year: 2016}}
]
assert_search "product", ["Product A"], where: {"details.year" => 2016}
end
end
| ruby | MIT | 107d270a0d2d7e1935dc7fee1ea0305edb5e4b3b | 2026-01-04T15:40:12.176737Z | false |
ankane/searchkick | https://github.com/ankane/searchkick/blob/107d270a0d2d7e1935dc7fee1ea0305edb5e4b3b/test/unscope_test.rb | test/unscope_test.rb | require_relative "test_helper"
class UnscopeTest < Minitest::Test
def setup
@@once ||= Artist.reindex
Artist.unscoped.destroy_all
end
def test_reindex
create_records
Artist.reindex
assert_search "*", ["Test", "Test 2"]
assert_search "*", ["Test", "Test 2"], {load: false}
end
def test_relation_async
create_records
perform_enqueued_jobs do
Artist.unscoped.reindex(mode: :async)
end
Artist.searchkick_index.refresh
assert_search "*", ["Test", "Test 2"]
end
def create_records
store [
{name: "Test", active: true, should_index: true},
{name: "Test 2", active: false, should_index: true},
{name: "Test 3", active: false, should_index: false}
], reindex: false
end
def default_model
Artist
end
end
| ruby | MIT | 107d270a0d2d7e1935dc7fee1ea0305edb5e4b3b | 2026-01-04T15:40:12.176737Z | false |
ankane/searchkick | https://github.com/ankane/searchkick/blob/107d270a0d2d7e1935dc7fee1ea0305edb5e4b3b/test/relation_test.rb | test/relation_test.rb | require_relative "test_helper"
class RelationTest < Minitest::Test
def test_loaded
Product.searchkick_index.refresh
products = Product.search("*")
refute products.loaded?
assert_equal 0, products.count
assert products.loaded?
refute products.clone.loaded?
refute products.dup.loaded?
refute products.limit(2).loaded?
error = assert_raises(Searchkick::Error) do
products.limit!(2)
end
assert_equal "Relation loaded", error.message
end
def test_mutating
store_names ["Product A", "Product B"]
products = Product.search("*").order(:name)
products.limit!(1)
assert_equal ["Product A"], products.map(&:name)
end
def test_load
products = Product.search("*")
refute products.loaded?
assert products.load.loaded?
assert products.load.load.loaded?
end
def test_clone
products = Product.search("*")
assert_equal 10, products.limit(10).limit_value
assert_equal 10000, products.limit_value
end
def test_only
assert_equal 10, Product.search("*").limit(10).only(:limit).limit_value
end
def test_except
assert_equal 10000, Product.search("*").limit(10).except(:limit).limit_value
end
def test_first
store_names ["Product A", "Product B"]
products = Product.search("product")
assert_kind_of Product, products.first
assert_kind_of Array, products.first(1)
assert_equal 1, products.limit(1).first(2).size
end
def test_first_loaded
store_names ["Product A", "Product B"]
products = Product.search("product").load
assert_kind_of Product, products.first
end
# TODO call pluck or select on Active Record query
# currently uses pluck from Active Support enumerable
def test_pluck
store_names ["Product A", "Product B"]
assert_equal ["Product A", "Product B"], Product.search("product").pluck(:name).sort
assert_equal ["Product A", "Product B"], Product.search("product").load(false).pluck(:name).sort
end
def test_model
assert_equal Product, Product.search("product").model
assert_nil Searchkick.search("product").model
end
def test_klass
assert_equal Product, Product.search("product").klass
assert_nil Searchkick.search("product").klass
end
def test_respond_to
relation = Product.search("product")
assert relation.respond_to?(:page)
assert relation.respond_to?(:response)
assert relation.respond_to?(:size)
refute relation.respond_to?(:hello)
refute relation.loaded?
end
def test_to_yaml
store_names ["Product A", "Product B"]
if mongoid?
assert_equal Product.all.to_a.to_yaml, Product.search("product").to_yaml
else
assert_equal Product.all.to_yaml, Product.search("product").to_yaml
end
end
end
| ruby | MIT | 107d270a0d2d7e1935dc7fee1ea0305edb5e4b3b | 2026-01-04T15:40:12.176737Z | false |
ankane/searchkick | https://github.com/ankane/searchkick/blob/107d270a0d2d7e1935dc7fee1ea0305edb5e4b3b/test/reindex_test.rb | test/reindex_test.rb | require_relative "test_helper"
class ReindexTest < Minitest::Test
def test_record_inline
store_names ["Product A", "Product B"], reindex: false
product = Product.find_by!(name: "Product A")
assert_equal true, product.reindex(refresh: true)
assert_search "product", ["Product A"]
end
def test_record_destroyed
store_names ["Product A", "Product B"]
product = Product.find_by!(name: "Product A")
product.destroy
Product.searchkick_index.refresh
assert_equal true, product.reindex
end
def test_record_async
store_names ["Product A", "Product B"], reindex: false
product = Product.find_by!(name: "Product A")
perform_enqueued_jobs do
assert_equal true, product.reindex(mode: :async)
end
Product.searchkick_index.refresh
assert_search "product", ["Product A"]
end
def test_record_async_job_options
product = Product.create!(name: "Product A")
assert_enqueued_jobs(1, queue: "test") do
assert_equal true, product.reindex(mode: :async, job_options: {queue: "test"})
end
end
def test_record_queue
reindex_queue = Product.searchkick_index.reindex_queue
reindex_queue.clear
store_names ["Product A", "Product B"], reindex: false
product = Product.find_by!(name: "Product A")
assert_equal true, product.reindex(mode: :queue)
Product.searchkick_index.refresh
assert_search "product", []
perform_enqueued_jobs do
Searchkick::ProcessQueueJob.perform_now(class_name: "Product")
end
Product.searchkick_index.refresh
assert_search "product", ["Product A"]
end
def test_process_queue_job_options
product = Product.create!(name: "Product A")
product.reindex(mode: :queue)
assert_enqueued_jobs(1, queue: "test") do
Searchkick::ProcessQueueJob.perform_now(class_name: "Product", job_options: {queue: "test"})
end
end
def test_record_index
store_names ["Product A", "Product B"], reindex: false
product = Product.find_by!(name: "Product A")
assert_equal true, Product.searchkick_index.reindex([product], refresh: true)
assert_search "product", ["Product A"]
end
def test_relation_inline
store_names ["Product A"]
store_names ["Product B", "Product C"], reindex: false
Product.where(name: "Product B").reindex(refresh: true)
assert_search "product", ["Product A", "Product B"]
end
def test_relation_associations
store_names ["Product A"]
store = Store.create!(name: "Test")
Product.create!(name: "Product B", store_id: store.id)
assert_equal true, store.products.reindex(refresh: true)
assert_search "product", ["Product A", "Product B"]
end
def test_relation_scoping
store_names ["Product A", "Product B"]
Product.dynamic_data = lambda do
{
name: "Count #{Product.count}"
}
end
Product.where(name: "Product A").reindex(refresh: true)
assert_search "count", ["Count 2"], load: false
ensure
Product.dynamic_data = nil
end
def test_relation_scoping_restored
# TODO add test for Mongoid
skip unless activerecord?
assert_nil Product.current_scope
Product.where(name: "Product A").scoping do
scope = Product.current_scope
refute_nil scope
Product.all.reindex(refresh: true)
# note: should be reset even if we don't do it
assert_equal scope, Product.current_scope
end
assert_nil Product.current_scope
end
def test_relation_should_index
store_names ["Product A", "Product B"]
Searchkick.callbacks(false) do
Product.find_by(name: "Product B").update!(name: "DO NOT INDEX")
end
assert_equal true, Product.where(name: "DO NOT INDEX").reindex
Product.searchkick_index.refresh
assert_search "product", ["Product A"]
end
def test_relation_async
store_names ["Product A"]
store_names ["Product B", "Product C"], reindex: false
perform_enqueued_jobs do
Product.where(name: "Product B").reindex(mode: :async)
end
Product.searchkick_index.refresh
assert_search "product", ["Product A", "Product B"]
end
def test_relation_async_should_index
store_names ["Product A", "Product B"]
Searchkick.callbacks(false) do
Product.find_by(name: "Product B").update!(name: "DO NOT INDEX")
end
perform_enqueued_jobs do
assert_equal true, Product.where(name: "DO NOT INDEX").reindex(mode: :async)
end
Product.searchkick_index.refresh
assert_search "product", ["Product A"]
end
def test_relation_async_routing
store_names ["Store A"], Store, reindex: false
perform_enqueued_jobs do
Store.where(name: "Store A").reindex(mode: :async)
end
Store.searchkick_index.refresh
assert_search "*", ["Store A"], {routing: "Store A"}, Store
end
def test_relation_async_job_options
store_names ["Store A"], Store, reindex: false
assert_enqueued_jobs(1, queue: "test") do
Store.where(name: "Store A").reindex(mode: :async, job_options: {queue: "test"})
end
end
def test_relation_queue
reindex_queue = Product.searchkick_index.reindex_queue
reindex_queue.clear
store_names ["Product A"]
store_names ["Product B", "Product C"], reindex: false
Product.where(name: "Product B").reindex(mode: :queue)
Product.searchkick_index.refresh
assert_search "product", ["Product A"]
perform_enqueued_jobs do
Searchkick::ProcessQueueJob.perform_now(class_name: "Product")
end
Product.searchkick_index.refresh
assert_search "product", ["Product A", "Product B"]
end
def test_relation_queue_all
reindex_queue = Product.searchkick_index.reindex_queue
reindex_queue.clear
store_names ["Product A"]
store_names ["Product B", "Product C"], reindex: false
Product.all.reindex(mode: :queue)
Product.searchkick_index.refresh
assert_search "product", ["Product A"]
perform_enqueued_jobs do
Searchkick::ProcessQueueJob.perform_now(class_name: "Product")
end
Product.searchkick_index.refresh
assert_search "product", ["Product A", "Product B", "Product C"]
end
def test_relation_queue_routing
reindex_queue = Store.searchkick_index.reindex_queue
reindex_queue.clear
store_names ["Store A"], Store, reindex: false
Store.where(name: "Store A").reindex(mode: :queue)
Store.searchkick_index.refresh
assert_search "*", [], {}, Store
perform_enqueued_jobs do
Searchkick::ProcessQueueJob.perform_now(class_name: "Store")
end
Store.searchkick_index.refresh
assert_search "*", ["Store A"], {routing: "Store A"}, Store
end
def test_relation_index
store_names ["Product A"]
store_names ["Product B", "Product C"], reindex: false
Product.searchkick_index.reindex(Product.where(name: "Product B"), refresh: true)
assert_search "product", ["Product A", "Product B"]
end
def test_full_async
store_names ["Product A"], reindex: false
reindex = nil
perform_enqueued_jobs do
reindex = Product.reindex(mode: :async)
assert_search "product", [], conversions: false
end
index = Searchkick::Index.new(reindex[:index_name])
index.refresh
assert_equal 1, index.total_docs
reindex_status = Searchkick.reindex_status(reindex[:name])
assert_equal true, reindex_status[:completed]
assert_equal 0, reindex_status[:batches_left]
Product.searchkick_index.promote(reindex[:index_name])
assert_search "product", ["Product A"]
end
def test_full_async_should_index
store_names ["Product A", "Product B", "DO NOT INDEX"], reindex: false
reindex = nil
perform_enqueued_jobs do
reindex = Product.reindex(mode: :async)
end
index = Searchkick::Index.new(reindex[:index_name])
index.refresh
assert_equal 2, index.total_docs
end
def test_full_async_wait
store_names ["Product A"], reindex: false
perform_enqueued_jobs do
capture_io do
Product.reindex(mode: :async, wait: true)
end
end
assert_search "product", ["Product A"]
end
def test_full_async_job_options
store_names ["Product A"], reindex: false
assert_enqueued_jobs(1, queue: "test") do
Product.reindex(mode: :async, job_options: {queue: "test"})
end
end
def test_full_async_non_integer_pk
Sku.create(id: SecureRandom.hex, name: "Test")
reindex = nil
perform_enqueued_jobs do
reindex = Sku.reindex(mode: :async)
assert_search "sku", [], conversions: false
end
index = Searchkick::Index.new(reindex[:index_name])
index.refresh
assert_equal 1, index.total_docs
ensure
Sku.destroy_all
end
def test_full_queue
error = assert_raises(ArgumentError) do
Product.reindex(mode: :queue)
end
assert_equal "Full reindex does not support :queue mode - use :async mode instead", error.message
end
def test_full_refresh_interval
reindex = Product.reindex(refresh_interval: "30s", mode: :async, import: false)
index = Searchkick::Index.new(reindex[:index_name])
assert_nil Product.searchkick_index.refresh_interval
assert_equal "30s", index.refresh_interval
Product.searchkick_index.promote(index.name, update_refresh_interval: true)
assert_equal "1s", index.refresh_interval
assert_equal "1s", Product.searchkick_index.refresh_interval
end
def test_full_resume
if mongoid?
error = assert_raises(Searchkick::Error) do
Product.reindex(resume: true)
end
assert_equal "Resume not supported for Mongoid", error.message
else
assert Product.reindex(resume: true)
end
end
def test_full_refresh
Product.reindex(refresh: true)
end
def test_full_partial_async
store_names ["Product A"]
Product.reindex(:search_name, mode: :async)
assert_search "product", ["Product A"]
end
def test_wait_not_async
error = assert_raises(ArgumentError) do
Product.reindex(wait: false)
end
assert_equal "wait only available in :async mode", error.message
end
def test_object_index
error = assert_raises(Searchkick::Error) do
Product.searchkick_index.reindex(Object.new)
end
assert_equal "Cannot reindex object", error.message
end
def test_transaction
skip unless activerecord?
Product.transaction do
store_names ["Product A"]
raise ActiveRecord::Rollback
end
assert_search "*", []
end
def test_both_paths
Product.searchkick_index.delete if Product.searchkick_index.exists?
Product.reindex
Product.reindex # run twice for both index paths
end
end
| ruby | MIT | 107d270a0d2d7e1935dc7fee1ea0305edb5e4b3b | 2026-01-04T15:40:12.176737Z | false |
ankane/searchkick | https://github.com/ankane/searchkick/blob/107d270a0d2d7e1935dc7fee1ea0305edb5e4b3b/test/highlight_test.rb | test/highlight_test.rb | require_relative "test_helper"
# Tests for the `highlight` search option, which asks the search server to
# return the matched query terms wrapped in tags (default <em>) for each hit.
class HighlightTest < Minitest::Test
  def test_basic
    store_names ["Two Door Cinema Club"]
    assert_equal "Two Door <em>Cinema</em> Club", Product.search("cinema", highlight: true).highlights.first[:name]
  end

  # with_highlights yields [record, highlights] pairs.
  def test_with_highlights
    store_names ["Two Door Cinema Club"]
    assert_equal "Two Door <em>Cinema</em> Club", Product.search("cinema", highlight: true).with_highlights.first.last[:name]
  end

  # The tag option works both as a search option and as a relation method.
  def test_tag
    store_names ["Two Door Cinema Club"]
    assert_equal "Two Door <strong>Cinema</strong> Club", Product.search("cinema", highlight: {tag: "<strong>"}).highlights.first[:name]
    assert_equal "Two Door <strong>Cinema</strong> Club", Product.search("cinema").highlight(tag: "<strong>").highlights.first[:name]
  end

  # Attributes inside the opening tag are preserved.
  def test_tag_class
    store_names ["Two Door Cinema Club"]
    assert_equal "Two Door <strong class='classy'>Cinema</strong> Club", Product.search("cinema", highlight: {tag: "<strong class='classy'>"}).highlights.first[:name]
  end

  # Long documents should still be highlighted in full (no fragment truncation).
  def test_very_long
    store_names [("Two Door Cinema Club " * 100).strip]
    assert_equal ("Two Door <em>Cinema</em> Club " * 100).strip, Product.search("cinema", highlight: true).highlights.first[:name]
  end

  def test_multiple_fields
    store [{name: "Two Door Cinema Club", color: "Cinema Orange"}]
    highlights = Product.search("cinema", fields: [:name, :color], highlight: true).highlights.first
    assert_equal "Two Door <em>Cinema</em> Club", highlights[:name]
    assert_equal "<em>Cinema</em> Orange", highlights[:color]
  end

  # highlight: {fields: ...} restricts which searched fields get highlighted.
  def test_fields
    store [{name: "Two Door Cinema Club", color: "Cinema Orange"}]
    highlights = Product.search("cinema", fields: [:name, :color], highlight: {fields: [:name]}).highlights.first
    assert_equal "Two Door <em>Cinema</em> Club", highlights[:name]
    assert_nil highlights[:color]
  end

  def test_field_options
    store_names ["Two Door Cinema Club are a Northern Irish indie rock band"]
    # fragment size differs under word_start matching (MATCH env var) — TODO confirm why
    fragment_size = ENV["MATCH"] == "word_start" ? 26 : 21
    assert_equal "Two Door <em>Cinema</em> Club are", Product.search("cinema", highlight: {fields: {name: {fragment_size: fragment_size}}}).highlights.first[:name]
  end

  def test_multiple_words
    store_names ["Hello World Hello"]
    assert_equal "<em>Hello</em> World <em>Hello</em>", Product.search("hello", highlight: true).highlights.first[:name]
  end

  # encoder: "html" escapes markup already present in the document.
  def test_encoder
    store_names ["<b>Hello</b>"]
    assert_equal "<b><em>Hello</em></b>", Product.search("hello", highlight: {encoder: "html"}, misspellings: false).highlights.first[:name]
  end

  def test_word_middle
    store_names ["Two Door Cinema Club"]
    assert_equal "Two Door <em>Cinema</em> Club", Product.search("ine", match: :word_middle, highlight: true).highlights.first[:name]
  end

  # Raw body searches can supply their own highlight clause; the highlighted
  # field key mirrors the raw field name ("name.analyzed").
  def test_body
    skip if ENV["MATCH"] == "word_start"
    store_names ["Two Door Cinema Club"]
    body = {
      query: {
        match: {
          "name.analyzed" => "cinema"
        }
      },
      highlight: {
        pre_tags: ["<strong>"],
        post_tags: ["</strong>"],
        fields: {
          "name.analyzed" => {}
        }
      }
    }
    assert_equal "Two Door <strong>Cinema</strong> Club", Product.search(body: body).highlights.first[:"name.analyzed"]
  end

  # highlights(multiple: true) returns an array of fragments per field.
  def test_multiple_highlights
    store_names ["Two Door Cinema Club Some Other Words And Much More Doors Cinema Club"]
    highlights = Product.search("cinema", highlight: {fragment_size: 20}).highlights(multiple: true).first[:name]
    assert highlights.is_a?(Array)
    assert_equal highlights.count, 2
    refute_equal highlights.first, highlights.last
    highlights.each do |highlight|
      assert highlight.include?("<em>Cinema</em>")
    end
  end

  # Each loaded record also exposes its own highlights via #search_highlights.
  def test_search_highlights_method
    store_names ["Two Door Cinema Club"]
    assert_equal "Two Door <em>Cinema</em> Club", Product.search("cinema", highlight: true).first.search_highlights[:name]
  end

  # A match-all query has no terms to highlight, so highlights are nil.
  def test_match_all
    store_names ["Two Door Cinema Club"]
    assert_nil Product.search("*", highlight: true).highlights.first[:name]
  end

  def test_match_all_load_false
    store_names ["Two Door Cinema Club"]
    assert_nil Product.search("*", highlight: true, load: false).highlights.first[:name]
  end

  def test_match_all_search_highlights
    store_names ["Two Door Cinema Club"]
    assert_nil Product.search("*", highlight: true).first.search_highlights[:name]
  end
end
| ruby | MIT | 107d270a0d2d7e1935dc7fee1ea0305edb5e4b3b | 2026-01-04T15:40:12.176737Z | false |
ankane/searchkick | https://github.com/ankane/searchkick/blob/107d270a0d2d7e1935dc7fee1ea0305edb5e4b3b/test/partial_match_test.rb | test/partial_match_test.rb | require_relative "test_helper"
# Tests for partial-match modes: text_start/middle/end match against the whole
# field value, word_start/middle/end match against individual words, and
# :exact requires a verbatim (case-sensitive) match.
class PartialMatchTest < Minitest::Test
  def test_autocomplete
    store_names ["Hummus"]
    assert_search "hum", ["Hummus"], match: :text_start
  end

  # text_start anchors at the beginning of the whole field, so a prefix of a
  # later word does not match.
  def test_autocomplete_two_words
    store_names ["Organic Hummus"]
    assert_search "hum", [], match: :text_start
  end

  def test_autocomplete_fields
    store_names ["Hummus"]
    assert_search "hum", ["Hummus"], match: :text_start, fields: [:name]
  end

  def test_text_start
    store_names ["Where in the World is Carmen San Diego"]
    assert_search "where in the world is", ["Where in the World is Carmen San Diego"], fields: [{name: :text_start}]
    assert_search "in the world", [], fields: [{name: :text_start}]
  end

  # text_middle matches any contiguous substring of the field.
  def test_text_middle
    store_names ["Where in the World is Carmen San Diego"]
    assert_search "where in the world is", ["Where in the World is Carmen San Diego"], fields: [{name: :text_middle}]
    assert_search "n the wor", ["Where in the World is Carmen San Diego"], fields: [{name: :text_middle}]
    assert_search "men san diego", ["Where in the World is Carmen San Diego"], fields: [{name: :text_middle}]
    assert_search "world carmen", [], fields: [{name: :text_middle}]
  end

  def test_text_end
    store_names ["Where in the World is Carmen San Diego"]
    assert_search "men san diego", ["Where in the World is Carmen San Diego"], fields: [{name: :text_end}]
    assert_search "carmen san", [], fields: [{name: :text_end}]
  end

  # word_* variants match each query token against individual words.
  def test_word_start
    store_names ["Where in the World is Carmen San Diego"]
    assert_search "car san wor", ["Where in the World is Carmen San Diego"], fields: [{name: :word_start}]
  end

  def test_word_middle
    store_names ["Where in the World is Carmen San Diego"]
    assert_search "orl", ["Where in the World is Carmen San Diego"], fields: [{name: :word_middle}]
  end

  def test_word_end
    store_names ["Where in the World is Carmen San Diego"]
    assert_search "rld men ego", ["Where in the World is Carmen San Diego"], fields: [{name: :word_end}]
  end

  def test_word_start_multiple_words
    store_names ["Dark Grey", "Dark Blue"]
    assert_search "dark grey", ["Dark Grey"], fields: [{name: :word_start}]
  end

  # Exact word matches should rank above pure prefix matches.
  def test_word_start_exact
    store_names ["Back Scratcher", "Backpack"]
    assert_order "back", ["Back Scratcher", "Backpack"], fields: [{name: :word_start}]
  end

  def test_word_start_exact_martin
    store_names ["Martina", "Martin"]
    assert_order "martin", ["Martin", "Martina"], fields: [{name: :word_start}]
  end

  # TODO find a better place
  def test_exact
    store_names ["hi@example.org"]
    assert_search "hi@example.org", ["hi@example.org"], fields: [{name: :exact}]
  end

  # :exact matching is case-sensitive.
  def test_exact_case
    store_names ["Hello"]
    assert_search "hello", [], fields: [{name: :exact}]
    assert_search "Hello", ["Hello"], fields: [{name: :exact}]
  end
end
| ruby | MIT | 107d270a0d2d7e1935dc7fee1ea0305edb5e4b3b | 2026-01-04T15:40:12.176737Z | false |
ankane/searchkick | https://github.com/ankane/searchkick/blob/107d270a0d2d7e1935dc7fee1ea0305edb5e4b3b/test/select_test.rb | test/select_test.rb | require_relative "test_helper"
# Tests for the `select` option, which controls which _source fields the
# search server returns (and therefore which attributes unloaded results
# respond to). `id` is always present.
class SelectTest < Minitest::Test
  def test_basic
    store [{name: "Product A", store_id: 1}]
    result = Product.search("product", load: false, select: [:name, :store_id]).first
    # reject underscore-prefixed keys (_index, _score, ...) added by the server
    assert_equal %w(id name store_id), result.to_h.keys.reject { |k| k.start_with?("_") }.sort
    assert_equal "Product A", result.name
    assert_equal 1, result.store_id
  end

  # select can also be applied as a relation method.
  def test_relation
    store [{name: "Product A", store_id: 1}]
    result = Product.search("product", load: false).select(:name, :store_id).first
    assert_equal %w(id name store_id), result.to_h.keys.reject { |k| k.start_with?("_") }.sort
    assert_equal "Product A", result.name
    assert_equal 1, result.store_id
  end

  # With no arguments, select behaves like Enumerable#select (filter block).
  def test_block
    store [{name: "Product A", store_id: 1}, {name: "Product B", store_id: 2}]
    assert_equal ["Product B"], Product.search("product", load: false).select { |v| v.store_id == 2 }.map(&:name)
  end

  # Mixing field arguments with a block is an error, mirroring Active Record.
  def test_block_arguments
    store [{name: "Product A", store_id: 1}, {name: "Product B", store_id: 2}]
    error = assert_raises(ArgumentError) do
      Product.search("product", load: false).select(:name) { |v| v.store_id == 2 }
    end
    assert_equal "wrong number of arguments (given 1, expected 0)", error.message
  end

  # Repeated select calls accumulate fields.
  def test_multiple
    store [{name: "Product A", store_id: 1}]
    result = Product.search("product", load: false).select(:name).select(:store_id).first
    assert_equal %w(id name store_id), result.to_h.keys.reject { |k| k.start_with?("_") }.sort
    assert_equal "Product A", result.name
    assert_equal 1, result.store_id
  end

  # reselect replaces the previously selected fields.
  def test_reselect
    store [{name: "Product A", store_id: 1}]
    result = Product.search("product", load: false).select(:name).reselect(:store_id).first
    assert_equal %w(id store_id), result.to_h.keys.reject { |k| k.start_with?("_") }.sort
    assert_equal 1, result.store_id
  end

  def test_array
    store [{name: "Product A", user_ids: [1, 2]}]
    result = Product.search("product", load: false, select: [:user_ids]).first
    assert_equal [1, 2], result.user_ids
  end

  def test_single_field
    store [{name: "Product A", store_id: 1}]
    result = Product.search("product", load: false, select: :name).first
    assert_equal %w(id name), result.to_h.keys.reject { |k| k.start_with?("_") }.sort
    assert_equal "Product A", result.name
    refute result.respond_to?(:store_id)
  end

  # select: true returns the full _source.
  def test_all
    store [{name: "Product A", user_ids: [1, 2]}]
    hit = Product.search("product", select: true).hits.first
    assert_equal hit["_source"]["name"], "Product A"
    assert_equal hit["_source"]["user_ids"], [1, 2]
  end

  # select: [] and select: false both suppress _source entirely.
  def test_none
    store [{name: "Product A", user_ids: [1, 2]}]
    hit = Product.search("product", select: []).hits.first
    assert_nil hit["_source"]
    hit = Product.search("product", select: false).hits.first
    assert_nil hit["_source"]
  end

  # Hash form passes includes/excludes through to source filtering.
  def test_includes
    store [{name: "Product A", user_ids: [1, 2]}]
    result = Product.search("product", load: false, select: {includes: [:name]}).first
    assert_equal %w(id name), result.to_h.keys.reject { |k| k.start_with?("_") }.sort
    assert_equal "Product A", result.name
    refute result.respond_to?(:store_id)
  end

  def test_excludes
    store [{name: "Product A", user_ids: [1, 2], store_id: 1}]
    result = Product.search("product", load: false, select: {excludes: [:name]}).first
    refute result.respond_to?(:name)
    assert_equal [1, 2], result.user_ids
    assert_equal 1, result.store_id
  end

  def test_include_and_excludes
    # let's take this to the next level
    store [{name: "Product A", user_ids: [1, 2], store_id: 1}]
    result = Product.search("product", load: false, select: {includes: [:store_id], excludes: [:name]}).first
    assert_equal 1, result.store_id
    refute result.respond_to?(:name)
    refute result.respond_to?(:user_ids)
  end
end
| ruby | MIT | 107d270a0d2d7e1935dc7fee1ea0305edb5e4b3b | 2026-01-04T15:40:12.176737Z | false |
ankane/searchkick | https://github.com/ankane/searchkick/blob/107d270a0d2d7e1935dc7fee1ea0305edb5e4b3b/test/results_test.rb | test/results_test.rb | require_relative "test_helper"
# Tests for the results object: it should behave like an Array, expose
# hit/score enumerators, and report a model name with or without a model.
class ResultsTest < Minitest::Test
  # The results collection delegates the usual Array/Enumerable methods.
  def test_array_methods
    store_names ["Product A", "Product B"]
    found = Product.search("product")

    [found.count, found.size, found.length].each { |n| assert_equal 2, n }

    assert found.any?
    [:empty?, :none?, :one?].each { |pred| refute found.public_send(pred) }
    assert found.many?

    assert_kind_of Product, found[0]
    assert_kind_of Array, found.slice(0, 1)
    assert_kind_of Array, found.to_ary
  end

  # with_hit yields each record together with its raw search hit hash.
  def test_with_hit
    store_names ["Product A", "Product B"]
    found = Product.search("product")
    assert_kind_of Enumerator, found.with_hit
    assert_equal 2, found.with_hit.to_a.size

    seen = 0
    found.with_hit do |record, raw_hit|
      assert_kind_of Product, record
      assert_kind_of Hash, raw_hit
      seen += 1
    end
    assert_equal 2, seen
  end

  # with_score yields each record together with its numeric relevance score.
  def test_with_score
    store_names ["Product A", "Product B"]
    found = Product.search("product")
    assert_kind_of Enumerator, found.with_score
    assert_equal 2, found.with_score.to_a.size

    seen = 0
    found.with_score do |record, relevance|
      assert_kind_of Product, record
      assert_kind_of Numeric, relevance
      seen += 1
    end
    assert_equal 2, seen
  end

  # Model-scoped searches report the model's name.
  def test_model_name_with_model
    store_names ["Product A", "Product B"]
    found = Product.search("product")
    assert_equal "Product", found.model_name.human
  end

  # Model-less searches fall back to a generic "Result" name.
  def test_model_name_without_model
    store_names ["Product A", "Product B"]
    found = Searchkick.search("product")
    assert_equal "Result", found.model_name.human
  end
end
| ruby | MIT | 107d270a0d2d7e1935dc7fee1ea0305edb5e4b3b | 2026-01-04T15:40:12.176737Z | false |
ankane/searchkick | https://github.com/ankane/searchkick/blob/107d270a0d2d7e1935dc7fee1ea0305edb5e4b3b/test/search_test.rb | test/search_test.rb | require_relative "test_helper"
# Tests for search entry points and error handling: search must be called on
# the model (not a relation), raw bodies, blocks, and index/mapping errors.
class SearchTest < Minitest::Test
  def test_search_relation
    error = assert_raises(Searchkick::Error) do
      Product.all.search("*")
    end
    assert_equal "search must be called on model, not relation", error.message
  end

  # Mongoid's unscoped returns the model class itself, so searching inside an
  # unscoped block is only an error for Active Record.
  def test_unscoped
    if mongoid?
      Product.unscoped do
        Product.search("*")
      end
    else
      error = assert_raises(Searchkick::Error) do
        Product.unscoped do
          Product.search("*")
        end
      end
      assert_equal "search must be called on model, not relation", error.message
    end
    # Searchkick.search with explicit models is allowed inside unscoped.
    Product.unscoped do
      Searchkick.search("*", models: [Product])
    end
  end

  # A raw body bypasses query generation entirely.
  def test_body
    store_names ["Dollar Tree"], Store
    assert_equal ["Dollar Tree"], Store.search(body: {query: {match: {name: "dollar"}}}, load: false).map(&:name)
  end

  # body cannot be combined with query-generating options like where.
  def test_body_incompatible_options
    assert_raises(ArgumentError) do
      Store.search(body: {query: {match: {name: "dollar"}}}, where: {id: 1})
    end
  end

  # A block receives the generated body and may rewrite it before execution.
  def test_block
    store_names ["Dollar Tree"]
    products =
      Product.search "boom" do |body|
        body[:query] = {match_all: {}}
      end
    assert_equal ["Dollar Tree"], products.map(&:name)
  end

  # Records present in the index but deleted from the database are dropped
  # from results, surfaced via missing_records, and logged as a warning.
  def test_missing_records
    store_names ["Product A", "Product B"]
    product = Product.find_by(name: "Product A")
    product.delete
    assert_output nil, /\[searchkick\] WARNING: Records in search index do not exist in database: Product \d+/ do
      result = Product.search("product")
      assert_equal ["Product B"], result.map(&:name)
      assert_equal [product.id.to_s], result.missing_records.map { |v| v[:id] }
      assert_equal [Product], result.missing_records.map { |v| v[:model] }
    end
    # load: false never hits the database, so nothing can be missing
    assert_empty Product.search("product", load: false).missing_records
  ensure
    Product.reindex
  end

  # Indexing into a deleted index creates a dynamic mapping; searching it
  # should raise with a hint to reindex.
  def test_bad_mapping
    Product.searchkick_index.delete
    store_names ["Product A"]
    error = assert_raises(Searchkick::InvalidQueryError) { Product.search("test").to_a }
    assert_equal "Bad mapping - run Product.reindex", error.message
  ensure
    Product.reindex
  end

  def test_missing_index
    assert_raises(Searchkick::MissingIndexError) { Product.search("test", index_name: "not_found").to_a }
  end

  def test_invalid_body
    assert_raises(Searchkick::InvalidQueryError) { Product.search(body: {boom: true}).to_a }
  end
end
| ruby | MIT | 107d270a0d2d7e1935dc7fee1ea0305edb5e4b3b | 2026-01-04T15:40:12.176737Z | false |
ankane/searchkick | https://github.com/ankane/searchkick/blob/107d270a0d2d7e1935dc7fee1ea0305edb5e4b3b/test/aggs_test.rb | test/aggs_test.rb | require_relative "test_helper"
# Tests for the `aggs` option: bucket aggregations (terms, ranges, date
# histograms), metric aggregations (avg, sum, min/max, cardinality), and how
# `where` filters interact with aggregation filters ("smart aggs").
class AggsTest < Minitest::Test
  # Seed a fixed set of products; only names starting with "Product" match
  # the "Product" query used by most tests ("Foo" is excluded).
  def setup
    super
    store [
      {name: "Product Show", latitude: 37.7833, longitude: 12.4167, store_id: 1, in_stock: true, color: "blue", price: 21, created_at: 2.days.ago},
      {name: "Product Hide", latitude: 29.4167, longitude: -98.5000, store_id: 2, in_stock: false, color: "green", price: 25, created_at: 2.days.from_now},
      {name: "Product B", latitude: 43.9333, longitude: -122.4667, store_id: 2, in_stock: false, color: "red", price: 5, created_at: Time.now},
      {name: "Foo", latitude: 43.9333, longitude: 12.4667, store_id: 3, in_stock: false, color: "yellow", price: 15, created_at: Time.now}
    ]
  end

  def test_basic
    assert_equal ({1 => 1, 2 => 2}), store_agg(aggs: [:store_id])
  end

  # Per-aggregation where filters restrict that aggregation's buckets.
  def test_where
    assert_equal ({1 => 1}), store_agg(aggs: {store_id: {where: {in_stock: true}}})
  end

  def test_order
    agg = Product.search("Product", aggs: {color: {order: {_key: "desc"}}}).aggs["color"]
    assert_equal %w(red green blue), agg["buckets"].map { |b| b["key"] }
  end

  # The aggregation name may differ from the field it aggregates on.
  def test_field
    assert_equal ({1 => 1, 2 => 2}), store_agg(aggs: {store_id: {}})
    assert_equal ({1 => 1, 2 => 2}), store_agg(aggs: {store_id: {field: "store_id"}})
    assert_equal ({1 => 1, 2 => 2}), store_agg({aggs: {store_id_new: {field: "store_id"}}}, "store_id_new")
  end

  def test_min_doc_count
    assert_equal ({2 => 2}), store_agg(aggs: {store_id: {min_doc_count: 2}})
  end

  # A script can transform bucket keys server-side.
  def test_script
    source = "'Color: ' + _value"
    agg = Product.search("Product", aggs: {color: {script: {source: source}}}).aggs["color"]
    assert_equal ({"Color: blue" => 1, "Color: green" => 1, "Color: red" => 1}), buckets_as_hash(agg)
  end

  def test_no_aggs
    assert_nil Product.search("*").aggs
  end

  # limit caps the bucket count; overflow shows up in sum_other_doc_count.
  def test_limit
    agg = Product.search("Product", aggs: {store_id: {limit: 1}}).aggs["store_id"]
    assert_equal 1, agg["buckets"].size
    # assert_equal 3, agg["doc_count"]
    assert_equal(1, agg["sum_other_doc_count"])
  end

  def test_ranges
    price_ranges = [{to: 10}, {from: 10, to: 20}, {from: 20}]
    agg = Product.search("Product", aggs: {price: {ranges: price_ranges}}).aggs["price"]
    assert_equal 3, agg["buckets"].size
    assert_equal 10.0, agg["buckets"][0]["to"]
    assert_equal 20.0, agg["buckets"][2]["from"]
    assert_equal 1, agg["buckets"][0]["doc_count"]
    assert_equal 0, agg["buckets"][1]["doc_count"]
    assert_equal 2, agg["buckets"][2]["doc_count"]
  end

  def test_date_ranges
    ranges = [{to: 1.day.ago}, {from: 1.day.ago, to: 1.day.from_now}, {from: 1.day.from_now}]
    agg = Product.search("Product", aggs: {created_at: {date_ranges: ranges}}).aggs["created_at"]
    assert_equal 1, agg["buckets"][0]["doc_count"]
    assert_equal 1, agg["buckets"][1]["doc_count"]
    assert_equal 1, agg["buckets"][2]["doc_count"]
  end

  # A top-level where also filters aggregation results.
  def test_query_where
    assert_equal ({1 => 1}), store_agg(where: {in_stock: true}, aggs: [:store_id])
  end

  def test_two_wheres
    assert_equal ({2 => 1}), store_agg(where: {color: "red"}, aggs: {store_id: {where: {in_stock: false}}})
  end

  # Per-aggregation filters override conflicting top-level filters.
  def test_where_override
    assert_equal ({}), store_agg(where: {color: "red"}, aggs: {store_id: {where: {in_stock: false, color: "blue"}}})
    assert_equal ({2 => 1}), store_agg(where: {color: "blue"}, aggs: {store_id: {where: {in_stock: false, color: "red"}}})
  end

  # "Smart aggs": a where clause on the aggregated field itself is skipped
  # when computing that aggregation, so all buckets remain visible.
  def test_skip
    assert_equal ({1 => 1, 2 => 2}), store_agg(where: {store_id: 2}, aggs: [:store_id])
  end

  def test_skip_complex
    assert_equal ({1 => 1, 2 => 1}), store_agg(where: {store_id: 2, price: {gt: 5}}, aggs: [:store_id])
  end

  def test_multiple
    assert_equal ({"store_id" => {1 => 1, 2 => 2}, "color" => {"blue" => 1, "green" => 1, "red" => 1}}), store_multiple_aggs(aggs: [:store_id, :color])
  end

  # smart_aggs: false disables the skip behavior above.
  def test_smart_aggs_false
    assert_equal ({2 => 2}), store_agg(where: {color: "red"}, aggs: {store_id: {where: {in_stock: false}}}, smart_aggs: false)
    assert_equal ({2 => 2}), store_agg(where: {color: "blue"}, aggs: {store_id: {where: {in_stock: false}}}, smart_aggs: false)
  end

  # Raw aggregation definitions (date_histogram) pass straight through.
  def test_aggs_group_by_date
    store [{name: "Old Product", created_at: 3.years.ago}]
    products =
      Product.search("Product",
        where: {
          created_at: {lt: Time.now}
        },
        aggs: {
          products_per_year: {
            date_histogram: {
              field: :created_at,
              calendar_interval: :year
            }
          }
        }
      )
    assert_equal 4, products.aggs["products_per_year"]["buckets"].size
  end

  # Day buckets shift with the requested time zone: the same three documents
  # land in one bucket for London but two buckets for Sydney.
  def test_aggs_with_time_zone
    start_time = Time.at(1529366400)
    store [
      {name: "Opera House Pass", created_at: start_time},
      {name: "London Eye Pass", created_at: start_time + 16.hours},
      {name: "London Tube Pass", created_at: start_time + 16.hours}
    ]

    sydney_search = search_aggregate_by_day_with_time_zone('Pass', '+10:00') # Sydney
    london_search = search_aggregate_by_day_with_time_zone('Pass', '+01:00') # London

    # London search will return all 3 in one bucket because of time zone offset
    expected_london_buckets = [
      {"key_as_string" => "2018-06-19T00:00:00.000+01:00", "key" => 1529362800000, "doc_count" => 3}
    ]
    assert_equal expected_london_buckets, london_search.aggs["products_per_day"]["buckets"]

    # Sydney search will return them in separate buckets due to time zone offset
    expected_sydney_buckets = [
      {"key_as_string" => "2018-06-19T00:00:00.000+10:00", "key" => 1529330400000, "doc_count" => 1},
      {"key_as_string" => "2018-06-20T00:00:00.000+10:00", "key" => 1529416800000, "doc_count" => 2}
    ]
    assert_equal expected_sydney_buckets, sydney_search.aggs["products_per_day"]["buckets"]
  end

  def test_aggs_avg
    products =
      Product.search("*",
        aggs: {
          avg_price: {
            avg: {
              field: :price
            }
          }
        }
      )
    assert_equal 16.5, products.aggs["avg_price"]["value"]
  end

  def test_aggs_cardinality
    products =
      Product.search("*",
        aggs: {
          total_stores: {
            cardinality: {
              field: :store_id
            }
          }
        }
      )
    assert_equal 3, products.aggs["total_stores"]["value"]
  end

  def test_aggs_min_max
    products =
      Product.search("*",
        aggs: {
          min_price: {
            min: {
              field: :price
            }
          },
          max_price: {
            max: {
              field: :price
            }
          }
        }
      )
    assert_equal 5, products.aggs["min_price"]["value"]
    assert_equal 25, products.aggs["max_price"]["value"]
  end

  def test_aggs_sum
    products =
      Product.search("*",
        aggs: {
          sum_price: {
            sum: {
              field: :price
            }
          }
        }
      )
    assert_equal 66, products.aggs["sum_price"]["value"]
  end

  # body_options merges raw clauses (here a histogram agg) into the body.
  def test_body_options
    products =
      Product.search("*",
        body_options: {
          aggs: {
            price: {
              histogram: {field: :price, interval: 10}
            }
          }
        }
      )
    expected = [
      {"key" => 0.0, "doc_count" => 1},
      {"key" => 10.0, "doc_count" => 1},
      {"key" => 20.0, "doc_count" => 2}
    ]
    assert_equal products.aggs["price"]["buckets"], expected
  end

  # aggs/smart_aggs are also available as relation methods.
  def test_relation
    assert_equal ({1 => 1}), buckets_as_hash(Product.search("Product").aggs(store_id: {where: {in_stock: true}}).aggs["store_id"])
  end

  def test_relation_smart_aggs_false
    assert_equal ({2 => 2}), buckets_as_hash(Product.search("Product").where(color: "red").aggs(store_id: {where: {in_stock: false}}).smart_aggs(false).aggs["store_id"])
  end

  protected

  # Runs a day-granularity date_histogram over created_at in the given zone.
  def search_aggregate_by_day_with_time_zone(query, time_zone = '-8:00')
    Product.search(query,
      where: {
        created_at: {lt: Time.now}
      },
      aggs: {
        products_per_day: {
          date_histogram: {
            field: :created_at,
            calendar_interval: :day,
            time_zone: time_zone
          }
        }
      }
    )
  end

  # Collapses an aggregation's buckets into {key => doc_count}.
  def buckets_as_hash(agg)
    agg["buckets"].to_h { |v| [v["key"], v["doc_count"]] }
  end

  # Searches "Product" with the given options and returns one aggregation
  # (default "store_id") as a {key => doc_count} hash.
  def store_agg(options, agg_key = "store_id")
    buckets = Product.search("Product", **options).aggs[agg_key]
    buckets_as_hash(buckets)
  end

  # Like store_agg, but returns every aggregation keyed by field name.
  def store_multiple_aggs(options)
    Product.search("Product", **options).aggs.to_h do |field, filtered_agg|
      [field, buckets_as_hash(filtered_agg)]
    end
  end
end
| ruby | MIT | 107d270a0d2d7e1935dc7fee1ea0305edb5e4b3b | 2026-01-04T15:40:12.176737Z | false |
ankane/searchkick | https://github.com/ankane/searchkick/blob/107d270a0d2d7e1935dc7fee1ea0305edb5e4b3b/test/hybrid_test.rb | test/hybrid_test.rb | require_relative "test_helper"
# Tests for hybrid (keyword + semantic) search via multi_search and
# reciprocal rank fusion (RRF) reranking.
class HybridTest < Minitest::Test
  def setup
    # requires a server version with k-nearest-neighbor support
    skip unless Searchkick.knn_support?
    super
  end

  # Combining a text query and knn in a single search is rejected; hybrid
  # search must go through Searchkick.multi_search instead.
  def test_search
    error = assert_raises(ArgumentError) do
      Product.search("product", knn: {field: :embedding, vector: [1, 2, 3]})
    end
    assert_equal "Use Searchkick.multi_search for hybrid search", error.message
  end

  # Run keyword and vector searches in one multi_search round trip, then fuse
  # their rankings with RRF; scores are the standard RRF 1/(k + rank) sums.
  def test_multi_search
    store [
      {name: "The dog is barking", embedding: [1, 2, 0]},
      {name: "The cat is purring", embedding: [1, 0, 0]},
      {name: "The bear is growling", embedding: [1, 2, 3]}
    ]
    keyword_search = Product.search("growling bear")
    semantic_search = Product.search(knn: {field: :embedding, vector: [1, 2, 3]})
    Searchkick.multi_search([keyword_search, semantic_search])
    results = Searchkick::Reranking.rrf(keyword_search, semantic_search)
    expected = ["The bear is growling", "The dog is barking", "The cat is purring"]
    assert_equal expected, results.map { |v| v[:result].name }
    assert_in_delta 0.03279, results[0][:score]
    assert_in_delta 0.01612, results[1][:score]
    assert_in_delta 0.01587, results[2][:score]
  end
end
| ruby | MIT | 107d270a0d2d7e1935dc7fee1ea0305edb5e4b3b | 2026-01-04T15:40:12.176737Z | false |
ankane/searchkick | https://github.com/ankane/searchkick/blob/107d270a0d2d7e1935dc7fee1ea0305edb5e4b3b/test/inheritance_test.rb | test/inheritance_test.rb | require_relative "test_helper"
# Tests for single-table-inheritance models (Animal > Dog/Cat) sharing one
# search index: type filtering, parent/child searches, and multi-model search.
class InheritanceTest < Minitest::Test
  def setup
    super
    setup_animal
  end

  def test_child_reindex
    store_names ["Max"], Cat
    assert Dog.reindex
    # reindexing one child must not wipe the other child's documents
    assert_equal 1, Animal.search("*").size
  end

  # Children share the parent's index.
  def test_child_index_name
    assert_equal "animals_test#{ENV["TEST_ENV_NUMBER"]}", Dog.searchkick_index.name
  end

  # Searching a child class only returns that child's records.
  def test_child_search
    store_names ["Bear"], Dog
    store_names ["Bear"], Cat
    assert_equal 1, Dog.search("bear").size
  end

  # Searching the parent returns records of all children.
  def test_parent_search
    store_names ["Bear"], Dog
    store_names ["Bear"], Cat
    assert_equal 2, Animal.search("bear").size
  end

  # The type option restricts a parent search to specific subclasses.
  def test_force_one_type
    store_names ["Green Bear"], Dog
    store_names ["Blue Bear"], Cat
    assert_equal ["Blue Bear"], Animal.search("bear", type: [Cat]).map(&:name)
  end

  def test_force_multiple_types
    store_names ["Green Bear"], Dog
    store_names ["Blue Bear"], Cat
    store_names ["Red Bear"], Animal
    assert_equal ["Green Bear", "Blue Bear"], Animal.search("bear", type: [Dog, Cat]).map(&:name)
  end

  def test_child_autocomplete
    store_names ["Max"], Cat
    store_names ["Mark"], Dog
    assert_equal ["Max"], Cat.search("ma", fields: [:name], match: :text_start).map(&:name)
  end

  def test_parent_autocomplete
    store_names ["Max"], Cat
    store_names ["Bear"], Dog
    assert_equal ["Bear"], Animal.search("bea", fields: [:name], match: :text_start).map(&:name).sort
  end

  # def test_child_suggest
  #   store_names ["Shark"], Cat
  #   store_names ["Sharp"], Dog
  #   assert_equal ["shark"], Cat.search("shar", fields: [:name], suggest: true).suggestions
  # end

  def test_parent_suggest
    store_names ["Shark"], Cat
    store_names ["Tiger"], Dog
    assert_equal ["tiger"], Animal.search("tige", fields: [:name], suggest: true).suggestions.sort
  end

  def test_reindex
    store_names ["Bear A"], Cat
    store_names ["Bear B"], Dog
    Animal.reindex
    assert_equal 2, Animal.search("bear").size
  end

  def test_child_models_option
    store_names ["Bear A"], Cat
    store_names ["Bear B"], Dog
    Animal.reindex
    # note: the models option is less efficient than Animal.search("bear", type: [Cat, Dog])
    # since it requires two database calls instead of one to Animal
    assert_equal 2, Searchkick.search("bear", models: [Cat, Dog]).size
  end

  # With multiple models, a missing record's model is ambiguous — both
  # candidate models are reported.
  def test_missing_records
    store_names ["Bear A"], Cat
    store_names ["Bear B"], Dog
    Animal.reindex
    record = Animal.find_by(name: "Bear A")
    record.delete
    assert_output nil, /\[searchkick\] WARNING: Records in search index do not exist in database: Cat\/Dog \d+/ do
      result = Searchkick.search("bear", models: [Cat, Dog])
      assert_equal ["Bear B"], result.map(&:name)
      assert_equal [record.id.to_s], result.missing_records.map { |v| v[:id] }
      assert_equal [[Cat, Dog]], result.missing_records.map { |v| v[:model].sort_by(&:model_name) }
    end
    assert_empty Product.search("bear", load: false).missing_records
  ensure
    Animal.reindex
  end

  # Mixing an inherited model with a standalone model still filters correctly.
  def test_inherited_and_non_inherited_models
    store_names ["Bear A"], Cat
    store_names ["Bear B"], Dog
    store_names ["Bear C"]
    Animal.reindex
    assert_equal 2, Searchkick.search("bear", models: [Cat, Product]).size
    assert_equal 2, Searchkick.search("bear", models: [Cat, Product]).hits.size
    assert_equal 2, Searchkick.search("bear", models: [Cat, Product], per_page: 1).total_pages
  end

  # TODO move somewhere better
  def test_multiple_indices
    store_names ["Product A"]
    store_names ["Product B"], Animal
    assert_search "product", ["Product A", "Product B"], {models: [Product, Animal], conversions: false}, Searchkick
    assert_search "product", ["Product A", "Product B"], {index_name: [Product, Animal], conversions: false}, Searchkick
  end

  def test_index_name_model
    store_names ["Product A"]
    assert_equal ["Product A"], Searchkick.search("product", index_name: [Product]).map(&:name)
  end

  # index_name entries must be model classes, not raw index-name strings.
  def test_index_name_string
    store_names ["Product A"]
    error = assert_raises Searchkick::Error do
      Searchkick.search("product", index_name: [Product.searchkick_index.name]).map(&:name)
    end
    assert_includes error.message, "Unknown model"
  end

  # similar defaults to the record's own class; models widens the scope.
  def test_similar
    store_names ["Dog", "Other dog"], Dog
    store_names ["Not dog"], Cat
    dog = Dog.find_by!(name: "Dog")
    assert_equal ["Other dog"], dog.similar(fields: [:name]).map(&:name)
    assert_equal ["Not dog", "Other dog"], dog.similar(fields: [:name], models: [Animal]).map(&:name).sort
    assert_equal ["Not dog"], dog.similar(fields: [:name], models: [Cat]).map(&:name).sort
  end
end
| ruby | MIT | 107d270a0d2d7e1935dc7fee1ea0305edb5e4b3b | 2026-01-04T15:40:12.176737Z | false |
ankane/searchkick | https://github.com/ankane/searchkick/blob/107d270a0d2d7e1935dc7fee1ea0305edb5e4b3b/test/similar_test.rb | test/similar_test.rb | require_relative "test_helper"
# Tests for more-like-this ("similar") queries, both via the similar: search
# option and the per-record #similar method.
class SimilarTest < Minitest::Test
  def test_similar
    store_names ["Annie's Naturals Organic Shiitake & Sesame Dressing"]
    assert_search "Annie's Naturals Shiitake & Sesame Vinaigrette", ["Annie's Naturals Organic Shiitake & Sesame Dressing"], similar: true, fields: [:name]
  end

  # The record itself is excluded from its own similar results.
  def test_fields
    store_names ["1% Organic Milk", "2% Organic Milk", "Popcorn"]
    product = Product.find_by(name: "1% Organic Milk")
    assert_equal ["2% Organic Milk"], product.similar(fields: ["name"]).map(&:name)
  end

  def test_order
    store_names ["Lucerne Milk Chocolate Fat Free", "Clover Fat Free Milk"]
    assert_order "Lucerne Fat Free Chocolate Milk", ["Lucerne Milk Chocolate Fat Free", "Clover Fat Free Milk"], similar: true, fields: [:name]
  end

  # limit works both as an option and as a chained relation method.
  def test_limit
    store_names ["1% Organic Milk", "2% Organic Milk", "Fat Free Organic Milk", "Popcorn"]
    product = Product.find_by(name: "1% Organic Milk")
    assert_equal ["2% Organic Milk"], product.similar(fields: ["name"], order: ["name"], limit: 1).map(&:name)
    assert_equal ["2% Organic Milk"], product.similar(fields: ["name"]).order("name").limit(1).map(&:name)
  end

  def test_per_page
    store_names ["1% Organic Milk", "2% Organic Milk", "Fat Free Organic Milk", "Popcorn"]
    product = Product.find_by(name: "1% Organic Milk")
    assert_equal ["2% Organic Milk"], product.similar(fields: ["name"], order: ["name"], per_page: 1).map(&:name)
    assert_equal ["2% Organic Milk"], product.similar(fields: ["name"]).order("name").per_page(1).map(&:name)
  end

  # similar should work (and return nothing here) on models that use routing.
  def test_routing
    store_names ["Test"], Store
    assert_equal [], Store.first.similar(fields: ["name"]).map(&:name)
  end
end
| ruby | MIT | 107d270a0d2d7e1935dc7fee1ea0305edb5e4b3b | 2026-01-04T15:40:12.176737Z | false |
ankane/searchkick | https://github.com/ankane/searchkick/blob/107d270a0d2d7e1935dc7fee1ea0305edb5e4b3b/test/index_test.rb | test/index_test.rb | require_relative "test_helper"
class IndexTest < Minitest::Test
def setup
super
setup_region
end
def test_tokens
assert_equal ["dollar", "dollartre", "tree"], Product.searchkick_index.tokens("Dollar Tree", analyzer: "searchkick_index")
end
def test_tokens_analyzer
assert_equal ["dollar", "tree"], Product.searchkick_index.tokens("Dollar Tree", analyzer: "searchkick_search2")
end
def test_total_docs
store_names ["Product A"]
assert_equal 1, Product.searchkick_index.total_docs
end
def test_clean_indices
suffix = Searchkick.index_suffix ? "_#{Searchkick.index_suffix}" : ""
old_index = Searchkick::Index.new("products_test#{suffix}_20130801000000000")
different_index = Searchkick::Index.new("items_test#{suffix}_20130801000000000")
old_index.delete if old_index.exists?
different_index.delete if different_index.exists?
# create indexes
old_index.create
different_index.create
Product.searchkick_index.clean_indices
assert Product.searchkick_index.exists?
assert different_index.exists?
assert !old_index.exists?
end
def test_clean_indices_old_format
suffix = Searchkick.index_suffix ? "_#{Searchkick.index_suffix}" : ""
old_index = Searchkick::Index.new("products_test#{suffix}_20130801000000")
old_index.create
Product.searchkick_index.clean_indices
assert !old_index.exists?
end
def test_retain
Product.reindex
assert_equal 1, Product.searchkick_index.all_indices.size
Product.reindex(retain: true)
assert_equal 2, Product.searchkick_index.all_indices.size
end
def test_mappings
store_names ["Dollar Tree"], Store
assert_equal ["Dollar Tree"], Store.search(body: {query: {match: {name: "dollar"}}}).map(&:name)
mapping = Store.searchkick_index.mapping
assert_kind_of Hash, mapping
assert_equal "text", mapping.values.first["mappings"]["properties"]["name"]["type"]
end
def test_settings
assert_kind_of Hash, Store.searchkick_index.settings
end
def test_remove_blank_id
store_names ["Product A"]
Product.searchkick_index.remove(Product.new)
assert_search "product", ["Product A"]
ensure
Product.reindex
end
# keep simple for now, but maybe return client response in future
def test_store_response
product = Searchkick.callbacks(false) { Product.create!(name: "Product A") }
assert_nil Product.searchkick_index.store(product)
end
# keep simple for now, but maybe return client response in future
def test_bulk_index_response
product = Searchkick.callbacks(false) { Product.create!(name: "Product A") }
assert_nil Product.searchkick_index.bulk_index([product])
end
# TODO move
def test_filterable
store [{name: "Product A", alt_description: "Hello"}]
error = assert_raises(Searchkick::InvalidQueryError) do
assert_search "*", [], where: {alt_description: "Hello"}
end
assert_match "Cannot search on field [alt_description] since it is not indexed", error.message
end
def test_filterable_non_string
store [{name: "Product A", store_id: 1}]
assert_search "*", ["Product A"], where: {store_id: 1}
end
def test_large_value
large_value = 1000.times.map { "hello" }.join(" ")
store [{name: "Product A", text: large_value}], Region
assert_search "product", ["Product A"], {}, Region
assert_search "hello", ["Product A"], {fields: [:name, :text]}, Region
assert_search "hello", ["Product A"], {}, Region
assert_search "*", ["Product A"], {where: {text: large_value}}, Region
end
def test_very_large_value
  # terms must be < 32 KB with Elasticsearch 8.10.3+
  # https://github.com/elastic/elasticsearch/pull/99818
  large_value = 5400.times.map { "hello" }.join(" ")
  store [{name: "Product A", text: large_value}], Region
  assert_search "product", ["Product A"], {}, Region
  assert_search "hello", ["Product A"], {fields: [:name, :text]}, Region
  assert_search "hello", ["Product A"], {}, Region
  # keyword not indexed, so exact filtering on the oversized value finds nothing
  assert_search "*", [], {where: {text: large_value}}, Region
end
# bulk indexing a record whose name cannot be parsed by the index's
# date mapping should raise Searchkick::ImportError
def test_bulk_import_raises_error
  # create! (not create) to fail fast if the records can't be saved,
  # consistent with the rest of this file
  valid_dog = Product.create!(name: "2016-01-02")
  invalid_dog = Product.create!(name: "Ol' One-Leg")
  mapping = {
    properties: {
      name: {type: "date"}
    }
  }
  index = Searchkick::Index.new "dogs", mappings: mapping, _type: "dog"
  index.delete if index.exists?
  index.create_index
  # a single valid record stores fine
  index.store valid_dog
  assert_raises(Searchkick::ImportError) do
    index.bulk_index [valid_dog, invalid_dog]
  end
ensure
  # don't leak the temporary "dogs" index into other tests
  index.delete if index && index.exists?
end
end
| ruby | MIT | 107d270a0d2d7e1935dc7fee1ea0305edb5e4b3b | 2026-01-04T15:40:12.176737Z | false |
ankane/searchkick | https://github.com/ankane/searchkick/blob/107d270a0d2d7e1935dc7fee1ea0305edb5e4b3b/test/knn_test.rb | test/knn_test.rb | require_relative "test_helper"
# Tests for k-nearest-neighbor (vector) search, both approximate
# (HNSW-style) and exact (script-scored), across the distance
# functions Searchkick supports. Records without an embedding
# ("C") are excluded from knn results.
class KnnTest < Minitest::Test
  def setup
    skip unless Searchkick.knn_support?
    super
    # prevent null_pointer_exception with OpenSearch 3
    Product.reindex if Searchkick.opensearch? && !Searchkick.server_below?("3.0.0")
  end

  # cosine distance (the field's indexed distance): identical vector
  # scores ~1, opposite vector scores ~0
  def test_basic
    store [{name: "A", embedding: [1, 2, 3]}, {name: "B", embedding: [-1, -2, -3]}, {name: "C"}]
    assert_order "*", ["A", "B"], knn: {field: :embedding, vector: [1, 2, 3]}
    scores = Product.search(knn: {field: :embedding, vector: [1, 2, 3]}).hits.map { |v| v["_score"] }
    assert_in_delta 1, scores[0]
    assert_in_delta 0, scores[1]
  end

  # exact: true forces script scoring; same scores as approximate
  def test_basic_exact
    store [{name: "A", embedding: [1, 2, 3]}, {name: "B", embedding: [-1, -2, -3]}, {name: "C"}]
    assert_order "*", ["A", "B"], knn: {field: :embedding, vector: [1, 2, 3], exact: true}
    scores = Product.search(knn: {field: :embedding, vector: [1, 2, 3], exact: true}).hits.map { |v| v["_score"] }
    assert_in_delta 1, scores[0]
    assert_in_delta 0, scores[1]
  end

  # knn combined with where filters should only rank matching records
  def test_where
    store [
      {name: "A", store_id: 1, embedding: [1, 2, 3]},
      {name: "B", store_id: 2, embedding: [1, 2, 3]},
      {name: "C", store_id: 1, embedding: [-1, -2, -3]},
      {name: "D", store_id: 1}
    ]
    assert_order "*", ["A", "C"], knn: {field: :embedding, vector: [1, 2, 3]}, where: {store_id: 1}
  end

  def test_where_exact
    store [
      {name: "A", store_id: 1, embedding: [1, 2, 3]},
      {name: "B", store_id: 2, embedding: [1, 2, 3]},
      {name: "C", store_id: 1, embedding: [-1, -2, -3]},
      {name: "D", store_id: 1}
    ]
    assert_order "*", ["A", "C"], knn: {field: :embedding, vector: [1, 2, 3], exact: true}, where: {store_id: 1}
  end

  # limit/offset paginate within the knn-ranked results
  def test_pagination
    store [
      {name: "A", embedding: [1, 2, 3]},
      {name: "B", embedding: [1, 2, 0]},
      {name: "C", embedding: [-1, -2, 0]},
      {name: "D", embedding: [-1, -2, -3]},
      {name: "E"}
    ]
    assert_order "*", ["B", "C"], knn: {field: :embedding, vector: [1, 2, 3]}, limit: 2, offset: 1
  end

  def test_pagination_exact
    store [
      {name: "A", embedding: [1, 2, 3]},
      {name: "B", embedding: [1, 2, 0]},
      {name: "C", embedding: [-1, -2, 0]},
      {name: "D", embedding: [-1, -2, -3]},
      {name: "E"}
    ]
    assert_order "*", ["B", "C"], knn: {field: :embedding, vector: [1, 2, 3], exact: true}, limit: 2, offset: 1
  end

  # embedding3 is indexed with euclidean distance; score is 1 / (1 + d^2)
  def test_euclidean
    store [{name: "A", embedding3: [1, 2, 3]}, {name: "B", embedding3: [1, 5, 7]}, {name: "C"}]
    assert_order "*", ["A", "B"], knn: {field: :embedding3, vector: [1, 2, 3]}
    scores = Product.search(knn: {field: :embedding3, vector: [1, 2, 3]}).hits.map { |v| v["_score"] }
    assert_in_delta 1.0 / (1 + 0), scores[0]
    assert_in_delta 1.0 / (1 + 5**2), scores[1]
  end

  # embedding2 is not indexed for euclidean, so this runs exact
  def test_euclidean_exact
    store [{name: "A", embedding2: [1, 2, 3]}, {name: "B", embedding2: [1, 5, 7]}, {name: "C"}]
    assert_order "*", ["A", "B"], knn: {field: :embedding2, vector: [1, 2, 3], distance: "euclidean"}
    scores = Product.search(knn: {field: :embedding2, vector: [1, 2, 3], distance: "euclidean"}).hits.map { |v| v["_score"] }
    assert_in_delta 1.0 / (1 + 0), scores[0]
    assert_in_delta 1.0 / (1 + 5**2), scores[1]
  end

  # taxicab (L1): score is 1 / (1 + d)
  def test_taxicab_exact
    store [{name: "A", embedding2: [1, 2, 3]}, {name: "B", embedding2: [1, 5, 7]}, {name: "C"}]
    assert_order "*", ["A", "B"], knn: {field: :embedding2, vector: [1, 2, 3], distance: "taxicab"}
    scores = Product.search(knn: {field: :embedding2, vector: [1, 2, 3], distance: "taxicab"}).hits.map { |v| v["_score"] }
    assert_in_delta 1.0 / (1 + 0), scores[0]
    assert_in_delta 1.0 / (1 + 7), scores[1]
  end

  # chebyshev (L-infinity) is OpenSearch-only
  def test_chebyshev_exact
    skip unless Searchkick.opensearch?
    store [{name: "A", embedding: [1, 2, 3]}, {name: "B", embedding: [1, 5, 7]}, {name: "C"}]
    assert_order "*", ["A", "B"], knn: {field: :embedding, vector: [1, 2, 3], distance: "chebyshev"}
    scores = Product.search(knn: {field: :embedding, vector: [1, 2, 3], distance: "chebyshev"}).hits.map { |v| v["_score"] }
    assert_in_delta 1.0 / (1 + 0), scores[0]
    assert_in_delta 1.0 / (1 + 4), scores[1]
  end

  def test_inner_product
    store [{name: "A", embedding2: [-1, -2, -3]}, {name: "B", embedding2: [1, 5, 7]}, {name: "C"}]
    assert_order "*", ["B", "A"], knn: {field: :embedding2, vector: [1, 2, 3], distance: "inner_product"}
    scores = Product.search(knn: {field: :embedding2, vector: [1, 2, 3], distance: "inner_product"}).hits.map { |v| v["_score"] }
    # d > 0: d + 1
    # else: 1 / (1 - d)
    assert_in_delta 1 + 32, scores[0], (!Searchkick.opensearch? ? 0.5 : 0.001)
    assert_in_delta 1.0 / (1 + 14), scores[1]
  end

  def test_inner_product_exact
    store [{name: "A", embedding3: [-1, -2, -3]}, {name: "B", embedding3: [1, 5, 7]}, {name: "C"}]
    assert_order "*", ["B", "A"], knn: {field: :embedding3, vector: [1, 2, 3], distance: "inner_product"}
    scores = Product.search(knn: {field: :embedding3, vector: [1, 2, 3], distance: "inner_product"}).hits.map { |v| v["_score"] }
    assert_in_delta 1 + 32, scores[0]
    assert_in_delta 1.0 / (1 + 14), scores[1]
  end

  # embedding4 is unindexed; a distance must be supplied explicitly and
  # only exact search is possible
  def test_unindexed
    skip if Searchkick.opensearch?
    store [{name: "A", embedding4: [1, 2, 3]}, {name: "B", embedding4: [-1, -2, -3]}, {name: "C"}]
    assert_order "*", ["A", "B"], knn: {field: :embedding4, vector: [1, 2, 3], distance: "cosine"}
    scores = Product.search(knn: {field: :embedding4, vector: [1, 2, 3], distance: "cosine"}).hits.map { |v| v["_score"] }
    assert_in_delta 1, scores[0]
    assert_in_delta 0, scores[1]
    error = assert_raises(ArgumentError) do
      Product.search(knn: {field: :embedding4, vector: [1, 2, 3]})
    end
    assert_match "distance required", error.message
    error = assert_raises(ArgumentError) do
      Product.search(knn: {field: :embedding4, vector: [1, 2, 3], exact: false})
    end
    assert_match "distance required", error.message
    error = assert_raises(ArgumentError) do
      Product.search(knn: {field: :embedding, vector: [1, 2, 3], distance: "euclidean", exact: false})
    end
    assert_equal "distance must match searchkick options for approximate search", error.message
    if !Searchkick.server_below?("9.0.0")
      error = assert_raises(ArgumentError) do
        Product.search(knn: {field: :embedding, vector: [1, 2, 3], distance: "euclidean"})
      end
      assert_equal "distance must match searchkick options", error.message
    end
  end

  # verify (via explain output) which field/distance combinations run
  # approximate vs. exact, per server type and version
  def test_explain
    store [{name: "A", embedding: [1, 2, 3], embedding2: [1, 2, 3], embedding3: [1, 2, 3], embedding4: [1, 2, 3]}]
    assert_approx true, :embedding, "cosine"
    if Searchkick.opensearch? || Searchkick.server_below?("9.0.0")
      assert_approx false, :embedding, "euclidean"
      assert_approx false, :embedding, "inner_product"
      assert_approx false, :embedding, "taxicab"
    end
    if Searchkick.opensearch?
      assert_approx false, :embedding, "chebyshev"
    end
    assert_approx false, :embedding3, "cosine"
    assert_approx true, :embedding3, "euclidean"
    assert_approx false, :embedding3, "inner_product"
    unless Searchkick.opensearch?
      assert_approx false, :embedding4, "cosine"
      assert_approx false, :embedding4, "euclidean"
      assert_approx false, :embedding4, "inner_product"
    end
    assert_approx false, :embedding2, "cosine"
    assert_approx false, :embedding2, "euclidean"
    assert_approx true, :embedding2, "inner_product"
    assert_approx false, :embedding, "cosine", exact: true
    assert_approx true, :embedding, "cosine", exact: false
    error = assert_raises(ArgumentError) do
      assert_approx true, :embedding, "euclidean", exact: false
    end
    assert_equal "distance must match searchkick options for approximate search", error.message
  end

  def test_ef_search
    skip if Searchkick.opensearch? && Searchkick.server_below?("2.16.0")
    store [{name: "A", embedding: [1, 2, 3]}, {name: "B", embedding: [-1, -2, -3]}, {name: "C"}]
    assert_order "*", ["A", "B"], knn: {field: :embedding, vector: [1, 2, 3], ef_search: 20}, limit: 10
  end

  private

  # asserts whether the query ran approximate (approx = true) or exact
  # knn by inspecting the explain output for server-specific markers
  def assert_approx(approx, field, distance, **knn_options)
    response = Product.search(knn: {field: field, vector: [1, 2, 3], distance: distance, **knn_options}, explain: true).response.to_s
    if approx
      if Searchkick.opensearch?
        assert_match "within top", response
      else
        assert_match "within top k documents", response
      end
    else
      if Searchkick.opensearch?
        assert_match "knn_score", response
      else
        assert_match "params.query_vector", response
      end
    end
  end
end
| ruby | MIT | 107d270a0d2d7e1935dc7fee1ea0305edb5e4b3b | 2026-01-04T15:40:12.176737Z | false |
ankane/searchkick | https://github.com/ankane/searchkick/blob/107d270a0d2d7e1935dc7fee1ea0305edb5e4b3b/test/load_test.rb | test/load_test.rb | require_relative "test_helper"
# Tests for the load option: by default results are loaded as model
# records; with load: false they come back as Searchkick::HashWrapper
# objects built straight from the indexed _source.
class LoadTest < Minitest::Test
  def test_default
    store_names ["Product A"]
    product = Product.search("product").first
    assert_kind_of Product, product
    if mongoid?
      assert_match "#<Product _id: ", product.inspect
    else
      assert_match "#<Product id: ", product.inspect
    end
    assert_equal "Product A", product.name
    assert_equal "Product A", product[:name]
    assert_equal "Product A", product["name"]
    refute product.respond_to?(:missing)
    assert_nil product[:missing]
    assert_equal "Product A", product.attributes["name"]
    assert_equal "Product A", product.as_json["name"]
    assert_equal "Product A", JSON.parse(product.to_json)["name"]
    assert_equal "Product A", JSON.parse(Product.search("product").to_json).first["name"]
    assert_equal "Product A", Product.search("product").as_json.first["name"]
    assert_equal ({"name" => "Product A"}), product.as_json(only: ["name"])
    assert_equal ({"name" => "Product A"}), product.as_json(only: [:name])
    refute product.as_json(except: ["name"]).key?("name")
    refute product.as_json(except: [:name]).key?("name")
    assert_empty product.as_json(only: ["missing"])
    # Mongoid tolerates unknown methods in as_json; ActiveRecord raises
    if mongoid?
      product.as_json(methods: [:missing])
    else
      assert_raises(NoMethodError) do
        product.as_json(methods: [:missing])
      end
    end
  end

  def test_false
    store_names ["Product A"]
    product = Product.search("product", load: false).first
    assert_kind_of Searchkick::HashWrapper, product
    assert_match "#<Searchkick::HashWrapper id: ", product.inspect
    assert_equal "Product A", product.name
    assert_equal "Product A", product[:name]
    assert_equal "Product A", product["name"]
    refute product.respond_to?(:missing)
    assert_nil product[:missing]
    assert_equal "Product A", product.to_h["name"]
    assert_equal "Product A", product.as_json["name"]
    assert_equal "Product A", JSON.parse(product.to_json)["name"]
    assert_equal "Product A", JSON.parse(Product.search("product", load: false).to_json).first["name"]
    assert_equal "Product A", Product.search("product", load: false).as_json.first["name"]
    assert_equal ({"name" => "Product A"}), product.as_json(only: ["name"])
    # same behavior as Hashie::Mash
    assert_empty product.as_json(only: [:name])
    refute product.as_json(except: ["name"]).key?("name")
    # same behavior as Hashie::Mash
    assert product.as_json(except: [:name]).key?("name")
    assert_empty product.as_json(only: ["missing"])
    # same behavior as Hashie::Mash
    product.as_json(methods: [:missing])
  end

  def test_false_methods
    store_names ["Product A"]
    assert_equal "Product A", Product.search("product", load: false).first.name
  end

  # includes should be ignored (not raise) when load: false
  def test_false_with_includes
    store_names ["Product A"]
    assert_kind_of Searchkick::HashWrapper, Product.search("product", load: false, includes: [:store]).first
  end

  # nested objects round-trip through the wrapper as hashes
  def test_false_nested_object
    aisle = {"id" => 1, "name" => "Frozen"}
    store [{name: "Product A", aisle: aisle}]
    assert_equal aisle, Product.search("product", load: false).first.aisle.to_hash
  end
end
| ruby | MIT | 107d270a0d2d7e1935dc7fee1ea0305edb5e4b3b | 2026-01-04T15:40:12.176737Z | false |
ankane/searchkick | https://github.com/ankane/searchkick/blob/107d270a0d2d7e1935dc7fee1ea0305edb5e4b3b/test/boost_test.rb | test/boost_test.rb | require_relative "test_helper"
# Tests for the various boosting options: global boost, per-field
# boosts, boost_by (function score on a field), boost_where
# (conditional boost), boost_by_recency, boost_by_distance, and
# indices_boost for multi-model searches.
class BoostTest < Minitest::Test
  # global boost
  def test_boost
    store [
      {name: "Tomato A"},
      {name: "Tomato B", orders_count: 10},
      {name: "Tomato C", orders_count: 100}
    ]
    assert_order "tomato", ["Tomato C", "Tomato B", "Tomato A"], boost: "orders_count"
  end

  # a zero boost value must not drop the record from results
  def test_boost_zero
    store [
      {name: "Zero Boost", orders_count: 0}
    ]
    assert_order "zero", ["Zero Boost"], boost: "orders_count"
  end

  # fields
  def test_fields
    store [
      {name: "Red", color: "White"},
      {name: "White", color: "Red Red Red"}
    ]
    assert_order "red", ["Red", "White"], fields: ["name^10", "color"]
  end

  # field boosts may be fractional
  def test_fields_decimal
    store [
      {name: "Red", color: "White"},
      {name: "White", color: "Red Red Red"}
    ]
    assert_order "red", ["Red", "White"], fields: ["name^10.5", "color"]
  end

  # a boosted field can also specify a match type
  def test_fields_word_start
    store [
      {name: "Red", color: "White"},
      {name: "White", color: "Red Red Red"}
    ]
    assert_order "red", ["Red", "White"], fields: [{"name^10" => :word_start}, "color"]
  end

  # for issue #855
  def test_fields_apostrophes
    store_names ["Valentine's Day Special"]
    assert_search "Valentines", ["Valentine's Day Special"], fields: ["name^5"]
    assert_search "Valentine's", ["Valentine's Day Special"], fields: ["name^5"]
    assert_search "Valentine", ["Valentine's Day Special"], fields: ["name^5"]
  end

  def test_boost_by
    store [
      {name: "Tomato A"},
      {name: "Tomato B", orders_count: 10},
      {name: "Tomato C", orders_count: 100}
    ]
    assert_order "tomato", ["Tomato C", "Tomato B", "Tomato A"], boost_by: [:orders_count]
    assert_order "tomato", ["Tomato C", "Tomato B", "Tomato A"], boost_by: {orders_count: {factor: 10}}
  end

  # missing supplies a value for records without the field
  def test_boost_by_missing
    store [
      {name: "Tomato A"},
      {name: "Tomato B", orders_count: 10}
    ]
    assert_order "tomato", ["Tomato A", "Tomato B"], boost_by: {orders_count: {missing: 100}}
  end

  # with boost_mode multiply, a missing field (treated as neutral)
  # can outrank fractional values
  def test_boost_by_boost_mode_multiply
    store [
      {name: "Tomato A", found_rate: 0.9},
      {name: "Tomato B"},
      {name: "Tomato C", found_rate: 0.5}
    ]
    assert_order "tomato", ["Tomato B", "Tomato A", "Tomato C"], boost_by: {found_rate: {boost_mode: "multiply"}}
  end

  # boost_where accepts scalars, ranges, arrays, and {value:, factor:} forms
  def test_boost_where
    store [
      {name: "Tomato A"},
      {name: "Tomato B", user_ids: [1, 2]},
      {name: "Tomato C", user_ids: [3]}
    ]
    assert_first "tomato", "Tomato B", boost_where: {user_ids: 2}
    assert_first "tomato", "Tomato B", boost_where: {user_ids: 1..2}
    assert_first "tomato", "Tomato B", boost_where: {user_ids: [1, 4]}
    assert_first "tomato", "Tomato B", boost_where: {user_ids: {value: 2, factor: 10}}
    assert_first "tomato", "Tomato B", boost_where: {user_ids: {value: [1, 4], factor: 10}}
    assert_order "tomato", ["Tomato C", "Tomato B", "Tomato A"], boost_where: {user_ids: [{value: 1, factor: 10}, {value: 3, factor: 20}]}
  end

  # a factor below 1 demotes matching records
  def test_boost_where_negative_boost
    store [
      {name: "Tomato A"},
      {name: "Tomato B", user_ids: [2]},
      {name: "Tomato C", user_ids: [2]}
    ]
    assert_first "tomato", "Tomato A", boost_where: {user_ids: {value: 2, factor: 0.5}}
  end

  def test_boost_by_recency
    store [
      {name: "Article 1", created_at: 2.days.ago},
      {name: "Article 2", created_at: 1.day.ago},
      {name: "Article 3", created_at: Time.now}
    ]
    assert_order "article", ["Article 3", "Article 2", "Article 1"], boost_by_recency: {created_at: {scale: "7d", decay: 0.5}}
  end

  # setting origin shifts the decay peak away from "now"
  def test_boost_by_recency_origin
    store [
      {name: "Article 1", created_at: 2.days.ago},
      {name: "Article 2", created_at: 1.day.ago},
      {name: "Article 3", created_at: Time.now}
    ]
    assert_order "article", ["Article 1", "Article 2", "Article 3"], boost_by_recency: {created_at: {origin: 2.days.ago, scale: "7d", decay: 0.5}}
  end

  def test_boost_by_distance
    store [
      {name: "San Francisco", latitude: 37.7833, longitude: -122.4167},
      {name: "San Antonio", latitude: 29.4167, longitude: -98.5000},
      {name: "San Marino", latitude: 43.9333, longitude: 12.4667}
    ]
    assert_order "san", ["San Francisco", "San Antonio", "San Marino"], boost_by_distance: {field: :location, origin: [37, -122], scale: "1000mi"}
  end

  # origin may also be given as a lat/lon hash
  def test_boost_by_distance_hash
    store [
      {name: "San Francisco", latitude: 37.7833, longitude: -122.4167},
      {name: "San Antonio", latitude: 29.4167, longitude: -98.5000},
      {name: "San Marino", latitude: 43.9333, longitude: 12.4667}
    ]
    assert_order "san", ["San Francisco", "San Antonio", "San Marino"], boost_by_distance: {field: :location, origin: {lat: 37, lon: -122}, scale: "1000mi"}
  end

  # v2 syntax: field name as the top-level key
  def test_boost_by_distance_v2
    store [
      {name: "San Francisco", latitude: 37.7833, longitude: -122.4167},
      {name: "San Antonio", latitude: 29.4167, longitude: -98.5000},
      {name: "San Marino", latitude: 43.9333, longitude: 12.4667}
    ]
    assert_order "san", ["San Francisco", "San Antonio", "San Marino"], boost_by_distance: {location: {origin: [37, -122], scale: "1000mi"}}
  end

  def test_boost_by_distance_v2_hash
    store [
      {name: "San Francisco", latitude: 37.7833, longitude: -122.4167},
      {name: "San Antonio", latitude: 29.4167, longitude: -98.5000},
      {name: "San Marino", latitude: 43.9333, longitude: 12.4667}
    ]
    assert_order "san", ["San Francisco", "San Antonio", "San Marino"], boost_by_distance: {location: {origin: {lat: 37, lon: -122}, scale: "1000mi"}}
  end

  # distance factor interacts with other boosts; a large enough factor
  # lets proximity win over found_rate
  def test_boost_by_distance_v2_factor
    store [
      {name: "San Francisco", latitude: 37.7833, longitude: -122.4167, found_rate: 0.1},
      {name: "San Antonio", latitude: 29.4167, longitude: -98.5000, found_rate: 0.99},
      {name: "San Marino", latitude: 43.9333, longitude: 12.4667, found_rate: 0.2}
    ]
    assert_order "san", ["San Antonio", "San Francisco", "San Marino"], boost_by: {found_rate: {factor: 100}}, boost_by_distance: {location: {origin: [37, -122], scale: "1000mi"}}
    assert_order "san", ["San Francisco", "San Antonio", "San Marino"], boost_by: {found_rate: {factor: 100}}, boost_by_distance: {location: {origin: [37, -122], scale: "1000mi", factor: 100}}
  end

  # indices_boost weights results from one model's index over another's
  def test_boost_by_indices
    setup_animal
    store_names ["Rex"], Animal
    store_names ["Rexx"], Product
    assert_order "Rex", ["Rexx", "Rex"], {models: [Animal, Product], indices_boost: {Animal => 1, Product => 200}, fields: [:name]}, Searchkick
  end
end
| ruby | MIT | 107d270a0d2d7e1935dc7fee1ea0305edb5e4b3b | 2026-01-04T15:40:12.176737Z | false |
ankane/searchkick | https://github.com/ankane/searchkick/blob/107d270a0d2d7e1935dc7fee1ea0305edb5e4b3b/test/synonyms_test.rb | test/synonyms_test.rb | require_relative "test_helper"
# Tests for the search-time synonym support. The synonym rules
# themselves are defined in the test models/index options, not here.
class SynonymsTest < Minitest::Test
  def test_bleach
    store_names ["Clorox Bleach", "Kroger Bleach"]
    assert_search "clorox", ["Clorox Bleach", "Kroger Bleach"]
  end

  # multi-word synonym on the query side
  def test_burger_buns
    store_names ["Hamburger Buns"]
    assert_search "burger buns", ["Hamburger Buns"]
  end

  def test_bandaids
    store_names ["Band-Aid", "Kroger 12-Pack Bandages"]
    assert_search "bandaids", ["Band-Aid", "Kroger 12-Pack Bandages"]
  end

  # synonym applies in both directions
  def test_reverse
    store_names ["Hamburger"]
    assert_search "burger", ["Hamburger"]
  end

  # synonyms are applied after stemming
  def test_stemmed
    store_names ["Burger"]
    assert_search "hamburgers", ["Burger"]
  end

  def test_word_start
    store_names ["Clorox Bleach", "Kroger Bleach"]
    assert_search "clorox", ["Clorox Bleach", "Kroger Bleach"], fields: [{name: :word_start}]
  end

  # one-directional (=>) synonyms expand the left side only
  def test_directional
    store_names ["Lightbulb", "Green Onions", "Led"]
    assert_search "led", ["Lightbulb", "Led"]
    assert_search "Lightbulb", ["Lightbulb"]
    assert_search "Halogen Lamp", ["Lightbulb"]
    assert_search "onions", ["Green Onions"]
  end

  # synonym matching should be case-insensitive
  def test_case
    store_names ["Uppercase"]
    assert_search "lowercase", ["Uppercase"]
  end
end
| ruby | MIT | 107d270a0d2d7e1935dc7fee1ea0305edb5e4b3b | 2026-01-04T15:40:12.176737Z | false |
ankane/searchkick | https://github.com/ankane/searchkick/blob/107d270a0d2d7e1935dc7fee1ea0305edb5e4b3b/test/routing_test.rb | test/routing_test.rb | require_relative "test_helper"
# Tests for custom routing (documents routed to shards by a routing key).
class RoutingTest < Minitest::Test
  # the routing option should be passed through to the request params
  def test_query
    query = Store.search("Dollar Tree", routing: "Dollar Tree")
    # Minitest convention: expected value first, actual second
    assert_equal "Dollar Tree", query.params[:routing]
  end

  # routing-enabled models should require _routing in the mapping
  def test_mappings
    mappings = Store.searchkick_index.index_options[:mappings]
    assert_equal({required: true}, mappings[:_routing])
  end

  # searching with the routing key used at index time finds the document
  def test_correct_node
    store_names ["Dollar Tree"], Store
    assert_search "*", ["Dollar Tree"], {routing: "Dollar Tree"}, Store
  end

  # a different routing value still finds the document here
  # (presumably because the test index has a single shard — TODO confirm)
  def test_incorrect_node
    store_names ["Dollar Tree"], Store
    assert_search "*", ["Dollar Tree"], {routing: "Boom"}, Store
  end

  # routing should flow through async callback jobs on index and destroy
  def test_async
    with_options({routing: true, callbacks: :async}, Song) do
      store_names ["Dollar Tree"], Song
      Song.destroy_all
    end
  end

  # routing should flow through queue-based callbacks as well
  def test_queue
    with_options({routing: true, callbacks: :queue}, Song) do
      store_names ["Dollar Tree"], Song
      Song.destroy_all
      Searchkick::ProcessQueueJob.perform_later(class_name: "Song")
    end
  end
end
| ruby | MIT | 107d270a0d2d7e1935dc7fee1ea0305edb5e4b3b | 2026-01-04T15:40:12.176737Z | false |
ankane/searchkick | https://github.com/ankane/searchkick/blob/107d270a0d2d7e1935dc7fee1ea0305edb5e4b3b/test/test_helper.rb | test/test_helper.rb | require "bundler/setup"
Bundler.require(:default)
require "minitest/autorun"
require "active_support/notifications"

ENV["RACK_ENV"] = "test"

# for reloadable synonyms
if ENV["CI"]
  ENV["ES_PATH"] ||= File.join(ENV["HOME"], Searchkick.opensearch? ? "opensearch" : "elasticsearch", Searchkick.server_version)
end

# VERBOSE=1 sends test logging to stdout; otherwise it is discarded
$logger = ActiveSupport::Logger.new(ENV["VERBOSE"] ? STDOUT : nil)

# LOG_TRANSPORT=1 logs raw client requests/responses; the transport
# object nesting differs between client gem versions
if ENV["LOG_TRANSPORT"]
  transport_logger = ActiveSupport::Logger.new(STDOUT)
  if Searchkick.client.transport.respond_to?(:transport)
    Searchkick.client.transport.transport.logger = transport_logger
  else
    Searchkick.client.transport.logger = transport_logger
  end
end

Searchkick.search_timeout = 5
Searchkick.index_suffix = ENV["TEST_ENV_NUMBER"] # for parallel tests

puts "Running against #{Searchkick.opensearch? ? "OpenSearch" : "Elasticsearch"} #{Searchkick.server_version}"

I18n.config.enforce_available_locales = true

ActiveJob::Base.logger = $logger
ActiveJob::Base.queue_adapter = :test

ActiveSupport::LogSubscriber.logger = ActiveSupport::Logger.new(STDOUT) if ENV["VERBOSE"]

# ORM-specific setup: Mongoid when loaded, ActiveRecord otherwise
if defined?(Mongoid)
  require_relative "support/mongoid"
else
  require_relative "support/activerecord"
end
require_relative "support/redis"

# models
Dir["#{__dir__}/models/*"].each do |file|
  require file
end

require_relative "support/helpers"
| ruby | MIT | 107d270a0d2d7e1935dc7fee1ea0305edb5e4b3b | 2026-01-04T15:40:12.176737Z | false |
ankane/searchkick | https://github.com/ankane/searchkick/blob/107d270a0d2d7e1935dc7fee1ea0305edb5e4b3b/test/reindex_v2_job_test.rb | test/reindex_v2_job_test.rb | require_relative "test_helper"
# Tests for the background job that (re)indexes a single record by
# class name and id, used by async callbacks.
class ReindexV2JobTest < Minitest::Test
  def test_create
    # create without callbacks, so the record is not indexed yet
    product = Searchkick.callbacks(false) { Product.create!(name: "Boom") }
    Product.searchkick_index.refresh
    assert_search "*", []
    # running the job indexes the record
    Searchkick::ReindexV2Job.perform_now("Product", product.id.to_s)
    Product.searchkick_index.refresh
    assert_search "*", ["Boom"]
  end

  def test_destroy
    product = Searchkick.callbacks(false) { Product.create!(name: "Boom") }
    Product.reindex
    assert_search "*", ["Boom"]
    # destroy without callbacks, so the stale document stays indexed
    Searchkick.callbacks(false) { product.destroy }
    # the job should notice the record is gone and remove the document
    Searchkick::ReindexV2Job.perform_now("Product", product.id.to_s)
    Product.searchkick_index.refresh
    assert_search "*", []
  end
end
| ruby | MIT | 107d270a0d2d7e1935dc7fee1ea0305edb5e4b3b | 2026-01-04T15:40:12.176737Z | false |
ankane/searchkick | https://github.com/ankane/searchkick/blob/107d270a0d2d7e1935dc7fee1ea0305edb5e4b3b/test/default_scope_test.rb | test/default_scope_test.rb | require_relative "test_helper"
# Tests for models with a default scope. Band presumably has a
# default scope restricting to active records — confirm in test/models.
class DefaultScopeTest < Minitest::Test
  def setup
    Band.destroy_all
  end

  # reindex should only pick up records in the default scope
  def test_reindex
    store [
      {name: "Test", active: true},
      {name: "Test 2", active: false}
    ], reindex: false
    Band.reindex
    assert_search "*", ["Test"], {load: false}
  end

  # search works on the model class but raises on a relation
  def test_search
    Band.reindex
    Band.search("*") # test works
    error = assert_raises(Searchkick::Error) do
      Band.all.search("*")
    end
    assert_equal "search must be called on model, not relation", error.message
  end

  # used by the shared test helpers to pick the model under test
  def default_model
    Band
  end
end
| ruby | MIT | 107d270a0d2d7e1935dc7fee1ea0305edb5e4b3b | 2026-01-04T15:40:12.176737Z | false |
ankane/searchkick | https://github.com/ankane/searchkick/blob/107d270a0d2d7e1935dc7fee1ea0305edb5e4b3b/test/index_options_test.rb | test/index_options_test.rb | require_relative "test_helper"
# Tests for per-model index options (analysis settings and index
# naming), applied via the with_options helper on the Song model.
class IndexOptionsTest < Minitest::Test
  def setup
    Song.destroy_all
  end

  def test_case_sensitive
    with_options({case_sensitive: true}) do
      store_names ["Test", "test"]
      assert_search "test", ["test"], {misspellings: false}
    end
  end

  # stem: false disables stemming entirely
  def test_no_stemming
    with_options({stem: false}) do
      store_names ["milk", "milks"]
      assert_search "milks", ["milks"], {misspellings: false}
    end
  end

  # without stem_exclusion, "anime" stems to "anim" and collides with "animals"
  def test_no_stem_exclusion
    with_options({}) do
      store_names ["animals", "anime"]
      assert_search "animals", ["animals", "anime"], {misspellings: false}
      assert_search "anime", ["animals", "anime"], {misspellings: false}
      assert_equal ["anim"], Song.searchkick_index.tokens("anime", analyzer: "searchkick_index")
      assert_equal ["anim"], Song.searchkick_index.tokens("anime", analyzer: "searchkick_search2")
    end
  end

  # stem_exclusion keeps listed words unstemmed in both index and search analyzers
  def test_stem_exclusion
    with_options({stem_exclusion: ["anime"]}) do
      store_names ["animals", "anime"]
      assert_search "animals", ["animals"], {misspellings: false}
      assert_search "anime", ["anime"], {misspellings: false}
      assert_equal ["anime"], Song.searchkick_index.tokens("anime", analyzer: "searchkick_index")
      assert_equal ["anime"], Song.searchkick_index.tokens("anime", analyzer: "searchkick_search2")
    end
  end

  def test_no_stemmer_override
    with_options({}) do
      store_names ["animals", "animations"]
      assert_search "animals", ["animals", "animations"], {misspellings: false}
      assert_search "animations", ["animals", "animations"], {misspellings: false}
      assert_equal ["anim"], Song.searchkick_index.tokens("animations", analyzer: "searchkick_index")
      assert_equal ["anim"], Song.searchkick_index.tokens("animations", analyzer: "searchkick_search2")
    end
  end

  # stemmer_override forces a specific stem for listed words
  def test_stemmer_override
    with_options({stemmer_override: ["animations => animat"]}) do
      store_names ["animals", "animations"]
      assert_search "animals", ["animals"], {misspellings: false}
      assert_search "animations", ["animations"], {misspellings: false}
      assert_equal ["animat"], Song.searchkick_index.tokens("animations", analyzer: "searchkick_index")
      assert_equal ["animat"], Song.searchkick_index.tokens("animations", analyzer: "searchkick_search2")
    end
  end

  # special_characters: false disables ASCII folding (jalapeño != jalapeno)
  def test_special_characters
    with_options({special_characters: false}) do
      store_names ["jalapeño"]
      assert_search "jalapeno", [], {misspellings: false}
    end
  end

  def test_index_name
    with_options({index_name: "songs_v2"}) do
      assert_equal "songs_v2", Song.searchkick_index.name
    end
  end

  # index_name may be a callable
  def test_index_name_callable
    with_options({index_name: -> { "songs_v2" }}) do
      assert_equal "songs_v2", Song.searchkick_index.name
    end
  end

  def test_index_prefix
    with_options({index_prefix: "hello"}) do
      assert_equal "hello_songs_test", Song.searchkick_index.name
    end
  end

  def test_index_prefix_callable
    with_options({index_prefix: -> { "hello" }}) do
      assert_equal "hello_songs_test", Song.searchkick_index.name
    end
  end

  # used by the shared test helpers to pick the model under test
  def default_model
    Song
  end
end
| ruby | MIT | 107d270a0d2d7e1935dc7fee1ea0305edb5e4b3b | 2026-01-04T15:40:12.176737Z | false |
ankane/searchkick | https://github.com/ankane/searchkick/blob/107d270a0d2d7e1935dc7fee1ea0305edb5e4b3b/test/multi_tenancy_test.rb | test/multi_tenancy_test.rb | require_relative "test_helper"
# Tests for multi-tenancy via the Apartment gem; skipped entirely
# when Apartment is not loaded.
class MultiTenancyTest < Minitest::Test
  def setup
    skip unless defined?(Apartment)
  end

  # each tenant should only see its own records
  def test_basic
    Apartment::Tenant.switch!("tenant1")
    store_names ["Product A"]
    Apartment::Tenant.switch!("tenant2")
    store_names ["Product B"]
    Apartment::Tenant.switch!("tenant1")
    assert_search "product", ["Product A"], {load: false}
    Apartment::Tenant.switch!("tenant2")
    assert_search "product", ["Product B"], {load: false}
  end

  def teardown
    # don't leave a tenant switched on for other tests
    Apartment::Tenant.reset if defined?(Apartment)
  end

  # used by the shared test helpers to pick the model under test
  def default_model
    Tenant
  end
end
| ruby | MIT | 107d270a0d2d7e1935dc7fee1ea0305edb5e4b3b | 2026-01-04T15:40:12.176737Z | false |
ankane/searchkick | https://github.com/ankane/searchkick/blob/107d270a0d2d7e1935dc7fee1ea0305edb5e4b3b/test/should_index_test.rb | test/should_index_test.rb | require_relative "test_helper"
# Tests for should_index?, which controls whether a record gets
# indexed. The Product test model presumably excludes records named
# "DO NOT INDEX" — confirm in test/models.
class ShouldIndexTest < Minitest::Test
  def test_basic
    store_names ["INDEX", "DO NOT INDEX"]
    assert_search "index", ["INDEX"]
  end

  def test_default_true
    assert Store.new.should_index?
  end

  # a record flipping to indexable should appear after save
  def test_change_to_true
    store_names ["DO NOT INDEX"]
    assert_search "index", []
    product = Product.first
    product.name = "INDEX"
    product.save!
    Product.searchkick_index.refresh
    assert_search "index", ["INDEX"]
  end

  # a record flipping to non-indexable should be removed after save
  def test_change_to_false
    store_names ["INDEX"]
    assert_search "index", ["INDEX"]
    product = Product.first
    product.name = "DO NOT INDEX"
    product.save!
    Product.searchkick_index.refresh
    assert_search "index", []
  end

  # relation reindex should also respect should_index? and remove
  # documents that are no longer indexable
  def test_bulk
    store_names ["INDEX"]
    product = Product.first
    product.name = "DO NOT INDEX"
    Searchkick.callbacks(false) do
      product.save!
    end
    Product.where(id: product.id).reindex
    Product.searchkick_index.refresh
    assert_search "index", []
  end
end
| ruby | MIT | 107d270a0d2d7e1935dc7fee1ea0305edb5e4b3b | 2026-01-04T15:40:12.176737Z | false |
ankane/searchkick | https://github.com/ankane/searchkick/blob/107d270a0d2d7e1935dc7fee1ea0305edb5e4b3b/test/query_test.rb | test/query_test.rb | require_relative "test_helper"
# Tests for assorted query options: custom body, timeouts, request
# params, opaque ids, debug output, body_options merging, nested
# fields, includes, and scope_results.
class QueryTest < Minitest::Test
  # a custom body replaces the generated query entirely
  def test_basic
    store_names ["Milk", "Apple"]
    query = Product.search("milk", body: {query: {match_all: {}}})
    assert_equal ["Apple", "Milk"], query.map(&:name).sort
  end

  def test_with_uneffective_min_score
    store_names ["Milk", "Milk2"]
    assert_search "milk", ["Milk", "Milk2"], body_options: {min_score: 0.0001}
  end

  # default request timeout is search_timeout (5s) + 1s, serialized in ms
  def test_default_timeout
    assert_equal "6000ms", Product.search("*").body[:timeout]
  end

  def test_timeout_override
    assert_equal "1s", Product.search("*", body_options: {timeout: "1s"}).body[:timeout]
  end

  def test_request_params
    assert_equal "dfs_query_then_fetch", Product.search("*", request_params: {search_type: "dfs_query_then_fetch"}).params[:search_type]
  end

  # opaque_id should be accepted on every request type (option, relation,
  # scroll, multi_search); slow log is enabled so the ids are exercised
  def test_opaque_id
    store_names ["Milk"]
    set_search_slow_log(0)
    Product.search("*", opaque_id: "search").load
    Product.search("*").opaque_id("search_relation").load
    Product.search("*", scroll: "5s", opaque_id: "scroll").scroll { }
    Searchkick.multi_search([Product.search("*")], opaque_id: "multi_search")
  ensure
    set_search_slow_log(-1)
  end

  # debug: true should print diagnostics without printing an error
  def test_debug
    store_names ["Milk"]
    out, _ = capture_io do
      assert_search "milk", ["Milk"], debug: true
    end
    refute_includes out, "Error"
  end

  def test_big_decimal
    store [
      {name: "Product", latitude: 80.0}
    ]
    assert_search "product", ["Product"], where: {latitude: {gt: 79}}
  end

  # body_options
  def test_body_options_should_merge_into_body
    query = Product.search("*", body_options: {min_score: 1.0})
    assert_equal 1.0, query.body[:min_score]
  end

  # nested
  def test_nested_search
    setup_speaker
    store [{name: "Product A", aisle: {"id" => 1, "name" => "Frozen"}}], Speaker
    assert_search "frozen", ["Product A"], {fields: ["aisle.name"]}, Speaker
  end

  # other tests
  def test_includes
    skip unless activerecord?
    store_names ["Product A"]
    assert Product.search("product", includes: [:store]).first.association(:store).loaded?
    assert Product.search("product").includes(:store).first.association(:store).loaded?
  end

  # model_includes maps each model to its own eager-loaded associations
  def test_model_includes
    skip unless activerecord?
    store_names ["Product A"]
    store_names ["Store A"], Store
    associations = {Product => [:store], Store => [:products]}
    result = Searchkick.search("*", models: [Product, Store], model_includes: associations)
    assert_equal 2, result.length
    result.group_by(&:class).each_pair do |model, records|
      assert records.first.association(associations[model].first).loaded?
    end
  end

  # scope_results narrows loaded records; dropping indexed records
  # triggers the missing-records warning
  def test_scope_results
    skip unless activerecord?
    store_names ["Product A", "Product B"]
    assert_warns "Records in search index do not exist in database" do
      assert_search "product", ["Product A"], scope_results: ->(r) { r.where(name: "Product A") }
    end
  end

  def test_scope_results_relation
    skip unless activerecord?
    store_names ["Product A", "Product B"]
    assert_warns "Records in search index do not exist in database" do
      assert_search_relation ["Product A"], Product.search("product").scope_results(->(r) { r.where(name: "Product A") })
    end
  end

  private

  # toggle the server's search slow log threshold (0 = log everything,
  # -1 = disabled)
  def set_search_slow_log(value)
    settings = {
      "index.search.slowlog.threshold.query.warn" => value
    }
    Product.searchkick_index.update_settings(settings)
  end
end
| ruby | MIT | 107d270a0d2d7e1935dc7fee1ea0305edb5e4b3b | 2026-01-04T15:40:12.176737Z | false |
ankane/searchkick | https://github.com/ankane/searchkick/blob/107d270a0d2d7e1935dc7fee1ea0305edb5e4b3b/test/suggest_test.rb | test/suggest_test.rb | require_relative "test_helper"
class SuggestTest < Minitest::Test
def setup
super
Product.reindex
end
def test_basic
store_names ["Great White Shark", "Hammerhead Shark", "Tiger Shark"]
assert_suggest "How Big is a Tigre Shar", "how big is a tiger shark", fields: [:name]
end
def test_perfect
store_names ["Tiger Shark", "Great White Shark"]
assert_suggest "Tiger Shark", nil, fields: [:name] # no correction
end
def test_phrase
store_names ["Big Tiger Shark", "Tiger Sharp Teeth", "Tiger Sharp Mind"]
assert_suggest "How to catch a big tiger shar", "how to catch a big tiger shark", fields: [:name]
end
def test_empty
assert_suggest "hi", nil
end
def test_without_option
store_names ["hi"] # needed to prevent ElasticsearchException - seed 668
assert_raises(RuntimeError) { Product.search("hi").suggestions }
end
def test_multiple_fields
store [
{name: "Shark", color: "Sharp"},
{name: "Shark", color: "Sharp"}
]
assert_suggest_all "shar", ["shark", "sharp"]
end
def test_multiple_fields_highest_score_first
store [
{name: "Tiger Shark", color: "Sharp"}
]
assert_suggest "tiger shar", "tiger shark"
end
def test_multiple_fields_same_value
store [
{name: "Shark", color: "Shark"}
]
assert_suggest_all "shar", ["shark"]
end
def test_fields_option
store [
{name: "Shark", color: "Sharp"}
]
assert_suggest_all "shar", ["shark"], fields: [:name]
end
def test_fields_option_multiple
store [
{name: "Shark"}
]
assert_suggest "shar", "shark", fields: [:name, :unknown]
end
def test_fields_partial_match
store_names ["Great White Shark", "Hammerhead Shark", "Tiger Shark"]
assert_suggest "How Big is a Tigre Shar", "how big is a tiger shark", fields: [{name: :word_start}]
end
def test_fields_partial_match_boost
store_names ["Great White Shark", "Hammerhead Shark", "Tiger Shark"]
assert_suggest "How Big is a Tigre Shar", "how big is a tiger shark", fields: [{"name^2" => :word_start}]
end
def test_multiple_models
skip # flaky test
store_names ["Great White Shark", "Hammerhead Shark", "Tiger Shark"]
assert_equal "how big is a tiger shark", Searchkick.search("How Big is a Tigre Shar", suggest: [:name], fields: [:name]).suggestions.first
end
def test_multiple_models_no_fields
store_names ["Great White Shark", "Hammerhead Shark", "Tiger Shark"]
assert_raises(ArgumentError) { Searchkick.search("How Big is a Tigre Shar", suggest: true) }
end
def test_star
assert_equal [], Product.search("*", suggest: true).suggestions
end
protected
def assert_suggest(term, expected, options = {})
result = Product.search(term, suggest: true, **options).suggestions.first
if expected.nil?
assert_nil result
else
assert_equal expected, result
end
end
# any order
def assert_suggest_all(term, expected, options = {})
assert_equal expected.sort, Product.search(term, suggest: true, **options).suggestions.sort
end
end
| ruby | MIT | 107d270a0d2d7e1935dc7fee1ea0305edb5e4b3b | 2026-01-04T15:40:12.176737Z | false |
ankane/searchkick | https://github.com/ankane/searchkick/blob/107d270a0d2d7e1935dc7fee1ea0305edb5e4b3b/test/search_synonyms_test.rb | test/search_synonyms_test.rb | require_relative "test_helper"
class SearchSynonymsTest < Minitest::Test
def setup
super
setup_speaker
end
def test_bleach
store_names ["Clorox Bleach", "Kroger Bleach"]
assert_search "clorox", ["Clorox Bleach", "Kroger Bleach"]
end
def test_burger_buns
store_names ["Hamburger Buns"]
assert_search "burger buns", ["Hamburger Buns"]
end
def test_bandaids
store_names ["Band-Aid", "Kroger 12-Pack Bandages"]
assert_search "bandaids", ["Band-Aid", "Kroger 12-Pack Bandages"]
end
def test_reverse
store_names ["Hamburger"]
assert_search "burger", ["Hamburger"]
end
def test_not_stemmed
store_names ["Burger"]
assert_search "hamburgers", []
assert_search "hamburger", ["Burger"]
end
def test_word_start
store_names ["Clorox Bleach", "Kroger Bleach"]
assert_search "clorox", ["Clorox Bleach", "Kroger Bleach"], {match: :word_start}
end
def test_directional
store_names ["Lightbulb", "Green Onions", "Led"]
assert_search "led", ["Lightbulb", "Led"]
assert_search "Lightbulb", ["Lightbulb"]
assert_search "Halogen Lamp", ["Lightbulb"]
assert_search "onions", ["Green Onions"]
end
def test_case
store_names ["Uppercase"]
assert_search "lowercase", ["Uppercase"]
end
def test_multiple_words
store_names ["USA"]
assert_search "United States of America", ["USA"]
assert_search "usa", ["USA"]
assert_search "United States", []
end
def test_multiple_words_expanded
store_names ["United States of America"]
assert_search "usa", ["United States of America"]
assert_search "United States of America", ["United States of America"]
assert_search "United States", ["United States of America"] # no synonyms used
end
def test_reload_synonyms
Speaker.searchkick_index.reload_synonyms
end
def test_reload_synonyms_better
skip unless ENV["ES_PATH"]
write_synonyms("test,hello")
with_options({search_synonyms: "synonyms.txt"}, Speaker) do
store_names ["Hello", "Goodbye"]
assert_search "test", ["Hello"]
write_synonyms("test,goodbye")
assert_search "test", ["Hello"]
Speaker.searchkick_index.reload_synonyms
assert_search "test", ["Goodbye"]
end
ensure
Speaker.reindex
end
def write_synonyms(contents)
File.write("#{ENV.fetch("ES_PATH")}/config/synonyms.txt", contents)
end
def default_model
Speaker
end
end
| ruby | MIT | 107d270a0d2d7e1935dc7fee1ea0305edb5e4b3b | 2026-01-04T15:40:12.176737Z | false |
ankane/searchkick | https://github.com/ankane/searchkick/blob/107d270a0d2d7e1935dc7fee1ea0305edb5e4b3b/test/conversions_test.rb | test/conversions_test.rb | require_relative "test_helper"
class ConversionsTest < Minitest::Test
def setup
super
setup_speaker
end
def test_v1
store [
{name: "Tomato A", conversions: {"tomato" => 1}},
{name: "Tomato B", conversions: {"tomato" => 2}},
{name: "Tomato C", conversions: {"tomato" => 3}}
]
assert_order "tomato", ["Tomato C", "Tomato B", "Tomato A"]
assert_order "TOMATO", ["Tomato C", "Tomato B", "Tomato A"]
assert_equal_scores "tomato", conversions_v1: false
end
def test_v1_case
store [
{name: "Tomato A", conversions: {"tomato" => 1, "TOMATO" => 1, "tOmAtO" => 1}},
{name: "Tomato B", conversions: {"tomato" => 2}}
]
assert_order "tomato", ["Tomato A", "Tomato B"]
end
def test_v1_case_sensitive
with_options(case_sensitive: true) do
store [
{name: "Tomato A", conversions: {"Tomato" => 1, "TOMATO" => 1, "tOmAtO" => 1}},
{name: "Tomato B", conversions: {"Tomato" => 2}}
]
assert_order "Tomato", ["Tomato B", "Tomato A"]
end
ensure
Product.reindex
end
def test_v1_term
store [
{name: "Tomato A", conversions: {"tomato" => 1, "soup" => 3}},
{name: "Tomato B", conversions: {"tomato" => 2, "soup" => 2}},
{name: "Tomato C", conversions: {"tomato" => 3, "soup" => 1}}
]
assert_order "tomato", ["Tomato A", "Tomato B", "Tomato C"], conversions_term: "soup"
end
def test_v1_weight
Product.reindex
store [
{name: "Product Boost", orders_count: 20},
{name: "Product Conversions", conversions: {"product" => 10}}
]
assert_order "product", ["Product Conversions", "Product Boost"], boost: "orders_count"
end
def test_v1_multiple_conversions
store [
{name: "Speaker A", conversions_a: {"speaker" => 1}, conversions_b: {"speaker" => 6}},
{name: "Speaker B", conversions_a: {"speaker" => 2}, conversions_b: {"speaker" => 5}},
{name: "Speaker C", conversions_a: {"speaker" => 3}, conversions_b: {"speaker" => 4}}
], Speaker
assert_equal_scores "speaker", {conversions_v1: false}, Speaker
assert_equal_scores "speaker", {}, Speaker
assert_equal_scores "speaker", {conversions_v1: ["conversions_a", "conversions_b"]}, Speaker
assert_equal_scores "speaker", {conversions_v1: ["conversions_b", "conversions_a"]}, Speaker
assert_order "speaker", ["Speaker C", "Speaker B", "Speaker A"], {conversions_v1: "conversions_a"}, Speaker
assert_order "speaker", ["Speaker A", "Speaker B", "Speaker C"], {conversions_v1: "conversions_b"}, Speaker
end
def test_v1_multiple_conversions_with_boost_term
store [
{name: "Speaker A", conversions_a: {"speaker" => 4, "speaker_1" => 1}},
{name: "Speaker B", conversions_a: {"speaker" => 3, "speaker_1" => 2}},
{name: "Speaker C", conversions_a: {"speaker" => 2, "speaker_1" => 3}},
{name: "Speaker D", conversions_a: {"speaker" => 1, "speaker_1" => 4}}
], Speaker
assert_order "speaker", ["Speaker A", "Speaker B", "Speaker C", "Speaker D"], {conversions_v1: "conversions_a"}, Speaker
assert_order "speaker", ["Speaker D", "Speaker C", "Speaker B", "Speaker A"], {conversions_v1: "conversions_a", conversions_term: "speaker_1"}, Speaker
end
def test_v2
store [
{name: "Tomato A", conversions_v2: {"tomato" => 1}},
{name: "Tomato B", conversions_v2: {"tomato" => 2}},
{name: "Tomato C", conversions_v2: {"tomato" => 3}}
]
assert_order "tomato", ["Tomato C", "Tomato B", "Tomato A"], conversions_v2: true
assert_order "TOMATO", ["Tomato C", "Tomato B", "Tomato A"], conversions_v2: true
assert_equal_scores "tomato", conversions_v2: false
end
def test_v2_case
store [
{name: "Tomato A", conversions_v2: {"tomato" => 1, "TOMATO" => 1, "tOmAtO" => 1}},
{name: "Tomato B", conversions_v2: {"tomato" => 2}}
]
assert_order "tomato", ["Tomato A", "Tomato B"], conversions_v2: true
end
def test_v2_case_sensitive
with_options(case_sensitive: true) do
store [
{name: "Tomato A", conversions_v2: {"Tomato" => 1, "TOMATO" => 1, "tOmAtO" => 1}},
{name: "Tomato B", conversions_v2: {"Tomato" => 2}}
]
assert_order "Tomato", ["Tomato B", "Tomato A"], conversions_v2: true
end
ensure
Product.reindex
end
def test_v2_term
store [
{name: "Tomato A", conversions_v2: {"tomato" => 1, "soup" => 3}},
{name: "Tomato B", conversions_v2: {"tomato" => 2, "soup" => 2}},
{name: "Tomato C", conversions_v2: {"tomato" => 3, "soup" => 1}}
]
assert_order "tomato", ["Tomato A", "Tomato B", "Tomato C"], conversions_v2: {term: "soup"}
assert_order "tomato", ["Tomato A", "Tomato B", "Tomato C"], conversions_v2: true, conversions_term: "soup"
end
def test_v2_weight
Product.reindex
store [
{name: "Product Boost", orders_count: 20},
{name: "Product Conversions", conversions_v2: {"product" => 10}}
]
assert_order "product", ["Product Conversions", "Product Boost"], conversions_v2: true, boost: "orders_count"
end
def test_v2_space
store [
{name: "Tomato A", conversions_v2: {"tomato juice" => 1}},
{name: "Tomato B", conversions_v2: {"tomato juice" => 2}},
{name: "Tomato C", conversions_v2: {"tomato juice" => 3}}
]
assert_order "tomato", ["Tomato C", "Tomato B", "Tomato A"], conversions_v2: {term: "tomato juice"}
end
def test_v2_dot
store [
{name: "Tomato A", conversions_v2: {"tomato.juice" => 1}},
{name: "Tomato B", conversions_v2: {"tomato.juice" => 2}},
{name: "Tomato C", conversions_v2: {"tomato.juice" => 3}}
]
assert_order "tomato", ["Tomato C", "Tomato B", "Tomato A"], conversions_v2: {term: "tomato.juice"}
end
def test_v2_unicode
store [
{name: "Tomato A", conversions_v2: {"喰らう" => 1}},
{name: "Tomato B", conversions_v2: {"喰らう" => 2}},
{name: "Tomato C", conversions_v2: {"喰らう" => 3}}
]
assert_order "tomato", ["Tomato C", "Tomato B", "Tomato A"], conversions_v2: {term: "喰らう"}
end
def test_v2_score
store [
{name: "Tomato A", conversions: {"tomato" => 1}, conversions_v2: {"tomato" => 1}},
{name: "Tomato B", conversions: {"tomato" => 2}, conversions_v2: {"tomato" => 2}},
{name: "Tomato C", conversions: {"tomato" => 3}, conversions_v2: {"tomato" => 3}}
]
scores = Product.search("tomato", conversions_v2: false, load: false).map(&:_score)
scores_v2 = Product.search("tomato", conversions_v1: false, conversions_v2: true, load: false).map(&:_score)
assert_equal scores, scores_v2
end
def test_v2_factor
store [
{name: "Tomato A", conversions: {"tomato" => 1}, conversions_v2: {"tomato" => 1}},
{name: "Tomato B", conversions: {"tomato" => 2}, conversions_v2: {"tomato" => 2}},
{name: "Tomato C", conversions: {"tomato" => 3}, conversions_v2: {"tomato" => 3}}
]
scores = Product.search("tomato", conversions_v1: false, conversions_v2: true, load: false).map(&:_score)
scores2 = Product.search("tomato", conversions_v1: false, conversions_v2: {factor: 3}, load: false).map(&:_score)
diffs = scores.zip(scores2).map { |a, b| b - a }
assert_in_delta 6, diffs[0]
assert_in_delta 4, diffs[1]
assert_in_delta 2, diffs[2]
end
def test_v2_no_tokenization
store [
{name: "Tomato A"},
{name: "Tomato B", conversions_v2: {"tomato juice" => 2}},
{name: "Tomato C", conversions_v2: {"tomato vine" => 3}}
]
assert_equal_scores "tomato", conversions_v2: true
end
def test_v2_max_conversions
conversions = 66000.times.to_h { |i| ["term#{i}", 1] }
store [{name: "Tomato A", conversions_v2: conversions}]
conversions.merge!(1000.times.to_h { |i| ["term#{conversions.size + i}", 1] })
assert_raises(Searchkick::ImportError) do
store [{name: "Tomato B", conversions_v2: conversions}]
end
end
def test_v2_max_length
store [{name: "Tomato A", conversions_v2: {"a"*32766 => 1}}]
assert_raises(Searchkick::ImportError) do
store [{name: "Tomato B", conversions_v2: {"a"*32767 => 1}}]
end
end
def test_v2_zero
error = assert_raises(Searchkick::ImportError) do
store [{name: "Tomato A", conversions_v2: {"tomato" => 0}}]
end
assert_match "must be a positive normal float", error.message
end
def test_v2_partial_reindex
store [
{name: "Tomato A", conversions_v2: {"tomato" => 1}},
{name: "Tomato B", conversions_v2: {"tomato" => 2}},
{name: "Tomato C", conversions_v2: {"tomato" => 3}}
]
Product.reindex(:search_name, refresh: true)
assert_order "tomato", ["Tomato C", "Tomato B", "Tomato A"], conversions_v2: true
end
end
| ruby | MIT | 107d270a0d2d7e1935dc7fee1ea0305edb5e4b3b | 2026-01-04T15:40:12.176737Z | false |
ankane/searchkick | https://github.com/ankane/searchkick/blob/107d270a0d2d7e1935dc7fee1ea0305edb5e4b3b/test/match_test.rb | test/match_test.rb | require_relative "test_helper"
class MatchTest < Minitest::Test
# exact
def test_match
store_names ["Whole Milk", "Fat Free Milk", "Milk"]
assert_search "milk", ["Milk", "Whole Milk", "Fat Free Milk"]
end
def test_case
store_names ["Whole Milk", "Fat Free Milk", "Milk"]
assert_search "MILK", ["Milk", "Whole Milk", "Fat Free Milk"]
end
def test_cheese_space_in_index
store_names ["Pepper Jack Cheese Skewers"]
assert_search "pepperjack cheese skewers", ["Pepper Jack Cheese Skewers"]
end
# def test_cheese_space_in_query
# store_names ["Pepperjack Cheese Skewers"]
# assert_search "pepper jack cheese skewers", ["Pepperjack Cheese Skewers"]
# end
def test_middle_token
store_names ["Dish Washer Amazing Organic Soap"]
assert_search "dish soap", ["Dish Washer Amazing Organic Soap"]
end
def test_middle_token_wine
store_names ["Beringer Wine Founders Estate Chardonnay"]
assert_search "beringer chardonnay", ["Beringer Wine Founders Estate Chardonnay"]
end
def test_percent
store_names ["1% Milk", "Whole Milk"]
assert_search "1%", ["1% Milk"]
end
# ascii
def test_jalapenos
store_names ["Jalapeño"]
assert_search "jalapeno", ["Jalapeño"]
end
def test_swedish
store_names ["ÅÄÖ"]
assert_search "aao", ["ÅÄÖ"]
end
# stemming
def test_stemming
store_names ["Whole Milk", "Fat Free Milk", "Milk"]
assert_search "milks", ["Milk", "Whole Milk", "Fat Free Milk"]
assert_search "milks", ["Milk", "Whole Milk", "Fat Free Milk"], misspellings: false
end
def test_stemming_tokens
assert_equal ["milk"], Product.searchkick_index.tokens("milks", analyzer: "searchkick_search")
assert_equal ["milk"], Product.searchkick_index.tokens("milks", analyzer: "searchkick_search2")
end
# fuzzy
def test_misspelling_sriracha
store_names ["Sriracha"]
assert_search "siracha", ["Sriracha"]
end
def test_misspelling_multiple
store_names ["Greek Yogurt", "Green Onions"]
assert_search "greed", ["Greek Yogurt", "Green Onions"]
end
def test_short_word
store_names ["Finn"]
assert_search "fin", ["Finn"]
end
def test_edit_distance_two
store_names ["Bingo"]
assert_search "bin", []
assert_search "bingooo", []
assert_search "mango", []
end
def test_edit_distance_one
store_names ["Bingo"]
assert_search "bing", ["Bingo"]
assert_search "bingoo", ["Bingo"]
assert_search "ringo", ["Bingo"]
end
def test_edit_distance_long_word
store_names ["thisisareallylongword"]
assert_search "thisisareallylongwor", ["thisisareallylongword"] # missing letter
assert_search "thisisareelylongword", [] # edit distance = 2
end
def test_misspelling_tabasco
store_names ["Tabasco"]
assert_search "tobasco", ["Tabasco"]
end
def test_misspelling_zucchini
store_names ["Zucchini"]
assert_search "zuchini", ["Zucchini"]
end
def test_misspelling_ziploc
store_names ["Ziploc"]
assert_search "zip lock", ["Ziploc"]
end
def test_misspelling_zucchini_transposition
store_names ["zucchini"]
assert_search "zuccihni", ["zucchini"]
# need to specify field
# as transposition option isn't supported for multi_match queries
# until Elasticsearch 6.1
assert_search "zuccihni", [], misspellings: {transpositions: false}, fields: [:name]
end
def test_misspelling_lasagna
store_names ["lasagna"]
assert_search "lasanga", ["lasagna"], misspellings: {transpositions: true}
assert_search "lasgana", ["lasagna"], misspellings: {transpositions: true}
assert_search "lasaang", [], misspellings: {transpositions: true} # triple transposition, shouldn't work
assert_search "lsagana", [], misspellings: {transpositions: true} # triple transposition, shouldn't work
end
def test_misspelling_lasagna_pasta
store_names ["lasagna pasta"]
assert_search "lasanga", ["lasagna pasta"], misspellings: {transpositions: true}
assert_search "lasanga pasta", ["lasagna pasta"], misspellings: {transpositions: true}
assert_search "lasanga pasat", ["lasagna pasta"], misspellings: {transpositions: true} # both words misspelled with a transposition should still work
end
def test_misspellings_word_start
store_names ["Sriracha"]
assert_search "siracha", ["Sriracha"], fields: [{name: :word_start}]
end
# spaces
def test_spaces_in_field
store_names ["Red Bull"]
assert_search "redbull", ["Red Bull"], misspellings: false
end
def test_spaces_in_query
store_names ["Dishwasher"]
assert_search "dish washer", ["Dishwasher"], misspellings: false
end
def test_spaces_three_words
store_names ["Dish Washer Soap", "Dish Washer"]
assert_search "dish washer soap", ["Dish Washer Soap"]
end
def test_spaces_stemming
store_names ["Almond Milk"]
assert_search "almondmilks", ["Almond Milk"]
end
# other
def test_all
store_names ["Product A", "Product B"]
assert_search "*", ["Product A", "Product B"]
end
def test_no_arguments
store_names []
assert_equal [], Product.search.to_a
end
def test_no_term
store_names ["Product A"]
assert_equal ["Product A"], Product.search(where: {name: "Product A"}).map(&:name)
end
def test_to_be_or_not_to_be
store_names ["to be or not to be"]
assert_search "to be", ["to be or not to be"]
end
def test_apostrophe
store_names ["Ben and Jerry's"]
assert_search "ben and jerrys", ["Ben and Jerry's"]
end
def test_apostrophe_search
store_names ["Ben and Jerrys"]
assert_search "ben and jerry's", ["Ben and Jerrys"]
end
def test_ampersand_index
store_names ["Ben & Jerry's"]
assert_search "ben and jerrys", ["Ben & Jerry's"]
end
def test_ampersand_search
store_names ["Ben and Jerry's"]
assert_search "ben & jerrys", ["Ben and Jerry's"]
end
def test_phrase
store_names ["Fresh Honey", "Honey Fresh"]
assert_search "fresh honey", ["Fresh Honey"], match: :phrase
end
def test_phrase_again
store_names ["Social entrepreneurs don't have it easy raising capital"]
assert_search "social entrepreneurs don't have it easy raising capital", ["Social entrepreneurs don't have it easy raising capital"], match: :phrase
end
def test_phrase_order
store_names ["Wheat Bread", "Whole Wheat Bread"]
assert_order "wheat bread", ["Wheat Bread", "Whole Wheat Bread"], match: :phrase, fields: [:name]
end
def test_dynamic_fields
setup_speaker
store_names ["Red Bull"], Speaker
assert_search "redbull", ["Red Bull"], {fields: [:name]}, Speaker
end
def test_unsearchable
skip
store [
{name: "Unsearchable", description: "Almond"}
]
assert_search "almond", []
end
def test_unsearchable_where
store [
{name: "Unsearchable", description: "Almond"}
]
assert_search "*", ["Unsearchable"], where: {description: "Almond"}
end
def test_emoji
store_names ["Banana"]
assert_search "🍌", ["Banana"], emoji: true
end
def test_emoji_multiple
store_names ["Ice Cream Cake"]
assert_search "🍨🍰", ["Ice Cream Cake"], emoji: true
assert_search "🍨🍰", ["Ice Cream Cake"], emoji: true, misspellings: false
end
# operator
def test_operator
store_names ["fresh", "honey"]
assert_search "fresh honey", ["fresh", "honey"], {operator: "or"}
assert_search "fresh honey", [], {operator: "and"}
assert_search "fresh honey", ["fresh", "honey"], {operator: :or}
assert_search "fresh honey", ["fresh", "honey"], {operator: :or, body_options: {track_total_hits: true}}
assert_search "fresh honey", [], {operator: :or, fields: [:name], match: :phrase, body_options: {track_total_hits: true}}
end
def test_operator_scoring
store_names ["Big Red Circle", "Big Green Circle", "Small Orange Circle"]
assert_order "big red circle", ["Big Red Circle", "Big Green Circle", "Small Orange Circle"], operator: "or"
end
# fields
def test_fields_operator
store [
{name: "red", color: "red"},
{name: "blue", color: "blue"},
{name: "cyan", color: "blue green"},
{name: "magenta", color: "red blue"},
{name: "green", color: "green"}
]
assert_search "red blue", ["red", "blue", "cyan", "magenta"], operator: "or", fields: ["color"]
end
def test_fields
store [
{name: "red", color: "light blue"},
{name: "blue", color: "red fish"}
]
assert_search "blue", ["red"], fields: ["color"]
end
def test_non_existent_field
store_names ["Milk"]
assert_search "milk", [], fields: ["not_here"]
end
def test_fields_both_match
# have same score due to dismax
store [
{name: "Blue A", color: "red"},
{name: "Blue B", color: "light blue"}
]
assert_first "blue", "Blue B", fields: [:name, :color]
end
end
| ruby | MIT | 107d270a0d2d7e1935dc7fee1ea0305edb5e4b3b | 2026-01-04T15:40:12.176737Z | false |
ankane/searchkick | https://github.com/ankane/searchkick/blob/107d270a0d2d7e1935dc7fee1ea0305edb5e4b3b/test/misspellings_test.rb | test/misspellings_test.rb | require_relative "test_helper"
class MisspellingsTest < Minitest::Test
def test_false
store_names ["abc", "abd", "aee"]
assert_search "abc", ["abc"], misspellings: false
end
def test_distance
store_names ["abbb", "aabb"]
assert_search "aaaa", ["aabb"], misspellings: {distance: 2}
end
def test_prefix_length
store_names ["ap", "api", "apt", "any", "nap", "ah", "ahi"]
assert_search "ap", ["ap", "api", "apt"], misspellings: {prefix_length: 2}
assert_search "api", ["ap", "api", "apt"], misspellings: {prefix_length: 2}
end
def test_prefix_length_operator
store_names ["ap", "api", "apt", "any", "nap", "ah", "aha"]
assert_search "ap ah", ["ap", "ah", "api", "apt", "aha"], operator: "or", misspellings: {prefix_length: 2}
assert_search "api ahi", ["ap", "api", "apt", "ah", "aha"], operator: "or", misspellings: {prefix_length: 2}
end
def test_fields_operator
store [
{name: "red", color: "red"},
{name: "blue", color: "blue"},
{name: "cyan", color: "blue green"},
{name: "magenta", color: "red blue"},
{name: "green", color: "green"}
]
assert_search "red blue", ["red", "blue", "cyan", "magenta"], operator: "or", fields: ["color"], misspellings: false
end
def test_below_unmet
store_names ["abc", "abd", "aee"]
assert_search "abc", ["abc", "abd"], misspellings: {below: 2}
end
def test_below_unmet_result
store_names ["abc", "abd", "aee"]
assert Product.search("abc", misspellings: {below: 2}).misspellings?
end
def test_below_met
store_names ["abc", "abd", "aee"]
assert_search "abc", ["abc"], misspellings: {below: 1}
end
def test_below_met_result
store_names ["abc", "abd", "aee"]
assert !Product.search("abc", misspellings: {below: 1}).misspellings?
end
def test_field_correct_spelling_still_works
store [{name: "Sriracha", color: "blue"}]
assert_misspellings "Sriracha", ["Sriracha"], {fields: [:name, :color]}
assert_misspellings "blue", ["Sriracha"], {fields: [:name, :color]}
end
def test_field_enabled
store [{name: "Sriracha", color: "blue"}]
assert_misspellings "siracha", ["Sriracha"], {fields: [:name]}
assert_misspellings "clue", ["Sriracha"], {fields: [:color]}
end
def test_field_disabled
store [{name: "Sriracha", color: "blue"}]
assert_misspellings "siracha", [], {fields: [:color]}
assert_misspellings "clue", [], {fields: [:name]}
end
def test_field_with_transpositions
store [{name: "Sriracha", color: "blue"}]
assert_misspellings "lbue", [], {transpositions: false, fields: [:color]}
end
def test_field_with_edit_distance
store [{name: "Sriracha", color: "blue"}]
assert_misspellings "crue", ["Sriracha"], {edit_distance: 2, fields: [:color]}
end
def test_field_multiple
store [
{name: "Greek Yogurt", color: "white"},
{name: "Green Onions", color: "yellow"}
]
assert_misspellings "greed", ["Greek Yogurt", "Green Onions"], {fields: [:name, :color]}
assert_misspellings "mellow", ["Green Onions"], {fields: [:name, :color]}
end
def test_field_requires_explicit_search_fields
store_names ["Sriracha"]
assert_raises(ArgumentError) do
assert_search "siracha", ["Sriracha"], {misspellings: {fields: [:name]}}
end
end
def test_field_word_start
store_names ["Sriracha"]
assert_search "siracha", ["Sriracha"], fields: [{name: :word_middle}], misspellings: {fields: [:name]}
end
end
| ruby | MIT | 107d270a0d2d7e1935dc7fee1ea0305edb5e4b3b | 2026-01-04T15:40:12.176737Z | false |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.