repo stringlengths 5 92 | file_url stringlengths 80 287 | file_path stringlengths 5 197 | content stringlengths 0 32.8k | language stringclasses 1
value | license stringclasses 7
values | commit_sha stringlengths 40 40 | retrieved_at stringdate 2026-01-04 15:37:27 2026-01-04 17:58:21 | truncated bool 2
classes |
|---|---|---|---|---|---|---|---|---|
jkeen/tracking_number_data | https://github.com/jkeen/tracking_number_data/blob/802d8a55e506b65bb5c820ed4e6d259a29f3911e/spec/spec_helper.rb | spec/spec_helper.rb | # frozen_string_literal: true
require 'bundler/setup'
Bundler.setup
require 'byebug'
require 'tracking_number' # and any other gems you need
# Persist example pass/fail state so `rspec --only-failures` can rerun just
# the specs that failed on the previous run.
RSpec.configure do |config|
  config.example_status_persistence_file_path = '.rspec_status'
end
module TrackingNumber
  # Checksum algorithms used to validate candidate tracking numbers.
  # Each method takes the serial portion, the claimed check digit, and the
  # checksum config hash, and returns true when the digit matches.
  module ChecksumValidations
    class << self
      # DEV NOTE: Add new checksum validation methods here while developing

      # Luhn (mod-10) check. Walking the serial right-to-left, every digit
      # at an even 0-based position is doubled, doubled values above 9 are
      # reduced by 9, and the expected check digit is whatever brings the
      # running sum up to the next multiple of ten.
      def validates_luhn?(sequence, check_digit, extras = {})
        total = sequence.chars.reverse.each_with_index.reduce(0) do |sum, (char, index)|
          digit = char.to_i
          digit *= 2 if index.even?
          digit -= 9 if digit > 9
          sum + digit
        end

        remainder = total % 10
        expected = remainder.zero? ? 0 : 10 - remainder
        expected == check_digit.to_i
      end
    end
  end
end
# Validates +tracking_number+ against the checksum described in +info+ (a
# tracking-number definition hash). The serial number and check digit are
# pulled out via the definition's named-capture regex ("SerialNumber" /
# "CheckDigit"), whitespace-stripped, formatted, and dispatched to the
# matching TrackingNumber::ChecksumValidations method.
#
# With a block, yields (result, algorithm_name, serial_number, check_digit);
# without one, returns the boolean result.
def validate_with_checksum(tracking_number, info)
  regex = Regexp.new([info[:regex]].flatten.join(""))
  captures = regex.match(tracking_number)
  serial = captures["SerialNumber"].gsub(/\s/, '')
  check_digit = captures["CheckDigit"].gsub(/\s/, '')
  checksum_info = info[:validation][:checksum]
  serial = format_serial_number(serial, info[:validation][:serial_number_format])
  result = TrackingNumber::ChecksumValidations.send(
    "validates_#{checksum_info[:name]}?", serial, check_digit, checksum_info
  )
  return result unless block_given?
  yield result, checksum_info[:name], serial, check_digit
end
# Asserts that +tracking_number+ passes the checksum declared in +info+,
# failing with a diagnostic that names the algorithm and the exact
# serial/check-digit split that was tested.
def expect_valid_checksum(tracking_number, info)
  validate_with_checksum(tracking_number, info) do |result, name, serial_number, check_digit|
    expect(result).to be_truthy, "#{tracking_number} should have matched #{name} checksum algorithm with serial number = #{serial_number} and check digit = #{check_digit}"
  end
end
# Applies the definition's serial_number_format rules to +raw_serial+.
#
# Only the :prepend_if rule is implemented: when the serial matches the
# rule's :matches_regex, the rule's :content is prefixed. The serial is
# returned unchanged when no format info is given or no rule applies.
#
# NOTE: the previous version carried an empty
# `elsif format_info[:prepend_if_missing]` branch — a no-op placeholder.
# It has been removed; re-add it when :prepend_if_missing gains semantics.
def format_serial_number(raw_serial, format_info)
  return raw_serial unless format_info

  rule = format_info[:prepend_if]
  if rule && raw_serial.match(Regexp.new(rule[:matches_regex]))
    "#{rule[:content]}#{raw_serial}"
  else
    raw_serial
  end
end
# Helpers for testing integration with tracking_number ruby gem, as a way to actually test the algorithms out
# Expands one tracking number into the spacing variants a user might type:
# the number exactly as given, with every space removed, and split in half
# with a single space inserted. Duplicates are collapsed.
def possible_numbers(tracking)
  tracking = tracking.to_s
  half = tracking.length / 2
  variants = [
    tracking,
    tracking.gsub(" ", ""),
    "#{tracking.slice(0, half)} #{tracking.slice(half, tracking.length)}"
  ]
  variants.uniq
end
# Wraps every spacing variant of +tracking+ in a sentence of filler text,
# mimicking a number embedded in prose.
def possible_strings(tracking)
  possible_numbers(tracking).flatten.map { |number| search_string(number) }
end
# Embeds +number+ mid-sentence in lorem-ipsum filler, for exercising the
# search regexes against free-running text.
def search_string(number)
  "Lorem ipsum dolor sit amet, consectetur adipisicing elit, sed do eiusmod tempor #{number} ut labore et dolore magna aliqua."
end
# Asserts that klass.search finds exactly one match for +valid_number+ in
# each generated prose variant; the failure message includes the search
# regex so a broken pattern is easy to spot.
def should_detect_number_variants(valid_number, klass, info)
  possible_strings(valid_number).each do |string|
    results = klass.search(string)
    expect(results.size).to eq(1), "could not find #{klass} #{valid_number} in #{string} using search regex: #{info[:regex]}"
  end
end
# Asserts the tracking_number gem classifies +valid_number+ as the expected
# tracking-number class and carrier symbol.
def should_be_valid_number(valid_number, type, carrier)
  tracking = TrackingNumber.new(valid_number)
  expect(tracking.class).to eq(type)
  expect(tracking.carrier).to eq(carrier)
end
# Asserts the tracking_number gem does NOT accept +invalid_number+.
#
# BUG FIX: this helper previously asserted `t.valid?` was *truthy*, which
# passed exactly when the gem considered the number valid — the opposite of
# the helper's name and intent. An invalid number must report valid? false.
# (+type+ and +carrier+ are kept for signature parity with
# should_be_valid_number; they are not asserted on here.)
def should_be_invalid_number(invalid_number, type, carrier)
  t = TrackingNumber.new(invalid_number)
  expect(t.valid?).to be_falsy, "#{invalid_number} reported as a valid #{t.class}, and it shouldn't be"
end
# Mutates the last digit of +valid_number+ and asserts the result no longer
# validates — i.e. the checksum genuinely depends on the check digit.
def should_fail_on_check_digit_changes(valid_number)
  digits = valid_number.gsub(/\s/, "").chars.to_a
  last = digits.pop.to_i
  # Shift by +3 for 0..2 and -3 for 3..9: the replacement stays in 0..9 and
  # always differs from the original digit.
  digits << (last <= 2 ? last + 3 : last - 3).to_s
  invalid_number = digits.join
  t = TrackingNumber.new(invalid_number)
  expect(t.valid?).to be_falsy, "#{valid_number} -> #{invalid_number} reported as a valid #{t.class}, and it shouldn't be"
end
| ruby | MIT | 802d8a55e506b65bb5c820ed4e6d259a29f3911e | 2026-01-04T17:45:13.109005Z | false |
abhay/calais | https://github.com/abhay/calais/blob/21ef6f2714f2d5f786a1819927b4e8429c274fd5/init.rb | init.rb | require File.dirname(__FILE__) + '/lib/calais' | ruby | MIT | 21ef6f2714f2d5f786a1819927b4e8429c274fd5 | 2026-01-04T17:45:14.094016Z | false |
abhay/calais | https://github.com/abhay/calais/blob/21ef6f2714f2d5f786a1819927b4e8429c274fd5/spec/helper.rb | spec/helper.rb | require 'rubygems'
require 'rspec'
require 'yaml'
require File.dirname(__FILE__) + '/../lib/calais'
# Shared fixtures for the Calais specs. All sample payloads live under
# spec/fixtures; LICENSE_ID is read from fixtures/calais.yml ('key' entry).
FIXTURES_DIR = File.join File.dirname(__FILE__), %[fixtures]
SAMPLE_DOCUMENT = File.read(File.join(FIXTURES_DIR, %[bicycles_australia.xml]))
SAMPLE_RESPONSE = File.read(File.join(FIXTURES_DIR, %[bicycles_australia.response.rdf]))
SAMPLE_RESPONSE_WITH_NO_SCORE = File.read(File.join(FIXTURES_DIR, %[twitter_tweet_without_score.response.rdf]))
RESPONSE_WITH_EXCEPTION = File.read(File.join(FIXTURES_DIR, %[error.response.xml]))
LICENSE_ID = YAML.load(File.read(File.join(FIXTURES_DIR, %[calais.yml])))['key']
RSpec.configure do |c|
  # Lets `describe Foo, :bar` metadata symbols behave as `bar: true`.
  c.treat_symbols_as_metadata_keys_with_true_values = true
end
| ruby | MIT | 21ef6f2714f2d5f786a1819927b4e8429c274fd5 | 2026-01-04T17:45:14.094016Z | false |
abhay/calais | https://github.com/abhay/calais/blob/21ef6f2714f2d5f786a1819927b4e8429c274fd5/spec/calais/response_spec.rb | spec/calais/response_spec.rb | require File.join(File.dirname(__FILE__), %w[.. helper])
# Smoke test: a Response can be built straight from a raw RDF string.
describe Calais::Response, :new do
  it 'accepts an rdf string to generate the response object' do
    lambda { Calais::Response.new(SAMPLE_RESPONSE) }.should_not raise_error
  end
end
# An OpenCalais error payload should surface as Calais::Error carrying the
# service-provided message (see fixtures/error.response.xml).
describe Calais::Response, :new do
  it "should return error message in runtime error" do
    lambda {
      @response = Calais::Response.new(RESPONSE_WITH_EXCEPTION)
    }.should raise_error(Calais::Error, "My Error Message")
  end
end
# Extraction behavior against the canned bicycles_australia RDF fixture.
describe Calais::Response, :new do
  before :all do
    # Parsed once for the whole group; every example below reads from it.
    @response = Calais::Response.new(SAMPLE_RESPONSE)
  end
  it 'should extract document information' do
    @response.language.should == 'English'
    @response.submission_date.should be_a_kind_of(DateTime)
    @response.signature.should be_a_kind_of(String)
    @response.submitter_code.should be_a_kind_of(String)
    @response.request_id.should be_a_kind_of(String)
    @response.doc_title.should == 'Record number of bicycles sold in Australia in 2006'
    @response.doc_date.should be_a_kind_of(Date)
  end
  it 'should extract entities' do
    entities = @response.entities
    entities.map { |e| e.type }.sort.uniq.should == %w[City Continent Country IndustryTerm Organization Person Position ProvinceOrState]
  end
  it 'should extract relations' do
    relations = @response.relations
    relations.map { |e| e.type }.sort.uniq.should == %w[GenericRelations PersonAttributes PersonCareer Quotation]
  end
  describe 'geographies' do
    it 'should be extracted' do
      geographies = @response.geographies
      geographies.map { |e| e.name }.sort.uniq.should == %w[Australia Hobart,Tasmania,Australia Tasmania,Australia]
    end
    it 'should have relevance' do
      geographies = @response.geographies
      geographies.map { |e| e.relevance }.sort.uniq.should be_true
    end
    it 'should have relevance value' do
      geographies = @response.geographies
      geographies.map { |e| e.relevance }.sort.uniq.should == [0.168, 0.718]
    end
  end
  it 'should extract relevances' do
    @response.instance_variable_get(:@relevances).should be_a_kind_of(Hash)
  end
  it 'should assign a floating-point relevance to each entity' do
    @response.entities.each {|e| e.relevance.should be_a_kind_of(Float) }
  end
  it 'should find the correct document categories returned by OpenCalais' do
    @response.categories.map {|c| c.name }.sort.should == %w[Business_Finance Technology_Internet]
  end
  it 'should find the correct document category scores returned by OpenCalais' do
    @response.categories.map {|c| c.score.should be_a_kind_of(Float) }
  end
  it "should not raise an error if no score is given by OpenCalais" do
    lambda {Calais::Response.new(SAMPLE_RESPONSE_WITH_NO_SCORE)}.should_not raise_error
  end
  # NOTE(review): this example duplicates the previous example's
  # description; consider renaming it (e.g. "...leaves score nil").
  it "should not raise an error if no score is given by OpenCalais" do
    response = Calais::Response.new(SAMPLE_RESPONSE_WITH_NO_SCORE)
    response.categories.map {|c| c.score }.should == [nil]
  end
  it 'should find social tags' do
    @response.socialtags.map {|c| c.name }.sort.should == ["Appropriate technology", "Bicycles", "Business_Finance", "Cycling", "Motorized bicycle", "Recreation", "Sustainability", "Sustainable transport", "Technology_Internet"]
  end
  it 'should have important scores associated with social tags' do
    @response.socialtags.map {|c| c.importance.should be_a_kind_of(Integer) }
  end
  it 'should find instances for each entity' do
    @response.entities.each {|e|
      e.instances.size.should > 0
    }
  end
  it 'should find instances for each relation' do
    @response.relations.each {|r|
      r.instances.size.should > 0
    }
  end
  it 'should find the correct instances for each entity' do
    ## This currently tests only for the "Australia" entity's
    ## instances. A more thorough test that tests for the instances
    ## of each of the many entities in the sample doc is desirable in
    ## the future.
    australia = @response.entities.select {|e| e.attributes["name"] == "Australia" }.first
    australia.instances.size.should == 3
    instances = australia.instances.sort{|a,b| a.offset <=> b.offset }
    instances[0].prefix.should == "number of bicycles sold in "
    instances[0].exact.should == "Australia"
    instances[0].suffix.should == " in 2006<\/title>\n<date>January 4,"
    instances[0].offset.should == 67
    instances[0].length.should == 9
    instances[1].prefix.should == "4, 2007<\/date>\n<body>\nBicycle sales in "
    instances[1].exact.should == "Australia"
    instances[1].suffix.should == " have recorded record sales of 1,273,781 units"
    instances[1].offset.should == 146
    instances[1].length.should == 9
    instances[2].prefix.should == " the traditional company car,\" he said.\n\n\"Some of "
    instances[2].exact.should == "Australia"
    instances[2].suffix.should == "'s biggest corporations now have bicycle fleets,"
    instances[2].offset.should == 952
    instances[2].length.should == 9
  end
  it 'should find the correct instances for each relation' do
    ## This currently tests only for one relation's instances. A more
    ## thorough test that tests for the instances of each of the many other
    ## relations in the sample doc is desirable in the future.
    rel = @response.relations.select {|e| e.calais_hash.value == "8f3936d9-cf6b-37fc-ae0d-a145959ae3b5" }.first
    rel.instances.size.should == 1
    rel.instances.first.prefix.should == " manufacturers.\n\nThe Cycling Promotion Fund (CPF) "
    rel.instances.first.exact.should == "spokesman Ian Christie said Australians were increasingly using bicycles as an alternative to cars."
    rel.instances.first.suffix.should == " Sales rose nine percent in 2006 while the car"
    rel.instances.first.offset.should == 425
    rel.instances.first.length.should == 99
  end
end
| ruby | MIT | 21ef6f2714f2d5f786a1819927b4e8429c274fd5 | 2026-01-04T17:45:14.094016Z | false |
abhay/calais | https://github.com/abhay/calais/blob/21ef6f2714f2d5f786a1819927b4e8429c274fd5/spec/calais/client_spec.rb | spec/calais/client_spec.rb | require File.join(File.dirname(__FILE__), %w[.. helper])
# Construction: attributes may be supplied as a hash or via a config block;
# unknown hash keys raise NoMethodError from the attr-writer dispatch.
describe Calais::Client, :new do
  it 'accepts arguments as a hash' do
    client = nil
    lambda { client = Calais::Client.new(:content => SAMPLE_DOCUMENT, :license_id => LICENSE_ID) }.should_not raise_error
    client.license_id.should == LICENSE_ID
    client.content.should == SAMPLE_DOCUMENT
  end
  it 'accepts arguments as a block' do
    client = nil
    lambda {
      client = Calais::Client.new do |c|
        c.content = SAMPLE_DOCUMENT
        c.license_id = LICENSE_ID
      end
    }.should_not raise_error
    client.license_id.should == LICENSE_ID
    client.content.should == SAMPLE_DOCUMENT
  end
  it 'should not accept unknown attributes' do
    lambda { Calais::Client.new(:monkey => 'monkey', :license_id => LICENSE_ID) }.should raise_error(NoMethodError)
  end
end
# params_xml should serialize every processing/user directive into the
# OpenCalais c:params XML envelope, with empty directive elements when
# nothing beyond content/license is configured.
describe Calais::Client, :params_xml do
  it 'returns an xml encoded string' do
    client = Calais::Client.new(:content => SAMPLE_DOCUMENT, :license_id => LICENSE_ID)
    client.params_xml.should == %[<c:params xmlns:c=\"http://s.opencalais.com/1/pred/\" xmlns:rdf=\"http://www.w3.org/1999/02/22-rdf-syntax-ns#\">\n <c:processingDirectives/>\n <c:userDirectives/>\n</c:params>]
    client.content_type = :xml
    client.output_format = :json
    client.reltag_base_url = 'http://opencalais.com'
    client.calculate_relevance = true
    client.metadata_enables = Calais::KNOWN_ENABLES
    client.metadata_discards = Calais::KNOWN_DISCARDS
    client.allow_distribution = true
    client.allow_search = true
    # externalID is the SHA1 of the content, so the expected XML below is
    # stable for the fixture document.
    client.external_id = Digest::SHA1.hexdigest(client.content)
    client.submitter = 'calais.rb'
    client.params_xml.should == %[<c:params xmlns:c="http://s.opencalais.com/1/pred/" xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#">\n <c:processingDirectives c:contentType="text/xml" c:outputFormat="application/json" c:reltagBaseURL="http://opencalais.com" c:enableMetadataType="GenericRelations,SocialTags" c:discardMetadata="er/Company;er/Geo;er/Product"/>\n <c:userDirectives c:allowDistribution="true" c:allowSearch="true" c:externalID="1a008b91e7d21962e132bc1d6cb252532116a606" c:submitter="calais.rb"/>\n</c:params>]
  end
end
# enlighten should POST to the production endpoint by default and to the
# beta endpoint when use_beta is set. do_request is stubbed so no network
# traffic occurs; only the resolved URL is checked.
describe Calais::Client, :enlighten do
  before do
    @client = Calais::Client.new do |c|
      c.content = SAMPLE_DOCUMENT
      c.license_id = LICENSE_ID
      c.content_type = :xml
      c.output_format = :json
      c.calculate_relevance = true
      c.metadata_enables = Calais::KNOWN_ENABLES
      c.allow_distribution = true
      c.allow_search = true
    end
  end
  it 'provides access to the enlighten command on the generic rest endpoint' do
    @client.should_receive(:do_request).with(anything).and_return(SAMPLE_RESPONSE)
    @client.enlighten
    @client.url.should == URI.parse(Calais::REST_ENDPOINT)
  end
  it 'provides access to the enlighten command on the beta rest endpoint' do
    @client.use_beta = true
    @client.should_receive(:do_request).with(anything).and_return(SAMPLE_RESPONSE)
    @client.enlighten
    @client.url.should == URI.parse(Calais::BETA_REST_ENDPOINT)
  end
end
| ruby | MIT | 21ef6f2714f2d5f786a1819927b4e8429c274fd5 | 2026-01-04T17:45:14.094016Z | false |
abhay/calais | https://github.com/abhay/calais/blob/21ef6f2714f2d5f786a1819927b4e8429c274fd5/lib/calais.rb | lib/calais.rb | require 'digest/sha1'
require 'net/http'
require 'uri'
require 'cgi'
require 'iconv' if RUBY_VERSION.to_f < 1.9
require 'set'
require 'date'
require 'rubygems'
require 'nokogiri'
require 'json'
if RUBY_VERSION.to_f < 1.9
$KCODE = "UTF8"
require 'jcode'
end
$:.unshift File.expand_path(File.dirname(__FILE__))
require 'calais/client'
require 'calais/response'
require 'calais/error'
module Calais
  # OpenCalais REST endpoints (production and beta).
  REST_ENDPOINT = "http://api.opencalais.com/enlighten/rest/"
  BETA_REST_ENDPOINT = "http://beta.opencalais.com/enlighten/rest/"
  # Friendly symbols mapped to the content-type values the API expects.
  AVAILABLE_CONTENT_TYPES = {
    :xml => 'text/xml',
    :html => 'text/html',
    :htmlraw => 'text/htmlraw',
    :raw => 'text/raw'
  }
  # Friendly symbols mapped to the output-format values the API expects.
  AVAILABLE_OUTPUT_FORMATS = {
    :rdf => 'xml/rdf',
    :simple => 'text/simple',
    :microformats => 'text/microformats',
    :json => 'application/json'
  }
  # Metadata directives the API understands (see Client#check_params).
  KNOWN_ENABLES = ['GenericRelations', 'SocialTags']
  KNOWN_DISCARDS = ['er/Company', 'er/Geo', 'er/Product']
  # NOTE(review): MAX_RETRIES and HTTP_TIMEOUT are not referenced by the
  # visible client code — presumably used elsewhere or vestigial; verify.
  MAX_RETRIES = 5
  HTTP_TIMEOUT = 60
  # Content size bounds enforced by Client#check_params.
  MIN_CONTENT_SIZE = 1
  MAX_CONTENT_SIZE = 100_000
  class << self
    # One-shot convenience: build a Client and return the raw API response.
    def enlighten(*args, &block); Client.new(*args, &block).enlighten; end
    # Builds a Client, forces RDF output, and wraps the reply in a Response.
    def process_document(*args, &block)
      client = Client.new(*args, &block)
      client.output_format = :rdf
      Response.new(client.enlighten)
    end
  end
end
| ruby | MIT | 21ef6f2714f2d5f786a1819927b4e8429c274fd5 | 2026-01-04T17:45:14.094016Z | false |
abhay/calais | https://github.com/abhay/calais/blob/21ef6f2714f2d5f786a1819927b4e8429c274fd5/lib/calais/version.rb | lib/calais/version.rb | module Calais
VERSION = "0.0.13"
end
| ruby | MIT | 21ef6f2714f2d5f786a1819927b4e8429c274fd5 | 2026-01-04T17:45:14.094016Z | false |
abhay/calais | https://github.com/abhay/calais/blob/21ef6f2714f2d5f786a1819927b4e8429c274fd5/lib/calais/response.rb | lib/calais/response.rb | module Calais
class Response
MATCHERS = {
:docinfo => 'DocInfo',
:docinfometa => 'DocInfoMeta',
:defaultlangid => 'DefaultLangId',
:doccat => 'DocCat',
:entities => 'type/em/e',
:relations => 'type/em/r',
:geographies => 'type/er',
:instances => 'type/sys/InstanceInfo',
:relevances => 'type/sys/RelevanceInfo',
}
attr_accessor :submitter_code, :signature, :language, :submission_date, :request_id, :doc_title, :doc_date
attr_accessor :hashes, :entities, :relations, :geographies, :categories, :socialtags, :relevances
def initialize(rdf_string)
@raw_response = rdf_string
@hashes = []
@entities = []
@relations = []
@geographies = []
@relevances = {} # key = String hash, val = Float relevance
@categories = []
@socialtags = []
extract_data
end
class Entity
attr_accessor :calais_hash, :type, :attributes, :relevance, :instances
end
class Relation
attr_accessor :calais_hash, :type, :attributes, :instances
end
class Geography
attr_accessor :name, :calais_hash, :attributes, :relevance
end
class Category
attr_accessor :name, :score
end
class SocialTag
attr_accessor :name, :importance
end
class Instance
attr_accessor :prefix, :exact, :suffix, :offset, :length
# Makes a new Instance object from an appropriate Nokogiri::XML::Node.
def self.from_node(node)
instance = self.new
instance.prefix = node.xpath("c:prefix[1]").first.content
instance.exact = node.xpath("c:exact[1]").first.content
instance.suffix = node.xpath("c:suffix[1]").first.content
instance.offset = node.xpath("c:offset[1]").first.content.to_i
instance.length = node.xpath("c:length[1]").first.content.to_i
instance
end
end
class CalaisHash
attr_accessor :value
def self.find_or_create(hash, hashes)
if !selected = hashes.select {|h| h.value == hash }.first
selected = self.new
selected.value = hash
hashes << selected
end
selected
end
end
private
def extract_data
doc = Nokogiri::XML(@raw_response)
if doc.root.xpath("/Error[1]").first
raise Calais::Error, doc.root.xpath("/Error/Exception").first.content
end
doc.root.xpath("rdf:Description/rdf:type[contains(@rdf:resource, '#{MATCHERS[:docinfometa]}')]/..").each do |node|
@language = node['language']
@submission_date = DateTime.parse node['submissionDate']
attributes = extract_attributes(node.xpath("*[contains(name(), 'c:')]"))
@signature = attributes.delete('signature')
@submitter_code = attributes.delete('submitterCode')
node.remove
end
doc.root.xpath("rdf:Description/rdf:type[contains(@rdf:resource, '#{MATCHERS[:docinfo]}')]/..").each do |node|
@request_id = node['calaisRequestID']
attributes = extract_attributes(node.xpath("*[contains(name(), 'c:')]"))
@doc_title = attributes.delete('docTitle')
@doc_date = Date.parse(attributes.delete('docDate'))
node.remove
end
@socialtags = doc.root.xpath("rdf:Description/c:socialtag/..").map do |node|
tag = SocialTag.new
tag.name = node.xpath("c:name[1]").first.content
tag.importance = node.xpath("c:importance[1]").first.content.to_i
node.remove if node.xpath("c:categoryName[1]").first.nil?
tag
end
@categories = doc.root.xpath("rdf:Description/rdf:type[contains(@rdf:resource, '#{MATCHERS[:doccat]}')]/..").map do |node|
category = Category.new
category.name = node.xpath("c:categoryName[1]").first.content
score = node.xpath("c:score[1]").first
category.score = score.content.to_f unless score.nil?
node.remove
category
end
@relevances = doc.root.xpath("rdf:Description/rdf:type[contains(@rdf:resource, '#{MATCHERS[:relevances]}')]/..").inject({}) do |acc, node|
subject_hash = node.xpath("c:subject[1]").first[:resource].split('/')[-1]
acc[subject_hash] = node.xpath("c:relevance[1]").first.content.to_f
node.remove
acc
end
@entities = doc.root.xpath("rdf:Description/rdf:type[contains(@rdf:resource, '#{MATCHERS[:entities]}')]/..").map do |node|
extracted_hash = node['about'].split('/')[-1] rescue nil
entity = Entity.new
entity.calais_hash = CalaisHash.find_or_create(extracted_hash, @hashes)
entity.type = extract_type(node)
entity.attributes = extract_attributes(node.xpath("*[contains(name(), 'c:')]"))
entity.relevance = @relevances[extracted_hash]
entity.instances = extract_instances(doc, extracted_hash)
node.remove
entity
end
@relations = doc.root.xpath("rdf:Description/rdf:type[contains(@rdf:resource, '#{MATCHERS[:relations]}')]/..").map do |node|
extracted_hash = node['about'].split('/')[-1] rescue nil
relation = Relation.new
relation.calais_hash = CalaisHash.find_or_create(extracted_hash, @hashes)
relation.type = extract_type(node)
relation.attributes = extract_attributes(node.xpath("*[contains(name(), 'c:')]"))
relation.instances = extract_instances(doc, extracted_hash)
node.remove
relation
end
@geographies = doc.root.xpath("rdf:Description/rdf:type[contains(@rdf:resource, '#{MATCHERS[:geographies]}')]/..").map do |node|
attributes = extract_attributes(node.xpath("*[contains(name(), 'c:')]"))
geography = Geography.new
geography.name = attributes.delete('name')
geography.calais_hash = attributes.delete('subject')
geography.attributes = attributes
geography.relevance = extract_relevance(geography.calais_hash.value)
node.remove
geography
end
doc.root.xpath("rdf:Description/rdf:type[contains(@rdf:resource, '#{MATCHERS[:defaultlangid]}')]/..").each { |node| node.remove }
doc.root.xpath("./*").each { |node| node.remove }
return
end
def extract_instances(doc, hash)
doc.root.xpath("rdf:Description/rdf:type[contains(@rdf:resource, '#{MATCHERS[:instances]}')]/..").select do |instance_node|
instance_node.xpath("c:subject[1]").first[:resource].split("/")[-1] == hash
end.map do |instance_node|
instance = Instance.from_node(instance_node)
instance_node.remove
instance
end
end
def extract_type(node)
node.xpath("*[name()='rdf:type']")[0]['resource'].split('/')[-1]
rescue
nil
end
def extract_attributes(nodes)
nodes.inject({}) do |hsh, node|
value = if node['resource']
extracted_hash = node['resource'].split('/')[-1] rescue nil
CalaisHash.find_or_create(extracted_hash, @hashes)
else
node.content
end
hsh.merge(node.name => value)
end
end
def extract_relevance(value)
return @relevances[value]
end
end
end
| ruby | MIT | 21ef6f2714f2d5f786a1819927b4e8429c274fd5 | 2026-01-04T17:45:14.094016Z | false |
abhay/calais | https://github.com/abhay/calais/blob/21ef6f2714f2d5f786a1819927b4e8429c274fd5/lib/calais/client.rb | lib/calais/client.rb | module Calais
class Client
# base attributes of the call
attr_accessor :content
attr_accessor :license_id
# processing directives
attr_accessor :content_type, :output_format, :reltag_base_url, :calculate_relevance, :omit_outputting_original_text
attr_accessor :store_rdf, :metadata_enables, :metadata_discards
# user directives
attr_accessor :allow_distribution, :allow_search, :external_id, :submitter
attr_accessor :external_metadata
attr_accessor :use_beta
def initialize(options={}, &block)
options.each {|k,v| send("#{k}=", v)}
yield(self) if block_given?
end
def enlighten
post_args = {
"licenseID" => @license_id,
"content" => RUBY_VERSION.to_f < 1.9 ?
Iconv.iconv('UTF-8//IGNORE', 'UTF-8', "#{@content} ").first[0..-2] :
"#{@content} ".encode(Encoding::UTF_8, :invalid => :replace, :undef => :replace, :replace => '')[0 .. -2],
"paramsXML" => params_xml
}
do_request(post_args)
end
def params_xml
check_params
document = Nokogiri::XML::Document.new
params_node = Nokogiri::XML::Node.new('c:params', document)
params_node['xmlns:c'] = 'http://s.opencalais.com/1/pred/'
params_node['xmlns:rdf'] = 'http://www.w3.org/1999/02/22-rdf-syntax-ns#'
processing_node = Nokogiri::XML::Node.new('c:processingDirectives', document)
processing_node['c:contentType'] = AVAILABLE_CONTENT_TYPES[@content_type] if @content_type
processing_node['c:outputFormat'] = AVAILABLE_OUTPUT_FORMATS[@output_format] if @output_format
processing_node['c:calculateRelevanceScore'] = 'false' if @calculate_relevance == false
processing_node['c:reltagBaseURL'] = @reltag_base_url.to_s if @reltag_base_url
processing_node['c:enableMetadataType'] = @metadata_enables.join(',') unless @metadata_enables.empty?
processing_node['c:docRDFaccessible'] = @store_rdf if @store_rdf
processing_node['c:discardMetadata'] = @metadata_discards.join(';') unless @metadata_discards.empty?
processing_node['c:omitOutputtingOriginalText'] = 'true' if @omit_outputting_original_text
user_node = Nokogiri::XML::Node.new('c:userDirectives', document)
user_node['c:allowDistribution'] = @allow_distribution.to_s unless @allow_distribution.nil?
user_node['c:allowSearch'] = @allow_search.to_s unless @allow_search.nil?
user_node['c:externalID'] = @external_id.to_s if @external_id
user_node['c:submitter'] = @submitter.to_s if @submitter
params_node << processing_node
params_node << user_node
if @external_metadata
external_node = Nokogiri::XML::Node.new('c:externalMetadata', document)
external_node << @external_metadata
params_node << external_node
end
params_node.to_xml(:indent => 2)
end
def url
@url ||= URI.parse(calais_endpoint)
end
private
def check_params
raise 'missing content' if @content.nil? || @content.empty?
content_length = @content.length
raise 'content is too small' if content_length < MIN_CONTENT_SIZE
raise 'content is too large' if content_length > MAX_CONTENT_SIZE
raise 'missing license id' if @license_id.nil? || @license_id.empty?
raise 'unknown content type' unless AVAILABLE_CONTENT_TYPES.keys.include?(@content_type) if @content_type
raise 'unknown output format' unless AVAILABLE_OUTPUT_FORMATS.keys.include?(@output_format) if @output_format
%w[calculate_relevance store_rdf allow_distribution allow_search].each do |variable|
value = self.send(variable)
unless NilClass === value || TrueClass === value || FalseClass === value
raise "expected a boolean value for #{variable} but got #{value}"
end
end
@metadata_enables ||= []
unknown_enables = Set.new(@metadata_enables) - KNOWN_ENABLES
raise "unknown metadata enables: #{unknown_enables.to_a.inspect}" unless unknown_enables.empty?
@metadata_discards ||= []
unknown_discards = Set.new(@metadata_discards) - KNOWN_DISCARDS
raise "unknown metadata discards: #{unknown_discards.to_a.inspect}" unless unknown_discards.empty?
end
def do_request(post_fields)
@request ||= Net::HTTP::Post.new(url.path)
@request.set_form_data(post_fields)
Net::HTTP.new(url.host, url.port).start {|http| http.request(@request)}.body
end
def calais_endpoint
@use_beta ? BETA_REST_ENDPOINT : REST_ENDPOINT
end
end
end
| ruby | MIT | 21ef6f2714f2d5f786a1819927b4e8429c274fd5 | 2026-01-04T17:45:14.094016Z | false |
abhay/calais | https://github.com/abhay/calais/blob/21ef6f2714f2d5f786a1819927b4e8429c274fd5/lib/calais/error.rb | lib/calais/error.rb | class Calais::Error < StandardError
end | ruby | MIT | 21ef6f2714f2d5f786a1819927b4e8429c274fd5 | 2026-01-04T17:45:14.094016Z | false |
erwinjunker/notifyor | https://github.com/erwinjunker/notifyor/blob/b95878a055ba28db060daae3c2c50d7162304311/spec/spec_helper.rb | spec/spec_helper.rb | # This file was generated by the `rspec --init` command. Conventionally, all
# specs live under a `spec` directory, which RSpec adds to the `$LOAD_PATH`.
# The generated `.rspec` file contains `--require spec_helper` which will cause
# this file to always be loaded, without a need to explicitly require it in any
# files.
#
# Given that it is always loaded, you are encouraged to keep this file as
# light-weight as possible. Requiring heavyweight dependencies from this file
# will add to the boot time of your test suite on EVERY test run, even for an
# individual file that may not need all of that loaded. Instead, consider making
# a separate helper file that requires the additional dependencies and performs
# the additional setup, and require it from the spec files that actually need
# it.
#
# The `.rspec` file also contains a few flags that are not defaults but that
# users commonly want.
#
# See http://rubydoc.info/gems/rspec-core/RSpec/Core/Configuration
# Generated `rspec --init` configuration; the `=begin`/`=end` section below
# holds recommended-but-disabled settings verbatim from the generator.
RSpec.configure do |config|
  # rspec-expectations config goes here. You can use an alternate
  # assertion/expectation library such as wrong or the stdlib/minitest
  # assertions if you prefer.
  config.expect_with :rspec do |expectations|
    # This option will default to `true` in RSpec 4. It makes the `description`
    # and `failure_message` of custom matchers include text for helper methods
    # defined using `chain`, e.g.:
    # be_bigger_than(2).and_smaller_than(4).description
    # # => "be bigger than 2 and smaller than 4"
    # ...rather than:
    # # => "be bigger than 2"
    expectations.include_chain_clauses_in_custom_matcher_descriptions = true
  end
  # rspec-mocks config goes here. You can use an alternate test double
  # library (such as bogus or mocha) by changing the `mock_with` option here.
  config.mock_with :rspec do |mocks|
    # Prevents you from mocking or stubbing a method that does not exist on
    # a real object. This is generally recommended, and will default to
    # `true` in RSpec 4.
    mocks.verify_partial_doubles = true
  end
  # The settings below are suggested to provide a good initial experience
  # with RSpec, but feel free to customize to your heart's content.
=begin
  # These two settings work together to allow you to limit a spec run
  # to individual examples or groups you care about by tagging them with
  # `:focus` metadata. When nothing is tagged with `:focus`, all examples
  # get run.
  config.filter_run :focus
  config.run_all_when_everything_filtered = true
  # Allows RSpec to persist some state between runs in order to support
  # the `--only-failures` and `--next-failure` CLI options. We recommend
  # you configure your source control system to ignore this file.
  config.example_status_persistence_file_path = "spec/examples.txt"
  # Limits the available syntax to the non-monkey patched syntax that is
  # recommended. For more details, see:
  # - http://rspec.info/blog/2012/06/rspecs-new-expectation-syntax/
  # - http://www.teaisaweso.me/blog/2013/05/27/rspecs-new-message-expectation-syntax/
  # - http://rspec.info/blog/2014/05/notable-changes-in-rspec-3/#zero-monkey-patching-mode
  config.disable_monkey_patching!
  # This setting enables warnings. It's recommended, but in some cases may
  # be too noisy due to issues in dependencies.
  config.warnings = true
  # Many RSpec users commonly either run the entire suite or an individual
  # file, and it's useful to allow more verbose output when running an
  # individual spec file.
  if config.files_to_run.one?
    # Use the documentation formatter for detailed output,
    # unless a formatter has already been configured
    # (e.g. via a command-line flag).
    config.default_formatter = 'doc'
  end
  # Print the 10 slowest examples and example groups at the
  # end of the spec run, to help surface which specs are running
  # particularly slow.
  config.profile_examples = 10
  # Run specs in random order to surface order dependencies. If you find an
  # order dependency and want to debug it, you can fix the order by providing
  # the seed, which is printed after each run.
  # --seed 1234
  config.order = :random
  # Seed global randomization in this process using the `--seed` CLI option.
  # Setting this allows you to use `--seed` to deterministically reproduce
  # test failures related to randomization by passing the same `--seed` value
  # as the one that triggered the failure.
  Kernel.srand config.seed
=end
end
| ruby | MIT | b95878a055ba28db060daae3c2c50d7162304311 | 2026-01-04T17:45:18.437563Z | false |
erwinjunker/notifyor | https://github.com/erwinjunker/notifyor/blob/b95878a055ba28db060daae3c2c50d7162304311/lib/notifyor.rb | lib/notifyor.rb | $LOAD_PATH.unshift(File.dirname(__FILE__))
require 'notifyor/plugin'
require 'notifyor/configuration'
module Notifyor
class << self
attr_accessor :configuration
end
def self.configure
self.configuration ||= Configuration.new
yield(configuration) if block_given?
end
ActiveRecord::Base.send(:include, ::Notifyor::Plugin) if defined?(ActiveRecord)
end | ruby | MIT | b95878a055ba28db060daae3c2c50d7162304311 | 2026-01-04T17:45:18.437563Z | false |
erwinjunker/notifyor | https://github.com/erwinjunker/notifyor/blob/b95878a055ba28db060daae3c2c50d7162304311/lib/notifyor/version.rb | lib/notifyor/version.rb | module Notifyor
VERSION = "0.8.1"
end
| ruby | MIT | b95878a055ba28db060daae3c2c50d7162304311 | 2026-01-04T17:45:18.437563Z | false |
erwinjunker/notifyor | https://github.com/erwinjunker/notifyor/blob/b95878a055ba28db060daae3c2c50d7162304311/lib/notifyor/growl.rb | lib/notifyor/growl.rb | require 'notifyor/util/os_analyzer'
module Notifyor
module Growl
extend self
def adapter
return @adapter if @adapter
self.adapter =
case ::Notifyor::Util::OSAnalyzer.os
when :macosx
:terminal_notifier
when :linux
:libnotify_notifier
when :unix
:libnotify_notifier
else
raise 'Operating system not recognized.'
end
@adapter
end
def adapter=(adapter_name)
case adapter_name
when Symbol, String
require "notifyor/growl/adapters/#{adapter_name}"
@adapter = Notifyor::Growl::Adapters.const_get("#{adapter_name.to_s.split('_').collect(&:capitalize).join}")
else
raise "Missing adapter #{adapter_name}"
end
end
def create_growl(title, message)
adapter.create_growl(title, message)
end
end
end | ruby | MIT | b95878a055ba28db060daae3c2c50d7162304311 | 2026-01-04T17:45:18.437563Z | false |
erwinjunker/notifyor | https://github.com/erwinjunker/notifyor/blob/b95878a055ba28db060daae3c2c50d7162304311/lib/notifyor/plugin.rb | lib/notifyor/plugin.rb | require 'active_support'
require 'redis'
module Notifyor
module Plugin
extend ::ActiveSupport::Concern
included do
end
module ClassMethods
def notifyor(options = {})
configure_plugin(options)
end
def configure_plugin(options = {})
configuration = default_configuration.deep_merge(options)
publish_channels = configuration[:channels] || ['notifyor']
append_callbacks(configuration, publish_channels)
end
def append_callbacks(configuration, publish_channels)
publish_channels.each do |channel|
configuration[:only].each do |action|
case action
when :create
self.after_commit -> { ::Notifyor.configuration.redis_connection.publish channel, configuration[:messages][:create].call(self) }, on: :create, if: -> { configuration[:only].include? :create }
when :update
self.after_commit -> { ::Notifyor.configuration.redis_connection.publish channel, configuration[:messages][:update].call(self) }, on: :update, if: -> { configuration[:only].include? :update }
when :destroy
self.before_destroy -> { ::Notifyor.configuration.redis_connection.publish channel, configuration[:messages][:destroy].call(self) }, if: -> { configuration[:only].include? :destroy }
else
#nop
end
end
end
end
def default_configuration
{
only: [:create, :destroy, :update],
messages: {
create: -> (model) { I18n.t('notifyor.model.create') },
update: -> (model) { I18n.t('notifyor.model.update') },
destroy: -> (model) { I18n.t('notifyor.model.destroy') }
}
}
end
end
end
end | ruby | MIT | b95878a055ba28db060daae3c2c50d7162304311 | 2026-01-04T17:45:18.437563Z | false |
erwinjunker/notifyor | https://github.com/erwinjunker/notifyor/blob/b95878a055ba28db060daae3c2c50d7162304311/lib/notifyor/configuration.rb | lib/notifyor/configuration.rb | require 'redis'
require 'connection_pool'
module Notifyor
class Configuration
attr_accessor :redis_connection
def initialize
@redis_connection = ::Redis.new
end
end
end | ruby | MIT | b95878a055ba28db060daae3c2c50d7162304311 | 2026-01-04T17:45:18.437563Z | false |
erwinjunker/notifyor | https://github.com/erwinjunker/notifyor/blob/b95878a055ba28db060daae3c2c50d7162304311/lib/notifyor/cli.rb | lib/notifyor/cli.rb | require 'notifyor/version'
require 'optparse'
module Notifyor
class CLI
def parse
# Default configuration.
ENV['ssh_host'] = 'localhost'
ENV['ssh_port'] = '22'
ENV['ssh_tunnel_port'] = '2000'
ENV['ssh_redis_port'] = '6379'
::OptionParser.new do |opts|
opts.banner = 'Usage: notify_me [options]'
opts.on('-v', '--version',
'Show the current version of this gem') do
puts "Notifyor Version: #{::Notifyor::VERSION}"; exit
end
opts.on('--ssh-host host', 'Provide the host address to your deployment/remote server') do |host|
ENV['ssh_host'] = host
end
opts.on('--ssh-port port', 'Provide the ssh port for the deployment/remote server') do |port|
ENV['ssh_port'] = port
end
opts.on('--ssh-user user', 'Provide the ssh user for the deployment/remote server') do |user|
ENV['ssh_user'] = user
end
opts.on('--tunnel-port tunnel_port', 'Provide the ssh user for the deployment/remote server') do |tunnel_port|
ENV['ssh_tunnel_port'] = tunnel_port
end
opts.on('--redis-port redis_port', 'Provide the ssh user for the deployment/remote server') do |redis_port|
ENV['ssh_redis_port'] = redis_port
end
opts.on('--channel [channel]', 'Provide channel on which notifyor should listen.') do |channel|
ENV['channel'] = channel
end
end.parse!
end
def check_notifications
connection = Notifyor::Remote::Connection.new
begin
connection.build_tunnel
connection.build_redis_tunnel_connection
connection.subscribe_to_redis
rescue => e
STDOUT.write "Could not establish SSH tunnel. Reason: #{e.message}"
end
end
end
end | ruby | MIT | b95878a055ba28db060daae3c2c50d7162304311 | 2026-01-04T17:45:18.437563Z | false |
erwinjunker/notifyor | https://github.com/erwinjunker/notifyor/blob/b95878a055ba28db060daae3c2c50d7162304311/lib/notifyor/util/os_analyzer.rb | lib/notifyor/util/os_analyzer.rb | require 'rbconfig'
module Notifyor
module Util
module OSAnalyzer
extend self
def os
host_os = RbConfig::CONFIG['host_os']
case host_os
when /mswin|msys|mingw|cygwin|bccwin|wince|emc/
:windows
when /darwin|mac os/
:macosx
when /linux/
:linux
when /solaris|bsd/
:unix
else
raise "unknown os: #{host_os.inspect}"
end
end
end
end
end
| ruby | MIT | b95878a055ba28db060daae3c2c50d7162304311 | 2026-01-04T17:45:18.437563Z | false |
erwinjunker/notifyor | https://github.com/erwinjunker/notifyor/blob/b95878a055ba28db060daae3c2c50d7162304311/lib/notifyor/util/formatter.rb | lib/notifyor/util/formatter.rb | module Notifyor
module Util
module Formatter
extend self
def squish!(string)
string.gsub!(/\A[[:space:]]+/, '')
string.gsub!(/[[:space:]]+\z/, '')
string.gsub!(/[[:space:]]+/, ' ')
string
end
end
end
end | ruby | MIT | b95878a055ba28db060daae3c2c50d7162304311 | 2026-01-04T17:45:18.437563Z | false |
erwinjunker/notifyor | https://github.com/erwinjunker/notifyor/blob/b95878a055ba28db060daae3c2c50d7162304311/lib/notifyor/remote/connection.rb | lib/notifyor/remote/connection.rb | require 'json'
require 'redis'
require 'notifyor/growl'
require 'notifyor/util/formatter'
require 'net/ssh/gateway'
module Notifyor
module Remote
class Connection
def initialize
@ssh_host = ENV['ssh_host'] || 'localhost'
@ssh_port = ENV['ssh_port'] || '22'
@ssh_user = ENV['ssh_user']
@tunnel_port = ENV['ssh_tunnel_port'] || '2000'
@redis_port = ENV['ssh_redis_port'] || '6379'
@redis_channel = ENV['channel'] || 'notifyor'
@ssh_gateway = nil
@redis_tunnel_connection = nil
end
def build_tunnel
unless ['127.0.0.1', 'localhost'].include? @ssh_host
@ssh_gateway = Net::SSH::Gateway.new(@ssh_host, @ssh_user, port: @ssh_port)
@ssh_gateway.open('127.0.0.1', @redis_port, @tunnel_port)
end
end
def build_redis_tunnel_connection
redis_port = (['127.0.0.1', 'localhost'].include? @ssh_host) ? @redis_port : @tunnel_port
@redis_tunnel_connection = Redis.new(port: redis_port)
end
def subscribe_to_redis
@redis_tunnel_connection.subscribe(@redis_channel) do |on|
on.message do |channel, msg|
STDERR.write "INFO - Message received on channel: #{channel} \n"
growl_message(msg)
end
end
end
def growl_message(message)
::Notifyor::Growl.create_growl("Notifyor", message) unless Notifyor::Util::Formatter.squish!(message).empty?
end
end
end
end | ruby | MIT | b95878a055ba28db060daae3c2c50d7162304311 | 2026-01-04T17:45:18.437563Z | false |
erwinjunker/notifyor | https://github.com/erwinjunker/notifyor/blob/b95878a055ba28db060daae3c2c50d7162304311/lib/notifyor/growl/adapters/libnotify_notifier.rb | lib/notifyor/growl/adapters/libnotify_notifier.rb | module Notifyor
module Growl
module Adapters
module LibnotifyNotifier
extend self
def create_growl(title, message)
%x(notify-send '#{title}' '#{message}')
end
end
end
end
end | ruby | MIT | b95878a055ba28db060daae3c2c50d7162304311 | 2026-01-04T17:45:18.437563Z | false |
erwinjunker/notifyor | https://github.com/erwinjunker/notifyor/blob/b95878a055ba28db060daae3c2c50d7162304311/lib/notifyor/growl/adapters/terminal_notifier.rb | lib/notifyor/growl/adapters/terminal_notifier.rb | require 'terminal-notifier'
module Notifyor
module Growl
module Adapters
module TerminalNotifier
extend self
def create_growl(title, message)
::TerminalNotifier.notify(message, :title => title)
end
end
end
end
end | ruby | MIT | b95878a055ba28db060daae3c2c50d7162304311 | 2026-01-04T17:45:18.437563Z | false |
html2rss/html2rss | https://github.com/html2rss/html2rss/blob/400e796540e82a69e1f1e014b6f89c626acf32fd/rakefile.rb | rakefile.rb | # frozen_string_literal: true
require 'bundler'
require 'rake'
require 'rspec'
require 'rspec/core/rake_task'
Bundler.setup
Bundler::GemHelper.install_tasks
task default: [:spec]
desc 'Run all examples'
RSpec::Core::RakeTask.new(:spec) do |t|
t.ruby_opts = %w[-w]
end
| ruby | MIT | 400e796540e82a69e1f1e014b6f89c626acf32fd | 2026-01-04T17:45:12.820049Z | false |
html2rss/html2rss | https://github.com/html2rss/html2rss/blob/400e796540e82a69e1f1e014b6f89c626acf32fd/spec/spec_helper.rb | spec/spec_helper.rb | # frozen_string_literal: true
require 'vcr'
if ENV['COVERAGE']
require 'simplecov'
SimpleCov.start do
enable_coverage :branch
add_filter '/spec/'
minimum_coverage 95
minimum_coverage_by_file 90
add_group 'Config', 'lib/html2rss/config'
add_group 'Request Service', 'lib/html2rss/request_service'
add_group 'Auto Source', 'lib/html2rss/auto_source'
add_group 'Selectors', 'lib/html2rss/selectors'
add_group 'RSS Builder', 'lib/html2rss/rss_builder'
add_group 'Html Extractor', 'lib/html2rss/html_extractor'
# Add multiple output formats
formatter SimpleCov::Formatter::MultiFormatter.new([
SimpleCov::Formatter::HTMLFormatter,
SimpleCov::Formatter::SimpleFormatter
])
end
end
require_relative '../lib/html2rss'
require_relative 'support/cli_helpers'
# Load custom matchers and helpers
require_relative 'support/helpers/configuration_helpers'
require_relative 'support/helpers/example_helpers'
# Load shared examples
Dir[File.join(__dir__, 'support', 'shared_examples', '**', '*.rb')].each { |f| require f }
Zeitwerk::Loader.eager_load_all # flush all potential loading issues
RSpec.configure do |config|
# Enable flags like --only-failures and --next-failure
config.example_status_persistence_file_path = '.rspec_status'
# Disable RSpec exposing methods globally on `Module` and `main`
config.disable_monkey_patching!
config.include CliHelpers
config.expect_with :rspec do |c|
c.syntax = :expect
end
VCR.configure do |vcr_config|
vcr_config.cassette_library_dir = 'spec/fixtures/vcr_cassettes'
vcr_config.hook_into :faraday
end
end
| ruby | MIT | 400e796540e82a69e1f1e014b6f89c626acf32fd | 2026-01-04T17:45:12.820049Z | false |
html2rss/html2rss | https://github.com/html2rss/html2rss/blob/400e796540e82a69e1f1e014b6f89c626acf32fd/spec/support/cli_helpers.rb | spec/support/cli_helpers.rb | # frozen_string_literal: true
require 'stringio'
module CliHelpers
# Capture CLI output while running inside a VCR cassette.
#
# @param args [Array<String>] CLI arguments to execute
# @param cassette [String] VCR cassette name
# @return [String] captured STDOUT output
def capture_cli_output(*args, cassette:)
stdout = StringIO.new
original_stdout = $stdout
$stdout = stdout
VCR.use_cassette(cassette) { Html2rss::CLI.start(args) }
stdout.string
ensure
$stdout = original_stdout
end
end
| ruby | MIT | 400e796540e82a69e1f1e014b6f89c626acf32fd | 2026-01-04T17:45:12.820049Z | false |
html2rss/html2rss | https://github.com/html2rss/html2rss/blob/400e796540e82a69e1f1e014b6f89c626acf32fd/spec/support/helpers/configuration_helpers.rb | spec/support/helpers/configuration_helpers.rb | # frozen_string_literal: true
# Configuration and validation helpers for HTML2RSS example specs
module ConfigurationHelpers
# Loads an example configuration from the spec/examples directory
# @param config_name [String] The name of the configuration file (without .yml extension)
# @return [Hash] The loaded configuration hash
# @example
# config = load_example_configuration('combined_scraper_sources')
def load_example_configuration(config_name)
config_file = File.join(%w[spec examples], "#{config_name}.yml")
Html2rss.config_from_yaml_file(config_file)
end
private
def valid_basic_config?(config)
config.is_a?(Hash)
end
def valid_channel_config?(channel)
channel.is_a?(Hash) &&
channel[:url].is_a?(String) &&
channel[:title].is_a?(String)
end
def valid_selectors_config?(selectors)
selectors.is_a?(Hash) &&
selectors[:items].is_a?(Hash) &&
selectors[:items][:selector].is_a?(String)
end
end
| ruby | MIT | 400e796540e82a69e1f1e014b6f89c626acf32fd | 2026-01-04T17:45:12.820049Z | false |
html2rss/html2rss | https://github.com/html2rss/html2rss/blob/400e796540e82a69e1f1e014b6f89c626acf32fd/spec/support/helpers/example_helpers.rb | spec/support/helpers/example_helpers.rb | # frozen_string_literal: true
# Helper methods for HTML2RSS example specs that keep assertions aligned with the production pipeline.
require_relative 'configuration_helpers'
module ExampleHelpers
include ConfigurationHelpers
FIXTURE_ROOT = File.join('spec', 'examples').freeze
##
# Stub the request service with an HTML fixture for a given URL.
#
# @param fixture_name [String] base name of the fixture in `spec/examples`
# @param url [String] URL the request service should return
# @param content_type [String] MIME type to expose on the response
# @return [void]
def mock_request_service_with_html_fixture(fixture_name, url, content_type: 'text/html')
stub_request_service(fixture_path(fixture_name, 'html'), url, content_type)
end
##
# Stub the request service with a JSON fixture for a given URL.
#
# @param fixture_name [String] base name of the fixture in `spec/examples`
# @param url [String] URL the request service should return
# @param content_type [String] MIME type to expose on the response
# @return [void]
def mock_request_service_with_json_fixture(fixture_name, url, content_type: 'application/json')
stub_request_service(fixture_path(fixture_name, 'json'), url, content_type)
end
##
# Build a feed by loading configuration and stubbing the HTTP layer with fixtures.
#
# @param config [Hash] feed configuration
# @param fixture_name [String] base name for the fixture files
# @param fixture_type [Symbol] either :html or :json to pick the transport stub
# @param url [String] URL to associate with the feed channel
# @return [RSS::Rss] rendered feed instance
def generate_feed_from_config(config, fixture_name, fixture_type = :html, url = 'https://example.com')
case fixture_type
when :html
mock_request_service_with_html_fixture(fixture_name, url)
when :json
mock_request_service_with_json_fixture(fixture_name, url)
else
raise ArgumentError, "Invalid fixture_type: #{fixture_type}. Must be :html or :json"
end
channel_config = config.fetch(:channel).merge(url:)
Html2rss.feed(config.merge(channel: channel_config))
end
##
# Assert the shape of produced feed items against a declarative expectation hash.
#
# @param items [Array<RSS::Rss::Channel::Item>] actual RSS items
# @param expected_items [Array<Hash>] expectation descriptors
# @return [void]
def expect_feed_items(items, expected_items)
expect(items.size).to eq(expected_items.size)
expected_items.each_with_index do |expected, index|
verify_item_expectations(items.fetch(index), expected, index)
end
end
private
def fixture_path(fixture_name, extension)
File.join(FIXTURE_ROOT, "#{fixture_name}.#{extension}")
end
def stub_request_service(fixture_path, url, content_type)
body = File.read(fixture_path)
response_url = Html2rss::Url.from_relative(url, url)
allow(Html2rss::RequestService).to receive(:execute).and_return(
Html2rss::RequestService::Response.new(
body:,
url: response_url,
headers: { 'content-type': content_type }
)
)
end
def verify_item_expectations(item, expected, index)
aggregate_failures("item ##{index + 1}") do
expect_item_title(item, expected)
expect_item_link(item, expected)
expect_item_description(item, expected)
expect_item_categories(item, expected)
expect_item_pub_date(item, expected)
expect_item_enclosure(item, expected)
end
end
def expect_item_title(item, expected)
return unless expected.key?(:title)
expect(item.title).to eq(expected[:title])
end
def expect_item_link(item, expected)
return unless expected.key?(:link)
expect_optional_value(item.link, expected[:link])
end
def expect_item_description(item, expected)
Array(expected[:description_includes]).each do |snippet|
expect(item.description).to include(snippet)
end
return unless expected.key?(:description_starts_with)
expect(item.description).to start_with(expected[:description_starts_with])
end
def expect_item_categories(item, expected)
return unless expected.key?(:categories)
expect(item.categories.map(&:content)).to eq(expected[:categories])
end
def expect_item_pub_date(item, expected)
return unless expected.key?(:pub_date)
actual_pub_date = item.pubDate&.rfc2822
expect_optional_value(actual_pub_date, expected[:pub_date])
end
def expect_item_enclosure(item, expected)
return unless expected.key?(:enclosure)
expected_enclosure = expected[:enclosure]
if expected_enclosure.nil?
expect(item.enclosure).to be_nil
return
end
expect(item.enclosure).not_to be_nil
expect_enclosure_attributes(item.enclosure, expected_enclosure)
end
def expect_enclosure_attributes(enclosure, expected)
expect_enclosure_field(enclosure, expected, :url)
expect_enclosure_field(enclosure, expected, :type)
expect_enclosure_field(enclosure, expected, :length)
end
def expect_optional_value(actual, expected)
expected.nil? ? expect(actual).to(be_nil) : expect(actual).to(eq(expected))
end
def expect_enclosure_field(enclosure, expected, field)
return unless expected.key?(field)
expect(enclosure.public_send(field)).to eq(expected[field])
end
end
# Include the helper methods in RSpec configuration
RSpec.configure do |config|
config.include ExampleHelpers
end
| ruby | MIT | 400e796540e82a69e1f1e014b6f89c626acf32fd | 2026-01-04T17:45:12.820049Z | false |
html2rss/html2rss | https://github.com/html2rss/html2rss/blob/400e796540e82a69e1f1e014b6f89c626acf32fd/spec/examples/conditional_processing_spec.rb | spec/examples/conditional_processing_spec.rb | # frozen_string_literal: true
require 'spec_helper'
require 'time'
RSpec.describe 'Conditional Processing Configuration' do
subject(:feed) do
mock_request_service_with_html_fixture('conditional_processing_site', 'https://example.com')
Html2rss.feed(config)
end
let(:config_file) { File.join(%w[spec examples conditional_processing_site.yml]) }
let(:config) { Html2rss.config_from_yaml_file(config_file) }
let(:items) { feed.items }
let(:expected_items) do
[
{
title: "Breaking News: ACME Corp's New Debugging Tool",
link: 'https://example.com/articles/technology-update',
description_starts_with: '[Status: Published]',
description_includes: [
'latest debugging tool',
'built-in rubber duck'
],
categories: ['Published'],
pub_date: 'Mon, 15 Jan 2024 10:30:00 +0000'
},
{
title: "Draft Article: ACME Corp's Green Coding Initiative",
link: 'https://example.com/articles/environmental-research',
description_starts_with: '[Status: Draft]',
description_includes: [
'environmental research',
'tabs instead of spaces'
],
categories: ['Draft'],
pub_date: 'Sun, 14 Jan 2024 14:20:00 +0000'
},
{
title: "Archived Article: ACME Corp's Economic Analysis of Bug Fixes",
link: 'https://example.com/articles/economic-analysis',
description_starts_with: '[Status: Archived]',
description_includes: [
'99% of bugs are caused by cosmic rays',
'missing semicolon that cost $1.2 billion'
],
categories: ['Archived'],
pub_date: 'Sat, 13 Jan 2024 09:15:00 +0000'
},
{
title: "ACME Corp's Developer Health and Wellness Guide",
link: 'https://example.com/articles/health-wellness',
description_starts_with: '[Status: Published]',
description_includes: [
'coffee is not a food group',
'Standing desks are great'
],
categories: ['Published'],
pub_date: 'Fri, 12 Jan 2024 08:30:00 +0000'
},
{
title: "Pending Article: ACME Corp's Annual Code Golf Tournament",
link: 'https://example.com/articles/sports-update',
description_starts_with: '[Status: Pending]',
description_includes: [
'lifetime supply of coffee',
'Debug this code blindfolded'
],
categories: ['Pending'],
pub_date: 'Thu, 11 Jan 2024 16:45:00 +0000'
},
{
title: "ACME Corp's Article Without Status (Status: Unknown)",
link: 'https://example.com/articles/no-status',
description_starts_with: '[Status: ]',
description_includes: [
"doesn't have a status field",
'null pointer exception'
],
categories: [],
pub_date: 'Wed, 10 Jan 2024 12:00:00 +0000'
}
]
end
it 'publishes the configured channel metadata', :aggregate_failures do
expect(feed.channel.title).to eq('ACME Conditional Processing Site News')
expect(feed.channel.link).to eq('https://example.com')
end
it 'renders templated descriptions that expose the item status' do
expect_feed_items(items, expected_items)
end
it 'gracefully handles missing statuses in both the template output and category list', :aggregate_failures do
empty_status_item = items.find { |item| item.title.include?('Without Status') }
expect(empty_status_item.description).to start_with('[Status: ]')
expect(empty_status_item.categories).to be_empty
end
end
| ruby | MIT | 400e796540e82a69e1f1e014b6f89c626acf32fd | 2026-01-04T17:45:12.820049Z | false |
html2rss/html2rss | https://github.com/html2rss/html2rss/blob/400e796540e82a69e1f1e014b6f89c626acf32fd/spec/examples/json_api_site_spec.rb | spec/examples/json_api_site_spec.rb | # frozen_string_literal: true
require 'spec_helper'
require 'json'
require 'time'
RSpec.describe 'JSON API Site Configuration' do
subject(:feed) do
mock_request_service_with_json_fixture('json_api_site', 'https://example.com/posts')
Html2rss.feed(config)
end
let(:config_file) { File.join(%w[spec examples json_api_site.yml]) }
let(:config) { Html2rss.config_from_yaml_file(config_file) }
let(:items) { feed.items }
let(:expected_items) do
[
{
title: "ACME Corp's Revolutionary AI Breakthrough Changes Everything",
link: nil,
description_includes: [
'<img src="https://example.com/images/ai-breakthrough.jpg"',
"It can finally understand 'it works on my machine'"
],
categories: ['Technology', 'Artificial Intelligence', 'Machine Learning', 'Innovation'],
pub_date: 'Mon, 15 Jan 2024 14:30:00 +0000',
enclosure: { url: 'https://example.com/images/ai-breakthrough.jpg', type: 'image/jpeg', length: 0 }
},
{
title: 'Climate Change Summit Reaches Historic Agreement',
link: nil,
description_includes: [
'<img src="https://example.com/images/climate-summit.jpg"',
'groundbreaking agreement on climate change mitigation'
],
categories: ['Environment', 'Climate Change', 'Sustainability', 'Policy'],
pub_date: 'Sun, 14 Jan 2024 09:15:00 +0000',
enclosure: { url: 'https://example.com/images/climate-summit.jpg', type: 'image/jpeg', length: 0 }
},
{
title: 'Space Exploration Mission Discovers New Planet',
link: nil,
description_includes: [
'<img src="https://example.com/images/space-discovery.jpg"',
'This discovery opens up new possibilities for future space exploration'
],
categories: ['Science', 'Space Exploration', 'Astronomy', 'Discovery'],
pub_date: 'Sat, 13 Jan 2024 16:45:00 +0000',
enclosure: { url: 'https://example.com/images/space-discovery.jpg', type: 'image/jpeg', length: 0 }
},
{
title: 'Medical Breakthrough Offers Hope for Cancer Patients',
link: nil,
description_includes: [
'<img src="https://example.com/images/cancer-research.jpg"',
'Clinical trials have shown a 75% success rate'
],
categories: ['Health', 'Cancer Research', 'Immunotherapy', 'Medical Breakthrough'],
pub_date: 'Fri, 12 Jan 2024 11:20:00 +0000',
enclosure: { url: 'https://example.com/images/cancer-research.jpg', type: 'image/jpeg', length: 0 }
},
{
title: 'Renewable Energy Reaches New Milestone',
link: nil,
description_includes: [
'<img src="https://example.com/images/renewable-energy.jpg"',
'Solar and wind power have led this transformation'
],
categories: ['Energy', 'Renewable Energy', 'Solar Power', 'Wind Power'],
pub_date: 'Thu, 11 Jan 2024 15:10:00 +0000',
enclosure: { url: 'https://example.com/images/renewable-energy.jpg', type: 'image/jpeg', length: 0 }
},
{
title: 'Cybersecurity Threats Reach All-Time High',
link: nil,
description_includes: [
'<img src="https://example.com/images/cybersecurity.jpg"',
'Organizations are being urged to implement stronger security measures'
],
categories: ['Security', 'Cybersecurity', 'Threat Detection', 'Infrastructure Security'],
pub_date: 'Wed, 10 Jan 2024 08:30:00 +0000',
enclosure: { url: 'https://example.com/images/cybersecurity.jpg', type: 'image/jpeg', length: 0 }
}
]
end
it 'loads channel metadata from the configuration file', :aggregate_failures do
expect(feed.channel.title).to eq('ACME JSON API Site News')
expect(feed.channel.link).to eq('https://example.com/posts')
end
it 'materialises feed items directly from the API payload' do
expect_feed_items(items, expected_items)
end
it 'omits item links when no selector is configured' do
expect(items.map(&:link)).to all(be_nil)
end
end
| ruby | MIT | 400e796540e82a69e1f1e014b6f89c626acf32fd | 2026-01-04T17:45:12.820049Z | false |
html2rss/html2rss | https://github.com/html2rss/html2rss/blob/400e796540e82a69e1f1e014b6f89c626acf32fd/spec/examples/performance_optimized_spec.rb | spec/examples/performance_optimized_spec.rb | # frozen_string_literal: true
require 'spec_helper'
require 'time'
RSpec.describe 'Performance-Optimized Configuration' do
subject(:feed) do
mock_request_service_with_html_fixture('performance_optimized_site', 'https://example.com')
Html2rss.feed(config)
end
let(:config_file) { File.join(%w[spec examples performance_optimized_site.yml]) }
let(:config) { Html2rss.config_from_yaml_file(config_file) }
let(:items) { feed.items }
let(:expected_items) do
[
{
title: "Breaking News: ACME Corp's Technology Breakthrough",
link: 'https://example.com/articles/technology-breakthrough',
description_includes: [
'major breakthrough in quantum computing technology',
'They also discovered that coffee makes quantum computers work better.'
],
pub_date: 'Mon, 15 Jan 2024 10:30:00 +0000'
},
{
title: "ACME Corp's Environmental Research Update",
link: 'https://example.com/articles/environmental-research',
description_includes: [
'climate change is affecting different regions around the world',
'The study found that using tabs instead of spaces can reduce your carbon footprint'
],
pub_date: 'Sun, 14 Jan 2024 14:20:00 +0000'
},
{
title: "ACME Corp's Economic Analysis Report",
link: 'https://example.com/articles/economic-analysis',
description_includes: [
'quarterly economic analysis shows positive trends',
'the demand for rubber ducks will increase by 42%'
],
pub_date: 'Sat, 13 Jan 2024 09:15:00 +0000'
},
{
title: "ACME Corp's Developer Health and Wellness Tips",
link: 'https://example.com/articles/health-tips',
description_includes: [
'ACME Corp expert recommendations for maintaining good health during the winter months.',
'Also, remember to take breaks from your computer every 2 hours'
],
pub_date: 'Fri, 12 Jan 2024 08:30:00 +0000'
}
]
end
it 'applies the high-signal CSS selector and ignores adverts', :aggregate_failures do
expect(items.size).to eq(4)
expect(items.map(&:title)).to all(include('ACME Corp'))
end
it 'converts relative article links to absolute URLs and preserves editorial tone' do
expect_feed_items(items, expected_items)
end
it 'parses datetime attributes directly from the markup' do
expect(items.map { |item| item.pubDate.rfc2822 }).to eq(expected_items.map { |expected| expected[:pub_date] })
end
end
| ruby | MIT | 400e796540e82a69e1f1e014b6f89c626acf32fd | 2026-01-04T17:45:12.820049Z | false |
html2rss/html2rss | https://github.com/html2rss/html2rss/blob/400e796540e82a69e1f1e014b6f89c626acf32fd/spec/examples/media_enclosures_spec.rb | spec/examples/media_enclosures_spec.rb | # frozen_string_literal: true
require 'spec_helper'
require 'time'
RSpec.describe 'Media Enclosures Configuration', type: :example do
subject(:feed) { generate_feed_from_config(config, config_name, :html) }
let(:config_name) { 'media_enclosures_site' }
let(:config) { load_example_configuration(config_name) }
let(:items) { feed.items }
let(:expected_items) do
[
{
title: 'Episode 42: The Future of AI in Web Development',
link: 'https://example.com/episodes/episode-42-ai-web-dev',
description_includes: [
'<audio controls',
'AI-assisted coding'
],
categories: ['3240'],
pub_date: 'Mon, 15 Jan 2024 10:00:00 +0000',
enclosure: { url: 'https://example.com/episodes/episode-42-ai-web-dev.mp3', type: 'audio/mpeg', length: 0 }
},
{
title: 'Episode 41: Building Scalable React Applications',
link: 'https://example.com/episodes/episode-41-scalable-react',
description_includes: [
'<audio controls',
'performance optimization'
],
categories: ['2880'],
pub_date: 'Mon, 08 Jan 2024 10:00:00 +0000',
enclosure: { url: 'https://example.com/episodes/episode-41-scalable-react.mp3', type: 'audio/mpeg', length: 0 }
},
{
title: 'Episode 40: Special - Interview with Tech Industry Leaders',
link: 'https://example.com/episodes/episode-40-special-interview',
description_includes: [
'<audio controls',
'tech industry leaders'
],
categories: ['4500'],
pub_date: 'Mon, 01 Jan 2024 10:00:00 +0000',
enclosure: { url: 'https://example.com/episodes/episode-40-special-interview.mp3', type: 'audio/mpeg', length: 0 }
},
{
title: 'Episode 39: Quick Tips for CSS Grid',
link: 'https://example.com/episodes/episode-39-css-grid-tips',
description_includes: [
'<audio controls',
'essential CSS Grid tips'
],
categories: ['1800'],
pub_date: 'Mon, 25 Dec 2023 10:00:00 +0000',
enclosure: { url: 'https://example.com/episodes/episode-39-css-grid-tips.mp3', type: 'audio/mpeg', length: 0 }
},
{
title: 'Episode 38: Live Coding Session - Building a Todo App',
link: 'https://example.com/episodes/episode-38-live-coding',
description_includes: [
'live coding session',
'Implementing core functionality'
],
categories: ['5400'],
pub_date: 'Mon, 18 Dec 2023 10:00:00 +0000',
enclosure: nil
},
{
title: 'Episode 37: Text-Only Episode - Reading List',
link: 'https://example.com/episodes/episode-37-reading-list',
description_includes: [
'text-only episode',
"This month's recommendations include books on software architecture"
],
categories: ['0'],
pub_date: 'Mon, 11 Dec 2023 10:00:00 +0000',
enclosure: nil
}
]
end
it 'translates every episode into an RSS item with markdown summaries' do
expect_feed_items(items, expected_items)
end
it 'emits absolute URLs for episode pages and media assets' do
urls = items.map(&:link)
expect(urls).to all(start_with('https://example.com/episodes/'))
end
end
| ruby | MIT | 400e796540e82a69e1f1e014b6f89c626acf32fd | 2026-01-04T17:45:12.820049Z | false |
html2rss/html2rss | https://github.com/html2rss/html2rss/blob/400e796540e82a69e1f1e014b6f89c626acf32fd/spec/examples/combined_scraper_sources_spec.rb | spec/examples/combined_scraper_sources_spec.rb | # frozen_string_literal: true
require 'spec_helper'
# This spec demonstrates the combined scraper sources configuration,
# which uses both auto-source detection and manual selectors to extract
# RSS feed data from HTML content.
RSpec.describe 'Combined Scraper Sources Configuration', type: :example do
# RSS feed generation tests
# These tests validate that the configuration successfully generates
# a valid RSS feed with proper content extraction
subject(:feed) { generate_feed_from_config(config, config_name, :html) }
let(:config_name) { 'combined_scraper_sources' }
let(:config) { load_example_configuration(config_name) }
let(:items) { feed.items }
let(:expected_articles) do
[
{ title: 'ACME Corp Releases New Laptop with M3 Chip',
link: 'https://example.com/articles/acme-laptop-m3-2024' },
{ title: 'ACME Corp Launches New AI Assistant Features',
link: 'https://example.com/articles/acme-ai-assistant-update' },
{ title: 'ACME Motors Announces New Electric Vehicle Model',
link: 'https://example.com/articles/acme-new-ev-model-2024' },
{ title: 'ACME Software Updates Operating System Preview',
link: 'https://example.com/articles/acme-os-preview' },
{ title: 'ACME Reality Introduces New VR Headset',
link: 'https://example.com/articles/acme-vr-headset-2024' },
{ title: 'ACME Cloud Services Expands Cloud Services',
link: 'https://example.com/articles/acme-cloud-services-expansion' }
]
end
it 'collects the six expected articles with matching titles and links' do
aggregate_failures do
expect(items.size).to eq(expected_articles.size)
expect(items.map(&:title)).to eq(expected_articles.map { |article| article[:title] })
expect(items.map(&:link)).to eq(expected_articles.map { |article| article[:link] })
end
end
context 'with templated item metadata' do
subject(:guid_template) { '%<self>s-%<url>s' }
it 'keeps the first item description intact' do
expect(items.first.description)
.to include("It's so fast, it can compile Hello World in under 0.001 seconds!")
end
it 'generates the first item GUID from the template' do
first_item = items.first
expected_guid = Zlib.crc32(format(guid_template, self: first_item.title, url: first_item.link)).to_s(36)
expect(first_item.guid.content).to eq(expected_guid)
end
it 'keeps the second item description intact' do
expect(items[1].description)
.to include("It can now understand 'it works on my machine' and translate it to 'it's broken in production'.")
end
it 'generates the second item GUID from the template' do
second_item = items[1]
expected_guid = Zlib.crc32(format(guid_template, self: second_item.title, url: second_item.link)).to_s(36)
expect(second_item.guid.content).to eq(expected_guid)
end
end
it 'rewrites News categories and exposes tags as discrete categories' do
first_item_categories = items.first.categories.map(&:content)
expect(first_item_categories).to eq(
['Hardware Breaking News', 'ACME Corp', 'Laptops', 'Processors']
)
end
end
| ruby | MIT | 400e796540e82a69e1f1e014b6f89c626acf32fd | 2026-01-04T17:45:12.820049Z | false |
html2rss/html2rss | https://github.com/html2rss/html2rss/blob/400e796540e82a69e1f1e014b6f89c626acf32fd/spec/examples/unreliable_site_spec.rb | spec/examples/unreliable_site_spec.rb | # frozen_string_literal: true
require 'spec_helper'
RSpec.describe 'Unreliable Site Configuration' do
subject(:feed) do
mock_request_service_with_html_fixture('unreliable_site', 'https://example.com')
Html2rss.feed(config)
end
let(:config_file) { File.join(%w[spec examples unreliable_site.yml]) }
let(:config) { Html2rss.config_from_yaml_file(config_file) }
let(:items) { feed.items }
let(:expected_items) do
[
{
title: "Breaking News: ACME Corp's Technology Advances",
link: 'https://example.com/articles/breaking-news-technology-advances',
description_includes: [
'latest technology advances',
'Warning: May contain traces of bugs.'
]
},
{
title: 'ACME Corp Science Discovery: New Findings',
link: 'https://example.com/articles/science-discovery-new-findings',
description_includes: [
'groundbreaking discoveries in the field of quantum physics',
'They discovered that quantum computers work better with coffee.'
]
},
{
title: 'ACME Corp Environmental Impact Report',
link: 'https://example.com/articles/environmental-impact-report',
description_includes: [
'environmental changes and their impact on global ecosystems',
'ACME Corp is trying to make infinite loops carbon-neutral.'
]
},
{
title: 'ACME Corp Economic Analysis: Market Trends',
link: 'https://example.com/articles/economic-analysis-market-trends',
description_includes: [
'current market trends and their implications',
'coffee stocks are up 42%'
]
},
{
title: 'ACME Corp Developer Health and Wellness Update',
link: 'https://example.com/articles/health-wellness-update',
description_includes: [
'health and wellness trends that are gaining popularity among developers',
'standing desks are great'
]
}
]
end
it 'emits channel metadata suitable for flaky upstream sources' do
expect(feed.channel.ttl).to eq(60)
end
it 'extracts resilient titles across heterogeneous markup' do
expect_feed_items(items, expected_items)
end
it 'sanitises and truncates body content to keep feeds lightweight' do
expect(items.map { |item| item.description.length }).to all(be <= 500)
end
it 'normalises every hyperlink via parse_uri post-processing' do
expect(items.map(&:link)).to eq(expected_items.map { |item| item[:link] })
end
end
| ruby | MIT | 400e796540e82a69e1f1e014b6f89c626acf32fd | 2026-01-04T17:45:12.820049Z | false |
html2rss/html2rss | https://github.com/html2rss/html2rss/blob/400e796540e82a69e1f1e014b6f89c626acf32fd/spec/examples/dynamic_content_site_spec.rb | spec/examples/dynamic_content_site_spec.rb | # frozen_string_literal: true
require 'spec_helper'
RSpec.describe 'Dynamic Content Site Configuration' do
subject(:feed) do
mock_request_service_with_html_fixture('dynamic_content_site', 'https://example.com/news')
Html2rss.feed(config)
end
let(:config_file) { File.join(%w[spec examples dynamic_content_site.yml]) }
let(:config) { Html2rss.config_from_yaml_file(config_file) }
let(:items) { feed.items }
let(:expected_items) do
[
{
title: "ACME Corp's Revolutionary AI Breakthrough Changes Everything",
link: 'https://example.com/articles/ai-breakthrough-2024',
description_includes: [
'It can finally understand "it works on my machine"',
"It also knows when you're lying about your commit messages."
],
pub_date: 'Mon, 15 Jan 2024 14:30:00 -0500'
},
{
title: "ACME Corp's Green Coding Summit Reaches Historic Agreement",
link: 'https://example.com/articles/climate-summit-2024',
description_includes: [
'green coding practices',
"They're banning tabs in favor of spaces to save trees."
],
pub_date: 'Sun, 14 Jan 2024 09:15:00 -0500'
},
{
title: "ACME Corp's Space Exploration Mission Discovers New Planet",
link: 'https://example.com/articles/space-mission-discovery',
description_includes: [
'The planet, designated ACME-442b',
'inhabitants are reportedly very good at debugging code.'
],
pub_date: 'Sat, 13 Jan 2024 16:45:00 -0500'
},
{
title: "ACME Corp's Medical Breakthrough Offers Hope for Bug Patients",
link: 'https://example.com/articles/cancer-treatment-breakthrough',
description_includes: [
'debugging treatment',
'The treatment involves lots of coffee and rubber ducks.'
],
pub_date: 'Fri, 12 Jan 2024 11:20:00 -0500'
},
{
title: "ACME Corp's Renewable Energy Reaches New Milestone",
link: 'https://example.com/articles/renewable-energy-milestone',
description_includes: [
"They're powering servers with coffee beans.",
'The wind turbines are powered by the hot air from marketing meetings.'
],
pub_date: 'Thu, 11 Jan 2024 15:10:00 -0500'
},
{
title: "ACME Corp's Cybersecurity Threats Reach All-Time High",
link: 'https://example.com/articles/cybersecurity-threats-2024',
description_includes: [
'The most dangerous threat is still developers using "password123".',
'ACME Corp recommends using "password1234" instead.'
],
pub_date: 'Wed, 10 Jan 2024 08:30:00 -0500'
}
]
end
it 'builds the channel with the configured metadata', :aggregate_failures do
expect(feed.channel.title).to eq('ACME Dynamic Content Site News')
expect(feed.channel.link).to eq('https://example.com/news')
expect(feed.channel.generator).to include('Selectors')
end
it 'extracts every rendered article with sanitized descriptions and parsed timestamps' do
expect_feed_items(items, expected_items)
end
it 'captures the long-form excerpts exactly as rendered on the site', :aggregate_failures do
ai_article = items.find { |item| item.title.include?('AI Breakthrough') }
expect(ai_article.description).to include("It also knows when you're lying about your commit messages.")
expect(ai_article.description).to include('translate it to "it\'s broken in production".')
end
it 'preserves temporal ordering using the configured time zone', :aggregate_failures do
expect(items.map(&:pubDate)).to eq(items.map(&:pubDate).sort.reverse)
expect(items.first.pubDate.utc_offset).to eq(-18_000)
end
end
| ruby | MIT | 400e796540e82a69e1f1e014b6f89c626acf32fd | 2026-01-04T17:45:12.820049Z | false |
html2rss/html2rss | https://github.com/html2rss/html2rss/blob/400e796540e82a69e1f1e014b6f89c626acf32fd/spec/examples/multilang_site_spec.rb | spec/examples/multilang_site_spec.rb | # frozen_string_literal: true
require 'spec_helper'
RSpec.describe 'Multi-Language Site Configuration' do
subject(:feed) do
mock_request_service_with_html_fixture('multilang_site', 'https://example.com')
Html2rss.feed(config)
end
let(:config_file) { File.join(%w[spec examples multilang_site.yml]) }
let(:config) { Html2rss.config_from_yaml_file(config_file) }
let(:items) { feed.items }
let(:expected_items) do
[
{
title: "[en] Breaking News: ACME Corp's Technology Update",
description_includes: [
'quantum computing algorithm that promises to revolutionize data processing',
"It's so fast, it can compile Hello World before you finish typing it."
],
categories: %w[en Technology]
},
{
title: '[es] Noticias: Actualización Tecnológica de ACME Corp',
description_includes: [
'gran innovación tecnológica',
'También viene con una taza de café integrada.'
],
categories: %w[es Tecnología]
},
{
title: "[fr] Actualités: Mise à jour technologique d'ACME Corp",
description_includes: [
'percée technologique majeure',
"Il est si rapide qu'il peut compiler \"Bonjour le monde\""
],
categories: %w[fr Technologie]
},
{
title: '[de] Nachrichten: ACME Corp Technologie-Update',
description_includes: [
'wichtiger technologischer Durchbruch',
'Es ist so schnell, dass es "Hallo Welt" kompilieren kann'
],
categories: %w[de Technologie]
},
{
title: '[en] Environmental Research Update',
description_includes: [
'climate change is accelerating faster than previously predicted',
'Immediate action is required'
],
categories: %w[en Environment]
},
{
title: '[es] Investigación Ambiental Actualizada',
description_includes: [
'El estudio, realizado por un equipo internacional de científicos',
'Se requiere acción inmediata'
],
categories: ['es', 'Medio Ambiente']
},
{
title: '[en] Health and Wellness Guide',
description_includes: [
'Maintaining good health requires a balanced approach',
'Experts recommend at least 30 minutes of moderate exercise daily'
],
categories: %w[en Health]
},
{
title: '[fr] Guide Santé et Bien-être',
description_includes: [
'Maintenir une bonne santé nécessite une approche équilibrée',
'Les experts recommandent au moins 30 minutes d\'exercice modéré quotidien'
],
categories: %w[fr Santé]
}
]
end
it 'applies the configured channel metadata', :aggregate_failures do
expect(feed.channel.title).to eq('ACME Multi-Language Site News')
expect(feed.channel.link).to eq('https://example.com')
expect(feed.channel.language).to eq('en')
end
it 'renders every post with language-prefixed titles and sanitised body copy' do
expect_feed_items(items, expected_items)
end
it 'keeps multilingual content grouped correctly' do
groups = items.group_by { |item| item.categories.first.content }
expect(groups.transform_values(&:count)).to eq('en' => 3, 'es' => 2, 'fr' => 2, 'de' => 1)
end
it 'retains the source language copy within descriptions', :aggregate_failures do
spanish_item = items.find { |item| item.title.start_with?('[es]') }
french_item = items.find { |item| item.title.start_with?('[fr]') }
expect(spanish_item.description).to include('Los científicos de ACME Corp han desarrollado')
expect(french_item.description).to include("Les scientifiques d'ACME Corp ont développé")
end
end
| ruby | MIT | 400e796540e82a69e1f1e014b6f89c626acf32fd | 2026-01-04T17:45:12.820049Z | false |
html2rss/html2rss | https://github.com/html2rss/html2rss/blob/400e796540e82a69e1f1e014b6f89c626acf32fd/spec/exe/html2rss_spec.rb | spec/exe/html2rss_spec.rb | # frozen_string_literal: true
RSpec.describe 'exe/html2rss', :slow do
let(:executable) do
matches = Gem::Specification.find_all_by_name 'html2rss'
spec = matches.first
File.expand_path('exe/html2rss', spec.full_gem_path)
end
let(:doctype_xml) do
'<?xml version="1.0" encoding="UTF-8"?>'
end
let(:stylesheets_xml) do
<<~XML
<?xml-stylesheet href="/style.xls" type="text/xsl" media="all"?>
<?xml-stylesheet href="/rss.css" type="text/css" media="all"?>
XML
end
let(:rss_xml) do
<<~RSS
<rss version="2.0"
xmlns:content="http://purl.org/rss/1.0/modules/content/"
xmlns:dc="http://purl.org/dc/elements/1.1/"
xmlns:itunes="http://www.itunes.com/dtds/podcast-1.0.dtd"
xmlns:trackback="http://madskills.com/public/xml/rss/module/trackback/">
RSS
end
let(:rss_title_pattern) { %r{<title>.+</title>} }
context 'without any arguments' do
it 'prints usage information' do
expect(`#{executable}`).to start_with("Commands:\n html2rss")
end
end
context 'without argument: help' do
it 'prints usage information' do
expect(`#{executable} help`).to start_with("Commands:\n html2rss")
end
end
context 'with feed config: nuxt-releases' do
context 'with arguments: feed YAML_FILE' do
subject(:output) do
capture_cli_output('feed', 'spec/fixtures/single.test.yml', cassette: 'nuxt-releases')
end
it 'generates the RSS', :aggregate_failures do
expect(output).to start_with(doctype_xml)
expect(output).not_to include(stylesheets_xml)
expect(output).to include(rss_xml)
expect(output).to match(rss_title_pattern)
end
end
context 'with arguments: feed YAML_FILE FEED_NAME' do
subject(:output) do
capture_cli_output('feed', 'spec/fixtures/feeds.test.yml', 'nuxt-releases', cassette: 'nuxt-releases')
end
it 'generates the RSS', :aggregate_failures do
expect(output).to start_with(doctype_xml)
expect(output).to include(stylesheets_xml)
expect(output).to include(rss_xml)
expect(output).to match(rss_title_pattern)
end
end
end
context 'with feed config: withparams' do
subject(:output) do
capture_cli_output('feed', 'spec/fixtures/feeds.test.yml', 'withparams', '--params', 'sign:10', 'param:value',
cassette: 'notitle')
end
it 'processes and escapes the params' do
expect(output)
.to include('<description>The value of param is: value</description>',
'horoscope-general-daily-today.aspx?sign=10</link>')
end
end
context 'with argument: auto URL' do
it 'exists with error' do
`#{executable} auto file://etc/passwd`
expect($?.exitstatus).to eq(1) # rubocop:disable Style/SpecialGlobalVars
end
end
end
| ruby | MIT | 400e796540e82a69e1f1e014b6f89c626acf32fd | 2026-01-04T17:45:12.820049Z | false |
html2rss/html2rss | https://github.com/html2rss/html2rss/blob/400e796540e82a69e1f1e014b6f89c626acf32fd/spec/html2rss/articles/deduplicator_spec.rb | spec/html2rss/articles/deduplicator_spec.rb | # frozen_string_literal: true
require 'spec_helper'
RSpec.describe Html2rss::Articles::Deduplicator do
subject(:deduplicated) { described_class.new(articles).call }
let(:scraper) { Class.new }
describe '#initialize' do
it 'requires articles' do
expect { described_class.new(nil) }.to raise_error(ArgumentError, 'articles must be provided')
end
end
describe '#call' do
context 'when multiple sources provide overlapping articles' do
let(:articles) do
defaults = { description: 'Description', scraper: }
article_args = [
{ id: 'a', url: 'https://example.com/a', title: 'Alpha' },
{ id: 'b', url: 'https://example.com/b', title: 'Beta' },
{ id: 'a', url: 'https://example.com/a', title: 'Alpha (selectors)' },
{ id: 'c', url: 'https://example.com/c', title: 'Gamma' },
{ id: 'b', url: 'https://example.com/b', title: 'Beta (auto)' }
]
article_args.map do |attrs|
Html2rss::RssBuilder::Article.new(**defaults, **attrs)
end
end
let(:expected_articles) { articles.values_at(0, 1, 3) }
it 'removes duplicates while preserving order of first occurrences' do
expect(deduplicated).to eq(expected_articles)
end
it 'keeps articles in their original relative order' do
expect(deduplicated.map(&:id)).to eq(%w[a b c])
end
end
context 'when articles do not expose a guid' do
let(:articles) do
shared_fingerprint = 'https://example.com/shared#!/shared'
unique_fingerprint = 'https://example.com/unique#!/unique'
shared_url = instance_double(Html2rss::Url, to_s: 'https://example.com/shared')
unique_url = instance_double(Html2rss::Url, to_s: 'https://example.com/unique')
first_article = instance_double(
Html2rss::RssBuilder::Article,
guid: nil,
id: 'shared',
url: shared_url,
scraper:
)
second_article = instance_double(
Html2rss::RssBuilder::Article,
guid: nil,
id: 'shared',
url: shared_url,
scraper:
)
third_article = instance_double(
Html2rss::RssBuilder::Article,
guid: nil,
id: 'unique',
url: unique_url,
scraper:
)
allow(first_article).to receive(:deduplication_fingerprint).and_return(shared_fingerprint)
allow(second_article).to receive(:deduplication_fingerprint).and_return(shared_fingerprint)
allow(third_article).to receive(:deduplication_fingerprint).and_return(unique_fingerprint)
[first_article, second_article, third_article]
end
it 'falls back to the combination of id and URL to deduplicate' do
expect(deduplicated.map(&:id)).to eq(%w[shared unique])
end
it 'delegates fingerprint calculation to the article', :aggregate_failures do
deduplicated
expect(articles[0]).to have_received(:deduplication_fingerprint)
expect(articles[1]).to have_received(:deduplication_fingerprint)
expect(articles[2]).to have_received(:deduplication_fingerprint)
end
end
end
end
| ruby | MIT | 400e796540e82a69e1f1e014b6f89c626acf32fd | 2026-01-04T17:45:12.820049Z | false |
html2rss/html2rss | https://github.com/html2rss/html2rss/blob/400e796540e82a69e1f1e014b6f89c626acf32fd/spec/lib/html2rss_spec.rb | spec/lib/html2rss_spec.rb | # frozen_string_literal: true
require 'nokogiri'
RSpec.describe Html2rss do
let(:config_file) { File.join(%w[spec fixtures feeds.test.yml]) }
let(:name) { 'nuxt-releases' }
it 'has a version number' do
expect(Html2rss::VERSION).not_to be_nil
end
describe '.config_from_yaml_file(file, feed_name = nil)' do
subject(:feed) do
described_class.config_from_yaml_file(config_file, name)
end
context 'with known name' do
it { expect(feed).to be_a(Hash) }
end
context 'with unknown name' do
it 'raises an ArgumentError' do
expect { described_class.config_from_yaml_file(config_file, 'unknown') }.to raise_error(ArgumentError)
end
end
end
describe '.feed' do
context 'with config being a Hash' do
subject(:xml) { Nokogiri.XML(feed_return.to_s) }
let(:config) do
described_class.config_from_yaml_file(config_file, name)
end
let(:feed_return) { VCR.use_cassette(name) { described_class.feed(config) } }
before do
allow(Faraday).to receive(:new).with(Hash).and_call_original
end
it 'returns a RSS::Rss instance & sets the request headers', :aggregate_failures do
expect(feed_return).to be_a(RSS::Rss)
expect(Faraday).to have_received(:new).with(
hash_including(headers: hash_including(config[:headers].transform_keys(&:to_s)))
)
end
describe 'feed.channel' do
it 'sets the channel attributes', :aggregate_failures do # rubocop:disable RSpec/ExampleLength
expect(xml.css('channel > title').text).to eq 'Releases · nuxt/nuxt.js · GitHub'
expect(xml.css('channel > description').text).to \
eq('The Vue.js Framework. Contribute to nuxt/nuxt.js development by creating an account on GitHub.')
expect(xml.css('channel > ttl').text.to_i).to eq 0
expect(xml.css('channel > item').count).to be > 0
expect(xml.css('channel > link').text).to eq 'https://github.com/nuxt/nuxt.js/releases'
expect(xml.css('channel > generator').text).to start_with("html2rss V. #{Html2rss::VERSION}")
end
end
describe 'feed.items' do
subject(:item) { xml.css('channel > item').first }
it 'sets item attributes', :aggregate_failures do
expect(item.css('title').text).to eq 'v2.10.2 (pi)'
expect(item.css('link').text).to eq 'https://github.com/nuxt/nuxt.js/releases/tag/v2.10.2'
expect(item.css('author').text).to eq 'pi'
expect(item.css('guid').text).to eq 'resdti'
end
describe 'item.pubDate' do
it 'has one in rfc822 format' do
pub_date = item.css('pubDate').text
expect(pub_date).to be_a(String) & eq(Time.parse(pub_date).rfc822.to_s)
end
end
describe 'item.category' do
subject(:categories) { item.css('category').to_s }
it 'sets the author as category' do
expect(categories).to include '<category>pi</category>'
end
end
describe 'item.enclosure' do
subject(:enclosure) { item.css('enclosure') }
it 'sets the enclosure', :aggregate_failures do
expect(enclosure.attr('url').value).to start_with('https://'), 'url'
expect(enclosure.attr('type').value).to eq('application/octet-stream'), 'type'
expect(enclosure.attr('length').value).to eq('0'), 'length'
end
end
describe 'item.description' do
subject(:description) { item.css('description').text }
it 'has a description' do
expect(description).to be_a(String)
end
it 'adds rel="nofollow noopener noreferrer" to all anchor elements' do
Nokogiri.HTML(description).css('a').each do |anchor|
expect(anchor.attr('rel')).to eq 'nofollow noopener noreferrer'
end
end
it 'changes target="_blank" on all anchor elements' do
Nokogiri.HTML(description).css('a').each { |anchor| expect(anchor.attr('target')).to eq '_blank' }
end
end
describe 'item.guid' do
it 'stays the same string for each run' do
feed = VCR.use_cassette("#{name}-second-run") do
described_class.feed(config)
end
first_guid = feed.items.first.guid.content
expect(feed_return.items.first.guid.content).to eq first_guid
end
it 'sets isPermaLink attribute to false' do
expect(feed_return.items.first.guid.isPermaLink).to be false
end
end
end
context 'with items having order key and reverse as value' do
before do
config[:selectors][:items][:order] = 'reverse'
end
it 'reverses the item ordering' do
expect(xml.css('channel > item').last.css('title').text).to eq 'v2.10.2 (pi)'
end
end
end
context 'with config having channel headers and header accepts json' do
let(:feed) do
VCR.use_cassette('httpbin-headers') do
described_class.feed(feed_config)
end
end
let(:feed_config) do
{
headers: {
Accept: 'application/json',
'User-Agent': 'html2rss-request',
'X-Something': 'Foobar',
Authorization: 'Token deadbea7',
Cookie: 'monster=MeWantCookie'
},
channel: {
url: 'https://httpbin.org/headers',
title: 'httpbin headers'
},
selectors: {
items: { selector: 'object > headers' },
title: { selector: 'host' },
something: { selector: 'x-something' },
authorization: { selector: 'authorization' },
cookie: { selector: 'cookie' },
categories: %i[title something authorization cookie]
}
}
end
it 'converts response to xml which has the information', :aggregate_failures do
expect(feed.items.size).to eq 1
expect(feed.items.first.categories.map(&:content)).to include('httpbin.org', 'Foobar', 'Token deadbea7',
'monster=MeWantCookie')
end
end
context 'with config having selectors and is json response' do
subject(:feed) do
VCR.use_cassette(name) do
config = described_class.config_from_yaml_file(config_file, name)
described_class.feed(config)
end
end
let(:name) { 'json' }
context 'with returned config' do
subject(:xml) { Nokogiri.XML(feed.to_s) }
it 'has the description derived from markdown' do
expect(
xml.css('item > description').first.text
).to eq '<h1>JUDAS AND THE BLACK MESSIAH</h1> <p>MPAA rating: R</p>'
end
end
end
context 'with config without title selector' do
subject(:feed) do
VCR.use_cassette(name) do
config = described_class.config_from_yaml_file(config_file, name)
described_class.feed(config)
end
end
let(:name) { 'notitle' }
it 'returns a RSS:Rss instance' do
expect(feed).to be_a(RSS::Rss)
end
context 'with item' do
let(:guid) { feed.items.first.guid.content }
it 'autogenerates a guid', :aggregate_failures do
expect(guid).to be_a(String)
expect(guid).not_to be_empty
end
end
end
end
describe '.auto_source' do
let(:url) { 'https://www.welt.de/' }
let(:feed_return) { VCR.use_cassette('welt') { described_class.auto_source(url) } }
it 'returns a RSS::Rss instance with channel and items', :aggregate_failures, :slow do
expect(feed_return).to be_a(RSS::Rss)
expect(feed_return.channel.title).to eq 'WELT - Aktuelle Nachrichten, News, Hintergründe & Videos'
expect(feed_return.channel.link).to eq 'https://www.welt.de/'
expect(feed_return.items.size >= 29).to be true
end
context 'with items_selector' do
before do
allow(described_class).to receive(:feed).and_return(nil)
end
let(:items_selector) { '.css.selector' }
it 'adds selectors.items selector and enhance to config' do
described_class.auto_source(url, items_selector:)
expect(described_class).to have_received(:feed).with(
hash_including(selectors: { items: { selector: items_selector, enhance: true } })
)
end
end
end
end
| ruby | MIT | 400e796540e82a69e1f1e014b6f89c626acf32fd | 2026-01-04T17:45:12.820049Z | false |
html2rss/html2rss | https://github.com/html2rss/html2rss/blob/400e796540e82a69e1f1e014b6f89c626acf32fd/spec/lib/html2rss/category_extractor_spec.rb | spec/lib/html2rss/category_extractor_spec.rb | # frozen_string_literal: true
require 'spec_helper'
RSpec.describe Html2rss::CategoryExtractor do
describe '.call' do
let(:html) { Nokogiri::HTML.fragment(html_content) }
let(:article_tag) { html.at_css('article') }
context 'when article has category classes' do
let(:html_content) do
<<~HTML
<article>
<div class="category-news">News</div>
<span class="post-tag">Technology</span>
<div class="article-category">Science</div>
</article>
HTML
end
it 'extracts categories from elements with category-related class names' do
categories = described_class.call(article_tag)
expect(categories).to contain_exactly('News', 'Technology', 'Science')
end
end
context 'when article has additional category patterns' do
let(:html_content) do
<<~HTML
<article>
<div class="topic-politics">Politics</div>
<span class="section-sports">Sports</span>
<div class="label-health">Health</div>
<div class="theme-tech">Tech</div>
<div class="subject-science">Science</div>
</article>
HTML
end
it 'extracts categories from additional category patterns' do
categories = described_class.call(article_tag)
expect(categories).to contain_exactly('Politics', 'Sports', 'Health', 'Tech', 'Science')
end
end
context 'when article has tag classes' do
let(:html_content) do
<<~HTML
<article>
<div class="post-tags">Sports</div>
<span class="tag-item">Politics</span>
<div class="article-tag">Health</div>
</article>
HTML
end
it 'extracts categories from elements with tag-related class names' do
categories = described_class.call(article_tag)
expect(categories).to contain_exactly('Sports', 'Politics', 'Health')
end
end
context 'when article has data attributes with category info' do
let(:html_content) do
<<~HTML
<article>
<div class="post-topic" data-topic="Business">Business News</div>
<span class="item-tag" data-tag="Finance">Finance Update</span>
<div class="content-category" data-category="Economy">Economy Report</div>
</article>
HTML
end
it 'extracts categories from both text content and data attributes' do
categories = described_class.call(article_tag)
expect(categories).to contain_exactly('Business', 'Business News', 'Economy', 'Economy Report', 'Finance',
'Finance Update')
end
end
context 'when article has mixed category sources' do
let(:html_content) do
<<~HTML
<article>
<div class="category-news">News</div>
<span class="post-tag">Technology</span>
<div class="post" data-category="Science">Post</div>
<span class="item" data-tag="Health">Item</span>
</article>
HTML
end
it 'extracts categories from all sources' do
categories = described_class.call(article_tag)
expect(categories).to contain_exactly('News', 'Technology', 'Science', 'Health')
end
end
context 'when article has empty or whitespace-only categories' do
let(:html_content) do
<<~HTML
<article>
<div class="category-news">News</div>
<span class="post-tag"> </span>
<div class="article-category"></div>
<span class="tag-item">Technology</span>
</article>
HTML
end
it 'filters out empty categories' do
categories = described_class.call(article_tag)
expect(categories).to contain_exactly('News', 'Technology')
end
end
context 'when article has duplicate categories' do
let(:html_content) do
<<~HTML
<article>
<div class="category-news">News</div>
<span class="post-tag">Technology</span>
<div class="article-category">News</div>
<span class="tag-item">Technology</span>
</article>
HTML
end
it 'removes duplicate categories' do
categories = described_class.call(article_tag)
expect(categories).to contain_exactly('News', 'Technology')
end
end
context 'when article has no category-related elements' do
let(:html_content) do
<<~HTML
<article>
<h1>Title</h1>
<p>Content</p>
<div class="author">Author</div>
</article>
HTML
end
it 'returns empty array' do
categories = described_class.call(article_tag)
expect(categories).to eq([])
end
end
context 'when article_tag is nil' do
it 'returns empty array' do
categories = described_class.call(nil)
expect(categories).to eq([])
end
end
context 'when categories have extra whitespace' do
let(:html_content) do
<<~HTML
<article>
<div class="category-news"> News </div>
<span class="post-tag"> Technology </span>
</article>
HTML
end
it 'strips whitespace from categories' do
categories = described_class.call(article_tag)
expect(categories).to contain_exactly('News', 'Technology')
end
end
end
end
| ruby | MIT | 400e796540e82a69e1f1e014b6f89c626acf32fd | 2026-01-04T17:45:12.820049Z | false |
html2rss/html2rss | https://github.com/html2rss/html2rss/blob/400e796540e82a69e1f1e014b6f89c626acf32fd/spec/lib/html2rss/cli_spec.rb | spec/lib/html2rss/cli_spec.rb | # frozen_string_literal: true
require 'spec_helper'
RSpec.describe Html2rss::CLI do
subject(:cli) { described_class.new }
describe '#feed' do
let(:rss_xml) { '<rss><channel><title>Example</title></channel></rss>' }
before do
allow(Html2rss).to receive(:feed).and_return(rss_xml)
end
it 'parses the YAML file and prints the RSS feed to stdout' do
allow(Html2rss).to receive(:config_from_yaml_file).and_return({ url: 'https://example.com' })
expect { cli.feed('example.yml') }.to output("#{rss_xml}\n").to_stdout
end
it 'passes the feed_name to config_from_yaml_file' do
allow(Html2rss).to receive(:config_from_yaml_file).with('example.yml',
'feed_name').and_return({ url: 'https://example.com' })
expect { cli.feed('example.yml', 'feed_name') }.to output("#{rss_xml}\n").to_stdout
end
it 'passes the strategy option to the config' do
allow(Html2rss).to receive(:config_from_yaml_file).and_return({})
cli.invoke(:feed, ['example.yml'], { strategy: 'browserless' })
expect(Html2rss).to have_received(:feed).with(hash_including(strategy: :browserless))
end
it 'passes the params option to the config' do
allow(Html2rss).to receive(:config_from_yaml_file).and_return({})
cli.invoke(:feed, ['example.yml'], { params: { 'foo' => 'bar' } })
expect(Html2rss).to have_received(:feed).with(hash_including(params: { 'foo' => 'bar' }))
end
end
describe '#auto' do
let(:auto_rss_xml) { '<rss><channel><title>Auto Source</title></channel></rss>' }
before do
allow(Html2rss).to receive(:auto_source).and_return(auto_rss_xml)
end
it 'calls Html2rss.auto_source and prints the result to stdout' do
expect { cli.auto('https://example.com') }.to output("#{auto_rss_xml}\n").to_stdout
end
it 'passes the strategy option to Html2rss.auto_source' do
cli.invoke(:auto, ['https://example.com'], { strategy: 'browserless' })
expect(Html2rss).to have_received(:auto_source)
.with('https://example.com', strategy: :browserless, items_selector: nil)
end
it 'passes the items_selector option to Html2rss.auto_source' do
cli.invoke(:auto, ['https://example.com'], { items_selector: '.item' })
expect(Html2rss).to have_received(:auto_source)
.with('https://example.com', strategy: :faraday, items_selector: '.item')
end
end
end
| ruby | MIT | 400e796540e82a69e1f1e014b6f89c626acf32fd | 2026-01-04T17:45:12.820049Z | false |
html2rss/html2rss | https://github.com/html2rss/html2rss/blob/400e796540e82a69e1f1e014b6f89c626acf32fd/spec/lib/html2rss/html_navigator_spec.rb | spec/lib/html2rss/html_navigator_spec.rb | # frozen_string_literal: true
require 'nokogiri'
RSpec.describe Html2rss::HtmlNavigator do
describe '.parent_until_condition' do
let(:html) do
<<-HTML
<div>
<section>
<article>
<p id="target">Some text here</p>
</article>
</section>
</div>
HTML
end
let(:document) { Nokogiri::HTML(html) }
let(:target_node) { document.at_css('#target') }
it 'returns the node itself if the condition is met' do
condition = ->(node) { node.name == 'p' }
result = described_class.parent_until_condition(target_node, condition)
expect(result).to eq(target_node)
end
it 'returns the first parent that satisfies the condition' do
condition = ->(node) { node.name == 'article' }
result = described_class.parent_until_condition(target_node, condition)
expect(result.name).to eq('article')
end
it 'returns nil if the node has no parents that satisfy the condition' do
condition = ->(node) { node.name == 'footer' }
result = described_class.parent_until_condition(target_node, condition)
expect(result).to be_nil
end
it 'returns nil if target_node is nil' do
condition = ->(node) { node.name == 'article' }
result = described_class.parent_until_condition(nil, condition)
expect(result).to be_nil
end
end
describe '.find_closest_selector_upwards' do
let(:html) do
<<-HTML
<div>
<p>
<a href="#" id="link">Link</a>
<span id="span">
<p>:rocket:</p>
</span>
</p>
</div>
HTML
end
let(:document) { Nokogiri::HTML(html) }
let(:expected_anchor) { document.at_css('a') }
context 'when an anchor is sibling to current_tag' do
let(:current_tag) { document.at_css('#span') }
it 'returns the closest anchor in as sibling' do
anchor = described_class.find_closest_selector_upwards(current_tag, 'a')
expect(anchor).to eq(expected_anchor)
end
end
context 'when an anchor is not below current_tag' do
let(:current_tag) { document.at_css('p') }
it 'returns the anchor upwards from current_tag' do
anchor = described_class.find_closest_selector_upwards(current_tag, 'a')
expect(anchor).to eq(expected_anchor)
end
end
end
describe '.find_tag_in_ancestors' do
let(:html) do
<<-HTML
<body>
<article>
<p>
<a href="#" id="link">Link</a>
</p>
</article>
</body>
HTML
end
let(:document) { Nokogiri::HTML(html) }
let(:current_tag) { document.at_css('#link') }
context 'when the anchor is inside the specified tag' do
it 'returns the specified tag' do
article_tag = described_class.find_tag_in_ancestors(current_tag, 'article')
expect(article_tag.name).to eq('article')
end
end
context 'when the anchor is not inside the specified tag' do
it 'returns stop_tag' do
article_tag = described_class.find_tag_in_ancestors(current_tag, 'body')
expect(article_tag).to be document.at_css('body')
end
end
context 'when the anchor is the specified tag' do
let(:html) do
<<-HTML
<article id="link">
<p>Content</p>
</article>
HTML
end
it 'returns the anchor itself' do
article_tag = described_class.find_tag_in_ancestors(current_tag, 'article')
expect(article_tag).to eq(current_tag)
end
end
end
end
| ruby | MIT | 400e796540e82a69e1f1e014b6f89c626acf32fd | 2026-01-04T17:45:12.820049Z | false |
html2rss/html2rss | https://github.com/html2rss/html2rss/blob/400e796540e82a69e1f1e014b6f89c626acf32fd/spec/lib/html2rss/url_spec.rb | spec/lib/html2rss/url_spec.rb | # frozen_string_literal: true
RSpec.describe Html2rss::Url do
describe '.from_relative' do
let(:base_url) { 'https://example.com' }
{
'/sprite.svg#play' => 'https://example.com/sprite.svg#play',
'/search?q=term' => 'https://example.com/search?q=term'
}.each_pair do |relative_url, expected_absolute|
it "resolves #{relative_url} to #{expected_absolute}" do
url = described_class.from_relative(relative_url, base_url)
expect(url.to_s).to eq(expected_absolute)
end
end
end
describe '.sanitize' do
{
nil => nil,
' ' => nil,
'http://example.com/ ' => 'http://example.com/',
'http://ex.ampl/page?sc=345s#abc' => 'http://ex.ampl/page?sc=345s#abc',
'https://example.com/sprite.svg#play' => 'https://example.com/sprite.svg#play',
'mailto:bogus@void.space' => 'mailto:bogus@void.space',
'http://übermedien.de' => 'http://xn--bermedien-p9a.de/',
'http://www.詹姆斯.com/' => 'http://www.xn--8ws00zhy3a.com/',
',https://wurstfing.er:4711' => 'https://wurstfing.er:4711/',
'feed:https://h2r.example.com/auto_source/aHR123' => 'https://h2r.example.com/auto_source/aHR123',
'https://[2001:470:30:84:e276:63ff:fe72:3900]/blog/' => 'https://[2001:470:30:84:e276:63ff:fe72:3900]/blog/'
}.each_pair do |raw_url, expected|
it "sanitizes #{raw_url.inspect} to #{expected.inspect}" do
result = described_class.sanitize(raw_url)
expect(result&.to_s).to eq(expected)
end
end
end
describe '#titleized' do
{
'http://www.example.com' => '',
'http://www.example.com/foobar/' => 'Foobar',
'http://www.example.com/foobar/baz.txt' => 'Foobar Baz',
'http://www.example.com/foo-bar/baz_qux.pdf' => 'Foo Bar Baz Qux',
'http://www.example.com/foo%20bar/baz%20qux.php' => 'Foo Bar Baz Qux',
'http://www.example.com/foo%20bar/baz%20qux-4711.html' => 'Foo Bar Baz Qux 4711'
}.each_pair do |url_string, expected|
it "titleizes #{url_string} to #{expected}" do
url = described_class.from_relative(url_string, 'https://example.com')
expect(url.titleized).to eq(expected)
end
end
end
describe '#channel_titleized' do
{
'http://www.example.com' => 'www.example.com',
'http://www.example.com/foobar' => 'www.example.com: Foobar',
'http://www.example.com/foobar/baz' => 'www.example.com: Foobar Baz'
}.each_pair do |url_string, expected|
it "channel titleizes #{url_string} to #{expected}" do
url = described_class.from_relative(url_string, 'https://example.com')
expect(url.channel_titleized).to eq(expected)
end
end
end
describe 'delegation' do
let(:url) { described_class.from_relative('/path', 'https://example.com') }
it 'delegates scheme method' do
expect(url.scheme).to eq('https')
end
it 'delegates host method' do
expect(url.host).to eq('example.com')
end
it 'delegates path method' do
expect(url.path).to eq('/path')
end
it 'delegates absolute? method' do
expect(url.absolute?).to be true
end
end
describe 'comparison' do
let(:first_url) { described_class.from_relative('/path1', 'https://example.com') }
let(:second_url) { described_class.from_relative('/path2', 'https://example.com') }
let(:first_url_dup) { described_class.from_relative('/path1', 'https://example.com') }
it 'compares equal URLs correctly' do
expect(first_url).to eq(first_url_dup)
end
it 'compares different URLs correctly' do
expect(first_url).not_to eq(second_url)
end
it 'compares URLs with spaceship operator for equality' do
expect(first_url <=> first_url_dup).to eq(0)
end
it 'compares URLs with spaceship operator for inequality' do
expect(first_url <=> second_url).not_to eq(0)
end
end
describe '.for_channel' do
context 'with valid absolute URLs' do
{
'https://example.com' => 'https://example.com/',
'http://example.com' => 'http://example.com/',
'https://www.example.com/path' => 'https://www.example.com/path',
'http://subdomain.example.com:8080/path?query=value#fragment' => 'http://subdomain.example.com:8080/path?query=value#fragment',
'https://example.com/path with spaces' => 'https://example.com/path%20with%20spaces'
}.each_pair do |input_url, expected_url|
it "accepts #{input_url.inspect} and returns normalized URL" do
result = described_class.for_channel(input_url)
expect(result).to be_a(described_class)
end
it "normalizes #{input_url.inspect} to #{expected_url.inspect}" do
result = described_class.for_channel(input_url)
expect(result.to_s).to eq(expected_url)
end
end
end
context 'with invalid relative URLs' do
[
'/relative/path',
'relative/path',
'./relative/path',
'../relative/path'
].each do |relative_url|
it "raises ArgumentError for relative URL #{relative_url.inspect}" do
expect { described_class.for_channel(relative_url) }
.to raise_error(ArgumentError, 'URL must be absolute')
end
end
end
context 'with URLs containing @ character' do
[
'https://user@example.com',
'https://example.com/path@fragment',
'https://example.com?param=value@test'
].each do |url_with_at|
it "raises ArgumentError for URL with @ character: #{url_with_at.inspect}" do
expect { described_class.for_channel(url_with_at) }
.to raise_error(ArgumentError, 'URL must not contain an @ character')
end
end
end
context 'with unsupported schemes' do
[
'ftp://example.com',
'file:///path/to/file',
'javascript:alert("test")',
'data:text/plain,test'
].each do |unsupported_url|
it "raises ArgumentError for unsupported scheme: #{unsupported_url.inspect}" do
expect { described_class.for_channel(unsupported_url) }
.to raise_error(ArgumentError, /URL scheme '[^']+' is not supported/)
end
end
it 'raises ArgumentError for mailto URL (contains @ character)' do
expect { described_class.for_channel('mailto:test@example.com') }
.to raise_error(ArgumentError, 'URL must not contain an @ character')
end
end
context 'with edge cases' do
it 'returns nil for nil input' do
expect(described_class.for_channel(nil)).to be_nil
end
it 'returns nil for empty string' do
expect(described_class.for_channel('')).to be_nil
end
it 'returns nil for whitespace-only string' do
expect(described_class.for_channel(' ')).to be_nil
end
end
context 'with malformed URLs' do
it 'raises ArgumentError for malformed URL' do
expect { described_class.for_channel('not-a-url') }
.to raise_error(ArgumentError, 'URL must be absolute')
end
it 'raises ArgumentError for URL with only scheme' do
expect { described_class.for_channel('https://') }
.to raise_error(ArgumentError, 'URL must be absolute')
end
end
end
describe 'immutability' do
let(:url) { described_class.from_relative('/path', 'https://example.com') }
it 'is frozen' do
expect(url).to be_frozen
end
end
end
| ruby | MIT | 400e796540e82a69e1f1e014b6f89c626acf32fd | 2026-01-04T17:45:12.820049Z | false |
html2rss/html2rss | https://github.com/html2rss/html2rss/blob/400e796540e82a69e1f1e014b6f89c626acf32fd/spec/lib/html2rss/request_service_spec.rb | spec/lib/html2rss/request_service_spec.rb | # frozen_string_literal: true
require 'spec_helper'
RSpec.describe Html2rss::RequestService do
specify(:aggregate_failures) do
expect(described_class::UnknownStrategy).to be < Html2rss::Error
expect(described_class::InvalidUrl).to be < Html2rss::Error
expect(described_class::UnsupportedUrlScheme).to be < Html2rss::Error
end
describe '.default_strategy_name' do
specify(:aggregate_failures) do
expect(described_class.default_strategy_name).to be :faraday
expect(described_class.strategy_registered?(:faraday)).to be true
end
end
describe '#execute' do
subject(:execute) { described_class.execute(ctx, strategy:) }
let(:strategy) { :faraday }
let(:ctx) { instance_double(Html2rss::RequestService::Context) }
let(:strategy_class) { described_class::FaradayStrategy }
let(:strategy_instance) do
instance_double strategy_class, execute: nil
end
context 'with a known strategy' do
it do
allow(strategy_class).to receive(:new).with(ctx).and_return(strategy_instance)
execute
expect(strategy_class).to have_received(:new).with(ctx)
end
end
context 'with an unknown strategy' do
let(:strategy) { :unknown }
it do
expect { execute }.to raise_error(Html2rss::RequestService::UnknownStrategy)
end
end
end
describe '.register_strategy' do
let(:new_strategy) { Class.new }
let(:strategy_name) { :new_strategy }
it 'registers a new strategy' do
expect do
described_class.register_strategy(strategy_name, new_strategy)
end.to change { described_class.strategy_registered?(strategy_name) }.from(false).to(true)
end
it 'raises an error if the strategy class is not a class' do
expect { described_class.register_strategy(strategy_name, 'not a class') }.to raise_error(ArgumentError)
end
end
describe '.strategy_registered?' do
context 'when the strategy is registered' do
it 'returns true' do
expect(described_class.strategy_registered?(:faraday)).to be true
end
end
context 'when the strategy is not registered' do
it 'returns false' do
expect(described_class.strategy_registered?(:unknown)).to be false
end
end
context 'when the strategy name is a string' do
it 'returns true for a registered strategy' do
expect(described_class.strategy_registered?('faraday')).to be true
end
it 'returns false for an unregistered strategy' do
expect(described_class.strategy_registered?('unknown')).to be false
end
end
end
describe '.default_strategy_name=' do
after do
described_class.default_strategy_name = :faraday
end
context 'when the strategy is registered' do
it 'sets the default strategy' do
described_class.default_strategy_name = :browserless
expect(described_class.default_strategy_name).to be :browserless
end
end
context 'when the strategy is not registered' do
it 'raises an UnknownStrategy error' do
expect do
described_class.default_strategy_name = :unknown
end.to raise_error(Html2rss::RequestService::UnknownStrategy)
end
end
end
describe '.unregister_strategy' do
context 'when the strategy is registered' do
before { described_class.register_strategy(:foobar, Class) }
let(:strategy_name) { :foobar }
it 'unregisters the strategy' do
expect do
described_class.unregister_strategy(strategy_name)
end.to change { described_class.strategy_registered?(strategy_name) }.from(true).to(false)
end
end
context 'when the strategy is not registered' do
let(:strategy_name) { :unknown }
it 'returns false' do
expect(described_class.unregister_strategy(strategy_name)).to be false
end
end
context 'when trying to unregister the default strategy' do
it 'raises an ArgumentError' do
expect do
described_class.unregister_strategy(described_class.default_strategy_name)
end.to raise_error(ArgumentError, 'Cannot unregister the default strategy.')
end
end
end
end
| ruby | MIT | 400e796540e82a69e1f1e014b6f89c626acf32fd | 2026-01-04T17:45:12.820049Z | false |
html2rss/html2rss | https://github.com/html2rss/html2rss/blob/400e796540e82a69e1f1e014b6f89c626acf32fd/spec/lib/html2rss/rss_builder_spec.rb | spec/lib/html2rss/rss_builder_spec.rb | # frozen_string_literal: true
RSpec.describe Html2rss::RssBuilder do
subject(:instance) do
described_class.new(channel:,
articles:,
stylesheets: [
{ href: 'rss.xsl', type: 'text/xsl' }
])
end
let(:channel) do
instance_double(Html2rss::RssBuilder::Channel,
title: 'Test Channel',
url: 'http://example.com',
description: 'A test channel',
language: 'en',
image: 'http://example.com/image.jpg',
ttl: 12,
last_build_date: 'Tue, 01 Jan 2019 00:00:00 GMT')
end
let(:articles) do
[
Html2rss::RssBuilder::Article.new(url: 'http://example.com/1',
id: 1,
title: 'Title 1',
description: 'Description 1',
published_at: '1969-12-31 23:59:59',
image: 'http://example.com/image1.jpg',
scraper: RSpec),
Html2rss::RssBuilder::Article.new(url: 'http://example.com/2',
id: 2,
title: 'Title 2',
description: 'Description 2',
published_at: '1969-12-31 23:59:59',
image: 'http://example.com/image2.jpg',
scraper: Html2rss::AutoSource::Scraper::Html)
]
end
it { expect(described_class).to be_a Class }
describe '#call' do
subject(:rss) { instance.call }
let(:rss_feed) do
<<~RSS.strip
<?xml version="1.0" encoding="UTF-8"?>
<?xml-stylesheet href="rss.xsl" type="text/xsl" media="all"?>
<rss version="2.0"\n
RSS
end
it 'returns an RSS 2.0 Rss object', :aggregate_failures do
expect(rss).to be_a(RSS::Rss)
expect(rss.to_s).to start_with(rss_feed)
end
context 'with <channel> tag' do
subject(:channel_tag) { Nokogiri::XML(rss.to_s).css('channel').first }
let(:tags) do
{
'language' => 'en',
'title' => 'Test Channel',
'link' => 'http://example.com',
'description' => 'A test channel',
'generator' => "html2rss V. #{Html2rss::VERSION} (scrapers: RSpec (1), AutoSource::Html (1))"
}
end
it 'has the correct attributes', :aggregate_failures do
tags.each do |tag, matcher|
expect(channel_tag.css(tag).text).to match(matcher), tag
end
end
end
context 'with the <item> tags' do
let(:items) { Nokogiri::XML(rss.to_s).css('item') }
it 'has the correct number of items' do
expect(items.size).to eq(articles.size)
end
end
context 'with one <item> tags' do
let(:item) { Nokogiri::XML(rss.to_s).css('item').first }
let(:article) { articles.first }
it 'has tags with correct values', :aggregate_failures do
%i[title description guid].each do |tag|
expect(item.css(tag).text).to eq(article.public_send(tag).to_s), tag
end
expect(item.css('link').text).to eq(article.url.to_s), 'link'
expect(item.css('pubDate').text).to eq(article.published_at.rfc822), 'pubDate'
end
it 'has an enclosure tag with the correct attributes' do
enclosure = item.css('enclosure').first
expect(enclosure[:url]).to match(article.image.to_s)
end
end
end
end
| ruby | MIT | 400e796540e82a69e1f1e014b6f89c626acf32fd | 2026-01-04T17:45:12.820049Z | false |
html2rss/html2rss | https://github.com/html2rss/html2rss/blob/400e796540e82a69e1f1e014b6f89c626acf32fd/spec/lib/html2rss/selectors_spec.rb | spec/lib/html2rss/selectors_spec.rb | # frozen_string_literal: true
require 'spec_helper'
RSpec.describe Html2rss::Selectors do
subject(:instance) { described_class.new(response, selectors:, time_zone:) }
let(:response) { Html2rss::RequestService::Response.new url: 'http://example.com', headers: { 'content-type': 'text/html' }, body: }
let(:selectors) do
{
items: { selector: 'article' },
title: { selector: 'h1' }
}
end
let(:time_zone) { 'UTC' }
let(:body) do
<<~HTML
<html><body>
<article><h1>article1</h1><a href="/article1">More</a></article>
<article><h1>article2</h1><a href="/article2">More</a></article>
</body></html>
HTML
end
describe '#initialize' do
it 'raises an error if the URL and link selectors are both present' do
selectors[:link] = {}
selectors[:url] = {}
expect { instance }.to raise_error(described_class::InvalidSelectorName, /either use "url" or "link"/)
end
context 'when attribute is renamed' do
before do
allow(Html2rss::Log).to receive(:warn)
selectors[:pubDate] = selectors.delete(:published_at)
end
it 'handles renamed attributes', :aggregate_failures do
expect(instance.instance_variable_get(:@selectors)).to include(:published_at)
expect(Html2rss::Log).to have_received(:warn).with(/deprecated. Please rename to 'published_at'./)
end
end
end
describe '#articles' do
subject(:titles) { instance.articles.map(&:title) }
it 'returns the articles' do
expect(titles).to eq(%w[article1 article2])
end
context 'when the order is set to reverse' do
before { selectors[:items][:order] = 'reverse' }
it 'reverses the articles' do
expect(titles).to eq(%w[article2 article1])
end
end
end
describe '#each' do
it 'returns an Enumerator if no block is given' do
expect(instance.each).to be_a(Enumerator)
end
it 'yields the articles to given block' do
expect { |b| instance.each(&b) }.to yield_successive_args(
Html2rss::RssBuilder::Article,
Html2rss::RssBuilder::Article
)
end
end
describe '#extract_article(item)' do
subject(:article) { instance.extract_article(item) }
let(:item) { Nokogiri::HTML(body).at('html') }
context 'when title is static and description the html of <body>' do
# Issue was reported at: https://github.com/html2rss/html2rss/issues/157
let(:selectors) do
{
title: { extractor: 'static', static: 'Test string' },
description: { extractor: 'html', selector: 'body' }
}
end
let(:body) do
<<~HTML
<html><body>
<main>
<h1>article1</h1>
<script>alert('');</script>
</main>
</body></html>
HTML
end
it 'yields the articles with the static title and the <body> as description' do
expect(instance.extract_article(item)).to a_hash_including(
title: 'Test string',
description: "<body>\n <main>\n <h1>article1</h1>\n <script>alert('');</script>\n </main>\n</body>"
)
end
end
end
describe '#enhance_article_hash(article_hash, item)' do
subject(:enhanced_article) do
item = Nokogiri::HTML(body).at('article:first')
instance.enhance_article_hash(article_hash, item)
end
before { selectors[:items][:enhance] = true }
let(:article_hash) { {} }
it 'enhances the article_hash' do
expect(enhanced_article).to be(article_hash) & include(:title, :url)
end
context 'when selector/key is already present in article_hash' do
let(:article_hash) { { title: 'Selected Article1 Headline' } }
it 'does not override the existing value' do
expect(enhanced_article[:title]).to eq(article_hash[:title])
end
end
context 'when extractor returns nil' do
before do
allow(Html2rss::HtmlExtractor).to receive(:new).and_return(instance_double(Html2rss::HtmlExtractor, call: nil))
end
it 'returns article_hash' do
expect(enhanced_article).to be(article_hash)
end
end
end
describe '#select' do
subject(:value) { instance.select(:title, item) }
let(:item) { Nokogiri::HTML(body).at('article:first') }
it 'returns the selected value' do
expect(value).to eq('article1')
end
context 'when name is not a referencing a selector' do
subject(:value) { instance.select(:unknown, item) }
it 'raises an error' do
expect { value }.to raise_error(described_class::InvalidSelectorName, "Selector for 'unknown' is not defined.")
end
end
end
end
| ruby | MIT | 400e796540e82a69e1f1e014b6f89c626acf32fd | 2026-01-04T17:45:12.820049Z | false |
html2rss/html2rss | https://github.com/html2rss/html2rss/blob/400e796540e82a69e1f1e014b6f89c626acf32fd/spec/lib/html2rss/error_spec.rb | spec/lib/html2rss/error_spec.rb | # frozen_string_literal: true
require 'nokogiri'
RSpec.describe Html2rss::Error do
it { expect(described_class).to be < StandardError }
end
| ruby | MIT | 400e796540e82a69e1f1e014b6f89c626acf32fd | 2026-01-04T17:45:12.820049Z | false |
html2rss/html2rss | https://github.com/html2rss/html2rss/blob/400e796540e82a69e1f1e014b6f89c626acf32fd/spec/lib/html2rss/auto_source_spec.rb | spec/lib/html2rss/auto_source_spec.rb | # frozen_string_literal: true
RSpec.describe Html2rss::AutoSource do
subject(:auto_source) { described_class.new(response, config) }
let(:config) { described_class::DEFAULT_CONFIG }
let(:url) { Html2rss::Url.from_relative('https://example.com', 'https://example.com') }
let(:headers) { { 'content-type': 'text/html' } }
let(:response) { Html2rss::RequestService::Response.new(body:, headers:, url:) }
let(:body) do
<<~HTML
<html>
<body>
<article id="article-1">
<h2>Article 1 Title <!-- remove this --></h2>
<a href="/article1">Read more</a>
</article>
</body>
</html>
HTML
end
describe '::DEFAULT_CONFIG' do
subject(:default_config) { described_class::DEFAULT_CONFIG }
it 'is a frozen Hash' do
expect(default_config).to be_a(Hash).and be_frozen
end
end
describe '::Config' do
subject(:schema) { described_class::Config }
it 'validates the default config' do
expect(schema.call(described_class::DEFAULT_CONFIG)).to be_success
end
it 'allows toggling the json_state scraper' do
toggled_config = described_class::DEFAULT_CONFIG.merge(
scraper: described_class::DEFAULT_CONFIG[:scraper].merge(json_state: { enabled: false })
)
expect(schema.call(toggled_config)).to be_success
end
describe 'optional(:cleanup)' do
let(:config) do
config = described_class::DEFAULT_CONFIG.dup
config[:auto_source] = { cleanup: described_class::Cleanup::DEFAULT_CONFIG }
config
end
it 'validates cleanup defaults' do
expect(schema.call(config)).to be_success
end
end
end
describe '#articles' do
subject(:articles) { auto_source.articles }
before do
allow(Parallel).to receive(:flat_map).and_wrap_original do |_original, scrapers, **_kwargs, &block|
scrapers.flat_map(&block)
end
end
describe 'when scraping succeeds' do
subject(:article) { articles.first }
it 'returns a single Html2rss::RssBuilder::Article', :aggregate_failures do
expect(articles.size).to eq(1)
expect(article).to be_a(Html2rss::RssBuilder::Article)
end
it 'preserves article content', :aggregate_failures do
expect(article.title).to eq('Article 1 Title')
expect(article.id).to eq('article-1')
expect(article.description).to include('Read more')
expect(article.scraper).to eq(Html2rss::AutoSource::Scraper::SemanticHtml)
end
it 'sanitizes the article url' do
expected_url = Html2rss::Url.from_relative('https://example.com/article1', 'https://example.com')
expect(article.url).to eq(expected_url)
end
end
context 'when no scrapers are found' do
before do
allow(Html2rss::AutoSource::Scraper)
.to receive(:from)
.and_raise(Html2rss::AutoSource::Scraper::NoScraperFound, 'no scrapers')
allow(Html2rss::Log).to receive(:warn)
end
it 'logs a warning and returns an empty array', :aggregate_failures do
expect(articles).to eq([])
expect(Html2rss::Log).to have_received(:warn)
.with(/No auto source scraper found for URL: #{Regexp.escape(url.to_s)}/)
end
end
context 'with custom configuration' do
let(:config) do
described_class::DEFAULT_CONFIG.merge(
scraper: { schema: { enabled: false }, html: { enabled: false } },
cleanup: { keep_different_domain: true, min_words_title: 5 }
) { |_key, old_val, new_val| old_val.is_a?(Hash) ? old_val.merge(new_val) : new_val }
end
before do
allow(Html2rss::AutoSource::Scraper).to receive(:from).and_return([])
end
it 'passes the overrides to the scraper lookup', :aggregate_failures do
expect(articles).to eq([])
expect(Html2rss::AutoSource::Scraper).to have_received(:from)
.with(instance_of(Nokogiri::HTML::Document),
hash_including(schema: hash_including(enabled: false),
html: hash_including(enabled: false)))
end
end
context 'when multiple scrapers emit overlapping articles' do # rubocop:disable RSpec/MultipleMemoizedHelpers
let(:first_scraper_articles) do
[
{ id: 'shared-first', title: 'Shared Article Title', description: 'Same url',
url: 'https://example.com/shared' },
{ id: 'first-only', title: 'First Exclusive Story', description: 'Only first', url: 'https://example.com/first' }
]
end
let(:second_scraper_articles) do
[
{ id: 'shared-second', title: 'Shared Article Title', description: 'Same url',
url: 'https://example.com/shared' },
{ id: 'second-only', title: 'Second Exclusive Story', description: 'Only second', url: 'https://example.com/second' }
]
end
let(:semantic_scraper_instance) do
instance_double(Html2rss::AutoSource::Scraper::SemanticHtml, each: first_scraper_articles.each)
end
let(:html_scraper_instance) do
instance_double(Html2rss::AutoSource::Scraper::Html, each: second_scraper_articles.each)
end
let(:semantic_scraper_class) do
class_double(Html2rss::AutoSource::Scraper::SemanticHtml,
options_key: :semantic_html,
new: semantic_scraper_instance)
end
let(:html_scraper_class) do
class_double(Html2rss::AutoSource::Scraper::Html,
options_key: :html,
new: html_scraper_instance)
end
before do
allow(Html2rss::AutoSource::Scraper).to receive(:from).and_return([semantic_scraper_class, html_scraper_class])
allow(Html2rss::AutoSource::Cleanup).to receive(:call).and_call_original
end
it 'deduplicates aggregated articles by url' do
expect(articles.map { |article| article.url.to_s })
.to match_array(%w[https://example.com/shared https://example.com/first https://example.com/second])
end
end
context 'when scraper lookup raises an error' do
before do
allow(Html2rss::AutoSource::Scraper).to receive(:from).and_raise(StandardError, 'Test error')
end
it 're-raises the error' do
expect { articles }.to raise_error(StandardError, 'Test error')
end
end
end
end
| ruby | MIT | 400e796540e82a69e1f1e014b6f89c626acf32fd | 2026-01-04T17:45:12.820049Z | false |
html2rss/html2rss | https://github.com/html2rss/html2rss/blob/400e796540e82a69e1f1e014b6f89c626acf32fd/spec/lib/html2rss/config_spec.rb | spec/lib/html2rss/config_spec.rb | # frozen_string_literal: true
require 'spec_helper'
RSpec.describe Html2rss::Config do
it { expect(described_class::InvalidConfig).to be < Html2rss::Error }
describe Html2rss::Config::MultipleFeedsConfig do
describe '::CONFIG_KEY_FEEDS' do
it { expect(described_class::CONFIG_KEY_FEEDS).to eq :feeds }
end
end
describe '.load_yaml' do
context 'when the file does not exist' do
let(:file) { 'non-existing-file' }
it 'raises an ArgumentError' do
expect do
described_class.load_yaml(file)
end.to raise_error(ArgumentError,
"File 'non-existing-file' does not exist")
end
end
context 'when the file exists & the feed name is reserved' do
let(:file) { 'spec/fixtures/feeds.test.yml' }
it 'raises an ArgumentError' do
expect do
described_class.load_yaml(file,
described_class::MultipleFeedsConfig::CONFIG_KEY_FEEDS)
end.to raise_error(ArgumentError,
"`#{described_class::MultipleFeedsConfig::CONFIG_KEY_FEEDS}` is a reserved feed name")
end
end
context 'when the file exists & is single config' do
let(:file) { 'spec/fixtures/single.test.yml' }
it 'raises an ArgumentError' do
expect(described_class.load_yaml(file)).to be_a(Hash)
end
end
context 'when the file exists with multiple feeds & the feed name is not found' do
let(:file) { 'spec/fixtures/feeds.test.yml' }
it 'raises an ArgumentError' do
expect do
described_class.load_yaml(file,
'non-existing-feed')
end.to raise_error(ArgumentError, /Feed 'non-existing-feed' not found under `feeds` key/)
end
end
context 'when the file exists with multiple feeds & the feed name is found' do
let(:file) { 'spec/fixtures/feeds.test.yml' }
let(:expected_config) do
{
headers: { 'User-Agent': String, 'Content-Language': 'en' },
stylesheets: [{ href: '/style.xls', media: 'all', type: 'text/xsl' },
{ href: '/rss.css', media: 'all', type: 'text/css' },
{ href: '/special.css', type: 'text/css' }],
channel: { language: 'en', url: String },
selectors: { description: { selector: 'p' }, items: { selector: 'div.main-horoscope' },
link: { extractor: 'href', selector: '#src-horo-today' } }
}
end
it 'returns the configuration' do
expect(described_class.load_yaml(file, 'notitle')).to match(expected_config)
end
end
end
describe '.from_hash' do
let(:hash) do
{
headers: { 'User-Agent': 'Agent-User', 'Content-Language': 'en' },
stylesheets: [{ href: '/style.xls', media: 'all', type: 'text/xsl' },
{ href: '/rss.css', media: 'all', type: 'text/css' },
{ href: '/special.css', type: 'text/css' }],
channel: { language: 'en', url: 'http://example.com' },
selectors: { description: { selector: 'p' }, items: { selector: 'div.main-horoscope' },
link: { extractor: 'href', selector: '#src-horo-today' } }
}
end
it 'returns the configuration' do
expect(described_class.from_hash(hash)).to be_a(described_class)
end
context 'with frozen hash' do
it 'returns the configuration' do
expect(described_class.from_hash(hash.freeze)).to be_a(described_class)
end
end
end
describe '#initialize' do
subject(:instance) { described_class.new(config) }
context 'when the configuration is valid' do
let(:config) do
{
headers: { 'User-Agent': 'Agent-User', 'Content-Language': 'en' },
stylesheets: [{ href: '/style.xls', media: 'all', type: 'text/xsl' },
{ href: '/rss.css', media: 'all', type: 'text/css' },
{ href: '/special.css', type: 'text/css' }],
channel: { language: 'en', url: 'http://example.com' },
selectors: { description: { selector: 'p' }, items: { selector: 'div.main-horoscope' },
link: { extractor: 'href', selector: '#src-horo-today' } }
}
end
it 'inits' do
expect { instance }.not_to raise_error
end
it 'leaves out auto_source' do
expect(instance.auto_source).to be_nil
end
it 'applies default configuration' do
expect(instance.time_zone).to eq('UTC')
end
it 'deep merges with the default configuration' do
expect(instance.url).to eq('http://example.com')
end
it 'freezes @config ivar' do
expect(instance.instance_variable_get(:@config)).to be_frozen
end
end
context 'when the configuration is valid with auto_source' do
let(:config) do
{
channel: { url: 'http://example.com' },
auto_source: {
scraper: {
schema: { enabled: false },
html: { minimum_selector_frequency: 3 }
}
}
}
end
let(:expected_auto_source_config) do
{
scraper: {
semantic_html: { enabled: true }, # wasn't explicitly set -> default
schema: { enabled: false }, # keeps the value from the config
html: {
enabled: true,
minimum_selector_frequency: 3, # was explicitly set -> overrides default
use_top_selectors: 5 # wasn't explicitly set -> default
},
json_state: { enabled: true }, # wasn't explicitly set -> default
rss_feed_detector: { enabled: true } # wasn't explicitly set -> default
},
cleanup: {
keep_different_domain: false, # wasn't explicitly set -> default
min_words_title: 3 # wasn't explicitly set -> default
}
}
end
it 'applies default auto_source configuration' do
expect(instance.auto_source).to eq(expected_auto_source_config)
end
end
context 'when the configuration is invalid' do
let(:config) { { headers: { 'User-Agent': nil, 'Content-Language': 0xBADF00D } } }
it 'raises an ArgumentError' do
expect { instance }.to raise_error(described_class::InvalidConfig, /Invalid configuration:/)
end
end
context 'when configuration uses deprecated channel attributes' do
before do
allow(Html2rss::Log).to receive(:warn).and_return(nil)
end
let(:config) do
{
channel: { url: 'https://example.com',
headers: { 'User-Agent': 'Agent-User', 'Content-Language': 'en' },
strategy: :browserless },
auto_source: {}
}
end
%i[strategy headers].each do |key|
it "warns about deprecated #{key}" do
instance
expect(Html2rss::Log).to have_received(:warn).with(/`channel.#{key}` key is deprecated./)
end
it "moves deprecated #{key} to top level" do
value = config.dig(:channel, key)
matcher = key == :headers ? include(value.transform_keys(&:to_s)) : eq(value)
expect(instance.public_send(key)).to matcher
end
end
end
end
describe '#headers' do
  subject(:headers) { described_class.new(config).headers }

  let(:config) do
    {
      channel: { url: 'https://example.com/articles', language: channel_language },
      selectors: { items: { selector: '.item' } },
      headers: custom_headers
    }
  end
  # Caller-provided headers deliberately use lowercase names: the config is
  # expected to normalize them to canonical header casing.
  let(:custom_headers) { { 'accept' => 'application/json', 'x-custom-id' => '123' } }
  let(:channel_language) { 'fr' }

  let(:expected_headers) do
    {
      'Host' => 'example.com',
      'Accept-Language' => 'fr', # derived from the channel language
      'X-Custom-Id' => '123',
      # Custom Accept value is kept and the library default is appended.
      'Accept' => "application/json,#{Html2rss::Config::RequestHeaders::DEFAULT_ACCEPT}"
    }
  end

  it 'normalizes caller provided headers and adds defaults' do
    expect(headers).to include(expected_headers)
  end

  context 'when the channel language is missing' do
    let(:channel_language) { nil }

    it 'falls back to en-US for Accept-Language' do
      expect(headers).to include('Accept-Language' => 'en-US,en;q=0.9')
    end
  end
end
end
| ruby | MIT | 400e796540e82a69e1f1e014b6f89c626acf32fd | 2026-01-04T17:45:12.820049Z | false |
html2rss/html2rss | https://github.com/html2rss/html2rss/blob/400e796540e82a69e1f1e014b6f89c626acf32fd/spec/lib/html2rss/html_extractor_spec.rb | spec/lib/html2rss/html_extractor_spec.rb | # frozen_string_literal: true
require 'nokogiri'
# Specs for Html2rss::HtmlExtractor: turns a single article-like HTML fragment
# into a normalized article hash (title, url, image, description, id,
# published_at, categories, enclosures).
RSpec.describe Html2rss::HtmlExtractor do
  subject(:article_hash) { described_class.new(article_tag, base_url: 'https://example.com').call }

  describe '.extract_visible_text' do
    subject(:visible_text) { described_class.extract_visible_text(tag) }

    # <script> content must not leak into the visible text.
    let(:tag) do
      Nokogiri::HTML.fragment('<div>Hello <span>World</span><script>App = {}</script></div>').at_css('div')
    end

    it 'returns the visible text from the tag and its children' do
      expect(visible_text).to eq('Hello World')
    end
  end

  context 'when heading is present' do
    let(:html) do
      <<~HTML
        <article id="fck-ptn">
          <a href="#">Scroll to top</a>
          <h1>
            <a href="/sample">Sample Heading</a>
          </h1>
          <time datetime="2024-02-24T12:00-03:00">FCK PTN</time>
          <p>Sample description</p>
          <img src="image.jpg" alt="Image" />
          <video> <source src="video.mp4" type="video/mp4"></video>
        </article>
      HTML
    end

    describe '#call' do
      let(:article_tag) { Nokogiri::HTML.fragment(html) }
      let(:heading) { article_tag.at_css('h1') }

      it 'returns the article_hash', :aggregate_failures do # rubocop:disable RSpec/ExampleLength
        # Both the <img> and the <video><source> become enclosures; relative
        # URLs are resolved against base_url.
        expect(article_hash).to a_hash_including(
          title: 'Sample Heading',
          description: 'Sample Heading FCK PTN Sample description',
          id: 'fck-ptn',
          published_at: an_instance_of(DateTime),
          url: Html2rss::Url.from_relative('https://example.com/sample', 'https://example.com'),
          image: be_a(Html2rss::Url),
          categories: [],
          enclosures: contain_exactly(a_hash_including(
                                        url: be_a(Html2rss::Url),
                                        type: 'video/mp4'
                                      ), a_hash_including(
                                        url: be_a(Html2rss::Url),
                                        type: 'image/jpeg'
                                      ))
        )

        expect(article_hash[:published_at].to_s).to eq '2024-02-24T12:00:00-03:00'
        expect(article_hash[:url].to_s).to eq 'https://example.com/sample'
        expect(article_hash[:image].to_s).to eq 'https://example.com/image.jpg'
      end
    end

    context 'with invalid datetime' do
      let(:html) do
        <<~HTML
          <article id="fck-ptn">
            <h1>Sample Heading</h1>
            <time datetime="invalid">FCK PTN</time>
          </article>
        HTML
      end
      let(:article_tag) { Nokogiri::HTML.fragment(html) }

      # An unparsable datetime attribute must not raise; it yields nil.
      it 'returns the article_hash with a nil published_at' do
        expect(article_hash[:published_at]).to be_nil
      end
    end
  end

  context 'when heading is not present' do
    let(:html) do
      <<~HTML
        <article>
          <time datetime="2024-02-24 12:00">FCK PTN</time>
          <p>Sample description</p>
          <img src="image.jpg" alt="Image" />
        </article>
      HTML
    end
    let(:article_tag) { Nokogiri::HTML.fragment(html) }

    # Without a heading and without an anchor, title/url/id stay nil.
    let(:details) do
      { title: nil,
        url: nil,
        image: be_a(Html2rss::Url),
        description: 'FCK PTN Sample description',
        id: nil,
        published_at: be_a(DateTime),
        categories: [],
        enclosures: [a_hash_including(
          url: be_a(Html2rss::Url),
          type: 'image/jpeg'
        )] }
    end

    it 'returns the details' do
      expect(article_hash).to match(details)
    end
  end

  describe '#heading' do
    subject(:heading) { described_class.new(article_tag, base_url: 'https://example.com').send(:heading) }

    let(:article_tag) { Nokogiri::HTML.fragment(html) }

    context 'when heading is present' do
      let(:html) do
        <<~HTML
          <article>
            <h1>Heading 1</h1>
            <h2>Heading 2</h2>
            <h3>Heading 3</h3>
          </article>
        HTML
      end

      it 'returns the smallest heading with the largest visible text', :aggregate_failures do
        expect(heading.name).to eq('h1')
        expect(heading.text).to eq('Heading 1')
      end
    end

    context 'when heading is not present' do
      let(:html) do
        <<~HTML
          <article>
            <p>Paragraph 1</p>
            <p>Paragraph 2</p>
          </article>
        HTML
      end

      it 'returns nil' do
        expect(heading).to be_nil
      end
    end
  end

  describe 'category extraction' do
    context 'when article has category classes' do
      let(:html) do
        <<~HTML
          <article>
            <h1>Sample Heading</h1>
            <div class="category-news">News</div>
            <span class="post-tag">Technology</span>
            <div class="article-category">Science</div>
          </article>
        HTML
      end
      let(:article_tag) { Nokogiri::HTML.fragment(html) }

      it 'extracts categories from elements with category-related class names' do
        expect(article_hash[:categories]).to contain_exactly('News', 'Technology', 'Science')
      end
    end

    context 'when article has no category classes' do
      let(:html) do
        <<~HTML
          <article>
            <h1>Sample Heading</h1>
            <p>Sample description</p>
          </article>
        HTML
      end
      let(:article_tag) { Nokogiri::HTML.fragment(html) }

      it 'returns empty categories array' do
        expect(article_hash[:categories]).to eq([])
      end
    end
  end
end
| ruby | MIT | 400e796540e82a69e1f1e014b6f89c626acf32fd | 2026-01-04T17:45:12.820049Z | false |
html2rss/html2rss | https://github.com/html2rss/html2rss/blob/400e796540e82a69e1f1e014b6f89c626acf32fd/spec/lib/html2rss/auto_source/scraper_spec.rb | spec/lib/html2rss/auto_source/scraper_spec.rb | # frozen_string_literal: true
require 'rspec'
# Specs for the scraper registry module: it exposes the SCRAPERS list and
# selects suitable scraper classes for a parsed document via `.from`.
RSpec.describe Html2rss::AutoSource::Scraper do
  it { is_expected.to be_a(Module) }
  it { expect(described_class::SCRAPERS).to be_an(Array) }

  describe '.from(parsed_body, opts)' do
    context 'when suitable scraper is found' do
      # An <article> containing an anchor is enough for at least one scraper.
      let(:parsed_body) { Nokogiri::HTML('<html><body><article><a href="#"></a></article></body></html>') }

      it 'returns an array of scrapers' do
        result = described_class.from(parsed_body)
        expect(result).to be_an(Array)
      end
    end

    context 'when no suitable scraper is found' do
      # A document without any article-like content matches no scraper.
      let(:parsed_body) { Nokogiri::HTML('<html><body></body></html>') }

      it 'raises NoScraperFound error' do
        expect { described_class.from(parsed_body) }
          .to raise_error(Html2rss::AutoSource::Scraper::NoScraperFound)
      end
    end
  end
end
| ruby | MIT | 400e796540e82a69e1f1e014b6f89c626acf32fd | 2026-01-04T17:45:12.820049Z | false |
html2rss/html2rss | https://github.com/html2rss/html2rss/blob/400e796540e82a69e1f1e014b6f89c626acf32fd/spec/lib/html2rss/auto_source/cleanup_spec.rb | spec/lib/html2rss/auto_source/cleanup_spec.rb | # frozen_string_literal: true
# Specs for Html2rss::AutoSource::Cleanup: post-processing of scraped articles
# (drops invalid/duplicate/short-titled/foreign-domain/non-HTTP entries).
RSpec.describe Html2rss::AutoSource::Cleanup do
  let(:url) { Html2rss::Url.from_relative('http://example.com', 'http://example.com') }
  # Fixture articles covering each removal rule exercised below.
  let(:articles) do
    [
      instance_double(Html2rss::RssBuilder::Article,
                      valid?: true,
                      url: Html2rss::Url.from_relative('http://example.com/article0', 'http://example.com'),
                      title: 'Valid Article One'),
      instance_double(Html2rss::RssBuilder::Article,
                      valid?: true,
                      url: Html2rss::Url.from_relative('http://example.com/article1', 'http://example.com'),
                      title: 'Valid Article Two'),
      instance_double(Html2rss::RssBuilder::Article,
                      valid?: false,
                      url: Html2rss::Url.from_relative('http://example.com/article2', 'http://example.com'),
                      title: 'Invalid Article'),
      instance_double(Html2rss::RssBuilder::Article,
                      valid?: true,
                      url: Html2rss::Url.from_relative('http://otherdomain.com/article3', 'http://example.com'),
                      title: 'Different Domain Article'),
      instance_double(Html2rss::RssBuilder::Article,
                      valid?: true,
                      url: Html2rss::Url.from_relative('ftp://example.com/article4', 'http://example.com'),
                      title: 'Non-HTTP Article'),
      instance_double(Html2rss::RssBuilder::Article,
                      valid?: true,
                      url: Html2rss::Url.from_relative('http://example.com/article5', 'http://example.com'),
                      title: 'Short')
    ]
  end

  describe '.call' do
    subject { described_class.call(articles, url:, keep_different_domain:, min_words_title:) }

    let(:keep_different_domain) { false }
    let(:min_words_title) { 2 }

    it 'removes invalid articles' do
      expect(subject).not_to include(articles[2])
    end

    context 'with duplicated articles' do
      # Same URL as articles.first -> considered a duplicate.
      let(:duplicated_url_article) do
        instance_double(Html2rss::RssBuilder::Article,
                        valid?: true,
                        url: articles.first.url,
                        title: 'Duplicated Article')
      end

      before do
        articles << duplicated_url_article
      end

      it 'removes duplicate articles by URL', :aggregate_failures do
        # The later duplicate is dropped, but the first occurrence of the
        # shared URL survives.
        expect(subject).not_to include(duplicated_url_article)
        expect(subject.first.url).to eq(duplicated_url_article.url)
      end
    end

    it 'keeps only HTTP and HTTPS articles' do
      expect(subject).not_to include(articles[4])
    end

    context 'when keep_different_domain is false' do
      it 'removes articles from different domains' do
        expect(subject).not_to include(articles[3])
      end
    end

    context 'when keep_different_domain is true' do
      let(:keep_different_domain) { true }

      it 'keeps articles from different domains' do
        different_domain_article = articles[3]
        expect(subject).to include(different_domain_article)
      end
    end

    it 'keeps only articles with a title having at least min_words_title words' do
      expect(subject).not_to include(articles[5])
    end
  end

  describe '.keep_only_with_min_words_title!' do
    subject(:keep_only_with_min_words_title!) do
      described_class.keep_only_with_min_words_title!(articles, min_words_title:)
    end

    let(:articles) do
      [
        instance_double(Html2rss::RssBuilder::Article, title: 'A valid title'),
        instance_double(Html2rss::RssBuilder::Article, title: 'Short'),
        instance_double(Html2rss::RssBuilder::Article, title: 'Another valid article title'),
        instance_double(Html2rss::RssBuilder::Article, title: nil),
        instance_double(Html2rss::RssBuilder::Article, title: ''),
        instance_double(Html2rss::RssBuilder::Article, title: 'Two words')
      ]
    end

    context 'when min_words_title is 3' do
      let(:min_words_title) { 3 }

      # nil titles are kept (no title to judge); empty/short titles are dropped.
      it 'keeps only articles with at least 3 words in the title or nil title', :aggregate_failures do
        keep_only_with_min_words_title!
        expect(articles.map(&:title)).to contain_exactly('A valid title', 'Another valid article title', nil)
      end
    end

    context 'when min_words_title is 1' do
      let(:min_words_title) { 1 }

      it 'keeps all articles except those with empty string title', :aggregate_failures do
        keep_only_with_min_words_title!
        expect(articles.map(&:title)).to contain_exactly(
          'A valid title', 'Short', 'Another valid article title', nil, 'Two words'
        )
      end
    end

    context 'when all titles are nil or empty' do
      let(:articles) do
        [
          instance_double(Html2rss::RssBuilder::Article, title: nil),
          instance_double(Html2rss::RssBuilder::Article, title: '')
        ]
      end
      let(:min_words_title) { 2 }

      it 'keeps only articles with nil title' do
        keep_only_with_min_words_title!
        expect(articles.map(&:title)).to contain_exactly(nil)
      end
    end

    context 'with non-Latin titles' do
      let(:articles) do
        [
          instance_double(Html2rss::RssBuilder::Article, title: 'Привет мир'),
          instance_double(Html2rss::RssBuilder::Article, title: 'مرحبا بالعالم'),
          instance_double(Html2rss::RssBuilder::Article, title: '你好 世界'),
          instance_double(Html2rss::RssBuilder::Article, title: '你好世界'),
          instance_double(Html2rss::RssBuilder::Article, title: nil)
        ]
      end
      let(:min_words_title) { 2 }

      # Word counting is whitespace based: '你好世界' (no space) counts as one
      # word and is therefore dropped.
      it 'counts Unicode words correctly', :aggregate_failures do
        keep_only_with_min_words_title!
        expect(articles.map(&:title)).to contain_exactly('Привет мир', 'مرحبا بالعالم', '你好 世界', nil)
      end
    end
  end
end
| ruby | MIT | 400e796540e82a69e1f1e014b6f89c626acf32fd | 2026-01-04T17:45:12.820049Z | false |
html2rss/html2rss | https://github.com/html2rss/html2rss/blob/400e796540e82a69e1f1e014b6f89c626acf32fd/spec/lib/html2rss/auto_source/scraper/json_state_spec.rb | spec/lib/html2rss/auto_source/scraper/json_state_spec.rb | # frozen_string_literal: true
# Specs for Html2rss::AutoSource::Scraper::JsonState: detects and extracts
# articles from framework JSON state blobs (Next.js, Nuxt, custom window.STATE).
RSpec.describe Html2rss::AutoSource::Scraper::JsonState do
  let(:base_url) { Html2rss::Url.from_relative('https://example.com', 'https://example.com') }

  # Reads an HTML fixture from spec/fixtures/auto_source/json_state/.
  def load_fixture(name)
    file = File.join(__dir__, '../../../..', 'fixtures/auto_source/json_state', name)
    File.read(file)
  end

  describe '.articles?' do
    it 'detects Next.js JSON state' do
      parsed_body = Nokogiri::HTML(load_fixture('next.html'))
      expect(described_class).to be_articles(parsed_body)
    end

    it 'detects Nuxt JSON state' do
      parsed_body = Nokogiri::HTML(load_fixture('nuxt.html'))
      expect(described_class).to be_articles(parsed_body)
    end

    it 'detects custom window state blobs' do
      parsed_body = Nokogiri::HTML(load_fixture('state.html'))
      expect(described_class).to be_articles(parsed_body)
    end

    it 'detects arrays containing nested article arrays' do
      parsed_body = Nokogiri::HTML(load_fixture('nested_array.html'))
      expect(described_class).to be_articles(parsed_body)
    end

    it 'returns false when no JSON state is present' do
      parsed_body = Nokogiri::HTML('<html><body><script>console.log("hello")</script></body></html>')
      expect(described_class).not_to be_articles(parsed_body)
    end
  end

  describe '#each' do
    subject(:articles) { described_class.new(parsed_body, url: base_url).each.to_a }

    context 'with Next.js data' do
      let(:parsed_body) { Nokogiri::HTML(load_fixture('next.html')) }

      it 'normalises the article data' do # rubocop:disable RSpec/ExampleLength
        expect(articles).to contain_exactly(
          a_hash_including(
            title: 'Next.js powers the latest headlines',
            description: 'A summary sourced from Next.js JSON state.',
            url: Html2rss::Url.from_relative('/next/headline', base_url),
            image: Html2rss::Url.from_relative('https://cdn.example.com/images/next/headline.jpg', base_url),
            published_at: '2024-04-01T12:00:00Z',
            categories: %w[nextjs spa],
            id: 'next-article-1'
          )
        )
      end
    end

    context 'with Nuxt data' do
      let(:parsed_body) { Nokogiri::HTML(load_fixture('nuxt.html')) }

      it 'extracts relative URLs and nested categories' do # rubocop:disable RSpec/ExampleLength
        expect(articles).to contain_exactly(
          a_hash_including(
            title: 'Nuxt article arrives',
            description: 'Nuxt.js embeds article data into a global.',
            url: Html2rss::Url.from_relative('/nuxt/article', base_url),
            image: Html2rss::Url.from_relative('/images/nuxt/article.jpg', base_url),
            published_at: '2024-04-02T10:00:00Z',
            categories: %w[nuxt spa],
            id: 'https://example.com/nuxt/article'
          )
        )
      end
    end

    context 'with custom window state' do
      let(:parsed_body) { Nokogiri::HTML(load_fixture('state.html')) }

      it 'handles bespoke globals' do # rubocop:disable RSpec/ExampleLength
        expect(articles).to contain_exactly(
          a_hash_including(
            title: 'Window state update',
            description: 'Content embedded in a custom window.STATE blob.',
            url: Html2rss::Url.from_relative('/state/update', base_url),
            image: Html2rss::Url.from_relative('/images/state/update.png', base_url),
            published_at: '2024-04-03T08:30:00Z',
            categories: %w[updates custom],
            id: 'state-post-42'
          )
        )
      end
    end

    context 'with nested array data' do
      let(:parsed_body) { Nokogiri::HTML(load_fixture('nested_array.html')) }

      it 'finds articles nested inside array entries' do
        expect(articles).to contain_exactly(a_hash_including(
                                              title: 'Nested article',
                                              url: Html2rss::Url.from_relative('/nested/article',
                                                                               base_url)
                                            ))
      end
    end
  end
end
| ruby | MIT | 400e796540e82a69e1f1e014b6f89c626acf32fd | 2026-01-04T17:45:12.820049Z | false |
html2rss/html2rss | https://github.com/html2rss/html2rss/blob/400e796540e82a69e1f1e014b6f89c626acf32fd/spec/lib/html2rss/auto_source/scraper/schema_spec.rb | spec/lib/html2rss/auto_source/scraper/schema_spec.rb | # frozen_string_literal: true
# Specs for Html2rss::AutoSource::Scraper::Schema: detects ld+json script tags,
# extracts supported schema.org objects (Article, NewsArticle, ItemList, ...)
# and yields normalized article hashes.
RSpec.describe Html2rss::AutoSource::Scraper::Schema do
  # Test factories for maintainability
  def build_script_tag(json_content)
    Nokogiri::HTML("<script type=\"application/ld+json\">#{json_content}</script>")
  end

  def mock_logging
    allow(Html2rss::Log).to receive(:warn)
    allow(Html2rss::Log).to receive(:debug)
  end

  def build_simple_article(type: 'Article', title: 'Sample Title')
    { '@type': type, title:, url: 'https://example.com' }
  end

  let(:news_article_schema_object) do
    # src: https://schema.org/NewsArticle
    {
      '@context': 'https://schema.org',
      '@type': 'NewsArticle',
      url: 'http://www.bbc.com/news/world-us-canada-39324587',
      publisher: {
        '@type': 'Organization',
        name: 'BBC News',
        logo: 'http://www.bbc.co.uk/news/special/2015/newsspec_10857/bbc_news_logo.png?cb=1'
      },
      title: "Trump Russia claims: FBI's Comey confirms investigation of election 'interference'",
      mainEntityOfPage: 'http://www.bbc.com/news/world-us-canada-39324587',
      articleBody: "Director Comey says the probe into last year's US election would assess if crimes were committed.",
      image: [
        'http://ichef-1.bbci.co.uk/news/560/media/images/75306000/jpg/_75306515_line976.jpg',
        'http://ichef.bbci.co.uk/news/560/cpsprodpb/8AB9/production/_95231553_comey2.jpg',
        'http://ichef.bbci.co.uk/news/560/cpsprodpb/17519/production/_95231559_committee.jpg',
        'http://ichef.bbci.co.uk/news/560/cpsprodpb/CC81/production/_95235325_f704a6dc-c017-4971-aac3-04c03eb097fb.jpg',
        'http://ichef-1.bbci.co.uk/news/560/cpsprodpb/11AA1/production/_95235327_c0b59f9e-316e-4641-aa7e-3fec6daea62b.jpg',
        'http://ichef.bbci.co.uk/news/560/cpsprodpb/0F99/production/_95239930_trumptweet.png',
        'http://ichef-1.bbci.co.uk/news/560/cpsprodpb/10DFA/production/_95241196_mediaitem95241195.jpg',
        'http://ichef.bbci.co.uk/news/560/cpsprodpb/2CA0/production/_95242411_comey.jpg',
        'http://ichef.bbci.co.uk/news/560/cpsprodpb/11318/production/_95242407_mediaitem95242406.jpg',
        'http://ichef-1.bbci.co.uk/news/560/cpsprodpb/BCED/production/_92856384_line976.jpg',
        'http://ichef-1.bbci.co.uk/news/560/cpsprodpb/12B64/production/_95244667_mediaitem95244666.jpg'
      ],
      datePublished: '2017-03-20T20:30:54+00:00'
    }
  end

  let(:article_schema_object) do
    {
      '@context': 'https://schema.org',
      '@id': '4582066',
      '@type': 'Article',
      additionalType: 'ArticleTeaser',
      url: '/news/Google-entlaesst-Python-Team-fuer-billigere-Arbeitskraefte-in-Muenchen-9703029.html',
      title: 'Für Einsparungen kündigt Google komplettem Python-Team',
      kicker: 'Ersatz wohl in München',
      abstract: 'Einem Python-Team wurde offenbar komplett gekündigt.',
      image: 'https://www.heise.de/imgs/18/4/5/8/2/0/6/6/shutterstock_1777981682-958a1d575a8f5e3e.jpeg'
    }
  end

  describe '.options_key' do
    specify { expect(described_class.options_key).to eq(:schema) }
  end

  describe '.articles?(parsed_body)' do
    subject(:articles?) { described_class.articles?(parsed_body) }

    context 'with a NewsArticle' do
      let(:parsed_body) do
        Nokogiri::HTML("<script type=\"application/ld+json\">#{news_article_schema_object.to_json}</script>")
      end

      it { is_expected.to be_truthy }
    end

    context 'with an Article' do
      let(:parsed_body) do
        Nokogiri::HTML("<script type=\"application/ld+json\">#{article_schema_object.to_json}</script>")
      end

      it { is_expected.to be_truthy }
    end

    context 'with an empty body' do
      let(:parsed_body) { Nokogiri::HTML.fragment('') }

      it { is_expected.to be_falsey }
    end

    # Detection must tolerate arbitrary whitespace around the @type value.
    context 'with excessive spacing in JSON and supported @type' do
      let(:parsed_body) do
        Nokogiri::HTML('<script type="application/ld+json">{"@type" : "NewsArticle" }</script>')
      end

      it { is_expected.to be_truthy }
    end
  end

  describe '.self.from(object)' do
    subject(:array) { described_class.from(object) }

    context 'with nil' do
      let(:object) { nil }

      it 'scrapes the article' do
        expect(array).to eq([])
      end
    end

    context 'with a Article schema object' do
      let(:object) { article_schema_object }

      it 'scrapes the article' do
        expect(array).to include(hash_including('@type': 'Article'))
      end
    end

    context 'with an ItemList schema object' do
      let(:object) do
        {
          '@context': 'https://schema.org',
          '@type': 'ItemList',
          itemListElement: [
            {
              '@type': 'ListItem',
              position: 1,
              url: 'https://www.example.com/breakdancerin-raygun-geht-weiter-110168077.html'
            },
            {
              '@type': 'ListItem',
              position: 2,
              url: 'https://www.example.com/in-frankfurt-macht-die-neue-grundsteuer-das-wohnen-noch-teurer-110165876.html'
            }
          ]
        }
      end

      it 'returns the ItemList' do
        expect(array).to include(hash_including('@type': 'ItemList'))
      end
    end

    context 'with a deeply nested object' do
      let(:object) do
        {
          foo: [
            {
              bar: { baz: { qux: { quux: { corge: [news_article_schema_object] } } } },
              grault: { garply: { waldo: { fred: { plugh: { xyzzy: [article_schema_object] } } } } }
            }
            # Good to have these documented. *cough*
          ]
        }
      end

      # Fixed typo in the example description ("stabile"); it asserts that
      # deeply nested objects are found and returned in stable order.
      it 'scrapes the NewsArticle and Article in stable order', :aggregate_failures do
        first, second = array
        expect(first).to include(:@type => 'NewsArticle')
        expect(second).to include(:@type => 'Article')
      end
    end
  end

  describe '#each' do
    subject(:new) { described_class.new(parsed_body, url: '') }

    let(:parsed_body) { Nokogiri::HTML('') }

    context 'without a block' do
      it 'returns an enumerator' do
        expect(new.each).to be_a(Enumerator)
      end
    end

    context 'with a NewsArticle' do
      let(:parsed_body) do
        Nokogiri::HTML("<script type=\"application/ld+json\">#{news_article_schema_object.to_json}</script>")
      end

      it 'scrapes the article_hash' do
        expect { |b| new.each(&b) }.to yield_with_args(
          hash_including(
            title: "Trump Russia claims: FBI's Comey confirms investigation of election 'interference'"
          )
        )
      end
    end

    context 'with an Article' do
      let(:parsed_body) do
        Nokogiri::HTML("<script type=\"application/ld+json\">#{article_schema_object.to_json}</script>")
      end

      it 'scrapes the article' do
        expect do |b|
          new.each(&b)
        end.to yield_with_args hash_including(title: 'Für Einsparungen kündigt Google komplettem Python-Team')
      end
    end

    context 'with an empty body' do
      it 'returns an empty array' do
        expect { |b| new.each(&b) }.not_to yield_with_args
      end
    end

    context 'with an unsupported @type' do
      let(:parsed_body) { Nokogiri::HTML('<script type="application/ld+json">{"@type": "foo"}</script>') }

      it 'returns an empty array' do
        expect { |b| new.each(&b) }.not_to yield_with_args
      end
    end

    # Malformed JSON must be logged, not raised.
    context 'with malformed JSON' do
      let(:parsed_body) { build_script_tag('{invalid json}') }

      before { mock_logging }

      it 'logs a warning and returns an empty array', :aggregate_failures do
        expect { |b| new.each(&b) }.not_to yield_with_args
        expect(Html2rss::Log).to have_received(:warn).with(/Failed to parse JSON/, error: anything)
      end
    end

    context 'with an ItemList that returns an array' do
      let(:parsed_body) { build_script_tag('{"@type": "ItemList", "itemListElement": []}') }

      before do
        item_list_instance = instance_double(Html2rss::AutoSource::Scraper::Schema::ItemList)
        allow(Html2rss::AutoSource::Scraper::Schema::ItemList).to receive(:new).and_return(item_list_instance)
        allow(item_list_instance).to receive(:call).and_return([
                                                                 { title: 'Item 1' },
                                                                 { title: 'Item 2' }
                                                               ])
      end

      it 'yields each item in the array' do
        expect { |b| new.each(&b) }.to yield_successive_args(
          { title: 'Item 1' },
          { title: 'Item 2' }
        )
      end
    end

    context 'with a scraper that returns nil' do
      let(:parsed_body) { build_script_tag('{"@type": "Article"}') }

      before do
        thing_instance = instance_double(Html2rss::AutoSource::Scraper::Schema::Thing)
        allow(Html2rss::AutoSource::Scraper::Schema::Thing).to receive(:new).and_return(thing_instance)
        allow(thing_instance).to receive(:call).and_return(nil)
      end

      it 'does not yield anything' do
        expect { |b| new.each(&b) }.not_to yield_with_args
      end
    end
  end

  describe '.supported_schema_object?' do
    context 'with a supported schema object' do
      let(:object) { build_simple_article }

      it 'returns true' do
        expect(described_class.supported_schema_object?(object)).to be true
      end
    end

    context 'with an unsupported schema object' do
      let(:object) { build_simple_article(type: 'UnsupportedType') }

      it 'returns false' do
        expect(described_class.supported_schema_object?(object)).to be false
      end
    end
  end

  describe '.scraper_for_schema_object' do
    context 'with a Thing type' do
      let(:object) { build_simple_article }

      it 'returns Thing class' do
        expect(described_class.scraper_for_schema_object(object)).to eq(Html2rss::AutoSource::Scraper::Schema::Thing)
      end
    end

    context 'with an ItemList type' do
      let(:object) { build_simple_article(type: 'ItemList') }

      it 'returns ItemList class' do
        expect(described_class.scraper_for_schema_object(object)).to eq(Html2rss::AutoSource::Scraper::Schema::ItemList)
      end
    end

    context 'with an unsupported type' do
      let(:object) { build_simple_article(type: 'UnsupportedType') }

      before { mock_logging }

      it 'logs debug message and returns nil', :aggregate_failures do
        expect(described_class.scraper_for_schema_object(object)).to be_nil
        expect(Html2rss::Log).to have_received(:debug).with(/Unsupported schema object @type: UnsupportedType/)
      end
    end
  end

  describe '.from' do
    context 'with a Nokogiri::XML::Element' do
      let(:script_tag) { build_script_tag('{"@type": "Article"}').at_css('script') }

      it 'parses the script tag and returns schema objects' do
        expect(described_class.from(script_tag)).to include(hash_including('@type': 'Article'))
      end
    end

    context 'with an array of objects' do
      let(:objects) { [article_schema_object, news_article_schema_object] }

      it 'returns flattened array of schema objects', :aggregate_failures do
        result = described_class.from(objects)
        expect(result).to include(hash_including('@type': 'Article'))
        expect(result).to include(hash_including('@type': 'NewsArticle'))
      end
    end

    context 'with a hash containing unsupported objects' do
      let(:object) { { '@type': 'UnsupportedType', data: 'test' } }

      it 'returns empty array' do
        expect(described_class.from(object)).to eq([])
      end
    end

    context 'with a hash containing nested supported objects' do
      let(:object) { { 'nested' => { 'article' => article_schema_object } } }

      it 'recursively finds and returns supported objects' do
        expect(described_class.from(object)).to include(hash_including('@type': 'Article'))
      end
    end
  end
end
| ruby | MIT | 400e796540e82a69e1f1e014b6f89c626acf32fd | 2026-01-04T17:45:12.820049Z | false |
html2rss/html2rss | https://github.com/html2rss/html2rss/blob/400e796540e82a69e1f1e014b6f89c626acf32fd/spec/lib/html2rss/auto_source/scraper/html_spec.rb | spec/lib/html2rss/auto_source/scraper/html_spec.rb | # frozen_string_literal: true
# Specs for Html2rss::AutoSource::Scraper::Html: frequency-based article
# extraction from generic HTML without semantic markup.
RSpec.describe Html2rss::AutoSource::Scraper::Html do
  let(:html) do
    <<~HTML
      <!DOCTYPE html>
      <html>
      <head>
        <title>Sample Document</title>
      </head>
      <body>
        <h1>Main Heading</h1>
        <article>
          <h2>Article 1 Headline</h2>
          <p>
            Teaser for article 1.
            <a href="article1/">Read more</a>
          </p>
        </article>
        <article>
          <h2>Article 2 Headline</h2>
          <p>
            Teaser for article 2.
            <a href="article2/">Read more</a>
          </p>
        </article>
      </body>
      </html>
    HTML
  end

  let(:parsed_body) do
    Nokogiri::HTML(html)
  end

  describe '.options_key' do
    specify { expect(described_class.options_key).to eq(:html) }
  end

  describe '.articles?(parsed_body)' do
    subject(:articles?) { described_class.articles?(parsed_body) }

    it { is_expected.to be_truthy }
  end

  describe '#each' do
    subject(:articles) { described_class.new(parsed_body, url: 'http://example.com') }

    # The article id is derived from the anchor path.
    let(:first_article) do
      { title: 'Article 1 Headline',
        url: be_a(Html2rss::Url),
        image: nil,
        description: 'Article 1 Headline Teaser for article 1. Read more',
        id: '/article1/',
        published_at: nil,
        enclosures: [] }
    end

    let(:second_article) do
      { title: 'Article 2 Headline',
        url: be_a(Html2rss::Url),
        image: nil,
        description: 'Article 2 Headline Teaser for article 2. Read more',
        id: '/article2/',
        published_at: nil,
        enclosures: [] }
    end

    it 'yields articles' do
      expect { |b| articles.each(&b) }.to yield_control.twice
    end

    it 'contains the two articles', :aggregate_failures do
      first, last = articles.to_a
      expect(first).to include(first_article)
      expect(last).to include(second_article)
    end

    context 'when parsed_body does not wrap article in an element' do
      let(:html) do
        <<~HTML
          <!doctype html>
          <html lang="de"><meta charset="utf-8">
          <h3>Sun Oct 27 2024</h3>
          <ul>
          <li>
          <a href="?ts=deadh0rse">[Plonk]</a>
          <a href="https://www.tagesschau.de/wirtschaft/verbraucher/kosten-autos-deutsche-hersteller-100.html">Bla bla bla</a>
          </ul>
          </html>
        HTML
      end

      let(:first_article) do
        { title: nil,
          url: be_a(Html2rss::Url),
          image: nil,
          description: '[Plonk]',
          id: '/',
          published_at: nil,
          enclosures: [] }
      end

      let(:second_article) do
        {
          title: nil,
          url: be_a(Html2rss::Url),
          image: nil,
          description: 'Bla bla bla',
          id: '/',
          published_at: nil,
          enclosures: []
        }
      end

      # Without wrapping elements both anchors fall back to the root path id.
      it 'contains the articles with same id' do
        first, second = articles.to_a
        expect(first[:id]).to eq(second[:id])
      end

      it 'contains the first_article' do
        expect(articles.first).to include(first_article)
      end

      it 'contains the second_article' do
        expect(articles.to_a[-1]).to include(second_article)
      end
    end
  end

  describe '.simplify_xpath' do
    it 'converts an XPath selector to an index-less xpath' do
      xpath = '/html/body/div[1]/div[2]/span[3]'
      expected = '/html/body/div/div/span'

      simplified = described_class.simplify_xpath(xpath)

      expect(simplified).to eq(expected)
    end
  end

  describe '#article_tag_condition' do
    let(:html) do
      <<-HTML
        <html>
          <body>
            <nav>
              <a href="link1">Link 1</a>
            </nav>
            <div class="content">
              <a href="link2">Link 2</a>
              <article>
                <a href="link3">Link 3</a>
                <div>
                  <a href="link6">Link 6</a>
                </div>
              </article>
            </div>
            <footer>
              <a href="link4">Link 4</a>
            </footer>
            <div class="navigation">
              <a href="link5">Link 5</a>
            </div>
          </body>
        </html>
      HTML
    end

    let(:parsed_body) { Nokogiri::HTML(html) }
    let(:scraper) { described_class.new(parsed_body, url: 'http://example.com') }

    # <nav>, <footer> and navigation-like class names are ignored as
    # article candidates.
    it 'returns false for nodes within ignored tags' do
      node = parsed_body.at_css('nav a')
      expect(scraper).not_to be_article_tag_condition(node)
    end

    it 'returns true for body and html tags', :aggregate_failures do
      body_node = parsed_body.at_css('body')
      html_node = parsed_body.at_css('html')

      expect(scraper).to be_article_tag_condition(body_node)
      expect(scraper).to be_article_tag_condition(html_node)
    end

    it 'returns true if parent has 2 or more anchor tags' do
      node = parsed_body.at_css('article a')
      expect(scraper).to be_article_tag_condition(node)
    end

    it 'returns false if none of the conditions are met' do
      node = parsed_body.at_css('footer a')
      expect(scraper).not_to be_article_tag_condition(node)
    end

    it 'returns false if parent class matches' do
      node = parsed_body.at_css('.navigation a')
      expect(scraper).not_to be_article_tag_condition(node)
    end
  end
end
| ruby | MIT | 400e796540e82a69e1f1e014b6f89c626acf32fd | 2026-01-04T17:45:12.820049Z | false |
html2rss/html2rss | https://github.com/html2rss/html2rss/blob/400e796540e82a69e1f1e014b6f89c626acf32fd/spec/lib/html2rss/auto_source/scraper/semantic_html_spec.rb | spec/lib/html2rss/auto_source/scraper/semantic_html_spec.rb | # frozen_string_literal: true
# Specs for Html2rss::AutoSource::Scraper::SemanticHtml: extraction from pages
# using semantic markup (<article> elements with anchors).
RSpec.describe Html2rss::AutoSource::Scraper::SemanticHtml do
  describe '.options_key' do
    specify { expect(described_class.options_key).to eq(:semantic_html) }
  end

  describe '.articles?' do
    let(:parsed_body) do
      Nokogiri::HTML.parse <<~HTML
        <html><body><article><a href="">Article 1</a></article></body></html>
      HTML
    end

    it 'returns true when there are extractable articles' do
      expect(described_class.articles?(parsed_body)).to be true
    end

    it 'returns false when there are no extractable articles' do
      expect(described_class.articles?(nil)).to be false
    end
  end

  describe '#each' do
    subject(:new) { described_class.new(parsed_body, url: 'https://page.com') }

    let(:parsed_body) { Nokogiri::HTML.parse(File.read('spec/fixtures/page_1.html')) }

    # Expected articles from the fixture page, grouped by id so each yielded
    # article can be matched and consumed exactly once.
    let(:grouped_expected_articles) do
      # rubocop:disable Layout/LineLength
      [
        { title: 'Brittney Griner: What I Endured in Russia', url: 'https://page.com/6972085/brittney-griner-book-coming-home/', image: 'https://api.PAGE.com/wp-content/uploads/2024/04/brittney-griner-basketball-russia.jpg?quality=85&w=925&h=617&crop=1&resize=925,617', description: %(Chris Coduto—Getty Images Brittney Griner: What I Endured in Russia 17 MIN READ May 3, 2024 • 8:00 AM EDT "Prison is more than a place. It’s also a mindset," Brittney Griner writes in an excerpt from her book\n about surviving imprisonment in Russia.), id: '/6972085/brittney-griner-book-coming-home/' },
        { title: 'Driver Dies After Crashing Into White House Security Barrier', url: 'https://page.com/6974836/white-house-car-crash-driver-dies-security-barrier/', image: 'https://api.PAGE.com/wp-content/uploads/2024/05/AP24126237101577.jpg?quality=85&w=925&h=617&crop=1&resize=925,617', description: 'Driver Dies After Crashing Into White House Security Barrier 1 MIN READ May 5, 2024 • 7:46 AM EDT', id: '/6974836/white-house-car-crash-driver-dies-security-barrier/' }
      ].group_by { |article| article[:id] }
      # rubocop:enable Layout/LineLength
    end

    it 'yields and includes all expected articles', :aggregate_failures, :slow do # rubocop:disable RSpec/ExampleLength
      new.each do |article|
        expected_article = grouped_expected_articles[article[:id]]&.shift
        next unless expected_article

        expected_article.each do |key, value|
          expect(article[key].to_s).to eq(value.to_s)
        end
      end

      # Every expected article must have been matched (and shifted) above.
      expect(grouped_expected_articles.values.flatten).to be_empty
    end

    it 'returns the expected number of articles', :slow do
      # Many articles are extracted from the page, but only 2 are expected [above].
      # The SemanticHtml class tries to catch as many articles as possible.
      # RSS readers respecting the items' guid will only show the other articles once.
      #
      # However, to catch larger changes in the algorithm, the number of articles is expected.
      expect { |b| new.each(&b) }.to yield_control.at_least(189).times
    end
  end
end
| ruby | MIT | 400e796540e82a69e1f1e014b6f89c626acf32fd | 2026-01-04T17:45:12.820049Z | false |
html2rss/html2rss | https://github.com/html2rss/html2rss/blob/400e796540e82a69e1f1e014b6f89c626acf32fd/spec/lib/html2rss/auto_source/scraper/rss_feed_detector_spec.rb | spec/lib/html2rss/auto_source/scraper/rss_feed_detector_spec.rb | # frozen_string_literal: true
RSpec.describe Html2rss::AutoSource::Scraper::RssFeedDetector do
subject(:instance) { described_class.new(parsed_body, url:) }
let(:url) { 'https://example.com' }
let(:parsed_body) { Nokogiri::HTML(html) }
describe '.articles?' do
context 'when RSS feed links are present' do
let(:html) do
<<~HTML
<html>
<head>
<link rel="alternate" type="application/rss+xml" href="/feed.xml" title="RSS Feed">
</head>
<body></body>
</html>
HTML
end
it 'returns true' do
expect(described_class.articles?(parsed_body)).to be true
end
end
context 'when no RSS feed links are present' do
let(:html) do
<<~HTML
<html>
<head>
<link rel="stylesheet" href="/style.css">
</head>
<body></body>
</html>
HTML
end
it 'returns false' do
expect(described_class.articles?(parsed_body)).to be false
end
end
context 'when parsed_body is nil' do
it 'returns false' do
expect(described_class.articles?(nil)).to be false
end
end
end
describe '#each' do
context 'when RSS feed links are present' do
let(:html) do
<<~HTML
<html>
<head>
<title>Test Blog</title>
<link rel="alternate" type="application/rss+xml" href="/feed.xml" title="Main RSS Feed">
<link rel="alternate" type="application/rss+xml" href="/comments.xml" title="Comments Feed">
</head>
<body></body>
</html>
HTML
end
let(:html_with_xss) do
<<~HTML
<html>
<head>
<title>Test Blog</title>
<link rel="alternate" type="application/rss+xml" href="/feed.xml" title="<script>alert('xss')</script>RSS Feed">
</head>
<body></body>
</html>
HTML
end
it 'yields the correct number of articles' do
articles = instance.each.to_a
expect(articles.size).to eq 2
end
it 'yields articles with correct title' do
articles = instance.each.to_a
first_article = articles.first
expect(first_article[:title]).to eq 'Main RSS Feed'
end
it 'yields articles with correct URL' do
articles = instance.each.to_a
first_article = articles.first
expect(first_article[:url].to_s).to eq 'https://example.com/feed.xml'
end
it 'yields articles with helpful description containing clickable link' do
articles = instance.each.to_a
first_article = articles.first
expect(first_article[:description]).to include 'https://example.com/feed.xml'
end
it 'yields articles with clickable link in description', :aggregate_failures do
first_article = instance.each.first
expect(first_article[:description]).to include '<a href="https://example.com/feed.xml"'
expect(first_article[:description]).to include('>Main RSS Feed</a>')
expect(first_article[:description]).to include('rel="nofollow noopener noreferrer"')
expect(first_article[:description]).to include('target="_blank"')
end
it 'yields articles with correct categories' do
articles = instance.each.to_a
first_article = articles.first
expect(first_article[:categories]).to include 'feed', 'auto-detected', 'rss'
end
it 'yields articles with correct scraper' do
articles = instance.each.to_a
first_article = articles.first
expect(first_article[:scraper]).to eq described_class
end
it 'yields articles with monthly rotating ID' do
articles = instance.each.to_a
first_article = articles.first
current_month = Time.now.strftime('%Y-%m')
expect(first_article[:id]).to match(/^rss-feed-\d+-#{current_month}$/)
end
it 'generates different IDs for different months' do
allow(Time).to receive(:now).and_return(Time.new(2024, 1, 15))
jan_id = instance.each.to_a.first[:id]
allow(Time).to receive(:now).and_return(Time.new(2024, 2, 15))
feb_id = instance.each.to_a.first[:id]
expect(jan_id).not_to eq(feb_id)
end
it 'includes month in January ID' do
allow(Time).to receive(:now).and_return(Time.new(2024, 1, 15))
articles = instance.each.to_a
jan_id = articles.first[:id]
expect(jan_id).to include('2024-01')
end
it 'includes month in February ID' do
allow(Time).to receive(:now).and_return(Time.new(2024, 2, 15))
articles = instance.each.to_a
feb_id = articles.first[:id]
expect(feb_id).to include('2024-02')
end
it 'yields articles with author information' do
articles = instance.each.to_a
first_article = articles.first
expect(first_article[:author]).to eq 'Test Blog'
end
it 'sanitizes HTML in feed titles for security', :aggregate_failures do
xss_instance = described_class.new(Nokogiri::HTML(html_with_xss), url:)
first_article = xss_instance.first
expect(first_article[:description]).to include 'RSS Feed'
expect(first_article[:description]).not_to include '<script>'
expect(first_article[:description]).not_to include 'alert('
end
it 'yields articles for all RSS feeds' do
articles = instance.each.to_a
second_article = articles.last
expect(second_article[:title]).to eq 'Comments Feed'
end
it 'yields articles with correct URLs for all feeds' do
articles = instance.each.to_a
second_article = articles.last
expect(second_article[:url].to_s).to eq 'https://example.com/comments.xml'
end
end
context 'when different feed types are present' do
let(:html) do
<<~HTML
<html>
<head>
<title>Test Site</title>
<link rel="alternate" type="application/rss+xml" href="/feed.xml" title="Main RSS Feed">
<link rel="alternate" type="application/atom+xml" href="/atom.xml" title="Atom News Feed">
<link rel="alternate" type="application/json" href="/feed.json" title="JSON Data Feed">
</head>
<body></body>
</html>
HTML
end
it 'detects different feed types correctly' do
articles = instance.each.to_a
expect(articles.size).to eq 3
end
it 'categorizes RSS feeds correctly' do
articles = instance.each.to_a
rss_article = articles.find { |a| a[:url].to_s.include?('feed.xml') }
expect(rss_article[:categories]).to include 'rss'
end
it 'categorizes Atom feeds correctly' do
articles = instance.each.to_a
atom_article = articles.find { |a| a[:url].to_s.include?('atom.xml') }
expect(atom_article[:categories]).to include 'atom'
end
it 'categorizes JSON feeds correctly' do
articles = instance.each.to_a
json_article = articles.find { |a| a[:url].to_s.include?('feed.json') }
expect(json_article[:categories]).to include 'json-feed'
end
end
context 'when no RSS feed links are present' do
let(:html) do
<<~HTML
<html>
<head>
<link rel="stylesheet" href="/style.css">
</head>
<body></body>
</html>
HTML
end
it 'yields nothing' do
expect(instance.each.to_a).to be_empty
end
end
context 'when RSS feed link has no href' do
let(:html) do
<<~HTML
<html>
<head>
<link rel="alternate" type="application/rss+xml" title="Broken Feed">
</head>
<body></body>
</html>
HTML
end
it 'yields nothing' do
expect(instance.each.to_a).to be_empty
end
end
end
end
| ruby | MIT | 400e796540e82a69e1f1e014b6f89c626acf32fd | 2026-01-04T17:45:12.820049Z | false |
html2rss/html2rss | https://github.com/html2rss/html2rss/blob/400e796540e82a69e1f1e014b6f89c626acf32fd/spec/lib/html2rss/auto_source/scraper/schema/category_extractor_spec.rb | spec/lib/html2rss/auto_source/scraper/schema/category_extractor_spec.rb | # frozen_string_literal: true
require 'spec_helper'
RSpec.describe Html2rss::AutoSource::Scraper::Schema::CategoryExtractor do
describe '.call' do
subject(:categories) { described_class.call(schema_object) }
context 'when schema object has field categories' do
let(:schema_object) do
{
keywords: %w[technology science],
categories: %w[news tech],
tags: 'politics, sports'
}
end
it 'extracts categories from all field sources' do
expect(categories).to contain_exactly('technology', 'science', 'news', 'tech', 'politics', 'sports')
end
end
context 'when schema object has about field with array' do
let(:schema_object) do
{
about: [
{ name: 'Technology' },
{ name: 'Science' },
'Politics'
]
}
end
it 'extracts categories from about array' do
expect(categories).to contain_exactly('Technology', 'Science', 'Politics')
end
end
context 'when schema object has about field with string' do
let(:schema_object) do
{
about: 'Technology, Science; Politics|Health'
}
end
it 'extracts categories from about string by splitting on separators' do
expect(categories).to contain_exactly('Technology', 'Science', 'Politics', 'Health')
end
end
context 'when schema object has mixed field and about categories' do
let(:schema_object) do
{
keywords: ['tech'],
about: 'science, politics'
}
end
it 'combines categories from both sources' do
expect(categories).to contain_exactly('tech', 'science', 'politics')
end
end
context 'when schema object has empty or nil values' do
let(:schema_object) do
{
keywords: [],
categories: nil,
tags: '',
about: nil
}
end
it 'returns empty array' do
expect(categories).to eq([])
end
end
context 'when schema object has no category fields' do
let(:schema_object) { { title: 'Test', url: 'http://example.com' } }
it 'returns empty array' do
expect(categories).to eq([])
end
end
context 'when schema object is empty' do
let(:schema_object) { {} }
it 'returns empty array' do
expect(categories).to eq([])
end
end
end
describe '.extract_field_categories' do
subject(:categories) { described_class.extract_field_categories(schema_object) }
context 'with array values' do
let(:schema_object) do
{
keywords: %w[tech science],
categories: %w[news politics],
tags: %w[sports health]
}
end
it 'extracts categories from all array fields' do
expect(categories).to contain_exactly('tech', 'science', 'news', 'politics', 'sports', 'health')
end
end
context 'with string values' do
let(:schema_object) do
{
keywords: 'tech, science',
categories: 'news; politics',
tags: 'sports|health'
}
end
it 'extracts categories from all string fields' do
expect(categories).to contain_exactly('tech', 'science', 'news', 'politics', 'sports', 'health')
end
end
context 'with mixed array and string values' do
let(:schema_object) do
{
keywords: ['tech'],
categories: 'science, politics',
tags: ['sports']
}
end
it 'extracts categories from all fields' do
expect(categories).to contain_exactly('tech', 'science', 'politics', 'sports')
end
end
context 'with empty arrays and strings' do
let(:schema_object) do
{
keywords: [],
categories: '',
tags: ' , , '
}
end
it 'filters out empty categories' do
expect(categories).to be_empty
end
end
context 'with non-string, non-array values' do
let(:schema_object) do
{
keywords: 123,
categories: { nested: 'value' },
tags: true
}
end
it 'ignores non-string, non-array values' do
expect(categories).to be_empty
end
end
end
describe '.extract_about_categories' do
subject(:categories) { described_class.extract_about_categories(schema_object) }
context 'when about is nil' do
let(:schema_object) { { about: nil } }
it 'returns empty set' do
expect(categories).to eq(Set.new)
end
end
context 'when about is missing' do
let(:schema_object) { {} }
it 'returns empty set' do
expect(categories).to eq(Set.new)
end
end
context 'when about is an array' do
let(:schema_object) do
{
about: [
{ name: 'Technology' },
{ name: 'Science' },
'Politics',
{ other: 'value' },
123
]
}
end
it 'extracts categories from array items' do
expect(categories).to contain_exactly('Technology', 'Science', 'Politics')
end
end
context 'when about is a string' do
let(:schema_object) do
{
about: 'Technology, Science; Politics|Health'
}
end
it 'extracts categories by splitting on separators' do
expect(categories).to contain_exactly('Technology', 'Science', 'Politics', 'Health')
end
end
context 'when about is neither array nor string' do
let(:schema_object) { { about: 123 } }
it 'returns empty set' do
expect(categories).to eq(Set.new)
end
end
end
describe '.extract_field_value' do
subject(:categories) { described_class.extract_field_value(schema_object, field) }
context 'when field value is an array' do
let(:schema_object) { { keywords: ['tech', 'science', ''] } }
let(:field) { 'keywords' }
it 'extracts non-empty string values' do
expect(categories).to contain_exactly('tech', 'science')
end
end
context 'when field value is a string' do
let(:schema_object) { { keywords: 'tech, science; politics' } }
let(:field) { 'keywords' }
it 'extracts categories by splitting on separators' do
expect(categories).to contain_exactly('tech', 'science', 'politics')
end
end
context 'when field value is nil' do
let(:schema_object) { { keywords: nil } }
let(:field) { 'keywords' }
it 'returns empty set' do
expect(categories).to eq(Set.new)
end
end
context 'when field value is missing' do
let(:schema_object) { {} }
let(:field) { 'keywords' }
it 'returns empty set' do
expect(categories).to eq(Set.new)
end
end
context 'when field value is neither array nor string' do
let(:schema_object) { { keywords: 123 } }
let(:field) { 'keywords' }
it 'returns empty set' do
expect(categories).to eq(Set.new)
end
end
end
describe '.extract_about_array' do
subject(:categories) { described_class.extract_about_array(about) }
context 'with hash items containing name' do
let(:about) do
[
{ name: 'Technology' },
{ name: 'Science' },
{ name: 'Politics' }
]
end
it 'extracts name values from hash items' do
expect(categories).to contain_exactly('Technology', 'Science', 'Politics')
end
end
context 'with string items' do
let(:about) { %w[Technology Science Politics] }
it 'extracts string items directly' do
expect(categories).to contain_exactly('Technology', 'Science', 'Politics')
end
end
context 'with mixed hash and string items' do
let(:about) do
[
{ name: 'Technology' },
'Science',
{ name: 'Politics' },
'Health'
]
end
it 'extracts from both hash names and strings' do
expect(categories).to contain_exactly('Technology', 'Science', 'Politics', 'Health')
end
end
context 'with hash items without name' do
let(:about) do
[
{ name: 'Technology' },
{ other: 'value' },
'Science'
]
end
it 'ignores hash items without name' do
expect(categories).to contain_exactly('Technology', 'Science')
end
end
context 'with non-hash, non-string items' do
let(:about) do
[
{ name: 'Technology' },
123,
'Science',
true
]
end
it 'ignores non-hash, non-string items' do
expect(categories).to contain_exactly('Technology', 'Science')
end
end
end
describe '.extract_string_categories' do
subject(:categories) { described_class.extract_string_categories(string) }
context 'with comma-separated values' do
let(:string) { 'Technology, Science, Politics' }
it 'splits on commas and strips whitespace' do
expect(categories).to contain_exactly('Technology', 'Science', 'Politics')
end
end
context 'with semicolon-separated values' do
let(:string) { 'Technology; Science; Politics' }
it 'splits on semicolons and strips whitespace' do
expect(categories).to contain_exactly('Technology', 'Science', 'Politics')
end
end
context 'with pipe-separated values' do
let(:string) { 'Technology|Science|Politics' }
it 'splits on pipes and strips whitespace' do
expect(categories).to contain_exactly('Technology', 'Science', 'Politics')
end
end
context 'with mixed separators' do
let(:string) { 'Technology, Science; Politics|Health' }
it 'splits on all separators' do
expect(categories).to contain_exactly('Technology', 'Science', 'Politics', 'Health')
end
end
context 'with extra whitespace' do
let(:string) { ' Technology , Science ; Politics | Health ' }
it 'strips whitespace from all values' do
expect(categories).to contain_exactly('Technology', 'Science', 'Politics', 'Health')
end
end
context 'with empty values' do
let(:string) { 'Technology, , Science, , Politics' }
it 'filters out empty values' do
expect(categories).to contain_exactly('Technology', 'Science', 'Politics')
end
end
context 'with only separators and whitespace' do
let(:string) { ' , ; | ' }
it 'returns empty set' do
expect(categories).to be_empty
end
end
context 'with empty string' do
let(:string) { '' }
it 'returns empty set' do
expect(categories).to be_empty
end
end
end
end
| ruby | MIT | 400e796540e82a69e1f1e014b6f89c626acf32fd | 2026-01-04T17:45:12.820049Z | false |
html2rss/html2rss | https://github.com/html2rss/html2rss/blob/400e796540e82a69e1f1e014b6f89c626acf32fd/spec/lib/html2rss/auto_source/scraper/schema/list_item_spec.rb | spec/lib/html2rss/auto_source/scraper/schema/list_item_spec.rb | # frozen_string_literal: true
RSpec.describe Html2rss::AutoSource::Scraper::Schema::ListItem do
let(:schema_object) do
{ item: { '@id': '123', name: 'Test Item', description: 'Test Description', url: 'http://example.com/foobar' } }
end
let(:url) { 'http://example.com' }
let(:list_item) { described_class.new(schema_object, url:) }
describe '#id' do
it 'returns the id from the schema object' do
expect(list_item.id).to eq('123')
end
it 'falls back to super if id is not present' do
schema_object[:item].delete(:@id)
expect(list_item.id).to eq '/foobar'
end
it 'converts symbols to strings' do
schema_object[:item][:@id] = :foo
expect(list_item.id).to eq('foo')
end
it 'is nil when string is empty' do
schema_object[:item][:@id] = ''
expect(list_item.id).to be_nil
end
end
describe '#title' do
it 'returns the title from the schema object' do
expect(list_item.title).to eq('Test Item')
end
it 'falls back to titleized url if title and super are not present' do
schema_object[:item].delete(:name)
expect(list_item.title).to eq('Foobar')
end
it 'is nil when all params absent' do
schema_object[:item].delete(:name)
schema_object[:item].delete(:url)
schema_object[:item].delete(:description)
expect(list_item.title).to be_nil
end
end
describe '#description' do
it 'returns the description from the schema object' do
expect(list_item.description).to eq('Test Description')
end
it 'falls back to super if description is not present' do
schema_object[:item].delete(:description)
expect(list_item.description).to be_nil
end
end
describe '#url' do
it 'returns the url from the schema object' do
expect(list_item.url.to_s).to eq('http://example.com/foobar')
end
it 'falls back to super if url is not present' do
schema_object[:item].delete(:url)
expect(list_item.url).to be_nil
end
it 'builds absolute url from relative url' do
schema_object[:item][:url] = '/relative/path'
expect(list_item.url.to_s).to eq('http://example.com/relative/path')
end
end
end
| ruby | MIT | 400e796540e82a69e1f1e014b6f89c626acf32fd | 2026-01-04T17:45:12.820049Z | false |
html2rss/html2rss | https://github.com/html2rss/html2rss/blob/400e796540e82a69e1f1e014b6f89c626acf32fd/spec/lib/html2rss/auto_source/scraper/schema/item_list_spec.rb | spec/lib/html2rss/auto_source/scraper/schema/item_list_spec.rb | # frozen_string_literal: true
RSpec.describe Html2rss::AutoSource::Scraper::Schema::ItemList do
subject(:instance) { described_class.new(schema_object, url: nil) }
let(:schema_object) do
{
'@context': 'https://schema.org',
'@type': 'ItemList',
itemListElement: [
{
'@type': 'ListItem',
position: 1,
url: 'https://www.example.com/breakdancerin-raygun-geht-weiter-110168077.html'
},
{
'@type': 'ListItem',
position: 2,
url: 'https://www.example.com/in-frankfurt-macht-die-neue-grundsteuer-das-wohnen-noch-teurer-110165876.html'
}
]
}
end
describe '#call' do
subject(:call) { instance.call }
it 'returns an array of hashes' do
expect(call).to be_an(Array)
end
it 'includes the correct number of items' do
expect(call.size).to eq(3)
end
it 'sets the title' do
expect(call).to include(
hash_including(id: '/breakdancerin-raygun-geht-weiter-110168077.html'),
hash_including(id: '/in-frankfurt-macht-die-neue-grundsteuer-das-wohnen-noch-teurer-110165876.html')
)
end
context 'when the schema_object does not contain itemListElement' do
let(:schema_object) { {} }
it 'returns an array with one hash' do
expect(call).to contain_exactly(an_instance_of(Hash))
end
end
context 'when the schema_object contains a single itemListElement' do
let(:schema_object) do
{
'@context': 'https://schema.org',
'@type': 'ItemList',
itemListElement: {
'@type': 'ListItem',
position: 1,
url: 'https://www.example.com/breakdancerin-raygun-geht-weiter-110168077.html'
}
}
end
it 'returns an array with two hashes' do
expect(call).to contain_exactly(an_instance_of(Hash), an_instance_of(Hash))
end
end
end
end
| ruby | MIT | 400e796540e82a69e1f1e014b6f89c626acf32fd | 2026-01-04T17:45:12.820049Z | false |
html2rss/html2rss | https://github.com/html2rss/html2rss/blob/400e796540e82a69e1f1e014b6f89c626acf32fd/spec/lib/html2rss/auto_source/scraper/schema/thing_spec.rb | spec/lib/html2rss/auto_source/scraper/schema/thing_spec.rb | # frozen_string_literal: true
RSpec.describe Html2rss::AutoSource::Scraper::Schema::Thing do
subject(:instance) { described_class.new(schema_object, url: 'https://example.com') }
let(:schema_object) do
{ '@type': 'ScholarlyArticle', title: 'Baustellen der Nation' }
end
specify { expect(described_class::SUPPORTED_TYPES).to be_a(Set) }
describe '#call' do
subject(:call) { instance.call }
it 'sets the title' do
expect(call).to include(title: 'Baustellen der Nation')
end
end
describe '#id' do
subject(:id) { instance.id }
context 'when schema_object contains an @id' do
let(:schema_object) do
{ '@type': 'ScholarlyArticle', title: 'Baustellen der Nation', '@id': 'https://example.com/123' }
end
it 'returns the @id' do
expect(id).to eq('https://example.com/123')
end
end
context 'when schema_object does not contain an @id or URL' do
let(:schema_object) { { '@type': 'ScholarlyArticle', title: 'Baustellen der Nation' } }
it 'returns nil' do
expect(id).to be_nil
end
end
end
describe '#image' do
subject(:image) { instance.image }
context 'when image is a string' do
let(:schema_object) do
{ '@type': 'ScholarlyArticle', title: 'Baustellen der Nation', image: '/image.jpg' }
end
it 'returns the absolute image URL' do
expect(image.to_s).to eq('https://example.com/image.jpg')
end
end
context 'when image is an ImageObject' do
let(:schema_object) do
{ '@type': 'ScholarlyArticle', title: 'Baustellen der Nation', image: { '@type': 'ImageObject', url: 'http://example.com/image.jpg' } }
end
it 'returns the image URL from the ImageObject' do
expect(image.to_s).to eq('http://example.com/image.jpg')
end
end
context 'when image is an ImageObject with contentUrl' do
let(:schema_object) do
{ '@type': 'ScholarlyArticle', title: 'Baustellen der Nation', image: { '@type': 'ImageObject', contentUrl: 'http://example.com/image.jpg' } }
end
it 'returns the contentUrl from the ImageObject' do
expect(image.to_s).to eq('http://example.com/image.jpg')
end
end
context 'when image is a String' do
let(:schema_object) do
{ '@type': 'ScholarlyArticle', title: 'Baustellen der Nation', image: 'http://example.com/image1.jpg' }
end
it 'returns the first image URL' do
expect(image.to_s).to eq('http://example.com/image1.jpg')
end
end
context 'when thumbnailUrl is a String' do
let(:schema_object) do
{ '@type': 'ScholarlyArticle', title: 'Baustellen der Nation', thumbnailUrl: 'http://example.com/image1.jpg' }
end
it 'returns the first image URL' do
expect(image.to_s).to eq('http://example.com/image1.jpg')
end
end
context 'when image is nil' do
let(:schema_object) do
{ '@type': 'ScholarlyArticle', title: 'Baustellen der Nation', image: nil }
end
it 'returns nil' do
expect(image).to be_nil
end
end
end
describe '#categories' do
subject(:categories) { instance.categories }
context 'when schema_object has keywords as array' do
let(:schema_object) do
{ '@type': 'ScholarlyArticle', title: 'Baustellen der Nation', keywords: %w[Politics Society Analysis] }
end
it 'returns the keywords as categories' do
expect(categories).to eq(%w[Politics Society Analysis])
end
end
context 'when schema_object has keywords as string' do
let(:schema_object) do
{ '@type': 'ScholarlyArticle', title: 'Baustellen der Nation', keywords: 'Politics, Society, Analysis' }
end
it 'splits keywords by comma and returns as categories' do
expect(categories).to eq(%w[Politics Society Analysis])
end
end
context 'when schema_object has categories field' do
let(:schema_object) do
{ '@type': 'ScholarlyArticle', title: 'Baustellen der Nation', categories: %w[News Technology] }
end
it 'returns the categories' do
expect(categories).to eq(%w[News Technology])
end
end
context 'when schema_object has tags field' do
let(:schema_object) do
{ '@type': 'ScholarlyArticle', title: 'Baustellen der Nation', tags: %w[Science Research] }
end
it 'returns the tags as categories' do
expect(categories).to eq(%w[Science Research])
end
end
context 'when schema_object has about field with objects' do
let(:schema_object) do
{ '@type': 'ScholarlyArticle', title: 'Baustellen der Nation',
about: [{ name: 'Politics' }, { name: 'Society' }] }
end
it 'extracts names from about objects' do
expect(categories).to eq(%w[Politics Society])
end
end
context 'when schema_object has mixed category sources' do
let(:schema_object) do
{ '@type': 'ScholarlyArticle', title: 'Baustellen der Nation', keywords: ['Politics'], categories: ['Society'],
tags: 'Science, Research' }
end
it 'combines all category sources' do
expect(categories).to eq(%w[Politics Society Science Research])
end
end
context 'when schema_object has no category fields' do
let(:schema_object) do
{ '@type': 'ScholarlyArticle', title: 'Baustellen der Nation' }
end
it 'returns empty array' do
expect(categories).to eq([])
end
end
context 'when schema_object has empty category fields' do
let(:schema_object) do
{ '@type': 'ScholarlyArticle', title: 'Baustellen der Nation', keywords: [], categories: '', tags: nil }
end
it 'returns empty array' do
expect(categories).to eq([])
end
end
end
end
| ruby | MIT | 400e796540e82a69e1f1e014b6f89c626acf32fd | 2026-01-04T17:45:12.820049Z | false |
html2rss/html2rss | https://github.com/html2rss/html2rss/blob/400e796540e82a69e1f1e014b6f89c626acf32fd/spec/lib/html2rss/rendering/description_builder_spec.rb | spec/lib/html2rss/rendering/description_builder_spec.rb | # frozen_string_literal: true
require 'spec_helper'
require 'nokogiri'
RSpec.describe Html2rss::Rendering::DescriptionBuilder do
describe '#call' do
context 'when base is plain text' do
subject(:description) { described_class.new(base:, title: 'Sample instance', url: 'http://example.com', enclosures: nil, image: nil).call }
let(:base) { 'By John Doe' }
it 'returns the base description unchanged' do
expect(description).to eq('By John Doe')
end
end
context 'when base contains HTML' do
subject(:description) do
described_class.new(base:, title: 'Sample instance', url:, enclosures: nil, image: nil).call
end
let(:base) { '<b>Some bold text</b>' }
let(:url) { 'http://example.com' }
before do
allow(Html2rss::Selectors::PostProcessors::SanitizeHtml).to receive(:get).with(base, url).and_call_original
end
it 'sanitizes the HTML', :aggregate_failures do
expect(description).to eq('<b>Some bold text</b>')
expect(Html2rss::Selectors::PostProcessors::SanitizeHtml).to have_received(:get).with(base, url)
end
end
context 'when base starts with the title' do
subject(:description) { described_class.new(base:, title: 'Sample instance', url: 'http://example.com', enclosures: nil, image: nil).call }
let(:base) { 'Sample instance By John Doe' }
it 'removes the title from the start' do
expect(description).to include('By John Doe')
end
end
context 'when base is empty' do
subject(:description) { described_class.new(base:, title: 'Sample instance', url: 'http://example.com', enclosures: nil, image: nil).call }
let(:base) { '' }
it 'returns nil' do
expect(description).to be_nil
end
end
context 'when enclosure is an image' do
subject(:doc) do
html = described_class.new(base:, title: 'Sample instance', url: 'http://example.com', enclosures:,
image: nil).call
Nokogiri::HTML.fragment(html)
end
let(:base) { 'Caption' }
let(:enclosures) { [instance_double(Html2rss::RssBuilder::Enclosure, url: 'http://example.com/image.jpg', type: 'image/jpeg')] }
it 'renders <img> with attributes', :aggregate_failures do
img = doc.at_css('img')
expect(img['src']).to eq('http://example.com/image.jpg')
expect(img['alt']).to eq('Sample instance')
expect(img['title']).to eq('Sample instance')
end
end
context 'when fallback image is present (rendering)' do
subject(:doc) do
html = described_class.new(base:, title: 'Sample instance', url: 'http://example.com', enclosures: nil,
image:).call
Nokogiri::HTML.fragment(html)
end
let(:base) { 'Something' }
let(:image) { 'http://example.com/fallback.jpg' }
it 'renders fallback <img>' do
img = doc.at_css('img')
expect(img['src']).to eq('http://example.com/fallback.jpg')
end
end
context 'when enclosure is a video' do
subject(:doc) do
html = described_class.new(base:, title: 'Sample instance', url: 'http://example.com', enclosures:,
image: nil).call
Nokogiri::HTML.fragment(html)
end
let(:base) { 'Watch this' }
let(:enclosures) { [instance_double(Html2rss::RssBuilder::Enclosure, url: 'http://example.com/video.mp4', type: 'video/mp4')] }
it 'renders <video> and <source>', :aggregate_failures do # rubocop:disable RSpec/ExampleLength
video = doc.at_css('video')
source = video.at_css('source')
expect(video).not_to be_nil
expect(source).not_to be_nil
expect(source['src']).to eq('http://example.com/video.mp4')
expect(source['type']).to eq('video/mp4')
end
end
context 'when enclosure is audio' do
subject(:doc) do
html = described_class.new(base:, title: 'Sample instance', url: 'http://example.com', enclosures:,
image: nil).call
Nokogiri::HTML.fragment(html)
end
let(:base) { 'Listen to this' }
let(:enclosures) { [instance_double(Html2rss::RssBuilder::Enclosure, url: 'http://example.com/audio.mp3', type: 'audio/mpeg')] }
it 'renders <audio> and <source>', :aggregate_failures do # rubocop:disable RSpec/ExampleLength
audio = doc.at_css('audio')
source = audio.at_css('source')
expect(audio).not_to be_nil
expect(source).not_to be_nil
expect(source['src']).to eq('http://example.com/audio.mp3')
expect(source['type']).to eq('audio/mpeg')
end
end
context 'when enclosure is a PDF' do
subject(:doc) do
html = described_class.new(base:, title: 'Sample instance', url: 'http://example.com', enclosures:,
image: nil).call
Nokogiri::HTML.fragment(html)
end
let(:base) { 'See this document' }
let(:enclosures) { [instance_double(Html2rss::RssBuilder::Enclosure, url: 'http://example.com/doc.pdf', type: 'application/pdf')] }
it 'renders <iframe>', :aggregate_failures do
iframe = doc.at_css('iframe')
expect(iframe).not_to be_nil
expect(iframe['src']).to eq('http://example.com/doc.pdf')
expect(iframe['width']).to eq('100%')
expect(iframe['height']).to eq('75vh')
end
end
end
describe '.remove_pattern_from_start' do
it 'removes the pattern when it is within the specified range' do
original_text = 'Hello world! Start here.'
pattern = 'world!'
sanitized_text = described_class.remove_pattern_from_start(original_text, pattern)
expect(sanitized_text).to eq('Hello Start here.')
end
it 'does not remove the pattern when it is outside the specified range' do
original_text = 'This is a test. Remove this part.'
pattern = 'part'
sanitized_text = described_class.remove_pattern_from_start(original_text, pattern, end_of_range: 10)
expect(sanitized_text).to eq(original_text)
end
it 'returns the original text if the pattern is not found' do
original_text = 'No match here.'
pattern = 'missing'
sanitized_text = described_class.remove_pattern_from_start(original_text, pattern)
expect(sanitized_text).to eq(original_text)
end
it 'returns the original text if the text is empty' do
original_text = ''
pattern = 'any'
sanitized_text = described_class.remove_pattern_from_start(original_text, pattern)
expect(sanitized_text).to eq(original_text)
end
it 'removes pattern at the beginning of the text' do
original_text = 'pattern should be removed from start'
pattern = 'pattern'
sanitized_text = described_class.remove_pattern_from_start(original_text, pattern)
expect(sanitized_text).to eq(' should be removed from start')
end
it 'handles pattern appearing multiple times in the text' do
original_text = 'Repeat pattern and again pattern in text.'
pattern = 'pattern'
sanitized_text = described_class.remove_pattern_from_start(original_text, pattern)
expect(sanitized_text).to eq('Repeat and again pattern in text.')
end
end
end
| ruby | MIT | 400e796540e82a69e1f1e014b6f89c626acf32fd | 2026-01-04T17:45:12.820049Z | false |
html2rss/html2rss | https://github.com/html2rss/html2rss/blob/400e796540e82a69e1f1e014b6f89c626acf32fd/spec/lib/html2rss/rendering/image_renderer_spec.rb | spec/lib/html2rss/rendering/image_renderer_spec.rb | # frozen_string_literal: true
require 'spec_helper'
RSpec.describe Html2rss::Rendering::ImageRenderer do
describe '#to_html' do
context 'with valid title' do
it 'renders an img tag with escaped title', :aggregate_failures do # rubocop:disable RSpec/ExampleLength
renderer = described_class.new(url: 'https://example.com/image.jpg', title: 'Test & Title')
expect(renderer.to_html).to include('src="https://example.com/image.jpg"')
expect(renderer.to_html).to include('alt="Test & Title"')
expect(renderer.to_html).to include('title="Test & Title"')
expect(renderer.to_html).to include('loading="lazy"')
expect(renderer.to_html).to include('referrerpolicy="no-referrer"')
expect(renderer.to_html).to include('decoding="async"')
expect(renderer.to_html).to include('crossorigin="anonymous"')
end
end
context 'with nil title' do
it 'renders an img tag with empty alt and title attributes', :aggregate_failures do # rubocop:disable RSpec/ExampleLength
renderer = described_class.new(url: 'https://example.com/image.jpg', title: nil)
expect(renderer.to_html).to include('src="https://example.com/image.jpg"')
expect(renderer.to_html).to include('alt=""')
expect(renderer.to_html).to include('title=""')
expect(renderer.to_html).to include('loading="lazy"')
expect(renderer.to_html).to include('referrerpolicy="no-referrer"')
expect(renderer.to_html).to include('decoding="async"')
expect(renderer.to_html).to include('crossorigin="anonymous"')
end
end
context 'with empty string title' do
it 'renders an img tag with empty alt and title attributes', :aggregate_failures do # rubocop:disable RSpec/ExampleLength
renderer = described_class.new(url: 'https://example.com/image.jpg', title: '')
expect(renderer.to_html).to include('src="https://example.com/image.jpg"')
expect(renderer.to_html).to include('alt=""')
expect(renderer.to_html).to include('title=""')
expect(renderer.to_html).to include('loading="lazy"')
expect(renderer.to_html).to include('referrerpolicy="no-referrer"')
expect(renderer.to_html).to include('decoding="async"')
expect(renderer.to_html).to include('crossorigin="anonymous"')
end
end
context 'with special characters in title' do
it 'properly escapes HTML special characters', :aggregate_failures do
renderer = described_class.new(url: 'https://example.com/image.jpg', title: '<script>alert("xss")</script>')
expect(renderer.to_html).to include('alt="<script>alert("xss")</script>"')
expect(renderer.to_html).to include('title="<script>alert("xss")</script>"')
end
end
end
end
| ruby | MIT | 400e796540e82a69e1f1e014b6f89c626acf32fd | 2026-01-04T17:45:12.820049Z | false |
html2rss/html2rss | https://github.com/html2rss/html2rss/blob/400e796540e82a69e1f1e014b6f89c626acf32fd/spec/lib/html2rss/rendering/media_table_renderer_spec.rb | spec/lib/html2rss/rendering/media_table_renderer_spec.rb | # frozen_string_literal: true
require 'spec_helper'
require 'nokogiri'
RSpec.describe Html2rss::Rendering::MediaTableRenderer do
describe '#to_html' do
context 'when no media is available' do
subject(:renderer) { described_class.new(enclosures: [], image: nil) }
it 'returns nil' do
expect(renderer.to_html).to be_nil
end
end
context 'when only enclosures are present' do
subject(:doc) { Nokogiri::HTML.fragment(html) }
let(:html) { renderer.to_html }
let(:renderer) do
described_class.new(
enclosures: [
instance_double(Html2rss::RssBuilder::Enclosure, url: 'http://example.com/image.jpg', type: 'image/jpeg'),
instance_double(Html2rss::RssBuilder::Enclosure, url: 'http://example.com/video.mp4', type: 'video/mp4'),
instance_double(Html2rss::RssBuilder::Enclosure, url: 'http://example.com/audio.mp3', type: 'audio/mpeg'),
instance_double(Html2rss::RssBuilder::Enclosure, url: 'http://example.com/doc.pdf', type: 'application/pdf')
],
image: nil
)
end
it 'renders a details element with summary', :aggregate_failures do
details = doc.at_css('details')
summary = details.at_css('summary')
expect(details).not_to be_nil
expect(summary).not_to be_nil
expect(summary.text).to eq('Available resources')
end
it 'renders a table with proper headers', :aggregate_failures do
table = doc.at_css('table')
headers = table.css('th')
expect(table).not_to be_nil
expect(headers.map(&:text)).to eq(%w[Type URL Actions])
end
it 'renders all enclosure rows with proper content', :aggregate_failures do
rows = doc.css('tbody tr')
expect(rows.length).to eq(4)
expect_all_enclosure_rows(rows)
end
it 'escapes URLs properly' do
renderer = create_renderer_with_special_chars
html = renderer.to_html
expect_escaped_html(html)
end
end
context 'when only fallback image is present' do
subject(:doc) { Nokogiri::HTML.fragment(html) }
let(:html) { renderer.to_html }
let(:renderer) do
described_class.new(
enclosures: [],
image: 'http://example.com/fallback.jpg'
)
end
it 'renders a single image row', :aggregate_failures do
rows = doc.css('tbody tr')
expect(rows.length).to eq(1)
expect_fallback_image_row(rows[0])
end
end
context 'when both enclosures and fallback image are present' do
subject(:doc) { Nokogiri::HTML.fragment(html) }
let(:html) { renderer.to_html }
let(:renderer) do
described_class.new(
enclosures: [
instance_double(Html2rss::RssBuilder::Enclosure, url: 'http://example.com/video.mp4', type: 'video/mp4')
],
image: 'http://example.com/fallback.jpg'
)
end
it 'renders both enclosure and fallback image rows', :aggregate_failures do
rows = doc.css('tbody tr')
expect(rows.length).to eq(2)
expect_video_row(rows[0])
expect_fallback_image_row(rows[1])
end
end
context 'when fallback image duplicates an image enclosure' do
subject(:doc) { Nokogiri::HTML.fragment(html) }
let(:html) { renderer.to_html }
let(:renderer) do
described_class.new(
enclosures: [
instance_double(Html2rss::RssBuilder::Enclosure, url: 'http://example.com/image.jpg', type: 'image/jpeg')
],
image: 'http://example.com/image.jpg'
)
end
it 'does not duplicate the image row', :aggregate_failures do
rows = doc.css('tbody tr')
expect(rows.length).to eq(1)
expect(rows[0].at_css('td:first-child').text).to include('🖼️ Image')
end
end
context 'with unknown file types' do
subject(:doc) { Nokogiri::HTML.fragment(html) }
let(:html) { renderer.to_html }
let(:renderer) do
described_class.new(
enclosures: [
instance_double(Html2rss::RssBuilder::Enclosure, url: 'http://example.com/file.xyz',
type: 'application/unknown')
],
image: nil
)
end
it 'renders with generic file icon and label', :aggregate_failures do
row = doc.at_css('tbody tr')
expect(row.at_css('td:first-child').text).to include('📎 File')
expect(row.at_css('td:last-child').text).to include('Download')
expect(row.css('td:last-child a').attr('download').value).to be_a(String) & be_empty
end
end
end
private
def expect_image_row(row)
expect(row.at_css('td:first-child').text).to include('🖼️ Image')
expect(row.at_css('td:nth-child(2) a')['href']).to eq('http://example.com/image.jpg')
expect(row.at_css('td:last-child').text).to include('View')
end
def expect_video_row(row)
expect(row.at_css('td:first-child').text).to include('🎥 Video')
expect(row.at_css('td:nth-child(2) a')['href']).to eq('http://example.com/video.mp4')
expect(row.at_css('td:last-child').text).to include('Play')
end
def expect_audio_row(row)
expect(row.at_css('td:first-child').text).to include('🎵 Audio')
expect(row.at_css('td:nth-child(2) a')['href']).to eq('http://example.com/audio.mp3')
expect(row.at_css('td:last-child').text).to include('Play')
end
def expect_pdf_row(row)
expect(row.at_css('td:first-child').text).to include('📄 PDF Document')
expect(row.at_css('td:nth-child(2) a')['href']).to eq('http://example.com/doc.pdf')
expect(row.at_css('td:last-child').text).to include('Open')
end
def expect_fallback_image_row(row)
expect(row.at_css('td:first-child').text).to include('🖼️ Image')
expect(row.at_css('td:nth-child(2) a')['href']).to eq('http://example.com/fallback.jpg')
expect(row.at_css('td:last-child').text).to include('View')
end
def expect_all_enclosure_rows(rows)
expect_image_row(rows[0])
expect_video_row(rows[1])
expect_audio_row(rows[2])
expect_pdf_row(rows[3])
end
def expect_escaped_html(html)
expect(html).to include('http://example.com/file with spaces.jpg')
expect(html).not_to include('<script>')
end
def create_renderer_with_special_chars
described_class.new(
enclosures: [
instance_double(Html2rss::RssBuilder::Enclosure, url: 'http://example.com/file with spaces.jpg',
type: 'image/jpeg')
],
image: nil
)
end
end
| ruby | MIT | 400e796540e82a69e1f1e014b6f89c626acf32fd | 2026-01-04T17:45:12.820049Z | false |
html2rss/html2rss | https://github.com/html2rss/html2rss/blob/400e796540e82a69e1f1e014b6f89c626acf32fd/spec/lib/html2rss/request_service/response_spec.rb | spec/lib/html2rss/request_service/response_spec.rb | # frozen_string_literal: true
require 'spec_helper'
RSpec.describe Html2rss::RequestService::Response do
subject(:instance) { described_class.new(body:, headers:, url: Html2rss::Url.from_relative('https://example.com', 'https://example.com')) }
describe '#headers' do
subject(:returned_headers) { instance.headers }
let(:body) { nil }
let(:headers) { { key: 42 } }
it 'returns hash w/ string keys', :aggregate_failures do
expect(returned_headers).to eq('key' => 42)
expect(returned_headers).not_to be headers
end
end
describe '#parsed_body' do
subject(:parsed_body) { instance.parsed_body }
context 'when the response is HTML' do
let(:body) do
<<-HTML
<html>
<body>
<!-- This is a comment -->
<div>Hello World</div>
</body>
</html>
HTML
end
let(:headers) { { 'content-type' => 'text/html' } }
it { expect(parsed_body).to be_frozen }
it 'parses the body and removes comments', :aggregate_failures do
expect(parsed_body.at_xpath('//comment()')).to be_nil
expect(parsed_body.at_css('div').text).to eq('Hello World')
end
end
context 'when the response is JSON' do
let(:body) { '{"key": "value"}' }
let(:headers) { { 'content-type' => 'application/json' } }
it { expect(parsed_body).to be_frozen }
it 'parses the body as JSON' do
expect(parsed_body).to eq({ key: 'value' })
end
end
context 'when the response content type is not supported' do
let(:body) { 'Some unsupported content' }
let(:headers) { { 'content-type' => 'text/plain' } }
it 'raises an UnsupportedResponseContentType error' do
expect do
parsed_body
end.to raise_error(Html2rss::RequestService::UnsupportedResponseContentType,
'Unsupported content type: text/plain')
end
end
end
end
| ruby | MIT | 400e796540e82a69e1f1e014b6f89c626acf32fd | 2026-01-04T17:45:12.820049Z | false |
html2rss/html2rss | https://github.com/html2rss/html2rss/blob/400e796540e82a69e1f1e014b6f89c626acf32fd/spec/lib/html2rss/request_service/context_spec.rb | spec/lib/html2rss/request_service/context_spec.rb | # frozen_string_literal: true
require 'spec_helper'
RSpec.describe Html2rss::RequestService::Context do
describe '#initialize' do
subject(:instance) { described_class.new(url:, headers:) }
let(:url) { 'http://www.example.com' }
let(:headers) { {} }
context 'with a valid URL (String)' do
it 'does not raise an error' do
expect { instance }.not_to raise_error
end
it 'creates a valid context', :aggregate_failures do
expect(instance.url).to be_a(Html2rss::Url)
expect(instance.url.to_s).to eq('http://www.example.com')
expect(instance.headers).to eq({})
end
end
context 'with a valid URL (Html2rss::Url)' do
let(:url) { Html2rss::Url.from_relative('http://example.com', 'http://example.com') }
it 'does not raise an error' do
expect { instance }.not_to raise_error
end
it 'creates a valid context', :aggregate_failures do
expect(instance.url).to be_a(Html2rss::Url)
expect(instance.url.to_s).to eq('http://example.com')
end
end
context 'with custom headers' do
let(:headers) { { 'User-Agent' => 'Custom Agent' } }
it 'stores the headers' do
expect(instance.headers).to eq(headers)
end
end
end
end
| ruby | MIT | 400e796540e82a69e1f1e014b6f89c626acf32fd | 2026-01-04T17:45:12.820049Z | false |
html2rss/html2rss | https://github.com/html2rss/html2rss/blob/400e796540e82a69e1f1e014b6f89c626acf32fd/spec/lib/html2rss/request_service/strategy_spec.rb | spec/lib/html2rss/request_service/strategy_spec.rb | # frozen_string_literal: true
require 'spec_helper'
RSpec.describe Html2rss::RequestService::Strategy do
subject(:instance) { described_class.new(ctx) }
let(:ctx) { Html2rss::RequestService::Context.new(url: 'https://example.com') }
describe '#execute' do
it do
expect { instance.execute }.to raise_error(NotImplementedError, /Subclass/)
end
end
end
| ruby | MIT | 400e796540e82a69e1f1e014b6f89c626acf32fd | 2026-01-04T17:45:12.820049Z | false |
html2rss/html2rss | https://github.com/html2rss/html2rss/blob/400e796540e82a69e1f1e014b6f89c626acf32fd/spec/lib/html2rss/request_service/browserless_strategy_spec.rb | spec/lib/html2rss/request_service/browserless_strategy_spec.rb | # frozen_string_literal: true
require 'spec_helper'
require 'climate_control'
RSpec.describe Html2rss::RequestService::BrowserlessStrategy do
subject(:instance) { described_class.new(ctx) }
let(:ctx) { Html2rss::RequestService::Context.new(url: 'https://example.com') }
describe '#execute' do
let(:response) { instance_double(Html2rss::RequestService::Response) }
let(:commander) { instance_double(Html2rss::RequestService::PuppetCommander, call: response) }
before do
browser = instance_double(Puppeteer::Browser, disconnect: nil)
allow(Puppeteer).to receive(:connect).and_yield(browser)
allow(Html2rss::RequestService::PuppetCommander).to receive(:new).with(ctx, browser).and_return(commander)
end
it 'calls PuppetCommander', :aggregate_failures do
expect { instance.execute }.not_to raise_error
expect(Puppeteer).to have_received(:connect)
expect(commander).to have_received(:call)
end
end
describe '#browser_ws_endpoint' do
context 'without specified ENV vars' do
it do
expect(instance.browser_ws_endpoint).to eq 'ws://127.0.0.1:3000?token=6R0W53R135510'
end
end
context 'with specified ENV vars' do
around do |example|
ClimateControl.modify(
BROWSERLESS_IO_API_TOKEN: 'foobar',
BROWSERLESS_IO_WEBSOCKET_URL: 'wss://host.tld'
) { example.run }
end
it do
expect(instance.browser_ws_endpoint).to eq 'wss://host.tld?token=foobar'
end
end
end
end
| ruby | MIT | 400e796540e82a69e1f1e014b6f89c626acf32fd | 2026-01-04T17:45:12.820049Z | false |
html2rss/html2rss | https://github.com/html2rss/html2rss/blob/400e796540e82a69e1f1e014b6f89c626acf32fd/spec/lib/html2rss/request_service/puppet_commander_spec.rb | spec/lib/html2rss/request_service/puppet_commander_spec.rb | # frozen_string_literal: true
require 'spec_helper'
require 'puppeteer'
RSpec.describe Html2rss::RequestService::PuppetCommander do
let(:ctx) do
instance_double(Html2rss::RequestService::Context,
url: Html2rss::Url.from_relative('https://example.com', 'https://example.com'),
headers: { 'User-Agent' => 'RSpec' })
end
let(:browser) { instance_double(Puppeteer::Browser, new_page: page) }
let(:page) { instance_double(Puppeteer::Page) }
let(:response) { instance_double(Puppeteer::HTTPResponse, headers: { 'Content-Type' => 'text/html' }) }
let(:puppet_commander) { described_class.new(ctx, browser) }
before do
allow(page).to receive(:extra_http_headers=)
allow(page).to receive(:request_interception=)
allow(page).to receive(:on)
allow(page).to receive_messages(goto: response, content: '<html></html>')
allow(page).to receive(:close)
end
describe '#call' do
it 'returns a Response with the correct body and headers', :aggregate_failures do
result = puppet_commander.call
expect(result.body).to eq('<html></html>')
expect(result.headers).to eq({ 'Content-Type' => 'text/html' })
end
it 'closes the page after execution' do
puppet_commander.call
expect(page).to have_received(:close)
end
end
describe '#new_page' do
it 'sets extra HTTP headers on the page' do
puppet_commander.new_page
expect(page).to have_received(:extra_http_headers=).with(ctx.headers)
end
it 'sets up request interception if skip_request_resources is not empty', :aggregate_failures do
puppet_commander.new_page
expect(page).to have_received(:request_interception=).with(true)
expect(page).to have_received(:on).with('request')
end
end
describe '#navigate_to_destination' do
it 'navigates to the given URL' do
puppet_commander.navigate_to_destination(page, ctx.url)
expect(page).to have_received(:goto).with(ctx.url, wait_until: 'networkidle0', referer: 'https://example.com')
end
end
describe '#body' do
it 'returns the content of the page' do
result = puppet_commander.body(page)
expect(result).to eq('<html></html>')
end
end
end
| ruby | MIT | 400e796540e82a69e1f1e014b6f89c626acf32fd | 2026-01-04T17:45:12.820049Z | false |
html2rss/html2rss | https://github.com/html2rss/html2rss/blob/400e796540e82a69e1f1e014b6f89c626acf32fd/spec/lib/html2rss/selectors/extractors_spec.rb | spec/lib/html2rss/selectors/extractors_spec.rb | # frozen_string_literal: true
RSpec.describe Html2rss::Selectors::Extractors do
describe '.get(attribute_options, xml)' do
context 'with valid extractor name' do
it do
expect(described_class.get({ extractor: 'static' }, nil)).to be_nil
end
end
end
end
| ruby | MIT | 400e796540e82a69e1f1e014b6f89c626acf32fd | 2026-01-04T17:45:12.820049Z | false |
html2rss/html2rss | https://github.com/html2rss/html2rss/blob/400e796540e82a69e1f1e014b6f89c626acf32fd/spec/lib/html2rss/selectors/post_processors_spec.rb | spec/lib/html2rss/selectors/post_processors_spec.rb | # frozen_string_literal: true
RSpec.describe Html2rss::Selectors::PostProcessors do
describe '::NAME_TO_CLASS' do
specify(:aggregate_failures) do
expect(described_class::NAME_TO_CLASS).to be_a(Hash)
expect(described_class::NAME_TO_CLASS).to include(
:gsub, :html_to_markdown, :markdown_to_html, :parse_time, :parse_uri, :sanitize_html, :substring, :template
)
end
end
describe '.get' do
context 'with unknown post processor name' do
it do
expect { described_class.get('inexistent', nil, nil) }
.to raise_error described_class::UnknownPostProcessorName
end
end
context 'with known post processor name' do
it do
expect(described_class.get('parse_uri', 'http://example.com/',
{ config: { channel: { url: '' } } })).to be_a(String)
end
end
end
end
| ruby | MIT | 400e796540e82a69e1f1e014b6f89c626acf32fd | 2026-01-04T17:45:12.820049Z | false |
html2rss/html2rss | https://github.com/html2rss/html2rss/blob/400e796540e82a69e1f1e014b6f89c626acf32fd/spec/lib/html2rss/selectors/object_to_xml_converter_spec.rb | spec/lib/html2rss/selectors/object_to_xml_converter_spec.rb | # frozen_string_literal: true
RSpec.describe Html2rss::Selectors::ObjectToXmlConverter do
describe '.call' do
context 'with object being an object' do
let(:object) { { 'data' => [{ 'title' => 'Headline', 'url' => 'https://example.com' }] } }
let(:xml) do
'<object><data><array><object><title>Headline</title><url>https://example.com</url></object></array></data></object>'
end
it 'converts the hash to xml' do
expect(described_class.new(object).call).to eq xml
end
end
context 'with object being an array' do
let(:object) { [{ 'title' => 'Headline', 'url' => 'https://example.com' }] }
let(:xml) do
'<array><object><title>Headline</title><url>https://example.com</url></object></array>'
end
it 'converts the hash to xml' do
expect(described_class.new(object).call).to eq xml
end
end
end
end
| ruby | MIT | 400e796540e82a69e1f1e014b6f89c626acf32fd | 2026-01-04T17:45:12.820049Z | false |
html2rss/html2rss | https://github.com/html2rss/html2rss/blob/400e796540e82a69e1f1e014b6f89c626acf32fd/spec/lib/html2rss/selectors/config_spec.rb | spec/lib/html2rss/selectors/config_spec.rb | # frozen_string_literal: true
require 'spec_helper'
RSpec.describe Html2rss::Selectors::Config do
subject(:result) { described_class.call(config) }
let(:config) do
{}
end
describe 'Items' do
let(:config) do
{ items: { selector: '.article' } }
end
it { is_expected.to be_success }
context 'with invalid order' do
let(:config) do
{ items: { selector: 'items', order: 'invalid' } }
end
it { expect { result }.to raise_error(/must be one of: reverse/) }
end
end
describe 'Selector' do
context 'when contains a selector of type Hash' do
let(:config) do
{ description: { selector: {} } }
end
it { expect { result }.to raise_error(/`selector` must be a string/) }
end
context 'when does not contain a selector but post_process' do
let(:config) do
{ description: { post_process: [] } }
end
it { is_expected.to be_success }
end
context 'when does not contain a selector but static' do
let(:config) do
{ description: { static: 'foobar' } }
end
it { is_expected.to be_success }
end
end
describe 'Selectors: Array Selector' do
%i[categories guid].each do |array_selector|
context "when #{array_selector} used symbol keys" do
let(:config) do
{ array_selector => [:foo], foo: { selector: 'bar' } }
end
it { is_expected.to be_success }
end
context "when #{array_selector} uses string keys" do
let(:config) do
{ array_selector => ['foo'], foo: { selector: 'bar' } }
end
it { is_expected.to be_success }
end
context "when #{array_selector} is not an array" do
let(:config) do
{ array_selector => {} }
end
it { is_expected.to be_failure }
end
context "when #{array_selector} is empty" do
let(:config) do
{ array_selector => %w[] }
end
it { is_expected.to be_failure }
end
context "when #{array_selector} is references unspecificed" do
let(:config) do
{ array_selector => %w[bar] }
end
it { is_expected.to be_failure }
end
end
end
describe 'Selectors post_process' do
context 'with gsub' do
let(:config) do
{ title: { post_process: [{ name: 'gsub', pattern: 'foo', replacement: 'bar' }] } }
end
it { is_expected.to be_success }
end
context 'with substring' do
let(:config) do
{ title: { post_process: [{ name: 'substring', start: 0, end: 1 }] } }
end
it { is_expected.to be_success }
end
context 'with template' do
let(:config) do
{ title: { post_process: [{ name: 'template', string: 'foo' }] } }
end
it { is_expected.to be_success }
end
context 'with html_to_markdown' do
let(:config) do
{ title: { post_process: [{ name: 'html_to_markdown' }] } }
end
it { is_expected.to be_success }
end
context 'with markdown_to_html' do
let(:config) do
{ title: { post_process: [{ name: 'markdown_to_html' }] } }
end
it { is_expected.to be_success }
end
context 'with parse_time' do
let(:config) do
{ title: { post_process: [{ name: 'parse_time' }] } }
end
it { is_expected.to be_success }
end
context 'with parse_uri' do
let(:config) do
{ title: { post_process: [{ name: 'parse_uri' }] } }
end
it { is_expected.to be_success }
end
context 'with sanitize_html' do
let(:config) do
{ title: { post_process: [{ name: 'sanitize_html' }] } }
end
it { is_expected.to be_success }
end
context 'with unknown post_processor' do
let(:config) do
{ title: { post_process: [{ name: 'unknown' }] } }
end
it { expect { result }.to raise_error(/Unknown post_processor/) }
end
context 'with missing post_processor name' do
let(:config) do
{ title: { post_process: [{}] } }
end
it { expect { result }.to raise_error(/Missing post_processor `name`/) }
end
context 'without gsub.pattern' do
let(:config) do
{ title: { post_process: [{ name: 'gsub' }] } }
end
it { expect { result }.to raise_error(/`pattern` must be a string/) }
end
context 'without gsub.replacement' do
let(:config) do
{ title: { post_process: [{ name: 'gsub', pattern: '' }] } }
end
it { expect { result }.to raise_error(/`replacement` must be a string/) }
end
context 'without substring.start' do
let(:config) do
{ title: { post_process: [{ name: 'substring' }] } }
end
it { expect { result }.to raise_error(/`start` must be an integer/) }
end
context 'with invalid substring.end' do
let(:config) do
{ title: { post_process: [{ name: 'substring', start: 0, end: 'foo' }] } }
end
it { expect { result }.to raise_error(/`end` must be an integer or omitted/) }
end
context 'without template.string' do
let(:config) do
{ title: { post_process: [{ name: 'template' }] } }
end
it { expect { result }.to raise_error(/`string` must be a string/) }
end
end
describe 'Selectors :extractor' do
context 'with attribute' do
let(:config) do
{ title: { selector: '', extractor: 'attribute', attribute: 'title' } }
end
it { is_expected.to be_success }
end
context 'with static' do
let(:config) do
{ title: { extractor: 'static', static: 'foo' } }
end
it { is_expected.to be_success }
end
context 'with invalid attribute' do
let(:config) do
{ title: { selector: '', extractor: 'attribute' } }
end
it { expect { result }.to raise_error(/`attribute` must be a string/) }
end
context 'with invalid static' do
let(:config) do
{ title: { selector: '', extractor: 'static' } }
end
it { expect { result }.to raise_error(/`static` must be a string/) }
end
end
describe 'Enclosure' do
specify { expect(described_class::Enclosure).to be < described_class::Selector }
context 'with selector' do
let(:config) do
{ enclosure: { selector: 'enclosure' } }
end
it { is_expected.to be_success }
end
context 'without selector' do
let(:config) do
{ enclosure: { post_process: [] } }
end
it { is_expected.to be_success }
end
context 'with invalid content_type' do
let(:config) do
{ enclosure: { selector: 'enclosure', content_type: 'audio' } }
end
it { expect { result }.to raise_error(/invalid format.*content_type/) }
end
end
end
| ruby | MIT | 400e796540e82a69e1f1e014b6f89c626acf32fd | 2026-01-04T17:45:12.820049Z | false |
html2rss/html2rss | https://github.com/html2rss/html2rss/blob/400e796540e82a69e1f1e014b6f89c626acf32fd/spec/lib/html2rss/selectors/extractors/static_spec.rb | spec/lib/html2rss/selectors/extractors/static_spec.rb | # frozen_string_literal: true
RSpec.describe Html2rss::Selectors::Extractors::Static do
subject { described_class.new(nil, options).get }
let(:options) { instance_double(Struct::StaticOptions, static: 'Foobar') }
it { is_expected.to eq 'Foobar' }
end
| ruby | MIT | 400e796540e82a69e1f1e014b6f89c626acf32fd | 2026-01-04T17:45:12.820049Z | false |
html2rss/html2rss | https://github.com/html2rss/html2rss/blob/400e796540e82a69e1f1e014b6f89c626acf32fd/spec/lib/html2rss/selectors/extractors/text_spec.rb | spec/lib/html2rss/selectors/extractors/text_spec.rb | # frozen_string_literal: true
RSpec.describe Html2rss::Selectors::Extractors::Text do
subject { described_class.new(xml, options).get }
let(:xml) { Nokogiri.HTML('<p>Lorem <b>ipsum</b> dolor ...</p>') }
let(:options) { instance_double(Struct::TextOptions, 'selector' => 'p') }
it { is_expected.to eq 'Lorem ipsum dolor ...' }
end
| ruby | MIT | 400e796540e82a69e1f1e014b6f89c626acf32fd | 2026-01-04T17:45:12.820049Z | false |
html2rss/html2rss | https://github.com/html2rss/html2rss/blob/400e796540e82a69e1f1e014b6f89c626acf32fd/spec/lib/html2rss/selectors/extractors/html_spec.rb | spec/lib/html2rss/selectors/extractors/html_spec.rb | # frozen_string_literal: true
RSpec.describe Html2rss::Selectors::Extractors::Html do
subject { described_class.new(xml, options).get }
let(:xml) { Nokogiri.HTML('<p>Lorem <b>ipsum</b> dolor ...</p>') }
let(:options) { instance_double(Struct::HtmlOptions, selector: 'p') }
it { is_expected.to eq '<p>Lorem <b>ipsum</b> dolor ...</p>' }
end
| ruby | MIT | 400e796540e82a69e1f1e014b6f89c626acf32fd | 2026-01-04T17:45:12.820049Z | false |
html2rss/html2rss | https://github.com/html2rss/html2rss/blob/400e796540e82a69e1f1e014b6f89c626acf32fd/spec/lib/html2rss/selectors/extractors/attribute_spec.rb | spec/lib/html2rss/selectors/extractors/attribute_spec.rb | # frozen_string_literal: true
RSpec.describe Html2rss::Selectors::Extractors::Attribute do
subject { described_class.new(xml, options).get }
let(:xml) { Nokogiri.HTML('<div><time datetime="2019-07-01">...</time></div>') }
let(:options) { instance_double(Struct::AttributeOptions, selector: 'time', attribute: 'datetime') }
it { is_expected.to eq '2019-07-01' }
end
| ruby | MIT | 400e796540e82a69e1f1e014b6f89c626acf32fd | 2026-01-04T17:45:12.820049Z | false |
html2rss/html2rss | https://github.com/html2rss/html2rss/blob/400e796540e82a69e1f1e014b6f89c626acf32fd/spec/lib/html2rss/selectors/extractors/href_spec.rb | spec/lib/html2rss/selectors/extractors/href_spec.rb | # frozen_string_literal: true
require 'nokogiri'
RSpec.describe Html2rss::Selectors::Extractors::Href do
subject { described_class.new(xml, options).get }
let(:channel) { { url: 'https://example.com' } }
let(:options) { instance_double(Struct::HrefOptions, selector: 'a', channel:) }
context 'with relative href url' do
let(:xml) { Nokogiri.HTML('<div><a href="/posts/latest-findings">...</a></div>') }
specify(:aggregate_failures) do
expect(subject).to be_a(Html2rss::Url)
expect(subject).to eq Html2rss::Url.from_relative('https://example.com/posts/latest-findings', 'http://example.com')
end
end
context 'with absolute href url' do
let(:xml) { Nokogiri.HTML('<div><a href="http://example.com/posts/absolute">...</a></div>') }
specify(:aggregate_failures) do
expect(subject).to be_a(Html2rss::Url)
expect(subject).to eq Html2rss::Url.from_relative('http://example.com/posts/absolute', 'http://example.com')
end
end
end
| ruby | MIT | 400e796540e82a69e1f1e014b6f89c626acf32fd | 2026-01-04T17:45:12.820049Z | false |
html2rss/html2rss | https://github.com/html2rss/html2rss/blob/400e796540e82a69e1f1e014b6f89c626acf32fd/spec/lib/html2rss/selectors/post_processors/sanitize_html_spec.rb | spec/lib/html2rss/selectors/post_processors/sanitize_html_spec.rb | # frozen_string_literal: true
RSpec.describe Html2rss::Selectors::PostProcessors::SanitizeHtml do
it { expect(described_class).to be < Html2rss::Selectors::PostProcessors::Base }
describe '#get' do
subject { described_class.new(html, config:).get }
let(:config) do
{
channel: { title: 'Example: questions', url: 'https://example.com/questions' },
selectors: {
items: { selector: '#questions > ul > li' },
title: { selector: 'a' },
link: { selector: 'a', extractor: 'href' }
}
}
end
let(:sanitized_html) do
<<~HTML
Breaking news: I'm a deprecated tag
<div>
<a href="https://example.com/lol.gif" rel="nofollow noopener noreferrer" target="_blank"><img src="https://example.com/lol.gif" alt="An animal looking cute" referrerpolicy="no-referrer" crossorigin="anonymous" loading="lazy" decoding="async"></a>
<a href="http://example.com" title="foo" rel="nofollow noopener noreferrer" target="_blank">example.com</a> <a href="https://example.com/article-123" rel="nofollow noopener noreferrer" target="_blank">Click here!</a>
</div>
HTML
end
let(:html) do
<<~HTML
<html lang="en">
<body>
<script src="http://evil.js"></script>
<script>alert('lol')</script>
<marquee>Breaking news: I'm a deprecated tag</marquee>
<iframe hidden src="http://mine.currency"></iframe>
<div>
<img src='/lol.gif' id="funnypic" alt="An animal looking cute">
</html>
<a href="http://example.com" class="link" title="foo" style="color: red">example.com</a>
<a href="/article-123">Click here!</a>
</div>
</body>
</html>
HTML
end
it do
result = Nokogiri::HTML.fragment(subject).to_html.chomp.gsub(/\s+/, ' ')
expected_html = Nokogiri::HTML.fragment(sanitized_html).to_html.chomp.gsub(/\s+/, ' ')
expect(result).to eq(expected_html)
end
end
describe '.get' do
subject { described_class.get(html, 'http://example.com') }
let(:html) { '<p>Hi <a href="/world">World!</a><script></script></p>' }
let(:sanitized_html) do
'<p>Hi <a href="http://example.com/world" rel="nofollow noopener noreferrer" target="_blank">World!</a></p>'
end
it 'returns the sanitized HTML' do
expect(subject).to eq(sanitized_html)
end
context 'with html being nil' do
let(:html) { nil }
it 'returns nil' do
expect(subject).to be_nil
end
end
end
describe '.validate_args!' do
let(:context) { { foo: :bar } }
it 'does not raise when value is a String' do
expect do
described_class.validate_args!('some html', context)
end.not_to raise_error
end
it 'raises when value is not a String' do
expect do
described_class.validate_args!(123, context)
end.to raise_error(Html2rss::Selectors::PostProcessors::InvalidType)
end
end
end
| ruby | MIT | 400e796540e82a69e1f1e014b6f89c626acf32fd | 2026-01-04T17:45:12.820049Z | false |
html2rss/html2rss | https://github.com/html2rss/html2rss/blob/400e796540e82a69e1f1e014b6f89c626acf32fd/spec/lib/html2rss/selectors/post_processors/gsub_spec.rb | spec/lib/html2rss/selectors/post_processors/gsub_spec.rb | # frozen_string_literal: true
RSpec.describe Html2rss::Selectors::PostProcessors::Gsub do
it { expect(described_class).to be < Html2rss::Selectors::PostProcessors::Base }
context 'with args validation' do
context 'without pattern option' do
it do
expect do
described_class.new('hello',
options: { replacement: 'world' })
end.to raise_error(Html2rss::Selectors::PostProcessors::MissingOption,
/The `pattern` option is missing in: {/)
end
end
context 'without replacement option' do
it do
expect do
described_class.new('hello',
options: { pattern: 'world' })
end.to raise_error(Html2rss::Selectors::PostProcessors::MissingOption,
/The `replacement` option is missing in: {/)
end
end
context 'without replacement option not being a String or Hash' do
it do
expect do
described_class.new('hello', options: { pattern: 'world', replacement: [] })
end.to raise_error(Html2rss::Selectors::PostProcessors::InvalidType,
/The type of `replacement` must be String or Hash, but is: Array in: {/)
end
end
end
context 'with string pattern' do
context 'with string replacement' do
subject do
described_class.new('Foo bar and boo', options: { pattern: 'boo', replacement: 'baz' }).get
end
it { is_expected.to eq 'Foo bar and baz' }
end
end
context 'with pattern being a Regexp as String' do
context 'with hash replacement' do
subject do
described_class.new('hello',
options: { pattern: '/[eo]/', replacement: { 'e' => 3, 'o' => '*' } }).get
end
it { is_expected.to eq 'h3ll*' }
end
context 'with single character string' do
subject do
described_class.new('hello',
options: { pattern: '/', replacement: 'X' }).get
end
it { is_expected.to eq 'hello' }
end
context 'with three character string with slashes' do
subject do
described_class.new('hello',
options: { pattern: '/e/', replacement: 'X' }).get
end
it { is_expected.to eq 'hXllo' }
end
end
context 'with whitespace and empty string patterns' do
context 'with empty string' do
subject do
described_class.new('', options: { pattern: '^\\s*$', replacement: 'Untitled' }).get
end
it { is_expected.to eq 'Untitled' }
end
context 'with whitespace only string' do
subject do
described_class.new(' ', options: { pattern: '^\\s*$', replacement: 'Untitled' }).get
end
it { is_expected.to eq 'Untitled' }
end
context 'with mixed whitespace string' do
subject do
described_class.new(" \t\n ", options: { pattern: '^\\s*$', replacement: 'Untitled' }).get
end
it { is_expected.to eq 'Untitled' }
end
context 'with non-empty string containing whitespace' do
subject do
described_class.new(' hello ', options: { pattern: '^\\s*$', replacement: 'Untitled' }).get
end
it { is_expected.to eq ' hello ' }
end
context 'with newlines and tabs only' do
subject do
described_class.new("\n\t\n", options: { pattern: '^\\s*$', replacement: 'Untitled' }).get
end
it { is_expected.to eq 'Untitled' }
end
end
end
| ruby | MIT | 400e796540e82a69e1f1e014b6f89c626acf32fd | 2026-01-04T17:45:12.820049Z | false |
html2rss/html2rss | https://github.com/html2rss/html2rss/blob/400e796540e82a69e1f1e014b6f89c626acf32fd/spec/lib/html2rss/selectors/post_processors/parse_uri_spec.rb | spec/lib/html2rss/selectors/post_processors/parse_uri_spec.rb | # frozen_string_literal: true
RSpec.describe Html2rss::Selectors::PostProcessors::ParseUri do
subject do
described_class.new(url, context).get
end
let(:context) do
Html2rss::Selectors::Context.new(
config: { channel: { url: 'http://example.com' } }
)
end
it { expect(described_class).to be < Html2rss::Selectors::PostProcessors::Base }
context 'with Html2rss::Url value' do
let(:url) { Html2rss::Url.from_relative('http://example.com', 'http://example.com') }
it { is_expected.to eq 'http://example.com' }
end
context 'with String value' do
context 'with an absolute url containing a trailing space' do
let(:url) { 'http://example.com ' }
it { is_expected.to eq 'http://example.com' }
end
context 'with relative url' do
let(:url) { '/foo/bar' }
it { is_expected.to eq 'http://example.com/foo/bar' }
end
end
end
| ruby | MIT | 400e796540e82a69e1f1e014b6f89c626acf32fd | 2026-01-04T17:45:12.820049Z | false |
html2rss/html2rss | https://github.com/html2rss/html2rss/blob/400e796540e82a69e1f1e014b6f89c626acf32fd/spec/lib/html2rss/selectors/post_processors/parse_time_spec.rb | spec/lib/html2rss/selectors/post_processors/parse_time_spec.rb | # frozen_string_literal: true
require 'tzinfo'
RSpec.describe Html2rss::Selectors::PostProcessors::ParseTime do
it { expect(described_class).to be < Html2rss::Selectors::PostProcessors::Base }
context 'with known time_zone' do
{
'America/New_York' => 'Mon, 01 Jul 2019 12:00:00 -0400',
'Europe/London' => 'Mon, 01 Jul 2019 12:00:00 +0100',
'Europe/Berlin' => 'Mon, 01 Jul 2019 12:00:00 +0200'
}.each_pair do |time_zone, expected|
it "parses in time_zone #{time_zone}" do
ctx = Html2rss::Selectors::Context.new(config: { channel: { time_zone: } })
expect(described_class.new('2019-07-01 12:00', ctx).get).to eq expected
end
end
end
context 'with unknown time_zone' do
it 'raises TZInfo::InvalidTimezoneIdentifier' do
ctx = Html2rss::Selectors::Context.new(config: { channel: { time_zone: 'Foobar/Baz' } })
expect { described_class.new('2019-07-01 12:00', ctx).get }.to raise_error(TZInfo::InvalidTimezoneIdentifier)
end
end
context 'with nil time_zone' do
it 'raises ArgumentError' do
ctx = Html2rss::Selectors::Context.new(config: { channel: { time_zone: nil } })
expect { described_class.new('2019-07-01 12:00', ctx).get }
.to raise_error(ArgumentError, 'time_zone cannot be nil or empty')
end
end
context 'with empty time_zone' do
it 'raises ArgumentError' do
ctx = Html2rss::Selectors::Context.new(config: { channel: { time_zone: '' } })
expect { described_class.new('2019-07-01 12:00', ctx).get }
.to raise_error(ArgumentError, 'time_zone cannot be nil or empty')
end
end
end
| ruby | MIT | 400e796540e82a69e1f1e014b6f89c626acf32fd | 2026-01-04T17:45:12.820049Z | false |
html2rss/html2rss | https://github.com/html2rss/html2rss/blob/400e796540e82a69e1f1e014b6f89c626acf32fd/spec/lib/html2rss/selectors/post_processors/markdown_to_html_spec.rb | spec/lib/html2rss/selectors/post_processors/markdown_to_html_spec.rb | # frozen_string_literal: true
RSpec.describe Html2rss::Selectors::PostProcessors::MarkdownToHtml do
subject { described_class.new(markdown, config:).get }
let(:html) do
['<h1>Section</h1>',
'<p>Price: 12.34</p>',
'<ul>',
'<li>Item 1</li>',
'<li>Item 2</li>',
'</ul>',
"<p><code>puts 'hello world'</code></p>"].join(' ')
end
let(:markdown) do
<<~MD
# Section
Price: 12.34
- Item 1
- Item 2
`puts 'hello world'`
MD
end
let(:config) do
{ channel: { title: 'Example: questions', url: 'https://example.com/questions' },
selectors: { items: {} } }
end
it { expect(described_class).to be < Html2rss::Selectors::PostProcessors::Base }
it { is_expected.to eq html }
end
| ruby | MIT | 400e796540e82a69e1f1e014b6f89c626acf32fd | 2026-01-04T17:45:12.820049Z | false |
html2rss/html2rss | https://github.com/html2rss/html2rss/blob/400e796540e82a69e1f1e014b6f89c626acf32fd/spec/lib/html2rss/selectors/post_processors/html_to_markdown_spec.rb | spec/lib/html2rss/selectors/post_processors/html_to_markdown_spec.rb | # frozen_string_literal: true
RSpec.describe Html2rss::Selectors::PostProcessors::HtmlToMarkdown do
subject { described_class.new(html, config:).get }
let(:markdown) do
[
"# Very interesting\n Breaking news: I'm a deprecated tag \n ",
'[](https://example.com/lol.gif) ',
'[example.com](http://example.com "foo") ',
"[Click here!](https://example.com/article-123) \n"
].join
end
let(:html) do
<<~HTML
<html lang="en">
<body>
<script src="http://evil.js"></script>
<script>alert('lol')</script>
<h1>Very interesting</h1>
<marquee>Breaking news: I'm a deprecated tag</marquee>
<iframe hidden src="http://mine.currency"></iframe>
<div>
<img src='/lol.gif' id="funnypic" alt="An animal looking cute">
</html>
<a href="http://example.com" class="link" title="foo" style="color: red">example.com</a>
<a href="/article-123">Click here!</a>
</div>
</body>
</html>
HTML
end
let(:config) do
{
channel: { title: 'Example: questions', url: 'https://example.com/questions' },
selectors: {
items: { selector: '#questions > ul > li' },
title: { selector: 'a' },
link: { selector: 'a', extractor: 'href' }
}
}
end
it { expect(described_class).to be < Html2rss::Selectors::PostProcessors::Base }
it { is_expected.to eq markdown }
end
| ruby | MIT | 400e796540e82a69e1f1e014b6f89c626acf32fd | 2026-01-04T17:45:12.820049Z | false |
html2rss/html2rss | https://github.com/html2rss/html2rss/blob/400e796540e82a69e1f1e014b6f89c626acf32fd/spec/lib/html2rss/selectors/post_processors/base_spec.rb | spec/lib/html2rss/selectors/post_processors/base_spec.rb | # frozen_string_literal: true
RSpec.describe Html2rss::Selectors::PostProcessors::Base do
subject(:instance) { described_class.new(value, context) }
let(:value) { 'test' }
describe '.expect_options' do
let(:context) { Html2rss::Selectors::Context.new({ options: { key1: 'value1', key2: 'value2' } }) }
it 'does not raise an error if all keys are present' do
expect { described_class.send(:expect_options, %i[key1 key2], context) }.not_to raise_error
end
it 'raises an error if a key is missing' do
expect do
described_class.send(:expect_options, %i[key1 key3], context)
end.to raise_error(Html2rss::Selectors::PostProcessors::MissingOption,
/The `key3` option is missing in:/)
end
end
describe '.assert_type' do
let(:context) { nil }
it 'does not raise an error if value is of the correct type' do
expect { described_class.send(:assert_type, 'string', String, 'test', context:) }.not_to raise_error
end
it 'raises an error if value is of the incorrect type' do
expect do
described_class.send(:assert_type, 123, String, 'test', context:)
end.to raise_error(Html2rss::Selectors::PostProcessors::InvalidType,
/The type of `test` must be String, but is: Integer in: {.*"base_spec.rb"}/)
end
it 'supports multiple types', :aggregate_failures do
expect do
described_class.send(:assert_type, 'string', [String, Symbol], 'test', context:)
described_class.send(:assert_type, :symbol, [String, Symbol], 'test', context:)
end.not_to raise_error
end
end
describe '.validate_args!' do
it 'raises NotImplementedError' do
expect do
described_class.send(:validate_args!, '', Html2rss::Selectors::Context.new({}))
end.to raise_error(NotImplementedError, 'You must implement the `validate_args!` method in the post processor')
end
end
describe '#initialize' do
before { allow(described_class).to receive(:validate_args!).with(value, context) }
let(:value) { 'test' }
let(:context) { Html2rss::Selectors::Context.new({ options: { key1: 'value1' } }) }
it 'calls validate_args! with value and context' do
described_class.new(value, context)
expect(described_class).to have_received(:validate_args!).with(value, context)
end
end
describe '#get' do
before do
allow(described_class).to receive_messages(assert_type: nil, validate_args!: nil)
end
it 'raises NotImplementedError' do
expect do
described_class.new('value',
Html2rss::Selectors::Context.new({ options: {} })).get
end.to raise_error(NotImplementedError, 'You must implement the `get` method in the post processor')
end
end
end
| ruby | MIT | 400e796540e82a69e1f1e014b6f89c626acf32fd | 2026-01-04T17:45:12.820049Z | false |
html2rss/html2rss | https://github.com/html2rss/html2rss/blob/400e796540e82a69e1f1e014b6f89c626acf32fd/spec/lib/html2rss/selectors/post_processors/template_spec.rb | spec/lib/html2rss/selectors/post_processors/template_spec.rb | # frozen_string_literal: true
RSpec.describe Html2rss::Selectors::PostProcessors::Template do
subject { described_class.new('Hi', options:, item:, scraper:).get }
let(:item) { Object.new }
let(:scraper) { instance_double(Html2rss::Selectors) }
before do
allow(scraper).to receive(:select).with(:name, item).and_return('My name')
allow(scraper).to receive(:select).with(:author, item).and_return('Slim Shady')
allow(scraper).to receive(:select).with(:returns_nil, item).and_return(nil)
end
it { expect(described_class).to be < Html2rss::Selectors::PostProcessors::Base }
context 'when the string is empty' do
it 'raises an error' do
expect do
described_class.new('', {})
end.to raise_error(Html2rss::Selectors::PostProcessors::InvalidType, 'The `string` template is absent.')
end
end
context 'with mixed complex formatting notation' do
let(:options) { { string: '%{self}! %<name>s is %{author}! %{returns_nil}' } } # rubocop:disable Style/FormatStringToken
it { is_expected.to eq 'Hi! My name is Slim Shady! ' }
end
end
| ruby | MIT | 400e796540e82a69e1f1e014b6f89c626acf32fd | 2026-01-04T17:45:12.820049Z | false |
html2rss/html2rss | https://github.com/html2rss/html2rss/blob/400e796540e82a69e1f1e014b6f89c626acf32fd/spec/lib/html2rss/selectors/post_processors/substring_spec.rb | spec/lib/html2rss/selectors/post_processors/substring_spec.rb | # frozen_string_literal: true
RSpec.describe Html2rss::Selectors::PostProcessors::Substring do
it { expect(described_class).to be < Html2rss::Selectors::PostProcessors::Base }
context 'with end' do
subject { described_class.new('Foo bar and baz', options: { start: 4, end: 6 }).get }
it { is_expected.to eq 'bar' }
end
context 'without end' do
subject { described_class.new('foobarbaz', options: { start: 3 }).get }
it { is_expected.to eq 'barbaz' }
end
describe '#range' do
subject { described_class.new('value', options:) }
context 'when start and end options are provided' do
let(:options) { { start: 2, end: 4 } }
it 'returns the correct range' do
expect(subject.range).to eq(2..4)
end
end
context 'when only start option is provided' do
let(:options) { { start: 3 } }
it 'returns the range from start index to the end of the string' do
expect(subject.range).to eq(3..)
end
end
context 'when start and end options are equal' do
let(:options) { { start: 2, end: 2 } }
it 'raises an ArgumentError' do
expect do
subject.range
end.to raise_error(ArgumentError, 'The `start` value must be unequal to the `end` value.')
end
end
context 'when start option is missing' do
let(:options) { { end: 4 } }
it 'raises an error' do
expect do
subject.range
end.to raise_error(Html2rss::Selectors::PostProcessors::InvalidType, /but is: NilClass in:/)
end
end
end
end
| ruby | MIT | 400e796540e82a69e1f1e014b6f89c626acf32fd | 2026-01-04T17:45:12.820049Z | false |
html2rss/html2rss | https://github.com/html2rss/html2rss/blob/400e796540e82a69e1f1e014b6f89c626acf32fd/spec/lib/html2rss/selectors/post_processors/html_transformers/wrap_img_in_a_spec.rb | spec/lib/html2rss/selectors/post_processors/html_transformers/wrap_img_in_a_spec.rb | # frozen_string_literal: true
require 'spec_helper'
RSpec.describe Html2rss::Selectors::PostProcessors::HtmlTransformers::WrapImgInA do
subject(:transformer) { described_class.new }
describe '#call' do
subject(:call) { transformer.call(node_name:, node:) }
let(:node_name) { 'img' }
let(:node) { Nokogiri::HTML('<html><p><img src="https://example.com/image.jpg"></p></html>').at('img') }
it 'wraps the image in an anchor tag', :aggregate_failures do
expect { call }.to change { node.parent.name }.from('p').to('a')
expect(node.parent['href']).to eq('https://example.com/image.jpg')
end
end
end
| ruby | MIT | 400e796540e82a69e1f1e014b6f89c626acf32fd | 2026-01-04T17:45:12.820049Z | false |
html2rss/html2rss | https://github.com/html2rss/html2rss/blob/400e796540e82a69e1f1e014b6f89c626acf32fd/spec/lib/html2rss/rss_builder/article_spec.rb | spec/lib/html2rss/rss_builder/article_spec.rb | # frozen_string_literal: true
RSpec.describe Html2rss::RssBuilder::Article do
subject(:instance) { described_class.new(**options) }
let(:options) { { title: 'Sample instance', url: 'http://example.com', description: 'By John Doe' } }
describe '#initialize' do
it 'stores the options as a hash' do
expect(instance.instance_variable_get(:@to_h)).to eq(options)
end
context 'when unknown options are present' do
let(:options) { { title: 'Sample instance', url: 'http://example.com', description: 'By John Doe', unknown_key: 'value' } }
before { allow(Html2rss::Log).to receive(:warn) }
it 'logs a warning' do
described_class.new(**options)
expect(Html2rss::Log).to have_received(:warn).with('Article: unknown keys found: unknown_key')
end
end
end
describe '#each' do
let(:yields) do
described_class::PROVIDED_KEYS.map do |key|
[key, instance.public_send(key)]
end
end
it 'yields each PROVIDED_KEY with their values' do
expect { |b| instance.each(&b) }.to yield_successive_args(*yields)
end
it 'returns an Enumerator if no block is given' do
expect(instance.each).to be_an(Enumerator)
end
it 'returns frozen values' do
instance.each { |value| expect(value).to be_frozen } # rubocop:disable RSpec/IteratedExpectation
end
end
describe '#description' do
before do
allow(Html2rss::Rendering::DescriptionBuilder).to receive(:new).and_call_original
instance.description
end
it 'calls the DescriptionBuilder' do
expect(Html2rss::Rendering::DescriptionBuilder).to have_received(:new)
.with(base: 'By John Doe', title: 'Sample instance', url: instance.url, enclosures: [], image: nil)
end
end
describe '#url' do
it 'returns the url if present', :aggregate_failures do
url = instance.url
expect(url).to be_a(Html2rss::Url)
expect(url.to_s).to eq('http://example.com/')
end
it 'returns nil if no url is present' do
instance = described_class.new(title: 'Sample instance')
expect(instance.url).to be_nil
end
end
describe '#valid?' do
context 'when url, title, and id are present' do
let(:options) { { url: 'http://example.com', title: 'Sample Title', id: 'foobar' } }
it { is_expected.to be_valid }
end
context 'when url is missing' do
let(:options) { { title: 'Sample Title' } }
it { is_expected.not_to be_valid }
end
context 'when title is missing' do
let(:options) { { url: 'http://example.com' } }
it { is_expected.not_to be_valid }
end
context 'when url, title, and guid are missing' do
let(:options) { {} }
it { is_expected.not_to be_valid }
end
end
describe '#guid' do
it 'returns a unique identifier based on the url and id', :aggregate_failures do
instance = described_class.new(url: 'http://example.com/article', id: '123')
expect(instance.guid).to eq('vikwuv')
expect(instance.guid.encoding).to eq(Encoding::UTF_8)
end
it 'returns a different identifier for different urls' do
instance1 = described_class.new(url: 'http://example.com/article1', id: '123')
instance2 = described_class.new(url: 'http://example.com/article2', id: '123')
expect(instance1.guid).not_to eq(instance2.guid)
end
it 'returns a different identifier for different ids' do
instance1 = described_class.new(url: 'http://example.com/article1', id: '123')
instance2 = described_class.new(url: 'http://example.com/article2', id: '456')
expect(instance1.guid).not_to eq(instance2.guid)
end
it 'returns the same identifier for the same url and id' do
instance1 = described_class.new(url: 'http://example.com/article', id: '123')
instance2 = described_class.new(url: 'http://example.com/article', id: '123')
expect(instance1.guid).to eq(instance2.guid)
end
it 'returns the same identifier for the same url and id with different case' do
instance1 = described_class.new(url: 'http://example.com/article', id: '123')
instance2 = described_class.new(url: 'http://EXAMPLE.com/article', id: '123')
expect(instance1.guid).to eq(instance2.guid)
end
end
describe '#deduplication_fingerprint' do
let(:separator) { described_class::DEDUP_FINGERPRINT_SEPARATOR }
it 'prefers the sanitized URL combined with the id' do
article = described_class.new(url: 'http://example.com/article', id: '123')
expected = [article.url.to_s, '123'].join(separator)
expect(article.deduplication_fingerprint).to eq(expected)
end
it 'falls back to the id when the URL is missing' do
article = described_class.new(id: 'only-id')
expect(article.deduplication_fingerprint).to eq('only-id')
end
it 'falls back to the guid enriched with metadata', :aggregate_failures do
article = described_class.new(title: 'Alpha', description: 'Beta', guid: ['custom-guid'])
expected = [article.guid, article.title, article.description].join(separator)
expect(article.deduplication_fingerprint).to eq(expected)
end
end
describe '#categories' do
it 'returns an array of unique and present categories' do
instance = described_class.new(categories: ['Category 1', '', 'Category 2', 'Category 1 '])
expect(instance.categories).to eq(['Category 1', 'Category 2'])
end
it 'returns an empty array if no categories are present' do
instance = described_class.new
expect(instance.categories).to eq([])
end
end
describe '#published_at' do
it 'returns a Time object if published_at is present and valid' do
instance = described_class.new(published_at: '2022-01-01T12:00:00Z')
expect(instance.published_at).to be_a(DateTime)
end
it 'returns nil if published_at is not present' do
instance = described_class.new
expect(instance.published_at).to be_nil
end
it 'returns nil if published_at is invalid' do
instance = described_class.new(published_at: 'invalid_date')
expect(instance.published_at).to be_nil
end
end
end
| ruby | MIT | 400e796540e82a69e1f1e014b6f89c626acf32fd | 2026-01-04T17:45:12.820049Z | false |
html2rss/html2rss | https://github.com/html2rss/html2rss/blob/400e796540e82a69e1f1e014b6f89c626acf32fd/spec/lib/html2rss/rss_builder/enclosure_spec.rb | spec/lib/html2rss/rss_builder/enclosure_spec.rb | # frozen_string_literal: true
require 'spec_helper'
RSpec.describe Html2rss::RssBuilder::Enclosure do
describe '.guess_content_type_from_url(url)' do
{
'https://example.com/image.jpg' => 'image/jpeg',
'https://example.com/image.png' => 'image/png',
'https://example.com/image.gif' => 'image/gif',
'https://example.com/image.svg' => 'image/svg+xml',
'https://example.com/image.webp' => 'image/webp',
'https://example.com/image' => 'application/octet-stream',
'https://api.PAGE.com/wp-content/photo.jpg?quality=85&w=925&h=617&crop=1&resize=925,617' => 'image/jpeg'
}.each_pair do |url, expected|
it { expect(described_class.guess_content_type_from_url(Html2rss::Url.from_relative(url, 'https://example.com'))).to eq expected }
end
end
describe '#initialize' do
subject { described_class.new(url:, type:, bits_length:) }
let(:url) { Html2rss::Url.from_relative('https://example.com/image.jpg', 'https://example.com') }
let(:type) { 'image/jpeg' }
let(:bits_length) { 123 }
it { expect(subject.url).to eq url }
it { expect(subject.type).to eq type }
it { expect(subject.bits_length).to eq bits_length }
context 'when URL is nil' do
let(:url) { nil }
it { expect { subject }.to raise_error(ArgumentError, 'An Enclosure requires an absolute URL') }
end
context 'when URL is relative' do
let(:url) { Html2rss::Url.from_relative('/image.jpg', 'https://example.com') }
it 'does not raise error' do
expect { subject }.not_to raise_error
end
it 'resolves to absolute URL' do
expect(subject.url.absolute?).to be true
end
end
end
end
| ruby | MIT | 400e796540e82a69e1f1e014b6f89c626acf32fd | 2026-01-04T17:45:12.820049Z | false |
html2rss/html2rss | https://github.com/html2rss/html2rss/blob/400e796540e82a69e1f1e014b6f89c626acf32fd/spec/lib/html2rss/rss_builder/stylesheet_spec.rb | spec/lib/html2rss/rss_builder/stylesheet_spec.rb | # frozen_string_literal: true
RSpec.describe Html2rss::RssBuilder::Stylesheet do
let(:rss_maker) { RSS::Maker::RSS20.new }
let(:stylesheet_config) do
described_class.new(
href: 'http://example.com/style.css',
type: 'text/css',
media: 'all'
)
end
describe '.add' do
it 'adds stylesheet XML tags to the RSS maker' do
expect do
described_class.add(rss_maker, [stylesheet_config])
end.to change { rss_maker.xml_stylesheets.size }.by(1)
end
end
describe '#initialize' do
context 'with valid parameters' do
it 'creates a Stylesheet object', :aggregate_failures do
stylesheet = described_class.new(href: 'http://example.com/style.css', type: 'text/css')
expect(stylesheet.href).to eq('http://example.com/style.css')
expect(stylesheet.type).to eq('text/css')
expect(stylesheet.media).to eq('all')
end
end
context 'with an invalid href' do
it 'raises an ArgumentError' do
expect do
described_class.new(href: 123, type: 'text/css')
end.to raise_error(ArgumentError, 'stylesheet.href must be a String')
end
end
context 'with an invalid type' do
it 'raises an ArgumentError' do
expect do
described_class.new(href: 'http://example.com/style.css', type: 'invalid/type')
end.to raise_error(ArgumentError, 'stylesheet.type invalid')
end
end
context 'with an invalid media' do
it 'raises an ArgumentError' do
expect do
described_class.new(href: 'http://example.com/style.css', type: 'text/css', media: 123)
end.to raise_error(ArgumentError, 'stylesheet.media must be a String')
end
end
end
describe '#to_xml' do
it 'returns the correct XML string' do
stylesheet = described_class.new(href: 'http://example.com/style.css', type: 'text/css')
expected_xml = <<~XML
<?xml-stylesheet href="http://example.com/style.css" type="text/css" media="all"?>
XML
expect(stylesheet.to_xml).to eq(expected_xml)
end
context 'with a different media' do
it 'returns the correct XML string with the specified media' do
stylesheet = described_class.new(href: 'http://example.com/style.css', type: 'text/css', media: 'screen')
expected_xml = <<~XML
<?xml-stylesheet href="http://example.com/style.css" type="text/css" media="screen"?>
XML
expect(stylesheet.to_xml).to eq(expected_xml)
end
end
end
end
| ruby | MIT | 400e796540e82a69e1f1e014b6f89c626acf32fd | 2026-01-04T17:45:12.820049Z | false |
html2rss/html2rss | https://github.com/html2rss/html2rss/blob/400e796540e82a69e1f1e014b6f89c626acf32fd/spec/lib/html2rss/rss_builder/channel_spec.rb | spec/lib/html2rss/rss_builder/channel_spec.rb | # frozen_string_literal: true
require 'timecop'
RSpec.describe Html2rss::RssBuilder::Channel do
subject(:instance) { described_class.new(response, overrides:) }
let(:overrides) { {} }
let(:response) { build_response(body:, headers:, url:) }
let(:body) { '' }
let(:headers) { default_headers }
let(:url) { Html2rss::Url.from_relative('https://example.com', 'https://example.com') }
# Test factories and shared data
def build_response(body:, headers:, url:)
Html2rss::RequestService::Response.new(body:, headers:, url:)
end
def default_headers
{
'content-type' => 'text/html',
'cache-control' => 'max-age=120, private, must-revalidate',
'last-modified' => 'Tue, 01 Jan 2019 00:00:00 GMT'
}
end
def build_html_with_meta(name:, content:)
"<head><meta name=\"#{name}\" content=\"#{content}\"></head>"
end
def build_html_with_property(property:, content:)
"<head><meta property=\"#{property}\" content=\"#{content}\"></head>"
end
# Shared examples for override behavior
shared_examples 'returns overridden value' do |method, override_key, expected_value|
context "when overrides[:#{override_key}] is present" do
let(:overrides) { { override_key => expected_value } }
it { expect(instance.public_send(method)).to eq(expected_value) }
end
end
shared_examples 'falls back to meta content' do |method, meta_name, expected_content|
context "with #{meta_name} meta tag" do
let(:body) { build_html_with_meta(name: meta_name, content: expected_content) }
it { expect(instance.public_send(method)).to eq(expected_content) }
end
end
describe '#title' do
context 'with a title' do
let(:body) { '<html><head><title>Example</title></head></html>' }
it 'extracts the title' do
expect(instance.title).to eq('Example')
end
end
context 'with a title containing extra spaces' do
let(:body) { '<html><head><title> Example Title </title></head></html>' }
it 'extracts and strips the title' do
expect(instance.title).to eq('Example Title')
end
end
context 'without a title' do
let(:body) { '<html><head></head></html>' }
it 'generates a title from the URL' do
expect(instance.title).to eq('example.com')
end
end
it_behaves_like 'returns overridden value', :title, :title, 'Custom Title'
context 'with empty title tag' do
let(:body) { '<html><head><title></title></head></html>' }
it 'generates a title from the URL' do
expect(instance.title).to eq('example.com')
end
end
end
describe '#language' do
let(:headers) { { 'content-language' => nil, 'content-type': 'text/html' } }
context 'with <html lang> attribute' do
let(:body) { '<!doctype html><html lang="fr"><body></body></html>' }
it 'extracts the language' do
expect(instance.language).to eq('fr')
end
end
context 'with a content-language header' do
let(:headers) { { 'content-language' => 'en-US', 'content-type': 'text/html' } }
it 'extracts the language' do
expect(instance.language).to eq('en')
end
end
context 'without a language' do
let(:body) { '<html></html>' }
it 'extracts nil' do
expect(instance.language).to be_nil
end
end
it_behaves_like 'returns overridden value', :language, :language, 'es'
context 'with lang attribute on a child element' do
let(:body) { '<html><body><div lang="de">Content</div></body></html>' }
it 'extracts the language from child element' do
expect(instance.language).to eq('de')
end
end
end
describe '#description' do
context 'with html_response having a description' do
let(:body) do
'<head><meta name="description" content="Example"></head>'
end
it 'extracts the description' do
expect(instance.description).to eq('Example')
end
end
context 'with html_response without having a description' do
let(:body) { '<head></head>' }
it 'generates a default description' do
expect(instance.description).to eq 'Latest items from https://example.com'
end
end
context 'when overrides[:description] is present and not empty' do
let(:overrides) { { description: 'Overridden Description' } }
it 'returns the overridden description' do
expect(instance.description).to eq('Overridden Description')
end
end
it_behaves_like 'falls back to meta content', :description, 'description', 'Example'
context 'when overrides[:description] is empty' do
let(:overrides) { { description: '' } }
let(:body) { build_html_with_meta(name: 'description', content: 'Example') }
it 'falls back to meta description' do
expect(instance.description).to eq('Example')
end
end
end
describe '#image' do
context 'with a og:image' do
let(:body) do
'<head><meta property="og:image" content="https://example.com/images/rock.jpg" />
</head>'
end
it 'extracts the url' do
expect(instance.image.to_s).to eq('https://example.com/images/rock.jpg')
end
end
context 'without a og:image' do
let(:body) { '<head></head>' }
it 'extracts nil' do
expect(instance.image).to be_nil
end
end
it_behaves_like 'returns overridden value', :image, :image, 'https://example.com/override.jpg'
context 'with og:image meta tag' do
let(:body) { build_html_with_property(property: 'og:image', content: 'https://example.com/image.jpg') }
it 'extracts the image URL' do
expect(instance.image.to_s).to eq('https://example.com/image.jpg')
end
end
context 'without html_response' do
let(:body) { '' }
let(:headers) { { 'content-type' => 'application/json' } }
it 'returns nil' do
expect(instance.image).to be_nil
end
end
end
describe '#last_build_date' do
context 'with a last-modified header' do
it 'extracts the last-modified header' do
expect(instance.last_build_date).to eq('Tue, 01 Jan 2019 00:00:00 GMT')
end
end
context 'without a last-modified header' do
let(:headers) do
{
'content-type' => 'text/html',
'cache-control' => 'max-age=120, private, must-revalidate'
}
end
it 'defaults to Time.now' do
Timecop.freeze(Time.now) do
expect(instance.last_build_date).to eq Time.now
end
end
end
end
describe '#ttl' do
context 'with a cache-control header' do
it 'extracts the ttl' do
expect(instance.ttl).to eq(2)
end
end
context 'without a cache-control header' do
let(:headers) { { 'content-type' => 'text/html' } }
it 'defaults to 360 minutes' do
expect(instance.ttl).to eq(360)
end
end
it_behaves_like 'returns overridden value', :ttl, :ttl, 60
end
describe '#author' do
it_behaves_like 'falls back to meta content', :author, 'author', 'John Doe'
it_behaves_like 'returns overridden value', :author, :author, 'Jane Doe'
context 'without html_response' do
let(:body) { '' }
let(:headers) { { 'content-type' => 'application/json' } }
it 'returns nil' do
expect(instance.author).to be_nil
end
end
end
end
| ruby | MIT | 400e796540e82a69e1f1e014b6f89c626acf32fd | 2026-01-04T17:45:12.820049Z | false |
html2rss/html2rss | https://github.com/html2rss/html2rss/blob/400e796540e82a69e1f1e014b6f89c626acf32fd/spec/lib/html2rss/config/dynamic_params_spec.rb | spec/lib/html2rss/config/dynamic_params_spec.rb | # frozen_string_literal: true
require 'spec_helper'
# Specs for DynamicParams: recursive interpolation of %<key>s-style format
# strings inside Strings, Hashes and Arrays; other objects pass through.
RSpec.describe Html2rss::Config::DynamicParams do
  describe '.call' do
    let(:params) { { 'name' => 'John', 'age' => '30' } }
    context 'when value is a String' do
      it 'replaces format string with given params' do
        value = 'Hello, %<name>s. You are %<age>s years old.'
        result = described_class.call(value, params)
        expect(result).to eq('Hello, John. You are 30 years old.')
      end
    end
    context 'when value is a Hash' do
      it 'replaces format string with given params recursively' do
        value = { greeting: 'Hello, %<name>s.', details: { age: 'You are %<age>s years old.' } }
        result = described_class.call(value, params)
        expect(result).to eq({ greeting: 'Hello, John.', details: { age: 'You are 30 years old.' } })
      end
    end
    context 'when value is an Array' do
      it 'replaces format string with given params recursively' do
        value = ['Hello, %<name>s.', 'You are %<age>s years old.']
        result = described_class.call(value, params)
        expect(result).to eq(['Hello, John.', 'You are 30 years old.'])
      end
    end
    context 'when value is an Object' do
      it 'returns the value as is' do
        value = 42
        result = described_class.call(value, params)
        expect(result).to eq(42)
      end
    end
    context 'with getter' do
      # The expectation shows the getter wins over `params` even for keys
      # present in the hash (name/age both resolved through the lambda).
      let(:getter) { ->(key) { "Mr. #{key.capitalize}" } }
      it 'replaces format string with given params and getter' do
        value = 'Hello, %<name>s. You are %<age>s years old.'
        result = described_class.call(value, params, getter:)
        expect(result).to eq('Hello, Mr. Name. You are Mr. Age years old.')
      end
    end
    context 'with "%<foo>d : %<bar>f" template format' do
      # Non-string conversions (%d, %f) must be supported, not just %s.
      it 'replaces format string with given params' do
        value = '%<foo>d : %<bar>f'
        result = described_class.call(value, { foo: 1, bar: 2.0 })
        expect(result).to eq('1 : 2.000000')
      end
    end
    context 'with replace_missing_with being a String' do
      it 'replaces missing params with the given value' do
        value = 'Hello, %<name>s. You are %<age>s years old. Your city is %<city>s.'
        result = described_class.call(value, params, replace_missing_with: 'unknown')
        expect(result).to eq('Hello, John. You are 30 years old. Your city is unknown.')
      end
    end
    context 'with replace_missing_with being nil' do
      it 'raises ParamsMissing error when a param is missing' do
        value = 'Hello, %<name>s. You are %<age>s years old. Your city is %<city>s.'
        expect do
          described_class.call(value, params)
        end.to raise_error(described_class::ParamsMissing)
      end
    end
  end
end
| ruby | MIT | 400e796540e82a69e1f1e014b6f89c626acf32fd | 2026-01-04T17:45:12.820049Z | false |
html2rss/html2rss | https://github.com/html2rss/html2rss/blob/400e796540e82a69e1f1e014b6f89c626acf32fd/spec/lib/html2rss/config/request_headers_spec.rb | spec/lib/html2rss/config/request_headers_spec.rb | # frozen_string_literal: true
require 'spec_helper'
# Specs for RequestHeaders.normalize: merging caller-supplied headers with
# browser-like defaults, deriving Accept-Language from the channel language
# and Host from the URL.
RSpec.describe Html2rss::Config::RequestHeaders do
  subject(:normalized) do
    described_class.normalize(headers, channel_language:, url:)
  end
  let(:headers) { {} }
  let(:channel_language) { 'de-DE' }
  let(:url) { 'https://example.com/feed' }
  describe '.browser_defaults' do
    it 'returns a mutable copy of the default headers' do
      # Mutating the returned hash must not leak into subsequent calls.
      expect { described_class.browser_defaults['User-Agent'] = 'Custom' }
        .not_to(change { described_class.browser_defaults['User-Agent'] })
    end
  end
  describe '#to_h' do
    context 'when no overrides are provided' do
      it 'adds Accept-Language from the channel language' do
        expect(normalized).to include('Accept-Language' => 'de-DE,de;q=0.9')
      end
      it 'infers the Host header from the URL' do
        expect(normalized).to include('Host' => 'example.com')
      end
    end
    context 'when overrides are provided' do
      # Override keys arrive lowercase and must be normalized to HTTP casing.
      let(:headers) do
        { 'accept' => 'application/json', 'x-test-header' => 'abc' }
      end
      it 'capitalizes custom header keys' do
        expect(normalized).to include('X-Test-Header' => 'abc')
      end
      it 'prepends custom Accept values while keeping defaults' do
        expected = "application/json,#{described_class::DEFAULT_ACCEPT}"
        expect(normalized).to include('Accept' => expected)
      end
    end
    context 'when the channel language is blank' do
      let(:channel_language) { ' ' }
      it 'falls back to en-US' do
        expect(normalized).to include('Accept-Language' => 'en-US,en;q=0.9')
      end
    end
    context 'when the URL is blank' do
      let(:url) { nil }
      it 'does not add a Host header' do
        expect(normalized).not_to include('Host')
      end
    end
  end
end
| ruby | MIT | 400e796540e82a69e1f1e014b6f89c626acf32fd | 2026-01-04T17:45:12.820049Z | false |
html2rss/html2rss | https://github.com/html2rss/html2rss/blob/400e796540e82a69e1f1e014b6f89c626acf32fd/spec/lib/html2rss/html_extractor/enclosure_extractor_spec.rb | spec/lib/html2rss/html_extractor/enclosure_extractor_spec.rb | # frozen_string_literal: true
require 'nokogiri'
# Specs for EnclosureExtractor: media sources (video/audio), document links
# (PDF), iframes, archive links and images inside an <article> are turned
# into enclosure hashes with absolute URLs and MIME types.
RSpec.describe Html2rss::HtmlExtractor::EnclosureExtractor do
  describe '.call' do
    subject(:enclosures) { described_class.call(article_tag, base_url) }
    let(:base_url) { 'http://example.com' }
    # Helper method to create article tag from HTML
    def article_tag_from(html)
      Nokogiri::HTML(html).at('article')
    end
    # Helper method to create expected enclosure hash
    def expected_enclosure(path, type)
      { url: Html2rss::Url.from_relative("http://example.com#{path}", 'http://example.com'), type: }
    end
    context 'when article_tag contains video and audio sources' do
      let(:article_tag) do
        article_tag_from(<<~HTML)
          <article>
            <video>
              <source src="/videos/video1.mp4" type="video/mp4">
              <source src="/videos/video2.webm" type="video/webm">
            </video>
            <audio src="/audios/audio1.mp3" type="audio/mpeg"></audio>
          </article>
        HTML
      end
      it 'extracts the enclosures with correct URLs and types' do
        expect(enclosures).to contain_exactly(
          expected_enclosure('/videos/video1.mp4', 'video/mp4'),
          expected_enclosure('/videos/video2.webm', 'video/webm'),
          expected_enclosure('/audios/audio1.mp3', 'audio/mpeg')
        )
      end
    end
    context 'when article_tag contains no media sources' do
      let(:article_tag) { article_tag_from('<article><p>No media here</p></article>') }
      it 'returns an empty array' do
        expect(enclosures).to be_empty
      end
    end
    context 'when article_tag contains sources with empty src attributes' do
      let(:article_tag) do
        article_tag_from(<<~HTML)
          <article>
            <video>
              <source src="" type="video/mp4">
            </video>
            <audio src="" type="audio/mpeg"></audio>
          </article>
        HTML
      end
      it 'ignores sources with empty src attributes' do
        expect(enclosures).to be_empty
      end
    end
    context 'when article_tag contains PDF links' do
      let(:article_tag) do
        article_tag_from(<<~HTML)
          <article>
            <a href="/documents/report.pdf">Download Report</a>
            <a href="/files/manual.pdf">Manual</a>
          </article>
        HTML
      end
      it 'extracts PDF enclosures with correct URLs and types' do
        expect(enclosures).to contain_exactly(
          expected_enclosure('/documents/report.pdf', 'application/pdf'),
          expected_enclosure('/files/manual.pdf', 'application/pdf')
        )
      end
    end
    context 'when article_tag contains iframe sources' do
      let(:article_tag) do
        article_tag_from(<<~HTML)
          <article>
            <iframe src="/embeds/video.html"></iframe>
            <iframe src="/widgets/chart.html"></iframe>
          </article>
        HTML
      end
      it 'extracts iframe enclosures with correct URLs and types' do
        expect(enclosures).to contain_exactly(
          expected_enclosure('/embeds/video.html', 'text/html'),
          expected_enclosure('/widgets/chart.html', 'text/html')
        )
      end
    end
    context 'when article_tag contains archive links' do
      let(:article_tag) do
        article_tag_from(<<~HTML)
          <article>
            <a href="/downloads/data.zip">Download ZIP</a>
            <a href="/archives/backup.tar.gz">Backup TAR.GZ</a>
            <a href="/files/package.tgz">Package TGZ</a>
          </article>
        HTML
      end
      # NOTE(review): .tar.gz/.tgz are asserted as application/zip here, which
      # mirrors the extractor's mapping — arguably application/gzip would be
      # the accurate type; confirm this is intentional.
      it 'extracts archive enclosures with correct URLs and types' do
        expect(enclosures).to contain_exactly(
          expected_enclosure('/downloads/data.zip', 'application/zip'),
          expected_enclosure('/archives/backup.tar.gz', 'application/zip'),
          expected_enclosure('/files/package.tgz', 'application/zip')
        )
      end
    end
    context 'when article_tag contains PDF links with empty href attributes' do
      let(:article_tag) do
        article_tag_from(<<~HTML)
          <article>
            <a href="">Empty PDF Link</a>
            <a href="/documents/valid.pdf">Valid PDF</a>
          </article>
        HTML
      end
      it 'ignores links with empty href attributes' do
        expect(enclosures).to contain_exactly(
          expected_enclosure('/documents/valid.pdf', 'application/pdf')
        )
      end
    end
    context 'when article_tag contains images' do
      let(:article_tag) do
        article_tag_from(<<~HTML)
          <article>
            <img src="/images/photo.jpg" alt="Photo">
            <img src="/gallery/image.png" alt="Gallery Image">
          </article>
        HTML
      end
      it 'extracts image enclosures with correct URLs and types' do
        expect(enclosures).to contain_exactly(
          expected_enclosure('/images/photo.jpg', 'image/jpeg'),
          expected_enclosure('/gallery/image.png', 'image/png')
        )
      end
    end
    context 'when article_tag contains mixed content types' do
      let(:article_tag) do
        article_tag_from(<<~HTML)
          <article>
            <img src="/images/hero.jpg" alt="Hero">
            <video>
              <source src="/videos/demo.mp4" type="video/mp4">
            </video>
            <a href="/documents/guide.pdf">Guide</a>
            <iframe src="/widgets/map.html"></iframe>
            <a href="/downloads/source.zip">Source Code</a>
          </article>
        HTML
      end
      let(:expected_enclosures) do
        [
          expected_enclosure('/images/hero.jpg', 'image/jpeg'),
          expected_enclosure('/videos/demo.mp4', 'video/mp4'),
          expected_enclosure('/documents/guide.pdf', 'application/pdf'),
          expected_enclosure('/widgets/map.html', 'text/html'),
          expected_enclosure('/downloads/source.zip', 'application/zip')
        ]
      end
      it 'extracts all types of enclosures' do
        expect(enclosures).to match_array(expected_enclosures)
      end
    end
  end
end
| ruby | MIT | 400e796540e82a69e1f1e014b6f89c626acf32fd | 2026-01-04T17:45:12.820049Z | false |
html2rss/html2rss | https://github.com/html2rss/html2rss/blob/400e796540e82a69e1f1e014b6f89c626acf32fd/spec/lib/html2rss/html_extractor/image_extractor_spec.rb | spec/lib/html2rss/html_extractor/image_extractor_spec.rb | # frozen_string_literal: true
require 'nokogiri'
RSpec.describe Html2rss::HtmlExtractor::ImageExtractor do
let(:article_tag) { Nokogiri::HTML.fragment(html) }
describe '.call' do
subject(:url) { described_class.call(article_tag, base_url: 'https://example.com').to_s.encode('UTF-8') }
let(:html) do
<<~HTML
<article>
<img src="data:image/jpeg;base64,/9j/4AAQSkZJRgABAgAAZABkAAD" alt="Data Image" />
<img src="image.jpg" alt="Image" />
</article>
HTML
end
context 'when image source is present in article tag' do
it 'returns the absolute URL of the image source' do
expect(url).to eq('https://example.com/image.jpg')
end
end
context 'when image source is present and image url contains commas' do
let(:html) do
<<~HTML
<article>
<img srcset="image,with,commas.jpg 256w, another,image,with,commas.jpg 1w" alt="Image with commas" />
</article>
HTML
end
it 'returns the absolute URL of the image source' do
expect(url).to eq('https://example.com/image,with,commas.jpg')
end
end
context 'when image source is present in srcset attribute' do
let(:html) do
<<~HTML
<article>
<picture>
<source srcset="
https://example.com/wirtschaft/Deutschland-muss-sich-technologisch-weiterentwickeln-schnell.1200w.jpg 1200w,
https://example.com/wirtschaft/Deutschland-muss-sich-technologisch-weiterentwickeln-schnell.200w.jpg 200w,
https://example.com/wirtschaft/Deutschland-muss-sich-technologisch-weiterentwickeln-schnell.2000w.jpg 2000w" />
<img src="https://example.com/wirtschaft/Deutschland-muss-sich-technologisch-weiterentwickeln-schnell.20w.jpg"
alt="Kein alternativer Text für dieses Bild vorhanden" loading="lazy" decoding="async" />
</picture>
</article>
HTML
end
it 'returns the absolute URL of the "largest" image source' do
expect(url).to eq('https://example.com/wirtschaft/Deutschland-muss-sich-technologisch-weiterentwickeln-schnell.2000w.jpg')
end
end
context 'when [srcset] contains no spaces between sources' do
let(:html) do
<<~HTML
<picture>
<img srcset="https://example.com/image.88w.jpg 88w,https://example.com/image.175w.jpg 175w"/>
</picture>
HTML
end
it { is_expected.to eq('https://example.com/image.175w.jpg') }
end
context 'when image source is present in style attribute' do
['background-image: url("image.jpg");',
'background: url(image.jpg);',
"background: url('image.jpg');"].each do |style|
let(:html) do
<<~HTML
<article>
<div style="#{style}"></div>
</article>
HTML
end
it "returns the absolute URL from #{style}" do
expect(url).to eq('https://example.com/image.jpg')
end
end
end
context 'when image source is not present' do
let(:html) { '<article></article>' }
it 'returns nil' do
expect(described_class.call(article_tag, base_url: nil)).to be_nil
end
end
end
end
| ruby | MIT | 400e796540e82a69e1f1e014b6f89c626acf32fd | 2026-01-04T17:45:12.820049Z | false |
html2rss/html2rss | https://github.com/html2rss/html2rss/blob/400e796540e82a69e1f1e014b6f89c626acf32fd/lib/html2rss.rb | lib/html2rss.rb | # frozen_string_literal: true
require 'zeitwerk'
loader = Zeitwerk::Loader.for_gem
# Without this, lib/html2rss/cli.rb would be expected to define Cli, not CLI.
loader.inflector.inflect('cli' => 'CLI')
loader.setup
require 'logger'
##
# The Html2rss namespace.
module Html2rss
  ##
  # The logger instance.
  Log = Logger.new($stdout)
  # LOG_LEVEL env var selects the severity; defaults to :warn.
  # (:warn.upcase => :WARN, a form Logger#level= accepts.)
  Log.level = ENV.fetch('LOG_LEVEL', :warn).upcase.to_sym
  Log.formatter = proc do |severity, datetime, _progname, msg|
    "#{datetime} [#{severity}] #{msg}\n"
  end
  # Loads a feed configuration from a YAML file.
  # @param file [String] path to the YAML file
  # @param feed_name [String, nil] feed to pick from a multi-feed file
  # @return [Object] whatever Config.load_yaml produces
  def self.config_from_yaml_file(file, feed_name = nil)
    Config.load_yaml(file, feed_name)
  end
  ##
  # Returns an RSS object generated from the provided configuration.
  #
  # Example:
  #
  #    feed = Html2rss.feed(
  #      strategy: :faraday,
  #      headers: { 'User-Agent' => 'Mozilla/5.0' },
  #      channel: { name: 'StackOverflow: Hot Network Questions', url: 'https://stackoverflow.com' },
  #      selectors: {
  #        items: { selector: '#hot-network-questions > ul > li' },
  #        title: { selector: 'a' },
  #        link: { selector: 'a', extractor: 'href' }
  #      },
  #      auto_source: {}
  #    )
  #    # => #<RSS::Rss:0x00007fb2f48d14a0 ...>
  #
  # @param config [Hash<Symbol, Object>] configuration.
  # @return [RSS::Rss] RSS object generated from the configuration.
  def self.feed(raw_config)
    config = Config.from_hash(raw_config, params: raw_config[:params])
    response = perform_request(config)
    articles = collect_articles(response, config)
    # Remove duplicate articles before the feed is rendered.
    processed_articles = Articles::Deduplicator.new(articles).call
    build_feed(response, config, processed_articles)
  end
  ##
  # Scrapes the provided URL and returns an RSS object.
  # No need for a "feed config".
  #
  # @param url [String] the URL to automatically source the feed from
  # @param strategy [Symbol] the request strategy to use
  # @param items_selector [String] CSS selector for items (will be enhanced) (optional)
  # @return [RSS::Rss]
  def self.auto_source(url, strategy: :faraday, items_selector: nil)
    # NOTE(review): merge! mutates the hash returned by Config.default_config —
    # assumes that method returns a fresh copy on every call; confirm, otherwise
    # the strategy/url/auto_source keys leak into subsequent invocations.
    config = Html2rss::Config.default_config.merge!(strategy:)
    config[:channel][:url] = url
    config[:auto_source] = Html2rss::AutoSource::DEFAULT_CONFIG
    config[:selectors] = { items: { selector: items_selector, enhance: true } } if items_selector
    feed(config)
  end
  class << self
    private
    # Runs the HTTP(S) request described by the config through RequestService.
    def perform_request(config)
      RequestService.execute(
        RequestService::Context.new(
          url: config.url,
          headers: config.headers
        ),
        strategy: config.strategy
      )
    end
    # Gathers articles from the selector-based scraper and/or auto-source,
    # depending on which parts of the config are present.
    def collect_articles(response, config)
      [].tap do |articles|
        if (selectors = config.selectors)
          selector_service = Selectors.new(response, selectors:, time_zone: config.time_zone)
          articles.concat(selector_service.articles)
        end
        # `next` leaves the tap block early when auto_source is not configured.
        next unless (auto_source = config.auto_source)
        auto_source_service = AutoSource.new(response, auto_source)
        articles.concat(auto_source_service.articles)
      end
    end
    # Assembles the final RSS document from channel metadata and articles.
    def build_feed(response, config, articles)
      channel = RssBuilder::Channel.new(response, overrides: config.channel)
      RssBuilder.new(channel:, articles:, stylesheets: config.stylesheets).call
    end
  end
end
loader.eager_load
| ruby | MIT | 400e796540e82a69e1f1e014b6f89c626acf32fd | 2026-01-04T17:45:12.820049Z | false |
html2rss/html2rss | https://github.com/html2rss/html2rss/blob/400e796540e82a69e1f1e014b6f89c626acf32fd/lib/html2rss/request_service.rb | lib/html2rss/request_service.rb | # frozen_string_literal: true
require 'singleton'
require 'forwardable'
module Html2rss
  ##
  # Fetches website URLs and hands back their HTML for further processing.
  # Strategies are pluggable, e.g. to integrate Browserless.io.
  class RequestService
    include Singleton
    class UnknownStrategy < Html2rss::Error; end
    class InvalidUrl < Html2rss::Error; end
    class UnsupportedUrlScheme < Html2rss::Error; end
    class UnsupportedResponseContentType < Html2rss::Error; end
    class << self
      extend Forwardable
      # Expose the singleton's public API at class level.
      def_delegators :instance,
                     :default_strategy_name,
                     :default_strategy_name=,
                     :strategy_names,
                     :register_strategy,
                     :unregister_strategy,
                     :strategy_registered?,
                     :execute
    end
    def initialize
      @strategies = {
        faraday: FaradayStrategy,
        browserless: BrowserlessStrategy
      }
      @default_strategy_name = :faraday
    end
    # @return [Symbol] name of the strategy used when none is specified
    attr_reader :default_strategy_name
    ##
    # Changes the default strategy.
    # @param strategy [Symbol] name of an already registered strategy
    # @raise [UnknownStrategy] when no strategy is registered under that name
    def default_strategy_name=(strategy)
      raise UnknownStrategy unless strategy_registered?(strategy)
      @default_strategy_name = strategy.to_sym
    end
    # @return [Array<String>] names of all registered strategies
    def strategy_names = @strategies.each_key.map(&:to_s)
    ##
    # Adds a strategy to the registry (replacing any existing one of that name).
    # @param name [Symbol] name to register the strategy under
    # @param strategy_class [Class] class implementing the strategy
    # @raise [ArgumentError] when strategy_class is not a Class
    def register_strategy(name, strategy_class)
      unless strategy_class.is_a?(Class)
        raise ArgumentError, "Expected a Class for strategy, got #{strategy_class.class}"
      end
      @strategies[name.to_sym] = strategy_class
    end
    ##
    # @param name [Symbol] the name of the strategy
    # @return [Boolean] whether a strategy is registered under that name
    def strategy_registered?(name) = @strategies.key?(name.to_sym)
    ##
    # Removes a strategy from the registry.
    # @param name [Symbol] the name of the strategy
    # @return [Boolean] true when a strategy was removed, false otherwise
    # @raise [ArgumentError] when attempting to unregister the default strategy
    def unregister_strategy(name) # rubocop:disable Naming/PredicateMethod
      key = name.to_sym
      raise ArgumentError, 'Cannot unregister the default strategy.' if key == @default_strategy_name
      !!@strategies.delete(key)
    end
    ##
    # Runs the request described by ctx through the chosen strategy.
    # @param ctx [Context] the context for the request
    # @param strategy [Symbol] strategy name (defaults to the default strategy)
    # @return [Response] the response produced by the strategy
    # @raise [UnknownStrategy] when the strategy is not registered
    def execute(ctx, strategy: default_strategy_name)
      strategy_class = @strategies[strategy.to_sym]
      unless strategy_class
        raise UnknownStrategy,
              "The strategy '#{strategy}' is not known. Available strategies: #{strategy_names.join(', ')}"
      end
      strategy_class.new(ctx).execute
    end
  end
end
| ruby | MIT | 400e796540e82a69e1f1e014b6f89c626acf32fd | 2026-01-04T17:45:12.820049Z | false |
html2rss/html2rss | https://github.com/html2rss/html2rss/blob/400e796540e82a69e1f1e014b6f89c626acf32fd/lib/html2rss/html_extractor.rb | lib/html2rss/html_extractor.rb | # frozen_string_literal: true
module Html2rss
  ##
  # HtmlExtractor is responsible for extracting details (headline, url, images, etc.)
  # from an article_tag.
  class HtmlExtractor
    # Tags whose content is never user-visible; skipped during text extraction.
    INVISIBLE_CONTENT_TAGS = %w[svg script noscript style template].to_set.freeze
    HEADING_TAGS = %w[h1 h2 h3 h4 h5 h6].freeze
    # NOTE(review): this is an Array of independent selector fragments
    # (":not(h1)", ..., "svg", ...) handed directly to Nokogiri's #css in
    # #extract_description — confirm Nokogiri treats an Array argument as a
    # list of alternative rules and that the resulting match set is intended.
    NON_HEADLINE_SELECTOR = (HEADING_TAGS.map { |tag| ":not(#{tag})" } + INVISIBLE_CONTENT_TAGS.to_a).freeze
    # Anchors with a usable href: non-empty and not a pure fragment,
    # javascript:, mailto:, tel:, file://, sms: or data: link.
    MAIN_ANCHOR_SELECTOR = begin
      buf = +'a[href]:not([href=""])'
      %w[# javascript: mailto: tel: file:// sms: data:].each do |prefix|
        buf << %[:not([href^="#{prefix}"])]
      end
      buf.freeze
    end
    class << self
      ##
      # Extracts visible text from a given node and its children.
      #
      # @param tag [Nokogiri::XML::Node] the node from which to extract visible text
      # @param separator [String] separator used to join text fragments (default is a space)
      # @return [String, nil] the concatenated visible text, or nil if none is found
      def extract_visible_text(tag, separator: ' ')
        parts = tag.children.each_with_object([]) do |child, result|
          next unless visible_child?(child)
          # Leaf nodes contribute their text; element nodes recurse.
          raw_text = child.children.empty? ? child.text : extract_visible_text(child)
          next unless raw_text
          text = raw_text.strip
          result << text unless text.empty?
        end
        parts.join(separator).squeeze(' ').strip unless parts.empty?
      end
      private
      # A child is visible unless it is an invisible tag or an in-page
      # fragment link (<a href="#...">).
      def visible_child?(node)
        !INVISIBLE_CONTENT_TAGS.include?(node.name) &&
          !(node.name == 'a' && node['href']&.start_with?('#'))
      end
    end
    # @param article_tag [Nokogiri::XML::Node] element to extract details from
    # @param base_url [String] used to absolutize relative URLs
    # @raise [ArgumentError] when article_tag is nil
    def initialize(article_tag, base_url:)
      raise ArgumentError, 'article_tag is required' unless article_tag
      @article_tag = article_tag
      @base_url = base_url
    end
    # @return [Hash] all extracted article details keyed by attribute name
    def call
      {
        title: extract_title,
        url: extract_url,
        image: extract_image,
        description: extract_description,
        id: generate_id,
        published_at: extract_published_at,
        enclosures: extract_enclosures,
        categories: extract_categories
      }
    end
    private
    attr_reader :article_tag, :base_url
    # Absolute article URL from the main anchor, with any #fragment removed.
    # Memoized; nil when no usable anchor is found.
    def extract_url
      @extract_url ||= begin
        href = find_main_anchor&.[]('href').to_s
        Url.from_relative(href.split('#').first.strip, base_url) unless href.empty?
      end
    end
    # Finds the closest ancestor anchor element matching the MAIN_ANCHOR_SELECTOR.
    def find_main_anchor
      HtmlNavigator.find_closest_selector_upwards(article_tag, MAIN_ANCHOR_SELECTOR)
    end
    def extract_title
      self.class.extract_visible_text(heading) if heading
    end
    # Picks the most prominent heading: the smallest tag name present
    # ('h1' < 'h2' by string comparison), then — among those — the heading
    # with the longest visible text. Memoized.
    def heading
      @heading ||= begin
        heading_tags = article_tag.css(HEADING_TAGS.join(',')).group_by(&:name)
        smallest_heading = heading_tags.keys.min
        if smallest_heading
          heading_tags[smallest_heading]&.max_by do |tag|
            self.class.extract_visible_text(tag)&.size.to_i
          end
        end
      end
    end
    # Visible text of non-headline content; falls back to the whole tag's
    # visible text when the filtered selection yields nothing.
    def extract_description
      text = self.class.extract_visible_text(article_tag.css(NON_HEADLINE_SELECTOR), separator: '<br>')
      return text if text && !text.empty?
      description = self.class.extract_visible_text(article_tag)
      return nil if description.nil? || description.strip.empty?
      description.strip
    end
    # First non-empty candidate: own id, first descendant id, URL path, URL query.
    def generate_id
      [
        article_tag['id'],
        article_tag.at_css('[id]')&.attr('id'),
        extract_url&.path,
        extract_url&.query
      ].compact.reject(&:empty?).first
    end
    def extract_image = ImageExtractor.call(article_tag, base_url:)
    def extract_published_at = DateExtractor.call(article_tag)
    def extract_enclosures = EnclosureExtractor.call(article_tag, base_url)
    def extract_categories = CategoryExtractor.call(article_tag)
  end
end
| ruby | MIT | 400e796540e82a69e1f1e014b6f89c626acf32fd | 2026-01-04T17:45:12.820049Z | false |
html2rss/html2rss | https://github.com/html2rss/html2rss/blob/400e796540e82a69e1f1e014b6f89c626acf32fd/lib/html2rss/category_extractor.rb | lib/html2rss/category_extractor.rb | # frozen_string_literal: true
module Html2rss
  ##
  # Discovers category names for an article by scanning its DOM subtree for
  # elements whose class or data attributes contain category-ish terms
  # (e.g. "tag", "topic", "section").
  class CategoryExtractor
    # Terms that commonly appear in class/attribute names marking categories.
    CATEGORY_TERMS = %w[category tag topic section label theme subject].freeze
    # CSS selectors matching elements whose class contains one of the terms.
    CATEGORY_SELECTORS = CATEGORY_TERMS.map { |term| "[class*=\"#{term}\"]" }.freeze
    # Case-insensitive pattern matching category-ish attribute names.
    CATEGORY_ATTR_PATTERN = /#{CATEGORY_TERMS.join('|')}/i
    ##
    # Extracts categories from the given article tag.
    #
    # @param article_tag [Nokogiri::XML::Element, nil] element to scan
    # @return [Array<String>] stripped, non-empty category strings (may be empty)
    def self.call(article_tag)
      return [] unless article_tag
      # One traversal collects every category type; strip/reject afterwards.
      extract_all_categories(article_tag).filter_map do |category|
        stripped = category.strip
        stripped unless stripped.empty?
      end
    end
    ##
    # Single DOM traversal collecting text- and attribute-based categories.
    #
    # @param article_tag [Nokogiri::XML::Element] element whose subtree is scanned
    # @return [Set<String>] collected category strings
    def self.extract_all_categories(article_tag)
      article_tag.css('*').each_with_object(Set.new) do |element, categories|
        # Text categories only from elements with a category-ish class name.
        categories.merge(extract_text_categories(element)) if element['class']&.match?(CATEGORY_ATTR_PATTERN)
        # Data-attribute categories from every element.
        categories.merge(extract_element_data_categories(element))
      end
    end
    ##
    # Collects values of category-ish attributes on a single element.
    #
    # @param element [Nokogiri::XML::Element] element to inspect
    # @return [Set<String>] attribute-derived category strings
    def self.extract_element_data_categories(element)
      values = element.attributes.each_value.filter_map do |attribute|
        next unless attribute.name.match?(CATEGORY_ATTR_PATTERN)
        value = attribute.value&.strip
        value unless value.nil? || value.empty?
      end
      Set.new(values)
    end
    ##
    # Splits an element's visible text into discrete category values,
    # preferring its anchors' texts when any anchor yields text.
    #
    # @param element [Nokogiri::XML::Element] element to inspect
    # @return [Set<String>] text-derived category strings
    def self.extract_text_categories(element)
      link_texts = element.css('a').filter_map do |anchor|
        HtmlExtractor.extract_visible_text(anchor)
      end
      return Set.new(link_texts.reject(&:empty?)) if link_texts.any?
      visible = HtmlExtractor.extract_visible_text(element)
      return Set.new unless visible
      Set.new(visible.split(/\n+/).map(&:strip).reject(&:empty?))
    end
  end
end
| ruby | MIT | 400e796540e82a69e1f1e014b6f89c626acf32fd | 2026-01-04T17:45:12.820049Z | false |
html2rss/html2rss | https://github.com/html2rss/html2rss/blob/400e796540e82a69e1f1e014b6f89c626acf32fd/lib/html2rss/version.rb | lib/html2rss/version.rb | # frozen_string_literal: true
##
# The Html2rss namespace.
module Html2rss
  # Gem version, following Semantic Versioning.
  VERSION = '0.17.0'
  # Explicitly document that VERSION is part of the gem's public API.
  public_constant :VERSION
end
| ruby | MIT | 400e796540e82a69e1f1e014b6f89c626acf32fd | 2026-01-04T17:45:12.820049Z | false |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.