repo stringlengths 5 92 | file_url stringlengths 80 287 | file_path stringlengths 5 197 | content stringlengths 0 32.8k | language stringclasses 1
value | license stringclasses 7
values | commit_sha stringlengths 40 40 | retrieved_at stringdate 2026-01-04 15:37:27 2026-01-04 17:58:21 | truncated bool 2
classes |
|---|---|---|---|---|---|---|---|---|
picatz/shodanz | https://github.com/picatz/shodanz/blob/134eac5265d700efe1b6eec004dc3c25270cbc6c/lib/shodanz.rb | lib/shodanz.rb | # frozen_string_literal: true
require 'json'
require 'async'
require 'console'
require 'async/http/internet'
require 'shodanz/version'
require 'shodanz/errors'
require 'shodanz/api'
require 'shodanz/client'
Console.logger.level = 4
# Shodanz is a modern Ruby gem for Shodan, the world's
# first search engine for Internet-connected devices.
module Shodanz
  # Shortcut for {Shodanz::API}
  #
  # @return [Module] the API namespace module (REST/Streaming/Exploits).
  def self.api
    API
  end

  # Shortcut for {Shodanz::Client}
  #
  # @return [Class] the aggregate client class.
  def self.client
    Client
  end
end
| ruby | MIT | 134eac5265d700efe1b6eec004dc3c25270cbc6c | 2026-01-04T17:51:46.633447Z | false |
picatz/shodanz | https://github.com/picatz/shodanz/blob/134eac5265d700efe1b6eec004dc3c25270cbc6c/lib/shodanz/version.rb | lib/shodanz/version.rb | # frozen_string_literal: true
module Shodanz
  # Gem version string, following semantic versioning (major.minor.patch).
  VERSION = '2.0.8'
end
| ruby | MIT | 134eac5265d700efe1b6eec004dc3c25270cbc6c | 2026-01-04T17:51:46.633447Z | false |
picatz/shodanz | https://github.com/picatz/shodanz/blob/134eac5265d700efe1b6eec004dc3c25270cbc6c/lib/shodanz/errors.rb | lib/shodanz/errors.rb | # frozen_string_literal: true
module Shodanz
  # Container for every exception type the Shodanz clients may raise
  # while talking to the Shodan API. Each error carries a sensible
  # default message but accepts a custom one.
  module Errors
    # Raised when the API reports that requests exceed the allowed
    # 1 request/second rate.
    class RateLimited < StandardError
      def initialize(message = 'Request rate limit reached (1 request/ second). Please wait a second before trying again and slow down your API calls.')
        super(message)
      end
    end

    # Raised when the API has no data for the requested resource.
    class NoInformation < StandardError
      def initialize(message = 'No information available.')
        super(message)
      end
    end

    # Raised when no API key was provided and none could be found in
    # the environment.
    class NoAPIKey < StandardError
      def initialize(message = 'No API key has been found or provided! ( setup your SHODAN_API_KEY environment variable )')
        super(message)
      end
    end

    # Raised when a search was attempted with an empty query string.
    class NoQuery < StandardError
      def initialize(message = 'Empty search query.')
        super(message)
      end
    end

    # Raised when the account's subscription plan does not permit the
    # attempted action.
    class AccessDenied < StandardError
      def initialize(message = 'Shodan subscription doesn\'t support action, check API permissions!')
        super(message)
      end
    end

    # Raised when the API rejects the supplied key as invalid.
    class InvalidKey < StandardError
      def initialize(message = 'Invalid API key used, or none given!')
        super(message)
      end
    end
  end
end
| ruby | MIT | 134eac5265d700efe1b6eec004dc3c25270cbc6c | 2026-01-04T17:51:46.633447Z | false |
picatz/shodanz | https://github.com/picatz/shodanz/blob/134eac5265d700efe1b6eec004dc3c25270cbc6c/lib/shodanz/api.rb | lib/shodanz/api.rb | require_relative 'apis/rest.rb'
require_relative 'apis/streaming.rb'
require_relative 'apis/exploits.rb'
module Shodanz
  # There are 2 APIs for accessing Shodan: the REST API
  # and the Streaming API. The REST API provides methods
  # to search Shodan, look up hosts, get summary information
  # on queries and a variety of utility methods to make
  # developing easier. The Streaming API provides a raw,
  # real-time feed of the data that Shodan is currently
  # collecting. There are several feeds that can be subscribed
  # to, but the data can't be searched or otherwise interacted
  # with; it's a live feed of data meant for large-scale
  # consumption of Shodan's information.
  #
  # @author Kent 'picat' Gruber
  module API
    # REST API class.
    # @return [Class] Shodanz::API::REST
    def self.rest
      REST
    end

    # Streaming API class.
    # @return [Class] Shodanz::API::Streaming
    def self.streaming
      Streaming
    end

    # Exploits API class.
    # @return [Class] Shodanz::API::Exploits
    def self.exploits
      Exploits
    end
  end
end
| ruby | MIT | 134eac5265d700efe1b6eec004dc3c25270cbc6c | 2026-01-04T17:51:46.633447Z | false |
picatz/shodanz | https://github.com/picatz/shodanz/blob/134eac5265d700efe1b6eec004dc3c25270cbc6c/lib/shodanz/client.rb | lib/shodanz/client.rb | # frozen_string_literal: true
module Shodanz
  # General client container class for all three
  # of the available API endpoints in a
  # convenient place to use.
  #
  # @author Kent 'picat' Gruber
  class Client
    # @return [Shodanz::API::REST]
    attr_reader :rest_api
    # @return [Shodanz::API::Streaming]
    attr_reader :streaming_api
    # @return [Shodanz::API::Exploits]
    attr_reader :exploits_api

    # Create a new client to connect to any of the APIs.
    #
    # Optionally provide your Shodan API key, or the environment
    # variable SHODAN_API_KEY will be used.
    #
    # @raise [Shodanz::Errors::NoAPIKey] when no key is given or found.
    def initialize(key: ENV['SHODAN_API_KEY'])
      raise Shodanz::Errors::NoAPIKey if key.nil?
      # pass the given API key to each of the underlying clients
      #
      # Note: you can optionally change these API keys later, if you
      # had multiple for whatever reason. ;)
      @rest_api = Shodanz.api.rest.new(key: key)
      @streaming_api = Shodanz.api.streaming.new(key: key)
      @exploits_api = Shodanz.api.exploits.new(key: key)
    end

    # Returns all services that have been found on the given host IP.
    def host(ip, **params)
      rest_api.host(ip, **params)
    end

    # Returns only the total number of results and facet information
    # for a search; does not consume query credits.
    def host_count(query = '', facets: {}, **params)
      rest_api.host_count(query, facets: facets, **params)
    end

    # Search Shodan with the same query syntax as the website.
    def host_search(query = '', facets: {}, page: 1, minify: true, **params)
      rest_api.host_search(query, facets: facets, page: page, minify: minify, **params)
    end

    # Determine which filters are used by a query string.
    #
    # Fix: this previously delegated to #host_search (the wrong REST
    # endpoint) and passed the kwargs hash positionally, which breaks
    # under Ruby 3 keyword-argument separation.
    def host_search_tokens(query = '', **params)
      rest_api.host_search_tokens(query, **params)
    end

    # List of port numbers that the crawlers are looking for.
    def ports
      rest_api.ports
    end

    # List all protocols usable for on-demand Internet scans.
    def protocols
      rest_api.protocols
    end

    # Request Shodan to crawl the given IPs/netblocks (uses scan credits).
    def scan(*ips)
      rest_api.scan(*ips)
    end

    # Request Shodan to crawl the Internet for a specific port.
    #
    # Fix: this previously delegated to #scan with a raw hash; it now
    # calls the dedicated REST crawl_for endpoint with keyword args.
    def crawl_for(**params)
      rest_api.crawl_for(**params)
    end

    # Check the progress of a previously submitted scan request.
    def scan_status(id)
      rest_api.scan_status(id)
    end

    # List search queries that users have saved in Shodan.
    # (Fix: forward params as keywords, not a positional hash.)
    def community_queries(**params)
      rest_api.community_queries(**params)
    end

    # Search the directory of saved community search queries.
    # (Fix: forward params as keywords, not a positional hash.)
    def search_for_community_query(query, **params)
      rest_api.search_for_community_query(query, **params)
    end

    # Popular tags for the saved search queries in Shodan.
    def popular_query_tags(size = 10)
      rest_api.popular_query_tags(size)
    end

    # Information about the Shodan account linked to this API key.
    def profile
      rest_api.profile
    end

    # Look up the IP addresses for the provided hostnames.
    def resolve(*hostnames)
      rest_api.resolve(*hostnames)
    end

    # Look up the hostnames defined for the given IP addresses.
    def reverse_lookup(*ips)
      rest_api.reverse_lookup(*ips)
    end

    # HTTP headers your client sends when connecting to a webserver.
    def http_headers
      rest_api.http_headers
    end

    # Your current IP address as seen from the Internet.
    def my_ip
      rest_api.my_ip
    end

    # Honeypot probability score from 0 (not) to 1.0 (is a honeypot).
    def honeypot_score(ip)
      rest_api.honeypot_score(ip)
    end

    # Information about the API plan belonging to the given API key.
    def info
      rest_api.info
    end

    # Search the exploit data sources.
    def exploit_search(query = '', page: 1, **params)
      exploits_api.search(query, page: page, **params)
    end

    # Count results across the exploit data sources.
    def exploit_count(query = '', page: 1, **params)
      exploits_api.count(query, page: page, **params)
    end
  end
end
| ruby | MIT | 134eac5265d700efe1b6eec004dc3c25270cbc6c | 2026-01-04T17:51:46.633447Z | false |
picatz/shodanz | https://github.com/picatz/shodanz/blob/134eac5265d700efe1b6eec004dc3c25270cbc6c/lib/shodanz/apis/rest.rb | lib/shodanz/apis/rest.rb | require_relative 'utils.rb'
# frozen_string_literal: true
module Shodanz
  module API
    # The REST API provides methods to search Shodan, look up
    # hosts, get summary information on queries and a variety
    # of other utilities. This requires you to have an API key
    # which you can get from Shodan.
    #
    # @author Kent 'picat' Gruber
    class REST
      include Shodanz::API::Utils

      # @return [String]
      attr_accessor :key

      # The path to the REST API endpoint.
      URL = 'https://api.shodan.io/'

      # @param key [String] SHODAN API key, defaulted to the *SHODAN_API_KEY* enviroment variable.
      def initialize(key: ENV['SHODAN_API_KEY'])
        @url = URL
        # One persistent HTTP client per API instance, bound to the REST endpoint.
        @client = Async::HTTP::Client.new(Async::HTTP::Endpoint.parse(@url))
        self.key = key
        warn 'No key has been found or provided!' unless key?
      end

      # Check if there's an API key.
      # @return [Boolean]
      def key?
        return true if @key
        false
      end

      # Returns all services that have been found on the given host IP.
      # @param ip [String]
      # @option params [Hash]
      # @return [Hash]
      # == Examples
      #   # Typical usage.
      #   rest_api.host("8.8.8.8")
      #
      #   # All historical banners should be returned.
      #   rest_api.host("8.8.8.8", history: true)
      #
      #   # Only return the list of ports and the general host information, no banners.
      #   rest_api.host("8.8.8.8", minify: true)
      def host(ip, **params)
        get("shodan/host/#{ip}", **params)
      end

      # This method behaves identical to "/shodan/host/search" with the only
      # difference that this method does not return any host results, it only
      # returns the total number of results that matched the query and any
      # facet information that was requested. As a result this method does
      # not consume query credits.
      # == Examples
      #   rest_api.host_count("apache")
      #   rest_api.host_count("apache", country: "US")
      #   rest_api.host_count("apache", country: "US", state: "MI")
      #   rest_api.host_count("apache", country: "US", state: "MI", city: "Detroit")
      def host_count(query = '', facets: {}, **params)
        # extra keyword params become "key:value" filters appended to the query
        params[:query] = query
        params = turn_into_query(**params)
        facets = turn_into_facets(**facets)
        get('shodan/host/count', **params.merge(**facets))
      end

      # Search Shodan using the same query syntax as the website and use facets
      # to get summary information for different properties.
      # == Example
      #   rest_api.host_search("apache", country: "US", facets: { city: "Detroit" }, page: 1, minify: false)
      def host_search(query = '', facets: {}, page: 1, minify: true, **params)
        params[:query] = query
        params = turn_into_query(**params)
        facets = turn_into_facets(**facets)
        # page/minify are added after query assembly so they stay distinct params
        params[:page] = page
        params[:minify] = minify
        get('shodan/host/search', **params.merge(**facets))
      end

      # This method lets you determine which filters are being used by
      # the query string and what parameters were provided to the filters.
      def host_search_tokens(query = '', **params)
        params[:query] = query
        params = turn_into_query(**params)
        get('shodan/host/search/tokens', **params)
      end

      # This method returns a list of port numbers that the crawlers are looking for.
      def ports
        get('shodan/ports')
      end

      # List all protocols that can be used when performing on-demand Internet scans via Shodan.
      def protocols
        get('shodan/protocols')
      end

      # Use this method to request Shodan to crawl a network.
      #
      # This method uses API scan credits: 1 IP consumes 1 scan credit. You
      # must have a paid API plan (either one-time payment or subscription)
      # in order to use this method.
      #
      # IP, IPs or netblocks (in CIDR notation) that should get crawled.
      def scan(*ips)
        post('shodan/scan', body: {ips: ips.join(',')})
      end

      # Use this method to request Shodan to crawl the Internet for a specific port.
      #
      # This method is restricted to security researchers and companies with
      # a Shodan Data license. To apply for access to this method as a researcher,
      # please email jmath@shodan.io with information about your project.
      # Access is restricted to prevent abuse.
      #
      # == Example
      #   rest_api.crawl_for(port: 80, protocol: "http")
      def crawl_for(**params)
        params[:query] = ''
        params = turn_into_query(**params)
        post('shodan/scan/internet', **params)
      end

      # Check the progress of a previously submitted scan request.
      def scan_status(id)
        get("shodan/scan/#{id}")
      end

      # Use this method to obtain a list of search queries that users have saved in Shodan.
      def community_queries(**params)
        get('shodan/query', **params)
      end

      # Use this method to search the directory of search queries that users have saved in Shodan.
      def search_for_community_query(query, **params)
        params[:query] = query
        params = turn_into_query(**params)
        get('shodan/query/search', **params)
      end

      # Use this method to obtain a list of popular tags for the saved search queries in Shodan.
      def popular_query_tags(size = 10)
        params = {}
        params[:size] = size
        get('shodan/query/tags', **params)
      end

      # Returns information about the Shodan account linked to this API key.
      def profile
        get('account/profile')
      end

      # Look up the IP address for the provided list of hostnames.
      def resolve(*hostnames)
        get('dns/resolve', hostnames: hostnames.join(','))
      end

      # Look up the hostnames that have been defined for the
      # given list of IP addresses.
      def reverse_lookup(*ips)
        get('dns/reverse', ips: ips.join(','))
      end

      # Shows the HTTP headers that your client sends when
      # connecting to a webserver.
      def http_headers
        get('tools/httpheaders')
      end

      # Get your current IP address as seen from the Internet.
      def my_ip
        get('tools/myip')
      end

      # Calculates a honeypot probability score ranging
      # from 0 (not a honeypot) to 1.0 (is a honeypot).
      def honeypot_score(ip)
        get("labs/honeyscore/#{ip}")
      end

      # Returns information about the API plan belonging to the given API key.
      def info
        get('api-info')
      end
    end
  end
end
| ruby | MIT | 134eac5265d700efe1b6eec004dc3c25270cbc6c | 2026-01-04T17:51:46.633447Z | false |
picatz/shodanz | https://github.com/picatz/shodanz/blob/134eac5265d700efe1b6eec004dc3c25270cbc6c/lib/shodanz/apis/utils.rb | lib/shodanz/apis/utils.rb | # frozen_string_literal: true
# NOTE: this file previously required itself (require_relative 'utils.rb')
# and carried a misspelled, mis-placed magic comment; both were removed.
module Shodanz
  module API
    # Utils provides simply get, post, and slurp_stream functionality
    # to the client. Under the hood they support both async and non-async
    # usage. You should basically never need to use these methods directly.
    #
    # @author Kent 'picat' Gruber
    module Utils
      # Perform a direct GET HTTP request to the REST API.
      #
      # Returns an Async::Task when called inside a running reactor,
      # otherwise blocks and returns the parsed JSON response.
      def get(path, **params)
        return sync_get(path, **params) unless Async::Task.current?
        async_get(path, **params)
      end

      # Perform a direct POST HTTP request to the REST API.
      def post(path, body: nil, **params)
        return sync_post(path, params: params, body: body) unless Async::Task.current?
        async_post(path, params: params, body: body)
      end

      # Perform the main function of consuming the streaming API,
      # yielding each parsed JSON object to the given block.
      def slurp_stream(path, **params)
        if Async::Task.current?
          async_slurp_stream(path, **params) do |result|
            yield result
          end
        else
          sync_slurp_stream(path, **params) do |result|
            yield result
          end
        end
      end

      # Fold every non-:query keyword into the query string as a
      # "key:value" filter, returning a hash containing only :query.
      #
      # Fix: a copy of the query string is built instead of appending to
      # the caller's string with <<, which mutated the caller's argument
      # and raised FrozenError for frozen string literals.
      def turn_into_query(**params)
        filters = params.reject { |key, _| key == :query }
        query = params[:query].to_s.dup
        filters.each do |key, value|
          query << " #{key}:#{value}"
        end
        { query: query }
      end

      # Collapse facet keywords into the comma-separated "key:value"
      # list the API expects, returned as { facets: "..." }.
      # (A pre-existing :facets key is discarded, as before.)
      def turn_into_facets(**facets)
        filters = facets.reject { |key, _| key == :facets }
        { facets: filters.map { |key, value| "#{key}:#{value}" }.join(',') }
      end

      private

      # Lowercased fragments of known API error messages, matched
      # case-insensitively against the 'error' field of responses.
      RATELIMIT = 'rate limit reached'
      NOINFO = 'no information available'
      NOQUERY = 'empty search query'
      ACCESSDENIED = 'access denied'
      INVALIDKEY = 'invalid api key'

      # Map known error messages in a JSON response onto typed exceptions;
      # returns the JSON unchanged when no error is present.
      #
      # Fix: the previous implementation used String#casecmp (a three-way
      # -1/0/1 comparison) with `>= 0`, which raised RateLimited for ANY
      # error message that merely sorted at-or-after 'rate limit reached'.
      # Substring matching is what was intended.
      def handle_any_json_errors(json)
        return json unless json.is_a?(Hash) && json.key?('error')
        error = json['error'].downcase
        raise Shodanz::Errors::RateLimited if error.include?(RATELIMIT)
        raise Shodanz::Errors::NoInformation if error.include?(NOINFO)
        raise Shodanz::Errors::NoQuery if error.include?(NOQUERY)
        raise Shodanz::Errors::AccessDenied if error.include?(ACCESSDENIED)
        raise Shodanz::Errors::InvalidKey if error.include?(INVALIDKEY)
        json
      end

      # Blocking GET against @client; parses the JSON body or raises on
      # a non-success status. Closes the connection pool afterwards.
      def getter(path, **params)
        # param keys should all be strings
        params = params.transform_keys(&:to_s)
        # build up url string based on special params
        url = "/#{path}?key=#{@key}"
        params.each do |param, value|
          # empty string values are dropped entirely
          next if value.is_a?(String) && value.empty?
          value = URI.encode_www_form_component("#{value}")
          url += "&#{param}=#{value}"
        end
        resp = @client.get(url)
        if resp.success?
          # parse all lines in the response body as JSON
          json = JSON.parse(resp.body.join)
          handle_any_json_errors(json)
        else
          raise "Got response status #{resp.status}"
        end
      ensure
        @client.pool.close
        resp&.close
      end

      # Blocking POST against @client with form-encoded params/body.
      def poster(path, params: nil, body: nil)
        # param keys should all be strings
        params = params.transform_keys(&:to_s)
        # and the key param is constant
        params["key"] = @key
        # encode as a URL string
        params = URI.encode_www_form(params)
        # build URL path
        path = "/#{path}?#{params}"
        headers = nil
        if body
          body = URI.encode_www_form(body)
          headers = [['Content-Type', 'application/x-www-form-urlencoded']]
        end
        # make POST request to server
        resp = @client.post(path, headers, body)
        if resp.success?
          json = JSON.parse(resp.body.join)
          handle_any_json_errors(json)
        else
          raise "Got response status #{resp.status}"
        end
      ensure
        @client.pool.close
        resp&.close
      end

      # Stream a response body line-by-line, yielding each parsed JSON
      # line; honors an optional 'limit' param by closing the response.
      def slurper(path, **params)
        # param keys should all be strings
        params = params.transform_keys(&:to_s)
        # check if limit
        if (limit = params.delete('limit'))
          counter = 0
        end
        # make GET request to server
        resp = @client.get("/#{path}?key=#{@key}", params)
        # read body line-by-line
        until resp.body.nil? || resp.body.empty?
          resp.body.read.each_line do |line|
            next if line.strip.empty?
            yield JSON.parse(line)
            if limit
              counter += 1
              resp.close if counter == limit
            end
          end
        end
      ensure
        resp&.close
      end

      def async_get(path, **params)
        Async::Task.current.async do
          getter(path, **params)
        end
      end

      def sync_get(path, **params)
        Async do
          getter(path, **params)
        end.wait
      end

      def async_post(path, params: nil, body: nil)
        Async::Task.current.async do
          poster(path, params: params, body: body)
        end
      end

      def sync_post(path, params: nil, body: nil)
        Async do
          poster(path, params: params, body: body)
        end.wait
      end

      def async_slurp_stream(path, **params)
        Async::Task.current.async do
          slurper(path, **params) { |data| yield data }
        end
      end

      def sync_slurp_stream(path, **params)
        Async do
          slurper(path, **params) { |data| yield data }
        end.wait
      end
    end
  end
end
| ruby | MIT | 134eac5265d700efe1b6eec004dc3c25270cbc6c | 2026-01-04T17:51:46.633447Z | false |
picatz/shodanz | https://github.com/picatz/shodanz/blob/134eac5265d700efe1b6eec004dc3c25270cbc6c/lib/shodanz/apis/streaming.rb | lib/shodanz/apis/streaming.rb | require_relative 'utils.rb'
# frozen_string_literal: true
module Shodanz
  module API
    # The Streaming API is an HTTP-based service that returns a raw,
    # real-time feed of the data that Shodan is currently collecting.
    # Results cannot be searched or otherwise interacted with; it is a
    # live feed meant for large-scale consumption of Shodan's data.
    # This requires you to have an API key which you can get from Shodan.
    #
    # Note: Only 1-5% of the data is currently provided to
    # subscription-based API plans. If your company is interested
    # in large-scale, real-time access to all of the Shodan data
    # please contact us for pricing information (sales@shodan.io).
    #
    # @author Kent 'picat' Gruber
    class Streaming
      include Shodanz::API::Utils

      # @return [String]
      attr_accessor :key

      # The Streaming API is an HTTP-based service that returns
      # a real-time stream of data collected by Shodan.
      URL = 'https://stream.shodan.io/'

      # @param key [String] SHODAN API key, defaulted to the *SHODAN_API_KEY* enviroment variable.
      def initialize(key: ENV['SHODAN_API_KEY'])
        @url = URL
        # One persistent HTTP client bound to the streaming endpoint.
        @client = Async::HTTP::Client.new(Async::HTTP::Endpoint.parse(@url))
        self.key = key
        warn 'No key has been found or provided!' unless key?
      end

      # Check if there's an API key.
      # @return [Boolean]
      def key?
        return true if @key; false
      end

      # This stream provides ALL of the data that Shodan collects.
      # Use this stream if you need access to everything and/ or want to
      # store your own Shodan database locally. If you only care about specific
      # ports, please use the Ports stream.
      #
      # Sometimes data may be piped down stream that is weird to parse. You can choose
      # to keep this data optionally; and it will not be parsed for you.
      #
      # == Example
      #   api.banners do |banner|
      #     # do something with banner as hash
      #     puts banner
      #   end
      def banners(**params)
        slurp_stream('shodan/banners', **params) do |data|
          yield data
        end
      end

      # This stream provides a filtered, bandwidth-saving view of the Banners
      # stream in case you are only interested in devices located in certain ASNs.
      # == Example
      #   api.banners_within_asns(3303, 32475) do |data|
      #     # do something with the banner hash
      #     puts data
      #   end
      def banners_within_asns(*asns, **params)
        slurp_stream("shodan/asn/#{asns.join(',')}", **params) do |data|
          yield data
        end
      end

      # This stream provides a filtered, bandwidth-saving view of the Banners
      # stream in case you are only interested in devices located in a certain ASN.
      # == Example
      #   api.banners_within_asn(3303) do |data|
      #     # do something with the banner hash
      #     puts data
      #   end
      def banners_within_asn(param)
        banners_within_asns(param) do |data|
          yield data
        end
      end

      # Only returns banner data for devices located in the listed
      # countries. This stream provides a filtered, bandwidth-saving view
      # of the Banners stream in case you are only interested in devices
      # from specific countries.
      # == Example
      #   api.banners_within_countries("US","DE","JP") do |data|
      #     # do something with the banner hash
      #     puts data
      #   end
      def banners_within_countries(*params)
        slurp_stream("shodan/countries/#{params.join(',')}") do |data|
          yield data
        end
      end

      # Only returns banner data for the list of specified ports. This
      # stream provides a filtered, bandwidth-saving view of the
      # Banners stream in case you are only interested in a
      # specific list of ports.
      # == Example
      #   api.banners_on_ports(80, 443) do |data|
      #     # do something with the banner hash
      #     puts data
      #   end
      def banners_on_ports(*params)
        slurp_stream("shodan/ports/#{params.join(',')}") do |data|
          yield data
        end
      end

      # Only returns banner data for a specific port. This
      # stream provides a filtered, bandwidth-saving view of the
      # Banners stream in case you are only interested in a
      # single port.
      # == Example
      #   api.banners_on_port(80) do |data|
      #     # do something with the banner hash
      #     puts data
      #   end
      def banners_on_port(param)
        banners_on_ports(param) do |data|
          yield data
        end
      end

      # Subscribe to banners discovered on all IP ranges described in the network alerts.
      # Use the REST API methods to create/ delete/ manage your network alerts and
      # use the Streaming API to subscribe to them.
      def alerts
        slurp_stream('alert') do |data|
          yield data
        end
      end

      # Subscribe to banners discovered on the IP range defined in a specific network alert.
      def alert(id)
        slurp_stream("alert/#{id}") do |data|
          yield data
        end
      end
    end
  end
end
| ruby | MIT | 134eac5265d700efe1b6eec004dc3c25270cbc6c | 2026-01-04T17:51:46.633447Z | false |
picatz/shodanz | https://github.com/picatz/shodanz/blob/134eac5265d700efe1b6eec004dc3c25270cbc6c/lib/shodanz/apis/exploits.rb | lib/shodanz/apis/exploits.rb | require_relative 'utils.rb'
# frozen_string_literal: true
module Shodanz
  module API
    # The Exploits API provides access to several exploit
    # and vulnerability data sources. At the moment, it
    # searches across the following:
    # - Exploit DB
    # - Metasploit
    # - Common Vulnerabilities and Exposures (CVE)
    #
    # @author Kent 'picat' Gruber
    class Exploits
      include Shodanz::API::Utils

      # @return [String]
      attr_accessor :key

      # Base URL of the Exploits API endpoint.
      URL = 'https://exploits.shodan.io/'

      # @param key [String] SHODAN API key, defaulted to
      #   the *SHODAN_API_KEY* enviroment variable.
      def initialize(key: ENV['SHODAN_API_KEY'])
        @url = URL
        @client = Async::HTTP::Client.new(Async::HTTP::Endpoint.parse(@url))
        self.key = key
        warn 'No key has been found or provided!' unless key?
      end

      # Whether an API key is currently set.
      def key?
        !!@key
      end

      # Search across a variety of data sources for exploits and
      # use facets to get summary information.
      # == Example
      #   api.search("SQL", port: 443)
      #   api.search(port: 22)
      #   api.search(type: "dos")
      def search(query = '', facets: {}, page: 1, **params)
        request_params = turn_into_query(**params, query: query)
        # the page number is kept out of the assembled query string
        request_params[:page] = page
        get('api/search', **request_params.merge(**turn_into_facets(**facets)))
      end

      # This method behaves identical to the "/search" method with
      # the difference that it doesn't return any results.
      # == Example
      #   api.count(type: "dos")
      def count(query = '', facets: {}, page: 1, **params)
        request_params = turn_into_query(**params, query: query)
        request_params[:page] = page
        get('api/count', **request_params.merge(**turn_into_facets(**facets)))
      end
    end
  end
end
| ruby | MIT | 134eac5265d700efe1b6eec004dc3c25270cbc6c | 2026-01-04T17:51:46.633447Z | false |
embulk/embulk-output-bigquery | https://github.com/embulk/embulk-output-bigquery/blob/e4cc3fcbb6a481a71874c357a947b298ceb2bc23/test/test_file_writer.rb | test/test_file_writer.rb | require_relative './helper'
require 'embulk/output/bigquery/file_writer'
require 'fileutils'
require 'zlib'
module Embulk
  class Output::Bigquery
    # Unit tests for FileWriter: output path construction, record
    # formatting, and compression handling.
    class TestFileWriter < Test::Unit::TestCase
      class << self
        def startup
          FileUtils.mkdir_p('tmp')
        end

        def shutdown
          FileUtils.rm_rf('tmp')
        end
      end

      # Baseline task config; individual tests merge overrides on top.
      def default_task
        {
          'compression' => 'GZIP',
          'payload_column' => nil,
          'source_format' => 'CSV',
          'path_prefix' => 'tmp/path_prefix',
          'sequence_format' => '.%d.%03d',
          'file_ext' => nil,
        }
      end

      # One column of every Embulk type.
      def schema
        Schema.new([
          Column.new({index: 0, name: 'boolean', type: :boolean}),
          Column.new({index: 1, name: 'long', type: :long}),
          Column.new({index: 2, name: 'double', type: :double}),
          Column.new({index: 3, name: 'string', type: :string}),
          Column.new({index: 4, name: 'timestamp', type: :timestamp}),
          Column.new({index: 5, name: 'json', type: :json}),
        ])
      end

      def converters
        @converters ||= ValueConverterFactory.create_converters(default_task, schema)
      end

      # A single record matching #schema.
      def record
        [true, 1, 1.1, 'foo', Time.parse("2016-02-26 00:00:00 +00:00").utc, {"foo"=>"foo"}]
      end

      def page
        [record]
      end

      sub_test_case "path" do
        def test_path
          task = default_task.merge('path_prefix' => 'tmp/foo', 'sequence_format' => '', 'file_ext' => '.1')
          file_writer = FileWriter.new(task, schema, 0, converters)
          begin
            file_writer.add(page)
          ensure
            # Fix: previously closed an undefined local `io` (the
            # NameError was silently swallowed by `rescue nil`),
            # leaking the writer's file handle.
            file_writer.io.close rescue nil
          end
          path = file_writer.io.path
          assert_equal 'tmp/foo.1', path
        end
      end

      sub_test_case "formatter" do
        def test_payload_column_index
          task = default_task.merge('payload_column_index' => 0)
          file_writer = FileWriter.new(task, schema, 0, converters)
          formatter_proc = file_writer.instance_variable_get(:@formatter_proc)
          assert_equal :to_payload, formatter_proc.name
          assert_equal %Q[true\n], formatter_proc.call(record)
        end

        def test_csv
          task = default_task.merge('source_format' => 'CSV')
          file_writer = FileWriter.new(task, schema, 0, converters)
          formatter_proc = file_writer.instance_variable_get(:@formatter_proc)
          assert_equal :to_csv, formatter_proc.name
          expected = %Q[true,1,1.1,foo,2016-02-26 00:00:00.000000 +00:00,"{""foo"":""foo""}"\n]
          assert_equal expected, formatter_proc.call(record)
        end

        def test_jsonl
          task = default_task.merge('source_format' => 'NEWLINE_DELIMITED_JSON')
          file_writer = FileWriter.new(task, schema, 0, converters)
          formatter_proc = file_writer.instance_variable_get(:@formatter_proc)
          assert_equal :to_jsonl, formatter_proc.name
          expected = %Q[{"boolean":true,"long":1,"double":1.1,"string":"foo","timestamp":"2016-02-26 00:00:00.000000 +00:00","json":"{\\"foo\\":\\"foo\\"}"}\n]
          assert_equal expected, formatter_proc.call(record)
        end
      end

      sub_test_case "compression" do
        def test_gzip
          task = default_task.merge('compression' => 'GZIP')
          file_writer = FileWriter.new(task, schema, 0, converters)
          # predeclare so the ensure clause never hits an undefined local
          io = nil
          begin
            file_writer.add(page)
            io = file_writer.io
            assert_equal Zlib::GzipWriter, io.class
          ensure
            io&.close rescue nil
          end
          path = file_writer.io.path
          assert_true File.exist?(path)
          assert_nothing_raised { Zlib::GzipReader.open(path) {|gz| } }
        end

        def test_uncompressed
          task = default_task.merge('compression' => 'NONE')
          file_writer = FileWriter.new(task, schema, 0, converters)
          io = nil
          begin
            file_writer.add(page)
            io = file_writer.io
            assert_equal File, io.class
          ensure
            io&.close rescue nil
          end
          path = file_writer.io.path
          assert_true File.exist?(path)
          # an uncompressed file must not be readable as gzip
          assert_raise { Zlib::GzipReader.open(path) {|gz| } }
        end
      end
    end
  end
end
| ruby | MIT | e4cc3fcbb6a481a71874c357a947b298ceb2bc23 | 2026-01-04T17:51:46.760044Z | false |
embulk/embulk-output-bigquery | https://github.com/embulk/embulk-output-bigquery/blob/e4cc3fcbb6a481a71874c357a947b298ceb2bc23/test/test_value_converter_factory.rb | test/test_value_converter_factory.rb | require_relative './helper'
require 'embulk/output/bigquery/value_converter_factory'
module Embulk
class Output::Bigquery
class TestValueConverterFactory < Test::Unit::TestCase
# Exercises ValueConverterFactory.create_converters: one converter proc
# per schema column, honoring per-column 'column_options' overrides.
class TestCreateConverters < Test::Unit::TestCase
  def test_create_default_converter
    schema = Schema.new([
      Column.new({index: 0, name: 'boolean', type: :boolean}),
      Column.new({index: 1, name: 'long', type: :long}),
      Column.new({index: 2, name: 'double', type: :double}),
      Column.new({index: 3, name: 'string', type: :string}),
      Column.new({index: 4, name: 'timestamp', type: :timestamp}),
      Column.new({index: 5, name: 'json', type: :json}),
    ])
    converters = ValueConverterFactory.create_converters({}, schema)
    assert_equal schema.size, converters.size
    # Check correct converters are created
    # Proc can not have names, so we have to execute to check...
    assert_equal true, converters[0].call(true)
    assert_equal 1, converters[1].call(1)
    assert_equal 1.1, converters[2].call(1.1)
    assert_equal 'foo', converters[3].call('foo')
    timestamp = Time.parse("2016-02-26 00:00:00.500000 +00:00")
    assert_equal "2016-02-26 00:00:00.500000 +00:00", converters[4].call(timestamp)
    assert_equal %Q[{"foo":"foo"}], converters[5].call({'foo'=>'foo'})
  end

  def test_create_custom_converter
    schema = Schema.new([
      Column.new({index: 0, name: 'boolean', type: :boolean}),
      Column.new({index: 1, name: 'long', type: :long}),
      Column.new({index: 2, name: 'double', type: :double}),
      Column.new({index: 3, name: 'string', type: :string}),
      Column.new({index: 4, name: 'timestamp', type: :timestamp}),
      Column.new({index: 5, name: 'json', type: :json}),
    ])
    # every column remapped to a non-default BigQuery type
    task = {
      'column_options' => [
        {'name' => 'boolean', 'type' => 'STRING'},
        {'name' => 'long', 'type' => 'STRING'},
        {'name' => 'double', 'type' => 'STRING'},
        {'name' => 'string', 'type' => 'INTEGER'},
        {'name' => 'timestamp', 'type' => 'INTEGER'},
        {'name' => 'json', 'type' => 'RECORD'},
      ],
    }
    converters = ValueConverterFactory.create_converters(task, schema)
    assert_equal schema.size, converters.size
    # Check correct converters are created
    # Proc can not have names, so we have to execute to check...
    assert_equal 'true', converters[0].call(true)
    assert_equal '1', converters[1].call(1)
    assert_equal '1.1', converters[2].call(1.1)
    assert_equal 1, converters[3].call('1')
    timestamp = Time.parse("2016-02-26 00:00:00.100000 +00:00")
    assert_equal 1456444800, converters[4].call(timestamp)
    assert_equal({'foo'=>'foo'}, converters[5].call({'foo'=>'foo'}))
  end
end
# Conversions from an Embulk :boolean column to each BigQuery type.
# Unsupported targets are expected to raise at converter-creation time.
class TestBooleanConverter < Test::Unit::TestCase
  SCHEMA_TYPE = :boolean

  def test_boolean
    converter = ValueConverterFactory.new(SCHEMA_TYPE, 'BOOLEAN').create_converter
    assert_equal nil, converter.call(nil)
    assert_equal true, converter.call(true)
    assert_equal false, converter.call(false)
  end

  def test_integer
    assert_raise { ValueConverterFactory.new(SCHEMA_TYPE, 'INTEGER').create_converter }
  end

  def test_float
    assert_raise { ValueConverterFactory.new(SCHEMA_TYPE, 'FLOAT').create_converter }
  end

  def test_string
    converter = ValueConverterFactory.new(SCHEMA_TYPE, 'STRING').create_converter
    assert_equal nil, converter.call(nil)
    assert_equal "true", converter.call(true)
    assert_equal "false", converter.call(false)
  end

  def test_timestamp
    assert_raise { ValueConverterFactory.new(SCHEMA_TYPE, 'TIMESTAMP').create_converter }
  end

  def test_date
    assert_raise { ValueConverterFactory.new(SCHEMA_TYPE, 'DATE').create_converter }
  end

  def test_datetime
    assert_raise { ValueConverterFactory.new(SCHEMA_TYPE, 'DATETIME').create_converter }
  end

  def test_record
    assert_raise { ValueConverterFactory.new(SCHEMA_TYPE, 'RECORD').create_converter }
  end
end
# Conversions from an Embulk :long column to each BigQuery type.
class TestLongConverter < Test::Unit::TestCase
  SCHEMA_TYPE = :long

  def test_boolean
    converter = ValueConverterFactory.new(SCHEMA_TYPE, 'BOOLEAN').create_converter
    assert_equal nil, converter.call(nil)
    assert_equal true, converter.call(1)
    assert_equal false, converter.call(0)
    # only 0/1 are accepted as booleans
    assert_raise { converter.call(2) }
  end

  def test_integer
    converter = ValueConverterFactory.new(SCHEMA_TYPE, 'INTEGER').create_converter
    assert_equal nil, converter.call(nil)
    assert_equal 1, converter.call(1)
  end

  def test_float
    converter = ValueConverterFactory.new(SCHEMA_TYPE, 'FLOAT').create_converter
    assert_equal nil, converter.call(nil)
    assert_equal 1.0, converter.call(1)
  end

  def test_string
    converter = ValueConverterFactory.new(SCHEMA_TYPE, 'STRING').create_converter
    assert_equal nil, converter.call(nil)
    assert_equal "1", converter.call(1)
  end

  def test_timestamp
    # longs are interpreted as epoch seconds and passed through
    converter = ValueConverterFactory.new(SCHEMA_TYPE, 'TIMESTAMP').create_converter
    assert_equal nil, converter.call(nil)
    assert_equal 1408452095, converter.call(1408452095)
  end

  def test_date
    assert_raise { ValueConverterFactory.new(SCHEMA_TYPE, 'DATE').create_converter }
  end

  def test_datetime
    assert_raise { ValueConverterFactory.new(SCHEMA_TYPE, 'DATETIME').create_converter }
  end

  def test_record
    assert_raise { ValueConverterFactory.new(SCHEMA_TYPE, 'RECORD').create_converter }
  end
end
# Conversions from an Embulk :double column to each BigQuery type.
class TestDoubleConverter < Test::Unit::TestCase
  SCHEMA_TYPE = :double

  def test_boolean
    assert_raise { ValueConverterFactory.new(SCHEMA_TYPE, 'BOOLEAN').create_converter }
  end

  def test_integer
    # doubles are truncated toward zero when cast to INTEGER
    converter = ValueConverterFactory.new(SCHEMA_TYPE, 'INTEGER').create_converter
    assert_equal nil, converter.call(nil)
    assert_equal 1, converter.call(1.1)
  end

  def test_float
    converter = ValueConverterFactory.new(SCHEMA_TYPE, 'FLOAT').create_converter
    assert_equal nil, converter.call(nil)
    assert_equal 1.1, converter.call(1.1)
  end

  def test_string
    converter = ValueConverterFactory.new(SCHEMA_TYPE, 'STRING').create_converter
    assert_equal nil, converter.call(nil)
    assert_equal "1.1", converter.call(1.1)
  end

  def test_timestamp
    # doubles are interpreted as fractional epoch seconds and passed through
    converter = ValueConverterFactory.new(SCHEMA_TYPE, 'TIMESTAMP').create_converter
    assert_equal nil, converter.call(nil)
    assert_equal 1408452095.188766, converter.call(1408452095.188766)
  end

  def test_date
    assert_raise { ValueConverterFactory.new(SCHEMA_TYPE, 'DATE').create_converter }
  end

  def test_datetime
    assert_raise { ValueConverterFactory.new(SCHEMA_TYPE, 'DATETIME').create_converter }
  end

  def test_record
    assert_raise { ValueConverterFactory.new(SCHEMA_TYPE, 'RECORD').create_converter }
  end
end
# Conversions from Embulk :string columns into each BigQuery column type,
# including the timestamp_format / timezone options for time-like targets.
class TestStringConverter < Test::Unit::TestCase
  SCHEMA_TYPE = :string
  def test_boolean
    converter = ValueConverterFactory.new(SCHEMA_TYPE, 'BOOLEAN').create_converter
    assert_equal nil, converter.call(nil)
    assert_equal true, converter.call('true')
    assert_equal false, converter.call('false')
    # Anything other than the literal strings 'true'/'false' is rejected.
    assert_raise { converter.call('foo') }
  end
  def test_integer
    converter = ValueConverterFactory.new(SCHEMA_TYPE, 'INTEGER').create_converter
    assert_equal nil, converter.call(nil)
    assert_equal 1, converter.call('1')
    # Non-integral numeric strings raise rather than being truncated.
    assert_raise { converter.call('1.1') }
  end
  def test_float
    converter = ValueConverterFactory.new(SCHEMA_TYPE, 'FLOAT').create_converter
    assert_equal nil, converter.call(nil)
    assert_equal 1.1, converter.call('1.1')
    assert_raise { converter.call('foo') }
  end
  def test_string
    converter = ValueConverterFactory.new(SCHEMA_TYPE, 'STRING').create_converter
    assert_equal nil, converter.call(nil)
    assert_equal "foo", converter.call("foo")
  end
  def test_timestamp
    # With timestamp_format + timezone, the input is parsed and rendered
    # in the given zone (+09:00 for Asia/Tokyo).
    converter = ValueConverterFactory.new(
      SCHEMA_TYPE, 'TIMESTAMP',
      timestamp_format: '%Y-%m-%d', timezone: 'Asia/Tokyo'
    ).create_converter
    assert_equal nil, converter.call(nil)
    assert_equal "2016-02-26 00:00:00.000000 +09:00", converter.call("2016-02-26")
    # Users must care of BQ timestamp format by themselves with no timestamp_format
    converter = ValueConverterFactory.new(SCHEMA_TYPE, 'TIMESTAMP').create_converter
    assert_equal nil, converter.call(nil)
    assert_equal "2016-02-26 00:00:00", converter.call("2016-02-26 00:00:00")
  end
  def test_date
    converter = ValueConverterFactory.new(SCHEMA_TYPE, 'DATE').create_converter
    assert_equal nil, converter.call(nil)
    assert_equal "2016-02-26", converter.call("2016-02-26")
    # A trailing time-of-day component is accepted and discarded.
    assert_equal "2016-02-26", converter.call("2016-02-26 00:00:00")
    assert_raise { converter.call('foo') }
  end
  def test_datetime
    converter = ValueConverterFactory.new(
      SCHEMA_TYPE, 'DATETIME',
      timestamp_format: '%Y/%m/%d'
    ).create_converter
    assert_equal nil, converter.call(nil)
    assert_equal "2016-02-26 00:00:00.000000", converter.call("2016/02/26")
    # Users must care of BQ datetime format by themselves with no timestamp_format
    converter = ValueConverterFactory.new(SCHEMA_TYPE, 'DATETIME').create_converter
    assert_equal nil, converter.call(nil)
    assert_equal "2016-02-26 00:00:00", converter.call("2016-02-26 00:00:00")
  end
  def test_time
    converter = ValueConverterFactory.new(SCHEMA_TYPE, 'TIME').create_converter
    assert_equal nil, converter.call(nil)
    assert_equal "00:03:22.000000", converter.call("00:03:22")
    # 12-hour AM/PM notation is normalized to 24-hour output.
    assert_equal "15:22:00.000000", converter.call("3:22 PM")
    assert_equal "03:22:00.000000", converter.call("3:22 AM")
    assert_equal "00:00:00.000000", converter.call("2016-02-26 00:00:00")
    # TimeWithZone doesn't affect any change to the time value
    converter = ValueConverterFactory.new(
      SCHEMA_TYPE, 'TIME', timezone: 'Asia/Tokyo'
    ).create_converter
    assert_equal "15:00:01.000000", converter.call("15:00:01")
    assert_raise { converter.call('foo') }
  end
  def test_record
    # A JSON-encoded string is parsed into a Hash for RECORD columns.
    converter = ValueConverterFactory.new(SCHEMA_TYPE, 'RECORD').create_converter
    assert_equal({'foo'=>'foo'}, converter.call(%Q[{"foo":"foo"}]))
    assert_raise { converter.call('foo') }
  end
end
# Conversions from Embulk :timestamp columns (Time objects) into each
# BigQuery column type, including timezone-shifted DATE/DATETIME/TIME output.
class TestTimestampConverter < Test::Unit::TestCase
  SCHEMA_TYPE = :timestamp
  def test_boolean
    assert_raise { ValueConverterFactory.new(SCHEMA_TYPE, 'BOOLEAN').create_converter }
  end
  def test_integer
    converter = ValueConverterFactory.new(SCHEMA_TYPE, 'INTEGER').create_converter
    assert_equal nil, converter.call(nil)
    # Time values become whole epoch seconds.
    expected = 1456444800
    assert_equal expected, converter.call(Time.at(expected))
  end
  def test_float
    converter = ValueConverterFactory.new(SCHEMA_TYPE, 'FLOAT').create_converter
    assert_equal nil, converter.call(nil)
    # Time values become fractional epoch seconds.
    expected = 1456444800.500000
    assert_equal expected, converter.call(Time.at(expected))
  end
  def test_string
    converter = ValueConverterFactory.new(SCHEMA_TYPE, 'STRING').create_converter
    assert_equal nil, converter.call(nil)
    timestamp = Time.parse("2016-02-26 00:00:00.500000 +00:00")
    expected = "2016-02-26 00:00:00.500000"
    assert_equal expected, converter.call(timestamp)
    # With timestamp_format + timezone, 15:00 UTC renders as the next
    # calendar day in Asia/Tokyo (+09:00).
    converter = ValueConverterFactory.new(
      SCHEMA_TYPE, 'STRING',
      timestamp_format: '%Y-%m-%d', timezone: 'Asia/Tokyo'
    ).create_converter
    timestamp = Time.parse("2016-02-25 15:00:00.500000 +00:00")
    expected = "2016-02-26"
    assert_equal expected, converter.call(timestamp)
  end
  def test_timestamp
    converter = ValueConverterFactory.new(SCHEMA_TYPE, 'TIMESTAMP').create_converter
    assert_equal nil, converter.call(nil)
    subject = 1456444800.500000
    expected = "2016-02-26 00:00:00.500000 +00:00"
    assert_equal expected, converter.call(Time.at(subject).utc)
  end
  def test_date
    converter = ValueConverterFactory.new(SCHEMA_TYPE, 'DATE').create_converter
    assert_equal nil, converter.call(nil)
    timestamp = Time.parse("2016-02-26 00:00:00.500000 +00:00")
    expected = "2016-02-26"
    assert_equal expected, converter.call(timestamp)
    # The timezone option shifts which calendar day the Time falls on.
    converter = ValueConverterFactory.new(
      SCHEMA_TYPE, 'DATE', timezone: 'Asia/Tokyo'
    ).create_converter
    assert_equal nil, converter.call(nil)
    timestamp = Time.parse("2016-02-25 15:00:00.500000 +00:00")
    expected = "2016-02-26"
    assert_equal expected, converter.call(timestamp)
    assert_raise { converter.call('foo') }
  end
  def test_datetime
    converter = ValueConverterFactory.new(SCHEMA_TYPE, 'DATETIME').create_converter
    assert_equal nil, converter.call(nil)
    timestamp = Time.parse("2016-02-26 00:00:00.500000 +00:00")
    expected = "2016-02-26 00:00:00.500000"
    assert_equal expected, converter.call(timestamp)
    converter = ValueConverterFactory.new(
      SCHEMA_TYPE, 'DATETIME', timezone: 'Asia/Tokyo'
    ).create_converter
    assert_equal nil, converter.call(nil)
    timestamp = Time.parse("2016-02-25 15:00:00.500000 +00:00")
    expected = "2016-02-26 00:00:00.500000"
    assert_equal expected, converter.call(timestamp)
    assert_raise { converter.call('foo') }
  end
  def test_time
    converter = ValueConverterFactory.new(SCHEMA_TYPE, 'TIME').create_converter
    assert_equal nil, converter.call(nil)
    timestamp = Time.parse("2016-02-26 00:00:00.500000 +00:00")
    expected = "00:00:00.500000"
    assert_equal expected, converter.call(timestamp)
    converter = ValueConverterFactory.new(
      SCHEMA_TYPE, 'TIME', timezone: 'Asia/Tokyo'
    ).create_converter
    assert_equal nil, converter.call(nil)
    timestamp = Time.parse("2016-02-25 15:00:00.500000 +00:00")
    expected = "00:00:00.500000"
    assert_equal expected, converter.call(timestamp)
    assert_raise { converter.call('foo') }
  end
  def test_record
    assert_raise { ValueConverterFactory.new(SCHEMA_TYPE, 'RECORD').create_converter }
  end
end
# Conversions from Embulk :json columns into each BigQuery column type.
class TestJsonConverter < Test::Unit::TestCase
  SCHEMA_TYPE = :json

  # Build a converter from :json to the given BigQuery type.
  def build_converter(bq_type)
    ValueConverterFactory.new(SCHEMA_TYPE, bq_type).create_converter
  end

  def test_boolean
    assert_raise { build_converter('BOOLEAN') }
  end

  def test_integer
    assert_raise { build_converter('INTEGER') }
  end

  def test_float
    assert_raise { build_converter('FLOAT') }
  end

  def test_string
    conv = build_converter('STRING')
    assert_nil conv.call(nil)
    # JSON values are serialized into a compact JSON string.
    assert_equal(%Q[{"foo":"foo"}], conv.call({'foo'=>'foo'}))
  end

  def test_timestamp
    assert_raise { build_converter('TIMESTAMP') }
  end

  def test_date
    assert_raise { build_converter('DATE') }
  end

  def test_record
    conv = build_converter('RECORD')
    assert_nil conv.call(nil)
    # Hashes pass through untouched for RECORD columns.
    assert_equal({'foo'=>'foo'}, conv.call({'foo'=>'foo'}))
  end

  def test_json
    conv = build_converter('JSON')
    assert_nil conv.call(nil)
    # Hashes pass through untouched for JSON columns as well.
    assert_equal({'foo'=>'foo'}, conv.call({'foo'=>'foo'}))
  end
end
# With strict: false, values that cannot be converted are mapped to nil
# instead of raising.
def test_strict_false
  %w[BOOLEAN INTEGER].each do |bq_type|
    converter = ValueConverterFactory.new(:string, bq_type, strict: false).create_converter
    assert_nil converter.call('foo')
  end
end
end
end
end
| ruby | MIT | e4cc3fcbb6a481a71874c357a947b298ceb2bc23 | 2026-01-04T17:51:46.760044Z | false |
embulk/embulk-output-bigquery | https://github.com/embulk/embulk-output-bigquery/blob/e4cc3fcbb6a481a71874c357a947b298ceb2bc23/test/test_transaction.rb | test/test_transaction.rb | require_relative './helper'
require 'embulk/output/bigquery'
Bigquery = Embulk::Output::Bigquery unless defined?(Bigquery)
module Embulk
class Output::Bigquery
# Mode-by-mode tests of Bigquery.transaction. Each test mocks (via rr's
# any_instance_of/mock) exactly the BigqueryClient calls that the given
# mode is expected to make, so an extra or missing call fails the test.
class TestTransaction < Test::Unit::TestCase
  # Minimal configuration shared by every test; individual tests merge
  # their mode-specific keys on top.
  def least_config
    DataSource.new({
      'project' => 'your_project_name',
      'dataset' => 'your_dataset_name',
      'table' => 'your_table_name',
      'temp_table' => 'temp_table', # randomly created is not good for our test
      'path_prefix' => 'tmp/', # randomly created is not good for our test
    })
  end

  # One column per Embulk type.
  def schema
    Schema.new([
      Column.new({index: 0, name: 'boolean', type: :boolean}),
      Column.new({index: 1, name: 'long', type: :long}),
      Column.new({index: 2, name: 'double', type: :double}),
      Column.new({index: 3, name: 'string', type: :string}),
      Column.new({index: 4, name: 'timestamp', type: :timestamp}),
      Column.new({index: 5, name: 'json', type: :json}),
    ])
  end

  def processor_count
    1
  end

  # Control block passed to Bigquery.transaction; collects no reports.
  def control
    Proc.new {|task| task_reports = [] }
  end

  # Stub the aggregate report so transaction can complete without real jobs.
  def setup
    stub(Bigquery).transaction_report { {'num_input_rows' => 1, 'num_output_rows' => 1, 'num_rejected_rows' => 0} }
  end

  sub_test_case "append_direct" do
    # Without auto-create, the dataset and table must merely be looked up.
    # NOTE(review): method name has a typo ("direc"); it still runs
    # because it starts with the test_ prefix.
    def test_append_direc_without_auto_create
      config = least_config.merge('mode' => 'append_direct', 'auto_create_dataset' => false, 'auto_create_table' => false)
      any_instance_of(BigqueryClient) do |obj|
        mock(obj).get_dataset(config['dataset'])
        mock(obj).get_table(config['table'])
      end
      Bigquery.transaction(config, schema, processor_count, &control)
    end

    # With auto-create, the dataset and table are created instead.
    def test_append_direct_with_auto_create
      config = least_config.merge('mode' => 'append_direct', 'auto_create_dataset' => true, 'auto_create_table' => true)
      task = Bigquery.configure(config, schema, processor_count)
      any_instance_of(BigqueryClient) do |obj|
        mock(obj).create_dataset(config['dataset'])
        mock(obj).create_table_if_not_exists(config['table'])
      end
      Bigquery.transaction(config, schema, processor_count, &control)
    end

    # Same two cases with a $YYYYMMDD partition decorator on the table.
    def test_append_direct_with_partition_without_auto_create
      config = least_config.merge('mode' => 'append_direct', 'table' => 'table$20160929', 'auto_create_dataset' => false, 'auto_create_table' => false)
      any_instance_of(BigqueryClient) do |obj|
        mock(obj).get_dataset(config['dataset'])
        mock(obj).get_table(config['table'])
      end
      Bigquery.transaction(config, schema, processor_count, &control)
    end

    def test_append_direct_with_partition_with_auto_create
      config = least_config.merge('mode' => 'append_direct', 'table' => 'table$20160929', 'auto_create_dataset' => true, 'auto_create_table' => true)
      task = Bigquery.configure(config, schema, processor_count)
      any_instance_of(BigqueryClient) do |obj|
        mock(obj).create_dataset(config['dataset'])
        mock(obj).create_table_if_not_exists(config['table'])
      end
      Bigquery.transaction(config, schema, processor_count, &control)
    end
  end

  sub_test_case "delete_in_advance" do
    # delete_in_advance drops the target (table or partition) and then
    # recreates it before loading.
    def test_delete_in_advance
      config = least_config.merge('mode' => 'delete_in_advance')
      task = Bigquery.configure(config, schema, processor_count)
      any_instance_of(BigqueryClient) do |obj|
        mock(obj).get_dataset(config['dataset'])
        mock(obj).delete_table_or_partition(config['table'])
        mock(obj).create_table_if_not_exists(config['table'])
      end
      Bigquery.transaction(config, schema, processor_count, &control)
    end

    def test_delete_in_advance_with_partitioning
      config = least_config.merge('mode' => 'delete_in_advance', 'table' => 'table$20160929', 'auto_create_table' => true)
      task = Bigquery.configure(config, schema, processor_count)
      any_instance_of(BigqueryClient) do |obj|
        mock(obj).get_dataset(config['dataset'])
        mock(obj).delete_table_or_partition(config['table'])
        mock(obj).create_table_if_not_exists(config['table'])
      end
      Bigquery.transaction(config, schema, processor_count, &control)
    end
  end

  sub_test_case "replace" do
    # replace loads into temp_table, copies it over the target with
    # WRITE_TRUNCATE, then deletes the temp table.
    def test_replace
      config = least_config.merge('mode' => 'replace')
      task = Bigquery.configure(config, schema, processor_count)
      any_instance_of(BigqueryClient) do |obj|
        mock(obj).get_dataset(config['dataset'])
        mock(obj).create_table_if_not_exists(config['temp_table'], options: {"expiration_time"=>nil})
        mock(obj).create_table_if_not_exists(config['table'])
        mock(obj).copy(config['temp_table'], config['table'], write_disposition: 'WRITE_TRUNCATE')
        mock(obj).delete_table(config['temp_table'])
      end
      Bigquery.transaction(config, schema, processor_count, &control)
    end

    def test_replace_with_partitioning
      config = least_config.merge('mode' => 'replace', 'table' => 'table$20160929')
      task = Bigquery.configure(config, schema, processor_count)
      any_instance_of(BigqueryClient) do |obj|
        mock(obj).get_dataset(config['dataset'])
        mock(obj).create_table_if_not_exists(config['temp_table'], options: {"expiration_time"=>nil})
        mock(obj).create_table_if_not_exists(config['table'])
        mock(obj).copy(config['temp_table'], config['table'], write_disposition: 'WRITE_TRUNCATE')
        mock(obj).delete_table(config['temp_table'])
      end
      Bigquery.transaction(config, schema, processor_count, &control)
    end
  end

  sub_test_case "replace_backup" do
    # replace_backup additionally copies the current target into
    # dataset_old/table_old before the WRITE_TRUNCATE replace.
    def test_replace_backup
      config = least_config.merge('mode' => 'replace_backup', 'dataset_old' => 'dataset_old', 'table_old' => 'table_old', 'temp_table' => 'temp_table')
      task = Bigquery.configure(config, schema, processor_count)
      any_instance_of(BigqueryClient) do |obj|
        mock(obj).get_dataset(config['dataset'])
        mock(obj).get_dataset(config['dataset_old'])
        mock(obj).create_table_if_not_exists(config['temp_table'], options: {"expiration_time"=>nil})
        mock(obj).create_table_if_not_exists(config['table'])
        mock(obj).create_table_if_not_exists(config['table_old'], dataset: config['dataset_old'])
        mock(obj).get_table_or_partition(config['table'])
        mock(obj).copy(config['table'], config['table_old'], config['dataset_old'])
        mock(obj).copy(config['temp_table'], config['table'], write_disposition: 'WRITE_TRUNCATE')
        mock(obj).delete_table(config['temp_table'])
      end
      Bigquery.transaction(config, schema, processor_count, &control)
    end

    # With auto_create_dataset, the backup dataset is created from the
    # source dataset's reference instead of being looked up.
    def test_replace_backup_auto_create_dataset
      config = least_config.merge('mode' => 'replace_backup', 'dataset_old' => 'dataset_old', 'table_old' => 'table_old', 'temp_table' => 'temp_table', 'auto_create_dataset' => true)
      task = Bigquery.configure(config, schema, processor_count)
      any_instance_of(BigqueryClient) do |obj|
        mock(obj).create_dataset(config['dataset'])
        mock(obj).create_dataset(config['dataset_old'], reference: config['dataset'])
        mock(obj).create_table_if_not_exists(config['table'])
        mock(obj).create_table_if_not_exists(config['temp_table'], options: {"expiration_time"=>nil})
        mock(obj).create_table_if_not_exists(config['table_old'], dataset: config['dataset_old'])
        mock(obj).get_table_or_partition(config['table'])
        mock(obj).copy(config['table'], config['table_old'], config['dataset_old'])
        mock(obj).copy(config['temp_table'], config['table'], write_disposition: 'WRITE_TRUNCATE')
        mock(obj).delete_table(config['temp_table'])
      end
      Bigquery.transaction(config, schema, processor_count, &control)
    end

    def test_replace_backup_with_partitioning
      config = least_config.merge('mode' => 'replace_backup', 'table' => 'table$20160929', 'dataset_old' => 'dataset_old', 'table_old' => 'table_old$20160929', 'temp_table' => 'temp_table', 'auto_create_table' => true)
      task = Bigquery.configure(config, schema, processor_count)
      any_instance_of(BigqueryClient) do |obj|
        mock(obj).get_dataset(config['dataset'])
        mock(obj).get_dataset(config['dataset_old'])
        mock(obj).create_table_if_not_exists(config['temp_table'], options: {"expiration_time"=>nil})
        mock(obj).create_table_if_not_exists(config['table'])
        mock(obj).create_table_if_not_exists(config['table_old'], dataset: config['dataset_old'])
        mock(obj).get_table_or_partition(config['table'])
        mock(obj).copy(config['table'], config['table_old'], config['dataset_old'])
        mock(obj).copy(config['temp_table'], config['table'], write_disposition: 'WRITE_TRUNCATE')
        mock(obj).delete_table(config['temp_table'])
      end
      Bigquery.transaction(config, schema, processor_count, &control)
    end
  end

  sub_test_case "append" do
    # append loads into temp_table, then copies with WRITE_APPEND.
    def test_append
      config = least_config.merge('mode' => 'append')
      task = Bigquery.configure(config, schema, processor_count)
      any_instance_of(BigqueryClient) do |obj|
        mock(obj).get_dataset(config['dataset'])
        mock(obj).create_table_if_not_exists(config['temp_table'], options: {"expiration_time"=>nil})
        mock(obj).create_table_if_not_exists(config['table'])
        mock(obj).copy(config['temp_table'], config['table'], write_disposition: 'WRITE_APPEND')
        mock(obj).delete_table(config['temp_table'])
      end
      Bigquery.transaction(config, schema, processor_count, &control)
    end

    def test_append_with_partitioning
      config = least_config.merge('mode' => 'append', 'table' => 'table$20160929', 'auto_create_table' => true)
      task = Bigquery.configure(config, schema, processor_count)
      any_instance_of(BigqueryClient) do |obj|
        mock(obj).get_dataset(config['dataset'])
        mock(obj).create_table_if_not_exists(config['temp_table'], options: {"expiration_time"=>nil})
        mock(obj).create_table_if_not_exists(config['table'])
        mock(obj).copy(config['temp_table'], config['table'], write_disposition: 'WRITE_APPEND')
        mock(obj).delete_table(config['temp_table'])
      end
      Bigquery.transaction(config, schema, processor_count, &control)
    end
  end
end
end
end
| ruby | MIT | e4cc3fcbb6a481a71874c357a947b298ceb2bc23 | 2026-01-04T17:51:46.760044Z | false |
embulk/embulk-output-bigquery | https://github.com/embulk/embulk-output-bigquery/blob/e4cc3fcbb6a481a71874c357a947b298ceb2bc23/test/test_configure.rb | test/test_configure.rb | require_relative './helper'
require 'embulk/output/bigquery'
Bigquery = Embulk::Output::Bigquery unless defined?(Bigquery)
module Embulk
class Output::Bigquery
class TestConfigure < Test::Unit::TestCase
class << self
def startup
FileUtils.mkdir_p('tmp')
end
def shutdown
FileUtils.rm_rf('tmp')
end
end
def least_config
DataSource.new({
'project' => 'your_project_name',
'dataset' => 'your_dataset_name',
'table' => 'your_table_name',
})
end
def schema
Schema.new([
Column.new({index: 0, name: 'boolean', type: :boolean}),
Column.new({index: 1, name: 'long', type: :long}),
Column.new({index: 2, name: 'double', type: :double}),
Column.new({index: 3, name: 'string', type: :string}),
Column.new({index: 4, name: 'timestamp', type: :timestamp}),
Column.new({index: 5, name: 'json', type: :json}),
])
end
def processor_count
1
end
def test_configure_default
task = Bigquery.configure(least_config, schema, processor_count)
assert_equal "append", task['mode']
assert_equal "application_default", task['auth_method']
assert_equal nil, task['json_keyfile']
assert_equal "your_project_name", task['project']
assert_equal "your_project_name", task['destination_project']
assert_equal "your_dataset_name", task['dataset']
assert_equal nil, task['location']
assert_equal "your_table_name", task['table']
assert_equal nil, task['dataset_old']
assert_equal nil, task['table_old']
assert_equal nil, task['table_name_old']
assert_equal false, task['auto_create_dataset']
assert_equal true, task['auto_create_table']
assert_equal nil, task['schema_file']
assert_equal nil, task['template_table']
assert_equal true, task['delete_from_local_when_job_end']
assert_equal 3600, task['job_status_max_polling_time']
assert_equal 10, task['job_status_polling_interval']
assert_equal false, task['is_skip_job_result_check']
assert_equal false, task['with_rehearsal']
assert_equal 1000, task['rehearsal_counts']
assert_equal [], task['column_options']
assert_equal "UTC", task['default_timezone']
assert_equal "%Y-%m-%d %H:%M:%S.%6N", task['default_timestamp_format']
assert_equal nil, task['payload_column']
assert_equal nil, task['payload_column_index']
assert_equal 5, task['retries']
assert_equal "Embulk BigQuery plugin", task['application_name']
# assert_equal "/tmp/embulk_output_bigquery_20160228-27184-pubcn0", task['path_prefix']
assert_equal ".%d.%d", task['sequence_format']
assert_equal ".csv", task['file_ext']
assert_equal false, task['skip_file_generation']
assert_equal "NONE", task['compression']
assert_equal "CSV", task['source_format']
assert_equal 0, task['max_bad_records']
assert_equal ",", task['field_delimiter']
assert_equal "UTF-8", task['encoding']
assert_equal false, task['ignore_unknown_values']
assert_equal false, task['allow_quoted_newlines']
assert_equal nil, task['time_partitioning']
assert_equal nil, task['clustering']
assert_equal false, task['skip_load']
end
def test_mode
config = least_config.merge('mode' => 'foobar')
assert_raise { Bigquery.configure(config, schema, processor_count) }
config = least_config.merge('mode' => 'append')
assert_nothing_raised { Bigquery.configure(config, schema, processor_count) }
config = least_config.merge('mode' => 'replace')
assert_nothing_raised { Bigquery.configure(config, schema, processor_count) }
config = least_config.merge('mode' => 'delete_in_advance')
assert_nothing_raised { Bigquery.configure(config, schema, processor_count) }
config = least_config.merge('mode' => 'replace_backup')
assert_raise { Bigquery.configure(config, schema, processor_count) }
end
def test_location
config = least_config.merge('location' => 'us')
assert_nothing_raised { Bigquery.configure(config, schema, processor_count) }
config = least_config.merge('location' => 'eu')
assert_nothing_raised { Bigquery.configure(config, schema, processor_count) }
config = least_config.merge('location' => 'asia-northeast1')
assert_nothing_raised { Bigquery.configure(config, schema, processor_count) }
end
def test_dataset_table_old
task = nil
config = least_config.merge('mode' => 'replace_backup', 'table_old' => 'backup')
assert_nothing_raised { task = Bigquery.configure(config, schema, processor_count) }
assert_equal task['dataset_old'], task['dataset']
assert_equal task['table_old'], 'backup'
config = least_config.merge('mode' => 'replace_backup', 'dataset_old' => 'backup')
assert_nothing_raised { task = Bigquery.configure(config, schema, processor_count) }
assert_equal task['dataset_old'], 'backup'
assert_equal task['table_old'], task['table']
end
def test_auth_method
config = least_config.merge('auth_method' => 'foobar')
assert_raise { Bigquery.configure(config, schema, processor_count) }
config = least_config.merge('auth_method' => 'json_key').tap {|h| h.delete('json_keyfile') }
assert_raise { Bigquery.configure(config, schema, processor_count) }
config = least_config.merge('auth_method' => 'json_key', 'json_keyfile' => "#{EXAMPLE_ROOT}/json_key.json")
assert_nothing_raised { Bigquery.configure(config, schema, processor_count) }
config = least_config.merge('auth_method' => 'compute_engine')
assert_nothing_raised { Bigquery.configure(config, schema, processor_count) }
end
def test_json_keyfile
json_keyfile = "#{EXAMPLE_ROOT}/json_key.json"
config = least_config.merge('auth_method' => 'json_key', 'json_keyfile' => json_keyfile).tap {|h| h.delete('project') }
task = Bigquery.configure(config, schema, processor_count)
assert_not_equal nil, task['project'] # project is obtained from json_keyfile if available
config = least_config.merge('auth_method' => 'json_key', 'json_keyfile' => { 'content' => File.read(json_keyfile) }).tap {|h| h.delete('project') }
task = Bigquery.configure(config, schema, processor_count)
assert_not_equal nil, task['project'] # project is obtained from json_keyfile if available
config = least_config.merge('auth_method' => 'json_key', 'json_keyfile' => { 'content' => 'not a json' })
assert_raise { Bigquery.configure(config, schema, processor_count) }
end
def test_payload_column
config = least_config.merge('payload_column' => schema.first.name, 'auto_create_table' => false, 'mode' => 'append_direct')
task = Bigquery.configure(config, schema, processor_count)
assert_equal task['payload_column_index'], 0
config = least_config.merge('payload_column' => 'not_exist', 'auto_create_table' => false, 'mode' => 'append_direct')
assert_raise { Bigquery.configure(config, schema, processor_count) }
end
def test_payload_column_index
config = least_config.merge('payload_column_index' => 0, 'auto_create_table' => false, 'mode' => 'append_direct')
assert_nothing_raised { Bigquery.configure(config, schema, processor_count) }
config = least_config.merge('payload_column_index' => -1, 'auto_create_table' => false, 'mode' => 'append_direct')
assert_raise { Bigquery.configure(config, schema, processor_count) }
config = least_config.merge('payload_column_index' => schema.size, 'auto_create_table' => false, 'mode' => 'append_direct')
assert_raise { Bigquery.configure(config, schema, processor_count) }
end
def test_auto_create_table_with_payload_column
config = least_config.merge('auto_create_table' => true, 'payload_column' => 'json')
assert_raise { Bigquery.configure(config, schema, processor_count) }
config = least_config.merge('auto_create_table' => true, 'payload_column' => 'json', 'schema_file' => "#{EXAMPLE_ROOT}/schema.json")
assert_nothing_raised { Bigquery.configure(config, schema, processor_count) }
config = least_config.merge('auto_create_table' => true, 'payload_column' => 'json', 'template_table' => 'foo')
assert_nothing_raised { Bigquery.configure(config, schema, processor_count) }
end
def test_auto_create_table_with_payload_column_index
config = least_config.merge('auto_create_table' => true, 'payload_column_index' => 0)
assert_raise { Bigquery.configure(config, schema, processor_count) }
config = least_config.merge('auto_create_table' => true, 'payload_column_index' => 0, 'schema_file' => "#{EXAMPLE_ROOT}/schema.json")
assert_nothing_raised { Bigquery.configure(config, schema, processor_count) }
config = least_config.merge('auto_create_table' => true, 'payload_column_index' => 0, 'template_table' => 'foo')
assert_nothing_raised { Bigquery.configure(config, schema, processor_count) }
end
def test_schema_file
config = least_config.merge('schema_file' => "#{EXAMPLE_ROOT}/schema.json")
assert_nothing_raised { Bigquery.configure(config, schema, processor_count) }
config = least_config.merge('schema_file' => "not_found.json")
assert_raise { Bigquery.configure(config, schema, processor_count) }
File.write("tmp/bad_schema.json", "not_a_json")
config = least_config.merge('schema_file' => "tmp/bad_schema.json")
assert_raise { Bigquery.configure(config, schema, processor_count) }
end
def test_source_format
config = least_config.merge('source_format' => 'csv')
assert_nothing_raised { Bigquery.configure(config, schema, processor_count) }
config = least_config.merge('source_format' => 'jsonl')
assert_nothing_raised { Bigquery.configure(config, schema, processor_count) }
config = least_config.merge('source_format' => 'newline_delimited_json')
assert_nothing_raised { Bigquery.configure(config, schema, processor_count) }
config = least_config.merge('source_format' => 'foobar')
assert_raise { Bigquery.configure(config, schema, processor_count) }
end
def test_compression
config = least_config.merge('compression' => 'gzip')
assert_nothing_raised { Bigquery.configure(config, schema, processor_count) }
config = least_config.merge('compression' => 'none')
assert_nothing_raised { Bigquery.configure(config, schema, processor_count) }
config = least_config.merge('compression' => 'foobar')
assert_raise { Bigquery.configure(config, schema, processor_count) }
end
def test_file_ext
config = least_config.merge('source_format' => 'csv', 'compression' => 'gzip')
task = Bigquery.configure(config, schema, processor_count)
assert_equal '.csv.gz', task['file_ext']
config = least_config.merge('source_format' => 'NEWLINE_DELIMITED_JSON', 'compression' => 'gzip')
task = Bigquery.configure(config, schema, processor_count)
assert_equal '.jsonl.gz', task['file_ext']
config = least_config.merge('source_format' => 'csv', 'compression' => 'none')
task = Bigquery.configure(config, schema, processor_count)
assert_equal '.csv', task['file_ext']
config = least_config.merge('source_format' => 'NEWLINE_DELIMITED_JSON', 'compression' => 'none')
task = Bigquery.configure(config, schema, processor_count)
assert_equal '.jsonl', task['file_ext']
config = least_config.merge('file_ext' => '.foo')
task = Bigquery.configure(config, schema, processor_count)
assert_equal '.foo', task['file_ext']
end
def test_time_partitioning
config = least_config.merge('time_partitioning' => {'type' => 'DAY'})
assert_nothing_raised { Bigquery.configure(config, schema, processor_count) }
config = least_config.merge('time_partitioning' => {'foo' => 'bar'})
assert_raise { Bigquery.configure(config, schema, processor_count) }
config = least_config.merge('table' => 'table')
task = Bigquery.configure(config, schema, processor_count)
assert_equal nil, task['time_partitioning']
config = least_config.merge('table' => 'table_name$20160912')
task = Bigquery.configure(config, schema, processor_count)
assert_equal 'DAY', task['time_partitioning']['type']
end
def test_range_partitioning
config = least_config.merge('range_partitioning' => {'field' => 'foo', 'range' => { 'start' => 1, 'end' => 3, 'interval' => 1 }})
assert_nothing_raised { Bigquery.configure(config, schema, processor_count) }
# field is required
config = least_config.merge('range_partitioning' => {'range' => { 'start' => 1, 'end' => 2, 'interval' => 1 }})
assert_raise { Bigquery.configure(config, schema, processor_count) }
# range is required
config = least_config.merge('range_partitioning' => {'field' => 'foo'})
assert_raise { Bigquery.configure(config, schema, processor_count) }
# range.start is required
config = least_config.merge('range_partitioning' => {'field' => 'foo', 'range' => { 'end' => 2, 'interval' => 1 }})
assert_raise { Bigquery.configure(config, schema, processor_count) }
# range.end is required
config = least_config.merge('range_partitioning' => {'field' => 'foo', 'range' => { 'start' => 1, 'interval' => 1 }})
assert_raise { Bigquery.configure(config, schema, processor_count) }
# range.interval is required
config = least_config.merge('range_partitioning' => {'field' => 'foo', 'range' => { 'start' => 1, 'end' => 2 }})
assert_raise { Bigquery.configure(config, schema, processor_count) }
# range.start + range.interval should be less than range.end
config = least_config.merge('range_partitioning' => {'field' => 'foo', 'range' => { 'start' => 1, 'end' => 2, 'interval' => 2 }})
assert_raise { Bigquery.configure(config, schema, processor_count) }
end
def test_time_and_range_partitioning_error
config = least_config.merge('time_partitioning' => {'type' => 'DAY'}, 'range_partitioning' => {'field' => 'foo', 'range' => { 'start' => 1, 'end' => 2, 'interval' => 1 }})
assert_raise { Bigquery.configure(config, schema, processor_count) }
config = least_config.merge('table' => 'table_name$20160912', 'range_partitioning' => {'field' => 'foo', 'range' => { 'start' => 1, 'end' => 2, 'interval' => 1 }})
assert_raise { Bigquery.configure(config, schema, processor_count) }
end
# `clustering` requires a `fields` key; an empty hash is rejected.
def test_clustering
  with_fields = least_config.merge('clustering' => {'fields' => ['field_a']})
  assert_nothing_raised { Bigquery.configure(with_fields, schema, processor_count) }

  without_fields = least_config.merge('clustering' => {})
  assert_raise { Bigquery.configure(without_fields, schema, processor_count) }
end
# Only ALLOW_FIELD_ADDITION / ALLOW_FIELD_RELAXATION are valid
# `schema_update_options` entries; anything else raises.
def test_schema_update_options
  known = least_config.merge('schema_update_options' => ['ALLOW_FIELD_ADDITION', 'ALLOW_FIELD_RELAXATION'])
  assert_nothing_raised { Bigquery.configure(known, schema, processor_count) }

  unknown = least_config.merge('schema_update_options' => ['FOO'])
  assert_raise { Bigquery.configure(unknown, schema, processor_count) }
end
# `destination_project` overrides where tables live, while `project` keeps
# its configured value. Fixed: the original called Bigquery.configure twice
# (once for the task, once inside assert_nothing_raised); configure once and
# assert on the captured task.
def test_destination_project
  config = least_config.merge('destination_project' => 'your_destination_project_name')
  task = nil
  assert_nothing_raised { task = Bigquery.configure(config, schema, processor_count) }
  assert_equal 'your_destination_project_name', task['destination_project']
  assert_equal 'your_project_name', task['project']
end
end
end
end
| ruby | MIT | e4cc3fcbb6a481a71874c357a947b298ceb2bc23 | 2026-01-04T17:51:46.760044Z | false |
embulk/embulk-output-bigquery | https://github.com/embulk/embulk-output-bigquery/blob/e4cc3fcbb6a481a71874c357a947b298ceb2bc23/test/test_helper.rb | test/test_helper.rb | require_relative './helper'
require 'embulk/output/bigquery/helper'
module Embulk
class Output::Bigquery
# Unit tests for Embulk::Output::Bigquery::Helper: partition-decorator
# handling, Embulk->BigQuery type mapping, schema-to-fields conversion and
# load-job-id generation.
class TestHelper < Test::Unit::TestCase
  class << self
    # Working directory for file fixtures (used by test_create_load_job_id).
    def startup
      FileUtils.mkdir_p('tmp')
    end

    def shutdown
      FileUtils.rm_rf('tmp')
    end
  end

  # NOTE(review): this method is not prefixed with `test_`, so Test::Unit
  # never runs it — confirm whether that is intentional before renaming.
  def has_partition_decorator?
    assert_true Helper.has_partition_decorator?('table$20160929')
    assert_false Helper.has_partition_decorator?('table')
  end

  # NOTE(review): also not `test_`-prefixed (never runs). The call target
  # `Helper.chomp_partition_decorator?` (with trailing `?`) looks suspicious
  # for a method returning a table name — verify against Helper's actual API.
  def chomp_partition_decorator
    assert_equal 'table', Helper.chomp_partition_decorator?('table$20160929')
    assert_equal 'table', Helper.chomp_partition_decorator?('table')
  end

  # NOTE(review): not `test_`-prefixed, never runs.
  def bq_type_from_embulk_type
    assert_equal 'BOOLEAN', Helper.bq_type_from_embulk_type(:boolean)
    assert_equal 'STRING', Helper.bq_type_from_embulk_type(:string)
    assert_equal 'FLOAT', Helper.bq_type_from_embulk_type(:double)
    assert_equal 'STRING', Helper.bq_type_from_embulk_type(:string)
    assert_equal 'TIMESTAMP', Helper.bq_type_from_embulk_type(:timestamp)
    assert_equal 'STRING', Helper.bq_type_from_embulk_type(:json)
  end

  sub_test_case "fields_from_embulk_schema" do
    # Without column_options every Embulk type maps to its default BQ type.
    def test_fields_from_embulk_schema_without_column_options
      schema = Schema.new([
        Column.new({index: 0, name: 'boolean', type: :boolean}),
        Column.new({index: 1, name: 'long', type: :long}),
        Column.new({index: 2, name: 'double', type: :double}),
        Column.new({index: 3, name: 'string', type: :string}),
        Column.new({index: 4, name: 'timestamp', type: :timestamp}),
        Column.new({index: 5, name: 'json', type: :json}),
      ])
      expected = [
        {name: 'boolean', type: 'BOOLEAN'},
        {name: 'long', type: 'INTEGER'},
        {name: 'double', type: 'FLOAT'},
        {name: 'string', type: 'STRING'},
        {name: 'timestamp', type: 'TIMESTAMP'},
        {name: 'json', type: 'STRING'},
      ]
      fields = Helper.fields_from_embulk_schema({}, schema)
      assert_equal expected, fields
    end

    # column_options override the default mapping per column, including
    # mode/description and nested RECORD fields.
    def test_fields_from_embulk_schema_with_column_options
      schema = Schema.new([
        Column.new({index: 0, name: 'boolean', type: :boolean}),
        Column.new({index: 1, name: 'long', type: :long}),
        Column.new({index: 2, name: 'double', type: :double}),
        Column.new({index: 3, name: 'string', type: :string}),
        Column.new({index: 4, name: 'timestamp', type: :timestamp}),
        Column.new({index: 5, name: 'date', type: :timestamp}),
        Column.new({index: 6, name: 'datetime', type: :timestamp}),
        Column.new({index: 7, name: 'json', type: :json}),
      ])
      task = {
        'column_options' => [
          {'name' => 'boolean', 'type' => 'STRING', 'mode' => 'REQUIRED', 'description' => 'hoge'},
          {'name' => 'long', 'type' => 'STRING'},
          {'name' => 'double', 'type' => 'STRING'},
          {'name' => 'string', 'type' => 'INTEGER'},
          {'name' => 'timestamp', 'type' => 'INTEGER'},
          {'name' => 'date', 'type' => 'DATE'},
          {'name' => 'datetime', 'type' => 'DATETIME'},
          {'name' => 'json', 'type' => 'RECORD', 'fields' => [
            { 'name' => 'key1', 'type' => 'STRING' },
          ]},
        ],
      }
      expected = [
        {name: 'boolean', type: 'STRING', mode: 'REQUIRED', description: 'hoge'},
        {name: 'long', type: 'STRING'},
        {name: 'double', type: 'STRING'},
        {name: 'string', type: 'INTEGER'},
        {name: 'timestamp', type: 'INTEGER'},
        {name: 'date', type: 'DATE'},
        {name: 'datetime', type: 'DATETIME'},
        {name: 'json', type: 'RECORD', fields: [
          {name: 'key1', type: 'STRING'},
        ]},
      ]
      fields = Helper.fields_from_embulk_schema(task, schema)
      assert_equal expected, fields
    end
  end

  # The load job id is deterministic: same task/file/fields must hash to the
  # same id (pinned below), so retried uploads dedupe server-side.
  def test_create_load_job_id
    task = {
      'dataset' => 'your_dataset_name',
      'location' => 'asia-northeast1',
      'table' => 'your_table_name',
      'source_format' => 'CSV',
      'max_bad_records' => nil,
      'field_delimiter' => ',',
      'encoding' => 'UTF-8',
      'ignore_unknown_values' => nil,
      'allow_quoted_newlines' => nil,
    }
    fields = {
      name: 'a', type: 'STRING',
    }
    File.write("tmp/your_file_name", "foobarbaz")
    job_id = Helper.create_load_job_id(task, 'tmp/your_file_name', fields)
    assert job_id.is_a?(String)
    assert_equal 'embulk_load_job_2abaf528b69987db0224e52bbd1f0eec', job_id
  end
end
end
end
| ruby | MIT | e4cc3fcbb6a481a71874c357a947b298ceb2bc23 | 2026-01-04T17:51:46.760044Z | false |
embulk/embulk-output-bigquery | https://github.com/embulk/embulk-output-bigquery/blob/e4cc3fcbb6a481a71874c357a947b298ceb2bc23/test/test_bigquery_client.rb | test/test_bigquery_client.rb | require_relative './helper'
require 'embulk/output/bigquery/bigquery_client'
require 'csv'
# 1. Prepare example/your-project-000.json
# 2. bunlde exec ruby test/test_bigquery_client.rb
unless File.exist?(JSON_KEYFILE)
puts "#{JSON_KEYFILE} is not found. Skip test/test_bigquery_client.rb"
else
module Embulk
class Output::Bigquery
# Integration tests for BigqueryClient. They talk to a real BigQuery
# project, so the surrounding file only defines them when
# example/your-project-000.json exists.
#
# Fix: test_load wrote "tmp/your_file_name.csv" but then loaded
# "/tmp/your_file_name.csv" — an absolute path that was never created. The
# load now uses the same relative path that was written.
class TestBigqueryClient < Test::Unit::TestCase
  class << self
    # Working directory for generated CSV fixtures.
    def startup
      FileUtils.mkdir_p('tmp')
    end

    def shutdown
      FileUtils.rm_rf('tmp')
    end
  end

  # Build a client with the minimal task config, optionally overridden.
  def client(task = {})
    task = least_task.merge(task)
    BigqueryClient.new(task, schema)
  end

  def least_task
    {
      'project' => JSON.parse(File.read(JSON_KEYFILE))['project_id'],
      'destination_project' => JSON.parse(File.read(JSON_KEYFILE))['project_id'],
      'dataset' => 'your_dataset_name',
      'table' => 'your_table_name',
      'auth_method' => 'json_key',
      'json_keyfile' => File.read(JSON_KEYFILE),
      'retries' => 3,
      'timeout_sec' => 300,
      'open_timeout_sec' => 300,
      'job_status_max_polling_time' => 3600,
      'job_status_polling_interval' => 10,
      'source_format' => 'CSV'
    }
  end

  # One column of every Embulk type, shared by all cases below.
  def schema
    Schema.new([
      Column.new({index: 0, name: 'boolean', type: :boolean}),
      Column.new({index: 1, name: 'long', type: :long}),
      Column.new({index: 2, name: 'double', type: :double}),
      Column.new({index: 3, name: 'string', type: :string}),
      Column.new({index: 4, name: 'timestamp', type: :timestamp}),
      Column.new({index: 5, name: 'json', type: :json}),
    ])
  end

  # A single row matching #schema.
  def record
    [true,1,1.1,'1',Time.parse("2016-02-26 +00:00"),'{"foo":"bar"}']
  end

  sub_test_case "client" do
    def test_json_keyfile
      assert_nothing_raised { BigqueryClient.new(least_task, schema).client }
    end
  end

  sub_test_case "create_dataset" do
    def test_create_dataset
      assert_nothing_raised { client.create_dataset }
    end

    def test_create_dataset_with_reference
      response = client.get_dataset
      any_instance_of(BigqueryClient) do |obj|
        mock(obj).get_dataset('your_dataset_name') { response }
      end
      assert_nothing_raised do
        client.create_dataset('your_dataset_name_old', reference: 'your_dataset_name')
      end
    end
  end

  sub_test_case "get_dataset" do
    def test_get_dataset
      assert_nothing_raised { client.create_dataset }
      assert_nothing_raised { client.get_dataset }
    end

    def test_get_dataset_not_found
      assert_raise(NotFoundError) {
        client.get_dataset('something_does_not_exist')
      }
    end
  end

  sub_test_case "create_table_if_not_exists" do
    def test_create_table_if_not_exists
      client.delete_table('your_table_name')
      assert_nothing_raised { client.create_table_if_not_exists('your_table_name') }
    end

    def test_create_table_if_not_exists_already_exists
      assert_nothing_raised { client.create_table_if_not_exists('your_table_name') }
    end

    def test_create_partitioned_table
      client.delete_table('your_table_name')
      assert_nothing_raised do
        client.create_table_if_not_exists('your_table_name$20160929', options:{
          'time_partitioning' => {'type'=>'DAY', 'expiration_ms'=>1000}
        })
      end
    end
  end

  sub_test_case "delete_table" do
    def test_delete_table
      client.create_table_if_not_exists('your_table_name')
      assert_nothing_raised { client.delete_table('your_table_name') }
    end

    # Deleting an absent table must not raise.
    def test_delete_table_not_found
      assert_nothing_raised { client.delete_table('your_table_name') }
    end

    def test_delete_partitioned_table
      client.create_table_if_not_exists('your_table_name')
      assert_nothing_raised { client.delete_table('your_table_name$20160929') }
    end
  end

  sub_test_case "get_table" do
    def test_get_table
      client.create_table_if_not_exists('your_table_name')
      assert_nothing_raised { client.get_table('your_table_name') }
    end

    def test_get_table_not_found
      client.delete_table('your_table_name')
      assert_raise(NotFoundError) {
        client.get_table('your_table_name')
      }
    end

    def test_get_partitioned_table
      client.create_table_if_not_exists('your_table_name')
      assert_nothing_raised { client.get_table('your_table_name$20160929') }
    end
  end

  sub_test_case "delete_partition" do
    def test_delete_partition
      client.delete_table('your_table_name')
      client.create_table_if_not_exists('your_table_name$20160929')
      assert_nothing_raised { client.delete_partition('your_table_name$20160929') }
    ensure
      client.delete_table('your_table_name')
    end

    def test_delete_partition_of_non_partitioned_table
      client.delete_table('your_table_name')
      client.create_table_if_not_exists('your_table_name')
      assert_raise { client.delete_partition('your_table_name$20160929') }
    ensure
      client.delete_table('your_table_name')
    end

    def test_delete_partition_table_not_found
      assert_nothing_raised { client.delete_partition('your_table_name$20160929') }
    end
  end

  sub_test_case "fields" do
    def test_fields_from_table
      client.create_table_if_not_exists('your_table_name')
      fields = client.fields_from_table('your_table_name')
      expected = [
        {:type=>"BOOLEAN", :name=>"boolean"},
        {:type=>"INTEGER", :name=>"long"},
        {:type=>"FLOAT", :name=>"double"},
        {:type=>"STRING", :name=>"string"},
        {:type=>"TIMESTAMP", :name=>"timestamp"},
        {:type=>"STRING", :name=>"json"},
      ]
      assert_equal expected, fields
    end
  end

  sub_test_case "copy" do
    def test_create_table_if_not_exists
      client.create_table_if_not_exists('your_table_name')
      assert_nothing_raised { client.copy('your_table_name', 'your_table_name_old') }
    end
  end

  sub_test_case "load" do
    def test_load
      client.create_table_if_not_exists('your_table_name')
      File.write("tmp/your_file_name.csv", record.to_csv)
      # Load the file that was just written (was "/tmp/your_file_name.csv",
      # which never existed).
      assert_nothing_raised { client.load("tmp/your_file_name.csv", 'your_table_name') }
    end
  end
end
end
end
| ruby | MIT | e4cc3fcbb6a481a71874c357a947b298ceb2bc23 | 2026-01-04T17:51:46.760044Z | false |
embulk/embulk-output-bigquery | https://github.com/embulk/embulk-output-bigquery/blob/e4cc3fcbb6a481a71874c357a947b298ceb2bc23/test/helper.rb | test/helper.rb | #!/usr/bin/env ruby
require 'bundler/setup'
require 'test/unit'
require 'test/unit/rr'

# Embulk 0.10.x introduced new bootstrap mechanism.
# https://github.com/embulk/embulk/blob/641f35fec064cca7b1a7314d634a4b64ef8637f1/embulk-ruby/test/vanilla/run-test.rb#L8-L13
# Initialize Embulk's self-contained dependency class loader (JRuby/Java
# interop) before requiring the Ruby side of Embulk.
static_initializer = Java::org.embulk.EmbulkDependencyClassLoader.staticInitializer().useSelfContainedJarFiles()
static_initializer.java_send :initialize

require 'embulk/java/bootstrap'
require 'embulk'

# Silence Embulk's logger during the test run.
Embulk.logger = Embulk::Logger.new('/dev/null')

# Path constants shared by all test files.
APP_ROOT = File.expand_path('../', __dir__)
EXAMPLE_ROOT = File.expand_path('../example', __dir__)
TEST_ROOT = File.expand_path(File.dirname(__FILE__))
# Service-account key; its presence gates the integration tests.
JSON_KEYFILE = File.join(EXAMPLE_ROOT, 'your-project-000.json')
| ruby | MIT | e4cc3fcbb6a481a71874c357a947b298ceb2bc23 | 2026-01-04T17:51:46.760044Z | false |
embulk/embulk-output-bigquery | https://github.com/embulk/embulk-output-bigquery/blob/e4cc3fcbb6a481a71874c357a947b298ceb2bc23/test/test_example.rb | test/test_example.rb | require_relative './helper'
# 1. Prepare example/your-project-000.json
# 2. embulk bundle
# 3. bundle exec ruby test/test_example.rb
unless File.exist?(JSON_KEYFILE)
puts "#{JSON_KEYFILE} is not found. Skip test/test_example.rb"
else
# End-to-end tests: one dynamically defined test per example/config_*.yml,
# each running a full `embulk run` and asserting on its exit status.
class TestExample < Test::Unit::TestCase
  # Locate an embulk executable, preferring the user install, then a local
  # jar, then the system-wide binary, finally whatever is on PATH.
  def embulk_path
    if File.exist?("#{ENV['HOME']}/.embulk/bin/embulk")
      "#{ENV['HOME']}/.embulk/bin/embulk"
    elsif File.exist?("#{ENV['PWD']}/embulk.jar")
      "#{ENV['PWD']}/embulk.jar"
    elsif File.exist?("/usr/local/bin/embulk")
      "/usr/local/bin/embulk"
    else
      "embulk"
    end
  end

  # Run one config through embulk; returns system()'s result (true on exit 0,
  # false on non-zero, nil if the command could not be executed).
  # NOTE(review): Bundler.with_clean_env is deprecated/removed in Bundler
  # 2.1+ in favor of with_unbundled_env — confirm the pinned Bundler version.
  def embulk_run(config_path)
    ::Bundler.with_clean_env do
      cmd = "#{embulk_path} run -X page_size=1 -b . -l trace #{config_path}"
      puts "=" * 64
      puts cmd
      system(cmd)
    end
  end

  # Symlinked configs are skipped (they alias other configs); sort for a
  # stable test order.
  files = Dir.glob("#{APP_ROOT}/example/config_*.yml").reject {|file| File.symlink?(file) }.sort
  files.each do |config_path|
    # config_expose_errors.yml is expected to FAIL; everything else must pass.
    if %w[
      config_expose_errors.yml
    ].include?(File.basename(config_path))
      define_method(:"test_#{File.basename(config_path, ".yml")}") do
        assert_false embulk_run(config_path)
      end
    else
      define_method(:"test_#{File.basename(config_path, ".yml")}") do
        assert_true embulk_run(config_path)
      end
    end
  end
end
end
| ruby | MIT | e4cc3fcbb6a481a71874c357a947b298ceb2bc23 | 2026-01-04T17:51:46.760044Z | false |
embulk/embulk-output-bigquery | https://github.com/embulk/embulk-output-bigquery/blob/e4cc3fcbb6a481a71874c357a947b298ceb2bc23/lib/embulk/output/bigquery.rb | lib/embulk/output/bigquery.rb | require 'uri'
require 'json'
require 'tempfile'
require 'fileutils'
require 'securerandom'
require_relative 'bigquery/bigquery_client'
require_relative 'bigquery/gcs_client'
require_relative 'bigquery/file_writer'
require_relative 'bigquery/value_converter_factory'
module Embulk
module Output
class Bigquery < OutputPlugin
Plugin.register_output('bigquery', self)
class Error < StandardError; end
# To support configuration like below as org.embulk.spi.unit.LocalFile
#
# json_keyfile:
# content: |
class LocalFile
  # Resolve an org.embulk.spi.unit.LocalFile-style value: a String is
  # treated as a path and read from disk; a Hash yields its 'content'
  # entry; any other input produces nil.
  # @return JSON string
  def self.load(v)
    case v
    when String # path
      File.read(File.expand_path(v))
    when Hash
      v['content']
    end
  end
end
# Parse and validate the plugin configuration into a task hash.
# Raises ConfigError on any invalid or inconsistent option combination.
# Fix: `sdk_log_level` was resolved with eval on a config-derived string;
# use Logger.const_get instead (same result, no eval).
def self.configure(config, schema, task_count)
  task = {
    'mode' => config.param('mode', :string, :default => 'append'),
    'auth_method' => config.param('auth_method', :string, :default => 'application_default'),
    'json_keyfile' => config.param('json_keyfile', LocalFile, :default => nil),
    'project' => config.param('project', :string, :default => nil),
    'destination_project' => config.param('destination_project', :string, :default => nil),
    'dataset' => config.param('dataset', :string),
    'location' => config.param('location', :string, :default => nil),
    'table' => config.param('table', :string),
    'dataset_old' => config.param('dataset_old', :string, :default => nil),
    'table_old' => config.param('table_old', :string, :default => nil),
    'table_name_old' => config.param('table_name_old', :string, :default => nil), # lower version compatibility
    'auto_create_dataset' => config.param('auto_create_dataset', :bool, :default => false),
    'auto_create_table' => config.param('auto_create_table', :bool, :default => true),
    'schema_file' => config.param('schema_file', :string, :default => nil),
    'template_table' => config.param('template_table', :string, :default => nil),
    'delete_from_local_when_job_end' => config.param('delete_from_local_when_job_end', :bool, :default => true),
    'job_status_max_polling_time' => config.param('job_status_max_polling_time', :integer, :default => 3600),
    'job_status_polling_interval' => config.param('job_status_polling_interval', :integer, :default => 10),
    'is_skip_job_result_check' => config.param('is_skip_job_result_check', :bool, :default => false),
    'with_rehearsal' => config.param('with_rehearsal', :bool, :default => false),
    'rehearsal_counts' => config.param('rehearsal_counts', :integer, :default => 1000),
    'abort_on_error' => config.param('abort_on_error', :bool, :default => nil),
    'progress_log_interval' => config.param('progress_log_interval', :float, :default => nil),
    'column_options' => config.param('column_options', :array, :default => []),
    'default_timezone' => config.param('default_timezone', :string, :default => ValueConverterFactory::DEFAULT_TIMEZONE),
    'default_timestamp_format' => config.param('default_timestamp_format', :string, :default => ValueConverterFactory::DEFAULT_TIMESTAMP_FORMAT),
    'payload_column' => config.param('payload_column', :string, :default => nil),
    'payload_column_index' => config.param('payload_column_index', :integer, :default => nil),
    'description' => config.param('description', :string, :default => nil),
    'open_timeout_sec' => config.param('open_timeout_sec', :integer, :default => nil),
    'timeout_sec' => config.param('timeout_sec', :integer, :default => nil), # google-api-ruby-client < v0.11.0
    'send_timeout_sec' => config.param('send_timeout_sec', :integer, :default => nil), # google-api-ruby-client >= v0.11.0
    'read_timeout_sec' => config.param('read_timeout_sec', :integer, :default => nil), # google-api-ruby-client >= v0.11.0
    'retries' => config.param('retries', :integer, :default => 5),
    'application_name' => config.param('application_name', :string, :default => 'Embulk BigQuery plugin'),
    'sdk_log_level' => config.param('sdk_log_level', :string, :default => nil),
    'path_prefix' => config.param('path_prefix', :string, :default => nil),
    'sequence_format' => config.param('sequence_format', :string, :default => '.%d.%d'),
    'file_ext' => config.param('file_ext', :string, :default => nil),
    'skip_file_generation' => config.param('skip_file_generation', :bool, :default => false),
    'compression' => config.param('compression', :string, :default => 'NONE'),
    'gcs_bucket' => config.param('gcs_bucket', :string, :default => nil),
    'auto_create_gcs_bucket' => config.param('auto_create_gcs_bucket', :bool, :default => false),
    'source_format' => config.param('source_format', :string, :default => 'CSV'),
    'max_bad_records' => config.param('max_bad_records', :integer, :default => 0),
    'field_delimiter' => config.param('field_delimiter', :string, :default => ','),
    'encoding' => config.param('encoding', :string, :default => 'UTF-8'),
    'ignore_unknown_values' => config.param('ignore_unknown_values', :bool, :default => false),
    'allow_quoted_newlines' => config.param('allow_quoted_newlines', :bool, :default => false),
    'time_partitioning' => config.param('time_partitioning', :hash, :default => nil),
    'range_partitioning' => config.param('range_partitioning', :hash, :default => nil),
    'clustering' => config.param('clustering', :hash, :default => nil), # google-api-ruby-client >= v0.21.0
    'schema_update_options' => config.param('schema_update_options', :array, :default => nil),
    'temporary_table_expiration' => config.param('temporary_table_expiration', :integer, :default => nil),

    # for debug
    'skip_load' => config.param('skip_load', :bool, :default => false),
    'temp_table' => config.param('temp_table', :string, :default => nil),
    'rehearsal_table' => config.param('rehearsal_table', :string, :default => nil),
  }

  now = Time.now

  # --- mode validation ---
  task['mode'] = task['mode'].downcase
  unless %w[append append_direct replace delete_in_advance replace_backup].include?(task['mode'])
    raise ConfigError.new "`mode` must be one of append, append_direct, replace, delete_in_advance, replace_backup"
  end
  if %w[append replace delete_in_advance replace_backup].include?(task['mode']) and !task['auto_create_table']
    raise ConfigError.new "`mode: #{task['mode']}` requires `auto_create_table: true`"
  end

  if task['mode'] == 'replace_backup'
    task['table_old'] ||= task['table_name_old'] # for lower version compatibility
    if task['dataset_old'].nil? and task['table_old'].nil?
      raise ConfigError.new "`mode: replace_backup` requires either of `dataset_old` or `table_old`"
    end
    task['dataset_old'] ||= task['dataset']
    task['table_old'] ||= task['table']
  end

  # Table names may contain strftime directives, expanded once per run.
  if task['table_old']
    task['table_old'] = now.strftime(task['table_old'])
  end
  if task['table']
    task['table'] = now.strftime(task['table'])
  end

  # --- auth validation ---
  task['auth_method'] = task['auth_method'].downcase
  unless %w[json_key service_account authorized_user compute_engine application_default].include?(task['auth_method'])
    raise ConfigError.new "`auth_method` must be one of service_account (or json_key), authorized_user, compute_engine, application_default"
  end
  if (task['auth_method'] == 'service_account' or task['auth_method'] == 'json_key') and task['json_keyfile'].nil?
    raise ConfigError.new "`json_keyfile` is required for auth_method: service_account (or json_key)"
  end

  # `project` defaults to the keyfile's project_id when a keyfile is given.
  if task['json_keyfile']
    begin
      json_key = JSON.parse(task['json_keyfile'])
      task['project'] ||= json_key['project_id']
    rescue => e
      raise ConfigError.new "Parsing 'json_keyfile' failed with error: #{e.class} #{e.message}"
    end
  end
  if task['project'].nil?
    raise ConfigError.new "Required field \"project\" is not set"
  end
  task['destination_project'] ||= task['project']

  # --- payload column / schema source validation ---
  if (task['payload_column'] or task['payload_column_index']) and task['auto_create_table']
    if task['schema_file'].nil? and task['template_table'].nil?
      raise ConfigError.new "Cannot guess table schema from Embulk schema with `payload_column` or `payload_column_index`. Either of `schema_file` or `template_table` is required for auto_create_table true"
    end
  end

  if task['payload_column_index']
    if task['payload_column_index'] < 0 || schema.size <= task['payload_column_index']
      raise ConfigError.new "payload_column_index #{task['payload_column_index']} is out of schema size"
    end
  elsif task['payload_column']
    task['payload_column_index'] = schema.find_index {|c| c[:name] == task['payload_column'] }
    if task['payload_column_index'].nil?
      raise ConfigError.new "payload_column #{task['payload_column']} does not exist in schema"
    end
  end

  if task['schema_file']
    unless File.exist?(task['schema_file'])
      raise ConfigError.new "schema_file #{task['schema_file']} is not found"
    end
    begin
      JSON.parse(File.read(task['schema_file']))
    rescue => e
      raise ConfigError.new "Parsing 'schema_file' #{task['schema_file']} failed with error: #{e.class} #{e.message}"
    end
  end

  # Default the intermediate-file prefix to a fresh temp path.
  if task['path_prefix'].nil?
    task['path_prefix'] = Tempfile.create('embulk_output_bigquery_') {|fp| fp.path }
  end

  # --- file format / compression ---
  task['source_format'] = task['source_format'].upcase
  if task['source_format'] == 'JSONL'
    task['source_format'] = 'NEWLINE_DELIMITED_JSON'
  end
  unless %w[CSV NEWLINE_DELIMITED_JSON].include?(task['source_format'])
    raise ConfigError.new "`source_format` must be CSV or NEWLINE_DELIMITED_JSON (JSONL)"
  end

  task['compression'] = task['compression'].upcase
  unless %w[GZIP NONE].include?(task['compression'])
    raise ConfigError.new "`compression` must be GZIP or NONE"
  end

  if task['file_ext'].nil?
    case task['source_format']
    when 'CSV'
      file_ext = '.csv'
    else # newline_delimited_json
      file_ext = '.jsonl'
    end
    case task['compression']
    when 'GZIP'
      file_ext << '.gz'
    end
    task['file_ext'] = file_ext
  end

  # Unique suffix so concurrent runs never collide on temp/rehearsal tables.
  unique_name = SecureRandom.uuid.gsub('-', '_')

  if %w[replace replace_backup append].include?(task['mode'])
    task['temp_table'] ||= "LOAD_TEMP_#{unique_name}_#{task['table']}"
  else
    task['temp_table'] = nil
  end

  if task['with_rehearsal']
    task['rehearsal_table'] ||= "LOAD_REHEARSAL_#{unique_name}_#{task['table']}"
  end

  if task['sdk_log_level']
    # const_get instead of eval: resolves e.g. "debug" -> ::Logger::DEBUG.
    Google::Apis.logger.level = ::Logger.const_get(task['sdk_log_level'].upcase)
  end

  if task['abort_on_error'].nil?
    task['abort_on_error'] = (task['max_bad_records'] == 0)
  end

  # --- partitioning / clustering validation ---
  if task['time_partitioning'] && task['range_partitioning']
    raise ConfigError.new "`time_partitioning` and `range_partitioning` cannot be used at the same time"
  end

  if task['time_partitioning']
    unless task['time_partitioning']['type']
      raise ConfigError.new "`time_partitioning` must have `type` key"
    end
  end

  # A "$YYYYMMDD" decorator on the table name implies day partitioning.
  if Helper.has_partition_decorator?(task['table'])
    if task['range_partitioning']
      raise ConfigError.new "Partition decorators(`#{task['table']}`) don't support `range_partition`"
    end
    task['time_partitioning'] = {'type' => 'DAY'}
  end

  if task['range_partitioning']
    unless task['range_partitioning']['field']
      raise ConfigError.new "`range_partitioning` must have `field` key"
    end
    unless task['range_partitioning']['range']
      raise ConfigError.new "`range_partitioning` must have `range` key"
    end
    range = task['range_partitioning']['range']
    unless range['start']
      raise ConfigError.new "`range_partitioning` must have `range.start` key"
    end
    unless range['start'].is_a?(Integer)
      raise ConfigError.new "`range_partitioning.range.start` must be an integer"
    end
    unless range['end']
      raise ConfigError.new "`range_partitioning` must have `range.end` key"
    end
    unless range['end'].is_a?(Integer)
      raise ConfigError.new "`range_partitioning.range.end` must be an integer"
    end
    unless range['interval']
      raise ConfigError.new "`range_partitioning` must have `range.interval` key"
    end
    unless range['interval'].is_a?(Integer)
      raise ConfigError.new "`range_partitioning.range.interval` must be an integer"
    end
    if range['start'] + range['interval'] >= range['end']
      raise ConfigError.new "`range_partitioning.range.start` + `range_partitioning.range.interval` must be less than `range_partitioning.range.end`"
    end
  end

  if task['clustering']
    unless task['clustering']['fields']
      raise ConfigError.new "`clustering` must have `fields` key"
    end
  end

  if task['schema_update_options']
    task['schema_update_options'].each do |schema_update_option|
      unless %w[ALLOW_FIELD_ADDITION ALLOW_FIELD_RELAXATION].include?(schema_update_option)
        raise ConfigError.new "`schema_update_options` must contain either of ALLOW_FIELD_ADDITION or ALLOW_FIELD_RELAXATION or both"
      end
    end
  end

  task
end
# Class-level shared state: the BigqueryClient and value converters are set
# up once in .transaction and read elsewhere; the rehearsal thread is
# installed by #load_rehearsal and joined in .transaction.
class << self
  attr_reader :bigquery, :converters
  attr_accessor :rehearsal_thread
end
# Summarize a finished transaction as a hash of row counts: rows read from
# input, rows reported by the load jobs, rows that landed in the target
# (taken from the temp table when one is used) and the difference.
def self.transaction_report(task, responses)
  num_input_rows =
    if file_writers.empty?
      0
    else
      file_writers.map(&:num_rows).inject(:+)
    end
  # Without job-result checking only the input count is meaningful.
  return {'num_input_rows' => num_input_rows} if task['is_skip_job_result_check']

  num_response_rows = responses.inject(0) do |sum, response|
    sum + (response ? response.statistics.load.output_rows.to_i : 0)
  end
  num_output_rows =
    if task['temp_table']
      bigquery.get_table_or_partition(task['temp_table']).num_rows.to_i
    else
      num_response_rows
    end

  {
    'num_input_rows' => num_input_rows,
    'num_response_rows' => num_response_rows,
    'num_output_rows' => num_output_rows,
    'num_rejected_rows' => num_input_rows - num_output_rows,
  }
end
# Create (or verify the existence of) the datasets and tables a run needs,
# according to `mode` and the auto_create_* flags.
def self.auto_create(task, bigquery)
  if task['auto_create_dataset']
    bigquery.create_dataset(task['dataset'])
  else
    bigquery.get_dataset(task['dataset']) # raises NotFoundError
  end

  # replace_backup may write the backup into a different dataset.
  if task['mode'] == 'replace_backup' and task['dataset_old'] != task['dataset']
    if task['auto_create_dataset']
      bigquery.create_dataset(task['dataset_old'], reference: task['dataset'])
    else
      bigquery.get_dataset(task['dataset_old']) # raises NotFoundError
    end
  end

  # Expiration applies only to the temporary load table.
  temp_table_expiration = task['temporary_table_expiration']
  temp_options = {'expiration_time' => temp_table_expiration}

  case task['mode']
  when 'delete_in_advance'
    bigquery.delete_table_or_partition(task['table'])
    bigquery.create_table_if_not_exists(task['table'])
  when 'replace'
    bigquery.create_table_if_not_exists(task['temp_table'], options: temp_options)
    bigquery.create_table_if_not_exists(task['table']) # needs for when task['table'] is a partition
  when 'append'
    bigquery.create_table_if_not_exists(task['temp_table'], options: temp_options)
    bigquery.create_table_if_not_exists(task['table']) # needs for when task['table'] is a partition
  when 'replace_backup'
    bigquery.create_table_if_not_exists(task['temp_table'], options: temp_options)
    bigquery.create_table_if_not_exists(task['table'])
    bigquery.create_table_if_not_exists(task['table_old'], dataset: task['dataset_old']) # needs for when a partition
  else # append_direct
    if task['auto_create_table']
      bigquery.create_table_if_not_exists(task['table'])
    else
      bigquery.get_table(task['table']) # raises NotFoundError
    end
  end
end
# Orchestrate a whole output run: configure, create tables, run the input
# tasks (which write local files), load the files into BigQuery (directly
# or via GCS), verify row counts, perform the mode-specific copy steps, and
# finally clean up the temp table and local files.
def self.transaction(config, schema, task_count, &control)
  task = self.configure(config, schema, task_count)

  @task = task
  @schema = schema
  @bigquery = BigqueryClient.new(task, schema)
  @converters = ValueConverterFactory.create_converters(task, schema)

  self.auto_create(@task, @bigquery)

  begin
    paths = []
    if task['skip_file_generation']
      yield(task) # does nothing, but seems it has to be called
      path_pattern = "#{task['path_prefix']}*#{task['file_ext']}"
      Embulk.logger.info { "embulk-output-bigquery: Skip file generation. Get paths from `#{path_pattern}`" }
      paths = Dir.glob(path_pattern)
    else
      # NOTE(review): task_reports is assigned but never used.
      task_reports = yield(task) # generates local files

      ios = file_writers.map(&:io)
      paths = ios.map(&:path)
      ios.each do |io|
        Embulk.logger.debug { "close #{io.path}" }
        io.close rescue nil
      end
    end

    # Wait for the background rehearsal load (if any) before the real load.
    if rehearsal_thread
      rehearsal_thread.join
    end

    if task['skip_load'] # only for debug
      Embulk.logger.info { "embulk-output-bigquery: Skip load" }
    else
      if !paths.empty?
        target_table = task['temp_table'] ? task['temp_table'] : task['table']
        if bucket = task['gcs_bucket']
          # Stage files in GCS under random object names, load, then delete.
          gcs = GcsClient.new(task)
          gcs.insert_temporary_bucket(bucket) if task['auto_create_gcs_bucket']
          objects = paths.size.times.map { SecureRandom.uuid.to_s }
          gcs.insert_objects(paths, objects: objects, bucket: bucket)
          object_uris = objects.map {|object| URI.join("gs://#{bucket}", object).to_s }
          responses = bigquery.load_from_gcs(object_uris, target_table)
          objects.each {|object| gcs.delete_object(object, bucket: bucket) }
        else
          responses = bigquery.load_in_parallel(paths, target_table)
        end
      else
        responses = []
      end
      transaction_report = self.transaction_report(task, responses)
      Embulk.logger.info { "embulk-output-bigquery: transaction_report: #{transaction_report.to_json}" }

      if task['abort_on_error'] && !task['is_skip_job_result_check']
        if transaction_report['num_input_rows'] != transaction_report['num_output_rows']
          raise Error, "ABORT: `num_input_rows (#{transaction_report['num_input_rows']})` and " \
              "`num_output_rows (#{transaction_report['num_output_rows']})` does not match"
        end
      end

      # Back up the current target before it is replaced; skip silently when
      # the target does not exist yet.
      if task['mode'] == 'replace_backup'
        begin
          bigquery.get_table_or_partition(task['table'])
          bigquery.copy(task['table'], task['table_old'], task['dataset_old'])
        rescue NotFoundError
        end
      end

      # Promote the temp table into the target.
      if task['temp_table']
        if task['mode'] == 'append'
          bigquery.copy(task['temp_table'], task['table'], write_disposition: 'WRITE_APPEND')
        else # replace or replace_backup
          bigquery.copy(task['temp_table'], task['table'], write_disposition: 'WRITE_TRUNCATE')
        end
      end
    end
  ensure
    begin
      if task['temp_table'] # append or replace or replace_backup
        bigquery.delete_table(task['temp_table'])
      end
    ensure
      # Local intermediate files are removed (or kept, for debugging) even
      # when the temp-table cleanup itself fails.
      if task['delete_from_local_when_job_end']
        paths.each do |path|
          Embulk.logger.info { "embulk-output-bigquery: delete #{path}" }
          File.unlink(path) rescue nil
        end
      else
        paths.each do |path|
          if File.exist?(path)
            Embulk.logger.info { "embulk-output-bigquery: keep #{path}" }
          end
        end
      end
    end
  end

  # this is for -c next_config option, add some paramters for next execution if wants
  next_config_diff = {}
  return next_config_diff
end
# Registry of FileWriter instances created across output threads. Guarded by
# a mutex because add_file_writer is called from multiple output threads.
@file_writers_mutex = Mutex.new
@file_writers = Array.new

def self.reset_file_writers
  @file_writers = Array.new
end

def self.file_writers
  @file_writers
end

# Register a writer so .transaction can enumerate/close all of them.
def self.add_file_writer(file_writer)
  @file_writers_mutex.synchronize do
    @file_writers << file_writer
  end
end
# Thread-local key under which each output thread caches its FileWriter.
FILE_WRITER_KEY = :embulk_output_bigquery_file_writer

# Create one FileWriter object for one output thread, that is, share among tasks.
# Close these shared objects in transaction.
# This is mainly to suppress (or control by -X max_threads) number of files, which
# equals to number of concurrency to load in parallel, when number of input tasks is many
#
# #file_writer must be called at only #add because threads in other methods
# are different (called from non-output threads). Note also that #add method
# of the same task instance would be called in different output threads
def file_writer
  # Memoized per output thread; created lazily on first use and registered
  # with the class-level registry so .transaction can close it.
  return Thread.current[FILE_WRITER_KEY] if Thread.current[FILE_WRITER_KEY]
  file_writer = FileWriter.new(@task, @schema, @index, self.class.converters)
  self.class.add_file_writer(file_writer)
  Thread.current[FILE_WRITER_KEY] = file_writer
end
# instance is created on each task
def initialize(task, schema, index)
super
if task['with_rehearsal'] and @index == 0
@rehearsaled = false
end
end
# called for each page in each task
def close
end
# called for each page in each task
def add(page)
return if task['skip_file_generation']
num_rows = file_writer.add(page)
if task['with_rehearsal'] and @index == 0 and !@rehearsaled
if num_rows >= task['rehearsal_counts']
load_rehearsal
@rehearsaled = true
end
end
end
def load_rehearsal
bigquery = self.class.bigquery
Embulk.logger.info { "embulk-output-bigquery: Rehearsal started" }
io = file_writer.close # need to close once for gzip
rehearsal_path = "#{io.path}.rehearsal"
Embulk.logger.debug { "embulk_output_bigquery: cp #{io.path} #{rehearsal_path}" }
FileUtils.cp(io.path, rehearsal_path)
file_writer.reopen
self.class.rehearsal_thread = Thread.new do
begin
bigquery.create_table_if_not_exists(task['rehearsal_table'])
response = bigquery.load(rehearsal_path, task['rehearsal_table'])
num_output_rows = response ? response.statistics.load.output_rows.to_i : 0
Embulk.logger.info { "embulk-output-bigquery: Loaded rehearsal #{num_output_rows}" }
ensure
Embulk.logger.debug { "embulk_output_bigquery: delete #{rehearsal_path}" }
File.unlink(rehearsal_path) rescue nil
bigquery.delete_table(task['rehearsal_table'])
end
end
end
def finish
end
def abort
end
# called after processing all pages in each task, returns a task_report
def commit
{}
end
end
end
end
| ruby | MIT | e4cc3fcbb6a481a71874c357a947b298ceb2bc23 | 2026-01-04T17:51:46.760044Z | false |
embulk/embulk-output-bigquery | https://github.com/embulk/embulk-output-bigquery/blob/e4cc3fcbb6a481a71874c357a947b298ceb2bc23/lib/embulk/output/bigquery/google_client.rb | lib/embulk/output/bigquery/google_client.rb | require_relative 'auth'
module Embulk
module Output
class Bigquery < OutputPlugin
class Error < StandardError; end
class JobTimeoutError < Error; end
class NotFoundError < Error; end
class BackendError < Error; end
class InternalError < Error; end
class RateLimitExceeded < Error; end
class GoogleClient
def initialize(task, scope, client_class)
@task = task
@scope = scope
@auth = Auth.new(task, scope)
@client_class = client_class
end
def client
return @cached_client if @cached_client && @cached_client_expiration > Time.now
client = @client_class.new
client.client_options.application_name = @task['application_name']
client.request_options.retries = @task['retries']
if client.request_options.respond_to?(:timeout_sec)
client.request_options.timeout_sec = @task['timeout_sec'] || 300
client.request_options.open_timeout_sec = @task['open_timeout_sec'] || 300
else # google-api-ruby-client >= v0.11.0
if @task['timeout_sec']
Embulk.logger.warn { "embulk-output-bigquery: timeout_sec is deprecated in google-api-ruby-client >= v0.11.0. Use read_timeout_sec instead" }
end
client.client_options.open_timeout_sec = @task['open_timeout_sec'] || 300 # default: 60
client.client_options.send_timeout_sec = @task['send_timeout_sec'] || 300 # default: 120
client.client_options.read_timeout_sec = @task['read_timeout_sec'] || @task['timeout_sec'] || 300 # default: 60
end
Embulk.logger.debug { "embulk-output-bigquery: client_options: #{client.client_options.to_h}" }
Embulk.logger.debug { "embulk-output-bigquery: request_options: #{client.request_options.to_h}" }
client.authorization = @auth.authenticate
@cached_client_expiration = Time.now + 1800
@cached_client = client
end
# google-api-ruby-client itself has a retry feature, but it does not retry with SocketException
def with_network_retry(&block)
retries = 0
begin
yield
rescue ::Java::Java.net.SocketException, ::Java::Java.net.ConnectException, ::Java::JavaxNetSsl::SSLException => e
retry_messages = [
'Broken pipe',
'Connection reset',
'Connection timed out',
'Connection or outbound has closed',
]
if retry_messages.select { |x| e.message.include?(x) }.empty?
raise e
else
if retries < @task['retries']
retries += 1
Embulk.logger.warn { "embulk-output-bigquery: retry \##{retries}, #{e.class} #{e.message}" }
retry
else
Embulk.logger.error { "embulk-output-bigquery: retry exhausted \##{retries}, #{e.class} #{e.message}" }
raise e
end
end
end
end
end
end
end
end
| ruby | MIT | e4cc3fcbb6a481a71874c357a947b298ceb2bc23 | 2026-01-04T17:51:46.760044Z | false |
embulk/embulk-output-bigquery | https://github.com/embulk/embulk-output-bigquery/blob/e4cc3fcbb6a481a71874c357a947b298ceb2bc23/lib/embulk/output/bigquery/value_converter_factory.rb | lib/embulk/output/bigquery/value_converter_factory.rb | require 'time'
require 'time_with_zone'
require 'json'
require_relative 'helper'
module Embulk
module Output
class Bigquery < OutputPlugin
class ValueConverterFactory
class NotSupportedType < StandardError; end
class TypeCastError < StandardError; end
# ref. https://cloud.google.com/bigquery/preparing-data-for-bigquery
DEFAULT_TIMESTAMP_FORMAT = "%Y-%m-%d %H:%M:%S.%6N" # BigQuery timestamp format
DEFAULT_TIMEZONE = "UTC"
# @param [Hash] task
# @option task [String] default_timestamp_format
# @option task [String] default_timezone
# @option task [Hash] column_options user defined column types
# @param [Schema] schema embulk defined column types
# @return [Array] an arary whose key is column_index, and value is its converter (Proc)
def self.create_converters(task, schema)
column_options_map = Helper.column_options_map(task['column_options'])
default_timestamp_format = task['default_timestamp_format'] || DEFAULT_TIMESTAMP_FORMAT
default_timezone = task['default_timezone'] || DEFAULT_TIMEZONE
schema.map do |column|
column_name = column[:name]
embulk_type = column[:type]
column_option = column_options_map[column_name] || {}
self.new(
embulk_type, column_option['type'],
timestamp_format: column_option['timestamp_format'],
timezone: column_option['timezone'],
strict: column_option['strict'],
default_timestamp_format: default_timestamp_format,
default_timezone: default_timezone,
).create_converter
end
end
attr_reader :embulk_type, :type, :timestamp_format, :timezone, :zone_offset, :strict
def initialize(
embulk_type, type = nil,
timestamp_format: nil, timezone: nil, strict: nil,
default_timestamp_format: DEFAULT_TIMESTAMP_FORMAT,
default_timezone: DEFAULT_TIMEZONE
)
@embulk_type = embulk_type
@type = (type || Helper.bq_type_from_embulk_type(embulk_type)).upcase
@timestamp_format = timestamp_format
@default_timestamp_format = default_timestamp_format
@timezone = timezone || default_timezone
@zone_offset = TimeWithZone.zone_offset(@timezone)
@strict = strict.nil? ? true : strict
end
def create_converter
case embulk_type
when :boolean then boolean_converter
when :long then long_converter
when :double then double_converter
when :string then string_converter
when :timestamp then timestamp_converter
when :json then json_converter
else raise NotSupportedType, "embulk type #{embulk_type} is not supported"
end
end
def with_typecast_error(val)
begin
yield(val)
rescue => e
raise_typecast_error(val)
end
end
def raise_typecast_error(val)
message = "cannot cast #{@embulk_type} `#{val}` to #{@type}"
if @strict
raise TypeCastError, message
else
Embulk.logger.trace { message }
return nil
end
end
def boolean_converter
case type
when 'BOOLEAN'
Proc.new {|val|
val
}
when 'STRING'
Proc.new {|val|
next nil if val.nil?
val.to_s
}
else
raise NotSupportedType, "cannot take column type #{type} for boolean column"
end
end
def long_converter
case type
when 'BOOLEAN'
Proc.new {|val|
next nil if val.nil?
next true if val == 1
next false if val == 0
raise_typecast_error(val)
}
when 'INTEGER'
Proc.new {|val|
val
}
when 'FLOAT'
Proc.new {|val|
next nil if val.nil?
val.to_f
}
when 'STRING'
Proc.new {|val|
next nil if val.nil?
val.to_s
}
when 'TIMESTAMP'
Proc.new {|val|
next nil if val.nil?
val # BigQuery supports UNIX timestamp
}
else
raise NotSupportedType, "cannot take column type #{type} for long column"
end
end
def double_converter
case type
when 'INTEGER'
Proc.new {|val|
next nil if val.nil?
val.to_i
}
when 'FLOAT'
Proc.new {|val|
val
}
when 'STRING'
Proc.new {|val|
next nil if val.nil?
val.to_s
}
when 'TIMESTAMP'
Proc.new {|val|
next nil if val.nil?
val # BigQuery supports UNIX timestamp
}
else
raise NotSupportedType, "cannot take column type #{type} for double column"
end
end
def string_converter
case type
when 'BOOLEAN'
Proc.new {|val|
next nil if val.nil?
next true if val == 'true'.freeze
next false if val == 'false'.freeze
raise_typecast_error(val)
}
when 'INTEGER'
Proc.new {|val|
next nil if val.nil?
with_typecast_error(val) do |val|
Integer(val)
end
}
when 'FLOAT'
Proc.new {|val|
next nil if val.nil?
with_typecast_error(val) do |val|
Float(val)
end
}
when 'STRING'
Proc.new {|val|
val
}
when 'TIMESTAMP'
if @timestamp_format
Proc.new {|val|
next nil if val.nil?
with_typecast_error(val) do |val|
TimeWithZone.set_zone_offset(Time.strptime(val, @timestamp_format), zone_offset).strftime("%Y-%m-%d %H:%M:%S.%6N %:z")
end
}
else
Proc.new {|val|
next nil if val.nil?
val # Users must care of BQ timestamp format
}
end
when 'DATE'
Proc.new {|val|
next nil if val.nil?
with_typecast_error(val) do |val|
TimeWithZone.set_zone_offset(Time.parse(val), zone_offset).strftime("%Y-%m-%d")
end
}
when 'DATETIME'
if @timestamp_format
Proc.new {|val|
next nil if val.nil?
with_typecast_error(val) do |val|
Time.strptime(val, @timestamp_format).strftime("%Y-%m-%d %H:%M:%S.%6N")
end
}
else
Proc.new {|val|
next nil if val.nil?
val # Users must care of BQ timestamp format
}
end
when 'TIME'
# TimeWithZone doesn't affect any change to the time value
Proc.new {|val|
next nil if val.nil?
with_typecast_error(val) do |val|
TimeWithZone.set_zone_offset(Time.parse(val), zone_offset).strftime("%H:%M:%S.%6N")
end
}
when 'RECORD'
Proc.new {|val|
next nil if val.nil?
with_typecast_error(val) do |val|
JSON.parse(val)
end
}
else
raise NotSupportedType, "cannot take column type #{type} for string column"
end
end
def timestamp_converter
case type
when 'INTEGER'
Proc.new {|val|
next nil if val.nil?
val.to_i
}
when 'FLOAT'
Proc.new {|val|
next nil if val.nil?
val.to_f
}
when 'STRING'
_timestamp_format = @timestamp_format || @default_timestamp_format
Proc.new {|val|
next nil if val.nil?
with_typecast_error(val) do |val|
val.localtime(zone_offset).strftime(_timestamp_format)
end
}
when 'TIMESTAMP'
Proc.new {|val|
next nil if val.nil?
val.strftime("%Y-%m-%d %H:%M:%S.%6N %:z")
}
when 'DATE'
Proc.new {|val|
next nil if val.nil?
val.localtime(zone_offset).strftime("%Y-%m-%d")
}
when 'DATETIME'
Proc.new {|val|
next nil if val.nil?
val.localtime(zone_offset).strftime("%Y-%m-%d %H:%M:%S.%6N")
}
when 'TIME'
Proc.new {|val|
next nil if val.nil?
val.localtime(zone_offset).strftime("%H:%M:%S.%6N")
}
else
raise NotSupportedType, "cannot take column type #{type} for timestamp column"
end
end
# ToDo: recursive conversion
def json_converter
case type
when 'STRING'
Proc.new {|val|
next nil if val.nil?
val.to_json
}
when 'RECORD'
Proc.new {|val|
val
}
when 'JSON'
Proc.new {|val|
val
}
else
raise NotSupportedType, "cannot take column type #{type} for json column"
end
end
end
end
end
end
| ruby | MIT | e4cc3fcbb6a481a71874c357a947b298ceb2bc23 | 2026-01-04T17:51:46.760044Z | false |
embulk/embulk-output-bigquery | https://github.com/embulk/embulk-output-bigquery/blob/e4cc3fcbb6a481a71874c357a947b298ceb2bc23/lib/embulk/output/bigquery/file_writer.rb | lib/embulk/output/bigquery/file_writer.rb | require 'zlib'
require 'json'
require 'csv'
require_relative 'value_converter_factory'
module Embulk
module Output
class Bigquery < OutputPlugin
class FileWriter
attr_reader :num_rows
def initialize(task, schema, index, converters = nil)
@task = task
@schema = schema
@index = index
@converters = converters || ValueConverterFactory.create_converters(task, schema)
@num_rows = 0
if @task['progress_log_interval']
@progress_log_interval = @task['progress_log_interval']
@progress_log_timer = Time.now
@previous_num_rows = 0
end
if @task['payload_column_index']
@payload_column_index = @task['payload_column_index']
@formatter_proc = self.method(:to_payload)
else
case @task['source_format'].downcase
when 'csv'
@formatter_proc = self.method(:to_csv)
else
@formatter_proc = self.method(:to_jsonl)
end
end
end
def io
return @io if @io
path = sprintf(
"#{@task['path_prefix']}#{@task['sequence_format']}#{@task['file_ext']}",
Process.pid, Thread.current.object_id
)
if File.exist?(path)
Embulk.logger.warn { "embulk-output-bigquery: unlink already existing #{path}" }
File.unlink(path) rescue nil
end
Embulk.logger.info { "embulk-output-bigquery: create #{path}" }
@io = open(path, 'w')
end
def open(path, mode = 'w')
file_io = File.open(path, mode)
case @task['compression'].downcase
when 'gzip'
io = Zlib::GzipWriter.new(file_io)
else
io = file_io
end
io
end
def close
io.close rescue nil
io
end
def reopen
@io = open(io.path, 'a')
end
def to_payload(record)
"#{record[@payload_column_index]}\n"
end
def to_csv(record)
record.map.with_index do |value, column_index|
@converters[column_index].call(value)
end.to_csv
end
def to_jsonl(record)
hash = {}
column_names = @schema.names
record.each_with_index do |value, column_index|
column_name = column_names[column_index]
hash[column_name] = @converters[column_index].call(value)
end
"#{hash.to_json}\n"
end
def num_format(number)
number.to_s.gsub(/(\d)(?=(\d{3})+(?!\d))/, '\1,')
end
def add(page)
_io = io
# I once tried to split IO writing into another IO thread using SizedQueue
# However, it resulted in worse performance, so I removed the codes.
page.each do |record|
Embulk.logger.trace { "embulk-output-bigquery: record #{record}" }
formatted_record = @formatter_proc.call(record)
Embulk.logger.trace { "embulk-output-bigquery: formatted_record #{formatted_record.chomp}" }
_io.write formatted_record
@num_rows += 1
end
show_progress if @task['progress_log_interval']
@num_rows
end
private
def show_progress
now = Time.now
if @progress_log_timer < now - @progress_log_interval
speed = ((@num_rows - @previous_num_rows) / (now - @progress_log_timer).to_f).round(1)
@progress_log_timer = now
@previous_num_rows = @num_rows
Embulk.logger.info { "embulk-output-bigquery: num_rows #{num_format(@num_rows)} (#{num_format(speed)} rows/sec)" }
end
end
end
end
end
end
| ruby | MIT | e4cc3fcbb6a481a71874c357a947b298ceb2bc23 | 2026-01-04T17:51:46.760044Z | false |
embulk/embulk-output-bigquery | https://github.com/embulk/embulk-output-bigquery/blob/e4cc3fcbb6a481a71874c357a947b298ceb2bc23/lib/embulk/output/bigquery/gcs_client.rb | lib/embulk/output/bigquery/gcs_client.rb | require 'uri'
require 'java'
require 'google/apis/storage_v1'
require_relative 'google_client'
require_relative 'helper'
# ToDo: Use https://cloud.google.com/storage/docs/streaming if google-api-ruby-client supports streaming transfers
# ToDo: Tests are not written because this implementation will probably entirely changed on supporting streaming transfers
module Embulk
module Output
class Bigquery < OutputPlugin
class GcsClient < GoogleClient
def initialize(task)
scope = "https://www.googleapis.com/auth/cloud-platform"
client_class = Google::Apis::StorageV1::StorageService
super(task, scope, client_class)
@project = @task['project']
@destination_project = @task['destination_project']
@bucket = @task['gcs_bucket']
@location = @task['location']
end
def insert_temporary_bucket(bucket = nil)
bucket ||= @bucket
begin
Embulk.logger.info { "embulk-output-bigquery: Insert bucket... #{@destination_project}:#{bucket}" }
body = {
name: bucket,
lifecycle: {
rule: [
{
action: {
type: "Delete",
},
condition: {
age: 1,
}
},
]
}
}
if @location
body[:location] = @location
end
opts = {}
Embulk.logger.debug { "embulk-output-bigquery: insert_temporary_bucket(#{@project}, #{body}, #{opts})" }
with_network_retry { client.insert_bucket(@project, body, **opts) }
rescue Google::Apis::ServerError, Google::Apis::ClientError, Google::Apis::AuthorizationError => e
if e.status_code == 409 && /conflict:/ =~ e.message
# ignore 'Already Exists' error
return nil
end
response = {status_code: e.status_code, message: e.message, error_class: e.class}
Embulk.logger.error {
"embulk-output-bigquery: insert_temporary_bucket(#{@project}, #{body}, #{opts}), response:#{response}"
}
raise Error, "failed to insert bucket #{@destination_project}:#{bucket}, response:#{response}"
end
end
def insert_object(path, object: nil, bucket: nil)
bucket ||= @bucket
object ||= path
object = object.start_with?('/') ? object[1..-1] : object
object_uri = URI.join("gs://#{bucket}", object).to_s
started = Time.now
begin
Embulk.logger.info { "embulk-output-bigquery: Insert object... #{path} => #{@destination_project}:#{object_uri}" }
body = {
name: object,
}
opts = {
upload_source: path,
content_type: 'application/octet-stream'
}
Embulk.logger.debug { "embulk-output-bigquery: insert_object(#{bucket}, #{body}, #{opts})" }
# memo: gcs is strongly consistent for insert (read-after-write). ref: https://cloud.google.com/storage/docs/consistency
with_network_retry { client.insert_object(bucket, body, **opts) }
rescue Google::Apis::ServerError, Google::Apis::ClientError, Google::Apis::AuthorizationError => e
response = {status_code: e.status_code, message: e.message, error_class: e.class}
Embulk.logger.error {
"embulk-output-bigquery: insert_object(#{bucket}, #{body}, #{opts}), response:#{response}"
}
raise Error, "failed to insert object #{@destination_project}:#{object_uri}, response:#{response}"
end
end
def insert_objects(paths, objects: nil, bucket: nil)
return [] if paths.empty?
bucket ||= @bucket
objects ||= paths
raise "number of paths and objects are different" if paths.size != objects.size
responses = []
paths.each_with_index do |path, idx|
object = objects[idx]
responses << insert_object(path, object: object, bucket: bucket)
end
responses
end
def delete_object(object, bucket: nil)
bucket ||= @bucket
object = object.start_with?('/') ? object[1..-1] : object
object_uri = URI.join("gs://#{bucket}", object).to_s
begin
Embulk.logger.info { "embulk-output-bigquery: Delete object... #{@destination_project}:#{object_uri}" }
opts = {}
Embulk.logger.debug { "embulk-output-bigquery: delete_object(#{bucket}, #{object}, #{opts})" }
response = with_network_retry { client.delete_object(bucket, object, **opts) }
rescue Google::Apis::ServerError, Google::Apis::ClientError, Google::Apis::AuthorizationError => e
if e.status_code == 404 # ignore 'notFound' error
return nil
end
response = {status_code: e.status_code, message: e.message, error_class: e.class}
Embulk.logger.error {
"embulk-output-bigquery: delete_object(#{bucket}, #{object}, #{opts}), response:#{response}"
}
raise Error, "failed to delete object #{@destination_project}:#{object_uri}, response:#{response}"
end
end
end
end
end
end
| ruby | MIT | e4cc3fcbb6a481a71874c357a947b298ceb2bc23 | 2026-01-04T17:51:46.760044Z | false |
embulk/embulk-output-bigquery | https://github.com/embulk/embulk-output-bigquery/blob/e4cc3fcbb6a481a71874c357a947b298ceb2bc23/lib/embulk/output/bigquery/helper.rb | lib/embulk/output/bigquery/helper.rb | require 'digest/md5'
require 'securerandom'
module Embulk
module Output
class Bigquery < OutputPlugin
class Helper
PARTITION_DECORATOR_REGEXP = /\$.+\z/
def self.field_partitioning?(task)
(task['time_partitioning'] || {}).key?('field')
end
def self.has_partition_decorator?(table_name)
!!(table_name =~ PARTITION_DECORATOR_REGEXP)
end
def self.chomp_partition_decorator(table_name)
table_name.sub(PARTITION_DECORATOR_REGEXP, '')
end
def self.bq_type_from_embulk_type(embulk_type)
case embulk_type
when :boolean then 'BOOLEAN'
when :long then 'INTEGER'
when :double then 'FLOAT'
when :string then 'STRING'
when :timestamp then 'TIMESTAMP'
when :json then 'STRING' # NOTE: Default is not RECORD since it requires `fields`
else raise ArgumentError, "embulk type #{embulk_type} is not supported"
end
end
# @return [Hash] name => column_option.
# ToDo: recursively map fields?
def self.column_options_map(column_options)
(column_options || {}).map do |column_option|
[column_option['name'], column_option]
end.to_h
end
def self.fields_from_embulk_schema(task, schema)
column_options_map = self.column_options_map(task['column_options'])
schema.map do |column|
column_name = column[:name]
embulk_type = column[:type]
column_option = column_options_map[column_name] || {}
{}.tap do |field|
field[:name] = column_name
field[:type] = (column_option['type'] || bq_type_from_embulk_type(embulk_type)).upcase
field[:mode] = column_option['mode'] if column_option['mode']
field[:fields] = deep_symbolize_keys(column_option['fields']) if column_option['fields']
field[:description] = column_option['description'] if column_option['description']
end
end
end
def self.deep_symbolize_keys(obj)
if obj.is_a?(Hash)
obj.inject({}) do |options, (key, value)|
options[(key.to_sym rescue key) || key] = deep_symbolize_keys(value)
options
end
elsif obj.is_a?(Array)
obj.map {|value| deep_symbolize_keys(value) }
else
obj
end
end
def self.create_load_job_id(task, path, fields)
elements = [
Digest::MD5.file(path).hexdigest,
task['dataset'],
task['location'],
task['table'],
fields,
task['source_format'],
task['max_bad_records'],
task['field_delimiter'],
task['encoding'],
task['ignore_unknown_values'],
task['allow_quoted_newlines'],
]
str = elements.map(&:to_s).join('')
md5 = Digest::MD5.hexdigest(str)
"embulk_load_job_#{md5}"
end
end
end
end
end
| ruby | MIT | e4cc3fcbb6a481a71874c357a947b298ceb2bc23 | 2026-01-04T17:51:46.760044Z | false |
embulk/embulk-output-bigquery | https://github.com/embulk/embulk-output-bigquery/blob/e4cc3fcbb6a481a71874c357a947b298ceb2bc23/lib/embulk/output/bigquery/bigquery_client.rb | lib/embulk/output/bigquery/bigquery_client.rb | require 'google/apis/bigquery_v2'
require 'json'
require 'thwait'
require_relative 'google_client'
require_relative 'helper'
module Embulk
module Output
class Bigquery < OutputPlugin
class BigqueryClient < GoogleClient
BIGQUERY_TABLE_OPERATION_INTERVAL = 2 # https://cloud.google.com/bigquery/quotas
def initialize(task, schema, fields = nil)
scope = "https://www.googleapis.com/auth/bigquery"
client_class = Google::Apis::BigqueryV2::BigqueryService
super(task, scope, client_class)
@schema = schema
reset_fields(fields) if fields
@project = @task['project']
@destination_project = @task['destination_project']
@dataset = @task['dataset']
@location = @task['location']
@location_for_log = @location.nil? ? 'Primary location' : @location
@task['source_format'] ||= 'CSV'
@task['max_bad_records'] ||= 0
@task['field_delimiter'] ||= ','
@task['source_format'] == 'CSV' ? @task['field_delimiter'] : nil
@task['encoding'] ||= 'UTF-8'
@task['ignore_unknown_values'] = false if @task['ignore_unknown_values'].nil?
@task['allow_quoted_newlines'] = false if @task['allow_quoted_newlines'].nil?
end
def fields
return @fields if @fields
if @task['schema_file']
@fields = Helper.deep_symbolize_keys(JSON.parse(File.read(@task['schema_file'])))
elsif @task['template_table']
@fields = fields_from_table(@task['template_table'])
else
@fields = Helper.fields_from_embulk_schema(@task, @schema)
end
end
def fields_from_table(table)
response = get_table(table)
response.schema.fields.map {|field| field.to_h }
end
def reset_fields(fields = nil)
@fields = fields
self.fields
end
def with_job_retry(&block)
retries = 0
begin
yield
rescue BackendError, InternalError, RateLimitExceeded => e
if e.is_a?(RateLimitExceeded)
sleep(BIGQUERY_TABLE_OPERATION_INTERVAL)
end
if retries < @task['retries']
retries += 1
Embulk.logger.warn { "embulk-output-bigquery: retry \##{retries}, #{e.message}" }
retry
else
Embulk.logger.error { "embulk-output-bigquery: retry exhausted \##{retries}, #{e.message}" }
raise e
end
end
end
# @params gcs_patsh [Array] arary of gcs paths such as gs://bucket/path
# @return [Array] responses
def load_from_gcs(object_uris, table)
with_job_retry do
begin
# As https://cloud.google.com/bigquery/docs/managing_jobs_datasets_projects#managingjobs says,
# we should generate job_id in client code, otherwise, retrying would cause duplication
job_id = "embulk_load_job_#{SecureRandom.uuid}"
Embulk.logger.info { "embulk-output-bigquery: Load job starting... job_id:[#{job_id}] #{object_uris} => #{@destination_project}:#{@dataset}.#{table} in #{@location_for_log}" }
body = {
job_reference: {
project_id: @project,
job_id: job_id,
},
configuration: {
load: {
destination_table: {
project_id: @destination_project,
dataset_id: @dataset,
table_id: table,
},
schema: {
fields: fields,
},
write_disposition: 'WRITE_APPEND',
source_format: @task['source_format'],
max_bad_records: @task['max_bad_records'],
field_delimiter: @task['source_format'] == 'CSV' ? @task['field_delimiter'] : nil,
encoding: @task['encoding'],
ignore_unknown_values: @task['ignore_unknown_values'],
allow_quoted_newlines: @task['allow_quoted_newlines'],
source_uris: object_uris,
}
}
}
if @location
body[:job_reference][:location] = @location
end
if @task['schema_update_options']
body[:configuration][:load][:schema_update_options] = @task['schema_update_options']
end
opts = {}
Embulk.logger.debug { "embulk-output-bigquery: insert_job(#{@project}, #{body}, #{opts})" }
response = with_network_retry { client.insert_job(@project, body, **opts) }
unless @task['is_skip_job_result_check']
response = wait_load('Load', response)
end
[response]
rescue Google::Apis::ServerError, Google::Apis::ClientError, Google::Apis::AuthorizationError => e
response = {status_code: e.status_code, message: e.message, error_class: e.class}
Embulk.logger.error {
"embulk-output-bigquery: insert_job(#{@project}, #{body}, #{opts}), response:#{response}"
}
raise Error, "failed to load #{object_uris} to #{@destination_project}:#{@dataset}.#{table} in #{@location_for_log}, response:#{response}"
end
end
end
def load_in_parallel(paths, table)
return [] if paths.empty?
# You may think as, load job is a background job, so sending requests in parallel
# does not improve performance. However, with actual experiments, this parallel
# loadings drastically shortened waiting time. It looks one jobs.insert takes about 50 sec.
# NOTICE: parallel uploadings of files consumes network traffic. With 24 concurrencies
# with 100MB files consumed about 500Mbps in the experimented environment at a peak.
#
# We before had a `max_load_parallels` option, but this was not extensible for map reduce executor
# So, we dropped it. See https://github.com/embulk/embulk-output-bigquery/pull/35
responses = []
threads = []
Embulk.logger.debug { "embulk-output-bigquery: LOAD IN PARALLEL #{paths}" }
paths.each_with_index do |path, idx|
threads << Thread.new(path, idx) do |path, idx|
# I am not sure whether google-api-ruby-client is thread-safe,
# so let me create new instances for each thread for safe
bigquery = self.class.new(@task, @schema, fields)
response = bigquery.load(path, table)
[idx, response]
end
end
ThreadsWait.all_waits(*threads) do |th|
idx, response = th.value # raise errors occurred in threads
responses[idx] = response
end
responses
end
def load(path, table, write_disposition: 'WRITE_APPEND')
with_job_retry do
begin
if File.exist?(path)
# As https://cloud.google.com/bigquery/docs/managing_jobs_datasets_projects#managingjobs says,
# we should generate job_id in client code, otherwise, retrying would cause duplication
job_id = "embulk_load_job_#{SecureRandom.uuid}"
Embulk.logger.info { "embulk-output-bigquery: Load job starting... job_id:[#{job_id}] #{path} => #{@destination_project}:#{@dataset}.#{table} in #{@location_for_log}" }
else
Embulk.logger.info { "embulk-output-bigquery: Load job starting... #{path} does not exist, skipped" }
return
end
body = {
job_reference: {
project_id: @project,
job_id: job_id,
},
configuration: {
load: {
destination_table: {
project_id: @destination_project,
dataset_id: @dataset,
table_id: table,
},
schema: {
fields: fields,
},
write_disposition: write_disposition,
source_format: @task['source_format'],
max_bad_records: @task['max_bad_records'],
field_delimiter: @task['source_format'] == 'CSV' ? @task['field_delimiter'] : nil,
encoding: @task['encoding'],
ignore_unknown_values: @task['ignore_unknown_values'],
allow_quoted_newlines: @task['allow_quoted_newlines'],
}
}
}
if @location
body[:job_reference][:location] = @location
end
if @task['schema_update_options']
body[:configuration][:load][:schema_update_options] = @task['schema_update_options']
end
opts = {
upload_source: path,
content_type: "application/octet-stream",
# options: {
# retries: @task['retries'],
# timeout_sec: @task['timeout_sec'],
# open_timeout_sec: @task['open_timeout_sec']
# },
}
Embulk.logger.debug { "embulk-output-bigquery: insert_job(#{@project}, #{body}, #{opts})" }
response = with_network_retry { client.insert_job(@project, body, **opts) }
if @task['is_skip_job_result_check']
response
else
response = wait_load('Load', response)
end
rescue Google::Apis::ServerError, Google::Apis::ClientError, Google::Apis::AuthorizationError => e
response = {status_code: e.status_code, message: e.message, error_class: e.class}
Embulk.logger.error {
"embulk-output-bigquery: insert_job(#{@project}, #{body}, #{opts}), response:#{response}"
}
raise Error, "failed to load #{path} to #{@destination_project}:#{@dataset}.#{table} in #{@location_for_log}, response:#{response}"
end
end
end
def copy(source_table, destination_table, destination_dataset = nil, write_disposition: 'WRITE_TRUNCATE')
with_job_retry do
begin
destination_dataset ||= @dataset
job_id = "embulk_copy_job_#{SecureRandom.uuid}"
Embulk.logger.info {
"embulk-output-bigquery: Copy job starting... job_id:[#{job_id}] " \
"#{@destination_project}:#{@dataset}.#{source_table} => #{@destination_project}:#{destination_dataset}.#{destination_table}"
}
body = {
job_reference: {
project_id: @project,
job_id: job_id,
},
configuration: {
copy: {
create_deposition: 'CREATE_IF_NEEDED',
write_disposition: write_disposition,
source_table: {
project_id: @destination_project,
dataset_id: @dataset,
table_id: source_table,
},
destination_table: {
project_id: @destination_project,
dataset_id: destination_dataset,
table_id: destination_table,
},
}
}
}
if @location
body[:job_reference][:location] = @location
end
opts = {}
Embulk.logger.debug { "embulk-output-bigquery: insert_job(#{@project}, #{body}, #{opts})" }
response = with_network_retry { client.insert_job(@project, body, **opts) }
wait_load('Copy', response)
rescue Google::Apis::ServerError, Google::Apis::ClientError, Google::Apis::AuthorizationError => e
response = {status_code: e.status_code, message: e.message, error_class: e.class}
Embulk.logger.error {
"embulk-output-bigquery: insert_job(#{@project}, #{body}, #{opts}), response:#{response}"
}
raise Error, "failed to copy #{@destination_project}:#{@dataset}.#{source_table} " \
"to #{@destination_project}:#{destination_dataset}.#{destination_table}, response:#{response}"
end
end
end
def wait_load(kind, response)
started = Time.now
wait_interval = @task['job_status_polling_interval']
max_polling_time = @task['job_status_max_polling_time']
_response = response
while true
job_id = _response.job_reference.job_id
location = @location || _response.job_reference.location
elapsed = Time.now - started
status = _response.status.state
if status == "DONE"
Embulk.logger.info {
"embulk-output-bigquery: #{kind} job completed... " \
"job_id:[#{job_id}] elapsed_time:#{elapsed.to_f}sec status:[#{status}]"
}
break
elsif elapsed.to_i > max_polling_time
message = "embulk-output-bigquery: #{kind} job checking... " \
"job_id:[#{job_id}] elapsed_time:#{elapsed.to_f}sec status:[TIMEOUT]"
Embulk.logger.info { message }
raise JobTimeoutError.new(message)
else
Embulk.logger.info {
"embulk-output-bigquery: #{kind} job checking... " \
"job_id:[#{job_id}] elapsed_time:#{elapsed.to_f}sec status:[#{status}]"
}
sleep wait_interval
_response = with_network_retry { client.get_job(@project, job_id, location: location) }
end
end
# `errors` returns Array<Google::Apis::BigqueryV2::ErrorProto> if any error exists.
_errors = _response.status.errors
# cf. http://www.rubydoc.info/github/google/google-api-ruby-client/Google/Apis/BigqueryV2/JobStatus#errors-instance_method
# `error_result` returns Google::Apis::BigqueryV2::ErrorProto if job failed.
# Otherwise, this returns nil.
if _response.status.error_result
msg = "failed during waiting a #{kind} job, get_job(#{@project}, #{job_id}), errors:#{_errors.map(&:to_h)}"
if _errors.any? {|error| error.reason == 'backendError' }
raise BackendError, msg
elsif _errors.any? {|error| error.reason == 'internalError' }
raise InternalError, msg
elsif _errors.any? {|error| error.reason == 'rateLimitExceeded' }
raise RateLimitExceeded, msg
else
Embulk.logger.error { "embulk-output-bigquery: #{msg}" }
raise Error, msg
end
end
if _errors
Embulk.logger.warn { "embulk-output-bigquery: #{kind} job errors... job_id:[#{job_id}] errors:#{_errors.map(&:to_h)}" }
end
Embulk.logger.info { "embulk-output-bigquery: #{kind} job response... job_id:[#{job_id}] response.statistics:#{_response.statistics.to_h}" }
_response
end
# Creates the BigQuery dataset (defaults to the configured @dataset),
# optionally copying the access settings of an existing dataset passed as
# +reference+. An "Already Exists" response is treated as success.
#
# @param dataset [String, nil] dataset id; nil falls back to @dataset
# @param reference [String, nil] dataset whose access settings are reused
# @raise [Error] on any other API failure
def create_dataset(dataset = nil, reference: nil)
  dataset ||= @dataset
  begin
    Embulk.logger.info { "embulk-output-bigquery: Create dataset... #{@destination_project}:#{dataset} in #{@location_for_log}" }
    hint = {}
    if reference
      # Copy access settings from the reference dataset onto the new one.
      source = get_dataset(reference)
      hint = { access: source.access }
    end
    body = {
      dataset_reference: {
        project_id: @project,
        dataset_id: dataset,
      },
    }.merge(hint)
    body[:location] = @location if @location
    opts = {}
    Embulk.logger.debug { "embulk-output-bigquery: insert_dataset(#{@project}, #{dataset}, #{@location_for_log}, #{body}, #{opts})" }
    with_network_retry { client.insert_dataset(@project, body, **opts) }
  rescue Google::Apis::ServerError, Google::Apis::ClientError, Google::Apis::AuthorizationError => e
    # ignore 'Already Exists' error
    return if e.status_code == 409 && /Already Exists:/ =~ e.message

    response = {status_code: e.status_code, message: e.message, error_class: e.class}
    Embulk.logger.error {
      "embulk-output-bigquery: insert_dataset(#{@project}, #{body}, #{opts}), response:#{response}"
    }
    raise Error, "failed to create dataset #{@destination_project}:#{dataset} in #{@location_for_log}, response:#{response}"
  end
end
# Fetches dataset metadata from the destination project.
#
# @param dataset [String, nil] dataset id; nil falls back to @dataset
# @raise [NotFoundError] when the dataset does not exist (HTTP 404)
# @raise [Error] on any other API failure
def get_dataset(dataset = nil)
  dataset ||= @dataset
  Embulk.logger.info { "embulk-output-bigquery: Get dataset... #{@destination_project}:#{dataset}" }
  with_network_retry { client.get_dataset(@destination_project, dataset) }
rescue Google::Apis::ServerError, Google::Apis::ClientError, Google::Apis::AuthorizationError => e
  raise NotFoundError, "Dataset #{@destination_project}:#{dataset} is not found" if e.status_code == 404

  response = {status_code: e.status_code, message: e.message, error_class: e.class}
  Embulk.logger.error {
    "embulk-output-bigquery: get_dataset(#{@destination_project}, #{dataset}), response:#{response}"
  }
  raise Error, "failed to get dataset #{@destination_project}:#{dataset}, response:#{response}"
end
# Creates the table unless it already exists ("Already Exists" is swallowed).
# When +table+ carries a partition decorator (e.g. "t$20200101") the
# decorator is stripped and daily time-partitioning is enabled by default.
#
# @param table [String] table name, optionally with a partition decorator
# @param dataset [String, nil] defaults to the configured @dataset
# @param options [Hash, nil] 'time_partitioning', 'range_partitioning',
#   'clustering' and 'expiration_time'; task-level settings act as fallbacks
# @raise [Error] on any API failure other than "Already Exists"
def create_table_if_not_exists(table, dataset: nil, options: nil)
  begin
    dataset ||= @dataset
    options ||= {}
    options['time_partitioning'] ||= @task['time_partitioning']
    if Helper.has_partition_decorator?(table)
      # A decorator implies partitioned loads; default to DAY partitioning.
      options['time_partitioning'] ||= {'type' => 'DAY'}
      table = Helper.chomp_partition_decorator(table)
    end

    Embulk.logger.info { "embulk-output-bigquery: Create table... #{@destination_project}:#{dataset}.#{table}" }
    body = {
      table_reference: {
        table_id: table,
      },
      description: @task['description'],
      schema: {
        fields: fields,
      }
    }

    if options['time_partitioning']
      body[:time_partitioning] = {
        type: options['time_partitioning']['type'],
        expiration_ms: options['time_partitioning']['expiration_ms'],
        field: options['time_partitioning']['field'],
      }
    end

    options['range_partitioning'] ||= @task['range_partitioning']
    if options['range_partitioning']
      body[:range_partitioning] = {
        field: options['range_partitioning']['field'],
        range: {
          # The API expects range bounds/interval as strings.
          start: options['range_partitioning']['range']['start'].to_s,
          end: options['range_partitioning']['range']['end'].to_s,
          interval: options['range_partitioning']['range']['interval'].to_s,
        },
      }
    end

    options['clustering'] ||= @task['clustering']
    if options['clustering']
      body[:clustering] = {
        fields: options['clustering']['fields'],
      }
    end

    if options['expiration_time']
      # expiration_time is expressed in milliseconds
      body[:expiration_time] = (Time.now.to_i + options['expiration_time']) * 1000
    end

    opts = {}

    Embulk.logger.debug { "embulk-output-bigquery: insert_table(#{@destination_project}, #{dataset}, #{@location_for_log}, #{body}, #{opts})" }
    with_network_retry { client.insert_table(@destination_project, dataset, body, **opts) }
  rescue Google::Apis::ServerError, Google::Apis::ClientError, Google::Apis::AuthorizationError => e
    if e.status_code == 409 && /Already Exists:/ =~ e.message
      # ignore 'Already Exists' error
      return
    end

    response = {status_code: e.status_code, message: e.message, error_class: e.class}
    Embulk.logger.error {
      "embulk-output-bigquery: insert_table(#{@destination_project}, #{dataset}, #{@location_for_log}, #{body}, #{opts}), response:#{response}"
    }
    raise Error, "failed to create table #{@destination_project}:#{dataset}.#{table} in #{@location_for_log}, response:#{response}"
  end
end
# Deletes the whole table: any partition decorator on +table+ is stripped
# first, so the base table is removed (contrast with #delete_partition).
def delete_table(table, dataset: nil)
  base_table = Helper.chomp_partition_decorator(table)
  delete_table_or_partition(base_table, dataset: dataset)
end
# Deletes a single partition; callers pass +table+ with its partition
# decorator intact (contrast with #delete_table, which strips it).
def delete_partition(table, dataset: nil)
  delete_table_or_partition(table, dataset: dataset)
end
# Deletes +table+; if a partition decorator is present only that partition
# is deleted. "Not found" responses are ignored so deletes are idempotent.
#
# @param dataset [String, nil] defaults to the configured @dataset
# @raise [Error] on any API failure other than "Not found"
def delete_table_or_partition(table, dataset: nil)
  dataset ||= @dataset
  Embulk.logger.info { "embulk-output-bigquery: Delete table... #{@destination_project}:#{dataset}.#{table}" }
  with_network_retry { client.delete_table(@destination_project, dataset, table) }
rescue Google::Apis::ServerError, Google::Apis::ClientError, Google::Apis::AuthorizationError => e
  # ignore 'Not Found' error
  return if e.status_code == 404 && /Not found:/ =~ e.message

  response = {status_code: e.status_code, message: e.message, error_class: e.class}
  Embulk.logger.error {
    "embulk-output-bigquery: delete_table(#{@destination_project}, #{dataset}, #{table}), response:#{response}"
  }
  raise Error, "failed to delete table #{@destination_project}:#{dataset}.#{table}, response:#{response}"
end
# Fetches metadata for the base table: a partition decorator on +table+ is
# stripped before lookup.
#
# @param dataset [String, nil] defaults to the configured @dataset
# @raise [NotFoundError] when the table does not exist
def get_table(table, dataset: nil)
  table = Helper.chomp_partition_decorator(table)
  # Bug fix: the dataset: argument was previously accepted but silently
  # dropped, so lookups always hit the default @dataset.
  get_table_or_partition(table, dataset: dataset)
end
# Fetches metadata for a partition; +table+ keeps its partition decorator.
#
# @param dataset [String, nil] defaults to the configured @dataset
# @raise [NotFoundError] when the partition does not exist
def get_partition(table, dataset: nil)
  # Bug fix: the dataset: argument was previously accepted but silently
  # dropped, so lookups always hit the default @dataset.
  get_table_or_partition(table, dataset: dataset)
end
# Fetches metadata for a table, or for a partition when +table+ includes a
# partition decorator.
#
# @param dataset [String, nil] defaults to the configured @dataset
# @raise [NotFoundError] on HTTP 404
# @raise [Error] on any other API failure
def get_table_or_partition(table, dataset: nil)
  dataset ||= @dataset
  Embulk.logger.info { "embulk-output-bigquery: Get table... #{@destination_project}:#{dataset}.#{table}" }
  with_network_retry { client.get_table(@destination_project, dataset, table) }
rescue Google::Apis::ServerError, Google::Apis::ClientError, Google::Apis::AuthorizationError => e
  raise NotFoundError, "Table #{@destination_project}:#{dataset}.#{table} is not found" if e.status_code == 404

  response = {status_code: e.status_code, message: e.message, error_class: e.class}
  Embulk.logger.error {
    "embulk-output-bigquery: get_table(#{@destination_project}, #{dataset}, #{table}), response:#{response}"
  }
  raise Error, "failed to get table #{@destination_project}:#{dataset}.#{table}, response:#{response}"
end
end
end
end
end
| ruby | MIT | e4cc3fcbb6a481a71874c357a947b298ceb2bc23 | 2026-01-04T17:51:46.760044Z | false |
embulk/embulk-output-bigquery | https://github.com/embulk/embulk-output-bigquery/blob/e4cc3fcbb6a481a71874c357a947b298ceb2bc23/lib/embulk/output/bigquery/auth.rb | lib/embulk/output/bigquery/auth.rb | require 'googleauth'
module Embulk
module Output
class Bigquery < OutputPlugin
# Builds googleauth credentials for the plugin's configured auth method.
class Auth
  attr_reader :auth_method, :json_key, :scope

  # @param task [Hash] plugin settings; reads 'auth_method' and 'json_keyfile'
  # @param scope [String] OAuth scope to request
  def initialize(task, scope)
    @auth_method = task['auth_method']
    @json_key = task['json_keyfile']
    @scope = scope
  end

  # Returns a credentials object matching the configured auth method.
  # @raise [ConfigError] for an unrecognized auth method
  def authenticate
    case auth_method
    when 'authorized_user'
      Google::Auth::UserRefreshCredentials.make_creds(json_key_io: StringIO.new(json_key), scope: scope)
    when 'compute_engine'
      Google::Auth::GCECredentials.new
    when 'service_account', 'json_key' # json_key is for backward compatibility
      Google::Auth::ServiceAccountCredentials.make_creds(json_key_io: StringIO.new(json_key), scope: scope)
    when 'application_default'
      Google::Auth.get_application_default([scope])
    else
      raise ConfigError.new("Unknown auth method: #{auth_method}")
    end
  end
end
end
end
end
| ruby | MIT | e4cc3fcbb6a481a71874c357a947b298ceb2bc23 | 2026-01-04T17:51:46.760044Z | false |
bullet-train-co/showcase | https://github.com/bullet-train-co/showcase/blob/9dac8befb1b8ca446b46ae287a922ed4f2d76c6c/app/controllers/showcase/previews_controller.rb | app/controllers/showcase/previews_controller.rb | class Showcase::PreviewsController < Showcase::EngineController
# Looks up the preview identified by params[:id] and renders its partial.
def show
  path = Showcase::Path.new(params[:id])
  @preview = path.preview_for(view_context)
end
end
| ruby | MIT | 9dac8befb1b8ca446b46ae287a922ed4f2d76c6c | 2026-01-04T17:51:52.951854Z | false |
bullet-train-co/showcase | https://github.com/bullet-train-co/showcase/blob/9dac8befb1b8ca446b46ae287a922ed4f2d76c6c/app/controllers/showcase/engine_controller.rb | app/controllers/showcase/engine_controller.rb | class Showcase::EngineController < ActionController::Base
layout "showcase"

helper Showcase::RouteHelper
# Expose the host application's view helpers inside Showcase pages, when
# the host app defines an ApplicationController.
if defined?(::ApplicationController)
  helper all_helpers_from_path ::ApplicationController.helpers_path
end

# Landing page; renders the engine's index template.
def index
end
end
| ruby | MIT | 9dac8befb1b8ca446b46ae287a922ed4f2d76c6c | 2026-01-04T17:51:52.951854Z | false |
bullet-train-co/showcase | https://github.com/bullet-train-co/showcase/blob/9dac8befb1b8ca446b46ae287a922ed4f2d76c6c/app/models/showcase/path.rb | app/models/showcase/path.rb | class Showcase::Path
# Node in the sidebar navigation tree. `id` is a directory segment name,
# `children` holds nested Trees and leaf Paths, and `root` marks trees
# directly under the previews directory.
class Tree < Struct.new(:id, :children, :root)
  def initialize(id, children = [])
    super(id, children, false)
  end
  alias_method :root?, :root
  delegate :<<, to: :children

  # Compute the partial path once and close over it rather than rebuilding
  # the string on every render.
  cached_partial_path = "showcase/engine/path/tree"
  define_method(:to_partial_path) { cached_partial_path }

  # Display name; "." is the previews root directory.
  def name
    id == "." ? "Previews" : id
  end

  # Whether this disclosure should start expanded (see Showcase.tree_opens).
  def open?
    Showcase.tree_opens.call(self)
  end

  # True if any descendant matches the given preview id.
  def active?(id)
    children.any? { _1.active?(id) }
  end

  # Leaf paths first, then subtrees.
  def ordered_children
    children.partition { !_1.is_a?(Tree) }.flatten
  end

  # All leaf paths, depth-first.
  def ordered_paths
    children.flat_map { _1.is_a?(Tree) ? _1.ordered_paths : _1 }
  end

  # Builds the tree from paths; the block maps each path to its directory
  # segments. The synthetic :discardable_root wrapper is thrown away and
  # its sorted children are returned, flagged as roots.
  def self.index(paths)
    paths.each_with_object new(:discardable_root) do |path, root|
      yield(path).reduce(root, :edge_for) << path
    end.children.sort_by(&:id).each { _1.root = true }
  end

  # Finds or creates the child tree with the given id.
  def edge_for(id)
    find(id) || insert(id)
  end

  private

  def find(id)
    children.find { _1.id == id }
  end

  def insert(id)
    self.class.new(id).tap { self << _1 }
  end
end
# Builds the sidebar tree of all registered preview paths, grouped by
# their directory segments.
def self.tree
  paths = Showcase.previews.map { |preview| new(preview) }.sort_by!(&:id)
  Tree.index(paths, &:segments)
end
attr_reader :id, :segments, :basename

# Normalizes a preview template path (e.g. "components/_button.html.erb")
# into an id without extension or partial underscore ("components/button"),
# plus its basename and directory segments.
def initialize(path)
  @id = path.split(".").first.delete_prefix("_").sub(/\/_/, "/")
  @basename = File.basename(@id)
  @segments = File.dirname(@id).split("/")
end
# Compute the partial path once at class-definition time and close over it.
cached_partial_path = "showcase/engine/path/path"
define_method(:to_partial_path) { cached_partial_path }
# True when this path is the one identified by the given preview id.
def active?(id)
  id == self.id
end
# Builds a Preview for this path and renders its associated partial so its
# samples and options are populated; returns the Preview.
def preview_for(view_context)
  preview = Showcase::Preview.new(view_context, id: id, title: basename.titleize)
  preview.render_associated_partial
  preview
end
end
| ruby | MIT | 9dac8befb1b8ca446b46ae287a922ed4f2d76c6c | 2026-01-04T17:51:52.951854Z | false |
bullet-train-co/showcase | https://github.com/bullet-train-co/showcase/blob/9dac8befb1b8ca446b46ae287a922ed4f2d76c6c/app/models/showcase/preview.rb | app/models/showcase/preview.rb | class Showcase::Preview
attr_reader :id, :badges, :samples

# @param view_context [Object] the view rendering context used by samples
# @param id [String] preview id, e.g. "components/button"
# @param title [String, nil] optional explicit title (see #title)
def initialize(view_context, id:, title: nil)
  @view_context, @id = view_context, id
  @badges, @samples = [], []
  title title
end
# Set a custom title for the Preview. By default, it's automatically inferred from the sidebar title,
# e.g. showcase/previews/_button.html.erb will have Button as the title.
# Returns the current title either way.
def title(content = nil)
  if content
    @title = content
  end
  @title
end
# Describe the Preview in more detail to help guide other developers on what the inner partial/component etc.'s purpose is.
#
#   <% showcase.description "Our button element" %>
#   <% showcase.description do %>
#     <h3>Our button element</h3> — <span>but with custom description HTML</span>
#   <% end %>
#
# Returns the current description either way.
def description(content = nil, &block)
  if content || block_given?
    @description = content || @view_context.capture(&block)
  end
  @description
end
# Optional badges you can give to a preview:
#
#   <% showcase.badge :partial, :view_helper %>
def badge(*badges)
  @badges.concat(badges)
end
# Allows linking out to other Showcases
#
#   <%= showcase.link_to "components/button", id: "extra-large" %>
#   # => <a href="components/button#extra-large"><showcase components/button#extra-large></a>
#
# Can link to other samples on the current showcase too:
#
#   # If we're within app/views/showcase/previews/components/_button.html.erb
#   <%= showcase.link_to id: "extra-large" %>
#   # => <a href="components/button#extra-large"><showcase components/button#extra-large></a>
def link_to(preview_id = id, id: nil)
  # `anchor: id` jumps to a specific sample on the target preview page.
  @view_context.link_to Showcase::Engine.routes.url_helpers.preview_path(preview_id, anchor: id), class: "sc-link sc-font-mono sc-text-sm" do
    "<showcase #{[preview_id, id].compact.join("#").squish}>"
  end
end
# Adds a named sample to demonstrate with the Showcase can do.
#
# By default, sample takes a block that'll automatically have its source extracted, like this:
#
#   <% showcase.sample "Basic" do %>
#     <%= render "components/button", content: "Button Content", mode: :small %>
#   <% end %>
#
# This outputs a `<showcase-sample>` custom HTML element.
# The sample name is used to generate the `id` via `name.parameterize` by default, pass `id:` to override.
#
# If more advanced rendering is needed, the sample is available as a block argument:
#
#   <% showcase.sample "Advanced" do |sample| %>
#     <% sample.preview do %>
#       <%= render "components/button", content: "Button Content", mode: :small %>
#     <% end %>
#
#     <% sample.extract do %>
#       This will be in the source output.
#   <% end %>
#
# The sample also supports several extra options:
#
#   <% showcase.sample "Basic", id: "custom-id", description: "Please use this", events: "toggle:toggled", custom: "option" do %>
#     <%# … %>
#   <% end %>
#
# Here we set:
# - the `sample.id` with the HTML element `id` is overriden
# - the `sample.description`
# - the `sample.events` what JavaScript `events` to listen for on the element
# - any other custom options are available in `sample.details`.
def sample(name, **options, &block)
  # Note: `<<` returns the samples array itself, so callers can e.g. chain `.first`.
  @samples << Showcase::Sample.new(@view_context, name, **options).tap { _1.evaluate(&block) }
end
# Yields an Options object to help define the configuration table for a Preview.
#
#   <% showcase.options do |o| %>
#     <% o.required :content, "Pass the inner content text that the button should display" %>
#     <% o.optional :mode, "Pass an optional mode override", default: :small, options: %i[ small medium large ] %>
#     <% o.optional :method, "What HTTP method to use", type: "String | Symbol", default: :post %>
#     <% o.optional :reversed, "Whether the inner text should be reversed", default: false %> # type: "Boolean" is inferred from the default here.
#     <% o.optional "**options", "Every other option is passed on as options to the inner `button_tag`", type: Hash %>
#   <% end %>
#
# The `type:` is derived if a `default:` is passed, otherwise it's assumed to be a String.
#
# Showcase outputs the columns with this order [:name, :required, :type, :default, :description], any other passed column is
# automatically rendered after those.
def options
  # Memoized, so repeated calls add rows to the same Options instance.
  @options ||= Showcase::Options.new(@view_context).tap { yield _1 if block_given? }
end
# Renders the preview's partial (app/views/showcase/previews/_<id>) with
# this Preview exposed as `showcase:`, populating samples/options/badges.
# Explicitly returns nil, discarding the rendered string.
def render_associated_partial
  @view_context.render "showcase/previews/#{id}", showcase: self
  nil
end
end
| ruby | MIT | 9dac8befb1b8ca446b46ae287a922ed4f2d76c6c | 2026-01-04T17:51:52.951854Z | false |
bullet-train-co/showcase | https://github.com/bullet-train-co/showcase/blob/9dac8befb1b8ca446b46ae287a922ed4f2d76c6c/app/models/showcase/sample.rb | app/models/showcase/sample.rb | class Showcase::Sample
attr_reader :name, :id, :events, :details
attr_reader :rendered, :source, :instrumented

# @param name [String] the sample heading; also seeds the DOM id
# @param description [String, nil] optional description (see #description)
# @param id [String] DOM id, defaults to name.parameterize
# @param syntax [Symbol] syntax used when rendering the extracted source
# @param events [String, Array, nil] JavaScript events to relay; wrapped in an Array
# @param details [Hash] any remaining custom options, kept verbatim
def initialize(view_context, name, description: nil, id: name.parameterize, syntax: :erb, events: nil, **details)
  @view_context = view_context
  @name, @id, @syntax, @details = name, id, syntax, details
  @events = Array(events)
  description description if description
end
# Sets the sample's description from a string or a captured block, and
# returns the current description either way.
def description(content = nil, &block)
  if content || block_given?
    @description = content || @view_context.capture(&block)
  end
  @description
end
# Runs the sample's block. A zero-arity block is consumed directly (its
# output rendered and its source extracted); a block taking an argument
# receives this Sample so the template can drive preview/extract manually.
def evaluate(&block)
  if block.arity.zero?
    consume(&block)
  else
    @view_context.capture(self, &block)
  end
end
# Renders the block's output and extracts its template source in one go.
def consume(&block)
  render(&block)
  extract_source(&block)
end
# Captures the block's rendered output into @rendered, recording the
# "render_partial.action_view" instrumentation event (if any) fired while
# rendering, into @instrumented.
def render(&block)
  # TODO: Remove `is_a?` check when Rails 6.1 support is dropped.
  assigns = proc { @instrumented = _1 if _1.is_a?(ActiveSupport::Notifications::Event) }
  ActiveSupport::Notifications.subscribed(assigns, "render_partial.action_view") do
    @rendered = @view_context.capture(&block)
  end
end
# Reads the block's template source from its file on disk, then runs it
# through the configured Showcase.sample_renderer (syntax highlighting)
# in the view context, storing the result in @source.
def extract_source(&block)
  source = extract_source_block_via_matched_indentation_from(*block.source_location)
  @source = @view_context.instance_exec(source, @syntax, &Showcase.sample_renderer)
end
private

# Slices the block's body out of +file+ by indentation: starting from the
# line that opened the block, every following line indented more deeply
# belongs to the block; the first line back at the opening indentation
# (or shallower) ends it.
def extract_source_block_via_matched_indentation_from(file, source_location_index)
  # `Array`s are zero-indexed, but `source_location` indexes are not, hence `pred`.
  starting_line, *lines = File.readlines(file).slice(source_location_index.pred..)
  indentation = starting_line.match(/^\s+/).to_s
  matcher = /^#{indentation}\S/
  # Drop everything from the first line at (or above) the opening indentation.
  index = lines.index { _1.match?(matcher) }
  lines.slice!(index..) if index
  lines.join.strip_heredoc
end
end
| ruby | MIT | 9dac8befb1b8ca446b46ae287a922ed4f2d76c6c | 2026-01-04T17:51:52.951854Z | false |
bullet-train-co/showcase | https://github.com/bullet-train-co/showcase/blob/9dac8befb1b8ca446b46ae287a922ed4f2d76c6c/test/capybara_extensions.rb | test/capybara_extensions.rb | module CapybaraExtensions
end
require "capybara_extensions/assertions"
require "capybara_extensions/filters/data"
| ruby | MIT | 9dac8befb1b8ca446b46ae287a922ed4f2d76c6c | 2026-01-04T17:51:52.951854Z | false |
bullet-train-co/showcase | https://github.com/bullet-train-co/showcase/blob/9dac8befb1b8ca446b46ae287a922ed4f2d76c6c/test/test_helper.rb | test/test_helper.rb | # Configure Rails Environment
ENV["RAILS_ENV"] = "test"

require_relative "../test/dummy/config/environment"
ActiveRecord::Migrator.migrations_paths = [File.expand_path("../test/dummy/db/migrate", __dir__)]
# Don't eager load, since that expects Action Mailbox tables to be present in database.
Rails.configuration.eager_load = false
require "rails/test_help"

require "capybara_extensions"
require "template_helpers"

# Allow disabling syntax highlighting via ROUGE_ENABLED=false, so tests can
# assert on raw extracted source.
Showcase.sample_renderer = proc { _1 } if ENV["ROUGE_ENABLED"] == "false"

# Load fixtures from the engine
if ActiveSupport::TestCase.respond_to?(:fixture_path=)
  ActiveSupport::TestCase.fixture_path = File.expand_path("fixtures", __dir__)
  ActionDispatch::IntegrationTest.fixture_path = ActiveSupport::TestCase.fixture_path
  ActiveSupport::TestCase.file_fixture_path = ActiveSupport::TestCase.fixture_path + "/files"
  ActiveSupport::TestCase.fixtures :all
end

class ActiveSupport::TestCase
  include TemplateHelpers
end

# Base class for engine integration tests: URL helpers and @routes point at
# the Showcase engine instead of the dummy app.
class Showcase::IntegrationTest < ActionDispatch::IntegrationTest
  include Showcase::Engine.routes.url_helpers

  setup { @routes = Showcase::Engine.routes }
end
| ruby | MIT | 9dac8befb1b8ca446b46ae287a922ed4f2d76c6c | 2026-01-04T17:51:52.951854Z | false |
bullet-train-co/showcase | https://github.com/bullet-train-co/showcase/blob/9dac8befb1b8ca446b46ae287a922ed4f2d76c6c/test/template_helpers.rb | test/template_helpers.rb | module TemplateHelpers
# Creates a temporary directory and prepends it as a view path, so tests
# can define templates on the fly with #template_file.
def setup
  super
  @temporary_view_path = Pathname.new(Dir.mktmpdir).join("app", "views")
  @view_paths = ActionController::Base.view_paths
  ActionController::Base.prepend_view_path(@temporary_view_path)
end
# Restores the view paths captured in #setup, removing the temporary path.
def teardown
  super
  ActionController::Base.view_paths = @view_paths
end
# Writes +html+ to +partial+ beneath the temporary view path created in
# #setup, creating intermediate directories, and returns the file's Pathname.
def template_file(partial, html)
  file = @temporary_view_path.join(partial)
  file.dirname.mkpath
  file.write(html)
  file
end
end
| ruby | MIT | 9dac8befb1b8ca446b46ae287a922ed4f2d76c6c | 2026-01-04T17:51:52.951854Z | false |
bullet-train-co/showcase | https://github.com/bullet-train-co/showcase/blob/9dac8befb1b8ca446b46ae287a922ed4f2d76c6c/test/capybara_extensions/assertions.rb | test/capybara_extensions/assertions.rb | module CapybaraExtensions::Assertions
# Generate assert_<selector>/assert_no_<selector> shorthands for these
# Capybara selectors, e.g. `assert_section "Options"` instead of
# `assert_selector :section, "Options"`. The __FILE__/__LINE__ arguments
# keep backtraces pointing at this file.
%i[element disclosure link region section table_row].each do |selector|
  class_eval <<~RUBY, __FILE__, __LINE__ + 1
    def assert_#{selector}(...)
      assert_selector(#{selector.inspect}, ...)
    end

    def assert_no_#{selector}(...)
      assert_no_selector(#{selector.inspect}, ...)
    end
  RUBY
end
end
Capybara::Node::Matchers.include CapybaraExtensions::Assertions
# NOTE(review): Simple gets the module directly only on Ruby < 3.0 —
# presumably Matchers inclusion suffices on newer Rubies; confirm before changing.
Capybara::Node::Simple.include CapybaraExtensions::Assertions if RUBY_VERSION < "3.0"

# Mix the shorthands into integration tests once they're loaded.
ActiveSupport.on_load :action_dispatch_integration_test do
  include CapybaraExtensions::Assertions
end

# View tests get full Capybara assertions plus the shorthands.
ActiveSupport.on_load :action_view_test_case do
  include Capybara::Minitest::Assertions
  include CapybaraExtensions::Assertions

  # Wraps the rendered output in a Capybara node so assertions can run on it.
  def page
    @page ||= Capybara.string(rendered.to_s)
  end
end
| ruby | MIT | 9dac8befb1b8ca446b46ae287a922ed4f2d76c6c | 2026-01-04T17:51:52.951854Z | false |
bullet-train-co/showcase | https://github.com/bullet-train-co/showcase/blob/9dac8befb1b8ca446b46ae287a922ed4f2d76c6c/test/capybara_extensions/filters/data.rb | test/capybara_extensions/filters/data.rb | Capybara::Selector.all.each_key do |selector|
Capybara.modify_selector selector do
  # Accept a `data: {...}` filter that transforms nested keys in the same
  # style as Action View's `tag` builder:
  #
  # https://edgeapi.rubyonrails.org/classes/ActionView/Helpers/TagHelper.html#method-i-tag-label-Options
  #
  # Values are passed straight through to Capybara, and transformed
  # to XPath queries
  #
  #   # this assertion fails for
  #   # => <button data-controller="element another-controller"></button>
  #   assert_button "Hello", data: {controller: "element"}
  #
  #   # this assertion passes for
  #   # => <button data-controller="element another-controller"></button>
  #   assert_button "Hello", data: {controller: /element/}
  #
  expression_filter(:data, Hash, skip_if: nil) do |scope, nested_attributes|
    # welcome_target: "x" => "data-welcome-target" => "x"
    prefixed_attributes = nested_attributes.transform_keys { |key| "data-#{key.to_s.dasherize}" }

    case scope
    when String
      # CSS scope: append [data-…="…"] attribute selectors.
      selectors = prefixed_attributes.map { |key, value| %([#{key}="#{value}"]) }
      [scope, *selectors].join
    else
      # XPath scope: AND together one attribute condition per key.
      expressions = prefixed_attributes.map do |key, value|
        builder(XPath.self).add_attribute_conditions(key.to_sym => value)
      end

      scope[expressions.reduce(:&)]
    end
  end

  # Include the data attributes in assertion failure descriptions.
  describe(:expression_filters) do |data: {}, **|
    attributes = data.map { |key, value| %(data-#{key.to_s.dasherize}="#{value}") }

    " with #{attributes.join(" and ")}" unless attributes.empty?
  end
end
end
| ruby | MIT | 9dac8befb1b8ca446b46ae287a922ed4f2d76c6c | 2026-01-04T17:51:52.951854Z | false |
bullet-train-co/showcase | https://github.com/bullet-train-co/showcase/blob/9dac8befb1b8ca446b46ae287a922ed4f2d76c6c/test/controllers/showcase/previews_controller_test.rb | test/controllers/showcase/previews_controller_test.rb | require "test_helper"
# Integration tests for Showcase::PreviewsController#show: rendering of
# samples, options tables, descriptions, template overrides, and previews
# picked up from the host app's view path.
class Showcase::PreviewsControllerTest < Showcase::IntegrationTest
  test "#show renders samples and options" do
    get preview_path("components/button")
    assert_response :ok

    within :section, "Samples" do
      assert_button "Button content", class: %w[sc-text-xs]
      assert_button "Button content", class: %w[sc-text-xl]
    end

    within :section, "Options" do
      assert_table with_rows: [
        {"Name" => "content", "Required" => "", "Type" => "String", "Default" => "", "Description" => "The content to output as the button text", "Options" => ""},
        {"Name" => "mode", "Required" => "", "Type" => "Symbol", "Default" => ":small", "Description" => "We support three modes", "Options" => "[:small, :medium, :large]"}
      ]
    end
  end

  test "#show does not render a <table>" do
    get preview_path("components/combobox")
    assert_response :ok

    assert_no_section "Options"
    assert_no_table
  end

  test "#show renders a title and description" do
    get preview_path("stimulus_controllers/welcome")
    assert_response :ok

    assert_section "Welcome", text: "The welcome controller says hello when it enters the screen"
  end

  test "#show renders samples" do
    get preview_path("stimulus_controllers/welcome")

    within :section, "Samples" do
      assert_region "Basic", text: "I've just said welcome!"

      within :region, "With greeter" do
        within :element, data: {controller: "welcome"} do
          assert_element text: "Somebody", data: {welcome_target: "greeter"}
        end
      end

      within :region, "Yelling!!!" do
        assert_element data: {controller: "welcome", welcome_yell_value: "true"}
      end
    end
  end

  test "#show reads samples from partials in app/views/showcase/previews/" do
    # Random name proves the page reflects the template written below.
    name = SecureRandom.uuid
    template_file "showcase/previews/_test_local_sample.html.erb", <<~HTML
      <% showcase.sample "#{name}" do %>
        A new sample: #{name}
      <% end %>
    HTML

    get preview_path("test_local_sample")

    within :navigation do
      assert_link "Test Local Sample", href: preview_path("test_local_sample")
    end

    within :section, "Samples" do
      assert_region name, text: "A new sample: #{name}"
    end
  end

  test "#show samples can access URL helpers for the main_app" do
    template_file "showcase/previews/_link.html.erb", <<~HTML
      <% showcase.sample "root" do %>
        <%= link_to "root", main_app_root_path %>
      <% end %>
    HTML

    get preview_path("link")
    assert_link "root", href: "/main_app_root"
  end

  test "#show renders Custom sample partials" do
    # Overriding the engine's _sample partial changes how samples render.
    template_file "showcase/engine/_sample.html.erb", <<~HTML
      <turbo-frame id="<%= sample.id %>_frame">
        <%= sample.name %>
      </turbo-frame>
    HTML

    get preview_path("stimulus_controllers/welcome")

    within :section, "Samples" do
      assert_element "turbo-frame", text: "Basic"
      assert_element "turbo-frame", text: "With greeter"
      assert_element "turbo-frame", text: "Yelling!!!"
    end
  end

  test "#show renders options" do
    get preview_path("stimulus_controllers/welcome")

    within :section, "Options" do
      assert_table with_rows: [
        {"Name" => %(data-welcome-target="greeter"), "Required" => "", "Type" => "String", "Default" => "", "Description" => "If the id of the target element must be printed"},
        {"Name" => "data-welcome-yell-value", "Required" => "", "Type" => "Boolean", "Default" => "false", "Description" => "Whether the hello is to be YELLED"},
        {"Name" => "data-welcome-success-class", "Required" => "", "Type" => "String", "Default" => "", "Description" => "The success class to append after greeting"},
        {"Name" => "data-welcome-list-outlet", "Required" => "", "Type" => "String", "Default" => "", "Description" => "An outlet to append each yelled greeter to"},
        {"Name" => %(data-action="greet"), "Required" => "", "Type" => "String", "Default" => "", "Description" => "An action to repeat the greeting, if need be"},
        {"Name" => "body", "Required" => "", "Type" => "Content Block", "Default" => "", "Description" => "An optional content block to set the body" }
      ]

      assert_checked_field type: "checkbox", disabled: true, count: 3
    end
  end
end
| ruby | MIT | 9dac8befb1b8ca446b46ae287a922ed4f2d76c6c | 2026-01-04T17:51:52.951854Z | false |
bullet-train-co/showcase | https://github.com/bullet-train-co/showcase/blob/9dac8befb1b8ca446b46ae287a922ed4f2d76c6c/test/controllers/showcase/engine_controller_test.rb | test/controllers/showcase/engine_controller_test.rb | require "test_helper"
# Integration tests for the Showcase landing page: welcome content, sidebar
# navigation, and index template overriding.
class Showcase::EngineControllerTest < Showcase::IntegrationTest
  test "#index renders Welcome content" do
    get showcase_path
    assert_response :ok

    assert_title "Showcase"

    within :main, "Showcase" do
      within :article, "Welcome to Showcase — your UI Pattern Library" do
        assert_section "What is this thing?"
        assert_section "How do I use it?"
        assert_section "But I don't see the thing I need"
        assert_section "I have questions, who do I reach out to?"
        assert_section "Additional resources"
      end
    end
  end

  test "#index renders navigation" do
    get showcase_path
    assert_response :ok

    within :navigation do
      assert_link "Showcase", href: root_url

      within :disclosure, "Previews", expanded: true do
        assert_link "Plain Ruby", href: preview_path("plain_ruby")
      end

      within :disclosure, "Components", expanded: true do
        assert_link "Button", href: preview_path("components/button")
        assert_link "Combobox", href: preview_path("components/combobox")
      end

      within :disclosure, "Helpers", expanded: true do
        assert_link "Upcase Helper", href: preview_path("helpers/upcase_helper")
      end

      within :disclosure, "Stimulus Controllers", expanded: true do
        assert_link "Welcome", href: preview_path("stimulus_controllers/welcome")
      end
    end
  end

  test "#index template can be overridden" do
    template_file "showcase/engine/index.html.erb", <<~HTML
      <section aria-labelledby="title">
        <h2 id="title">A Custom Welcome</h2>
      </section>
    HTML

    get showcase_path
    assert_response :ok

    within :main, "Showcase" do
      assert_region "A Custom Welcome"
    end
  end
end
| ruby | MIT | 9dac8befb1b8ca446b46ae287a922ed4f2d76c6c | 2026-01-04T17:51:52.951854Z | false |
bullet-train-co/showcase | https://github.com/bullet-train-co/showcase/blob/9dac8befb1b8ca446b46ae287a922ed4f2d76c6c/test/views/showcase_test.rb | test/views/showcase_test.rb | require "test_helper"
class ShowcaseTest < Showcase::PreviewsTest
test "it has a version number" do
assert Showcase::VERSION
end
test "defines tests reflectively" do
assert_method /\Atest_Showcase/
end
test "defines tests for deeply nested previews" do
assert_method %r{renders_showcase/previews/deeply/nested/partial}
end
test showcase: "components/combobox" do
assert_element id: "basic" do
assert_text "This is totally a combobox, for sure."
end
end
test "showcase generated a components/combobox test" do
assert_method "test_Showcase:_showcase/previews/components/combobox"
end
test showcase: "helpers/upcase_helper" do
assert_element id: "with-extract" do
assert_text /<%= upcase_string "extract" %>\Z/, normalize_ws: true
end
end
private
def assert_method(name)
refute_empty self.class.runnable_methods.grep(name),
"Found no generated test in: \n#{self.class.runnable_methods.join("\n")}"
end
end
| ruby | MIT | 9dac8befb1b8ca446b46ae287a922ed4f2d76c6c | 2026-01-04T17:51:52.951854Z | false |
bullet-train-co/showcase | https://github.com/bullet-train-co/showcase/blob/9dac8befb1b8ca446b46ae287a922ed4f2d76c6c/test/views/showcase/previews/_sample.html.erb_test.rb | test/views/showcase/previews/_sample.html.erb_test.rb | require "test_helper"
module Showcase::Previews
# View tests for the showcase/engine/_sample partial: heading/description,
# preview + source disclosure, and the JavaScript-events relay region.
class SamplePartialTest < ActionView::TestCase
  test "showcase/previews/sample renders its name and description" do
    sample = showcase_sample "A sample" do |partial|
      partial.description { "A description" }
    end
    render "showcase/engine/sample", sample: sample

    assert_region "A sample" do |section|
      section.assert_element "showcase-sample" do |showcase_sample|
        showcase_sample.assert_link "A sample", href: "#a-sample"
        showcase_sample.assert_text "A description"
      end
    end
  end

  test "showcase/previews/sample renders a preview and its source" do
    sample = showcase_sample { "<pre>ERB</pre>" }
    render "showcase/engine/sample", sample: sample

    assert_element "showcase-sample" do |showcase_sample|
      showcase_sample.assert_text "ERB"
      showcase_sample.assert_disclosure "View Source", expanded: false
    end
  end

  test "showcase/previews/sample renders a region to capture JavaScript events" do
    sample = showcase_sample("with events", events: "click") { "<pre>ERB</pre>" }
    render "showcase/engine/sample", sample: sample

    assert_element "showcase-sample", events: ["click"] do |showcase_sample|
      showcase_sample.assert_region "JavaScript Events" do |section|
        section.assert_element data: {showcase_sample_target: "relay"}
      end
    end
  end

  # Builds a Sample through a throwaway Preview; relies on Preview#sample
  # returning the samples array, hence `.first`.
  def showcase_sample(name = "sample name", **options, &block)
    preview = Showcase::Preview.new(view, id: "showcase_test")
    preview.sample(name, **options, &block).first
  end
end
end
| ruby | MIT | 9dac8befb1b8ca446b46ae287a922ed4f2d76c6c | 2026-01-04T17:51:52.951854Z | false |
bullet-train-co/showcase | https://github.com/bullet-train-co/showcase/blob/9dac8befb1b8ca446b46ae287a922ed4f2d76c6c/test/views/showcase/engine/path/_tree.html.erb_test.rb | test/views/showcase/engine/path/_tree.html.erb_test.rb | require "test_helper"
module Showcase::Engine::Path
# View tests for the sidebar tree partial: how Showcase.tree_opens (bool or
# callable) and the current preview id control disclosure expansion.
class TreePartialTest < ActionView::TestCase
  setup { view.extend Showcase::Engine.routes.url_helpers }

  # Showcase.tree_opens is global config; restore it after each test.
  setup { @old_opens = Showcase.tree_opens }
  teardown { Showcase.tree_opens = @old_opens }

  test "tree_opens true" do
    Showcase.tree_opens = true

    render "showcase/engine/path/tree", tree: Showcase::Path::Tree.new(".")
    render "showcase/engine/path/tree", tree: Showcase::Path::Tree.new("helpers")

    assert_disclosure "Previews", expanded: true
    assert_disclosure "Helpers", expanded: true
  end

  test "tree_opens false" do
    Showcase.tree_opens = false

    render "showcase/engine/path/tree", tree: Showcase::Path::Tree.new(".")
    render "showcase/engine/path/tree", tree: Showcase::Path::Tree.new("helpers")

    assert_disclosure "Previews", expanded: false
    assert_disclosure "Helpers", expanded: false
  end

  test "tree_opens just root trees" do
    tree = Showcase::Path::Tree.new("deeply")
    tree.root = true
    tree.edge_for("nested")

    Showcase.tree_opens = ->(tree) { tree.root? }

    render "showcase/engine/path/tree", tree: Showcase::Path::Tree.new(".")
    render "showcase/engine/path/tree", tree: tree

    assert_disclosure "Previews", expanded: false
    assert_disclosure "Deeply", expanded: true
    assert_disclosure "Nested", expanded: false
  end

  test "params overrides tree_opens" do
    Showcase.tree_opens = false

    tree = Showcase::Path::Tree.new("deeply")
    tree.edge_for("nested") << Showcase::Path.new("deeply/nested/partial")

    # Simulate currently viewing the nested preview.
    @controller.params = { id: "deeply/nested/partial" }
    render "showcase/engine/path/tree", tree: tree

    assert_disclosure "Deeply", expanded: true
    assert_disclosure "Nested", expanded: true
  end
end
end
| ruby | MIT | 9dac8befb1b8ca446b46ae287a922ed4f2d76c6c | 2026-01-04T17:51:52.951854Z | false |
bullet-train-co/showcase | https://github.com/bullet-train-co/showcase/blob/9dac8befb1b8ca446b46ae287a922ed4f2d76c6c/test/dummy/app/jobs/application_job.rb | test/dummy/app/jobs/application_job.rb | class ApplicationJob < ActiveJob::Base
# Automatically retry jobs that encountered a deadlock
# retry_on ActiveRecord::Deadlocked
# Most jobs are safe to ignore if the underlying records are no longer available
# discard_on ActiveJob::DeserializationError
end
| ruby | MIT | 9dac8befb1b8ca446b46ae287a922ed4f2d76c6c | 2026-01-04T17:51:52.951854Z | false |
bullet-train-co/showcase | https://github.com/bullet-train-co/showcase/blob/9dac8befb1b8ca446b46ae287a922ed4f2d76c6c/test/dummy/app/helpers/upcase_helper.rb | test/dummy/app/helpers/upcase_helper.rb | module UpcaseHelper
# Returns an upper-cased copy of +text+.
def upcase_string(text)
  text.upcase
end
end
| ruby | MIT | 9dac8befb1b8ca446b46ae287a922ed4f2d76c6c | 2026-01-04T17:51:52.951854Z | false |
bullet-train-co/showcase | https://github.com/bullet-train-co/showcase/blob/9dac8befb1b8ca446b46ae287a922ed4f2d76c6c/test/dummy/app/helpers/application_helper.rb | test/dummy/app/helpers/application_helper.rb | module ApplicationHelper
end
| ruby | MIT | 9dac8befb1b8ca446b46ae287a922ed4f2d76c6c | 2026-01-04T17:51:52.951854Z | false |
bullet-train-co/showcase | https://github.com/bullet-train-co/showcase/blob/9dac8befb1b8ca446b46ae287a922ed4f2d76c6c/test/dummy/app/controllers/application_controller.rb | test/dummy/app/controllers/application_controller.rb | class ApplicationController < ActionController::Base
end
| ruby | MIT | 9dac8befb1b8ca446b46ae287a922ed4f2d76c6c | 2026-01-04T17:51:52.951854Z | false |
bullet-train-co/showcase | https://github.com/bullet-train-co/showcase/blob/9dac8befb1b8ca446b46ae287a922ed4f2d76c6c/test/dummy/app/models/application_record.rb | test/dummy/app/models/application_record.rb | class ApplicationRecord < ActiveRecord::Base
respond_to?(:primary_abstract_class) ? primary_abstract_class : abstract_class
end
| ruby | MIT | 9dac8befb1b8ca446b46ae287a922ed4f2d76c6c | 2026-01-04T17:51:52.951854Z | false |
bullet-train-co/showcase | https://github.com/bullet-train-co/showcase/blob/9dac8befb1b8ca446b46ae287a922ed4f2d76c6c/test/dummy/app/mailers/application_mailer.rb | test/dummy/app/mailers/application_mailer.rb | class ApplicationMailer < ActionMailer::Base
default from: "from@example.com"
layout "mailer"
end
| ruby | MIT | 9dac8befb1b8ca446b46ae287a922ed4f2d76c6c | 2026-01-04T17:51:52.951854Z | false |
bullet-train-co/showcase | https://github.com/bullet-train-co/showcase/blob/9dac8befb1b8ca446b46ae287a922ed4f2d76c6c/test/dummy/app/channels/application_cable/channel.rb | test/dummy/app/channels/application_cable/channel.rb | module ApplicationCable
class Channel < ActionCable::Channel::Base
end
end
| ruby | MIT | 9dac8befb1b8ca446b46ae287a922ed4f2d76c6c | 2026-01-04T17:51:52.951854Z | false |
bullet-train-co/showcase | https://github.com/bullet-train-co/showcase/blob/9dac8befb1b8ca446b46ae287a922ed4f2d76c6c/test/dummy/app/channels/application_cable/connection.rb | test/dummy/app/channels/application_cable/connection.rb | module ApplicationCable
class Connection < ActionCable::Connection::Base
end
end
| ruby | MIT | 9dac8befb1b8ca446b46ae287a922ed4f2d76c6c | 2026-01-04T17:51:52.951854Z | false |
bullet-train-co/showcase | https://github.com/bullet-train-co/showcase/blob/9dac8befb1b8ca446b46ae287a922ed4f2d76c6c/test/dummy/config/application.rb | test/dummy/config/application.rb | require_relative "boot"
require "rails/all"
# Require the gems listed in Gemfile, including any gems
# you've limited to :test, :development, or :production.
Bundler.require(*Rails.groups)
require "showcase"
module Dummy
class Application < Rails::Application
config.load_defaults Rails::VERSION::STRING.to_f
# For compatibility with applications that use this config
config.action_controller.include_all_helpers = false
# Configuration for the application, engines, and railties goes here.
#
# These settings can be overridden in specific environments using the files
# in config/environments, which are processed later.
#
# config.time_zone = "Central Time (US & Canada)"
# config.eager_load_paths << Rails.root.join("extras")
end
end
| ruby | MIT | 9dac8befb1b8ca446b46ae287a922ed4f2d76c6c | 2026-01-04T17:51:52.951854Z | false |
bullet-train-co/showcase | https://github.com/bullet-train-co/showcase/blob/9dac8befb1b8ca446b46ae287a922ed4f2d76c6c/test/dummy/config/environment.rb | test/dummy/config/environment.rb | # Load the Rails application.
require_relative "application"
# Initialize the Rails application.
Rails.application.initialize!
| ruby | MIT | 9dac8befb1b8ca446b46ae287a922ed4f2d76c6c | 2026-01-04T17:51:52.951854Z | false |
bullet-train-co/showcase | https://github.com/bullet-train-co/showcase/blob/9dac8befb1b8ca446b46ae287a922ed4f2d76c6c/test/dummy/config/puma.rb | test/dummy/config/puma.rb | # Puma can serve each request in a thread from an internal thread pool.
# The `threads` method setting takes two numbers: a minimum and maximum.
# Any libraries that use thread pools should be configured to match
# the maximum value specified for Puma. Default is set to 5 threads for minimum
# and maximum; this matches the default thread size of Active Record.
#
max_threads_count = ENV.fetch("RAILS_MAX_THREADS") { 5 }
min_threads_count = ENV.fetch("RAILS_MIN_THREADS") { max_threads_count }
threads min_threads_count, max_threads_count
# Specifies the `worker_timeout` threshold that Puma will use to wait before
# terminating a worker in development environments.
#
worker_timeout 3600 if ENV.fetch("RAILS_ENV", "development") == "development"
# Specifies the `port` that Puma will listen on to receive requests; default is 3000.
#
port ENV.fetch("PORT") { 3000 }
# Specifies the `environment` that Puma will run in.
#
environment ENV.fetch("RAILS_ENV") { "development" }
# Specifies the `pidfile` that Puma will use.
pidfile ENV.fetch("PIDFILE") { "tmp/pids/server.pid" }
# Specifies the number of `workers` to boot in clustered mode.
# Workers are forked web server processes. If using threads and workers together
# the concurrency of the application would be max `threads` * `workers`.
# Workers do not work on JRuby or Windows (both of which do not support
# processes).
#
# workers ENV.fetch("WEB_CONCURRENCY") { 2 }
# Use the `preload_app!` method when specifying a `workers` number.
# This directive tells Puma to first boot the application and load code
# before forking the application. This takes advantage of Copy On Write
# process behavior so workers use less memory.
#
# preload_app!
# Allow puma to be restarted by `bin/rails restart` command.
plugin :tmp_restart
| ruby | MIT | 9dac8befb1b8ca446b46ae287a922ed4f2d76c6c | 2026-01-04T17:51:52.951854Z | false |
bullet-train-co/showcase | https://github.com/bullet-train-co/showcase/blob/9dac8befb1b8ca446b46ae287a922ed4f2d76c6c/test/dummy/config/routes.rb | test/dummy/config/routes.rb | Rails.application.routes.draw do
get "/main_app_root" => redirect("/"), as: :main_app_root
root to: redirect("/docs/showcase")
mount Showcase::Engine, at: "docs/showcase"
end
| ruby | MIT | 9dac8befb1b8ca446b46ae287a922ed4f2d76c6c | 2026-01-04T17:51:52.951854Z | false |
bullet-train-co/showcase | https://github.com/bullet-train-co/showcase/blob/9dac8befb1b8ca446b46ae287a922ed4f2d76c6c/test/dummy/config/boot.rb | test/dummy/config/boot.rb | # Set up gems listed in the Gemfile.
ENV["BUNDLE_GEMFILE"] ||= File.expand_path("../../../Gemfile", __dir__)
require "bundler/setup" if File.exist?(ENV["BUNDLE_GEMFILE"])
$LOAD_PATH.unshift File.expand_path("../../../lib", __dir__)
| ruby | MIT | 9dac8befb1b8ca446b46ae287a922ed4f2d76c6c | 2026-01-04T17:51:52.951854Z | false |
bullet-train-co/showcase | https://github.com/bullet-train-co/showcase/blob/9dac8befb1b8ca446b46ae287a922ed4f2d76c6c/test/dummy/config/initializers/content_security_policy.rb | test/dummy/config/initializers/content_security_policy.rb | # Be sure to restart your server when you modify this file.
# Define an application-wide content security policy.
# See the Securing Rails Applications Guide for more information:
# https://guides.rubyonrails.org/security.html#content-security-policy-header
# Rails.application.configure do
# config.content_security_policy do |policy|
# policy.default_src :self, :https
# policy.font_src :self, :https, :data
# policy.img_src :self, :https, :data
# policy.object_src :none
# policy.script_src :self, :https
# policy.style_src :self, :https
# # Specify URI for violation reports
# # policy.report_uri "/csp-violation-report-endpoint"
# end
#
# # Generate session nonces for permitted importmap and inline scripts
# config.content_security_policy_nonce_generator = ->(request) { request.session.id.to_s }
# config.content_security_policy_nonce_directives = %w(script-src)
#
# # Report violations without enforcing the policy.
# # config.content_security_policy_report_only = true
# end
| ruby | MIT | 9dac8befb1b8ca446b46ae287a922ed4f2d76c6c | 2026-01-04T17:51:52.951854Z | false |
bullet-train-co/showcase | https://github.com/bullet-train-co/showcase/blob/9dac8befb1b8ca446b46ae287a922ed4f2d76c6c/test/dummy/config/initializers/filter_parameter_logging.rb | test/dummy/config/initializers/filter_parameter_logging.rb | # Be sure to restart your server when you modify this file.
# Configure parameters to be filtered from the log file. Use this to limit dissemination of
# sensitive information. See the ActiveSupport::ParameterFilter documentation for supported
# notations and behaviors.
Rails.application.config.filter_parameters += [
:passw, :secret, :token, :_key, :crypt, :salt, :certificate, :otp, :ssn
]
| ruby | MIT | 9dac8befb1b8ca446b46ae287a922ed4f2d76c6c | 2026-01-04T17:51:52.951854Z | false |
bullet-train-co/showcase | https://github.com/bullet-train-co/showcase/blob/9dac8befb1b8ca446b46ae287a922ed4f2d76c6c/test/dummy/config/initializers/inflections.rb | test/dummy/config/initializers/inflections.rb | # Be sure to restart your server when you modify this file.
# Add new inflection rules using the following format. Inflections
# are locale specific, and you may define rules for as many different
# locales as you wish. All of these examples are active by default:
# ActiveSupport::Inflector.inflections(:en) do |inflect|
# inflect.plural /^(ox)$/i, "\\1en"
# inflect.singular /^(ox)en/i, "\\1"
# inflect.irregular "person", "people"
# inflect.uncountable %w( fish sheep )
# end
# These inflection rules are supported but not enabled by default:
# ActiveSupport::Inflector.inflections(:en) do |inflect|
# inflect.acronym "RESTful"
# end
| ruby | MIT | 9dac8befb1b8ca446b46ae287a922ed4f2d76c6c | 2026-01-04T17:51:52.951854Z | false |
bullet-train-co/showcase | https://github.com/bullet-train-co/showcase/blob/9dac8befb1b8ca446b46ae287a922ed4f2d76c6c/test/dummy/config/initializers/permissions_policy.rb | test/dummy/config/initializers/permissions_policy.rb | # Define an application-wide HTTP permissions policy. For further
# information see https://developers.google.com/web/updates/2018/06/feature-policy
#
# Rails.application.config.permissions_policy do |f|
# f.camera :none
# f.gyroscope :none
# f.microphone :none
# f.usb :none
# f.fullscreen :self
# f.payment :self, "https://secure.example.com"
# end
| ruby | MIT | 9dac8befb1b8ca446b46ae287a922ed4f2d76c6c | 2026-01-04T17:51:52.951854Z | false |
bullet-train-co/showcase | https://github.com/bullet-train-co/showcase/blob/9dac8befb1b8ca446b46ae287a922ed4f2d76c6c/test/dummy/config/initializers/assets.rb | test/dummy/config/initializers/assets.rb | # Be sure to restart your server when you modify this file.
# Version of your assets, change this if you want to expire all your assets.
Rails.application.config.assets.version = "1.0"
# Add additional assets to the asset load path.
# Rails.application.config.assets.paths << Emoji.images_path
# Precompile additional assets.
# application.js, application.css, and all non-JS/CSS in the app/assets
# folder are already added.
# Rails.application.config.assets.precompile += %w( admin.js admin.css )
| ruby | MIT | 9dac8befb1b8ca446b46ae287a922ed4f2d76c6c | 2026-01-04T17:51:52.951854Z | false |
bullet-train-co/showcase | https://github.com/bullet-train-co/showcase/blob/9dac8befb1b8ca446b46ae287a922ed4f2d76c6c/test/dummy/config/environments/test.rb | test/dummy/config/environments/test.rb | require "active_support/core_ext/integer/time"
# The test environment is used exclusively to run your application's
# test suite. You never need to work with it otherwise. Remember that
# your test database is "scratch space" for the test suite and is wiped
# and recreated between test runs. Don't rely on the data there!
Rails.application.configure do
# Settings specified here will take precedence over those in config/application.rb.
# Turn false under Spring and add config.action_view.cache_template_loading = true.
config.cache_classes = true
# Eager loading loads your whole application. When running a single test locally,
# this probably isn't necessary. It's a good idea to do in a continuous integration
# system, or in some way before deploying your code.
config.eager_load = ENV["CI"].present?
# Configure public file server for tests with Cache-Control for performance.
config.public_file_server.enabled = true
config.public_file_server.headers = {
"Cache-Control" => "public, max-age=#{1.hour.to_i}"
}
# Show full error reports and disable caching.
config.consider_all_requests_local = true
config.action_controller.perform_caching = false
config.cache_store = :null_store
# Raise exceptions instead of rendering exception templates.
config.action_dispatch.show_exceptions = false
# Disable request forgery protection in test environment.
config.action_controller.allow_forgery_protection = false
# Store uploaded files on the local file system in a temporary directory.
config.active_storage.service = :test
config.action_mailer.perform_caching = false
# Tell Action Mailer not to deliver emails to the real world.
# The :test delivery method accumulates sent emails in the
# ActionMailer::Base.deliveries array.
config.action_mailer.delivery_method = :test
# Print deprecation notices to the stderr.
config.active_support.deprecation = :stderr
# Raise exceptions for disallowed deprecations.
config.active_support.disallowed_deprecation = :raise
# Tell Active Support which deprecation messages to disallow.
config.active_support.disallowed_deprecation_warnings = []
# Raises error for missing translations.
# config.i18n.raise_on_missing_translations = true
# Annotate rendered view with file names.
# config.action_view.annotate_rendered_view_with_filenames = true
end
| ruby | MIT | 9dac8befb1b8ca446b46ae287a922ed4f2d76c6c | 2026-01-04T17:51:52.951854Z | false |
bullet-train-co/showcase | https://github.com/bullet-train-co/showcase/blob/9dac8befb1b8ca446b46ae287a922ed4f2d76c6c/test/dummy/config/environments/development.rb | test/dummy/config/environments/development.rb | require "active_support/core_ext/integer/time"
Rails.application.configure do
# Settings specified here will take precedence over those in config/application.rb.
# In the development environment your application's code is reloaded any time
# it changes. This slows down response time but is perfect for development
# since you don't have to restart the web server when you make code changes.
config.cache_classes = false
# Do not eager load code on boot.
config.eager_load = false
# Show full error reports.
config.consider_all_requests_local = true
# Enable server timing
config.server_timing = true
# Enable/disable caching. By default caching is disabled.
# Run rails dev:cache to toggle caching.
if Rails.root.join("tmp/caching-dev.txt").exist?
config.action_controller.perform_caching = true
config.action_controller.enable_fragment_cache_logging = true
config.cache_store = :memory_store
config.public_file_server.headers = {
"Cache-Control" => "public, max-age=#{2.days.to_i}"
}
else
config.action_controller.perform_caching = false
config.cache_store = :null_store
end
# Store uploaded files on the local file system (see config/storage.yml for options).
config.active_storage.service = :local
# Don't care if the mailer can't send.
config.action_mailer.raise_delivery_errors = false
config.action_mailer.perform_caching = false
# Print deprecation notices to the Rails logger.
config.active_support.deprecation = :log
# Raise exceptions for disallowed deprecations.
config.active_support.disallowed_deprecation = :raise
# Tell Active Support which deprecation messages to disallow.
config.active_support.disallowed_deprecation_warnings = []
# Raise an error on page load if there are pending migrations.
config.active_record.migration_error = :page_load
# Highlight code that triggered database queries in logs.
config.active_record.verbose_query_logs = true
# Suppress logger output for asset requests.
config.assets.quiet = true
# Raises error for missing translations.
# config.i18n.raise_on_missing_translations = true
# Annotate rendered view with file names.
# config.action_view.annotate_rendered_view_with_filenames = true
# Uncomment if you wish to allow Action Cable access from any origin.
# config.action_cable.disable_request_forgery_protection = true
end
| ruby | MIT | 9dac8befb1b8ca446b46ae287a922ed4f2d76c6c | 2026-01-04T17:51:52.951854Z | false |
bullet-train-co/showcase | https://github.com/bullet-train-co/showcase/blob/9dac8befb1b8ca446b46ae287a922ed4f2d76c6c/test/dummy/config/environments/production.rb | test/dummy/config/environments/production.rb | require "active_support/core_ext/integer/time"
Rails.application.configure do
# Settings specified here will take precedence over those in config/application.rb.
# Code is not reloaded between requests.
config.cache_classes = true
# Eager load code on boot. This eager loads most of Rails and
# your application in memory, allowing both threaded web servers
# and those relying on copy on write to perform better.
# Rake tasks automatically ignore this option for performance.
config.eager_load = true
# Full error reports are disabled and caching is turned on.
config.consider_all_requests_local = false
config.action_controller.perform_caching = true
# Ensures that a master key has been made available in either ENV["RAILS_MASTER_KEY"]
# or in config/master.key. This key is used to decrypt credentials (and other encrypted files).
# config.require_master_key = true
# Disable serving static files from the `/public` folder by default since
# Apache or NGINX already handles this.
config.public_file_server.enabled = ENV["RAILS_SERVE_STATIC_FILES"].present?
# Compress CSS using a preprocessor.
# config.assets.css_compressor = :sass
# Do not fallback to assets pipeline if a precompiled asset is missed.
config.assets.compile = false
# Enable serving of images, stylesheets, and JavaScripts from an asset server.
# config.asset_host = "http://assets.example.com"
# Specifies the header that your server uses for sending files.
# config.action_dispatch.x_sendfile_header = "X-Sendfile" # for Apache
# config.action_dispatch.x_sendfile_header = "X-Accel-Redirect" # for NGINX
# Store uploaded files on the local file system (see config/storage.yml for options).
config.active_storage.service = :local
# Mount Action Cable outside main process or domain.
# config.action_cable.mount_path = nil
# config.action_cable.url = "wss://example.com/cable"
# config.action_cable.allowed_request_origins = [ "http://example.com", /http:\/\/example.*/ ]
# Force all access to the app over SSL, use Strict-Transport-Security, and use secure cookies.
# config.force_ssl = true
# Include generic and useful information about system operation, but avoid logging too much
# information to avoid inadvertent exposure of personally identifiable information (PII).
config.log_level = :info
# Prepend all log lines with the following tags.
config.log_tags = [ :request_id ]
# Use a different cache store in production.
# config.cache_store = :mem_cache_store
# Use a real queuing backend for Active Job (and separate queues per environment).
# config.active_job.queue_adapter = :resque
# config.active_job.queue_name_prefix = "dummy_production"
config.action_mailer.perform_caching = false
# Ignore bad email addresses and do not raise email delivery errors.
# Set this to true and configure the email server for immediate delivery to raise delivery errors.
# config.action_mailer.raise_delivery_errors = false
# Enable locale fallbacks for I18n (makes lookups for any locale fall back to
# the I18n.default_locale when a translation cannot be found).
config.i18n.fallbacks = true
# Don't log any deprecations.
config.active_support.report_deprecations = false
# Use default logging formatter so that PID and timestamp are not suppressed.
config.log_formatter = ::Logger::Formatter.new
# Use a different logger for distributed setups.
# require "syslog/logger"
# config.logger = ActiveSupport::TaggedLogging.new(Syslog::Logger.new "app-name")
if ENV["RAILS_LOG_TO_STDOUT"].present?
logger = ActiveSupport::Logger.new(STDOUT)
logger.formatter = config.log_formatter
config.logger = ActiveSupport::TaggedLogging.new(logger)
end
# Do not dump schema after migrations.
config.active_record.dump_schema_after_migration = false
end
| ruby | MIT | 9dac8befb1b8ca446b46ae287a922ed4f2d76c6c | 2026-01-04T17:51:52.951854Z | false |
bullet-train-co/showcase | https://github.com/bullet-train-co/showcase/blob/9dac8befb1b8ca446b46ae287a922ed4f2d76c6c/lib/showcase-rails.rb | lib/showcase-rails.rb | require "showcase"
| ruby | MIT | 9dac8befb1b8ca446b46ae287a922ed4f2d76c6c | 2026-01-04T17:51:52.951854Z | false |
bullet-train-co/showcase | https://github.com/bullet-train-co/showcase/blob/9dac8befb1b8ca446b46ae287a922ed4f2d76c6c/lib/showcase.rb | lib/showcase.rb | require_relative "showcase/version"
module Showcase
  autoload :PreviewsTest, "showcase/previews_test"
  autoload :RouteHelper, "showcase/route_helper"
  autoload :Options, "showcase/options"

  # `tree_opens` controls whether sidebar trees start expanded. It is always
  # stored as a callable; non-callable values are wrapped in a proc that
  # returns them for every tree.
  singleton_class.attr_reader :tree_opens
  def self.tree_opens=(opens)
    @tree_opens = opens.respond_to?(:call) ? opens : proc { opens }
  end
  self.tree_opens = true # All open by default

  # `sample_renderer` is a callable turning (source, syntax) into HTML.
  # Lazily defaults to Rouge-based highlighting when the host app bundles the
  # rouge gem; otherwise falls back to returning the source unchanged.
  singleton_class.attr_writer :sample_renderer
  def self.sample_renderer
    @sample_renderer ||=
      begin
        gem "rouge" # Activate the app-bundled Rouge gem to setup default syntax highlighting.
        require "rouge"
        formatter = Rouge::Formatters::HTML.new
        @sample_renderer = ->(source, syntax) do
          lexed = Rouge::Lexer.find(syntax).lex(source)
          formatter.format(lexed).html_safe
        end
      rescue LoadError
        # Rouge isn't available: pass the source through untouched.
        proc { _1 }
      end
  end

  # Relative paths of every preview template visible to the engine
  # controller's view paths (deduplicated across paths).
  def self.previews
    Showcase::EngineController.view_paths.map(&:path).flat_map do |root|
      Dir.glob("**/*.*", base: File.join(root, "showcase/previews"))
    end.uniq
  end

  def self.options
    Options
  end

  # Stimulus options DSL: each helper records one data-attribute convention
  # for the context's @controller.
  options.define :stimulus do
    def targets(name, ...)
      option(%(data-#{@controller}-target="#{name}"), ...)
    end

    def values(name, ...)
      option("data-#{@controller}-#{name}-value", ...)
    end

    def classes(name, ...)
      option("data-#{@controller}-#{name}-class", ...)
    end

    def outlet(name, ...)
      option("data-#{@controller}-#{name}-outlet", ...)
    end

    def action(name, ...)
      option(%(data-action="#{name}"), ...)
    end
  end

  # nice_partials options DSL: records a named content block option.
  options.define :nice_partials do
    def content_block(*arguments, **options, &block)
      option(*arguments, **options, type: "Content Block", &block)
    end
  end
end
require "showcase/engine" if defined?(Rails::Engine)
| ruby | MIT | 9dac8befb1b8ca446b46ae287a922ed4f2d76c6c | 2026-01-04T17:51:52.951854Z | false |
bullet-train-co/showcase | https://github.com/bullet-train-co/showcase/blob/9dac8befb1b8ca446b46ae287a922ed4f2d76c6c/lib/showcase/version.rb | lib/showcase/version.rb | module Showcase
VERSION = "0.5.0"
end
| ruby | MIT | 9dac8befb1b8ca446b46ae287a922ed4f2d76c6c | 2026-01-04T17:51:52.951854Z | false |
bullet-train-co/showcase | https://github.com/bullet-train-co/showcase/blob/9dac8befb1b8ca446b46ae287a922ed4f2d76c6c/lib/showcase/options.rb | lib/showcase/options.rb | require "active_support/option_merger"
# Collects documented options for a showcase preview and exposes them as rows
# for a rendered table (see #headers / #each). Option groups can be reused via
# "contexts" registered through Showcase::Options.define.
class Showcase::Options
  include Enumerable

  def initialize(view_context)
    @view_context = view_context
    @options = []
    # Preferred column order for the rendered options table.
    @order = [:name, :required, :type, :default, :description]
  end

  delegate :empty?, to: :@options

  # Showcase.options.define :stimulus do
  #   def value(name, ...)
  #     option("data-#{@controller}-#{name}-value", ...)
  #   end
  # end
  singleton_class.attr_reader :contexts
  # Each context key lazily maps to its own Context subclass so definitions
  # for different keys stay isolated from each other.
  @contexts = Hash.new { |h,k| h[k] = Class.new Context }

  def self.define(key, &block)
    contexts[key].class_eval(&block) # Lets users reopen an already defined context class.
  end

  # showcase.options.stimulus controller: :welcome do |o|
  #   o.value :greeting, default: "Hello"
  # end
  def context(key, **options, &block)
    context = self.class.contexts.fetch(key)
    context.new(@view_context, @options, **options).tap { yield _1 if block_given? }
  end

  # With no positional arguments, returns a merger so the chained call implies
  # required: true; otherwise records the option directly.
  def required(*arguments, **keywords, &block)
    if arguments.none?
      ActiveSupport::OptionMerger.new(self, required: true)
    else
      option(*arguments, **keywords, required: true, &block)
    end
  end

  # Counterpart of #required with required: false.
  def optional(*arguments, **keywords, &block)
    if arguments.none?
      ActiveSupport::OptionMerger.new(self, required: false)
    else
      option(*arguments, **keywords, required: false, &block)
    end
  end

  # Sentinel distinguishing "no default given" from an explicit nil default.
  DEFAULT_OMITTED = Object.new

  # Records a single option row. A block may supply the description as
  # captured (de-indented) markup; the type is inferred from the default
  # unless given explicitly.
  def option(name, description = nil, required: false, type: nil, default: DEFAULT_OMITTED, **options, &block)
    description ||= @view_context.capture(&block).remove(/^\s+/).html_safe if block
    type ||= type_from_default(default)
    default = default == DEFAULT_OMITTED ? nil : default.inspect
    @options << options.with_defaults(name: name, default: default, type: type, description: description, required: required)
  end

  # Column headers: the preferred order first, then any extra keys used.
  def headers
    @headers ||= @order | @options.flat_map(&:keys).uniq.sort
  end

  # Yields each option as a hash keyed by #headers (missing keys map to nil).
  def each(&block)
    @options.each do |option|
      yield headers.index_with { option[_1] }
    end
  end

  private

  # DSL contexts share the parent's options array so options recorded within
  # a context show up in the main table; extra kwargs become ivars (e.g.
  # @controller for the stimulus context).
  class Context < Showcase::Options
    def initialize(view_context, options, **kwargs)
      super(view_context)
      @options = options
      kwargs.each { instance_variable_set(:"@#{_1}", _2) }
    end
  end

  # Infers a display type from the default value.
  def type_from_default(default)
    case default
    when DEFAULT_OMITTED then String
    when true, false then "Boolean"
    when nil then "nil"
    else
      default.class
    end
  end
end
| ruby | MIT | 9dac8befb1b8ca446b46ae287a922ed4f2d76c6c | 2026-01-04T17:51:52.951854Z | false |
bullet-train-co/showcase | https://github.com/bullet-train-co/showcase/blob/9dac8befb1b8ca446b46ae287a922ed4f2d76c6c/lib/showcase/previews_test.rb | lib/showcase/previews_test.rb | class Showcase::PreviewsTest < ActionView::TestCase
setup { view.extend Showcase::EngineController._helpers }
def self.inherited(test_class)
super
test_class.prepare
end
def self.prepare
tree = Showcase::Path.tree
tree.flat_map(&:ordered_paths).each do |path|
test "Showcase: automatically renders showcase/previews/#{path.id}" do
render "showcase/engine/preview", preview: path.preview_for(view)
assert_showcase_preview(path.id)
end
end
test "Showcase: isn't empty" do
assert_not_empty tree, "Showcase couldn't find any samples to generate tests for"
end
end
def self.test(name = nil, showcase: nil, &block)
if name
super(name, &block)
else
super "Showcase: showcase/previews/#{showcase}" do
path = Showcase::Path.new(showcase)
render "showcase/engine/preview", preview: path.preview_for(view)
assert_showcase_preview(path.id)
instance_eval(&block)
end
end
end
# Override `assert_showcase_preview` to add custom assertions.
def assert_showcase_preview(id)
end
end
| ruby | MIT | 9dac8befb1b8ca446b46ae287a922ed4f2d76c6c | 2026-01-04T17:51:52.951854Z | false |
bullet-train-co/showcase | https://github.com/bullet-train-co/showcase/blob/9dac8befb1b8ca446b46ae287a922ed4f2d76c6c/lib/showcase/route_helper.rb | lib/showcase/route_helper.rb | # frozen_string_literal: true
# Mixed into engine views so that unknown Rails route helpers (*_path /
# *_url) fall through to the host application's routes instead of raising
# inside the isolated engine.
module Showcase::RouteHelper
  def method_missing(name, ...)
    if name.end_with?("_path", "_url")
      main_app.public_send(name, ...)
    else
      super
    end
  end

  # Keep `respond_to?` consistent with method_missing: any *_path/*_url name
  # is claimed as respondable, since it will be delegated to main_app.
  def respond_to_missing?(name, include_private = false)
    name.end_with?("_path", "_url") || super
  end
end
| ruby | MIT | 9dac8befb1b8ca446b46ae287a922ed4f2d76c6c | 2026-01-04T17:51:52.951854Z | false |
bullet-train-co/showcase | https://github.com/bullet-train-co/showcase/blob/9dac8befb1b8ca446b46ae287a922ed4f2d76c6c/lib/showcase/engine.rb | lib/showcase/engine.rb | module Showcase
class Engine < ::Rails::Engine
isolate_namespace Showcase
initializer "showcase.assets" do
config.assets.precompile += %w[showcase_manifest]
end
end
end
| ruby | MIT | 9dac8befb1b8ca446b46ae287a922ed4f2d76c6c | 2026-01-04T17:51:52.951854Z | false |
bullet-train-co/showcase | https://github.com/bullet-train-co/showcase/blob/9dac8befb1b8ca446b46ae287a922ed4f2d76c6c/config/routes.rb | config/routes.rb | Showcase::Engine.routes.draw do
get "previews/*id", to: "previews#show", as: :preview
root to: "engine#index"
end
| ruby | MIT | 9dac8befb1b8ca446b46ae287a922ed4f2d76c6c | 2026-01-04T17:51:52.951854Z | false |
tog/tog | https://github.com/tog/tog/blob/4083b176a982d6d8ba36cc0de30e510e149c6520/app_generators/togify/togify_generator.rb | app_generators/togify/togify_generator.rb | require 'tog'
require 'zip/zip'
require 'net/http'
class TogifyGenerator < RubiGen::Base
DEFAULT_SHEBANG = File.join(Config::CONFIG['bindir'],
Config::CONFIG['ruby_install_name'])
default_options :author => nil
attr_reader :name
def initialize(runtime_args, runtime_options = {})
super
usage if args.empty?
@destination_root = File.expand_path(args.shift)
@name = base_name
extract_options
end
def manifest
record do |m|
BASEDIRS.each { |path| m.directory path }
# Include tog rake tasks on the app
include_tog_rake_tasks("#{destination_root}/Rakefile")
# Install desert dependency
require_desert_on_environment("#{destination_root}/config/environment.rb")
# PLugins
plugins = install_default_plugins
# Migrations
m.migration_template 'integrate_tog.rb', 'db/migrate', :assigns => {
:migration_name => "IntegrateTog",
:plugins => plugins
},:migration_file_name => 'integrate_tog'
end
end
protected
def banner
<<-EOS
Apply tog platform to an existing Rails app
USAGE: #{spec.name} name
EOS
end
def add_options!(opts)
opts.separator ''
opts.separator 'Options:'
# For each option below, place the default
# at the top of the file next to "default_options"
# opts.on("-a", "--author=\"Your Name\"", String,
# "Some comment about this option",
# "Default: none") { |options[:author]| }
opts.on("--skip-tog_user",
"Don't add tog_user in the tog integration process. Use this if you have a User model and signup process already working") { |v| options[:skip_tog_user] = v }
opts.on("--development",
"Clone the repositories from the private clone urls allowing the developers to develop the plugins on a togified app.") { |v| options[:development] = v }
opts.on("-v", "--version", "Show the #{File.basename($0)} version number and quit.")
end
def extract_options
# for each option, extract it into a local variable (and create an "attr_reader :author" at the top)
# Templates can access these value via the attr_reader-generated methods, but not the
# raw instance variable value.
# @author = options[:author]
end
def include_tog_rake_tasks(rakefile)
sentinel = "require 'tasks/rails'"
logger.create "require tog rake tasks"
unless options[:pretend]
gsub_file rakefile, /(#{Regexp.escape(sentinel)})/mi do |match|
"#{match}\n\nrequire 'tasks/tog'\n"
end
end
end
def require_desert_on_environment(env_file)
sentinel = 'Rails::Initializer.run do |config|'
logger.create "require 'desert' on environment"
unless options[:pretend]
gsub_file env_file, /(#{Regexp.escape(sentinel)})/mi do |match|
"require 'desert'\n#{match}\n "
end
end
end
# Tog plugins
def default_plugins
plugins = %w{ tog_core }
plugins << "tog_user" unless options[:skip_tog_user]
plugins += %w{tog_social tog_mail}
end
def install_default_plugins
default_plugins.collect{|plugin|
plugin_path = "#{destination_root}/vendor/plugins/#{plugin}"
checkout_code(plugin_path, plugin)
logger.create "vendor/plugins/#{plugin}"
route_from_plugins("#{destination_root}/config/routes.rb", plugin)
{:name => plugin,:current_migration => current_migration_number(plugin_path) }
}
end
def checkout_code(plugin_path, plugin)
options[:development] ? clone_repo(plugin_path, plugin): tarball_repo(plugin_path, plugin)
end
# "vendor/plugins/tog_core", "tog_core"
def tarball_repo(plugin_path, plugin)
uri = "http://github.com/tog/#{plugin}/zipball/v#{Tog::Version::STRING}"
zip = tarball_fetch(uri)
tarball_unpack(zip, plugin)
end
def tarball_fetch(uri, redirect_limit = 10)
raise ArgumentError, "HTTP redirect too deep trying to get #{uri}" if redirect_limit == 0
response = Net::HTTP.get_response(URI.parse(uri))
case response
when Net::HTTPSuccess
temp_zip = Time.now.to_i.to_s
open(temp_zip, "wb") { |file|
file.write(response.read_body)
}
temp_zip
when Net::HTTPRedirection then tarball_fetch(response['location'], redirect_limit - 1)
else
tarball_fetch(uri, redirect_limit - 1)
end
end
def tarball_unpack(file, plugin)
destination = "#{destination_root}/vendor/plugins"
begin
Zip::ZipFile.open(file) { |zip_file|
zip_file.each { |f|
f_path=File.join(destination, f.name)
FileUtils.mkdir_p(File.dirname(f_path))
zip_file.extract(f, f_path) unless File.exist?(f_path)
}
}
temp = Dir.glob(File.join(destination, "tog-#{plugin}*")).first
FileUtils.mv temp, File.join(destination, plugin)
FileUtils.rm_rf file
rescue Exception => e
logger.error "There has been a problem trying to unpack the #{plugin} tarball downloaded from github. Remove the changes made on your app by togify and try again. Sorry for the inconveniences."
exit(-1)
end
end
def clone_repo(plugin_path, plugin)
repository = "git@github.com:tog/#{plugin}.git"
revision = "head"
FileUtils.rm_rf(plugin_path)
system("git clone #{repository} #{plugin_path}")
end
def current_migration_number(plugin_path)
Dir.glob("#{plugin_path}/db/migrate/*.rb").inject(0) do |max, file_path|
n = File.basename(file_path).split('_', 2).first.to_i
if n > max then n else max end
end
end
def route_from_plugins(routes_file, plugin)
sentinel = 'ActionController::Routing::Routes.draw do |map|'
logger.route "map.routes_from_plugin #{plugin}"
unless options[:pretend]
gsub_file routes_file, /(#{Regexp.escape(sentinel)})/mi do |match|
"#{match}\n map.routes_from_plugin '#{plugin}'\n"
end
end
end
def gsub_file(path, regexp, *args, &block)
content = File.read(path).gsub(regexp, *args, &block)
File.open(path, 'wb') { |file| file.write(content) }
end
BASEDIRS = %w(
config
db
db/migrate
lib
lib/tasks
vendor
vendor/plugins
)
end | ruby | MIT | 4083b176a982d6d8ba36cc0de30e510e149c6520 | 2026-01-04T17:51:51.420773Z | false |
tog/tog | https://github.com/tog/tog/blob/4083b176a982d6d8ba36cc0de30e510e149c6520/app_generators/togify/templates/integrate_tog.rb | app_generators/togify/templates/integrate_tog.rb | class <%= migration_name %> < ActiveRecord::Migration
def self.up<% plugins.each do |plugin| %>
migrate_plugin "<%= plugin[:name] %>", <%= plugin[:current_migration] %><%- end %>
end
def self.down<% plugins.reverse.each do |plugin| %>
migrate_plugin "<%= plugin[:name] %>", 0 <%- end %>
end
end
| ruby | MIT | 4083b176a982d6d8ba36cc0de30e510e149c6520 | 2026-01-04T17:51:51.420773Z | false |
tog/tog | https://github.com/tog/tog/blob/4083b176a982d6d8ba36cc0de30e510e149c6520/test/test_tog_plugin_generator.rb | test/test_tog_plugin_generator.rb | require File.join(File.dirname(__FILE__), "test_generator_helper.rb")
class TestTogPluginGenerator < Test::Unit::TestCase
include RubiGen::GeneratorTestHelper
def setup
bare_setup
end
def teardown
bare_teardown
end
# Some generator-related assertions:
# assert_generated_file(name, &block) # block passed the file contents
# assert_directory_exists(name)
# assert_generated_class(name, &block)
# assert_generated_module(name, &block)
# assert_generated_test_for(name, &block)
# The assert_generated_(class|module|test_for) &block is passed the body of the class/module within the file
# assert_has_method(body, *methods) # check that the body has a list of methods (methods with parentheses not supported yet)
#
# Other helper methods are:
# app_root_files - put this in teardown to show files generated by the test method (e.g. p app_root_files)
# bare_setup - place this in setup method to create the APP_ROOT folder for each test
# bare_teardown - place this in teardown method to destroy the TMP_ROOT or APP_ROOT folder after each test
def test_generator_without_options
name = "myapp"
run_generator('tog_plugin', [name], sources)
#todo implement this
#assert_generated_file("some_file")
end
private
def sources
[RubiGen::PathSource.new(:test, File.join(File.dirname(__FILE__),"..", generator_path))
]
end
def generator_path
"generators"
end
end
| ruby | MIT | 4083b176a982d6d8ba36cc0de30e510e149c6520 | 2026-01-04T17:51:51.420773Z | false |
tog/tog | https://github.com/tog/tog/blob/4083b176a982d6d8ba36cc0de30e510e149c6520/test/test_generator_helper.rb | test/test_generator_helper.rb | begin
require File.dirname(__FILE__) + '/test_helper'
rescue LoadError
require 'test/unit'
end
require 'fileutils'
# Must set before requiring generator libs.
TMP_ROOT = File.dirname(__FILE__) + "/tmp" unless defined?(TMP_ROOT)
PROJECT_NAME = "tog" unless defined?(PROJECT_NAME)
app_root = File.join(TMP_ROOT, PROJECT_NAME)
if defined?(APP_ROOT)
APP_ROOT.replace(app_root)
else
APP_ROOT = app_root
end
if defined?(RAILS_ROOT)
RAILS_ROOT.replace(app_root)
else
RAILS_ROOT = app_root
end
begin
require 'active_record'
require 'rubigen'
rescue LoadError
require 'rubygems'
require 'active_record'
require 'rubigen'
end
require 'rubigen/helpers/generator_test_helper'
def copy_to_fake (orig, dest)
FileUtils.cp(File.join(File.dirname(__FILE__), orig), File.join(APP_ROOT, dest))
end
def setup_fake_tog_app
bare_setup
FileUtils.mkdir_p(File.join(APP_ROOT, "/config"))
copy_to_fake("/templates/environment.rb", "/config/environment.rb")
copy_to_fake("/templates/routes.rb", "/config/routes.rb")
copy_to_fake("/templates/Rakefile", "/Rakefile")
@plugins = %w{ tog_core tog_social tog_mail tog_user}
end
def teardown_fake_tog_app
bare_teardown
end
def assert_has_require(body,*requires)
requires.each do |req|
assert body=~/require '#{req.to_s}'/,"should have require '#{req.to_s}'"
yield( req, $1 ) if block_given?
end
end
def assert_has_plugin_routes(body,*routes)
routes.each do |route|
assert body=~/map.routes_from_plugin '#{route.to_s}'/,"should have routes '#{route.to_s}'"
yield( route, $1 ) if block_given?
end
end
| ruby | MIT | 4083b176a982d6d8ba36cc0de30e510e149c6520 | 2026-01-04T17:51:51.420773Z | false |
tog/tog | https://github.com/tog/tog/blob/4083b176a982d6d8ba36cc0de30e510e149c6520/test/test_tog.rb | test/test_tog.rb | require File.dirname(__FILE__) + '/test_helper.rb'
class TestTog < Test::Unit::TestCase
def setup
end
def test_truth
assert true
end
end
| ruby | MIT | 4083b176a982d6d8ba36cc0de30e510e149c6520 | 2026-01-04T17:51:51.420773Z | false |
tog/tog | https://github.com/tog/tog/blob/4083b176a982d6d8ba36cc0de30e510e149c6520/test/test_helper.rb | test/test_helper.rb | require 'test/unit'
require File.dirname(__FILE__) + '/../lib/tog'
| ruby | MIT | 4083b176a982d6d8ba36cc0de30e510e149c6520 | 2026-01-04T17:51:51.420773Z | false |
tog/tog | https://github.com/tog/tog/blob/4083b176a982d6d8ba36cc0de30e510e149c6520/test/test_rake_tasks.rb | test/test_rake_tasks.rb | require File.join(File.dirname(__FILE__), "test_generator_helper.rb")
require "rake"
class TestRakeTasks < Test::Unit::TestCase
include RubiGen::GeneratorTestHelper
def setup
setup_fake_tog_app
run_generator('togify', [APP_ROOT], sources)
@rake = Rake::Application.new
Rake.application = @rake
load File.join(File.dirname(__FILE__), "..", "lib", "tasks", "platform.rake")
@tog_core_resources_on_public = File.join(APP_ROOT, "public", "tog_core")
end
def teardown
teardown_fake_tog_app
end
def test_copy_resources
@rake["tog:plugins:copy_resources"].invoke
assert File.exists?(@tog_core_resources_on_public)
end
def test_copy_resources_not_copy_svn_dirs
svn_dir_on_tog_core = File.join(APP_ROOT, "vendor", "plugins", "tog_core", "public", ".svn")
FileUtils.mkdir_p(svn_dir_on_tog_core)
assert File.exists?(svn_dir_on_tog_core)
@rake["tog:plugins:copy_resources"].invoke
assert !File.exists?(File.join(@tog_core_resources_on_public, ".svn"))
end
private
def sources
[RubiGen::PathSource.new(:test, File.join(File.dirname(__FILE__),"..", generator_path))]
end
def generator_path
"app_generators"
end
end
| ruby | MIT | 4083b176a982d6d8ba36cc0de30e510e149c6520 | 2026-01-04T17:51:51.420773Z | false |
tog/tog | https://github.com/tog/tog/blob/4083b176a982d6d8ba36cc0de30e510e149c6520/test/test_togify_generator.rb | test/test_togify_generator.rb | require File.join(File.dirname(__FILE__), "test_generator_helper.rb")
class TestTogifyGenerator < Test::Unit::TestCase
include RubiGen::GeneratorTestHelper
def setup
setup_fake_tog_app
end
def teardown
teardown_fake_tog_app
end
def test_generator_without_options
run_generator('togify', [APP_ROOT], sources)
assert_generated_file "Rakefile" do |body|
assert_has_require body, 'tasks/tog'
end
assert_generated_file "config/environment.rb" do |body|
assert_has_require body, 'desert'
end
@plugins.each{|plugin|
assert_directory_exists("vendor/plugins/#{plugin}")
assert_generated_file "config/routes.rb" do |body|
assert_has_plugin_routes body, plugin
end
}
generated_migration = Dir.glob("#{APP_ROOT}/db/migrate/*_integrate_tog.rb")[0]
assert generated_migration, "should be a IntegrateTog migration in the togified app"
File.open(generated_migration, "r") do |file|
assert file.read=~/tog_core.*tog_user.*tog_social.*tog_mail.*tog_mail.*tog_social.*tog_user.*tog_core/m,"plugins migrations should be in correct order"
end
end
def test_generator_with_dev_repositories
run_generator('togify', [APP_ROOT], sources, {:development => true})
@plugins.each{|plugin|
assert_remote_origin_of_plugin(plugin, "git@github.com:tog/(.*).git")
}
end
private
def sources
[RubiGen::PathSource.new(:test, File.join(File.dirname(__FILE__),"..", generator_path))]
end
def generator_path
"app_generators"
end
def assert_remote_origin_of_plugin(plugin, match)
FileUtils.chdir File.join(APP_ROOT, "vendor", "plugins", "#{plugin}") do
remote_origin = %x{git config remote.origin.url}
assert remote_origin.match(match)
end
end
end
| ruby | MIT | 4083b176a982d6d8ba36cc0de30e510e149c6520 | 2026-01-04T17:51:51.420773Z | false |
tog/tog | https://github.com/tog/tog/blob/4083b176a982d6d8ba36cc0de30e510e149c6520/test/templates/environment.rb | test/templates/environment.rb | RAILS_GEM_VERSION = '2.1.0' unless defined? RAILS_GEM_VERSION
require File.join(File.dirname(__FILE__), 'boot')
Rails::Initializer.run do |config|
config.time_zone = 'UTC'
config.action_controller.session = {
:session_key => '_tog_app_session',
:secret => 'c06f15cbc11cb6b70d45df5f9b527aeb18003879c5527b734a862558e596dfc9c4e96e841a7ff5dd44c129aba275cf50f244301956347815699432ba3a1fd53a'
}
end
| ruby | MIT | 4083b176a982d6d8ba36cc0de30e510e149c6520 | 2026-01-04T17:51:51.420773Z | false |
tog/tog | https://github.com/tog/tog/blob/4083b176a982d6d8ba36cc0de30e510e149c6520/test/templates/routes.rb | test/templates/routes.rb | ActionController::Routing::Routes.draw do |map|
map.connect ':controller/:action/:id'
map.connect ':controller/:action/:id.:format'
end
| ruby | MIT | 4083b176a982d6d8ba36cc0de30e510e149c6520 | 2026-01-04T17:51:51.420773Z | false |
tog/tog | https://github.com/tog/tog/blob/4083b176a982d6d8ba36cc0de30e510e149c6520/generators/tog_plugin/tog_plugin_generator.rb | generators/tog_plugin/tog_plugin_generator.rb | class TogPluginGenerator < RubiGen::Base
default_options :author => nil
attr_reader :name
def initialize(runtime_args, runtime_options = {})
super
usage if args.empty?
@name = args.shift
extract_options
end
def manifest
record do |m|
# Ensure appropriate folder(s) exists
m.directory 'some_folder'
# Create stubs
# m.template "template.rb", "some_file_after_erb.rb"
# m.template_copy_each ["template.rb", "template2.rb"]
# m.file "file", "some_file_copied"
# m.file_copy_each ["path/to/file", "path/to/file2"]
end
end
protected
def banner
<<-EOS
Creates a ...
USAGE: #{$0} #{spec.name} name
EOS
end
def add_options!(opts)
# opts.separator ''
# opts.separator 'Options:'
# For each option below, place the default
# at the top of the file next to "default_options"
# opts.on("-a", "--author=\"Your Name\"", String,
# "Some comment about this option",
# "Default: none") { |options[:author]| }
# opts.on("-v", "--version", "Show the #{File.basename($0)} version number and quit.")
end
def extract_options
# for each option, extract it into a local variable (and create an "attr_reader :author" at the top)
# Templates can access these value via the attr_reader-generated methods, but not the
# raw instance variable value.
# @author = options[:author]
end
end | ruby | MIT | 4083b176a982d6d8ba36cc0de30e510e149c6520 | 2026-01-04T17:51:51.420773Z | false |
tog/tog | https://github.com/tog/tog/blob/4083b176a982d6d8ba36cc0de30e510e149c6520/generators/tog_migration/tog_migration_generator.rb | generators/tog_migration/tog_migration_generator.rb | class TogMigrationGenerator < Rails::Generator::NamedBase
def manifest
record do |m|
m.migration_template 'migration.rb', 'db/migrate', :assigns => get_local_assigns
end
end
private
def get_local_assigns
returning(assigns = {}) do
if class_name.underscore =~ /^integrate_(.*)_(?:version)(.*)_(?:from)(.*)/
assigns[:plugins] = [{:name => $1, :to_version => $2, :from_version => $3 }]
else
assigns[:plugins] = []
end
end
end
end
| ruby | MIT | 4083b176a982d6d8ba36cc0de30e510e149c6520 | 2026-01-04T17:51:51.420773Z | false |
tog/tog | https://github.com/tog/tog/blob/4083b176a982d6d8ba36cc0de30e510e149c6520/generators/tog_migration/templates/migration.rb | generators/tog_migration/templates/migration.rb | class <%= class_name.underscore.camelize %> < ActiveRecord::Migration
def self.up<% plugins.each do |plugin| %>
migrate_plugin "<%= plugin[:name] %>", <%= plugin[:to_version] %><%- end %>
end
def self.down<% plugins.reverse.each do |plugin| %>
migrate_plugin "<%= plugin[:name] %>", <%= plugin[:from_version] %><%- end %>
end
end | ruby | MIT | 4083b176a982d6d8ba36cc0de30e510e149c6520 | 2026-01-04T17:51:51.420773Z | false |
tog/tog | https://github.com/tog/tog/blob/4083b176a982d6d8ba36cc0de30e510e149c6520/lib/tog.rb | lib/tog.rb | $:.unshift(File.dirname(__FILE__)) unless $:.include?(File.dirname(__FILE__)) || $:.include?(File.expand_path(File.dirname(__FILE__)))
module Tog
end
require "tog/version" | ruby | MIT | 4083b176a982d6d8ba36cc0de30e510e149c6520 | 2026-01-04T17:51:51.420773Z | false |
tog/tog | https://github.com/tog/tog/blob/4083b176a982d6d8ba36cc0de30e510e149c6520/lib/tasks/tog.rb | lib/tasks/tog.rb | # Load tog rakefile extensions
Dir["#{File.dirname(__FILE__)}/*.rake"].each { |ext| load ext } | ruby | MIT | 4083b176a982d6d8ba36cc0de30e510e149c6520 | 2026-01-04T17:51:51.420773Z | false |
tog/tog | https://github.com/tog/tog/blob/4083b176a982d6d8ba36cc0de30e510e149c6520/lib/tog/version.rb | lib/tog/version.rb | module Tog
module Version
MAJOR = 0
MINOR = 6
TINY = 0
MODULE = "Europa"
STRING = [MAJOR, MINOR, TINY].join('.')
class << self
def to_s
STRING
end
def full_version
"#{MODULE} #{STRING}"
end
alias :to_str :to_s
end
end
end | ruby | MIT | 4083b176a982d6d8ba36cc0de30e510e149c6520 | 2026-01-04T17:51:51.420773Z | false |
tog/tog | https://github.com/tog/tog/blob/4083b176a982d6d8ba36cc0de30e510e149c6520/config/hoe.rb | config/hoe.rb | require 'tog/version'
DEVELOPERS = [
["Aitor García", "aitor@linkingpaths.com"],
["Alberto Molpeceres", "alberto@linkingpaths.com"],
["Roberto Salicio", "roberto@linkingpaths.com"]
]
GEM_NAME = 'tog'
EXTRA_DEPENDENCIES = [
['mislav-will_paginate', '>= 2.3.2'],
['rubigen', '>= 1.3.2'],
['newgem', '>= 1.0.3'],
['desert', '>= 0.5.2'],
['RedCloth', '>= 3.0.4'],
['rubyzip', '>= 0.9.1'],
['oauth', '>= 0.2.7']
]
REV = nil
# UNCOMMENT IF REQUIRED:
# REV = YAML.load(`svn info`)['Revision']
VERS = Tog::Version::STRING + (REV ? ".#{REV}" : "")
class Hoe
def extra_deps
@extra_deps.reject! { |x| Array(x).first == 'hoe' }
@extra_deps
end
end
# Generate all the Rake tasks
# Run 'rake -T' to see list of generated tasks (from gem root directory)
$hoe = Hoe.new(GEM_NAME, VERS) do |p|
DEVELOPERS.each{|dev|
p.developer(dev[0], dev[1])
}
p.description = p.summary = "extensible open source social network platform"
p.url = "http://github.com/tog/tog"
p.test_globs = ["test/**/test_*.rb"]
p.clean_globs |= ['**/.*.sw?', '*.gem', '.config', '**/.DS_Store'] #An array of file patterns to delete on clean.
# == Optional
p.post_install_message = File.open(File.dirname(__FILE__) + "/../POST_INSTALL").read rescue ""
p.changes = p.paragraphs_of("CHANGELOG.md", 0..1).join("\n\n") rescue ""
p.extra_deps = EXTRA_DEPENDENCIES
#p.spec_extras = {} # A hash of extra values to set in the gemspec.
end
require 'newgem/tasks'
| ruby | MIT | 4083b176a982d6d8ba36cc0de30e510e149c6520 | 2026-01-04T17:51:51.420773Z | false |
tog/tog | https://github.com/tog/tog/blob/4083b176a982d6d8ba36cc0de30e510e149c6520/config/requirements.rb | config/requirements.rb | require 'fileutils'
include FileUtils
require 'rubygems'
%w[rake hoe rubigen].each do |req_gem|
begin
require req_gem
rescue LoadError
puts "This Rakefile requires the '#{req_gem}' RubyGem."
puts "Installation: gem install #{req_gem} -y"
exit
end
end
$:.unshift(File.join(File.dirname(__FILE__), %w[.. lib])) | ruby | MIT | 4083b176a982d6d8ba36cc0de30e510e149c6520 | 2026-01-04T17:51:51.420773Z | false |
shiroyasha/factory_bot_instruments | https://github.com/shiroyasha/factory_bot_instruments/blob/8306a388288cc13358463f6666ae836c7e1f0218/spec/db_structure.rb | spec/db_structure.rb | require "active_record"
ActiveRecord::Base.establish_connection(
:adapter => "sqlite3",
:database => "/tmp/factory_bot_instruments_test.db"
)
ActiveRecord::Schema.define do
unless ActiveRecord::Base.connection.tables.include? 'users'
create_table :users do |table|
table.column :name, :string
table.column :username, :string
end
end
unless ActiveRecord::Base.connection.tables.include? 'articles'
create_table :articles do |table|
table.column :title, :string
table.column :content, :string
table.column :user_id, :string
end
end
unless ActiveRecord::Base.connection.tables.include? 'comments'
create_table :comments do |table|
table.column :content, :string
table.column :user_id, :string
table.column :article_id, :string
end
end
end
Dir["spec/models/*.rb"].each { |f| require "./#{f}" }
Dir["spec/factories/*.rb"].each { |f| require "./#{f}" }
| ruby | MIT | 8306a388288cc13358463f6666ae836c7e1f0218 | 2026-01-04T17:51:48.615283Z | false |
shiroyasha/factory_bot_instruments | https://github.com/shiroyasha/factory_bot_instruments/blob/8306a388288cc13358463f6666ae836c7e1f0218/spec/factory_bot_instruments_spec.rb | spec/factory_bot_instruments_spec.rb | require "spec_helper"
RSpec.describe FactoryBotInstruments do
it "has a version number" do
expect(FactoryBotInstruments::VERSION).not_to be_nil
end
describe ".benchmark_report" do
it "keeps the db clean" do
expect { FactoryBotInstruments.benchmark_report }.to_not change { User.count }
end
it "prints the benchmark on STDOUT" do
output = IOHelper.capture do
FactoryBotInstruments.benchmark_report(progress: false)
end
output.split("\n") do |line|
expect(line).to match(/\d+.\d+s\: FactoryBot\..+\(\:.+\)/)
end
end
end
end
| ruby | MIT | 8306a388288cc13358463f6666ae836c7e1f0218 | 2026-01-04T17:51:48.615283Z | false |
shiroyasha/factory_bot_instruments | https://github.com/shiroyasha/factory_bot_instruments/blob/8306a388288cc13358463f6666ae836c7e1f0218/spec/spec_helper.rb | spec/spec_helper.rb | require "bundler/setup"
require "factory_bot_instruments"
require_relative "db_structure"
require_relative "io_helper"
RSpec.configure do |config|
config.expect_with :rspec do |c|
c.syntax = :expect
end
end
| ruby | MIT | 8306a388288cc13358463f6666ae836c7e1f0218 | 2026-01-04T17:51:48.615283Z | false |
shiroyasha/factory_bot_instruments | https://github.com/shiroyasha/factory_bot_instruments/blob/8306a388288cc13358463f6666ae836c7e1f0218/spec/io_helper.rb | spec/io_helper.rb | module IOHelper
def self.capture(&block)
begin
$stdout = StringIO.new
yield
result = $stdout.string
ensure
$stdout = STDOUT
end
result
end
end
| ruby | MIT | 8306a388288cc13358463f6666ae836c7e1f0218 | 2026-01-04T17:51:48.615283Z | false |
shiroyasha/factory_bot_instruments | https://github.com/shiroyasha/factory_bot_instruments/blob/8306a388288cc13358463f6666ae836c7e1f0218/spec/factories/users.rb | spec/factories/users.rb | FactoryBot.define do
factory :user do
name { "Peter Parker" }
username { "spiderman" }
end
end
| ruby | MIT | 8306a388288cc13358463f6666ae836c7e1f0218 | 2026-01-04T17:51:48.615283Z | false |
shiroyasha/factory_bot_instruments | https://github.com/shiroyasha/factory_bot_instruments/blob/8306a388288cc13358463f6666ae836c7e1f0218/spec/factories/comments.rb | spec/factories/comments.rb | FactoryBot.define do
factory :comment do
content { "First!" }
user
article
end
end
| ruby | MIT | 8306a388288cc13358463f6666ae836c7e1f0218 | 2026-01-04T17:51:48.615283Z | false |
shiroyasha/factory_bot_instruments | https://github.com/shiroyasha/factory_bot_instruments/blob/8306a388288cc13358463f6666ae836c7e1f0218/spec/factories/articles.rb | spec/factories/articles.rb | FactoryBot.define do
factory :article do
title { "New Article" }
content { "article content" }
user
end
end
| ruby | MIT | 8306a388288cc13358463f6666ae836c7e1f0218 | 2026-01-04T17:51:48.615283Z | false |
shiroyasha/factory_bot_instruments | https://github.com/shiroyasha/factory_bot_instruments/blob/8306a388288cc13358463f6666ae836c7e1f0218/spec/models/article.rb | spec/models/article.rb | class Article < ActiveRecord::Base
belongs_to :user
has_many :comments
end
| ruby | MIT | 8306a388288cc13358463f6666ae836c7e1f0218 | 2026-01-04T17:51:48.615283Z | false |
shiroyasha/factory_bot_instruments | https://github.com/shiroyasha/factory_bot_instruments/blob/8306a388288cc13358463f6666ae836c7e1f0218/spec/models/comment.rb | spec/models/comment.rb | class Comment < ActiveRecord::Base
belongs_to :user
belongs_to :article
end
| ruby | MIT | 8306a388288cc13358463f6666ae836c7e1f0218 | 2026-01-04T17:51:48.615283Z | false |
shiroyasha/factory_bot_instruments | https://github.com/shiroyasha/factory_bot_instruments/blob/8306a388288cc13358463f6666ae836c7e1f0218/spec/models/user.rb | spec/models/user.rb | class User < ActiveRecord::Base
has_many :articles
has_many :comments
end
| ruby | MIT | 8306a388288cc13358463f6666ae836c7e1f0218 | 2026-01-04T17:51:48.615283Z | false |
shiroyasha/factory_bot_instruments | https://github.com/shiroyasha/factory_bot_instruments/blob/8306a388288cc13358463f6666ae836c7e1f0218/spec/lib/factory_bot_instruments/benchmarking_spec.rb | spec/lib/factory_bot_instruments/benchmarking_spec.rb | require "spec_helper"
RSpec.describe FactoryBotInstruments::Benchmarking do
describe ".benchmark_all" do
it "keeps the db clean" do
expect { FactoryBot.benchmark_all }.to_not change { User.count }
end
it "benchmarks all factories" do
benchmarked_factories = FactoryBot.benchmark_all.map(&:factory)
expect(benchmarked_factories).to include(:user)
expect(benchmarked_factories).to include(:article)
end
it "benchmarks by create, build, and build_stubbed by default" do
benchmarked_methods = FactoryBot.benchmark_all.map(&:method)
expect(benchmarked_methods).to include(:create)
expect(benchmarked_methods).to include(:build_stubbed)
expect(benchmarked_methods).to include(:build)
end
describe "limiting factory bot methods" do
it "runs only passed factory bot methods" do
benchmarked_methods = FactoryBot.benchmark_all(:methods => [:create, :build]).map(&:method)
expect(benchmarked_methods).to include(:create)
expect(benchmarked_methods).to include(:build)
expect(benchmarked_methods).to_not include(:build_stubbed)
end
end
describe "skipping factories" do
it "skipps passed factories" do
benchmarked_factories = FactoryBot.benchmark_all(:except => [:article]).map(&:factory)
expect(benchmarked_factories).to include(:user)
expect(benchmarked_factories).to_not include(:article)
end
end
end
describe ".benchmark" do
it "keeps the db clean" do
expect { FactoryBot.benchmark(:user) }.to_not change { User.count }
end
it "returns the duration in seconds" do
expect(FactoryBot.benchmark(:user)).to be_instance_of(FactoryBotInstruments::Benchmark)
end
it "measures 'FactoryBot.create' by default" do
expect(FactoryBot).to receive(:create)
FactoryBot.benchmark(:user)
end
it "can measure 'FactoryBot.build'" do
expect(FactoryBot).to receive(:build)
FactoryBot.benchmark(:user, :method => :build)
end
it "can measure 'FactoryBot.build_stubbed'" do
expect(FactoryBot).to receive(:build_stubbed)
FactoryBot.benchmark(:user, :method => :build_stubbed)
end
end
end
| ruby | MIT | 8306a388288cc13358463f6666ae836c7e1f0218 | 2026-01-04T17:51:48.615283Z | false |
shiroyasha/factory_bot_instruments | https://github.com/shiroyasha/factory_bot_instruments/blob/8306a388288cc13358463f6666ae836c7e1f0218/spec/lib/factory_bot_instruments/tracing_spec.rb | spec/lib/factory_bot_instruments/tracing_spec.rb | require "spec_helper"
RSpec.describe FactoryBotInstruments::Tracing do
describe ".trace" do
context "default options" do
before do
@output = IOHelper.capture do
FactoryBot.trace { FactoryBot.create(:comment) }
end
puts @output
@output = FactoryBotInstruments::TracingHelpers.uncolorize(@output)
end
it "prints the factory bot steps" do
expect(@output).to include("(start) create :comment")
expect(@output).to include("(start) create :article")
expect(@output).to include("(start) create :user")
expect(@output).to include("(finish) create :comment")
expect(@output).to include("(finish) create :article")
expect(@output).to include("(finish) create :user")
end
it "prints SQL statements" do
expect(@output).to include("INSERT INTO \"comments\"")
end
end
context "without SQL logs" do
before do
@output = IOHelper.capture do
FactoryBot.trace(:sql => false) { FactoryBot.create(:comment) }
end
puts @output
@output = FactoryBotInstruments::TracingHelpers.uncolorize(@output)
end
it "prints the factory bot steps" do
expect(@output).to include("(start) create :comment")
expect(@output).to include("(start) create :article")
expect(@output).to include("(start) create :user")
expect(@output).to include("(finish) create :comment")
expect(@output).to include("(finish) create :article")
expect(@output).to include("(finish) create :user")
end
it "doesn't print SQL statements" do
expect(@output).to_not include("INSERT INTO \"comments\"")
end
end
end
end
| ruby | MIT | 8306a388288cc13358463f6666ae836c7e1f0218 | 2026-01-04T17:51:48.615283Z | false |
shiroyasha/factory_bot_instruments | https://github.com/shiroyasha/factory_bot_instruments/blob/8306a388288cc13358463f6666ae836c7e1f0218/lib/factory_bot_instruments.rb | lib/factory_bot_instruments.rb | require "factory_bot_instruments/version"
require "factory_bot"
require "active_record"
require_relative "factory_bot_instruments/benchmarking"
require_relative "factory_bot_instruments/tracing"
FactoryBot.extend(FactoryBotInstruments::Benchmarking)
FactoryBot.extend(FactoryBotInstruments::Tracing)
module FactoryBotInstruments
def self.benchmark_report(options = {})
options = { :progress => true }.merge(options)
FactoryBot.benchmark_all(**options).each do |benchmark|
puts benchmark
end
end
end
| ruby | MIT | 8306a388288cc13358463f6666ae836c7e1f0218 | 2026-01-04T17:51:48.615283Z | false |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.