hexsha
stringlengths 40
40
| size
int64 2
1.01M
| content
stringlengths 2
1.01M
| avg_line_length
float64 1.5
100
| max_line_length
int64 2
1k
| alphanum_fraction
float64 0.25
1
|
|---|---|---|---|---|---|
032a74f3542424058eefbd2eacf4a631bf0114a1
| 2,303
|
# encoding: utf-8
# Padrino controller for the public blog: post listing, tag browsing,
# single-post display (HTML page or raw Markdown), and comment operations.
RobbinSite.controllers :blog do
  # Paginated post list, newest id first.
  get :index do
    @blogs = Blog.order('id DESC').page(params[:page])
    render 'blog/index'
  end

  # Tag cloud page mounted at /tag.
  get :tag_cloud, :map => '/tag' do
    render 'blog/tag_cloud'
  end

  # Posts carrying a given tag, most recently updated first; 404 when
  # the tag matches nothing.
  get :tag, :map => '/tag/:name' do
    @blogs = Blog.tagged_with(params[:name]).order('content_updated_at DESC').page(params[:page])
    if @blogs.blank?
      halt 404
    else
      render 'blog/tag'
    end
  end

  # Slugged post URL. Serves raw Markdown or the HTML page depending on
  # the negotiated content type; only HTML views bump the view counter.
  get :show_url, :map => '/blog/:id/:url', :provides => [:html, :md] do
    @blog = Blog.find params[:id].to_i
    case content_type
    when :md then
      @blog.content
    when :html then
      @blog.increment_view_count
      render 'blog/show'
    end
  end

  # Id-only post URL; 301-redirects to the slugged URL when the post has one.
  # NOTE(review): `mime_type = content_type` assigns a throwaway local —
  # content_type is simply passed positionally; confirm blog_url's signature.
  get :show, :map => '/blog/:id', :provides => [:html, :md] do
    @blog = Blog.find params[:id].to_i
    redirect blog_url(@blog, mime_type = content_type), 301 unless @blog.slug_url.blank?
    case content_type
    when :md then
      @blog.content
    when :html then
      @blog.increment_view_count
      render 'blog/show'
    end
  end

  # Returns a Markdown blockquote of an existing comment for quoting in a
  # reply. Requires a signed-in account; collapses runs of 3+ newlines first.
  get :quote_comment, :map => '/comment/quote' do
    return false unless account_login?
    return false unless params[:id]
    comment = BlogComment.find params[:id].to_i
    body = "\n> #{comment.account.name} 评论:\n"
    comment.content.gsub(/\n{3,}/, "\n\n").split("\n").each {|line| body << "> #{line}\n"}
    body
  end

  # Renders a sanitized HTML preview of GitHub-flavored Markdown input.
  post :comment_preview, :map => '/comment/preview' do
    return false unless account_login?
    Sanitize.clean(GitHub::Markdown.to_html(params[:term], :gfm), Sanitize::Config::RELAXED) if params[:term]
  end

  # Creates a comment on a post (JS response). 401 unless signed in,
  # 403 when the post is closed for comments.
  post :create_comment, :map => '/blog/:id/comments' do
    content_type :js
    halt 401 unless account_login?
    blog = Blog.find params[:id]
    halt 403 unless blog.commentable?
    @comment = blog.comments.create(:account => current_account, :content => params[:blog_comment][:content])
    render 'blog/create_comment'
  end

  # Deletes a comment (JS response). Allowed for admins, or for the
  # comment's author when they hold commenter rights; 403 otherwise.
  delete :comment, :map => '/comment/:id' do
    content_type :js
    comment = BlogComment.find params[:id]
    if account_admin? || (account_commenter? && comment.account == current_account)
      comment.destroy
      "$('div#comments>ul>li##{comment.id}').fadeOut('slow', function(){$(this).remove();});"
    else
      halt 403
    end
  end
end
| 28.7875
| 109
| 0.628311
|
1d0b10b7e5d8eab726352ae604886333b90fea12
| 1,566
|
require 'action_controller'
# Backports a `before_render` filter chain (absent in Rails 5) on top of
# ActiveSupport::Callbacks, mirroring the before_action filter API.
module Rails5BeforeRender
  module BeforeRender
    extend ActiveSupport::Concern
    include ActiveSupport::Callbacks

    included do
      define_callbacks :render,
        # Halt the chain once the controller has already responded
        # (performed?); the result lambda is still invoked so the current
        # filter's own work completes before the check.
        terminator: ->(controller, result_lambda) { result_lambda.call if result_lambda.is_a?(Proc); controller.performed? },
        skip_after_callbacks_if_terminated: true
    end

    module ClassMethods
      # Registers filters to run before render, appended to the chain.
      def append_before_render_filter(*names, &blk)
        _insert_callbacks(names, blk) do |name, options|
          set_callback(:render, :before, name, options)
        end
      end

      # Registers filters to run before render, prepended to the chain.
      def prepend_before_render_filter(*names, &blk)
        _insert_callbacks(names, blk) do |name, options|
          set_callback(:render, :before, name, options.merge(:prepend => true))
        end
      end

      # Removes previously registered before-render filters.
      def skip_before_render_filter(*names, &blk)
        _insert_callbacks(names, blk) do |name, options|
          skip_callback(:render, :before, name, options)
        end
      end

      # Short aliases matching the Rails filter naming convention.
      alias_method :before_render, :append_before_render_filter
      alias_method :append_before_render, :append_before_render_filter
      alias_method :prepend_before_render, :prepend_before_render_filter
      alias_method :skip_before_render, :skip_before_render_filter
    end
  end
end
ActionController::Base.include Rails5BeforeRender::BeforeRender
| 36.418605
| 146
| 0.615581
|
d5da55568fcfc852fbbf669d9d6524f3f3baa7af
| 320
|
require 'hanami/interactor'
# Interactor that resolves the user for an authentication callback:
# returns the existing user matching provider/uid, or registers a new one.
class FindOrCreateUserForAuth
  include Hanami::Interactor

  expose :user

  def initialize(repository: UserRepository.new)
    @repository = repository
  end

  # auth must respond to #provider and #uid.
  def call(auth)
    existing = @repository.find_by_provider_and_uid(auth.provider, auth.uid)
    @user = existing || @repository.register(auth)
  end
end
| 20
| 103
| 0.759375
|
ab4bb4dc3e785c304924475507663d93799e5b79
| 1,278
|
# frozen_string_literal: true
require "active_record_unit"
require "active_record/railties/collection_cache_association_loading"
ActionView::PartialRenderer.prepend(ActiveRecord::Railties::CollectionCacheAssociationLoading)
# Verifies that cached collection rendering skips association preloading
# for records whose partials are already in the fragment cache.
class MultifetchCacheTest < ActiveRecordTestCase
  fixtures :topics, :replies

  def setup
    view_paths = ActionController::Base.view_paths
    # Start from clean template/lookup caches so the first render below is
    # the one that populates the fragment cache.
    view_paths.each(&:clear_cache)
    ActionView::LookupContext.fallbacks.each(&:clear_cache)
    @view = Class.new(ActionView::Base.with_empty_template_cache) do
      # No extra cache dependencies for this test view.
      def view_cache_dependencies
        []
      end

      # Minimal combined fragment key: [:views, key].
      def combined_fragment_cache_key(key)
        [ :views, key ]
      end
    end.with_view_paths(view_paths, {})
    controller = ActionController::Base.new
    controller.perform_caching = true
    @view.controller = controller
  end

  def test_only_preloading_for_records_that_miss_the_cache
    # Warm the fragment cache for the :rails topic only.
    @view.render partial: "test/partial", collection: [topics(:rails)], cached: true
    @topics = Topic.preload(:replies)
    @view.render partial: "test/partial", collection: @topics, cached: true
    # Cached record: its replies must NOT have been loaded.
    assert_not @topics.detect { |topic| topic.id == topics(:rails).id }.replies.loaded?
    # Cache-missing record: its replies must have been preloaded.
    assert @topics.detect { |topic| topic.id != topics(:rails).id }.replies.loaded?
  end
end
| 30.428571
| 94
| 0.737089
|
4a5afd03331b6b26a3949e4d385141ae6ba17c07
| 1,248
|
require 'rails_helper'
# Controller specs for the static pages: each action must respond with
# HTTP success and render the expected <title>; #home also checks layout links.
RSpec.describe StaticPagesController, type: :controller do
  render_views

  describe "GET #home" do
    it "returns http success" do
      get :home
      expect(response).to have_http_status(:success)
      assert_select "title", "Home | Ruby on Rails Tutorial Sample App"
      assert_template "static_pages/home"
      # Two links point at root (count: 2); the rest appear once each.
      assert_select "a[href=?]", root_path, count: 2
      assert_select "a[href=?]", help_path
      assert_select "a[href=?]", about_path
      assert_select "a[href=?]", contact_path
      assert_select "a[href=?]", signup_path
    end
  end

  describe "GET #help" do
    it "returns http success" do
      get :help
      expect(response).to have_http_status(:success)
      assert_select "title", "Help | Ruby on Rails Tutorial Sample App"
    end
  end

  describe "GET #about" do
    it "returns http success" do
      get :about
      expect(response).to have_http_status(:success)
      assert_select "title", "About | Ruby on Rails Tutorial Sample App"
    end
  end

  describe "GET #contact" do
    it "returns http success" do
      get :contact
      expect(response).to have_http_status(:success)
      assert_select "title", "Contact | Ruby on Rails Tutorial Sample App"
    end
  end
end
| 27.733333
| 74
| 0.671474
|
18c428483be8dedba0df3275ffc77001ab6c8c21
| 1,213
|
# frozen_string_literal: true
require "test_helper"
# Exercises CommandEventsFetchService: command events belonging to a
# project are returned for a user on the project's account.
class CommandEventsFetchServiceTest < ActiveSupport::TestCase
  test "fetches command events tied to a given project" do
    # Given
    user = User.create!(email: "test@cloud.tuist.io", password: Devise.friendly_token.first(16))
    account = user.account
    project = Project.create!(name: "tuist-project", account_id: account.id, token: Devise.friendly_token.first(16))
    command_event_one = CommandEvent.create!(
      name: "fetch",
      subcommand: "",
      command_arguments: ["fetch", "--path", "./"],
      duration: 120,
      client_id: "client id",
      tuist_version: "3.1.0",
      swift_version: "5.5.0",
      macos_version: "12.1.0",
      project: project
    )
    command_event_two = CommandEvent.create!(
      name: "generate",
      subcommand: "",
      command_arguments: ["generate"],
      duration: 40,
      client_id: "client id",
      tuist_version: "3.1.0",
      swift_version: "5.5.0",
      macos_version: "12.1.0",
      project: project
    )
    # When
    got = CommandEventsFetchService.call(project_id: project.id, user: user)
    # Then
    # The most recently created event is expected first.
    assert_equal [command_event_two, command_event_one], got
  end
end
| 29.585366
| 116
| 0.649629
|
5dd18a6ff841089135da4234febadc2f97d0fba2
| 169
|
module DocRipper
module Formats
# Extracts plain text from a legacy .doc file by shelling out to the
# `antiword` CLI; the result is memoized in @text. The path goes through
# to_shell before interpolation into the command.
class MsDocRipper < Ripper::Base
  def rip
    @text ||= `antiword #{to_shell(file_path)}`
  end
end
end
end
| 14.083333
| 53
| 0.60355
|
6202d123d16bae70e76e4c3a917f12a8f72d7175
| 13,214
|
# frozen_string_literal: true
require 'find'
require 'yaml'
require 'shellwords'
require_relative '../ext/swiftlint/swiftlint'
module Danger
# Lint Swift files inside your projects.
# This is done using the [SwiftLint](https://github.com/realm/SwiftLint) tool.
# Results are passed out as a table in markdown.
#
# @example Specifying custom config file.
#
# # Runs a linter with comma style disabled
# swiftlint.config_file = '.swiftlint.yml'
# swiftlint.lint_files
#
# @see artsy/eigen
# @tags swift
#
class DangerSwiftlint < Plugin
# The path to SwiftLint's execution
attr_accessor :binary_path
# The path to SwiftLint's configuration file
attr_accessor :config_file
# Allows you to specify a directory from where swiftlint will be run.
attr_accessor :directory
# Maximum number of issues to be reported.
attr_accessor :max_num_violations
# Provides additional logging diagnostic information.
attr_accessor :verbose
# Whether all files should be linted in one pass
attr_accessor :lint_all_files
# Whether we should fail on warnings
attr_accessor :strict
# Warnings found
attr_accessor :warnings
# Errors found
attr_accessor :errors
# All issues found
attr_accessor :issues
# Whether all issues or ones in PR Diff to be reported
attr_accessor :filter_issues_in_diff
# Lints Swift files. Will fail if `swiftlint` cannot be installed correctly.
# Generates a `markdown` list of warnings for the prose in a corpus of
# .markdown and .md files.
#
# @param [String] files
#        A globbed string which should return the files that you want to
#        lint, defaults to nil.
#        if nil, modified and added files from the diff will be used.
# @param [Boolean] inline_mode report each issue as an inline PR comment
# @param [Boolean] fail_on_error fail the build when errors are present
# @param [String] additional_swiftlint_args extra CLI args passed to swiftlint
# @param [Boolean] no_comment collect issues (into #issues etc.) without posting
# @param [Proc] select_block optional per-issue filter applied before reporting
# @return [void]
#
def lint_files(files = nil, inline_mode: false, fail_on_error: false, additional_swiftlint_args: '', no_comment: false, &select_block)
  # Fails if swiftlint isn't installed
  raise 'swiftlint is not installed' unless swiftlint.installed?
  # Explicit config_file wins; otherwise fall back to ./.swiftlint.yml if present.
  config_file_path = if config_file
    config_file
  elsif File.file?('.swiftlint.yml')
    File.expand_path('.swiftlint.yml')
  end
  log "Using config file: #{config_file_path}"
  dir_selected = directory ? File.expand_path(directory) : Dir.pwd
  log "Swiftlint will be run from #{dir_selected}"
  # Get config
  config = load_config(config_file_path)
  # Extract excluded paths
  excluded_paths = format_paths(config['excluded'] || [], config_file_path)
  log "Swiftlint will exclude the following paths: #{excluded_paths}"
  # Extract included paths
  included_paths = format_paths(config['included'] || [], config_file_path)
  log "Swiftlint includes the following paths: #{included_paths}"
  # Prepare swiftlint options
  options = {
    # Make sure we don't fail when config path has spaces
    config: config_file_path ? Shellwords.escape(config_file_path) : nil,
    reporter: 'json',
    quiet: true,
    pwd: dir_selected,
    force_exclude: true
  }
  log "linting with options: #{options}"
  if lint_all_files
    issues = run_swiftlint(options, additional_swiftlint_args)
  else
    # Extract swift files (ignoring excluded ones)
    files = find_swift_files(dir_selected, files, excluded_paths, included_paths)
    log "Swiftlint will lint the following files: #{files.join(', ')}"
    # Lint each file and collect the results
    issues = run_swiftlint_for_each(files, options, additional_swiftlint_args)
  end
  if filter_issues_in_diff
    # Filter issues related to changes in PR Diff
    issues = filter_git_diff_issues(issues)
  end
  @issues = issues
  other_issues_count = 0
  # Cap the number of reported issues; remember how many were hidden so the
  # summary line can mention them.
  unless @max_num_violations.nil? || no_comment
    other_issues_count = issues.count - @max_num_violations if issues.count > @max_num_violations
    issues = issues.take(@max_num_violations)
  end
  log "Received from Swiftlint: #{issues}"
  # filter out any unwanted violations with the passed in select_block
  if select_block && !no_comment
    issues = issues.select { |issue| select_block.call(issue) }
  end
  # Filter warnings and errors
  @warnings = issues.select { |issue| issue['severity'] == 'Warning' }
  @errors = issues.select { |issue| issue['severity'] == 'Error' }
  # Early exit so we don't comment
  return if no_comment
  if inline_mode
    # Report with inline comment
    send_inline_comment(warnings, strict ? :fail : :warn)
    send_inline_comment(errors, (fail_on_error || strict) ? :fail : :warn)
    warn other_issues_message(other_issues_count) if other_issues_count > 0
  elsif warnings.count > 0 || errors.count > 0
    # Report if any warning or error
    message = "### SwiftLint found issues\n\n".dup
    message << markdown_issues(warnings, 'Warnings') unless warnings.empty?
    message << markdown_issues(errors, 'Errors') unless errors.empty?
    message << "\n#{other_issues_message(other_issues_count)}" if other_issues_count > 0
    markdown message
    # Fail danger on errors
    should_fail_by_errors = fail_on_error && errors.count > 0
    # Fail danger if any warnings or errors and we are strict
    should_fail_by_strict = strict && (errors.count > 0 || warnings.count > 0)
    if should_fail_by_errors || should_fail_by_strict
      fail 'Failed due to SwiftLint errors'
    end
  end
end
# Run swiftlint on all files and returns the issues
#
# @return [Array] swiftlint issues (empty when swiftlint produced no output)
def run_swiftlint(options, additional_swiftlint_args)
  result = swiftlint.lint(options, additional_swiftlint_args)
  if result == ''
    # Return an empty Array (not a Hash) so the declared @return type holds
    # and callers can treat the result uniformly as a list of issues.
    []
  else
    JSON.parse(result).flatten
  end
end
# Run swiftlint on each file and aggregate collect the issues
#
# @return [Array] swiftlint issues (empty when swiftlint produced no output)
def run_swiftlint_for_each(files, options, additional_swiftlint_args)
  # Use `--use-script-input-files` flag along with `SCRIPT_INPUT_FILE_#` ENV
  # variables to pass the list of files we want swiftlint to lint
  options.merge!(use_script_input_files: true)
  # Set environment variables:
  # * SCRIPT_INPUT_FILE_COUNT equal to number of files
  # * a variable in the form of SCRIPT_INPUT_FILE_# for each file
  env = script_input(files)
  result = swiftlint.lint(options, additional_swiftlint_args, env)
  if result == ''
    # Return an empty Array (not a Hash) so the declared @return type holds.
    []
  else
    JSON.parse(result).flatten
  end
end
# Converts an array of files into `SCRIPT_INPUT_FILE_#` format
# for use with `--use-script-input-files`
# @return [Hash] mapping from `SCRIPT_INPUT_FILE_#` to file;
#   SCRIPT_INPUT_FILE_COUNT is set to the number of files
def script_input(files)
  env = {}
  files.each_with_index do |file, index|
    env["SCRIPT_INPUT_FILE_#{index}"] = file.to_s
  end
  env['SCRIPT_INPUT_FILE_COUNT'] = files.size.to_s
  env
end
# Find swift files from the files glob
# If files are not provided it will use git modified and added files
#
# @param [String] dir_selected absolute directory files must live under
# @param [String, nil] files optional glob; nil means "use the git diff"
# @param [Array<String>] excluded_paths absolute paths excluded by config
# @param [Array<String>] included_paths absolute paths included by config
# @return [Array] swift files
def find_swift_files(dir_selected, files = nil, excluded_paths = [], included_paths = [])
  # Assign files to lint
  files = if files.nil?
    (git.modified_files - git.deleted_files) + git.added_files
  else
    Dir.glob(files)
  end
  # Expand configured paths recursively so the per-file membership checks
  # below also cover files nested under excluded/included directories.
  excluded_paths_list = Find.find(*excluded_paths).to_a
  included_paths_list = Find.find(*included_paths).to_a
  files.
    # Ensure only swift files are selected
    select { |file| file.end_with?('.swift') }.
    # Convert to absolute paths
    map { |file| File.expand_path(file) }.
    # Remove dups
    uniq.
    # Ensure only files in the selected directory
    select { |file| file.start_with?(dir_selected) }.
    # Reject files excluded on configuration
    reject { |file| excluded_paths_list.include?(file) }.
    # Accept files included on configuration (everything passes when no
    # include list was configured)
    select do |file|
      next true if included_paths.empty?
      included_paths_list.include?(file)
    end
end
# Get the configuration file
#
# @param [String, nil] filepath path to the SwiftLint YAML config
# @return [Hash] parsed configuration, or {} when no file is available
def load_config(filepath)
  return {} if filepath.nil? || !File.exist?(filepath)
  # File.read opens and closes the handle; the previous
  # File.open(filepath).read leaked the file descriptor until GC.
  config_file = File.read(filepath)
  # Replace environment variables
  config_file = parse_environment_variables(config_file)
  YAML.safe_load(config_file)
end
# Find all requested environment variables in the given string and replace them with the correct values.
#
# Variables are written as ${VAR_NAME}; placeholders for unset variables
# are left in place verbatim.
#
# @param [String] file_contents raw configuration-file text
# @return [String] the text with every resolvable ${VAR} substituted
def parse_environment_variables(file_contents)
  file_contents.gsub(/\$\{([^{}]+)\}/) do |env_var|
    # The block's value is the replacement. The previous implementation used
    # `return env_var`, which exits this whole method on the first unset
    # variable and discards every other substitution.
    value = ENV[Regexp.last_match[1]]
    value.nil? ? env_var : value
  end
end
# Parses the configuration file and return the specified files in path
#
# Paths are resolved relative to the config file's directory; entries that
# do not exist on disk are dropped.
#
# @return [Array] list of files specified in path
def format_paths(paths, filepath)
  base_dir = File.dirname(filepath)
  paths.filter_map do |path|
    absolute = File.expand_path(File.join(base_dir, path))
    absolute if File.exist?(absolute) || Dir.exist?(absolute)
  end
end
# Create a markdown table from swiftlint issues
#
# @param [Array<Hash>] results issue hashes with 'file', 'line', 'reason', 'rule_id'
# @param [String] heading table heading (e.g. 'Warnings' or 'Errors')
# @return [String]
def markdown_issues(results, heading)
  message = "#### #{heading}\n\n".dup
  message << "File | Line | Reason |\n"
  message << "| --- | ----- | ----- |\n"
  results.each do |r|
    filename = r['file'].split('/').last
    line = r['line']
    reason = r['reason']
    rule = r['rule_id']
    # Other available properties can be found int SwiftLint/…/JSONReporter.swift
    # Interpolate the basename into the row (it was computed but the row
    # previously emitted a garbled literal instead of using it).
    message << "#{filename} | #{line} | #{reason} (#{rule})\n"
  end
  message
end
# Send inline comment with danger's warn or fail method
#
# @param [Array<Hash>] results swiftlint issues to report
# @param [Symbol] method danger reporting method (:warn or :fail)
# @return [void]
def send_inline_comment(results, method)
  dir = "#{Dir.pwd}/"
  results.each do |r|
    github_filename = r['file'].gsub(dir, '')
    message = "#{r['reason']}".dup
    # extended content here
    filename = r['file'].split('/').last
    message << "\n"
    message << "`#{r['rule_id']}`" # helps writing exceptions // swiftlint:disable:this rule_id
    # Interpolate the basename (previously a garbled literal was emitted).
    message << " `#{filename}:#{r['line']}`" # file:line for pasting into Xcode Quick Open
    send(method, message, file: github_filename, line: r['line'])
  end
end
# Builds the summary sentence for violations hidden by max_num_violations.
def other_issues_message(issues_count)
  noun = issues_count == 1 ? 'violation' : 'violations'
  "SwiftLint also found #{issues_count} more #{noun} with this PR."
end
# Make SwiftLint object for binary_path
#
# A fresh wrapper is constructed on every call.
#
# @return [SwiftLint]
def swiftlint
  Swiftlint.new(binary_path)
end

# Prints diagnostic output, but only when `verbose` is enabled.
def log(text)
  puts(text) if @verbose
end
# Filters issues reported against changes in the modified files
#
# Keeps only issues whose file appears in the diff AND whose line number is
# among that file's modified lines.
#
# @return [Array] swiftlint issues
def filter_git_diff_issues(issues)
  modified_files_info = git_modified_files_info
  issues.select do |issue|
    # Single lookup instead of the previous double hash access; nil-safe
    # `&.` replaces the explicit `!= nil` check.
    changed_lines = modified_files_info[issue['file']]
    changed_lines&.include?(issue['line'].to_i)
  end
end
# Finds modified files and added files, creates array of files with modified line numbers
#
# @return [Hash] absolute file path => Array of modified line numbers
def git_modified_files_info()
  modified_files_info = Hash.new
  updated_files = (git.modified_files - git.deleted_files) + git.added_files
  updated_files.each {|file|
    modified_lines = git_modified_lines(file)
    # Keys are absolute paths so they can be matched against the file paths
    # swiftlint reports.
    modified_files_info[File.expand_path(file)] = modified_lines
  }
  modified_files_info
end
# Gets git patch info and finds modified line numbers, excludes removed lines
#
# @param [String] file path of the file to inspect
# @return [Array] Modified (added) line numbers in the new version of the file
def git_modified_lines(file)
  # Hunk header, e.g. "@@ -1,3 +10,4 @@" — captures the new-file start line.
  git_range_info_line_regex = /^@@ .+\+(?<line_number>\d+),/
  # An added line starts with a single '+'; the lookahead excludes '+++'
  # file headers. (The original lookahead `(?!\+|\+)` duplicated its only
  # alternative — `(?!\+)` is equivalent. Two regexes for removed lines
  # were declared but never used and have been dropped.)
  git_modified_line_regex = /^\+(?!\+)/
  file_info = git.diff_for_file(file)
  line_number = 0
  lines = []
  file_info.patch.split("\n").each do |line|
    starting_line_number = 0
    case line
    when git_range_info_line_regex
      starting_line_number = Regexp.last_match[:line_number].to_i
    when git_modified_line_regex
      lines << line_number
    end
    # Advance the counter only once a hunk has established a start line;
    # reset it at each new hunk header.
    line_number += 1 if line_number > 0
    line_number = starting_line_number if line_number == 0 && starting_line_number > 0
  end
  lines
end
end
end
| 35.050398
| 138
| 0.642122
|
5d8ac6056dba8d1bd0c59fc541ade0e1c4b19e9e
| 7,583
|
=begin
#NSX-T Manager API
#VMware NSX-T Manager REST API
OpenAPI spec version: 2.5.1.0.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
Swagger Codegen version: 2.4.7
=end
require 'date'
module NSXT
# HPM client data collection configuration
class ClientTypeCollectionConfiguration
# The client type for which this data collection frequency setting applies
attr_accessor :client_type
# The set of data collection type configurations, one for each data collection type
attr_accessor :data_type_configurations
# Validates that an attribute's value belongs to a fixed set of allowed
# values, coercing the allowed list to the declared datatype up front.
# NOTE(review): @datatype is never assigned, so the datatype reader always
# returns nil — preserved as-is from the generated code.
class EnumAttributeValidator
  attr_reader :datatype
  attr_reader :allowable_values

  def initialize(datatype, allowable_values)
    conversion =
      case datatype.to_s
      when /Integer/i then :to_i
      when /Float/i then :to_f
      end
    @allowable_values = allowable_values.map { |value| conversion ? value.send(conversion) : value }
  end

  # nil/false values are treated as valid; presence is enforced elsewhere.
  def valid?(value)
    !value || allowable_values.include?(value)
  end
end
# Attribute mapping from ruby-style variable name to JSON key.
def self.attribute_map
{
:'client_type' => :'client_type',
:'data_type_configurations' => :'data_type_configurations'
}
end
# Attribute type mapping.
def self.swagger_types
{
:'client_type' => :'String',
:'data_type_configurations' => :'Array<DataTypeCollectionConfiguration>'
}
end
# Initializes the object
# @param [Hash] attributes Model attributes in the form of hash
def initialize(attributes = {})
return unless attributes.is_a?(Hash)
# convert string to symbol for hash key
attributes = attributes.each_with_object({}) { |(k, v), h| h[k.to_sym] = v }
if attributes.has_key?(:'client_type')
self.client_type = attributes[:'client_type']
end
if attributes.has_key?(:'data_type_configurations')
if (value = attributes[:'data_type_configurations']).is_a?(Array)
self.data_type_configurations = value
end
end
end
# Show invalid properties with the reasons. Usually used together with valid?
# @return Array for valid properties with the reasons
def list_invalid_properties
invalid_properties = Array.new
if @client_type.nil?
invalid_properties.push('invalid value for "client_type", client_type cannot be nil.')
end
if @data_type_configurations.nil?
invalid_properties.push('invalid value for "data_type_configurations", data_type_configurations cannot be nil.')
end
invalid_properties
end
# Check to see if the all the properties in the model are valid
# @return true if the model is valid
def valid?
return false if @client_type.nil?
client_type_validator = EnumAttributeValidator.new('String', ['HYPERVISOR', 'EDGE', 'CONTROL_PLANE', 'CONTROL_PLANE_PLATFORM', 'MANAGEMENT_PLANE', 'MANAGEMENT_PLANE_PLATFORM'])
return false unless client_type_validator.valid?(@client_type)
return false if @data_type_configurations.nil?
true
end
# Custom attribute writer method checking allowed values (enum).
# @param [Object] client_type Object to be assigned
# @raise [ArgumentError] when the value is not one of the allowed enum values
def client_type=(client_type)
  validator = EnumAttributeValidator.new('String', ['HYPERVISOR', 'EDGE', 'CONTROL_PLANE', 'CONTROL_PLANE_PLATFORM', 'MANAGEMENT_PLANE', 'MANAGEMENT_PLANE_PLATFORM'])
  unless validator.valid?(client_type)
    # Double-quoted so the allowed values actually interpolate; the generated
    # single-quoted string printed the literal "#{validator.allowable_values}".
    fail ArgumentError, "invalid value for \"client_type\", must be one of #{validator.allowable_values}."
  end
  @client_type = client_type
end
# Checks equality by comparing each attribute.
# @param [Object] Object to be compared
def ==(o)
return true if self.equal?(o)
self.class == o.class &&
client_type == o.client_type &&
data_type_configurations == o.data_type_configurations
end
# @see the `==` method
# @param [Object] Object to be compared
def eql?(o)
self == o
end
# Calculates hash code according to all attributes.
# @return [Fixnum] Hash code
def hash
[client_type, data_type_configurations].hash
end
# Builds the object from hash
# @param [Hash] attributes Model attributes in the form of hash
# @return [Object] Returns the model itself
def build_from_hash(attributes)
return nil unless attributes.is_a?(Hash)
self.class.swagger_types.each_pair do |key, type|
if type =~ /\AArray<(.*)>/i
# check to ensure the input is an array given that the the attribute
# is documented as an array but the input is not
if attributes[self.class.attribute_map[key]].is_a?(Array)
self.send("#{key}=", attributes[self.class.attribute_map[key]].map { |v| _deserialize($1, v) })
end
elsif !attributes[self.class.attribute_map[key]].nil?
self.send("#{key}=", _deserialize(type, attributes[self.class.attribute_map[key]]))
end # or else data not found in attributes(hash), not an issue as the data can be optional
end
self
end
# Deserializes the data based on type
# @param string type Data type
# @param string value Value to be deserialized
# @return [Object] Deserialized data
def _deserialize(type, value)
case type.to_sym
when :DateTime
DateTime.parse(value)
when :Date
Date.parse(value)
when :String
value.to_s
when :Integer
value.to_i
when :Float
value.to_f
when :BOOLEAN
if value.to_s =~ /\A(true|t|yes|y|1)\z/i
true
else
false
end
when :Object
# generic object (usually a Hash), return directly
value
when /\AArray<(?<inner_type>.+)>\z/
inner_type = Regexp.last_match[:inner_type]
value.map { |v| _deserialize(inner_type, v) }
when /\AHash<(?<k_type>.+?), (?<v_type>.+)>\z/
k_type = Regexp.last_match[:k_type]
v_type = Regexp.last_match[:v_type]
{}.tap do |hash|
value.each do |k, v|
hash[_deserialize(k_type, k)] = _deserialize(v_type, v)
end
end
else # model
temp_model = NSXT.const_get(type).new
temp_model.build_from_hash(value)
end
end
# Returns the string representation of the object
# @return [String] String presentation of the object
def to_s
to_hash.to_s
end
# to_body is an alias to to_hash (backward compatibility)
# @return [Hash] Returns the object in the form of hash
def to_body
to_hash
end
# Returns the object in the form of hash
# @return [Hash] Returns the object in the form of hash
def to_hash
hash = {}
self.class.attribute_map.each_pair do |attr, param|
value = self.send(attr)
next if value.nil?
hash[param] = _to_hash(value)
end
hash
end
# Outputs non-array value in the form of hash
# For object, use to_hash. Otherwise, just return the value
# @param [Object] value Any valid value
# @return [Hash] Returns the value in the form of hash
def _to_hash(value)
if value.is_a?(Array)
value.compact.map { |v| _to_hash(v) }
elsif value.is_a?(Hash)
{}.tap do |hash|
value.each { |k, v| hash[k] = _to_hash(v) }
end
elsif value.respond_to? :to_hash
value.to_hash
else
value
end
end
end
end
| 31.334711
| 182
| 0.642754
|
18b0be79bb5212a0a8fa16bcd713a1e75feb2eae
| 7,176
|
# -*- encoding: binary -*-
# :enddoc:
require 'socket'
module Unicorn
module SocketHelper
# :stopdoc:
include Socket::Constants
# prevents IO objects in here from being GC-ed
IO_PURGATORY = []
# internal interface, only used by Rainbows!/Zbatery
DEFAULTS = {
# The semantics for TCP_DEFER_ACCEPT changed in Linux 2.6.32+
# with commit d1b99ba41d6c5aa1ed2fc634323449dd656899e9
# This change shouldn't affect Unicorn users behind nginx (a
# value of 1 remains an optimization), but Rainbows! users may
# want to use a higher value on Linux 2.6.32+ to protect against
# denial-of-service attacks
:tcp_defer_accept => 1,
# FreeBSD, we need to override this to 'dataready' if we
# eventually get HTTPS support
:accept_filter => 'httpready',
# same default value as Mongrel
:backlog => 1024,
# favor latency over bandwidth savings
:tcp_nopush => nil,
:tcp_nodelay => true,
}
#:startdoc:
# configure platform-specific options (only tested on Linux 2.6 so far)
case RUBY_PLATFORM
when /linux/
# from /usr/include/linux/tcp.h
TCP_DEFER_ACCEPT = 9 unless defined?(TCP_DEFER_ACCEPT)
# do not send out partial frames (Linux)
TCP_CORK = 3 unless defined?(TCP_CORK)
when /freebsd/
# do not send out partial frames (FreeBSD)
TCP_NOPUSH = 4 unless defined?(TCP_NOPUSH)
def accf_arg(af_name)
[ af_name, nil ].pack('a16a240')
end if defined?(SO_ACCEPTFILTER)
end
def set_tcp_sockopt(sock, opt)
# just in case, even LANs can break sometimes. Linux sysadmins
# can lower net.ipv4.tcp_keepalive_* sysctl knobs to very low values.
sock.setsockopt(SOL_SOCKET, SO_KEEPALIVE, 1) if defined?(SO_KEEPALIVE)
if defined?(TCP_NODELAY)
val = opt[:tcp_nodelay]
val = DEFAULTS[:tcp_nodelay] if nil == val
sock.setsockopt(IPPROTO_TCP, TCP_NODELAY, val ? 1 : 0)
end
val = opt[:tcp_nopush]
unless val.nil?
if defined?(TCP_CORK) # Linux
sock.setsockopt(IPPROTO_TCP, TCP_CORK, val)
elsif defined?(TCP_NOPUSH) # TCP_NOPUSH is lightly tested (FreeBSD)
sock.setsockopt(IPPROTO_TCP, TCP_NOPUSH, val)
end
end
# No good reason to ever have deferred accepts off
# (except maybe benchmarking)
if defined?(TCP_DEFER_ACCEPT)
# this differs from nginx, since nginx doesn't allow us to
# configure the the timeout...
seconds = opt[:tcp_defer_accept]
seconds = DEFAULTS[:tcp_defer_accept] if [true,nil].include?(seconds)
seconds = 0 unless seconds # nil/false means disable this
sock.setsockopt(SOL_TCP, TCP_DEFER_ACCEPT, seconds)
elsif respond_to?(:accf_arg)
name = opt[:accept_filter]
name = DEFAULTS[:accept_filter] if nil == name
begin
sock.setsockopt(SOL_SOCKET, SO_ACCEPTFILTER, accf_arg(name))
rescue => e
logger.error("#{sock_name(sock)} " \
"failed to set accept_filter=#{name} (#{e.inspect})")
end
end
end
def set_server_sockopt(sock, opt)
opt = DEFAULTS.merge(opt || {})
TCPSocket === sock and set_tcp_sockopt(sock, opt)
if opt[:rcvbuf] || opt[:sndbuf]
log_buffer_sizes(sock, "before: ")
sock.setsockopt(SOL_SOCKET, SO_RCVBUF, opt[:rcvbuf]) if opt[:rcvbuf]
sock.setsockopt(SOL_SOCKET, SO_SNDBUF, opt[:sndbuf]) if opt[:sndbuf]
log_buffer_sizes(sock, " after: ")
end
sock.listen(opt[:backlog])
rescue => e
Unicorn.log_error(logger, "#{sock_name(sock)} #{opt.inspect}", e)
end
def log_buffer_sizes(sock, pfx = '')
rcvbuf = sock.getsockopt(SOL_SOCKET, SO_RCVBUF).unpack('i')
sndbuf = sock.getsockopt(SOL_SOCKET, SO_SNDBUF).unpack('i')
logger.info "#{pfx}#{sock_name(sock)} rcvbuf=#{rcvbuf} sndbuf=#{sndbuf}"
end
# creates a new server, socket. address may be a HOST:PORT or
# an absolute path to a UNIX socket. address can even be a Socket
# object in which case it is immediately returned
def bind_listen(address = '0.0.0.0:8080', opt = {})
return address unless String === address
sock = if address[0] == ?/
if File.exist?(address)
if File.socket?(address)
begin
UNIXSocket.new(address).close
# fall through, try to bind(2) and fail with EADDRINUSE
# (or succeed from a small race condition we can't sanely avoid).
rescue Errno::ECONNREFUSED
logger.info "unlinking existing socket=#{address}"
File.unlink(address)
end
else
raise ArgumentError,
"socket=#{address} specified but it is not a socket!"
end
end
old_umask = File.umask(opt[:umask] || 0)
begin
Kgio::UNIXServer.new(address)
ensure
File.umask(old_umask)
end
elsif /\A\[([a-fA-F0-9:]+)\]:(\d+)\z/ =~ address
new_ipv6_server($1, $2.to_i, opt)
elsif /\A(\d+\.\d+\.\d+\.\d+):(\d+)\z/ =~ address
Kgio::TCPServer.new($1, $2.to_i)
else
raise ArgumentError, "Don't know how to bind: #{address}"
end
set_server_sockopt(sock, opt)
sock
end
def new_ipv6_server(addr, port, opt)
opt.key?(:ipv6only) or return Kgio::TCPServer.new(addr, port)
defined?(IPV6_V6ONLY) or
abort "Socket::IPV6_V6ONLY not defined, upgrade Ruby and/or your OS"
sock = Socket.new(AF_INET6, SOCK_STREAM, 0)
sock.setsockopt(IPPROTO_IPV6, IPV6_V6ONLY, opt[:ipv6only] ? 1 : 0)
sock.setsockopt(SOL_SOCKET, SO_REUSEADDR, 1)
sock.bind(Socket.pack_sockaddr_in(port, addr))
IO_PURGATORY << sock
Kgio::TCPServer.for_fd(sock.fileno)
end
# returns rfc2732-style (e.g. "[::1]:666") addresses for IPv6;
# IPv4 addresses come back as plain "host:port"
def tcp_name(sock)
  port, host = Socket.unpack_sockaddr_in(sock.getsockname)
  host.include?(':') ? "[#{host}]:#{port}" : "#{host}:#{port}"
end
module_function :tcp_name
# Returns the configuration name of a socket as a string. sock may
# be a string value, in which case it is returned as-is
# Warning: TCP sockets may not always return the name given to it.
def sock_name(sock)
  if sock.is_a?(String)
    sock
  elsif sock.is_a?(UNIXServer)
    Socket.unpack_sockaddr_un(sock.getsockname)
  elsif sock.is_a?(TCPServer)
    tcp_name(sock)
  elsif sock.is_a?(Socket)
    # A raw Socket may be TCP or UNIX; try TCP first and fall back.
    begin
      tcp_name(sock)
    rescue ArgumentError
      Socket.unpack_sockaddr_un(sock.getsockname)
    end
  else
    raise ArgumentError, "Unhandled class #{sock.class}: #{sock.inspect}"
  end
end
module_function :sock_name
# casts a given Socket to be a TCPServer or UNIXServer
def server_cast(sock)
begin
Socket.unpack_sockaddr_in(sock.getsockname)
Kgio::TCPServer.for_fd(sock.fileno)
rescue ArgumentError
Kgio::UNIXServer.for_fd(sock.fileno)
end
end
end # module SocketHelper
end # module Unicorn
| 34.334928
| 79
| 0.625697
|
6ab9ab331fdb927b1839ba1f8e12116b98c4fc31
| 2,562
|
# Admin UI for order shipments, built on the resource_controller DSL and
# nested under orders.
class Admin::ShipmentsController < Admin::BaseController
  before_filter :load_data, :except => :country_changed
  before_filter :require_object_editable_by_current_user, :only => [:update]

  resource_controller
  belongs_to :order

  # After an update, in-progress orders continue to the adjustments step;
  # otherwise return to the shipment edit screen.
  update.wants.html do
    if @order.in_progress?
      redirect_to admin_order_adjustments_url(@order)
    else
      redirect_to edit_object_url
    end
  end

  create do
    wants.html { redirect_to edit_object_url }
  end

  # Lifecycle hooks wired into the resource_controller actions.
  edit.before :edit_before
  update.before :assign_inventory_units
  update.after :update_after
  create.before :assign_inventory_units
  create.after :recalculate_order
  destroy.success.wants.js { render_js_for_destroy }

  # Fires a state-machine bang event named by params[:e] on the shipment.
  # NOTE(review): `send("#{params[:e]}!")` invokes an arbitrary bang method
  # from user input — should be whitelisted against the permitted events.
  def fire
    @shipment.send("#{params[:e]}!")
    flash.notice = t('shipment_updated')
    redirect_to :back
  end

  private

  # Builds the shipment with defaults: the order's ship address (falling
  # back to a new address in the configured default country) and the
  # order's shipping method.
  def build_object
    @object ||= end_of_association_chain.send parent? ? :build : :new
    @object.address ||= @order.ship_address
    @object.address ||= Address.new(:country_id => Spree::Config[:default_country_id])
    @object.shipping_method ||= @order.shipping_method
    @object.attributes = object_params
    @object
  end

  # Loads the shipment plus the country/state/shipping-method collections
  # used by the form selects; country falls back from the submitted
  # presenter to the bill address to the configured default.
  def load_data
    load_object
    @selected_country_id = params[:shipment_presenter][:address_country_id].to_i if params.has_key?('shipment_presenter')
    @selected_country_id ||= @order.bill_address.country_id unless @order.nil? || @order.bill_address.nil?
    @selected_country_id ||= Spree::Config[:default_country_id]
    @shipping_methods = ShippingMethod.all_available(@order, :back_end)
    @states = State.find_all_by_country_id(@selected_country_id, :order => 'name')
    @countries = Checkout.countries.sort
    @countries = [Country.find(Spree::Config[:default_country_id])] if @countries.empty?
  end

  def edit_before # copy into instance variable before editing
    @shipment.special_instructions = @order.checkout.special_instructions
  end

  def update_after # copy back to order if instructions are enabled
    @order.checkout.special_instructions = object_params[:special_instructions] if Spree::Config[:shipping_instructions]
    @order.checkout.shipping_method = @order.shipment.shipping_method
    @order.save
    recalculate_order
  end

  # Replaces the shipment's inventory units with the submitted checkbox ids.
  def assign_inventory_units
    return unless params.has_key? :inventory_units
    #params[:inventory_units].each { |id, value| @shipment.inventory_units << InventoryUnit.find(id) }
    @shipment.inventory_unit_ids = params[:inventory_units].keys
  end

  # Recomputes order totals, but only when explicitly requested.
  def recalculate_order
    @shipment.recalculate_order if params[:recalculate]
  end
end
| 32.025
| 121
| 0.750585
|
03f8089a8d9791e2565f14b40b1e3760a7b89123
| 1,710
|
# Homebrew formula for chezmoi, a dotfile manager written in Go.
class Chezmoi < Formula
  desc "Manage your dotfiles across multiple diverse machines, securely"
  homepage "https://chezmoi.io/"
  # Built from a git checkout (tag + pinned revision) rather than a
  # tarball so Utils.git_head is available for the ldflags below.
  url "https://github.com/twpayne/chezmoi.git",
      tag:      "v2.0.9",
      revision: "7c7213b32435823ce3fd0dc468fdecefe7c5e8f9"
  license "MIT"
  head "https://github.com/twpayne/chezmoi.git"
  bottle do
    sha256 cellar: :any_skip_relocation, arm64_big_sur: "b69dcb8fe67d45871edd6cd584d07dfd4552b34e4eea48d018886f2c5c30ceb9"
    sha256 cellar: :any_skip_relocation, big_sur:       "5f672038d174d2980ac38079397be44432ac17b28f30f257a13db356f1e6638f"
    sha256 cellar: :any_skip_relocation, catalina:      "12293263380c9da027b3b172a8df757e11180dedb092724655730ada76d2f9fd"
    sha256 cellar: :any_skip_relocation, mojave:        "d2d5115645a279381787d2d5a0118c2309e81c4e3654fd5b10844132b091119f"
  end
  depends_on "go" => :build
  def install
    # Version metadata baked into the binary; the `--version` output
    # asserted in the test block depends on these flags.
    ldflags = %W[
      -s -w
      -X main.version=#{version}
      -X main.commit=#{Utils.git_head}
      -X main.date=#{Time.now.utc.rfc3339}
      -X main.builtBy=homebrew
    ].join(" ")
    system "go", "build", *std_go_args, "-ldflags", ldflags
    # Shell completions shipped in the source tree.
    bash_completion.install "completions/chezmoi-completion.bash"
    fish_completion.install "completions/chezmoi.fish"
    zsh_completion.install "completions/chezmoi.zsh" => "_chezmoi"
    prefix.install_metafiles
  end
  test do
    # test version to ensure that version number is embedded in binary
    assert_match "version v#{version}", shell_output("#{bin}/chezmoi --version")
    assert_match "built by homebrew", shell_output("#{bin}/chezmoi --version")
    system "#{bin}/chezmoi", "init"
    assert_predicate testpath/".local/share/chezmoi", :exist?
  end
end
| 38
| 122
| 0.72807
|
f8929313170b74b3cb21fbcce497b5a2268f540a
| 208
|
require 'test_helper'
# Integration smoke test for the authentication endpoint.
class AuthenticationControllerTest < ActionDispatch::IntegrationTest
  # GET /authentication/authenticate should respond with 2xx.
  test "should get authenticate" do
    get authentication_authenticate_url
    assert_response :success
  end
end
| 20.8
| 68
| 0.8125
|
3823941763a21e19a841db3c43d344293bb023fe
| 452
|
module Liquid
  module Forms
    module Account
      # Form object for editing the account's billing address.
      class BillingAddress < Forms::Update
        # CSS class applied to the rendered <form> element.
        def html_class_name
          'formtastic account'
        end
        # Submit target — billing address is saved through the payment
        # details endpoint.
        def path
          admin_account_payment_details_path
        end
      end
      # Form object for editing personal account details.
      class PersonalDetails < Forms::Update
        # CSS class applied to the rendered <form> element.
        def html_class_name
          'formtastic account'
        end
        # Submit target for account updates.
        def path
          admin_account_path
        end
      end
    end
  end
end
| 16.142857
| 44
| 0.575221
|
b94535c537e1ccc0950228a6bbca6fe8ef19830d
| 59
|
require 'krump/kafka_consumer'
require 'krump/ssh_tunnels'
| 19.666667
| 30
| 0.830508
|
39aa28783f968f3ffe86dc2d0f001f6ec7266862
| 1,210
|
#
# Copyright:: Copyright (c) Chef Software Inc.
# License:: Apache License, Version 2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
require "knife_spec_helper"
require "support/shared/integration/integration_helper"
require "support/shared/context/config"
# Verifies that `knife role list` prints the server's role names,
# sorted alphabetically, one per line.
describe "knife role list", :workstation do
  include IntegrationSupport
  include KnifeSupport
  include_context "default config options"
  when_the_chef_server "has some roles" do
    before do
      role "cons", {}
      role "car", {}
      role "cdr", {}
      role "cat", {}
    end
    # FIX: the example description said "lists all cookbooks", but this
    # spec creates and lists roles.
    it "lists all roles" do
      knife("role list").should_succeed <<~EOM
        car
        cat
        cdr
        cons
      EOM
    end
  end
end
| 26.304348
| 74
| 0.700826
|
015f95d658be9392bee4da867adca6d56b4a5b5b
| 140
|
# Be sure to restart your server when you modify this file.
# Stores session data in a signed, client-side cookie under this key.
Rails.application.config.session_store :cookie_store, key: '_birdious_session'
| 35
| 78
| 0.807143
|
3333e32c8d59951e3fed5c1ba2f4899b0eab3692
| 455
|
# A short (<= 140 chars) user post with an optional picture attachment.
class Micropost < ApplicationRecord
  belongs_to :user
  # Newest posts first everywhere by default.
  default_scope -> { order(created_at: :desc) }
  mount_uploader :picture, PictureUploader
  validates :user_id, presence: true
  validates :content, presence: true, length: { maximum: 140 }
  validate :picture_size

  private

  # Validates the size of an uploaded picture (rejects files over 5 MB).
  def picture_size
    if picture.size > 5.megabytes
      # FIX: was `error.add(...)` — NoMethodError at validation time;
      # the ActiveModel API is `errors.add`. Also fixed the message
      # typo "less that" -> "less than".
      errors.add(:picture, "should be less than 5MB")
    end
  end
end
| 23.947368
| 60
| 0.720879
|
edfc4a1e8af3c6c30c96139e16da60eec7804a14
| 1,352
|
# Copyright (c) 2018 Public Library of Science
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
# Enable Bullet's N+1-query detection only when the gem is loaded AND
# the BULLET environment variable is set (opt-in, e.g. `BULLET=1 rails s`).
if defined?(Bullet) && ENV.fetch('BULLET', false)
  Bullet.enable = true
  # Report detected issues through every available channel.
  Bullet.bullet_logger = true
  Bullet.console       = true
  Bullet.rails_logger  = true
  Bullet.add_footer    = true
  # Include these engine paths in reported stack traces.
  Bullet.stacktrace_includes = ['tahi_standard_tasks']
end
| 43.612903
| 76
| 0.76997
|
ff1d0e20c45e639334040d2b6d0932cbbba2fa1a
| 414
|
# frozen_string_literal: true
module Blacklight
  module Response
    # View component rendering the results sort dropdown.
    class SortComponent < ViewComponent::Base
      # @param search_state [Object] current search context (required)
      # @param param [String] query parameter the control updates
      # @param choices [Hash] sort options to display — presumably
      #   label => sort field; confirm against the template
      # @param id [String] DOM id for the dropdown element
      # @param classes [Array<String>] extra CSS classes
      # @param selected [Object, nil] currently selected choice, if any
      def initialize(search_state:, param: 'sort', choices: {}, id: 'sort-dropdown', classes: [], selected: nil)
        @search_state = search_state
        @param        = param
        @choices      = choices
        @id           = id
        @classes      = classes
        @selected     = selected
      end
    end
  end
end
| 24.352941
| 112
| 0.620773
|
398b74161e50ab2d1b4a9c09dbe00f49b368373a
| 3,144
|
# Copyright (C) 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
spec_dir = File.expand_path(File.join(File.dirname(__FILE__)))
$:.unshift(spec_dir)
$:.uniq!
require 'spec_helper'
require 'signet/oauth_2'
describe Signet do
  describe 'when parsing an auth param list' do
    # NOTE(review): Signet.parse_auth_param_list returns an enumerable of
    # [key, value] pairs, so Array#to_h replaces the noisier
    # `inject({}) { |h,(k,v)| h[k]=v; h }` idiom used previously; both
    # keep the last value for a duplicated key. Confirm against signet's
    # documented return type if this ever changes.
    it 'should correctly handle commas' do
      parameters = Signet.parse_auth_param_list(
        'a="1, 2" , b="3,4",c="5 , 6" ,d="7 ,8"'
      ).to_h
      parameters['a'].should == '1, 2'
      parameters['b'].should == '3,4'
      parameters['c'].should == '5 , 6'
      parameters['d'].should == '7 ,8'
    end
    it 'should correctly handle backslash-escaped pairs' do
      parameters = Signet.parse_auth_param_list(
        'token="\t\o\k\e\n" sigalg="\s\i\g\a\l\g" data="\d\a\t\a"'
      ).to_h
      parameters['token'].should == 'token'
      parameters['sigalg'].should == 'sigalg'
      parameters['data'].should == 'data'
    end
    it 'should liberally handle space-separated auth-param lists' do
      parameters = Signet.parse_auth_param_list(
        'token="token" sigalg="sigalg" data="data" sig="sig"'
      ).to_h
      parameters['token'].should == 'token'
      parameters['sigalg'].should == 'sigalg'
      parameters['data'].should == 'data'
      parameters['sig'].should == 'sig'
    end
    it 'should liberally handle single-quoted auth-param lists' do
      parameters = Signet.parse_auth_param_list(
        'token=\'token\' sigalg=\'sigalg\' data=\'data\' sig=\'sig\''
      ).to_h
      parameters['token'].should == 'token'
      parameters['sigalg'].should == 'sigalg'
      parameters['data'].should == 'data'
      parameters['sig'].should == 'sig'
    end
    it 'should liberally handle unquoted auth-param lists' do
      parameters = Signet.parse_auth_param_list(
        'token=token sigalg=sigalg data=data sig=sig'
      ).to_h
      parameters['token'].should == 'token'
      parameters['sigalg'].should == 'sigalg'
      parameters['data'].should == 'data'
      parameters['sig'].should == 'sig'
    end
    it 'should liberally handle auth-param lists with empty sections' do
      parameters = Signet.parse_auth_param_list(
        'token=token, , sigalg=sigalg,, data=data, sig=sig'
      ).to_h
      parameters['token'].should == 'token'
      parameters['sigalg'].should == 'sigalg'
      parameters['data'].should == 'data'
      parameters['sig'].should == 'sig'
    end
  end
end
| 36.988235
| 77
| 0.618957
|
39a862b380fbca743a1dfeb22fc04851587161a3
| 2,734
|
# Recreates the backers_by_periods reporting view so that backers in the
# new 'deleted' state are excluded from the aggregates (up), or included
# again (down).
#
# REFACTOR: up/down previously duplicated the entire 40+ line SQL
# heredoc, differing only in the excluded-state list; the view body is
# now built once by a private helper.
class UpdateBackersByPeriodViewWithDeletedBackerState < ActiveRecord::Migration
  # States always excluded from the revenue-by-period aggregates.
  BASE_EXCLUDED_STATES = %w(pending canceled waiting_confirmation)

  def up
    recreate_view(BASE_EXCLUDED_STATES + %w(deleted))
  end

  def down
    recreate_view(BASE_EXCLUDED_STATES)
  end

  private

  # Drops and recreates backers_by_periods, summing backer value per
  # week (8 weekly buckets) for the current period, the period 8 weeks
  # earlier, and the same window one year earlier.
  #
  # excluded_states - Array of backer state names to leave out of sums.
  def recreate_view(excluded_states)
    # Safe to interpolate: values come from the literals above, never
    # from user input.
    quoted_states = excluded_states.map { |s| "'#{s}'" }.join(', ')
    drop_view :backers_by_periods
    create_view :backers_by_periods, <<-SQL
      WITH weeks AS (
        SELECT
        generate_series * 7 AS days
        FROM generate_series(0, 7)
      ),
      current_period AS (
        SELECT
        'current_period'::text as series,
        sum(b.value),
        w.days / 7 as week
        FROM
        backers b
        RIGHT JOIN weeks w ON b.confirmed_at::date >= (current_date - w.days - 7) AND b.confirmed_at < (current_date - w.days)
        WHERE
        state NOT IN (#{quoted_states})
        GROUP BY week
      ),
      previous_period AS (
        SELECT
        'previous_period'::text as series,
        sum(b.value),
        w.days / 7 as week
        FROM
        backers b
        RIGHT JOIN weeks w ON b.confirmed_at::date >= (current_date - w.days - 7 - 56) AND b.confirmed_at < (current_date - w.days - 56)
        WHERE
        state NOT IN (#{quoted_states})
        GROUP BY week
      ),
      last_year AS (
        SELECT
        'last_year'::text as series,
        sum(b.value),
        w.days / 7 as week
        FROM
        backers b
        RIGHT JOIN weeks w ON b.confirmed_at::date >= (current_date - w.days - 7 - 365) AND b.confirmed_at < (current_date - w.days - 365)
        WHERE
        state NOT IN (#{quoted_states})
        GROUP BY week
      )
      (SELECT * FROM current_period)
      UNION ALL
      (SELECT * FROM previous_period)
      UNION ALL
      (SELECT * FROM last_year)
      ORDER BY series, week;
    SQL
  end
end
| 25.314815
| 132
| 0.704828
|
62c4c9a67381c5e42cfed5c806cf677dc7fba948
| 767
|
#
# To learn more about a Podspec see http://guides.cocoapods.org/syntax/podspec.html
#
# CocoaPods spec for the iOS side of the my_plugin Flutter plugin.
Pod::Spec.new do |s|
  s.name             = 'my_plugin_ios'
  s.version          = '0.0.1'
  s.summary          = 'An iOS implementation of the my_plugin plugin.'
  s.description      = <<-DESC
An iOS implementation of the my_plugin plugin.
                       DESC
  s.homepage         = 'http://example.com'
  s.license          = { :type => 'BSD', :file => '../LICENSE' }
  s.author           = { 'Your Company' => 'email@example.com' }
  # Sources live next to this podspec (local pod, vendored by Flutter).
  s.source           = { :path => '.' }
  s.source_files     = 'Classes/**/*'
  s.public_header_files = 'Classes/**/*.h'
  s.dependency 'Flutter'
  s.platform = :ios, '9.0'
  # Required so the pod builds as a module when used with Swift.
  s.pod_target_xcconfig = { 'DEFINES_MODULE' => 'YES' }
end
| 34.863636
| 83
| 0.556714
|
2807805b44fd8fe1c183d74fd1f3ceedaf8d13de
| 698
|
# CocoaPods spec for MONK (Mobelux Network Kit).
Pod::Spec.new do |s|
  s.name         = "MONK"
  s.version      = "1.1.2"
  s.summary      = "Mobelux Network Kit, a simple networking library based on URLSession in Swift"
  s.homepage     = "https://github.com/Mobelux/MONK"
  s.license      = "MIT"
  s.author       = { "Mobelux" => "contact@mobelux.com" }
  s.social_media_url = "http://twitter.com/mobelux"
  # Supported Apple platforms and their minimum OS versions.
  s.ios.deployment_target = "10.0"
  s.osx.deployment_target = "10.11"
  s.watchos.deployment_target = "3.0"
  s.tvos.deployment_target = "10.0"
  # Release tags track the podspec version.
  s.source       = { :git => "https://github.com/Mobelux/MONK.git", :tag => "#{s.version}" }
  s.source_files  = "MONK", "MONK/**/*.{h,m,swift}"
  s.framework  = "Foundation"
end
| 31.727273
| 98
| 0.603152
|
d5d378e8b1a8c30aa6a2538aed8b637da78b2ef4
| 20,455
|
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
require "minitest/autorun"
require "minitest/spec"
require "google/gax"
require "google/cloud/os_login"
require "google/cloud/os_login/v1/os_login_service_client"
require "google/cloud/oslogin/v1/oslogin_services_pb"
class CustomTestError_v1 < StandardError; end
# Mock for the GRPC::ClientStub class. Exactly one gRPC method (the one
# under test) is mocked; requesting any other method yields a proc that
# raises when invoked, so stray gRPC calls fail the test loudly.
class MockGrpcClientStub_v1
  # @param expected_symbol [Symbol] the symbol of the grpc method to be mocked.
  # @param mock_method [Proc] The method that is being mocked.
  def initialize(expected_symbol, mock_method)
    @expected_symbol = expected_symbol
    @mock_method = mock_method
  end

  # Overrides Object#method: returns the mock for the expected name,
  # otherwise a proc that raises on call.
  #
  # @param symbol [Symbol] The symbol of the method being requested.
  # @return [Proc] the mock, or a proc that raises when called.
  def method(symbol)
    if symbol == @expected_symbol
      @mock_method
    else
      # The requested method is not being tested, raise if it called.
      proc do
        raise "The method #{symbol} was unexpectedly called during the " \
              "test for #{@expected_symbol}."
      end
    end
  end
end
# Credentials double that fails loudly if any code path actually tries
# to make a gRPC request — auth must be fully mocked in these tests.
class MockOsLoginServiceCredentials_v1 < Google::Cloud::OsLogin::V1::Credentials
  # @param method_name [String] RPC name, used in the failure message.
  def initialize(method_name)
    @method_name = method_name
  end
  # Returned in place of a real token updater; raises if ever invoked.
  def updater_proc
    proc do
      raise "The method `#{@method_name}` was trying to make a grpc request. This should not " \
        "happen since the grpc layer is being mocked."
    end
  end
end
# Unit tests for Google::Cloud::OsLogin::V1::OsLoginServiceClient.
# NOTE(review): this follows the machine-generated GAPIC test pattern —
# each RPC gets a success example and an error-wrapping example, driven
# through MockGrpcClientStub_v1 (gRPC layer) and
# MockOsLoginServiceCredentials_v1 (auth layer). Edit with care: the
# nested Stub.stub/Credentials.stub ordering is load-bearing.
describe Google::Cloud::OsLogin::V1::OsLoginServiceClient do
  describe 'delete_posix_account' do
    custom_error = CustomTestError_v1.new "Custom test error for Google::Cloud::OsLogin::V1::OsLoginServiceClient#delete_posix_account."

    it 'invokes delete_posix_account without error' do
      # Create request parameters
      formatted_name = Google::Cloud::OsLogin::V1::OsLoginServiceClient.project_path("[USER]", "[PROJECT]")

      # Mock Grpc layer
      mock_method = proc do |request|
        assert_instance_of(Google::Cloud::Oslogin::V1::DeletePosixAccountRequest, request)
        assert_equal(formatted_name, request.name)
        OpenStruct.new(execute: nil)
      end
      mock_stub = MockGrpcClientStub_v1.new(:delete_posix_account, mock_method)

      # Mock auth layer
      mock_credentials = MockOsLoginServiceCredentials_v1.new("delete_posix_account")

      Google::Cloud::Oslogin::V1::OsLoginService::Stub.stub(:new, mock_stub) do
        Google::Cloud::OsLogin::V1::Credentials.stub(:default, mock_credentials) do
          client = Google::Cloud::OsLogin.new(version: :v1)

          # Call method
          response = client.delete_posix_account(formatted_name)

          # Verify the response
          assert_nil(response)

          # Call method with block
          client.delete_posix_account(formatted_name) do |response, operation|
            # Verify the response
            assert_nil(response)
            refute_nil(operation)
          end
        end
      end
    end

    it 'invokes delete_posix_account with error' do
      # Create request parameters
      formatted_name = Google::Cloud::OsLogin::V1::OsLoginServiceClient.project_path("[USER]", "[PROJECT]")

      # Mock Grpc layer
      mock_method = proc do |request|
        assert_instance_of(Google::Cloud::Oslogin::V1::DeletePosixAccountRequest, request)
        assert_equal(formatted_name, request.name)
        raise custom_error
      end
      mock_stub = MockGrpcClientStub_v1.new(:delete_posix_account, mock_method)

      # Mock auth layer
      mock_credentials = MockOsLoginServiceCredentials_v1.new("delete_posix_account")

      Google::Cloud::Oslogin::V1::OsLoginService::Stub.stub(:new, mock_stub) do
        Google::Cloud::OsLogin::V1::Credentials.stub(:default, mock_credentials) do
          client = Google::Cloud::OsLogin.new(version: :v1)

          # Call method
          err = assert_raises Google::Gax::GaxError, CustomTestError_v1 do
            client.delete_posix_account(formatted_name)
          end

          # Verify the GaxError wrapped the custom error that was raised.
          assert_match(custom_error.message, err.message)
        end
      end
    end
  end

  describe 'delete_ssh_public_key' do
    custom_error = CustomTestError_v1.new "Custom test error for Google::Cloud::OsLogin::V1::OsLoginServiceClient#delete_ssh_public_key."

    it 'invokes delete_ssh_public_key without error' do
      # Create request parameters
      formatted_name = Google::Cloud::OsLogin::V1::OsLoginServiceClient.fingerprint_path("[USER]", "[FINGERPRINT]")

      # Mock Grpc layer
      mock_method = proc do |request|
        assert_instance_of(Google::Cloud::Oslogin::V1::DeleteSshPublicKeyRequest, request)
        assert_equal(formatted_name, request.name)
        OpenStruct.new(execute: nil)
      end
      mock_stub = MockGrpcClientStub_v1.new(:delete_ssh_public_key, mock_method)

      # Mock auth layer
      mock_credentials = MockOsLoginServiceCredentials_v1.new("delete_ssh_public_key")

      Google::Cloud::Oslogin::V1::OsLoginService::Stub.stub(:new, mock_stub) do
        Google::Cloud::OsLogin::V1::Credentials.stub(:default, mock_credentials) do
          client = Google::Cloud::OsLogin.new(version: :v1)

          # Call method
          response = client.delete_ssh_public_key(formatted_name)

          # Verify the response
          assert_nil(response)

          # Call method with block
          client.delete_ssh_public_key(formatted_name) do |response, operation|
            # Verify the response
            assert_nil(response)
            refute_nil(operation)
          end
        end
      end
    end

    it 'invokes delete_ssh_public_key with error' do
      # Create request parameters
      formatted_name = Google::Cloud::OsLogin::V1::OsLoginServiceClient.fingerprint_path("[USER]", "[FINGERPRINT]")

      # Mock Grpc layer
      mock_method = proc do |request|
        assert_instance_of(Google::Cloud::Oslogin::V1::DeleteSshPublicKeyRequest, request)
        assert_equal(formatted_name, request.name)
        raise custom_error
      end
      mock_stub = MockGrpcClientStub_v1.new(:delete_ssh_public_key, mock_method)

      # Mock auth layer
      mock_credentials = MockOsLoginServiceCredentials_v1.new("delete_ssh_public_key")

      Google::Cloud::Oslogin::V1::OsLoginService::Stub.stub(:new, mock_stub) do
        Google::Cloud::OsLogin::V1::Credentials.stub(:default, mock_credentials) do
          client = Google::Cloud::OsLogin.new(version: :v1)

          # Call method
          err = assert_raises Google::Gax::GaxError, CustomTestError_v1 do
            client.delete_ssh_public_key(formatted_name)
          end

          # Verify the GaxError wrapped the custom error that was raised.
          assert_match(custom_error.message, err.message)
        end
      end
    end
  end

  describe 'get_login_profile' do
    custom_error = CustomTestError_v1.new "Custom test error for Google::Cloud::OsLogin::V1::OsLoginServiceClient#get_login_profile."

    it 'invokes get_login_profile without error' do
      # Create request parameters
      formatted_name = Google::Cloud::OsLogin::V1::OsLoginServiceClient.user_path("[USER]")

      # Create expected grpc response
      name_2 = "name2-1052831874"
      suspended = false
      expected_response = { name: name_2, suspended: suspended }
      expected_response = Google::Gax::to_proto(expected_response, Google::Cloud::Oslogin::V1::LoginProfile)

      # Mock Grpc layer
      mock_method = proc do |request|
        assert_instance_of(Google::Cloud::Oslogin::V1::GetLoginProfileRequest, request)
        assert_equal(formatted_name, request.name)
        OpenStruct.new(execute: expected_response)
      end
      mock_stub = MockGrpcClientStub_v1.new(:get_login_profile, mock_method)

      # Mock auth layer
      mock_credentials = MockOsLoginServiceCredentials_v1.new("get_login_profile")

      Google::Cloud::Oslogin::V1::OsLoginService::Stub.stub(:new, mock_stub) do
        Google::Cloud::OsLogin::V1::Credentials.stub(:default, mock_credentials) do
          client = Google::Cloud::OsLogin.new(version: :v1)

          # Call method
          response = client.get_login_profile(formatted_name)

          # Verify the response
          assert_equal(expected_response, response)

          # Call method with block
          client.get_login_profile(formatted_name) do |response, operation|
            # Verify the response
            assert_equal(expected_response, response)
            refute_nil(operation)
          end
        end
      end
    end

    it 'invokes get_login_profile with error' do
      # Create request parameters
      formatted_name = Google::Cloud::OsLogin::V1::OsLoginServiceClient.user_path("[USER]")

      # Mock Grpc layer
      mock_method = proc do |request|
        assert_instance_of(Google::Cloud::Oslogin::V1::GetLoginProfileRequest, request)
        assert_equal(formatted_name, request.name)
        raise custom_error
      end
      mock_stub = MockGrpcClientStub_v1.new(:get_login_profile, mock_method)

      # Mock auth layer
      mock_credentials = MockOsLoginServiceCredentials_v1.new("get_login_profile")

      Google::Cloud::Oslogin::V1::OsLoginService::Stub.stub(:new, mock_stub) do
        Google::Cloud::OsLogin::V1::Credentials.stub(:default, mock_credentials) do
          client = Google::Cloud::OsLogin.new(version: :v1)

          # Call method
          err = assert_raises Google::Gax::GaxError, CustomTestError_v1 do
            client.get_login_profile(formatted_name)
          end

          # Verify the GaxError wrapped the custom error that was raised.
          assert_match(custom_error.message, err.message)
        end
      end
    end
  end

  describe 'get_ssh_public_key' do
    custom_error = CustomTestError_v1.new "Custom test error for Google::Cloud::OsLogin::V1::OsLoginServiceClient#get_ssh_public_key."

    it 'invokes get_ssh_public_key without error' do
      # Create request parameters
      formatted_name = Google::Cloud::OsLogin::V1::OsLoginServiceClient.fingerprint_path("[USER]", "[FINGERPRINT]")

      # Create expected grpc response
      key = "key106079"
      expiration_time_usec = 2058878882
      fingerprint = "fingerprint-1375934236"
      expected_response = {
        key: key,
        expiration_time_usec: expiration_time_usec,
        fingerprint: fingerprint
      }
      expected_response = Google::Gax::to_proto(expected_response, Google::Cloud::Oslogin::Common::SshPublicKey)

      # Mock Grpc layer
      mock_method = proc do |request|
        assert_instance_of(Google::Cloud::Oslogin::V1::GetSshPublicKeyRequest, request)
        assert_equal(formatted_name, request.name)
        OpenStruct.new(execute: expected_response)
      end
      mock_stub = MockGrpcClientStub_v1.new(:get_ssh_public_key, mock_method)

      # Mock auth layer
      mock_credentials = MockOsLoginServiceCredentials_v1.new("get_ssh_public_key")

      Google::Cloud::Oslogin::V1::OsLoginService::Stub.stub(:new, mock_stub) do
        Google::Cloud::OsLogin::V1::Credentials.stub(:default, mock_credentials) do
          client = Google::Cloud::OsLogin.new(version: :v1)

          # Call method
          response = client.get_ssh_public_key(formatted_name)

          # Verify the response
          assert_equal(expected_response, response)

          # Call method with block
          client.get_ssh_public_key(formatted_name) do |response, operation|
            # Verify the response
            assert_equal(expected_response, response)
            refute_nil(operation)
          end
        end
      end
    end

    it 'invokes get_ssh_public_key with error' do
      # Create request parameters
      formatted_name = Google::Cloud::OsLogin::V1::OsLoginServiceClient.fingerprint_path("[USER]", "[FINGERPRINT]")

      # Mock Grpc layer
      mock_method = proc do |request|
        assert_instance_of(Google::Cloud::Oslogin::V1::GetSshPublicKeyRequest, request)
        assert_equal(formatted_name, request.name)
        raise custom_error
      end
      mock_stub = MockGrpcClientStub_v1.new(:get_ssh_public_key, mock_method)

      # Mock auth layer
      mock_credentials = MockOsLoginServiceCredentials_v1.new("get_ssh_public_key")

      Google::Cloud::Oslogin::V1::OsLoginService::Stub.stub(:new, mock_stub) do
        Google::Cloud::OsLogin::V1::Credentials.stub(:default, mock_credentials) do
          client = Google::Cloud::OsLogin.new(version: :v1)

          # Call method
          err = assert_raises Google::Gax::GaxError, CustomTestError_v1 do
            client.get_ssh_public_key(formatted_name)
          end

          # Verify the GaxError wrapped the custom error that was raised.
          assert_match(custom_error.message, err.message)
        end
      end
    end
  end

  describe 'import_ssh_public_key' do
    custom_error = CustomTestError_v1.new "Custom test error for Google::Cloud::OsLogin::V1::OsLoginServiceClient#import_ssh_public_key."

    it 'invokes import_ssh_public_key without error' do
      # Create request parameters
      formatted_parent = Google::Cloud::OsLogin::V1::OsLoginServiceClient.user_path("[USER]")
      ssh_public_key = {}

      # Create expected grpc response
      expected_response = {}
      expected_response = Google::Gax::to_proto(expected_response, Google::Cloud::Oslogin::V1::ImportSshPublicKeyResponse)

      # Mock Grpc layer
      mock_method = proc do |request|
        assert_instance_of(Google::Cloud::Oslogin::V1::ImportSshPublicKeyRequest, request)
        assert_equal(formatted_parent, request.parent)
        assert_equal(Google::Gax::to_proto(ssh_public_key, Google::Cloud::Oslogin::Common::SshPublicKey), request.ssh_public_key)
        OpenStruct.new(execute: expected_response)
      end
      mock_stub = MockGrpcClientStub_v1.new(:import_ssh_public_key, mock_method)

      # Mock auth layer
      mock_credentials = MockOsLoginServiceCredentials_v1.new("import_ssh_public_key")

      Google::Cloud::Oslogin::V1::OsLoginService::Stub.stub(:new, mock_stub) do
        Google::Cloud::OsLogin::V1::Credentials.stub(:default, mock_credentials) do
          client = Google::Cloud::OsLogin.new(version: :v1)

          # Call method
          response = client.import_ssh_public_key(formatted_parent, ssh_public_key)

          # Verify the response
          assert_equal(expected_response, response)

          # Call method with block
          client.import_ssh_public_key(formatted_parent, ssh_public_key) do |response, operation|
            # Verify the response
            assert_equal(expected_response, response)
            refute_nil(operation)
          end
        end
      end
    end

    it 'invokes import_ssh_public_key with error' do
      # Create request parameters
      formatted_parent = Google::Cloud::OsLogin::V1::OsLoginServiceClient.user_path("[USER]")
      ssh_public_key = {}

      # Mock Grpc layer
      mock_method = proc do |request|
        assert_instance_of(Google::Cloud::Oslogin::V1::ImportSshPublicKeyRequest, request)
        assert_equal(formatted_parent, request.parent)
        assert_equal(Google::Gax::to_proto(ssh_public_key, Google::Cloud::Oslogin::Common::SshPublicKey), request.ssh_public_key)
        raise custom_error
      end
      mock_stub = MockGrpcClientStub_v1.new(:import_ssh_public_key, mock_method)

      # Mock auth layer
      mock_credentials = MockOsLoginServiceCredentials_v1.new("import_ssh_public_key")

      Google::Cloud::Oslogin::V1::OsLoginService::Stub.stub(:new, mock_stub) do
        Google::Cloud::OsLogin::V1::Credentials.stub(:default, mock_credentials) do
          client = Google::Cloud::OsLogin.new(version: :v1)

          # Call method
          err = assert_raises Google::Gax::GaxError, CustomTestError_v1 do
            client.import_ssh_public_key(formatted_parent, ssh_public_key)
          end

          # Verify the GaxError wrapped the custom error that was raised.
          assert_match(custom_error.message, err.message)
        end
      end
    end
  end

  describe 'update_ssh_public_key' do
    custom_error = CustomTestError_v1.new "Custom test error for Google::Cloud::OsLogin::V1::OsLoginServiceClient#update_ssh_public_key."

    it 'invokes update_ssh_public_key without error' do
      # Create request parameters
      formatted_name = Google::Cloud::OsLogin::V1::OsLoginServiceClient.fingerprint_path("[USER]", "[FINGERPRINT]")
      ssh_public_key = {}

      # Create expected grpc response
      key = "key106079"
      expiration_time_usec = 2058878882
      fingerprint = "fingerprint-1375934236"
      expected_response = {
        key: key,
        expiration_time_usec: expiration_time_usec,
        fingerprint: fingerprint
      }
      expected_response = Google::Gax::to_proto(expected_response, Google::Cloud::Oslogin::Common::SshPublicKey)

      # Mock Grpc layer
      mock_method = proc do |request|
        assert_instance_of(Google::Cloud::Oslogin::V1::UpdateSshPublicKeyRequest, request)
        assert_equal(formatted_name, request.name)
        assert_equal(Google::Gax::to_proto(ssh_public_key, Google::Cloud::Oslogin::Common::SshPublicKey), request.ssh_public_key)
        OpenStruct.new(execute: expected_response)
      end
      mock_stub = MockGrpcClientStub_v1.new(:update_ssh_public_key, mock_method)

      # Mock auth layer
      mock_credentials = MockOsLoginServiceCredentials_v1.new("update_ssh_public_key")

      Google::Cloud::Oslogin::V1::OsLoginService::Stub.stub(:new, mock_stub) do
        Google::Cloud::OsLogin::V1::Credentials.stub(:default, mock_credentials) do
          client = Google::Cloud::OsLogin.new(version: :v1)

          # Call method
          response = client.update_ssh_public_key(formatted_name, ssh_public_key)

          # Verify the response
          assert_equal(expected_response, response)

          # Call method with block
          client.update_ssh_public_key(formatted_name, ssh_public_key) do |response, operation|
            # Verify the response
            assert_equal(expected_response, response)
            refute_nil(operation)
          end
        end
      end
    end

    it 'invokes update_ssh_public_key with error' do
      # Create request parameters
      formatted_name = Google::Cloud::OsLogin::V1::OsLoginServiceClient.fingerprint_path("[USER]", "[FINGERPRINT]")
      ssh_public_key = {}

      # Mock Grpc layer
      mock_method = proc do |request|
        assert_instance_of(Google::Cloud::Oslogin::V1::UpdateSshPublicKeyRequest, request)
        assert_equal(formatted_name, request.name)
        assert_equal(Google::Gax::to_proto(ssh_public_key, Google::Cloud::Oslogin::Common::SshPublicKey), request.ssh_public_key)
        raise custom_error
      end
      mock_stub = MockGrpcClientStub_v1.new(:update_ssh_public_key, mock_method)

      # Mock auth layer
      mock_credentials = MockOsLoginServiceCredentials_v1.new("update_ssh_public_key")

      Google::Cloud::Oslogin::V1::OsLoginService::Stub.stub(:new, mock_stub) do
        Google::Cloud::OsLogin::V1::Credentials.stub(:default, mock_credentials) do
          client = Google::Cloud::OsLogin.new(version: :v1)

          # Call method
          err = assert_raises Google::Gax::GaxError, CustomTestError_v1 do
            client.update_ssh_public_key(formatted_name, ssh_public_key)
          end

          # Verify the GaxError wrapped the custom error that was raised.
          assert_match(custom_error.message, err.message)
        end
      end
    end
  end
end
| 39.110899
| 137
| 0.695869
|
1daeff5c53d76d19625b04ba2d2c61651cb21146
| 2,551
|
# Xcode 4.3 provides the Apple libtool.
# This is not the same so as a result we must install this as glibtool.
class Libtool < Formula
  desc "Generic library support script"
  homepage "https://www.gnu.org/software/libtool/"
  url "https://ftp.gnu.org/gnu/libtool/libtool-2.4.6.tar.xz"
  mirror "https://ftpmirror.gnu.org/libtool/libtool-2.4.6.tar.xz"
  sha256 "7c87a8c2c8c0fc9cd5019e402bed4292462d00a718a7cd5f11218153bf28b26f"
  # Linux bottles have been rebuilt one more time than the macOS ones.
  revision OS.linux? ? 2 : 1

  bottle do
    cellar :any
    sha256 "ebb50367eb2336ee317841587e24690de124fb2c3e4d346405e9b41c4e6120ae" => :high_sierra
    sha256 "78a1f6c6644eae01eb5c204ef705f7e48721a0fe8ece492c10c84791061885db" => :sierra
    sha256 "b7651d0a082e2f103f03ca3a5ed831e2ff5655ccc1044ac0452e4d1825475a35" => :el_capitan
    sha256 "0eb206c0f51e8ce2e3e9340b5ce3c8ecef961ae6696f676073327a7ac04e5c0b" => :yosemite
    sha256 "2e51ef82ef2bd1ad9d921a9016b9e5d7fa82d131849e2c32a3c90daa119e2eda" => :mavericks
    sha256 "1efb2596f487af0e666e0a3d236ee8ac83db17d9e8e94066802e000f75b4b045" => :x86_64_linux # glibc 2.19
  end

  # Apple ships its own /usr/bin/libtool, so keep this keg out of PATH.
  keg_only :provided_until_xcode43

  depends_on "m4" => :build unless OS.mac?

  option "with-default-names", "Do not prepend 'g' to the binary"

  def install
    ENV["SED"] = "sed" # prevent libtool from hardcoding sed path from superenv
    if OS.linux? && build.bottle?
      # prevent libtool from hardcoding GCC 4.8
      ENV["CC"] = "cc"
      ENV["CXX"] = "c++"
    end
    # The parenthesized argument evaluates to nil (and is dropped by
    # `system`) when --with-default-names is requested.
    system "./configure", "--disable-dependency-tracking",
                          "--prefix=#{prefix}",
                          ("--program-prefix=g" if build.without? "default-names"),
                          "--enable-ltdl-install"
    system "make", "install"
    # Even with default names, provide the g-prefixed symlinks that other
    # formulae expect to find.
    if build.with? "default-names"
      bin.install_symlink "libtool" => "glibtool"
      bin.install_symlink "libtoolize" => "glibtoolize"
    end
  end

  def caveats; <<~EOS
    In order to prevent conflicts with Apple's own libtool we have prepended a "g"
    so, you have instead: glibtool and glibtoolize.
  EOS
  end

  test do
    # Smoke-test `glibtool execute` against whichever `true` binary exists.
    system "#{bin}/glibtool", "execute", File.executable?("/usr/bin/true") ? "/usr/bin/true" : "/bin/true"
    (testpath/"hello.c").write <<-EOS
      #include <stdio.h>
      int main() { puts("Hello, world!"); return 0; }
    EOS
    # Exercise the full compile-and-link cycle through libtool.
    system bin/"glibtool", "--mode=compile", "--tag=CC",
           ENV.cc, "-c", "hello.c", "-o", "hello.o"
    system bin/"glibtool", "--mode=link", "--tag=CC",
           ENV.cc, "hello.o", "-o", "hello"
    assert_match "Hello, world!", shell_output("./hello")
  end
end
| 36.971014
| 107
| 0.673853
|
e219041e43d29a8c28c6766e7b4261700584605b
| 140
|
require 'rails_helper'
# Scaffold-generated view spec; the single `pending` example marks this
# file as not yet implemented.
RSpec.describe "chart1/index.html.erb", type: :view do
  pending "add some examples to (or delete) #{__FILE__}"
end
| 23.333333
| 56
| 0.735714
|
e257b12826da9b8f98f374e7000cdbb6c721a73e
| 3,130
|
require 'forwardable'
require 'set'
class HBase
  # Schema maps table columns (column family / qualifier) to value types so
  # cell values can be serialized and deserialized automatically.
  #
  # Two structures are kept per table:
  #   @schema -- the user-supplied definition Hash, frozen
  #   @lookup -- a derived index with :exact entries (String/Symbol keys)
  #              and :pattern entries (Regexp keys)
  # Both are swapped via dup-and-modify (copy-on-write), so a reader holding
  # an old reference never observes a half-updated table.
  class Schema
    extend Forwardable
    def_delegators :@schema, :inspect, :to_s

    def initialize
      @schema = {}
      @lookup = {}
    end

    # Registers (or replaces) the schema for a table.
    #
    # Accepted entry shapes:
    #   [cq]    'cf' => { 'cq' => :type, /pattern/ => :type, ... }
    #   [cf:cq] 'cf:cq' => :type    (single-column shortcut)
    #
    # @param [Symbol] table
    # @param [Hash] definition  nil or empty deletes the table's schema
    def []= table, definition
      if definition.nil? || definition.empty?
        delete table
        return nil
      end

      unless definition.is_a?(Hash)
        raise ArgumentError, 'invalid schema definition: Hash required'
      end
      definition = definition.dup.freeze
      lookup = empty_lookup_table

      definition.each do |cf, cols|
        unless [Symbol, String].any? { |k| cf.is_a? k }
          raise ArgumentError,
            "invalid schema: use String or Symbol for column family name"
        end

        # CF:CQ => Type shortcut; normalized into { CQ => Type } under CF
        cf = cf.to_s
        if cf.index(':')
          cf, q = cf.to_s.split ':', 2
          cols = { q => cols }
        else
          raise ArgumentError, "invalid schema: expected Hash" unless cols.is_a?(Hash)
        end

        # Family => { Column => Type }
        cols.each do |cq, type|
          type = type.to_sym
          unless KNOWN_TYPES.include? type
            raise ArgumentError, "invalid schema: unknown type: #{type}"
          end

          # Pattern
          case cq
          when Regexp
            # Qualifier is resolved at lookup time, hence the nil slot
            lookup[:pattern][cq] = [cf, nil, type]
          # Exact: index under String, Symbol and fully-qualified "cf:cq"
          when String, Symbol
            cq = cq.to_s
            cfcq = [cf, cq].join(':')
            [cq, cq.to_sym, cfcq].each do |key|
              lookup[:exact][key] = [cf, cq.to_sym, type]
            end
          else
            raise ArgumentError, "invalid schema"
          end
        end
      end

      table = table.to_sym
      # Copy-on-write swap of both per-table structures
      @lookup = @lookup.dup.tap { |h| h[table] = lookup }
      @schema = @schema.dup.tap { |h| h[table] = definition }

      definition
    end

    # Resolves a column reference against a table's lookup index.
    # Exact matches win; otherwise Regexp patterns are tried in insertion
    # order and the matched qualifier is filled into a copy of the entry.
    # @private
    # @param [Symbol] table
    # @return [Array] CF, CQ, Type. When not found, nil.
    def lookup table, col
      return nil unless lookup = @lookup[table]
      case col
      when String, Symbol
        if match = lookup[:exact][col]
          return match
        elsif pair = lookup[:pattern].find { |k, v| col.to_s =~ k }
          colsym = col.to_sym rescue nil
          # pair is [regexp, [cf, nil, type]]; dup so the stored template
          # entry is never mutated
          return colsym && pair[1].dup.tap { |e| e[1] = colsym }
        end
      else
        return nil
      end
    end

    # Resolves a column and parses it into [cf, cq, type] via Util
    # (defined elsewhere in this library).
    # @private
    # @param [Symbol] table
    def lookup_and_parse table, col, expect_cq
      cf, cq, type = lookup table, col
      cf, cq = Util.parse_column_name(cf ? [cf, cq] : col)
      raise ArgumentError, "Invalid column key: #{col}" if expect_cq && cq.nil?
      return [cf, cq, type]
    end

    # Delete schema for the table
    # @param [Symbol] table
    def delete table
      table = table.to_sym
      # reject builds new Hashes: same copy-on-write discipline as []=
      @lookup = @lookup.reject { |k, v| k == table }
      @schema = @schema.reject { |k, v| k == table }
      nil
    end

    # @return [Hash] the raw table => definition map
    def to_h
      @schema
    end

  private
    # Fresh, empty per-table lookup index.
    def empty_lookup_table
      {
        :exact => {},
        :pattern => {},
      }
    end

    # Value types the serializer understands.
    KNOWN_TYPES = Set[
      :string, :str, :symbol, :sym,
      :byte,
      :boolean, :bool,
      :int,
      :short,
      :long, :fixnum,
      :float, :double,
      :bigdecimal,
      :raw
    ]
  end
end
| 22.357143
| 84
| 0.565815
|
bf5aee42f44b32226fd1b435595a1c82df5244d8
| 1,285
|
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: google/ads/googleads/v4/enums/app_placeholder_field.proto
require 'google/protobuf'
require 'google/api/annotations_pb'
# Registers the AppPlaceholderField descriptors with the global generated
# descriptor pool. This file is protoc output -- regenerate, don't hand-edit.
Google::Protobuf::DescriptorPool.generated_pool.build do
  add_file("google/ads/googleads/v4/enums/app_placeholder_field.proto", :syntax => :proto3) do
    # Empty wrapper message; exists only to namespace the enum below.
    add_message "google.ads.googleads.v4.enums.AppPlaceholderFieldEnum" do
    end
    add_enum "google.ads.googleads.v4.enums.AppPlaceholderFieldEnum.AppPlaceholderField" do
      value :UNSPECIFIED, 0
      value :UNKNOWN, 1
      value :STORE, 2
      value :ID, 3
      value :LINK_TEXT, 4
      value :URL, 5
      value :FINAL_URLS, 6
      value :FINAL_MOBILE_URLS, 7
      value :TRACKING_URL, 8
      value :FINAL_URL_SUFFIX, 9
    end
  end
end

# Ruby constants resolved from the descriptor pool registered above.
module Google
  module Ads
    module GoogleAds
      module V4
        module Enums
          AppPlaceholderFieldEnum = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("google.ads.googleads.v4.enums.AppPlaceholderFieldEnum").msgclass
          AppPlaceholderFieldEnum::AppPlaceholderField = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("google.ads.googleads.v4.enums.AppPlaceholderFieldEnum.AppPlaceholderField").enummodule
        end
      end
    end
  end
end
| 33.815789
| 201
| 0.732296
|
f749046724e6f720082b4a5d278eb21a994b9222
| 71
|
# Pluralization data for the bo_IN (Tibetan, India) locale.
# Tibetan makes no grammatical plural distinction, so every count maps
# to the :other key.
#
# NOTE(review): the original line read `:rule => }` -- a syntax error from a
# truncated lambda. Restored the conventional rails-i18n rule shape; the
# original :keys value is preserved as nil (confirm whether it should be
# [:other] to match other locale files in this project).
{ :'bo_IN' => { :i18n => { :plural => { :keys => nil, :rule => lambda { |n| :other } } } } }
| 71
| 71
| 0.366197
|
b9e62ab021e3223e7b19ef396095fe9fdce93a29
| 1,834
|
# config valid for current version and patch releases of Capistrano
lock '~> 3.16.0'

set :application, 'opencourts-prokuratura'
set :repo_url, 'git@github.com:otvorenesudy/otvorenesudy-prokuratura.git'

# Sidekiq
set :sidekiq_processes, 1

# Rbenv: run under the deploy user's rbenv Ruby, pinned to .ruby-version.
set :rbenv_type, :user
set :rbenv_ruby, File.read('.ruby-version').strip

# Whenever: namespace crontab entries per application+stage so multiple
# stages on one host do not clobber each other's sections.
set :whenever_identifier, -> { "#{fetch(:application)}-#{fetch(:stage)}" }

# Links: files and directories shared across releases (symlinked from shared/)
set :linked_files, fetch(:linked_files, []).push('config/master.key', 'config/credentials/production.key')
set :linked_dirs,
    fetch(:linked_dirs, []).push('log', 'tmp/pids', 'tmp/cache', 'tmp/sockets', 'tmp/downloads', 'vendor/bundle')

set :keep_releases, 2
set :ssh_options, { forward_agent: true }

namespace :deploy do
  after 'deploy:publishing', 'deploy:restart'
  # NOTE(review): Capistrano's built-in event is 'deploy:finishing'; verify
  # that these bare 'finishing' hooks actually fire on this setup.
  after 'finishing', 'deploy:cleanup'
  after 'finishing', 'cache:clear'

  desc 'Deploy app for first time'
  task :cold do
    # Replays the standard deploy flow, inserting database creation where
    # deploy:migrations would normally run.
    invoke 'deploy:starting'
    invoke 'deploy:started'
    invoke 'deploy:updating'
    invoke 'bundler:install'
    invoke 'deploy:database' # This replaces deploy:migrations
    invoke 'deploy:compile_assets'
    invoke 'deploy:normalize_assets'
    invoke 'deploy:publishing'
    invoke 'deploy:published'
    invoke 'deploy:finishing'
    invoke 'deploy:finished'
  end

  desc 'Setup database'
  task :database do
    on roles(:db) do
      within release_path do
        with rails_env: (fetch(:rails_env) || fetch(:stage)) do
          execute :rake, 'db:create'
          execute :rake, 'db:migrate'
          execute :rake, 'db:seed'
        end
      end
    end
  end
end

namespace :cache do
  # Clears the application cache on every app server after deploy.
  task :clear do
    on roles(:app) do |host|
      with rails_env: fetch(:rails_env) do
        within current_path do
          execute :bundle, :exec, 'rake cache:clear'
        end
      end
    end
  end
end
| 26.2
| 113
| 0.675027
|
38d380d0a2bf8f07b4da963d2169145c538f1107
| 1,348
|
# frozen_string_literal: true
lib = File.expand_path('lib', __dir__)
$LOAD_PATH.unshift(lib) unless $LOAD_PATH.include?(lib)
require 'super_gem/version'
# Packaging metadata for super_gem, an example gem demonstrating C bindings.
Gem::Specification.new do |s|
  s.name     = 'super_gem'
  s.version  = SuperGem::VERSION
  s.authors  = ['Oleg Zubchenko']
  s.email    = ['RedGreenBlueDiamond@gmail.com']
  s.summary  = 'Example c bindings gem'
  s.homepage = 'https://example.com/gems/super_gem'
  s.license  = 'MIT'

  # Ship every git-tracked file, excluding tests, specs and features.
  s.files = Dir.chdir(File.expand_path(__dir__)) do
    `git ls-files -z`.split("\x0").reject { |f| f.match(%r{^(test|spec|features)/}) }
  end

  s.bindir        = 'exe'
  s.executables   = s.files.grep(%r{^exe/}) { |f| File.basename(f) }
  s.require_paths = ['lib']

  # Development-only dependencies, declared in one place.
  {
    'benchmark-ips' => '~> 2.7',
    'bundler'       => '~> 2.0',
    'minitest'      => '~> 5.11',
    'pry-nav'       => '~> 0.3',
    'rake'          => '~> 10.0',
    'rake-compiler' => '~> 1.0',
    'rubocop'       => '~> 0.69'
  }.each { |gem_name, requirement| s.add_development_dependency(gem_name, requirement) }
end
| 39.647059
| 87
| 0.672107
|
33371c2951be831e899b42ffb29ee7e8dd9b4a0b
| 1,949
|
require 'spec_helper'
module Finitio
  # Covers the "relation_type" grammar rule: {{ ... }} literals, both their
  # compiled type objects and their raw AST shapes.
  describe Syntax, "relation_type" do
    subject{
      Syntax.parse(input, root: "relation_type")
    }

    describe "compilation result" do
      let(:compiled){
        subject.compile(type_factory)
      }

      context 'empty heading' do
        let(:input){ '{{ }}' }
        it 'compiles to a RelationType' do
          expect(compiled).to be_a(RelationType)
          expect(compiled.heading).to be_empty
        end
      end

      context '{{a: .Integer}}' do
        let(:input){ '{{a: .Integer}}' }
        it 'compiles to a RelationType' do
          expect(compiled).to be_a(RelationType)
          expect(compiled.heading.size).to eq(1)
        end
      end

      context '{{a: .Integer, b: .Float}}' do
        let(:input){ '{{a: .Integer, b: .Float}}' }
        it 'compiles to a RelationType' do
          expect(compiled).to be_a(RelationType)
          expect(compiled.heading.size).to eq(2)
        end
      end

      # The ":?" marker (optional attribute) upgrades the result to a
      # MultiRelationType.
      context '{{a: .Integer, b :? .Float}}' do
        let(:input){ '{{a: .Integer, b :? .Float}}' }
        it 'compiles to a MultiRelationType' do
          expect(compiled).to be_a(MultiRelationType)
          expect(compiled.heading.size).to eq(2)
        end
      end
    end

    describe "AST" do
      let(:ast){ subject.to_ast }

      context '{{a: .Integer}}' do
        let(:input){ '{{a: .Integer}}' }
        it{
          expect(ast).to eq([
            :relation_type,
            [
              :heading,
              [ :attribute, "a", [:builtin_type, "Integer" ]]
            ]
          ])
        }
      end

      # Optional attributes carry a trailing `false` flag in the AST and
      # change the node tag to :multi_relation_type.
      context '{{a :? .Integer}}' do
        let(:input){ '{{a :? .Integer}}' }
        it{
          expect(ast).to eq([
            :multi_relation_type,
            [
              :heading,
              [ :attribute, "a", [:builtin_type, "Integer" ], false]
            ]
          ])
        }
      end
    end
  end
end
| 22.929412
| 68
| 0.493586
|
1a4883efb54675295e633b2ef71645f20279c663
| 1,430
|
# Copyright (C) 2006-2007 Kurt Stephens <ruby-currency(at)umleta.com>
# See LICENSE.txt for details.
# The Currency::Exchange package is responsible for
# the buying and selling of currencies.
#
# This feature is currently unimplemented.
#
# Exchange rate sources are configured via Currency::Exchange::Rate::Source.
#
# The Currency::Exchange package is responsible for the buying and selling
# of currencies. Exchange rate sources are configured via
# Currency::Exchange::Rate::Source. This feature is currently unimplemented.
module Currency::Exchange
  @@default = nil
  @@current = nil

  # Returns the default Currency::Exchange object.
  #
  # No default implementation exists yet, so this raises unless a default
  # has been installed via #default=.
  def self.default
    raise("UNIMPLEMENTED") unless @@default
    @@default
  end

  # Installs +exchange+ as the process-wide default exchange.
  def self.default=(exchange)
    @@default = exchange
  end

  # Returns the exchange used during explicit and implicit Money
  # conversions: the current one if set, otherwise the default.
  # Raises Currency::Exception::UndefinedExchange when neither is defined.
  def self.current
    exchange = @@current || self.default
    return exchange if exchange
    raise ::Currency::Exception::UndefinedExchange.new("Currency::Exchange.current not defined")
  end

  # Sets the exchange used for subsequent Money conversions.
  def self.current=(exchange)
    @@current = exchange
  end
end # module
require 'currency/exchange/rate'
require 'currency/exchange/rate/source'
| 29.183673
| 129
| 0.697203
|
5d5a4c91e254f0e084dd7eac194c8b0f69cc3b33
| 3,627
|
require 'text_alignment'
# View helpers for displaying, linking and transforming document annotations.
module AnnotationsHelper
  # Returns the denotation count to render for a project/doc/span, or a
  # "blinded" icon when the current user may not access the annotations.
  def annotations_count_helper(project, doc = nil, span = nil)
    # When no project is given but the doc belongs to exactly one, use it.
    project = doc.projects.first if project.nil? && doc.projects_num == 1
    if project
      if project.annotations_accessible?(current_user)
        if doc.present?
          doc.get_denotations_count(project.id, span)
        else
          project.denotations_num
        end
      else
        '<i class="fa fa-bars" aria-hidden="true" title="blinded"></i>'.html_safe
      end
    else
      if doc.present?
        doc.get_denotations_count(nil, span)
      else
        raise "count of all denotations?"
      end
    end
  end

  # Absolute .../annotations URL for the current page, with any view-mode
  # suffixes (/visualize, /list_view, /merge_view) stripped first.
  def annotations_url
    "#{url_for(only_path: false)}".sub('/visualize', '').sub('/list_view', '').sub('/merge_view', '').sub('/annotations', '') + '/annotations'
  end

  # Same as annotations_url but path-only.
  def annotations_path
    "#{url_for(only_path: true)}".sub('/visualize', '').sub('/list_view', '').sub('/merge_view', '').sub('/annotations', '') + '/annotations'
  end

  # JSON variant of annotations_path, preserving the request query string.
  def annotations_json_path
    url_query = URI.parse( request.fullpath ).query
    url_query = "?#{url_query}" if url_query.present?
    "#{ annotations_path }.json#{ url_query }"
  end

  # Builds an external editor URL, substituting the '_annotations_url_'
  # placeholder with the actual annotations JSON URL.
  # NOTE(review): this mutates editor.parameters in place, so the
  # placeholder is destroyed after the first call on a given editor object.
  def editor_annotation_url(editor, source_url)
    editor.parameters.each_key{|k| editor.parameters[k] = source_url + '.json' if editor.parameters[k] == '_annotations_url_'}
    parameters_str = editor.parameters.map{|p| p.join('=')}.join('&')
    connector = editor.url.include?('?') ? '&' : '?'
    url = "#{editor.url}#{connector}#{parameters_str}"
  end

  # Renders a tab link to an external editor; additionally passes the
  # project's TextAE config for TextAE editors. Shares the placeholder
  # substitution (and its mutation caveat) with editor_annotation_url.
  def link_to_editor(project, editor, source_url)
    editor.parameters.each_key{|k| editor.parameters[k] = source_url + '.json' if editor.parameters[k] == '_annotations_url_'}
    editor.parameters[:config] = project.textae_config if editor.name =~ /^TextAE/ && project && project.textae_config.present?
    parameters_str = editor.parameters.map{|p| p.join('=')}.join('&')
    connector = editor.url.include?('?') ? '&' : '?'
    url = "#{editor.url}#{connector}#{parameters_str}"
    link_to editor.name, url, :class => 'tab', :title => editor.description
  end

  # Converts absolute span params (begin/end within the doc) into focus
  # offsets relative to the displayed context window. Returns nil when the
  # required params are absent.
  def get_focus(options)
    if options.present? && options[:params].present? && options[:params][:begin].present? && options[:params][:context_size]
      sbeg = options[:params][:begin].to_i
      send = options[:params][:end].to_i
      context_size = options[:params][:context_size].to_i
      fbeg = (context_size < sbeg) ? context_size : sbeg
      fend = send - sbeg + fbeg
      {begin: fbeg, end: fend}
    end
  end

  # Shifts each denotation's span from document coordinates into the
  # displayed window's coordinates (mutates the passed denotations).
  def set_denotations_begin_end(denotations, options)
    denotations.each do |d|
      d[:span][:begin] -= options[:params][:begin].to_i
      d[:span][:end] -= options[:params][:begin].to_i
      if options[:params][:context_size].present?
        d[:span][:begin] += options[:params][:context_size].to_i
        d[:span][:end] += options[:params][:context_size].to_i
      end
    end
    return denotations
  end

  # Route for obtaining annotations: per-span, per-doc or per-project,
  # depending on which params are present.
  def annotations_obtain_path
    if params[:sourceid].present?
      if params[:begin].present?
        annotations_obtain_in_span_project_sourcedb_sourceid_docs_path(@project.name, @doc.sourcedb, @doc.sourceid, params[:begin], params[:end])
      else
        annotations_obtain_project_sourcedb_sourceid_docs_path(@project.name, @doc.sourcedb, @doc.sourceid)
      end
    else
      project_annotations_obtain_path(@project.name)
    end
  end

  # Form action for annotation submission: by doc id when available,
  # otherwise by sourcedb/sourceid.
  def annotations_form_action_helper
    if params[:id].present?
      annotations_project_doc_path(@project.name, @doc.id)
    else
      annotations_generate_project_sourcedb_sourceid_docs_path(@project.name, @doc.sourcedb, @doc.sourceid)
    end
  end

  # "sourcedb-sourceid" label for an annotations payload.
  def get_doc_info (annotations)
    sourcedb = annotations[:sourcedb]
    sourceid = annotations[:sourceid]
    docinfo = "#{sourcedb}-#{sourceid}"
  end
end
| 34.542857
| 141
| 0.6992
|
e205741d597d6917039f9e593acb8875e608ec25
| 1,786
|
# -*- ruby -*-
# encoding: utf-8
require File.expand_path("lib/google/cloud/gaming/v1/version", __dir__)
# Packaging metadata for the generated google-cloud-gaming-v1 client gem.
Gem::Specification.new do |gem|
  gem.name          = "google-cloud-gaming-v1"
  gem.version       = Google::Cloud::Gaming::V1::VERSION

  gem.authors       = ["Google LLC"]
  gem.email         = "googleapis-packages@google.com"
  gem.description   = "With Game Servers, studios and publishers can deploy and manage their game server infrastructure hosted on multiple Agones clusters around the world through a single interface. Note that google-cloud-gaming-v1 is a version-specific client library. For most uses, we recommend installing the main client library google-cloud-gaming instead. See the readme for more details."
  gem.summary       = "API Client library for the Cloud Gaming V1 API"
  gem.homepage      = "https://github.com/googleapis/google-cloud-ruby"
  gem.license       = "Apache-2.0"

  gem.platform      = Gem::Platform::RUBY

  # Ship the generated library, its reference docs and top-level docs.
  gem.files         = `git ls-files -- lib/*`.split("\n") +
                      `git ls-files -- proto_docs/*`.split("\n") +
                      ["README.md", "LICENSE.md", "AUTHENTICATION.md", ".yardopts"]
  gem.require_paths = ["lib"]

  gem.required_ruby_version = ">= 2.5"

  gem.add_dependency "gapic-common", "~> 0.4"
  gem.add_dependency "google-cloud-errors", "~> 1.0"

  gem.add_development_dependency "google-style", "~> 1.25.1"
  gem.add_development_dependency "minitest", "~> 5.14"
  gem.add_development_dependency "minitest-focus", "~> 1.1"
  gem.add_development_dependency "minitest-rg", "~> 5.2"
  gem.add_development_dependency "rake", ">= 12.0"
  gem.add_development_dependency "redcarpet", "~> 3.0"
  gem.add_development_dependency "simplecov", "~> 0.18"
  gem.add_development_dependency "yard", "~> 0.9"
end
| 47
| 396
| 0.676372
|
18fe064a68fed929b77f0e0f5c79cb2004b54edf
| 925
|
# Homebrew cask for the Deezer desktop client.
cask "deezer" do
  version "5.30.170"
  sha256 "c090cf238a09a36b2533abf2ac7969afc801312fc20ddf4c5e6c4ffcaccc4d74"

  # The artifact URL embeds the version, so livecheck below tracks the
  # redirect target of the generic download endpoint.
  url "https://www.deezer.com/desktop/download/artifact/darwin/x64/#{version}"
  name "Deezer"
  desc "Music player"
  homepage "https://www.deezer.com/download"

  livecheck do
    url "https://www.deezer.com/desktop/download?platform=darwin&architecture=x64"
    strategy :header_match
  end

  # The app updates itself, so brew need not force-upgrade it.
  auto_updates true
  depends_on macos: ">= :yosemite"

  app "Deezer.app"

  # Residual files removed by `brew uninstall --zap`.
  zap trash: [
    "~/Library/Application Support/Caches/deezer-desktop-updater",
    "~/Library/Application Support/deezer-desktop",
    "~/Library/Caches/com.deezer.deezer*",
    "~/Library/Logs/Deezer",
    "~/Library/Logs/deezer-desktop",
    "~/Library/Preferences/ByHost/com.deezer.*",
    "~/Library/Preferences/com.deezer.deezer-desktop.plist",
    "~/Library/Saved Application State/com.deezer.deezer-desktop.savedState",
  ]
end
| 29.83871
| 82
| 0.722162
|
f79a44b89d9cdb8eb80fac3c96d95e4df2325ccd
| 48
|
# Namespace for the SparkPost client gem.
module SparkPost
  # Current gem release (semantic versioning). Frozen so the constant's
  # string cannot be mutated at runtime.
  VERSION = '0.1.2'
  VERSION.freeze
end
| 12
| 26
| 0.708333
|
e287f5799c7429e3fa36190c1c2229edb5390d49
| 1,116
|
# == Schema Information
#
# Table name: weather_periods
#
# id :integer not null, primary key
# weather_id :integer
# start_time :datetime
# precipitation_type :string(255)
# wind_speed :float
# created_at :datetime
# updated_at :datetime
# summary :string(255)
# icon :string(255)
# precipitation_intensity :float
# precipitation_probability :float
# temperature :float
# dew_point :float
# humidity :float
# wind_bearing :float
# visibility :float
# cloud_cover :float
# pressure :float
# ozone :float
#
require 'test_helper'
class WeatherPeriodTest < ActiveSupport::TestCase
  # Smoke test for WeatherPeriod.at_hour.
  # NOTE(review): the created :weather record is never referenced by the
  # query below, and at_hour is asked for a time two weeks in the past --
  # unless the factory backdates its periods, the assertion's outcome does
  # not depend on the created record. Verify against the factory definition.
  test "weather at_hour method returning weather" do
    weather = create(:weather)
    weatherdata = WeatherPeriod.at_hour(2.weeks.ago.beginning_of_hour)
    assert_not_nil weatherdata, "Weather should return data for period"
  end
end
| 30.162162
| 70
| 0.55914
|
ac3cba377b9efd03a4e9902dcb501410f92b9beb
| 2,242
|
#
# Cookbook Name:: gitlab
# Recipe:: mysql
#
# Copyright 2012, Seth Vargo
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Build/install the mysql2 gem against the configured MySQL client version.
mysql2_chef_gem 'default' do
  client_version node['mysql']['version'] if node['mysql'] && node['mysql']['version']
  action :install
end

# Enable secure password generation
::Chef::Recipe.send(:include, Opscode::OpenSSL::Password)
# Generate the DB password once and keep it across runs.
# NOTE(review): node.set_unless is deprecated in modern Chef -- confirm the
# pinned Chef version still supports it before upgrading.
node.set_unless['gitlab']['database']['password'] = secure_password

# Persist the generated password to the Chef server (impossible in solo mode).
ruby_block 'save node data' do
  block do
    node.save
  end
  not_if { Chef::Config[:solo] }
end

# install mysql database
mysql_service 'default' do
  port '3306'
  version node['mysql']['version'] if node['mysql'] && node['mysql']['version']
  initial_root_password node['mysql']['server_root_password']
  action [:create, :start]
end

# Helper variables
database          = node['gitlab']['database']['database']
database_user     = node['gitlab']['database']['username']
database_password = node['gitlab']['database']['password']
database_userhost = node['gitlab']['database']['userhost']
database_host     = node['gitlab']['database']['host']

# Root connection used by the database/user resources below.
database_connection = {
  host:     database_host,
  username: 'root',
  password: node['mysql']['server_root_password']
}

# Create the database
mysql_database database do
  connection database_connection
  action :create
end

# Create the database user
mysql_database_user database_user do
  connection database_connection
  password database_password
  host database_userhost
  database_name database
  action :create
end

# Grant all privileges to user on database
mysql_database_user database_user do
  connection database_connection
  database_name database
  privileges ['SELECT', 'LOCK TABLES', 'INSERT', 'UPDATE', 'DELETE', 'CREATE', 'DROP', 'INDEX', 'ALTER']
  action :grant
end
| 29.5
| 104
| 0.742194
|
bbfb71406d43cd8d1ced8786c903519e2e4fd591
| 594
|
require_relative '../../lib/LitleOnline'
#Void
# Sample: void a previously submitted transaction and report the outcome.
# litleTxnId is the Litle Transaction Id returned on the deposit/refund.
void_request = {
  'litleTxnId' => '100000000000000001',
  'id' => 'test'
}

response = LitleOnline::LitleOnlineRequest.new.void(void_request)

# Display the gateway's answer.
void_result = response.voidResponse
puts "Response: #{void_result.response}"
puts "Message: #{void_result.message}"
puts "Litle Transaction ID: #{void_result.litleTxnId}"

# Anything other than an acknowledgement means the void was not accepted.
unless void_result.message == 'Transaction Received'
  raise ArgumentError, "LitleVoidTransaction has not been Approved", caller
end
| 31.263158
| 78
| 0.765993
|
1ac966cfe5ab3c0a59e9d2c348b3befa6260d7be
| 3,905
|
require 'rails_helper'
RSpec.describe DocAuthRouter do
  # Client selection is driven by two Figaro settings: doc_auth_vendor and
  # the legacy acuant_simulator flag.
  describe '.client' do
    before do
      allow(Figaro.env).to receive(:doc_auth_vendor).and_return(doc_auth_vendor)
      allow(Figaro.env).to receive(:acuant_simulator).and_return(acuant_simulator)
    end

    # Empty vendor + simulator flag is the legacy way to request the mock.
    context 'legacy mock configuration' do
      let(:doc_auth_vendor) { '' }
      let(:acuant_simulator) { 'true' }

      it 'is the mock client' do
        expect(DocAuthRouter.client).to be_a(IdentityDocAuth::Mock::DocAuthMockClient)
      end
    end

    context 'for acuant' do
      let(:doc_auth_vendor) { 'acuant' }
      let(:acuant_simulator) { '' }

      # Acuant is wrapped in the error-translating proxy tested below.
      it 'is a translation-proxied acuant client' do
        expect(DocAuthRouter.client).to be_a(DocAuthRouter::AcuantErrorTranslatorProxy)
        expect(DocAuthRouter.client.client).to be_a(IdentityDocAuth::Acuant::AcuantClient)
      end
    end

    context 'for lexisnexis' do
      let(:doc_auth_vendor) { 'lexisnexis' }
      let(:acuant_simulator) { '' }

      it 'is the lexisnexis client' do
        expect(DocAuthRouter.client).to be_a(IdentityDocAuth::LexisNexis::LexisNexisClient)
      end
    end

    context 'other config' do
      let(:doc_auth_vendor) { 'unknown' }
      let(:acuant_simulator) { '' }

      it 'errors' do
        expect { DocAuthRouter.client }.to raise_error(RuntimeError)
      end
    end
  end

  describe '.notify_exception' do
    let(:exception) { RuntimeError.new }

    it 'notifies NewRelic' do
      expect(NewRelic::Agent).to receive(:notice_error).with(exception)

      DocAuthRouter.notify_exception(exception)
    end

    context 'with custom params' do
      let(:params) { { count: 1 } }

      it 'forwards on custom_params to NewRelic' do
        expect(NewRelic::Agent).to receive(:notice_error).with(exception, custom_params: params)

        DocAuthRouter.notify_exception(exception, params)
      end
    end
  end

  # The proxy rewrites vendor error strings into localized, user-friendly
  # messages; unknown strings fall back to a generic error.
  describe DocAuthRouter::AcuantErrorTranslatorProxy do
    subject(:proxy) do
      DocAuthRouter::AcuantErrorTranslatorProxy.new(IdentityDocAuth::Mock::DocAuthMockClient.new)
    end

    it 'translates errors[:results] using FriendlyError' do
      IdentityDocAuth::Mock::DocAuthMockClient.mock_response!(
        method: :get_results,
        response: IdentityDocAuth::Response.new(
          success: false,
          errors: {
            some_other_key: ['will not be translated'],
            results: [
              'The 2D barcode could not be read',
              'Some unknown error that will be the generic message',
            ],
          },
        ),
      )

      # Use :es to prove translation respects the active locale.
      response = I18n.with_locale(:es) { proxy.get_results(instance_id: 'abcdef') }

      expect(response.errors[:some_other_key]).to eq(['will not be translated'])
      expect(response.errors[:results]).to match_array(
        [
          I18n.t('errors.doc_auth.general_error', locale: :es),
          I18n.t('friendly_errors.doc_auth.barcode_could_not_be_read', locale: :es),
        ],
      )
    end

    it 'translates generic network errors' do
      IdentityDocAuth::Mock::DocAuthMockClient.mock_response!(
        method: :get_results,
        response: IdentityDocAuth::Response.new(
          success: false,
          errors: {
            network: true,
          },
        ),
      )

      response = proxy.get_results(instance_id: 'abcdef')

      expect(response.errors[:network]).to eq(I18n.t('errors.doc_auth.acuant_network_error'))
    end

    it 'translates generic selfie errors' do
      IdentityDocAuth::Mock::DocAuthMockClient.mock_response!(
        method: :get_results,
        response: IdentityDocAuth::Response.new(
          success: false,
          errors: {
            selfie: true,
          },
        ),
      )

      response = proxy.get_results(instance_id: 'abcdef')

      expect(response.errors[:selfie]).to eq(I18n.t('errors.doc_auth.selfie'))
    end
  end
end
| 97
| 0.641741
|
bbeeb95de1819c3b66a609ed58e0a175a16ff851
| 8,447
|
# frozen_string_literal: true
module Hackbot
module Interactions
module Concerns
# rubocop:disable Metrics/ModuleLength
module Mirrorable
extend ActiveSupport::Concern
MIRROR_CHANNEL = Rails.application.secrets.hackbot_mirror_channel_id
included do
before_handle :mirror_incoming_event
alias_method :_send_msg, :send_msg
alias_method :_send_file, :send_file
alias_method :_update_action_source, :update_action_source
define_method :send_msg do |channel, msg|
resp = _send_msg(channel, msg)
msg = resp[:message]
mirror_msg(bot_slack_user, channel, msg[:ts], msg)
resp
end
define_method :send_file do |channel, filename, file|
resp = _send_file(channel, filename, file)
file = resp[:file]
mirror_file(bot_slack_user, channel, file[:timestamp], filename)
resp
end
define_method :update_action_source do |msg, action_event = event|
resp = _update_action_source(msg)
timestamp = Time.now.to_i
mirror_action_source_update(bot_slack_user, action_event[:channel],
timestamp, action_event[:msg], msg)
resp
end
end
private
def mirror_incoming_event
channel = event[:channel]
ts = event[:ts]
if msg
mirror_msg(current_slack_user, channel, ts, event)
elsif action
mirror_action(current_slack_user, channel, ts, action[:text])
end
end
def mirror_msg(slack_user, channel, timestamp, msg_event)
if msg_event[:attachments]&.any?
mirror_rich_msg(slack_user, channel, timestamp, msg_event)
else
mirror_plain_msg(slack_user, channel, timestamp, msg_event)
end
end
# Mirror a plain text message
def mirror_plain_msg(slack_user, channel, timestamp, msg_event)
attachments = [
text: msg_event[:text],
fallback: mirror_copy(
'mirror_plain.fallback',
slack_mention: mention_for(slack_user),
text: msg_event[:text]
),
**attachment_template(slack_user, channel, timestamp)
]
_send_msg(MIRROR_CHANNEL, attachments: attachments)
end
# Mirror a message that includes more than just text
def mirror_rich_msg(slack_user, channel, timestamp, msg_event)
fallback = mirror_copy('mirror_rich.fallback',
slack_mention: mention_for(slack_user))
attachments = [
{ **attachment_template(slack_user, channel, timestamp),
fallback: fallback },
*rich_msg_to_attachments(msg_event)
]
_send_msg(MIRROR_CHANNEL, attachments: attachments)
end
def mirror_file(slack_user, channel, timestamp, filename)
copy_params = { filename: filename,
slack_mention: mention_for(slack_user) }
_send_msg(
MIRROR_CHANNEL,
attachments: [
text: mirror_copy('mirror_file.text', copy_params),
fallback: mirror_copy('mirror_file.fallback', copy_params),
**attachment_template(slack_user, channel, timestamp)
]
)
end
def mirror_action(slack_user, channel, timestamp, action_text)
copy_params = { action_text: action_text,
slack_mention: mention_for(slack_user) }
_send_msg(
MIRROR_CHANNEL,
attachments: [
text: mirror_copy('mirror_action.text', copy_params),
fallback: mirror_copy('mirror_action.fallback', copy_params),
**attachment_template(slack_user, channel, timestamp)
]
)
end
def mirror_action_source_update(slack_user, channel, timestamp, old_msg,
new_msg)
_send_msg(
MIRROR_CHANNEL,
attachments: action_update_attachments(
slack_user, channel, timestamp, old_msg, new_msg
)
)
end
def action_update_attachments(slack_user, channel, timestamp, old_msg,
new_msg)
[
{ fallback: mirror_copy('mirror_action_source_update.fallback'),
**attachment_template(slack_user, channel, timestamp) },
{ text: mirror_copy('mirror_action_source_update.old_msg_pre'),
color: attachment_color },
*rich_msg_to_attachments(old_msg),
{ text: mirror_copy('mirror_action_source_update.new_msg_pre'),
color: attachment_color },
*rich_msg_to_attachments(new_msg)
]
end
def rich_msg_to_attachments(msg_event)
attachments = []
attachments << { text: msg_event[:text] } if msg_event[:text].present?
attachments += msg_event[:attachments] if msg_event[:attachments]
attachments.each { |a| a[:color] = attachment_color }
attachments
end
def attachment_template(slack_user, channel, timestamp)
{
color: attachment_color,
author_name: mention_for(slack_user),
author_icon: slack_user[:profile][:image_72],
footer: mirror_copy('template.footer',
source: source_for_channel(channel),
interaction: self.class,
id: id),
ts: timestamp
}
end
def bot_slack_user
@_bot_slack_user ||= SlackClient::Users.info(
team.bot_user_id,
access_token
)[:user]
end
def mention_for(slack_user)
'@' + slack_user[:name]
end
def attachment_color
color_hex_from_string(id.to_s)
end
def color_hex_from_string(str)
Digest::MD5.hexdigest(str)[0, 6]
end
# Channel types are categorized by the first character in their ID.
#
# Here's the mapping for first characters:
#
# C -> public channel
# G -> private channel or multiparty IM
# D -> direct message
def source_for_channel(channel_id)
case channel_id
when /^C/ then source_for_public_channel(channel_id)
when /^G/ then source_for_private_channel_or_mpim(channel_id)
when /^D/ then source_for_dm(channel_id)
else mirror_copy('template.source.unknown')
end
end
def source_for_public_channel(channel_id)
channel = SlackClient::Channels.info(channel_id,
access_token)[:channel]
mirror_copy('template.source.public_channel',
channel_name: channel[:name])
end
def source_for_private_channel_or_mpim(channel_id)
group = SlackClient::Groups.info(channel_id, access_token)[:group]
if group[:is_mpim]
source_for_mpim(channel_id)
else
source_for_private_channel(channel_id)
end
end
def source_for_private_channel(channel_id)
group = SlackClient::Groups.info(channel_id, access_token)[:group]
mirror_copy('template.source.private_channel',
channel_name: group[:name])
end
def source_for_mpim(mpim_id)
mpim = SlackClient::Mpim.info(mpim_id, access_token)
mentions = mpim[:members].map do |m|
mention_for(SlackClient::Users.info(m, access_token)[:user])
end
mirror_copy('template.source.mpim', slack_mentions: mentions)
end
def source_for_dm(im_id)
im = SlackClient::Im.info(im_id, access_token)
user = SlackClient::Users.info(im[:user], access_token)[:user]
mirror_copy('template.source.dm', slack_mention: mention_for(user))
end
def mirror_copy(key, interpolations = {})
  # All mirror templates live under the shared 'concerns/mirrorable' scope.
  copy(key, interpolations, 'concerns/mirrorable')
end
end
# rubocop:enable Metrics/ModuleLength
end
end
end
| 32.117871
| 80
| 0.580561
|
7ae85e71532571df7357705b8b0758411f5e857a
| 918
|
#
# To learn more about a Podspec see http://guides.cocoapods.org/syntax/podspec.html.
# Run `pod lib lint advanced_in_app_review.podspec` to validate before publishing.
#
Pod::Spec.new do |s|
  # Identity
  s.name             = 'advanced_in_app_review'
  s.version          = '0.0.1'
  s.summary          = 'Advanced In App Reviews for iOS and Android'
  s.description      = <<-DESC
Advanced In App Reviews for iOS and Android
  DESC
  s.homepage         = 'http://example.com'
  s.license          = { file: '../LICENSE' }
  s.author           = { 'Your Company' => 'email@example.com' }

  # Source layout and requirements
  s.source           = { path: '.' }
  s.source_files     = 'Classes/**/*'
  s.platform         = :ios, '9.0'
  s.dependency 'Flutter'

  # Flutter.framework does not contain an i386 slice, so exclude that arch
  # for simulator builds.
  s.pod_target_xcconfig = { 'DEFINES_MODULE' => 'YES', 'EXCLUDED_ARCHS[sdk=iphonesimulator*]' => 'i386' }
  s.swift_version    = '5.0'
end
| 38.25
| 105
| 0.604575
|
33aed707071adaa1b57bffe6905a23351f28c3ad
| 1,992
|
# User account model (Rails Tutorial pattern): supports persistent
# "remember me" sessions, e-mail account activation, and password resets.
# Each feature uses a random token kept in memory; only its digest is
# stored in the database.
class User < ApplicationRecord
  # Virtual (non-persisted) tokens; the matching *_digest columns hold hashes.
  attr_accessor :remember_token, :activation_token, :reset_token
  before_save :downcase_email
  before_create :create_activation_digest
  VALID_EMAIL_REGEX = /\A[\w+\-.]+@[a-z\d\-]+(\.[a-z\d\-]+)*\.[a-z]+\z/i
  validates :name, presence: true, length: {maximum:50}
  validates :email, presence: true, length: {maximum:255},
            format: { with: VALID_EMAIL_REGEX },
            uniqueness: {case_sensitive: false}
  # allow_nil lets profile edits omit the password; has_secure_password
  # still requires one when the record is first created.
  validates :password, presence: true, length: {minimum:6}, allow_nil: true
  has_secure_password

  # Returns the BCrypt hash of the given string. Uses the minimum cost in
  # test environments for speed.
  def self.digest(string)
    cost = ActiveModel::SecurePassword.min_cost ? BCrypt::Engine::MIN_COST :
           BCrypt::Engine.cost
    BCrypt::Password.create(string, cost: cost)
  end

  # Returns a random URL-safe base64 token.
  def self.new_token
    SecureRandom.urlsafe_base64
  end

  # Remembers a user in the database for use in persistent sessions.
  def remember
    self.remember_token = User.new_token
    update_attribute(:remember_digest, User.digest(remember_token))
  end

  # True if +token+ matches the stored digest for +attribute+
  # (:remember, :activation or :reset).
  def authenticated?(attribute, token)
    digest = send("#{attribute}_digest")
    return false if digest.nil?
    BCrypt::Password.new(digest).is_password?(token)
  end

  # Forgets a user, invalidating persistent sessions.
  def forget
    update_attribute(:remember_digest, nil)
  end

  # Marks the account as activated and timestamps the activation.
  def activate
    update_attribute(:activated, true)
    update_attribute(:activated_at, Time.zone.now)
  end

  # Sends the account-activation email.
  def send_activation_email
    UserMailer.account_activation(self).deliver_now
  end

  # Creates and stores a fresh password-reset token/digest pair.
  def create_reset_digest
    self.reset_token = User.new_token
    update_attribute(:reset_digest, User.digest(reset_token))
    update_attribute(:reset_sent_at, Time.zone.now)
  end

  # Sends the password-reset email.
  def send_password_reset_email
    UserMailer.password_reset(self).deliver_now
  end

  # True when the reset link is older than the 2-hour expiry window.
  def password_reset_expired?
    reset_sent_at < 2.hours.ago
  end

  private

  # Normalizes the e-mail address before every save (pairs with the
  # case-insensitive uniqueness validation).
  def downcase_email
    self.email = email.downcase
  end

  # Builds the activation token/digest before the record is first created.
  def create_activation_digest
    self.activation_token = User.new_token
    self.activation_digest = User.digest(activation_token)
  end
end
| 26.918919
| 76
| 0.702811
|
18074707d5974ae41e496f11861575aa157076ca
| 7,792
|
# May, 2015
#
# Copyright (c) 2015-2016 Cisco and/or its affiliates.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
require 'cisco_node_utils' if Puppet.features.cisco_node_utils?
begin
require 'puppet_x/cisco/autogen'
rescue LoadError # seen on master, not on agent
# See longstanding Puppet issues #4248, #7316, #14073, #14149, etc. Ugh.
require File.expand_path(File.join(File.dirname(__FILE__), '..', '..', '..',
'puppet_x', 'cisco', 'autogen.rb'))
end
# Puppet provider for the cisco_ospf_vrf type. Reads current OSPF VRF
# state through Cisco::RouterOspfVrf (cisco_node_utils) and stages changes
# in @property_flush until #flush applies them to the device.
Puppet::Type.type(:cisco_ospf_vrf).provide(:cisco) do
  desc 'The Cisco provider.'
  confine feature: :cisco_node_utils
  defaultfor operatingsystem: :nexus
  mk_resource_methods
  # Property symbol array for method auto-generation.
  OSPF_VRF_NON_BOOL_PROPS = [
    :default_metric, :log_adjacency, :redistribute, :router_id,
    :timer_throttle_lsa_start, :timer_throttle_lsa_hold,
    :timer_throttle_lsa_max, :timer_throttle_spf_start,
    :timer_throttle_spf_hold, :timer_throttle_spf_max
  ]
  OSPF_VRF_BOOL_PROPS = [
    :bfd
  ]
  OSPF_VRF_ALL_PROPS = OSPF_VRF_NON_BOOL_PROPS + OSPF_VRF_BOOL_PROPS
  # Auto-generate getters/setters for the listed properties, backed by the
  # @vrf node_utils object and staged via @property_flush.
  PuppetX::Cisco::AutoGen.mk_puppet_methods(:non_bool, self, '@vrf',
                                            OSPF_VRF_NON_BOOL_PROPS)
  PuppetX::Cisco::AutoGen.mk_puppet_methods(:bool, self, '@vrf',
                                            OSPF_VRF_BOOL_PROPS)

  # Caches the node_utils VRF object matching this resource, when present.
  def initialize(value={})
    super(value)
    ospf = @property_hash[:ospf]
    vrf = @property_hash[:vrf]
    @vrf = Cisco::RouterOspfVrf.vrfs[ospf][vrf] unless ospf.nil?
    @property_flush = {}
  end

  # Builds a provider instance describing the current device state of one
  # OSPF VRF (used by self.instances / prefetch).
  def self.properties_get(ospf, name, vrf)
    debug "Checking ospf instance, #{ospf} #{name}"
    current_state = {
      name: "#{ospf} #{name}",
      ospf: ospf,
      vrf: name,
      ensure: :present,
    }
    # Call node_utils getter for each property
    OSPF_VRF_NON_BOOL_PROPS.each do |prop|
      current_state[prop] = vrf.send(prop)
    end
    OSPF_VRF_BOOL_PROPS.each do |prop|
      val = vrf.send(prop)
      if val.nil?
        current_state[prop] = nil
      else
        # Puppet expects :true/:false symbols rather than Ruby booleans.
        current_state[prop] = val ? :true : :false
      end
    end
    # Special Cases
    current_state[:redistribute] = vrf.redistribute
    # Display cost_value in MBPS
    cost_value, cost_type = vrf.auto_cost
    cost_value *= 1000 if
      cost_type == Cisco::RouterOspfVrf::OSPF_AUTO_COST[:gbps]
    current_state[:auto_cost] = cost_value
    debug current_state
    new(current_state)
  end # self.properties_get

  # Enumerates every OSPF VRF found on the device.
  def self.instances
    vrf_instances = []
    Cisco::RouterOspfVrf.vrfs.each do |ospf, vrfs|
      vrfs.each do |name, vrf|
        begin
          vrf_instances << properties_get(ospf, name, vrf)
        end
      end
    end
    vrf_instances
  end # self.instances

  # Matches catalog resources to discovered device instances by name.
  def self.prefetch(resources)
    vrf_instances = instances
    resources.keys.each do |name|
      provider = vrf_instances.find { |vrf| vrf.name == name }
      resources[name].provider = provider unless provider.nil?
    end
  end # self.prefetch

  def exists?
    @property_hash[:ensure] == :present
  end

  def create
    @property_flush[:ensure] = :present
  end

  def destroy
    fail 'VRF default cannot be removed by cisco_ospf_vrf. Use cisco_ospf to remove the entire OSPF process including the default VRF.' if @resource[:vrf] == 'default'
    @property_flush[:ensure] = :absent
  end

  # Pushes staged property values to the device. When new_vrf is true the
  # resource's desired values are first staged via the autogen setters.
  def properties_set(new_vrf=false)
    OSPF_VRF_ALL_PROPS.each do |prop|
      next unless @resource[prop]
      send("#{prop}=", @resource[prop]) if new_vrf
      unless @property_flush[prop].nil?
        @vrf.send("#{prop}=", @property_flush[prop]) if
          @vrf.respond_to?("#{prop}=")
      end
    end
    # Set methods that are not autogenerated follow.
    auto_cost_set unless @resource[:auto_cost].nil?
    timer_throttle_lsa_set
    timer_throttle_spf_set
  end

  # convert auto_cost to Mbps to match manifest.
  def convert_cost_type(value, type)
    value *= 1000 if type == Cisco::RouterOspfVrf::OSPF_AUTO_COST[:gbps]
    value
  end

  def default_auto_cost_mbps
    default_value, default_type = @vrf.default_auto_cost
    convert_cost_type(default_value, default_type)
  end

  # Reports :default when the manifest asked for the default and the device
  # already matches it, so Puppet sees the resource as in sync.
  def auto_cost
    return :default if
      @resource[:auto_cost] == :default &&
      @property_hash[:auto_cost] == default_auto_cost_mbps
    @property_hash[:auto_cost]
  end

  def auto_cost_set
    if @resource[:auto_cost] == :default
      value = default_auto_cost_mbps
    else
      value = @resource[:auto_cost]
    end
    @vrf.auto_cost_set(value, Cisco::RouterOspfVrf::OSPF_AUTO_COST[:mbps])
  end

  # redistribute uses a nested array, thus requires special handling
  def redistribute
    return @property_hash[:redistribute] if @resource[:redistribute].nil?
    if @resource[:redistribute][0] == :default &&
       @property_hash[:redistribute] == @vrf.default_redistribute
      return [:default]
    else
      @property_hash[:redistribute]
    end
  end

  def redistribute=(should_list)
    should_list = @vrf.default_redistribute if should_list[0] == :default
    @property_flush[:redistribute] = should_list
  end

  # LSA throttle timers must be written as a unit; any value not staged in
  # @property_flush falls back to the current device value.
  def timer_throttle_lsa_set
    return unless @property_flush[:timer_throttle_lsa_start] ||
                  @property_flush[:timer_throttle_lsa_hold] ||
                  @property_flush[:timer_throttle_lsa_max]
    if @property_flush[:timer_throttle_lsa_start]
      start = @property_flush[:timer_throttle_lsa_start]
    else
      start = @vrf.timer_throttle_lsa_start
    end
    if @property_flush[:timer_throttle_lsa_hold]
      hold = @property_flush[:timer_throttle_lsa_hold]
    else
      hold = @vrf.timer_throttle_lsa_hold
    end
    if @property_flush[:timer_throttle_lsa_max]
      max = @property_flush[:timer_throttle_lsa_max]
    else
      max = @vrf.timer_throttle_lsa_max
    end
    @vrf.timer_throttle_lsa_set(start, hold, max)
  end

  # SPF throttle timers: same unit-write pattern as the LSA timers above.
  def timer_throttle_spf_set
    return unless @property_flush[:timer_throttle_spf_start] ||
                  @property_flush[:timer_throttle_spf_hold] ||
                  @property_flush[:timer_throttle_spf_max]
    if @property_flush[:timer_throttle_spf_start]
      start = @property_flush[:timer_throttle_spf_start]
    else
      start = @vrf.timer_throttle_spf_start
    end
    if @property_flush[:timer_throttle_spf_hold]
      hold = @property_flush[:timer_throttle_spf_hold]
    else
      hold = @vrf.timer_throttle_spf_hold
    end
    if @property_flush[:timer_throttle_spf_max]
      max = @property_flush[:timer_throttle_spf_max]
    else
      max = @vrf.timer_throttle_spf_max
    end
    @vrf.timer_throttle_spf_set(start, hold, max)
  end

  # Applies all staged changes: destroys, creates, and/or sets properties.
  def flush
    if @property_flush[:ensure] == :absent
      @vrf.destroy
      @vrf = nil
    else
      if @vrf.nil?
        new_vrf = true
        @vrf = Cisco::RouterOspfVrf.new(@resource[:ospf], @resource[:vrf])
      end
      properties_set(new_vrf)
    end
    puts_config
  end

  # Debug dump of the VRF's current property values after a flush.
  def puts_config
    if @vrf.nil?
      info "Vrf=#{@resource[:name]} is absent."
      return
    end
    # Dump all current properties for this interface
    current = sprintf("\n%30s: %s", 'vrf', @vrf.name)
    OSPF_VRF_ALL_PROPS.each do |prop|
      current.concat(sprintf("\n%30s: %s", prop, @vrf.send(prop)))
    end
    debug current
  end # puts_config
end
| 29.740458
| 167
| 0.678901
|
2845266f53b67fa6579ebe489526bb7196652ada
| 326
|
# OmniAuth-backed login/logout: the session stores only the user id.
class SessionsController < ApplicationController
  def create
    auth_payload = request.env['omniauth.auth']
    user = User.find_or_create_by_auth(auth_payload)
    session[:user_id] = user.id
    # NOTE(review): the user's name is interpolated into HTML markup here;
    # if the view renders this flash with raw/html_safe this is an XSS
    # vector for attacker-controlled OAuth names — confirm the view escapes.
    redirect_to root_path, notice: "Logged in as <b>#{user.name}</b>"
  end

  def destroy
    session[:user_id] = nil
    redirect_to root_path, notice: "Logged out"
  end
end
| 25.076923
| 67
| 0.742331
|
015757fc256ec55d033e1fb2f8377c0bb9edb870
| 259
|
require File.expand_path('../../test_helper', __FILE__)
require 'mocha/inspect'
# Verifies that a plain String falls back to Ruby's default #inspect
# rendering when mocha_inspect is called.
class StringInspectTest < Mocha::TestCase
  def test_should_use_default_inspect_method
    subject = "my_string"
    assert_equal '"my_string"', subject.mocha_inspect
  end
end
| 21.583333
| 55
| 0.760618
|
ab7ff710c21143fec3c1ecb781a9e6c47827843c
| 482
|
# The Book of Ruby - http://www.sapphiresteel.com
# using 'yield' to execute a block
# Demonstrates 'yield' executing a block passed to a method.
def aMethod
  puts('--- In aMethod ---')
  yield
end

aMethod { puts("Good morning") }

# Yields its argument to the caller's block.
def caps(anarg)
  puts('--- In caps method ---')
  yield(anarg)
end

caps("a lowercase string") { |x| x.capitalize!; puts(x) }

puts("And now a block within a block...")

# a block within a block
["hello", "good day", "how do you do"].each do |s|
  caps(s) do |x|
    x.capitalize!
    puts(x)
  end
end
| 16.066667
| 61
| 0.595436
|
f79d00ebd71ff83c08bb816ca48039438d89d27a
| 281
|
module Nis::Endpoint
  module Chain
    module Height
      # Current block-chain height reported by the connected NIS node.
      # @return [Nis::Struct::BlockHeight]
      # @see https://nemproject.github.io/#block-chain-height
      def chain_height
        payload = request!(:get, '/chain/height')
        Nis::Struct::BlockHeight.build(payload)
      end
    end
  end
end
| 23.416667
| 70
| 0.644128
|
bf8b8938aab60019a0690dc392195fefa4340314
| 153
|
module Wikidata
  # Wraps one Wikidata statement hash and exposes its main snak.
  class Statement < Wikidata::HashedObject
    # Lazily builds and memoizes the Snak wrapper for this statement.
    def mainsnak
      @mainsnak ||= begin
        raw = data_hash.mainsnak
        Wikidata::Snak.new(raw)
      end
    end
  end
end
| 19.125
| 58
| 0.705882
|
91f0b9121b437f15ac60a5b0c95b7b9211f5eaef
| 76,837
|
# Copyright 2015 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
require "google/cloud/bigquery/service"
require "google/cloud/bigquery/data"
require "google/cloud/bigquery/encryption_configuration"
require "google/apis/bigquery_v2"
module Google
module Cloud
module Bigquery
##
# # QueryJob
#
# A {Job} subclass representing a query operation that may be performed
# on a {Table}. A QueryJob instance is created when you call
# {Project#query_job}, {Dataset#query_job}.
#
# @see https://cloud.google.com/bigquery/querying-data Querying Data
# @see https://cloud.google.com/bigquery/docs/reference/v2/jobs Jobs API
# reference
#
# @example
# require "google/cloud/bigquery"
#
# bigquery = Google::Cloud::Bigquery.new
#
# job = bigquery.query_job "SELECT COUNT(word) as count FROM " \
# "`bigquery-public-data.samples.shakespeare`"
#
# job.wait_until_done!
#
# if job.failed?
# puts job.error
# else
# puts job.data.first
# end
#
# @example With multiple statements and child jobs:
# require "google/cloud/bigquery"
#
# bigquery = Google::Cloud::Bigquery.new
#
# multi_statement_sql = <<~SQL
# -- Declare a variable to hold names as an array.
# DECLARE top_names ARRAY<STRING>;
# -- Build an array of the top 100 names from the year 2017.
# SET top_names = (
# SELECT ARRAY_AGG(name ORDER BY number DESC LIMIT 100)
# FROM `bigquery-public-data.usa_names.usa_1910_current`
# WHERE year = 2017
# );
# -- Which names appear as words in Shakespeare's plays?
# SELECT
# name AS shakespeare_name
# FROM UNNEST(top_names) AS name
# WHERE name IN (
# SELECT word
# FROM `bigquery-public-data.samples.shakespeare`
# );
# SQL
#
# job = bigquery.query_job multi_statement_sql
#
# job.wait_until_done!
#
# child_jobs = bigquery.jobs parent_job: job
#
# child_jobs.each do |child_job|
# script_statistics = child_job.script_statistics
# puts script_statistics.evaluation_kind
# script_statistics.stack_frames.each do |stack_frame|
# puts stack_frame.text
# end
# end
#
class QueryJob < Job
##
# Checks if the priority for the query is `BATCH`.
#
# @return [Boolean] `true` when the priority is `BATCH`, `false`
# otherwise.
#
def batch?
  # Priority is a plain API string; anything other than "BATCH" is not batch.
  priority = @gapi.configuration.query.priority
  priority == "BATCH"
end
##
# Checks if the priority for the query is `INTERACTIVE`.
#
# @return [Boolean] `true` when the priority is `INTERACTIVE`, `false`
# otherwise.
#
def interactive?
  # A missing priority defaults to INTERACTIVE.
  priority = @gapi.configuration.query.priority
  priority.nil? || priority == "INTERACTIVE"
end
##
# Checks if the the query job allows arbitrarily large results at a
# slight cost to performance.
#
# @return [Boolean] `true` when large results are allowed, `false`
# otherwise.
#
def large_results?
  # A missing flag defaults to false.
  flag = @gapi.configuration.query.allow_large_results
  flag.nil? ? false : flag
end
##
# Checks if the query job looks for an existing result in the query
# cache. For more information, see [Query
# Caching](https://cloud.google.com/bigquery/querying-data#querycaching).
#
# @return [Boolean] `true` when the query cache will be used, `false`
# otherwise.
#
def cache?
  # A missing flag defaults to false.
  flag = @gapi.configuration.query.use_query_cache
  flag.nil? ? false : flag
end
##
# If set, don't actually run this job. A valid query will return a
# mostly empty response with some processing statistics, while an
# invalid query will return the same error it would if it wasn't a dry
# run.
#
# @return [Boolean] `true` when the dry run flag is set for the query
# job, `false` otherwise.
#
def dryrun?
  # Raw API boolean; may be nil, which is falsy and treated as "not a dry run".
  @gapi.configuration.dry_run
end
# Historical aliases kept for backward compatibility.
alias dryrun dryrun?
alias dry_run dryrun?
alias dry_run? dryrun?
##
# Checks if the query job flattens nested and repeated fields in the
# query results. The default is `true`. If the value is `false`,
# #large_results? should return `true`.
#
# @return [Boolean] `true` when the job flattens results, `false`
# otherwise.
#
def flatten?
  # A missing flag defaults to true.
  flag = @gapi.configuration.query.flatten_results
  flag.nil? ? true : flag
end
##
# Limits the billing tier for this job. Queries that have resource usage
# beyond this tier will raise (without incurring a charge). If
# unspecified, this will be set to your project default. For more
# information, see [High-Compute
# queries](https://cloud.google.com/bigquery/pricing#high-compute).
#
# @return [Integer, nil] The tier number, or `nil` for the project
# default.
#
def maximum_billing_tier
  # Pure delegation; nil means "use the project default tier".
  @gapi.configuration.query.maximum_billing_tier
end
##
# Limits the bytes billed for this job. Queries that will have bytes
# billed beyond this limit will raise (without incurring a charge). If
# `nil`, this will be set to your project default.
#
# @return [Integer, nil] The number of bytes, or `nil` for the project
# default.
#
def maximum_bytes_billed
  # API value may be a numeric string or nil; coerce, returning nil on failure.
  limit = @gapi.configuration.query.maximum_bytes_billed
  Integer limit
rescue StandardError
  nil
end
##
# Checks if the query results are from the query cache.
#
# @return [Boolean] `true` when the job statistics indicate a cache hit,
# `false` otherwise.
#
def cache_hit?
  # Query statistics are absent until the job has query stats at all.
  query_stats = @gapi.statistics.query
  return false unless query_stats
  query_stats.cache_hit
end
##
# The number of bytes processed by the query.
#
# @return [Integer, nil] Total bytes processed for the job.
#
def bytes_processed
  # Coerce the API's numeric string; nil (or missing stats) yields nil.
  raw = @gapi.statistics.query.total_bytes_processed
  Integer raw
rescue StandardError
  nil
end
##
# Describes the execution plan for the query.
#
# @return [Array<Google::Cloud::Bigquery::QueryJob::Stage>, nil] An
# array containing the stages of the execution plan.
#
# @example
# require "google/cloud/bigquery"
#
# bigquery = Google::Cloud::Bigquery.new
#
# sql = "SELECT word FROM `bigquery-public-data.samples.shakespeare`"
# job = bigquery.query_job sql
#
# job.wait_until_done!
#
# stages = job.query_plan
# stages.each do |stage|
# puts stage.name
# stage.steps.each do |step|
# puts step.kind
# puts step.substeps.inspect
# end
# end
#
def query_plan
  # The plan is only present once the service has reported query statistics.
  plan = @gapi&.statistics&.query&.query_plan
  return nil unless plan
  Array(plan).map { |stage| Stage.from_gapi stage }
end
##
# The type of query statement, if valid. Possible values (new values
# might be added in the future):
#
# * "ALTER_TABLE": DDL statement, see [Using Data Definition Language
# Statements](https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language)
# * "CREATE_MODEL": DDL statement, see [Using Data Definition Language
# Statements](https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language)
# * "CREATE_TABLE": DDL statement, see [Using Data Definition Language
# Statements](https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language)
# * "CREATE_TABLE_AS_SELECT": DDL statement, see [Using Data Definition
# Language Statements](https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language)
# * "CREATE_VIEW": DDL statement, see [Using Data Definition Language
# Statements](https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language)
# * "DELETE": DML statement, see [Data Manipulation Language Syntax](https://cloud.google.com/bigquery/docs/reference/standard-sql/dml-syntax)
# * "DROP_MODEL": DDL statement, see [Using Data Definition Language
# Statements](https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language)
# * "DROP_TABLE": DDL statement, see [Using Data Definition Language
# Statements](https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language)
# * "DROP_VIEW": DDL statement, see [Using Data Definition Language
# Statements](https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language)
# * "INSERT": DML statement, see [Data Manipulation Language Syntax](https://cloud.google.com/bigquery/docs/reference/standard-sql/dml-syntax)
# * "MERGE": DML statement, see [Data Manipulation Language Syntax](https://cloud.google.com/bigquery/docs/reference/standard-sql/dml-syntax)
# * "SELECT": SQL query, see [Standard SQL Query Syntax](https://cloud.google.com/bigquery/docs/reference/standard-sql/query-syntax)
# * "UPDATE": DML statement, see [Data Manipulation Language Syntax](https://cloud.google.com/bigquery/docs/reference/standard-sql/dml-syntax)
#
# @return [String, nil] The type of query statement.
#
def statement_type
  # nil when the job has no query statistics yet.
  query_stats = @gapi.statistics.query
  query_stats && query_stats.statement_type
end
##
# Whether the query is a DDL statement.
#
# @see https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language
# Using Data Definition Language Statements
#
# @return [Boolean]
#
# @example
# require "google/cloud/bigquery"
#
# bigquery = Google::Cloud::Bigquery.new
# query_job = bigquery.query_job "CREATE TABLE my_table (x INT64)"
#
# query_job.statement_type #=> "CREATE_TABLE"
# query_job.ddl? #=> true
#
def ddl?
  # Statement types the API reports for DDL queries.
  %w[
    ALTER_TABLE
    CREATE_MODEL
    CREATE_TABLE
    CREATE_TABLE_AS_SELECT
    CREATE_VIEW
    DROP_MODEL
    DROP_TABLE
    DROP_VIEW
  ].include? statement_type
end
##
# Whether the query is a DML statement.
#
# @see https://cloud.google.com/bigquery/docs/reference/standard-sql/dml-syntax
# Data Manipulation Language Syntax
#
# @return [Boolean]
#
# @example
# require "google/cloud/bigquery"
#
# bigquery = Google::Cloud::Bigquery.new
# query_job = bigquery.query_job "UPDATE my_table " \
# "SET x = x + 1 " \
# "WHERE x IS NOT NULL"
#
# query_job.statement_type #=> "UPDATE"
# query_job.dml? #=> true
#
def dml?
  # Statement types the API reports for DML queries.
  %w[INSERT UPDATE MERGE DELETE].include? statement_type
end
##
# The DDL operation performed, possibly dependent on the pre-existence
# of the DDL target. (See {#ddl_target_table}.) Possible values (new
# values might be added in the future):
#
# * "CREATE": The query created the DDL target.
# * "SKIP": No-op. Example cases: the query is
# `CREATE TABLE IF NOT EXISTS` while the table already exists, or the
# query is `DROP TABLE IF EXISTS` while the table does not exist.
# * "REPLACE": The query replaced the DDL target. Example case: the
# query is `CREATE OR REPLACE TABLE`, and the table already exists.
# * "DROP": The query deleted the DDL target.
#
# @return [String, nil] The DDL operation performed.
#
def ddl_operation_performed
  # nil when the job has no query statistics yet.
  query_stats = @gapi.statistics.query
  query_stats && query_stats.ddl_operation_performed
end
##
# The DDL target routine, in reference state. (See {Routine#reference?}.)
# Present only for `CREATE/DROP FUNCTION/PROCEDURE` queries. (See
# {#statement_type}.)
#
# @return [Google::Cloud::Bigquery::Routine, nil] The DDL target routine, in
# reference state.
#
def ddl_target_routine
  return nil unless @gapi.statistics.query
  ensure_service!
  routine = @gapi.statistics.query.ddl_target_routine
  # nil when the statistics carry no target routine.
  routine && Google::Cloud::Bigquery::Routine.new_reference_from_gapi(routine, service)
end
##
# The DDL target table, in reference state. (See {Table#reference?}.)
# Present only for `CREATE/DROP TABLE/VIEW` queries. (See
# {#statement_type}.)
#
# @return [Google::Cloud::Bigquery::Table, nil] The DDL target table, in
# reference state.
#
def ddl_target_table
  return nil unless @gapi.statistics.query
  ensure_service!
  table = @gapi.statistics.query.ddl_target_table
  # nil when the statistics carry no target table.
  table && Google::Cloud::Bigquery::Table.new_reference_from_gapi(table, service)
end
##
# The number of rows affected by a DML statement. Present only for DML
# statements `INSERT`, `UPDATE` or `DELETE`. (See {#statement_type}.)
#
# @return [Integer, nil] The number of rows affected by a DML statement,
# or `nil` if the query is not a DML statement.
#
def num_dml_affected_rows
  # nil when the job has no query statistics yet.
  query_stats = @gapi.statistics.query
  query_stats && query_stats.num_dml_affected_rows
end
##
# The number of deleted rows. Present only for DML statements `DELETE`,
# `MERGE` and `TRUNCATE`. (See {#statement_type}.)
#
# @return [Integer, nil] The number of deleted rows, or `nil` if not
# applicable.
#
def deleted_row_count
  # dml_stats is only present for DML jobs.
  dml_stats = @gapi.statistics.query&.dml_stats
  dml_stats&.deleted_row_count
end
##
# The number of inserted rows. Present only for DML statements `INSERT`
# and `MERGE`. (See {#statement_type}.)
#
# @return [Integer, nil] The number of inserted rows, or `nil` if not
# applicable.
#
def inserted_row_count
  # dml_stats is only present for DML jobs.
  dml_stats = @gapi.statistics.query&.dml_stats
  dml_stats&.inserted_row_count
end
##
# The number of updated rows. Present only for DML statements `UPDATE`
# and `MERGE`. (See {#statement_type}.)
#
# @return [Integer, nil] The number of updated rows, or `nil` if not
# applicable.
#
def updated_row_count
  # dml_stats is only present for DML jobs.
  dml_stats = @gapi.statistics.query&.dml_stats
  dml_stats&.updated_row_count
end
##
# The table in which the query results are stored.
#
# @return [Table] A table instance.
#
def destination
  # nil when the configuration names no destination table.
  table = @gapi.configuration.query.destination_table
  return nil if table.nil?
  retrieve_table table.project_id, table.dataset_id, table.table_id
end
##
# Checks if the query job is using legacy sql.
#
# @return [Boolean] `true` when legacy sql is used, `false` otherwise.
#
def legacy_sql?
  # A missing flag defaults to true (legacy SQL).
  flag = @gapi.configuration.query.use_legacy_sql
  flag.nil? ? true : flag
end
##
# Checks if the query job is using standard sql.
#
# @return [Boolean] `true` when standard sql is used, `false` otherwise.
#
def standard_sql?
  # Standard SQL and legacy SQL are mutually exclusive modes.
  !legacy_sql?
end
##
# The user-defined function resources used in the query. May be either a
# code resource to load from a Google Cloud Storage URI
# (`gs://bucket/path`), or an inline resource that contains code for a
# user-defined function (UDF). Providing an inline code resource is
# equivalent to providing a URI for a file containing the same code. See
# [User-Defined Functions](https://cloud.google.com/bigquery/docs/reference/standard-sql/user-defined-functions).
#
# @return [Array<String>] An array containing Google Cloud Storage URIs
# and/or inline source code.
#
def udfs
  # Each resource is either inline code or a Cloud Storage URI.
  resources = @gapi.configuration.query.user_defined_function_resources
  return nil unless resources
  Array(resources).map { |udf| udf.inline_code || udf.resource_uri }
end
##
# The encryption configuration of the destination table.
#
# @return [Google::Cloud::BigQuery::EncryptionConfiguration] Custom
# encryption configuration (e.g., Cloud KMS keys).
#
# @!group Attributes
def encryption
  # Wraps the raw destination-table encryption settings (e.g. Cloud KMS keys).
  EncryptionConfiguration.from_gapi @gapi.configuration.query.destination_encryption_configuration
end
###
# Checks if the destination table will be range partitioned. See [Creating and using integer range partitioned
# tables](https://cloud.google.com/bigquery/docs/creating-integer-range-partitions).
#
# @return [Boolean] `true` when the table is range partitioned, or `false` otherwise.
#
# @!group Attributes
#
def range_partitioning?
  # A present range_partitioning config implies a range-partitioned table.
  !@gapi.configuration.query.range_partitioning.nil?
end

###
# The top-level field the destination table will be range partitioned on.
#
# @return [String, nil] The partition field, or `nil` if not range partitioned.
#
# @!group Attributes
#
def range_partitioning_field
  return nil unless range_partitioning?
  @gapi.configuration.query.range_partitioning.field
end

###
# The start of range partitioning, inclusive.
#
# @return [Integer, nil] The start, or `nil` if not range partitioned.
#
# @!group Attributes
#
def range_partitioning_start
  return nil unless range_partitioning?
  @gapi.configuration.query.range_partitioning.range.start
end

###
# The width of each range partition interval.
#
# @return [Integer, nil] The interval width, or `nil` if not range partitioned.
#
# @!group Attributes
#
def range_partitioning_interval
  return nil unless range_partitioning?
  @gapi.configuration.query.range_partitioning.range.interval
end

###
# The end of range partitioning, exclusive.
#
# @return [Integer, nil] The end, or `nil` if not range partitioned.
#
# @!group Attributes
#
def range_partitioning_end
  return nil unless range_partitioning?
  @gapi.configuration.query.range_partitioning.range.end
end
###
# Checks if the destination table will be time-partitioned. See
# [Partitioned Tables](https://cloud.google.com/bigquery/docs/partitioned-tables).
#
# @return [Boolean] `true` when the table will be time-partitioned,
# or `false` otherwise.
#
# @!group Attributes
#
def time_partitioning?
  # A present time_partitioning config implies a time-partitioned table.
  !@gapi.configuration.query.time_partitioning.nil?
end

###
# The time-partition period for the destination table, if any. See
# [Partitioned Tables](https://cloud.google.com/bigquery/docs/partitioned-tables).
#
# @return [String, nil] `DAY`, `HOUR`, `MONTH` or `YEAR`, or `nil` if not
#   time partitioned.
#
# @!group Attributes
#
def time_partitioning_type
  return nil unless time_partitioning?
  @gapi.configuration.query.time_partitioning.type
end

###
# The field the destination table will be partitioned on; `nil` means the
# pseudo column `_PARTITIONTIME` (or the table is not partitioned at all).
#
# @return [String, nil] The partition field, if configured.
#
# @!group Attributes
#
def time_partitioning_field
  return nil unless time_partitioning?
  @gapi.configuration.query.time_partitioning.field
end

###
# The expiration for the destination table partitions, in seconds.
#
# @return [Integer, nil] The expiration time in seconds, or `nil` if unset.
#
# @!group Attributes
#
def time_partitioning_expiration
  # API reports milliseconds; surface seconds.
  tp = @gapi.configuration.query.time_partitioning
  return nil if tp.nil? || tp.expiration_ms.nil?
  tp.expiration_ms / 1_000
end

###
# Whether queries over the destination table will require a partition
# filter usable for partition elimination.
#
# @return [Boolean] `true` when a partition filter will be required.
#
# @!group Attributes
#
def time_partitioning_require_filter?
  tp = @gapi.configuration.query.time_partitioning
  return false if tp.nil? || tp.require_partition_filter.nil?
  tp.require_partition_filter
end
###
# Checks if the destination table will be clustered.
#
# See {QueryJob::Updater#clustering_fields=}, {Table#clustering_fields} and
# {Table#clustering_fields=}.
#
# @see https://cloud.google.com/bigquery/docs/clustered-tables
# Introduction to clustered tables
# @see https://cloud.google.com/bigquery/docs/creating-clustered-tables
# Creating and using clustered tables
#
# @return [Boolean] `true` when the table will be clustered,
# or `false` otherwise.
#
# @!group Attributes
#
def clustering?
  # A present clustering config implies a clustered destination table.
  !@gapi.configuration.query.clustering.nil?
end

###
# The fields on which the destination table will be clustered, in sort
# order. BigQuery supports clustering for both partitioned and
# non-partitioned tables.
#
# @see https://cloud.google.com/bigquery/docs/clustered-tables
#   Introduction to clustered tables
# @see https://cloud.google.com/bigquery/docs/creating-clustered-tables
#   Creating and using clustered tables
#
# @return [Array<String>, nil] The clustering fields, or `nil` when the
#   destination table will not be clustered.
#
# @!group Attributes
#
def clustering_fields
  return nil unless clustering?
  @gapi.configuration.query.clustering.fields
end
##
# Refreshes the job until the job is `DONE`.
# The delay between refreshes will incrementally increase.
#
# @example
#   require "google/cloud/bigquery"
#
#   bigquery = Google::Cloud::Bigquery.new
#
#   sql = "SELECT word FROM `bigquery-public-data.samples.shakespeare`"
#   job = bigquery.query_job sql
#
#   job.wait_until_done!
#   job.done? #=> true
#
def wait_until_done!
  return if done?
  ensure_service!
  loop do
    # Poll jobs.getQueryResults until it reports completion. max: 0 is
    # passed for the row limit — NOTE(review): presumably this requests
    # completion status/schema without row data; confirm against
    # Service#job_query_results.
    query_results_gapi = service.job_query_results job_id, location: location, max: 0
    if query_results_gapi.job_complete
      # Cache the result schema from the poll response for later use
      # (see ensure_schema!/data).
      @destination_schema_gapi = query_results_gapi.schema
      break
    end
  end
  # Re-fetch the full job resource so status and statistics reflect the
  # completed state.
  reload!
end
##
# Retrieves the query results for the job.
#
# @param [String] token Page token, returned by a previous call,
#   identifying the result set.
# @param [Integer] max Maximum number of results to return.
# @param [Integer] start Zero-based index of the starting row to read.
#
# @return [Google::Cloud::Bigquery::Data, nil] An object providing
#   access to data read from the destination table for the job, or
#   `nil` if the job is not yet done.
#
# @example
#   require "google/cloud/bigquery"
#
#   bigquery = Google::Cloud::Bigquery.new
#
#   sql = "SELECT word FROM `bigquery-public-data.samples.shakespeare`"
#   job = bigquery.query_job sql
#
#   job.wait_until_done!
#   data = job.data
#
#   # Iterate over the first page of results
#   data.each do |row|
#     puts row[:word]
#   end
#   # Retrieve the next page of results
#   data = data.next if data.next?
#
def data token: nil, max: nil, start: nil
  # No results are available until the job has finished.
  return nil unless done?
  # A dry run produces no destination table; return an empty result set.
  return Data.from_gapi_json({ rows: [] }, nil, @gapi, service) if dryrun?
  if ddl? || dml?
    # DDL/DML statements return no readable rows; totalRows is left nil
    # intentionally.
    data_hash = { totalRows: nil, rows: [] }
    return Data.from_gapi_json data_hash, nil, @gapi, service
  end
  # Make sure the destination table schema is cached before reading rows.
  ensure_schema!
  data_hash = service.list_tabledata destination_table_dataset_id,
                                     destination_table_table_id,
                                     token: token,
                                     max: max,
                                     start: start
  Data.from_gapi_json data_hash, destination_table_gapi, @gapi, service
end
alias query_results data
##
# Yielded to a block to accumulate changes for a patch request.
class Updater < QueryJob
##
# @private Create an Updater object wrapping the given low-level API
# job representation.
def initialize service, gapi
  super()
  @gapi = gapi
  @service = service
end
##
# @private Create an Updater from an options hash.
#
# @return [Google::Cloud::Bigquery::QueryJob::Updater] A job
#   configuration object for setting query options.
def self.from_options service, query, options
  # Build the job reference (id/prefix) and the default dataset
  # reference from the raw options.
  job_ref = service.job_ref_from options[:job_id], options[:prefix]
  dataset_config = service.dataset_ref_from options[:dataset],
                                            options[:project]
  req = Google::Apis::BigqueryV2::Job.new(
    job_reference: job_ref,
    configuration: Google::Apis::BigqueryV2::JobConfiguration.new(
      query: Google::Apis::BigqueryV2::JobConfigurationQuery.new(
        query: query,
        default_dataset: dataset_config,
        maximum_billing_tier: options[:maximum_billing_tier]
      )
    )
  )
  updater = QueryJob::Updater.new service, req
  # Options guarded with a trailing `if` are only applied when present;
  # the unguarded setters receive nil when the option is absent —
  # NOTE(review): confirm all unguarded setters tolerate nil.
  updater.set_params_and_types options[:params], options[:types] if options[:params]
  updater.create = options[:create]
  updater.create_session = options[:create_session]
  updater.session_id = options[:session_id] if options[:session_id]
  updater.write = options[:write]
  updater.table = options[:table]
  updater.dryrun = options[:dryrun]
  updater.maximum_bytes_billed = options[:maximum_bytes_billed]
  updater.labels = options[:labels] if options[:labels]
  updater.legacy_sql = Convert.resolve_legacy_sql options[:standard_sql], options[:legacy_sql]
  updater.external = options[:external] if options[:external]
  updater.priority = options[:priority]
  updater.cache = options[:cache]
  updater.large_results = options[:large_results]
  updater.flatten = options[:flatten]
  updater.udfs = options[:udfs]
  updater
end
##
# Sets the geographic location where the job should run. Required
# except for US and EU.
#
# @param [String, nil] value A geographic location, such as "US", "EU"
#   or "asia-northeast1". Passing `nil` unsets the location entirely.
#
# @example
#   require "google/cloud/bigquery"
#
#   bigquery = Google::Cloud::Bigquery.new
#   dataset = bigquery.dataset "my_dataset"
#
#   job = bigquery.query_job "SELECT 1;" do |query|
#     query.table = dataset.table "my_table", skip_lookup: true
#     query.location = "EU"
#   end
#
# @!group Attributes
def location= value
  @gapi.job_reference.location = value
  return unless value.nil?
  # Treat a nil assignment as unsetting the value: remove the backing
  # instance variable so the field is omitted from the API payload.
  if @gapi.job_reference.instance_variables.include? :@location
    @gapi.job_reference.remove_instance_variable :@location
  end
end
##
# Sets the priority of the query.
#
# @param [String] value Specifies a priority for the query. Possible
#   values include `INTERACTIVE` and `BATCH`.
#
# @!group Attributes
def priority= value
  resolved = priority_value value
  @gapi.configuration.query.priority = resolved
end
##
# Specifies whether to look in the query cache for results.
#
# @param [Boolean] value Whether to look for the result in the query
#   cache. The query cache is a best-effort cache flushed whenever
#   tables in the query are modified. The default value is true. See
#   [query caching](https://developers.google.com/bigquery/querying-data).
#
# @!group Attributes
def cache= value
  query_config = @gapi.configuration.query
  query_config.use_query_cache = value
end
##
# Allow large results for a legacy SQL query.
#
# @param [Boolean] value This option is specific to Legacy SQL.
#   If `true`, allows the query to produce arbitrarily large result
#   tables at a slight cost in performance. Requires the `table`
#   parameter to be set.
#
# @!group Attributes
def large_results= value
  query_config = @gapi.configuration.query
  query_config.allow_large_results = value
end
##
# Flatten nested and repeated fields in legacy SQL queries.
#
# @param [Boolean] value This option is specific to Legacy SQL.
#   Flattens all nested and repeated fields in the query results. The
#   default value is `true`. The `large_results` parameter must be
#   `true` if this is set to `false`.
#
# @!group Attributes
def flatten= value
  query_config = @gapi.configuration.query
  query_config.flatten_results = value
end
##
# Sets the default dataset of tables referenced in the query.
#
# @param [Dataset] value The default dataset to use for unqualified
#   table names in the query.
#
# @!group Attributes
def dataset= value
  dataset_ref = @service.dataset_ref_from value
  @gapi.configuration.query.default_dataset = dataset_ref
end
##
# Sets the query parameters. Standard SQL only.
#
# Delegates to {#set_params_and_types} with values only; use that
# method directly to also provide explicit BigQuery types for values
# whose SQL type cannot be inferred (e.g. `BIGNUMERIC`, `GEOGRAPHY`).
#
# @param [Array, Hash] params Used to pass query arguments when the
#   `query` string contains either positional (`?`) or named
#   (`@myparam`) query parameters. Pass an `Array` for positional
#   parameters or a `Hash` for named parameters. When set,
#   `legacy_sql` is automatically set to false and `standard_sql` to
#   true. See [Data
#   Types](https://cloud.google.com/bigquery/docs/reference/standard-sql/data-types)
#   for an overview of each BigQuery data type and the Ruby types
#   converted from.
#
# @!group Attributes
def params= params
  set_params_and_types params
end
##
# Sets the query parameters and, optionally, their types. Standard
# SQL only.
#
# When set, `legacy_sql` is automatically set to false and
# `standard_sql` to true.
#
# @param [Array, Hash] params Used to pass query arguments when the
#   `query` string contains either positional (`?`) or named
#   (`@myparam`) query parameters. Pass an `Array` (e.g. `["foo"]`)
#   for positional parameters or a `Hash` (e.g. `{ myparam: "foo" }`)
#   for named parameters. See [Data
#   Types](https://cloud.google.com/bigquery/docs/reference/standard-sql/data-types)
#   for each BigQuery data type, including allowed values, and the
#   Ruby types converted from.
# @param [Array, Hash] types Optional SQL types for the values in
#   `params`, for cases where the right SQL type cannot be inferred
#   from a value. Must use the same container shape as `params`: an
#   `Array` with positional parameters, a `Hash` with named
#   parameters. Values are BigQuery type codes such as `:BOOL`,
#   `:INT64`, `:FLOAT64`, `:NUMERIC`, `:BIGNUMERIC`, `:STRING`,
#   `:DATETIME`, `:DATE`, `:GEOGRAPHY`, `:TIMESTAMP`, `:TIME`,
#   `:BYTES`; an `Array` of a type code for ARRAY values (e.g.
#   `[:INT64]`); or a nested `Hash` for STRUCT values.
#
# @raise [ArgumentError] if `types` does not use the same format as
#   `params`, or if `params` is neither an `Array` nor a `Hash`.
#
# @!group Attributes
def set_params_and_types params, types = nil
  types ||= params.class.new
  raise ArgumentError, "types must use the same format as params" if types.class != params.class
  case params
  when Array
    @gapi.configuration.query.use_legacy_sql = false
    @gapi.configuration.query.parameter_mode = "POSITIONAL"
    @gapi.configuration.query.query_parameters = params.zip(types).map do |value, type|
      Convert.to_query_param value, type
    end
  when Hash
    @gapi.configuration.query.use_legacy_sql = false
    @gapi.configuration.query.parameter_mode = "NAMED"
    @gapi.configuration.query.query_parameters = params.map do |key, value|
      named_param = Convert.to_query_param value, types[key]
      named_param.name = String key
      named_param
    end
  else
    raise ArgumentError, "params must be an Array or a Hash"
  end
end
##
# Sets the create disposition for creating the query results table.
#
# @param [String] value Specifies whether the job is allowed to
#   create new tables. The default value is `needed`.
#
#   The following values are supported:
#
#   * `needed` - Create the table if it does not exist.
#   * `never` - The table must already exist. A 'notFound' error is
#     raised if the table does not exist.
#
# @!group Attributes
def create= value
  disposition = Convert.create_disposition value
  @gapi.configuration.query.create_disposition = disposition
end
##
# Sets the create_session property. If true, creates a new session,
# where the session id will be a server generated random id. If
# false, runs the query with an existing {#session_id=}, otherwise
# runs the query in non-session mode. The default value is `false`.
#
# @param [Boolean] value The create_session property. The default
#   value is `false`.
#
# @!group Attributes
def create_session= value
  query_config = @gapi.configuration.query
  query_config.create_session = value
end
##
# Sets the session ID for a query run in session mode. See
# {#create_session=}.
#
# @param [String] value The session ID. The default value is `nil`.
#
# @!group Attributes
def session_id= value
  query_config = @gapi.configuration.query
  query_config.connection_properties ||= []
  # Update the existing session_id connection property in place, or
  # append a new one if none exists yet.
  existing = query_config.connection_properties.find { |conn_prop| conn_prop.key == "session_id" }
  if existing
    existing.value = value
  else
    query_config.connection_properties <<
      Google::Apis::BigqueryV2::ConnectionProperty.new(key: "session_id", value: value)
  end
end
##
# Sets the write disposition for when the query results table exists.
#
# @param [String] value Specifies the action that occurs if the
#   destination table already exists. The default value is `empty`.
#
#   The following values are supported:
#
#   * `truncate` - BigQuery overwrites the table data.
#   * `append` - BigQuery appends the data to the table.
#   * `empty` - A 'duplicate' error is returned in the job result if
#     the table exists and contains data.
#
# @!group Attributes
def write= value
  disposition = Convert.write_disposition value
  @gapi.configuration.query.write_disposition = disposition
end
##
# Sets the dry run flag for the query job.
#
# @param [Boolean] value If set, don't actually run this job. A valid
#   query will return a mostly empty response with some processing
#   statistics, while an invalid query will return the same error it
#   would if it wasn't a dry run.
#
# @!group Attributes
def dryrun= value
  job_config = @gapi.configuration
  job_config.dry_run = value
end
alias dry_run= dryrun=
##
# Sets the destination for the query results table.
#
# @param [Table] value The destination table where the query results
#   should be stored. If not present, a new table will be created
#   according to the create disposition to store the results.
#
# @!group Attributes
def table= value
  destination_ref = table_ref_from value
  @gapi.configuration.query.destination_table = destination_ref
end
##
# Sets the maximum bytes billed for the query.
#
# @param [Integer] value Limits the bytes billed for this job.
#   Queries that will have bytes billed beyond this limit will fail
#   (without incurring a charge). Optional. If unspecified, this will
#   be set to your project default.
#
# @!group Attributes
def maximum_bytes_billed= value
  query_config = @gapi.configuration.query
  query_config.maximum_bytes_billed = value
end
##
# Sets the labels to use for the job.
#
# @param [Hash] value A hash of user-provided labels associated with
#   the job. You can use these to organize and group your jobs.
#
#   The labels applied to a resource must meet the following
#   requirements:
#
#   * Each resource can have multiple labels, up to a maximum of 64.
#   * Each label must be a key-value pair.
#   * Keys have a minimum length of 1 character and a maximum length
#     of 63 characters, and cannot be empty. Values can be empty, and
#     have a maximum length of 63 characters.
#   * Keys and values can contain only lowercase letters, numeric
#     characters, underscores, and dashes. All characters must use
#     UTF-8 encoding, and international characters are allowed.
#   * The key portion of a label must be unique. However, you can use
#     the same key with multiple resources.
#   * Keys must start with a lowercase letter or international
#     character.
#
# @!group Attributes
#
def labels= value
  new_labels = value
  @gapi.configuration.update! labels: new_labels
end
##
# Sets the query syntax to legacy SQL.
#
# @param [Boolean] value Specifies whether to use BigQuery's [legacy
#   SQL](https://cloud.google.com/bigquery/docs/reference/legacy-sql)
#   dialect for this query. If set to false, the query will use
#   BigQuery's [standard
#   SQL](https://cloud.google.com/bigquery/docs/reference/standard-sql/)
#   dialect. Optional. The default value is false.
#
# @!group Attributes
#
def legacy_sql= value
  query_config = @gapi.configuration.query
  query_config.use_legacy_sql = value
end
##
# Sets the query syntax to standard SQL.
#
# @param [Boolean] value Specifies whether to use BigQuery's [standard
#   SQL](https://cloud.google.com/bigquery/docs/reference/standard-sql/)
#   dialect for this query. If set to true, the query will use
#   standard SQL rather than the [legacy
#   SQL](https://cloud.google.com/bigquery/docs/reference/legacy-sql)
#   dialect. Optional. The default value is true.
#
# @!group Attributes
#
def standard_sql= value
  # Stored on the API object as the inverse of use_legacy_sql.
  query_config = @gapi.configuration.query
  query_config.use_legacy_sql = !value
end
##
# Sets definitions for external tables used in the query.
#
# @param [Hash<String|Symbol, External::DataSource>] value A Hash
#   that represents the mapping of the external tables to the table
#   names used in the SQL query. The hash keys are the table names,
#   and the hash values are the external table objects.
#
# @!group Attributes
#
def external= value
  # Normalize keys to strings and convert each data source to its
  # low-level API representation.
  definitions = value.each_with_object({}) do |(name, source), memo|
    memo[String(name)] = source.to_gapi
  end
  @gapi.configuration.query.table_definitions = definitions
end
##
# Sets user defined functions for the query.
#
# @param [Array<String>, String] value User-defined function resources
#   used in the query. May be either a code resource to load from a
#   Google Cloud Storage URI (`gs://bucket/path`), or an inline
#   resource that contains code for a user-defined function (UDF).
#   Providing an inline code resource is equivalent to providing a URI
#   for a file containing the same code. See [User-Defined
#   Functions](https://cloud.google.com/bigquery/docs/reference/standard-sql/user-defined-functions).
#
# @!group Attributes
def udfs= value
  resources = udfs_gapi_from value
  @gapi.configuration.query.user_defined_function_resources = resources
end
##
# Sets the encryption configuration of the destination table.
#
# @param [Google::Cloud::BigQuery::EncryptionConfiguration] val
#   Custom encryption configuration (e.g., Cloud KMS keys).
#
# @example
#   require "google/cloud/bigquery"
#
#   bigquery = Google::Cloud::Bigquery.new
#   dataset = bigquery.dataset "my_dataset"
#
#   key_name = "projects/a/locations/b/keyRings/c/cryptoKeys/d"
#   encrypt_config = bigquery.encryption kms_key: key_name
#   job = bigquery.query_job "SELECT 1;" do |job|
#     job.table = dataset.table "my_table", skip_lookup: true
#     job.encryption = encrypt_config
#   end
#
# @!group Attributes
def encryption= val
  encryption_gapi = val.to_gapi
  @gapi.configuration.query.update! destination_encryption_configuration: encryption_gapi
end
##
# Sets the field on which to range partition the destination table.
# See [Creating and using integer range partitioned
# tables](https://cloud.google.com/bigquery/docs/creating-integer-range-partitions).
#
# See {#range_partitioning_start=}, {#range_partitioning_interval=}
# and {#range_partitioning_end=}.
#
# You can only set range partitioning when creating a table. BigQuery
# does not allow you to change partitioning on an existing table.
#
# @param [String] field The range partition field. The destination
#   table is partitioned by this field. The field must be a top-level
#   `NULLABLE/REQUIRED` field. The only supported type is
#   `INTEGER/INT64`.
#
# @!group Attributes
#
def range_partitioning_field= field
  query_config = @gapi.configuration.query
  # Lazily initialize the range partitioning configuration.
  query_config.range_partitioning ||= Google::Apis::BigqueryV2::RangePartitioning.new(
    range: Google::Apis::BigqueryV2::RangePartitioning::Range.new
  )
  query_config.range_partitioning.field = field
end
##
# Sets the start of range partitioning, inclusive, for the
# destination table. See [Creating and using integer range partitioned
# tables](https://cloud.google.com/bigquery/docs/creating-integer-range-partitions).
#
# You can only set range partitioning when creating a table. BigQuery
# does not allow you to change partitioning on an existing table.
#
# See {#range_partitioning_field=}, {#range_partitioning_interval=}
# and {#range_partitioning_end=}.
#
# @param [Integer] range_start The start of range partitioning,
#   inclusive.
#
# @!group Attributes
#
def range_partitioning_start= range_start
  query_config = @gapi.configuration.query
  # Lazily initialize the range partitioning configuration.
  query_config.range_partitioning ||= Google::Apis::BigqueryV2::RangePartitioning.new(
    range: Google::Apis::BigqueryV2::RangePartitioning::Range.new
  )
  query_config.range_partitioning.range.start = range_start
end
##
# Sets the width of each interval for data in range partitions. See
# [Creating and using integer range partitioned
# tables](https://cloud.google.com/bigquery/docs/creating-integer-range-partitions).
#
# You can only set range partitioning when creating a table. BigQuery
# does not allow you to change partitioning on an existing table.
#
# See {#range_partitioning_field=}, {#range_partitioning_start=} and
# {#range_partitioning_end=}.
#
# @param [Integer] range_interval The width of each interval, for
#   data in partitions.
#
# @!group Attributes
#
def range_partitioning_interval= range_interval
  query_config = @gapi.configuration.query
  # Lazily initialize the range partitioning configuration.
  query_config.range_partitioning ||= Google::Apis::BigqueryV2::RangePartitioning.new(
    range: Google::Apis::BigqueryV2::RangePartitioning::Range.new
  )
  query_config.range_partitioning.range.interval = range_interval
end
##
# Sets the end of range partitioning, exclusive, for the destination
# table. See [Creating and using integer range partitioned
# tables](https://cloud.google.com/bigquery/docs/creating-integer-range-partitions).
#
# You can only set range partitioning when creating a table. BigQuery
# does not allow you to change partitioning on an existing table.
#
# See {#range_partitioning_field=}, {#range_partitioning_start=} and
# {#range_partitioning_interval=}.
#
# @param [Integer] range_end The end of range partitioning,
#   exclusive.
#
# @!group Attributes
#
def range_partitioning_end= range_end
  query_config = @gapi.configuration.query
  # Lazily initialize the range partitioning configuration.
  query_config.range_partitioning ||= Google::Apis::BigqueryV2::RangePartitioning.new(
    range: Google::Apis::BigqueryV2::RangePartitioning::Range.new
  )
  query_config.range_partitioning.range.end = range_end
end
##
# Sets the time partitioning type for the destination table. See
# [Partitioned
# Tables](https://cloud.google.com/bigquery/docs/partitioned-tables).
#
# You can only set the partitioning while creating a table. BigQuery
# does not allow you to change partitioning on an existing table.
#
# @param [String] type The partition type. The supported types are
#   `DAY`, `HOUR`, `MONTH`, and `YEAR`, which will generate one
#   partition per day, hour, month, and year, respectively.
#
# @!group Attributes
#
def time_partitioning_type= type
  query_config = @gapi.configuration.query
  query_config.time_partitioning ||= Google::Apis::BigqueryV2::TimePartitioning.new
  query_config.time_partitioning.update! type: type
end
##
# Sets the field on which to partition the destination table. If not
# set, the destination table is partitioned by the pseudo column
# `_PARTITIONTIME`; if set, the table is partitioned by this field.
# See [Partitioned
# Tables](https://cloud.google.com/bigquery/docs/partitioned-tables).
#
# The destination table must also be partitioned. See
# {#time_partitioning_type=}.
#
# You can only set the partitioning field while creating a table.
# BigQuery does not allow you to change partitioning on an existing
# table.
#
# @param [String] field The partition field. The field must be a
#   top-level TIMESTAMP or DATE field. Its mode must be NULLABLE or
#   REQUIRED.
#
# @!group Attributes
#
def time_partitioning_field= field
  query_config = @gapi.configuration.query
  query_config.time_partitioning ||= Google::Apis::BigqueryV2::TimePartitioning.new
  query_config.time_partitioning.update! field: field
end
##
# Sets the partition expiration for the destination table. See
# [Partitioned
# Tables](https://cloud.google.com/bigquery/docs/partitioned-tables).
#
# The destination table must also be partitioned. See
# {#time_partitioning_type=}.
#
# @param [Integer, nil] expiration An expiration time, in seconds,
#   for data in partitions. Pass `nil` to clear a previously set
#   expiration.
#
# @example
#   require "google/cloud/bigquery"
#
#   bigquery = Google::Cloud::Bigquery.new
#   dataset = bigquery.dataset "my_dataset"
#   destination_table = dataset.table "my_destination_table",
#                                     skip_lookup: true
#
#   job = dataset.query_job "SELECT * FROM UNNEST(" \
#                           "GENERATE_TIMESTAMP_ARRAY(" \
#                           "'2018-10-01 00:00:00', " \
#                           "'2018-10-10 00:00:00', " \
#                           "INTERVAL 1 DAY)) AS dob" do |job|
#     job.table = destination_table
#     job.time_partitioning_type = "DAY"
#     job.time_partitioning_expiration = 86_400
#   end
#
#   job.wait_until_done!
#   job.done? #=> true
#
# @!group Attributes
#
def time_partitioning_expiration= expiration
  @gapi.configuration.query.time_partitioning ||= Google::Apis::BigqueryV2::TimePartitioning.new
  # The API expects milliseconds. Guard against nil so callers can
  # clear the expiration; previously `nil * 1000` raised NoMethodError.
  expiration_ms = expiration.nil? ? nil : expiration * 1000
  @gapi.configuration.query.time_partitioning.update! expiration_ms: expiration_ms
end
##
# If set to true, queries over the destination table will require a
# partition filter that can be used for partition elimination to be
# specified. See [Partitioned
# Tables](https://cloud.google.com/bigquery/docs/partitioned-tables).
#
# @param [Boolean] val Indicates if queries over the destination
#   table will require a partition filter. The default value is
#   `false`.
#
# @!group Attributes
#
def time_partitioning_require_filter= val
  query_config = @gapi.configuration.query
  query_config.time_partitioning ||= Google::Apis::BigqueryV2::TimePartitioning.new
  query_config.time_partitioning.update! require_partition_filter: val
end
##
# Sets the list of fields on which data should be clustered.
#
# Only top-level, non-repeated, simple-type fields are supported. When
# you cluster a table using multiple columns, the order of columns you
# specify is important. The order of the specified columns determines
# the sort order of the data.
#
# BigQuery supports clustering for both partitioned and
# non-partitioned tables.
#
# See {QueryJob#clustering_fields}, {Table#clustering_fields} and
# {Table#clustering_fields=}.
#
# @see https://cloud.google.com/bigquery/docs/clustered-tables
#   Introduction to clustered tables
# @see https://cloud.google.com/bigquery/docs/creating-clustered-tables
#   Creating and using clustered tables
#
# @param [Array<String>] fields The clustering fields. Only top-level,
#   non-repeated, simple-type fields are supported.
#
# @!group Attributes
#
def clustering_fields= fields
  query_config = @gapi.configuration.query
  query_config.clustering ||= Google::Apis::BigqueryV2::Clustering.new
  query_config.clustering.fields = fields
end
# Job lifecycle operations are not supported on a job configuration
# updater; each of these raises a RuntimeError when called.
def cancel
  raise RuntimeError, "not implemented in #{self.class}"
end

def rerun!
  raise RuntimeError, "not implemented in #{self.class}"
end

def reload!
  raise RuntimeError, "not implemented in #{self.class}"
end
alias refresh! reload!

def wait_until_done!
  raise RuntimeError, "not implemented in #{self.class}"
end
##
# @private Returns the underlying low-level API representation of the
# job configuration being built.
#
# @return [Google::Apis::BigqueryV2::Job] (See
#   {Google::Apis::BigqueryV2::Job})
def to_gapi
  @gapi
end
protected
# Creates a low-level API table reference from a table object.
# Returns nil when no table is given.
def table_ref_from tbl
  return nil if tbl.nil?
  Google::Apis::BigqueryV2::TableReference.new project_id: tbl.project_id,
                                               dataset_id: tbl.dataset_id,
                                               table_id: tbl.table_id
end
# Maps a user-supplied priority (string or symbol, any case) to the
# API enum value; returns nil for unrecognized input.
def priority_value str
  case str.to_s.downcase
  when "batch" then "BATCH"
  when "interactive" then "INTERACTIVE"
  end
end
# Wraps one or many UDF sources into UserDefinedFunctionResource objects.
# Values starting with "gs://" are treated as Cloud Storage URIs; anything
# else is treated as inline code.
def udfs_gapi_from array_or_str
  Array(array_or_str).map do |source|
    Google::Apis::BigqueryV2::UserDefinedFunctionResource.new.tap do |resource|
      if source.start_with? "gs://"
        resource.resource_uri = source
      else
        resource.inline_code = source
      end
    end
  end
end
end
##
# Represents a stage in the execution plan for the query.
#
# @attr_reader [Float] compute_ratio_avg Relative amount of time the
# average shard spent on CPU-bound tasks.
# @attr_reader [Float] compute_ratio_max Relative amount of time the
# slowest shard spent on CPU-bound tasks.
# @attr_reader [Integer] id Unique ID for the stage within the query
# plan.
# @attr_reader [String] name Human-readable name for the stage.
# @attr_reader [Float] read_ratio_avg Relative amount of time the
# average shard spent reading input.
# @attr_reader [Float] read_ratio_max Relative amount of time the
# slowest shard spent reading input.
# @attr_reader [Integer] records_read Number of records read into the
# stage.
# @attr_reader [Integer] records_written Number of records written by
# the stage.
# @attr_reader [Array<Step>] steps List of operations within the stage
# in dependency order (approximately chronological).
# @attr_reader [Float] wait_ratio_avg Relative amount of time the
# average shard spent waiting to be scheduled.
# @attr_reader [Float] wait_ratio_max Relative amount of time the
# slowest shard spent waiting to be scheduled.
# @attr_reader [Float] write_ratio_avg Relative amount of time the
# average shard spent on writing output.
# @attr_reader [Float] write_ratio_max Relative amount of time the
# slowest shard spent on writing output.
#
# @example
# require "google/cloud/bigquery"
#
# bigquery = Google::Cloud::Bigquery.new
#
# sql = "SELECT word FROM `bigquery-public-data.samples.shakespeare`"
# job = bigquery.query_job sql
#
# job.wait_until_done!
#
# stages = job.query_plan
# stages.each do |stage|
# puts stage.name
# stage.steps.each do |step|
# puts step.kind
# puts step.substeps.inspect
# end
# end
#
class Stage
  # Relative time ratios and raw counters reported by the BigQuery
  # query-plan statistics; see the attribute docs on the class comment.
  attr_reader :compute_ratio_avg
  attr_reader :compute_ratio_max
  attr_reader :id
  attr_reader :name
  attr_reader :read_ratio_avg
  attr_reader :read_ratio_max
  attr_reader :records_read
  attr_reader :records_written
  # Current status of the stage as reported by the API.
  attr_reader :status
  attr_reader :steps
  attr_reader :wait_ratio_avg
  attr_reader :wait_ratio_max
  attr_reader :write_ratio_avg
  attr_reader :write_ratio_max

  ##
  # @private Creates a new Stage instance.
  # NOTE: arguments are positional and must stay in exactly the order
  # used by .from_gapi below.
  def initialize compute_ratio_avg, compute_ratio_max, id, name, read_ratio_avg, read_ratio_max, records_read,
                 records_written, status, steps, wait_ratio_avg, wait_ratio_max, write_ratio_avg,
                 write_ratio_max
    @compute_ratio_avg = compute_ratio_avg
    @compute_ratio_max = compute_ratio_max
    @id = id
    @name = name
    @read_ratio_avg = read_ratio_avg
    @read_ratio_max = read_ratio_max
    @records_read = records_read
    @records_written = records_written
    @status = status
    @steps = steps
    @wait_ratio_avg = wait_ratio_avg
    @wait_ratio_max = wait_ratio_max
    @write_ratio_avg = write_ratio_avg
    @write_ratio_max = write_ratio_max
  end

  ##
  # @private New Stage from a statistics.query.queryPlan element.
  # Converts each nested step resource into a Step before construction.
  def self.from_gapi gapi
    steps = Array(gapi.steps).map { |g| Step.from_gapi g }
    new gapi.compute_ratio_avg, gapi.compute_ratio_max, gapi.id, gapi.name, gapi.read_ratio_avg,
        gapi.read_ratio_max, gapi.records_read, gapi.records_written, gapi.status, steps, gapi.wait_ratio_avg,
        gapi.wait_ratio_max, gapi.write_ratio_avg, gapi.write_ratio_max
  end
end
##
# Represents an operation in a stage in the execution plan for the
# query.
#
# @attr_reader [String] kind Machine-readable operation type. For a full
# list of operation types, see [Steps
# metadata](https://cloud.google.com/bigquery/query-plan-explanation#steps_metadata).
# @attr_reader [Array<String>] substeps Human-readable stage
# descriptions.
#
# @example
# require "google/cloud/bigquery"
#
# bigquery = Google::Cloud::Bigquery.new
#
# sql = "SELECT word FROM `bigquery-public-data.samples.shakespeare`"
# job = bigquery.query_job sql
#
# job.wait_until_done!
#
# stages = job.query_plan
# stages.each do |stage|
# puts stage.name
# stage.steps.each do |step|
# puts step.kind
# puts step.substeps.inspect
# end
# end
#
class Step
  # Machine-readable operation type (see class comment for the full list).
  attr_reader :kind
  # Human-readable descriptions of the sub-operations.
  attr_reader :substeps

  ##
  # @private Creates a new Step instance.
  def initialize kind, substeps
    @kind = kind
    @substeps = substeps
  end

  ##
  # @private New Step from a statistics.query.queryPlan[].steps element.
  def self.from_gapi gapi
    new gapi.kind, Array(gapi.substeps)
  end
end
protected
# Lazily fetches and caches the destination table schema.
def ensure_schema!
  # Memoized: skip the API call when the schema was already retrieved.
  return unless destination_schema.nil?
  # max: 0 fetches result metadata (including the schema) without rows.
  query_results_gapi = service.job_query_results job_id, location: location, max: 0
  # raise "unable to retrieve schema" if query_results_gapi.schema.nil?
  @destination_schema_gapi = query_results_gapi.schema
end
# Schema of the destination table, populated lazily by ensure_schema!.
def destination_schema
  @destination_schema_gapi
end

# Dataset ID of the configured destination table.
def destination_table_dataset_id
  @gapi.configuration.query.destination_table.dataset_id
end

# Table ID of the configured destination table.
def destination_table_table_id
  @gapi.configuration.query.destination_table.table_id
end

# Builds a Table resource combining the destination reference with the
# lazily-fetched schema.
def destination_table_gapi
  Google::Apis::BigqueryV2::Table.new(
    table_reference: @gapi.configuration.query.destination_table,
    schema: destination_schema
  )
end
end
end
end
end
| 42.311123
| 150
| 0.559574
|
5de1300660ae64a7ada357ed750e4dcff412c8a0
| 353
|
require 'metasploit/framework/data_service/remote/http/response_data_helper'
# Data-service mixin driving the remote /api/v1/db-export endpoint.
module RemoteDbExportDataService
  include ResponseDataHelper

  DB_EXPORT_API_PATH = '/api/v1/db-export'

  # Requests a database export from the remote data service and writes
  # the returned export file to opts[:path].
  def run_db_export(opts)
    raw_response = self.get_data(DB_EXPORT_API_PATH, nil, opts)
    export_info = json_to_hash(raw_response)
    process_file(export_info[:db_export_file], opts[:path])
  end
end
| 25.214286
| 76
| 0.793201
|
7a57f181f689019d7b8e1f2783e5290ae1cec424
| 819
|
# frozen_string_literal: true
require_relative 'base'
module Logux
  module Test
    module Matchers
      # Matcher asserting that a Logux response body contains an
      # "approved" command (optionally with matching meta) and contains
      # no "forbidden" or "error" commands.
      class BeApproved < Base
        # @param actual [#body] response whose JSON body is a command list
        def matches?(actual)
          @actual = JSON.parse(actual.body)
          be_approved?(@actual)
        end

        def failure_message
          # Fixed grammar of the user-facing message (was "and doesn't be
          # errored of forbidden").
          "expected that #{pretty(@actual)} to be approved " \
            'and not to be errored or forbidden'
        end

        private

        # True when an "approved" command is present (with matching meta,
        # when a meta expectation was given) and no command is
        # "forbidden" or "error".
        def be_approved?(commands)
          meta = expected.first
          approved = commands.any? do |command|
            command.first == 'approved' &&
              (meta.nil? || (meta.present? && command[1] == meta))
          end
          approved && commands.none? do |command|
            command.first.in?(%w[forbidden error])
          end
        end
      end
    end
  end
end
| 21.552632
| 66
| 0.54823
|
e98ec14914a5c271966c961edafe69c77fb06ea3
| 6,650
|
module Transbank
  module TransaccionCompleta
    # Client for Transbank's "Transaccion Completa" Webpay REST API.
    #
    # All operations are class methods. Each accepts an optional +options+
    # object whose +api_key+, +commerce_code+ and +integration_type+
    # override the configured defaults (see .default_integration_params).
    # A successful HTTP response is wrapped in the matching
    # Transaction*Response object; failures raise the matching error with
    # the service-supplied message and HTTP status code.
    class Transaction
      extend Utils::NetHelper

      CREATE_TRANSACTION_ENDPOINT = 'rswebpaytransaction/api/webpay/v1.0/transactions'
      TRANSACTION_INSTALLMENTS_ENDPOINT = 'rswebpaytransaction/api/webpay/v1.0/transactions/:token/installments'
      COMMIT_TRANSACTION_ENDPOINT = 'rswebpaytransaction/api/webpay/v1.0/transactions/:token'
      TRANSACTION_STATUS_ENDPOINT = 'rswebpaytransaction/api/webpay/v1.0/transactions/:token'
      REFUND_TRANSACTION_ENDPOINT = 'rswebpaytransaction/api/webpay/v1.0/transactions/:token/refunds'

      class << self
        # Starts a new transaction with the full card data.
        def create(buy_order:, session_id:, amount:, card_number:, cvv:,
                   card_expiration_date:, options: nil)
          api_key, commerce_code, base_url = resolve_request_params(options)
          body = {
            buy_order: buy_order, session_id: session_id,
            amount: amount, card_number: card_number, cvv: cvv,
            card_expiration_date: card_expiration_date
          }
          url = base_url + CREATE_TRANSACTION_ENDPOINT
          headers = webpay_headers(commerce_code: commerce_code, api_key: api_key)
          resp = http_post(uri_string: url, body: body, headers: headers, camel_case_keys: false)
          body = JSON.parse(resp.body)
          return ::Transbank::TransaccionCompleta::TransactionCreateResponse.new(body) if resp.kind_of? Net::HTTPSuccess
          raise Errors::TransactionCreateError.new(body['error_message'], resp.code)
        end

        # Queries the installment options available for the transaction.
        def installments(token:, installments_number:, options: nil)
          api_key, commerce_code, base_url = resolve_request_params(options)
          url = base_url + TRANSACTION_INSTALLMENTS_ENDPOINT.gsub(':token', token)
          headers = webpay_headers(commerce_code: commerce_code, api_key: api_key)
          body = { installments_number: installments_number }
          resp = http_post(uri_string: url, body: body, headers: headers, camel_case_keys: false)
          body = JSON.parse(resp.body)
          return ::Transbank::TransaccionCompleta::TransactionInstallmentsResponse.new(body) if resp.kind_of? Net::HTTPSuccess
          raise Errors::TransactionInstallmentsError.new(body['error_message'], resp.code)
        end

        # Confirms (commits) the transaction with the chosen installment plan.
        def commit(token:, id_query_installments:, deferred_period_index:,
                   grace_period:, options: nil)
          api_key, commerce_code, base_url = resolve_request_params(options)
          url = base_url + COMMIT_TRANSACTION_ENDPOINT.gsub(':token', token)
          headers = webpay_headers(commerce_code: commerce_code, api_key: api_key)
          body = {
            id_query_installments: id_query_installments,
            deferred_period_index: deferred_period_index,
            grace_period: grace_period
          }
          resp = http_put(uri_string: url, body: body, headers: headers)
          body = JSON.parse(resp.body)
          return ::Transbank::TransaccionCompleta::TransactionCommitResponse.new(body) if resp.kind_of? Net::HTTPSuccess
          raise Errors::TransactionCommitError.new(body['error_message'], resp.code)
        end

        # Fetches the current status of the transaction.
        def status(token:, options: nil)
          api_key, commerce_code, base_url = resolve_request_params(options)
          url = base_url + TRANSACTION_STATUS_ENDPOINT.gsub(':token', token)
          headers = webpay_headers(commerce_code: commerce_code, api_key: api_key)
          resp = http_get(uri_string: url, headers: headers)
          body = JSON.parse(resp.body)
          return ::Transbank::TransaccionCompleta::TransactionStatusResponse.new(body) if resp.kind_of? Net::HTTPSuccess
          raise Errors::TransactionStatusError.new(body['error_message'], resp.code)
        end

        # Refunds (fully or partially) a committed transaction.
        def refund(token:, amount:, options: nil)
          api_key, commerce_code, base_url = resolve_request_params(options)
          body = { amount: amount }
          url = base_url + REFUND_TRANSACTION_ENDPOINT.gsub(':token', token)
          headers = webpay_headers(commerce_code: commerce_code, api_key: api_key)
          resp = http_post(uri_string: url, body: body, headers: headers, camel_case_keys: false)
          body = JSON.parse(resp.body)
          return ::Transbank::TransaccionCompleta::TransactionRefundResponse.new(body) if resp.kind_of? Net::HTTPSuccess
          raise Errors::TransactionRefundError.new(body['error_message'], resp.code)
        end

        # Library-wide defaults used when an operation is called without an
        # explicit options object.
        def default_integration_params
          {
            api_key: TransaccionCompleta::Base::DEFAULT_API_KEY,
            commerce_code: TransaccionCompleta::Base::DEFAULT_COMMERCE_CODE,
            integration_type: TransaccionCompleta::Base::integration_type,
            base_url: TransaccionCompleta::Base::current_integration_type_url
          }
        end

        private

        # Resolves [api_key, commerce_code, base_url] for a request,
        # preferring values from +options+ over the configured defaults.
        # This resolution was previously duplicated verbatim in all five
        # public operations.
        def resolve_request_params(options)
          api_key = options&.api_key || default_integration_params[:api_key]
          commerce_code = options&.commerce_code || default_integration_params[:commerce_code]
          integration_type = options&.integration_type || default_integration_params[:integration_type]
          base_url = if integration_type.nil?
                       TransaccionCompleta::Base::integration_type[:TEST]
                     else
                       TransaccionCompleta::Base.integration_type_url(integration_type)
                     end
          [api_key, commerce_code, base_url]
        end
      end
    end
  end
end
| 56.355932
| 162
| 0.709774
|
e8be71e627252c815899100658770e8db62786a5
| 27,370
|
# Unit specs for ServiceInstanceUpdate. Exercises the broker PATCH request
# contents, local model updates and rollbacks, dashboard_url propagation,
# and maintenance_info semantics for sync and async broker responses.
require 'spec_helper'
require 'actions/services/service_instance_update'

module VCAP::CloudController
  RSpec.describe ServiceInstanceUpdate do
    let(:services_event_repo) do
      instance_double(Repositories::ServiceEventRepository, record_service_instance_event: nil, user_audit_info: user_audit_info)
    end
    let(:user_audit_info) { instance_double(UserAuditInfo) }
    let(:service_instance_update) do
      ServiceInstanceUpdate.new(
        accepts_incomplete: false,
        services_event_repository: services_event_repo
      )
    end
    let(:service_broker) { ServiceBroker.make }
    let(:allow_context_updates) { false }
    let(:service) { Service.make(plan_updateable: true, service_broker: service_broker, allow_context_updates: allow_context_updates) }
    let(:old_service_plan) { ServicePlan.make(:v2, service: service) }
    let(:new_plan_maintenance_info) {}
    let(:new_service_plan) { ServicePlan.make(:v2, service: service, maintenance_info: new_plan_maintenance_info) }
    let(:service_instance) { ManagedServiceInstance.make(service_plan: old_service_plan,
                                                         maintenance_info: old_service_plan.maintenance_info,
                                                         tags: [],
                                                         name: 'Old name')
    }
    let(:updated_name) { 'New name' }
    let(:updated_parameters) { { 'thing1' => 'thing2' } }
    let(:updated_tags) { ['tag1', 'tag2'] }
    let(:request_attrs) {
      {
        'parameters' => updated_parameters,
        'name' => updated_name,
        'tags' => updated_tags,
        'service_plan_guid' => new_service_plan.guid,
        'maintenance_info' => { 'version' => '1.4.5a' },
      }
    }

    # Full-payload update: name, tags, plan, parameters and
    # maintenance_info changed in one request.
    describe 'updating multiple attributes' do
      before do
        stub_update service_instance
      end
      it 'can update all the attributes at the same time' do
        service_instance_update.update_service_instance(service_instance, request_attrs)
        service_instance.reload
        expect(service_instance.name).to eq(updated_name)
        expect(service_instance.tags).to eq(updated_tags)
        expect(service_instance.service_plan.guid).to eq(new_service_plan.guid)
        expect(service_instance.maintenance_info).to eq(new_service_plan.maintenance_info)
        expect(
          a_request(:patch, update_url(service_instance)).with do |req|
            expect(JSON.parse(req.body)).to include({
              'parameters' => updated_parameters,
              'plan_id' => new_service_plan.broker_provided_id,
              'previous_values' => {
                'plan_id' => old_service_plan.broker_provided_id,
                'service_id' => service_instance.service.broker_provided_id,
                'organization_id' => service_instance.organization.guid,
                'space_id' => service_instance.space.guid,
              }
            })
          end
        ).to have_been_made.once
      end
      describe 'failure cases' do
        context 'when the update times out' do
          before do
            stub_update(service_instance, body: lambda { |r|
              sleep 10
              raise 'Should time out'
            })
          end
          it 'should mark the service instance as failed' do
            expect {
              Timeout.timeout(0.5.second) do
                service_instance_update.update_service_instance(service_instance, { 'parameters' => { 'foo' => 'bar' } })
              end
            }.to raise_error(Timeout::Error)
            service_instance.reload
            expect(a_request(:patch, update_url(service_instance))).
              to have_been_made.times(1)
            expect(service_instance.last_operation.type).to eq('update')
            expect(service_instance.last_operation.state).to eq('failed')
          end
        end
        context 'when there is a validation failure' do
          it 'unlocks the service instance' do
            tags = ['a'] * 2049
            expect {
              service_instance_update.update_service_instance(service_instance, { 'tags' => tags })
            }.to raise_error(Sequel::ValidationFailed, /too_long/)
            expect(service_instance.last_operation.state).to eq('failed')
          end
        end
        context 'when the broker returns an error' do
          before do
            stub_update(service_instance, status: 500)
          end
          it 'rolls back other changes' do
            old_name = service_instance.name
            expect {
              service_instance_update.update_service_instance(service_instance, request_attrs)
            }.to raise_error(VCAP::Services::ServiceBrokers::V2::Errors::ServiceBrokerBadResponse)
            service_instance.reload
            expect(service_instance.name).to eq(old_name)
            expect(service_instance.service_plan.guid).to eq(old_service_plan.guid)
            expect(service_instance.tags).to be_empty
          end
          context 'when an updated field fails validations' do
            let(:request_attrs) {
              {
                'name' => 'name' * 1000,
                'tags' => ['new', 'tags'],
                'service_plan_guid' => new_service_plan.guid
              }
            }
            it 'rolls back changes' do
              old_tags = service_instance.tags
              old_name = service_instance.name
              expect {
                service_instance_update.update_service_instance(service_instance, request_attrs)
              }.to raise_error(Sequel::ValidationFailed, /max_length/)
              service_instance.reload
              expect(service_instance.name).to eq(old_name)
              expect(service_instance.tags).to eq(old_tags)
              expect(service_instance.service_plan.guid).to eq(old_service_plan.guid)
            end
            it 'does not update the broker' do
              expect {
                service_instance_update.update_service_instance(service_instance, request_attrs)
              }.to raise_error(Sequel::ValidationFailed, /max_length/)
              expect(
                a_request(:patch, update_url(service_instance)).with(
                  body: hash_including({
                    'plan_id' => new_service_plan.broker_provided_id
                  })
                )
              ).not_to have_been_made.once
            end
          end
        end
      end
    end

    # Single-attribute updates: verifies which kinds of change trigger a
    # broker call and what the request body contains.
    describe 'passing in a single attribute to update' do
      before do
        stub_update service_instance
      end
      context 'arbitrary params are the only change' do
        let(:request_attrs) { { 'parameters' => updated_parameters } }
        let(:old_maintenance_info) { { 'version' => '5.0.0' } }
        let(:old_service_plan) { ServicePlan.make(:v2, service: service, maintenance_info: old_maintenance_info) }
        it 'sends a request to the broker updating only parameters' do
          service_instance_update.update_service_instance(service_instance, request_attrs)
          expect(
            a_request(:patch, update_url(service_instance)).with do |req|
              parsed_body = JSON.parse(req.body)
              expect(parsed_body).to include({
                'parameters' => updated_parameters,
                'plan_id' => old_service_plan.broker_provided_id,
                'previous_values' => {
                  'plan_id' => old_service_plan.broker_provided_id,
                  'service_id' => service_instance.service.broker_provided_id,
                  'organization_id' => service_instance.organization.guid,
                  'space_id' => service_instance.space.guid,
                  'maintenance_info' => old_maintenance_info,
                }
              })
              expect(parsed_body).not_to include('maintenance_info')
            end
          ).to have_been_made.once
        end
        it 'updates only last operation on service_instance and keeps all other attributes same' do
          original_service_instance = service_instance.to_hash
          service_instance_update.update_service_instance(service_instance, request_attrs)
          updated_service_instance = service_instance.reload.to_hash
          expect(original_service_instance['last_operation']).to be_nil
          expect(updated_service_instance['last_operation']).to include('type' => 'update', 'state' => 'succeeded')
          expect(updated_service_instance.except('last_operation')).
            to eq(original_service_instance.except('last_operation'))
        end
      end
      context 'plan is the only attr passed in' do
        context "but didn't change" do
          let(:request_attrs) { { 'service_plan_guid' => old_service_plan.guid } }
          it 'should not update the broker' do
            service_instance_update.update_service_instance(service_instance, request_attrs)
            expect(
              a_request(:patch, update_url(service_instance)).with(
                body: hash_including({
                  'plan_id' => old_service_plan.broker_provided_id,
                  'previous_values' => {
                    'plan_id' => old_service_plan.broker_provided_id,
                    'service_id' => service_instance.service.broker_provided_id,
                    'organization_id' => service_instance.organization.guid,
                    'space_id' => service_instance.space.guid
                  }
                })
              )
            ).to_not have_been_made
          end
        end
        context 'and changed' do
          let(:request_attrs) {
            {
              'service_plan_guid' => new_service_plan.guid
            }
          }
          it 'should update the broker' do
            service_instance_update.update_service_instance(service_instance, request_attrs)
            expect(
              a_request(:patch, update_url(service_instance)).with(
                body: hash_including({
                  'plan_id' => new_service_plan.broker_provided_id,
                  'previous_values' => {
                    'plan_id' => old_service_plan.broker_provided_id,
                    'service_id' => service_instance.service.broker_provided_id,
                    'organization_id' => service_instance.organization.guid,
                    'space_id' => service_instance.space.guid
                  }
                })
              )
            ).to have_been_made.once
            expect(service_instance.service_plan).to eq(new_service_plan)
          end
          context 'new plan has maintenance_info' do
            let(:new_plan_maintenance_info) { { 'version' => '1.0', 'extra' => 'something' } }
            let(:maintenance_info_without_extra) { { 'version' => '1.0' } }
            it 'should update the service instance maintenance_info to its new plan maintenance_info' do
              service_instance_update.update_service_instance(service_instance, request_attrs)
              expect(service_instance.reload.maintenance_info).to eq(maintenance_info_without_extra)
            end
          end
          context 'new plan does not have maintenance_info' do
            let(:new_plan_maintenance_info) {}
            it 'should reset the service instance maintenance_info to nil' do
              service_instance.maintenance_info = { 'version' => '0.1' }
              service_instance.save
              service_instance_update.update_service_instance(service_instance, request_attrs)
              expect(service_instance.reload.maintenance_info).to eq(nil)
            end
          end
        end
      end
      context 'name is the only change' do
        let(:request_attrs) { { 'name' => updated_name } }
        context 'allow_context_updates is enabled for service' do
          let(:allow_context_updates) { true }
          it 'sends a request to the broker' do
            service_instance_update.update_service_instance(service_instance, request_attrs)
            expect(
              a_request(:patch, update_url(service_instance)).with(
                body: hash_including({
                  'plan_id' => old_service_plan.broker_provided_id,
                  'previous_values' => {
                    'plan_id' => old_service_plan.broker_provided_id,
                    'service_id' => service_instance.service.broker_provided_id,
                    'organization_id' => service_instance.organization.guid,
                    'space_id' => service_instance.space.guid
                  }
                })
              )
            ).to have_been_made.once
          end
        end
        context 'allow_context_updates is disabled for service' do
          let(:allow_context_updates) { false }
          it 'does not send a request to the broker' do
            service_instance_update.update_service_instance(service_instance, request_attrs)
            expect(
              a_request(:patch, update_url(service_instance))).not_to have_been_made
          end
        end
      end
    end

    # dashboard_url handling for both previously-set and unset values.
    describe 'updating dashboard urls' do
      let(:broker_body) {}
      let(:stub_opts) { { status: 202, body: broker_body.to_json } }
      before do
        stub_update(service_instance, stub_opts)
      end
      context 'when the service instance already has a dashboard url' do
        before do
          service_instance.dashboard_url = 'http://previous-dashboard-url.com'
          service_instance.save
        end
        context 'and when there is a new dashboard url on update' do
          let(:broker_body) { { operation: '123', dashboard_url: 'http://new-dashboard-url.com' } }
          it 'updates the service instance model with the new url' do
            service_instance_update.update_service_instance(service_instance, request_attrs)
            service_instance.reload
            expect(service_instance.dashboard_url).to eq 'http://new-dashboard-url.com'
          end
        end
        context 'when there is no dashboard url on update' do
          let(:broker_body) { { operation: '123' } }
          it 'displays the previous dashboard url' do
            service_instance_update.update_service_instance(service_instance, request_attrs)
            service_instance.reload
            expect(service_instance.dashboard_url).to eq 'http://previous-dashboard-url.com'
          end
        end
        context 'when the dashboard url is present' do
          let(:broker_body) { { operation: '123', dashboard_url: '' } }
          it 'updates the service instace model with its value' do
            service_instance_update.update_service_instance(service_instance, request_attrs)
            service_instance.reload
            expect(service_instance.dashboard_url).to eq ''
          end
        end
      end
      context 'when the service instance does not already have a dashboard url' do
        context 'when there is a new dashboard url on update' do
          let(:broker_body) { { operation: '123', dashboard_url: 'http://new-dashboard-url.com' } }
          it 'updates the service instance model with the new url' do
            service_instance_update.update_service_instance(service_instance, request_attrs)
            service_instance.reload
            expect(service_instance.dashboard_url).to eq 'http://new-dashboard-url.com'
          end
        end
        context 'when there is no dashboard url on update' do
          let(:broker_body) { { operation: '123' } }
          it 'does not display a url' do
            service_instance_update.update_service_instance(service_instance, request_attrs)
            service_instance.reload
            expect(service_instance.dashboard_url).to be_nil
          end
        end
      end
      context 'when the dashboard url is not a string' do
        let(:broker_body) { { operation: '123', dashboard_url: {} } }
        it 'fails to update the service instance' do
          expect {
            service_instance_update.update_service_instance(service_instance, request_attrs)
          }.to raise_error(VCAP::Services::ServiceBrokers::V2::Errors::ServiceBrokerResponseMalformed,
            %r{The property '#/dashboard_url' .* did not match one or more of the following types: string, null})
          service_instance.reload
          expect(service_instance.dashboard_url).to eq nil
        end
      end
    end

    # maintenance_info updates: previous_values contents, extra-field
    # stripping, and sync vs async broker responses.
    describe 'updating maintenance_info' do
      let(:new_maintenance_info) {
        {
          'version' => '2.0',
        }
      }
      let(:old_maintenance_info) {
        {
          'version' => '1.0',
        }
      }
      let(:request_attrs) {
        {
          'maintenance_info' => new_maintenance_info,
        }
      }
      let(:broker_body) { {} }
      let(:stub_opts) { { status: 200, body: broker_body.to_json } }
      let(:service_instance) { ManagedServiceInstance.make(maintenance_info: old_maintenance_info) }
      before do
        stub_update(service_instance, stub_opts)
      end
      it 'sends maintenance_info to the broker' do
        service_instance_update.update_service_instance(service_instance, request_attrs)
        expect(
          a_request(:patch, update_url(service_instance)).with do |req|
            expect(JSON.parse(req.body)).to include({
              'maintenance_info' => new_maintenance_info,
              'previous_values' => {
                'plan_id' => service_instance.service_plan.broker_provided_id,
                'service_id' => service_instance.service.broker_provided_id,
                'organization_id' => service_instance.organization.guid,
                'space_id' => service_instance.space.guid,
                'maintenance_info' => old_maintenance_info,
              }
            })
          end
        ).to have_been_made.once
      end
      context 'previous values when maintenance_info is nil' do
        let(:old_maintenance_info) { nil }
        it 'does not include it in the previous_values' do
          service_instance_update.update_service_instance(service_instance, request_attrs)
          expect(
            a_request(:patch, update_url(service_instance)).with do |req|
              expect(JSON.parse(req.body)).not_to include({
                'previous_values' => have_key('maintenance_info'),
              })
            end
          ).to have_been_made.once
        end
      end
      context 'when the maintenance_info has extra fields' do
        let(:new_maintenance_info) {
          {
            'version' => '2.0',
            'extra' => 'some extra information',
          }
        }
        let(:maintenance_info_without_extra) { { 'version' => '2.0' } }
        let(:old_maintenance_info) {
          {
            'version' => '1.0',
            'extra' => 'some extra information',
          }
        }
        let(:old_maintenance_info_without_extra) { { 'version' => '1.0' } }
        it 'sends maintenance_info to the broker without the extra fields' do
          service_instance_update.update_service_instance(service_instance, request_attrs)
          expect(
            a_request(:patch, update_url(service_instance)).with do |req|
              expect(JSON.parse(req.body)).to include({
                'maintenance_info' => maintenance_info_without_extra,
                'previous_values' => include(
                  'maintenance_info' => old_maintenance_info_without_extra,
                ),
              })
            end
          ).to have_been_made.once
        end
        context 'when the broker responds synchronously' do
          it 'updates the service instance maintenance_info in the model' do
            service_instance_update.update_service_instance(service_instance, request_attrs)
            service_instance.reload
            expect(service_instance.maintenance_info).to eq(maintenance_info_without_extra)
          end
        end
        context 'when the broker responds asynchronously' do
          let(:service_instance_update) do
            ServiceInstanceUpdate.new(
              accepts_incomplete: true,
              services_event_repository: services_event_repo
            )
          end
          before do
            stub_update(service_instance, accepts_incomplete: true, status: 202)
          end
          it 'saves the new maintenance_info as a proposed change' do
            service_instance_update.update_service_instance(service_instance, request_attrs)
            expect(service_instance.last_operation.proposed_changes).to include({ maintenance_info: maintenance_info_without_extra })
          end
        end
      end
      context 'when the maintenance_info.version provided is the same as the one on the service instance' do
        let(:service_instance) { ManagedServiceInstance.make(maintenance_info: new_maintenance_info.merge({ 'description': 'some description' })) }
        it 'does NOT make a call to the broker' do
          service_instance_update.update_service_instance(service_instance, request_attrs)
          expect(
            a_request(:patch, update_url(service_instance))).not_to have_been_made
        end
      end
      context 'when maintenance_info.version provided and does not exist on service_instance' do
        let(:service_instance) { ManagedServiceInstance.make(maintenance_info: nil) }
        it 'updates the broker with new maintenance_info' do
          service_instance_update.update_service_instance(service_instance, request_attrs)
          expect(
            a_request(:patch, update_url(service_instance)).with do |req|
              expect(JSON.parse(req.body)).to include({
                'maintenance_info' => new_maintenance_info,
              })
            end
          ).to have_been_made.once
        end
      end
      context 'when the broker responds synchronously' do
        it 'updates the service instance maintenance_info in the model' do
          service_instance_update.update_service_instance(service_instance, request_attrs)
          service_instance.reload
          expect(service_instance.maintenance_info).to eq(new_maintenance_info)
        end
        context 'when the broker returns an error' do
          before do
            stub_update(service_instance, status: 418)
          end
          it 'keeps the old maintenance_info' do
            expect {
              service_instance_update.update_service_instance(service_instance, request_attrs)
            }.to raise_error(VCAP::Services::ServiceBrokers::V2::Errors::ServiceBrokerRequestRejected)
            service_instance.reload
            expect(service_instance.maintenance_info).to eq(old_maintenance_info)
          end
        end
      end
      context 'when the broker responds asynchronously' do
        let(:service_instance_update) do
          ServiceInstanceUpdate.new(
            accepts_incomplete: true,
            services_event_repository: services_event_repo
          )
        end
        before do
          stub_update(service_instance, accepts_incomplete: true, status: 202)
        end
        it 'keeps the old maintenance_info before the operation is completed' do
          service_instance_update.update_service_instance(service_instance, request_attrs)
          expect(service_instance.maintenance_info).to eq(old_maintenance_info)
        end
        it 'saves the new maintenance_info as a proposed change' do
          service_instance_update.update_service_instance(service_instance, request_attrs)
          expect(service_instance.last_operation.proposed_changes).to include({ maintenance_info: new_maintenance_info })
        end
        context 'when maintenance_info is present both in the request and from the new plan' do
          let(:new_plan_maintenance_info) { { 'version' => '1.0' } }
          let(:request_attrs) {
            {
              'service_plan_guid' => new_service_plan.guid,
              'maintenance_info' => new_maintenance_info,
            }
          }
          it 'uses the maintenance_info from the new service plan' do
            service_instance_update.update_service_instance(service_instance, request_attrs)
            expect(service_instance.last_operation.proposed_changes).to include({ maintenance_info: new_plan_maintenance_info })
          end
        end
        context 'when maintenance_info is missing from the request body' do
          let(:request_attrs) { { 'service_plan_guid' => new_service_plan.guid } }
          context 'but the new plan has a maintenance_info' do
            let(:new_plan_maintenance_info) { { 'version' => '1.0', 'extra' => 'some extra' } }
            let(:maintenance_info_without_extra) { { 'version' => '1.0' } }
            it 'saves the new plan maintenance_info as a proposed change' do
              service_instance_update.update_service_instance(service_instance, request_attrs)
              expect(service_instance.last_operation.proposed_changes).to include({ maintenance_info: maintenance_info_without_extra })
            end
            it 'sends the new plan maintenance_info to the broker' do
              service_instance_update.update_service_instance(service_instance, request_attrs)
              expect(
                a_request(:patch, update_url(service_instance, accepts_incomplete: true)).with do |req|
                  expect(JSON.parse(req.body)).to include({
                    'maintenance_info' => maintenance_info_without_extra,
                    'previous_values' => {
                      'plan_id' => service_instance.service_plan.broker_provided_id,
                      'service_id' => service_instance.service.broker_provided_id,
                      'organization_id' => service_instance.organization.guid,
                      'space_id' => service_instance.space.guid,
                      'maintenance_info' => maintenance_info_without_extra,
                    }
                  })
                end
              ).to have_been_made.once
            end
          end
        end
        context 'when maintenance_info is missing from the body and no plan changed' do
          let(:request_attrs) {
            {
              'parameters' => updated_parameters,
              'name' => updated_name,
              'tags' => updated_tags,
            }
          }
          it 'remains unchanged in the model' do
            service_instance_update.update_service_instance(service_instance, request_attrs)
            expect(service_instance.reload.maintenance_info).to eq(old_maintenance_info)
          end
        end
      end
    end

    # Audit events emitted for asynchronous (accepts_incomplete) updates.
    context 'when accepts_incomplete is true' do
      let(:service_instance_update) do
        ServiceInstanceUpdate.new(
          accepts_incomplete: true,
          services_event_repository: services_event_repo
        )
      end
      context 'when the broker responds asynchronously' do
        before do
          stub_update(service_instance, accepts_incomplete: true, status: 202)
        end
        it 'creates audit log event start_update' do
          expect(services_event_repo).to receive(:record_service_instance_event).with(:start_update, service_instance, request_attrs).once
          service_instance_update.update_service_instance(service_instance, request_attrs)
        end
      end
    end
  end
end
| 38.822695
| 147
| 0.618816
|
d544bf7b6b8c6134477e06ce131ac9c252afe4d0
| 270
|
module VagrantPlugins
  module GuestFreeBSD
    # Guest-specific settings for FreeBSD machines controlling how long
    # Vagrant waits for a graceful halt.
    class Config < Vagrant::Config::Base
      # halt_timeout        - seconds to wait for the guest to halt (default 30)
      # halt_check_interval - seconds between halt-status polls (default 1)
      attr_accessor :halt_timeout, :halt_check_interval

      def initialize
        @halt_timeout        = 30
        @halt_check_interval = 1
      end
    end
  end
end
| 19.285714
| 40
| 0.674074
|
4a228d5e3de84521fdc6af2b56f6c0b90e18f4d2
| 687
|
require 'spec_helper'
describe 'cis_hardening::auth::su' do
  on_supported_os.each do |os, os_facts|
    context "on #{os}" do
      let(:facts) { os_facts }

      # The class itself must end up in the catalog.
      it do
        is_expected.to contain_class('cis_hardening::auth::su')
      end

      # CIS Section 5.6: access to the su command is restricted via
      # pam_wheel in /etc/pam.d/su.
      it do
        is_expected.to contain_file_line('su_setting').with(
          'path' => '/etc/pam.d/su',
          'line' => 'auth required pam_wheel.so use_uid',
        )
      end

      # Catalog compiles cleanly with every declared dependency.
      it do
        is_expected.to compile.with_all_deps
      end
    end
  end
end
| 24.535714
| 79
| 0.595342
|
abbb87dd48ff58006245ce2a036358a58513eaf9
| 491
|
# CocoaPods specification for the AeroGearHttp library.
Pod::Spec.new do |s|
  s.name         = "AeroGearHttp"
  s.version      = "0.2.0"
  s.summary      = "Lightweight lib around NSURLSession to ease HTTP calls."
  s.homepage     = "https://github.com/aerogear/aerogear-ios-http"
  s.license      = 'Apache License, Version 2.0'
  s.author       = "Red Hat, Inc."
  # NOTE(review): tracks the master branch rather than a version tag —
  # builds are not pinned to the 0.2.0 release; confirm this is intended.
  s.source       = { :git => 'https://github.com/aerogear/aerogear-ios-http.git', :branch => 'master'}
  s.platform     = :ios, 8.0
  # Ship every Swift source in the AeroGearHttp directory.
  s.source_files = 'AeroGearHttp/*.{swift}'
end
| 40.916667
| 103
| 0.604888
|
261a514244d3d54e64a20b252f442517851ef76b
| 4,387
|
require 'jekyll/document'
require 'fileutils'
module Jekyll
  module Webp
    # A static file to hold the generated webp image after generation
    # so that Jekyll will copy it into the site output directory.
    class WebpFile < StaticFile
      def write(dest)
        true # Recover from strange exception when starting server without --auto
      end
    end # class WebpFile

    # Walks the configured image directories and generates a .webp twin
    # for every supported image that is missing one or is out of date.
    class WebpGenerator < Generator
      # This generator is safe from arbitrary code execution.
      safe true

      # This generator should be passive with regard to its execution
      priority :lowest

      # Generate WebP companions if necessary (default entry point).
      #
      # site - The Site.
      #
      # Returns nothing.
      def generate(site)
        # Retrieve and merge the configuration from the site yml file.
        @config = DEFAULT.merge(site.config['webp'] || {})

        # If disabled then simply quit.
        unless @config['enabled']
          Jekyll.logger.info "WebP:", "Disabled in site.config."
          return
        end

        Jekyll.logger.debug "WebP:", "Starting"

        # The site destination directory must exist before we can write there.
        Dir.mkdir(site.dest) unless File.directory?(site.dest)

        # If nesting is enabled, expand every configured directory to also
        # include all directories nested below it (the glob re-includes the
        # directory itself).
        if @config['nested']
          @config['img_dir'] = @config['img_dir'].flat_map { |imgdir| Dir.glob(imgdir + "/**/") }
        end

        # Count how many files were (re)generated across all directories.
        file_count = 0
        @config['img_dir'].each do |imgdir|
          file_count += process_directory(site, imgdir)
        end

        Jekyll.logger.info "WebP:", "Generator Complete: #{file_count} file(s) generated"
      end # function generate

      private

      # Process every image directly reachable under +imgdir+ (relative to
      # the site source), generating WebP twins as needed.
      #
      # Returns the number of files generated.
      def process_directory(site, imgdir)
        imgdir_source = File.join(site.source, imgdir)
        imgdir_destination = File.join(site.dest, imgdir)
        FileUtils.mkdir_p(imgdir_destination)
        Jekyll.logger.info "WebP:", "Processing #{imgdir_source}"

        count = 0
        Dir[imgdir_source + "**/*.*"].each do |imgfile|
          count += 1 if process_image(site, imgdir, imgdir_source, imgdir_destination, imgfile)
        end
        count
      end

      # Generate (if required) and register the WebP twin of +imgfile+.
      #
      # Returns true when a new WebP file was written, false otherwise.
      def process_image(site, imgdir, imgdir_source, imgdir_destination, imgfile)
        # Handle only the configured formats (jpg, jpeg, png and gif by default).
        file_ext = File.extname(imgfile).downcase
        return false unless @config['formats'].include? file_ext

        # TODO: Do an exclude check

        # Create the output file path, mirroring the source subdirectory layout.
        imgfile_relative_path = File.dirname(imgfile.sub(imgdir_source, ""))
        file_noext = File.basename(imgfile, file_ext)
        outfile_filename = file_noext + ".webp"
        FileUtils.mkdir_p(imgdir_destination + imgfile_relative_path)
        outfile_fullpath_webp = File.join(imgdir_destination + imgfile_relative_path, outfile_filename)

        generated = false
        # Regenerate when forced via config, when no webp twin exists yet, or
        # when the source image is newer than the existing twin.
        if @config['regenerate'] || !File.file?(outfile_fullpath_webp) ||
           File.mtime(outfile_fullpath_webp) <= File.mtime(imgfile)
          Jekyll.logger.info "WebP:", "Change to source image file #{imgfile} detected, regenerating WebP"
          WebpExec.run(@config['quality'], @config['flags'], imgfile, outfile_fullpath_webp)
          generated = true
        end

        if File.file?(outfile_fullpath_webp)
          # Keep the webp file from being cleaned by Jekyll.
          site.static_files << WebpFile.new(site,
                                            site.dest,
                                            File.join(imgdir, imgfile_relative_path),
                                            outfile_filename)
        end

        generated
      end
    end # class WebpGenerator
  end # module Webp
end # module Jekyll
| 39.169643
| 131
| 0.596991
|
f85c4f6e857cc58ea9f9633f6b2d939270c08ef1
| 2,227
|
require 'active_model'
module Wiki
  # ActiveModel-compatible wrapper around a file stored in a Gollum wiki
  # repository. Exposes dirty tracking for :name and :content and uses the
  # repository path as the record id.
  class Attachment
    include ActiveModel::AttributeMethods
    include ActiveModel::Conversion
    include ActiveModel::Dirty
    extend ActiveModel::Naming

    define_attribute_methods :name, :content

    attr_accessor :wiki, :name, :content, :path, :format
    # The repository path doubles as the ActiveModel id.
    alias :id :path

    # Assigns the name and flags the attribute as changed. Note: the
    # dirty flag is raised even when +value+ is nil (the old value is kept).
    def name=(value)
      name_will_change!
      @name=value unless value.nil?
    end

    # Assigns the content and flags the attribute as changed (same nil
    # caveat as #name=).
    def content=(value)
      content_will_change!
      @content=value unless value.nil?
    end

    def to_s
      self.path
    end

    def path
      @path
    end

    # Mass-assigns name and content from a params-style hash; always true.
    def update(hash)
      self.name = hash[:name]
      self.content = hash[:content]
      return true
    end

    # params may supply :wiki, plus either :gollum_file (an existing Gollum
    # file object) or :path (normalized, then looked up in the wiki).
    def initialize(params={})
      wiki = params[:wiki]
      gollum_file = params[:gollum_file]
      path = params[:path]
      if wiki
        @wiki = wiki
      end
      if gollum_file
        self.gollum_file = gollum_file
      elsif path
        # Normalize to a lowercase, dash-separated path before lookup.
        safepath = path.downcase.gsub(' ','-')
        @path = safepath
        new_file = @wiki.find_gollum_file(safepath)
        if new_file
          self.gollum_file = new_file
        end
      end
    end

    # Adopts an underlying Gollum file: derives name, format (file
    # extension as a symbol) and full repository path from it.
    def gollum_file=(file)
      @gollum_file = file
      @name = file.name
      @format = File.extname(file.name).split('.').last.to_sym
      # filename = ::Attachment.basename(fullname, ext)
      @path = file.url_path + file.name
    end

    def gollum_file
      @gollum_file
    end

    # Absolute path of the file on disk, under the wiki's base path.
    def filesystem_path
      return File.join(@wiki.base_path, @path)
    end

    def mime_type
      @gollum_file.mime_type
    end

    def to_param
      @path
    end

    # Parent directory with a trailing slash, or '' when at the root.
    def parent_path
      parent_path = File.dirname(@path)
      (parent_path == '.') ? '' : parent_path + '/'
    end

    # Removes the file from the wiki repository as +user+: deletes via a
    # Gollum committer (refreshing working dir and cache after commit),
    # pulling before the commit and pushing afterwards.
    def destroy!(user)
      commit_options = {name: user.name, email: user.email, message: "removed #{@path}"}
      Rails.logger.debug("removing #{@path}")
      committer = Gollum::Committer.new(@wiki.gollum_wiki, commit_options)
      committer.delete(@path)
      committer.after_commit do |index, _sha|
        dir = File.dirname(@path)
        dir = '' if dir == '.'
        @wiki.gollum_wiki.clear_cache
        committer.update_working_dir(dir, parent_path, @format)
      end
      @wiki.pull_repo
      committer.commit
      @wiki.push_repo
    end

    def persisted?
      # TODO: make this a real dirty flag
      true
    end
  end
end
| 19.034188
| 86
| 0.652896
|
5d35139f2ce18ddaec737fc4a7d5376174efe307
| 1,166
|
require "language/node"
# Homebrew formula for the Contentful command-line tools (npm package).
class ContentfulCli < Formula
  desc "Contentful command-line tools"
  homepage "https://github.com/contentful/contentful-cli"
  url "https://registry.npmjs.org/contentful-cli/-/contentful-cli-1.4.10.tgz"
  sha256 "fe7a3787e68fb33b349bb721ff9fb82a6ae370567355455e8618d666fb6d2a56"
  head "https://github.com/contentful/contentful-cli.git"

  bottle do
    cellar :any_skip_relocation
    sha256 "77a0ace140706ad9a0613c585d0b419c3170232e1e4719891ca49b36cab5ba91" => :catalina
    sha256 "8ac68f7a2c3fb13f0127021c97c448e7a8408da9a6484d250b060f079aa90f35" => :mojave
    sha256 "11b0ac1f6bcaaf4d175ab94e987d089164cd73b7779afc86f208182831d56cb0" => :high_sierra
  end

  depends_on "node"

  # Standard npm install into libexec with the binaries symlinked into bin.
  def install
    system "npm", "install", *Language::Node.std_npm_install_args(libexec)
    bin.install_symlink Dir["#{libexec}/bin/*"]
  end

  # Running unauthenticated should fail with the login prompt (exit 1).
  test do
    output = shell_output("#{bin}/contentful space list 2>&1", 1)
    assert_match "🚨 Error: You have to be logged in to do this.", output
    assert_match "You can log in via contentful login", output
    assert_match "Or provide a managementToken via --management-Token argument", output
  end
end
| 37.612903
| 93
| 0.767581
|
617296748f43554748b73f4cfbf9e5f808485f16
| 1,866
|
module Fog
  module AWS
    class ELBV2
      class Real
        require 'fog/aws/parsers/elbv2/set_ip_address_type'

        # Documented valid values for the IP address type.
        # NOTE(review): not enforced by set_ip_address_type itself — invalid
        # values are passed through and rejected by the AWS API.
        IP_ADDRESS_TYPES = ['ipv4', 'dualstack']

        # Sets the type of IP addresses used by the subnets of the specified Application Load Balancer.
        #
        # ==== Parameters
        # * lb_id<~String> - The Amazon Resource Name (ARN) of the load balancer.
        # * ip_address_type<~String> - The IP address type. The possible values are ipv4 (for IPv4 addresses) and dualstack (for IPv4 and IPv6 addresses). Internal load balancers must use ipv4.
        #
        # ==== Returns
        # * response<~Excon::Response>:
        #   * body<~Hash>:
        #     * 'ResponseMetadata'<~Hash>:
        #       * 'RequestId'<~String> - Id of request
        #     * 'SetIpAddressTypeResult'<~Hash>:
        #       * 'IpAddressType'<~String> - The IP address type.
        def set_ip_address_type(lb_id, type)
          request({
            'Action'          => 'SetIpAddressType',
            'LoadBalancerArn' => lb_id,
            'IpAddressType'   => type,
            :parser           => Fog::Parsers::AWS::ELBV2::SetIpAddressType.new
          })
        end
      end

      class Mock
        # In-memory double of the real call: mutates the stored load
        # balancer's 'IpAddressType' and fabricates an AWS-style response.
        # Raises Fog::AWS::ELBV2::NotFound when lb_id is unknown.
        def set_ip_address_type(lb_id, type)
          response = Excon::Response.new
          response.status = 200
          load_balancer = self.data[:load_balancers][lb_id]
          raise Fog::AWS::ELBV2::NotFound unless load_balancer
          load_balancer['IpAddressType'] = type
          response.body = {
            'ResponseMetadata' => {
              'RequestId' => Fog::AWS::Mock.request_id
            },
            'SetIpAddressTypeResult' => {
              'IpAddressType' => type
            }
          }
          response
        end
      end
    end
  end
end
| 33.321429
| 193
| 0.533226
|
bbad7255ef0b53f277298d156da5c7d6f76e468d
| 4,293
|
# encoding: utf-8
require 'spec_helper'
# Specs for the EmptyLinesAroundBody cop: blank lines directly inside a
# def/class/module body are offenses and are auto-correctable.
describe Rubocop::Cop::Style::EmptyLinesAroundBody do
  subject(:cop) { described_class.new }

  it 'registers an offense for method body starting with a blank' do
    inspect_source(cop,
                   ['def some_method',
                    '',
                    '  do_something',
                    'end'])
    expect(cop.offenses.size).to eq(1)
  end

  # The cop only registers an offense if the extra line is completely empty. If
  # there is trailing whitespace, then that must be dealt with first. Having
  # two cops registering offense for the line with only spaces would cause
  # havoc in auto-correction.
  it 'accepts method body starting with a line with spaces' do
    inspect_source(cop,
                   ['def some_method',
                    '  ',
                    '  do_something',
                    'end'])
    expect(cop.offenses).to be_empty
  end

  it 'autocorrects method body starting with a blank' do
    corrected = autocorrect_source(cop,
                                   ['def some_method',
                                    '',
                                    '  do_something',
                                    'end'])
    expect(corrected).to eq ['def some_method',
                             '  do_something',
                             'end'].join("\n")
  end

  it 'registers an offense for class method body starting with a blank' do
    inspect_source(cop,
                   ['def Test.some_method',
                    '',
                    '  do_something',
                    'end'])
    expect(cop.offenses.size).to eq(1)
  end

  it 'autocorrects class method body starting with a blank' do
    corrected = autocorrect_source(cop,
                                   ['def Test.some_method',
                                    '',
                                    '  do_something',
                                    'end'])
    expect(corrected).to eq ['def Test.some_method',
                             '  do_something',
                             'end'].join("\n")
  end

  it 'registers an offense for method body ending with a blank' do
    inspect_source(cop,
                   ['def some_method',
                    '  do_something',
                    '',
                    'end'])
    expect(cop.offenses.size).to eq(1)
  end

  it 'registers an offense for class method body ending with a blank' do
    inspect_source(cop,
                   ['def Test.some_method',
                    '  do_something',
                    '',
                    'end'])
    expect(cop.offenses.size).to eq(1)
  end

  it 'registers an offense for class body starting with a blank' do
    inspect_source(cop,
                   ['class SomeClass',
                    '',
                    '  do_something',
                    'end'])
    expect(cop.offenses.size).to eq(1)
  end

  it 'autocorrects class body containing only a blank' do
    corrected = autocorrect_source(cop,
                                   ['class SomeClass',
                                    '',
                                    'end'])
    expect(corrected).to eq ['class SomeClass',
                             'end'].join("\n")
  end

  it 'registers an offense for module body starting with a blank' do
    inspect_source(cop,
                   ['module SomeModule',
                    '',
                    '  do_something',
                    'end'])
    expect(cop.offenses.size).to eq(1)
  end

  it 'registers an offense for class body ending with a blank' do
    inspect_source(cop,
                   ['class SomeClass',
                    '  do_something',
                    '',
                    'end'])
    expect(cop.offenses.size).to eq(1)
  end

  it 'registers an offense for module body ending with a blank' do
    inspect_source(cop,
                   ['module SomeModule',
                    '  do_something',
                    '',
                    'end'])
    expect(cop.offenses.size).to eq(1)
  end

  # A blank line after a one-line method is outside the body, not inside it.
  it 'is not fooled by single line methods' do
    inspect_source(cop,
                   ['def some_method; do_something; end',
                    '',
                    'something_else'])
    expect(cop.offenses).to be_empty
  end
end
| 32.522727
| 79
| 0.479152
|
28be6d6b7543f2cfd445b62679388df24877d4dc
| 436
|
# Serves the wiki-markup reference page and live markup previews.
class MarkupController < ApplicationController
  layout 'markup_reference'

  # NOTE(review): `verify` is the legacy (pre-Rails-3) request verifier —
  # presumably restricts #preview to XHR requests carrying :element_id;
  # confirm against this app's Rails version.
  verify :params => [:element_id], :xhr => true, :only => :preview

  # JS-only preview of user-entered markup.
  def preview
    respond_to(:js)
  end

  # Reference page: the wiki engine's stock examples plus two
  # app-specific link examples appended to the Links section.
  def reference
    @examples = WikiEngine.default_engine.markup_examples
    @examples[_('Links')] += [
      "A solution to this problem can be found\nin changeset \[712\]",
      "This problem is described in Ticket [#3733]",
    ]
  end
end
| 24.222222
| 70
| 0.662844
|
ff21a10f028266edada5948e18968a260d667440
| 1,336
|
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: google/ads/googleads/v2/enums/target_cpa_opt_in_recommendation_goal.proto
require 'google/protobuf'
require 'google/api/annotations_pb'
# Registers the generated descriptors with the shared protobuf pool.
# (Generated file — edits here are lost on regeneration.)
Google::Protobuf::DescriptorPool.generated_pool.build do
  add_file("google/ads/googleads/v2/enums/target_cpa_opt_in_recommendation_goal.proto", :syntax => :proto3) do
    add_message "google.ads.googleads.v2.enums.TargetCpaOptInRecommendationGoalEnum" do
    end
    add_enum "google.ads.googleads.v2.enums.TargetCpaOptInRecommendationGoalEnum.TargetCpaOptInRecommendationGoal" do
      value :UNSPECIFIED, 0
      value :UNKNOWN, 1
      value :SAME_COST, 2
      value :SAME_CONVERSIONS, 3
      value :SAME_CPA, 4
      value :CLOSEST_CPA, 5
    end
  end
end

module Google
  module Ads
    module GoogleAds
      module V2
        module Enums
          # Ruby constants wrapping the registered message/enum descriptors.
          TargetCpaOptInRecommendationGoalEnum = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("google.ads.googleads.v2.enums.TargetCpaOptInRecommendationGoalEnum").msgclass
          TargetCpaOptInRecommendationGoalEnum::TargetCpaOptInRecommendationGoal = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("google.ads.googleads.v2.enums.TargetCpaOptInRecommendationGoalEnum.TargetCpaOptInRecommendationGoal").enummodule
        end
      end
    end
  end
end
| 39.294118
| 253
| 0.775449
|
336c6f5f0bf348d83b7fb5602326b16a24553909
| 2,408
|
# Homebrew formula for Oracle Berkeley DB 6.2.
class BerkeleyDb < Formula
  desc "High performance key/value database"
  homepage "https://www.oracle.com/technology/products/berkeley-db/index.html"
  url "http://download.oracle.com/berkeley-db/db-6.2.23.tar.gz"
  sha256 "47612c8991aa9ac2f6be721267c8d3cdccf5ac83105df8e50809daea24e95dc7"

  bottle do
    cellar :any
    sha256 "3c2ad512ec0ecb113c966f9a7cfb06e100cb36e9a9b1698808f31b6c43f37ab6" => :sierra
    sha256 "bb75788493c5a0c8bdb5225b571864f82601d3d4974ae38e5ce7e239f9fb24e3" => :el_capitan
    sha256 "f194651ba24b94d97ff43629b05d601892f1d7ab87a32184c0e848f1dffaacde" => :yosemite
  end

  depends_on :java => [:optional, :build]

  def install
    # BerkeleyDB dislikes parallel builds
    ENV.deparallelize

    # --enable-compat185 is necessary because our build shadows
    # the system berkeley db 1.x
    args = %W[
      --disable-debug
      --prefix=#{prefix}
      --mandir=#{man}
      --enable-cxx
      --enable-compat185
      --enable-sql
      --enable-sql_codegen
      --enable-dbm
      --enable-stl
    ]
    args << "--enable-java" if build.with? "java"

    # BerkeleyDB requires you to build everything from the build_unix subdirectory
    cd "build_unix" do
      system "../dist/configure", *args
      system "make", "install"

      # use the standard docs location
      doc.parent.mkpath
      mv prefix/"docs", doc
    end
  end

  # Smoke test: store and retrieve one record through the C++ API.
  test do
    (testpath/"test.cpp").write <<-EOS.undent
      #include <assert.h>
      #include <string.h>
      #include <db_cxx.h>
      int main() {
        Db db(NULL, 0);
        assert(db.open(NULL, "test.db", NULL, DB_BTREE, DB_CREATE, 0) == 0);
        const char *project = "Homebrew";
        const char *stored_description = "The missing package manager for macOS";
        Dbt key(const_cast<char *>(project), strlen(project) + 1);
        Dbt stored_data(const_cast<char *>(stored_description), strlen(stored_description) + 1);
        assert(db.put(NULL, &key, &stored_data, DB_NOOVERWRITE) == 0);
        Dbt returned_data;
        assert(db.get(NULL, &key, &returned_data, 0) == 0);
        assert(strcmp(stored_description, (const char *)(returned_data.get_data())) == 0);
        assert(db.close(0) == 0);
      }
    EOS
    flags = %W[
      -I#{include}
      -L#{lib}
      -ldb_cxx
    ]
    system ENV.cxx, "test.cpp", "-o", "test", *flags
    system "./test"
    assert (testpath/"test.db").exist?
  end
end
| 31.272727
| 96
| 0.647841
|
87627437015a9b57b6f43251fc88104963c681ae
| 488
|
module ActionView
  module Helpers
    module AssetUrlHelper
      # Teach ActionView's asset helpers about html files served out of
      # the '/components' public directory.
      ASSET_EXTENSIONS.merge!(html: '.html')
      ASSET_PUBLIC_DIRECTORIES.merge!(html: '/components')

      # Resolve the asset path for an html component, mirroring
      # ActionView's built-in javascript_path helper. Caller-supplied
      # options override the :html type default.
      def path_to_html(source, options = {})
        path_to_asset(source, { type: :html }.merge(options))
      end
    end
  end
end
|
11470c7f437377c3d0deae761b0412fcd9a675ec
| 933
|
require 'json'

module ServerlessRedirector
  # A frozen collection of redirect rules, dumpable to / loadable from a
  # JSON-lines file (one {"path": ..., "url": ...} object per line).
  class Manifest
    # A single redirect rule mapping a request path to a target URL.
    Redirect = Struct.new(:path, :url) do
      # Build from a hash with 'path' and 'url' string keys.
      # Raises KeyError when either key is missing.
      def initialize(h = {})
        super h.fetch('path'), h.fetch('url')
      end

      # Hash representation used for JSON serialization.
      def serializable_hash(options = {})
        {
          'path' => path,
          'url' => url
        }
      end
    end

    # Conditions:
    #  1. Everything has a valid path.
    #  2. All URLs are valid
    attr_reader :redirects

    # contents - an enumerable of hashes, each with 'path'/'url' keys.
    def initialize(contents)
      @redirects = contents.to_a.map { |item| Redirect.new item }.freeze
    end

    # TODO: actually enforce the conditions listed above; currently a no-op.
    def validate
      @redirects.each do |item|
      end
    end

    # Write this manifest to +path+ as JSON lines.
    #
    # BUG FIX: this was previously defined as a class method, but it read
    # the instance-level @redirects (always nil on the class), so every
    # call raised NoMethodError. It is now an instance method.
    def dump_file!(path)
      File.open path, 'w+' do |out|
        @redirects.each do |redirect|
          out.puts JSON.dump(redirect.serializable_hash)
        end
      end
    end

    # Build a Manifest from a JSON-lines file written by #dump_file!.
    def self.load_file!(path)
      contents = File.readlines(path).lazy.map { |line| JSON.parse(line) }
      new contents
    end
  end
end
| 19.4375
| 74
| 0.569132
|
e8a9748313174fd2a9caf89b7f7a92c0e6689cad
| 251
|
require 'test_helper'
module DomainStatus
  # Functional test for the engine's status index endpoint.
  class StatusControllerTest < ActionController::TestCase
    setup do
      # Route requests through the engine, not the host application.
      @routes = Engine.routes
    end

    test "should get index" do
      get :index
      assert_response :success
    end
  end
end
| 15.6875
| 57
| 0.685259
|
eda1c66fd8fbdf2ae33dbb31cfe5ef1e6a6692eb
| 8,456
|
=begin
#NSX-T Manager API
#VMware NSX-T Manager REST API
OpenAPI spec version: 2.3.0.0.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
Swagger Codegen version: 2.3.1
=end
require 'date'
module NSXT
  # Swagger-codegen-generated model for the NSX-T "add management node"
  # request body. (Generated code — regenerate rather than hand-edit.)
  class AddManagementNodeSpec
    attr_accessor :mpa_msg_client_info

    # must be set to AddManagementNodeSpec
    attr_accessor :type

    # The password to be used to authenticate with the remote node.
    attr_accessor :password

    # The username to be used to authenticate with the remote node.
    attr_accessor :user_name

    # The host address of the remote node to which to send this join request.
    attr_accessor :remote_address

    # The certificate thumbprint of the remote node.
    attr_accessor :cert_thumbprint

    # Validates that a value belongs to a fixed set, coercing the allowed
    # values to the declared datatype. nil is always considered valid.
    class EnumAttributeValidator
      attr_reader :datatype
      attr_reader :allowable_values

      def initialize(datatype, allowable_values)
        @allowable_values = allowable_values.map do |value|
          case datatype.to_s
          when /Integer/i
            value.to_i
          when /Float/i
            value.to_f
          else
            value
          end
        end
      end

      def valid?(value)
        !value || allowable_values.include?(value)
      end
    end

    # Attribute mapping from ruby-style variable name to JSON key.
    def self.attribute_map
      {
        :'mpa_msg_client_info' => :'mpa_msg_client_info',
        :'type' => :'type',
        :'password' => :'password',
        :'user_name' => :'user_name',
        :'remote_address' => :'remote_address',
        :'cert_thumbprint' => :'cert_thumbprint'
      }
    end

    # Attribute type mapping.
    def self.swagger_types
      {
        :'mpa_msg_client_info' => :'MsgClientInfo',
        :'type' => :'String',
        :'password' => :'String',
        :'user_name' => :'String',
        :'remote_address' => :'String',
        :'cert_thumbprint' => :'String'
      }
    end

    # Initializes the object
    # @param [Hash] attributes Model attributes in the form of hash
    def initialize(attributes = {})
      return unless attributes.is_a?(Hash)

      # convert string to symbol for hash key
      attributes = attributes.each_with_object({}){|(k,v), h| h[k.to_sym] = v}

      if attributes.has_key?(:'mpa_msg_client_info')
        self.mpa_msg_client_info = attributes[:'mpa_msg_client_info']
      end

      if attributes.has_key?(:'type')
        self.type = attributes[:'type']
      end

      if attributes.has_key?(:'password')
        self.password = attributes[:'password']
      end

      if attributes.has_key?(:'user_name')
        self.user_name = attributes[:'user_name']
      end

      if attributes.has_key?(:'remote_address')
        self.remote_address = attributes[:'remote_address']
      end

      if attributes.has_key?(:'cert_thumbprint')
        self.cert_thumbprint = attributes[:'cert_thumbprint']
      end
    end

    # Show invalid properties with the reasons. Usually used together with valid?
    # @return Array for valid properties with the reasons
    def list_invalid_properties
      invalid_properties = Array.new
      if @type.nil?
        invalid_properties.push("invalid value for 'type', type cannot be nil.")
      end
      if @user_name.nil?
        invalid_properties.push("invalid value for 'user_name', user_name cannot be nil.")
      end
      if @remote_address.nil?
        invalid_properties.push("invalid value for 'remote_address', remote_address cannot be nil.")
      end
      return invalid_properties
    end

    # Check to see if the all the properties in the model are valid
    # @return true if the model is valid
    def valid?
      return false if @type.nil?
      type_validator = EnumAttributeValidator.new('String', ["AddManagementNodeSpec"])
      return false unless type_validator.valid?(@type)
      return false if @user_name.nil?
      return false if @remote_address.nil?
      return true
    end

    # Custom attribute writer method checking allowed values (enum).
    # @param [Object] type Object to be assigned
    def type=(type)
      validator = EnumAttributeValidator.new('String', ["AddManagementNodeSpec"])
      unless validator.valid?(type)
        fail ArgumentError, "invalid value for 'type', must be one of #{validator.allowable_values}."
      end
      @type = type
    end

    # Checks equality by comparing each attribute.
    # @param [Object] Object to be compared
    def ==(o)
      return true if self.equal?(o)
      self.class == o.class &&
          mpa_msg_client_info == o.mpa_msg_client_info &&
          type == o.type &&
          password == o.password &&
          user_name == o.user_name &&
          remote_address == o.remote_address &&
          cert_thumbprint == o.cert_thumbprint
    end

    # @see the `==` method
    # @param [Object] Object to be compared
    def eql?(o)
      self == o
    end

    # Calculates hash code according to all attributes.
    # @return [Fixnum] Hash code
    def hash
      [mpa_msg_client_info, type, password, user_name, remote_address, cert_thumbprint].hash
    end

    # Builds the object from hash
    # @param [Hash] attributes Model attributes in the form of hash
    # @return [Object] Returns the model itself
    def build_from_hash(attributes)
      return nil unless attributes.is_a?(Hash)
      self.class.swagger_types.each_pair do |key, type|
        if type =~ /\AArray<(.*)>/i
          # check to ensure the input is an array given that the the attribute
          # is documented as an array but the input is not
          if attributes[self.class.attribute_map[key]].is_a?(Array)
            self.send("#{key}=", attributes[self.class.attribute_map[key]].map{ |v| _deserialize($1, v) } )
          end
        elsif !attributes[self.class.attribute_map[key]].nil?
          self.send("#{key}=", _deserialize(type, attributes[self.class.attribute_map[key]]))
        end # or else data not found in attributes(hash), not an issue as the data can be optional
      end
      self
    end

    # Deserializes the data based on type
    # @param string type Data type
    # @param string value Value to be deserialized
    # @return [Object] Deserialized data
    def _deserialize(type, value)
      case type.to_sym
      when :DateTime
        DateTime.parse(value)
      when :Date
        Date.parse(value)
      when :String
        value.to_s
      when :Integer
        value.to_i
      when :Float
        value.to_f
      when :BOOLEAN
        if value.to_s =~ /\A(true|t|yes|y|1)\z/i
          true
        else
          false
        end
      when :Object
        # generic object (usually a Hash), return directly
        value
      when /\AArray<(?<inner_type>.+)>\z/
        inner_type = Regexp.last_match[:inner_type]
        value.map { |v| _deserialize(inner_type, v) }
      when /\AHash<(?<k_type>.+?), (?<v_type>.+)>\z/
        k_type = Regexp.last_match[:k_type]
        v_type = Regexp.last_match[:v_type]
        {}.tap do |hash|
          value.each do |k, v|
            hash[_deserialize(k_type, k)] = _deserialize(v_type, v)
          end
        end
      else # model
        temp_model = NSXT.const_get(type).new
        temp_model.build_from_hash(value)
      end
    end

    # Returns the string representation of the object
    # @return [String] String presentation of the object
    def to_s
      to_hash.to_s
    end

    # to_body is an alias to to_hash (backward compatibility)
    # @return [Hash] Returns the object in the form of hash
    def to_body
      to_hash
    end

    # Returns the object in the form of hash
    # @return [Hash] Returns the object in the form of hash
    def to_hash
      hash = {}
      self.class.attribute_map.each_pair do |attr, param|
        value = self.send(attr)
        next if value.nil?
        hash[param] = _to_hash(value)
      end
      hash
    end

    # Outputs non-array value in the form of hash
    # For object, use to_hash. Otherwise, just return the value
    # @param [Object] value Any valid value
    # @return [Hash] Returns the value in the form of hash
    def _to_hash(value)
      if value.is_a?(Array)
        value.compact.map{ |v| _to_hash(v) }
      elsif value.is_a?(Hash)
        {}.tap do |hash|
          value.each { |k, v| hash[k] = _to_hash(v) }
        end
      elsif value.respond_to? :to_hash
        value.to_hash
      else
        value
      end
    end
  end
end
| 29.463415
| 107
| 0.625
|
9129572acf7740a41c6bda22d4868f1feece8507
| 1,058
|
require 'spec_helper'
# Verifies that BitWallet::Transaction maps a raw bitcoind transaction
# hash onto wallet/account/address objects and typed attributes.
describe BitWallet::Transaction, vcr: {record: :once}, bitcoin_cleaner: true do
  describe 'on initialization' do
    it 'should be able to take a bitcoind hash' do
      wallet = build(:wallet)
      account = wallet.accounts.new('numba')
      address = account.addresses.new('mi3G43CcN')
      # Shape of the hash returned by bitcoind's listtransactions RPC.
      args = {"account" => 'numba',
              "address" => "mi3G43CcN",
              "category" => "receive",
              "amount" => 1.5,
              "confirmations" => 0,
              "txid" => "a363e027",
              "time" => 1362239334,
              "timereceived" => 1362239346}
      transaction = described_class.new(wallet, args)
      transaction.account.should == account
      transaction.address.should == address
      transaction.category.should == 'receive'
      transaction.amount.should == 1.5
      transaction.confirmations.should == 0
      transaction.id.should == 'a363e027'
      # Unix timestamps are exposed as Time objects.
      transaction.occurred_at.to_i.should == 1362239334
      transaction.received_at.to_i.should == 1362239346
    end
  end
end
| 34.129032
| 79
| 0.612476
|
1a98543eaff3876b64a3556b7153b37309170eee
| 1,685
|
# Homebrew formula for the yazpp C++ bindings to the YAZ toolkit.
class Yazpp < Formula
  desc "C++ API for the Yaz toolkit"
  homepage "https://www.indexdata.com/yazpp"
  url "http://ftp.indexdata.dk/pub/yazpp/yazpp-1.6.5.tar.gz"
  sha256 "802537484d4247706f31c121df78b29fc2f26126995963102e19ef378f3c39d2"

  bottle do
    cellar :any
    sha256 "fc6c551c54b78b477836368f8f4c24f39bc8324ced4aaed418ed6ebde071c130" => :catalina
    sha256 "ad3ae23deb4f16249fbfc8794a30116911a211c76adbc024948cf9b8842a55b4" => :mojave
    sha256 "870f730cc4ee76700749f4091d111cb0e9a529d43c1ba7cb40b36807e49d9b76" => :high_sierra
    sha256 "794e2e265413005b3c26a0fa38e1ab8957bd1ec13cf4abb63730070181d9beb4" => :sierra
    sha256 "292447a86953bb10361130542d2db9e0c0fc410e9be3b13b8c80891fbfaeec20" => :el_capitan
    sha256 "6f769c30797af9cb98bf02491706f96b7085eed2d5d05c377e51ca5e0bf8541a" => :yosemite
    sha256 "e88036eee1f421fa010db0b524bb4595fd841cdd6958ab021486c09978d8d674" => :x86_64_linux
  end

  depends_on "yaz"

  def install
    system "./configure", "--disable-dependency-tracking",
                          "--prefix=#{prefix}"
    system "make", "install"
  end

  # Smoke test: compile against the ZOOM C++ API and confirm that
  # connecting to an unreachable host raises a catchable exception.
  test do
    (testpath/"test.cpp").write <<~EOS
      #include <iostream>
      #include <yazpp/zoom.h>
      using namespace ZOOM;
      int main(int argc, char **argv){
        try
        {
          connection conn("wrong-example.xyz", 210);
        }
        catch (exception &e)
        {
          std::cout << "Exception caught";
        }
        return 0;
      }
    EOS
    system ENV.cxx, "-std=c++11", "-I#{include}/src", "-L#{lib}",
                    "-lzoompp", "test.cpp", "-o", "test"
    output = shell_output("./test")
    assert_match "Exception caught", output
  end
end
| 32.403846
| 94
| 0.681306
|
21821cd212740826d39a8d324524669e3faaffef
| 140
|
require File.expand_path('../../prepare', __FILE__)

# Smoke-test the Hessian proxy's error path: invoke a method the remote
# service at 127.0.0.1:8080 does not define and print the result/fault.
client = Hessian2::Client.new('http://127.0.0.1:8080/')
puts client.undefined_method
| 23.333333
| 55
| 0.714286
|
4a699561762f0f75d28f306434a7672c4fe80ae8
| 487
|
module Spree
  module Api
    module V2
      module Platform
        # JSON:API serializer for CMS sections on the platform API.
        class CmsSectionSerializer < BaseSerializer
          include ResourceSerializerConcern

          belongs_to :cms_page, serializer: :cms_page
          # The linked resource may be any of the concrete CMS page
          # classes; all of them serialize via the :cms_page serializer.
          belongs_to :linked_resource, polymorphic: {
            Spree::Cms::Pages::StandardPage => :cms_page,
            Spree::Cms::Pages::FeaturePage => :cms_page,
            Spree::Cms::Pages::Homepage => :cms_page
          }
        end
      end
    end
  end
end
| 25.631579
| 57
| 0.603696
|
1cb5350077803ef6c78b0caf3acfd9989b0f8ec7
| 1,254
|
#Copyright (c) 2014 silva(http://silva.vc/)
#Released under the MIT license
#http://opensource.org/licenses/mit-license.php
require 'json'
require './twitty/simple_o_auth'
require './twitty/tweet'
require './twitty/tweeter'
# Minimal Twitter REST v1.1 client built on SimpleOAuth.
class Twitty
  # OAuth credentials plus the SimpleOAuth client built from them.
  attr_accessor :access_token, :access_token_secret, :consumer_key, :consumer_secret, :simple_oauth

  # Yields self so credentials can be assigned in a configuration block,
  # then builds the OAuth client and shares this instance with the
  # Tweet and Tweeter model classes.
  def initialize
    yield(self) if block_given?
    @simple_oauth = SimpleOAuth.new(consumer_key, consumer_secret, access_token, access_token_secret)
    Tweet.twitty = self
    Tweeter.twitty = self
  end

  # Reply to the tweet identified by +tweet_id+ with the text +tweet+.
  def reply(tweet, tweet_id, query = {})
    Tweet.find(tweet_id).reply(tweet, query)
  end

  # Publicly reply to the tweet identified by +tweet_id+.
  #
  # BUG FIX: this method previously took no parameters yet referenced
  # tweet, tweet_id and query, so every call raised NameError. It now
  # mirrors #reply's signature.
  def public_reply(tweet, tweet_id, query = {})
    Tweet.find(tweet_id).public_reply(tweet, query)
  end

  # Post a status update. :to is accepted as an alias for
  # :in_reply_to_status_id.
  def twint(tweet, query = {})
    query[:in_reply_to_status_id] = query[:to] if query.has_key? :to
    @simple_oauth.post('https://api.twitter.com/1.1/statuses/update.json' + SimpleOAuth.query_to_s(query), status: tweet)
  end

  # Fetch the home timeline as an array of Tweet objects. :limit is
  # accepted as an alias for the API's :count parameter.
  def timeline(query = {})
    query[:count] = query[:limit] if query.has_key? :limit
    response = @simple_oauth.get('https://api.twitter.com/1.1/statuses/home_timeline.json' + SimpleOAuth.query_to_s(query))
    JSON.parse(response.body).collect do |status|
      Tweet.new(status)
    end
  end
end
| 28.5
| 123
| 0.720893
|
014a6d65344d372142135f4c70344339b02b271f
| 137
|
# Be sure to restart your server when you modify this file.

# Store session data client-side in a cookie named _Mappr_session.
Rails.application.config.session_store :cookie_store, key: '_Mappr_session'
| 34.25
| 75
| 0.80292
|
013abe5db5a984767c1475b5e4900853833b5d37
| 41
|
module Incognito
VERSION = "0.1.3"
end
| 10.25
| 19
| 0.682927
|
08ab6a9d1e7460496ae10000246336dce71711d8
| 8,709
|
require "rails_helper"
RSpec.describe CaseContactsController, type: :controller do
let(:organization) { build(:casa_org) }
let(:volunteer) { create(:volunteer, :with_casa_cases, casa_org: organization) }
let(:admin) { create(:casa_admin) }
let(:supervisor) { create(:supervisor) }
let(:case_id) { volunteer.casa_cases.first.id }
let(:params) { {case_contact: {casa_case_id: case_id}} }
let!(:contact_type_group_one) do
create(:contact_type_group, casa_org: organization).tap do |group|
create(:contact_type, contact_type_group: group, name: "Attorney")
end
end
let!(:contact_type_group_two) do
create(:contact_type_group, casa_org: organization).tap do |group|
create(:contact_type, contact_type_group: group, name: "Therapist")
end
end
before do
travel_to Date.new(2021, 1, 1)
allow(controller).to receive(:authenticate_user!).and_return(true)
allow(controller).to receive(:current_user).and_return(volunteer)
end
describe "GET new" do
context "when the case has specific contact types assigned" do
before do
casa_case = volunteer.casa_cases.first
casa_case.contact_types = contact_type_group_one.contact_types
casa_case.save
end
it "only assigns that contact types groups to @current_organization_groups" do
get :new, params: params
expect(assigns(:current_organization_groups)).to eq([contact_type_group_one])
end
end
context "when the case does not have specific contact types assigned" do
it "assigns all the organizations contact type groups to @current_organization_groups" do
get :new, params: params
expect(assigns(:current_organization_groups)).to eq([contact_type_group_one, contact_type_group_two])
end
it "calls contact_types_alphabetically" do
allow(controller).to receive(:current_organization).and_return(organization)
allow(organization).to receive_message_chain(
:contact_type_groups,
:joins,
:where,
:alphabetically,
:uniq
)
expect(organization).to receive_message_chain(
:contact_type_groups,
:joins,
:where,
:alphabetically,
:uniq
)
get :new, params: {case_contact: {casa_case_id: case_id}}
end
end
end
describe "POST #create" do
context "with valid params" do
let(:params) { build(:case_contact).attributes }
it "assigns @case_contact" do
post :create, params: {case_contact: params}, format: :js
expect(assigns(:case_contact)).to be_an_instance_of(CaseContact)
end
it "assigns @casa_cases" do
post :create, params: {case_contact: params}, format: :js
expect(assigns(:casa_cases)).to eq(volunteer.casa_cases)
end
it "assigns @current_organization_groups" do
post :create, params: {case_contact: params}, format: :js
expect(assigns(:current_organization_groups)).to eq(organization.contact_type_groups)
end
context "when a casa case was not selected" do
it "does not create a new case contact" do
expect {
post :create, params: {case_contact: params}, format: :js
}.not_to change(CaseContact, :count)
end
it "renders the new template" do
post :create, params: {case_contact: params}, format: :js
expect(flash[:alert]).to eq("At least one case must be selected")
expect(response).to render_template("new")
end
end
context "when a casa case is selected" do
let(:params) { build(:case_contact).attributes.merge("casa_case_id" => [volunteer.casa_cases.first.id.to_s]) }
it "assigns @selected_cases" do
post :create, params: {case_contact: params}, format: :js
expect(assigns(:selected_cases)).to eq(CasaCase.where(id: volunteer.casa_cases.first.id))
end
it "creates a new case contact for each selected case" do
starter_counts = volunteer.casa_cases.map { |cc| cc.case_contacts.count }
expect(starter_counts).to eq([0, 0])
post :create, params: {case_contact: params}, format: :js
after_counts = volunteer.casa_cases.map { |cc| cc.case_contacts.count }
expect(after_counts).to eq([1, 0])
end
it "renders the casa case show template" do
post :create, params: {case_contact: params}, format: :js
expect(flash[:notice]).to include("Case contact was successfully created.")
expect(response).to redirect_to casa_case_path(CaseContact.last.casa_case)
end
it "renders a random thank you message" do
post :create, params: {case_contact: params}, format: :js
expect(
(1..8).map { |n| "#{I18n.t("create", scope: "case_contact")} #{I18n.t("thank_you_#{n}", scope: "case_contact")}" }
).to include(flash[:notice])
end
end
end
context "with invalid params" do
let(:params) { build(:case_contact).attributes }
it "assigns @case_contact" do
post :create, params: {case_contact: params}, format: :js
expect(assigns(:case_contact)).to be_an_instance_of(CaseContact)
end
it "does not create a new case contact" do
expect {
post :create, params: {case_contact: params}, format: :js
}.not_to change(CaseContact, :count)
end
it "renders the new template" do
post :create, params: {case_contact: params}, format: :js
expect(response).to render_template("new")
end
end
end
describe "DELETE destroy" do
let(:case_contact) { create(:case_contact, creator: volunteer) }
context "when logged in as admin" do
let(:case_contact) { create(:case_contact, creator: volunteer) }
before do
allow(controller).to receive(:authenticate_user!).and_return(true)
allow(controller).to receive(:current_user).and_return(admin)
request.env["HTTP_REFERER"] = "http://example.com"
end
context ".destroy" do
before { delete :destroy, params: {id: case_contact.id} }
it { expect(response).to have_http_status(:redirect) }
it { expect(case_contact.reload.deleted?).to be_truthy }
it { expect(flash[:notice]).to eq("Contact is successfully deleted.") }
end
context ".restore" do
before do
case_contact.destroy
post :restore, params: {id: case_contact.id}
end
it { expect(response).to have_http_status(:redirect) }
it { expect(case_contact.reload.deleted?).to be_falsey }
it { expect(flash[:notice]).to eq("Contact is successfully restored.") }
end
end
context "when logged in as supervisor" do
before do
allow(controller).to receive(:authenticate_user!).and_return(true)
allow(controller).to receive(:current_user).and_return(supervisor)
end
context ".destroy" do
before { delete :destroy, params: {id: case_contact.id} }
it { expect(response).to have_http_status(:redirect) }
it { expect(case_contact.reload.deleted?).to be_falsey }
it { expect(flash[:notice]).to eq("Sorry, you are not authorized to perform this action.") }
end
context ".restore" do
before do
case_contact.destroy
post :restore, params: {id: case_contact.id}
end
it { expect(response).to have_http_status(:redirect) }
it { expect(case_contact.reload.deleted?).to be_truthy }
it { expect(flash[:notice]).to eq("Sorry, you are not authorized to perform this action.") }
end
end
context "when logged in as volunteer" do
before do
allow(controller).to receive(:authenticate_user!).and_return(true)
allow(controller).to receive(:current_user).and_return(volunteer)
end
context ".destroy" do
before { delete :destroy, params: {id: case_contact.id} }
it { expect(response).to have_http_status(:redirect) }
it { expect(case_contact.reload.deleted?).to be_falsey }
it { expect(flash[:notice]).to eq("Sorry, you are not authorized to perform this action.") }
end
context ".restore" do
before do
case_contact.destroy
post :restore, params: {id: case_contact.id}
end
it { expect(response).to have_http_status(:redirect) }
it { expect(case_contact.reload.deleted?).to be_truthy }
it { expect(flash[:notice]).to eq("Sorry, you are not authorized to perform this action.") }
end
end
end
end
| 36.439331
| 126
| 0.646458
|
e8adbdc95f9ec2edf9aec211a1996c09a637b65f
| 691
|
module VagrantPlugins
module ProviderIijGp
module Action
class Boot
def initialize(app, env)
@app = app
@logger = Log4r::Logger.new("vagrant_iijgp::action::boot")
end
def call(env)
@env = env
env[:ui].info I18n.t("vagrant.actions.vm.boot.booting")
gp = env[:machine].provider_config.gp_service_code
gc = env[:machine].id
vm = env[:iijapi].gp(gp).gc(gc)
vm.start
env[:ui].info I18n.t("vagrant_iijgp.wait_for_start")
vm.wait_for_start { env[:ui].info "-- current_status: #{vm.status}" }
@app.call(env)
end
end
end
end
end
| 23.827586
| 79
| 0.554269
|
e2b3f4b046fa443056692af771debb8fb75245fc
| 1,205
|
# frozen_string_literal: true
module Mutations
module ResolvesResourceParent
extend ActiveSupport::Concern
include Mutations::ResolvesGroup
include ResolvesProject
included do
argument :project_path, GraphQL::ID_TYPE,
required: false,
description: 'The project full path the resource is associated with.'
argument :group_path, GraphQL::ID_TYPE,
required: false,
description: 'The group full path the resource is associated with.'
end
def ready?(**args)
unless args[:project_path].present? ^ args[:group_path].present?
raise Gitlab::Graphql::Errors::ArgumentError,
'Exactly one of group_path or project_path arguments is required'
end
super
end
private
def authorized_resource_parent_find!(args)
authorized_find!(project_path: args.delete(:project_path),
group_path: args.delete(:group_path))
end
def find_object(project_path: nil, group_path: nil)
if group_path.present?
resolve_group(full_path: group_path)
else
resolve_project(full_path: project_path)
end
end
end
end
| 27.386364
| 84
| 0.661411
|
edf4ed5961c72acd45f11b21ae4ee4e01a28ed97
| 4,657
|
module KnifeProfitbricks
module CreateServer
private
def create_volumes
unless configured_volumes = server_config['volumes']
error("No volumes specified! Please specify \"profitbricks\": {\"server\": \"volumes\": {\"root\": SIZE_IN_GB}} in your node!")
end
threads = configured_volumes.collect do |hd_name, size_in_gb|
if size_in_gb.is_a? Hash
_thread_for_create_volume hd_name, size_in_gb['size'], size_in_gb['type']
else
_thread_for_create_volume hd_name, size_in_gb
end
end
threads.each(&:join)
threads.collect(&:value)
end
def _thread_for_create_volume(*args)
Thread.new do
_create_volume(*args)
end
end
def _create_volume(hd_name, size_in_gb, type='HDD')
name = "#{server_name}_#{hd_name}"
log_message = "Create Volume '#{name}' size: #{size_in_gb} GB"
options = { :name => name, :size => size_in_gb, :type => type } # type SSD
if hd_name == 'root'
log_message = "#{log_message}\nBased on #{boot_image.name}"
options[:image] = boot_image.id
if boot_image.public
options[:imagePassword] = root_password
options[:sshKeys] = [ssh_key]
end
else
options[:licenceType] = 'OTHER'
end
volume = dc.create_volume(options)
volume.wait_for { ready? }
log "#{log_message}\nVolume '#{name}' (#{type}) created\n\n"
volume
rescue => e
log "#{log_message}: Error\n\n"
raise e
end
def do_create_server
ram_in_gb = server_config['ram_in_gb']
ram = ram_in_gb * 1024
cores = server_config['cores']
cpu = server_config['cpu']
cpu ||= self.class::CPU_DEFAULT_KEY
cpu = cpu.to_sym
if self.class::CPU_FAMILIES.has_key? cpu
cpu = self.class::CPU_FAMILIES[cpu]
else
raise "cpu must be #{self.class::CPU_FAMILIES.keys.join ' or '}!"
end
log "Create server '#{server_name}': #{ram_in_gb} GB - #{cores} Cores (#{cpu})"
server = dc.create_server :cores => cores, :ram => ram, :name => server_name,
:cpuFamily => cpu
server.wait_for { ready? }
add_nic_to_server server
server.reload
log "Server '#{server_name}' created"
log ''
server
end
def public_lan
@public_lan ||= _public_lan
end
def _public_lan
log 'Find or create public lan'
public_lan = dc.lans.detect(&:public?) || dc.create_lan(:public => true)
public_lan.wait_for { ready? }
log 'Public lan is ready'
log ''
public_lan
end
def add_nic_to_server(server)
log 'Add nic to server'
options = {:firewallActive => false, :lan => public_lan.id}
add_options_for_reserved_ip options
nic = server.create_nic options
nic.wait_for { ready? }
log 'Nic for server added!'
log ''
nic
end
def add_options_for_reserved_ip(options)
if reserve_ip?
log 'Reserve 1 IP'
ipblock = ProfitBricks::IPBlock.reserve :location => dc_region,
:size => 1, :name => server_name
log "1 IP reserved: #{ipblock.ips.first}"
options[:ips] = ipblock.ips
end
end
def attach_volumes_to_server(volumes)
volumes.each do |volume|
log "Attach volume #{volume.name} to server #{server.name}"
volume.attach(server.id)
volume.wait_for { ready? }
volume.wait_for { volume.reload; !device_number.nil? }
log "Volume #{volume.name} attached at device_number #{volume.device_number}"
log ''
end
server.reload
end
def set_boot_volume_to_server(boot_volume)
log "Set boot volume: #{boot_volume.name}"
server.update :bootVolume => {:id => boot_volume.id,
:type => 'volume', :href => boot_volume.href}
server.wait_for { ready? }
server.reload
boot_volume.reload
log "Volume #{boot_volume.name} is used as boot volume!"
log ''
end
def create_server
@server_is_new = true
log "Create Server #{server_name.inspect}"
log ''
volumes = create_volumes
boot_volume = volumes.detect {|v| v.name.end_with? 'root' }
@server = server = do_create_server
attach_volumes_to_server volumes
set_boot_volume_to_server boot_volume
check_server_state!
change_password_root
upload_ssh_key
change_password_user
server
end
end
end
| 26.310734
| 135
| 0.596736
|
3856eae8db61b1d49aad7819792cba6d8ec85de4
| 1,105
|
class Pycodestyle < Formula
desc "Simple Python style checker in one Python file"
homepage "http://pycodestyle.pycqa.org"
url "https://github.com/PyCQA/pycodestyle/archive/2.4.0.tar.gz"
sha256 "b8656dc08ab4af23d001a42b3f68510d15790df28d7d666d3f91e3fe9bf8e938"
head "https://github.com/PyCQA/pycodestyle.git"
bottle :unneeded
def install
bin.install "pycodestyle.py" => "pycodestyle"
end
test do
# test invocation on a file with no issues
(testpath/"ok.py").write <<~EOS
print(1)
EOS
assert_equal "",
shell_output("#{bin}/pycodestyle ok.py")
# test invocation on a file with a whitespace style issue
(testpath/"ws.py").write <<~EOS
print( 1)
EOS
assert_equal "ws.py:1:7: E201 whitespace after '('\n",
shell_output("#{bin}/pycodestyle ws.py", 1)
# test invocation on a file with an import not at top of file
(testpath/"imp.py").write <<~EOS
pass
import sys
EOS
assert_equal "imp.py:2:1: E402 module level import not at top of file\n",
shell_output("#{bin}/pycodestyle imp.py", 1)
end
end
| 29.078947
| 77
| 0.673303
|
5dc40acd4196c642bc6c40324a904a3015c5f790
| 370
|
# This migration comes from omerta_logger (originally 20150328213641)
class CreateOmertaLoggerCasinoMaxBetHistories < ActiveRecord::Migration
def change
create_table :omerta_logger_casino_max_bet_histories do |t|
t.references :casino
t.datetime :date
t.integer :max_bet
t.index :casino_id, name: 'index_max_bet_casino_id'
end
end
end
| 30.833333
| 71
| 0.762162
|
8731dd6478cf4cbbf2bf9bd10cecbdc686f6d330
| 629
|
class CreateSocialStreamOstatus < ActiveRecord::Migration
def change
create_table :actor_keys do |t|
t.integer :actor_id
t.binary :key_der
t.timestamps
end
add_index "actor_keys", "actor_id"
create_table :remote_subjects, :force => true do |t|
t.integer :actor_id
t.string :webfinger_id
t.text :webfinger_info
t.timestamps
end
add_index "remote_subjects", "actor_id"
add_foreign_key "actor_keys", "actors", :name => "actor_keys_on_actor_id"
add_foreign_key "remote_subjects", "actors", :name => "remote_subjects_on_actor_id"
end
end
| 23.296296
| 87
| 0.675676
|
ac7ec0ab574679fdab5f42d853403e982cf96b5e
| 35
|
module Oat
VERSION = "0.0.1"
end
| 8.75
| 19
| 0.628571
|
03e6cba65b22356ee385765a0c8bbbff4e1c3a3f
| 125
|
# This file exists to support gem autoloading by bundler.
require 'exception_notification'
require 'exception_notifier/rake'
| 31.25
| 57
| 0.832
|
33a78f24c14fddf5c7434bae4c93457e1d19145e
| 1,079
|
class Gifski < Formula
desc "Highest-quality GIF encoder based on pngquant"
homepage "https://gif.ski/"
url "https://github.com/ImageOptim/gifski/archive/1.2.4.tar.gz"
sha256 "8a968a8b9f605746dfeaf1083a0c6a2a3c68e7d8d62f43bb6a6cd58e9a3d260e"
license "AGPL-3.0-only"
revision 1
bottle do
cellar :any
sha256 "e28252b1eca7477fb8bf612345cb89436a76d2ba5d559fc5a9cc11ca66a4c1fc" => :big_sur
sha256 "8ff94a08d8988c62f9ef1991324d0205b29fd128890746555a546f31c1272b9b" => :arm64_big_sur
sha256 "38deabd72c6a81faf79f9441623eb06a80fb45d468ff5e64976915416cf60231" => :catalina
sha256 "a7f1fe0ecc811745025d5da4ead971f292f40e9f097c4195cc9c24b24778fc84" => :mojave
end
depends_on "pkg-config" => :build
depends_on "rust" => :build
depends_on "ffmpeg"
def install
system "cargo", "install", "--features=video", *std_cargo_args
end
test do
png = test_fixtures("test.png")
system bin/"gifski", "-o", "out.gif", png, png
assert_predicate testpath/"out.gif", :exist?
refute_predicate (testpath/"out.gif").size, :zero?
end
end
| 33.71875
| 95
| 0.751622
|
26ed20d89525891f904c4c81cf7838fb7d44ead6
| 52
|
json.partial! "c_classes/c_class", c_class: @c_class
| 52
| 52
| 0.788462
|
1c34480d5dfd3a7dc2bed062833206bc64b22159
| 148
|
# frozen_string_literal: true
class RemoveDeployBuildId < ActiveRecord::Migration[5.2]
def change
remove_column :deploys, :build_id
end
end
| 21.142857
| 56
| 0.777027
|
d5b32b7bc502747b3801d786f1354bac1ce7b268
| 1,192
|
cask 'carbon-copy-cloner' do
version '5.1.8.5702'
sha256 '705788888d32db8e9c84bf88bc5dc12326cdce683b6ed08c0944189733811778'
# bombich.scdn1.secure.raxcdn.com/software/files was verified as official when first introduced to the cask
url "https://bombich.scdn1.secure.raxcdn.com/software/files/ccc-#{version}.zip"
appcast "https://bombich.com/software/updates/ccc.php?os_major=10&os_minor=14&os_bugfix=0&ccc=#{version.split('.').last}&beta=0"
name 'Carbon Copy Cloner'
homepage 'https://bombich.com/'
auto_updates true
app 'Carbon Copy Cloner.app'
uninstall login_item: 'CCC User Agent',
quit: [
'com.bombich.ccc',
'com.bombich.cccuseragent',
]
zap trash: [
'~/Library/Application Support/com.bombich.ccc',
'~/Library/Caches/com.bombich.ccc',
'~/Library/Preferences/com.bombich.ccc.plist',
'~/Library/Preferences/com.bombich.cccuseragent.plist',
'~/Library/Saved Application State/com.bombich.ccc.savedState',
'/Library/LaunchDaemons/com.bombich.ccchelper.plist',
]
end
| 39.733333
| 130
| 0.633389
|
611ce8b6840e77b4a22b58d8d269e3fe330dc794
| 923
|
# coding: utf-8
lib = File.expand_path('../lib', __FILE__)
$LOAD_PATH.unshift(lib) unless $LOAD_PATH.include?(lib)
Gem::Specification.new do |spec|
spec.name = "pairs"
spec.version = "0.2.0"
spec.authors = ["Justin Campbell"]
spec.email = ["justin@justincampbell.me"]
spec.summary = "Constraint solver for pairs"
spec.description = "Constraint solver for pairs"
spec.homepage = "https://github.com/justincampbell/pairs"
spec.license = "MIT"
spec.files = `git ls-files -z`.split("\x0")
spec.executables = spec.files.grep(%r{^bin/}) { |f| File.basename(f) }
spec.test_files = spec.files.grep(%r{^(test|spec|features)/})
spec.require_paths = ["lib"]
spec.add_development_dependency "bundler", "~> 1.6"
spec.add_development_dependency "generative"
spec.add_development_dependency "rake"
spec.add_development_dependency "rspec"
end
| 36.92
| 74
| 0.663055
|
d56233abecb8c384911e1464e64c6e8b9e4665f3
| 879
|
class Kf5ExtraCmakeModules < Formula
desc "Extra modules and scripts for CMake"
homepage "https://www.kde.org"
url "https://download.kde.org/stable/frameworks/5.58/extra-cmake-modules-5.58.0.tar.xz"
sha256 "514011c12eeb2ac99d3118975832a279af2c2eea5e8b36b49c81962930b2ecc7"
head "git://anongit.kde.org/extra-cmake-modules"
depends_on "cmake" => :build
depends_on "ninja" => :build
depends_on "qt" => :build
def install
args = std_cmake_args
args << "-DBUILD_TESTING=OFF"
args << "-DBUILD_HTML_DOCS=OFF"
args << "-DBUILD_QTHELP_DOCS=ON"
mkdir "build" do
system "cmake", "-G", "Ninja", "..", *args
system "ninja"
system "ninja", "install"
prefix.install "install_manifest.txt"
end
end
test do
(testpath/"CMakeLists.txt").write("find_package(ECM REQUIRED)")
system "cmake", ".", "-Wno-dev"
end
end
| 27.46875
| 89
| 0.67463
|
01e64429caeb1cc98c4a37b5a17ffb2fc2021c1e
| 1,152
|
Gem::Specification.new do |s|
s.name = 'logstash-filter-ck'
s.version = '1.0.0'
s.licenses = ['MIT']
s.summary = "Adds ChainKit based event/log tamper protection"
s.description = "This gem is a Logstash plugin required to be installed on top of the Logstash core pipeline using $LS_HOME/bin/logstash-plugin install gemname. This gem is not a stand-alone program"
s.authors = ["PencilDATA Inc."]
s.email = 'info@chainkit.com'
s.homepage = "http://www.chainkit.com"
s.require_paths = ["lib"]
# Files
s.files = Dir["lib/**/*","spec/**/*","*.gemspec","*.md","CONTRIBUTORS","Gemfile","LICENSE","NOTICE", "vendor/jar-dependencies/**/*.jar", "vendor/jar-dependencies/**/*.rb", "VERSION", "docs/**/*"]
# Tests
s.test_files = s.files.grep(%r{^(test|spec|features)/})
# Special flag to let us know this is actually a logstash plugin
s.metadata = { "logstash_plugin" => "true", "logstash_group" => "filter" }
# Gem dependencies
s.add_runtime_dependency "logstash-core-plugin-api", ">= 1.60", "<= 2.99"
s.add_development_dependency 'logstash-devutils'
end
| 41.142857
| 205
| 0.637153
|
39901ac4b3b5cc49ff8fec8b06f6781b57ef8adb
| 136
|
require 'spec_helper'
describe "organisation_updates/update.html.haml" do
pending "add some examples to (or delete) #{__FILE__}"
end
| 22.666667
| 56
| 0.772059
|
38b1d9d6f29f67a6a46445416491d59bdca72814
| 1,551
|
require "yaml"
module Jekyll
class TagPage < Page
def initialize(site, base, dir, tag)
@site = site
@base = base
@dir = dir
@name = 'index.html'
self.process(@name)
self.read_yaml(File.join(base, '_layouts'), 'tag_index.html')
self.data['tag'] = tag
tag_title_prefix = site.config['tag_title_prefix'] || 'Tag: '
self.data['title'] = "#{tag_title_prefix}#{tag}"
end
end
class TagPageGenerator < Generator
safe true
def generate(site)
# puts site.layouts.collections.tags
# check if we have the tags_index layout
if site.layouts.key? 'tag_page'
dir = site.config['tag_dir'] || 'tags'
site.tags.each_key do |tag|
tag_filename = File.join(site.source, '_tags', "#{tag}.md")
if File.file? tag_filename
# Jekyll should handle old tag pages just fine
else
if ENV["CI"]
# Make sure test will not pass when tag page not found
abort "Tag page for '#{tag}' not found when running on CI environment"
end
# We're going to copy page template to the _tags folder
# and add the new tag page because _tags index is already built
tag_page_contents = "---\ntag: #{tag}\nlayout: tag_page\n---\n{% include tag_page_common.html %}"
File.write tag_filename, tag_page_contents
# site.pages << TagPage.new(site, site.source, File.join(dir, tag), tag)
end
end
end
end
end
end
| 29.826923
| 109
| 0.590587
|
214f83d2b77d187862b5d64e01b4a4b079e4ab3c
| 4,252
|
require 'openssl'
require 'uri'
require 'faraday'
module Percy
class Client
module Connection
class NiceErrorMiddleware < Faraday::Response::Middleware
CLIENT_ERROR_STATUS_RANGE = 400...600
def on_complete(env)
error_class = nil
case env[:status]
when 400
error_class = Percy::Client::BadRequestError
when 401
error_class = Percy::Client::UnauthorizedError
when 402
error_class = Percy::Client::PaymentRequiredError
when 403
error_class = Percy::Client::ForbiddenError
when 404
error_class = Percy::Client::NotFoundError
when 409
error_class = Percy::Client::ConflictError
when 500
error_class = Percy::Client::InternalServerError
when 502
error_class = Percy::Client::BadGatewayError
when 503
error_class = Percy::Client::ServiceUnavailableError
when 504
error_class = Percy::Client::GatewayTimeoutError
when 520..530
error_class = Percy::Client::CloudflareError
when CLIENT_ERROR_STATUS_RANGE # Catchall.
error_class = Percy::Client::HttpError
end
return unless error_class
raise error_class.new(
env.status, env.method.upcase, env.url, env.body,
"Got #{env.status} (#{env.method.upcase} #{env.url}):\n#{env.body}",
)
end
end
def connection
return @connection if defined?(@connection)
parsed_uri = URI.parse(config.api_url)
base_url = "#{parsed_uri.scheme}://#{parsed_uri.host}:#{parsed_uri.port}"
@connection = Faraday.new(url: base_url) do |faraday|
faraday.request :token_auth, config.access_token if config.access_token
faraday.use Percy::Client::Connection::NiceErrorMiddleware
faraday.adapter :excon
end
@connection
end
def get(path, options = {})
retries = options[:retries] || 3
begin
response = connection.get do |request|
request.url(path)
request.headers.merge! _headers
end
rescue Faraday::TimeoutError
raise Percy::Client::TimeoutError
rescue Faraday::ConnectionFailed
raise Percy::Client::ConnectionFailed
rescue Percy::Client::ServerError => e
# Retry on 5XX errors.
if (retries -= 1) >= 0
sleep(rand(1..3))
retry
end
raise e
end
JSON.parse(response.body)
end
def post(path, data, options = {})
retries = options[:retries] || 3
begin
response = connection.post do |request|
request.url(path)
request.headers.merge! _headers
request.body = data.to_json
end
rescue Faraday::TimeoutError
if (retries -= 1) >= 0
retry
end
raise Percy::Client::TimeoutError
rescue Faraday::ConnectionFailed
raise Percy::Client::ConnectionFailed
rescue Percy::Client::ServerError => e
# Retry on 5XX errors.
if (retries -= 1) >= 0
sleep(rand(1..3))
retry
end
raise e
end
JSON.parse(response.body)
end
def _headers
{
'Content-Type' => 'application/vnd.api+json',
'User-Agent' => _user_agent,
}
end
def _user_agent
@_user_agent ||= begin
client = [
"Percy/#{_api_version}",
client_info,
"percy-client/#{VERSION}",
].compact.join(' ')
environment = [
environment_info,
"ruby/#{_ruby_version}",
Percy::Client::Environment.ci_info,
].compact.join('; ')
"#{client} (#{environment})"
end
end
def _reset_user_agent
@_user_agent = nil
end
def _api_version
config.api_url.match(/\w+$/).to_s
end
def _ruby_version
"#{RUBY_VERSION}p#{RUBY_PATCHLEVEL}"
end
end
end
end
| 27.25641
| 81
| 0.551976
|
03b3eea7a70a18cc60374c4f6f2ec62cf9da60f9
| 611
|
Pod::Spec.new do |s|
s.name = 'WebPKit'
s.version = '0.0.1'
s.license = { :type => 'MIT', :file => 'LICENSE' }
s.summary = 'A view controller that prompts users to enter a passcode.'
s.homepage = 'https://github.com/TimOliver/WebPKit'
s.author = 'Tim Oliver'
s.source = { :git => 'https://github.com/TimOliver/WebPKit.git', :tag => s.version }
s.source_files = 'WebPKit/**/*.{swift}'
s.swift_version = '5.0'
s.ios.deployment_target = '9.0'
s.osx.deployment_target = '10.9'
s.watchos.deployment_target = '2.0'
s.tvos.deployment_target = '9.0'
s.dependency 'libwebp'
end
| 32.157895
| 88
| 0.631751
|
1d3e9813a9bf1c397f56faca33a944c71b26dfcb
| 3,778
|
require 'spec_helper'
require 'active_support/core_ext/module/delegation'
require 'active_support/core_ext/string'
require 'doorkeeper/oauth/scopes'
module Doorkeeper::OAuth
describe Scopes do
describe '#add' do
it 'allows you to add scopes with symbols' do
subject.add :public
expect(subject.all).to eq(['public'])
end
it 'allows you to add scopes with strings' do
subject.add 'public'
expect(subject.all).to eq(['public'])
end
it 'do not add already included scopes' do
subject.add :public
subject.add :public
expect(subject.all).to eq(['public'])
end
end
describe '#exists' do
before do
subject.add :public
end
it 'returns true if scope with given name is present' do
expect(subject.exists?('public')).to be_truthy
end
it 'returns false if scope with given name does not exist' do
expect(subject.exists?('other')).to be_falsey
end
it 'handles symbols' do
expect(subject.exists?(:public)).to be_truthy
expect(subject.exists?(:other)).to be_falsey
end
end
describe '.from_string' do
let(:string) { 'public write' }
subject { Scopes.from_string(string) }
it { should be_a(Scopes) }
describe '#all' do
it 'should be an array of the expected scopes' do
scopes_array = subject.all
expect(scopes_array.size).to eq(2)
expect(scopes_array).to include('public')
expect(scopes_array).to include('write')
end
end
end
describe '#+' do
it 'can add to another scope object' do
scopes = Scopes.from_string('public') + Scopes.from_string('admin')
expect(scopes.all).to eq(%w(public admin))
end
it 'does not change the existing object' do
origin = Scopes.from_string('public')
new_scope = origin + Scopes.from_string('admin')
expect(origin.to_s).to eq('public')
end
it 'raises an error if cannot handle addition' do
expect do
Scopes.from_string('public') + 'admin'
end.to raise_error(NoMethodError)
end
end
describe '#==' do
it 'is equal to another set of scopes' do
expect(Scopes.from_string('public')).to eq(Scopes.from_string('public'))
end
it 'is equal to another set of scopes with no particular order' do
expect(Scopes.from_string('public write')).to eq(Scopes.from_string('write public'))
end
it 'differs from another set of scopes when scopes are not the same' do
expect(Scopes.from_string('public write')).not_to eq(Scopes.from_string('write'))
end
end
describe '#has_scopes?' do
subject { Scopes.from_string('public admin') }
it 'returns true when at least one scope is included' do
expect(subject.has_scopes?(Scopes.from_string('public'))).to be_truthy
end
it 'returns true when all scopes are included' do
expect(subject.has_scopes?(Scopes.from_string('public admin'))).to be_truthy
end
it 'is true if all scopes are included in any order' do
expect(subject.has_scopes?(Scopes.from_string('admin public'))).to be_truthy
end
it 'is false if no scopes are included' do
expect(subject.has_scopes?(Scopes.from_string('notexistent'))).to be_falsey
end
it 'returns false when any scope is not included' do
expect(subject.has_scopes?(Scopes.from_string('public nope'))).to be_falsey
end
it 'is false if no scopes are included even for existing ones' do
expect(subject.has_scopes?(Scopes.from_string('public admin notexistent'))).to be_falsey
end
end
end
end
| 30.467742
| 96
| 0.64108
|
61407f7c569d145e8b4ceba591dd24ed895fc95f
| 917
|
require 'contacts/version'
module Contacts
Identifier = 'Ruby Contacts v' + VERSION::STRING
# An object that represents a single contact
class Contact
attr_accessor :name, :username
attr_reader :emails
def initialize(email = nil, name = nil, username = nil)
@emails = []
@emails << email if email
@name = name
@username = username
end
def email
@emails.first
end
def inspect
%!#<Contacts::Contact "#{name}" (#{email})>!
end
end
def self.verbose?
'irb' == $0
end
class Error < StandardError
end
class TooManyRedirects < Error
attr_reader :response, :location
MAX_REDIRECTS = 2
def initialize(response)
@response = response
@location = @response['Location']
super "exceeded maximum of #{MAX_REDIRECTS} redirects (Location: #{location})"
end
end
end
| 19.104167
| 84
| 0.608506
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.