_id
stringlengths
2
6
title
stringlengths
9
130
partition
stringclasses
3 values
text
stringlengths
66
10.5k
language
stringclasses
1 value
meta_information
dict
q16600
Algolia.Index.browse
train
# Browse the index. With a block, iterates all records via IndexBrowser;
# without one, returns a single page from the browse endpoint.
def browse(page_or_query_parameters = nil, hits_per_page = nil, request_options = {}, &block)
  params = {}
  case page_or_query_parameters
  when Hash then params.merge!(page_or_query_parameters)
  when nil  then nil
  else params[:page] = page_or_query_parameters
  end
  case hits_per_page
  when Hash then params.merge!(hits_per_page)
  when nil  then nil
  else params[:hitsPerPage] = hits_per_page
  end
  if block_given?
    IndexBrowser.new(client, name, params).browse(request_options, &block)
  else
    params[:page] ||= 0
    params[:hitsPerPage] ||= 1000
    client.get(Protocol.browse_uri(name, params), :read, request_options)
  end
end
ruby
{ "resource": "" }
q16601
Algolia.Index.browse_from
train
# Resume a browse from a previously returned cursor.
def browse_from(cursor, hits_per_page = 1000, request_options = {})
  payload = { :cursor => cursor, :hitsPerPage => hits_per_page }
  client.post(Protocol.browse_uri(name), payload.to_json, :read, request_options)
end
ruby
{ "resource": "" }
q16602
Algolia.Index.get_object
train
# Fetch one record, optionally restricting the attributes returned.
def get_object(objectID, attributes_to_retrieve = nil, request_options = {})
  attributes_to_retrieve = attributes_to_retrieve.join(',') if attributes_to_retrieve.is_a?(Array)
  uri =
    if attributes_to_retrieve.nil?
      Protocol.object_uri(name, objectID, nil)
    else
      Protocol.object_uri(name, objectID, { :attributes => attributes_to_retrieve })
    end
  client.get(uri, :read, request_options)
end
ruby
{ "resource": "" }
q16603
Algolia.Index.get_objects
train
# Fetch several records in one round-trip via the multi-objects endpoint.
def get_objects(objectIDs, attributes_to_retrieve = nil, request_options = {})
  attributes_to_retrieve = attributes_to_retrieve.join(',') if attributes_to_retrieve.is_a?(Array)
  requests = objectIDs.map do |id|
    request = { :indexName => name, :objectID => id.to_s }
    request[:attributesToRetrieve] = attributes_to_retrieve unless attributes_to_retrieve.nil?
    request
  end
  client.post(Protocol.objects_uri, { :requests => requests }.to_json, :read, request_options)['results']
end
ruby
{ "resource": "" }
q16604
Algolia.Index.save_object
train
# Create or replace a single object (PUT keyed by objectID).
def save_object(object, objectID = nil, request_options = {})
  uri = Protocol.object_uri(name, get_objectID(object, objectID))
  client.put(uri, object.to_json, :write, request_options)
end
ruby
{ "resource": "" }
q16605
Algolia.Index.save_object!
train
# Synchronous variant of #save_object: waits for the indexing task.
def save_object!(object, objectID = nil, request_options = {})
  save_object(object, objectID, request_options).tap do |response|
    wait_task(response['taskID'], WAIT_TASK_DEFAULT_TIME_BEFORE_RETRY, request_options)
  end
end
ruby
{ "resource": "" }
q16606
Algolia.Index.save_objects!
train
# Synchronous variant of #save_objects: waits for the indexing task.
def save_objects!(objects, request_options = {})
  save_objects(objects, request_options).tap do |response|
    wait_task(response['taskID'], WAIT_TASK_DEFAULT_TIME_BEFORE_RETRY, request_options)
  end
end
ruby
{ "resource": "" }
q16607
Algolia.Index.replace_all_objects
train
# Atomically replace all objects in the index: build a temporary index,
# copy settings/synonyms/rules into it, batch the new objects in, then
# move the temporary index over the live one (an atomic rename on the
# Algolia side).
#
# request_options[:safe] (or 'safe') makes every step wait for its
# Algolia task to complete before continuing.
def replace_all_objects(objects, request_options = {})
  safe = request_options[:safe] || request_options['safe'] || false
  request_options.delete(:safe)
  request_options.delete('safe')

  # Randomized suffix to avoid collisions between concurrent replaces.
  tmp_index = @client.init_index(@name + '_tmp_' + rand(10000000).to_s)

  responses = []

  # Copy index configuration (but not records) into the temporary index.
  scope = ['settings', 'synonyms', 'rules']
  res = @client.copy_index(@name, tmp_index.name, scope, request_options)
  responses << res

  if safe
    wait_task(res['taskID'], WAIT_TASK_DEFAULT_TIME_BEFORE_RETRY, request_options)
  end

  # Push objects in batches of 1000.
  batch = []
  batch_size = 1000
  count = 0
  objects.each do |object|
    batch << object
    count += 1
    if count == batch_size
      res = tmp_index.add_objects(batch, request_options)
      responses << res
      batch = []
      count = 0
    end
  end

  # Flush any trailing partial batch.
  if batch.any?
    res = tmp_index.add_objects(batch, request_options)
    responses << res
  end

  if safe
    responses.each do |res|
      tmp_index.wait_task(res['taskID'], WAIT_TASK_DEFAULT_TIME_BEFORE_RETRY, request_options)
    end
  end

  # Atomic swap: the temporary index replaces the live one.
  res = @client.move_index(tmp_index.name, @name, request_options)
  responses << res

  if safe
    wait_task(res['taskID'], WAIT_TASK_DEFAULT_TIME_BEFORE_RETRY, request_options)
  end

  responses
end
ruby
{ "resource": "" }
q16608
Algolia.Index.partial_update_objects
train
# Partially update several objects in one batch.
# NOTE(review): parameter name has a typo ('exits' vs 'exists'); kept
# as-is for compatibility.
def partial_update_objects(objects, create_if_not_exits = true, request_options = {})
  action = create_if_not_exits ? 'partialUpdateObject' : 'partialUpdateObjectNoCreate'
  batch(build_batch(action, objects, true), request_options)
end
ruby
{ "resource": "" }
q16609
Algolia.Index.partial_update_objects!
train
# Synchronous variant of #partial_update_objects: waits for the task.
def partial_update_objects!(objects, create_if_not_exits = true, request_options = {})
  partial_update_objects(objects, create_if_not_exits, request_options).tap do |response|
    wait_task(response['taskID'], WAIT_TASK_DEFAULT_TIME_BEFORE_RETRY, request_options)
  end
end
ruby
{ "resource": "" }
q16610
Algolia.Index.delete_object
train
# Delete one object; refuses a blank objectID so the DELETE cannot hit
# the wrong endpoint.
def delete_object(objectID, request_options = {})
  if objectID.nil? || objectID == ''
    raise ArgumentError.new('objectID must not be blank')
  end
  client.delete(Protocol.object_uri(name, objectID), :write, request_options)
end
ruby
{ "resource": "" }
q16611
Algolia.Index.delete_object!
train
# Synchronous variant of #delete_object: waits for the task.
def delete_object!(objectID, request_options = {})
  delete_object(objectID, request_options).tap do |response|
    wait_task(response['taskID'], WAIT_TASK_DEFAULT_TIME_BEFORE_RETRY, request_options)
  end
end
ruby
{ "resource": "" }
q16612
Algolia.Index.delete_objects
train
# Batch-delete the given objectIDs.
def delete_objects(objects, request_options = {})
  check_array(objects)
  requests = objects.map { |objectID| { :objectID => objectID } }
  batch(build_batch('deleteObject', requests, false), request_options)
end
ruby
{ "resource": "" }
q16613
Algolia.Index.delete_objects!
train
# Synchronous variant of #delete_objects: waits for the task.
def delete_objects!(objects, request_options = {})
  delete_objects(objects, request_options).tap do |response|
    wait_task(response['taskID'], WAIT_TASK_DEFAULT_TIME_BEFORE_RETRY, request_options)
  end
end
ruby
{ "resource": "" }
q16614
Algolia.Index.delete_by_query
train
# Delete every record matching +query+ (and optional +params+) by
# browsing for matching objectIDs and issuing a batched delete.
# Raises if both query and params are nil to guard against wiping the
# whole index by accident (use `clear` for that).
def delete_by_query(query, params = nil, request_options = {})
  raise ArgumentError.new('query cannot be nil, use the `clear` method to wipe the entire index') if query.nil? && params.nil?
  params = sanitized_delete_by_query_params(params)
  params[:query] = query
  params[:hitsPerPage] = 1000
  params[:distinct] = false
  # Only objectIDs are needed to build the delete batch.
  params[:attributesToRetrieve] = ['objectID']
  params[:cursor] = ''
  ids = []
  # Follow the browse cursor until the server stops returning one.
  while params[:cursor] != nil
    result = browse(params, nil, request_options)
    params[:cursor] = result['cursor']
    hits = result['hits']
    break if hits.empty?
    ids += hits.map { |hit| hit['objectID'] }
  end
  delete_objects(ids, request_options)
end
ruby
{ "resource": "" }
q16615
Algolia.Index.delete_by_query!
train
# Synchronous variant of #delete_by_query: waits for the delete task
# (when a response with a task was returned).
def delete_by_query!(query, params = nil, request_options = {})
  response = delete_by_query(query, params, request_options)
  wait_task(response['taskID'], WAIT_TASK_DEFAULT_TIME_BEFORE_RETRY, request_options) if response
  response
end
ruby
{ "resource": "" }
q16616
Algolia.Index.clear!
train
# Synchronous variant of #clear: waits for the task.
def clear!(request_options = {})
  clear(request_options).tap do |response|
    wait_task(response['taskID'], WAIT_TASK_DEFAULT_TIME_BEFORE_RETRY, request_options)
  end
end
ruby
{ "resource": "" }
q16617
Algolia.Index.set_settings
train
# Push new index settings.
def set_settings(new_settings, options = {}, request_options = {})
  uri = Protocol.settings_uri(name, options)
  client.put(uri, new_settings.to_json, :write, request_options)
end
ruby
{ "resource": "" }
q16618
Algolia.Index.set_settings!
train
# Synchronous variant of #set_settings: waits for the task.
def set_settings!(new_settings, options = {}, request_options = {})
  set_settings(new_settings, options, request_options).tap do |response|
    wait_task(response['taskID'], WAIT_TASK_DEFAULT_TIME_BEFORE_RETRY, request_options)
  end
end
ruby
{ "resource": "" }
q16619
Algolia.Index.get_settings
train
# Fetch index settings, defaulting to the v2 settings format unless the
# caller pinned a version.
def get_settings(options = {}, request_options = {})
  unless options[:getVersion] || options['getVersion']
    options['getVersion'] = 2
  end
  client.get(Protocol.settings_uri(name, options).to_s, :read, request_options)
end
ruby
{ "resource": "" }
q16620
Algolia.Index.get_api_key
train
# Retrieve an index-scoped API key.
def get_api_key(key, request_options = {})
  uri = Protocol.index_key_uri(name, key)
  client.get(uri, :read, request_options)
end
ruby
{ "resource": "" }
q16621
Algolia.Index.delete_api_key
train
# Delete an index-scoped API key.
def delete_api_key(key, request_options = {})
  uri = Protocol.index_key_uri(name, key)
  client.delete(uri, :write, request_options)
end
ruby
{ "resource": "" }
q16622
Algolia.Index.batch
train
# Send a raw batch request to the index's batch endpoint.
def batch(request, request_options = {})
  uri = Protocol.batch_uri(name)
  client.post(uri, request.to_json, :batch, request_options)
end
ruby
{ "resource": "" }
q16623
Algolia.Index.batch!
train
# Synchronous variant of #batch: waits for the task.
def batch!(request, request_options = {})
  batch(request, request_options).tap do |response|
    wait_task(response['taskID'], WAIT_TASK_DEFAULT_TIME_BEFORE_RETRY, request_options)
  end
end
ruby
{ "resource": "" }
q16624
Algolia.Index.search_for_facet_values
train
# Search within a facet's values (facet query endpoint).
def search_for_facet_values(facet_name, facet_query, search_parameters = {}, request_options = {})
  body = search_parameters.clone
  body['facetQuery'] = facet_query
  client.post(Protocol.search_facet_uri(name, facet_name), body.to_json, :read, request_options)
end
ruby
{ "resource": "" }
q16625
Algolia.Index.search_disjunctive_faceting
train
# Perform a search with disjunctive (OR-able) faceting: one main query
# for hits + regular facets, plus one extra query per disjunctive facet
# computed while excluding that facet's own refinements, then merge the
# per-facet counts back into the main answer under 'disjunctiveFacets'.
#
# @param query              [String] full-text query
# @param disjunctive_facets [String, Array] facets treated as ORed
# @param params             [Hash] extra search parameters
# @param refinements        [Hash{facet => Array<value>}] active refinements
def search_disjunctive_faceting(query, disjunctive_facets, params = {}, refinements = {}, request_options = {})
  raise ArgumentError.new('Argument "disjunctive_facets" must be a String or an Array') unless disjunctive_facets.is_a?(String) || disjunctive_facets.is_a?(Array)
  raise ArgumentError.new('Argument "refinements" must be a Hash of Arrays') if !refinements.is_a?(Hash) || !refinements.select { |k, v| !v.is_a?(Array) }.empty?

  # extract disjunctive facets & associated refinements
  disjunctive_facets = disjunctive_facets.split(',') if disjunctive_facets.is_a?(String)
  disjunctive_refinements = {}
  refinements.each do |k, v|
    disjunctive_refinements[k] = v if disjunctive_facets.include?(k) || disjunctive_facets.include?(k.to_s)
  end

  # build queries
  queries = []

  ## hits + regular facets query
  filters = []
  refinements.to_a.each do |k, values|
    r = values.map { |v| "#{k}:#{v}" }
    if disjunctive_refinements[k.to_s] || disjunctive_refinements[k.to_sym]
      # disjunctive refinements are ORed (nested array => OR in facetFilters)
      filters << r
    else
      # regular refinements are ANDed (flat entries => AND in facetFilters)
      filters += r
    end
  end
  queries << params.merge({ :index_name => self.name, :query => query, :facetFilters => filters })

  ## one query per disjunctive facet (use all refinements but the current one + hitsPerPage=1 + single facet)
  disjunctive_facets.each do |disjunctive_facet|
    filters = []
    refinements.each do |k, values|
      if k.to_s != disjunctive_facet.to_s
        r = values.map { |v| "#{k}:#{v}" }
        if disjunctive_refinements[k.to_s] || disjunctive_refinements[k.to_sym]
          # disjunctive refinements are ORed
          filters << r
        else
          # regular refinements are ANDed
          filters += r
        end
      end
    end
    queries << params.merge({ :index_name => self.name, :query => query, :page => 0, :hitsPerPage => 1, :attributesToRetrieve => [], :attributesToHighlight => [], :attributesToSnippet => [], :facets => disjunctive_facet, :facetFilters => filters, :analytics => false })
  end
  answers = client.multiple_queries(queries, { :request_options => request_options })

  # aggregate answers
  ## first answer stores the hits + regular facets
  aggregated_answer = answers['results'][0]
  ## others store the disjunctive facets
  aggregated_answer['disjunctiveFacets'] = {}
  answers['results'].each_with_index do |a, i|
    next if i == 0
    a['facets'].each do |facet, values|
      ## add the facet to the disjunctive facet hash
      aggregated_answer['disjunctiveFacets'][facet] = values
      ## concatenate missing refinements (refined values absent from the
      ## returned counts are shown with a count of 0)
      (disjunctive_refinements[facet.to_s] || disjunctive_refinements[facet.to_sym] || []).each do |r|
        if aggregated_answer['disjunctiveFacets'][facet][r].nil?
          aggregated_answer['disjunctiveFacets'][facet][r] = 0
        end
      end
    end
  end
  aggregated_answer
end
ruby
{ "resource": "" }
q16626
Algolia.Index.get_synonym
train
# Fetch a synonym record by objectID.
def get_synonym(objectID, request_options = {})
  uri = Protocol.synonym_uri(name, objectID)
  client.get(uri, :read, request_options)
end
ruby
{ "resource": "" }
q16627
Algolia.Index.delete_synonym!
train
# Synchronous variant of #delete_synonym: waits for the task.
def delete_synonym!(objectID, forward_to_replicas = false, request_options = {})
  delete_synonym(objectID, forward_to_replicas, request_options).tap do |response|
    wait_task(response['taskID'], WAIT_TASK_DEFAULT_TIME_BEFORE_RETRY, request_options)
  end
end
ruby
{ "resource": "" }
q16628
Algolia.Index.save_synonym
train
# Create or update a synonym record.
def save_synonym(objectID, synonym, forward_to_replicas = false, request_options = {})
  uri = "#{Protocol.synonym_uri(name, objectID)}?forwardToReplicas=#{forward_to_replicas}"
  client.put(uri, synonym.to_json, :write, request_options)
end
ruby
{ "resource": "" }
q16629
Algolia.Index.save_synonym!
train
# Synchronous variant of #save_synonym: waits for the task.
def save_synonym!(objectID, synonym, forward_to_replicas = false, request_options = {})
  save_synonym(objectID, synonym, forward_to_replicas, request_options).tap do |response|
    wait_task(response['taskID'], WAIT_TASK_DEFAULT_TIME_BEFORE_RETRY, request_options)
  end
end
ruby
{ "resource": "" }
q16630
Algolia.Index.clear_synonyms!
train
# Synchronous variant of #clear_synonyms: waits for the task.
def clear_synonyms!(forward_to_replicas = false, request_options = {})
  clear_synonyms(forward_to_replicas, request_options).tap do |response|
    wait_task(response['taskID'], WAIT_TASK_DEFAULT_TIME_BEFORE_RETRY, request_options)
  end
end
ruby
{ "resource": "" }
q16631
Algolia.Index.replace_all_synonyms
train
# Replace every synonym in the index (batch with the replace flag set).
# forwardToReplicas may be passed via request_options (symbol or string).
def replace_all_synonyms(synonyms, request_options = {})
  forward = request_options[:forwardToReplicas] || request_options['forwardToReplicas'] || false
  batch_synonyms(synonyms, forward, true, request_options)
end
ruby
{ "resource": "" }
q16632
Algolia.Index.replace_all_synonyms!
train
# Synchronous variant of #replace_all_synonyms: waits for the task.
def replace_all_synonyms!(synonyms, request_options = {})
  replace_all_synonyms(synonyms, request_options).tap do |response|
    wait_task(response['taskID'], WAIT_TASK_DEFAULT_TIME_BEFORE_RETRY, request_options)
  end
end
ruby
{ "resource": "" }
q16633
Algolia.Index.export_synonyms
train
# Page through all synonyms via search_synonyms, yielding each one when
# a block is given. Returns the full list.
def export_synonyms(hits_per_page = 100, request_options = {}, &_block)
  synonyms = []
  page = 0
  loop do
    hits = search_synonyms('', { :hitsPerPage => hits_per_page, :page => page }, request_options)['hits']
    hits.each do |synonym|
      synonyms << synonym
      yield synonym if block_given?
    end
    # A short page means we've reached the end.
    break if hits.size < hits_per_page
    page += 1
  end
  synonyms
end
ruby
{ "resource": "" }
q16634
Algolia.Index.get_rule
train
# Fetch a query rule by objectID.
def get_rule(objectID, request_options = {})
  uri = Protocol.rule_uri(name, objectID)
  client.get(uri, :read, request_options)
end
ruby
{ "resource": "" }
q16635
Algolia.Index.delete_rule!
train
# Synchronous variant of #delete_rule: waits for the task to complete.
# Dropped the redundant explicit `return` for consistency with the
# other bang methods in this file (implicit return of the last value).
def delete_rule!(objectID, forward_to_replicas = false, request_options = {})
  res = delete_rule(objectID, forward_to_replicas, request_options)
  wait_task(res['taskID'], WAIT_TASK_DEFAULT_TIME_BEFORE_RETRY, request_options)
  res
end
ruby
{ "resource": "" }
q16636
Algolia.Index.save_rule
train
# Create or update a query rule; the objectID must be present.
def save_rule(objectID, rule, forward_to_replicas = false, request_options = {})
  if objectID.nil? || objectID == ''
    raise ArgumentError.new('objectID must not be blank')
  end
  uri = "#{Protocol.rule_uri(name, objectID)}?forwardToReplicas=#{forward_to_replicas}"
  client.put(uri, rule.to_json, :write, request_options)
end
ruby
{ "resource": "" }
q16637
Algolia.Index.save_rule!
train
# Synchronous variant of #save_rule: waits for the task to complete.
# Dropped the redundant explicit `return` for consistency with the
# other bang methods in this file.
def save_rule!(objectID, rule, forward_to_replicas = false, request_options = {})
  res = save_rule(objectID, rule, forward_to_replicas, request_options)
  wait_task(res['taskID'], WAIT_TASK_DEFAULT_TIME_BEFORE_RETRY, request_options)
  res
end
ruby
{ "resource": "" }
q16638
Algolia.Index.clear_rules!
train
# Synchronous variant of #clear_rules: waits for the task to complete.
# Dropped the redundant explicit `return` for consistency with the
# other bang methods in this file.
def clear_rules!(forward_to_replicas = false, request_options = {})
  res = clear_rules(forward_to_replicas, request_options)
  wait_task(res['taskID'], WAIT_TASK_DEFAULT_TIME_BEFORE_RETRY, request_options)
  res
end
ruby
{ "resource": "" }
q16639
Algolia.Index.replace_all_rules
train
# Replace every query rule in the index (batch with replace flag set).
def replace_all_rules(rules, request_options = {})
  forward = request_options[:forwardToReplicas] || request_options['forwardToReplicas'] || false
  batch_rules(rules, forward, true, request_options)
end
ruby
{ "resource": "" }
q16640
Algolia.Index.replace_all_rules!
train
# Synchronous variant of #replace_all_rules: waits for the task.
def replace_all_rules!(rules, request_options = {})
  replace_all_rules(rules, request_options).tap do |response|
    wait_task(response['taskID'], WAIT_TASK_DEFAULT_TIME_BEFORE_RETRY, request_options)
  end
end
ruby
{ "resource": "" }
q16641
Algolia.Index.export_rules
train
# Page through all query rules via search_rules, yielding each one when
# a block is given. Returns the full list.
def export_rules(hits_per_page = 100, request_options = {}, &_block)
  rules = []
  page = 0
  loop do
    hits = search_rules('', { :hits_per_page => hits_per_page, :page => page }, request_options)['hits']
    hits.each do |rule|
      rules << rule
      yield rule if block_given?
    end
    break if hits.size < hits_per_page
    page += 1
  end
  rules
end
ruby
{ "resource": "" }
q16642
GraphQL::Models.AttributeLoader.hash_to_condition
train
# Build an AND-ed Arel predicate from an attribute => value hash.
# Multi-element arrays become IN clauses; single-element arrays and
# scalars become equality checks.
def hash_to_condition(table, hash)
  hash.map { |attr, value|
    column = table[attr]
    if value.is_a?(Array)
      value.size > 1 ? column.in(value) : column.eq(value[0])
    else
      column.eq(value)
    end
  }.reduce { |combined, condition| combined.and(condition) }
end
ruby
{ "resource": "" }
q16643
Squeel.DSL.method_missing
train
# DSL entry point: bare identifiers become Stubs, `name(SomeClass)`
# becomes an inner join, and `name(args...)` becomes a SQL function.
def method_missing(method_id, *args)
  super if method_id == :to_ary
  case
  when args.empty?
    Nodes::Stub.new method_id
  when args.size == 1 && Class === args[0]
    Nodes::Join.new(method_id, InnerJoin, args[0])
  else
    Nodes::Function.new method_id, args
  end
end
ruby
{ "resource": "" }
q16644
Dragonfly.Job.initialize_copy
train
# Copy hook: rebuild each step bound to this (new) job, and dup mutable
# state so copies don't share content or url attributes.
def initialize_copy(other)
  @steps = other.steps.map { |step| step.class.new(self, *step.args) }
  @content = other.content.dup
  @url_attributes = other.url_attributes.dup
end
ruby
{ "resource": "" }
q16645
Dragonfly.Content.update
train
# Replace this content's data with +obj+, resetting the temp object and
# analyser cache, then merge metadata: first from the object itself (if
# it responds to #meta), then from the explicit +meta+ argument.
def update(obj, meta=nil)
  meta ||= {}
  self.temp_object = TempObject.new(obj, meta['name'])
  # Keep any existing name unless the new temp object provides one.
  self.meta['name'] ||= temp_object.name if temp_object.name
  clear_analyser_cache
  add_meta(obj.meta) if obj.respond_to?(:meta)
  add_meta(meta)
  self
end
ruby
{ "resource": "" }
q16646
Dragonfly.Content.shell_eval
train
# Run the shell command built by the block against this content's path,
# escaping the path unless opts[:escape] is false.
def shell_eval(opts={})
  escape = opts[:escape] != false
  target = escape ? shell.escape(path) : path
  run yield(target), :escape => escape
end
ruby
{ "resource": "" }
q16647
Dragonfly.Content.shell_generate
train
# Generate new content by running a shell command that writes into a
# fresh tempfile, then adopt that tempfile as our content.
def shell_generate(opts={})
  extension = opts[:ext] || self.ext
  escape = opts[:escape] != false
  tempfile = Utils.new_tempfile(extension)
  target = escape ? shell.escape(tempfile.path) : tempfile.path
  run(yield(target), :escape => escape)
  update(tempfile)
end
ruby
{ "resource": "" }
q16648
LogStashLogger.MultiLogger.method_missing
train
# Fan any unknown method call out to every wrapped logger that responds
# to it.
#
# Bug fix: arguments were previously forwarded as a single Array
# (`logger.send(name, args)`), so e.g. `multi.info("msg")` invoked
# `logger.info(["msg"])`. Splat them so each logger receives the
# original argument list.
def method_missing(name, *args, &block)
  @loggers.each do |logger|
    logger.send(name, *args, &block) if logger.respond_to?(name)
  end
end
ruby
{ "resource": "" }
q16649
LogStashLogger.Buffer.buffer_initialize
train
# Set up buffering configuration and state.
#
# Options (all optional):
#   :max_items    - flush when this many items are pending (default 50)
#   :max_interval - flush after this many seconds (default 5)
#   :logger       - logger used for debug/warn output
#   :autoflush    - flush after every receive (default true)
#   :drop_messages_on_flush_error - reset instead of re-queueing on error
#   :drop_messages_on_full_buffer - drop instead of blocking when full
#   :flush_at_exit - register an at_exit final flush
#
# Raises ArgumentError unless the including class defines #flush.
def buffer_initialize(options={})
  if ! self.class.method_defined?(:flush)
    raise ArgumentError, "Any class including Stud::Buffer must define a flush() method."
  end
  @buffer_config = {
    :max_items => options[:max_items] || 50,
    :max_interval => options[:max_interval] || 5,
    :logger => options[:logger] || nil,
    :autoflush => options.fetch(:autoflush, true),
    # Cache the respond-to checks so hot paths don't repeat them.
    :has_on_flush_error => self.class.method_defined?(:on_flush_error),
    :has_on_full_buffer_receive => self.class.method_defined?(:on_full_buffer_receive),
    :drop_messages_on_flush_error => options.fetch(:drop_messages_on_flush_error, false),
    :drop_messages_on_full_buffer => options.fetch(:drop_messages_on_full_buffer, false),
    :flush_at_exit => options.fetch(:flush_at_exit, false)
  }
  if @buffer_config[:flush_at_exit]
    at_exit { buffer_flush(final: true) }
  end
  reset_buffer
end
ruby
{ "resource": "" }
q16650
LogStashLogger.Buffer.buffer_receive
train
# Queue an event (optionally under a group key), initializing the
# buffer lazily. Blocks (or drops, per configuration) while the buffer
# is full, and autoflushes after enqueueing when configured to do so.
def buffer_receive(event, group=nil)
  buffer_initialize if ! @buffer_state

  # block if we've accumulated too many events
  while buffer_full? do
    on_full_buffer_receive(
      :pending => @buffer_state[:pending_count],
      :outgoing => @buffer_state[:outgoing_count]
    ) if @buffer_config[:has_on_full_buffer_receive]

    if @buffer_config[:drop_messages_on_full_buffer]
      reset_buffer
    else
      sleep 0.1
    end
  end

  @buffer_state[:pending_mutex].synchronize do
    @buffer_state[:pending_items][group] << event
    @buffer_state[:pending_count] += 1
  end

  if @buffer_config[:autoflush]
    buffer_flush(force: true)
  end
end
ruby
{ "resource": "" }
q16651
LogStashLogger.Buffer.buffer_flush
train
# Flush outgoing items, returning the number of events flushed.
#
# options:
#   :force - flush even if neither max_items nor max_interval is reached
#   :final - wait for the flush lock (used at shutdown) instead of
#            returning 0 when another flush is already in progress
def buffer_flush(options={})
  force = options[:force] || options[:final]
  final = options[:final]

  # final flush will wait for lock, so we are sure to flush out all buffered events
  if options[:final]
    @buffer_state[:flush_mutex].lock
  elsif ! @buffer_state[:flush_mutex].try_lock # failed to get lock, another flush already in progress
    return 0
  end

  items_flushed = 0

  begin
    time_since_last_flush = (Time.now - @buffer_state[:last_flush])

    return 0 if @buffer_state[:pending_count] == 0
    return 0 if (!force) && (@buffer_state[:pending_count] < @buffer_config[:max_items]) && (time_since_last_flush < @buffer_config[:max_interval])

    # Swap pending into outgoing under the pending lock so receivers can
    # keep queueing while we flush.
    @buffer_state[:pending_mutex].synchronize do
      @buffer_state[:outgoing_items] = @buffer_state[:pending_items]
      @buffer_state[:outgoing_count] = @buffer_state[:pending_count]
      buffer_clear_pending
    end

    @buffer_config[:logger].debug do
      debug_output = {
        :outgoing_count => @buffer_state[:outgoing_count],
        :time_since_last_flush => time_since_last_flush,
        :outgoing_events => @buffer_state[:outgoing_items],
        :batch_timeout => @buffer_config[:max_interval],
        :force => force,
        :final => final
      }
      "Flushing output: #{debug_output}"
    end if @buffer_config[:logger]

    @buffer_state[:outgoing_items].each do |group, events|
      begin
        if group.nil?
          flush(events,final)
        else
          flush(events, group, final)
        end

        @buffer_state[:outgoing_items].delete(group)
        events_size = events.size
        @buffer_state[:outgoing_count] -= events_size
        items_flushed += events_size
        @buffer_state[:last_flush] = Time.now

      rescue => e
        # Flush failed: either drop the batch or re-queue it, per config.
        @buffer_config[:logger].warn do
          warn_output = {
            :outgoing_count => @buffer_state[:outgoing_count],
            :exception => e.class.name,
            :backtrace => e.backtrace
          }
          "Failed to flush outgoing items: #{warn_output}"
        end if @buffer_config[:logger]

        if @buffer_config[:has_on_flush_error]
          on_flush_error e
        end

        if @buffer_config[:drop_messages_on_flush_error]
          reset_buffer
        else
          cancel_flush
        end

      end
    end

  ensure
    @buffer_state[:flush_mutex].unlock
  end

  return items_flushed
end
ruby
{ "resource": "" }
q16652
Axiom.Relation.each
train
# Yield each unique tuple (coerced against the header) exactly once.
# Returns an Enumerator when no block is given; otherwise self.
def each
  return to_enum unless block_given?
  seen = {}
  tuples.each do |raw|
    tuple = Tuple.coerce(header, raw)
    next if seen.key?(tuple)
    seen[tuple] = tuple
    yield tuple
  end
  self
end
ruby
{ "resource": "" }
q16653
Axiom.Relation.replace
train
# Make this relation's contents equal to +other+: delete tuples not in
# +other+ and insert tuples of +other+ not already present.
def replace(other)
  coerced = coerce(other)
  removed = difference(coerced)
  added = coerced.difference(self)
  delete(removed).insert(added)
end
ruby
{ "resource": "" }
q16654
Axiom.Aliasable.define_inheritable_alias_method
train
# Define +new_method+ as a dynamic alias that dispatches through
# +public_send+, so subclass overrides of +original_method+ are honored
# (unlike alias_method, which binds at definition time).
def define_inheritable_alias_method(new_method, original_method)
  define_method(new_method) do |*arguments, &blk|
    public_send(original_method, *arguments, &blk)
  end
end
ruby
{ "resource": "" }
q16655
Axiom.Tuple.extend
train
# Return a new tuple joined with values computed from +extensions+.
def extend(header, extensions)
  values = extensions.map { |extension| Function.extract_value(extension, self) }
  join(header, values)
end
ruby
{ "resource": "" }
q16656
Axiom.Tuple.predicate
train
# Build a conjunction asserting each header attribute equals its value
# in this tuple (starting from a tautology).
def predicate
  header.reduce(Function::Proposition::Tautology.instance) do |conjunction, attribute|
    conjunction.and(attribute.eq(attribute.call(self)))
  end
end
ruby
{ "resource": "" }
q16657
GeoPattern.GeoPatternTask.run_task
train
# Generate one SVG pattern file per entry in +data+.
# Each value must be a Hash with :input plus optional :patterns, :color
# and :base_color; any other shape raises.
def run_task(_verbose)
  data.each do |path, string|
    opts = {}
    path = File.expand_path(path)
    if string.is_a?(Hash)
      input = string[:input]
      opts[:patterns] = string[:patterns] if string.key? :patterns
      opts[:color] = string[:color] if string.key? :color
      opts[:base_color] = string[:base_color] if string.key? :base_color
    else
      raise 'Invalid data structure for Rake Task'
    end
    pattern = GeoPattern.generate(input, opts)
    logger.info "Creating pattern at \"#{path}\"."
    # Ensure the target directory exists before writing the SVG.
    FileUtils.mkdir_p File.dirname(path)
    File.write(path, pattern.to_svg)
  end
end
ruby
{ "resource": "" }
q16658
GeoPattern.RakeTask.include
train
# Mix the given module(s) into this object's class.
def include(modules)
  Array(modules).each { |mod| self.class.include mod }
end
ruby
{ "resource": "" }
q16659
Kontena::Cli::Master.LoginCommand.authentication_path
train
# Build the relative authentication URL for the master.
# Remote logins redirect to "/code"; local logins redirect to a
# localhost callback on +local_port+. Raises if neither is given.
def authentication_path(local_port: nil, invite_code: nil, expires_in: nil, remote: false)
  query = {}
  if remote
    query[:redirect_uri] = "/code"
  elsif local_port
    query[:redirect_uri] = "http://localhost:#{local_port}/cb"
  else
    raise ArgumentError, "Local port not defined and not performing remote login"
  end
  query[:invite_code] = invite_code if invite_code
  query[:expires_in] = expires_in if expires_in
  "/authenticate?#{URI.encode_www_form(query)}"
end
ruby
{ "resource": "" }
q16660
Kontena::Cli::Master.LoginCommand.authentication_url_from_master
train
# Ask the master for an authorization URL by hitting its authentication
# endpoint and following the expected 302 redirect.
# Any non-redirect response terminates the CLI with an error message.
def authentication_url_from_master(master_url, auth_params)
  client = Kontena::Client.new(master_url)
  vspinner "Sending authentication request to receive an authorization URL" do
    response = client.request(
      http_method: :get,
      path: authentication_path(auth_params),
      # These statuses are handled below instead of raising in the client.
      expects: [501, 400, 302, 403],
      auth: false
    )
    if client.last_response.status == 302
      # Success: the Location header carries the authorization URL.
      client.last_response.headers['Location']
    elsif response.kind_of?(Hash)
      exit_with_error [response['error'], response['error_description']].compact.join(' : ')
    elsif response.kind_of?(String) && response.length > 1
      exit_with_error response
    else
      exit_with_error "Invalid response to authentication request : HTTP#{client.last_response.status} #{client.last_response.body if debug?}"
    end
  end
end
ruby
{ "resource": "" }
q16661
Kontena::Cli::Master.LoginCommand.select_a_server
train
# Resolve which configured master server to use from the optional
# +name+ and +url+ arguments, falling back to the current master.
# Creates a new Server entry when nothing in the config matches.
# Exits with an error when the selection is ambiguous or incomplete.
def select_a_server(name, url)
  # no url, no name, try to use current master
  if url.nil? && name.nil?
    if config.current_master
      return config.current_master
    else
      exit_with_error 'URL not specified and current master not selected'
    end
  end

  if name && url
    exact_match = config.find_server_by(url: url, name: name)
    return exact_match if exact_match # found an exact match, going to use that one.

    name_match = config.find_server(name)
    if name_match
      # found a server with the provided name, set the provided url to it and return
      name_match.url = url
      return name_match
    else
      # nothing found, create new.
      return Kontena::Cli::Config::Server.new(name: name, url: url)
    end
  elsif name
    # only --name provided, try to find a server with that name
    name_match = config.find_server(name)
    if name_match && name_match.url
      return name_match
    else
      exit_with_error "Master #{name} was found from config, but it does not have an URL and no URL was provided on command line"
    end
  elsif url
    # only url provided
    if url =~ /^https?:\/\//
      # url is actually an url
      url_match = config.find_server_by(url: url)
      if url_match
        return url_match
      else
        return Kontena::Cli::Config::Server.new(url: url, name: nil)
      end
    else
      # url does not look like an URL; treat it as a server name
      name_match = config.find_server(url)
      if name_match
        unless name_match.url
          exit_with_error "Master #{url} was found from config, but it does not have an URL and no URL was provided on command line"
        end
        return name_match
      else
        exit_with_error "Can't find a master with name #{name} from configuration"
      end
    end
  end
end
ruby
{ "resource": "" }
q16662
Kontena::NetworkAdapters.Weave.ensure_exposed
train
# Expose the host node on the overlay at +cidr+, then hide any stale
# addresses previously exposed under a different CIDR.
def ensure_exposed(cidr)
  # configure new address
  # these will be added alongside any existing addresses
  if @executor_pool.expose(cidr)
    info "Exposed host node at cidr=#{cidr}"
  else
    error "Failed to expose host node at cidr=#{cidr}"
  end

  # cleanup any old addresses
  @executor_pool.ps('weave:expose') do |name, mac, *cidrs|
    stale = cidrs.reject { |exposed_cidr| exposed_cidr == cidr }
    stale.each do |exposed_cidr|
      warn "Migrating host node from cidr=#{exposed_cidr}"
      @executor_pool.hide(exposed_cidr)
    end
  end
end
ruby
{ "resource": "" }
q16663
Kontena::NetworkAdapters.Weave.get_containers
train
# Map weave-attached container ids to their CIDR lists, excluding the
# host's own weave:expose pseudo-entry.
def get_containers
  {}.tap do |containers|
    @executor_pool.ps() do |id, mac, *cidrs|
      containers[id] = cidrs unless id == 'weave:expose'
    end
  end
end
ruby
{ "resource": "" }
q16664
Kontena::NetworkAdapters.Weave.migrate_container
train
# Move a container to +cidr+: detach every other attached address first
# (weave refuses to re-attach an existing address with a different
# netmask), then attach the desired one.
def migrate_container(container_id, cidr, attached_cidrs)
  stale = attached_cidrs.reject { |attached_cidr| attached_cidr == cidr }
  stale.each do |attached_cidr|
    warn "Migrate container=#{container_id} from cidr=#{attached_cidr}"
    @executor_pool.detach(container_id, attached_cidr)
  end
  self.attach_container(container_id, cidr)
end
ruby
{ "resource": "" }
q16665
Kontena::NetworkAdapters.Weave.remove_container
train
# Release a container's overlay address from IPAM. IPAM failures are
# only logged, because periodic cleanup reclaims leaked addresses later.
def remove_container(container_id, overlay_network, overlay_cidr)
  info "Remove container=#{container_id} from network=#{overlay_network} at cidr=#{overlay_cidr}"
  begin
    @ipam_client.release_address(overlay_network, overlay_cidr)
  rescue IpamError => error
    # Cleanup will take care of these later on
    warn "Failed to release container=#{container_id} from network=#{overlay_network} at cidr=#{overlay_cidr}: #{error}"
  end
end
ruby
{ "resource": "" }
q16666
Kontena::Workers.WeaveWorker.start_container
train
# Attach a (re)started container to the overlay network, registering
# DNS first for service containers. Containers without an overlay CIDR
# are skipped. Errors are logged, never raised to the caller.
def start_container(container)
  overlay_cidr = container.overlay_cidr

  if overlay_cidr
    wait_weave_running?
    register_container_dns(container) if container.service_container?
    attach_overlay(container)
  else
    debug "skip start for container=#{container.name} without overlay_cidr"
  end
rescue Docker::Error::NotFoundError
  # Container disappeared between event and handling; nothing to do.
  debug "skip start for missing container=#{container.id}"
rescue => exc
  error "failed to start container: #{exc.class.name}: #{exc.message}"
  error exc.backtrace.join("\n")
end
ruby
{ "resource": "" }
q16667
Kontena.PluginManager.init
train
# Point rubygems at the plugin install dir, silence gem UI output
# (unless debugging) and load all installed plugins. Returns true.
def init
  ENV["GEM_HOME"] = Common.install_dir
  Gem.paths = ENV
  Common.use_dummy_ui unless Kontena.debug?
  plugins
  true
end
ruby
{ "resource": "" }
q16668
Kontena.Logging.debug
train
# Log +message+ (or the block's result) at DEBUG severity, using this
# object's logging prefix as the progname.
def debug(message = nil, &block)
  logger.add(Logger::DEBUG, message, logging_prefix, &block)
end
ruby
{ "resource": "" }
q16669
Kontena.Logging.info
train
# Log +message+ (or the block's result) at INFO severity, using this
# object's logging prefix as the progname.
def info(message = nil, &block)
  logger.add(Logger::INFO, message, logging_prefix, &block)
end
ruby
{ "resource": "" }
q16670
Kontena.Logging.warn
train
# Log +message+ (or the block's result) at WARN severity, using this
# object's logging prefix as the progname.
def warn(message = nil, &block)
  logger.add(Logger::WARN, message, logging_prefix, &block)
end
ruby
{ "resource": "" }
q16671
Kontena.Logging.error
train
# Log +message+ (or the block's result) at ERROR severity, using this
# object's logging prefix as the progname.
def error(message = nil, &block)
  logger.add(Logger::ERROR, message, logging_prefix, &block)
end
ruby
{ "resource": "" }
q16672
Mutations.Command.add_error
train
# Record an error under a (possibly dotted) key path: "user.address.city"
# nests the error inside intermediate ErrorHashes.
#
# +error+ may be a Symbol (wrapped into an ErrorAtom with the optional
# +message+) or an existing ErrorAtom/ErrorArray/ErrorHash.
# Raises ArgumentError for any other type.
#
# Cleanup: the original used an if/elsif with an empty body to accept
# the pass-through types; a case/when expresses the validation directly.
def add_error(key, error, message = nil)
  case error
  when Symbol
    error = ErrorAtom.new(key, error, message: message)
  when Mutations::ErrorAtom, Mutations::ErrorArray, Mutations::ErrorHash
    # already a valid error object -- use as-is
  else
    raise ArgumentError, "Invalid error of kind #{error.class}"
  end

  @errors ||= ErrorHash.new
  @errors.tap do |errs|
    path = key.to_s.split(".")
    last = path.pop
    # Walk (creating as needed) intermediate ErrorHashes per segment.
    inner = path.inject(errs) do |cur_errors, part|
      cur_errors[part.to_sym] ||= ErrorHash.new
    end
    inner[last] = error
  end
end
ruby
{ "resource": "" }
q16673
Kontena::Cli::Helpers.ExecHelper.websocket_exec_write_thread
train
# Spawn a background thread that pumps local stdin to the exec
# websocket. With a tty the terminal size is sent first; an explicit
# nil stdin message signals EOF. On any error the socket is closed
# with code 1001 (going away).
def websocket_exec_write_thread(ws, tty: nil)
  Thread.new do
    begin
      if tty
        console_height, console_width = TTY::Screen.size
        websocket_exec_write(ws, 'tty_size' => { width: console_width, height: console_height })
      end
      read_stdin(tty: tty) do |stdin|
        logger.debug "websocket exec stdin with encoding=#{stdin.encoding}: #{stdin.inspect}"
        websocket_exec_write(ws, 'stdin' => stdin)
      end
      websocket_exec_write(ws, 'stdin' => nil) # EOF
    rescue => exc
      logger.error exc
      ws.close(1001, "stdin read #{exc.class}: #{exc}")
    end
  end
end
ruby
{ "resource": "" }
q16674
Kontena::Cli::Helpers.ExecHelper.websocket_exec
train
def websocket_exec(path, cmd, interactive: false, shell: false, tty: false) exit_status = nil write_thread = nil query = {} query[:interactive] = interactive if interactive query[:shell] = shell if shell query[:tty] = tty if tty server = require_current_master url = websocket_url(path, query) token = require_token options = WEBSOCKET_CLIENT_OPTIONS.dup options[:headers] = { 'Authorization' => "Bearer #{token.access_token}" } options[:ssl_params] = { verify_mode: ENV['SSL_IGNORE_ERRORS'].to_s == 'true' ? OpenSSL::SSL::VERIFY_NONE : OpenSSL::SSL::VERIFY_PEER, ca_file: server.ssl_cert_path, } options[:ssl_hostname] = server.ssl_subject_cn logger.debug { "websocket exec connect... #{url}" } # we do not expect CloseError, because the server will send an 'exit' message first, # and we return before seeing the close frame # TODO: handle HTTP 404 errors Kontena::Websocket::Client.connect(url, **options) do |ws| logger.debug { "websocket exec open" } # first frame contains exec command websocket_exec_write(ws, 'cmd' => cmd) if interactive # start new thread to write from stdin to websocket write_thread = websocket_exec_write_thread(ws, tty: tty) end # blocks reading from websocket, returns with exec exit code exit_status = websocket_exec_read(ws) fail ws.close_reason unless exit_status end rescue Kontena::Websocket::Error => exc exit_with_error(exc) rescue => exc logger.error { "websocket exec error: #{exc}" } raise else logger.debug { "websocket exec exit: #{exit_status}"} return exit_status ensure if write_thread write_thread.kill write_thread.join end end
ruby
{ "resource": "" }
q16675
Kontena::Launchers.Etcd.update_membership
train
def update_membership(node) info 'checking if etcd previous membership needs to be updated' etcd_connection = find_etcd_node(node) return 'new' unless etcd_connection # No etcd hosts available, bootstrapping first node --> new cluster weave_ip = node.overlay_ip peer_url = "http://#{weave_ip}:2380" client_url = "http://#{weave_ip}:2379" members = JSON.parse(etcd_connection.get.body) members['members'].each do |member| if member['peerURLs'].include?(peer_url) && member['clientURLs'].include?(client_url) # When there's both peer and client URLs, the given peer has been a member of the cluster # and needs to be replaced delete_membership(etcd_connection, member['id']) sleep 1 # There seems to be some race condition with etcd member API, thus some sleeping required add_membership(etcd_connection, peer_url) sleep 1 return 'existing' elsif member['peerURLs'].include?(peer_url) && !member['clientURLs'].include?(client_url) # Peer found but not been part of the cluster yet, no modification needed and it can join as new member return 'new' end end info 'previous member info not found at all, adding' add_membership(etcd_connection, peer_url) 'new' # Newly added member will join as new member end
ruby
{ "resource": "" }
q16676
Kontena::Launchers.Etcd.find_etcd_node
train
def find_etcd_node(node) grid_subnet = IPAddr.new(node.grid['subnet']) tries = node.grid['initial_size'] begin etcd_host = "http://#{grid_subnet[tries]}:2379/v2/members" info "connecting to existing etcd at #{etcd_host}" connection = Excon.new(etcd_host) members = JSON.parse(connection.get.body) return connection rescue Excon::Errors::Error => exc tries -= 1 if tries > 0 info 'retrying next etcd host' retry else info 'no online etcd host found, we\'re probably bootstrapping first node' end end nil end
ruby
{ "resource": "" }
q16677
Kontena::Launchers.Etcd.add_membership
train
def add_membership(connection, peer_url) info "Adding new etcd membership info with peer URL #{peer_url}" connection.post(:body => JSON.generate(peerURLs: [peer_url]), :headers => { 'Content-Type' => 'application/json' }) end
ruby
{ "resource": "" }
q16678
Kontena.Observable.update
train
def update(value) raise RuntimeError, "Observable crashed: #{@value}" if crashed? raise ArgumentError, "Update with nil value" if value.nil? debug { "update: #{value}" } set_and_notify(value) end
ruby
{ "resource": "" }
q16679
Kontena::Workers::Volumes.VolumeManager.volume_exist?
train
def volume_exist?(volume_name, driver) begin debug "volume #{volume_name} exists" volume = Docker::Volume.get(volume_name) if volume && volume.info['Driver'] == driver return true elsif volume && volume.info['Driver'] != driver raise DriverMismatchError.new("Volume driver not as expected. Expected #{driver}, existing volume has #{volume.info['Driver']}") end rescue Docker::Error::NotFoundError debug "volume #{volume_name} does NOT exist" false rescue => error abort error end end
ruby
{ "resource": "" }
q16680
Kontena::Workers.ServicePodWorker.on_container_die
train
def on_container_die(exit_code: ) cancel_restart_timers return unless @service_pod.running? # backoff restarts backoff = @restarts ** 2 backoff = max_restart_backoff if backoff > max_restart_backoff info "#{@service_pod} exited with code #{exit_code}, restarting (delay: #{backoff}s)" log_service_pod_event("service:instance_crash", "service #{@service_pod} instance exited with code #{exit_code}, restarting (delay: #{backoff}s)", Logger::WARN ) ts = Time.now.utc @restarts += 1 @restart_backoff_timer = after(backoff) { debug "restart triggered (from #{ts})" apply } end
ruby
{ "resource": "" }
q16681
Kontena::Workers.ServicePodWorker.restart
train
def restart(at = Time.now, container_id: nil, started_at: nil) if container_id && @container.id != container_id debug "stale #{@service_pod} restart for container id=#{container_id}" return end if started_at && @container.started_at != started_at debug "stale #{@service_pod} restart for container started_at=#{started_at}" return end debug "mark #{@service_pod} for restart at #{at}" @restarting_at = at apply end
ruby
{ "resource": "" }
q16682
Kontena::Workers.ServicePodWorker.check_starting!
train
def check_starting!(service_pod, container) raise "service stopped" if !@service_pod.running? raise "service redeployed" if @service_pod.deploy_rev != service_pod.deploy_rev raise "container recreated" if @container.id != container.id raise "container restarted" if @container.started_at != container.started_at end
ruby
{ "resource": "" }
q16683
GridServices.Helpers.document_changes
train
def document_changes(document) (document.changed + document._children.select{|child| child.changed? }.map { |child| "#{child.metadata_name.to_s}{#{child.changed.join(", ")}}" }).join(", ") end
ruby
{ "resource": "" }
q16684
GridServices.Helpers.save_grid_service
train
def save_grid_service(grid_service) if grid_service.save return grid_service else grid_service.errors.each do |key, message| add_error(key, :invalid, message) end return nil end end
ruby
{ "resource": "" }
q16685
GridServices.Helpers.update_grid_service
train
def update_grid_service(grid_service, force: false) if grid_service.changed? || force grid_service.revision += 1 info "updating service #{grid_service.to_path} revision #{grid_service.revision} with changes: #{document_changes(grid_service)}" else debug "not updating service #{grid_service.to_path} revision #{grid_service.revision} without changes" end save_grid_service(grid_service) end
ruby
{ "resource": "" }
q16686
Kontena.Observer.error
train
def error @values.each_pair{|observable, value| return Error.new(observable, value) if Exception === value } return nil end
ruby
{ "resource": "" }
q16687
Kontena.Observer.each
train
def each(timeout: nil) @deadline = Time.now + timeout if timeout while true # prevent any intervening messages from being processed and discarded before we're back in Celluloid.receive() Celluloid.exclusive { if error? debug { "raise: #{self.describe_observables}" } raise self.error elsif ready? yield *self.values @deadline = Time.now + timeout if timeout end } # must be atomic! debug { "wait: #{self.describe_observables}" } update(receive()) end end
ruby
{ "resource": "" }
q16688
GridServices.Common.validate_secrets
train
def validate_secrets validate_each :secrets do |s| secret = self.grid.grid_secrets.find_by(name: s[:secret]) unless secret [:not_found, "Secret #{s[:secret]} does not exist"] else nil end end end
ruby
{ "resource": "" }
q16689
GridServices.Common.validate_certificates
train
def validate_certificates validate_each :certificates do |c| cert = self.grid.certificates.find_by(subject: c[:subject]) unless cert [:not_found, "Certificate #{c[:subject]} does not exist"] else nil end end end
ruby
{ "resource": "" }
q16690
Docker.StreamingExecutor.start
train
def start(ws) @ws = ws @ws.on(:message) do |event| on_websocket_message(event.data) end @ws.on(:error) do |exc| warn exc end @ws.on(:close) do |event| on_websocket_close(event.code, event.reason) end started! end
ruby
{ "resource": "" }
q16691
Kontena::Cli::Services.UpdateCommand.parse_service_data_from_options
train
def parse_service_data_from_options data = {} data[:strategy] = deploy_strategy if deploy_strategy data[:ports] = parse_ports(ports_list) unless ports_list.empty? data[:links] = parse_links(link_list) unless link_list.empty? data[:memory] = parse_memory(memory) if memory data[:memory_swap] = parse_memory(memory_swap) if memory_swap data[:shm_size] = parse_memory(shm_size) if shm_size data[:cpus] = cpus if cpus data[:cpu_shares] = cpu_shares if cpu_shares data[:affinity] = affinity_list unless affinity_list.empty? data[:env] = env_list unless env_list.empty? data[:secrets] = parse_secrets(secret_list) unless secret_list.empty? data[:container_count] = instances if instances data[:cmd] = Shellwords.split(cmd) if cmd data[:user] = user if user data[:image] = parse_image(image) if image data[:privileged] = privileged? data[:cap_add] = cap_add_list if cap_add_list data[:cap_drop] = cap_drop_list if cap_drop_list data[:net] = net if net data[:log_driver] = log_driver if log_driver data[:log_opts] = parse_log_opts(log_opt_list) if log_opt_list deploy_opts = parse_deploy_opts data[:deploy_opts] = deploy_opts unless deploy_opts.empty? health_check = parse_health_check data[:health_check] = health_check unless health_check.empty? data[:pid] = pid if pid data[:stop_signal] = stop_signal if stop_signal data[:stop_grace_period] = stop_timeout if stop_timeout data end
ruby
{ "resource": "" }
q16692
Kontena::Cli::Helpers.TimeHelper.time_since
train
def time_since(time, terse: false) return '' if time.nil? || time.empty? dt = Time.now - Time.parse(time) dt_s = dt.to_i dt_m, dt_s = dt_s / 60, dt_s % 60 dt_h, dt_m = dt_m / 60, dt_m % 60 dt_d, dt_h = dt_h / 60, dt_h % 60 parts = [] parts << "%dd" % dt_d if dt_d > 0 parts << "%dh" % dt_h if dt_h > 0 parts << "%dm" % dt_m if dt_m > 0 parts << "%ds" % dt_s if terse return parts.first else return parts.join('') end end
ruby
{ "resource": "" }
q16693
Kontena.WebsocketClient.connect!
train
def connect! info "connecting to master at #{@api_uri}" headers = { 'Kontena-Node-Id' => @node_id.to_s, 'Kontena-Node-Name' => @node_name, 'Kontena-Version' => Kontena::Agent::VERSION, 'Kontena-Node-Labels' => @node_labels.join(','), 'Kontena-Connected-At' => Time.now.utc.strftime(STRFTIME), } if @node_token headers['Kontena-Node-Token'] = @node_token.to_s elsif @grid_token headers['Kontena-Grid-Token'] = @grid_token.to_s else fail "Missing grid, node token" end @ws = Kontena::Websocket::Client.new(@api_uri, headers: headers, ssl_params: @ssl_params, ssl_hostname: @ssl_hostname, connect_timeout: CONNECT_TIMEOUT, open_timeout: OPEN_TIMEOUT, ping_interval: PING_INTERVAL, ping_timeout: PING_TIMEOUT, close_timeout: CLOSE_TIMEOUT, ) async.connect_client @ws publish('websocket:connect', nil) rescue => exc error exc reconnect! end
ruby
{ "resource": "" }
q16694
Kontena.WebsocketClient.send_message
train
def send_message(msg) ws.send(msg) rescue => exc warn exc abort exc end
ruby
{ "resource": "" }
q16695
Kontena.WebsocketClient.on_error
train
def on_error(exc) case exc when Kontena::Websocket::SSLVerifyError if exc.cert error "unable to connect to SSL server with KONTENA_SSL_VERIFY=true: #{exc} (subject #{exc.subject}, issuer #{exc.issuer})" else error "unable to connect to SSL server with KONTENA_SSL_VERIFY=true: #{exc}" end when Kontena::Websocket::SSLConnectError error "unable to connect to SSL server: #{exc}" when Kontena::Websocket::ConnectError error "unable to connect to server: #{exc}" when Kontena::Websocket::ProtocolError error "unexpected response from server, check url: #{exc}" else error "websocket error: #{exc}" end end
ruby
{ "resource": "" }
q16696
Kontena.WebsocketClient.on_close
train
def on_close(code, reason) debug "Server closed connection with code #{code}: #{reason}" case code when 4001 handle_invalid_token when 4010 handle_invalid_version(reason) when 4040, 4041 handle_invalid_connection(reason) else warn "connection closed with code #{code}: #{reason}" end end
ruby
{ "resource": "" }
q16697
Kontena::Cli.SubcommandLoader.symbolize_path
train
def symbolize_path(path) path.gsub(/.*\/cli\//, '').split('/').map do |path_part| path_part.split('_').map{ |e| e.capitalize }.join end.map(&:to_sym) end
ruby
{ "resource": "" }
q16698
Kontena.Client.authentication_ok?
train
def authentication_ok?(token_verify_path) return false unless token return false unless token['access_token'] return false unless token_verify_path final_path = token_verify_path.gsub(/\:access\_token/, token['access_token']) debug { "Requesting user info from #{final_path}" } request(path: final_path) true rescue => ex error { "Authentication verification exception" } error { ex } false end
ruby
{ "resource": "" }
q16699
Kontena.Client.exchange_code
train
def exchange_code(code) return nil unless token_account return nil unless token_account['token_endpoint'] response = request( http_method: token_account['token_method'].downcase.to_sym, path: token_account['token_endpoint'], headers: { CONTENT_TYPE => token_account['token_post_content_type'] }, body: { 'grant_type' => 'authorization_code', 'code' => code, 'client_id' => Kontena::Client::CLIENT_ID, 'client_secret' => Kontena::Client::CLIENT_SECRET }, expects: [200,201], auth: false ) response['expires_at'] ||= in_to_at(response['expires_in']) response end
ruby
{ "resource": "" }