query stringlengths 7 9.55k | document stringlengths 10 363k | metadata dict | negatives listlengths 0 101 | negative_scores listlengths 0 101 | document_score stringlengths 3 10 | document_rank stringclasses 102
values |
|---|---|---|---|---|---|---|
user_in_group? determines if the user is part of a team in the repository, where the group of team members is specified by a group identifier | def user_in_group?(login, repo, settings, group_identifier)
trusted = false
repo_setting_key = "#{repo}_#{settings['name']}_#{group_identifier}"
if $permissions[login]
if $permissions[login][repo_setting_key]
trusted = $permissions[login][repo_setting_key] ? true : false
end
else
$permissions[login] = {}
end
if $permissions[login][repo_setting_key].nil?
settings[group_identifier][repo].each do |team|
res = get "#{GITHUB_API_BASE_URL}/teams/#{team}/members/#{login}"
trusted = res.success?
break if trusted
end
$permissions[login][repo_setting_key] = trusted
end
trusted
end | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def user_in_group?(user, group)\n return get_user_groups(user).include?(group)\n end",
"def in_group?(user, group)\n users_groups.where(user_id: user.id, group_id: group.id).count.positive?\n end",
"def in_group?(group_or_id)\n group_ids.include?(Ecircle::User.group_id(group_or_id))\n... | [
"0.82063127",
"0.7636449",
"0.7624152",
"0.7536441",
"0.7450445",
"0.71581495",
"0.7127574",
"0.7116194",
"0.7050549",
"0.7011511",
"0.69383276",
"0.6938149",
"0.6938149",
"0.6922728",
"0.6921854",
"0.686081",
"0.6848602",
"0.6848273",
"0.6806333",
"0.6783242",
"0.6743527",
... | 0.75803006 | 3 |
Check if we can dequeue and submit the next tests to Jenkins, and if so, update the comment | def validate_and_submit_tests(repo_to_pull_request, base_repo, branch, pull_id, comment_id, extended_tests, all_coreq_triggers_trusted, comments, settings, base_commit=nil)
submitted_tests = submitted_tests_for_branch(branch)
unless settings['allow_multiple']
if submitted_tests[settings['name']] || JenkinsAPI.previous_build_running?(branch, settings, base_repo)
submitted_tests[settings['name']] = true
$stderr.puts " Waiting for existing build to finish"
return
end
end
if !attempt_to_skip_merge_tests(repo_to_pull_request, base_repo, pull_id, all_coreq_triggers_trusted, comments, settings)
$stderr.puts " Running merge tests for pull request ##{pull_id} for repo '#{base_repo}'"
# If the project is stable, submit the tests
build_url = JenkinsAPI.submit_jenkins_job(repo_to_pull_request, branch, extended_tests, settings)
extended_tests = extended_tests.join(', ') unless extended_tests.empty?
new_fields = {:state => :running, :build_url => build_url, :extended_tests => extended_tests, :base_commit => base_commit}
running_comment = compose_bot_comment(settings['test_prefix'], new_fields)
# Update the comments to reflect the new tests running
recreate_comment(pull_id, comment_id, base_repo, running_comment)
repo_to_pull_request.each do |repo, pull_request|
if repo != base_repo
pr_base_commit = base_repo_commit_for_pull_req(pull_request)
new_fields[:base_commit] = pr_base_commit['sha']
running_comment = compose_bot_comment(settings['test_prefix'], new_fields)
process_or_create_comment(pull_request['number'], repo, settings) do |c_id, comment, comment_updated_at|
update_comment(c_id, repo, running_comment)
end
end
commit = last_commit_for_pull_id(pull_request['number'], repo)
update_status(settings['test_prefix'], commit['sha'], repo, 'pending', build_url, 'Testing')
end
end
submitted_tests[settings['name']] = true unless settings['allow_multiple']
end | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test03_post_closed_board_MediaOneComment_TC_24319\n\t\tlogin $user_1_email, $master_password\n\t\tsleep 2\n\t\t$browser.goto($patch_boards_post_closed_note)\n\t\t\n\t\tcommentPopSubmit \"Test Comment #{random}\"\n\tend",
"def test02_post_closed_board_ArticleOneComment_TC_24319\n\t\tlogin $user_1_email, $mast... | [
"0.6304635",
"0.61540604",
"0.6105782",
"0.60393226",
"0.6033271",
"0.60174125",
"0.59986174",
"0.59827995",
"0.5972537",
"0.5960026",
"0.595945",
"0.5950777",
"0.5935887",
"0.5908998",
"0.59047586",
"0.5898617",
"0.5879725",
"0.5834431",
"0.58300704",
"0.58256847",
"0.580078... | 0.5718781 | 23 |
Tries to extract the commit ID for the base repo from the bot comments by matching the build URL from the jenkins environment to the build URL in the PR bot comments | def base_commit_id_for_merge(pull_request, settings=nil)
repo = pull_request['base']['repo']['name']
base_commit = nil
num_tries = 5
sleep_time = SLEEP_TIME
bot_comment = nil
$stderr.puts " Checking if current base repo commit ID matches what we expect"
if ENV['BUILD_URL'].to_s.empty?
base_commit = ''
else
(1..num_tries).each do |i|
comment_pattern = /#{CONTENT_RUNNING} +\(#{ENV['BUILD_URL']}\)/
bot_comment = get_comment_matching_regex(pull_request['number'], repo, comment_pattern)
if bot_comment
begin
fields = extract_bot_comment_fields(bot_comment['body'], settings)
rescue Exception => e
$stderr.puts e.message
end
if fields[:base_commit]
base_commit = fields[:base_commit]
break
end
end
if i < num_tries
sleep sleep_time
sleep_time *= 2
$stderr.puts " Retrying... attempt: #{i+1}"
end
end
end
if base_commit.nil?
$stderr.puts " No matching bot comment was found for pull request ##{pull_request['number']} on repo #{repo}"
base_commit = ''
elsif !base_commit.empty?
pr_base_commit = base_repo_commit_for_pull_req(pull_request)
if base_commit != pr_base_commit['sha']
delete_comment(bot_comment['id'], repo) if bot_comment
raise "Base repository commit ID #{pr_base_commit['sha']} doesn't match evaluated commit ID #{base_commit}"
end
end
base_commit
end | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def github_comment_repository_url(comment)\n [comment.repository, 'commit', comment.reference].join('/')\n end",
"def extract_bot_comment_fields(bot_comment_body, settings=nil)\n test_prefix = extract_test_prefix(bot_comment_body, settings)\n content = nil\n build_url = nil\n base_commit = nil\n extende... | [
"0.60928667",
"0.5973124",
"0.55709136",
"0.5409518",
"0.53950155",
"0.5381384",
"0.53689235",
"0.5365013",
"0.5259943",
"0.5248134",
"0.5245233",
"0.5245148",
"0.52416044",
"0.52403665",
"0.52235913",
"0.52174586",
"0.5205975",
"0.5201701",
"0.5200008",
"0.5166218",
"0.51609... | 0.70890325 | 0 |
process_or_create_comment yields the comment body and metadata for the last bot comment for a particular pull request. If no comment exists at the time this is called, a placeholder comment is made to initialize the pull request analysis process | def process_or_create_comment(issue_id, repo, settings, comments=nil)
bot_comment = get_comment_with_prefix(issue_id, repo, settings['test_prefix'], comments)
unless bot_comment
# In the process of evaluating a pull request, we could have called this method
# previously and, since we're not updating our internal cache of comments, the
# comments array, we could have stale data in that cache, so we want to force
# get_comment_with_prefix to fetch a new list of comments to make sure we find
# a bot comment if it exists but we don't have it cached
bot_comment = get_comment_with_prefix(issue_id, repo, settings['test_prefix'])
unless bot_comment
# No comment found, create a new one and yield to it
$stderr.puts " Creating placeholder comment"
evaluating_comment = compose_bot_comment(settings['test_prefix'], :state => :evaluating)
bot_comment = add_comment(issue_id, repo, evaluating_comment)
end
end
yield bot_comment['id'], bot_comment['body'], Time.parse(bot_comment['updated_at'])
end | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def create_comment\n if file?\n puts \"| --- file\"\n files.each do |file|\n comment = Gif.where(github_type: \"file\").detect { |gif| file[:filename].downcase.include? gif.keyword.downcase }\n create_pull_request_comment(comment.image, file.filename, 1) if comment.present?\n... | [
"0.7275019",
"0.7058971",
"0.6779901",
"0.67044926",
"0.6558556",
"0.6479299",
"0.6466325",
"0.6434163",
"0.636113",
"0.6347172",
"0.6344625",
"0.62405753",
"0.62026465",
"0.6143643",
"0.6128355",
"0.6035635",
"0.6022427",
"0.6002676",
"0.59858596",
"0.5941353",
"0.59305185",... | 0.7744058 | 0 |
create_or_update_comment tries to update the previous bot comment with the given prefix to contain the new comment body, or, if there is no previous bot comment with the given prefix, tries to create a new bot comment instead | def create_or_update_comment(issue_id, repo, comment_prefix, comment, comments=nil)
bot_comment = get_comment_with_prefix(issue_id, repo, comment_prefix, comments)
if bot_comment
update_comment(bot_comment['id'], repo, comment) if bot_comment['body'] != comment
else
# No comment found, create a new one
add_comment(issue_id, repo, comment)
end
end | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def compose_bot_comment(prefix, comment_data={})\n # Derive comment data content from state, if needed\n if comment_data[:state] && (comment_data[:content].nil? ||\n # Don't overwrite the content if it matches the state already\n !comment_data[:content].s... | [
"0.7368731",
"0.7340103",
"0.67400885",
"0.57549787",
"0.5733549",
"0.5630458",
"0.5630458",
"0.55913943",
"0.5545386",
"0.5536907",
"0.55235887",
"0.5522152",
"0.5468802",
"0.5456392",
"0.5403375",
"0.53881156",
"0.5387349",
"0.5386741",
"0.53773403",
"0.53664505",
"0.536364... | 0.80648285 | 0 |
Creates or updates a comment with given prefix | def delete_comment_with_prefix(issue_id, repo, comment_prefix, comments=nil)
comment = get_comment_with_prefix(issue_id, repo, comment_prefix, comments)
if comment && comments
comments.delete_if { |c| c['id'] == comment['id'] }
end
delete_comment(comment['id'], repo) if comment
return comment
end | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def recreate_comment_with_prefix(issue_id, repo, comment_prefix, comment, comments=nil)\n comments = get_comments(issue_id, repo) if comments.nil?\n delete_comment_with_prefix(issue_id, repo, comment_prefix, comments)\n create_or_update_comment(issue_id, repo, comment_prefix, comment, comments)\n ... | [
"0.773643",
"0.7161376",
"0.7121396",
"0.65948755",
"0.62692535",
"0.61859155",
"0.6134123",
"0.6061188",
"0.59601814",
"0.58842933",
"0.57980806",
"0.57980806",
"0.57314295",
"0.5730909",
"0.5696034",
"0.5696034",
"0.5696034",
"0.5696034",
"0.5690734",
"0.56541914",
"0.56390... | 0.60039735 | 8 |
get_comment_matching_regex returns a comment authored by the bot matching the supplied regex, if one exists | def get_comment_matching_regex(issue_id, repo, pattern, comments=nil)
matching_comment = nil
comments = comments ? comments : get_comments(issue_id, repo)
comments.each do |comment|
if comment['body'] =~ pattern && (comment['user']['login'] == Properties['bot_github_user'])
matching_comment = comment
break
end
end
matching_comment
end | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def comment\n @comment ||= client.issue_comments(repository, pr_id).detect do |comment|\n UrlSectionBuilder.match?(comment[:body])\n end\n end",
"def get_comment_with_prefix(issue_id, repo, comment_prefix, comments=nil)\n get_comment_matching_regex(issue_id, repo, /^#{comment_pre... | [
"0.56686217",
"0.55709577",
"0.5507487",
"0.5406718",
"0.53552026",
"0.5341296",
"0.52526855",
"0.51245993",
"0.5118246",
"0.5096631",
"0.50661093",
"0.5061357",
"0.5035634",
"0.5035634",
"0.49643052",
"0.4945719",
"0.49171764",
"0.49166468",
"0.49097788",
"0.48690763",
"0.48... | 0.75751877 | 0 |
get_comment_with_prefix returns a comment authored by the bot with the given prefix, if one exists | def get_comment_with_prefix(issue_id, repo, comment_prefix, comments=nil)
get_comment_matching_regex(issue_id, repo, /^#{comment_prefix}(\s|$)/, comments)
end | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def comment(prefix)\n lang_eval { @comment_prefix = prefix }\n nil\n end",
"def delete_comment_with_prefix(issue_id, repo, comment_prefix, comments=nil)\n comment = get_comment_with_prefix(issue_id, repo, comment_prefix, comments)\n if comment && comments\n comments.delete_if ... | [
"0.7313103",
"0.6621266",
"0.64932597",
"0.61769086",
"0.61537087",
"0.5901566",
"0.5746939",
"0.5686596",
"0.54822534",
"0.5415583",
"0.5389662",
"0.538614",
"0.5332634",
"0.5271836",
"0.5251541",
"0.51996934",
"0.5180592",
"0.5128151",
"0.5123716",
"0.5093902",
"0.5087161",... | 0.80846083 | 0 |
Recreates a comment with given prefix | def recreate_comment_with_prefix(issue_id, repo, comment_prefix, comment, comments=nil)
comments = get_comments(issue_id, repo) if comments.nil?
delete_comment_with_prefix(issue_id, repo, comment_prefix, comments)
create_or_update_comment(issue_id, repo, comment_prefix, comment, comments)
end | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def comment(prefix)\n lang_eval { @comment_prefix = prefix }\n nil\n end",
"def compose_bot_comment(prefix, comment_data={})\n # Derive comment data content from state, if needed\n if comment_data[:state] && (comment_data[:content].nil? ||\n # Don't overwrite the... | [
"0.74855554",
"0.6633524",
"0.64976025",
"0.62688094",
"0.612083",
"0.59608334",
"0.5946876",
"0.5946876",
"0.5946876",
"0.5946876",
"0.5942118",
"0.5942118",
"0.59360176",
"0.5934178",
"0.59211826",
"0.59184057",
"0.59131926",
"0.5876897",
"0.5738138",
"0.5709173",
"0.568316... | 0.7849539 | 0 |
get_evaluated_time returns the time at which the bot last evaluated this pull request, or returns nil if the bot never evaluated the pull request at all | def get_evaluated_time(comments, repo, settings)
comments = sort_comments(comments)
comments.each do |comment|
if comment['user']['login'] == Properties['bot_github_user'] && comment['body'] =~ evaluated_marker_regex(repo, settings)
return Time.parse(comment['updated_at'])
end
end
return nil
end | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_trusted_trigger_time(pull_request, comments, settings)\n trigger_time_for_user_in_group(pull_request, comments, settings, 'repo_to_teams')\n end",
"def time_of_last_pr\n client = Octokit::Client.new(access_token: @options[:creds], per_page: 50)\n last_pr = client.list_issues('ansible/ansible',\... | [
"0.6251447",
"0.61633223",
"0.6150241",
"0.60926497",
"0.59422153",
"0.5846043",
"0.58079445",
"0.57820344",
"0.56936926",
"0.56346667",
"0.56268334",
"0.5613294",
"0.55976146",
"0.5577462",
"0.55701035",
"0.5558062",
"0.55326474",
"0.55267096",
"0.5503675",
"0.5502674",
"0.5... | 0.7450107 | 0 |
get_trusted_trigger_time returns the time at which the last trusted user added a trigger, as best as we can tell. This will not always be able to determine the exact trigger time. | def get_trusted_trigger_time(pull_request, comments, settings)
trigger_time_for_user_in_group(pull_request, comments, settings, 'repo_to_teams')
end | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def trigger_type\n return @trigger_type unless @trigger_type.nil?\n parse_event_triggers unless @custom_event_triggers_checked\n return @trigger_type\n end",
"def last_refreshed?\n return Time.now.utc\n end",
"def last_query_time(lastRunFile, ageInterval, time=nil, debug)\n\n # TOD... | [
"0.5546445",
"0.5281392",
"0.5141606",
"0.50640947",
"0.5061817",
"0.50601155",
"0.50003403",
"0.4999539",
"0.49621695",
"0.4910725",
"0.49084806",
"0.49078193",
"0.48060736",
"0.4804313",
"0.48009142",
"0.47544518",
"0.47511268",
"0.4731798",
"0.47099712",
"0.4709733",
"0.46... | 0.6550507 | 0 |
get_admin_trigger_time returns the time at which the last administrative user added a trigger, as best as we can tell. This will not always be able to determine the exact trigger time. | def get_admin_trigger_time(pull_request, comments, settings)
trigger_time_for_user_in_group(pull_request, comments, settings, 'repo_to_admin_teams')
end | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def action_by_admin\n @current_time = Home.last_cut_off_time.first\n end",
"def last_action_date_time\n return @last_action_date_time\n end",
"def admin_name(last_acted_by, admin)\n last_acted_by = last_acted_by.to_i\n if (last_acted_by > 0)\n admin[:n... | [
"0.6240332",
"0.5687121",
"0.5645227",
"0.5577967",
"0.554593",
"0.54383004",
"0.5415529",
"0.5340536",
"0.5335382",
"0.52919",
"0.5271177",
"0.52705526",
"0.5268413",
"0.52420855",
"0.52420855",
"0.52420855",
"0.52420855",
"0.52380896",
"0.5199472",
"0.5194264",
"0.51420105"... | 0.6930439 | 0 |
trigger_time_for_user_in_group returns the time at which the last trigger was added to the pull request by a user in the group identified with the group identifier. This function will return the time of the last trigger as best as we can tell, and will not always be able to determine the exact trigger time. | def trigger_time_for_user_in_group(pull_request, comments, settings, group_identifier)
login = pull_request['user']['login']
updated_at, _ = get_updated_at(pull_request, comments, settings)
repo = pull_request['base']['repo']['name']
trigger_regex = /\[#{settings['name']}\]/i
trigger_time = nil
trigger_login = nil
if pull_request['title'] =~ trigger_regex || pull_request['body'] =~ trigger_regex
if user_in_group?(login, repo, settings, group_identifier)
# Once we determine that the grouped user has a trigger statement in their pull
# request title or body, we need to determine what time we will claim the
# trigger was added. We can't use the created_at time, since the user could
# edit their title or body to add the trigger, and we can't use the updated_at
# time, since *any* action on the pull request will update that time. So,
# we prefer to use the last time that the bot evaluated the pull request, and,
# if the bot has never evaluated the pull request, we use the updated_at time.
evaluated_time = get_evaluated_time(comments, repo, settings)
trigger_time = evaluated_time || Time.parse(pull_request['updated_at'])
trigger_login = login
end
end
comments = sort_comments(comments)
# Even if a trigger statement was found in the pull request body or title,
# a more recent trigger could exist in the comments, and we want to ensure
# that we return the most recent trigger time
comments.each do |comment|
if comment['body'] =~ trigger_regex
comment_login = comment['user']['login']
if user_in_group?(comment_login, repo, settings, group_identifier)
trigger_comment_updated_at = Time.parse(comment['updated_at'])
# If we find a trigger phrase from a grouped user in a comment, we will use
# the time that their comment was made as the trigger time if and only if
# the user posted their trigger phrase after the last commit was created,
# ensuring that the grouped user has signed off on all of the commits to be
# tested
if user_in_group?(login, repo, settings, group_identifier) || trigger_comment_updated_at > updated_at
# Furthermore, we only want to update the trigger time if we have a more
# recent time from the grouped user's comment than the time we got by
# investigating the pull request body and title
if !trigger_time || trigger_comment_updated_at > trigger_time
trigger_time = trigger_comment_updated_at
trigger_login = comment_login
end
break
end
end
end
end
return [trigger_time, trigger_login]
end | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_trusted_trigger_time(pull_request, comments, settings)\n trigger_time_for_user_in_group(pull_request, comments, settings, 'repo_to_teams')\n end",
"def get_admin_trigger_time(pull_request, comments, settings)\n trigger_time_for_user_in_group(pull_request, comments, settings, 'repo_to_admin_t... | [
"0.6552338",
"0.64325005",
"0.56244284",
"0.5255757",
"0.48586166",
"0.48144716",
"0.46854988",
"0.46402258",
"0.46278918",
"0.46258035",
"0.45930576",
"0.4586346",
"0.45123258",
"0.44753885",
"0.44510156",
"0.44464138",
"0.44464138",
"0.44464138",
"0.44464138",
"0.44361755",
... | 0.78811896 | 0 |
sort_comments sorts an array of comments by the time they were last updated, most recently updated first | def sort_comments(comments)
comments = comments.sort_by do |comment|
Time.parse(comment['updated_at'])
end
comments.reverse!
comments
end | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def comments_ordered_by_submitted\n Comment.where(commentable_id: id, commentable_type: self.class.name)\n .order('created_at DESC')\n end",
"def game_room_unreviewed_comments(game_room)\n array = unreviewed_comments.select {|comment| comment.game_room == game_room }\n array.sort_by {|comm... | [
"0.6523073",
"0.6469021",
"0.6443735",
"0.64357466",
"0.64257264",
"0.61553174",
"0.6092295",
"0.59891474",
"0.59743696",
"0.5972824",
"0.5911356",
"0.5896976",
"0.58817446",
"0.5863123",
"0.58434385",
"0.5823292",
"0.5761235",
"0.5761235",
"0.575338",
"0.57513463",
"0.574952... | 0.8548868 | 0 |
update_evaluated_markers should be called after it is known that the Jenkins job should be triggered for this pull request. evaluate_updated_markers updates the GitHub bot's `evaluated` comment to reflect the fact that we have now evaluated the pull request again. If the pull request was previously waiting for a stable downstream build or in the build queue, we add a comment to reflect that we are recalculating the build queue position. | def update_evaluated_markers(repo_to_pull_request, trigger_updated_at, settings)
repo_to_pull_request.each do |repo, pull_request|
pull_request_comments = get_comments(pull_request['number'], repo)
test_prefix_comment = get_comment_with_prefix(pull_request['number'], repo, settings['test_prefix'], pull_request_comments)
fields = extract_bot_comment_fields(test_prefix_comment['body'], settings) if test_prefix_comment
if !test_prefix_comment || !waiting_state?(fields[:state])
being_queued_comment = compose_bot_comment(settings['test_prefix'], :state => :wait_queue_pos)
create_or_update_comment(pull_request['number'], repo, settings['test_prefix'], being_queued_comment, pull_request_comments)
end
pull_request_evaluated_time = get_evaluated_time(pull_request_comments, repo, settings)
_, pull_request_changed_after_eval = get_updated_at(pull_request, pull_request_comments, settings)
if pull_request_changed_after_eval || pull_request_evaluated_time < trigger_updated_at
commit = last_commit_for_pull_id(pull_request['number'], repo)
# Add a new evaluated marker
add_comment(pull_request['number'], repo, (evaluated_marker(repo, settings) % commit['sha']))
# Delete the old evaluated markers
pull_request_comments.each do |comment|
if comment['user']['login'] == Properties['bot_github_user'] && comment['body'] =~ evaluated_marker_regex(repo, settings)
delete_comment(comment['id'], repo)
end
end
end
end
end | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def process_pull_request(req, updated_at, changed_after_eval, comments, settings, merge_pretest_success)\n id = req['number']\n branch = req['base']['ref']\n base_repo = req['base']['repo']['name']\n login = req['user']['login']\n repo_to_pull_request = {base_repo => req}\n pr_base_co... | [
"0.57004404",
"0.5333661",
"0.5248042",
"0.51925063",
"0.49694118",
"0.49184573",
"0.48626864",
"0.4861154",
"0.485457",
"0.47630307",
"0.47624487",
"0.47580656",
"0.4715955",
"0.4633864",
"0.45806435",
"0.45540226",
"0.45316976",
"0.45281762",
"0.45172507",
"0.4511221",
"0.4... | 0.81637216 | 0 |
get_updated_at returns the time at which the last commit on the pull request was created, and whether or not the pull request has been updated since the last time the bot saw it, determined by looking at the commit referenced in the last evaluated comment and comparing the latest commit SHA | def get_updated_at(pull_request, comments, settings)
updated_at = nil
comments = sort_comments(comments)
previous_sha = nil
comments.each do |comment|
if comment['user']['login'] == Properties['bot_github_user'] && comment['body'] =~ evaluated_marker_regex(pull_request['base']['repo']['name'], settings)
previous_sha = $1
end
end
commit = last_commit_for_pull_id(pull_request['number'], pull_request['base']['repo']['name'])
updated_at = Time.parse(commit['commit']['committer']['date'])
changed_after_last_evaluation = commit['sha'] != previous_sha
[updated_at, changed_after_last_evaluation]
end | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def last_updated\n repo = Grit::Repo.init_bare_or_open(File.join(path , '.git'))\n repo.commits.first.commited_date\n end",
"def updated_at\n commit.committer_date rescue Time.now\n end",
"def get_evaluated_time(comments, repo, settings)\n comments = sort_comments(comments)\n\n comments.ea... | [
"0.72069246",
"0.709728",
"0.6785979",
"0.6564206",
"0.6512809",
"0.63513064",
"0.62387884",
"0.6214231",
"0.61689943",
"0.6162615",
"0.6119166",
"0.6106349",
"0.6096626",
"0.60938734",
"0.6089913",
"0.608412",
"0.60534",
"0.6045034",
"0.603851",
"0.6023738",
"0.60157627",
... | 0.8009525 | 0 |
add_coreq adds a corequisite pull request to the repo_to_pull_request mapping if the corequisite statement is trusted and the corequisite pull request is mergeable. | def add_coreq(addtl_pull_id, addtl_pull_repo, login, trigger_login, repo_to_pull_request, settings, trigger_updated_at, updated_at, base_pull_id, base_repo, comments)
$stderr.puts " Processing dependency on pull request #{addtl_pull_id} in repo '#{addtl_pull_repo}'"
# We can add the co-requisite if the author of the original pull request is trusted, or if the author of the trigger
# statement is trusted and they authored the trigger statement after the co-requisite statement was made
author_trusted = user_trusted?(login, addtl_pull_repo, settings)
trigger_author_trusted = user_trusted?(trigger_login, addtl_pull_repo, settings)
coreq_trigger_trusted = author_trusted || (trigger_author_trusted && (trigger_updated_at && trigger_updated_at >= updated_at))
if coreq_trigger_trusted
pull_request = get_pull_request(addtl_pull_id, addtl_pull_repo, 2)
if pull_request
# On co-requisite pull requests, we do not expect there to be trigger statements, as the
# triggers on the parent pull request will trigger builds and tests with the co-requisites
# therefore, we need to manage the mergeable state of the co-requisite pull requests here
# while we're considering them for their parent
if pull_request['mergeable']
repo_to_pull_request[addtl_pull_repo] = pull_request
set_mergeable(addtl_pull_id, addtl_pull_repo, pull_request['user']['login'])
# Do nothing if the PR has been merged
elsif !(pull_request['merged'])
if set_not_mergeable(addtl_pull_id, addtl_pull_repo, pull_request['user']['login']) >= NOT_MERGEABLE
comment = "Linked pull request #{addtl_pull_id} in repo '#{addtl_pull_repo}' is not mergeable"
$stderr.puts " #{comment}"
add_comment(base_pull_id, base_repo, comment) unless get_comment_with_value(base_pull_id, base_repo, comment, comments)
end
end
else
comment = "Linked pull request #{addtl_pull_id} in repo '#{addtl_pull_repo}' not found"
$stderr.puts " #{comment}"
add_comment(base_pull_id, base_repo, comment) unless get_comment_with_value(base_pull_id, base_repo, comment, comments)
end
else
comment = "User '#{login}' is not permitted to #{settings['name']} linked pull request #{addtl_pull_id} in '#{addtl_pull_repo}'. If the #{settings['name']} was requested by another user with permission to #{settings['name']} in '#{addtl_pull_repo}', the linked pull request must be in a comment before the #{settings['name']} comment (not including the pull request description) or be added by a user with permission to #{settings['name']} in '#{addtl_pull_repo}'."
$stderr.puts " #{comment}"
add_comment(base_pull_id, base_repo, comment) unless get_comment_with_value(base_pull_id, base_repo, comment, comments)
end
coreq_trigger_trusted
end | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def process_pull_request(req, updated_at, changed_after_eval, comments, settings, merge_pretest_success)\n id = req['number']\n branch = req['base']['ref']\n base_repo = req['base']['repo']['name']\n login = req['user']['login']\n repo_to_pull_request = {base_repo => req}\n pr_base_co... | [
"0.55665416",
"0.47991464",
"0.4779627",
"0.476532",
"0.4685922",
"0.46191874",
"0.459072",
"0.4590334",
"0.45501882",
"0.45467567",
"0.45366785",
"0.45115015",
"0.45084685",
"0.4488187",
"0.44538215",
"0.44528323",
"0.4389067",
"0.43773142",
"0.43248954",
"0.4323505",
"0.432... | 0.7555264 | 0 |
comments_after returns all of the comments in the given list that were updated after the given time | def comments_after(comments, time)
valid_comments = []
comments.each do |comment|
if Time.parse(comment['updated_at']) > time
valid_comments << comment
end
end
valid_comments
end | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def sort_comments(comments)\n comments = comments.sort_by do |comment|\n Time.parse(comment['updated_at'])\n end\n comments.reverse!\n comments\n end",
"def recent_comments\n get_or_make_references('Comment', @data, 'recent_comment_ids')\n end",
"def recent_comments\n ... | [
"0.59505975",
"0.57862896",
"0.57862896",
"0.57737696",
"0.5686304",
"0.56050295",
"0.5581698",
"0.55162495",
"0.54872656",
"0.5480417",
"0.54697526",
"0.54111665",
"0.53905386",
"0.5361567",
"0.53582585",
"0.53582585",
"0.53289926",
"0.5283176",
"0.5216298",
"0.52103907",
"0... | 0.8270999 | 0 |
has_valid_flake_comment? determines if a valid flake explanation comment exists A flake comment is valid iff at least one given issue link in the comment: resides in the correct repository points to issues with the correct label | def has_valid_flake_comment?(comments, flake_config)
general_issue_spec = /(https?:\/\/github.com\/.*\/issues\/[0-9]+)/
general_ref_spec = /(^|\s)#(\d+)/
$stderr.puts " Searching comments for a flake identification comment..."
comments.each do |comment|
body = comment['body']
# We want to allow users to link to issues using full URLs or the
# short-hand #1234 GitHub issue references
comment_contains_issue = body =~ general_issue_spec || body =~ general_ref_spec
# We can't allow the bot to be a valid author, or we would pick up
# comments where the bot explains the flake syntax to GitHub users
comment_author_valid = comment['user']['login'] != Properties['bot_github_user']
if comment_contains_issue && comment_author_valid
$stderr.puts " Investigating comment by #{comment['user']['login']}: #{comment['html_url']}"
has_valid_issue_link = has_valid_issue_link?(body, general_issue_spec, Properties['github_user'], flake_config)
has_valid_issue_ref = has_valid_issue_ref?(body, general_ref_spec, flake_config)
return true if has_valid_issue_ref || has_valid_issue_link
end
end
return false
end | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def has_valid_issue_ref?(body, general_ref_spec, flake_config)\n body.scan(general_ref_spec) do |reference|\n $stderr.puts \" Determining if reference ##{reference[1]} meets criteria...\"\n issue_id = reference[1].to_i\n if issue_has_label?(issue_id, flake_config['repo'], flake_confi... | [
"0.70673186",
"0.70409536",
"0.64355147",
"0.63986766",
"0.63893414",
"0.6234285",
"0.62150055",
"0.62150055",
"0.61953753",
"0.6161799",
"0.6143437",
"0.6132171",
"0.6074819",
"0.6072818",
"0.60473937",
"0.604612",
"0.58312535",
"0.58239543",
"0.5815557",
"0.57992405",
"0.57... | 0.7983227 | 0 |
has_valid_issue_link? validates that the comment's issue list contains at least one valid flake issue. We need to validate that the links we found with the general spec are from the correct organization/owner and in the correct repository | def has_valid_issue_link?(body, general_issue_spec, org, flake_config)
body.scan(general_issue_spec) do |issue|
$stderr.puts " Determining if issue #{issue[0]} meets criteria..."
if issue[0] =~ /https?:\/\/github.com\/#{Regexp.quote(org)}\/#{Regexp.quote(flake_config['repo'])}\/issues\/([0-9]+)/
issue_id = $1.to_i
if issue_has_label?(issue_id, flake_config['repo'], flake_config['label'])
$stderr.puts " Issue #{issue[0]} is a valid flake issue"
return true
end
end
end
return false
end | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def has_valid_issue_ref?(body, general_ref_spec, flake_config)\n body.scan(general_ref_spec) do |reference|\n $stderr.puts \" Determining if reference ##{reference[1]} meets criteria...\"\n issue_id = reference[1].to_i\n if issue_has_label?(issue_id, flake_config['repo'], flake_confi... | [
"0.67924017",
"0.6268639",
"0.6161667",
"0.5968592",
"0.59415835",
"0.59406126",
"0.5699039",
"0.5698065",
"0.56795603",
"0.5657434",
"0.5657434",
"0.5623953",
"0.5615193",
"0.55844134",
"0.5512837",
"0.55059165",
"0.5491238",
"0.5490208",
"0.5455747",
"0.5448072",
"0.5440908... | 0.7632571 | 0 |
has_valid_issue_ref? validates that the comment's ref list contains at least one valid flake reference. GitHub issue references will always link to the same org/repo that the comment containing the reference is posted in, so we only need to validate that the ref is to an issue with the correct label | def has_valid_issue_ref?(body, general_ref_spec, flake_config)
body.scan(general_ref_spec) do |reference|
$stderr.puts " Determining if reference ##{reference[1]} meets criteria..."
issue_id = reference[1].to_i
if issue_has_label?(issue_id, flake_config['repo'], flake_config['label'])
$stderr.puts " Reference ##{reference[1]} points to a valid flake issue"
return true
end
end
return false
end | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def validate_refspec\n begin\n valid_git_refspec_path?(explicit_refspec)\n rescue => e\n errors.add(:explicit_refspec, e.message)\n end\n end",
"def has_valid_issue_link?(body, general_issue_spec, org, flake_config)\n body.scan(general_issue_spec) do |issue|\n $stderr.puts \" ... | [
"0.6753024",
"0.65452576",
"0.65057814",
"0.63819534",
"0.6319351",
"0.6285279",
"0.59854716",
"0.59773844",
"0.590871",
"0.58161956",
"0.5726625",
"0.5707775",
"0.56409746",
"0.5630079",
"0.5630079",
"0.5606986",
"0.5583313",
"0.55610836",
"0.5558959",
"0.55281585",
"0.55274... | 0.78016704 | 0 |
issue_has_label? determines if the issue at the repo and issue ID has a label with the given name | def issue_has_label?(issue_id, repo, label_name)
get_labels(issue_id, repo).each do |label|
if label["name"] == label_name
return true
end
end
return false
rescue => e
# Something went wrong but as far as we are
# concerned, this issue does not have the label
$stderr.puts " [e.class] #{e.message}"
return false
end | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def ensure_issue_label(owner, repo, issue_id, name)\n\n issue = ensure_issue(owner, repo, issue_id, false, false, false)\n\n if issue.nil?\n warn \"Could not find issue #{owner}/#{repo} -> #{issue_id} to assign label #{name}\"\n return\n end\n\n label = ensure_repo_label(owner, re... | [
"0.7863625",
"0.73028505",
"0.707209",
"0.69560325",
"0.69081366",
"0.6893504",
"0.6890092",
"0.68509644",
"0.6799466",
"0.6708159",
"0.6664879",
"0.6596852",
"0.6577768",
"0.6575328",
"0.64757633",
"0.6428903",
"0.64278615",
"0.6179928",
"0.61790365",
"0.608358",
"0.60759145... | 0.8939037 | 0 |
format_teams builds a list of links to team rosters from a list of team IDs | def format_teams(team_ids)
links = []
team_ids.each do |id|
url, name = team_url_and_name(id)
links << "[#{name}](#{url})"
end
if links.length == 1
"the #{links[0]} group"
elsif links.length == 2
"the #{links[0]} or #{links[1]} groups"
else
"the #{links[0..-1].join(", ")} or #{links[-1]} groups"
end
end | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def formatted_teams(array)\n array.map { |team| team.name } * \", \"\n end",
"def get_teams\n doc = Nokogiri::HTML(open(\"http://www.espn.com/mens-college-basketball/teams\"))\n conferences = doc.css(\"div.mod-teams-list-medium\")\n \n # Cycle through conferences filling in the name of each team ... | [
"0.59098315",
"0.59054446",
"0.5798889",
"0.57170486",
"0.5632471",
"0.54904157",
"0.5441614",
"0.53641343",
"0.5344787",
"0.5339124",
"0.53224045",
"0.53172773",
"0.5316867",
"0.5284421",
"0.52703667",
"0.5267267",
"0.5241788",
"0.5223376",
"0.52058136",
"0.51937497",
"0.519... | 0.8167017 | 0 |
format_flake_comment builds a comment explaining why a rebuild could not be triggered | def format_flake_comment(prefix, flake_config, team_ids)
flake_label = flake_config['label']
flake_repo = flake_config['repo']
org_repo = "#{Properties['github_user']}/#{flake_repo}"
flake_label_query = CGI.escape("label:#{flake_label}")
issue_link = "https://github.com/#{org_repo}/issues?q=#{flake_label_query}"
new_issue_link = "https://github.com/#{org_repo}/issues/new"
"
#{prefix}
- If the proposed changes in this pull request caused the job to fail, update this pull request with new code to fix the issue(s).
- If flaky tests caused the job to fail, leave a comment with links to the GitHub issue(s) in the `#{org_repo}` repository with the [`#{flake_label}` label](#{issue_link}) that are tracking the flakes. If no issue already exists for the flake you encountered, [create one](#{new_issue_link}).
- If something else like CI system downtime or maintenance caused the job to fail, contact a member of #{format_teams(team_ids)} to trigger the job again.
"
end | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def update_comment_text(story_data, bug_task, build_number)\n txt = String.new(FIXED_COMMENT_PREFIX) # comment should always begin with this\n txt << \"Fixed in Git and deployed with build #{build_number}\"\n\n if (story_data.current_state != 'finished' && story_data.current_state != 'delivered' && story... | [
"0.71426505",
"0.6615593",
"0.6468751",
"0.64130473",
"0.6326692",
"0.6326692",
"0.6326692",
"0.6326692",
"0.6299087",
"0.6299087",
"0.6299087",
"0.6299087",
"0.6288508",
"0.6239009",
"0.6205057",
"0.6195967",
"0.6171043",
"0.61633295",
"0.6152352",
"0.6148388",
"0.6148388",
... | 0.75073165 | 0 |
format_flake_satisfaction_message builds a formatted message for logging that explains why the flake identification criteria were satisfied | def format_flake_satisfaction_message(explanatory_comment_valid, admin_trigger_valid, changed_after_eval)
reasons = []
reasons << "valid identification comment found" if explanatory_comment_valid
reasons << "administrative trigger found" if admin_trigger_valid
reasons << "new changes found" if changed_after_eval
" Flake identification satisfied: #{reasons.join(", ")}, resubmitting..."
end | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def foorth_message\r\n \"#{self.foorth_code} #{self.message}\"\r\n end",
"def failure_message\n parts = []\n parts << %(\n Hi. Your friendly Curry bot here. Just letting you know that there are\n commit authors in this Pull Request who appear to not have signed a Chef\n CLA.\n ).squis... | [
"0.5897426",
"0.5834495",
"0.57104033",
"0.5680368",
"0.56221193",
"0.5612928",
"0.5565529",
"0.55337495",
"0.5516569",
"0.54852414",
"0.54560965",
"0.54038984",
"0.5327347",
"0.5318728",
"0.53014493",
"0.5299812",
"0.52746755",
"0.524828",
"0.52465165",
"0.5240342",
"0.52293... | 0.81438535 | 0 |
Processes a specific pull request. Manages the various comment states and will submit tests as necessary and update the comment with the results. Tests will be resubmitted if the issue has been updated since the tests have been run | def process_pull_request(req, updated_at, changed_after_eval, comments, settings, merge_pretest_success)
id = req['number']
branch = req['base']['ref']
base_repo = req['base']['repo']['name']
login = req['user']['login']
repo_to_pull_request = {base_repo => req}
pr_base_commit = base_repo_commit_for_pull_req(req)
all_coreq_triggers_trusted = true
$stderr.puts "\n****Processing #{settings['name'].upcase} in '#{branch}' branch for user '#{login}' on: #{GITHUB_BASE_URL}/#{Properties['github_user']}/#{base_repo}/pull/#{id}"
trigger_updated_at, trigger_login = get_trusted_trigger_time(req, comments, settings)
evaluated_time = get_evaluated_time(comments, base_repo, settings)
$stderr.puts " Updated at: #{updated_at}"
$stderr.puts " Changed after evaluated time: #{changed_after_eval}"
$stderr.puts " Trigger updated at: #{trigger_updated_at}"
$stderr.puts " Evaluated time: #{evaluated_time}"
# Gather any dependencies from trusted users and add them to the repo_to_pull_request mapping
$repo_to_pull_regex.each do |repo, regex|
next if repo == base_repo
if req['body'] =~ regex
addtl_pull_id = $2
all_coreq_triggers_trusted &= add_coreq(addtl_pull_id, repo, login, trigger_login, repo_to_pull_request, settings, trigger_updated_at, Time.parse(req['updated_at']), id, base_repo, comments)
end
end
comments.each do |comment|
$repo_to_pull_regex.each do |repo, regex|
next if repo == base_repo
if comment['body'] =~ regex
addtl_pull_id = $2
all_coreq_triggers_trusted &= add_coreq(addtl_pull_id, repo, comment['user']['login'], trigger_login, repo_to_pull_request, settings, trigger_updated_at, Time.parse(comment['updated_at']), id, base_repo, comments)
end
end
end
updated_comment = nil
status = nil
build_url = nil
# Find the bot comment for this pull request (or create one)
process_or_create_comment(id, base_repo, settings, comments) do |comment_id, comment, comment_updated_at|
submit_test_job = false
resubmit_test_job = false
fields = extract_bot_comment_fields(comment, settings)
# Given the last comment made by the bot, we can determine the state in which evaluation
# of this pull request ended previously, depending on which we will take different actions
case fields[:state]
when :evaluating
# In this case, we have just made a placeholder comment as we have not seen this pull
# request previously and are evaluating it for the first time. To move forward, we want
# to ensure that all of the co-requisite pull requests have no updates more recent than
# the most recent trigger we have seen.
# TODO: we should *always* check this, above, not only in this state
#
# Two state transitions are possible out of this state:
# - into the 'waiting for stable build' phase, as we cannot move to begin a test unless
# the downstream jobs in Jenkins are ready to run them
# - into the 'running tests' phase, if we have a trusted trigger that covers all the
# commits in the main pull request and any co-requisites
$stderr.puts " Evaluating..."
if JenkinsAPI.project_stable?(branch, settings)
# Make sure there is a trigger in place that is still later than the updated dates of each of the pull requests
if trigger_updated_at
repo_to_pull_request.each do |repo, pull_request|
next if repo == base_repo
if !user_trusted?(pull_request['user']['login'], repo, settings) && trigger_updated_at < Time.parse(pull_request['head']['repo']['updated_at'])
create_or_update_comment(id, base_repo, ACTION_PREFIX, ACTION_NOT_TEAM, comments)
break
end
end
# The main pull request and all of the co-requisite pull requests haven't been
# updated since the last trusted trigger, so we can begin testing
update_evaluated_markers(repo_to_pull_request, trigger_updated_at, settings)
submit_test_job = true
else
# no trusted trigger statement has been made, so we cannot build or test this pull request
create_or_update_comment(id, base_repo, ACTION_PREFIX, ACTION_NOT_TEAM, comments)
end
else
updated_comment = compose_bot_comment(settings['test_prefix'], :content => waiting_for_stable_build_comment_segment(branch, settings), :state => :wait_stable_build)
end
when :wait_stable_build, :wait_queue_pos, :wait_in_queue
# In this case, we are in one of two states:
# 1) waiting for a downstream project in Jenkins to be stable, so we can begin tests
# 2) determining build queue position
#
# Two state transitions are possible from state 1 and 2):
# - loop back into state 1 (or 2) if the Jenkins project is unstable
# - into the 'running tests' state if the Jenkins project is stable
# If the main pull request or any of the co-requisites have been
# updated since the last time we evaluated the main pull request, we need to re-queue the
# build and test.
#
# TODO: currently, we have one invalid state that will make
# it through this logic: the cases where we are not in the
# build queue but have submitted tests. We need to
# reconsider why this is tolerated
$stderr.puts " Waiting..."
# Only submit the tests if the project is stable
if JenkinsAPI.project_stable?(branch, settings)
submitted_tests = submitted_tests_for_branch(branch)
if !submitted_tests[settings['name']]
$stderr.puts " Checking that evaluated times are still up to date"
if changed_after_eval
resubmit_test_job = true
else
submit_test_job = true
repo_to_pull_request.each do |repo, sub_pull_request|
next if repo == base_repo
$stderr.puts " Checking evaluated time for sub pull request #{sub_pull_request['number']} for repo '#{repo}'"
sub_pull_comments = get_comments(sub_pull_request['number'], repo)
sub_pull_request_updated_at, sub_pull_request_changed_after_eval = get_updated_at(sub_pull_request, sub_pull_comments, settings)
$stderr.puts " Updated at: #{sub_pull_request_updated_at}"
$stderr.puts " Changed after evaluated time: #{sub_pull_request_changed_after_eval}"
if sub_pull_request_changed_after_eval
resubmit_test_job = true
break
end
end
end
else
$stderr.puts " Job is already queued"
end
end
when :running
# In this state, we have triggered tests and made it through the build queue, so there
# are running tests
#
# There are two state transitions possible from this state:
# - loop back into this state if the tests are still running
# - into the appropriate post-build state, of which I know of:
# - SUCCESS
# - ABORTED
# - UNSTABLE
# - NOT_FOUND
# Capture the build_url from the regex match
$stderr.puts " Running: #{fields[:build_url]}consoleFull"
# If the build is finished, update with the results
if JenkinsAPI.build_running?(fields[:build_url], branch, settings)
submitted_tests = submitted_tests_for_branch(branch)
submitted_tests[settings['name']] = true unless settings['allow_multiple']
else
result = JenkinsAPI.build_result(fields[:build_url], branch, settings)
# Modify a copy so if we use fields below at some later time, we don't get a surprise
new_fields = fields.dup
new_fields[:state] = CONTENT_TO_STATE[result]
updated_comment = compose_bot_comment(settings['test_prefix'], new_fields)
status = result == 'SUCCESS' ? 'success' : 'failure'
end
when :failure
# In this case, are in the post-test result state, and the tests have failed
#
# The two states that we can transition to from here are:
# - loop back into this state if:
# - flake enforcement is configured and
# - there is no comment linking a valid flake issue to the last failed
# job and
# - there is no administrative trigger, overriding the check and
# - the base branch of the pull request has not been updated from the
# version used to run the tests previously
# - into the testing state if either:
# - flake enforcement is not configured, or
# - a contributor has linked the failure to a flake issue, or
# - an administrator has overridden the check, or
# - new code has been pushed to the branch since the last time this bot
# evaluated the pull request
$stderr.puts " Job failed: #{comment}"
flake_config = settings['flake_identification']
if !flake_config
# If no flake configuration exists, it's ok to re-submit the job whenever
# a new trigger is added to the pull request
$stderr.puts " No flake identification configuration exists, resubmitting..."
resubmit_test_job = true
else
# If flake configuration does exist, we have to determine if we are OK to
# re-submit or not
$stderr.puts " Determining if flakes have been identified for failed job: #{fields[:build_url]}"
admin_trigger_updated_at, _ = get_admin_trigger_time(req, comments, settings)
admin_trigger_valid = admin_trigger_updated_at && admin_trigger_updated_at > updated_at
if !admin_trigger_valid && !changed_after_eval
# If there is nothing else that is going to re-trigger this job, we
# look to find an explanatory comment. This is a costly process in
# terms of API calls, so we only do it if we need to.
explanatory_comment_valid = has_valid_flake_comment?(comments_after(comments, comment_updated_at), flake_config)
else
explanatory_comment_valid = false
end
flake_comment_prefix = flake_denied_prefix(base_repo, settings['name'])
flake_comment_body = format_flake_comment(flake_comment_prefix, flake_config, settings['repo_to_admin_teams'][base_repo])
# If we can find an explanatory comment with a valid flake issue in it,
# or we find an admin override comment, or the pull request has had new
# code added to it since the last evaluation, we know that we are good
# to resubmit the pull request for testing
if explanatory_comment_valid || admin_trigger_valid || changed_after_eval
$stderr.puts format_flake_satisfaction_message(explanatory_comment_valid, admin_trigger_valid, changed_after_eval)
resubmit_test_job = true
delete_comment_with_prefix(id, base_repo, flake_comment_prefix, comments)
else
$stderr.puts " Flake identification not satisfied"
if trigger_updated_at && trigger_updated_at > evaluated_time
# If someone's tried to trigger a re-test, but we can't re-test right
# now, we should leave a helpful message explaining why. If we have
# previously warned the user about why we couldn't re-test, we should
# only update the pull request with a new set of reasons if the trigger
# is newer than our last comment
previous_warning = get_comment_with_prefix(id, base_repo, flake_comment_prefix, comments)
if !previous_warning || (previous_warning && trigger_updated_at > Time.parse(previous_warning['updated_at']))
$stderr.puts " New reminder comment is appropriate for this pull request"
recreate_comment_with_prefix(id, base_repo, flake_comment_prefix, flake_comment_body, comments)
end
end
end
end
else
# In this case, we're in one of three states:
# 1) ACTION_NOT_MERGE: the pull request is not mergeable and needs a rebase
# 2) ACTION_NOT_TEAM: the pull request has no trusted triggers
# 3) the build has finished, with one of the following states:
# - SUCCESS
# - ABORTED
# - UNSTABLE
# - NOT_FOUND
#
# Regardless of the current state, since we have a trusted trigger, we want to
# re-submit this pull request for testing.
$stderr.puts " Finished..."
$stderr.puts " #{comment}" if (fields[:prefix] == settings['test_prefix'] && fields[:build_url])
resubmit_test_job = true
end
# Once we have considered the current state of the pull request, we need to determine
# if we are going to submit this pull request for testing
if resubmit_test_job
# If analysis of the current state has determined that we should re-submit the job
# for testing, we need to check that we meet all criteria for resubmission:
# - is the project stable? [TODO: we seem to be checking this always, move up?]
# - have there been any changes in the main pull request since the last evaluation?
# - have there been any changes in the co-requisite pull requests since the last evaluation?
#
# TODO: this logic has bled out and should be moved into a function that is called
# inside of each state case above, instead of being called this way. Ideally each
# state case above should be able to either submit or not submit tests internally.
submit_test_job = false
$stderr.puts " Checking whether we should resubmit"
if trigger_updated_at
# We already trust the primary pull request. Just need to check whether the eval time is older than last update or last trusted trigger.
if changed_after_eval || (evaluated_time < trigger_updated_at)
if JenkinsAPI.project_stable?(branch, settings)
submit_test_job = true
else
updated_comment = compose_bot_comment(settings['test_prefix'], :content => waiting_for_stable_build_comment_segment(branch, settings), :state => :wait_stable_build)
end
end
# Check for any other reason to submit the test job. And make sure none of the sub pull requests have new untrusted changes.
repo_to_pull_request.each do |repo, sub_pull_request|
next if repo == base_repo
$stderr.puts " Checking evaluated time for sub pull request #{sub_pull_request['number']} for repo '#{repo}'"
sub_pull_comments = get_comments(sub_pull_request['number'], repo)
sub_pull_request_updated_at, sub_pull_request_changed_after_eval = get_updated_at(sub_pull_request, sub_pull_comments, settings)
$stderr.puts " Updated at: #{sub_pull_request_updated_at}"
$stderr.puts " Changed after evaluated time: #{sub_pull_request_changed_after_eval}"
# Make sure the trigger on the primary pull request is after the updated date of the sub pull request
# or the user of the sub pull request is trusted
valid_trigger_comment = trigger_updated_at > sub_pull_request_updated_at
$stderr.puts " Has valid trigger comment: #{valid_trigger_comment}"
if valid_trigger_comment || user_trusted?(sub_pull_request['user']['login'], repo, settings)
if sub_pull_request_changed_after_eval
if JenkinsAPI.project_stable?(branch, settings)
submit_test_job = true
else
updated_comment = compose_bot_comment(settings['test_prefix'], :content => waiting_for_stable_build_comment_segment(branch, settings), :state => :wait_stable_build)
end
end
else
create_or_update_comment(id, base_repo, ACTION_PREFIX, ACTION_NOT_TEAM, comments)
submit_test_job = false
break
end
end
else
create_or_update_comment(id, base_repo, ACTION_PREFIX, ACTION_NOT_TEAM, comments)
submit_test_job = false
end
if submit_test_job
update_evaluated_markers(repo_to_pull_request, trigger_updated_at, settings)
end
end
# To complete the transition into the next phase of the pull request evaluation,
# we need to take the correct external actions if necessary and update the bot
# comment to reflect the new state
if submit_test_job
delete_comment_with_prefix(id, base_repo, ACTION_PREFIX, comments)
# Check for pretest_settings_key, so we might skip a round
# of tests prior to merge
extended_tests = get_extended_tests(req, comments, branch, settings)
validate_and_submit_tests(repo_to_pull_request, base_repo, branch, id, comment_id, extended_tests, all_coreq_triggers_trusted, comments, settings, pr_base_commit['sha'])
elsif updated_comment
# If we have an `updated_comment`, we have determined which state we want to
# transition into above literally and simply need to update the comment to
# reflect that
recreate_comment(id, comment_id, base_repo, updated_comment)
repo_to_pull_request.each do |repo, pull_request|
if status && build_url
# One of the literal transitions we specify is the transition from running
# tests to reporting the results, so if a result has been specified we
# furthermore know that we are transitioning into the post-test state and can
# update the GitHub pull request status
commit = last_commit_for_pull_id(pull_request['number'], repo)
update_status(settings['test_prefix'], commit['sha'], repo, status, build_url, (status == 'success') ? 'Passed' : 'Failed')
end
next if repo == base_repo
# Update coreq comments with appropriate base commit ID
coreq_fields = extract_bot_comment_fields(updated_comment, settings)
cr_bot_comment = get_comment_with_prefix(pull_request['number'], repo, settings['test_prefix'])
coreq_fields[:base_commit] = extract_bot_comment_fields(cr_bot_comment['body'], settings)[:base_commit]
updated_comment = compose_bot_comment(settings['test_prefix'], coreq_fields)
recreate_comment_with_prefix(pull_request['number'], repo, settings['test_prefix'], updated_comment)
end
elsif !repo_to_pull_request.empty?
# If we are not running tests and have not specified a literal state to transition
# into, we have one more transition to check: from post-build success to merge
Properties['settings'].each_value do |s|
# Check all_coreq_triggers_trusted in case trigger author can't merge all linked repos
if s['pretest_settings_key'] && merge_pretest_success && all_coreq_triggers_trusted
if Properties['settings'][s['pretest_settings_key']]['name'] == settings['name']
if fields[:state] == :success
build_url = fields[:build_url]
trusted_trigger_time, trigger_login = get_trusted_trigger_time(req, comments, s)
if trusted_trigger_time
begin
repo_to_pull_request.each do |repo, pull_request|
test_merge_pull_request(pull_request['number'], repo, s)
end
repo_to_pull_request.each do |repo, pull_request|
$stderr.puts "\n*******Merging pretested pull request: #{GITHUB_BASE_URL}/#{Properties['github_user']}/#{repo}/pull/#{pull_request['number']} "
merge_pull_request(pull_request['number'], repo, s, build_url)
end
rescue Exception => e
$stderr.puts e.message
$stderr.puts e.backtrace
end
end
end
break
end
end
end
end
end
end | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def process_created_issue_comment(issue_comment_payload)\n pr_name = issue_comment_payload['repository']['full_name'].to_s\n pr_number = issue_comment_payload['issue']['number'].to_s\n comment_user = issue_comment_payload['comment']['user']['id'].to_s\n approvals = parse_comment_body(issue_comment_payl... | [
"0.73619753",
"0.680515",
"0.6694909",
"0.66383123",
"0.6479159",
"0.6464815",
"0.6458875",
"0.645525",
"0.6389939",
"0.635773",
"0.6330193",
"0.63050634",
"0.62980413",
"0.6267666",
"0.62626475",
"0.6244148",
"0.6219692",
"0.61633384",
"0.6153654",
"0.6138295",
"0.6120218",
... | 0.73200727 | 1 |
The GitHub mergeable API is flaky, so we use an external file ``database'' to record pull request mergeability responses with a biased saturating counter so that pull request comment/label actions are only taken after a critical amount of net positive or negative API responses. However, a pull request is immediately chosen not to be tested after the first unmergeable result, even if it takes longer for the UX comment and label to be applied. The saturation points are at 0 and 10, with an initial state of 0, so i.e. the default state is to consider the pull request mergeable, and this state will not change until a net of ten unmergeable responses come in from the GitHub API. When set_mergeable is called, the counter for the specific pull ID is decremented if it hasn't saturated; when set_not_mergeable is called, the counter is incremented unless it is saturated. set_mergeable updates the pull request at repo/pull/id to remove labels and comments that indicate a rebase is necessary. The update happens when the counter described above is saturated at 0. | def set_mergeable(id, repo, login, comments=nil)
merge_id="#{repo}_#{id}_#{login}"
count = MERGEABLE
previous_merge_result=`grep #{merge_id} ~/test_pull_request_not_mergeable`.chomp
if !previous_merge_result.empty? && previous_merge_result =~ /#{merge_id}=(\d+)/
count = $1.to_i
end
if count <= MERGEABLE
# If our counter has saturated at 0, we want to remove any comments and labels about
# rebasing as this pull request is mergeable
`sed -i "/#{merge_id}=/d" ~/test_pull_request_not_mergeable`
comments = get_comments(id, repo) if comments.nil?
# In the majority of cases, we have all of the comments for a pull request, so we can
# look to see if an ACTION_NOT_MERGE comment exists with zero additional API traffic.
# If we find an ACTION_NOT_MERGE comment, we remove it, and furthermore we know we should
# also delete the NEEDS_REBASE_LABEL. If we didn't remove the comment (because it didn't
# exist, or because we failed through the API), we want to try to remove the label
# anyway to ensure that the UX for PRs is good. Since we do *not* have the labels for
# a PR, this is a costly operation, so we do not attempt to do it always, only when we
# know for sure we need to, or once in a while randomly.
if delete_comment_with_prefix(id, repo, ACTION_NOT_MERGE, comments) || (rand(5) < 1)
remove_labels(id, repo, [NEEDS_REBASE_LABEL])
end
else
# If our counter is not saturated, we pull it towards 0
`sed -i "/#{merge_id}=/d" ~/test_pull_request_not_mergeable && echo "#{merge_id}=#{(count-1).to_s}" >> ~/test_pull_request_not_mergeable`
end
end | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def set_not_mergeable(id, repo, login)\n merge_id=\"#{repo}_#{id}_#{login}\"\n count = MERGEABLE\n previous_merge_result=`grep #{merge_id} ~/test_pull_request_not_mergeable`.chomp\n if !previous_merge_result.empty? && previous_merge_result =~ /#{merge_id}=(\\d+)/\n count = $1.to_i\n ... | [
"0.71151245",
"0.6267297",
"0.62577415",
"0.6234729",
"0.6173162",
"0.61040485",
"0.592663",
"0.59265584",
"0.5921738",
"0.5896542",
"0.5896048",
"0.5875448",
"0.58311015",
"0.58129466",
"0.57792664",
"0.57692105",
"0.5754938",
"0.5729205",
"0.56923723",
"0.5691061",
"0.56481... | 0.747007 | 0 |
set_not_mergeable updates the pull request at repo/pulls/id to add a comment and label that informs the author that a rebase is necessary to merge the pull request. The update happens when the counter described above is saturated at 10. set_not_mergeable will return the value of the current counter. | def set_not_mergeable(id, repo, login)
merge_id="#{repo}_#{id}_#{login}"
count = MERGEABLE
previous_merge_result=`grep #{merge_id} ~/test_pull_request_not_mergeable`.chomp
if !previous_merge_result.empty? && previous_merge_result =~ /#{merge_id}=(\d+)/
count = $1.to_i
end
if count >= NOT_MERGEABLE
# If our counter is saturated at 10, we want to add the comment and label
create_or_update_comment(id, repo, ACTION_PREFIX, ACTION_NOT_MERGE)
ensure_labels(id, repo, [NEEDS_REBASE_LABEL])
else
# If our counter is not saturated, we pull it towards 10
`sed -i "/#{merge_id}=/d" ~/test_pull_request_not_mergeable && echo "#{merge_id}=#{(count+1).to_s}" >> ~/test_pull_request_not_mergeable`
end
# Return the previous value of the saturating counter so callers can consume it to
# determine stability level
count
end | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def set_mergeable(id, repo, login, comments=nil)\n merge_id=\"#{repo}_#{id}_#{login}\"\n count = MERGEABLE\n previous_merge_result=`grep #{merge_id} ~/test_pull_request_not_mergeable`.chomp\n if !previous_merge_result.empty? && previous_merge_result =~ /#{merge_id}=(\\d+)/\n count = $1.t... | [
"0.75605166",
"0.53748554",
"0.53349596",
"0.53082156",
"0.5212254",
"0.5095606",
"0.5094285",
"0.5009327",
"0.49948648",
"0.4955486",
"0.49433714",
"0.483475",
"0.48305166",
"0.4816665",
"0.47835088",
"0.4763416",
"0.4753355",
"0.47240895",
"0.47153312",
"0.4658204",
"0.4655... | 0.8172514 | 0 |
Processes a list of the valid pull requests that are mergeable and are submitted by a trusted user | def process_pull_requests(merge_pretest_success)
pull_requests = []
mergeability_in_flux = false
pull_request_statuses = Hash.new { |h,k| h[k] = Hash.new { |h2,k2| h2[k2] = {} } }
$repo_to_pull_regex.keys.each do |repo|
$stderr.puts "\nProcessing repo '#{repo}'"
pull_request_statuses[:closed_prs][repo] = "#{GITHUB_BASE_URL}/#{Properties['github_user']}/#{repo}/pulls?q=is%3Apr+is%3Aclosed"
list_pull_requests(repo).each do |req|
id = req['number']
$stderr.puts "Analyzing pull request: #{GITHUB_BASE_URL}/#{Properties['github_user']}/#{repo}/pull/#{id}"
branch = req['base']['ref']
# We only want to consider pull requests into branches we care about
if $branches.include?(branch) || $branches.include?('*')
$stderr.puts " Updated at: #{req['updated_at']}"
# We only want to consider pull requests that have been modified in
# the last twelve hours, to stop us from doing extra work when we
# don't need to. Also, just to ensure that we don't forget a pull
# request forever on accident, there is a 10% chance we'll consider
# a pull request even if it is inactive
if Time.now - Time.parse(req['updated_at']) < (12*60*60) || (rand(20) < 1)
login = req['user']['login']
comments = nil
# Skip if it's not mergeable
mergeable = is_mergeable?(id, repo)
$stderr.puts " Mergeable: #{mergeable}"
if mergeable
comments = get_comments(id, repo) if comments.nil?
set_mergeable(id, repo, login, comments)
else
if set_not_mergeable(id, repo, login) == MERGEABLE
mergeability_in_flux = true
end
next
end
comments = get_comments(id, repo) if comments.nil?
# We only want to consider pull requests where the last trigger we found
# is from a trusted user
permission_denied = Array.new(Properties['settings'].length, false)
# Has a merge or test been requested by a trusted user?
Properties['settings'].values.each_with_index do |settings, i|
updated_at, changed_after_eval = get_updated_at(req, comments, settings)
trigger_regex = /\[#{settings['name']}\]/i
if req['title'] =~ trigger_regex || req['body'] =~ trigger_regex
if user_trusted?(login, repo, settings)
pull_requests << [req, updated_at, changed_after_eval, comments, settings]
permission_denied[i] = false
next
else
$stderr.puts " User '#{login}' not trusted"
permission_denied[i] = true
end
end
comments = sort_comments(comments)
comments.each do |comment|
if comment['body'] =~ trigger_regex
comment_login = comment['user']['login']
if user_trusted?(comment_login, repo, settings)
pull_requests << [req, updated_at, changed_after_eval, comments, settings]
permission_denied[i] = false
break
else
$stderr.puts " User '#{comment_login}' not trusted"
permission_denied[i] = true
end
end
end
end
if permission_denied.include? true
create_or_update_comment(id, repo, ACTION_PREFIX, ACTION_NOT_TEAM, comments)
end
else
$stderr.puts " Skipping due to age and inactivity"
end
else
create_or_update_comment(id, repo, ACTION_PREFIX, ACTION_UNSUPPORTED_BRANCH)
end
end
end
if mergeability_in_flux
$stderr.puts "Waiting till next run to see if mergeability is in flux"
exit
end
# Consider the pull requests we have deemed valid in
# order of the time they were last updated, oldest first
sorted_pull_requests = pull_requests.sort_by do |req_info|
req_info[1]
end
skipped_count = {}
$branches.each do |branch|
skipped_count[branch] = {}
end
# If we're only allowing sequential tests in this tag, we want to find
# any pull request in the 'running tests' state and signal that there
# is a test running.
sorted_pull_requests.each do |req_info|
req = req_info[0]
comments = req_info[3]
settings = req_info[4]
branch = req['base']['ref']
if !settings['allow_multiple']
comments.each do |comment|
begin
fields = extract_bot_comment_fields(comment['body'], settings)
if (comment['user']['login'] == Properties['bot_github_user']) && fields[:state] == :running
submitted_tests = submitted_tests_for_branch(branch)
submitted_tests[settings['name']] = true
break
end
rescue Exception => e
next
end
end
end
end
sorted_pull_requests.each do |req_info|
# Process the pull request
req = req_info[0]
updated_at = req_info[1]
changed_after_eval = req_info[2]
comments = req_info[3]
settings = req_info[4]
branch = req['base']['ref']
repo = req['base']['repo']['name']
process_pull_request(req, updated_at, changed_after_eval, comments, settings, merge_pretest_success)
submitted_tests = submitted_tests_for_branch(branch)
if !settings['allow_multiple'] && submitted_tests[settings['name']]
# If we're only allowing sequential tests on this tag and there is a test running,
# and we are waiting to test, we need to correctly determine the position in the
# test queue that we are at and post it in a bot comment on the pull request
comments = get_comments(req['number'], repo)
bot_comment = get_comment_with_prefix(req['number'], repo, settings['test_prefix'], comments)
if bot_comment
fields = extract_bot_comment_fields(bot_comment['body'], settings)
if (waiting_in_queue_state?(fields[:state]))
skipped_count_branch = skipped_count[branch] ? skipped_count[branch] : skipped_count['*']
skipped_count_branch[settings['name']] = 0 if skipped_count_branch[settings['name']].nil?
skipped_count_branch[settings['name']] += 1
queued_comment = compose_bot_comment(settings['test_prefix'], :state => :wait_in_queue, :content => waiting_in_queue_comment_segment(skipped_count_branch[settings['name']].to_s))
pull_request_statuses[:enqueued][req['html_url']][:title] = req['title'].force_encoding("UTF-8")
pull_request_statuses[:enqueued][req['html_url']][:queue_pos] = skipped_count_branch[settings['name']]
pull_request_statuses[:enqueued][req['html_url']][:repo] = repo
create_or_update_comment(req['number'], repo, settings['test_prefix'], queued_comment , comments)
$stderr.puts " Pull ##{req['number']} in repo '#{repo}' is at build position ##{skipped_count_branch[settings['name']]}"
# Get ahead of the game and pretest requests
if settings['pretest_settings_key'] && settings['pretest_comment'] && settings['pretest_queue_threshold'] && (skipped_count_branch[settings['name']] >= settings['pretest_queue_threshold'])
trusted_trigger_time, _ = get_trusted_trigger_time(req, comments, Properties['settings'][settings['pretest_settings_key']])
create_or_update_comment(req['number'], repo, settings['pretest_comment'].gsub('[', '\[').gsub(']', '\]'), settings['pretest_comment'], comments) unless trusted_trigger_time
end
elsif fields[:state] == :running
pull_request_statuses[:running][req['html_url']][:title] = req['title'].force_encoding("UTF-8")
pull_request_statuses[:running][req['html_url']][:status] = "merging"
pull_request_statuses[:running][req['html_url']][:repo] = repo
end
end
end
end
# Commit merge queue records to disk
IO.write(MERGE_QUEUE_RECORD, pull_request_statuses.to_json, {:mode => 'w'})
end | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def query_pull_requests\n # This line is where we want to add :accept => 'application/vnd.github.shadow-cat-preview+json' for draft PRs\n pull_requests = github_query(@client) { @client.pull_requests(@repository, :state => 'open', :per_page => 50) }\n\n @pull_request_details = []\n\n pull_requests.each... | [
"0.6782224",
"0.6670089",
"0.644937",
"0.6301873",
"0.6205624",
"0.6188895",
"0.6149079",
"0.60908437",
"0.59575397",
"0.5955183",
"0.59341353",
"0.5928827",
"0.59227663",
"0.5915262",
"0.5911489",
"0.58969146",
"0.58435416",
"0.58239245",
"0.57863945",
"0.577505",
"0.5756686... | 0.77439547 | 0 |
Preloads all models from +app/models+ (or ENV["COUCH_MODELS_DIR"]) directory | def preload_models
raise %Q{Directory "#{models_dir}" doesn't exist!} unless Dir.exists?(models_dir)
pattern = File.join(models_dir, "**", "*.rb")
Dir[pattern].each { |f| require f }
end | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def load_models(app)\n app.config.paths[\"app/models\"].expanded.each do |path|\n preload = ::Mongoid.preload_models\n if preload.resizable?\n files = preload.map { |model| \"#{path}/#{model.underscore}.rb\" }\n else\n files = Dir.glob(\"#{path}/**/*.rb\")\n end\n... | [
"0.77284205",
"0.758385",
"0.7551463",
"0.75267017",
"0.7399567",
"0.73293877",
"0.7189128",
"0.7189128",
"0.7165854",
"0.7079531",
"0.70632803",
"0.6983067",
"0.68954957",
"0.6869248",
"0.6752323",
"0.6369518",
"0.6356096",
"0.6346511",
"0.63253284",
"0.63212913",
"0.624229"... | 0.77658314 | 0 |
Loops through models and yields each | def each_model(options = {}, &block)
WingedCouch::Model.subclasses.each do |klass|
if options[:raise_exceptions] and not klass.database.exist?
raise %Q{Database for model #{klass.name} doesn't exist, run rake winged_couch:db:create}
end
block.call(klass)
end
end | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def each(&block)\n @models.each {|model| block.call(model)}\n end",
"def each_model(&block)\n DataMapper::Model.descendants.each(&block)\n end",
"def each_model\n result = []\n ole_obj.Models.each do |el|\n model = model_from_ole(el)\n if block_given?\n yiel... | [
"0.7660225",
"0.7434823",
"0.7365559",
"0.69657844",
"0.69150716",
"0.68672645",
"0.6856376",
"0.6553027",
"0.6465477",
"0.63936347",
"0.6142916",
"0.6112676",
"0.60502887",
"0.6018289",
"0.6015128",
"0.5938314",
"0.59344447",
"0.5932522",
"0.59312224",
"0.59309095",
"0.59204... | 0.5871213 | 25 |
Give me an +IO+like object (one that responds to the +each+ method) and I'll parse that sucker for you. | def initialize(input)
@data = Hash.new { |h,k| h[k] = { }}
input.each do |line|
line.chomp!
next if line =~ /^(Provider|$)/
tokens = line.split(/\s+/)
country = tokens[11]
count = tokens[6].to_i
@data[country][:date] = Date.parse(tokens[8])
case tokens[5].to_i
when 7
@data[country][:upgrade] = count
when 1
@data[country][:install] = count
end
end
end | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def all_by_magic(io); end",
"def each_io\n each_file do |filename|\n io = get filename\n yield filename, io\n end\n end",
"def each_io\n each_file do |filename|\n io = get filename\n yield filename, io\n end\n end",
"def each\n lo... | [
"0.69649106",
"0.6940681",
"0.6940681",
"0.6618389",
"0.63266397",
"0.63161445",
"0.62812424",
"0.62634385",
"0.6240593",
"0.5877546",
"0.5835136",
"0.5795435",
"0.5776197",
"0.57582533",
"0.57463884",
"0.5718133",
"0.5663678",
"0.56412363",
"0.56267273",
"0.5606536",
"0.5603... | 0.0 | -1 |
Yields each parsed data row to the given block. Each item yielded has the following attributes: country date install_count upgrade_count | def each # :yields: record
@data.each do |country, value|
if block_given?
yield OpenStruct.new(:country => country,
:date => value[:date],
:install_count => value[:install] || 0,
:upgrade_count => value[:upgrade] || 0)
end
end
end | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def each_row\n @data.xml.group.elements.each do |chunk|\n # Set up for running this data chunk - prep timestamp and increment step from source xml\n ts = EGauge::parse_time(chunk['time_stamp'])\n step = chunk['time_delta'].to_i\n \n # Run each row in the chunk, and yield our... | [
"0.66576374",
"0.6309933",
"0.62354803",
"0.61862487",
"0.6185482",
"0.61505896",
"0.6141361",
"0.6134688",
"0.6126608",
"0.5968474",
"0.5949235",
"0.5863454",
"0.5863454",
"0.58445686",
"0.58238024",
"0.5762057",
"0.5751502",
"0.5751502",
"0.57048976",
"0.5702436",
"0.569895... | 0.72329587 | 0 |
The total number of rows in the report | def size
@data.size
end | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def total_rows\n execute['total_rows']\n end",
"def rows_count\n @rows.size\n end",
"def count\n @rows.count\n end",
"def row_count\n @rows.length;\n end",
"def total_records\n record.records.count\n end",
"def num_rows\n return 0 if rows.nil?\n ... | [
"0.8305046",
"0.8089172",
"0.7936997",
"0.7936964",
"0.770651",
"0.7685303",
"0.758719",
"0.7559617",
"0.7538036",
"0.7516591",
"0.7516591",
"0.7478254",
"0.74743617",
"0.7456478",
"0.74370617",
"0.74171466",
"0.74136174",
"0.73973036",
"0.73857903",
"0.7321879",
"0.7309595",... | 0.0 | -1 |
Sets the key_algorithm to be used in all future key hashes | def key_algorithm=(algorithm)
SaltedHash::assert_supported_algorithm(algorithm)
properties_class.instance.update_attributes :key_algorithm => algorithm
end | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def key_algorithm\n if self.rsa?\n :rsa\n elsif self.dsa?\n :dsa\n elsif self.ec?\n :ec\n end\n end",
"def digest_algorithm=(algorithm)\n @digester = Kiji::Digester.new(algorithm)\n end",
"def signature_digest_algorithm=(algorithm)\n @sign_digester = Kij... | [
"0.6606305",
"0.6403709",
"0.6316152",
"0.6302974",
"0.6016931",
"0.59667224",
"0.5946414",
"0.59452516",
"0.59064764",
"0.58475363",
"0.58475363",
"0.5842611",
"0.5727529",
"0.5694579",
"0.5673006",
"0.5654447",
"0.5544293",
"0.5497069",
"0.54938245",
"0.5468402",
"0.5466713... | 0.80567396 | 0 |
Here for backwards compatibility | def show_cart
render :action => 'show'
end | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def private; end",
"def specie; end",
"def specie; end",
"def specie; end",
"def specie; end",
"def probers; end",
"def schubert; end",
"def custom; end",
"def custom; end",
"def wrapper; end",
"def internal; end",
"def refutal()\n end",
"def extra; end",
"def original; end",
"def im... | [
"0.78257686",
"0.64947724",
"0.64947724",
"0.64947724",
"0.64947724",
"0.6493214",
"0.63897425",
"0.6370344",
"0.6370344",
"0.6259001",
"0.6183087",
"0.6123301",
"0.6028515",
"0.59866214",
"0.5979887",
"0.5979887",
"0.5960967",
"0.5895486",
"0.5876774",
"0.5876774",
"0.587677... | 0.0 | -1 |
Get the current customer's cart | def get_cart
if customer_signed_in?
if session[:cart_id].present?
cart = Cart.find(session[:cart_id])
if cart.customer_id.blank?
cart.customer_id = current_customer.id
cart.is_current = true
cart.save
end
return cart
else
cart = Cart.where(:is_current => true, :customer_id => current_customer.id).first
if cart.blank?
cart = Cart.create(:is_current => true, :customer_id => current_customer.id)
end
session[:cart_id] = cart.id
return cart
end
else
if session[:cart_id].present?
# logger.info("*************** signed OUT and session was present")
cart = Cart.find_by_id(session[:cart_id])
if cart.blank?
cart = Cart.create(:is_current => true)
session[:cart_id] = cart.id
return cart
else
cart.is_current = true
cart.save
return cart
end
else
# logger.info("*************** signed OUT and session was NOT present")
cart = Cart.create(:is_current => true)
session[:cart_id] = cart.id
return cart
end
end
end | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_customer_cart\n Customer.find(session[:customer_id]).cart\n end",
"def current_cart\n if current_user.current_cart\n current_user.current_cart\n else\n return nil\n end\n end",
"def cart\n current_cart\n end",
"def current_cart\n @current_cart ||= ShoppingCart.new(t... | [
"0.86154664",
"0.79938203",
"0.793624",
"0.78062373",
"0.7737092",
"0.7599968",
"0.75481427",
"0.75440395",
"0.75419676",
"0.74875826",
"0.74796045",
"0.7468567",
"0.7458609",
"0.74001724",
"0.73998135",
"0.739861",
"0.72996235",
"0.72855026",
"0.72624075",
"0.7244123",
"0.72... | 0.74470454 | 13 |
table [Cucumber::MultilineArgument::DataTable] Input table | def to_rules(table)
table.rows.map do |row|
row_data = table.column_names.zip(row).map do |column_name, value|
case column_name
when 'Id'
['id', value.to_i]
when 'Pattern'
['pattern', value]
when 'Tag Id'
['tag_id', value.nil? ? nil : value.to_i]
when 'Tag Name'
['tag_name', value]
else
raise ArgumentError.new("Unknown Transaction column #{column_name}")
end
end
Hash[row_data]
end
end | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def history_report(rows)\n table = Terminal::Table.new :headings => ['Date','Exercise', 'Set', 'Weight', 'Reps'], \n :title => \"Exercise Log History\",\n :rows => rows \n puts table\nend",
"def table(header, values, io = $stdout)\n self.puts(io, MiGA.tabulate(header, values, self[:tabular]))\n ... | [
"0.6857293",
"0.677026",
"0.67321235",
"0.6475226",
"0.64599335",
"0.6441268",
"0.6427324",
"0.6394821",
"0.63747376",
"0.6355775",
"0.63375765",
"0.63286155",
"0.62335145",
"0.6198989",
"0.61827284",
"0.6164687",
"0.6164687",
"0.6164687",
"0.6164687",
"0.61494076",
"0.610557... | 0.0 | -1 |
mets_data is global mets_data from CreateMetsPackage::METS | def initialize(job, mets_data)
@job = job
@mets_data = mets_data
end | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_meta_data\r\n MetaData.new(:':curr-id' => Node.current_id,\r\n :':curr-quest-flag' => QuestMaker.current_quest_flag)\r\n end",
"def meta_data\n @meta_data ||= @internal_struct[:meta_data]\n end",
"def metadata\n @meta_data\n end",
"def data\n @data ||= DataMunger.... | [
"0.6141276",
"0.573298",
"0.5713579",
"0.5650419",
"0.5579994",
"0.55797344",
"0.557898",
"0.55692846",
"0.552456",
"0.5484107",
"0.5484107",
"0.5468133",
"0.5456429",
"0.5449734",
"0.54335326",
"0.5409023",
"0.5354786",
"0.5337557",
"0.53106767",
"0.5271446",
"0.52678317",
... | 0.5141432 | 35 |
Return source XML data, transformed according to ALVIN specifications | def xml_data
%Q(<mods
xmlns:mods="http://www.loc.gov/mods/v3"
xmlns:xlink="http://www.w3.org/1999/xlink"
version="3.5"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://www.loc.gov/mods/v3 http://www.loc.gov/standards/mods/v3/mods-3-5.xsd">
#{alvin_mods}
</mods>)
end | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def transform(xml_source, seeds={})\n if xml_source.is_a?(REXML::Document)\n xml_document = xml_source\n else\n src = fetch_xml(xml_source)\n xml_document = REXML::Document.new(src)\n end\n queries = REXML::XPath.match(xml_document.root,'//x:query', {'x' => 'http://www.tran... | [
"0.62104267",
"0.61975527",
"0.5923915",
"0.59190196",
"0.58344185",
"0.55817056",
"0.5447026",
"0.54298997",
"0.5364187",
"0.53423786",
"0.53116554",
"0.53096855",
"0.53008044",
"0.5295735",
"0.5276267",
"0.52181363",
"0.5118648",
"0.5109363",
"0.510145",
"0.5087745",
"0.508... | 0.54144764 | 8 |
ALVIN specifies MODS as XML format | def xml_type
"MODS"
end | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def xml_data\n %Q(<mods \n xmlns:mods=\"http://www.loc.gov/mods/v3\" \n xmlns:xlink=\"http://www.w3.org/1999/xlink\" \n version=\"3.5\" \n xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" \n xsi:schemaLocation=\"http://www.loc.gov/mods/v3 http://www.loc.gov/standards/mods/v3/mo... | [
"0.6473388",
"0.60126764",
"0.5764158",
"0.55771524",
"0.55754447",
"0.5571566",
"0.5571566",
"0.5556673",
"0.54912496",
"0.54896015",
"0.54840964",
"0.5457808",
"0.54317194",
"0.54290074",
"0.5421431",
"0.5421431",
"0.53977054",
"0.5354104",
"0.53333175",
"0.5321135",
"0.531... | 0.6308135 | 1 |
Serials have different handling of metadata | def is_serial?
@job.metadata_hash['type_of_record'] == "as"
end | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def read_metadata; end",
"def metadata; end",
"def metadata; end",
"def metadata; end",
"def metadata; end",
"def metadata; end",
"def metadata; end",
"def metadata; end",
"def supports_serial?\n true\n end",
"def metadata=(_); end",
"def meta_data( name )\n fd = new name, ... | [
"0.61448896",
"0.6084401",
"0.6084401",
"0.6084401",
"0.6084401",
"0.6084401",
"0.6084401",
"0.6084401",
"0.6008894",
"0.57936305",
"0.57760304",
"0.5762326",
"0.5762326",
"0.5756509",
"0.5756509",
"0.57051736",
"0.56571054",
"0.5620561",
"0.5620561",
"0.5605867",
"0.5588689"... | 0.5588006 | 21 |
Text representation of Type of Record, or code if not available. Used in output METS XML | def type_of_record
tor = @job.metadata_hash['type_of_record']
TYPE_OF_RECORD[tor] || tor
end | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_record_type_code\n @record_type_code\n end",
"def type_str\n Types.type_str(type)\n end",
"def name(); RECORD_INFO[@type].name; end",
"def type\n name = self.class.name.split('::').last\n return '<type>' if name == 'Record'\n name\n end",
"def type\n ''\n e... | [
"0.67742497",
"0.6667948",
"0.6612007",
"0.65162516",
"0.64990354",
"0.6388641",
"0.6304766",
"0.6265115",
"0.62314355",
"0.62035954",
"0.61975074",
"0.6171738",
"0.61671364",
"0.6163359",
"0.6148591",
"0.6148591",
"0.6148591",
"0.6148591",
"0.6148591",
"0.6148591",
"0.614859... | 0.5698574 | 65 |
Libris does not use image groups, so this returns a simple empty string | def extra_dmdsecs
""
end | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def image_name\n image.try(:name)\n end",
"def first_available_image\n image = %w[o s m l].each do |size|\n field = \"size_#{size}\"\n value = send(field)\n return \"#{id}-#{size}.#{img_type}\" if value.present?\n end\nend",
"def find_normal_image\n return \"Pictures/pcn%04d.png\" % (80 + id)... | [
"0.6490881",
"0.64398485",
"0.64383733",
"0.6426929",
"0.64044464",
"0.6368296",
"0.62637347",
"0.6167148",
"0.6165889",
"0.6100989",
"0.60490847",
"0.60382044",
"0.5996737",
"0.59569335",
"0.59260255",
"0.5925142",
"0.5902699",
"0.5886585",
"0.5878141",
"0.586396",
"0.585292... | 0.0 | -1 |
Libris does not use image groups, so this returns a simple empty string | def dmdid_attribute(groupname)
""
end | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def image_name\n image.try(:name)\n end",
"def first_available_image\n image = %w[o s m l].each do |size|\n field = \"size_#{size}\"\n value = send(field)\n return \"#{id}-#{size}.#{img_type}\" if value.present?\n end\nend",
"def find_normal_image\n return \"Pictures/pcn%04d.png\" % (80 + id)... | [
"0.6489519",
"0.64387345",
"0.64377487",
"0.64252394",
"0.6403487",
"0.636642",
"0.6262345",
"0.6166457",
"0.61647123",
"0.6099048",
"0.6049164",
"0.6035941",
"0.5995641",
"0.59550214",
"0.59259874",
"0.59247136",
"0.5900846",
"0.588453",
"0.5877025",
"0.5863299",
"0.5851553"... | 0.0 | -1 |
XSLT template for transforming Libris MARCXML to ALVINMODS | def alvin_xslt
Nokogiri::XSLT(File.open(Rails.root + "app/models/processes/create_mets_package/assets/LibrisToAlvin.xsl", "rb"))
end | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def transform\n remove_custom_chronology_value\n move_custom_provenance_value\n move_collection_names\n copy_holdings\n\n # Removing some Alma-specific nodes we don't want to expose.\n @record.xpath('./datafield[@tag=\"INT\"]').remove\n @record.xpath('./datafield[@tag=\"INST\"]').remove\n\n ... | [
"0.6515038",
"0.58676875",
"0.5646294",
"0.5588028",
"0.5588028",
"0.5559265",
"0.5321481",
"0.53033555",
"0.5202598",
"0.51810354",
"0.5078683",
"0.50731224",
"0.50605315",
"0.5051209",
"0.5041274",
"0.50390965",
"0.5022889",
"0.49900514",
"0.4966511",
"0.49664944",
"0.49650... | 0.665535 | 0 |
Use above template to do transformation if job is a serial, append the metadata relevant for serials | def alvin_mods
alvin_xml = alvin_xslt.transform(Nokogiri::XML(@job.xml)).search("mods").first
is_serial? ? alvin_append_serial(alvin_xml).inner_html : alvin_xml.inner_html
end | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def is_serial?\n @job.metadata_hash['type_of_record'] == \"as\"\n end",
"def prepare_job_and_associations\r\n temp_job = @temp_job\r\n payer_group = '--'\r\n @original_job = Job.includes(:images_for_jobs).find(temp_job.job_id)\r\n @micr_record, payer_group = get_micr_and_payer_group if @origi... | [
"0.56650287",
"0.5314324",
"0.5226842",
"0.52209777",
"0.5173802",
"0.51398414",
"0.5030426",
"0.49688634",
"0.49525905",
"0.48838243",
"0.48773593",
"0.48670954",
"0.48489672",
"0.484648",
"0.4831943",
"0.48144946",
"0.47987607",
"0.47924882",
"0.47658297",
"0.47480565",
"0.... | 0.0 | -1 |
Compute an ALVIN compatible addon with extra metadata for serials | def alvin_append_serial(a_mods)
a_part = Nokogiri::XML::Node.new('part', a_mods)
ordinals.each_with_index do |ordinal,i|
a_detail = Nokogiri::XML::Node.new('detail', a_part)
a_detail.set_attribute('type', "ordinal_#{i+1}")
a_number = Nokogiri::XML::Node.new('number', a_detail)
a_number.inner_html = ordinal[1].to_s
a_detail.add_child(a_number)
a_caption = Nokogiri::XML::Node.new('caption', a_detail)
a_caption.inner_html = ordinal[0].to_s
a_detail.add_child(a_caption)
a_part.add_child(a_detail)
end
chronologicals.each_with_index do |chronological,i|
a_detail = Nokogiri::XML::Node.new('detail', a_part)
a_detail.set_attribute('type', "chronological_#{i+1}")
a_number = Nokogiri::XML::Node.new('number', a_detail)
a_number.inner_html = chronological[1].to_s
a_detail.add_child(a_number)
a_caption = Nokogiri::XML::Node.new('caption', a_detail)
a_caption.inner_html = chronological[0].to_s
a_detail.add_child(a_caption)
a_part.add_child(a_detail)
end
a_mods.add_child(a_part)
a_mods
end | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def vin\n generated_vin = [\n # Manufacturer ID / WMI\n fetch_sample(VALID_WMI_REGIONS),\n fetch_sample(VALID_ALPHANUMERIC, count: 2),\n # Vehicle Description\n fetch_sample(VALID_ALPHANUMERIC, count: 3),\n fetch_sample(VALID_ALPHA),\n fetch_sam... | [
"0.58402795",
"0.54363614",
"0.53734326",
"0.51678455",
"0.516566",
"0.5051572",
"0.5035721",
"0.50244546",
"0.5020261",
"0.49680072",
"0.49522495",
"0.49445173",
"0.49414268",
"0.49356192",
"0.49066463",
"0.484887",
"0.48437226",
"0.48260286",
"0.47977877",
"0.4786836",
"0.4... | 0.5708019 | 1 |
Returns an ordinal array for given key | def seq_metadata_num(name, num)
key = @job.metadata_hash["#{name}_#{num}_key"]
value = @job.metadata_hash["#{name}_#{num}_value"]
return nil if key.nil? || key.empty? || value.nil? || value.empty?
[key, value]
end | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def questions_by_ordinal_array\n hash_to_ordered_array(Marshal.load(questions_by_ordinal))\n end",
"def convert_indices(keys)\n keys.map do |key|\n Integer(key) rescue key\n end\n end",
"def keys\n \t[ranking,val_count(@cards).reverse]\n end",
"def symbol_to_integer(key)\n\t\tkey[-2..-... | [
"0.60524553",
"0.60175985",
"0.5868558",
"0.57880986",
"0.5698945",
"0.56849974",
"0.5533574",
"0.5503383",
"0.54860586",
"0.54704165",
"0.5445521",
"0.53937405",
"0.5380927",
"0.53649855",
"0.5357307",
"0.5344498",
"0.5343597",
"0.5326577",
"0.5319936",
"0.53164893",
"0.5313... | 0.0 | -1 |
compare this with another RfcDocument and determine whether there are any differences between those two instances in respect to the data_attributes to be compared | def modified?( original )
DATA_ATTRIBUTES.any? { |e| send( e ) != original.send( e )}
end | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def ==(other)\n return false unless other.is_a?(Document)\n @attributes.except(:modified_at).except(:created_at) ==\n other.attributes.except(:modified_at).except(:created_at)\n end",
"def documents_equal?(a, b)\n normalize_document(a) == normalize_document(b)\n end",
"def ==(other)... | [
"0.6999662",
"0.66363347",
"0.6564467",
"0.63309556",
"0.62774724",
"0.6276849",
"0.6265579",
"0.62575006",
"0.6237626",
"0.62048227",
"0.6201456",
"0.6190984",
"0.6176623",
"0.6176442",
"0.6165691",
"0.61244726",
"0.61192405",
"0.6083835",
"0.603467",
"0.5998384",
"0.5993068... | 0.5986228 | 21 |
returns true if version is 0 and all fields contain the initial empty values | def initial_version?
version.zero? && DATA_ATTRIBUTES.all? { |e| send( e ).blank? }
end | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def empty?\n @fields.empty?\n end",
"def empty?\n @fields.values.empty?\n end",
"def empty?\n\t\treturn self.fields.empty?\n\tend",
"def empty?\n attributes.size == 0\n end",
"def empty?()\n fields.empty?\n end",
"def empty_row?\n @row.fields.compact.empty?\n end",
"def ... | [
"0.7103861",
"0.70518696",
"0.70155334",
"0.6996738",
"0.69758725",
"0.6945231",
"0.6826657",
"0.68253213",
"0.68076205",
"0.67870104",
"0.67417073",
"0.6694861",
"0.6626645",
"0.6543701",
"0.65331894",
"0.65032524",
"0.64995736",
"0.64572704",
"0.64301014",
"0.64301014",
"0.... | 0.7502339 | 0 |
answer will delegate its validation to question, and question will inturn add validations on answer on the fly! | def validate_answer(answer)
if rules[:presence] == "1"
answer.validates_presence_of :answer_text
end
if rules[:minimum].present? || rules[:maximum].present?
min_max = { minimum: rules[:minimum].to_i }
min_max[:maximum] = rules[:maximum].to_i if rules[:maximum].present?
answer.validates_length_of :answer_text, min_max
end
end | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def validate_answer(answer)\n end",
"def correct_answer(text)\n answer = Answer.new(text, true)\n @errorReporter.register answer\n @test.last_question.add_answer answer\nend",
"def incorrect_answer(text)\n answer = Answer.new(text, false)\n @errorReporter.register answer\n @test.last_question.... | [
"0.79929274",
"0.71052724",
"0.70803195",
"0.6924708",
"0.6911964",
"0.68183315",
"0.6785872",
"0.6755734",
"0.6598308",
"0.6598308",
"0.65358275",
"0.6523902",
"0.6467723",
"0.64112294",
"0.63927877",
"0.63897204",
"0.63634974",
"0.63237137",
"0.6317179",
"0.63140863",
"0.62... | 0.7346186 | 3 |
Logs a message to stderr. Unless otherwise specified, the message will be printed in red. | def error(message)
message = message.red unless message.color?
puts(stderr, message)
end | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def error(message)\n STDERR.puts red(message)\n end",
"def puts_err(msg)\n STDERR.puts(msg.to_s.red)\n end",
"def error(message)\n $stderr.puts colorize(message, :error)\n end",
"def print_stderr(msg)\n puts \" #{color(msg, 31)}\"\n end",
"def err(message)\n stder... | [
"0.8162828",
"0.8014615",
"0.79377604",
"0.79011846",
"0.7866828",
"0.76910996",
"0.7510416",
"0.7501618",
"0.7496219",
"0.7492789",
"0.7356965",
"0.7238357",
"0.7174543",
"0.71287453",
"0.7118538",
"0.71000034",
"0.70792115",
"0.7066593",
"0.705671",
"0.70357466",
"0.6954536... | 0.8320317 | 0 |
Logs a message to stdout. | def info(message)
puts(stdout, message)
end | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def msg(message)\n stdout.puts message\n end",
"def message(msg)\n STDOUT.puts msg\n end",
"def log(msg)\n # puts msg\n $stdout.write(\"#{msg}\\n\")\n end",
"def log(msg)\n puts msg\n $stdout.flush\n end",
"def log(message)\n puts \">> #{message}\"\n end",
"def... | [
"0.79915625",
"0.78115994",
"0.77715063",
"0.7626064",
"0.762099",
"0.7574283",
"0.75370896",
"0.7454514",
"0.74405175",
"0.73469126",
"0.72785306",
"0.7240068",
"0.72164774",
"0.714694",
"0.7131057",
"0.7121349",
"0.7119547",
"0.7085921",
"0.70840085",
"0.7077729",
"0.707308... | 0.65816593 | 48 |
Logs a message to stdout, runs the given block, and then prints the time it took to run the block. | def benchmark(message)
start = Time.now
print(stdout, "#{message} ")
result = yield
duration = Time.now - start
info("✔".green + format(" %0.3fs", duration).gray)
result
rescue
info("✘".red)
raise
end | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def with_logging(&block)\n\tstart_time = Time.now\n\tputs \"Starting running code at #{start_time}\"\n\tblock.call\n\tend_time = Time.now\n\tputs \"Finishing running code #{end_time}\"\n\ttime_taken = end_time - start_time\n\tputs \"Time taken was #{time_taken} seconds\"\nend",
"def profile (block_description, &... | [
"0.79048973",
"0.7487452",
"0.7410141",
"0.73928154",
"0.73621166",
"0.7278517",
"0.7236607",
"0.72213846",
"0.71957016",
"0.71873343",
"0.707927",
"0.7078359",
"0.7063949",
"0.7063483",
"0.7053214",
"0.697917",
"0.697447",
"0.69573456",
"0.6908939",
"0.6908939",
"0.6779245",... | 0.70391524 | 15 |
PATCH/PUT /books/1 PATCH/PUT /books/1.json | def update
respond_to do |format|
if @account.update(account_params)
format.html { redirect_to account_path(@account), notice: I18n.t('notice.update', model: t('account.name')) }
format.json { render :show, status: :ok, location: @account }
else
format.html { render :edit }
format.json { render json: @account.errors, status: :unprocessable_entity }
end
end
end | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def update\n @book = Book.find(params[:id])\n\n respond_to do |format|\n if @book.update_attributes(params[:book])\n \n format.json { render json: @book, status: :created, location: @book }\n else\n \n format.json { render json: @book.errors, status: :unprocessable_entity }\n... | [
"0.7048773",
"0.688817",
"0.68527925",
"0.68271405",
"0.6824262",
"0.6821144",
"0.6802083",
"0.6802083",
"0.6802083",
"0.6802083",
"0.6802083",
"0.6802083",
"0.6802083",
"0.6802083",
"0.6802083",
"0.6800069",
"0.67871505",
"0.67303824",
"0.67264384",
"0.6724246",
"0.67084",
... | 0.0 | -1 |
Render themed text field | def bootstrap_text_field(form, attribute, placeholder = nil)
form.text_field attribute, class: 'form-control', placeholder: placeholder
end | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def ui_text_field(content = nil, options = nil, html_options = nil, &block)\n UiBibz::Ui::Core::Forms::Texts::TextField.new(content, options, html_options, &block).render\n end",
"def text_field; end",
"def apply\n @textBox.set_markup(\"<span style='\"+@style.to_s+\"' weight='\"+@weight.to_s+\"' foregro... | [
"0.68963623",
"0.66271913",
"0.65618336",
"0.65509886",
"0.6418537",
"0.63556355",
"0.63454735",
"0.6290876",
"0.6254857",
"0.6212283",
"0.62068015",
"0.61340106",
"0.6122071",
"0.6102814",
"0.61019474",
"0.60776806",
"0.6071715",
"0.6055032",
"0.6047477",
"0.60145015",
"0.60... | 0.0 | -1 |
Initialize signer for calculating your signature. Params: +config+: configuration data with access keys and region. | def initialize(config)
@access_key = config[:access_key] || config["access_key"]
@secret_key = config[:secret_key] || config["secret_key"]
@region = config[:region] || config["region"]
@date = Time.now.utc.strftime(RFC8601BASIC)
@service = "s3"
end | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def initialize(config)\n $LOG.i('initializing client')\n @config = config\n @epoint = Upwork::Api::DEFAULT_EPOINT\n\t@url_auth, @url_rtoken, @url_atoken = URI_AUTH, URI_RTOKEN, URI_ATOKEN\n @tenant_id = nil\n\n\t@oauth2_client = OAuth2::Client.new(\n @config.client_id,\n ... | [
"0.5692765",
"0.5597047",
"0.548585",
"0.548585",
"0.548585",
"0.5427439",
"0.5427439",
"0.54229605",
"0.54229605",
"0.54229605",
"0.5422475",
"0.5422075",
"0.54157037",
"0.5317112",
"0.52696866",
"0.5247709",
"0.52420396",
"0.52371305",
"0.5235952",
"0.5233791",
"0.5229258",... | 0.6411205 | 0 |
Signature v4 function returns back headers with Authorization header. Params: +method+: http method. +endpoint+: S3 endpoint URL. | def sign_v4(method, endpoint, headers, body = nil, debug = false)
@method = method.upcase
@endpoint = endpoint
@headers = headers
@uri = URI(endpoint)
puts "EP : "+@endpoint
puts "Headers : "+@headers.to_s
headers["X-Amz-Date"] = date
headers["X-Amz-Content-Sha256"] = Digestor.hexdigest(body || "")
headers["Host"] = get_host(@uri)
puts "--->" + get_host(@uri)
dump if debug
signed_headers = headers.dup
signed_headers['Authorization'] = get_authorization(headers)
signed_headers
end | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def authorization_headers_for(http_verb, url, md5 = nil, content_type = nil)\n url = File.join(\"/\", url.gsub(base_url, \"\"))\n date = Time.now.httpdate\n signed_data = generate_s3_signature(http_verb, md5, content_type, date, url)\n {\n \"Authorization\" => \"AWS #{credentials[:access... | [
"0.7060364",
"0.64316005",
"0.59996843",
"0.5915467",
"0.5895688",
"0.5844129",
"0.5795185",
"0.57844055",
"0.5760655",
"0.56937814",
"0.5599224",
"0.5571342",
"0.5550194",
"0.5546661",
"0.54805267",
"0.5465806",
"0.54588014",
"0.54402924",
"0.54124224",
"0.54062426",
"0.5399... | 0.7068388 | 0 |
Get host header value from endpoint. Params: +endpoint+: endpoint URI object. | def get_host(endpoint)
puts "recieved : "+ endpoint.to_s
puts "port : "+ endpoint.port.to_s
if endpoint.port
if ((endpoint.port == 443) || (endpoint.port == 80))
return endpoint.host
else
return endpoint.host + ":" + endpoint.port.to_s
end
else
#return endpoint.host
return endpoint.host + ":" + endpoint.port.to_s
end
end | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def lookup_hostname(endpoint)\n @resolved_hostnames.select{ |k,v| v.include?(endpoint) }.shift[0]\n end",
"def host_on_header\n request.headers['HTTP_HOST']\n end",
"def host_header\n conf['host_header'] || '*'\n end",
"def host\n _, _, host, = URI.split url\n host\n end",
"def ... | [
"0.7550576",
"0.62331986",
"0.61328644",
"0.6097642",
"0.5974855",
"0.59192353",
"0.58821535",
"0.5813618",
"0.5768656",
"0.5748094",
"0.5735272",
"0.5732071",
"0.56572",
"0.5648506",
"0.56408733",
"0.56236356",
"0.56109685",
"0.5587544",
"0.55792123",
"0.55791765",
"0.556223... | 0.7527747 | 1 |
Get authorization header value. Params: +headers+: list of headers supplied for the request. | def get_authorization(headers)
[
"AWS4-HMAC-SHA256 Credential=#{access_key}/#{credential_scope}",
"SignedHeaders=#{headers.keys.map(&:downcase).sort.join(";")}",
"Signature=#{signature}"
].join(', ')
end | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def http_auth_header\n if headers['Authorization'].present?\n return headers['Authorization'].split(' ').last\n end\n\n raise(ExceptionHandler::MissingToken, Message.missing_token)\n end",
"def http_auth_header\n\t \tif headers['Authorization'].present?\n\t \t\treturn headers['Authorization'].... | [
"0.67129064",
"0.6692515",
"0.6564963",
"0.6479152",
"0.6324961",
"0.62705094",
"0.6250882",
"0.6103316",
"0.6076678",
"0.6060409",
"0.6008411",
"0.5982912",
"0.59650254",
"0.59586453",
"0.5903476",
"0.5903441",
"0.58845836",
"0.58701044",
"0.58701044",
"0.5869141",
"0.575170... | 0.6425794 | 4 |
Calculate HMAC based signature in following format. format kSecret = Your AWS Secret Access Key kDate = HMAC("AWS4" + kSecret, Date) kRegion = HMAC(kDate, Region) kService = HMAC(kRegion, Service) kSigning = HMAC(kService, "aws4_request") | def signature
k_date = Digestor.hmac("AWS4" + secret_key, date[0, 8])
k_region = Digestor.hmac(k_date, region)
k_service = Digestor.hmac(k_region, service)
k_credentials = Digestor.hmac(k_service, "aws4_request")
Digestor.hexhmac(k_credentials, string_to_sign)
end | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def signing_key\n digest = \"SHA256\"\n kDate = OpenSSL::HMAC.digest(digest, \"AWS4\" + credentials.aws_secret, request_datestamp)\n kRegion = OpenSSL::HMAC.digest(digest, kDate, region)\n kService = OpenSSL::HMAC.digest(digest, kRegion, service)\n OpenSSL::HMAC.digest(digest, kService, \"... | [
"0.81183755",
"0.7228004",
"0.7058181",
"0.6877119",
"0.68054473",
"0.674071",
"0.66003376",
"0.65260035",
"0.6494134",
"0.64448476",
"0.6424891",
"0.6353342",
"0.63023037",
"0.62862754",
"0.62855035",
"0.62760055",
"0.62606156",
"0.62533796",
"0.6230142",
"0.6217826",
"0.621... | 0.86618626 | 0 |
Generate string to sign. format StringToSign = Algorithm + '\n' + RequestDate + '\n' + CredentialScope + '\n' + HashedCanonicalRequest | def string_to_sign
[
SIGNV4ALGO,
date,
credential_scope,
Digestor.hexdigest(canonical_request)
].join("\n")
end | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def string_to_sign\n [\n \"AWS4-HMAC-SHA256\",\n request_timestamp,\n credential_scope,\n digest.hexdigest(canonical)\n ].join(\"\\n\")\n end",
"def string_to_sign\n [\n http_method,\n headers.values_at('content-md5', 'content-type').join(\"\\n\")... | [
"0.807008",
"0.77037597",
"0.71158355",
"0.71055126",
"0.71055126",
"0.7078463",
"0.6974759",
"0.6966258",
"0.68414956",
"0.6834855",
"0.6824067",
"0.67150193",
"0.6658641",
"0.66093594",
"0.6595727",
"0.654806",
"0.6535506",
"0.6491662",
"0.6486681",
"0.6470741",
"0.6403824"... | 0.85886204 | 0 |
Generate credential scope. format ///aws4_request | def credential_scope
[
date[0, 8],
region,
service,
"aws4_request"
].join("/")
end | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def credential_scope\n [request_datestamp,\n region,\n service,\n \"aws4_request\"].join(\"/\")\n end",
"def create_scope\n sprintf \"%s/%s/signer\", @self_key, @client_id\n end",
"def create_washington_auth_request(scope)\n post user_session_path, params: {user: { \n ... | [
"0.8303936",
"0.6071897",
"0.60711306",
"0.5727282",
"0.56178516",
"0.5553447",
"0.5486874",
"0.54734635",
"0.54734635",
"0.5453613",
"0.5448614",
"0.54269826",
"0.5392404",
"0.5363722",
"0.5331505",
"0.5303159",
"0.52923644",
"0.5273638",
"0.5227616",
"0.52182776",
"0.516053... | 0.81323385 | 1 |
Generate a canonical request of following style. format canonicalRequest = \n \n \n \n \n | def canonical_request
[
method,
Pathname.new(uri.path).cleanpath.to_s,
uri.query,
headers.sort.map { |k, v| [k.downcase, v.strip].join(':') }.join("\n") + "\n",
headers.sort.map { |k, v| k.downcase }.join(";"),
headers["X-Amz-Content-Sha256"]
].join("\n")
end | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def canonical\n canonical = \"\"\n canonical << method.to_s.upcase << \"\\n\"\n canonical << uri.path << \"\\n\"\n\n canonical_query.each_with_index do |(param,value), index|\n canonical << param << \"=\" << value\n canonical << \"&\" unless index == query.size - 1\n end\n\n ... | [
"0.75749826",
"0.7209021",
"0.568802",
"0.5621549",
"0.55959785",
"0.5566658",
"0.546611",
"0.54633373",
"0.54411095",
"0.54105866",
"0.5400322",
"0.5399279",
"0.53757757",
"0.53298634",
"0.5310225",
"0.5278865",
"0.52667505",
"0.513844",
"0.51206446",
"0.5076739",
"0.5076739... | 0.7510022 | 1 |
return the parsing error message if not valid JSON; otherwise nil | def validate_format
JSON.parse(content) && nil if content
rescue JSON::ParserError => err
err.message
end | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def error_message\n if !success?\n body\n end\n rescue JSON::ParserError => ex\n ex.message\n end",
"def error\n return if success? unless parse_error?\n @data['error'] || @data['message']\n end",
"def parsed_error\n default = %({\"error_description\": \"#{message}... | [
"0.7984221",
"0.75920945",
"0.7558122",
"0.7358468",
"0.71795094",
"0.7081371",
"0.70506734",
"0.7029006",
"0.69410396",
"0.6924949",
"0.6876251",
"0.6865448",
"0.6836348",
"0.66812205",
"0.6679131",
"0.6671477",
"0.66504616",
"0.66386336",
"0.66311854",
"0.6622214",
"0.66203... | 0.76455766 | 1 |
GET /pizzas GET /pizzas.json | def index
@new_pizza = Pizza.new
@pizzas = Pizza.all.paginate(:page => params[:page])
@toppings = Topping.all
@new_topping = Topping.new
end | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def index\n @pizzas = Pizza.all\n render json: @pizzas\n end",
"def index\n @pizzas = Pizza.all\n end",
"def index\n @pizzas = Pizza.all\n end",
"def index\n @pizzas = Pizza.all\n end",
"def index\n @parishes = Parish.all\n\n render json: @parishes\n end",
"def index\n @pizza... | [
"0.79183954",
"0.69536155",
"0.69536155",
"0.69536155",
"0.693384",
"0.69271004",
"0.6909014",
"0.680435",
"0.6643965",
"0.6541747",
"0.64329296",
"0.6431163",
"0.64141715",
"0.64074016",
"0.63968384",
"0.6334468",
"0.6292841",
"0.621295",
"0.6204518",
"0.61971706",
"0.618697... | 0.0 | -1 |
GET /pizzas/1 GET /pizzas/1.json | def show
id = params[:id].to_i
@pizza = Pizza.find(id)
@toppings = Topping.all
end | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def index\n @pizzas = Pizza.all\n render json: @pizzas\n end",
"def index\n @parishes = Parish.all\n\n render json: @parishes\n end",
"def index\n @pizzas = Pizza.all\n end",
"def index\n @pizzas = Pizza.all\n end",
"def index\n @pizzas = Pizza.all\n end",
"def index\n @pizza... | [
"0.77091056",
"0.68867713",
"0.678282",
"0.678282",
"0.678282",
"0.6741207",
"0.6692112",
"0.6638869",
"0.6638132",
"0.65948915",
"0.6528712",
"0.65110147",
"0.6476008",
"0.6462232",
"0.64574075",
"0.6448965",
"0.6430566",
"0.6426283",
"0.63608736",
"0.63608736",
"0.63578326"... | 0.0 | -1 |
POST /pizzas POST /pizzas.json | def create
@pizza = Pizza.new(pizza_params)
respond_to do |format|
if @pizza.save
format.html { redirect_to "/", notice: 'Pizza was successfully created.' }
format.json { render :show, status: :created, location: @pizza }
else
@toppings = Topping.all
format.html { render :new }
format.json { render json: @pizza.errors, status: :unprocessable_entity }
end
end
end | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def index\n @pizzas = Pizza.all\n render json: @pizzas\n end",
"def create\n @pizza = Pizza.new(pizza_params)\n if @pizza.save\n ActionCable.server.broadcast 'pizzas', {}\n render json: @pizza\n else\n render json: @pizza.errors, status: :unprocessable_entity\n end\n end",
"d... | [
"0.6705813",
"0.6625113",
"0.64994454",
"0.6376425",
"0.60530806",
"0.6009606",
"0.6008267",
"0.5996937",
"0.5992101",
"0.5983067",
"0.5959708",
"0.5923793",
"0.5834801",
"0.57777536",
"0.57760423",
"0.57585835",
"0.57585835",
"0.57585835",
"0.5753043",
"0.57194954",
"0.57187... | 0.5829503 | 13 |
PATCH/PUT /pizzas/1 PATCH/PUT /pizzas/1.json | def update
respond_to do |format|
if @pizza.update(pizza_params)
format.html { redirect_to "/", notice: 'Pizza was successfully updated.' }
format.json { render :show, status: :ok, location: @pizza }
else
@toppings = Topping.all
format.html { render :edit }
format.json { render json: @pizza.errors, status: :unprocessable_entity }
end
end
end | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def update\n respond_to do |format|\n if @pizzatopping.update(pizzatopping_params)\n format.html { redirect_to @pizzatopping, notice: 'Pizzatopping was successfully updated.' }\n format.json { head :no_content }\n else\n format.html { render action: 'edit' }\n format.json {... | [
"0.66538805",
"0.6461463",
"0.6320786",
"0.6264658",
"0.6264658",
"0.62588716",
"0.6193537",
"0.615107",
"0.6134751",
"0.6134751",
"0.61214674",
"0.6101401",
"0.60839",
"0.60620135",
"0.60557604",
"0.6032201",
"0.60287195",
"0.6020922",
"0.601863",
"0.600961",
"0.6005493",
... | 0.59983677 | 24 |
DELETE /pizzas/1 DELETE /pizzas/1.json | def destroy
@pizza.destroy
respond_to do |format|
format.html { redirect_to pizzas_url, notice: 'Pizza was successfully destroyed.' }
format.json { head :no_content }
end
end | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def destroy\n @pizza.destroy\n respond_to do |format|\n format.html { redirect_to pizzas_url }\n format.json { head :no_content }\n end\n end",
"def destroy\n @pizzatopping.destroy\n respond_to do |format|\n format.html { redirect_to pizzatoppings_url }\n format.json { head :n... | [
"0.7234531",
"0.7140727",
"0.69878125",
"0.6926036",
"0.6911703",
"0.69022393",
"0.6893429",
"0.6889687",
"0.6860238",
"0.6856113",
"0.6856113",
"0.68384856",
"0.68308455",
"0.6809122",
"0.6802509",
"0.6788143",
"0.67851883",
"0.67745614",
"0.6766193",
"0.67617047",
"0.675891... | 0.6975026 | 6 |
Use callbacks to share common setup or constraints between actions. | def set_pizza
@pizza = Pizza.find(params[:id])
end | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def set_required_actions\n # TODO: check what fields change to asign required fields\n end",
"def action_hook; end",
"def run_actions; end",
"def define_action_hook; end",
"def actions; end",
"def define_action_helpers\n if super && action == :save\n @instance_helper_module.class_... | [
"0.6163163",
"0.6045976",
"0.5946146",
"0.591683",
"0.5890051",
"0.58349305",
"0.5776858",
"0.5703237",
"0.5703237",
"0.5652805",
"0.5621621",
"0.54210985",
"0.5411113",
"0.5411113",
"0.5411113",
"0.5391541",
"0.53794575",
"0.5357573",
"0.53402257",
"0.53394014",
"0.53321576"... | 0.0 | -1 |
Never trust parameters from the scary internet, only allow the white list through. | def pizza_params
params.require(:pizza).permit(:id, :name, :description, topping_ids: [])
end | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def strong_params\n params.require(:user).permit(param_whitelist)\n end",
"def strong_params\n params.require(:listing_member).permit(param_whitelist)\n end",
"def allow_params_authentication!; end",
"def allowed_params\n ALLOWED_PARAMS\n end",
"def default_param_whitelist\n [\"mode\"]\n... | [
"0.6980629",
"0.67819995",
"0.67467666",
"0.67419875",
"0.67347664",
"0.65928614",
"0.6504013",
"0.6498014",
"0.64819515",
"0.64797956",
"0.64562726",
"0.64400834",
"0.6380117",
"0.6377456",
"0.63656694",
"0.6320543",
"0.63002014",
"0.62997127",
"0.629425",
"0.6293866",
"0.62... | 0.0 | -1 |
GET topics/1/posts/1 GET topics/1/posts/1.json | def show
topic = Topic.find(params[:topic_id])
post = topic.posts.find(params[:id])
@presenter = PostsShowPresenter.new(topic, post)
end | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def show\n @topic = Topic.find(params[:id]) \n @posts=@topic.posts.page(params[:page]).per(post_per_page)\n\n respond_to do |format|\n format.html # show.html.erb\n format.json { render json: @topic }\n end\n end",
"def forum_get_topic_posts(args={})\n JSON.parse(HelpSpot.api_reque... | [
"0.71700764",
"0.7085401",
"0.68966436",
"0.6758746",
"0.6749801",
"0.6725195",
"0.66660815",
"0.6638457",
"0.66152346",
"0.65481615",
"0.6504683",
"0.6451783",
"0.64484006",
"0.6407141",
"0.6397665",
"0.6341833",
"0.63189876",
"0.6310452",
"0.6310037",
"0.6267685",
"0.624703... | 0.6413064 | 13 |
POST topics/1/posts POST topics/1/posts.json | def create
topic = Topic.find(params[:topic_id])
@post = topic.posts.build(post_params)
@post.user = current_user
respond_to do |format|
if @post.save
format.html { redirect_to [topic, @post], notice: 'Post was successfully created.' }
format.json { render :show, status: :created, location: @post }
else
format.html { render :new }
format.json { render json: @post.errors, status: :unprocessable_entity }
end
end
end | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def create_post topic_id, payload\n\t\t\t\t\tFreshdesk::Api::Client.convert_to_hash( @connection.post POSTS, payload, topic_id, nil, \"create_post\" )\n\t\t\t\tend",
"def create\n @topic = Topic.friendly.find(params[:topic_id])\n @post = @topic.posts.new(post_params)\n @post.user_id = current_user.id\n ... | [
"0.7255775",
"0.7200451",
"0.70958173",
"0.69808865",
"0.68992865",
"0.6893679",
"0.6835425",
"0.68294716",
"0.6811876",
"0.68064487",
"0.6793473",
"0.6787541",
"0.6711998",
"0.66704327",
"0.6654238",
"0.65992624",
"0.6590465",
"0.6548802",
"0.65355927",
"0.6490586",
"0.64838... | 0.7171243 | 2 |
PATCH/PUT topics/1/posts/1 PATCH/PUT topics/1/posts/1.json | def update
topic = Topic.find(params[:topic_id])
@post = topic.posts.find(params[:id])
authorize @post
respond_to do |format|
if @post.update(post_params)
format.html { redirect_to [topic, @post], notice: 'Post was successfully updated.' }
format.json { render :show, status: :ok, location: @post }
else
format.html { render :edit }
format.json { render json: @post.errors, status: :unprocessable_entity }
end
end
end | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def update\n topic = Topic.find(params[:id])\n if topic.update(params_topic)\n render json: topic, status: 200\n else\n render json: topic.errors, status: 422\n end\n\n end",
"def UpdateTopic id,params = {}\n \n APICall(path: \"topics/#{id}.json\",method: 'PUT... | [
"0.6609045",
"0.6462179",
"0.6456123",
"0.64544713",
"0.6443821",
"0.6344135",
"0.6318883",
"0.6282033",
"0.62662715",
"0.62476003",
"0.62398064",
"0.62261117",
"0.61952466",
"0.6149235",
"0.6147185",
"0.61423254",
"0.6132375",
"0.6130962",
"0.6130962",
"0.6130962",
"0.613096... | 0.6180992 | 13 |
DELETE topics/1/posts/1 DELETE topics/1/posts/1.json | def destroy
topic = Topic.find(params[:topic_id])
@post = topic.posts.find(params[:id])
authorize @post
@post.destroy
respond_to do |format|
format.html { redirect_to book_club_topic_path(topic.book_club, topic), notice: 'Post was successfully deleted.' }
format.json { head :no_content }
end
end | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def DeleteTopic id\n \n APICall(path: \"topics/#{id}.json\",method: 'DELETE')\n \n end",
"def destroy\n Topic.find(params[:id]).destroy\n render :json => {:ok => true}, :head => :no_content\n end",
"def destroy\n @post = @topic.posts.find(params[:id])\n @post.destroy\n\n ... | [
"0.73453844",
"0.72021353",
"0.72004056",
"0.7169228",
"0.7080836",
"0.7057301",
"0.7036528",
"0.70290077",
"0.7014624",
"0.7013132",
"0.70085657",
"0.69958216",
"0.69928765",
"0.6991441",
"0.6991441",
"0.6991441",
"0.6991441",
"0.6991441",
"0.6991441",
"0.69630736",
"0.69560... | 0.6743095 | 38 |
def room_details debugger end | def show
end | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def details; end",
"def room; end",
"def room\n end",
"def details\n end",
"def details\n\n end",
"def search_room\n @rooms=Room.new\n \n\n # #debugger\n end",
"def show\n @roomtype = DetailRoom.all_types\n @time = Room.all_times\n @day_list = Room.all_days\n if session[:ad... | [
"0.7206317",
"0.71583056",
"0.70895916",
"0.70081264",
"0.693806",
"0.6623011",
"0.66091776",
"0.64362365",
"0.63558567",
"0.6336065",
"0.63001615",
"0.62945366",
"0.6277129",
"0.6275501",
"0.626459",
"0.62576556",
"0.6236348",
"0.6234083",
"0.6223577",
"0.6202023",
"0.620202... | 0.0 | -1 |
Returns a possessive version of the string | def posessivize(name)
return name if name.blank?
if name.last == "s"
name + "'"
else
name + "'s"
end
end | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def sn(s)\r\n (s == '(none)' || s == '') ? '-' : s\r\n end",
"def double_metaphone(str); end",
"def no_leading_spaces str\n return str.force_encoding('ASCII-8BIT').gsub(/\\302\\240/,'').strip # What a hack.\n end",
"def possessivize(s)\n s + (s[-1,1] == 's' ? \"'\" : \"'s\")\n end",
"def strip(... | [
"0.61616117",
"0.6037281",
"0.6007918",
"0.5982443",
"0.59431034",
"0.59183204",
"0.59142053",
"0.5907412",
"0.5896384",
"0.5824625",
"0.58049667",
"0.57945895",
"0.57936406",
"0.5781384",
"0.57695353",
"0.57695353",
"0.5761465",
"0.5745831",
"0.57411677",
"0.5739859",
"0.573... | 0.0 | -1 |
Returns flash message class for a given flash message name | def flash_message_class_for(name)
{
"notice" => "success",
"alert" => "alert",
"warning" => "warning",
}.fetch(name)
end | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def flash_message_class_for(name)\n {\n 'notice' => 'success',\n 'alert' => 'alert',\n 'warning' => 'warning'\n }.fetch(name)\n end",
"def flash_class(name)\n if name == :notice\n 'success'\n #elsif name == :notifications\n # 'info'\n else\n 'danger'\n end\n end",... | [
"0.8448762",
"0.7494982",
"0.71567035",
"0.7125019",
"0.692047",
"0.68981314",
"0.68809146",
"0.68277496",
"0.6790968",
"0.6679687",
"0.66744936",
"0.6674135",
"0.6674135",
"0.66588175",
"0.66412395",
"0.6618734",
"0.65675104",
"0.65593016",
"0.65281254",
"0.6500549",
"0.6491... | 0.8500592 | 0 |
Returns a github user's profile url | def github_enterprise_enabled?
ENV["GITHUB_ENTERPRISE_URL"].present? &&
(ENV["GITHUB_ENTERPRISE_URL"] != "YOUR_GITHUB_ENTERPRISE_URL") &&
ENV["GITHUB_CLIENT_OPTION_SITE"].present? &&
(ENV["GITHUB_CLIENT_OPTION_SITE"] != "YOUR_GITHUB_ENTERPRISE_SITE") &&
ENV["GITHUB_CLIENT_OPTION_AUTHORIZE_URL"].present? &&
(ENV["GITHUB_CLIENT_OPTION_AUTHORIZE_URL"] != "YOUR_GITHUB_ENTERPRISE_AUTHORIZE_URL") &&
ENV["GITHUB_CLIENT_OPTION_ACCESS_TOKEN_URL"].present? &&
(ENV["GITHUB_CLIENT_OPTION_ACCESS_TOKEN_URL"] != "YOUR_GITHUB_ENTERPRISE_ACCESS_TOKEN_URL")
end | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def github_url\n github_user.blank? ? nil : \"#{GITHUB_URL}#{github_user}\"\n end",
"def profile_url\n @json['user']['links']['self']\n end",
"def url\n \"http://github.com/#{login}\"\n end",
"def profile_url\n @data[:profile_url]\n end",
"def profile_url\n \"#{Steam::A... | [
"0.80344355",
"0.7439228",
"0.7426183",
"0.73609316",
"0.73518556",
"0.7347008",
"0.72130865",
"0.72116953",
"0.7159113",
"0.70151156",
"0.7011653",
"0.70048124",
"0.69768584",
"0.69649994",
"0.6866531",
"0.6833184",
"0.68129164",
"0.6794656",
"0.6793103",
"0.677654",
"0.6740... | 0.0 | -1 |
Returns a github account type | def github_account_type
github_enterprise_enabled? ? "GitHub Enterprise" : "GitHub"
end | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def github_type\n self.class.to_s.underscore.gsub(/github_/, \"\").to_sym\n end",
"def account_type\n User.account_type(current_user)\n end",
"def check_and_get_provider_credential(type: FastlaneCI::ProviderCredential::PROVIDER_CREDENTIAL_TYPES[:github])\n provider_credential = user.provider_c... | [
"0.78230494",
"0.67230886",
"0.66324216",
"0.64974976",
"0.64810234",
"0.64281845",
"0.6404035",
"0.63345176",
"0.6325628",
"0.6254592",
"0.62432796",
"0.62195224",
"0.6202761",
"0.61956406",
"0.6105868",
"0.6038353",
"0.60329163",
"0.6032806",
"0.6017849",
"0.59523463",
"0.5... | 0.81271565 | 0 |
GET /incident_reports/new GET /incident_reports/new.xml | def new
@report = IncidentReport.new(:staff_id => current_staff.id) # new report
super
end | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def new\n @incident = Incident.new\n \n @title = \"New Incident\" \n \n respond_to do |format|\n format.html # new.html.erb\n format.xml { render :xml => @incident }\n end\n end",
"def new\n @incident = Incident.new\n\n respond_to do |format|\n format.html # new.html.e... | [
"0.7552963",
"0.75451446",
"0.75451446",
"0.722456",
"0.71190476",
"0.7069713",
"0.70286524",
"0.6910538",
"0.69090855",
"0.6907704",
"0.6873394",
"0.6873394",
"0.6861885",
"0.68400717",
"0.68010545",
"0.6730168",
"0.6710331",
"0.6690054",
"0.6689144",
"0.6684845",
"0.6664588... | 0.64133704 | 44 |
Returns a random token. | def new_token
SecureRandom.urlsafe_base64
end | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def randomToken\n @tokens[rand(@tokens.size)]\n end",
"def rand_token\n\t\ttok = rand(36**8).to_s(36)\n\t\tif tok.length < 8\n\t\t\trand_token\n\t\telse\n\t\t\ttok\n\t\tend\n\tend",
"def generate_token\n o = [('a'..'z'), ('A'..'Z')].map(&:to_a).flatten\n token = (0...50).map { o[rand(o.le... | [
"0.8722138",
"0.8216037",
"0.80283344",
"0.7939367",
"0.7809116",
"0.7720538",
"0.77102304",
"0.753804",
"0.7512597",
"0.74613154",
"0.7444309",
"0.74366266",
"0.7356511",
"0.7296886",
"0.7287629",
"0.7274295",
"0.7255082",
"0.7239316",
"0.7234893",
"0.72232026",
"0.7190798",... | 0.67344654 | 55 |
Remembers a user in the database for use in persistent sessions. | def remember
self.remember_token = User.new_token
update remember_digest: User.digest(remember_token)
end | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def reset_user\n return unless exists?(:previous_user)\n set :user, fetch(:previous_user)\n unset :previous_user\n clear_sessions\n end",
"def change_password\n @user = User.find_by_id(session[:remember_token])\n end",
"def set_user\n zeus = User.find(@zeus_user.id)\n ... | [
"0.688533",
"0.6520498",
"0.62893504",
"0.6221808",
"0.62212783",
"0.61783326",
"0.61423343",
"0.6129836",
"0.6129836",
"0.612339",
"0.61226416",
"0.61226416",
"0.6120063",
"0.60949206",
"0.60882413",
"0.60877854",
"0.6086106",
"0.6080627",
"0.60787547",
"0.6068734",
"0.60475... | 0.5787979 | 70 |
Returns true if the given token matches the digest. | def authenticated? attribute, token
digest = send "#{attribute}_digest"
return false if digest.nil?
BCrypt::Password.new(digest).is_password? token
end | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def authenticated?(digest, token)\n digest = send(\"#{digest}_digest\")\n # Use metaprogramming to select the appropriate token attribute based on\n # the parameter digest.\n return false if digest.nil? # Digest does not exist in the database.\n BCrypt::Password.new(digest).is_password?(token)\n ... | [
"0.7843973",
"0.7617599",
"0.7406501",
"0.7289159",
"0.7142788",
"0.7141913",
"0.7141913",
"0.70264363",
"0.6983468",
"0.6963747",
"0.6905058",
"0.68590236",
"0.6842192",
"0.68239933",
"0.67968875",
"0.6788145",
"0.6774319",
"0.6764836",
"0.6750688",
"0.6746277",
"0.6742721",... | 0.0 | -1 |
Sets the password reset attributes. | def create_reset_digest
self.reset_token = User.new_token
update reset_digest: User.digest(reset_token),
reset_sent_at: Time.zone.now
end | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def send_reset_password_instructions(attributes={})\n send_instructions_for(:reset_password, attributes)\n end",
"def set_password_reset\n\t\tself.code = SecureRandom.urlsafe_base64\n\t\tself.expires_at = 4.hours.from_now\n\t\tself.save!\n\tend",
"def attempt_set_password(params)\n p = {}\n ... | [
"0.7047396",
"0.69583577",
"0.6818595",
"0.6749292",
"0.6737656",
"0.670031",
"0.6676551",
"0.66730624",
"0.66577595",
"0.66103",
"0.66103",
"0.66103",
"0.66103",
"0.66103",
"0.66103",
"0.66103",
"0.66103",
"0.66103",
"0.66103",
"0.66103",
"0.66103",
"0.66103",
"0.66103",... | 0.0 | -1 |
Sends password reset email. | def send_password_reset_email
UserMailer.password_reset(self).deliver_now
end | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def send_password_reset_email\n\t\tUserMailer.password_reset(id, self.reset_token).deliver_later\n \tend",
"def send_password_reset_email\r\n UserMailer.password_reset(self).deliver_now\r\n end",
"def send_password_reset_email\r\n UserMailer.password_reset(self).deliver_now\r\n end",
"de... | [
"0.8790497",
"0.86796135",
"0.86796135",
"0.8663904",
"0.86629623",
"0.86629623",
"0.86629623",
"0.86629623",
"0.86629623",
"0.86629623",
"0.86629623",
"0.86629623",
"0.86629623",
"0.86587805",
"0.8652506",
"0.8650495",
"0.8648778",
"0.8633209",
"0.8633209",
"0.8633209",
"0.8... | 0.0 | -1 |
Returns true if a password reset has expired. | def password_reset_expired?
reset_sent_at < Settings.timeout_reset_password.hours.ago
end | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def password_reset_expired?\n reset_sent_at < 2.hours.ago\n end",
"def password_reset_expired?\n reset_sent_at < 2.hours.ago # password reset sent earlier than two hours ago.\n end",
"def password_reset_expired?\n reset_sent_at < 2.hours.ago # reads as password reset sent earlier than 2 hours ag... | [
"0.8744046",
"0.87419695",
"0.86920106",
"0.8689273",
"0.86616915",
"0.8659473",
"0.8658822",
"0.8658822",
"0.8658822",
"0.8658822",
"0.8658822",
"0.8658822",
"0.8658822",
"0.8658822",
"0.865339",
"0.86504006",
"0.862233",
"0.86214966",
"0.8609769",
"0.8609244",
"0.8609244",
... | 0.8868087 | 0 |
This should be used everywhere. Should make it easy to support a driverpermodel system | def neo4j_query(*args)
ActiveGraph::Base.query(*args)
end | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def make_and_model; end",
"def driver; end",
"def hardware_model\n self.model\n end",
"def model\n\t\t\t\t@@name ||= cpuinfo.find { |x| x.start_with?('model name') }.to_s.split(?:)[-1].to_s.strip\n\t\t\tend",
"def specie; end",
"def specie; end",
"def specie; end",
"def specie; end",
"def drive... | [
"0.6699621",
"0.665803",
"0.65186477",
"0.6042227",
"0.59375924",
"0.59375924",
"0.59375924",
"0.59375924",
"0.5862792",
"0.5862792",
"0.58120054",
"0.57973665",
"0.57973665",
"0.578948",
"0.5739407",
"0.5721308",
"0.5704522",
"0.57011586",
"0.57011586",
"0.57011586",
"0.5701... | 0.0 | -1 |
serialize the hash into json and save in a cookie add to the responses cookies | def store_session(res)
end | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def cookie_hash\n\t\t\t{ 'Cookie' => @cookies.to_s }\n\t\tend",
"def store_session(res)\n res.cookies << WEBrick::Cookie.new('_rails_lite_app', @hash.to_json)\n end",
"def serialize_into_cookie(record); end",
"def encode_to_cookie h, cookie\n cookie[@name] = encode h\n end",
"def serialize_... | [
"0.7232483",
"0.7079436",
"0.70458454",
"0.69088364",
"0.67860115",
"0.6538753",
"0.6516383",
"0.64779174",
"0.6469998",
"0.643302",
"0.6402495",
"0.6361502",
"0.636",
"0.63407034",
"0.6339202",
"0.63323885",
"0.6327872",
"0.63275915",
"0.629749",
"0.62952596",
"0.6291643",
... | 0.0 | -1 |
Making the assumption that the result section is always last. | def eefps_result
population
.extractions_extraction_forms_projects_sections_type1
.extractions_extraction_forms_projects_section
.extraction_forms_projects_section
.extraction_forms_project
.extraction_forms_projects_sections.last
end | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def merged_result; end",
"def end_section\n end",
"def subsection(output)\n \" #{output}\" unless output.nil? || output.empty?\n end",
"def next_result()\n #This is a stub, used for indexing\n end",
"def section_preparation_finished\n end",
"def generate_section_end(format,... | [
"0.58851147",
"0.56669855",
"0.5520592",
"0.5504253",
"0.5466911",
"0.5386169",
"0.53785294",
"0.53412926",
"0.53330576",
"0.5324164",
"0.5321301",
"0.52459675",
"0.52459675",
"0.52459675",
"0.52459675",
"0.5230512",
"0.5207932",
"0.5198953",
"0.5197537",
"0.51818913",
"0.517... | 0.0 | -1 |
Subject can be set in your I18n file at config/locales/en.yml with the following lookup: en.notification_mailer.notification.subject | def notification(mail_id)
@mail = mail_id
@greeting = "Hi"
mail to: @mail, subject: "Test Mail. Dont reply!!! "
end | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def translate(mapping, key)\n I18n.t(:\"notifications_subject\", :scope => [:eventifier, :notifications, key],\n :default => [:subject, key.to_s.humanize])\n end",
"def subject (recipient)\n subject_variables = alert_variables[:subject].dup\n subject_variables.merge!(recipient_deta... | [
"0.7165998",
"0.7108778",
"0.7046002",
"0.6853182",
"0.6829124",
"0.68049514",
"0.67673445",
"0.65410846",
"0.6457367",
"0.6402423",
"0.6402302",
"0.6364968",
"0.6293081",
"0.62508965",
"0.6249441",
"0.6230431",
"0.62278724",
"0.62247276",
"0.6173129",
"0.61386085",
"0.611699... | 0.0 | -1 |
=begin 1. Convert string to array 1.5 Create a hash 2. Iterate through array with each, adding to hash values Read values using arrays constructed for this purpose. 3. Return hash values =end | def is_lowercase(string)
lowercase = ('a'..'z').to_a
lowercase.include? string
end | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_hash_correct\n\t\n\t\tString test_array1 = '2|abb2|George>Amina(16):Henry>James(4):Henry>Cyrus(17):Henry>Kublai(4):George>Rana(1):SYSTEM>Wu(100)|1518892051.753197000|c72d'.split('|').map(&:chomp)\n\n\t\tx = test_array1[0].unpack('U*') + test_array1[1].unpack('U*') + test_array1[2].unpack('U*') + test_arra... | [
"0.69053036",
"0.6803856",
"0.6676117",
"0.65932703",
"0.6450564",
"0.6416501",
"0.63529825",
"0.6345526",
"0.62972957",
"0.6186482",
"0.61718845",
"0.6167016",
"0.61506754",
"0.6097304",
"0.6082936",
"0.6082936",
"0.606489",
"0.6064794",
"0.6063422",
"0.6060528",
"0.6059853"... | 0.0 | -1 |
End of Method: New Method: Create | def create
@ward = Ward.new(params[:ward])
@ward.created_by = 1
@ward.modified_by = 1
if @ward.save
flash[:notice] = 'Ward Created Sucessfully.'
redirect_to :action => 'list'
else
@zone = Zone.find(:all) #Show all zones
render :action => 'new'
end
end | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def create\n \n end",
"def new\n \n end",
"def create; end",
"def create; end",
"def create; end",
"def create; end",
"def create\n \n end",
"def create\n # TODO: implement create\n end",
"def create\n \t\n end",
"def create!\n end",
"def create\r\n\r\n\r\... | [
"0.83486086",
"0.8118768",
"0.80671453",
"0.80671453",
"0.80671453",
"0.80671453",
"0.8030905",
"0.7999961",
"0.7963049",
"0.79399604",
"0.79264325",
"0.79050714",
"0.7831548",
"0.7759276",
"0.77450836",
"0.77361786",
"0.77361786",
"0.77361786",
"0.77361786",
"0.77361786",
"0... | 0.0 | -1 |
End Method: Create Method: List | def list
@wards = Ward.paginate :page => params[:page], :per_page => 10 #Paginatin of 10 Records
respond_to do |format|
format.html # list.html.erb
format.xml { render :xml => @wards } #Render to XML Files
end
end | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def list; end",
"def list; end",
"def list; end",
"def list; end",
"def list; end",
"def list\n raise NotImplementedError\n end",
"def list\n\n end",
"def list\n super\n end",
"def list\n super\n end",
"def list\n super\n end",
"def list\n super\n en... | [
"0.7997768",
"0.7997768",
"0.7997768",
"0.7997768",
"0.7997768",
"0.77665836",
"0.7751127",
"0.7741375",
"0.7741375",
"0.7741375",
"0.7741375",
"0.7741375",
"0.7741375",
"0.7741375",
"0.7741375",
"0.7741375",
"0.7741375",
"0.7741375",
"0.7741375",
"0.7741375",
"0.7741375",
... | 0.0 | -1 |
End Method: List Method: Show | def show
@ward = Ward.find(params[:id])
respond_to do |format|
format.html # show.html.erb
format.xml { render :xml => @wards } #Render to XML File
end
end | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def show_list\n\t\t\t@my_list.show\n\t\tend",
"def showlist\n\t\t@people_list.each do |x|\n\t\t\tputs \"* #{capitalize(x.type)}, #{capitalize(x.name)}, #{capitalize(x.party)}\"\n\t\tend\n\tend",
"def show\n run List::Show\n render cell(List::Cell::Show, result['model']), layout: true\n end",
"def show... | [
"0.79420006",
"0.76827675",
"0.7577662",
"0.75613475",
"0.73676735",
"0.73570174",
"0.7269406",
"0.72679186",
"0.72679186",
"0.72679186",
"0.72679186",
"0.72679186",
"0.72679186",
"0.72679186",
"0.72679186",
"0.72679186",
"0.7097166",
"0.7084818",
"0.70719886",
"0.7044156",
"... | 0.0 | -1 |
End Method:Show Method: Edit | def edit
@ward = Ward.find(params[:id])
@zone = Zone.find(:all) #Show all Zones
end | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def edit\n \n end",
"def edit; end",
"def edit; end",
"def edit; end",
"def edit; end",
"def edit; end",
"def edit; end",
"def edit; end",
"def edit; end",
"def edit; end",
"def edit; end",
"def edit; end",
"def edit; end",
"def edit; end",
"def edit; end",
"def edit; end",
... | [
"0.86251795",
"0.85924673",
"0.85924673",
"0.85924673",
"0.85924673",
"0.85924673",
"0.85924673",
"0.85924673",
"0.85924673",
"0.85924673",
"0.85924673",
"0.85924673",
"0.85924673",
"0.85924673",
"0.85924673",
"0.85924673",
"0.85924673",
"0.85924673",
"0.85924673",
"0.85924673"... | 0.0 | -1 |
End Method:Edit Method: Update | def update
@ward = Ward.find(params[:id])
@ward.modified_by = 1
if @ward.update_attributes(params[:ward])
flash[:notice] = 'Ward Updated Sucessfully.'
redirect_to :action => 'show', :id => @ward
else
@zone = Zone.find(:all) #Show all zones
render :action => 'edit'
end
end | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def update\n\t\tend",
"def update\n\t\tend",
"def update\n\t\t\n\t\tend",
"def update\r\n end",
"def update\n\n\tend",
"def update\n\n\tend",
"def update\r\n end",
"def update\r\n end",
"def update\r\n end",
"def update\r\n end",
"def update\r\n\r\n end",
"def update\n \... | [
"0.82399505",
"0.82399505",
"0.8202848",
"0.8173279",
"0.81621945",
"0.81621945",
"0.8162152",
"0.8162152",
"0.8162152",
"0.8162152",
"0.8131664",
"0.8067487",
"0.8064091",
"0.80450284",
"0.8009532",
"0.8009532",
"0.8009532",
"0.8009532",
"0.8009532",
"0.80015284",
"0.8001528... | 0.0 | -1 |
End Method: Update Method: Delete | def delete
Ward.find(params[:id]).destroy
redirect_to :action => 'list'
end | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def delete\n \n end",
"def delete\n end",
"def delete\n end",
"def delete\n \n end",
"def delete\n # TODO: implement delete\n end",
"def delete\n end",
"def delete\n\n end",
"def delete\n end",
"def delete\n end",
"def delete\n end",
"def delete\n e... | [
"0.84369767",
"0.80103457",
"0.797027",
"0.79629153",
"0.7926476",
"0.7893274",
"0.781437",
"0.7790055",
"0.7790055",
"0.7790055",
"0.7790055",
"0.7790055",
"0.7790055",
"0.7790055",
"0.7713235",
"0.7616248",
"0.7449018",
"0.74057823",
"0.739453",
"0.7391029",
"0.7383267",
... | 0.0 | -1 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.