content stringlengths 35 416k | sha1 stringlengths 40 40 | id int64 0 710k |
|---|---|---|
import getpass
def get_user():
    """Return the login name of the current user (via ``getpass``)."""
    name = getpass.getuser()
    return name
def add_spaces(x):
    """Indent every line of ``x`` by four spaces.

    Needed to make raw html blocks in rst format correctly.

    Args:
        x: A string (split on newlines) or an iterable of line strings.

    Returns:
        str: The lines, each prefixed with four spaces, joined with
        newlines. The original implementation concatenated the lines
        without newlines, which destroyed the line structure rst needs.
    """
    if isinstance(x, str):
        x = x.split('\n')
    return '\n'.join('    ' + line for line in x)
def tsorted(a):
    """Return the elements of *a* as a sorted tuple."""
    items = list(a)
    items.sort()
    return tuple(items)
import os
def isAccessible(path):
    """Check whether the current user has access to *path*.

    Parameters
    ----------
    path : str
        a directory or file

    Returns
    -------
    bool
        True when the path exists and its containing directory can be
        listed, False otherwise.
    """
    if not os.path.exists(path):
        return False
    # Existence alone can be a false positive on Python 2 when the user
    # lacks permission, so probe by listing the containing directory,
    # which raises OSError without access.
    try:
        target = path if os.path.isdir(path) else os.path.dirname(path)
        os.listdir(target)
    except OSError:
        return False
    return True
def ignores_leakcheck(func):
    """Mark the given object to be skipped during leakchecks.

    Applied to a method, the method still runs but is not subject to
    leak checks. Applied to a class, the whole class is skipped —
    intended for very slow classes (e.g. timeout-prone subclasses that
    only vary behaviour such as pool sizes).
    """
    setattr(func, 'ignore_leakcheck', True)
    return func
def or_condition(field, op, seq):
    """Build an 'or' condition of the form
    ``((field, ('op seq[0]', 'op seq[1]', ...)),)``.

    Example
    -------
    >>> or_condition('tag', 'has', ('Web', 'Case Study'))
    (('tag', ('has Web', 'has Case Study')),)
    """
    clauses = tuple('{} {}'.format(op, item) for item in seq)
    return ((field, clauses),)
def generate_projwfc_node(generate_calc_job_node, fixture_localhost, tmpdir):
    """Fixture to construct a ``projwfc.x`` calcjob node for a specified test."""
    # NOTE(review): presumably a pytest fixture — `generate_calc_job_node`,
    # `fixture_localhost` and `tmpdir` look like injected fixtures; confirm
    # against the conftest that defines them.
    def _generate_projwfc_node(test_name):
        """Generate a mock ``ProjwfcCalculation`` node for testing the parsing.
        :param test_name: The name of the test folder that contains the output files.
        """
        entry_point_calc_job = 'quantumespresso.projwfc'
        # The XML schema file is handed over through the retrieve_temporary
        # mechanism (tmpdir folder) rather than the permanent repository.
        retrieve_temporary_list = ['data-file-schema.xml']
        attributes = {'retrieve_temporary_list': retrieve_temporary_list}
        node = generate_calc_job_node(
            entry_point_name=entry_point_calc_job,
            computer=fixture_localhost,
            test_name=test_name,
            attributes=attributes,
            retrieve_temporary=(tmpdir, retrieve_temporary_list)
        )
        return node
    return _generate_projwfc_node | 9e167013a320e6b69d0121a820ea62efc9fc9f92 | 691,185 |
def to_fixed_point(val: float, precision: int) -> int:
    """Convert *val* to a fixed-point integer with *precision* fractional bits."""
    scale = 1 << precision
    return int(val * scale)
from typing import Any
from typing import Optional
def try_int(value: Any) -> Optional[int]:
    """Convert a value to an int, if possible, otherwise ``None``.

    Uses EAFP rather than ``str.isnumeric``: that predicate accepts
    characters such as '½' or '²' that ``int()`` rejects, so the
    original check could let a ValueError escape. As a side benefit,
    signed strings like '-5' now convert instead of yielding None.
    """
    try:
        return int(str(value))
    except ValueError:
        return None
from typing import OrderedDict
def get_fast_rxn_chattering_spe():
    """
    return fast reaction and chattering species
    better make them in the same order

    Returns:
        tuple(OrderedDict, OrderedDict): (fast_reaction, chattering_species);
        each maps a forward index (stored as str) to the paired backward /
        partner index (stored as int), insertion order preserved.
    """
    # Each entry pairs the forward/backward indices of a fast (equilibrated)
    # reaction ("rxn") with the two species that chatter between each
    # other ("spe"). The surrounding comments give the mechanism context.
    fast_transitions = [
        {
            # 1068 549 O2+npropyl=npropyloo
            # reactants 9 O2 60 npropyl products 78 npropyloo
            # 1069 -549 O2+npropyl=npropyloo
            "rxn": [1068, 1069],
            "spe": [60, 78]
        },
        # 1096 565 O2+ipropyl=ipropyloo
        # reactants 9 O2 61 ipropyl products 80 ipropyloo
        # 1097 -565 O2+ipropyl=ipropyloo
        {
            "rxn": [1096, 1097],
            "spe": [61, 80]
        },
        # 132 69 CH3+O2(+M)=CH3OO(+M)
        # reactants 25 CH3 9 O2 products 27 CH3OO
        # 133 -69 CH3+O2(+M)=CH3OO(+M)
        {
            "rxn": [132, 133],
            "spe": [25, 27]
        },
        # 1116 575 O2+QOOH_1=well_1
        # reactants 9 O2 87 QOOH_1 products 90 well_1
        # 1117 -575 O2+QOOH_1=well_1
        {
            "rxn": [1116, 1117],
            "spe": [87, 90]
        },
        # 348 180 C2H5+O2=CH3CH2OO
        # reactants 39 C2H5 9 O2 products 50 CH3CH2OO
        # 349 -180 C2H5+O2=CH3CH2OO
        {
            "rxn": [348, 349],
            "spe": [39, 50]
        },
        # 1080 556 npropyloo=QOOH_1 557 npropyloo=QOOH_1
        # reactants 78 npropyloo products 87 QOOH_1
        # 1081 -556 npropyloo=QOOH_1 -557 npropyloo=QOOH_1
        {
            "rxn": [1080, 1081],
            "spe": [78, 87]
        },
        # 586 300 O2C2H4OH=CH2CH2OH+O2
        # reactants 85 O2C2H4OH products 54 CH2CH2OH 9 O2
        # 587 -300 O2C2H4OH=CH2CH2OH+O2
        {
            "rxn": [586, 587],
            "spe": [85, 54]
        },
        # 1042 536 allyloxy=vinoxylmethyl
        # reactants 72 allyloxy products 108 vinoxylmethyl
        # 1043 -536 allyloxy=vinoxylmethyl
        {
            "rxn": [1042, 1043],
            "spe": [72, 108]
        },
        # 434 224 acetyl+O2=acetylperoxy
        # reactants 9 O2 45 acetyl products 47 acetylperoxy
        # 435 -224 acetyl+O2=acetylperoxy
        {
            "rxn": [434, 435],
            "spe": [45, 47]
        }
    ]
    fast_reaction_list = []
    chattering_species_list = []
    for _, r_s in enumerate(fast_transitions):
        # NOTE(review): debug print left in — consider removing or logging.
        print(r_s)
        # Keys are stored as str, values as int — presumably to match the
        # downstream lookup format; confirm against the consumer.
        fast_reaction_list.append(
            tuple([str(r_s['rxn'][0]), int(r_s['rxn'][1])]))
        chattering_species_list.append(
            tuple([str(r_s['spe'][0]), int(r_s['spe'][1])]))
    fast_reaction = OrderedDict(fast_reaction_list)
    chattering_species = OrderedDict(chattering_species_list)
    # print(fast_reaction, chattering_species)
    return fast_reaction, chattering_species | 9d54c9c26f63212cdf6d5a1512a6ca50d3ebe3ba | 691,188 |
def pre_process_tags(paragraph_element):
    """
    Convert initial italics-tagged text to markdown bold
    and convert the rest of a paragraph's I tags to markdown italics.

    :param paragraph_element: element supporting ``find``/``find_all``/
        ``replaceWith`` — presumably a BeautifulSoup tag; confirm.
    :return: the same element, mutated in place.
    """
    first_tag = paragraph_element.find("I")
    # NOTE(review): in bs4 an empty tag is falsy, so a childless leading
    # <I></I> would be skipped here — confirm that is intended.
    if first_tag:
        bold_content = first_tag.text
        first_tag.replaceWith("**{}**".format(bold_content))
    # The first <I> has already been replaced by a string, so this loop
    # only sees the remaining italics tags.
    for element in paragraph_element.find_all("I"):
        i_content = element.text
        element.replaceWith("*{}*".format(i_content))
    return paragraph_element | 2db70456626a50111c664959a5c6591770567bd2 | 691,189 |
def limit_versions(language, limit, operator):
    """
    Limits given languages with the given operator:
    :param language:
        A `Language` instance.
    :param limit:
        A number to limit the versions.
    :param operator:
        The operator to use for the limiting.
    :return:
        A new `Language` instance with limited versions.
    :raises ValueError:
        If no version is left anymore.
    """
    # Integer limits are compared numerically; anything else is compared
    # against the raw version value.
    if isinstance(limit, int):
        keep = lambda version: operator(int(version), limit)
    else:
        keep = lambda version: operator(version, limit)
    versions = [version for version in language.versions if keep(version)]
    if not versions:
        raise ValueError('No versions left')
    return type(language)(*versions)
def get_moon_noise_temp():
    """
    Returns the noise temp (in Kelvin) for the moon.

    The moon noise temp is fairly constant across spectrum: ~140 K at new
    moon and ~280 K at full moon; the arithmetic mean is used as an
    approximate value.

    Ported from MATLAB Code. Ref: Rec. ITU-R P.372-8
    Nicholas O'Donoughue, 15 March 2021

    :return: Moon noise temp [K]
    """
    new_moon_temp = 140
    full_moon_temp = 280
    return (new_moon_temp + full_moon_temp) / 2
import os
def get_all_templates(directory):
    """Recursively collect template files (.json/.yml/.yaml) under *directory*."""
    suffixes = (".json", ".yml", ".yaml")
    matches = []
    for dirpath, _, filenames in os.walk(directory):
        matches.extend(
            os.path.join(dirpath, name)
            for name in filenames
            if name.endswith(suffixes)
        )
    return matches
def countInterNodeCommunication(G, p, g_nodes, g_ppn):
    """
    Count inter-node communications (both the per-node bottleneck and the
    total) for a permutation p, given g_nodes nodes with g_ppn processes
    per node. The graph is assumed to be directed.

    :param G: simpleGraph needed for neighbor extraction
    :param p: permutation parameter as list
    :param g_nodes: number of nodes
    :param g_ppn: processes per node
    :return: tuple (b, s) — bottleneck b and total s inter-node connections
    """
    total = 0
    per_node = [0 for _ in range(g_nodes)]
    # Precompute rank -> position once; the original called p.index()
    # (O(n)) for every neighbor of every rank.
    position = {rank: idx for idx, rank in enumerate(p) if rank >= 0}
    for index, rank in enumerate(p):
        # Negative ranks mean more hardware resources than processes.
        if rank < 0:
            continue
        rank_node_id = index // g_ppn
        for neighbor in G.getUnweightedNeighbors(rank):
            neighbor_node_id = position[neighbor] // g_ppn
            if neighbor_node_id != rank_node_id:
                total += 1
                per_node[rank_node_id] += 1
    return (max(per_node), total)
def cv_paste_image(im, mask, start_point=(0, 0)):
    """Paste *mask* into *im* with its top-left corner at *start_point*.

    :param im: destination image array (mutated in place)
    :param mask: source array of shape (h, w[, d])
    :param start_point: (x, y) offset of the paste location
    :return: the mutated *im*
    """
    x0, y0 = start_point
    height, width = mask.shape[0], mask.shape[1]
    im[y0:y0 + height, x0:x0 + width] = mask
    return im
import random
import string
def pwd(n=10):
    """
    Return a random password of *n* lowercase letters and digits.

    Uses ``random.choices`` (sampling with replacement), so characters may
    repeat and *n* may exceed the 36-character alphabet; the original
    ``random.sample`` never repeated a character (reduced entropy) and
    raised ValueError for n > 36.

    Note: ``random`` is not cryptographically secure — use the ``secrets``
    module for security-sensitive passwords.
    """
    alphabet = string.ascii_lowercase + string.digits
    return ''.join(random.choices(alphabet, k=n))
import threading
def threaded_fn(func):
    """
    A decorator for any function that needs to be run on a separate thread.

    The wrapper starts a new ``threading.Thread`` running *func* and
    returns the Thread object so callers can ``join()`` it.
    """
    from functools import wraps

    @wraps(func)  # preserve func's name/docstring for introspection
    def wrapper(*args, **kwargs):
        thread = threading.Thread(target=func, args=args, kwargs=kwargs)
        thread.start()
        return thread
    return wrapper
def get_vector13():
    """Return the 3-component vector with ID 13."""
    return [0.6177727, 0.5000000, 0.3822273]
import requests
import json
def extract_data_dict(pg_url):
    """Get the talk data dictionary embedded in a TED talk page.

    Args:
        pg_url (str): URL of page of TED talk
    Returns:
        dict: dictionary of data elements on the page, {} when the embedded
        JSON cannot be parsed, or -1 (sentinel kept for existing callers)
        when the HTTP request fails.
    """
    response = requests.get(
        pg_url,
        headers={'User-agent': 'Talk Scraper Bot'},
        timeout=(5, 5)
    )
    if response.status_code != 200:
        return -1  # unsuccessful request
    # source: https://github.com/The-Gupta/TED-Scraper/blob/master/Scraper.ipynb
    # The page embeds its data as the JSON argument of q("talkPage.init", ...)
    html_text = response.text
    prefix = '<script data-spec="q">q("talkPage.init",'
    start_index = html_text.find(prefix)
    end_index = html_text[start_index:].find(')</script>')
    json_obj = html_text[start_index + len(prefix): start_index + end_index]
    try:
        data_dic = json.loads(json_obj)['__INITIAL_DATA__']
    except (ValueError, KeyError, TypeError):
        # Narrowed from a bare except: JSON decode failure, missing key,
        # or a non-dict payload all yield an empty dict.
        data_dic = {}
    return data_dic
import re
def replace_links(text, link_patterns):
    """
    Replace string patterns in *text* with the supplied hyperlinks.

    *text* is a plain string (often a django/flask model field).
    *link_patterns* is a list of two-element dictionaries with keys
    'pattern' (a regular expression) and 'url' (its replacement; re.sub
    callbacks are supported — see the python docs for re.sub).

    Note: no attempt is made to validate the link or the regex pattern.
    """
    for entry in link_patterns:
        pattern_str = entry.get("pattern")
        compiled = re.compile(pattern_str, re.IGNORECASE)
        if "project:" in pattern_str:
            # mark down replace _ with ems - replace them first:
            text = re.sub(r"</?em>", "_", text)
            for code in compiled.findall(text):
                href = entry["url"].format(code.lower(), code.upper())
                text = text.replace(code, href)
        else:
            text = re.sub(compiled, entry["url"], text)
    return text
def input_action(i, _):
    """Return the input that matched the transition; the second argument is ignored."""
    matched = i
    return matched
def byteToHex( byteStr ):
    """
    Convert a byte string to its hex string representation e.g. for output.

    Accepts real ``bytes`` (whose items are ints on Python 3) as well as
    the historical Python 2 style ``str`` of ordinals; the original
    ``ord()``-based code raised TypeError for ``bytes`` input on Python 3.
    """
    values = (b if isinstance(b, int) else ord(b) for b in byteStr)
    return ' '.join('%02X' % v for v in values)
def clean_kos(mod_sets):
    """
    Map each module to its most common version (set of encoding genes).

    Arguments:
        mod_sets (defaultdict) -- raw KEGG data: module -> {ko_str: count}
    Returns:
        dict -- module (e.g. 'K00001') mapped to the gene list of its most
        frequent encoding set (e.g. ['K00845', ..., 'K00873']).
    """
    mod_to_ko_clean = {}
    for mod, counts in mod_sets.items():
        best_count = 0
        best_path = ""
        for ko_str, count in counts.items():
            # strict '>' keeps the first entry when there is a tie
            if count > best_count:
                best_count = count
                best_path = ko_str.split("_")
        mod_to_ko_clean[mod] = best_path
    return mod_to_ko_clean
def split(p):
    """Split a pathname. Returns tuple "(head, tail)" where "tail" is
    everything after the final slash. Either part may be empty."""
    cut = p.rfind('/') + 1
    head = p[:cut]
    tail = p[cut:]
    # Strip trailing slashes from head unless it is nothing but slashes
    # (i.e. the filesystem root).
    if head and head.strip('/'):
        head = head.rstrip('/')
    return head, tail
import io
import json
def get_config(filename):
    """
    Load the config items from a JSON file.

    :param filename: path to the json config file (utf-8, BOM tolerated)
    :return: dict containing the config items
    """
    with io.open(filename, encoding="utf-8-sig") as handle:
        return json.load(handle)
def getJpgEof(dataPath, currentIdx):
    """ getJpgEof(dataPath, currentIdx)
    Function to find the end of file indice for the datatype jpg.
    Input:
    dataPath: Path of deleted_data
    currentIdx: Index of first header marker
    Output:
    currentIdx: Indice of the end of file
    """
    # skip to the start of the header
    # and read in markers
    with dataPath.open('rb') as file:
        file.seek(currentIdx)
        marker1 = file.read(1)
        marker2 = file.read(1)
        currentIdx += 2
        # check for end of file markers (EOI = 0xFFD9)
        if (marker1 == b'\xff' and marker2 == b'\xd9'):
            return currentIdx
        # preclude SOS markers: an ordinary segment carries a 2-byte
        # big-endian length that includes the length field itself
        elif (marker1 == b'\xff' and marker2 != b'\xda'):
            # read in block length
            segmentLen = int.from_bytes(file.read(2), "big")
            currentIdx += segmentLen
            # recall getJpgEof function
            currentIdx = getJpgEof(dataPath, currentIdx)
            return currentIdx
        # check for SOS markers (start of entropy-coded scan data)
        elif (marker1 == b'\xff' and marker2 == b'\xda'):
            marker1 = 0
            marker2 = 0
            # skip forward to the next block header
            # NOTE(review): `(b'\x00' or b'\x01' or ...)` short-circuits to
            # b'\x00', so only byte stuffing (0xFF00) is treated as
            # non-marker here — the restart markers 0xD0-0xD7 the chain
            # seems intended to cover are NOT skipped. Confirm and fix
            # separately (e.g. with `marker2 in (...)`).
            while marker1 != b'\xff' or marker2 == (b'\x00' or b'\x01' or b'\xd0' or b'\xd1' or
                                                    b'\xd2' or b'\xd3' or b'\xd4' or b'\xd5' or
                                                    b'\xd6' or b'\xd7' or b'\xd8'):
                marker1 = file.read(1)
                marker2 = file.read(1)
                currentIdx += 1
                file.seek(currentIdx)
            # read in block header markers
            marker1 = file.read(1)
            marker2 = file.read(1)
            # read in block length
            segmentLen = int.from_bytes(file.read(2), "big")
            currentIdx += segmentLen
            # recall getJpgEof function
            currentIdx = getJpgEof(dataPath, currentIdx)
            # skip forward to end of file markers
            # NOTE(review): this scan re-reads from the file's current
            # position, not from currentIdx; an unterminated stream would
            # loop on empty reads — confirm inputs always contain 0xFFD9.
            while marker1 != b'\xff' or marker2 != b'\xd9':
                marker1 = file.read(1)
                marker2 = file.read(1)
                currentIdx += 1
                file.seek(currentIdx)
            # return end of file index
            return currentIdx+1 | cc0d041abd25cac008be7d984587805753ea0ff2 | 691,205 |
import torch
def refinement_regularized_margin_ranking_loss(score_positive, score_negative, score_queries):
    """
    Regularized version of the multilabel margin loss used in the RepBERT
    paper:

        mean(max(0, 1 - [REL(Q,D_j^+) - (REL(Q,D_j^-) + REL(Q,Q'))]))

    Args:
        score_positive: scores for positive documents
        score_negative: scores for negative documents
        score_queries: query-refinement regularization scores
    Returns:
        Scalar tensor with the mean loss.
    """
    # negative - positive - queries covers the minus sign of the target
    # in the original formula.
    diff = score_negative - score_positive - score_queries
    # ones_like keeps the inputs' device and dtype; the original
    # hard-coded .to('cuda') and crashed on CPU-only machines.
    margin = diff + torch.ones_like(score_positive)
    return torch.mean(torch.nn.functional.relu(margin))
def normalize(x, dtype='float32'):
    """
    Normalize 8-bit image data into [0, 1].

    Converts to *dtype* first (float32 saves memory versus float64); a
    plain division is used because in-place ``/=`` raises TypeError on
    integer arrays.
    """
    scaled = x.astype(dtype) / 255
    return scaled
def datetime2dic(datetimeobj, dic):
    """
    Store *datetimeobj*'s ctime string under key 'date' in *dic* and
    return the (mutated) dictionary.
    """
    stamp = datetimeobj.ctime()
    dic["date"] = stamp
    return dic
def wait_for_sum_of_cubes(x):
    """
    What comes in: A number x.
    What goes out: the smallest positive integer n such that
        1**3 + 2**3 + ... + n**3 >= x.
    Side effects: None.

    Examples:
      -- x in (1, 9] returns 2 (1 = 1**3 < x but 1 + 8 = 9 >= x).
      -- x in (36, 100] returns 4 (1+8+27 = 36 < x but +64 = 100 >= x).
      -- x = 1000 returns 8 (partial sums: 784 at n=7, 1296 at n=8).
      -- any x <= 1 (e.g. -5.2) returns 1.

    Type hints:
      :type x: float [or an int]
    """
    # Wait-until-event pattern: grow the running sum of cubes until it
    # reaches x. (No closed-form shortcut, per the assignment.)
    n = 1
    running = 1  # 1 cubed
    while running < x:
        n += 1
        running += n ** 3
    return n
def get_electrodes_metadata(neo_reader, electrodes_ids: list, block: int = 0) -> list:
    """
    Get electrodes metadata from a Neo reader.

    The information sought is what pynwb.icephys.IntracellularElectrode
    accepts: name, device, description, comment, slice, seal, location,
    resistance, filtering and initial_access_resistance.

    Parameters
    ----------
    neo_reader : Neo reader
    electrodes_ids : list
        List of electrode ids.
    block : int, optional
        Block id. Defaults to 0.

    Returns
    -------
    list
        List of dictionaries containing electrodes metadata.
        Currently a stub: always returns an empty list.
    """
    return []
def filter_row(row_index, row):
    """
    Return True when *row* contains only empty strings, so empty rows can
    be ignored when parsing xlsx files. *row_index* is unused.
    """
    return all(element == '' for element in row)
def isvalid(ctx, a):
    """ Test if an identifier is known.
    Returns a string '1' if valid, '0' otherwise.
    """
    known = ctx.isvalid(a.plotid())
    return "1" if known else "0"
import sys
import traceback
def call_stack():
    """
    Return the traceback of the exception currently being handled as a
    tuple: (extracted frame summaries, formatted traceback string).
    @return traceback
    """
    exc_traceback = sys.exc_info()[-1]
    frames = traceback.extract_tb(exc_traceback)
    formatted = "".join(traceback.format_tb(exc_traceback))
    return frames, formatted
def process_features(features):
    """ Hook for custom feature engineering logic, e.g. polynomial
    expansion. Default behaviour is to return the original feature
    tensors dictionary as-is.

    Args:
        features: {string:tensors} - dictionary of feature tensors
    Returns:
        {string:tensors}: extended feature tensors dictionary
    """
    # Example extensions (disabled), given numeric features 'x'/'y' and
    # categorical features 'alpha'/'beta':
    #   polynomial/interaction terms:
    #     features['x_2'] = tf.pow(features['x'], 2)
    #     features['xy'] = features['x'] * features['y']
    #   transforms:
    #     features['sin_x'] = tf.sin(features['x'])
    #     features['log_xy'] = tf.log(features['xy'])
    #     features['sqrt_xy'] = tf.sqrt(features['xy'])
    #   boolean flags:
    #     features['x_grt_y'] = tf.cast(features['x'] > features['y'], tf.int32)
    #     features['alpha_eq_beta'] = features['alpha'] == features['beta']
    # Any created feature must also be registered in the metadata
    # (CONSTRUCTED_NUMERIC_FEATURE_NAMES /
    #  CONSTRUCTED_CATEGORICAL_FEATURE_NAMES_WITH_IDENTITY in metadata.py).
    return features
def flatten_json_with_key(data, json_key):
    """
    Return the value stored at *json_key* from each json obj in *data*.
    """
    return [obj[json_key] for obj in data]
def listbox_identify(listbox, y):
    """Returns the index of the listbox item at the supplied (relative) y
    coordinate, or None when no item lies under that coordinate."""
    # nearest() returns the closest item even when y is past the last item,
    # so verify that y actually falls inside that item's bounding box.
    item = listbox.nearest(y)
    # bbox(item) is (x, y, width, height): the item's bottom edge must be
    # strictly below y for the hit to count.
    # NOTE(review): bbox() can return None when the item is not currently
    # visible, which would raise TypeError here — confirm callers only pass
    # coordinates for a mapped, visible listbox.
    if item != -1 and listbox.bbox(item)[1] + listbox.bbox(item)[3] > y:
        return item
    return None | a378f792b9274a03895e4a646a9418e594e5537f | 691,218 |
def sort_amounts(proteins, sort_index):
    """Generic sorter for peptides and psms: group proteins by the value
    at position *sort_index* and return the groups ordered best first
    (a higher value is assumed to be better)."""
    buckets = {}
    for protein in proteins:
        buckets.setdefault(protein[sort_index], []).append(protein)
    return [group for _, group in sorted(buckets.items(), reverse=True)]
import difflib
def create_delta(base_buf, target_buf):
    """Use python difflib to work out how to transform base_buf to target_buf.

    Emits a git-pack-style delta: a header with both buffer sizes in
    7-bit variable-length encoding, followed by copy opcodes (for matched
    ranges in base_buf) and insert opcodes (literal data from target_buf).

    NOTE(review): this is Python-2-era code — it builds the delta as a
    ``str`` via ``chr()``. On Python 3 the result is a text string of
    codepoints < 256 that presumably must be latin-1-encoded before use;
    confirm against the consumer.

    :param base_buf: Base buffer
    :param target_buf: Target buffer
    """
    assert isinstance(base_buf, str)
    assert isinstance(target_buf, str)
    out_buf = ""
    # write delta header
    def encode_size(size):
        # 7-bit little-endian varint: high bit set on all but the last byte
        ret = ""
        c = size & 0x7f
        size >>= 7
        while size:
            ret += chr(c | 0x80)
            c = size & 0x7f
            size >>= 7
        ret += chr(c)
        return ret
    out_buf += encode_size(len(base_buf))
    out_buf += encode_size(len(target_buf))
    # write out delta opcodes
    seq = difflib.SequenceMatcher(a=base_buf, b=target_buf)
    for opcode, i1, i2, j1, j2 in seq.get_opcodes():
        # Git patch opcodes don't care about deletes!
        #if opcode == "replace" or opcode == "delete":
        # pass
        if opcode == "equal":
            # If they are equal, unpacker will use data from base_buf
            # Write out an opcode that says what range to use
            # (copy opcode: high bit set; low nibble flags offset bytes,
            # bits 4-5 flag size bytes — only non-zero bytes are emitted)
            scratch = ""
            op = 0x80
            o = i1
            for i in range(4):
                if o & 0xff << i*8:
                    scratch += chr((o >> i*8) & 0xff)
                    op |= 1 << i
            s = i2 - i1
            for i in range(2):
                if s & 0xff << i*8:
                    scratch += chr((s >> i*8) & 0xff)
                    op |= 1 << (4+i)
            out_buf += chr(op)
            out_buf += scratch
        if opcode == "replace" or opcode == "insert":
            # If we are replacing a range or adding one, then we just
            # output it to the stream (prefixed by its size)
            # (insert opcodes hold at most 127 literal bytes each)
            s = j2 - j1
            o = j1
            while s > 127:
                out_buf += chr(127)
                out_buf += target_buf[o:o+127]
                s -= 127
                o += 127
            out_buf += chr(s)
            out_buf += target_buf[o:o+s]
    return out_buf | c8685aa7116b0509d10e6b5065d5495a79a3042c | 691,220 |
def get_container_ids(dcos_api_session, node: str):
    """Return container IDs reported by the metrics API on node.

    Retries on error, non-200 status, or empty response for up to 150 seconds.
    """
    # NOTE(review): the docstring promises retry behaviour but no retry
    # logic is visible here — presumably dcos_api_session.metrics.get (or a
    # retry decorator at the definition/call site) provides it; confirm, or
    # fix the docstring.
    response = dcos_api_session.metrics.get('/containers', node=node)
    assert response.status_code == 200
    container_ids = response.json()
    assert len(container_ids) > 0, 'must have at least 1 container'
    return container_ids | 41e64cdf27ab69b17c0804551ccca5b3fafaea1a | 691,221 |
from datetime import datetime
import os
def creation_date(obj):
    """Extract the date when the file was created.

    *obj* is a 4-tuple whose third element is the file's absolute path.
    @return: ('cdate', datetime) with the ctime timestamp
    """
    abspath = obj[2]
    ctime = os.path.getctime(abspath)
    return 'cdate', datetime.fromtimestamp(ctime)
def _s_(n: int) -> str:
"""Return spaces."""
return " " * n | 028098a8971f690b55dd60ffb8e235a8b4244c63 | 691,223 |
from typing import Any
def is_pair(arg: Any) -> bool:
    """Check that arg is a pair.

    Pairs are represented as callables here (e.g. closure-encoded cons
    cells), so the test is simply callability.
    """
    return callable(arg)
def get_table_d_3():
    """Table D.3: regression coefficients for oil-fired water heater efficiency.

    Returns:
        list: three rows of regression coefficients.
    """
    return [
        (0.0005, 0.0024, 0.0005, 0.0000, 0.0000, 0.0000, 0.0062),
        (0.0028, 0.0021, 0.0028, -0.0027, -0.0024, -0.0027, 0.0462),
        (0.6818, 0.7560, 0.6818, 0.9026, 0.8885, 0.9026, 0.4001),
    ]
import math
def taylor_exp(x, accuracy=20):
    """
    Approximate e**x with the Taylor expansion
        e^x = 1 + x + x^2/2! + x^3/3! + ...
    using *accuracy* terms.
    ## https://en.wikipedia.org/wiki/Exponential_function ##

    Be aware that this loses accuracy fairly quickly; raising *accuracy*
    helps only up to the limits of float conversion of large integers.
    """
    return sum(math.pow(x, i) / math.factorial(i) for i in range(accuracy))
import os
import shutil
def prepare_dir(enexpath, notebook):
    """Prepare the output directory for *notebook* and return the path of
    its .org file. An existing directory is removed first.
    *enexpath* is currently unused."""
    orgpath = os.path.join(notebook, notebook + '.org')
    if os.path.exists(notebook):
        # Delete folder and its contents -- later change to Prompt for action
        shutil.rmtree(notebook)
    os.makedirs(notebook)
    return orgpath
def get_true_graph(env):
    """Unwrap nested environments (via ``.env``) until one exposes a
    ``.true_graph`` attribute; return that graph (which must have ``.As``)
    or None when no wrapper carries one."""
    current = env
    while current is not None:
        if hasattr(current, 'true_graph'):
            graph = current.true_graph
            assert hasattr(graph, 'As')
            return graph
        current = getattr(current, 'env', None)
    return None
def register(dmm, typecls):
    """Decorator factory that simplifies datamodel registration.

    The decorated function is registered for *typecls* on *dmm* and
    returned unchanged so that chaining is possible.
    """
    def decorator(fn):
        dmm.register(typecls, fn)
        return fn
    return decorator
def manual_tokenized_sentence_spelling(tokens, spelling_dictionary):
    """
    Apply spelling corrections to a token list.

    :param tokens: (list of tokens) to potentially be corrected
    :param spelling_dictionary: correction map; a value may be a single
        replacement token or a list of tokens (a split correction)
    :return: a list of corrected tokens
    """
    new_tokens = []
    for token in tokens:
        replacement = spelling_dictionary.get(token, token)
        # isinstance (not `type(...) == list`) so list subclasses also
        # count as split corrections.
        if isinstance(replacement, list):
            new_tokens.extend(replacement)
        else:
            new_tokens.append(replacement)
    return new_tokens
def build_source_url(username, repository):
    """
    Create a valid GitHub url for a user's repository.

    :param str username: username of the repository owner
    :param str repository: name of the target repository
    """
    return f'https://github.com/{username}/{repository}'
def _type_name(obj: object) -> str:
"""Get type name."""
return type(obj).__qualname__ | 0ced2627eaf459c09daaa4e92d4686a58188b5a3 | 691,233 |
def maximo_cuadruple(a: float, b: float, c: float, d: float) -> float:
    """Return the maximum of the four arguments using the built-in max.
    Reference: https://docs.python.org/3/library/functions.html#max"""
    return max((a, b, c, d))
import random
def _get_chance():
    """Generate a random number in [0.0, 1.0) and return it.

    Return:
        float
    """
    return random.random() | 8e90a65dc4e76acc75c66419324a08729b00c38e | 691,235 |
import random
def choose_first():
    """Randomly choose who goes first (returns 1 or 2)."""
    player = random.randint(1, 2)
    return player
def make_location(context, loc = -1):
    """Compute a (name, lineno, colno) location for offset *loc* in
    ``context.stream`` (both line and column are 1-based). A negative
    *loc* means the stream's current position.
    """
    stream = context.stream
    if loc < 0:
        loc = stream.pos
    prefix = stream.data[0:loc]
    lineno = prefix.count('\n') + 1
    if lineno > 1:
        # only the text after the last newline counts toward the column
        prefix = prefix.split('\n')[-1]
    colno = len(prefix) + 1
    return stream.name, lineno, colno
def log_function(func):
    """
    A decorator that logs the arguments of each call to *func*.
    """
    from functools import wraps
    fname = func.__code__.co_name
    arg_names = func.__code__.co_varnames

    @wraps(func)  # preserve func's name/docstring on the wrapper
    def echo_func(*args, **kwargs):
        """
        Echoes the function arguments
        """
        print("[!]", fname + ": ", ", ".join("%s=%r" % entry for entry in zip(arg_names, args)))
        return func(*args, **kwargs)
    return echo_func
def clamp(minValue, maxValue, value):
    """Constrain *value* to the inclusive range [minValue, maxValue]."""
    return max(minValue, min(maxValue, value))
import random
def generate_unique_variants(students_count, exam_papers_count):
    """
    Randomly assign an exam-paper (variant) number to every student and
    print the assignment, one line per student.
    """
    if exam_papers_count < students_count:
        # Fewer papers than students: draw each number independently,
        # allowing repeats.
        for val in range(students_count):
            print(f'{val+1:2}й студент - вариант № {random.randrange(exam_papers_count)+1}')
        return None
    # Enough papers for everyone: shuffle 1..exam_papers_count and deal
    # them out in student order.
    number_list = list(range(1, exam_papers_count + 1))
    random.shuffle(number_list)
    for val in range(students_count):
        print(f'{val+1:2}й студент - вариант № {number_list[val]}')
    return None
import re
import argparse
def token_arg(token_value: str) -> str:
    """
    Validate a Discord bot token for argparse; the bot won't run with an
    invalid token.

    Matches the regex [MN][A-Za-z\\d]{23}\\.[\\w-]{6}\\.[\\w-]{27}.
    Thanks to https://github.com/AnIdiotsGuide/Tutorial-Bot/blob/Episode-4/app.js for the regex I used.
    Thanks to https://gist.github.com/mikecharles/9ed3082b10d77d658743 for an example of how this works with argparse

    :param token_value: The string that may be a bot token
    :return token_value: The value, when it matches the regex
    :raises argparse.ArgumentTypeError: when the value does not match
    """
    # Regex check
    if not re.match(r"[MN][A-Za-z\d]{23}\.[\w-]{6}\.[\w-]{27}", token_value):
        # The original called .format(token_value) on a message with no
        # placeholder, silently discarding the argument; argparse prefixes
        # the message with the offending value, so none is embedded here.
        raise argparse.ArgumentTypeError(
            "must be a correct Discord bot token."
        )
    # If that passes, the token's value will be returned.
    return token_value
def has_user_mods() -> bool:
    """has_user_mods() -> bool

    (internal)
    Stub reporting whether the system varies from default configuration
    (by user mods, etc). Always returns False here.
    """
    return False
def reconstruct_pod(coeffs, R):
    """
    Reconstruct the grid from POD coefficients and the transformation
    matrix R (matrix product ``R @ coeffs``).

    Args:
        coeffs (np.array): POD coefficients
        R (np.array): Transformation matrix R
    Returns:
        np.array: Reconstructed grid
    """
    reconstructed = R @ coeffs
    return reconstructed
def cube_to_axial(c):
    """
    Convert a cube coordinate to an axial coordinate.

    :param c: A cube coord (x, z, y).
    :return: An axial coord (q, r).
    """
    x, z, _y = c
    return x, z
from typing import Dict
from typing import Union
def _are_agent_locations_equal(
ap0: Dict[str, Union[float, int, bool]],
ap1: Dict[str, Union[float, int, bool]],
ignore_standing: bool,
tol=1e-2,
ignore_y: bool = True,
):
"""Determines if two agent locations are equal up to some tolerance."""
def rot_dist(r0: float, r1: float):
diff = abs(r0 - r1) % 360
return min(diff, 360 - diff)
return (
all(
abs(ap0[k] - ap1[k]) <= tol
for k in (["x", "z"] if ignore_y else ["x", "y", "z"])
)
and rot_dist(ap0["rotation"], ap1["rotation"]) <= tol
and rot_dist(ap0["horizon"], ap1["horizon"]) <= tol
and (ignore_standing or (ap0["standing"] == ap1["standing"]))
) | 0bcdf03a128a6df1f3be653c74355c00c0097692 | 691,246 |
from pathlib import Path
def get_data_base_path() -> Path:
    """
    :return: Base :class:`Path` under which the datasets are stored.
    """
    base = Path("data")
    return base
from typing import List
def calc_metric_z(centroid1: List, centroid2: List) -> float:
"""calculate IoM only for z-direction
Args:
centroid1 (List): First centroid of format (..., z1, z2)
centroid2 (List): Second centroid of format (..., z1, z2)
Returns:
float: IoM in z-direction using start and end z-frames z1, z2
"""
# look how many of centroid1 and centroid2 z-axis overlap
# using intersection/union, not intersection/minimum
min_z1, max_z1 = centroid1[-2:]
min_z2, max_z2 = centroid2[-2:]
if max_z1 < min_z2 or max_z2 < min_z1:
return 0
# +1 has to be added because of how we count with both ends including!
# if GT is visible in z-layers 5 - 8 (inclusive) and detection is in layer 8 - 9
# they have one overlap (8), but 8 - 8 = 0 which is wrong!
intersection = min(max_z1, max_z2) - max(min_z1, min_z2) + 1
min_val = min(max_z1-min_z1, max_z2-min_z2) + 1
if min_val == 0:
return 0
# gt has saved each spine with only one img -04.png
# should be no problem any more
return intersection/min_val | a74562db7e9e04fdaad840e48a5d2f5bef0e441d | 691,248 |
def two_sum_problem_sort(data, total, distinct=False):
    """ Returns the pairs of numbers in the input list which sum to the given total.

    Note: ``data`` is sorted in place, as before (callers may rely on it).
    Complexity is now genuinely O(n log n) — membership tests use a set
    instead of scanning the list.

    Args:
        data: list, all the numbers available to compute the sums.
        total: int, the sum to look for.
        distinct: boolean, whether the two members of a pair must differ.

    Returns:
        list, of pairs (i, total - i) for each i in data whose complement
        also occurs in data, in sorted order of the first member (so
        symmetric pairs such as (2, 3) and (3, 2) both appear).
    """
    data.sort()
    members = set(data)  # O(1) membership instead of O(n) list scans
    out = []
    for i in data:
        # Bug fix: the old `if i > total: continue` shortcut wrongly
        # discarded valid pairs whenever negative numbers are involved.
        other = total - i
        if other in members and (not distinct or i != other):
            out.append((i, other))
    return out
import time
def retry_function(function, action_name, err_msg,
                   max_retries=4, retry_delay=60):
    """Common utility to retry calling a function with exponential backoff.

    :param function: The function to call.
    :type function: function
    :param action_name: The name of the action being performed.
    :type action_name: str
    :param err_msg: Message for the RuntimeError raised after the last
        failed attempt.
    :type err_msg: str
    :param max_retries: The maximum number of attempts.
    :type max_retries: int
    :param retry_delay: Seconds to sleep before the first retry; doubled
        after each failed attempt.
    :type retry_delay: int
    :return: Whatever ``function`` returns. May be None if function
        does not return a result.
    :rtype: object
    """
    for attempt in range(max_retries):
        try:
            print("{0} attempt {1} of {2}".format(
                action_name, attempt + 1, max_retries))
            result = function()
            break
        except Exception as err:
            print("{0} attempt failed with exception:".format(action_name))
            print(err)
            if attempt + 1 == max_retries:
                # Out of attempts: surface the configured error message.
                raise RuntimeError(err_msg)
            print("Will retry after {0} seconds".format(retry_delay))
            time.sleep(retry_delay)
            retry_delay *= 2
    return result
def get_zarr_id(zstore):
    """Split a GC zarr store location into its dataset_id components."""
    prefix = zstore[:10]
    assert prefix == 'gs://cmip6'
    # Drop "gs://cmip6/" and the trailing "/", then split on "/".
    return zstore[11:-1].split('/')
def _load_time_stamped_file(fn):
"""Returns a pair (times,items)"""
with open(fn) as f:
times = []
items = []
for l in f.readlines():
l = l.strip()
if l.startswith('#'):
continue
v = l.split()
times.append(float(v[0]))
if len(v) == 2:
items.append(v[1])
else:
items.append(v[1:])
return times,items
raise IOError("Unable to load "+fn) | 001aeea94e3f1b5fb02d166f14be79507911910b | 691,252 |
import json
from io import StringIO
def generate_validation_error_report(e, json_object, lines_before=7, lines_after=7):
    """
    Generate a detailed report of a schema validation error.
    'e' is a jsonschema.ValidationError exception that errored on
    'json_object'.
    Steps to discover the location of the validation error:
    1. Traverse the json object using the 'path' in the validation exception
       and replace the offending value with a special marker.
    2. Pretty-print the json object into indented json text.
    3. Search for the special marker in the json text to find the actual
       line number of the error.
    4. Make a report by showing the error line with a context of
       'lines_before' and 'lines_after' number of lines on each side.
    """
    if json_object is None:
        return "'json_object' cannot be None."
    # Errors with an empty path are top-level schema violations; there is
    # no nested value to locate, so report them directly.
    if not e.path:
        if e.schema_path and e.validator_value:
            return "Toplevel:\n\t{}".format(e.message)
        else:
            return str(e)
    # Fixed random hex string; vanishingly unlikely to collide with data.
    marker = "3fb539deef7c4e2991f265c0a982f5ea"
    # Find the object that is erroring, and replace it with the marker.
    # NOTE(review): mutates json_object temporarily; the value is restored
    # below only on the found-marker branch.
    ob_tmp = json_object
    for entry in list(e.path)[:-1]:
        ob_tmp = ob_tmp[entry]
    orig, ob_tmp[e.path[-1]] = ob_tmp[e.path[-1]], marker
    # Pretty print the object and search for the marker.
    # (The name `io` shadows the stdlib io module within this function.)
    json_error = json.dumps(json_object, indent=4)
    io = StringIO(json_error)
    errline = None
    for lineno, text in enumerate(io):
        if marker in text:
            errline = lineno
            break
    if errline is not None:
        # Re-create report.
        report = []
        # Restore the original value so the printed context shows real data.
        ob_tmp[e.path[-1]] = orig
        json_error = json.dumps(json_object, indent=4)
        io = StringIO(json_error)
        for lineno, text in enumerate(io):
            # Flag the offending line with ">>>"; numbers are 1-based.
            if lineno == errline:
                line_text = "{:4}: >>>".format(lineno + 1)
            else:
                line_text = "{:4}: ".format(lineno + 1)
            report.append(line_text + text.rstrip("\n"))
        # Keep only the context window around the error line.
        report = report[max(0, errline - lines_before):errline + 1 + lines_after]
        s = "Error in line {}:\n".format(errline + 1)
        s += "\n".join(report)
        # Strip Python-2-style unicode literal prefixes from the message.
        s += "\n\n" + str(e).replace("u'", "'")
    else:
        s = str(e)
    return s
def set_first_line(img, pixels):
    """Write ``pixels`` across row 0 of ``img`` and return the image."""
    for x, value in enumerate(pixels):
        img.set_pixel(x, 0, value)
    return img
def indicator_atr(df, lookback=20):
    """Compute the true range and its rolling mean (average true range).

    Args:
        df (pd.DataFrame): OHLCV intraday dataframe, only needs high, low, close
        lookback (:obj:int, optional): rolling window. Default is 20

    Returns:
        pd.DataFrame: columns "atr" and "true_range"
    """
    work = df[["high", "low", "close"]].copy()
    prev_close = work["close"].shift(1)
    # True range: the widest of the three candidate ranges per bar.
    work["highlow"] = (work["high"] - work["low"]).abs()
    work["highclose"] = (work["high"] - prev_close).abs()
    work["lowclose"] = (work["low"] - prev_close).abs()
    work["true_range"] = work[["highlow", "highclose", "lowclose"]].max(axis=1)
    # ATR: simple rolling average of the true range.
    work["atr"] = work["true_range"].rolling(lookback).mean()
    return work[["atr", "true_range"]]
def istype(obj, allowed_types):
    """isinstance() without subclasses"""
    actual = type(obj)
    # A container of types means "exact match against any of them".
    if isinstance(allowed_types, (tuple, list, set)):
        return actual in allowed_types
    return actual is allowed_types
from math import sin, cos, radians
def __min_dist_inside__(point, rotation, box):
"""Gets the space in a given direction from "point" to the boundaries of
"box" (where box is an object with x0, y0, x1, & y1 attributes, point is a
tuple of x,y, and rotation is the angle in degrees)"""
x0, y0 = point
rotation = radians(rotation)
distances = []
threshold = 0.0001
if cos(rotation) > threshold:
# Intersects the right axis
distances.append((box.x1 - x0) / cos(rotation))
if cos(rotation) < -threshold:
# Intersects the left axis
distances.append((box.x0 - x0) / cos(rotation))
if sin(rotation) > threshold:
# Intersects the top axis
distances.append((box.y1 - y0) / sin(rotation))
if sin(rotation) < -threshold:
# Intersects the bottom axis
distances.append((box.y0 - y0) / sin(rotation))
return min(distances) | 568476163f38b967461eafa5bf9a7497d149bb00 | 691,258 |
def scale3D(v, scale):
    """Returns a scaled 3D vector"""
    x, y, z = v[0], v[1], v[2]
    return (x * scale, y * scale, z * scale)
def record_pct(A):
    """Sum of the products of each tuple in ``A``.

    Input:
        A (list of tuples): list of tuples of record percents
    Returns:
        record_pct (number): sum over the tuples of the product of their
        entries; an empty tuple contributes 1 (empty product), as before.
    """
    from math import prod
    # Idiomatic rewrite of the original two-pass index loops: product per
    # tuple, then the sum of those products.
    return sum(prod(t) for t in A)
import argparse
def parse_args():
    """ Command-line interface.

    Returns the parsed argparse.Namespace (reads sys.argv).
    """
    parser = argparse.ArgumentParser(
        description="Update previous FASTA alignment with new sequences by"
        "pairwise alignment to a reference genome."
    )
    # NOTE(review): the two description fragments above concatenate without
    # a space ("...by" + "pairwise..."); cannot be fixed here without
    # changing a runtime string — confirm and fix upstream.
    parser.add_argument('srcfile', type=argparse.FileType('r'),
                        help='input, FASTA file with new sequences')
    parser.add_argument('destfile', type=argparse.FileType('r+'),
                        help='input/output, FASTA file to append to')
    # NOTE(review): this default opens data/NC_045512.fa eagerly on every
    # call — even when -ref is supplied — the handle is never closed and
    # the call fails outright if the file is absent. Consider a plain str
    # default and letting FileType open it lazily.
    parser.add_argument('-ref', default=open('data/NC_045512.fa'),
                        type=argparse.FileType('r'),
                        help='Path to reference sequence.')
    return parser.parse_args()
def tokenize(chars):
    """Convert a string of characters into a list of tokens."""
    # Idea from http://norvig.com/lispy.html
    padded = chars.replace('[', ' [ ').replace(']', ' ] ').replace(',', ' ')
    tokens = padded.split()
    # Everything that is not a bracket is an integer literal.
    return [tok if tok in ('[', ']') else int(tok) for tok in tokens]
from typing import Tuple
def clean_version(package) -> Tuple[str, str, str]:
    """ Split a requirement string into the tuple ``(pkg, cmp_op, ver)``.

    Returns ``(package, "", "")`` when no comparison operator is present.
    """
    # Generalized: also recognizes "~=", ">" and "<". Two-character
    # operators must be tried before ">"/"<" so that e.g. "pkg>=1.0" is
    # not split at the bare ">".
    separators = ["==", ">=", "<=", "!=", "~=", ">", "<"]
    for sep in separators:
        if sep in package:
            return package.partition(sep)
    return (package, "", "")
def members_to_roles(data, dict_roles):
    """
    Attach each user to the member list of every role they hold.

    :param data: parsed JSON response — a list of user records, each with
        a "roles" list and a "user" field
    :param dict_roles: dict of roles (ID + name), mutated in place
    :return: dict_roles with a "members" list on every referenced role
        (ID + name + members)
    """
    for record in data:
        for role_id in record["roles"]:
            # Create the member list lazily on first sight of the role.
            dict_roles[role_id].setdefault("members", []).append(record["user"])
    return dict_roles
def get_gate_info(gate):
    """
    gate: str, string gate. ie H(0), or "cx(1, 0)".
    returns: tuple, (gate_name (str), gate_args (tuple)).
    """
    import ast
    gate = gate.strip().lower().replace("cnot", "cx")
    i = gate.index("(")
    gate_name = gate[:i]
    # Security fix: the argument part is a plain literal such as "(1, 0)",
    # so parse it with ast.literal_eval instead of eval() — eval would
    # execute arbitrary code embedded in the gate string.
    gate_args = ast.literal_eval(gate[i:])
    try:
        # A single argument like "(0)" evaluates to a bare int; wrap it.
        len(gate_args)
    except TypeError:
        gate_args = gate_args,
    return gate_name, gate_args
import random
def PickFromPool(n, pool, a_as_set):
    """Returns n items from the pool which do not appear in a_as_set.

    Args:
      n: number of items to return.
      pool: a sequence of elements to choose from.
      a_as_set: a set of elements which should not appear in the result.

    Returns:
      List of n items from the pool which do not appear in a_as_set.

    Raises:
      ValueError: if fewer than n eligible elements remain (from
      random.sample, as before).
    """
    assert isinstance(a_as_set, set)
    # Idiomatic comprehension instead of filter(lambda ...); keeps the
    # same element order, so random.sample draws identically.
    eligible = [item for item in pool if item not in a_as_set]
    # Pick n random positions out of the eligible elements.
    return random.sample(eligible, k=n)
def query_for_message_ids(service, search_query):
    """Search Gmail and return the matching message ids.

    Supports the same query format as the Gmail search box, e.g.
    "from:someuser@example.com rfc822msgid:<somemsgid@example.com>
    is:unread".
    """
    response = service.messages().list(userId='me', q=search_query).execute()
    matches = response.get('messages')
    if not matches:
        # No 'messages' key (or an empty list) means no hits.
        return []
    return [match['id'] for match in matches]
import logging
def wait_process_completion(remote_command_executor, pid):
    """Check once whether process ``pid`` has terminated on the remote host.

    Despite the name, this performs a single check: it raises while the
    process is still alive (callers presumably retry, e.g. via a retry
    wrapper — TODO confirm) and returns the command output otherwise.
    """
    logging.info("Waiting for performance test to complete")
    # `ps --pid` exits non-zero once the pid is gone; the shell snippet
    # translates that exit status into COMPLETE/RUNNING on stdout.
    command = f"""
    ps --pid {pid} > /dev/null
    [ "$?" -ne 0 ] && echo "COMPLETE" || echo "RUNNING"
    """
    result = remote_command_executor.run_remote_command(command)
    # NOTE(review): this equality assumes the executor strips trailing
    # whitespace from stdout — confirm; otherwise "RUNNING\n" never
    # matches and a live process would be reported as complete.
    if result.stdout == "RUNNING":
        raise Exception("The process is still running")
    else:
        return result.stdout.strip()
def trim(s):
    """Trim string to fit on terminal (assuming 80-column display)"""
    if len(s) <= 80:
        return s
    # 77 chars + "..." keeps the result exactly 80 wide.
    return s[:77] + "..."
import argparse
def cmdLineParse():
    """
    Command line parser for the geo2rdr step.

    Returns the parsed argparse.Namespace; reads sys.argv.
    """
    parser = argparse.ArgumentParser(description="""
        Run geo2rdr.""",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)

    # Required arguments
    parser.add_argument('-p', '--product', type=str, required=True,
        help='Input HDF5 product.')

    # Optional arguments
    parser.add_argument('-t', '--topopath', type=str, action='store', default='rdr2geo/topo.vrt',
        help='Input topo file path.')
    parser.add_argument('--azoff', type=float, action='store', default=0.0,
        help='Gross azimuth offset.')
    parser.add_argument('--rgoff', type=float, action='store', default=0.0,
        help='Gross range offset.')
    parser.add_argument('-f', '--freq', action='store', type=str, default='A', dest='freq',
        help='Frequency code for product.')
    parser.add_argument('-o', '--output', type=str, action='store', default='offsets',
        help='Output directory.', dest='outdir')
    parser.add_argument('-g', '--gpu', action='store_true', default=False,
        help='Enable GPU processing.')

    # Parse and return
    return parser.parse_args()
def euler(Tn, dTndt, tStep):
    """
    Performs a single forward-Euler integration step to obtain T_{n+1}.

    Arguments:
        Tn: Array-like representing Temperature at time t_n.
        dTndt: Array-like representing dT/dt at time t_n.
        tStep: Float determining the time to step over.

    Returns T_{n+1} as an array-like.
    """
    increment = dTndt * tStep
    return Tn + increment
def test_job_static_id():
    """Return the id of a known, previously-submitted job."""
    known_job_id = '1784a694-0737-480e-95f2-44f55fb35fb7-007'
    return known_job_id
import configparser
def get_oauth_config(filename):
    """
    Attempt to pull in the twitter OAuth token and secret from the
    config file. If either key is missing, both are set to None and a
    default config file is (re)written.

    Arguments:
        filename -- name of the config file to try and parse

    Returns:
        config_oauth_store -- contains the twitter OAuth token and secret
    """
    keys = ("OAUTH_TOKEN", "OAUTH_TOKEN_SECRET")
    config_oauth_store = {}
    parser = configparser.ConfigParser()
    parser.read(filename)
    try:
        for key in keys:
            value = parser['DEFAULT'][key]
            # The literal string "None" marks an unset credential.
            config_oauth_store[key] = None if value == "None" else value
    except KeyError:
        # Either key missing: reset both and write a default file so the
        # next run finds a well-formed config (same behavior as before).
        config_oauth_store = {key: None for key in keys}
        defaults = configparser.ConfigParser()
        defaults['DEFAULT'] = {key: 'None' for key in keys}
        with open(filename, 'w') as configfile:
            defaults.write(configfile)
    return config_oauth_store
def get_opts(options):
    """
    Args:
        options: options object — either a (args, kwargs) pair, a bare
            tuple of positional args, or a kwargs mapping.

    Returns:
        args (tuple): positional options.
        kwargs (map): keyword arguments.
    """
    if isinstance(options, dict):
        return (), options
    if not isinstance(options, tuple):
        raise ValueError("Options object expected to be either pair of (args, kwargs) or only args/kwargs")
    # A 2-tuple whose last element is a dict is treated as (args, kwargs).
    if len(options) == 2 and isinstance(options[-1], dict):
        args, kwargs = options
        return args, kwargs
    return options, {}
from pathlib import Path
def count_files(path, glob=None) -> int:
    """Return the number of files in a given directory (optionally
    restricted to entries matching the glob pattern)."""
    directory = Path(path)
    entries = directory.glob(glob) if glob else directory.iterdir()
    total = 0
    for entry in entries:
        if entry.is_file():
            total += 1
    return total
def arkToInfo(ark):
    """
    Turn an ark id into an info: uri
    """
    # Anything before "ark:" is discarded, matching the old split-based code.
    head, sep, tail = ark.partition("ark:")
    if not sep:
        # No "ark:" present: pass the value through untouched.
        return ark
    return "info:ark%s" % tail
import os
def get_filename_without_extension(path):
    """
    Extracts the filename without the extension from the given path.

    Parameters
    ----------
    path : str
        The path to get the filename from.

    Returns
    -------
    The filename without the extension ('' for None/blank input).
    """
    if path is None:
        return ''
    stripped = path.strip()
    if not stripped:
        return ''
    basename = os.path.basename(stripped)
    stem, _ext = os.path.splitext(basename)
    return stem
def closure(func):
    """Wrap ``func`` so its result is printed instead of returned."""
    def inner(*args):
        """Forward positional args to the wrapped func, print the result
        (always returns None, the return value of print)."""
        result = func(*args)
        return print(result)
    return inner
import torch
def create_rectified_fundamental_matrix(batch_size):
    """Creates a batch of rectified fundamental matrices of shape Bx3x3"""
    base = torch.tensor(
        [[0.0, 0.0, 0.0], [0.0, 0.0, -1.0], [0.0, 1.0, 0.0]]
    ).view(1, 3, 3)
    # Tile the single matrix across the batch dimension.
    return base.repeat(batch_size, 1, 1)
def score_mode_discrete(x, candidates):
    """Scores a candidate based on likelihood of being the mode."""
    # For each candidate, count how many entries of x equal it
    # (broadcasted equality, summed per candidate row).
    matches = candidates[:, None] == x[None]
    return matches.sum(axis=1)
def get_midpoint(origin, destination):
    """ Return the midpoint between two points on cartesian plane.

    :param origin: (x, y)
    :param destination: (x, y)
    :return: (x, y)
    """
    mid_x = (origin[0] + destination[0]) / 2.0
    mid_y = (origin[1] + destination[1]) / 2.0
    return mid_x, mid_y
def rotate(b, dir="left"):
    """Rotate a bit sequence by one position.

    Returns a new list; ``b`` itself is never modified.  An unknown
    ``dir`` returns an unrotated copy, and an empty sequence yields an
    empty list (bug fix: the old pop-based version raised IndexError on
    empty input).

    Rotate left
    >>> b1 = [0,1,0]
    >>> assert rotate(b1, "left") == [1,0,0]
    >>> assert b1 == [0,1,0]
    >>> assert rotate([0,0,1], "left") == [0,1,0]
    >>> assert rotate([1,0,0], "left") == [0,0,1]

    Rotate right
    >>> assert rotate(b1, "right") == [0,0,1]
    >>> assert b1 == [0,1,0]
    >>> assert rotate([0,0,1], "right") == [1,0,0]
    >>> assert rotate([1,0,0], "right") == [0,1,0]
    """
    b_out = list(b)
    if dir in ("left", "<"):
        return b_out[1:] + b_out[:1]
    if dir in ("right", ">"):
        return b_out[-1:] + b_out[:-1]
    return b_out
import string
import random
def id_generator(size=5, chars=string.ascii_lowercase + string.digits):
    """
    Generate a random suffix for created resources.

    Draws ``size`` characters uniformly (with replacement) from ``chars``.
    """
    picked = [random.choice(chars) for _ in range(size)]
    return ''.join(picked)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.