content stringlengths 35 416k | sha1 stringlengths 40 40 | id int64 0 710k |
|---|---|---|
def dictmerge(D, others):
    """
    Merge a dictionary with other dictionaries.
    **Parameters**\n
    D: dict
        Main dictionary.
    others: list/tuple/dict
        Other dictionary or composite dictionarized elements.
    **Return**\n
    D: dict
        Merged dictionary (later dictionaries win on key collisions).
    """
    # isinstance (rather than exact type comparison) also accepts subclasses
    # such as collections.OrderedDict, which behave like their base types here.
    if isinstance(others, (list, tuple)):  # Merge D with a sequence of dictionaries
        for oth in others:
            D = {**D, **oth}
    elif isinstance(others, dict):  # Merge D with a single dictionary
        D = {**D, **others}
    return D
def real_space_shape_2d_tag_from_real_space_shape_2d(real_space_shape_2d):
    """Generate a sub-grid tag, to customize phase names based on the sub-grid size used.
    This changes the phase name 'phase_name' as follows:
    real_space_shape_2d = None -> phase_name
    real_space_shape_2d = 1 -> phase_name_real_space_shape_2d_2
    real_space_shape_2d = 4 -> phase_name_real_space_shape_2d_4
    """
    if real_space_shape_2d is None:
        return ""
    # Format the (y, x) pair into the tag suffix.
    return "__rs_shape_{}x{}".format(real_space_shape_2d[0], real_space_shape_2d[1])
import argparse
def argparse_is_valid_pe(pe):
    """ Validate the parallel argument specifier passed in.

    :param pe: the specifier, expected to be 'y' or 'n'
    :return: the specifier if it is valid
    :raises argparse.ArgumentTypeError: for any other value
    """
    if pe in ('y', 'n'):
        return pe
    # Typo fix in the user-facing message: "Exitting" -> "Exiting".
    raise argparse.ArgumentTypeError("Argument 'pe' is not valid. Choose " +
                                     "either 'y' or 'n'. Exiting...")
import csv
def _get_matrix(file_name):
""" Trims header row and id column (1st of each) """
# return reader[1:][1:]
header = None
with open(file_name, "r") as file:
matrix = []
reader = csv.reader(file)
for string_row in reader:
number_row=[]
if header == None:
header = string_row
header.append(" xx")
else:
for s in string_row:
s = s.strip()
if s == "NA":
number_row.append(None)
else:
number_row.append(float(s))
number_row.append(number_row[20])
matrix.append(number_row)
# add in a test case that is identical ...
return (header, matrix) | 7085e1dcc5155af2bbec64b5afe75625337e2d9f | 695,084 |
def merge_data_by_sliding_window(data, n_days=1, dropna=True, subset=None, time_form='diff'):
    """Merge time-series data with a sliding window.

    :param data: time-series DataFrame; first row-index level is PATIENT_ID,
        second level is RE_DATE
    :param n_days: window length in days
    :param dropna: whether to drop rows that still have missing values after
        the window merge
    :param subset: forwarded to ``pd.DataFrame.dropna()``  Note: new parameter!
    :param time_form: time index of the returned data, 'diff' or 'timestamp'
    :return: merged data; first row-index level is PATIENT_ID, second level is
        t_diff or RE_DATE, depending on ``time_form``
    """
    # Move RE_DATE out of the index (data is ordered by PATIENT_ID).
    data = data.reset_index(level=1)
    # dt.normalize() truncates timestamps to whole days.
    # Days until discharge ('出院时间' is the discharge-time column).  The
    # time-of-day of both the discharge and the measurement timestamps is
    # dropped, so midnight acts as the cut-off point.
    t_diff = data['出院时间'].dt.normalize() - data['RE_DATE'].dt.normalize()
    # Window bucketing: day counts within the same n_days window are rounded
    # down to the same value, which the groupby below uses as the window key.
    data['t_diff'] = t_diff.dt.days.values // n_days * n_days
    #
    data = data.set_index('t_diff', append=True)
    # Sliding-window merge.  Grouping by ['PATIENT_ID', 't_diff'] is
    # equivalent to a double loop over patients and their windows.
    # Because the data is sorted, each patient's t_diff runs from large to
    # small; ffill() propagates values downward, i.e. toward older dates.
    # last() keeps the final row of each group — the latest measurement in
    # each patient's window (and therefore the most complete one).
    # last() also sorts: the result comes back in ascending index order.
    data = (
        data
        .groupby(['PATIENT_ID', 't_diff']).ffill()
        .groupby(['PATIENT_ID', 't_diff']).last()
    )
    # Drop samples that still have missing values.
    if dropna:
        data = data.dropna(subset=subset)  # Note: dropna() here rather than fillna(-1)
    # Rebuild the second index level (the timestamp form is unused in the paper).
    if time_form == 'timestamp':
        data = (
            data
            .reset_index(level=1, drop=True)
            .set_index('RE_DATE', append=True)
        )
    elif time_form == 'diff':
        data = data.drop(columns=['RE_DATE'])
    return data
def preproc_space(text):
    """Line preprocessing: drop a single leading space, if present."""
    if text.startswith(" "):
        return text[1:]
    return text
import os
def join_path_parts(dic, base):
    """Build a file path from the pieces stored in *dic*.

    ``base == 'subdir'`` yields ``prefix/subdir``; any other value of *base*
    is used as the key of a filename entry and yields
    ``prefix/subdir/<filename>``.
    """
    parts = [dic['prefix'], dic['subdir']]
    if base != 'subdir':
        parts.append(dic[base])
    return os.path.join(*parts)
def newton_leipnik(XYZ, t, a=0.4, b=0.175):
    """
    The Newton-Leipnik Attractor.
    x0 = (0.349,0,-0.16)

    Bug fix: y_dt previously hard-coded the damping coefficient 0.4 even
    though the same constant is exposed as parameter ``a`` (and used in
    x_dt); ``a`` is now honored, which is behavior-identical at the default
    a=0.4 and makes non-default ``a`` take effect everywhere.
    """
    x, y, z = XYZ
    x_dt = -a * x + y + 10 * y * z
    y_dt = -x - a * y + 5 * x * z
    z_dt = b * z - 5 * x * y
    return x_dt, y_dt, z_dt
def input_float(x):
    """
    Convert *x* to a float and return it.

    Args:
        x: any value accepted by the built-in ``float()`` constructor
           (a number or a numeric string).

    Returns:
        The float value of *x*.

    Raises:
        ValueError/TypeError: when *x* cannot be converted (propagated
        from ``float()``).

    NOTE(review): the previous docstring described an interactive,
    validated input loop (prompt / validation_function /
    exception_function / success_function callbacks), but the
    implementation is a plain conversion of its argument.  The docstring
    has been rewritten to match the code; if the loop behavior is wanted,
    the body needs to be implemented, not just documented.
    """
    return float(x)
import os
def read_temp_file(filename: str, delete: bool = True, stdout: str = '', stderr: str = '') -> str:
    """ Waits for *filename* to appear, reads it, and (optionally) deletes it.

    :param filename: path of the file to wait for and read
    :param delete: remove the file after reading (best effort)
    :param stdout: captured stdout to include in the error message when the
        wait is interrupted
    :param stderr: captured stderr, likewise
    :return: the file contents decoded as UTF-8
    :raises Exception: when the wait loop is interrupted with Ctrl-C
    """
    import time  # local import: only needed for the polling sleep
    # wait for file to be generated
    print(f'Waiting for (unknown) file...')
    try:
        # Poll with a short sleep instead of a busy `pass` loop, so the
        # wait does not spin a CPU core at 100%.
        while not os.path.exists(filename):
            time.sleep(0.01)
    except KeyboardInterrupt as e:
        error_msg = f'Stdout: {stdout}\nStderr: {stderr}\n'
        # Chain the original interrupt so the traceback shows the cause.
        raise Exception(error_msg) from e
    # read file
    with open(filename, 'r', encoding='utf-8') as f:
        out_str = f.read()
    # delete file (best effort)
    if delete and os.path.exists(filename):
        try:
            os.remove(filename)
        except OSError:
            # File vanished between the existence check and the remove.
            print(f'(unknown) file already removed')
    return out_str
def user_dict():
    """
    Return a fixture dictionary with sample user registration data.
    """
    return {
        "username": "doe",
        "password": "test",
        "password2": "test",
        "email": "doe@example.com",
    }
def get_acceleration_of_gravity(dummy_carla_vehicle):
    """
    Return the acceleration of gravity for a carla vehicle.

    Currently a constant 9.81 m/s^2; the vehicle argument is unused but kept
    for interface compatibility.
    :param dummy_carla_vehicle: the carla vehicle (ignored)
    :return: acceleration of gravity [m/s^2]
    :rtype: float
    """
    return 9.81
def soma(x, y):
    """Return the sum of x and y."""
    return x + y
def recite(start_verse, end_verse):
    """
    Build the "Twelve Days of Christmas" song for a range of verses.
    :param start_verse: first verse number (1-based, inclusive)
    :param end_verse: last verse number (inclusive)
    :return: list of verse strings
    """
    ordinals = ('first', 'second', 'third', 'fourth', 'fifth', 'sixth',
                'seventh', 'eighth', 'ninth', 'tenth', 'eleventh', 'twelfth')
    gifts = (
        'and a Partridge in a Pear Tree.',
        'two Turtle Doves, ',
        'three French Hens, ',
        'four Calling Birds, ',
        'five Gold Rings, ',
        'six Geese-a-Laying, ',
        'seven Swans-a-Swimming, ',
        'eight Maids-a-Milking, ',
        'nine Ladies Dancing, ',
        'ten Lords-a-Leaping, ',
        'eleven Pipers Piping, ',
        'twelve Drummers Drumming, ',
    )

    def build_verse(verse):
        opening = f'On the {ordinals[verse - 1]} day of Christmas my true love gave to me: '
        if verse == 1:
            # The first verse has no leading "and".
            return opening + "a Partridge in a Pear Tree."
        # Gifts are listed from this verse's gift down to the partridge.
        return opening + "".join(gifts[verse - 1::-1])

    return [build_verse(number) for number in range(start_verse, end_verse + 1)]
import os
def getproxies_environment():
    """Return a dictionary of scheme -> proxy server URL mappings.
    Scan the environment for variables named <scheme>_proxy;
    this seems to be the standard convention. If you need a
    different way, you can pass a proxies dictionary to the
    [Fancy]URLopener constructor.
    """
    proxies = {}
    # in order to prefer lowercase variables, process environment in
    # two passes: first matches any, second pass matches lowercase only
    for name, value in os.environ.items():
        name = name.lower()
        if value and name[-6:] == '_proxy':
            proxies[name[:-6]] = value
    # CVE-2016-1000110 - If we are running as CGI script, forget HTTP_PROXY
    # (non-all-lowercase) as it may be set from the web server by a "Proxy:"
    # header from the client
    # If "proxy" is lowercase, it will still be used thanks to the next block
    if 'REQUEST_METHOD' in os.environ:
        proxies.pop('http', None)
    # Second pass: only variables whose '_proxy' suffix is already lowercase
    # are honored here, so a lowercase setting overrides (or, when empty,
    # removes) the value captured case-insensitively above.
    for name, value in os.environ.items():
        if name[-6:] == '_proxy':
            name = name.lower()
            if value:
                proxies[name[:-6]] = value
            else:
                proxies.pop(name[:-6], None)
    return proxies
def update_max_speed(vehicle, speed):
    """
    Update the max speed of a vehicle by delegating to the profile itself.
    :param vehicle: vehicle profile to update
    :param speed: new max speed
    :type vehicle: VehicleProfile
    :return: the updated vehicle returned by the profile
    """
    updated_vehicle = vehicle.update_max_speed(speed)
    return updated_vehicle
def _select_features(example, feature_list=None):
"""Select a subset of features from the example dict."""
feature_list = feature_list or ["inputs", "targets"]
return {f: example[f] for f in feature_list if f in example} | 3c8c0363b45ca0d6f44642979c30fa1854371b41 | 695,098 |
import re
def parse_age(age):
    """
    Convert a human-friendly duration string into an ISO 8601 duration.

    Accepts forms like "3 years", "10d", "2 Weeks"; the unit's first letter
    (upper-cased) becomes the ISO designator, e.g. "3 years" -> "P3Y".

    Parameters
    ----------
    age : str

    Returns
    -------
    str

    Raises
    ------
    ValueError
        When *age* does not match the expected pattern.
    """
    match = re.fullmatch(r"(\d+)\s*(y(ear)?|m(onth)?|w(eek)?|d(ay)?)s?", age, flags=re.I)
    if match is None:
        raise ValueError(age)
    quantity = int(match.group(1))
    designator = match.group(2)[0].upper()
    return "P" + str(quantity) + designator
from typing import Dict
from typing import Tuple
def create_default_domain_param_map_omo() -> Dict[int, Tuple[str, str]]:
    """
    Create the default mapping from indices to domain parameters (as used in the `BayRn` algorithm).
    :return: `dict` where the key is the index and the value is a tuple of domain parameter and the associated domain
             distribution parameter
    """
    # Index 4 is intentionally absent: the indices jump from 3 to 5.
    indices = (0, 1, 2, 3, 5, 6)
    pairs = [
        (param, stat)
        for param in ("m", "k", "d")
        for stat in ("mean", "std")
    ]
    return dict(zip(indices, pairs))
import bisect
def get_group_floors(df):
    """Floor the minimum value of each (category, variant.type) group to the
    nearest 5,000 below it, returning a dict keyed by the group name.
    """
    floor_vals = [step * 5e3 for step in range(50)]
    floors = {}
    for name, group in df.groupby(["category", "variant.type"]):
        smallest = min(group["value"])
        position = bisect.bisect(floor_vals, smallest) - 1
        floors[name] = int(floor_vals[position])
    return floors
def has_field_errors(form):
    """
    Report whether *form* has any per-field errors, as opposed to only
    having form.non_field_errors. Kept as a Python helper because expressing
    this conditional directly in a template proved very challenging.
    """
    if not form.errors:
        return False
    return any(field.errors for field in form)
import yaml
def print_yml(analysis: dict):
    """
    Serialize an analysis dictionary to a YAML string.

    (Despite the name, the YAML text is returned rather than printed.)
    """
    dump_options = {"allow_unicode": True, "default_flow_style": False}
    return yaml.dump(analysis, **dump_options)
import os
import sys
def abs_path_dir(dir_name):
    """Check that *dir_name* is an existing directory and return its
    absolute path; otherwise print an error and end the program.

    :param dir_name: directory path to validate
    :return: absolute path of the directory
    """
    # os.path.isdir expresses the intent directly (the previous
    # `not isfile and exists` was an indirect way to say the same thing).
    if os.path.isdir(dir_name):
        return os.path.abspath(dir_name)
    print("Invalid directory name: " + dir_name)
    sys.exit()
def drop_keys(d):
    """
    Recursively strip layout payload for parity with the R and Julia apps:
    drops "propNames"/"package" keys, None values, and empty-string
    "children" entries; lists are processed element-wise.
    """
    if isinstance(d, list):
        return [drop_keys(element) for element in d]
    if not isinstance(d, dict):
        return d
    cleaned = {}
    for key, value in d.items():
        if key in ("propNames", "package"):
            continue
        if value is None:
            continue
        if key == "children" and value == "":
            continue
        cleaned[key] = drop_keys(value)
    return cleaned
import struct
def object_to_msh_format(vertices, faces, texcoords, normals):
    """Convert a mesh given as flat lists into the binary MSH format.

    Layout per http://mujoco.org/book/XMLreference.html#mesh: a 4-int header
    (counts), then vertex floats, optional normal floats, optional texcoord
    floats, and face index ints.
    """
    nvertex = len(vertices) // 3
    nnormal = len(normals) // 3
    ntexcoord = len(texcoords) // 2
    nface = len(faces) // 3
    sections = [struct.pack('4i', nvertex, nnormal, ntexcoord, nface)]
    sections.append(struct.pack('%df' % (3 * nvertex), *vertices))
    if nnormal:
        sections.append(struct.pack('%df' % (3 * nnormal), *normals))
    if ntexcoord:
        sections.append(struct.pack('%df' % (2 * ntexcoord), *texcoords))
    sections.append(struct.pack('%di' % (3 * nface), *faces))
    return b''.join(sections)
def post(path):
    """
    @post decorator: tag a handler function as responding to HTTP POST
    requests on *path*, by attaching ``__web_route__`` and
    ``__web_method__`` attributes that the routing layer reads.

    >>> @post('/post/:id')
    ... def testpost():
    ...     return '200'
    ...
    >>> testpost.__web_route__
    '/post/:id'
    >>> testpost.__web_method__
    'POST'
    >>> testpost()
    '200'
    """
    def _decorator(func):
        setattr(func, '__web_route__', path)
        setattr(func, '__web_method__', 'POST')
        return func
    return _decorator
def search(search_dict, field):
    """
    Recursively collect every value stored under key *field* anywhere in a
    structure of nested dicts and lists.

    Note: when a matching key's value is itself a dict, that value is
    returned as-is and NOT descended into.
    """
    found = []
    if isinstance(search_dict, dict):
        for key, value in search_dict.items():
            if key == field:
                found.append(value)
            elif isinstance(value, dict):
                found.extend(search(value, field))
            elif isinstance(value, list):
                for element in value:
                    found.extend(search(element, field))
    elif isinstance(search_dict, list):
        for element in search_dict:
            found.extend(search(element, field))
    return found
def base_path(path):
    """
    Return the path of a file in the sync folder without the leading
    folder-separation character ('/' or '\\').
    """
    if path.startswith("/"):
        path = path[1:]
    if path.startswith("\\"):
        path = path[1:]
    return path
def depth_first(start, children_func):
    """Return a post-order depth-first traversal of a tree.

    Args:
        start: the root of the tree.
        children_func: function taking a node to its sequence of children.

    Returns:
        a list of nodes, each node appearing after all of its descendants.
    """
    visited = set()
    ordering = []

    def visit(current):
        if current in visited:
            return
        visited.add(current)
        for child in children_func(current):
            visit(child)
        # Append after the children: post-order.
        ordering.append(current)

    visit(start)
    return ordering
def get_outliers(cs_exp_name):
    """Computes if an observed chemical shift is within 3 standard deviation
    from the mean of observed values. The observed values were taken from the
    BMRB http://www.bmrb.wisc.edu/ref_info/statsel.htm

    Reads '<cs_exp_name>.ocs' (first line is a header; each data line is
    "res_num res_name CA_shift CB_shift"). A shift of 999.00 marks a
    missing observation and is skipped.

    :param cs_exp_name: base name of the .ocs file (without extension)
    :return: (low_Ca, high_Ca, low_Cb, high_Cb) lists of residue labels
        such as "Ala12"
    """
    boundaries_CA = {'ALA':(47.23,59.11), 'ARG':(49.86,63.72), 'ASP':(48.57,60.81), 'ASN':(47.88,59.22), 'CYS':(48.28,68.38), 'GLU':(51.07,63.61), 'GLN':(50.18,63.02), 'GLY':(41.37,49.35), 'HIS':(49.57,63.49), 'ILE':(53.58,69.72), 'LEU':(49.29,62.01), 'LYS':(50.38,63.58), 'MET':(49.42,62.86), 'PHE':(50.42,65.84), 'PRO':(58.79,67.91), 'SER':(52.50,64.98), 'THR':(54.47,70.01), 'TRP':(49.95,65.43), 'TYR':(50.57,65.75), 'VAL':(53.92,71.14)}
    boundaries_CB = {'ALA':(13.60,24.34), 'ARG':(25.17,36.15), 'ASP':(36.01,45.73), 'ASN':(33.71,43.67), 'CYS':(14.34,50.88), 'GLU':(24.88,35.08), 'GLN':(23.73,34.59), 'HIS':(24.01,36.43), 'ILE':(32.59,44.59), 'LEU':(36.70,47.86), 'LYS':(27.46,38.08), 'MET':(26.32,39.58), 'PHE':(33.74,46.16), 'PRO':(28.31,35.39), 'SER':(59.26,68.32), 'THR':(64.21,75.19), 'TRP':(23.89,36.07), 'TYR':(32.82,45.72), 'VAL':(27.37,38.05)}
    low_Ca = []
    high_Ca = []
    low_Cb = []
    high_Cb = []
    # `with` fixes the previous file-handle leak (fd was never closed).
    with open('%s.ocs' % (cs_exp_name)) as fd:
        fd.readline()  # skip the header line
        for line in fd:
            res_num = line.split()[0]
            res_name = line.split()[1]
            cs_exp_Ca_value = float(line.split()[2])
            cs_exp_Cb_value = float(line.split()[3])
            if cs_exp_Ca_value < 999.00:
                res_outlier = res_name.title() + res_num
                if cs_exp_Ca_value < boundaries_CA[res_name][0]:
                    low_Ca.append(res_outlier)
                elif cs_exp_Ca_value > boundaries_CA[res_name][1]:
                    high_Ca.append(res_outlier)
            if cs_exp_Cb_value < 999.00:
                res_outlier = res_name.title() + res_num
                if cs_exp_Cb_value < boundaries_CB[res_name][0]:
                    low_Cb.append(res_outlier)
                elif cs_exp_Cb_value > boundaries_CB[res_name][1]:
                    high_Cb.append(res_outlier)
    return low_Ca, high_Ca, low_Cb, high_Cb
def indent(level):
    """For use as a mako filter.

    Returns a function that indents every line of a block of text by
    *level* indentation units.
    """
    prefix = level * " "

    def _indent_block(text):
        # Prepend the prefix to each line, keeping original line endings.
        return "".join(prefix + line for line in text.splitlines(True))

    return _indent_block
def getBBHeight(bb):
    """
    **SUMMARY**
    (Dev Zone)
    Get the height of a bounding box.
    **PARAMETERS**
    bb - Bounding Box represented through 2 points (x1,y1,x2,y2)
    **RETURNS**
    height of the bounding box (inclusive of both edge rows)
    """
    _, y1, _, y2 = bb
    return y2 - y1 + 1
def _sanitize(io):
"""
removes ":" "1:" etc if any in front of the io
"""
# trim begining and ending spaces
io = io.strip()
# if Output: Input: etc.
if(io[0] == ":"):
io = io[1:]
# if Output1: Input1: etc
elif(len(io) > 1 and io[1] == ":"):
io = io[2:]
return io.strip()+"\n" | 4ecbc623de9dcf85551dc644a667ff7f74e33034 | 695,118 |
def swap(arr, i, j):
    """Swap two items of a list in place and return the list.

    Args:
        arr: list to be changed
        i: index of the first item
        j: index of the second item
    """
    arr[i], arr[j] = arr[j], arr[i]
    return arr
import requests
def get_fx_rates(api_key, exchange_currency, desired_currency):
    """Fetch the 100-day daily FX rate history for a currency pair from the
    Alpha Vantage API and return the raw HTTP response (JSON body)."""
    base_url = 'https://www.alphavantage.co/query?'
    query = (
        "function=FX_DAILY"
        f"&from_symbol={exchange_currency}"
        f"&to_symbol={desired_currency}"
        f"&apikey={api_key}"
    )
    return requests.get(base_url + query)
import networkx as nx
def join_graphs(vecgraph):
    """Compose two or more networkx graphs into one.

    Args:
        vecgraph: list of graphs to be concatenated; a single-element list
            returns that graph unchanged.
    """
    if len(vecgraph) < 2:
        return vecgraph[0]
    merged = nx.compose(vecgraph[0], vecgraph[1])
    for extra_graph in vecgraph[2:]:
        merged = nx.compose(merged, extra_graph)
    return merged
def get_sentiment_emoji(sentiment):
    """Returns an emoji shortcode representing the sentiment score."""
    if sentiment > 0:
        return ":thumbsup:"
    if sentiment < 0:
        return ":thumbsdown:"
    return ":neutral_face:"
from typing import Any
def useful_tree_predicate(
    y: Any, raw_predictions: Any, previous_loss: float, current_loss: float
) -> bool:
    """Return True when the candidate tree lowered the ensemble loss.

    Only useful trees (those with ``current_loss < previous_loss``) should
    be added to the ensemble; trees that increase the loss are discarded by
    the caller.
    """
    loss_improved = current_loss < previous_loss
    return loss_improved
def repr_args(args, kwargs):
    """Stringify a set of arguments.

    Arguments:
        args: tuple of positional arguments as a function would see it.
        kwargs: dictionary of keyword arguments as a function would see it.

    Returns:
        String as you would write it in a script, e.g. "1, 'a', k=2".
    """
    positional = ", ".join(map(repr, args))
    keyword = ", ".join(f"{key}={value!r}" for key, value in kwargs.items())
    if args and kwargs:
        return positional + ", " + keyword
    if args:
        return positional
    return keyword
from typing import Callable
import os
def get_data_file_contents(data_dir: str) -> Callable[[str], str]:
    """
    Build a convenience reader bound to the tests data directory *data_dir*.
    """
    def get_contents(filename: str) -> str:
        """
        Return the text of *filename* inside the data directory, decoding as
        ASCII when possible and falling back to UTF-8 otherwise.

        ARGUMENTS:
        filename : str : name of the file within the tests data directory
        """
        full_path = os.path.join(data_dir, filename)
        with open(full_path, 'rb') as handle:
            raw = handle.read()
        try:
            return raw.decode('ascii')
        except UnicodeDecodeError:
            return raw.decode('utf-8')
    return get_contents
import random
def create_random_list(size=10):
    """
    Build a list of *size* random integers, each drawn from range(size).
    :param size: length of the list (and exclusive upper bound of its values)
    :return: list of random integers
    """
    return [random.choice(range(size)) for _ in range(size)]
def deg2gon(ang):
    """ convert from degrees to gon
    Parameters
    ----------
    ang : unit=degrees, range=0...360
    Returns
    -------
    ang : unit=gon, range=0...400
    See Also
    --------
    gon2deg, deg2compass
    """
    # 360 degrees == 400 gon, hence the 400/360 scale factor.
    ang *= 400/360
    return ang
def read_available(filename):
    """
    Parses the output of ``bitbake -s`` (minus the first few lines) into a
    dict mapping package name -> version.

    Lines starting with "NOTE: ", "Parsing .bb" or "done." are skipped.
    A data line is either "<package> <version>" or
    "<package> <latest> <version>"; in the latter case the last field wins.

    :param filename: path of the captured bitbake output
    :return: dict {package: version}
    """
    packages = {}
    # `with` fixes the previous file-handle leak (f was never closed).
    with open(filename) as f:
        for line in f:
            if line.startswith(("NOTE: ", "Parsing .bb", "done.")):
                continue
            # str.split() without args can not be used as we have multiple
            # whitespace between the columns.
            split = line.split(" ", 1)
            package = split[0]
            rest = split[1].strip()
            # we might have a "latest" column before the version...
            split = rest.split(" ", 1)
            if len(split) == 2:
                version = split[1].strip()
            else:
                version = split[0]
            packages[package] = version
    return packages
import sys
def download_comics_menu(comics_found):
    """
    Main download menu, takes number of available comics for download.

    Prompts on stdin for how many comics to download, re-asking on invalid
    input; an answer of 0 exits the program via sys.exit().

    :param comics_found: sequence of comics discovered by the scraper
    :return: the validated number of comics to download
        (between 1 and len(comics_found))
    """
    print("\nThe scraper has found {} comics.".format(len(comics_found)))
    print("How many comics do you want to download?")
    print("Type 0 to exit.")
    while True:
        try:
            comics_to_download = int(input(">> "))
        except ValueError:
            # Non-numeric input: re-prompt.
            print("Error: expected a number. Try again.")
            continue
        if comics_to_download > len(comics_found) or comics_to_download < 0:
            print("Error: incorrect number of comics to download. Try again.")
            continue
        elif comics_to_download == 0:
            # Explicit user abort.
            sys.exit()
        return comics_to_download
import os
def get_filenames_from_path(path):
    """
    Get all files' names from a path.
    Does not return directories.
    :param path: a string, the path to retrieve the files' names from
    :return: a list of file names
    """
    entries = os.listdir(path)
    return [entry for entry in entries
            if os.path.isfile(os.path.join(path, entry))]
def checksums2dict(checksums: list) -> dict:
    """
    Convert a list of checksums to a dict for easier look-up of a block.
    Duplicate checksums keep the index of their first occurrence.
    :param checksums: sequence of checksums
    :return: dictionary of {checksum: index}
    """
    mapping = {}
    for position, value in enumerate(checksums):
        # setdefault keeps the first index seen for each checksum.
        mapping.setdefault(value, position)
    return mapping
def fibonacci_element(n, computed = {0: 0, 1: 1}):
    """ Calculate the n-th Fibonacci number.

    NOTE: the mutable default argument is intentional here — it acts as a
    memoization cache shared across calls.
    """
    if n in computed:
        return computed[n]
    computed[n] = fibonacci_element(n - 1, computed) + fibonacci_element(n - 2, computed)
    return computed[n]
def prefer_shallow_depths(solutions, weight=0.1):
    """Penalize solutions whose deepest placement exceeds the shallowest
    maximum depth found across all solutions."""
    try:
        # Smallest maximum depth across solutions, and the global deepest.
        shallowest = min(max(p.depth for p in s.assignment) for s in solutions)
        deepest = max(p.depth for s in solutions for p in s.assignment)
    except ValueError:
        # Empty solutions list — nothing to compare.
        shallowest = 0
        deepest = 0
    spread = deepest - shallowest
    if not spread:
        # All solutions are equally deep: return them untouched.
        return solutions
    penalized = []
    for solution in solutions:
        excess = max(p.depth for p in solution.assignment) - shallowest
        penalized.append(solution.copy_with_penalty(weight * excess / spread))
    return penalized
def get_extension(path):
    """Return the extension after the last '.' in a path (empty if none)."""
    dot_index = path.rfind('.')
    return '' if dot_index < 0 else path[dot_index + 1:]
def jenkins_jobs():
    """Return the names of the test jenkins jobs."""
    job_names = ['unit', 'functional']
    return job_names
from typing import List
from typing import Callable
def get(route: str, additional_methods: List[str]=[]) -> Callable:
    """
    Mark a handler as responding to HTTP GET requests on *route*.

    .. Usages::
        @get('/')
        def main(self):
            pass

    The route may contain placeholders of the form ``{name:type}`` (type
    being ``str`` or ``int``), e.g. ``@get('/user/{id:int}')``; the matched
    value is passed to the handler as a parameter.

    Note: applying the decorator replaces the handler name with the
    registration tuple ``(route, 'GET', func, additional_methods)`` that the
    routing layer consumes.
    """
    def _register(func):
        return (route, 'GET', func, additional_methods)
    return _register
def replace_neighbor(img, remove_coord):
    """
    Replaces region in image defined by remove_coord with its
    neighbor in the y-axis.

    The rows of the box are overwritten with copies of the row just above
    and/or just below the box. img is indexed as (row, col, channel), so a
    3-D array is required.

    Parameters
    -------------------------------------
    img: ndarray
        Contains image data
    remove_coord: tuple
        Coordinates of box containing the remove_region. Order is: (
            top left x-coordinate,
            top left y-coordinate,
            bottom right x-coordinate,
            bottom right y-coordinate
        )
    Returns
    -------------------------------------
    ndarray
        transformed image (a copy; the input is not modified)
    """
    remove_x1, remove_y1, remove_x2, remove_y2 = remove_coord
    imgcopy = img.copy()
    # find neighboring row indices just below and above the box
    bot_neighbor = remove_y2 + 1
    top_neighbor = remove_y1 - 1
    # ensure neighbors are not past the edge of the image
    # NOTE(review): only bot_neighbor == img.shape[0] and top_neighbor == 0
    # are treated as edge cases; a box starting at row 0 gives
    # top_neighbor == -1 and falls through to the general branch, where
    # row -1 wraps to the last row — confirm this is intended.
    if bot_neighbor == img.shape[0]:
        # Box touches the bottom edge: fill every row from the row above.
        for i in range(remove_y1, remove_y2):
            imgcopy[i, remove_x1:remove_x2, :] = imgcopy[
                top_neighbor,
                remove_x1:remove_x2,
                :
            ]
    elif top_neighbor == 0:
        # Box starts at row 1: fill every row from the row below.
        for i in range(remove_y1, remove_y2):
            imgcopy[i, remove_x1:remove_x2, :] = imgcopy[
                bot_neighbor,
                remove_x1:remove_x2,
                :
            ]
    # replace remove_coord with neighboring values on top and bottom
    else:
        # General case: top half copies the row above, bottom half the row
        # below, split at the box's vertical midpoint.
        ylim = remove_y2 - ((remove_y2 - remove_y1) // 2)
        for i in range(remove_y1, remove_y2):
            if i <= ylim:
                imgcopy[i, remove_x1:remove_x2, :] = imgcopy[
                    top_neighbor,
                    remove_x1:remove_x2,
                    :
                ]
            else:
                imgcopy[i, remove_x1:remove_x2, :] = imgcopy[
                    bot_neighbor,
                    remove_x1:remove_x2,
                    :
                ]
    return imgcopy
import os
def create_directory(target_path, directory_name):
    """Creates a directory in the target path if it doesn't already exist
    :param target_path: The path to the directory that will contain the new one
    :param directory_name: The name of the directory to create in the target path
    :return: The path to the newly created (or already existing) directory
    """
    path = os.path.join(target_path, directory_name)
    # exist_ok avoids the check-then-create race of the previous
    # os.path.exists + os.makedirs sequence.
    os.makedirs(path, exist_ok=True)
    return path
def get_datetime(filename):
    """Read a day-of-month from *filename* and generate a YouTube-compatible
    ISO-8601 datetime string for that day in August 2021.

    :param filename: path to a text file containing the day number (1-31)
    :return: e.g. "2021-08-05T00:00:00Z"
    """
    # Read-only mode is enough (the previous "r+" mode never wrote), and
    # `with` guarantees the handle is closed.  strip() removes a trailing
    # newline that would otherwise end up inside the result string.
    with open(filename, "r") as date_file:
        day = date_file.read().strip()
    # zfill pads "5" -> "05" and leaves "05"/"12" untouched (the previous
    # manual "0" + date prepend produced "005" for already-padded input).
    day = day.zfill(2)
    return "2021-08-" + day + "T00:00:00Z"
def sum_of_multiples(limit, multiples):
    """
    Find the sum of all the unique multiples of particular numbers
    up to but not including that number.
    :param limit int - The exclusive upper bound for the multiples.
    :param multiples list - The base numbers whose multiples are summed.
    :return int - The sum of every value below limit that is a multiple of
        at least one of the given numbers (each value counted once).
    """
    # Walk every candidate below the limit once; each value is counted at
    # most one time even when it is a multiple of several of the numbers.
    # The previous factor-by-factor loop stopped as soon as the FIRST list
    # element's multiples exceeded the limit, silently missing multiples of
    # the other numbers whenever the list was not sorted ascending, and
    # relied on a fragile "no change for 10 iterations" stall counter.
    # `m and` skips zero to avoid a division-by-zero on `value % m`.
    return sum(
        value
        for value in range(1, limit)
        if any(m and value % m == 0 for m in multiples)
    )
import platform
def getOutputFolderAndName(filePath):
    """
    Description: Split a file path into its containing folder and file name.
    Params:
        - filePath: path to the dataset directory
    Returns:
        - (outputFolder, outputName) tuple; outputFolder keeps its trailing
          separator, outputName is the last path component
    """
    # Windows paths use backslashes; everything else uses forward slashes.
    separator = '\\' if platform.system() == 'Windows' else '/'
    components = filePath.split(separator)
    outputName = components[-1]
    # Keep everything up to (and including) the final separator.
    outputFolder = filePath[:len(filePath) - len(outputName)]
    return (outputFolder, outputName)
def balanced ( expression , left = '([' , right = ')]' ) :
    """Simple utility to check balanced parenthesis/brackets, etc...
    >>> expression = ' .... '
    >>> ok = balanced ( expression )
    """
    assert left and len(left) == len ( right ) ,\
           'balanced: invalid left/right arguments!'
    pending = []
    for ch in expression:
        if ch in left:
            pending.append(ch)
        elif ch in right:
            # The closer must match the most recent opener.
            expected_opener = left[right.index(ch)]
            if pending and pending[-1] == expected_opener:
                pending.pop()
            else:
                return False
    # Balanced only when every opener has been closed.
    return not pending
def kullanicidanDegerAta(_kapasite, _agirliklar, _valueInt):
    """
    Pass through the knapsack inputs supplied by the user: capacity,
    item weights and item values are returned as a tuple in that order.
    """
    return _kapasite, _agirliklar, _valueInt
def add_is_checked(values_list, urlfilter, to_str=False, replace_none=False):
    """Append a selected/unselected boolean to each row of a values list.

    Used by list views to drive refinement checkboxes: each row gains a
    trailing boolean saying whether its first element appears in the
    comma-separated *urlfilter* (the query parameters for this category).
    With *replace_none*, the sentinel "99" in the filter is treated as the
    string "None" so rows whose key is None can be matched.

    Arguments:
    - `values_list`: queryset values_list of rows for the checkboxes
    - `urlfilter`: url query parameters associated with this category
    """
    if not urlfilter:
        # No filter active: rows are returned untouched.
        return values_list
    selected = urlfilter.split(",")
    if replace_none:
        # "99" is the URL stand-in for a None key.
        selected = ["None" if token == "99" else token for token in selected]
    rows = [list(row) for row in values_list]
    for row in rows:
        row.append(str(row[0]) in selected)
    return rows
def pick_closing_interval(decl:str,end_position,token_end,token_begin):
    """
    Extract the balanced interval that closes at ``end_position``, scanning
    backwards towards the start of the string (inclusive).
    e.g. "A(()(()))", len(the_string_on_the_left)-1, ")", "(" -> "(()(()))"

    Raises ValueError when ``decl[end_position]`` is not the closing token,
    or when no matching opening token exists.
    """
    if decl[end_position] != token_end:
        raise ValueError
    depth = 0
    cursor = end_position
    # Walk right-to-left, tracking nesting depth until it returns to zero.
    while cursor >= 0:
        ch = decl[cursor]
        if ch == token_end:
            depth += 1
        elif ch == token_begin:
            depth -= 1
        if depth == 0:
            return decl[cursor:end_position + 1]
        cursor -= 1
    # Scanned the whole prefix without balancing: unmatched closing token.
    raise ValueError
def parse_subset_vht_from_tap_err(subset_vht_file):
    """
    Parse the VHT (total cost as vehicle hours travelled) from the
    stderr output of tap_frankwolfe_mpi -s for each subset (size > 2)
    of changes in mods file (input to tap_frankwolfe_mpi).
    The id strings are separated by a '+' character in this format e.g.
    03_03_0101+03_95_0001+03_96_0024

    Parameters:
        subset_vht_file - file of the output
                          (stderr of tap_frankwolfe_mpi -s) of the tap
                          model with mods_file as input,
                          giving subset (size>2) changes

    Return value:
        dict { set : vht }
        where the key is a frozenset of upgrade id strings
    """
    set_vht_dict = {}  # dict { frozenset(ids) : vht }
    # Use a context manager so the file handle is closed deterministically
    # (the original left it open).
    with open(subset_vht_file) as f:
        for line in f:
            sline = line.split()
            # Expected token layout: ... <ids> VHT ... <value>; require
            # index 5 to exist so malformed lines are skipped, not fatal.
            if len(sline) > 5 and sline[3] == "VHT":
                idstrings = sline[2]
                vht = float(sline[5])
                idset = set(idstrings.split("+"))
                set_vht_dict[frozenset(idset)] = vht
    return set_vht_dict
def x_func(i):
    """Identity helper: return *i* unchanged (the function "x", for use by other functions)."""
    value = i
    return value
def middleOfRect(rect):
    """Return the integer middle point (x, y) of rectangle ``(x, y, w, h)``."""
    x, y, w, h = rect
    # int(.../2.0) truncates toward zero, matching the original behaviour.
    center_x = x + int(w / 2.0)
    center_y = y + int(h / 2.0)
    return (center_x, center_y)
def parser_of_connection(data):
    """ Parse the special measurement id for connection events
    Args:
        data (dict): see the following example
        {u'msm_id': 7000, u'timestamp': 1470134587, u'prefix': u'80.100.0.0/15', u'event': u'disconnect',
        u'controller': u'ctr-ams07', u'prb_id': 15093, u'type': u'connection', u'asn': 3265}
    Returns:
        dict, event name as key, either disconnect or connect, timestamp as value
    """
    # Missing keys yield {'None': None} — the event name is stringified.
    return {str(data.get('event', None)): data.get('timestamp', None)}
def auto_raytracing_grid_resolution(source_fwhm_parcsec, grid_resolution_scale=0.0002, ref=10., power=1.):
    """
    Return a grid resolution factor in units arcsec/pixel appropriate for
    magnification computations with finite-size background sources, scaled
    as a power law of the source size. This fit is calibrated for source
    sizes (interpreted as the FWHM of a Gaussian) in the range 0.1 - 100 pc.

    :param source_fwhm_parcsec: the full width at half max of a Gaussian background source
    :return: an appropriate grid resolution for finite-size background magnification computation
    """
    size_ratio = source_fwhm_parcsec / ref
    return grid_resolution_scale * size_ratio ** power
def pad_bits(bit_list, desired_size):
    """
    Left-pad *bit_list* with 0's so its length equals *desired_size*.

    Args:
        bit_list: a list of 0's and 1's
        desired_size: an integer representing the desired size of the bit list
    Returns:
        The input bit_list with leading 0's inserted so that it has
        length desired_size.
    Raises:
        ValueError: The desired size is smaller than the binary to be padded.
    """
    padding = desired_size - len(bit_list)
    if padding < 0:
        raise ValueError("Binary number is larger than desired size!")
    return [0] * padding + bit_list
def strip_paths(paths):
    """
    Remove repeated edges: for each path, collapse immediate
    back-and-forth steps (a -> b -> a becomes a).
    """
    stripped = []
    for path in paths:
        cleaned = []
        for node in path:
            # Revisiting the node two steps back cancels the detour.
            if len(cleaned) >= 2 and node == cleaned[-2]:
                cleaned.pop()
            else:
                cleaned.append(node)
        stripped.append(cleaned)
    return stripped
def expand_boundaries(boundaries, start_boundary, expansion):
    """Push boundaries out by *expansion*, starting at index *start_boundary*."""
    unchanged = boundaries[:start_boundary]
    shifted = [value + expansion for value in boundaries[start_boundary:]]
    return unchanged + shifted
import re


def parse_map_file(path):
    """
    Parse libgccjit.map, returning the symbols in the API as a list of str.

    A symbol line is an indented, lowercase identifier terminated by ';'.
    """
    # Raw string avoids the invalid-escape-sequence warning; compile once
    # outside the loop instead of re-matching the pattern per line.
    symbol_re = re.compile(r'^\s+([a-z_]+);$')
    syms = []
    # Context manager closes the file deterministically (original leaked it).
    with open(path) as f:
        for line in f:
            m = symbol_re.match(line)
            if m:
                syms.append(m.group(1))
    return syms
import sys


def ensure_unicode_string(value):
    """
    Return the given ``value`` as unicode string.

    If the given ``value`` is not a unicode string, but a byte string, it is
    decoded with the filesystem encoding (as in
    :func:`sys.getfilesystemencoding()`).
    """
    # On Python 3, `six.text_type` is just `str`; drop the six dependency.
    if not isinstance(value, str):
        value = value.decode(sys.getfilesystemencoding())
    return value
import requests
def safe_getjson(url):
    """Fetch *url* and return the ``data`` field of its JSON reply.

    Raises RuntimeError with the reply's ``description`` when its
    ``success`` flag is falsy.
    """
    reply = requests.get(url).json()
    if not reply.get('success'):
        raise RuntimeError(reply.get('description'))
    return reply.get('data')
def drop_outliers_quantile(df, upper=0.99, lower=0):
    """
    Drop Outliers by Quantiles

    Deletes observations classified as outliers: any row holding a value
    outside the [lower, upper] quantile bounds of its column.

    Parameters
    ----------
    df : pandas DataFrame
        DataFrame to be cleaned. `df` must contain only numerical values.
        Modified in place (rows dropped) and also returned.
    upper : float
        Upper quantile boundary (0..1, bigger than `lower`). Default 0.99.
    lower : float
        Lower quantile boundary (0..1, smaller than `upper`). Default 0.

    Returns
    -------
    pandas DataFrame
    """
    total_rows = df.shape[0]
    upper_bound = df.quantile(upper)
    lower_bound = df.quantile(lower)
    print('-' * 25)
    print('OUTLIERS DELETION: QUANTILE METHOD\n')
    outlier_index = set()
    for col in df.columns.values:
        # Collect row labels falling outside either quantile bound.
        outlier_index.update(df[col][df[col] > upper_bound[col]].index.values)
        outlier_index.update(df[col][df[col] < lower_bound[col]].index.values)
    outlier_index = list(outlier_index)
    count = len(outlier_index)
    df.drop(outlier_index, inplace=True)
    print('Lower quantile: {} | Upper quantile: {}'.format(lower, upper))
    print('N of deleted rows: {} | % of deleted rows: {}%'.format(
        count, round(100 * (count / total_rows), 3)))
    return df
import bisect
def get_class_idx_from_index(index, cumsum_examples_per_class):
    """
    Map a flat example index to its class index.

    Args:
        index: flat index of the example across all classes.
        cumsum_examples_per_class: cumulative per-class example counts
            (sorted ascending), starting at the first class's offset.
    Returns:
        The zero-based class index owning *index*.
    """
    # bisect_right locates the rightmost boundary <= index; -1 turns the
    # insertion point into the owning class's position.
    class_idx = bisect.bisect_right(cumsum_examples_per_class, index) - 1
    assert class_idx >= 0, 'cannot have negative classes'
    return class_idx
def calc_item_price(m: dict, count : int) -> int:
    """Compute the price of the *count*-th item from the item master *m*.
    (Original docstring translated from Japanese.)

    Uses the four price coefficients price1..price4:
    (price3 * count + 1) * price4 ** (price1 * count + price2)
    """
    p1 = m['price1']
    p2 = m['price2']
    p3 = m['price3']
    p4 = m['price4']
    base = p3 * count + 1
    growth = p4 ** (p1 * count + p2)
    return base * growth
import argparse
def cmdLineParser():
    """
    Build the command line parser for resampSlc and parse sys.argv.

    Returns the parsed :class:`argparse.Namespace`.
    """
    arg_parser = argparse.ArgumentParser(description="""
        Run resampSlc.""",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)

    # Required arguments
    arg_parser.add_argument('--reference', type=str, required=True,
            help='File path for reference HDF5 product.')
    arg_parser.add_argument('--secondary', type=str, required=True,
            help='File path for secondary HDF5 product that may contain a resampled SLC raster.')
    arg_parser.add_argument('--frequency', type=str, required=True,
            help='Frequency of SLC.')
    arg_parser.add_argument('--polarization', type=str, required=True,
            help='Polarization of SLC.')

    # Optional arguments
    arg_parser.add_argument('--secondaryRaster', type=str, action='store', default='',
            help='Path to resampled SLC raster file. Use if resampled raster not in HDF5.')
    arg_parser.add_argument('--azband', type=float, action='store', default=0.0,
            help='Azimuth bandwidth for azimuth commonband filtering.')
    arg_parser.add_argument('--rgoff', type=str, action='store', default='',
            help='Filename for range offset raster for range commonbad filtering.')
    arg_parser.add_argument('--alks', type=int, action='store', default=1,
            help='Number of looks to apply in the azimuth direction.')
    arg_parser.add_argument('--rlks', type=int, action='store', default=1,
            help='Number of looks to apply in the range direction.')
    arg_parser.add_argument('--cohFilePath', type=str, action='store', default='crossmul/crossmul.coh',
            help='Coherence output directory and file name.')
    arg_parser.add_argument('--intFilePath', type=str, action='store', default='crossmul/crossmul.int',
            help='Interferogram output directory and file name.')
    arg_parser.add_argument('-g', '--gpu', action='store_true', default=False,
            help='Enable GPU processing.')

    # Parse sys.argv and hand the namespace back.
    return arg_parser.parse_args()
def decode_ascii(byte_msg) -> str:
    """Decodes a byte array to a string.

    Values outside 0..255 are replaced by 35 (the ``#`` character) before
    decoding; undecodable bytes become U+FFFD via the 'replace' handler.
    Unlike the original, the caller's sequence is never mutated.

    :param Union[bytes, bytearray] byte_msg:
        The bytes (or list of ints) to decode.
    :rtype str
        The decoded string, '' for empty input, or the sentinel
        '### FORMAT ERROR ###' when the input cannot be interpreted.
    """
    if len(byte_msg) == 0:
        return ""
    try:
        # Build a sanitized copy instead of writing back into byte_msg.
        sanitized = bytes(b if 0 <= b <= 255 else 35 for b in byte_msg)
        return sanitized.decode('ascii', 'replace')
    except (TypeError, ValueError):
        # Non-integer elements (or an unconvertible sequence): keep the
        # original sentinel instead of a bare `except:` swallow-all.
        return "### FORMAT ERROR ###"
from typing import List
from pathlib import Path
def cmd_remove_path(value: List[str]) -> str:
    """Renders the code to remove directories from the path in cmd.

    :param value: A list of directories to strip from %PATH%.
    :return: The cmd snippet, one ``set PATH=...`` line per entry.
    """
    lines = []
    for entry in value:
        # Path here ensures normalization of separators.
        lines.append(f"set PATH=%PATH:{Path(entry)};=%")
    return "\n".join(lines)
def _write_genome_info(info_path, _dict):
"""write genome info to file from dict
"""
try:
genome_info = open(info_path, "w")
for key, value in list(_dict.items()):
genome_info.write(str(key))
genome_info.write("\t")
genome_info.write(str(value))
genome_info.write("\n")
except IOError:
return False
genome_info.close()
return True | 413142055dcb19f9962a42e986ad4cdc9dfdc7e3 | 695,166 |
def get_my_env(app):
    """
    Gets the env name of the currently running environment
    :param app: handle to Pyramid app
    :return: current env
    """
    settings = app.registry.settings
    # 'env.name' is presumably one of the deployment environment names.
    return settings.get('env.name')
def preprocess_files_fast(infiles, tmpd) :
    """_Do not_ run the sound preprocessing stage.
    Returns: outfiles (a shallow copy of infiles; tmpd is ignored)
    """
    return infiles.copy()
def calculate_answers(puzzle_data):
    """ Sum the number of unique answers per group
    (distinct characters within each group, newlines ignored). """
    return sum(len(set(group.replace("\n", ""))) for group in puzzle_data)
from warnings import warn
def normalize_username(username):
    """Make sure username is lowercase (None passes through unchanged)."""
    if username is None:
        return None
    username = str(username)
    if username.islower():
        return username
    # Warn before silently lowering a mixed/upper-case name.
    warn(
        f"Note that contributor names are not case-sensitive. "
        f"'{username.lower()}' will be used instead of '{username}'"
    )
    return username.lower()
import re
def re_contains(a, b):
    """Return True if a case-insensitive regex search with pattern b yields a match in a.

    Args:
        a (str): Text to search within
        b (str): Regex pattern to search for
    Returns:
        result (bool): Whether a contains a match of b.
    Raises:
        TypeError: when b cannot be compiled to a regex pattern.
    """
    try:
        pattern = re.compile(b, flags=re.IGNORECASE)
    except TypeError:
        raise TypeError('Value must be a string that can be compiled to regex expression')
    return re.search(pattern, a) is not None
def config(base_config):
    """:py:class:`nemo_nowcast.Config` instance from YAML fragment to use as config for unit tests."""
    # Fixture pass-through: the base configuration is used unchanged.
    return base_config
def get_election_years():
    """Implement presidential: return the list of election years."""
    years = (1982, 1988, 1994, 1999, 2005, 2010, 2015, 2019)
    return list(years)
def get_squares_with_free_edge(
        square_to_edges, edge_to_squares, num_free_edges):
    """Get squares with num_free_edges number of edges that don't connect.

    An edge is 'free' when it belongs to exactly one square.
    """
    result = []
    for square, edges in square_to_edges.items():
        free_count = sum(1 for edge in edges
                         if len(edge_to_squares[edge]) == 1)
        if free_count == num_free_edges:
            result.append(square)
    return result
def search_by_keys(key, value, connection):
    """Find messages in a mailbox by a search key and value.

    For example: search_by_keys('FROM', 'user@example.com', connection),
    where connection is an open connection to the mailbox. Returns the
    data part of the server's reply.
    """
    _, data = connection.search(None, key, '"{}"'.format(value))
    return data
def read_file(path):
    """Reads a given file, returns str(file_content).

    The file is opened in 'a+' mode so a missing file is created (and
    reads as empty) instead of raising FileNotFoundError.
    """
    with open(path, 'a+') as f:
        f.seek(0, 0)
        # Return the content verbatim; the old `'\n'.join(f.readlines())`
        # doubled every line break, since readlines() keeps trailing '\n'.
        return f.read()
def merge_resources(resource1, resource2):
    """
    Updates a copy of resource1 with resource2 values and returns the merged dictionary.
    Neither input is modified; on duplicate keys resource2 wins.

    Args:
        resource1: original resource
        resource2: resource to update resource1
    Returns:
        dict: merged resource
    """
    combined = resource1.copy()
    combined.update(resource2)
    return combined
import re
def get_strings_inside_parentheses(group_name,
                                   card_text,
                                   literal_dictionary,
                                   use_string=False):
    """Return every line found inside a ``group_name ( ... )`` statement.

    Each line has ``_LITERAL_<n>`` placeholders replaced with their values
    from ``literal_dictionary`` before being added to the result.

    :param group_name: name of the group preceding the parentheses.
    :param card_text: text to scan for the group.
    :param literal_dictionary: maps ``_LITERAL_<n>`` placeholders to text.
    :param use_string: selects which regex group to extract.
        NOTE(review): the pattern below defines only one capture group, so
        ``group(2)`` raises IndexError when use_string is True — confirm
        the intended pattern for that branch.
    :return: list of lines, or an empty list when the group is not found.
    """
    results = list()
    # Capture everything between the first '(' after group_name and the
    # next ')' — nested parentheses are not supported by this pattern.
    find_group_regex = r'%s\s*\(([^\)]*)\)' % group_name
    search_object = re.search(find_group_regex, card_text, flags=0)
    whole_group_string = ""
    if search_object:
        if use_string:
            whole_group_string = search_object.group(2)
        else:
            whole_group_string = search_object.group(1)
        for line in whole_group_string.split("\n"):
            if line: #Ignore empty lines
                line = line.strip()
                find_literal_regex = r'(_LITERAL_\d+)'
                # Replace every literal on the line, one occurrence class
                # at a time, until no placeholder remains.
                match_literal_object = re.search(find_literal_regex, line, flags=0)
                while match_literal_object:
                    literal = match_literal_object.group(1)
                    line = line.replace(literal, literal_dictionary[literal])
                    match_literal_object = re.search(find_literal_regex, line, flags=0)
                results.append(line)
        return results
    else:
        return list()
def upilab5_4_10et11 () :
    """5.4.10. Exercise UpyLaB 5.14 - green/blue/red path
    (After an idea by Jacky Trinh - 11/02/2018)
    We can define the distance between two words of the same length (i.e.
    having the same number of letters) mot_1 and mot_2 as the minimum number
    of single-letter changes needed to turn mot_1 into mot_2 (Hamming
    distance).
    For example, the words "lire" and "bise" are at distance 2, since the
    "l" and the "r" of "lire" must be changed to obtain "bise".
    Write a function distance_mots(mot_1, mot_2) that returns the distance
    between two words. You may assume both words have the same length and
    are written without accents.
    Example 1: distance_mots("lire", "bise") must return: 2
    Example 2: distance_mots("Python", "Python") must return: 0
    Example 3: distance_mots("merci", "adieu") must return: 5
    """
    def distance_mots(m1, m2):
        """ Compute the Hamming 'distance' between two words; None when lengths differ. """
        distance = len(m1)
        if len(m2) != distance :
            distance = None
        else :
            i = 0
            # Each matching position reduces the count of differing letters.
            for c1 in m1:
                if c1 == m2[i]: distance -= 1
                i += 1
        return distance
    # Self-tests: each entry is (expression to eval, expected result).
    test = [('distance_mots("lire", "bise")', 2),('distance_mots("Python", "Python")',0),
            ('distance_mots("merci", "adieu")', 5)]
    for (t, r) in test :
        print (t, " devrait donner ", r, " et donne ", eval(t) )
        print("Test réussi ? : ", eval(t) == r )
    """5.4.11. Exercise UpyLaB 5.15 - red path
    (After an idea by Jacky Trinh - 11/02/2018)
    Joao has just arrived in our country from Portugal. He still struggles
    with the French language: despite considerable effort, he makes a
    spelling mistake in almost every word, always at exactly one letter.
    So to write "bonjour" he may type "binjour". To cope, Joao uses a spell
    checker — but today he has an exam and forgot it.
    To help him, write a function correcteur(mot, liste_mots) where mot is
    the word Joao typed and liste_mots is a list of the (correctly spelled)
    words Joao may have meant. The function must return the word with its
    spelling corrected.
    Example 1: correcteur("bonvour", ["chien", "chat", "train", "voiture",
    "bonjour", "merci"]) must return: "bonjour"
    Example 2: correcteur("chat", ["chien", "chat", "train", "voiture",
    "bonjour", "merci"]) must return: "chat"
    """
    def correcteur(mot1, listeMots):
        """ Search the list for the word differing from the input by the fewest letters and return it. """
        n = len(mot1)
        motCorrige = n * '*'
        if mot1 in listeMots:
            motCorrige = mot1
        else:
            # Widen the accepted distance one step at a time until a
            # candidate of that distance is found.
            dMax = 1
            while motCorrige == n * '*' :
                for mot2 in listeMots :
                    d = distance_mots(mot1, mot2)
                    if d != None :
                        if d == dMax:
                            motCorrige = mot2
                dMax += 1
        return motCorrige
    # Progressively degraded spellings of the word 'intrus'.
    degradeDeMot = ['intrus', 'intris', 'invris', 'invrie', 'cnvrie']
    for mot in degradeDeMot :
        print(mot, " correpsond à ",
              correcteur(mot, ['angle', 'armoire', 'banc', 'bureau', 'carreau', 'chaise', 'dossier', 'escalier', 'lavabo',
                               'lecture', 'marche', 'matelas', 'maternelle', 'meuble', 'mousse', 'peluche', 'placard',
                               'plafond', 'portemanteau', 'poubelle', 'radiateur', 'rampe', 'rideau', 'robinet', 'savon',
                               'serrure', 'serviette', 'sieste', 'silence', 'sommeil', 'sonnette', 'sortie', 'table',
                               'tableau', 'tabouret', 'tapis', 'tiroir', 'toilette', 'aller', 'amener', 'apporter',
                               'appuyer', 'attendre', 'dormir', 'emmener', 'endormir', 'ennuyer', 'entrer', 'fermer',
                               'frapper', 'installer', 'lever', 'ouvrir', 'presser', 'rentrer', 'reposer', 'rester',
                               'sonner', 'tricher', 'venir', 'bonjour', 'crayon', 'stylo', 'pointe', 'mine', 'dessin',
                               'coloriage', 'rayure', 'peinture', 'pinceau', 'couleur', 'craie', 'papier', 'feuille',
                               'carnet', 'carton', 'ciseaux', 'découpage', 'pliage', 'pli', 'colle', 'affaire', 'caisse',
                               'trousse', 'cartable', 'jouet', 'jeu', 'pion', 'domino', 'puzzle', 'cube', 'perle', 'chose',
                               'forme', 'rond', 'tampon', 'livre', 'histoire', 'image', 'album', 'dictionnaire',
                               'magazine', 'catalogue', 'page', 'enveloppe', 'carte', 'affiche', 'alphabet', 'appareil',
                               'cassette', 'chanson', 'chiffre', 'contraire', 'doigt', 'film', 'instrument', 'intrus',
                               'lettre', 'main', 'micro', 'musique', 'nom', 'nombre', 'orchestre', 'ordinateur', 'photo',
                               'pouce', 'question', 'radio', 'sens', 'tambour', 'trompette', 'voix', 'xylophone']))
def personal_top_three(scores):
    """
    Return the top three scores from scores, highest first.
    If there are fewer than three scores, return all of them.
    The input list is not modified (the old in-place sort mutated it).

    param: list of scores
    return: highest (up to) three scores from scores, descending.
    """
    ranked = sorted(scores, reverse=True)
    # Slicing handles the fewer-than-three case naturally.
    return ranked[:3]
import torch
def interpolation(neighbor_weights, idx, region_feature, N):
    """
    Interpolate per-point features from their 3 nearest neighbors, using
    weights produced by *get_interpolation_weights*.

    :param neighbor_weights:
        The weights of the interpolation points. NOTE(review): the
        broadcast against the gathered (B, C, N, 3) features below implies
        this must be (B, N, 3), not (B, N) as originally documented —
        confirm against the caller.
    :param idx: (B, N, 3)
        Flat indices of the interpolation points into the flattened last
        two dims (m * S) of *region_feature*.
    :param region_feature: (B, C, m, S)
    :param N: int
        The number of input points.
    :return:
        region_feature: (B, C, N)
        feature after interpolation
    """
    batch_size, C, _, _ = region_feature.shape
    # Flatten (m, S) -> m*S and gather the 3 neighbor features per point;
    # idx is broadcast across the channel dim via expand.
    region_feature = torch.gather(region_feature.view(batch_size, C, -1), 2,
                                  idx.contiguous().view(batch_size, 1, -1).expand(batch_size, C, N * 3))
    # Reshape back so the 3 neighbors sit on the trailing axis.
    region_feature = region_feature.view(batch_size, C, N, 3)
    # Weighted combination of the 3 neighbors, then reduce over them.
    region_feature = neighbor_weights.unsqueeze(1) * region_feature
    region_feature = region_feature.sum(-1, keepdim=False)
    return region_feature
def convert_diagram_to_relative(diag, max_dim):
    """Convert the persistence diagram using duality. Here,
    add one to the dimension.

    :param diag: persistence diagram in the Gudhi format.
    :type diag: list of tuples (dim, (birth, death)).
    :param max_dim: only points with dim <= max_dim - 1 are considered.
    :return: the input diagram, unchanged (see NOTE below).
    """
    relative_betti = {}
    for p in diag:
        dim = p[0]
        if dim <= max_dim-1:
            # NOTE(review): `get(dim + 1, 1)` re-stores the existing value
            # (or 1), so nothing is ever accumulated, and `relative_betti`
            # is never used afterwards — the function returns `diag`
            # untouched. Confirm whether the relative Betti counts were
            # meant to be computed and returned here.
            relative_betti.update({dim + 1: relative_betti.get(dim + 1, 1)})
    return diag
import argparse
def parse_args():
    """Parse the console arguments for the web crawler.
    (Original docstring translated from Spanish.)
    """
    cli = argparse.ArgumentParser(description='WebCrawler')
    cli.add_argument("--url", help="Starting point", type=str, default="https://www.pudseycomputers.co.uk/")
    cli.add_argument("--name", help="Name of the crawler Default:payacrawler", type=str, default="payacrawler")
    cli.add_argument("--sec", help="Number of seconds", type=int)
    cli.add_argument("--mx", help="The max downloads", type=int)
    cli.add_argument("--keyn", help="Number of keywords", type=int)
    return cli.parse_args()
def simple_closure(s, implications):
    """
    Input: A set of implications and an attribute set s
    Output: The closure of s with respect to implications

    Examples
    ========

    >>> from fca.implication import Implication
    >>> cd2a = Implication(set(('c', 'd')), set(('a')))
    >>> ad2c = Implication(set(('a', 'd')), set(('c')))
    >>> ab2cd = Implication(set(('a', 'b')), set(('c', 'd')))
    >>> imps = [cd2a, ad2c, ab2cd]
    >>> print(simple_closure(set('a'), imps))
    set(['a'])
    >>> print(simple_closure(set(), imps))
    set([])
    >>> simple_closure(set(['b', 'c', 'd']), imps) == set(['a', 'b', 'c', 'd'])
    True
    """
    unused_imps = implications[:]
    new_closure = s.copy()
    changed = True
    while changed:
        changed = False
        # Iterate over a snapshot: removing from the list being iterated
        # (as the original did) silently skips the element after each
        # removal within a pass. The outer loop masked that, but the
        # snapshot makes each pass complete and deterministic.
        for imp in list(unused_imps):
            if imp.premise <= new_closure:
                new_closure |= imp.conclusion
                changed = True
                # An applied implication can never add anything again.
                unused_imps.remove(imp)
    return new_closure
def pick_a_trip(array_data, trip, list_dividing_limit):
    """
    Select the rows of *array_data* whose indices lie in
    [list_dividing_limit[trip], list_dividing_limit[trip + 1]).
    (Original docstring translated from Chinese.)
    """
    lo = list_dividing_limit[trip]
    hi = list_dividing_limit[trip + 1]
    return array_data[lo:hi, :]
def impala_review_list_box(context, addon, reviews):
    """Details page: Show a box with three add-on reviews."""
    # Shallow-copy the template context and inject the box's variables.
    data = dict(context)
    data.update(addon=addon, reviews=reviews)
    return data
def unnormalize_bbox(width, height, xmin, ymin, xmax, ymax):
    """Convert relative bbox coordinates to absolute ones.

    Scales the relative bbox position by the related image size.

    :param width: image width
    :param height: image height
    :param xmin: relative left x coordinate.
    :param ymin: relative upper y coordinate.
    :param xmax: relative right x coordinate.
    :param ymax: relative lower y coordinate.
    :return: absolute (xmin, ymin, xmax, ymax)
    """
    left = xmin * width
    top = ymin * height
    right = xmax * width
    bottom = ymax * height
    return left, top, right, bottom
def _draw_mask_on_image(src_image, mask):
"""
Draw a mask on an image.
Parameters
----------
src_image : np.ndarray
Image.
mask : np.ndarray
Mask.
Returns
-------
np.ndarray
Image with mask.
"""
dst_image = src_image.copy()
dst_image_g = dst_image[:, :, 1]
dst_image_g[mask <= 127] = 255
dst_image_b = dst_image[:, :, 0]
dst_image_b[mask > 127] = 255
return dst_image | 01994bc50219c548ab457c63033ef59beffef5e9 | 695,189 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.