content stringlengths 35 416k | sha1 stringlengths 40 40 | id int64 0 710k |
|---|---|---|
from typing import List
def cal_group_angle(dist_no: int, group: List[str], **kwargs):
    """Helper function for the dandelion data class.

    Distributes len(group) angles evenly across ``dist_no`` (the first
    circle around the center).  Unless an ``all`` keyword is passed, the
    entry at index 5 is removed from the result.
    """
    step = dist_no / len(group)  # angular step between group members
    angles = [step * idx for idx in range(len(group))]
    if "all" not in kwargs:
        del angles[5]
    return angles
def checker_O2(opcodes_list):
    """
    Given a list of opcodes, check that the control-flow structure is well
    formed.

    Only opcodes whose string starts with "3", "4", "5", "7" or "8" are
    considered; the first character encodes the construct ("3" opens an if,
    "4" is its else, "5" its endif, "7" opens a while, "8" its endwhile).

    NOTE(review): an if without an else is rejected (found_else must be set
    before the matching endif) — confirm this is the intended O2 rule.
    """
    # Keep only the structural opcodes ...
    filtered_l = list(filter(lambda s: any(s.startswith(x)
                                           for x in "34578"), opcodes_list))
    # ... reduced to their first (type) character.
    format_l = list(map(lambda s: s[0], filtered_l))
    # Indexes of "4"/"5"/"8" opcodes that were claimed by some opener.
    correct_indexes = []
    for i in range(len(format_l)):
        if format_l[i] == "3":
            found_else = False
            found_endif = False
            indent_level = 0  # nesting depth of inner "3" (if) blocks
            for j in range(i+1, len(format_l)):
                if format_l[j] == "3":
                    indent_level += 1
                elif format_l[j] == "4" and indent_level == 0:
                    # its his else
                    found_else = True
                    correct_indexes.append(j)
                elif format_l[j] == "5":
                    if indent_level == 0:
                        # its his endif
                        if not found_else:
                            return False
                        found_endif = True
                        correct_indexes.append(j)
                        break
                    else:
                        # endif of a nested if
                        indent_level -= 1
            if not found_else or not found_endif:
                return False
        elif format_l[i] == "7":
            indent_level = 0  # nesting depth of inner "7" (while) blocks
            found_endwhile = False
            for j in range(i+1, len(format_l)):
                if format_l[j] == "7":
                    indent_level += 1
                elif format_l[j] == "8":
                    if indent_level == 0:
                        found_endwhile = True
                        correct_indexes.append(j)
                        break
                    else:
                        indent_level -= 1
            if not found_endwhile:
                return False
        else:
            # "4"/"5"/"8": valid only if claimed by an earlier opener.
            if not i in correct_indexes:
                return False
    return True
def hk_aocs_modes(bits: list) -> bytes:
    """Pack a left-to-right bitfield into a single big-endian byte.

    The first element of ``bits`` is the most significant bit (the
    bitfield is read from left to right).

    :param bits: list of 0/1 flags, at most 8 entries
    :return: one byte with the flags packed MSB-first
    """
    enabled_bits = 0
    # Iterate a reversed *view* instead of calling bits.reverse(), which
    # mutated the caller's list as a side effect (bug in the original).
    for i, bit in enumerate(reversed(bits)):
        enabled_bits |= bit << i
    return enabled_bits.to_bytes(1, byteorder='big')
def include_session_id(endpoint_context, client_id, where):
    """Decide whether a session ID should be included for a client.

    :param endpoint_context: context holding provider_info and the client DB
    :param client_id: the client identifier
    :param where: front or back (logout channel)
    :return: True only if the OP supports {where}-channel logout (and sid in
        the logout/ID token) and the client registered a logout URI
    """
    _pinfo = endpoint_context.provider_info
    # The OP must both support {where}-channel logout and be able to pass
    # sid in the logout token and ID token.
    op_capabilities = (
        "{}channel_logout_supported".format(where),
        "{}channel_logout_session_supported".format(where),
    )
    for capability in op_capabilities:
        if not _pinfo.get(capability, False):
            return False
    # Does the client support {where}-channel logout ?
    try:
        endpoint_context.cdb[client_id]["{}channel_logout_uri".format(where)]
    except KeyError:
        return False
    return True
import re
def remove_escape_sequences(string):
    """Collapse whitespace/escape runs into a single space.

    Every contiguous run of characters from "\\r\\n\\t\\v\\b\\f\\a " is
    replaced by exactly one space.
    """
    return re.sub(r'[\r\n\t\v\b\f\a ]+', ' ', string)
def dnac_get_modules(dnac_session, dnac_host, dnac_headers, device_id):
    """DNAC Modules of a Network Device.

    :param dnac_session: requests-like session used for the GET call
    :param dnac_host: DNAC host name
    :param dnac_headers: headers (auth token etc.) for the request
    :param device_id: the network device id to query
    :return: the 'response' payload of the modules endpoint
    """
    url = ('https://%s/dna/intent/api/v1/' % dnac_host
           + 'network-device/module?deviceId=%s' % device_id)
    resp = dnac_session.get(url, verify=False, headers=dnac_headers)
    resp.raise_for_status()
    return resp.json()['response']
def clean_comment(comment):
    """Extract a serializable dict from a praw-style comment object.

    :param comment: object exposing .author.name, .body, .ups and .fullname
    :return: dict with empty-string values replaced by "None"
    """
    try:
        name = comment.author.name
    except AttributeError:
        # Deleted accounts have author == None; keep the best-effort
        # fallback, but no longer swallow unrelated exceptions with a
        # bare except.
        name = "None"
    data = {
        "author": name,
        "body": comment.body,
        "ups": comment.ups,
        "fullname": comment.fullname,
    }
    return {k: ("None" if v == "" else v) for k, v in data.items()}
import argparse
def parse_that_shit():
    """Parse that shit (a single positional ``filename`` argument)."""
    arg_parser = argparse.ArgumentParser(description="Take filename.")
    arg_parser.add_argument("filename", help="Filename of report.")
    parsed = arg_parser.parse_args()
    return parsed
import re
def extract_patents(line, default_country=None):
    """Extract a list of patent number strings from free text.

    Supports standard patent publication numbers like WO2012066519, with
    tolerance for spaces, punctuation and slashes, and keeps application
    numbers such as PCT/IB2011055210.  If an iso2 country code is provided,
    a bare number prefixed by it can also match.

    :param line: the free text line
    :param default_country: optional default iso2 country code
    :return: list of normalized patent number strings
    """
    prefix = "" if default_country is None else " " + default_country
    haystack = prefix + " " + line + " "
    matches = re.findall("(?:[^a-z])((?:pct/?)?[a-z]{2})([0-9 ,/]{6,})",
                         haystack.lower())
    result = []
    for country_part, number_part in matches:
        digits = re.sub("[ ,-/]", "", number_part)
        if len(digits) < 5:
            continue
        result.append(country_part.upper() + digits)
    return result
import json
def convert_organization_response(fetch_rows):
    """Convert fetched organization rows into the response JSON structure.

    Args:
        fetch_rows: rows fetched (select) from the organization table
    Returns:
        list of response dicts, one per fetched row
    """
    def _to_response_row(row):
        created = row['create_at']
        updated = row['update_at']
        return {
            'organization_id': row['organization_id'],
            'organization_name': row['organization_name'],
            'additional_information': json.loads(row['additional_information']),
            'create_at_str': created.strftime("%Y/%m/%dT%H:%M:%SZ"),
            'create_at_int': created.timestamp(),
            'update_at_str': updated.strftime("%Y/%m/%dT%H:%M:%SZ"),
            'update_at_int': updated.timestamp(),
        }

    return [_to_response_row(row) for row in fetch_rows]
def floyd_warshall(G):
    """Find all shortest paths in a dense integer weighted directed graph.

    Implements the Floyd-Warshall algorithm; complexity is cubic, O(n**3).

    Arguments:
        G: square matrix (List[List[int]]); G[i][j] is the cost i -> j.
    Returns:
        (D, P) where D[u][v] is the shortest distance from u to v and
        P[u][v] is the node immediately after u on the shortest path u -> v.
    Raises:
        ValueError: if a negative cycle exists in G.
    """
    n = len(G)
    # D starts as a copy of the edge costs, P as the identity successors.
    D = [row[:] for row in G]
    P = [list(range(n)) for _ in range(n)]
    # Relax all pairs through every intermediate node k.
    for k in range(n):
        for u in range(n):
            for v in range(n):
                candidate = D[u][k] + D[k][v]
                if candidate < D[u][v]:
                    D[u][v] = candidate
                    P[u][v] = P[u][k]
    # A negative diagonal entry means a negative-weight cycle.
    if any(D[v][v] < 0 for v in range(n)):
        raise ValueError("Graph contains a negative-weight cycle")
    return (D, P)
def unf_bo_below_pb_m3m3(rho_oil_st_kgm3=820, rs_m3m3=100, rho_oil_insitu_kgm3=700, gamma_gas=0.8):
    """Oil formation volume factor (McCain) below bubble point pressure.

    :param rho_oil_st_kgm3: density of stock-tank oil, kg/m3
    :param rs_m3m3: solution gas-oil ratio, m3/m3
    :param rho_oil_insitu_kgm3: oil density at reservoir conditions, kg/m3
    :param gamma_gas: specific gas density (by air)
    :return: formation volume factor bo, m3/m3

    ref1: McCain W.D., Spivey J.P., Lenn C.P., Petroleum Reservoir Fluid
    Property Correlations, third edition, 2011
    """
    # The coefficient 1.22044505587208 converts Rs * gamma_gas into the
    # dissolved-gas mass term (unit conversion of the original correlation).
    dissolved_gas_mass = 1.22044505587208 * rs_m3m3 * gamma_gas
    return (rho_oil_st_kgm3 + dissolved_gas_mass) / rho_oil_insitu_kgm3
def content_is_xml(maintype):
    """Check whether the given content (MIME) type is an XML type.

    (The original docstring wrongly said "is an image".)

    :param maintype: MIME type string, e.g. "text/xml"
    :return: True for "text/xml" or "application/xml"
    """
    return maintype in ("text/xml", "application/xml")
from typing import Callable
def euler_step(f: Callable[[float, float], float], t_k: float, y_k: float, h: float) -> float:
    """Advance one explicit (forward) Euler step.

    Parameters:
        f: the derivative whose integral is being approximated, f(t, y)
        t_k: current time
        y_k: current value
        h: step size
    Returns:
        the approximation of y at t_k + h
    """
    slope = f(t_k, y_k)
    return y_k + slope * h
def flatten(alst):
    """Recursively flatten arbitrarily nested iterables into a flat list.

    Strings and bytes are treated as atoms: recursing into them would loop
    forever in Python 3 because each character is itself an iterable
    (fixes an infinite-recursion bug in the original).

    >>> flatten([0, [1, (2, 3), [4, [5, [6, 7]]]], 8])
    [0, 1, 2, 3, 4, 5, 6, 7, 8]
    """
    def _recur(blst):
        for elem in blst:
            if hasattr(elem, "__iter__") and not isinstance(elem, (str, bytes)):
                yield from _recur(elem)
            else:
                yield elem
    return list(_recur(alst))
def create_non_rym_dataframe(dataframe):
    """Select the movies that are on OMDb but not on RYM.

    Args:
        dataframe: dataframe of movies with omdb and rym info, including a
            boolean 'in_rym' column
    Returns:
        DataFrame with imdb_id, title and year of the non-RYM movies
    """
    # `not series` raises "truth value of a Series is ambiguous" in pandas;
    # element-wise negation requires the ~ operator (bug in the original).
    return dataframe.loc[~dataframe['in_rym'], ['imdb_id', 'title', 'year']]
def bson2bytes(bval: bytes) -> bytes:
    """Decode a BSON Binary value as raw bytes (identity pass-through)."""
    return bval
def rivers_with_station(stations):
    """Given a list of stations, return the set of all river names
    contained within these stations."""
    return {station.river for station in stations}
def parse_darkhorse(input_f, output_fp, low_lpi=0.0, high_lpi=0.6):
    """Parse DarkHorse output (smry file) for putative HGT-derived genes.

    Parameters
    ----------
    input_f : iterable of str
        open file handle over the DarkHorse smry lines (header included)
    output_fp : str
        filepath for writing the best-hit genome IDs (skipped if falsy)
    low_lpi : float
        lower LPI (lineage probability index) score bound (exclusive)
    high_lpi : float
        upper LPI score bound (exclusive)

    Returns
    -------
    str
        one putative HGT-derived gene per line; tab-separated columns:
        query_id, besthit_id, tax_id, species, lineage, pct_id,
        pct_coverage, norm_LPI
    """
    best_hit_ids = set()
    hgts = []
    next(input_f)  # skip header
    for line in input_f:
        fields = line.strip('\r\n').split('\t')
        best_hit_ids.add(fields[3])
        # Keep only hits whose LPI falls strictly inside the bounds.
        if low_lpi < float(fields[5]) < high_lpi:
            hgts.append('\t'.join((fields[0], fields[3], fields[12],
                                   fields[13], fields[14], fields[6],
                                   fields[9], fields[4])))
    if output_fp:
        with open(output_fp, 'w') as output_f:
            output_f.write('\n'.join(best_hit_ids))
    return '\n'.join(hgts)
import json
def load_json_file(file):
    """JSON file parser.

    Arguments
    ---------
    file: name of the file to parse, located inside the local ``json`` folder
    Returns
    -------
    The decoded JSON content (dict/list/...)
    """
    path = 'json/{file}'.format(file=file)
    with open(path, 'r', encoding='utf-8') as handle:
        return json.load(handle)
def string_chunker(strin, char_num):
    """Split a string into a list of chunks of ``char_num`` characters.

    :param strin: the string to split
    :param char_num: chunk length (the last chunk may be shorter)
    :return: list of substrings
    """
    chunks = []
    for start in range(0, len(strin), char_num):
        chunks.append(strin[start:start + char_num])
    return chunks
def interceptable_sender(func, key=None):
    """Get the signal sender to intercept a function call.

    :param func: The function that should be intercepted
    :param key: An optional key in case using just the function
        name would be too generic (e.g. most utils)
    """
    base = '{}.{}'.format(func.__module__, func.__name__)
    if key:
        return '{}::{}'.format(base, key)
    return base
def escape(html):
    """Return the given HTML with ampersands, quotes and carets encoded.

    '&' is replaced first so the entities produced by the later
    replacements are not double-escaped.  (The original chained no-op
    replacements such as ``replace('&', '&')`` and escaped nothing.)
    """
    return (html.replace('&', '&amp;')
                .replace('<', '&lt;')
                .replace('>', '&gt;')
                .replace('"', '&quot;')
                .replace("'", '&#x27;'))
def is_float(s: str) -> bool:
    """Check if the input can be turned into a float.

    :param s: input string
    :type s: str
    :return: True if the input can be converted to a float, False otherwise
    :rtype: bool
    """
    try:
        float(s)
    except (TypeError, ValueError):
        # Only conversion failures count as "not a float"; the original
        # bare except would also have swallowed e.g. KeyboardInterrupt.
        return False
    return True
import logging
def bases_to_complete_next_codon(phase):  # pylint: disable=invalid-name
    """
    Return the bases at the exon end that completes the next codon.
    >>> bases_to_complete_next_codon(0)  # e.g. -----XXX
    0
    >>> bases_to_complete_next_codon(2)  # e.g. XX-----X
    2
    >>> bases_to_complete_next_codon(1)  # e.g. X-----XX
    1
    """
    if phase not in {0, 1, 2}:
        logging.warning(
            "Only phases 0, 1 and 2 are allowed for internal exons (phase: %s)",
            phase)
        return None
    return phase
import six
def url_join(url, path):
    """URL version of os.path.join.

    Appends *path* to *url*'s path component, keeping exactly one '/'
    between them (only a single leading '/' of *path* is stripped, as in
    the original).  Scheme, netloc, query and fragment are preserved.

    Uses the stdlib urllib.parse directly; the six py2/py3 shim the
    original relied on is no longer needed.
    """
    from urllib.parse import urlparse, urlunparse
    p = urlparse(url)
    if p.path and p.path[-1] == '/':
        if path and path[0] == '/':
            path = path[1:]
        joined = p.path + path
    else:
        sep = '' if path and path[0] == '/' else '/'
        joined = sep.join([p.path, path])
    # os.sep is different on windows, don't use it here.
    return urlunparse(p[:2] + (joined,) + p[3:])
def db2pow(x):
    """Convert from dB to a power ratio.

    Parameters
    ----------
    x - input in dB
    Returns
    -------
    the corresponding linear power (magnitude) ratio
    """
    return 10.0 ** (x / 10.0)
def gcd(a, b):
    """Greatest common divisor via the Euclidean algorithm.

    With a = kb + r (r = a % b), any common divisor of a and b also
    divides r, hence gcd(a, b) == gcd(b, a % b); iterate until b == 0.
    """
    while b != 0:
        a, b = b, a % b
    return a
def default_feature_func(_, X, t):
    """
    Returns a list of feature strings.
    (Default feature function)
    :param _: unused placeholder (label sequence slot)
    :param X: An observation vector; each X[t] is indexable, with X[t][0]
        the token and X[t][1] its POS tag
    :param t: time (position in the sequence)
    :return: A list of feature strings
    """
    length = len(X)
    features = list()
    # Current position: token and POS unigrams.
    features.append('U[0]:%s' % X[t][0])
    features.append('POS_U[0]:%s' % X[t][1])
    if t < length-1:
        # Right context (+1): token unigram/bigram and POS unigram/bigram.
        features.append('U[+1]:%s' % (X[t+1][0]))
        features.append('B[0]:%s %s' % (X[t][0], X[t+1][0]))
        features.append('POS_U[1]:%s' % X[t+1][1])
        features.append('POS_B[0]:%s %s' % (X[t][1], X[t+1][1]))
        if t < length-2:
            # Right context (+2): token/POS unigrams, POS bigram/trigram.
            features.append('U[+2]:%s' % (X[t+2][0]))
            features.append('POS_U[+2]:%s' % (X[t+2][1]))
            features.append('POS_B[+1]:%s %s' % (X[t+1][1], X[t+2][1]))
            features.append('POS_T[0]:%s %s %s' % (X[t][1], X[t+1][1], X[t+2][1]))
    if t > 0:
        # Left context (-1): token unigram/bigram and POS unigram/bigram.
        features.append('U[-1]:%s' % (X[t-1][0]))
        features.append('B[-1]:%s %s' % (X[t-1][0], X[t][0]))
        features.append('POS_U[-1]:%s' % (X[t-1][1]))
        features.append('POS_B[-1]:%s %s' % (X[t-1][1], X[t][1]))
        if t < length-1:
            # POS trigram centered on the current position.
            features.append('POS_T[-1]:%s %s %s' % (X[t-1][1], X[t][1], X[t+1][1]))
        if t > 1:
            # Left context (-2): token/POS unigrams, POS bigram/trigram.
            features.append('U[-2]:%s' % (X[t-2][0]))
            features.append('POS_U[-2]:%s' % (X[t-2][1]))
            features.append('POS_B[-2]:%s %s' % (X[t-2][1], X[t-1][1]))
            features.append('POS_T[-2]:%s %s %s' % (X[t-2][1], X[t-1][1], X[t][1]))
    return features
def scale_early_stop_indices_to_iterations(stop_indices, plot_step):
    """Scale 'stop_indices' (as produced by find_early_stop_indices(...))
    into early-stopping iteration counts."""
    iterations = (stop_indices + 1) * plot_step
    return iterations
def _spreadsheet_list(api):
"""sheets 파일 리스트(google cloud account 내에 있는 모든 spreadsheets)
Args:
api (obj): gspread object
Returns:
[dict]: {<title>: <id>}
"""
return {
spreadsheet.title: spreadsheet.id
for spreadsheet in api.openall()
} | 8625c5cf7ec7460683ba65b21593b387d33fa8af | 693,533 |
from typing import Tuple
def get_text_dimensions(text: str, font) -> Tuple[int, int]:
    """
    Compute the approximate text size for the given string and font.
    :param text: String of text. Will be split over line breaks
        (empty lines are skipped).
    :param font: The font to use (PIL-style: getmetrics / getmask).
    :return: Width and height of the text.
    :raises TypeError: if the font has no bounding box
        (`font.getmask(text).getbbox()`).
    """
    # https://stackoverflow.com/a/46220683/9263761
    _, descent = font.getmetrics()
    lines = [line for line in text.split('\n') if line]
    width = max(font.getmask(line).getbbox()[2] for line in lines)
    height = sum(font.getmask(line).getbbox()[3] + descent for line in lines)
    return width, height
import configparser
def load_config(config_file_path):
    """Parse a WORC configuration file.

    Arguments:
        config_file_path: path to the configuration file to be parsed.
    Returns:
        settings_dict: dictionary containing all parsed settings.
    """
    parser = configparser.ConfigParser()
    parser.read(config_file_path)
    image_features = parser['ImageFeatures']
    general = parser['General']
    return {
        'ImageFeatures': {
            'image_type': str(image_features['image_type']),
        },
        'General': {
            'FeatureCalculator': str(general['FeatureCalculator']),
            'Preprocessing': str(general['Preprocessing']),
            'PCE': general.getboolean('PCE'),
            'Segmentix': general.getboolean('Segmentix'),
        },
        # Present but unpopulated, as in the original structure.
        'SVMFeatures': dict(),
    }
def find_sequence(doc):
    """
    Finds the (longest) sequence of an sbol component given a document
    containing full sbol.

    Parameters
    ----------
    doc : object of sbol2.document module
        a full depth sbol document
    Returns
    -------
    seq: string
        series of atgc, all lowercase, representing the component sequence
    Example
    -------
    file_in = "https://synbiohub.programmingbiology.org/public/UWYeast_AND_gate/plasmid_0/1/sbol"
    doc = sbol2.Document()
    doc.read(file_in)
    seq = find_sequence(doc)
    """
    seq = ""
    longest = 0
    for top_level in doc:
        if top_level.type != "http://sbols.org/v2#Sequence":
            continue
        # Keep the longest Sequence found in the document.
        if len(top_level.elements) > longest:
            longest = len(top_level.elements)
            seq = top_level.elements.lower()
    return seq
import os
import re
import html
import unicodedata
def sanitize_filename(name):
    """
    Replace reserved characters/names with underscores (windows).

    Also unescapes HTML entities, collapses newlines/tabs into spaces,
    truncates to 125 characters and strips unicode control characters.

    Args:
        name(str): proposed filename (an int is simply stringified)
    Returns:
        str
    """
    if isinstance(name, int):
        return str(name)
    # Platform-specific forbidden characters: on POSIX (os.sep == '/')
    # the backslash is a legal filename character, on windows it is not.
    if os.sep == '/':
        bad_chars = re.compile(r'^\.|\.$|^ | $|^$|\?|:|<|>|\||\*|\"|/')
    else:
        bad_chars = re.compile(r'^\.|\.$|^ | $|^$|\?|:|<|>|/|\||\*|\"|\\')
    # Windows reserved device names (aux, com1..9, con, lpt1..9, prn).
    bad_names = re.compile(r'(aux|com[1-9]|con|lpt[1-9]|prn)(\.|$)')
    # Unescape HTML entities such as &amp;, &lt; and &gt;
    name = html.unescape(name)
    # Replace bad characters with an underscore
    name = bad_chars.sub('_', name)
    # Prefix reserved device names so they become legal filenames.
    if bad_names.match(name):
        name = '_' + name
    # Replace newlines with spaces
    name = name.replace("\r", '')
    name = name.replace("\n", ' ')
    # Yavos (?) — collapses '.\' into '\' then normalizes separators;
    # NOTE(review): purpose of the original author's loop is unclear.
    while name.find('.\\') != -1:
        name = name.replace('.\\', '\\')
    name = name.replace('\\', os.sep)
    # Replace tab characters with spaces
    name = name.replace('\t', ' ')
    # Cut to 125 characters
    if len(name) > 125:
        name = name[:125]
    # Remove unicode control characters (category "C*")
    name = ''.join(char for char in name if unicodedata.category(char)[0] != "C")
    return name.strip()
def strHypInd(i, j):
    """Return a string identifier for the hyperplane of the ith and jth
    observation, regardless of argument ordering (smaller index first)."""
    lo, hi = (j, i) if i > j else (i, j)
    return str(lo) + '-' + str(hi)
import random
def randomMAC(type="xen"):
    """Generate a random MAC address.

    00-16-3e is allocated to XenSource, 52-54-00 is used by qemu/kvm.
    The OUI list is available at http://standards.ieee.org/regauth/oui/oui.txt.
    The remaining 3 octets are random.  Unknown types fall back to "xen".

    The output is lowercase hex ("%02x"); the original doctests wrongly
    expected an uppercase prefix ("00:16:3E") and could never pass.

    >>> randomMAC().startswith("00:16:3e")
    True
    >>> randomMAC("foobar").startswith("00:16:3e")
    True
    >>> randomMAC("xen").startswith("00:16:3e")
    True
    >>> randomMAC("qemu").startswith("52:54:00")
    True
    @return: MAC address string
    """
    ouis = {'xen': [0x00, 0x16, 0x3E], 'qemu': [0x52, 0x54, 0x00]}
    oui = ouis.get(type, ouis['xen'])
    mac = oui + [random.randint(0x00, 0xff) for _ in range(3)]
    return ':'.join("%02x" % octet for octet in mac)
def flat_dict_from_dict(dict_):
    """
    Create a dictionary without depth (dotted keys).

    {'depth0': {'depth1': {'depth2': 'test'}}}
    =>
    {'depth0.depth1.depth2': 'test'}
    """
    flat = {}
    for key, value in dict_.items():
        if isinstance(value, dict):
            # Recurse, prefixing the nested keys with the current key.
            for sub_key, sub_value in flat_dict_from_dict(value).items():
                flat[key + '.' + sub_key] = sub_value
        else:
            flat[key] = value
    return flat
def empty(list):
    """Checks whether the `list` is empty. Returns `true` or `false` accordingly.

    NOTE(review): the parameter shadows the builtin ``list``; renaming it
    would break keyword calls (empty(list=...)), so it is left unchanged.
    """
    return len(list) == 0
def add4(a, h):
    """Add two same-typed values, printing both operands first.

    a: first operand.
    h: second operand; must support ``+`` with *a*.
       (The original docstring documented a non-existent parameter ``b``.)
    """
    print("Value of a:", a)
    print("Value of h:", h)
    return a + h
def moments_get_skew(m):
    """Return the skew computed from image moments (mu11 / mu02)."""
    return m['mu11'] / m['mu02']
def get_sub_dict(data_dict, key_list, default_value='default_value'):
    """
    Extract a sub-dictionary from a dictionary.

    :param:
        * data_dict: (dict) source dictionary to extract from
        * key_list: (list) keys to include in the sub-dictionary
        * default_value: (string) value used when a key is missing,
          defaults to 'default_value'
    :return:
        * sub_dict: (dict) the extracted sub-dictionary

    Example::

        dict1 = {'a': 1, 'b': 2, 'list1': [1, 2, 3]}
        get_sub_dict(dict1, ['a', 'list1', 'no_key'])
        # -> {'a': 1, 'list1': [1, 2, 3], 'no_key': 'default_value'}
    """
    if not isinstance(data_dict, dict):
        raise TypeError('data_dict should be dict, but we got {}'.format(type(data_dict)))
    if not isinstance(key_list, list):
        raise TypeError('key_list should be list, but we got {}'.format(type(key_list)))
    return {key: data_dict.get(key, default_value) for key in key_list}
def date_cmp(item1, item2):
    """
    Compare items for ordering purposes.

    Items without an end_date sort before items with one; otherwise items
    are compared by end_date first, then start_date.

    NOTE(review): the sign convention is inverted relative to a natural
    chronological cmp (earlier date -> 1), which yields newest-first
    ordering under functools.cmp_to_key — confirm this is intended.
    """
    if item1.end_date is None and item2.end_date is None:
        # Both open-ended: fall back to start_date (inverted sign).
        if item1.start_date < item2.start_date:
            return 1
        if item2.start_date < item1.start_date:
            return -1
        return 0
    # An open-ended item always sorts before a closed one.
    if item1.end_date is None:
        return -1
    if item2.end_date is None:
        return 1
    # Compare end dates first (inverted sign) ...
    if item1.end_date < item2.end_date:
        return 1
    if item2.end_date < item1.end_date:
        return -1
    # ... then start dates as the tiebreaker.
    if item1.start_date < item2.start_date:
        return 1
    if item2.start_date < item1.start_date:
        return -1
    return 0
def sanitize_str(string):
    """Sanitize a string to uppercase alphabetic characters only.

    Joins the filtered characters so callers get an actual ``str``; under
    Python 3 the original returned a lazy ``filter`` object, not a string.
    """
    return ''.join(filter(str.isalpha, string.upper()))
def pad_diff(actual_height, actual_width, desired_height, desired_width):
    """Compute the zero-padding that grows an image of
    (actual_height, actual_width) to (desired_height, desired_width).

    :return: (left, top, right, bottom) padding tuple; only the right and
        bottom edges are padded.
    """
    bottom = desired_height - actual_height
    right = desired_width - actual_width
    return (0, 0, right, bottom)
def parse_h5_attr(f, attr):
    """A Python3-safe function for getting hdf5 attributes.

    Byte-string attributes are decoded to str; anything else (including a
    missing attribute, which yields None) is returned unchanged.
    """
    val = f.attrs.get(attr, None)
    return val.decode("utf8") if isinstance(val, bytes) else val
import copy
import random
def default_policy(state):
    """Return the result of a uniformly random playout (rollout) from *state*.

    Plays random legal actions until a terminal state is reached.

    :param state: game state exposing terminal_test(), actions(), result()
        and player()
    :return: -1 or 1 — NOTE(review): the sign is decided by
        _has_liberties() of the *initial* player at the terminal state;
        confirm the convention against the caller.
    """
    # Keep a copy so the initial player can be queried after the rollout.
    init_state = copy.deepcopy(state)
    while not state.terminal_test():
        action = random.choice(state.actions())
        state = state.result(action)
    return -1 if state._has_liberties(init_state.player()) else 1
def invert_dict(d):
    """Invert a dictionary, swapping each key/value pair."""
    return {value: key for key, value in d.items()}
def process_license(license):
    """Convert a plan license string into the zenodo-style structure.

    A Creative Commons license (URL containing
    "creativecommons.org/licenses/") is mapped to an id such as
    "cc-by-4.0"; anything else becomes a custom license entry.

    Parameters
    ----------
    license : str
        A string defining the license

    Returns
    -------
    zlicense : dict
        A modified version of the license dictionary following the api
        requirements
    """
    # The original also computed license.find('Attribution') first and
    # immediately overwrote the result — dead code, removed.
    ind = license.find('creativecommons.org/licenses/')
    if ind == -1:
        return {"description": {"en": license},
                "title": {"en": "Custom license"}}
    variant = license[ind:].split("/")[2]
    return {"id": "-".join(["cc", variant, "4.0"])}
def strhash(*args, **kwargs):
    """
    Generate a string hash value for an arbitrary set of args and kwargs,
    relying on the repr of each element.

    :param args: arbitrary tuple of args.
    :param kwargs: arbitrary dictionary of kwargs.
    :returns: hashed string of the arguments.
    """
    if not kwargs:
        return '%r' % (args, )
    return '%r,%r' % (args, sorted(kwargs.items()))
def over(*, aalpha, oomega):
    """Define the dyadic over ⍥ operator.

    Monadic case (alpha is None):
        f⍥g ⍵    ->   f g ⍵
    Dyadic case:
        ⍺ f⍥g ⍵  ->   (g ⍺) f (g ⍵)
    """
    def derived(*, alpha=None, omega):
        if alpha is None:
            return aalpha(alpha=alpha, omega=oomega(omega=omega))
        return aalpha(alpha=oomega(omega=alpha), omega=oomega(omega=omega))
    return derived
def has_duplicates2(t):
    """Check whether any element appears more than once in a sequence.

    Faster version using a set: duplicates shrink the set's size.
    t: sequence
    """
    return len(t) > len(set(t))
def notebook_name(params, filetype):
    """
    Returns the quoted notebook name, without the .ipynb extension,
    combined with the requested filetype.
    """
    cleaned = [param.replace('/', '') for param in params if param is not None]
    print(cleaned)
    base = cleaned[-1][:-6]  # strip the trailing ".ipynb" (6 characters)
    return '"{}.{}"'.format(base, filetype)
def box_outside_box(v1, v2):
    """Return true if v1 is outside v2.

    NOTE(review): the test actually checks that v1's lower corner 'p1' is
    below v2's and its upper corner 'p2' above v2's on every axis, i.e.
    that v1 encloses v2.
    """
    lower_ok = (v1['p1'] < v2['p1']).all()
    return lower_ok and (v1['p2'] > v2['p2']).all()
def test_default_timeout(monkeypatch, fake_response, aqhttp):
    """Timeout should default to aqhttp.TIMEOUT is timeout is absent from
    aqhttp.post.

    pytest fixtures: monkeypatch, fake_response, aqhttp.
    """
    # A fake requests sessions object
    class mock_request:
        @staticmethod
        def request(method, path, timeout=None, **kwargs):
            # The actual check: the default timeout must have been applied.
            assert timeout == aqhttp.TIMEOUT
            return fake_response(method, path, {}, 200)
    # Replace the requests module used by pydent.aqhttp with the fake.
    monkeypatch.setattr("pydent.aqhttp.requests", mock_request)
    # Call post without a timeout so the default path is exercised.
    aqhttp.post("someurl", json_data={})
def extractOutput(queryResults):
    """Convenience function to extract results from an active query object.

    :param queryResults: an iterable of result objects (each with a
        .toDict() method), a single result object, or a falsy value.
    :return: list of dicts (None for falsy items), a single dict, or None.
    """
    if not queryResults:
        return None
    try:
        return [x.toDict() if x else None for x in queryResults]
    except TypeError:
        # Not iterable: a single result object.
        return queryResults.toDict()
    # (the original had an unreachable `return None` here — removed)
def _get_topic_name(klass):
"""Try to extract the Name of an Admin Topic from the path.
Still somewhat like Django App-Names.
>>> get_app_name('frontend.news.models.NewsItem')
'news'
>>> get_app_name('common.models.Sparepart')
'Sparepart'
"""
if not hasattr(klass, '__module__'):
return u''
components = klass.__module__.split('.')
if len(components) > 3:
return components[-3]
elif len(components) > 2:
return components[-2]
else:
return components[-1] | 7c9ae9a77c980365724e69497d2bb77dfd734419 | 693,561 |
def haversine(lat, lon):
    """
    Build a SQL query that returns store ids ordered by haversine distance
    from (lat, lon).

    To limit the results to a maximum distance, uncomment the WHERE line
    inside the query.

    NOTE(review): lat/lon are interpolated directly into the SQL string —
    ensure they are numeric values (not user-controlled strings) to avoid
    SQL injection.
    """
    return f"""
    SELECT id
    FROM ( SELECT
    id,
    111.045 * DEGREES(ACOS(COS(RADIANS({lat}))
    * COS(RADIANS(latitude))
    * COS(RADIANS(longitude) - RADIANS({lon})) + SIN(RADIANS({lat}))
    * SIN(RADIANS(latitude)))) AS distance_in_km
    FROM proponentes_loja) as distancias
    -- WHERE distancias.distance_in_km <= 10
    """
from pathlib import Path
from typing import Optional
from typing import Union
from typing import List
from typing import Iterator
import itertools
def directories(
    PATH: Path,
    INCLUDES: Optional[Union[str, List[str]]] = None,
    EXCLUDES: Optional[Union[str, List[str]]] = None,
) -> Iterator[Path]:
    """Iterate over the entries of a given path.

    :param PATH: base directory to glob in.
    :param INCLUDES: glob pattern(s) to include (default "*", i.e. all).
    :param EXCLUDES: glob pattern(s) to exclude (default none).
    :return: iterator over the included-minus-excluded paths (unordered,
        since it is built from a set difference).
    """
    # Normalize scalar/None arguments to lists; isinstance replaces the
    # non-idiomatic `type(x) is str` checks, which broke for str subclasses.
    if INCLUDES is None:
        INCLUDES = ["*"]
    elif isinstance(INCLUDES, str):
        INCLUDES = [INCLUDES]
    if EXCLUDES is None:
        EXCLUDES = []
    elif isinstance(EXCLUDES, str):
        EXCLUDES = [EXCLUDES]
    # Filter directory.
    included = itertools.chain.from_iterable(PATH.glob(pat) for pat in INCLUDES)
    excluded = itertools.chain.from_iterable(PATH.glob(pat) for pat in EXCLUDES)
    return iter(set(included) - set(excluded))
import argparse
def get_args(dataset="replica"):
    """
    @Brief: parse input arguments.
    @Usage: args = get_args(dataset)
    @param dataset: dataset name used in the default config and data paths.
    @return: argparse.Namespace with the parsed command-line options.
    """
    parser = argparse.ArgumentParser()
    # Paths: config file and output root for the extracted data.
    parser.add_argument(
        "--config-path",
        type=str,
        default="ss_baselines/av_nav/config/audionav/{}/train_telephone/pointgoal_rgb.yaml".format(
            dataset
        ),
    )
    parser.add_argument(
        "--data_saving_root",
        type=str,
        default=f"data/scene_colmaps/{dataset}",
        help="Root to the place that we want to write the data to",
    )
    # Remaining positional tokens are forwarded as config overrides.
    parser.add_argument(
        "opts",
        default=None,
        nargs=argparse.REMAINDER,
        help="Modify config options from command line",
    )
    # Simulation parameters.
    parser.add_argument(
        "--fps", default=30, type=int, help="Simulation FPS",
    )
    parser.add_argument(
        "--audio_sample_rate", default=44100, type=int, help="Audio sample rate",
    )
    parser.add_argument(
        "--scene",
        type=str,
        default="room_0",
        help="Name of a specific scene that you want to extract RGB-S. If not given, all scenes given in data/metadata will be used!",
    )
    parser.add_argument(
        "--sound_name",
        type=str,
        default="person_10",
        help="The name of the sound source file (without the extension)",
    )
    parser.add_argument(
        "--num_obs_to_generate",
        default=1e10,
        type=int,
        help="Number of observations to generate",
    )
    # Agent / goal placement in the scene graph.
    parser.add_argument(
        "--start_node",
        type=int,
        default=50,
        help="Index of the starting position of the agent. This is not important because we read the locations given in the graph and obtain the observation at that location",
    )
    parser.add_argument(
        "--monoaudio",
        action="store_true",
        help="If setting, save only audio for the left-ear microphone",
    )
    parser.add_argument(
        "--modalities",
        type=str,
        default="RGBDS",
        help="Modalities in the dataset to save. By default, save both RGBD and sound",
    )
    parser.add_argument(
        "--goal_node",
        type=int,
        default=0,
        help="Index of the goal's position where the agent plans to reach. This is also the sound source location",
    )
    parser.add_argument(
        "--agent_path",
        type=str,
        default="",
        help="Path to a json file that specifies the agent path e.g: node (index) and angle (degrees)",
    )
    # Visualization / debugging toggles.
    parser.add_argument(
        "--visualize_mesh", action="store_true", help="Visualize the 3D env pcl or not"
    )
    parser.add_argument(
        "--visualize_obs", action="store_true", help="Visualize the observations or not"
    )
    parser.add_argument(
        "--save_video", action="store_true", help="Save visualization video or not"
    )
    parser.add_argument(
        "--test_cv_K",
        action="store_true",
        help="Test intrinsics parameters given in OpenCV format",
    )
    return parser.parse_args()
import glob
def get_file_names(directory):
    """
    Get the names of the completed csv files used to store the plot details.
    """
    pattern = directory + '/*.csv'
    return glob.glob(pattern)
def decoded_api_json_data_complex():
    """Data to be displayed in a VMC view."""
    # (id, string, number, boolean, object.a, object.b, array)
    rows = [
        (1, "Peter", 35, True, "abcde", 60, [1, 2, 3]),
        (2, "Susan", 42, False, "fghij", 33, [11, 12, 13]),
        (3, "Graham", 22, True, "klmno", 90, [111, 112, 113]),
        (4, "Lindsay", 45, False, "pqrst", 80, [99, 102, 127]),
        (5, "Indy", 72, True, "uvwyz", 80, [59, 37, 111]),
        (6, "Piotr", 16, True, "rewrt", 70, [59, 37, 112]),
        (7, "Neil", 37, False, "iohfk", 30, [59, 37, 111]),
        (8, "Mac", 92, True, "oxceg", 20, [59, 37, 114]),
        (9, "June", 88, False, "wbuln", 90, [59, 37, 111]),
    ]
    return [
        {
            "id": row_id,
            "string": name,
            "number": number,
            "boolean": flag,
            "empty": None,
            "object": {"a": obj_a, "b": obj_b},
            "array": list(values),
        }
        for row_id, name, number, flag, obj_a, obj_b, values in rows
    ]
def get_communities_filenames(database):
"""
This function retrieves all filenames (and the file count)
for every community of similar documents.
"""
query = ('MATCH (d:Document) RETURN d.community, '
'collect(d.filename) AS files, '
'count(d.filename) AS file_count '
'ORDER BY file_count DESC')
results = database.execute(query, 'r')
return results | 48cf5f14023ee34c805be88ead9873a0f6409d0d | 693,567 |
def getListObjectFromIndex(listIndex, listObject):
    """Return the elements of listObject selected by the indices in listIndex."""
    return [listObject[index] for index in listIndex]
def _doublet(plist, J):
"""
Applies a *J* coupling to each signal in a list of (frequency, intensity)
signals, creating two half-intensity signals at +/- *J*/2.
Parameters
---------
plist : [(float, float)...]
a list of (frequency{Hz}, intensity) tuples.
J : float
The coupling constant in Hz.
Returns
-------
[(float, float)...]
a list of (frequency, intensity) tuples.
"""
res = []
for v, i in plist:
res.append((v - J / 2, i / 2))
res.append((v + J / 2, i / 2))
return res | 5b88ec2889bcbfecf6001e1c84fabe835ed4f678 | 693,569 |
def gradeReport(course):
"""Assumes: course is of type Grades"""
report = []
for student in course.allStudents():
total = 0.0
numOfGrades = 0
for grade in course.getGrades(student):
total += grade
numOfGrades += 1
try:
average = total / numOfGrades
report.append("{}'s mean grade is {}".format(student, average))
except ZeroDivisionError:
report.append("{} has no grades".format(student))
return '\n'.join(report) | 7ff2d3eb4de9166ed1443b0d265b2248f5bb2f5c | 693,570 |
def get_object_root_module(obj):
    """
    Get the name of the root module of the object's type.

    :param T obj: Any object
    :rtype: str
    :return: The root module of the object's type
    """
    module_path = type(obj).__module__
    return module_path.partition('.')[0]
import codecs
def get_long_description():
"""
Strip the content index from the long description.
"""
with codecs.open('README.rst', encoding='UTF-8') as f:
readme = [line for line in f if not line.startswith('.. contents::')]
return ''.join(readme) | 985e4d6a4b6b64f7d0b9acbba9b9569f2e96adf5 | 693,572 |
def frac_cont(frm_count_dict):
    """Compute normalized contact fractions per amino-acid class.

    Takes the dictionary produced by ``aa_frmcount`` (keys contain residue
    names, ``value[0]`` is the contact frame count for that residue) and
    returns ``nlkts``, a dictionary keyed by amino-acid class whose value is:
        [0] total no. of contacts for the class
        [1] total no. of amino acids of the class in contact
        [2] total no. of contacts normalized by the protein surface fraction
        [3] normalized fraction of contacts
    """
    a_a = ["GLY", "ALA", "VAL", "LEU", "ILE", "MET", "PHE", "TRP", "PRO",
           "SER", "THR", "CYS", "TYR", "ASN", "GLN", "ASP", "GLU", "LYS",
           "ARG", "HIS"]
    # Grouping of residues in Smith et al (groups overlap: e.g. HIS is both
    # aromatic and positive, PHE/TRP both aromatic and hydrophobic).
    aromatic_res = ['PHE', 'TRP', 'TYR', 'HIS']
    hydrophobic_res = ['ALA', 'ILE', 'LEU', 'VAL', 'GLY', 'PRO', 'PHE', 'TRP', 'MET']
    polar_res = ['ASN', 'CYS', 'GLN', 'SER', 'THR', 'TYR', 'GLH', 'ASH']
    neg_res = ['ASP', 'GLU']
    pos_res = ['ARG', 'HIS', 'LYS']
    frac_res = [neg_res, pos_res, polar_res, hydrophobic_res, aromatic_res]
    sf_lbl = ["Negative", "Positive", "Polar", "Hydrophobic", "Aromatic"]
    # Total contact frame count for each class.  A dict entry contributes once
    # per matching residue name, mirroring the original accumulation order.
    co_grpaa = []
    for group in frac_res:
        frames = []
        for res in group:
            for key, value in frm_count_dict.items():
                if res in key:
                    frames.append(value[0])
        co_grpaa.append(sum(frames))
    # Number of contacted entries per amino-acid name.  Using an int dict
    # instead of parsing "NAME count" strings fixes the old bug where
    # int(cpl_l[j][4:6]) silently truncated counts >= 100 to two digits.
    aa_counts = {aa: sum(1 for key in frm_count_dict if aa in key) for aa in a_a}
    # Total number of contacted amino acids for each class.  GLH/ASH are not
    # in a_a, so (as before) they contribute 0.
    tp_cnt = [sum(aa_counts.get(res, 0) for res in group) for group in frac_res]
    # Total count of residues within 4 A of the PLGA oligomer.
    bsum = len(frm_count_dict)
    # Normalize each class's frame count by the protein surface fraction of
    # that class; a class with no contacted residues keeps its raw count.
    norm_list = []
    for i in range(len(co_grpaa)):
        surface_frac = tp_cnt[i] / bsum
        if surface_frac == 0:
            norm_list.append(co_grpaa[i])
        else:
            norm_list.append(co_grpaa[i] / surface_frac)
    # Fractional contact based on the normalized frame counts.
    nsum = sum(norm_list)
    cont_l = [value / nsum for value in norm_list]
    # Legend per class: [total contacts, contacted amino-acid count,
    #                    normalized contacts, normalized contact fraction]
    nlkts = {}
    for i in range(len(sf_lbl)):
        nlkts[sf_lbl[i]] = [co_grpaa[i], tp_cnt[i], norm_list[i], cont_l[i]]
    return nlkts
def _string_to_bool(value, triple=True):
"""Translates a string into bool value or None.
Args:
value: The string value to evaluate. (string)
triple: If True, None is returned if not found, if False, False
Returns:
The a bool value of tag or if triple is True None.
"""
if value is None or value == "":
if triple is True:
return None
else:
return False
if type(value) is bool:
return value
if type(value) is int:
if value != 0:
return True
else:
return False
if type(value) is str:
if value.lower() in ['false', '0', 'f', '-', 'n', 'no']:
return False
else:
return True
return | b46ee6d517e353fae8d28caba6c20e80bd894a5e | 693,574 |
def reverse_string(s):
    """
    Reverse a string.

    :param s: the string to reverse
    :return: a new string with the characters of *s* in reverse order
    """
    # The previous body was placeholder/debug code ("'y' % 5 and 'wtf'")
    # that raised TypeError instead of reversing anything.
    return s[::-1]
def int2bin(i: int) -> str:
    """Convert an 8-bit integer to a binary string.

    Args:
        i (int): Integer value to be converted.
    Note:
        The passed integer value must be <= 255.
    Raises:
        ValueError: If the passed integer is > 255.
    Returns:
        str: A binary string representation of the passed integer.
    """
    if i > 255:  # Limited to 1 byte.
        raise ValueError(f'Passed value exceeds 1 byte: {i=}')
    bits = [str((i >> shift) & 1) for shift in range(7, -1, -1)]
    return ''.join(bits)
def parse_search_value(search_value):
    """The <search-value> can be of the form::

        d<device_id>r<region>z<zone>-<ip>:<port>[R<r_ip>:<r_port>]/
         <device_name>_<meta>

    Where <r_ip> and <r_port> are replication ip and port.

    Any part is optional, but you must include at least one part.

    Examples::

        d74              Matches the device id 74
        r4               Matches devices in region 4
        z1               Matches devices in zone 1
        z1-1.2.3.4       Matches devices in zone 1 with the ip 1.2.3.4
        1.2.3.4          Matches devices in any zone with the ip 1.2.3.4
        z1:5678          Matches devices in zone 1 using port 5678
        :5678            Matches devices that use port 5678
        R5.6.7.8         Matches devices that use replication ip 5.6.7.8
        R:5678           Matches devices that use replication port 5678
        1.2.3.4R5.6.7.8  Matches devices that use ip 1.2.3.4 and replication ip
                         5.6.7.8
        /sdb1            Matches devices with the device name sdb1
        _shiny           Matches devices with shiny in the meta data
        _"snet: 5.6.7.8" Matches devices with snet: 5.6.7.8 in the meta data
        [::1]            Matches devices in any zone with the ip ::1
        z1-[::1]:5678    Matches devices in zone 1 with ip ::1 and port 5678

    Most specific example::

        d74r4z1-1.2.3.4:5678/sdb1_"snet: 5.6.7.8"

    Nerd explanation:

        All items require their single character prefix except the ip, in which
        case the - is optional unless the device id or zone is also included.
    """
    # The parser consumes search_value left to right, chopping off each
    # recognized component; the untouched original is kept for the error
    # message at the end.
    orig_search_value = search_value
    match = {}
    # Optional device id: d<digits>
    if search_value.startswith('d'):
        i = 1
        while i < len(search_value) and search_value[i].isdigit():
            i += 1
        match['id'] = int(search_value[1:i])
        search_value = search_value[i:]
    # Optional region: r<digits>
    if search_value.startswith('r'):
        i = 1
        while i < len(search_value) and search_value[i].isdigit():
            i += 1
        match['region'] = int(search_value[1:i])
        search_value = search_value[i:]
    # Optional zone: z<digits>
    if search_value.startswith('z'):
        i = 1
        while i < len(search_value) and search_value[i].isdigit():
            i += 1
        match['zone'] = int(search_value[1:i])
        search_value = search_value[i:]
    # A '-' may separate the id/region/zone part from the ip.
    if search_value.startswith('-'):
        search_value = search_value[1:]
    # Optional ip: bare digits-and-dots for IPv4...
    if len(search_value) and search_value[0].isdigit():
        i = 1
        while i < len(search_value) and search_value[i] in '0123456789.':
            i += 1
        match['ip'] = search_value[:i]
        search_value = search_value[i:]
    # ...or a bracketed [IPv6] address; brackets are stripped from the match.
    elif len(search_value) and search_value[0] == '[':
        i = 1
        while i < len(search_value) and search_value[i] != ']':
            i += 1
        i += 1
        match['ip'] = search_value[:i].lstrip('[').rstrip(']')
        search_value = search_value[i:]
    # Optional port: :<digits>
    if search_value.startswith(':'):
        i = 1
        while i < len(search_value) and search_value[i].isdigit():
            i += 1
        match['port'] = int(search_value[1:i])
        search_value = search_value[i:]
    # replication parameters
    # Same ip/port grammar as above, prefixed by 'R'.
    if search_value.startswith('R'):
        search_value = search_value[1:]
        # Replication ip, IPv4 form...
        if len(search_value) and search_value[0].isdigit():
            i = 1
            while (i < len(search_value) and
                   search_value[i] in '0123456789.'):
                i += 1
            match['replication_ip'] = search_value[:i]
            search_value = search_value[i:]
        # ...or bracketed IPv6 form.
        elif len(search_value) and search_value[0] == '[':
            i = 1
            while i < len(search_value) and search_value[i] != ']':
                i += 1
            i += 1
            match['replication_ip'] = search_value[:i].lstrip('[').rstrip(']')
            search_value = search_value[i:]
        # Optional replication port.
        if search_value.startswith(':'):
            i = 1
            while i < len(search_value) and search_value[i].isdigit():
                i += 1
            match['replication_port'] = int(search_value[1:i])
            search_value = search_value[i:]
    # Optional device name: /<name>, ending at the first '_'.
    if search_value.startswith('/'):
        i = 1
        while i < len(search_value) and search_value[i] != '_':
            i += 1
        match['device'] = search_value[1:i]
        search_value = search_value[i:]
    # Everything after '_' is free-form meta data.
    if search_value.startswith('_'):
        match['meta'] = search_value[1:]
        search_value = ''
    # Any unconsumed text means the input did not fit the grammar.
    if search_value:
        raise ValueError('Invalid <search-value>: %s' %
                         repr(orig_search_value))
    return match
def join_regex(regexes):
    """Combine a list of regexes into one that matches any of them."""
    if not regexes:
        return ""
    if len(regexes) == 1:
        # A single pattern is returned as-is, without grouping parens.
        return regexes[0]
    return "|".join("(%s)" % rx for rx in regexes)
def num_misses(df):
    """Total number of rows whose Type is 'MISS'."""
    return df['Type'].isin({'MISS'}).sum()
import os
def get_file_extension(file_path):
    """
    Return the (last) extension of *file_path* without the leading dot.

    >>> get_file_extension("/a/b/c")
    ''
    >>> get_file_extension("/a/b.txt")
    'txt'
    >>> get_file_extension("/a/b/c.tar.xz")
    'xz'
    """
    extension = os.path.splitext(file_path)[-1]
    if not extension:
        return ""
    if extension[0] == '.':
        return extension[1:]
    return extension
import os
def locate(filename, search_path=None, path_sep=os.pathsep):
    """
    Helper function to locate a file in the os PATH.

    :param filename: name of the file to look for
    :param search_path: path list to search; defaults to $PATH read at call
        time (the old default read os.environ['PATH'] once at import time,
        ignoring later environment changes and raising KeyError at import
        if PATH was unset)
    :param path_sep: separator between entries of search_path
    :return: absolute path of the first match, or None if not found
    """
    if search_path is None:
        search_path = os.environ.get('PATH', '')
    for directory in search_path.split(path_sep):
        candidate = os.path.join(directory, filename)
        if os.path.exists(candidate):
            return os.path.abspath(candidate)
    return None
def explain_node_str(root, indent=0):
    """Return an indented, outline-style representation of the subtree.

    A node is indexed as root[0]=item, root[1]=left child, root[2]=right
    child; children one level deeper gain one extra leading space.
    """
    pad = " " * indent
    item, left, right = root[0], root[1], root[2]
    parts = ["%sNode<item=%s>" % (pad, item)]
    if left or right:
        parts.append(":\n")
    else:
        parts.append("\n")
    if left:
        parts.append("%s -Left:\n" % pad)
        parts.append(explain_node_str(left, indent + 1))
    if right:
        parts.append("%s -Right:\n" % pad)
        parts.append(explain_node_str(right, indent + 1))
    return "".join(parts)
def check_keys_in_dict(dictionary, keys):
    """
    Check that a list of keys exist in a dictionary.

    Parameters
    ----------
    dictionary: dict
        The input dictionary.
    keys: list of strings
        The keys that the dictionary must contain.

    Returns
    -------
    bool:
        True when every required key exists in the dictionary;
        otherwise a KeyError is raised.
    """
    missing = [key for key in keys if key not in dictionary]
    if missing:
        raise KeyError("Dictionary missing key values."
                       "Requires: {}".format(keys))
    return True
import os
def inject_python(code_info):
    """Prepend the egtest sys.path injection header to a python snippet."""
    header_lines = [
        u'# Injected by egtest\n',
        u'import sys\n',
        u'sys.path.insert(0, "%s")\n\n' % os.getcwd(),
    ]
    return u''.join(header_lines) + code_info.code
def percent_difference(test_stat, ctrl_stat):
    """Ratio between test and control, relative to their mean.

    Useful when your statistics might be close to zero; the result is
    symmetric in the two arguments (up to sign).

    Args:
        test_stat: numpy array of test statistics
        ctrl_stat: numpy array of control statistics
    Returns:
        (test_stat - ctrl_stat) / ((test_stat + ctrl_stat) / 2.0) * 100.0
    """
    midpoint = (test_stat + ctrl_stat) / 2.0
    return (test_stat - ctrl_stat) / midpoint * 100.0
def parse_dhm_request(msg: str) -> int:
    """Parse the client's DHM key exchange request.

    :param msg: client's DHMKE initial message, e.g. "DHMKE:123"
    :return: the integer in the second ':'-separated field
    """
    fields = msg.split(':')
    return int(fields[1])
def deparen(s):
    """
    Remove all parentheses from the string.
    """
    return s.translate(str.maketrans('', '', '()'))
import argparse
def parseArgs():
"""Calculate AS Numbers from AS-DOT or AS-PLAIN"""
# Configure the option parser for CLI options to the script
usage = "usage: %prog [options] BGP AS Number"
parser = argparse.ArgumentParser(description="Calculate BGP AS Numbers")
parser.add_argument("ASN", type=str, help='Enter BGP AS Number ASdot or ASplain')
args = parser.parse_args()
return args | 1f50f1aa9ab8e08f4b4f9eb065b2ea18d014a918 | 693,591 |
def _clean_value(line1, line2):
"""Clean up attribute value for display."""
_line1 = line1.strip()
_line2 = line2.strip()
# Let trailing space to make the code easier to read
if _line1[-1:] in ["{", "}", "(", ")", "[", "]", ";", ","]:
_line1 += " "
return _line1 + _line2 | 8b0e4576a2aca6289a7420783def68c4f224fa97 | 693,592 |
def readfile(filename, mode='r'):
    """
    Return the content of a file.

    :param filename: the filename
    :param mode: 'r' for text or 'rb' for binary
    :return: the file content (str or bytes, depending on mode)
    :raises ValueError: if mode is not 'r' or 'rb'
    """
    if mode not in ('r', 'rb'):
        raise ValueError(f"incorrect mode : expected 'r' or 'rb' given {mode}")
    # The context manager closes the file on every exit path; the explicit
    # close() the old code did inside the with-block was redundant.
    with open(filename, mode) as file:
        return file.read()
def jp_author_name(forename, surname):
    """Construct the name of a person as a single string.

    A single-letter forename is treated as an initial and gets a dot.
    """
    if len(forename) == 1:
        forename += '.'
    return f"{forename} {surname}"
def deduplicate_ref_texts(ref_texts):
    """Deduplicate reference texts by removing those that have the same content.

    Among same-language texts with identical content, only the one with the
    lexicographically greatest name is kept.
    """
    kept = []
    for candidate in ref_texts:
        is_duplicate = any(
            candidate.language == other.language
            and candidate.name < other.name
            and candidate.content == other.content
            for other in ref_texts
        )
        if not is_duplicate:
            kept.append(candidate)
    return kept
def home():
    """View for the Home page of the Website."""
    return "Welcome to the HomePage!"
def remain_alpha(symbol: str) -> str:
    """
    Return the alphabetic characters of the contract symbol, upper-cased.
    """
    letters = [ch for ch in symbol if ch.isalpha()]
    return ''.join(letters).upper()
from typing import Dict
from typing import List
import re
def get_repo_name_from_url(
    repo_url: str,
    default_version: str,
    valid: Dict[str, List[str]],
) -> str:
    """
    Extract the repository name from a URL.

    If it cannot be extracted, the name of the first package of the default
    version is used instead.

    :param repo_url: the repository URL
    :param default_version: the default version
    :param valid: the dictionary of valid versions & packages
    :return: the repo name, or the default package if extraction failed
    """
    # The old code initialized repo_name = None and then unconditionally
    # overwrote it in both branches; early returns remove that dead store.
    repo_name_match = re.match('.*/(.*)$', repo_url)
    if repo_name_match:
        # Drop a '.git' extension if present (note: replace() removes the
        # substring anywhere in the name, as before).
        return repo_name_match[1].replace('.git', '')
    # No '/' in the URL: fall back to the first package of the default version.
    return valid[default_version][0]
from datetime import datetime
def start_dt() -> datetime:
    """Show the start time.

    Returns
    -------
    datetime
        The date time this call was made.
    """
    now = datetime.now()
    stamp = now.isoformat(timespec='microseconds')
    print('stop_watch', 'Started>', stamp, flush=True)
    return now
def Strip(txt):
    """Return the stripped string; returns None for inputs without .strip().

    :param txt: a string, None, or any other object
    :return: txt.strip() for strings; None when txt has no strip() method
    """
    try:
        return txt.strip()
    except AttributeError:
        # Only the "no .strip() method" case (e.g. None) maps to None; the
        # old bare except also swallowed KeyboardInterrupt/SystemExit.
        return None
def make_game(serverid, name, extras=None):
    """Create a test game instance; *extras* (if truthy) overrides/extends it."""
    state = {'players': {}, 'min_players': 2, 'max_players': 4}
    game = {'serverid': serverid, 'name': name, 'game_state': state}
    if extras:
        game.update(extras)
    return game
def checkConditions(specs, content):
    """Apply each check function in *specs* (dict of field -> callable) to the
    corresponding value of *content* (dict) and collect the truthy outcomes.
    """
    outcomes = [check(content.get(field)) for field, check in specs.items()]
    return [outcome for outcome in outcomes if outcome]
import time
def tot_exec_time_str(time_start):
    """Return a formatted execution-time string measured from *time_start*.

    The elapsed wall time is reported both in raw seconds and broken down
    into hours/minutes/seconds.
    """
    elapsed = time.time() - time_start
    return ("execution time: %0.2fs (%dh %dm %0.2fs)"
            % (elapsed, elapsed / 3600, (elapsed % 3600) / 60,
               (elapsed % 3600) % 60))
def line_search(py, n_total, cmts_directory, windows_directory, data_info_directory, data_asdf_directory, sync_raw_directory, sync_perturbed_directory, stations_path, min_periods, max_periods, search_range, search_step):
    """
    Build the shell script that runs the structure line search under ibrun.
    """
    flags = [
        ("--cmts_directory", cmts_directory),
        ("--windows_directory", windows_directory),
        ("--data_info_directory", data_info_directory),
        ("--data_asdf_directory", data_asdf_directory),
        ("--sync_raw_directory", sync_raw_directory),
        ("--sync_perturbed_directory", sync_perturbed_directory),
        ("--stations_path", stations_path),
        ("--min_periods", min_periods),
        ("--max_periods", max_periods),
        ("--search_range", search_range),
        ("--search_step", search_step),
    ]
    flag_text = " ".join(f"{name} {value}" for name, value in flags)
    script = "export IBRUN_TASKS_PER_NODE=12; \n"
    script += (f"ibrun -n {n_total} {py} "
               f"-m seisflow.scripts.structure_inversion.mpi_structure_line_search "
               f"{flag_text}; \n")
    return script
import glob
def generate_file_list():
    """
    Make the list of files we want flask to reload when changed.
    """
    patterns = [
        'templates/*.html',
        'templates/tests/*.html',
        'static/js/*.js',
        'static/js/tests-jasmine/*.js',
    ]
    matched = []
    for pattern in patterns:
        matched.extend(glob.glob(pattern))
    return matched
def knots_to_kmh(knots: float) -> float:
    """Convert a velocity from knots to km/h.

    1 knot (i.e. 1 nautical mile per hour) is 1.852 km/h.

    :param knots: velocity in knots
    :returns: velocity in km/h
    """
    km_per_nautical_mile = 1.852
    return km_per_nautical_mile * knots
def nonexistent_user():
    """Credentials of a user that does not exist (for negative tests)."""
    return dict(
        username='unknown_username',
        email='unknown@mail.ru',
        phone_number='+79001234567',
    )
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.