content stringlengths 35 416k | sha1 stringlengths 40 40 | id int64 0 710k |
|---|---|---|
def select_fets(cluster_to_fets, top_fets):
    """
    Select the most important feature from each cluster.

    Args:
        cluster_to_fets: Mapping of cluster key -> collection of feature
            names.  NOTE: entries are deleted from this dict as clusters
            are consumed (the caller's dict is mutated, as before).
        top_fets: Iterable of (feature name, feature importance) tuples.

    Returns:
        List of (feature name, importance) tuples, at most one per
        cluster, sorted by decreasing importance.
    """
    def get_keys(fet_name):
        # Keys of the clusters whose feature set contains fet_name.
        return [k for k, v in cluster_to_fets.items() if fet_name in v]

    chosen_fets = []
    # Examine the features from most important to least important.
    for fet in sorted(top_fets, key=lambda p: p[1], reverse=True):
        fet_name = fet[0]
        clusters = get_keys(fet_name)
        assert len(clusters) <= 1, \
            ("A feature should be in either 0 or 1 clusters, but "
             f"\"{fet_name}\" is in clusters: {clusters}")
        if not clusters:
            # This feature's cluster has already been used.
            continue
        # This is the first feature from this cluster to be used; keep it.
        chosen_fets.append(fet)
        # Remove this cluster to invalidate its other features.
        del cluster_to_fets[clusters[0]]
    # Make sure that chosen features are sorted in decreasing order of
    # importance (most important is first).
    chosen_fets.sort(key=lambda p: p[1], reverse=True)
    print(
        f"Chosen features ({len(chosen_fets)}):\n\t" +
        "\n\t".join(
            f"{fet}: {coeff:.4f}" for fet, coeff in chosen_fets))
    print(
        "New in_spc:", "\tin_spc = (",
        "\t\t" + "\n\t\t".join(f"\"{fet}\"," for fet, _ in chosen_fets[:10]),
        "\t)", sep="\n")
    return chosen_fets
def find_min_y_point(list_of_points):
    """
    Return the point of *list_of_points* with the minimal y-coordinate,
    breaking ties by the minimal x-coordinate.

    :param list_of_points: list of tuples
    :return: tuple (x, y)
    """
    # Lexicographic minimum over (y, x) reproduces the original scan:
    # strictly smaller y wins, equal y falls back to smaller x.
    return min(list_of_points, key=lambda pt: (pt[1], pt[0]))
import ast
import inspect
import json
def ast_parse(method):
    """Decorator to parse user input to JSON-AST object.

    The wrapped ``method`` is expected to take a parsed AST and return a
    JSON string; the wrapper converts the caller's raw input into an AST
    first, then decodes the JSON result back into Python objects.
    """
    def wrapper(*args, **kwargs):
        # A plain string is parsed directly (e.g. a declaration or an
        # expression); anything else is assumed to be a live object whose
        # source can be retrieved (e.g. a function or method).
        if isinstance(args[0], str):
            ast_obj = ast.parse(args[0]) # i.e. a dec or exp
        else:
            obj = inspect.getsource(args[0]) # i.e. a method
            ast_obj = ast.parse(obj)
        # NOTE(review): positional args beyond args[0] are silently
        # dropped here; only keyword args are forwarded to `method`.
        json_parsed = method(ast_obj, **kwargs)
        parsed = json.loads(json_parsed)
        return parsed
    return wrapper | 9703676ed1eff0fe1da17fdeb3a45cab0f54f3b7 | 693,611 |
import os
def complete_path(text, line, arg=False):
    """
    Helper for tab-completion of file paths.

    :param text: current word being completed (unused; kept for the
        cmd.Cmd completer signature).
    :param line: the full command line typed so far.
    :param arg: True when the path is an argument of a sub-command
        ("command something path") rather than the first token.
    :return: list of candidate names; directories get a trailing '/'.
    """
    # stolen from dataq at
    # http://stackoverflow.com/questions/16826172/filename-tab-completion-in-cmd-cmd-of-python
    if arg:
        # if we have "command something path"
        argData = line.split()[1:]
    else:
        # if we have "command path"
        argData = line.split()[0:]
    if not argData or len(argData) == 1:
        # Nothing typed yet: offer everything in the current directory.
        completions = os.listdir('./')
    else:
        # Split the typed path into directory part and partial basename.
        dir, part, base = argData[-1].rpartition('/')
        if part == '':
            # No slash at all: complete relative to the current directory.
            dir = './'
        elif dir == '':
            # Path starts with '/': complete from the filesystem root.
            dir = '/'
        completions = []
        for f in os.listdir(dir):
            if f.startswith(base):
                if os.path.isfile(os.path.join(dir,f)):
                    completions.append(f)
                else:
                    completions.append(f+'/')
    return completions | 229abe7c75cb393bf381a115ecbda2378c302caa | 693,612 |
import os
import argparse
def valid_dir(value):
    """Argparse type-checker: require *value* to be an existing directory.

    Returns the absolute path on success; raises ArgumentTypeError otherwise.
    """
    if os.path.isdir(value):
        return os.path.abspath(value)
    raise argparse.ArgumentTypeError(
        'directory {0} does not exist'.format(value)
    )
def get_data_id(data):
    """Return the ``id`` attribute when *data* is a ``Data`` instance; otherwise return *data* unchanged."""
    if type(data).__name__ == 'Data':
        return data.id
    return data
from typing import Dict
def combined_instruction_estimate(counts: Dict[str, int]) -> int:
    """
    Given the result of run_with_cachegrind(), return an estimate of the
    total time to run_with_cachegrind.

    The weights were determined empirically, but some research suggests
    they're a reasonable approximation for cache time ratios.  L3 is
    probably too low, but then we're not simulating L2 either.
    """
    weights = {"l1": 1, "l3": 5, "ram": 35}
    return sum(factor * counts[tier] for tier, factor in weights.items())
import math
def move_on_circle(x:float, y:float, radius:float, distance:float):
    """Advance a point lying on a circle of *radius* by a chord of length *distance*.

    The chord length is converted into a central angle via the law of
    cosines, the point's polar angle is advanced by it, and the new
    Cartesian coordinates are returned as (x, y).
    """
    # Central angle subtended by a chord of length `distance`
    # (law of cosines with a = b = radius, c = distance).
    a, b, c = radius, radius, distance
    delta = math.acos((a**2 + b**2 - c**2) / (2 * a * b))
    # Rotate the current polar angle by that delta.
    angle = math.atan2(float(y), float(x)) + delta
    return radius * math.cos(angle), radius * math.sin(angle)
def decrypt(lst: list):
    """
    Decode HTML-entity encoded emails.

    Strings that start with '&#' are assumed to consist of 6-character
    decimal entities ("&#NNN;"), which are converted back to characters;
    any other string passes through unchanged.

    :param lst: list of email strings
    :return: list of decoded emails

    (The old implementation wrapped everything in a useless
    ``while True:`` loop that only ever ran once; it has been removed.)
    """
    unencoded_emails = []
    for string in lst:
        if string[0:2] == '&#':
            decoded_email = ''
            # Walk the string one 6-character entity ("&#NNN;") at a time.
            for start in range(0, 6 * (len(string) // 6), 6):
                decoded_email += chr(int(string[start + 2: start + 5]))
            unencoded_emails.append(decoded_email)
        else:
            unencoded_emails.append(string)
    return unencoded_emails
def split_ae_outputs(outputs, num_joints, with_heatmaps, with_ae,
                     select_output_index):
    """Split multi-stage network outputs into heatmaps and AE tags.

    Args:
        outputs (list(torch.Tensor)): Outputs of network.
        num_joints (int): Number of joints.
        with_heatmaps (list[bool]): Whether each stage outputs heatmaps.
        with_ae (list[bool]): Whether each stage outputs AE tags.
        select_output_index (list[int]): Indices of the stages to keep.

    Returns:
        tuple: (heatmaps, tags) lists of per-stage tensors.
    """
    heatmaps = []
    tags = []
    for stage, feat in enumerate(outputs):
        if stage not in select_output_index:
            continue
        if with_heatmaps[stage]:
            # The first num_joints channels are heatmaps; the associative
            # embeddings start right after them.
            heatmaps.append(feat[:, :num_joints])
            tag_start = num_joints
        else:
            tag_start = 0
        if with_ae[stage]:
            tags.append(feat[:, tag_start:])
    return heatmaps, tags
import platform
import os
import sys
def appdata_dir():
    """Return the platform-specific Electrum application data directory.

    Windows uses %APPDATA%/Electrum, Linux uses <sys.prefix>/share/electrum,
    and the macOS/BSD family uses a fixed /Library/Application Support path.
    Raises for unrecognised platforms.
    """
    if platform.system() == "Windows":
        return os.path.join(os.environ["APPDATA"], "Electrum")
    elif platform.system() == "Linux":
        return os.path.join(sys.prefix, "share", "electrum")
    elif (platform.system() == "Darwin" or
            platform.system() == "DragonFly" or
            platform.system() == "OpenBSD" or
            platform.system() == "NetBSD"):
        # NOTE(review): this is the system-wide path, not the per-user
        # ~/Library location — confirm that is intended.
        return "/Library/Application Support/Electrum"
    else:
        raise Exception("Unknown system") | bbac08fae65d3dd44a08e46d56956b95e1de2e00 | 693,621 |
import torch
def _li_dynamics_bwd(grad_output, output, decay):
    """Backward pass of leaky-integrator dynamics.

    Propagates gradients backwards in time for the recurrence
    state[n] = decay * state[n-1] + input[n], where the last tensor
    dimension is time.

    Args:
        grad_output: gradient w.r.t. the LI output, shape (..., T).
        output: forward-pass output, shape (..., T).
        decay: raw decay parameter; the effective decay factor is
            1 - decay / 2**12 (fixed-point convention).

    Returns:
        (grad_input, grad_decay): gradients w.r.t. the input sequence and
        the decay parameter (reduced for shared or per-neuron parameters).
    """
    grad_input = torch.zeros_like(grad_output)
    # Convert the fixed-point parameter into the effective decay factor.
    decay = 1 - decay / (1 << 12)
    num_steps = grad_output.shape[-1]
    # Reverse-time accumulation: step n receives the decayed gradient
    # from step n + 1 plus its own local gradient.
    grad_input[..., num_steps - 1] = grad_output[..., num_steps - 1]
    for n in range(num_steps - 1)[::-1]:
        grad_input[..., n] = decay * grad_input[..., n + 1] \
            + grad_output[..., n]
    # d(state[n])/d(decay) involves the previous output, hence the shift.
    grad_decay = grad_input[..., 1:] * output[..., :-1]
    if torch.numel(decay) == 1:  # shared parameters
        grad_decay = torch.sum(grad_decay.flatten(), dim=0, keepdim=True)
    else:
        # Per-neuron decay: reduce over batch and time first.
        grad_decay = torch.sum(grad_decay, dim=[0, -1])
    # Collapse any remaining trailing dims so one value per neuron remains.
    if len(grad_decay.shape) != 1:
        grad_decay = torch.sum(
            grad_decay.reshape(grad_decay.shape[0], -1),
            dim=1
        )
    return grad_input, grad_decay | 7bff6e778f2198f5f4dcbbfcbfe32932b7113874 | 693,624 |
def get_branches_of_bus(B, j):
    """
    Get the indices of the branches connected to bus *j*.

    :param B: Branch-bus CSC matrix
    :param j: bus index
    :return: list of branch indices attached to the bus
    """
    # Column j of a CSC matrix spans indptr[j]:indptr[j + 1] in `indices`.
    return list(B.indices[B.indptr[j]:B.indptr[j + 1]])
import requests
def send_notification(**kwargs):
    """
    Calls a custom webhook service on IFTTT (if this then that) website which then sends a notification via
    smartphone APP to the customer that the download finished.

    Keyword Args:
        url: the download URL to report; when absent, nothing is sent.

    Returns:
        The requests.Response from IFTTT, or None when no url was given.
    """
    url = kwargs.get('url', None)
    if url is not None:
        # SECURITY NOTE(review): the IFTTT maker key is hard-coded in this
        # URL; consider moving it to configuration or an env variable.
        iftt_url = 'https://maker.ifttt.com/trigger/download_ready/with/key/cX2k9A3tnmE4UGl0q2v_kW/'
        request = requests.post(iftt_url, {"value1": url})
        return request | c3ad9e5c567250a1deed918ba3a860a6a296d64d | 693,626 |
import numpy
def integrand(x):
    """Calculate the integrand

        exp(-b*[(x1-x4)^2 + (x2-x5)^2 + (x3-x6)^2])

    from the values in the 6-dimensional array *x* (two concatenated
    3-D points).

    (The unused local ``a = 1.0`` has been removed.)
    """
    b = 0.5
    # Squared Euclidean distance between the two 3-D points.
    xy = (x[0]-x[3])**2 + (x[1]-x[4])**2 + (x[2]-x[5])**2
    return numpy.exp(-b*xy)
import csv
def get_dest_LEDs():
    """Get train destination data from ``dest.csv``.

    This function assumes that dest.csv has this structure:
        Destination,DestinationImageFileforUTL,LineImageFileforUTL,DestinationImageFileforSSL,LineImageFileforSSL
    (e.g. 前橋,maebashi.png,utl_via_takasaki_line.png,maebashi_ssl.png,ssl_via_takasaki_line.png)

    Destination: a label to show in the console.
    DestinationImageFileforUTL / LineImageFileforUTL: image file names for
        an Ueno-Tokyo Line train (white texts).
    DestinationImageFileforSSL / LineImageFileforSSL: image file names for
        a Shonan-Shinjuku Line train (orange texts).

    Returns:
        A list of (dest_images, Destination) tuples, where dest_images is
        a dict with keys "utl" (Ueno-Tokyo Line) and "ssl"
        (Shonan-Shinjuku Line), each mapping to a
        (destination image path, route image path) pair.
    """
    dest_LEDs = []
    # newline='' is the csv-module recommended way to open CSV files
    # (avoids mangled line endings on Windows).
    with open('dest.csv', 'r', newline='') as csvfile:
        csvreader = csv.reader(csvfile, delimiter=',', quotechar='|')
        # The old code used enumerate() but never used the index.
        for row in csvreader:
            dest_images = {
                'utl': ('./dest/%s' % row[1], './dest/%s' % row[2]),
                'ssl': ('./dest/%s' % row[3], './dest/%s' % row[4]),
            }
            dest_LEDs.append((dest_images, row[0]))
    return dest_LEDs
import logging
def Handle(
    logger,
    handler=None,
    formatter="%(asctime)s %(name)s - %(levelname)s: %(message)s",
    level="WARNING",
):
    """
    Handle a logger with a standardised formatting.

    Parameters
    -----------
    logger : :class:`logging.Logger` | :class:`str`
        Logger or module name to source a logger from.
    handler : :class:`logging.Handler`
        Handler for the logging messages; defaults to a fresh
        :class:`logging.NullHandler`.
    formatter : :class:`str` | :class:`logging.Formatter`
        Formatter for the logging handler. Strings will be passed to
        the :class:`logging.Formatter` constructor.
    level : :class:`str`
        Logging level for the handler.

    Returns
    ----------
    :class:`logging.Logger`
        Configured logger.
    """
    # A default of logging.NullHandler() in the signature would be
    # evaluated once at import and shared (and reconfigured) across every
    # call; create a fresh handler per call instead.
    if handler is None:
        handler = logging.NullHandler()
    if isinstance(logger, str):
        logger = logging.getLogger(logger)
    elif isinstance(logger, logging.Logger):
        pass
    else:
        raise NotImplementedError
    if isinstance(formatter, str):
        formatter = logging.Formatter(formatter)
    handler.setFormatter(formatter)
    logger.addHandler(handler)
    logger.setLevel(getattr(logging, level))
    return logger
def disconnected():
    """
    Build the event template announcing a closed SSH session.
    """
    return {'ssh-event': 'disconnected'}
import os
def get_all_files(targetDir):
    """
    Recursively collect the paths of all files under *targetDir*.

    :param targetDir: directory to walk
    :return: list of file paths (depth-first, in os.listdir order)
    """
    files = []
    # Iterate entries directly instead of indexing with range(len(...)).
    for entry in os.listdir(targetDir):
        path = os.path.join(targetDir, entry)
        if os.path.isdir(path):
            files.extend(get_all_files(path))
        elif os.path.isfile(path):
            files.append(path)
    return files
import re
def find_version(*file_paths):
    """Find the package version string in a Python source file.

    :param file_paths: optional path components of the file to scan; when
        omitted, falls back to ``src/dvtests/__init__.py`` (the historical
        hard-coded behaviour, kept for existing callers).
    :return: the version string assigned to ``__version__``
    :raises RuntimeError: if no ``__version__`` assignment is found
    """
    import os
    # Previously *file_paths was accepted but silently ignored; honour it
    # when given.
    path = os.path.join(*file_paths) if file_paths else "src/dvtests/__init__.py"
    with open(path, "r", encoding="utf-8") as fh:
        version_file = fh.read()
    version_match = re.search(
        r"^__version__ = ['\"]([^'\"]*)['\"]", version_file, re.M,
    )
    if version_match:
        return version_match.group(1)
    raise RuntimeError("Unable to find version string.")
def bezier_quadratic(p0, p1, p2, t):
    """Return the position at parameter *t* on the quadratic Bezier curve defined by p0, p1, p2."""
    u = 1 - t
    return p1 + u**2 * (p0 - p1) + t**2 * (p2 - p1)
def find_backfill_id(point, monitoring_time, mongodb_backfilling_doc, logger):
    """
    Find the backfilling task id a monitoring sample belongs to.

    :param point: the monitoring point the sample came from; a dict with
        'thickener_id' and 'mixer_id' keys
    :param monitoring_time: timestamp of the monitoring sample
    :param mongodb_backfilling_doc: MongoDB collection of backfilling tasks
    :param logger: logger exposing a ``log(level, message)`` method
    :return: the matching task's fill_id, or -1 when no task matches
    """
    # A point is attached to either a thickener or a mixer.
    if point['thickener_id'] != 0:
        instrument_filter = {'thickener_id': int(point['thickener_id'])}
    else:
        instrument_filter = {'mixer_id': int(point['mixer_id'])}
    # Query rationale (translated from the note below): if the ingestion
    # service was down for a while, rows pile up in MySQL; stamping them
    # all with the *current* task's fill_id on restart would be wrong.
    # So match tasks where:
    #   1. the monitoring time falls between the task's start/end times,
    #      or is after the start of a task that has no end time yet; and
    #   2. the instrument filter matches.
    """
    检索规则解释:
    加入数据录入服务断线了一段时间,这会导致mysql中的数据不断累积。重启数据录入服务,将当前充填任务的fill_id写入历史数据是不合理的
    因此检索满足如下条件的充填任务:
    1. 监测时间夹在充填任务的起止时间之间 或 监测时间在充填任务的起始时间之后且该任务无结束时间
    2. 满足instrument_filter
    """
    backfill_task_filter = dict(
        {
            "$or":
                [
                    {'start_time': {'$lt': monitoring_time}, 'end_time': {'$gte': monitoring_time}},
                    {'start_time': {'$lt': monitoring_time}, 'end_time': None}
                ]
        }, **instrument_filter
    )
    backfilling_belong = mongodb_backfilling_doc.find(
        backfill_task_filter
    ).sort([('start_time', -1)])
    if backfilling_belong.count()>1:
        # In theory no two tasks can satisfy this query simultaneously.
        logger.log(
            'warning', 'Backfilling missions {} share {} {}'.format(
                ','.join(str([x['fill_id'] for x in backfilling_belong])),
                'mixer' if point['thickener_id']!=0 else 'thickener',
                point['mixer_id'] if point['thickener_id']!=0 else point['thickener_id']
            )
        )
    fill_id = -1
    # Results are sorted by start_time descending, so taking the first
    # cursor element is enough — hence the immediate break.
    for backfilling in backfilling_belong:
        fill_id = backfilling['fill_id']
        break
    return fill_id | 4575e3891eebf4a85d330ffced44e1c290d78b88 | 693,637 |
def randrange(start, stop=0, step=1):
    """Return a randomly selected element from ``range(start, stop, step)``. This is
    equivalent to ``choice(range(start, stop, step))``, but doesn't actually build a
    range object.

    NOTE(review): this is a stub — it always returns 0 regardless of the
    arguments (presumably a deterministic placeholder); confirm before
    using it where real randomness is expected.
    """
    return 0 | 933fb5624e782f2285c2ec45333ca2acb7fa3650 | 693,638 |
def _Backward3b_v_Ps(P, s):
    """Backward equation for region 3b, v=f(P,s)

    Parameters
    ----------
    P : float
        Pressure [MPa]
    s : float
        Specific entropy [kJ/kgK]

    Returns
    -------
    v : float
        Specific volume [m³/kg]

    References
    ----------
    IAPWS, Revised Supplementary Release on Backward Equations for the
    Functions T(p,h), v(p,h) and T(p,s), v(p,s) for Region 3 of the IAPWS
    Industrial Formulation 1997 for the Thermodynamic Properties of Water and
    Steam, http://www.iapws.org/relguide/Supp-Tv%28ph,ps%293-2014.pdf, Eq 9

    Examples
    --------
    >>> _Backward3b_v_Ps(20,5)
    0.006262101987
    >>> _Backward3b_v_Ps(100,5)
    0.002449610757
    """
    # Exponents (I, J) and coefficients (n) of Eq 9, transcribed from the
    # IAPWS supplementary release — do not edit by hand.
    I = [-12, -12, -12, -12, -12, -12, -10, -10, -10, -10, -8, -5, -5, -5, -4,
         -4, -4, -4, -3, -2, -2, -2, -2, -2, -2, 0, 0, 0, 1, 1, 2]
    J = [0, 1, 2, 3, 5, 6, 0, 1, 2, 4, 0, 1, 2, 3, 0, 1, 2, 3, 1, 0, 1, 2, 3,
         4, 12, 0, 1, 2, 0, 2, 2]
    n = [0.591599780322238e-4, -0.185465997137856e-2, 0.104190510480013e-1,
         0.598647302038590e-2, -0.771391189901699, 0.172549765557036e1,
         -0.467076079846526e-3, 0.134533823384439e-1, -0.808094336805495e-1,
         0.508139374365767, 0.128584643361683e-2, -0.163899353915435e1,
         0.586938199318063e1, -0.292466667918613e1, -0.614076301499537e-2,
         0.576199014049172e1, -0.121613320606788e2, 0.167637540957944e1,
         -0.744135838773463e1, 0.378168091437659e-1, 0.401432203027688e1,
         0.160279837479185e2, 0.317848779347728e1, -0.358362310304853e1,
         -0.115995260446827e7, 0.199256573577909, -0.122270624794624,
         -0.191449143716586e2, -0.150448002905284e-1, 0.146407900162154e2,
         -0.327477787188230e1]
    # Reduced pressure and entropy with the Eq 9 offsets (0.298, 0.816).
    Pr = P/100
    sigma = s/5.3
    suma = 0
    for i, j, ni in zip(I, J, n):
        suma += ni * (Pr+0.298)**i * (sigma-0.816)**j
    return 0.0088*suma | d43bd7acea998b40196e64225d2cdd6dc995cdda | 693,639 |
import math
def get_pad_value(data, kernel, stride):
    """Compute the (before, after) padding pair for SAME padding."""
    # Output length under SAME padding, then the total padding required.
    out_size = math.ceil(data / stride)
    total_pad = max(0, (out_size - 1) * stride + kernel - data)
    before = total_pad // 2
    return before, total_pad - before
import re
def find_functions(text):
    """Find the names of all functions defined on any line of *text*.

    Returns a de-duplicated list (order unspecified).

    BUG FIX: the old split pattern ``'[ (]*'`` could match the empty
    string, and since Python 3.7 re.split() splits on zero-width matches,
    so element 1 was a single character instead of the name.  ``'[ (]+'``
    splits only on runs of spaces/parens.
    """
    return list(set([
        re.split(r'[ (]+', line)[1]
        for line in [
            line.strip()
            for line in text.splitlines()
            if 'def ' in line
        ]
        if line.startswith('def ')
    ]))
import csv
def load_csv_data(filename, key, encoding='utf8', delimiter=';'):
    """Read a delimited file and return a dict mapping each row's *key*
    column value to the full row dict.

    (The previous docstring claimed a list was returned; the function has
    always returned a dict keyed by the *key* column — the docs now match
    the behaviour.)
    """
    # newline='' is the csv-module recommended open mode.
    with open(filename, 'r', encoding=encoding, newline='') as f:
        reader = csv.DictReader(f, delimiter=delimiter)
        return {row[key]: row for row in reader}
import os
import zipfile
def _extract_fasttext_vectors(zip_path, dest_path="."):
    """ Extracts fastText embeddings from zip file.

    Args:
        zip_path(str): Path to the downloaded compressed zip file.
        dest_path(str): Destination directory for the extracted contents.
            Picks the current working directory by default.

    Returns:
        str: *dest_path* exactly as given (note: NOT converted to an
        absolute path, despite what earlier docs claimed).

    Raises:
        Exception: if *zip_path* does not exist.

    Side effects:
        Deletes the zip file after successful extraction.
    """
    if os.path.exists(zip_path):
        with zipfile.ZipFile(zip_path, "r") as zip_ref:
            zip_ref.extractall(path=dest_path)
    else:
        raise Exception("Zipped file not found!")
    os.remove(zip_path)
    return dest_path | 46e205a2c15bafab30a9f048c4253607affdb0e2 | 693,643 |
def get_org_from_return_url(repo_provider_type, return_url, orgs):
    """
    Helper method to find the specific org from a list of orgs under the
    same contract group.

    This is a hack solution since it totally depends on return_url and the
    repo service provider; it keeps the required refactor minimal.

    :param repo_provider_type: The repo service provider.
    :type repo_provider_type: string
    :param return_url: The URL that will be redirected to after signature.
    :type return_url: string
    :param orgs: Organizations of the repo service provider.
    :return: The matching organization.
    :rtype: any_repo_service_provider.Organization
    """
    # BUG FIX: `is 'github'` compared object *identity*, which only worked
    # by CPython string-interning accident (and warns on modern Python);
    # equality is what is meant.
    if repo_provider_type == 'github':
        split_url = return_url.split('/')  # parse repo name from URL
        target_org_name = split_url[3]
        for org in orgs:
            if org.get_organization_name() == target_org_name:
                return org
        raise Exception('Not found org: {} under current CLA project'.format(target_org_name))
    else:
        raise Exception('Repo service: {} not supported'.format(repo_provider_type))
from typing import Dict
from typing import Any
from pathlib import Path
import json
def check_source(pmfpconf: Dict[str, Any], projectconfig: Dict[str, Any], sourcepackdir: Path, component_string: str) -> Dict[str, Any]:
    """Validate the template pack a component belongs to; return its config.

    Loads the template pack's config JSON and checks that its language and
    execution environment are compatible with the project's.

    Args:
        pmfpconf: global tool configuration (supplies the config file name).
        projectconfig: current project configuration.
        sourcepackdir: directory of the template pack.
        component_string: component identifier, used in error messages.

    Returns:
        The template pack's configuration dict.

    Raises:
        AttributeError: when the pack's language or env mismatches the
            project's (error messages are in Chinese, as in the original).
    """
    with open(sourcepackdir.joinpath(pmfpconf["template_config_name"]), encoding="utf-8") as f:
        sourcepack_config = json.load(f)
    # Language must match when both sides declare one.
    sourcepack_language = sourcepack_config.get("language")
    project_language = projectconfig.get("language")
    if sourcepack_language and project_language and sourcepack_language != project_language:
        raise AttributeError(f"组件{component_string}语言{sourcepack_language}与项目语言{project_language}不匹配")
    # Execution environment must match when both sides declare one.
    sourcepack_env = sourcepack_config.get("env")
    project_env = projectconfig.get("env")
    if project_env and sourcepack_env and sourcepack_env != project_env:
        raise AttributeError(f"组件{component_string}执行环境{sourcepack_env}与项目执行环境{project_env}不匹配")
    return sourcepack_config | 6b8773e3ef614abee6e1dbb5f838d7a7344c295b | 693,646 |
import time
import os
def time_since_file_modified(filename):
    """Return how many seconds ago *filename* was last modified."""
    last_modified = os.path.getmtime(filename)
    return time.time() - last_modified
import math
def get_x_angle(m):
    """
    Return the rotation angle about the x axis extracted from ``m.rotation``.

    Equivalent to: h ? acos(c[2].z / h) * (c[2].y > 0 ? -1 : 1) : 0,
    where h = sqrt(c[2].y^2 + c[2].z^2).
    """
    c = m.rotation
    h = math.sqrt(c[2].y * c[2].y + c[2].z * c[2].z)
    if h > 0:
        # Flip the sign when the y component is positive.
        sign = -1 if c[2].y > 0 else 1
        return math.acos(c[2].z / h) * sign
    return 0
from typing import List
def trim_any_prefixes(word: str, prefixes: List[str]) -> str:
    """Strip the first matching prefix in *prefixes* from *word*, if any."""
    hit = next((p for p in prefixes if word.startswith(p)), None)
    return word if hit is None else word.removeprefix(hit)
def get_variant_list(dataframe):
    """Collect variant coordinates into a list of row-value lists.

    Different fields are selected for different variant types: CNV/SV
    frames (which carry a "type" column) use type instead of ref/alt,
    while SNP/INDEL frames use ref and alt.
    """
    if "type" in dataframe.columns:  # CNV or SV
        fields = ["HGNC_ID", "chr", "start", "end", "type"]
    else:  # SNP or INDEL
        fields = ["HGNC_ID", "chr", "start", "end", "ref", "alt"]
    return dataframe[fields].values.tolist()
import os
def NormalizePath(path, parsed_deps):
    """Normalizes the path.

    Args:
        path: A string representing a path.
        parsed_deps: A map from component path to its component name, repository,
            etc.

    Returns:
        A tuple (component path, component name, normalized path) where the
        component is e.g. blink, skia, etc. and the path is relative to that
        component's repository. Falls back to the chromium component when no
        component matches.
    """
    # First normalize the path by retreiving the normalized path.
    normalized_path = os.path.normpath(path).replace('\\', '/')
    # Iterate through all component paths in the parsed DEPS, in the decreasing
    # order of the length of the file path (longest/most specific first).
    for component_path in sorted(parsed_deps,
                                 key=(lambda path: -len(path))):
        # new_component_path is the component path with 'src/' removed.
        new_component_path = component_path
        if new_component_path.startswith('src/') and new_component_path != 'src/':
            new_component_path = new_component_path[len('src/'):]
        # We need to consider when the lowercased component path is in the path,
        # because syzyasan build returns lowercased file path.
        lower_component_path = new_component_path.lower()
        # If this path is the part of file path, this file must be from this
        # component.
        if new_component_path in normalized_path or \
            lower_component_path in normalized_path:
            # Case when the retreived path is in lowercase.
            if lower_component_path in normalized_path:
                current_component_path = lower_component_path
            else:
                current_component_path = new_component_path
            # Normalize the path by stripping everything off the component's relative
            # path.
            normalized_path = normalized_path.split(current_component_path, 1)[1]
            lower_normalized_path = normalized_path.lower()
            # Add 'src/' or 'Source/' at the front of the normalized path, depending
            # on what prefix the component path uses. For example, blink uses
            # 'Source' but chromium uses 'src/', and blink component path is
            # 'src/third_party/WebKit/Source', so add 'Source/' in front of the
            # normalized path.
            if (lower_component_path == 'src/third_party/webkit/source' and
                not lower_normalized_path.startswith('source/')):
                normalized_path = (current_component_path.split('/')[-2] + '/' +
                                   normalized_path)
            component_name = parsed_deps[component_path]['name']
            return (component_path, component_name, normalized_path)
    # If the path does not match any component, default to chromium.
    return ('src/', 'chromium', normalized_path) | 2306df780f60036ce2c440afd0b40a17a1c4c00b | 693,651 |
def split_PETSc_Mat(mat):
    """ Decompose a PETSc matrix into a symmetric and skew-symmetric
    matrix

    Parameters:
    ----------
    mat : :class: `PETSc4py Matrix`

    Returns:
    --------
    H : :class: `PETSc4py Matrix`
        Symmetric (or Hermitian) component of mat: (A + A^T) / 2
    S : :class: `PETSc4py Matrix`
        Skew-Symmetric (or skew-Hermitian) component of mat: (A^T - A) / 2

    NOTE(review): petsc4py's ``Mat.transpose()`` called without an output
    matrix may transpose in place depending on version — confirm that
    ``mat`` is not modified by these calls.
    """
    # H = 0.5 * (mat + mat^T): start from a zeroed copy, add both parts.
    H = mat.copy()
    H.zeroEntries()
    H.axpy(1.0,mat)
    H.axpy(1.0,mat.transpose())
    H.scale(0.5)
    # S = 0.5 * (mat^T - mat): S is first set to mat, then aypx computes
    # -1 * S + mat^T.
    S = mat.copy()
    S.zeroEntries()
    S.axpy(1.0,mat)
    S.aypx(-1.0,mat.transpose())
    S.scale(0.5)
    return H, S | f78d85382e2ceda7d03e572493113f383e8dd17e | 693,652 |
def decode_word_two(word, mask):
    """
    Decodes the second word in the standard 4 word header.

    Each ``mask.*`` accessor returns a (bitmask, shift) pair; fields are
    extracted as ``(word & bitmask) >> shift``.

    :param word: The word that we're going to decode.
    :param mask: The mask object we'll use to decode that word.
    :return: A dictionary containing the decoded information
    """
    return {
        'event_time_high': (word & mask.event_time_high()[0]) >> mask.event_time_high()[1],
        'cfd_fractional_time': (word & mask.cfd_fractional_time()[0]) >>
                               mask.cfd_fractional_time()[1],
        'cfd_trigger_source_bit': (word & mask.cfd_trigger_source()[0]) >>
                                  mask.cfd_trigger_source()[1],
        'cfd_forced_trigger_bit': (word & mask.cfd_forced_trigger()[0]) >>
                                  mask.cfd_forced_trigger()[1]
    } | 138fd25e79879771f5177ccd7e3c5000153ac307 | 693,653 |
def strip_prefix(full_string, prefix):
    """
    Strip *prefix* from the start of *full_string* and return the result.
    If the prefix is not present the original string is returned unaltered.

    :param full_string: the string from which to remove the prefix
    :param prefix: the prefix to remove
    :return: the string with the prefix removed

    BUG FIX: the old ``rsplit(prefix)[1]`` returned the wrong fragment
    when the prefix occurred more than once (("abcab", "ab") -> "c"
    instead of "cab") and raised ValueError for an empty prefix.
    """
    if full_string.startswith(prefix):
        return full_string[len(prefix):]
    return full_string
def str_to_list(str):
    """
    Split a semicolon-separated string into a list of trimmed,
    non-empty items.

    :param str: the string to split
    :return: List of cleaned-up pieces
    """
    pieces = (piece.strip() for piece in str.split(";"))
    return [piece for piece in pieces if piece]
def get_price(item):
    """Given an SoftLayer_Product_Item, return its default price id (0 if none).

    The default price is the first entry without a locationGroupId.
    """
    return next(
        (price.get('id') for price in item.get('prices', [])
         if not price.get('locationGroupId')),
        0,
    )
from typing import Sequence
def is_seq_int(tp) -> bool:
    """Return True if the input is a non-empty sequence of integers.

    BUG FIX: the old ``tp and ...`` form leaked the falsy input itself
    (e.g. None or []) despite the ``-> bool`` annotation; the result is
    now always an actual bool.
    """
    return bool(tp) and isinstance(tp, Sequence) and all(isinstance(p, int) for p in tp)
import pathlib
import sys
def expand_srcs(srcs: list) -> list:
    """Expand globs in a list of source patterns into a list of Paths.

    Patterns without glob metacharacters are passed through as Paths even
    if the file does not exist; glob patterns that match nothing abort
    the program.
    """
    result = []
    for pattern in srcs:
        if any(meta in pattern for meta in ("*", "[", "?")):
            # BUG FIX: Path.glob returns a lazy generator, so the old
            # `if not expanded` tested the generator object (always
            # truthy) and the error branch was dead.  Materialise first.
            expanded = list(pathlib.Path(".").glob(pattern))
            if not expanded:
                print(f"No files match {pattern}")
                sys.exit(1)
            result.extend(expanded)
        else:
            result.append(pathlib.Path(".", pattern))
    return result
def normalize(seq):
    """
    Rescale each number in *seq* so the entries sum to 1.
    """
    total = float(sum(seq))
    return [item / total for item in seq]
def _reduce_xyfp(x, y):
"""
Rescale FP xy coordinates [-420,420] -> [-1,1] and flip x axis
"""
a = 420.0
return -x/a, y/a | f36bf33d7d38163ca2401ad1b7dba29bbac577e3 | 693,660 |
import torch
def convert_to_radar_frame(pixel_coords, config):
    """Convert pixel coordinates (B x N x 2) to metric coordinates in the radar frame.

    Args:
        pixel_coords (torch.tensor): (B,N,2) pixel coordinates
        config (dict): parsed configuration (cart_pixel_width,
            cart_resolution, gpuid)

    Returns:
        torch.tensor: (B,N,2) metric coordinates
    """
    width = config['cart_pixel_width']
    resolution = config['cart_resolution']
    device = config['gpuid']
    # Metric offset of the image origin; even widths centre between pixels.
    if width % 2 == 0:
        min_range = (width / 2 - 0.5) * resolution
    else:
        min_range = width // 2 * resolution
    batch, num_points, _ = pixel_coords.size()
    # 90-degree rotation/scale between pixel axes and metric axes.
    rot = torch.tensor([[0, -resolution], [resolution, 0]]).expand(batch, 2, 2).to(device)
    trans = torch.tensor([[min_range], [-min_range]]).expand(batch, 2, num_points).to(device)
    metric = torch.bmm(rot, pixel_coords.transpose(2, 1)) + trans
    return metric.transpose(2, 1)
import re
def _serverquote(s):
"""quote a string for the remote shell ... which we assume is sh"""
if not s:
return s
if re.match('[a-zA-Z0-9@%_+=:,./-]*$', s):
return s
return "'%s'" % s.replace("'", "'\\''") | 7615e3b5f2a391762c4bf764df994147c936451c | 693,662 |
def get_file_prefix(fp, seed, N, trial):
    """
    Build the NTK output filename prefix for a given seed, data size and trial.
    """
    return f"{fp}_seed{seed}_data{N}_trial{trial}_"
def _tasklines_from_tasks(tasks: list[dict[str, str]]) -> list[str]:
"""Parse a list of tasks into tasklines suitable for writing."""
tasklines = []
for task in tasks:
meta = [m for m in task.items() if m[0] != "text"]
meta_str = ", ".join("%s:%s" % m for m in meta)
tasklines.append("%s | %s\n" % (task["text"], meta_str))
return tasklines | 295a1adfeb68b69c0aabece5eed33d3e6f161773 | 693,664 |
def _idFromHeaderInfo(headerInfo, isDecoy, decoyTag):
"""Generates a protein id from headerInfo. If "isDecoy" is True, the
"decoyTag" is added to beginning of the generated protein id.
:param headerInfo: dict, must contain a key "id"
:param isDecoy: bool, determines if the "decoyTag" is added or not.
:param decoyTag: str, a tag that identifies decoy / reverse protein entries.
:returns: str, protein id
"""
proteinId = headerInfo['id']
if isDecoy:
proteinId = ''.join((decoyTag, proteinId))
return proteinId | 5e5433b320af1fe363eadc53dc7dd0bea160a73e | 693,665 |
def _cons2_123(m2, L21, L22, L23, d_p2p, d_p2w, keff, Cp, rho, h, vs,
dw, kw, adiabatic=False, conv_approx=False):
"""dz constraint for edge sc touching interior, edge, corner sc"""
term1 = 0.0 # convection term
if not adiabatic:
if conv_approx:
R = 1 / h + dw / 2 / kw
term1 = L22 / m2 / Cp / R # conv / cond to duct MW
else:
term1 = h * L22 / m2 / Cp # conv to wall
term2 = keff * d_p2w / m2 / Cp / L22 # cond to adj edge
term3 = keff * d_p2p / m2 / Cp / L21 # cond to adj int
term4 = keff * d_p2w / m2 / Cp / L23 # cond to adj corner
term5 = rho * vs * d_p2w / m2 # swirl
return 1 / (term1 + term2 + term3 + term4 + term5) | 6ebb600a335c7a94d17fb6e3b031b28f3cfe025c | 693,666 |
def comparison_op(request):
    """
    Pytest fixture providing operator-module comparison functions.

    ``request.param`` is supplied by the fixture's parametrization (the
    ``@pytest.fixture(params=...)`` decorator is not part of this snippet).
    See: https://github.com/pandas-dev/pandas/blob/main/pandas/conftest.py
    """
    return request.param | 8a915475a23414ab300edaf27d73bf893bcffc73 | 693,667 |
def compile_wrfnnrp_bc_Salathe2014_locations(maptable):
    """
    Compile a list of file URLs for the Salathe et al., 2014 bias corrected WRF NNRP data.

    maptable: (dataframe) rows containing at least LAT and LONG_ for each
        interpolated data file
    """
    locations = []
    for _, row in maptable.iterrows():
        basename = '_'.join(['data', str(row['LAT']), str(row['LONG_'])])
        locations.append(
            'http://cses.washington.edu/rocinante/WRF/NNRP/vic_16d/WWA_1950_2010/bc/forcings_ascii/'
            + basename
        )
    return locations
def minervac_sanitize(text):
    """Return *text* with all non-ASCII characters dropped.

    BUG FIX (py2 -> py3): ``str(text.encode('ascii', 'ignore'))`` produced
    the literal "b'...'" repr of the bytes object on Python 3; decoding
    back to str yields the sanitized text itself.
    """
    return text.encode('ascii', 'ignore').decode('ascii')
from typing import Pattern
from typing import List
import re
def extract_regex_strategies(pattern: Pattern, tokens: List[str],
                             sent_idx: int, offset: int = 0):
    """
    Extract markers for a given strategy based on regex patterns.

    Args:
        pattern: compiled regex to search for.
        tokens: tokens of the sentence; only tokens[offset:] are scanned.
        sent_idx: index of the sentence, carried through to the output.
        offset: number of leading tokens to skip.

    Returns:
        A list of matched spans, each a list of (token, sent_idx,
        token_index) tuples where token_index is relative to `tokens`.

    NOTE(review): token boundaries are recovered by counting words before
    the character match; this assumes single-space-joined tokens and
    match boundaries that fall on token edges — sub-token matches would
    mis-align.
    """
    # find matches
    sent = " ".join(tokens[offset:])
    matches = [match.span() for match in re.finditer(pattern, sent)]
    extracted = []
    for match_start, match_end in matches:
        # idx of starting token of the matched span
        tok_start = len(sent[0: match_start].split())
        # idx of ending token
        tok_end = len(sent[0: match_end].split())
        extracted_toks = [(tokens[i+offset], sent_idx, i+offset) for i in range(tok_start, tok_end)]
        extracted.append(extracted_toks)
    return extracted | 0d223f5f99c0f7ea324f40e1b0e6f6748697d2dd | 693,670 |
def prepare_for_file_dump(file_lines):
    """
    Join lines with newlines, replacing negative zeros with space-padded positive zeros.
    """
    cleaned = (line.replace('-0.0000000000000', ' 0.0000000000000')
               for line in file_lines)
    return '\n'.join(cleaned)
def site_magnetization_to_magmom(site_dict):
    """
    Convert site magnetization data to a MAGMOM list used for restart.

    NOTE: Only tested for collinear cases.

    Args:
        site_dict: parsed output dictionary; either the full result
            (containing a 'site_magnetization' key) or the nested
            'site_magnetization' sub-dictionary itself.

    Returns:
        list: total moment ('tot') per site, ordered by integer site index.

    Raises:
        ValueError: if no axis carries site-projected moments.
    """
    # Accept either the wrapping dict or the nested sub-dictionary.
    if 'site_magnetization' in site_dict:
        site_dict = site_dict['site_magnetization']
    site_dict = site_dict['sphere']
    to_use = None
    # Pick the first axis that actually reports per-site moments.
    for symbol in 'xyz':
        if site_dict.get(symbol) and site_dict.get(symbol, {}).get('site_moment'):
            to_use = symbol
            break
    # No available site magnetization for setting MAGMOM, something is wrong
    if to_use is None:
        raise ValueError('No valid site-projected magnetization available')
    # Sort by integer site index so the MAGMOM order matches the site order.
    tmp = list(site_dict[to_use]['site_moment'].items())
    tmp.sort(key=lambda x: int(x[0]))
    return [entry[1]['tot'] for entry in tmp]
import builtins
import contextlib
import functools
import math


@contextlib.contextmanager
def eval_friendly_repr():
    """
    Monkey-patch repr() to make some cases more amenable to eval().
    In particular:
    * object instances repr as "object()" rather than "<object object at ...>"
    * non-finite floats like inf repr as 'float("inf")' rather than just 'inf'
    >>> with eval_friendly_repr():
    ...     repr(object())
    'object()'
    >>> with eval_friendly_repr():
    ...     repr(float("nan"))
    'float("nan")'
    >>> # returns to original behavior afterwards:
    >>> repr(float("nan"))
    'nan'
    >>> repr(object())[:20]
    '<object object at 0x'
    """
    # The docstring's examples use this as a context manager, but the
    # generator was never decorated -- without @contextmanager the
    # ``with`` statement fails with AttributeError on __enter__.
    _orig = builtins.repr
    OVERRIDES = {
        object: lambda o: "object()",
        float: lambda o: _orig(o) if math.isfinite(o) else f'float("{o}")',
    }
    @functools.wraps(_orig)
    def _eval_friendly_repr(obj):
        # Exact-type dispatch: subclasses keep their own repr.
        typ = type(obj)
        if typ in OVERRIDES:
            return OVERRIDES[typ](obj)
        return _orig(obj)
    builtins.repr = _eval_friendly_repr
    try:
        yield
    finally:
        # Restore the original repr even if the body raised.
        assert builtins.repr is _eval_friendly_repr
        builtins.repr = _orig
def compose_document(self):
    """
    Compose a single document node while keeping anchors alive.

    Unlike the stock implementation, the anchor table is deliberately
    NOT reset here, which allows anchors defined in one document to be
    referenced from later documents in the same stream.
    """
    self.get_event()  # drop the DocumentStart event
    root = self.compose_node(None, None)
    self.get_event()  # drop the DocumentEnd event
    return root
def sqrt_decimal_expansion(n: int, precision: int) -> str:
    """Finds the square root of a number to arbitrary decimal precision.

    Uses the classic digit-by-digit (long-division) square-root method,
    so every emitted digit is exact (the result is truncated, never
    rounded).

    Args:
        n: A positive integer value.
        precision: The desired number of digits following the decimal point.

    Returns:
        A string representation of ``sqrt(n)`` in base 10 that includes the
        first ``precision`` digits after the decimal point.
    """
    # break n into two-digit chunks, most significant first
    chunks = []
    while n > 0:
        n, mod = divmod(n, 100)
        chunks.append(mod)
    chunks.reverse()

    expansion = []
    remainder = 0
    root_part = 0

    def next_digit(c: int) -> None:
        # Emit the largest digit a with a * (20*root_part + a) <= c,
        # updating the running remainder and partial root. This single
        # helper replaces the duplicated integer/fraction loops of the
        # original implementation.
        nonlocal remainder, root_part
        a = 1
        while a * (20 * root_part + a) <= c:
            a += 1
        a -= 1
        remainder = c - a * (20 * root_part + a)
        root_part = root_part * 10 + a
        expansion.append(str(a))

    # compute digits before the decimal point
    for carry in chunks:
        next_digit(remainder * 100 + carry)
    expansion.append('.')
    # compute digits after the decimal point (carry is always 0 here)
    for _ in range(precision):
        next_digit(remainder * 100)
    return ''.join(expansion)
def horizon_str_to_k_and_tau(h: str):
    """Parse a horizon string such as 'k=1&tau=0' into the pair (1, 0)."""
    k_part, tau_part = h.split('&')
    # Each part looks like "name=value"; keep only the integer value.
    return int(k_part.split('=')[1]), int(tau_part.split('=')[1])
def get_nodes_labels(properties):
    """Returns the node labels contained in given property graphs.

    :param properties: An iterable of TimedPropertyGraph objects.
    :return: A set of all node labels used across the given property graphs.
    """
    return {
        prop.get_node_label(node)
        for prop in properties
        for node in prop.graph.nodes
    }
def get_vm(app, nodename, scope='deployment'):
    """Return the VM *nodename* from *app*; raise RuntimeError if absent."""
    vms = app.get(scope, {}).get('vms', [])
    match = next((vm for vm in vms if vm['name'] == nodename), None)
    if match is None:
        raise RuntimeError('Application `{}` unknown vm `{}`.'.format(app['name'], nodename))
    return match
import subprocess
def get_throttled():
    """
    Return the Raspberry Pi throttled-state bitmask from vcgencmd.

    Bit meanings:
    0: under-voltage
    1: arm frequency capped
    2: currently throttled
    16: under-voltage has occurred
    17: arm frequency capped has occurred
    18: throttling has occurred
    """
    output = subprocess.check_output(
        ['/opt/vc/bin/vcgencmd', 'get_throttled']).decode('utf-8')
    # vcgencmd prints "throttled=0x...", so everything after the first
    # '=' is the hex-encoded bitmask.
    eq_index = output.find('=')
    return int(output[eq_index + 1:].strip(), 16)
import itertools
def hamming_hashes(hashval, nbits, nmax=None):
    """Iterate over all integer hashes in order of Hamming distance.

    Parameters
    ----------
    hashval : integer
        hash value to match
    nbits : integer
        number of bits in the hash
    nmax : integer (optional)
        if specified, halt the iterator after the given number of results
    """
    if nmax is not None:
        # Delegate to the unbounded generator and truncate it.
        return itertools.islice(hamming_hashes(hashval, nbits), nmax)
    base = int(hashval)
    bit_masks = [1 << i for i in range(nbits)]
    # Distance 0 first (no flips), then every combination of flipped bits.
    return (
        base ^ sum(chosen)
        for flip_count in range(nbits + 1)
        for chosen in itertools.combinations(bit_masks, flip_count)
    )
import os
def generate_commands_diversity(exp_dir, n_train, n_trains_min, n_cal_maj, n_cals_min, n_test, n_test_maj,
                                n_test_min, lbds, runs, n_runs_test, k_maj, k_min, alpha, classifier_type,
                                umb_num_bins, train_cal_maj_raw_path, train_cal_min_raw_path, test_raw_path,
                                noise_ratio_maj=0, noise_ratios_min=None):
    """
    Generate a list of shell commands for the diversity experiment setup.

    Each element of the returned list is itself a list of seven command
    strings for one experimental configuration: data generation,
    classifier training, css, naive css, ucss, isotonic-regression
    shortlisting and Platt-scaling shortlisting.

    Raises:
        ValueError: if ``classifier_type`` is not one of "LR", "MLP", "NB".
    """
    # A mutable default argument ([-1]) would be shared across calls;
    # use None as the sentinel and substitute the intended default here.
    if noise_ratios_min is None:
        noise_ratios_min = [-1]
    commands = []
    for n_train_min in n_trains_min:
        n_train_maj = n_train - n_train_min
        for noise_ratio_min in noise_ratios_min:
            for n_cal_min in n_cals_min:
                for lbd in lbds:
                    for run in runs:
                        # Unique prefix identifying this configuration's artifacts.
                        exp_identity_string = "_".join([str(n_train_min), str(noise_ratio_min), str(n_cal_min), lbd, str(run)])
                        train_data_path = os.path.join(exp_dir, exp_identity_string + "_train_data.pkl")
                        cal_data_maj_path = os.path.join(exp_dir, exp_identity_string + "_cal_data_maj.pkl")
                        cal_data_min_path = os.path.join(exp_dir, exp_identity_string + "_cal_data_min.pkl")
                        scaler_path = os.path.join(exp_dir, exp_identity_string + "_scaler.pkl")
                        data_generation_command = "python ./scripts/generate_data_diversity.py --n_train_maj {} " \
                                                  "--n_train_min {} --n_cal_maj {} --n_cal_min {} " \
                                                  "--train_cal_maj_raw_path {} --train_cal_min_raw_path {} " \
                                                  "--train_data_path {} --cal_data_maj_path {} --cal_data_min_path {} " \
                                                  "--scaler_path {}".format(n_train_maj, n_train_min, n_cal_maj, n_cal_min,
                                                                            train_cal_maj_raw_path, train_cal_min_raw_path,
                                                                            train_data_path, cal_data_maj_path,
                                                                            cal_data_min_path, scaler_path)
                        classifier_path = os.path.join(exp_dir, exp_identity_string + "_classifier.pkl")
                        # Only logistic regression takes the per-group noise ratios.
                        if classifier_type == "LR":
                            train_classifier_command = "python ./src/train_LR.py --train_data_path {} --lbd {} " \
                                                       "--noise_ratio_maj {} --noise_ratio_min {} " \
                                                       "--classifier_path {}".format(train_data_path, lbd,
                                                                                     noise_ratio_maj, noise_ratio_min,
                                                                                     classifier_path)
                        elif classifier_type == "MLP":
                            train_classifier_command = "python ./src/train_MLP.py --train_data_path {} --lbd {} " \
                                                       "--classifier_path {}".format(train_data_path, lbd, classifier_path)
                        elif classifier_type == "NB":
                            train_classifier_command = "python ./src/train_NB.py --train_data_path {} " \
                                                       "--classifier_path {}".format(train_data_path, classifier_path)
                        else:
                            raise ValueError("Classifier {} not supported".format(classifier_type))
                        css_result_path = os.path.join(exp_dir, exp_identity_string + "_css_result.pkl")
                        css_command = "python ./src/css_diversity.py --cal_data_maj_path {} --cal_data_min_path {} " \
                                      "--test_raw_path {} --classifier_path {} --result_path {} --k_maj {} --k_min {} " \
                                      "--m_maj {} --m_min {} --alpha {} --scaler_path " \
                                      "{}".format(cal_data_maj_path, cal_data_min_path, test_raw_path, classifier_path,
                                                  css_result_path, k_maj, k_min, n_test_maj, n_test_min, alpha, scaler_path)
                        css_naive_result_path = os.path.join(exp_dir, exp_identity_string + "_css_naive_result.pkl")
                        css_naive_command = "python ./src/css_diversity_naive.py --cal_data_maj_path {} " \
                                            "--cal_data_min_path {} --test_raw_path {} --classifier_path {} " \
                                            "--result_path {} --k_maj {} --k_min {} --m_maj {} --m_min {} --alpha {} " \
                                            "--scaler_path {}".format(cal_data_maj_path, cal_data_min_path, test_raw_path,
                                                                      classifier_path, css_naive_result_path, k_maj, k_min,
                                                                      n_test_maj, n_test_min, alpha, scaler_path)
                        ucss_result_path = os.path.join(exp_dir, exp_identity_string + "_ucss_result.pkl")
                        ucss_command = "python ./src/ucss_diversity.py --test_raw_path {} " \
                                       "--classifier_path {} --result_path {} --k_maj {} --k_min {} --m {} " \
                                       "--n_runs_test {} --scaler_path {}".format(test_raw_path, classifier_path,
                                                                                  ucss_result_path, k_maj, k_min, n_test,
                                                                                  n_runs_test, scaler_path)
                        iso_reg_ss_result_path = os.path.join(exp_dir, exp_identity_string + "_iso_reg_ss_result.pkl")
                        iso_reg_ss_command = "python ./src/iso_reg_ss_diversity.py --cal_data_maj_path {} " \
                                             "--cal_data_min_path {} --test_raw_path {} --classifier_path {} " \
                                             "--result_path {} --k_maj {} --k_min {} --m_maj {} --m_min {} " \
                                             "--scaler_path {}".format(cal_data_maj_path, cal_data_min_path, test_raw_path,
                                                                       classifier_path, iso_reg_ss_result_path, k_maj,
                                                                       k_min, n_test_maj, n_test_min, scaler_path)
                        platt_scal_ss_result_path = os.path.join(exp_dir, exp_identity_string + "_platt_scal_ss_result.pkl")
                        platt_scal_ss_command = "python ./src/platt_scal_ss_diversity.py --cal_data_maj_path {} " \
                                                "--cal_data_min_path {} --test_raw_path {} --classifier_path {} " \
                                                "--result_path {} --k_maj {} --k_min {} --m {} --n_runs_test {} " \
                                                "--scaler_path {}".format(cal_data_maj_path, cal_data_min_path,
                                                                          test_raw_path, classifier_path,
                                                                          platt_scal_ss_result_path, k_maj, k_min, n_test,
                                                                          n_runs_test, scaler_path)
                        exp_commands = [data_generation_command, train_classifier_command, css_command, css_naive_command,
                                        ucss_command, iso_reg_ss_command, platt_scal_ss_command]
                        commands.append(exp_commands)
    return commands
import subprocess
def pingIP(ip):
    """Ping the hostname or IP once; return True if a response was received."""
    # Pass the arguments as a list with the default shell=False so that a
    # hostile hostname cannot inject shell commands (the previous version
    # interpolated `ip` into a shell string and ran it with shell=True).
    # -c 1: send a single probe; -W 1: give up after one second.
    process = subprocess.run(['ping', '-c', '1', '-W', '1', ip],
                             stdout=subprocess.PIPE)
    return process.returncode == 0
def extract_authors(book):
    """Given a book element, return its author names as one comma-separated string."""
    names = [author.find('name').text for author in book.findall('.//author')]
    return ', '.join(names)
import json
def get_authentication_headers(test_client):
    """
    Sign up and sign in two fixture users, returning their
    Authorization header values.

    Args:
        test_client: Flask test client used to issue the auth requests.

    Returns:
        list[str]: two "Bearer <token>" strings, one per user.
    """
    def _register_and_login(user, credentials):
        # Sign the user up (the response is ignored: the account may
        # already exist from a previous test run), then sign in and pull
        # the JWT out of the signin response body.
        test_client.post('/api/v2/auth/signup',
                         data=json.dumps(user),
                         content_type='application/json')
        response = test_client.post('/api/v2/auth/signin',
                                    data=json.dumps(credentials),
                                    content_type='application/json')
        result = json.loads(response.data.decode('utf8'))
        return 'Bearer ' + result['access_token']

    first_user = {'first_name': 'Kunihiko',
                  'last_name': 'Kawani',
                  'email': 'kawa@gmail.com',
                  'password': "password1234",
                  'confirm_password': "password1234"}
    first_login = {'email': 'kawa@gmail.com',
                   'password': "password1234"}
    second_user = {'first_name': 'Samurai',
                   'last_name': 'Warr',
                   'email': 'samwarr@gmail.com',
                   'password': "password123",
                   "confirm_password": "password123"}
    second_login = {'email': 'samwarr@gmail.com',
                    'password': "password123"}
    return [_register_and_login(first_user, first_login),
            _register_and_login(second_user, second_login)]
def sanitize_word(word):
    """Return *word* with common punctuation characters removed.

    Strips: . , ? : ( ) * ; " ! [ ]
    """
    # A single translate() pass replaces the original chain of eleven
    # .replace() calls; every listed character maps to None (deletion).
    return word.translate(str.maketrans('', '', '.,?:()*;"![]'))
def get_intersecting_reads(reads1, reads2):
    """
    Return reads whose query names appear in both the breakpoint 1 and
    breakpoint 2 files.

    Each result entry is ``[query_name, "chrom start end" (breakpoint 1),
    "chrom start end" (breakpoint 2)]``. Only the first alignment seen
    per query name in ``reads1`` is used; every matching alignment in
    ``reads2`` produces an entry.
    """
    # Map each query name in reads1 to its formatted reference span.
    # (The original code also built an unused read2_dict; removed.)
    read1_spans = {}
    for read1 in reads1:
        if read1.query_name not in read1_spans:
            read1_spans[read1.query_name] = ' '.join([
                read1.reference_name,
                str(read1.reference_start),
                str(read1.reference_end)])
    intersecting_reads = []
    for read2 in reads2:
        span1 = read1_spans.get(read2.query_name)
        if span1 is not None:
            span2 = ' '.join([
                read2.reference_name,
                str(read2.reference_start),
                str(read2.reference_end)])
            intersecting_reads.append([read2.query_name, span1, span2])
    return intersecting_reads
def _inner_compare(gtid_set, uuid, rngs):
"""Method: inner_compare
Description: Checks to see if the UUID is in the GTID Set passed
to the method.
Arguments:
(input) gtid_set -> GTIDSet instance.
(input) uuid -> Universal Unqiue Identifier.
(input) rngs -> Set of ranges.
(output) -> True|False on whether UUID was detected.
"""
# UUID not in lhs ==> right hand side has more
if uuid not in gtid_set.gtids:
return True
else:
for rng1, rng2 in zip(rngs, gtid_set.gtids[uuid]):
if rng1 != rng2:
return True
return False | 583168117f21e6cf345bcf86d3ea0db310887f33 | 693,688 |
def curtailment_cost_rule(mod, g, tmp):
    """
    Apply the per-project curtailment cost to both scheduled and
    subhourly curtailment for project *g* at timepoint *tmp*.
    """
    total_curtailment = (
        mod.GenVar_Scheduled_Curtailment_MW[g, tmp]
        + mod.GenVar_Subhourly_Curtailment_MW[g, tmp]
    )
    return total_curtailment * mod.curtailment_cost_per_pwh[g]
import hashlib
def url_to_nutch_dump_path(url, prefix=None):
    """
    Converts URL to nutch dump path (the regular dump with reverse domain,
    not the commons crawl dump path).

    :param url: valid url string
    :param prefix: prefix string (default = "")
    :return: nutch dump path prefixed to given path
    """
    domain = url.split("/")[2]
    # hashlib requires bytes; the original passed a str, which raises
    # TypeError on Python 3.
    digest = hashlib.sha256(url.encode("utf-8")).hexdigest().upper()
    return "{0}/{1}/{2}".format("" if prefix is None else prefix.strip("/"),
                                "/".join(reversed(domain.split("."))),
                                digest)
def PQapplyInvertDict():
    """
    Return a dictionary mapping each PQ quality-flag name to a boolean
    saying whether to apply the flag inversely (all default to False).
    """
    flag_names = (
        'Saturation_1', 'Saturation_2', 'Saturation_3', 'Saturation_4',
        'Saturation_5', 'Saturation_61', 'Saturation_62', 'Saturation_7',
        'Contiguity', 'Land_Sea', 'ACCA', 'Fmask',
        'CloudShadow_1', 'CloudShadow_2', 'Empty_1', 'Empty_2')
    return dict.fromkeys(flag_names, False)
def get_pic_filename(d):
    """
    d: datetime.datetime instance
    Returns a timestamped JPEG filename such as '20180205091142.jpg'.
    """
    return d.strftime("%Y%m%d%H%M%S") + ".jpg"
import os
def find_results_home_ascending(cur_dir=".", dirname="results"):
    """From ``cur_dir``, ascend the directory tree to find the results home.

    .. warning::
        Only tested to work correctly on POSIX-like systems. Not sure what
        will happen on Windows, where the path separator is reversed.

    :param cur_dir: Starting path. Defaults to the current directory.
    :type cur_dir: str, optional
    :param dirname: Name of the results home directory.
    :type dirname: str, optional
    :raises FileNotFoundError: If the results home directory can't be found
        by traversing up the directory tree.
    :returns: Absolute path to the results home directory.
    :rtype: str
    """
    # change cur_dir to absolute path. The original hard-coded
    # os.path.abspath("."), silently ignoring the caller's starting path.
    cur_dir = os.path.abspath(cur_dir)
    # if cur_dir doesn't exist, raise error
    if not os.path.exists(cur_dir):
        raise FileNotFoundError("cur_dir does not exist")
    # must be a directory
    if not os.path.isdir(cur_dir):
        raise ValueError("cur_dir must be a directory path")
    # while we haven't reached the root directory yet (path is empty)
    while cur_dir != "":
        # get list of files in this directory
        files = os.listdir(cur_dir)
        # if our desired directory name is here and is a directory, then
        # append "/" + dirname to cur_dir and stop searching
        if dirname in files and os.path.isdir(f"{cur_dir}/{dirname}"):
            cur_dir = f"{cur_dir}/{dirname}"
            break
        # else, we drop one more directory level
        cur_dir = "/".join(cur_dir.split("/")[:-1])
    # if cur_dir is empty, we walked off the root without finding it
    if cur_dir == "":
        raise FileNotFoundError(
            f"couldn't find dir {dirname} by walking up the directory tree"
        )
    # else return, we are done
    return cur_dir
def split_by_value(total, nodes, headdivisor=2.0):
    """Produce (sum, head), (sum, tail) for nodes to attempt a binary partition.

    Nodes are pulled from the tail of ``nodes`` into the head group until
    their accumulated weights (``node[0]``) reach ``total / headdivisor``.
    """
    target = total / headdivisor
    head_sum = 0
    cut = 0  # negative index marking the head/tail boundary
    for node in reversed(nodes):
        if head_sum >= target:
            break
        head_sum += node[0]
        cut -= 1
    return (head_sum, nodes[cut:]), (total - head_sum, nodes[:cut])
from datetime import datetime
def make_timestamp(date_string):
    """
    A row-operation that converts an Efergy timestamp of the form
    "2015-12-31 12:34:56" into a Python datetime object.

    Returns None when the value is missing or malformed.
    """
    try:
        return datetime.strptime(date_string, '%Y-%m-%d %H:%M:%S')
    except (TypeError, ValueError):
        # ValueError: malformed string; TypeError: non-string input.
        # The previous bare ``except`` also swallowed KeyboardInterrupt
        # and SystemExit.
        return None
def format_kraken2_report_row(report_node):
    """
    Render one tab-separated row of a kraken2-style report from a
    ReportNode instance.
    """
    # Two spaces of indentation per depth level, prepended to the name.
    indent = ' ' * (2 * report_node.offset)
    # A non-zero rank depth is appended to the rank code (e.g. "S2").
    depth_suffix = '' if report_node.rank_depth == 0 else str(report_node.rank_depth)
    return '\t'.join([
        str(report_node.ratio),
        str(report_node.hits_at_clade),
        str(report_node.hits_at_node),
        report_node.rank_code + depth_suffix,
        str(report_node.node_taxid),
        indent + report_node.name,
    ])
def _scaled_average(numbers, scalar):
"""Internal utility function to calculate scaled averages."""
average = sum(numbers) / len(numbers)
return scalar * average | 53031377d8134e55f9f20f06e36994077897cff2 | 693,700 |
import os
def get_random_bits(num_bits):
    """
    Return a string of random lowercase hexadecimal characters.

    Note: despite the name, ``num_bits`` is the number of random *bytes*
    drawn, so the returned string has ``2 * num_bits`` hex digits.
    """
    random_bytes = os.urandom(num_bits)
    return random_bytes.hex()
def steering_drop_condition(steering):
    """
    Condition for dropping a frame: element-wise True wherever the
    steering value is exactly zero.
    """
    raw_values = steering.values
    return raw_values == 0.0
import re
def parse_shift_spec(spans: str = ""):
    """
    Parse a shift specification line of the form
    '09:00-12:00, 15:00-18:00, ...' into a list of [start, end] pairs
    expressed as minutes past midnight.

    Raises:
        Exception: if any interval does not match HH:MM-HH:MM.
    """
    if spans.replace(" ", "") == "":
        return []
    # Raw string: "\d" inside a normal literal is an invalid escape sequence.
    pattern = re.compile(r"^(\d\d):(\d\d)-(\d\d):(\d\d)$")

    def parse_interval(interval):
        match = pattern.match(interval.replace(" ", ""))
        if match is None:
            raise Exception("invalid shift specification {}".format(interval))
        start = int(match.group(1)) * 60 + int(match.group(2))
        end = int(match.group(3)) * 60 + int(match.group(4))
        return [start, end]

    return [parse_interval(s) for s in spans.split(",")]
def trace_depth(pair, stacks, depth):
    """
    Helper for tracing base-pair "depth" while building a dot-bracket
    notation string.

    :param pair: Pair to analyze (indexable; each base has ``.position``)
    :param stacks: Current pair "stacks", keyed by depth
    :param depth: Current depth of the main function
    :return: Depth at which make_dot_notation should now operate
    """
    for stacked in stacks[depth]:
        close_pos = stacked[1].position
        # The candidate pair nests one level deeper when it straddles the
        # closing base of a pair already recorded at this depth.
        if pair[1].position > close_pos > pair[0].position:
            return depth + 1
    return depth
import logging
def run_evaluation_episodes(env, agent, n_runs, max_episode_len=None,
                            explorer=None, logger=None):
    """Run multiple evaluation episodes and return their returns.

    Args:
        env (Environment): Environment used for evaluation.
        agent (Agent): Agent to evaluate.
        n_runs (int): Number of evaluation runs.
        max_episode_len (int or None): If specified, episodes longer than
            this value are truncated.
        explorer (Explorer): If specified, used for selecting actions
            instead of pure greedy action selection.
        logger (Logger or None): Logger for results; defaults to this
            module's logger.

    Returns:
        List of per-episode returns (floats).
    """
    logger = logger or logging.getLogger(__name__)
    scores = []
    for episode in range(n_runs):
        obs = env.reset()
        episode_return = 0
        steps = 0
        done = False
        while not (done or steps == max_episode_len):
            def greedy_action_func():
                return agent.act(obs)
            if explorer is None:
                action = greedy_action_func()
            else:
                action = explorer.select_action(steps, greedy_action_func)
            obs, reward, done, info = env.step(action)
            episode_return += reward
            steps += 1
        agent.stop_episode()
        # Cast to float so mixing numpy scalars with plain floats does
        # not break downstream statistics functions.
        scores.append(float(episode_return))
        logger.info('test episode: %s R: %s', episode, episode_return)
    return scores
def sum_of_digits(number: int) -> int:
    """Return the sum of the decimal digits of a non-negative integer."""
    # Iterate the string form directly; the intermediate list(map(...))
    # of the original added nothing.
    return sum(int(digit) for digit in str(number))
def get_iscsi_connections(client):
    """Display iSCSI connections.

    Returns:
        List of iSCSI connections reported by the RPC endpoint.
    """
    rpc_method = 'get_iscsi_connections'
    return client.call(rpc_method)
def response_ok():
    """Return the canonical SPAMD OK response as bytes."""
    ok_reply = b"SPAMD/1.5 0 EX_OK\r\n\r\n"
    return ok_reply
def abs(number): # real signature unknown; restored from __doc__
    """
    abs(number) -> number
    Return the absolute value of the argument.
    """
    # NOTE: auto-generated stub that shadows the ``abs`` builtin. The real
    # implementation lives in the interpreter; this placeholder returns 0
    # and must not be called for actual computation.
    return 0 | 1530dc89e01700fac389447d41afbd90daf690d4 | 693,709
def power_provision_rule(mod, p, tmp):
    """
    Net power provided to the system: load shifted down minus load
    shifted up for project *p* at timepoint *tmp*.
    """
    shifted_down = mod.DR_Shift_Down_MW[p, tmp]
    shifted_up = mod.DR_Shift_Up_MW[p, tmp]
    return shifted_down - shifted_up
def binary(n, count=16, reverse=False):
    """
    Render ``n`` as a fixed-width binary string (unlike the builtin
    ``bin``), optionally reversed.

    >>> binary(6789)
    '0001101010000101'
    >>> binary(6789,8)
    '10000101'
    >>> binary(6789,reverse=True)
    '1010000101011000'
    """
    msb_first = "".join(str((n >> shift) & 1)
                        for shift in range(count - 1, -1, -1))
    return msb_first[::-1] if reverse else msb_first
def flip(fn, permutation=(1, 0)):
    """Flips argument order of function according to permutation."""
    def rearranged(*args, **kwargs):
        # Reorder the leading positional args per the permutation; any
        # extras beyond the permutation length pass through untouched.
        leading = tuple(args[i] for i in permutation)
        return fn(*(leading + args[len(permutation):]), **kwargs)
    return rearranged
def and_list(aList, bList):
    """Return the items in aList and in bList."""
    # Preserve aList's order and duplicates; equality-based membership
    # keeps unhashable elements working.
    return [item for item in aList if item in bList]
import re
def is_ztf_name(name):
    """
    Check whether *name* adheres to the ZTF naming scheme
    (e.g. ``ZTF19abcdefg``); returns the re.Match object or None.
    """
    # Raw string literal: "\d" in a normal literal is an invalid escape
    # sequence (DeprecationWarning, SyntaxError in future Pythons).
    return re.match(r"^ZTF[1-2]\d[a-z]{7}$", name)
def build_limit_clause(limit):
    """Build limit clause for a query.

    The LIMIT clause has the form "LIMIT count", "LIMIT offset, count",
    or is the empty string.

    Args:
        limit: None, int, or 1- or 2-element list or tuple.

    Returns:
        A (str LIMIT clause, bind vars) pair.
    """
    if limit is None:
        return '', {}
    if not isinstance(limit, (list, tuple)):
        limit = (limit,)
    if len(limit) == 1:
        return 'LIMIT %(limit_row_count)s', {'limit_row_count': limit[0]}
    return ('LIMIT %(limit_offset)s, %(limit_row_count)s',
            {'limit_offset': limit[0], 'limit_row_count': limit[1]})
def percentile(pmf, percentage):
    """Computes a percentile of a given Pmf.

    percentage: float 0-100

    Returns the first value whose cumulative probability reaches the
    requested fraction (None if the probabilities never reach it).
    """
    threshold = percentage / 100.0
    cumulative = 0
    for val, prob in pmf.Items():
        cumulative += prob
        if cumulative >= threshold:
            return val
import re
def clean_tag(tag):
    """Normalize a player tag: uppercase, map B->8 and O->0, and strip
    every character outside the valid tag alphabet.

    Falsy inputs (None, empty string) are returned unchanged.
    """
    if not tag:
        return tag
    normalized = tag.upper().translate(str.maketrans('BO', '80'))
    return re.sub(r'[^0289CGJLPQRUVY]+', '', normalized)
import gzip
import json
def read_selected_torsions(input_json):
    """Read data generated by select_torsions.py, transparently handling
    gzip-compressed files (``*.gz``).

    Returns
    -------
    selected_torsions: dict
        Dictionary of selected torsions, keyed by canonical torsion index;
        each value carries 'initial_molecules', 'atom_indices' and
        'attributes' entries.
    """
    opener = gzip.open if input_json.endswith(".gz") else open
    with opener(input_json, 'r') as handle:
        return json.load(handle)
def load(filename):
    """Load labels and scores for Hits-at-K evaluation.

    The file format is tab-separated:
        Query \\t Example \\t Label \\t Score
    with rows for the same query assumed to be contiguous.

    :param filename: Filename to load.
    :return: list_of_list_of_labels, list_of_list_of_scores
    """
    all_labels = []
    all_scores = []
    block_labels = []
    block_scores = []
    previous_block = ""
    with open(filename, 'r') as handle:
        for line in handle:
            fields = line.strip().split("\t")
            block_name = fields[0]
            # A new query name closes out the previous block.
            if block_name != previous_block and previous_block != "":
                all_labels.append(block_labels)
                all_scores.append(block_scores)
                block_labels = []
                block_scores = []
            block_labels.append(int(fields[2]))
            block_scores.append(float(fields[3]))
            previous_block = block_name
    # Flush the final (possibly empty) block.
    all_labels.append(block_labels)
    all_scores.append(block_scores)
    return all_labels, all_scores
def filter_bad_pixels(dataset):
    """Use the Data Quality Flag (DQF) to mask out bad pixels.

    Each pixel carries a DQF from 0 (good) to 3 (no value). Following
    NOAA's suggestion, pixels flagged 2 or 3 are replaced with NaN.

    Returns
    -------
    xr.core.dataset.Dataset
        The dataset with every pixel whose DQF is greater than 1 set
        to ``np.nan``.
    """
    good_quality = dataset.DQF.isin([0, 1])
    return dataset.where(good_quality)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.