| content (string, lengths 35–762k) | sha1 (string, length 40) | id (int64, 0–3.66M) |
|---|---|---|
def get_duration_and_elevation(table):
    """Return a tuple of (duration, elevation gain) strings from an HTML table"""
    try:
        hiking_duration = str(table.contents[0].text.strip())  # av.note: want this to be numeric
    except Exception:
        hiking_duration = ""
try:
elevation_gain_ft = str(
table.contents[2]
.text.strip()
.replace("ft", "")
.replace(",", "")
.replace("with three different ascents", "")
.replace("with multiple ascents", "")
.replace("with two ascents", "")
.replace("with two different ascents", "")
.strip()
) #av.note: want this to be numeric
    except Exception:
        elevation_gain_ft = ""
return hiking_duration, elevation_gain_ft
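
# Usage sketch (not from the source): feed the parser a minimal table whose
# cell layout matches what it expects (duration in child 0, elevation in child 2).
from bs4 import BeautifulSoup

_html = "<table><td>4 hours</td><td>-</td><td>1,250 ft</td></table>"
_table = BeautifulSoup(_html, "html.parser").find("table")
print(get_duration_and_elevation(_table))  # expected: ('4 hours', '1250')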
|
d52ca3c6e5d75ff936e44b452b05790db931dc6e
| 3,646,828
|
def show_comparison(model, X_test, y_test, A_test, protected_features, postprocess_preds):
    """
    Returns a Dashboard comparing models on the trade-off between disparity and accuracy
    """
    dashboard = FairlearnDashboard(sensitive_features=A_test, sensitive_feature_names=protected_features,
                                   y_true=y_test,
                                   y_pred={"Unmitigated": model.predict(X_test),
                                           "ThresholdOptimizer": postprocess_preds})
    return dashboard
|
bc92c90c67f16c53c8c847556779d1ad923dc56c
| 3,646,829
|
import numpy as np
def my_edge(bw, threshold):
    """
    2018.11.26
    Detect image edges.
    Returns a binary image of the detected edges.
    The threshold is used to suppress detected noise.
    Time complexity:
    Args:
        bw: a grey-scale image with 8-bit depth
        threshold: a decimal between 0 and 1
    Returns:
        bw_edge_binary: binarized edge image
    Raises:
    """
m, n = bw.shape
bw0 = bw.astype(np.int16)
bw_edge_rows = np.zeros([m, n])
bw_edge_cols = np.zeros([m, n])
for i in range(m-1):
bw_edge_rows[i, :] = abs(bw0[i+1, :] - bw0[i, :])
bw_edge_rows[m-1, :] = 0
for j in range(n-1):
bw_edge_cols[:, j] = abs(bw0[:, j+1] - bw0[:, j])
bw_edge_cols[:, n-1] = 0
bw_edge = np.sqrt(bw_edge_cols*bw_edge_cols + bw_edge_rows*bw_edge_rows)
index_threshold = bw_edge.max()*threshold
bw_edge_binary = np.zeros([m, n])
for i in range(m):
for j in range(n):
if bw_edge[i, j] > index_threshold:
bw_edge_binary[i, j] = 1
return bw_edge_binary
|
ea5ffd4869f0b5636ff73691761bac88316aad34
| 3,646,830
|
import numpy as np
from scipy import sparse
from ctypes import POINTER, c_int, c_float, c_double
def csc_matvec(csc, x):
    """
    Matrix-vector multiplication
    using the CSC format
    """
    if not sparse.isspmatrix_csc(csc):
        raise TypeError("Matrix must be in csc format")
    nrow, ncol = csc.shape
    nnz = csc.data.shape[0]
    if x.size != ncol:
        raise ValueError(f"wrong dimension: x.size={x.size}, expected {ncol}")
xx = np.require(x, requirements="C")
if csc.dtype == np.float32:
y = np.zeros((nrow), dtype=np.float32)
libsparsetools.scsc_matvec(c_int(nrow), c_int(ncol), c_int(nnz),
csc.indptr.ctypes.data_as(POINTER(c_int)),
csc.indices.ctypes.data_as(POINTER(c_int)),
csc.data.ctypes.data_as(POINTER(c_float)),
xx.ctypes.data_as(POINTER(c_float)),
y.ctypes.data_as(POINTER(c_float)))
elif csc.dtype == np.float64:
y = np.zeros((nrow), dtype=np.float64)
libsparsetools.dcsc_matvec(c_int(nrow), c_int(ncol), c_int(nnz),
csc.indptr.ctypes.data_as(POINTER(c_int)),
csc.indices.ctypes.data_as(POINTER(c_int)),
csc.data.ctypes.data_as(POINTER(c_double)),
xx.ctypes.data_as(POINTER(c_double)),
y.ctypes.data_as(POINTER(c_double)))
else:
raise ValueError("Not implemented")
return y
|
fa04c4e208333327ce6e4073a27b43f17ffb7dea
| 3,646,831
|
def encode_base58(s) -> str:
    """
    Encodes/converts any bytes to a Base58 string to transmit a public key
    """
count = 0
for c in s:
if c == 0:
count += 1
else:
break
num = int.from_bytes(s, 'big')
prefix = '1' * count
result = ''
while num > 0:
num, mod = divmod(num, 58)
result = BASE58_ALPHABET[mod] + result
return prefix + result
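
# Usage sketch (not from the source). BASE58_ALPHABET is defined elsewhere in
# the original module; the conventional Bitcoin alphabet is assumed here.
BASE58_ALPHABET = '123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz'

print(encode_base58(b'\x00\x00hello'))  # leading zero bytes map to leading '1' characters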
|
064867d9185a06f26c8f033ab04ac38621c48869
| 3,646,833
|
def save_chapter(
body,
source_lang,
target_lang,
title,
public=False,
user=None):
"""Save chapter to database
Parameters:
body (string): input text
source_lang (string): source language
target_lang (string): target language
title (string): title of the chapter
public: visible to all users if true
user (User object): user that created the chapter
Returns:
Chapter: Chapter object created from the given parameters
boolean: True if text was analyzed, False if not
"""
# save chapter
chapter = Chapter()
chapter.body = body
chapter.created_by = user
chapter.title = title
chapter.source_lang = source_lang
chapter.target_lang = target_lang
chapter.public = public
chapter.save()
fulltext = title + ' ' + body
doc = spacy_analyze(fulltext, source_lang)
if doc:
word_properties = analyze_text(doc)
word_list = translate_words(
word_properties,
source_lang,
target_lang
)
# save word properties related to chapter
for w in word_list:
properties = word_properties.get(w.lemma)
wp = WordProperties()
if properties:
if properties['pos'] == w.pos:
wp.frequency = properties['count']
token_list = properties.get('orig')
if token_list:
wp.token = ', '.join(token_list)
wp.chapter = chapter
wp.word = w
wp.save()
return (chapter, True)
return (chapter, False)
|
9d85acb0a08d8e44bac86f7d3c5bec24b67a3cc1
| 3,646,836
|
import numpy as np
def frequency(g, k, h):
    """
    Computes the frequency for a given wave number and water depth
    (linear dispersion relationship)
    :param g: gravitational acceleration
    :param k: the wave number
    :param h: the water depth
    :returns omega: wave frequency
    """
return np.sqrt(g * k * np.tanh(k * h))
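
# Worked example (illustrative): in deep water (k*h >> 1), tanh(k*h) -> 1,
# so the dispersion relation reduces to omega ~ sqrt(g*k).
g, k, h = 9.81, 0.1, 1000.0   # k*h = 100, effectively deep water
omega = frequency(g, k, h)    # ~ sqrt(9.81 * 0.1) ~ 0.99 rad/s
print(omega)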
|
c81b6721ea874506937d245bd886f129f01b69e2
| 3,646,837
|
def primitive_name(method_name):
    """Given a method_name, returns the corresponding Phylanx primitive.
    This is primarily used for mapping NumPy mapped_methods to Phylanx primitives,
    but there are also other Python functions that map to primitives
    with a different name in Phylanx, e.g., `print` is mapped to `cout`.
    """
    return mapped_methods.get(method_name, method_name)
|
d6b1cc670503a8e8bade585f0a875b7bde4f743a
| 3,646,838
|
import math

import numpy as np

SEED = 42  # default random seed; the concrete value here is an assumption, not from the source

def _split_pandas_data_with_ratios(data, ratios, seed=SEED, shuffle=False):
"""Helper function to split pandas DataFrame with given ratios
Note:
Implementation referenced from `this source
<https://stackoverflow.com/questions/38250710/how-to-split-data-into-3-sets-train-validation-and-test>`_.
Args:
data (pd.DataFrame): Pandas data frame to be split.
ratios (list of floats): list of ratios for split. The ratios have to sum to 1.
seed (int): random seed.
shuffle (bool): whether data will be shuffled when being split.
Returns:
list: List of pd.DataFrame split by the given specifications.
"""
    if not math.isclose(math.fsum(ratios), 1.0):
        raise ValueError("The ratios have to sum to 1")
split_index = np.cumsum(ratios).tolist()[:-1]
if shuffle:
data = data.sample(frac=1, random_state=seed)
splits = np.split(data, [round(x * len(data)) for x in split_index])
# Add split index (this makes splitting by group more efficient).
for i in range(len(ratios)):
splits[i]["split_index"] = i
return splits
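
# Usage sketch (not from the source): a 70/20/10 split of a toy frame.
import pandas as pd

_df = pd.DataFrame({"x": range(10)})
_train, _valid, _test = _split_pandas_data_with_ratios(_df, [0.7, 0.2, 0.1])
print(len(_train), len(_valid), len(_test))  # 7 2 1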
|
19b2ddd97a803042d1ac27df47a56b5157fd4e96
| 3,646,839
|
import requests
from datetime import datetime
from lxml.html import fromstring
from unidecode import unidecode
def get_stock_information(stock, country, as_json=False):
"""
This function retrieves fundamental financial information from the specified stock. The retrieved
information from the stock can be valuable as it is additional information that can be used combined
with OHLC values, so to determine financial insights from the company which holds the specified stock.
Args:
stock (:obj:`str`): symbol of the stock to retrieve its information from.
country (:obj:`country`): name of the country from where the stock is from.
as_json (:obj:`bool`, optional):
optional argument to determine the format of the output data (:obj:`dict` or :obj:`json`).
Returns:
:obj:`pandas.DataFrame` or :obj:`dict`- stock_information:
The resulting :obj:`pandas.DataFrame` contains the information fields retrieved from Investing.com
from the specified stock ; it can also be returned as a :obj:`dict`, if argument `as_json=True`.
If any of the information fields could not be retrieved, that field/s will be filled with
None values. If the retrieval process succeeded, the resulting :obj:`dict` will look like::
stock_information = {
"Stock Symbol": "AAPL",
"Prev. Close": 267.25,
"Todays Range": "263.45 - 268.25",
"Revenue": 260170000000.00003,
"Open": 267.27,
"52 wk Range": "142 - 268.25",
"EPS": 11.85,
"Volume": 23693550.0,
"Market Cap": 1173730000000.0,
"Dividend (Yield)": "3.08 (1.15%)",
"Average Vol. (3m)": 25609925.0,
"P/E Ratio": 22.29,
"Beta": 1.23,
"1-Year Change": "47.92%",
"Shares Outstanding": 4443236000.0,
"Next Earnings Date": "04/02/2020"
}
Raises:
ValueError: raised if any of the introduced arguments is not valid or errored.
FileNotFoundError: raised if `stocks.csv` file was not found or errored.
IOError: raised if `stocks.csv` file is empty or errored.
RuntimeError: raised if scraping process failed while running.
ConnectionError: raised if the connection to Investing.com errored (did not return HTTP 200)
"""
if not stock:
raise ValueError("ERR#0013: stock parameter is mandatory and must be a valid stock symbol.")
if not isinstance(stock, str):
raise ValueError("ERR#0027: stock argument needs to be a str.")
if country is None:
raise ValueError("ERR#0039: country can not be None, it should be a str.")
if country is not None and not isinstance(country, str):
raise ValueError("ERR#0025: specified country value not valid.")
if not isinstance(as_json, bool):
raise ValueError("ERR#0002: as_json argument can just be True or False, bool type.")
country = unidecode(country.strip().lower())
stock = unidecode(stock.strip().lower())
stocks = _get_stock_data_from_csv(country, stock)
if stocks is None:
raise IOError("ERR#0001: stocks object not found or unable to retrieve.")
if country not in get_stock_countries():
raise RuntimeError("ERR#0034: country " + country.lower() + " not found, check if it is correct.")
if stock not in stocks['symbol'].lower():
raise RuntimeError("ERR#0018: stock " + stock + " not found, check if it is correct.")
tag = stocks['tag']
stock = stocks['symbol']
url = f"https://www.investing.com/equities/{tag}"
head = {
"User-Agent": random_user_agent(),
"X-Requested-With": "XMLHttpRequest",
"Accept": "text/html",
"Accept-Encoding": "gzip, deflate",
"Connection": "keep-alive",
}
req = requests.get(url, headers=head)
if req.status_code != 200:
raise ConnectionError("ERR#0015: error " + str(req.status_code) + ", try again later.")
root_ = fromstring(req.text)
path_ = root_.xpath("//dl[contains(@class, 'grid')]/div")
result = {}
result['Stock Symbol'] = stock
    if not path_:
        raise RuntimeError("ERR#0004: data retrieval error while scraping.")
    for elements_ in path_:
        title_ = elements_[0].text_content()
        value_ = elements_[1].text_content()
        if title_ == "Day's Range":
            title_ = 'Todays Range'
        # `result` is a dict, so try the numeric conversion directly
        # (the original check against `result.columns` would raise on a dict)
        try:
            result[title_] = float(value_.replace(',', ''))
            continue
        except ValueError:
            pass
        try:
            text = value_.strip()
            result[title_] = datetime.strptime(text, "%b %d, %Y").strftime("%d/%m/%Y")
            continue
        except ValueError:
            pass
        try:
            value = value_.strip()
            if 'B' in value:
                value = float(value.replace('B', '').replace(',', '')) * 1e9
            elif 'T' in value:
                value = float(value.replace('T', '').replace(',', '')) * 1e12
            result[title_] = value
            continue
        except ValueError:
            pass
    return result
|
bfe70fd27c76d743f107056023a127283a76d8c4
| 3,646,840
|
from datetime import datetime, timedelta
def check_export_start_date(export_start_dates, export_end_dates,
                            export_day_range):
    """
    Update export_start_dates according to export_end_dates so that each start
    date is no earlier than export_end_date - EXPORT_DAY_RANGE.
    Parameters:
        export_start_dates: dict
            Read from params, values are strings of dates
        export_end_dates: dict
            Calculated according to the data received.
            The values are of type datetime.datetime
        export_day_range: int
            Number of days to report
Returns:
dict: {str: datetime.datetime}
The keys are "covid_ag" or "flu_ag"
The values are dates until when we export data
"""
for test_type in TEST_TYPES:
if export_start_dates[test_type] == "":
export_start_dates[test_type] = datetime(2020, 5, 26)
else:
export_start_dates[test_type] = datetime.strptime(
export_start_dates[test_type], '%Y-%m-%d')
# Only export data from -45 days to -5 days
export_start_dates[test_type] = compare_dates(
export_end_dates[test_type] - timedelta(days=export_day_range),
export_start_dates[test_type], "l")
if test_type == "covid_ag":
export_start_dates[test_type] = compare_dates(
export_start_dates[test_type], datetime(2020, 5, 26), "l")
return export_start_dates
|
ab2466db1107b980506d34de71c5b1849851fd10
| 3,646,841
|
def reformat_adata(
    adata: AnnData, brain_region: str, num_seq_lanes: int, transgenes_list: str
):
    """
    Takes the user-specified inputs from the data_reformat script and
    transforms the DataFrame input into a usable AnnData output with group
    cell count labels (df_obs).
    It also puts genes in the index, since multiple Ensembl IDs can map onto
    the same gene.
    """
for i in range(1, num_seq_lanes + 1):
adata = obs_rename(adata, i, brain_region)
obs_seq_lanes_keys = [
int(seq_lane[1]) for seq_lane in adata.obs.index.str.split("_")
]
obs_seq_lanes_df = pd.DataFrame(
obs_seq_lanes_keys, index=adata.obs.index, columns=["seq_lane_number"]
)
print("Num seq_lanes parsed...")
# create bit labels for each transgene and its possible combinations.
gene_presence_df, _, cell_gene_flags, _ = gene_list_to_flag(adata, transgenes_list)
adata.obs[[col.upper() for col in gene_presence_df.columns]] = gene_presence_df
adata.obs["which_transgenes"] = cell_gene_flags
adata.obs["transgene_present"] = (
adata.obs["which_transgenes"].notnull().astype("str")
)
group_cell_count_labels = adata.obs["which_transgenes"].value_counts(dropna=False)
adata.obs["seq_lane"] = obs_seq_lanes_df
print("Group cell count labels generated")
if adata.var.index.has_duplicates:
print(f"Duplicate gene names in index (T/F): {adata.var.index.has_duplicates}")
adata.var = uniquify(adata.var)
else:
print(f"Duplicate gene names in index (T/F): {adata.var.index.has_duplicates}")
adata, __ = gene_mask(
adata, stringify_list(transgenes_list), col_name="transgene_mask"
)
adata, ribo_mask = gene_mask(adata, "^rp[sl][0-9]", col_name="ribo_mask")
adata, mito_mask = gene_mask(adata, "^mt*-", col_name="mito_mask")
adata.obs["percent_ribo"] = np.sum(adata[:, ribo_mask].X, axis=1) / np.sum(
adata.X, axis=1
)
adata.obs["percent_mito"] = np.sum(adata[:, mito_mask].X, axis=1) / np.sum(
adata.X, axis=1
)
adata.obs = adata.obs.drop(
columns=adata.obs.columns[adata.obs.columns.str.contains("temp")]
)
return (group_cell_count_labels, adata)
|
9f6e92b7dac8c8e84987676e7c2435a2e34f32e0
| 3,646,844
|
def chunks(list_, num_items):
    """Break list_ into chunks of num_items items each; the last chunk may be shorter."""
results = []
for i in range(0, len(list_), num_items):
results.append(list_[i:i+num_items])
return results
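
# Quick check (illustrative): 7 items in chunks of 3 -> sizes 3, 3, 1.
print(chunks(list(range(7)), 3))  # [[0, 1, 2], [3, 4, 5], [6]]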
|
83da5c19c357cc996fc7585533303986bea83689
| 3,646,845
|
def form_requires_input(form):
"""
Returns True if the form has at least one question that requires input
"""
for question in form.get_questions([]):
if question["tag"] not in ("trigger", "label", "hidden"):
return True
return False
|
97072a9edc494afa731312aebd1f23dc15bf9f47
| 3,646,846
|
import zlib
import json
def on_same_fs(request):
"""
Accept a POST request to check access to a FS available by a client.
:param request:
`django.http.HttpRequest` object, containing mandatory parameters
filename and checksum.
"""
filename = request.POST['filename']
checksum_in = request.POST['checksum']
checksum = 0
    try:
        with open(filename, 'rb') as f:
            data = f.read(32)
        checksum = zlib.adler32(data, checksum) & 0xffffffff
if checksum == int(checksum_in):
return HttpResponse(content=json.dumps({'success': True}),
content_type=JSON, status=200)
except (IOError, ValueError):
pass
return HttpResponse(content=json.dumps({'success': False}),
content_type=JSON, status=200)
|
2b19fe8d6a69db9cfeeea740cdcf70003e0c9ed1
| 3,646,848
|
from datetime import datetime
def get_memo(expense_group: ExpenseGroup, payment_type: str=None) -> str:
"""
Get the memo from the description of the expense group.
:param expense_group: The expense group to get the memo from.
:param payment_type: The payment type to use in the memo.
:return: The memo.
"""
expense_fund_source = 'Reimbursable expense' if expense_group.fund_source == 'PERSONAL' \
else 'Corporate Credit Card expense'
unique_number = None
if 'settlement_id' in expense_group.description and expense_group.description['settlement_id']:
# Grouped by payment
reimbursement = Reimbursement.objects.filter(
settlement_id=expense_group.description['settlement_id']
).values('payment_number').first()
if reimbursement and reimbursement['payment_number']:
unique_number = reimbursement['payment_number']
else:
unique_number = expense_group.description['settlement_id']
elif 'claim_number' in expense_group.description and expense_group.description['claim_number']:
# Grouped by expense report
unique_number = expense_group.description['claim_number']
if payment_type:
# Payments sync
return 'Payment for {0} - {1}'.format(payment_type, unique_number)
elif unique_number:
memo = '{} - {}'.format(expense_fund_source, unique_number)
expense_group_settings: ExpenseGroupSettings = ExpenseGroupSettings.objects.get(
workspace_id=expense_group.workspace_id
)
if expense_group.fund_source == 'CCC':
if expense_group_settings.ccc_export_date_type != 'current_date':
date = get_transaction_date(expense_group)
date = (datetime.strptime(date, '%Y-%m-%dT%H:%M:%S')).strftime('%d/%m/%Y')
memo = '{} - {}'.format(memo, date)
else:
if expense_group_settings.reimbursable_export_date_type != 'current_date':
date = get_transaction_date(expense_group)
date = (datetime.strptime(date, '%Y-%m-%dT%H:%M:%S')).strftime('%d/%m/%Y')
memo = '{} - {}'.format(memo, date)
return memo
else:
# Safety addition
return 'Reimbursable expenses by {0}'.format(expense_group.description.get('employee_email')) \
if expense_group.fund_source == 'PERSONAL' \
else 'Credit card expenses by {0}'.format(expense_group.description.get('employee_email'))
|
2402d7f0ff89ed7b06300f58f8bfb54c06d67f3f
| 3,646,849
|
def get_prefix_for_google_proxy_groups():
"""
Return a string prefix for Google proxy groups based on configuration.
Returns:
str: prefix for proxy groups
"""
prefix = config.get("GOOGLE_GROUP_PREFIX")
if not prefix:
raise NotSupported(
"GOOGLE_GROUP_PREFIX must be set in the configuration. "
"This namespaces the Google groups for security and safety."
)
return prefix
|
c81d3ede2ba1ad6b8ce716633abbc8e8f91f9a2b
| 3,646,850
|
def client(tmpdir):
"""Test client for the API."""
tmpdir.chdir()
views.app.catchall = False
return webtest.TestApp(views.app)
|
516230e96dff76afccc8f8a3a9dc3942c6341797
| 3,646,851
|
def list_extract(items, arg):
    """Extract items from a list of containers
    Uses Django template lookup rules: tries list index / dict key lookup first, then
    tries getattr. If the result is callable, it is called with no arguments and the
    return value is used.
    Usage: {{ list_of_lists|list_extract:1 }} (gets elt 1 from each item in list)
           {{ list_of_dicts|list_extract:'key' }} (gets value of 'key' from each dict in list)
    """
    def _extract(item):
        try:
            return item[arg]
        except (TypeError, KeyError, IndexError):
            pass
        attr = getattr(item, arg, None)
        return attr() if callable(attr) else attr
return [_extract(item) for item in items]
|
23fb863a7032f37d029e8b8a86b883dbfb4d5e7b
| 3,646,852
|
from urllib.parse import urljoin

from bs4 import BeautifulSoup
def get_links(url):
"""Scan the text for http URLs and return a set
of URLs found, without duplicates"""
# look for any http URL in the page
links = set()
text = get_page(url)
soup = BeautifulSoup(text, "lxml")
for link in soup.find_all('a'):
if 'href' in link.attrs:
newurl = link.attrs['href']
# resolve relative URLs
if newurl.startswith('/'):
newurl = urljoin(url, newurl)
# ignore any URL that doesn't now start with http
if newurl.startswith('http'):
links.add(newurl)
return links
|
70746ba8d28244cf712655fd82a38d358a30779a
| 3,646,853
|
def get_merkle_root(*leaves: str) -> "MerkleNode":
"""Builds a Merkle tree and returns the root given some leaf values."""
if len(leaves) % 2 == 1:
leaves = leaves + (leaves[-1],)
def find_root(nodes):
newlevel = [
MerkleNode(sha256d(i1.val + i2.val), children=[i1, i2])
for [i1, i2] in _chunks(nodes, 2)
]
return find_root(newlevel) if len(newlevel) > 1 else newlevel[0]
return find_root([MerkleNode(sha256d(l)) for l in leaves])
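
# Usage sketch (not from the source). MerkleNode, sha256d and _chunks live
# elsewhere in the original codebase; minimal illustrative stand-ins follow.
import hashlib
from typing import NamedTuple

class MerkleNode(NamedTuple):
    val: str
    children: list = None

def sha256d(s) -> str:
    """Double SHA-256, returned as a hex string."""
    data = s.encode() if isinstance(s, str) else s
    return hashlib.sha256(hashlib.sha256(data).digest()).hexdigest()

def _chunks(lst, n):
    return [lst[i:i + n] for i in range(0, len(lst), n)]

root = get_merkle_root('tx1', 'tx2', 'tx3')  # odd count: the last leaf is duplicated
print(root.val)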
|
d0fae08918b042f87ef955be436f1e3d84a66e8a
| 3,646,854
|
import torch
import copyreg
def BeginBlock(layer_to_call: torch.nn.Module,
user_id: str = None,
ipu_id: int = None) -> torch.nn.Module:
"""
Define a block by modifying an existing PyTorch module.
You can use this with an existing PyTorch module instance, as follows:
>>> poptorch.BeginBlock(myModel.a_layer)
>>> poptorch.BeginBlock(MyNewLayer())
    The wrapped module and all sub-modules will be part of this block until
    a sub-module is similarly modified to be another block. In addition, if an IPU
    is specified, the module and its submodules will run on the specified IPU.
    You can combine multiple blocks into a stage.
:param layer_to_call: PyTorch module to assign to the block.
:param user_id: A user defined identifier for the block.
Blocks with the same id are considered as being a single block.
Block identifiers are also used to manually specify pipelines or
phases.
:param ipu_id: The id of the IPU to run on.
Note that the ``ipu_id`` is an index in a multi-IPU device
within PopTorch, and is separate and distinct from the device
ids used by ``gc-info``.
.. seealso:: :py:meth:`poptorch.Options.setExecutionStrategy`
"""
if not isinstance(layer_to_call, torch.nn.Module):
# Previously, the function returned a new model so would work for any
# callable. This was never documented but should still be permitted to
# work.
if callable(layer_to_call):
return LegacyBeginBlockFn(layer_to_call, user_id, ipu_id)
raise _impl.createPoptorchError(
"module is not an instance of torch.nn.Module or " + "function.")
class BlockModule(type(layer_to_call)):
def __call__(self, *input, **kwargs):
if Block._stages_manager is not None:
if self._user_id is None:
self.__dict__['_user_id'] = (
Block._stages_manager.nextAutoId())
Block._stages_manager.beginStage(self._user_id, self._ipu_id)
return super().__call__(*input, **kwargs)
if str(layer_to_call.__class__) == str(BlockModule):
raise _impl.createPoptorchError(
"module has already been assigned to a block.")
BlockModule.__name__ = type(layer_to_call).__name__
layer_to_call.__class__ = BlockModule
layer_to_call.__dict__['_user_id'] = user_id
layer_to_call.__dict__['_ipu_id'] = ipu_id
# Register custom function to copy / serialize wrappers
copyreg.pickle(BlockModule, _pickle_reduce_block)
# There is no need to return as it is passed by reference, but this is for
# backward compatibility
return layer_to_call
|
a43c0d198fcf1f100cbec5bc3d916aeb05fd36d0
| 3,646,855
|
import torch
def unbatch_nested_tensor(nested_tensor):
"""Squeeze the first (batch) dimension of each entry in ``nested_tensor``."""
return map_structure(lambda x: torch.squeeze(x, dim=0), nested_tensor)
|
0691cb1bb851c609747cde9d45b24ca6310fa022
| 3,646,856
|
def row2dict(cursor, row):
    """ Convert a tuple row into a dict
    @param cursor: cursor object
    @param row: row data (tuple)
    @return: row data (dict)
    @see: http://docs.python.jp/3.3/library/sqlite3.html
    """
d = {}
for idx, col in enumerate(cursor.description):
d[col[0]] = row[idx]
return d
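
# Usage sketch with an in-memory SQLite database (illustrative):
import sqlite3

_conn = sqlite3.connect(':memory:')
_cur = _conn.execute("SELECT 1 AS id, 'alice' AS name")
print(row2dict(_cur, _cur.fetchone()))  # {'id': 1, 'name': 'alice'}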
|
60e0ebed21c35a65784fe94fe5781f61fbe0c97d
| 3,646,857
|
def merge(left, right):
    """Merge two sorted halves into one sorted list."""
    result = []
    leftIndex = 0
    rightIndex = 0
    while leftIndex < len(left) and rightIndex < len(right):
        if left[leftIndex] < right[rightIndex]:
            result.append(left[leftIndex])
            leftIndex += 1
        else:
            result.append(right[rightIndex])
            rightIndex += 1
    # Append any remaining elements from either half
    return result + left[leftIndex:] + right[rightIndex:]
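
# A minimal merge sort built on the helper above (illustrative driver):
def merge_sort(items):
    if len(items) <= 1:
        return items
    mid = len(items) // 2
    return merge(merge_sort(items[:mid]), merge_sort(items[mid:]))

print(merge_sort([5, 2, 9, 1, 5, 6]))  # [1, 2, 5, 5, 6, 9]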
|
5b0012e102d72a93cf3ce47f9600b7dcef758a3b
| 3,646,858
|
import re
def parse_query(query):
"""Parse the given query, returning a tuple of strings list (include, exclude)."""
exclude = re.compile(r'(?<=-")[^"]+?(?=")|(?<=-)\w+').findall(query)
for w in sorted(exclude, key=lambda i: len(i), reverse=True):
query = query.replace(w, '')
query = " " + query
return re.compile(r'(?<=[+ ]")[^"]+?(?=")|(?<=[+ ])\w+').findall(query), exclude
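
# Usage sketch (illustrative): terms prefixed with '-' (optionally quoted) are
# excluded, everything else is included.
include, exclude = parse_query('python -java "machine learning" -"deep learning"')
print(include)  # ['python', 'machine learning']
print(exclude)  # ['java', 'deep learning']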
|
4fe6aac76935af6e5acaa3aedad40d6bc635d4ff
| 3,646,859
|
def _m_verify_mg(state, method_name, multigoal, depth, verbose=0):
"""
Pyhop 2 uses this method to check whether a multigoal-method has achieved
the multigoal that it promised to achieve.
"""
goal_dict = _goals_not_achieved(state,multigoal)
    if goal_dict:
        raise Exception(f"depth {depth}: method {method_name} " + \
            f"didn't achieve {multigoal}")
if verbose >= 3:
print(f"depth {depth}: method {method_name} achieved {multigoal}")
return []
|
262ae05ab34e37867d5fa83ff86ecbd01391dbe1
| 3,646,860
|
from functools import wraps
def eggs_attribute_decorator(eggs_style):
"""Applies the eggs style attribute to the function"""
def decorator(f):
f.eggs = eggs_style
@wraps(f)
def decorated_function(*args, **kwargs):
return f(*args, **kwargs)
return decorated_function
return decorator
|
3fe6d6b65b29176cf9fb997697c1b70f01f041bf
| 3,646,861
|
def byte_size(num, suffix='B'):
    """
    Return a formatted string indicating the size in bytes, with the proper
    unit, e.g. KB, MB, GB, TB, etc.
    :arg num: The number of bytes
    :arg suffix: An arbitrary suffix, like `Bytes`
    :rtype: str
    """
for unit in ['', 'K', 'M', 'G', 'T', 'P', 'E', 'Z']:
if abs(num) < 1024.0:
return "%3.1f%s%s" % (num, unit, suffix)
num /= 1024.0
return "%.1f%s%s" % (num, 'Y', suffix)
|
830d4ed401df90bc3a176c52124ed93c53c25c80
| 3,646,862
|
def cdsCoverage(genome_coverage, dict_cds, datatype, coverage):
    """Return Mean Coverage or Raw Counts for each CDS, or their promoter regions for tss and chip"""
    # list() is needed in Python 3, where map() returns an iterator that cannot be sliced
    genome_coverage = [list(map(int, genome_coverage[0])), list(map(int, genome_coverage[1]))]
# CDS coverage is calculated from genome coverage on the entire gene
if datatype != 'tss' and datatype != 'chip':
for cds_id in dict_cds:
# Strand plus
plus = sum(genome_coverage[0][int(dict_cds[cds_id][2]-1):int(dict_cds[cds_id][3])])
if coverage == 'mean':
dict_cds[cds_id][5][0] = float(plus) / len(genome_coverage[0][int(dict_cds[cds_id][2]-1):int(dict_cds[cds_id][3])])
elif coverage == 'counts':
dict_cds[cds_id][5][0] = float(plus)
# Strand minus
minus = sum(genome_coverage[1][int(dict_cds[cds_id][2]-1):int(dict_cds[cds_id][3])])
if coverage == 'mean':
dict_cds[cds_id][5][1] = float(minus) / len(genome_coverage[1][int(dict_cds[cds_id][2]-1):int(dict_cds[cds_id][3])])
elif coverage == 'counts':
dict_cds[cds_id][5][1] = float(minus)
return dict_cds
# CDS coverage is calculated from genome coverage on the region [-250:ATG:+100]
else:
for cds_id in dict_cds:
# Strand plus
if int(dict_cds[cds_id][4]) == 1:
start = int(dict_cds[cds_id][2]) - 250
# Test position out of the first base
if start < 1:
start = 1
stop = int(dict_cds[cds_id][2]) + 2 + 100
# Test position out of the last base
if stop > len(genome_coverage[0]):
stop = len(genome_coverage[0])
plus = sum(genome_coverage[0][start-1:stop])
if coverage == 'mean':
dict_cds[cds_id][5][0] = float(plus) / len(genome_coverage[0][start-1:stop])
elif coverage == 'counts':
dict_cds[cds_id][5][0] = float(plus)
minus = sum(genome_coverage[1][start-1:stop])
if coverage == 'mean':
dict_cds[cds_id][5][1] = float(minus) / len(genome_coverage[1][start-1:stop])
elif coverage == 'counts':
dict_cds[cds_id][5][1] = float(minus)
# Strand minus: strand is set at -1
else:
start = int(dict_cds[cds_id][3]) + 250
# Test position out of the last base
if start > len(genome_coverage[0]):
start = len(genome_coverage[0])
stop = int(dict_cds[cds_id][3]) - 2 - 100
# Test position out of the first base
if stop < 1:
stop = 1
plus = sum(genome_coverage[0][stop-1:start])
if coverage == 'mean':
dict_cds[cds_id][5][0] = float(plus) / len(genome_coverage[0][stop-1:start])
elif coverage == 'counts':
dict_cds[cds_id][5][0] = float(plus)
minus = sum(genome_coverage[1][stop-1:start])
if coverage == 'mean':
dict_cds[cds_id][5][1] = float(minus) / len(genome_coverage[1][stop-1:start])
elif coverage == 'counts':
dict_cds[cds_id][5][1] = float(minus)
return dict_cds
|
637d76347dbe09c3826e496f7c8f5ec0a79f3dbd
| 3,646,863
|
def div88():
    """
    Returns the divider
    :return: divider88
    """
    return divider88
|
a2ae79f96ed7530fd2a1f266404ee3b21614a5a9
| 3,646,864
|
def laplace_noise(epsilon, shape, dtype, args):
    """
    Similar to foolbox, but a batched version.
    :param epsilon: strength of the noise
    :param shape: the output shape
    :param dtype: the output type
    :param args: supplies the image value bounds args.min and args.max
    :return: the noise for images
    """
scale = epsilon / np.sqrt(3) * (args.max - args.min)
noise = nprng.laplace(scale=scale, size=shape)
noise = noise.astype(dtype)
return noise
|
3016db9cebffe47c62f57e05d30442b9786636e8
| 3,646,865
|
def grid_convergence(lat, lon, radians=False):
"""
Given the latitude and longitude of a position, calculate the grid convergence
Args:
lat: latitude (degrees or radians)
lon: longitude (degrees or radians)
radians: true if lat/lon in radians
Returns: gamma, the grid convergence angle in radians or degrees
"""
lon0, lat0, _ = utm_origin_lla(lat, lon, radians=radians)
if radians:
return atan(tan(lon - lon0)*sin(lat))
else:
return rad2deg(atan(tand(lon - lon0)*sind(lat)))
|
dc60c8325f66fdc2db9b72d2bdc099823f913d26
| 3,646,867
|
import uuid
import json
def _make_index_item(resource_type):
""" """
id_prefix = "2c1|"
uuid_ = uuid.uuid4().hex
tpl = {
"access_roles": [
"guillotina.Reader",
"guillotina.Reviewer",
"guillotina.Owner",
"guillotina.Editor",
"guillotina.ContainerAdmin",
],
"access_users": ["root"],
"depth": 2,
"elastic_index": "{0}__{1}-{2}".format(
ES_INDEX_NAME, resource_type.lower(), uuid_
),
"id": None,
"uuid": id_prefix + uuid_,
}
with open(str(FHIR_EXAMPLE_RESOURCES / (resource_type + ".json")), "r") as fp:
data = json.load(fp)
tpl["id"] = data["id"]
tpl[resource_type.lower() + "_resource"] = data
return tpl
|
5c11bb14016e42ff36b12ca81fd83e81b71dea9d
| 3,646,868
|
import numpy as np
import torch
def mol_to_graph(mol):
    """
    Converts a Mol object to a graph compatible with PyTorch Geometric
    Args:
        mol (Mol): RDKit Mol object
    Returns:
        node_feats (FloatTensor): features for each node, one-hot encoded by element
        edges (LongTensor): edges in COO format
        edge_feats (FloatTensor): bond order for each edge
        node_pos (FloatTensor): x-y-z coordinates of each node
    """
node_pos = torch.FloatTensor(dt.get_coordinates_of_conformer(mol))
bonds = dt.get_bonds_matrix(mol)
edge_tuples = np.argwhere(bonds)
edges = torch.LongTensor(edge_tuples).t().contiguous()
node_feats = torch.FloatTensor([one_of_k_encoding_unk(a.GetSymbol(), mol_atoms) for a in mol.GetAtoms()])
# edge_feats = torch.FloatTensor([one_of_k_encoding(bonds[i,j], [1.0, 2.0, 3.0, 1.5]) for i,j in edge_tuples])
edge_feats = torch.FloatTensor([bonds[i, j] for i, j in edge_tuples]).view(-1, 1)
return node_feats, edges, edge_feats, node_pos
|
5a3e5169b7a84afae31254e71152fb6cb300bf64
| 3,646,869
|
from typing import List
import random
def _tournament(evaluated_population: List[Eval], tournament_size: int = 5,
previous_winner: Chromosome = None) -> Chromosome:
"""Selects tournament_size number of chromosomes to 'compete' against each other. The chromosome with the highest
fitness score 'wins' the tournament.
Params:
- evaluated_population (list<tuple<list<int>,float>>): The evaluated population
- tournament_size (int): Specifies the size of the tournament. When equal to 1, the
method is equivalent to random selection. The higher the tournament size, the higher the
bias towards the fitter individuals.
- previous_winner (list<int>): The winner of the previous tournament. If the same chromosome wins both tournaments,
then the runner-up to the current tournament is chosen.
Returns:
- winner (list<int>): The chromosome with the highest score in the tournament
"""
    tournament = random.sample(evaluated_population, tournament_size)
    # Sort by fitness, best first, so the highest-scoring chromosome wins
    # (an ascending sort here would pick the weakest, contradicting the docstring)
    tournament.sort(key=lambda evaluated_chromosome: evaluated_chromosome[1], reverse=True)
    winner = tournament[0][0]  # pylint: disable=E1136
    if winner == previous_winner:
        winner = tournament[1][0]  # pylint: disable=E1136
    return winner
|
29db4c9c4a5332c3e70760f57312b845e29b7a36
| 3,646,870
|
def interpolate_drift_table(table, start=0, skip=0, smooth=10):
"""
Smooth and interpolate a table
:param table: fxyz (nm) array
:param start: in case of renumbering needed : first frame
:param skip: how many frame were skipped
:param smooth: gaussian smoothing sigma
:return: interpolated table
"""
w = table.shape[1]
if smooth > 0:
table = smooth_drift_table(table, sigma=smooth)
table = update_frame_number(table, start=start, skip=skip)
time = table[:, 0]
# print(time.shape)
time_new = np.arange(1, max(time) + 1)
new_table = np.zeros((len(time_new), w))
new_table[:, 0] = time_new
for col in range(1, w):
y = table[:, col]
# print(y.shape)
f = interpolate.interp1d(time, y, fill_value='extrapolate')
ynew = f(time_new)
new_table[:, col] = ynew
logger.info(f'interpolating from {len(time)} to {len(ynew)} frames')
return new_table
|
d2296e6eb1b55cf5416d2ab933ef430eb0ace964
| 3,646,871
|
def on_mrsim_config_change():
"""Update the mrsim.config dict. Only includes density, volume, and #sidebands"""
existing_data = ctx.states["local-mrsim-data.data"]
fields = ["integration_density", "integration_volume", "number_of_sidebands"]
    existing_data["trigger"] = {"simulate": True, "method_index": None}
for item in fields:
existing_data["config"][item] = ctx.states[f"{item}.value"]
return prep_valid_data_for_simulation(existing_data)
|
cea2f60ca0de5e8b383a7363adfeea19473b1662
| 3,646,872
|
import base64

from Crypto.Cipher import AES  # pycryptodome
def decrypt(encrypted, passphrase):
    """Takes an encrypted message in base64 and a key, returns the decrypted string without spaces on the left.
    IMPORTANT: the key length must be a multiple of 16.
    Finally, lstrip is used to remove the padding spaces from the left of the message"""
    aes = AES.new(passphrase, AES.MODE_ECB)
    return aes.decrypt(base64.b64decode(encrypted)).lstrip().decode('utf-8')
|
90e10c3e6e07934bc2171fa09febd223db200d70
| 3,646,873
|
async def total_conversations(request: HistoryQuery = HistoryQuery(month=6),
collection: str = Depends(Authentication.authenticate_and_get_collection)):
"""Fetches the counts of conversations of the bot for previous months."""
range_value, message = HistoryProcessor.total_conversation_range(
collection, request.month
)
return {"data": range_value, "message": message}
|
bc6a292b7ddc598d43c609272f6f45e87842bf21
| 3,646,876
|
from itertools import chain

import pandas
from more_itertools import unique_everseen  # itertools recipe; assumed source of unique_everseen
def intersect(table_dfs, col_key):
    """ Intersect tables by column
    """
col_key_vals = list(unique_everseen(chain(*(
table_df[col_key] for table_df in table_dfs))))
lookup_dcts = [lookup_dictionary(table_df, col_key)
for table_df in table_dfs]
intscd_rows = []
for val in col_key_vals:
row = {}
if val and all(val in lookup_dct for lookup_dct in lookup_dcts):
for lookup_dct in lookup_dcts:
row.update(lookup_dct[val])
intscd_rows.append(row)
intscd_col_keys = list(unique_everseen(chain(*table_dfs)))
intscd_df = pandas.DataFrame.from_dict(intscd_rows)[intscd_col_keys]
return intscd_df
|
9ca1035d4cd614ae4080c8e9dc9174c7423c28dc
| 3,646,878
|
import socket
import json
def ask_peer(peer_addr, req_type, body_dict, return_json=True):
"""
Makes request to peer, sending request_msg
:param peer_addr: (IP, port) of peer
:param req_type: type of request for request header
:param body_dict: dictionary of body
:param return_json: determines if json or string response should be returned
:return: string response of peer
"""
request_msg = create_request({"type": req_type}, body_dict)
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as client:
client.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
client.settimeout(5)
try:
client.connect(peer_addr)
client.sendall(request_msg.encode())
data = client.recv(1024).decode()
except (socket.error, socket.timeout):
return None
if not data:
return None
return data if not return_json else json.loads(data)
|
44c8750ef4af487402a5cf5f789bf2a3d8d3fdb7
| 3,646,879
|
def describe_instances_header():
"""generate output header"""
return misc.format_line((
"Account",
"Region",
"VpcId",
"ec2Id",
"Type",
"State",
"ec2Name",
"PrivateIPAddress",
"PublicIPAddress",
"KeyPair"
))
|
6939b8e47c15e098733e70fe17392c18cfff9636
| 3,646,880
|
def ordered_scaffold_split(dataset, lengths, chirality=True):
    """
    Split a dataset into new datasets with non-overlapping scaffolds, sorted w.r.t. the size of each scaffold set.
    Parameters:
        dataset (Dataset): dataset to split
        lengths (list of int): expected length for each split.
            Note the results may differ in length due to rounding.
            (Currently unused: the split fractions are hard-coded to 0.8/0.1/0.1 below.)
    """
frac_train, frac_valid, frac_test = 0.8, 0.1, 0.1
scaffold2id = defaultdict(list)
for idx, smiles in enumerate(dataset.smiles_list):
scaffold = MurckoScaffold.MurckoScaffoldSmiles(smiles=smiles, includeChirality=chirality)
scaffold2id[scaffold].append(idx)
scaffold2id = {key: sorted(value) for key, value in scaffold2id.items()}
scaffold_sets = [
scaffold_set for (scaffold, scaffold_set) in sorted(
scaffold2id.items(), key=lambda x: (len(x[1]), x[1][0]), reverse=True)
]
train_cutoff = frac_train * len(dataset)
valid_cutoff = (frac_train + frac_valid) * len(dataset)
train_idx, valid_idx, test_idx = [], [], []
for scaffold_set in scaffold_sets:
if len(train_idx) + len(scaffold_set) > train_cutoff:
if len(train_idx) + len(valid_idx) + len(scaffold_set) > valid_cutoff:
test_idx.extend(scaffold_set)
else:
valid_idx.extend(scaffold_set)
else:
train_idx.extend(scaffold_set)
return torch_data.Subset(dataset, train_idx), torch_data.Subset(dataset, valid_idx), torch_data.Subset(dataset, test_idx)
|
8a3e0ab5c4cf23dcdcb075fc9363452e06c7d22f
| 3,646,881
|
import struct
def read_plain_byte_array(file_obj, count):
"""Read `count` byte arrays using the plain encoding."""
return [file_obj.read(struct.unpack(b"<i", file_obj.read(4))[0]) for i in range(count)]
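
# Usage sketch (illustrative): each byte array is stored as a little-endian
# int32 length followed by the raw bytes (Parquet's plain encoding).
import io

_buf = io.BytesIO(struct.pack(b"<i", 3) + b"foo" + struct.pack(b"<i", 4) + b"barz")
print(read_plain_byte_array(_buf, 2))  # [b'foo', b'barz']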
|
f300d205fda9b1b92ebd505f676b1f76122f994d
| 3,646,882
|
import imp
def find_django_migrations_module(module_name):
""" Tries to locate <module_name>.migrations_django (without actually importing it).
Appends either ".migrations_django" or ".migrations" to module_name.
For details why:
https://docs.djangoproject.com/en/1.7/topics/migrations/#libraries-third-party-apps
"""
try:
module_info = imp.find_module(module_name)
module = imp.load_module(module_name, *module_info)
imp.find_module('migrations_django', module.__path__)
return module_name + '.migrations_django'
except ImportError:
return module_name + '.migrations'
|
fdae121b1341355bc1911d2b4ce9501eb80cf8f3
| 3,646,883
|
def big_number(int_in):
    """Converts a potentially big number into a readable string.
    Example:
        - big_number(10000000) returns '10 000 000'.
    """
s = str(int_in)
position = len(s)
counter = 0
out = ''
while position != 0:
counter += 1
position -= 1
out = s[position] + out
if counter % 3 == 0 and position != 0:
out = " " + out
    return out
|
7db0dce8ffa1cbea736537efbf2fdd4d8a87c20d
| 3,646,884
|
def action_list_to_string(action_list):
"""Util function for turning an action list into pretty string"""
action_list_string = ""
for idx, action in enumerate(action_list):
action_list_string += f"{action['name']} ({action['action']['class_name']})"
if idx == len(action_list) - 1:
continue
action_list_string += " => "
return action_list_string
|
5e291dd1dbf7b8d8149505a0efc157cbcc22af3b
| 3,646,885
|
def test_pandigital_9(*args):
"""
Test if args together contain the digits 1 through 9 uniquely
"""
digits = set()
digit_count = 0
for a in args:
while a > 0:
digits.add(a % 10)
digit_count += 1
a //= 10
return digit_count == 9 and len(digits) == 9 and 0 not in digits
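
# Quick checks (illustrative): 192, 384 and 576 together use the digits 1-9 exactly once.
print(test_pandigital_9(192, 384, 576))     # True
print(test_pandigital_9(123, 456, 789, 1))  # False: ten digits, '1' repeats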
|
ad5a738400f7b8a9bea001a13a76798633b9ac61
| 3,646,887
|
def volume_attached(context, volume_id, instance_id, mountpoint):
"""Ensure that a volume is set as attached."""
return IMPL.volume_attached(context, volume_id, instance_id, mountpoint)
|
3bfd057dee24bf9a4b51ef6503dd46bacc64210d
| 3,646,889
|
def _startswith(
self: str | ir.StringValue, start: str | ir.StringValue
) -> ir.BooleanValue:
"""Determine whether `self` starts with `end`.
Parameters
----------
self
String expression
start
prefix to check for
Examples
--------
>>> import ibis
>>> text = ibis.literal('Ibis project')
>>> text.startswith('Ibis')
Returns
-------
BooleanValue
Boolean indicating whether `self` starts with `start`
"""
return ops.StartsWith(self, start).to_expr()
|
dabc7a1e07b38fc99c1f31bb285fc895c890301d
| 3,646,891
|
def _get_all_scopes(blocks):
"""Get all block-local scopes from an IR.
"""
all_scopes = []
    for label, block in blocks.items():
        if block.scope not in all_scopes:
            all_scopes.append(block.scope)
return all_scopes
|
daa13a20629dd419d08c9c6026972f666c3f9291
| 3,646,893
|
from datetime import datetime
def get_equinox_type(date):
"""Returns a string representing the type of equinox based on what month
the equinox occurs on. It is assumed the date being passed has been
confirmed to be a equinox.
Keyword arguments:
date -- a YYYY-MM-DD string.
"""
month = datetime.strptime(date, '%Y-%m-%d').month
if month == 3:
return 'march'
elif month == 9:
return 'september'
else:
return None
|
06b65a54a0ccf681d9f9b57193f5e9d83578f0eb
| 3,646,894
|
def mcs_worker(k, mols, n_atms):
"""Get per-molecule MCS distance vector."""
dists_k = []
    n_incomp = 0  # Number of searches halted early by the timeout
for l in range(k + 1, len(mols)):
# Set timeout to halt exhaustive search, which could take minutes
result = FindMCS([mols[k], mols[l]], completeRingsOnly=True,
ringMatchesRingOnly=True, timeout=10)
dists_k.append(1. - result.numAtoms /
((n_atms[k] + n_atms[l]) / 2))
if result.canceled:
n_incomp += 1
return np.array(dists_k), n_incomp
|
013958a41813181478b3133e107efed5d0370fa6
| 3,646,895
|
def get_tally_sort_key(code, status):
"""
Get a tally sort key
The sort key can be used to sort candidates and other tabulation
categories, for example the status and tally collections returned by
rcv.Tabulation().tabulate().
The sort codes will sort candidates before other tabulation
categories; elected candidates before defeated candidates; elected
candidates by increasing round of election, then by decreasing votes;
defeated candidates by decreasing round of election, then by
decreasing votes; any remaining ties are broken by the sort order of
candidate names and labels for other tabulation categories.
Arguments
=========
code
A string representing a candidate name or label of another
tabulation category.
status
A dictionary of tabulation result statuses, as given by the second
item of the return value from rcv.Tabulation().tabulate().
Returns
=======
A sort key in the form of a tuple of integers and/or strings.
"""
    if code in status:
        nbr_round = status[code].nbr_round
        votes = status[code].votes
        if status[code].status == 'elected':
            sort_key = (1, 1, nbr_round, -votes, code)
        else:
            sort_key = (1, 2, -nbr_round, -votes, code)
    else:
        sort_key = (2, code)
return sort_key
|
bd7d643300997903b84b1827174dd1f5ac515156
| 3,646,896
|
def get_lidar_point_cloud(sample_name, frame_calib, velo_dir, intensity=False):
    """Gets the lidar point cloud in cam0 frame.
    Args:
        sample_name: Sample name
        frame_calib: FrameCalib
        velo_dir: Velodyne directory
        intensity: If True, also return the per-point intensity values
    Returns:
        (3, N) point_cloud in the form [[x,...][y,...][z,...]]
        (and an (N,) intensity array when `intensity` is True)
    """
xyzi = read_lidar(velo_dir, sample_name)
# Calculate the point cloud
points_in_lidar_frame = xyzi[:, 0:3]
points = calib_utils.lidar_to_cam_frame(points_in_lidar_frame, frame_calib)
if intensity:
return points.T, xyzi[:, 3]
return points.T
|
f1deb8896a2c11d82d6a312f0a8f353a73a1b40d
| 3,646,898
|
import torch.nn as nn
def make_mlp(dim_list, activation_list, batch_norm=False, dropout=0):
"""
Generates MLP network:
Parameters
----------
dim_list : list, list of number for each layer
activation_list : list, list containing activation function for each layer
batch_norm : boolean, use batchnorm at each layer, default: False
dropout : float [0, 1], dropout probability applied on each layer (except last layer)
Returns
-------
nn.Sequential with layers
"""
layers = []
index = 0
for dim_in, dim_out in zip(dim_list[:-1], dim_list[1:]):
activation = activation_list[index]
layers.append(nn.Linear(dim_in, dim_out))
if batch_norm:
layers.append(nn.BatchNorm1d(dim_out))
if activation == 'relu':
layers.append(nn.ReLU())
elif activation == 'tanh':
layers.append(nn.Tanh())
elif activation == 'leakyrelu':
layers.append(nn.LeakyReLU())
elif activation == 'sigmoid':
layers.append(nn.Sigmoid())
if dropout > 0 and index < len(dim_list) - 2:
layers.append(nn.Dropout(p=dropout))
index += 1
return nn.Sequential(*layers)
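
# Usage sketch (illustrative): a two-layer MLP, ReLU then Sigmoid, with
# batch norm and 20% dropout on the hidden layer.
mlp = make_mlp([16, 32, 8], ['relu', 'sigmoid'], batch_norm=True, dropout=0.2)
print(mlp)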
|
dc2677ccd1291942f474eb6fe7719103731f4cfc
| 3,646,900
|
import cv2
import numpy as np
def load_and_prep_image(filename):
    """
    Reads an image from filename, detects the first face with a Haar cascade,
    crops it, converts it to grayscale and reshapes it to (1, 48, 48, 1).
    """
    image = cv2.imread(filename)
    face_cascade = cv2.CascadeClassifier(haarcascade)
    faces = face_cascade.detectMultiScale(image, scaleFactor=1.3, minNeighbors=5)
    cropped_img = None  # returned as None when no face is detected
    for (x, y, w, h) in faces:
        cv2.rectangle(image, (x, y), (x + w, y + h), (255, 0, 0), 2)
        face = image[y:y + h, x:x + w]
        gray = cv2.cvtColor(face, cv2.COLOR_BGR2GRAY)
        cropped_img = np.expand_dims(np.expand_dims(cv2.resize(gray, (48, 48)), -1), 0)
    return cropped_img
|
cbf3b1a840ecc931adf0dd7f84cff68e83180efe
| 3,646,902
|
def DoH(im, canvas, max_sigma=30, threshold=0.1, display=True):
    """ Determinant of Hessian blob detector
    :param im: grayscale image
    :param max_sigma: maximum sigma of Gaussian kernel
    :param threshold: absolute lower bound; local maxima smaller than threshold are ignored
    """
    blobs = blob_doh(im, max_sigma=max_sigma, threshold=threshold)
for blob in blobs:
y, x, r = blob
cv2.circle(canvas, (int(x), int(y)), int(r), (0, 0, 255), 2)
if display:
cv2.imshow('Difference of Hessian', canvas)
cv2.waitKey(0)
return blobs
|
965ce92cdba24514fa9802a9e8891a96d97f1cf5
| 3,646,903
|
def _parse_class(s):
    """
    Parse a key, value pair, separated by '='
    On the command line (argparse) a declaration will typically look like:
        foo=hello
    or
        foo="hello world"
    """
    items = s.split('=')
    key = items[0].strip()  # we remove blanks around keys, as is logical
    # rejoin the rest; value is None when no '=' was given
    value = '='.join(items[1:]) if len(items) > 1 else None
    return (key, value)
|
db517a277e21448eb83ba25244a8bfa3892f18a4
| 3,646,904
|
import difflib
def getStringSimilarity(string1: str, string2: str):
    """
    Return a similarity ratio for two strings.
    """
    return difflib.SequenceMatcher(None, string1, string2).quick_ratio()
|
292f552449569206ee83ce862c2fb49f6063dc9e
| 3,646,905
|
def calc_simcoef_distr(patfeats, labels, id_dict, simcoef):
"""
Calculates the score distributions
Inputs:
- simcoef: simcoef the values are calculated with (string)
- labels: list of strings with the scores to be calculated (e.g.: ['cited', 'random'])
- id_dict: dictionary containing the patent ID pairs for the respective label
Output:
- scores: dictionary containing the scores for each label
"""
scores = dict.fromkeys(labels)
for label in labels:
        print(label)
scores[label] = []
combis = id_dict[label]
for combi in combis:
score = compute_sim(patfeats[combi[0]], patfeats[combi[1]], simcoef)
scores[label].append(score)
return scores
|
897bcb2e30e0587173557772c17589fe43841e60
| 3,646,907
|
import numpy as np
import matplotlib.pyplot as plt
def fft(signal, sampling_rate, plot=False, show_grid=True, fig_size=(10, 5)):
"""
Perform FFT on signal.
Compute 1D Discrete Fourier Transform using Fast Fourier Transform.
Optionally, plot the power spectrum of the frequency domain.
Parameters
----------
signal : ndarray
Input array to be transformed.
sampling_rate : float
Sampling rate of the input signal.
plot : bool, optional
Toggle to display a plot of the power spectrum.
show_grid : bool, optional
If creating a plot, toggle to show grid lines on the figure.
fig_size : tuple, optional
If plotting, set the width and height of the resulting figure.
Returns
-------
signal_fft : ndarray
Transformation of the original input signal.
"""
n = len(signal)
t = 1.0 / sampling_rate
time = range(n) # Time vector
xf = np.linspace(0.0, 1.0 / (2.0 * t), n // 2)
yf = np.fft.fft(signal) / n # FFT and normalize
if plot:
f, axarr = plt.subplots(2, 1, figsize=fig_size)
axarr[0].plot(time, signal)
axarr[0].set_xlim(min(time), max(time))
axarr[0].set_xlabel("Time Steps")
axarr[0].set_ylabel("Amplitude")
axarr[0].grid(show_grid)
axarr[1].plot(xf, abs(yf[0 : n // 2]), "r") # Plot the spectrum
axarr[1].set_xlabel("Freq (Hz)")
axarr[1].set_ylabel("|Y(freq)|")
axarr[1].grid(show_grid)
f.subplots_adjust(hspace=0.5)
plt.suptitle("Power Spectrum", size=16)
plt.show()
return yf
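
# Usage sketch (illustrative): a 5 Hz sine sampled at 100 Hz; the spectrum
# returned by fft() should peak at 5 Hz.
_fs = 100.0
_t = np.arange(0, 1, 1 / _fs)
_sig = np.sin(2 * np.pi * 5 * _t)
_spectrum = fft(_sig, _fs, plot=False)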
|
70296c900e8ad7342be3c6ee18ff8b34e481ac0e
| 3,646,908
|
def pattern_count(data, **params):
"""
Count occurrences of a given pattern.
Args:
data (list): values.
params (kwargs):
pattern (str or list): the pattern to be sought in data (obligatory)
metric (str): 'identity' counts identical positions,
'euclid' calculates the Euclidean distance (L2 norm),
'taxi' calculates the taxicab (Manhattan) distance
(L1 norm).
'sup' returns maximum distance between positions,
'inf' returns minimum distance between positions.
Only 'identity' can be used with non-numerical data.
radius (number): the similarity cutoff (non-negative)
normalized (bool): whether the number of occurrences is to be
divided by the maximum number of occurrences.
(default:False)
Returns the number of occurrences of the pattern in the data.
Invokes internal function '_pattern_common', which raises:
NameError when 'pattern' is not given,
TypeError if 'pattern' is neither string nor list,
ValueError if 'radius' is negative or unsupported distance method used.
"""
pattern, patlen, radius, metric = _pattern_common(**params)
    normalized = params.get('normalized', False)
counts = 0
for pos in range(len(data) - patlen + 1):
if _list_distance(data[pos:pos + patlen], pattern, metric) <= radius:
counts += 1
return counts if not normalized \
else 1.0 * counts / (len(data) - patlen + 1)
|
0c943554b4c5b7739a6ca16aa739b3cd614ab79d
| 3,646,909
|
import requests
import signal
def do(hostname):
"""
Performs a GET request.
Parameters
----------
hostname : str
Target request
Return
------
The request results
"""
try:
return requests.get(hostname, timeout=10)
except TimeoutException:
print("\033[1;31mRequest timeout: test aborted\n\033[1;m")
return None
except requests.ConnectionError:
print("\033[1;31mServer not found: test aborted\n\033[1;m")
return None
finally:
signal.alarm(0)
|
7e300e4be98beecad29e28594b76230e6c19382d
| 3,646,911
|
def getAssignmentReport(assignment):
"""
Produces an ABET assignment report (as a markdown-formatted string)
for the given assignment (which is expected to be a codepost API
object) by pulling all relevant data as well as source
code files (and grader comments) for randomly selected A, B and C samples
"""
courseId = assignment.course
course = codepost.course.retrieve(id=courseId)
courseName = course.name
coursePeriod = course.period
assignmentName = assignment.name
assignmentPts = assignment.points
assignmentMean = assignment.mean
assignmentMedian = assignment.median
summary = f"""
# {courseName} - {coursePeriod}
## {assignmentName}
* Points: {assignmentPts}
* Mean: {assignmentMean}
* Median: {assignmentMedian}\n\n"""
# find ideal A, B, C samples
submissions = assignment.list_submissions()
aSubmission = submissions[0]
bSubmission = submissions[0]
cSubmission = submissions[0]
# we only expect 1 submission per student since submissions are via our
# scripts, but in any case, find the 3 closest to A=max%, B = 85%, C = 75%
for submission in submissions:
if submission.grade > aSubmission.grade:
aSubmission = submission
if abs(submission.grade / assignmentPts - .85) < abs(bSubmission.grade / assignmentPts - .85):
bSubmission = submission
if abs(submission.grade / assignmentPts - .75) < abs(cSubmission.grade / assignmentPts - .75):
cSubmission = submission
aSummary, aDetail = submissionToMarkdown(aSubmission,"A",assignmentPts)
bSummary, bDetail = submissionToMarkdown(bSubmission,"B",assignmentPts)
cSummary, cDetail = submissionToMarkdown(cSubmission,"C",assignmentPts)
return summary + aSummary + bSummary + cSummary + "\n\n" + aDetail + bDetail + cDetail
|
fd2a49c8fa8e3a15a878e06d29ec9598912034c6
| 3,646,912
|
def start_game():
"""
Method to start
:return: Choice selection for new game or load game
"""
maximize_console()
print_title()
print('Do you want to start a new game (enter 1) or resume an ongoing game (enter 2)?')
choice = input('||> ')
print()
return choice
|
5780468f4239a8a519538a18feb12a0956dd4170
| 3,646,913
|
def get_inception_score(images, batch_size, splits=10):
    """
    Calculate the inception score of the generated images.
    `images` is a numpy array of 299x299x3 images with values in the range [0, 255].
    """
assert(type(images) == np.ndarray)
inception_model = inception_v3
inception_model.eval()
def get_softmax(x):
x = inception_model(x)
return tf.nn.softmax(x)
n = len(images) // batch_size
preds = np.zeros([len(images), 1000], dtype=np.float32)
    tfe.enable_eager_execution()
    dataloader = tf.data.Dataset.from_tensor_slices(images)
    dataloader = dataloader.batch(batch_size)
for i, batch in enumerate(tfe.Iterator(dataloader), 0):
batch_x = tf.Variable(batch) # images
# softmax
preds[i * batch_size:(i + 1) * batch_size] = get_softmax(batch_x)
scores = []
# IS score
for i in range(splits):
part = preds[(i * preds.shape[0] // splits):((i + 1) * preds.shape[0] // splits), :]
kl = part * (np.log(part) - np.log(np.expand_dims(np.mean(part, 0), 0)))
kl = np.mean(np.sum(kl, 1))
scores.append(np.exp(kl))
return np.mean(scores), np.std(scores)
|
9e14691a5c885b6b95e6ff9ecff014db0cca119e
| 3,646,916
|
def generate_audio_testing(raw_gain, raw_freq, raw_dampings, modal_fir, reverb, impulse_profile,
gains, frequencies, dampings, modal_response,
noise, acceleration_scale, revc, audio_sample_rate, example_secs, scratch='controls'):
"""Generate DiffImpact's estimate of impact sound based on current model variables."""
# Generate impulse --> impact profile
# magnitude_envelopes, taus, prediction['stdevs']
# impc = impact.get_controls(mags, stdevs, taus, 0) # needs to be 2D?
# impulse_profile = impact.get_signal(impc['magnitudes'], impc['taus'])
print("impulse profile shape: ", impulse_profile.shape) # force profile
zero_freq = np.zeros_like(raw_freq)
zero_gain = np.random.rand(1,256) #np.zeros_like(raw_gain)
zero_damp = np.zeros_like(raw_dampings)
# Get modal response from raw freqs, gains, and dampings
irc_scratch = modal_fir.get_controls(raw_gain, raw_freq, raw_dampings)
ir_scratch = modal_fir.get_signal(irc_scratch['gains'], irc_scratch['frequencies'], irc_scratch['dampings'])
# Get modal response from scaled (passed through get_controls) freqs, gains, dampings
ir = modal_fir.get_signal(gains, frequencies, dampings)
print("ir: ", ir)
print("model's output modal response: ", modal_response)
#print("ir_scratch: ", ir_scratch)
# Convolve together for modal vibration sounds
if scratch == 'raw':
audio = ddsp.core.fft_convolve(impulse_profile, ir_scratch)
elif scratch == 'controls' or scratch =='control':
audio = ddsp.core.fft_convolve(impulse_profile, ir)
else:
audio = ddsp.core.fft_convolve(impulse_profile, modal_response)
print("convolved shape: ", audio.shape)
# Generate and add time constant noise
# Note that in the context, clips.shape[0] is batch size (which is 1 for all testing here)
# clips.shape[1] is the actual clip size (like 441000 for 10 seconds of 44100 audio sampling rate)
#unfiltered_noise = tf.random.uniform((clips.shape[0], int(clips.shape[1] * sample_factor)),
#minval=-1.0, maxval=1.0)
#noise = ddsp.core.frequency_filter(unfiltered_noise, ddsp.core.exp_sigmoid(noise_magnitudes - 4.0), 257)
audio += noise
print("after adding noise: ", audio.shape)
# Add acceleration sound
audio += impulse_profile * acceleration_scale
print("after acceleration sound: ", audio.shape)
# Add reverb
# revc = reverb.get_controls(audio, reverb_gains, reverb_decay)
audio = reverb.get_signal(audio, revc)#revc['ir'])
print("after reverb: ", audio.shape)
# Downsample from internal sampling rate to original recording sampling rate
# audio = ddsp.core.resample(audio, clips.shape[1], 'linear')
# Note that the resample function will return shape [n_timesteps], which is the second parameter
print("audio sample rate: ", audio_sample_rate)
audio = ddsp.core.resample(audio, int(audio_sample_rate)*example_secs, 'linear')
return audio
|
1b7932c165c9615096b79b5d0c19859bc6dd113d
| 3,646,917
|
def dice_coef_multilabel(y_true, y_pred, numLabels=4, channel='channel_first'):
"""
calculate channel-wise dice similarity coefficient
:param y_true: the ground truth
:param y_pred: the prediction
:param numLabels: the number of classes
:param channel: 'channel_first' or 'channel_last'
:return: the dice score
"""
    assert channel == 'channel_first' or channel == 'channel_last', r"channel has to be either 'channel_first' or 'channel_last'"
dice = 0
if channel == 'channel_first':
y_true = np.moveaxis(y_true, 1, -1)
y_pred = np.moveaxis(y_pred, 1, -1)
for index in range(1, numLabels):
temp = dice_coef(y_true[..., index], y_pred[..., index])
dice += temp
dice = dice / (numLabels - 1)
return dice
|
16af1961d900add04f0f277335524ba1568feb12
| 3,646,918
|
import numpy as np
def gaussian(sigma, fs, t=None):
""" return a gaussian smoothing filter
Args:
sigma: standard deviation of a Gaussian envelope
fs: sampling frequency of input signals
t: time scale
Return:
a Gaussian filter and corresponding time scale
"""
if t is None:
t = np.linspace(-sigma*4.0, sigma*4.0, int(sigma*8.0*fs))
gss = np.exp(-0.5 * (t ** 2.0) / sigma ** 2.0)
gss /= np.sum(gss)
return gss, t
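# Hedged usage sketch (values are illustrative, not from the source): a 10 ms
# Gaussian smoothing kernel for a 1 kHz signal spans +/- 4 sigma of the envelope.
gss, t = gaussian(sigma=0.01, fs=1000)
assert abs(np.sum(gss) - 1.0) < 1e-9      # kernel is normalised to unit area
assert len(t) == int(0.01 * 8.0 * 1000)   # 80 samples across the +/- 4 sigma window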
|
aba5d419bb22cd0bfe0a702346dd77735b7f0d4c
| 3,646,919
|
def score_sent(sent):
"""Returns a score btw -1 and 1"""
sent = [e.lower() for e in sent if e.isalnum()]
total = len(sent)
pos = len([e for e in sent if e in positive_wds_with_negation])
neg = len([e for e in sent if e in negative_wds_with_negation])
if total > 0:
return (pos - neg) / total
else:
return 0
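# Hedged usage sketch: positive_wds_with_negation and negative_wds_with_negation are
# module-level word lists defined elsewhere; for illustration only, assume
# positive_wds_with_negation = {"good"} and negative_wds_with_negation = {"bad"}. Then:
# score_sent(["This", "movie", "was", "good"])  -> 0.25  (one positive word out of four tokens)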
|
cc70d035e932513ae27743bbca66ae8d870fcc91
| 3,646,920
|
import torch
def flipud(tensor):
"""
Flips a given tensor along the first dimension (up to down)
Parameters
----------
tensor
a tensor at least two-dimensional
Returns
-------
Tensor
the flipped tensor
"""
return torch.flip(tensor, dims=[0])
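# Minimal usage sketch: rows are reversed, columns are untouched.
t = torch.tensor([[1, 2], [3, 4], [5, 6]])
assert torch.equal(flipud(t), torch.tensor([[5, 6], [3, 4], [1, 2]]))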
|
b0fd62172b0055d9539b554a8c967c058e46b397
| 3,646,921
|
def connect():
"""Function to connect to database on Amazon Web Services"""
try:
engine = create_engine('mysql+mysqlconnector://dublinbikesadmin:dublinbikes2018@dublinbikes.cglcinwmtg3w.eu-west-1.rds.amazonaws.com/dublinbikes')
port=3306
connection = engine.connect()
Session.configure(bind=engine)
return engine
#https://campus.datacamp.com/courses/introduction-to-relational-databases-in-python/advanced-sqlalchemy-queries?ex=2#skiponboarding
except Exception as err:
print ("An error occurred when connecting to the database: ", err)
#https://dev.mysql.com/doc/connector-python/en/connector-python-api-errors-error.html
|
81da870305a853b621f374b521bb680e435d852b
| 3,646,922
|
def get_file_type(filepath):
"""Returns the extension of a given filepath or url."""
return filepath.split(".")[-1]
|
070a1b22508eef7ff6e6778498ba764c1858cccb
| 3,646,923
|
def calcB1grad(B2grad,W2,A2):
"""
Calculates the gradient of the cost with respect to B1 using the chain rule
INPUT: B2grad, [layer3Len,1] ; W2, [layer2Len, layer3Len] ;
A2, [layer2len, 1]
OUTPUT: B1grad, [layer2Len, 1]
"""
temp1 = np.dot(W2,B2grad) #layer2Len * 1 vector
sigmGradient = sigmoidGradient(A2) #layer2len * 1 vector
B1grad = np.multiply(sigmGradient,temp1)
return B1grad
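# Hedged shape-check sketch: sigmoidGradient is defined elsewhere in this module
# (a common choice is sigmoidGradient(a) = a * (1 - a)); layer sizes below are assumptions.
# W2 = np.random.rand(4, 3); B2grad = np.random.rand(3, 1); A2 = np.random.rand(4, 1)
# calcB1grad(B2grad, W2, A2).shape  -> (4, 1)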
|
e214f79be1377b4fc0f36690accf6072fee27884
| 3,646,924
|
import matplotlib.pyplot as plt
def plot_3d(x, y, z, title, labels):
"""
Returns a matplotlib figure containing the 3D T-SNE plot.
Args:
x, y, z: arrays
title: string with name of the plot
labels: list of strings with label names: [x, y, z]
"""
plt.rcParams.update({'font.size': 30, 'legend.fontsize': 20})
plt.rc('font', size=30)
plt.rc('axes', titlesize=35)
labelpad = 30
figure = plt.figure(figsize=(12,12))
ax = figure.add_subplot(projection='3d')
ax.scatter(x, y, z)
ax.set_title(title)
ax.set_xlabel(labels[0], labelpad=labelpad)
ax.set_ylabel(labels[1], labelpad=labelpad)
ax.set_zlabel(labels[2], labelpad=labelpad)
plt.tight_layout()
return figure
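# Hedged usage sketch (the data and output filename below are illustrative assumptions):
# import numpy as np
# pts = np.random.rand(200, 3)
# fig = plot_3d(pts[:, 0], pts[:, 1], pts[:, 2],
#               "t-SNE embedding", ["dim 1", "dim 2", "dim 3"])
# fig.savefig("tsne_3d.png")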
|
624a62f9dc941d6b7cfed06e10250fae8c8defa9
| 3,646,925
|
def is_rating_col_name(col:str)->bool:
"""
Checks to see if the name matches the naming convention for a rating
column of data, i.e. A wrt B
:param col: The name of the column
:return: T/F
"""
if col is None:
return False
elif isinstance(col, (float, int)) and np.isnan(col):
return False
elif is_pw_col_name(col):
return False
else:
return __RATING_COL_REGEX.search(col) is not None
|
57802e888f5a75cdc521a08115ad3b74a56da43d
| 3,646,926
|
def _make_buildifier_command():
"""Returns a list starting with the buildifier executable, followed by any
required default arguments."""
return [
find_data(_BUILDIFIER),
"-add_tables={}".format(find_data(_TABLES))]
|
ab480ff1bc7b21685a4dd95bbc12ae5ee223bdc0
| 3,646,927
|
from typing import Union
def infer_path_type(path: str) -> Union[XPath, JSONPath]:
"""
Infers the type of a path (XPath or JSONPath) based on its syntax.
It performs some basic sanity checks to differentiate a JSONPath from an XPath.
:param path: A valid XPath or JSONPath string.
:return: An instance of JSONPath or XPath
"""
if not path:
raise ValueError("No path given")
if path[0] in ['$', '@']:
return JSONPath(path)
else:
if path[0] in ['.', '/']:
return XPath(path)
else:
raise ValueError("Couldn't identify the path type for {}".format(path))
|
abeed8003b05dd5b66ada1367d0a5acf39102d60
| 3,646,928
|
from math import pi
def get_proximity_angles():
"""Get the angles used for the proximity sensors."""
angles = []
# Left-side of the agent
angles.append(3 * pi / 4) # 135° (counter-clockwise)
for i in range(5): # 90° until 10° with hops of 20° (total of 5 sensors)
angles.append(pi / 2 - i * pi / 9)
# Center
angles.append(0) # 0°
# Right-side of the agent
for i in range(5): # -10° until -90° with hops of 20° (total of 5 sensors)
angles.append(-pi / 18 - i * pi / 9)
angles.append(-3 * pi / 4) # -135° (clockwise)
return angles
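# Quick sanity check: 13 sensors in total, symmetric about the forward direction.
angles = get_proximity_angles()
assert len(angles) == 13
assert angles[6] == 0             # centre sensor points straight ahead
assert angles[0] == -angles[-1]   # +135 degrees on the left mirrors -135 degrees on the right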
|
29c093d1aef0d10d24968af8bee06e6d050e9119
| 3,646,929
|
def delete(request, scenario_id):
"""
Delete the scenario
"""
# Retrieve the scenario
session = SessionMaker()
scenario = session.query(ManagementScenario).filter(ManagementScenario.id == scenario_id).one()
# Delete the current scenario
session.delete(scenario)
session.commit()
return redirect('parleys_creek_management:jobs')
|
4d9c7090d66f8cd3bd055c6f383870b4648a3828
| 3,646,930
|
def angle(p1, p2, p3):
"""Returns an angle from a series of 3 points (point #2 is centroid).
Angle is returned in degrees.
Parameters
----------
p1,p2,p3 : numpy arrays, shape = [n_points, n_dimensions]
Triplets of points in n-dimensional space, aligned in rows.
Returns
-------
angles : numpy array, shape = [n_points]
Series of angles in degrees
"""
v1 = p1 - p2
v2 = p3 - p2
return angle_2v(v1, v2)
|
3e57121a20f18f2ee5728eeb1ea2ffb39500db40
| 3,646,932
|
def transformer_parsing_base():
"""HParams for parsing on WSJ only."""
hparams = transformer_base()
hparams.attention_dropout = 0.2
hparams.layer_prepostprocess_dropout = 0.2
hparams.max_length = 512
hparams.learning_rate_warmup_steps = 16000
hparams.hidden_size = 1024
hparams.learning_rate = 0.05
hparams.shared_embedding_and_softmax_weights = False
return hparams
|
f86b3fe446866ff3de51f02278c2d2c9d7f1b126
| 3,646,933
|
def split_protocol(urlpath):
"""Return protocol, path pair"""
urlpath = stringify_path(urlpath)
if "://" in urlpath:
protocol, path = urlpath.split("://", 1)
if len(protocol) > 1:
# excludes Windows paths
return protocol, path
return None, urlpath
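# Hedged usage sketch (stringify_path is defined elsewhere; assume it returns plain
# str input unchanged):
# split_protocol("s3://bucket/key.parquet")  -> ("s3", "bucket/key.parquet")
# split_protocol("C://Users/data.csv")       -> (None, "C://Users/data.csv")  # single-letter Windows drive
# split_protocol("relative/path.csv")        -> (None, "relative/path.csv")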
|
e9b006d976847daa9a94eb46a9a1c2f53cd9800f
| 3,646,934
|
from pprint import pprint
def createParPythonMapJob(info):
"""
Create map job json for IGRA matchup.
Example:
job = {
'type': 'test_map_parpython',
'params': {
'year': 2010,
'month': 7
},
'localize_urls': [
]
}
"""
print("Info:")
pprint(info, indent=2)
# build parrams
job = {
'type': 'test_map_parpython',
'name': 'test_map_parpython-%04d-%02d' % (int(info['year']), int(info['month'])),
'params': info,
'localize_urls': []
}
print("Job:")
pprint(job, indent=2)
return job
|
142afc4b4be0d77b4921e57c358494dbfc43c6ab
| 3,646,935
|
def calc_lampam_from_delta_lp_matrix(stack, constraints, delta_lampams):
"""
returns the lamination parameters of a laminate
INPUTS
- ss: laminate stacking sequences
- constraints: design and manufacturing guidelines
- delta_lampams: ply partial lamination parameters
"""
lampam = np.zeros((12,), float)
for ind_ply in range(delta_lampams.shape[0]):
lampam += delta_lampams[
ind_ply, constraints.ind_angles_dict[stack[ind_ply]]]
return lampam
|
a1d179f441368f2ebef8dc4be4e2a364d41cf84e
| 3,646,936
|
def perp(i):
"""Calculates the perpetuity to present worth factor.
:param i: The interest rate.
:return: The calculated factor.
"""
return 1 / i
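# Minimal usage sketch: at 5% interest, a perpetual cash flow of 1 per period is
# worth 1 / 0.05 = 20 today (rate is illustrative).
assert abs(perp(0.05) - 20.0) < 1e-9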
|
2fe59a039ac5ecb295eb6c443143b15e41fdfddb
| 3,646,937
|
import numpy as np
def chebyshev(x, y):
"""chebyshev distance.
Args:
x: pd.Series, sample feature value.
y: pd.Series, sample feature value.
Returns:
chebyshev distance value.
"""
    # Chebyshev distance is the maximum absolute coordinate difference.
    return np.max(np.abs(x - y))
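# Minimal usage sketch (the docstring says pd.Series, but plain numpy arrays behave the same):
x = np.array([1, 5, 3])
y = np.array([2, 2, 3])
assert chebyshev(x, y) == 3   # max(|1-2|, |5-2|, |3-3|)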
|
876f0d441c48a7ab4a89b1826eb76459426ad9a3
| 3,646,938
|
def soda_url_helper(*, build_url, config, year, **_):
"""
This helper function uses the "build_url" input from flowbyactivity.py,
which is a base url for data imports that requires parts of the url text
string to be replaced with info specific to the data year. This function
does not parse the data, only modifies the urls from which data is
obtained.
:param build_url: string, base url
:param config: dictionary, items in FBA method yaml
:param year: year
:return: list, urls to call, concat, parse, format into Flow-By-Activity
format
"""
url = build_url
url = url.replace('__format__', str(config['formats'][year]))
url = url.replace('__url_text__', str(config['url_texts'][year]))
return [url]
|
b4e0f8c781a966d0291dad7d897eba02dc7a4e09
| 3,646,939
|
from datetime import datetime
from typing import List, Optional
def search(
submitted_before: Optional[datetime] = None,
submitted_after: Optional[datetime] = None,
awaiting_service: Optional[str] = None,
url:Optional[str] = None,
token:Optional[str] = None,
quiet:bool = False
) -> List[dict]:
"""Query metadatasets according to search critera. If datetimes are
specified without a timezone, they are assumed to be local time. Note that
specifying a timezone is only possible programmatically."""
config = get_config(url, token)
# Converting the datetimes to UTC is done only to have any timezone
# information at all. datetime objects without a timezone will be rejected
# by the API as invalid ISO strings. In principle they can be submitted in
# an arbitrary timezone. Applying `astimezone(utc)` to datetime objects
# without a timezone annotation assumes local time.
args = {
'submitted_before': _add_timezone(submitted_before),
'submitted_after': _add_timezone(submitted_after),
'awaiting_service': awaiting_service
}
args = { k: v for k, v in args.items() if v is not None }
info("Sending query to server", quiet)
with ApiClient(config) as api_client:
api_instance = metadata_api.MetadataApi(api_client)
api_response = api_instance.get_meta_data_sets(**args)
res = [elem.to_dict() for elem in api_response]
return result(res, quiet)
|
a2ce0d86fde2792365f27cf386e7c9ef0d4a0fa1
| 3,646,941
|
import re
from typing import List, Match, Optional, Pattern
def _target_js_variable_is_used(
*, var_name: str, exp_lines: List[str]) -> bool:
"""
Get a boolean value whether target variable is used in
js expression or not.
Parameters
----------
var_name : str
Target variable name.
exp_lines : list of str
js expression lines.
Returns
-------
result : bool
If target variable is used in js expression, True will be
returned.
"""
var_pattern: Pattern = re.compile(pattern=rf'var ({var_name}) = ')
used_pattern_1: Pattern = re.compile(
pattern=rf'{var_name}[ ;\)\.}},\]\[]')
used_pattern_2: Pattern = re.compile(
pattern=rf'{var_name}$')
for line in exp_lines:
if '//' in line:
continue
if var_name not in line:
continue
match: Optional[Match] = var_pattern.search(string=line)
if match is not None:
continue
match = used_pattern_1.search(string=line)
if match is not None:
return True
match = used_pattern_2.search(string=line)
if match is not None:
return True
return False
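# Minimal usage sketch (variable name and expression lines are illustrative):
assert _target_js_variable_is_used(
    var_name="price", exp_lines=["var price = 10;", "total = price * 2;"])
assert not _target_js_variable_is_used(
    var_name="price", exp_lines=["var price = 10;"])   # declared but never used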
|
be07cb1628676717b2a02723ae7c01a7ba7364d6
| 3,646,942
|
def rnn_temporal(x, h0, Wx, Wh, b):
"""
Run a vanilla RNN forward on an entire sequence of data. We assume an input
sequence composed of T vectors, each of dimension D. The RNN uses a hidden
size of H, and we work over a minibatch containing N sequences. After running
the RNN forward, we return the hidden states for all timesteps.
Inputs:
- x: Input data for the entire timeseries, of shape (N, T, D).
- h0: Initial hidden state, of shape (N, H)
- Wx: Weight matrix for input-to-hidden connections, of shape (D, H)
- Wh: Weight matrix for hidden-to-hidden connections, of shape (H, H)
- b: Biases of shape (H,)
Returns a tuple of:
- h: Hidden states for the entire timeseries, of shape (N, T, H).
"""
N, T, _ = x.shape
H = h0.shape[1]
h = np.zeros([N, 0, H])
for t in range(T):
h_step = rnn_step(x[:, t, :], h0 if t == 0 else h[:, t - 1, :], Wx, Wh,
b).reshape(N, 1, H)
h = np.append(h, h_step, axis=1)
return h
|
794fed02ef96c97d9b4ccb6a7278fc72b81eea33
| 3,646,943
|
from typing import Union
def rejection_fixed_lag_stitch(fixed_particle: np.ndarray,
last_edge_fixed: np.ndarray,
last_edge_fixed_length: float,
new_particles: MMParticles,
adjusted_weights: np.ndarray,
stitch_time_interval: float,
min_resample_time_indices: Union[list, np.ndarray],
dist_prior_bound: float,
mm_model: MapMatchingModel,
max_rejections: int,
break_on_zero: bool = False) -> Union[np.ndarray, None, int]:
"""
Attempt up to max_rejections of rejection sampling to stitch a single fixed particle
:param fixed_particle: trajectory prior to stitching time
:param last_edge_fixed: row of last fixed particle
:param last_edge_fixed_length: length of last fixed edge (so don't have to call get_geometry)
:param new_particles: particles proposed to stitching
:param adjusted_weights: non-interacting stitching weights
:param stitch_time_interval: time between stitching observations
:param min_resample_time_indices: indices for row of min_resample_time in new_particles
:param dist_prior_bound: bound on distance transition density (given positive if break_on_zero)
:param mm_model: MapMatchingModel
:param max_rejections: number of rejections to attempt, if none succeed return None
:param break_on_zero: whether to return 0 if new_stitching_distance=0
:return: stitched particle
"""
n = len(new_particles)
for reject_ind in range(max_rejections):
new_index = np.random.choice(n, 1, p=adjusted_weights)[0]
new_particle = new_particles[new_index].copy()
# Reject if new_particle starts from different edge
if not np.array_equal(last_edge_fixed[1:4], new_particle[0, 1:4]):
continue
# Reject if new_particle doesn't overtake fixed_particles
elif np.array_equal(last_edge_fixed[1:4], new_particle[1, 1:4]) and \
new_particle[1, 4] < last_edge_fixed[4]:
continue
# Calculate stitching distance
first_distance_j_to_k = (new_particle[1, 4] - last_edge_fixed[4]) * last_edge_fixed_length
first_distance_k = new_particle[1, -1]
change_dist = np.round(first_distance_j_to_k - first_distance_k, 5)
new_particle[1:(min_resample_time_indices[new_index] + 1), -1] += change_dist
new_stitching_distance = new_particle[min_resample_time_indices[new_index], -1]
if break_on_zero and new_stitching_distance < 1e-5:
return 0
# Evaluate distance prior
new_stitching_distance_prior = mm_model.distance_prior_evaluate(new_stitching_distance, stitch_time_interval)
new_stitching_deviation_prior = mm_model.deviation_prior_evaluate(fixed_particle[-1, 5:7],
new_particle[None,
min_resample_time_indices[new_index], 5:7],
new_stitching_distance)
accept_prob = new_stitching_distance_prior * new_stitching_deviation_prior / dist_prior_bound
if accept_prob > (1 - 1e-5) or np.random.uniform() < accept_prob:
out_particle = np.append(fixed_particle, new_particle[1:], axis=0)
return out_particle
return None
|
aeec4fc1956c7a63f15812988a38d54b63234de4
| 3,646,944
|
def zip_equalize_lists(a, b):
"""
A zip implementation which will not stop when reaching the end of the
smallest list, but will append None's to the smaller list to fill the gap
"""
a = list(a)
b = list(b)
a_len = len(a)
b_len = len(b)
diff = abs(a_len - b_len)
if a_len < b_len:
for _ in range(diff):
a.append(None)
if b_len < a_len:
for _ in range(diff):
b.append(None)
return zip(a, b)
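# Minimal usage sketch: the shorter side is padded with None instead of being truncated.
assert list(zip_equalize_lists([1, 2, 3], ["a"])) == [(1, "a"), (2, None), (3, None)]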
|
1cf5b9cadf4b75f6dab6c42578583585ea7abdfc
| 3,646,945
|
def cover_line(line):
"""
This function takes a string containing a line that should
potentially have an execution count and returns a version
of that line that does have an execution count if deemed
appropriate by the rules in validate_line().
Basically, if there is currently no number where there should
be an execution count (indicating this line did not make
it into the compiled binary), a zero is placed there to
indicate that this line was executed 0 times. Test coverage
viewers will interpret this to mean that the line could
potentially have been executed.
"""
first_bar = line.find("|")
second_bar = line.find("|", first_bar+1)
if validate_line(line, second_bar) and \
line[second_bar-1].strip() == "":
# If this line could have been executed but wasn't (no
# number between first and second bars), put a zero
# before the second bar, indicating that it was
# executed zero times. Test coverage viewers will interpret
# this as meaning the line should have been covered
# but wasn't.
return "".join([line[:second_bar-1],
"0", line[second_bar:]])
# There's already an execution count - this
# template must have been instantiated
return line
|
612cd295b78ce9a0d960b902027827c03733f609
| 3,646,946
|
def find_period(samples_second):
""" # Find Period
Args:
samples_second (int): number of samples per second
Returns:
float: samples per period divided by samples per second
"""
samples_period = 4
return samples_period / samples_second
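# Minimal usage sketch: with 100 samples per second and 4 samples per period,
# one period lasts 0.04 seconds (sampling rate is illustrative).
assert find_period(100) == 0.04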
|
c4a53e1d16be9e0724275034459639183d01eeb3
| 3,646,947
|
def sqrt(x: int) -> int:
"""
Babylonian Square root implementation
"""
z = (x + 1) // 2
y = x
while z < y:
y = z
z = ( (x // z) + z) // 2
return y
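# Minimal usage sketch: returns the floor of the square root.
assert sqrt(16) == 4
assert sqrt(17) == 4
assert sqrt(1) == 1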
|
1a91d35e5783a4984f2aca5a9b2a164296803317
| 3,646,948
|
def is_consecutive_list(list_of_integers):
"""
# ========================================================================
IS CONSECUTIVE LIST
PURPOSE
-------
    Reports whether elements in a list increase in consecutive order.
INPUT
-----
[[List]] [list_of_integers]
- A list of integers.
Return
------
[BOOLEAN]
        - Returns True if each element is exactly one greater than the
          previous element, False otherwise.
# ========================================================================
"""
for i in range(1, len(list_of_integers)):
if list_of_integers[i] - list_of_integers[i - 1] != 1:
return False
return True
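# Minimal usage sketch:
assert is_consecutive_list([3, 4, 5, 6]) is True
assert is_consecutive_list([3, 3, 4]) is False   # repeated value breaks the run
assert is_consecutive_list([1, 3]) is False      # gaps also break the run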
|
3b165eb8d50cc9e0f3a13b6e4d47b7a8155736b9
| 3,646,949
|
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.patches import Circle
from matplotlib.collections import PatchCollection
def circles(x, y, s, c='b', vmin=None, vmax=None, **kwargs):
"""
See https://gist.github.com/syrte/592a062c562cd2a98a83
Make a scatter plot of circles.
Similar to plt.scatter, but the size of circles are in data scale.
Parameters
----------
x, y : scalar or array_like, shape (n, )
Input data
s : scalar or array_like, shape (n, )
Radius of circles.
c : color or sequence of color, optional, default : 'b'
`c` can be a single color format string, or a sequence of color
specifications of length `N`, or a sequence of `N` numbers to be
mapped to colors using the `cmap` and `norm` specified via kwargs.
Note that `c` should not be a single numeric RGB or RGBA sequence
because that is indistinguishable from an array of values
to be colormapped. (If you insist, use `color` instead.)
`c` can be a 2-D array in which the rows are RGB or RGBA, however.
vmin, vmax : scalar, optional, default: None
`vmin` and `vmax` are used in conjunction with `norm` to normalize
luminance data. If either are `None`, the min and max of the
color array is used.
kwargs : `~matplotlib.collections.Collection` properties
Eg. alpha, edgecolor(ec), facecolor(fc), linewidth(lw), linestyle(ls),
norm, cmap, transform, etc.
Returns
-------
paths : `~matplotlib.collections.PathCollection`
Examples
--------
a = np.arange(11)
circles(a, a, s=a*0.2, c=a, alpha=0.5, ec='none')
plt.colorbar()
License
--------
This code is under [The BSD 3-Clause License]
(http://opensource.org/licenses/BSD-3-Clause)
"""
if np.isscalar(c):
kwargs.setdefault('color', c)
c = None
if 'fc' in kwargs:
kwargs.setdefault('facecolor', kwargs.pop('fc'))
if 'ec' in kwargs:
kwargs.setdefault('edgecolor', kwargs.pop('ec'))
if 'ls' in kwargs:
kwargs.setdefault('linestyle', kwargs.pop('ls'))
if 'lw' in kwargs:
kwargs.setdefault('linewidth', kwargs.pop('lw'))
# You can set `facecolor` with an array for each patch,
# while you can only set `facecolors` with a value for all.
zipped = np.broadcast(x, y, s)
patches = [Circle((x_, y_), s_)
for x_, y_, s_ in zipped]
collection = PatchCollection(patches, **kwargs)
if c is not None:
c = np.broadcast_to(c, zipped.shape).ravel()
collection.set_array(c)
collection.set_clim(vmin, vmax)
ax = plt.gca()
ax.add_collection(collection)
ax.autoscale_view()
plt.draw_if_interactive()
if c is not None:
plt.sci(collection)
return collection
|
cb3b2c4316ec573aa29cf5d500f50fbcd64f47b5
| 3,646,950
|