content
stringlengths 35
762k
| sha1
stringlengths 40
40
| id
int64 0
3.66M
|
|---|---|---|
import types
import functools
def copy_func(f):
    """Based on http://stackoverflow.com/a/6528148/190597 (Glenn Maynard)."""
    clone = types.FunctionType(
        f.__code__,
        f.__globals__,
        name=f.__name__,
        argdefs=f.__defaults__,
        closure=f.__closure__,
    )
    # Carry over metadata (__doc__, __dict__, __module__, ...) and the
    # keyword-only defaults, which FunctionType does not copy by itself.
    functools.update_wrapper(clone, f)
    clone.__kwdefaults__ = f.__kwdefaults__
    return clone
|
d661876d8568c5f33ae07682c874edd8d71dd7c9
| 3,641,673
|
from typing import List
def augment(img_list: list, hflip: bool = True, rot: bool = True) -> List[np.ndarray]:
    """
    Randomly augment a batch of images to add robustness to the model.

    @param img_list: The list of images
    @param hflip: If True, a horizontal flip may be applied
    @param rot: If True, a vertical flip and a 90-degree rotation may be applied
    @return: A list of the augmented images
    """
    # Draw each augmentation independently with probability 0.5; the three
    # RNG draws happen in this exact order (matters with a seeded RNG).
    do_hflip = hflip and np.random.random() < 0.5
    do_vflip = rot and np.random.random() < 0.5
    do_rot90 = rot and np.random.random() < 0.5
    augmented = []
    for image in img_list:
        augmented.append(perform_augment(do_hflip, do_vflip, do_rot90, image))
    return augmented
|
3d953ba2c9ce869ec612644d9a5370690c930e22
| 3,641,674
|
def _neurovault_collections(parts, query):
    """Mocks the Neurovault API behind the `/api/collections/` path.
    parts: the parts of the URL path after "collections"
        ie [], ["<somecollectionid>"], or ["<somecollectionid>", "images"]
    query: the parsed query string, e.g. {"offset": "15", "limit": "5"}
    returns a dictionary of API results
    See the neurovault API docs for details: https://neurovault.org/api-docs
    """
    # A non-empty path means a single collection (or its images) was asked for.
    if parts:
        return _neurovault_one_collection(parts)
    # Otherwise: paginated listing of every collection.
    collections, _ = _get_neurovault_data()
    start = int(query.get("offset", 0))
    stop = start + int(query.get("limit", 2))
    page = collections.iloc[start:stop].to_dict(orient="records")
    return {"count": len(collections), "results": page}
|
5ee1e6b9b59fb12e76c38c20cde65c18c3fd201a
| 3,641,675
|
def display_states():
    """Render the template listing every State object in storage."""
    all_states = storage.all(State)
    return render_template('7-states_list.html', states=all_states)
|
b9dc5c739546fee0abce077df1bba38587062f1a
| 3,641,676
|
def recompress_folder(folders, path, extension):
    """Recompress `path` into the dist folder and return the destination path."""
    archive_name = runez.SYS_INFO.platform_id.composed_basename(
        "cpython", path.name, extension=extension
    )
    destination = folders.dist / archive_name
    runez.compress(path, destination, logger=print)
    return destination
|
5cadc1a0b32509630cd3fa5af9fd758899e4bf94
| 3,641,677
|
import pathlib
def guessMimetype(filename):
    """Return the mime-type for `filename`.

    Reads up to `_NUM_SIGNATURE_BYTES` of leading non-null data and matches
    it against known signatures via `filetype`; `.id3`/`.tag` files are
    special-cased so they report the ID3 mime type rather than mpeg.
    Returns None semantics follow `filetype.guess_mime` (no match).
    """
    path = pathlib.Path(filename) if not isinstance(filename, pathlib.Path) else filename
    with path.open("rb") as signature:
        # Since filetype only reads 262 of file many mp3s starting with null bytes will not find
        # a header, so ignoring null bytes and using the bytes interface...
        buf = b""
        while not buf:
            data = signature.read(_NUM_SIGNATURE_BYTES)
            if not data:
                # EOF (file empty or entirely null bytes): match on empty buf.
                break
            data = data.lstrip(b"\x00")
            if data:
                data_len = len(data)
                if data_len >= _NUM_SIGNATURE_BYTES:
                    buf = data[:_NUM_SIGNATURE_BYTES]
                else:
                    # Top the buffer back up to full signature length.
                    buf = data + signature.read(_NUM_SIGNATURE_BYTES - data_len)
        # Special casing .id3/.tag because extended filetype with add_type() prepends, meaning
        # all mp3 would be labeled mimetype id3, while appending would mean each .id3 would be
        # mime mpeg.
        if path.suffix in ID3_MIME_TYPE_EXTENSIONS:
            if Id3Tag().match(buf) or Id3TagExt().match(buf):
                return Id3TagExt.MIME
        return filetype.guess_mime(buf)
|
84f6b2f80b341f330e3f6b9e65b4863d055f8796
| 3,641,678
|
def filter_ptr_checks(props):
    """This function will filter out extra pointer checks.
    Our support to primitives and overflow pointer checks is unstable and
    can result in lots of spurious failures. By default, we filter them out.
    """
    unstable_classes = ("pointer_arithmetic", "pointer_primitives")
    return [prop for prop in props
            if extract_property_class(prop) not in unstable_classes]
|
e5964637c3f1a27521f5305673c9e5af3189e15d
| 3,641,680
|
import time
def makeKeylistObj(keylist_fname, includePrivate=False):
    """Return a new unsigned keylist object for the keys described in
       'keylist_fname'.

       keylist_fname -- path to a config file that declares keys via
           Key({...}) calls.
       includePrivate -- when True, include private key material; every key
           in the file must then be a private key, otherwise
           thandy.FormatException is raised.

       The result is validated against KEYLIST_SCHEMA before being returned.
    """
    keys = []
    # Hook injected into the config file's namespace: each Key(...) call in
    # the file appends its JSON argument to `keys`.
    def Key(obj): keys.append(obj)
    preload = {'Key': Key}
    r = readConfigFile(keylist_fname, (), (), preload)
    klist = []
    for k in keys:
        k = thandy.keys.RSAKey.fromJSon(k)
        if includePrivate and not k.isPrivateKey():
            raise thandy.FormatException("Private key information not found.")
        klist.append({'key': k.format(private=includePrivate), 'roles' : k.getRoles() })
    result = { '_type' : "Keylist",
               'ts' : formatTime(time.time()),
               'keys' : klist }
    KEYLIST_SCHEMA.checkMatch(result)
    return result
|
13e79fbb9ac8ad207cc2533532c6be6bb0372beb
| 3,641,681
|
def getwpinfo(id, wps):
    """Help function to create description of WP inputs.

    Parameters
    ----------
    id : str
        Name of the tagger/discriminator the working points belong to.
    wps : list of str
        Available working point names (e.g. ["VVLoose", "Loose", "Tight"]).

    Returns
    -------
    str
        "<id> working point: <loosest>-<tightest>" when both a 'loose' and a
        'tight' WP exist, otherwise "<id> working point: " followed by the
        comma-separated list of all WPs.
    """
    try:
        # Longest matching name wins, i.e. the WP with the most 'V's.
        wpmin = max([w for w in wps if 'loose' in w.lower()], key=lambda x: len(x))
        wpmax = max([w for w in wps if 'tight' in w.lower()], key=lambda x: len(x))
        info = f"{id} working point: {wpmin}-{wpmax}"
    except ValueError:
        # max() raises ValueError on an empty sequence (no loose/tight WP);
        # the original bare `except:` also hid unrelated errors.
        info = f"{id} working point: {', '.join(wps)}"
    return info
|
0dcf6c205a1988227e23a77e169a9114f1fdf2cc
| 3,641,682
|
def build_word_dg(target_word, model, depth, model_vocab=None, boost_counter=None, topn=5):
    """ Accept a target_word and builds a directed graph based on
    the results returned by model.similar_by_word. Weights are initialized
    to 1. Starts from the target_word and gets similarity results for it's children
    and so forth, up to the specified depth.
    Args
    ----
    target_word (string): Root node.
    model (gensim.models): Gensim word embedding model.
    depth (int): Depth to restrict the search to.
    model_vocab (optional): Vocabulary used to decide whether hubness
        boosting applies to target_word.
    boost_counter (optional): Per-word boost factors; together with
        model_vocab enables boosting of the root's edge weights.
    topn (int): Number of words to check against in the embedding model, default=5.
    """
    _DG = init_digraph()
    seen_set = set()
    # Boost only when both vocab and counter were supplied and the root word
    # is actually in the vocabulary.
    do_hs_boosting = (
        boost_counter and model_vocab and target_word in model_vocab)
    if do_hs_boosting:
        # log10 of the word's corpus count scaled by its boost factor;
        # 0 when the word has no entry in boost_counter.
        weight_boost = log10(float(model.vocab[target_word].count)) * boost_counter[
            target_word] if target_word in boost_counter else 0
        _DG.add_weighted_edges_from([(target_word, word[0], weight_boost + word[1])
                                     for word in model.similar_by_word(target_word, topn=topn)])
    else:
        _DG.add_weighted_edges_from([(target_word, word[0], word[1])
                                     for word in model.similar_by_word(target_word, topn=topn)])
    seen_set.add(target_word)
    # One expansion pass per depth level: every node not yet expanded gets
    # its topn neighbors added as weighted edges.
    for _idx in range(1, depth):
        current_nodes = _DG.nodes()
        for node in current_nodes:
            if node not in seen_set:
                _DG.add_weighted_edges_from(
                    [(node, word[0], word[1]) for word in model.similar_by_word(node, topn=topn)])
                seen_set.add(node)
    return _DG
|
ffd32cef2b44fd9e9cd554cd618091dfe8e5377f
| 3,641,683
|
def sample_normal_gamma(mu, lmbd, alpha, beta):
    """Draw one (mu, tau) sample from a Normal-gamma distribution.

    https://en.wikipedia.org/wiki/Normal-gamma_distribution

    Parameters
    ----------
    mu : float
        Location parameter of the normal component.
    lmbd : float
        Precision scaling (lambda > 0) of the normal component.
    alpha, beta : float
        Shape and *rate* of the gamma distribution on the precision tau.

    Returns
    -------
    (mu, tau) : tuple of float
        A sample of the mean and precision.
    """
    # np.random.gamma takes (shape, scale); the Normal-gamma definition uses
    # rate beta, so scale = 1/beta.  (The original passed beta as the scale.)
    tau = np.random.gamma(alpha, 1.0 / beta)
    mu = np.random.normal(mu, 1.0 / np.sqrt(lmbd * tau))
    return mu, tau
|
0f11ce95cfb772aeb023b61300bdb03d827cab37
| 3,641,685
|
def _dice(terms):
    """
    Returns the elements of iterable *terms* in tuples of every possible length
    and range, without changing the order. This is useful when parsing a list of
    undelimited terms, which may span multiple tokens. For example:
    >>> _dice(["a", "b", "c"])
    [('a', 'b', 'c'), ('a', 'b'), ('b', 'c'), ('a',), ('b',), ('c',)]
    """
    # Remove all of the terms that look like delimiters.  Materialize the
    # result: on Python 3, filter() returns an iterator, so the original
    # code raised TypeError at len(terms) below.
    terms = [t for t in terms if not _is_delimiter(t)]
    y = []
    # Longest windows first, then every shorter window left-to-right.
    for n in range(len(terms), 0, -1):
        for m in range(0, len(terms) - (n - 1)):
            y.append(tuple(terms[m:m + n]))
    return y
|
bb8f567d82405864c0bf81b2ee9f3cb89b875d11
| 3,641,687
|
from datetime import datetime
def parse_date(val, format):
    """
    Attempts to parse the given string date according to the
    provided format, raising InvalidDateError in case of problems.
    @param str val (e.g. 2014-08-12)
    @param str format (e.g. %Y-%m-%d)
    @return datetime.date
    """
    try:
        parsed = datetime.strptime(val, format)
    except ValueError:
        raise InvalidDateError("unable to parse %s" % val)
    return parsed.date()
|
4686bf46d12310ee7ac4aa1986df55b598909a06
| 3,641,688
|
def get_capture_dimensions(capture):
    """Get the dimensions of a capture as an integer (width, height) pair."""
    frame_props = (cv2.CAP_PROP_FRAME_WIDTH, cv2.CAP_PROP_FRAME_HEIGHT)
    width, height = (int(capture.get(prop)) for prop in frame_props)
    return width, height
|
9a13253c1ca5c44b7a1ef4440989b1af9abcb776
| 3,641,689
|
def ad_modify_user_pwd_by_mail(user_mail_addr, old_password, new_password):
    """
    Change a user's Active Directory password, looking the user up by mail address.

    :param user_mail_addr: mail address used to resolve the user's DN
    :param old_password: the user's current password
    :param new_password: the password to set
    :return: result of the LDAP modify_password extended operation
    """
    conn = __ad_connect()
    user_dn = ad_get_user_dn_by_mail(user_mail_addr)
    result = conn.extend.microsoft.modify_password(user="%s" % user_dn, new_password="%s" % new_password,
                                                   old_password="%s" % old_password)
    # Release the LDAP connection before returning.
    conn.unbind()
    return result
|
7cc5c654517ad3f175e06500310f0bbfec516ad1
| 3,641,692
|
def markup_record(record_text, record_nr, modifiers, targets, output_dict):
    """ Takes current Patient record, applies context algorithm,
    and appends result to output_dict

    record_text: free-text patient record (lower-cased before processing)
    record_nr: key under which the result is stored in output_dict
    modifiers, targets: rule sets forwarded to markup_sentence
    output_dict: mutated in place; entry is
        {record_nr: {"object": ConTextDocument, "xml": str}}
    returns output_dict (the same, mutated, dictionary)
    """
    # Is used to collect multiple sentence markups. So records can be complete
    context = pyConText.ConTextDocument()
    # Split record into sentences making use of TextBlob
    blob = TextBlob(record_text.lower())
    count = 0
    markup_result = []
    # Add markup per sentence
    for sentence in blob.sentences:
        m = markup_sentence(sentence.raw, modifiers=modifiers, targets=targets)
        markup_result.append(m)
        count = count + 1
    print("\nFor record number:", record_nr)
    print("Number of sentences that have been marked up:", count)
    # Add sentence markup to contextDocument
    for sentence_markup in markup_result:
        context.addMarkup(sentence_markup)
    # Append context object and xml to output dictionary,
    # with as key the record number
    context_xml = context.getXML()
    output_dict.update({record_nr: {"object": context, "xml": context_xml}})
    return(output_dict)
|
2eee4560a411bcd7ef364b6ed9b37cc2870cd3b5
| 3,641,693
|
import inspect
def get_file_name(file_name):
    """
    Returns a Testsuite name
    """
    # Find the first frame on the call stack whose (lower-cased) file path
    # contains `file_name`; no match means no testsuite was found.
    matching_frame = None
    for frame_info in inspect.stack():
        if file_name in frame_info.filename.lower():
            matching_frame = frame_info
            break
    if matching_frame is None:
        return ""
    # Handle both POSIX and Windows path separators.
    separator = '/' if '/' in matching_frame.filename else '\\'
    basename = matching_frame.filename.split(separator)[-1]
    return basename.split(".")[0]
|
97172600d785339501f5e58e8aca6581a0a690e0
| 3,641,694
|
import torch
def track_edge_matrix_by_spt(batch_track_bbox, batch_track_frames, history_window_size=50):
    """
    Build a spatio-temporal similarity ("edge") tensor between every pair of
    time steps of each track.

    :param batch_track_bbox: B, M, T, 4 (x, y, w, h)
    :param batch_track_frames: B, M, T frame indices
    :param history_window_size: normalizer for the temporal distance
    :return: B, M, T, T, 5 tensor of (t, x, y, w, h) similarities
    """

    def _pairwise_similarity(values):
        # 1 - |v_i - v_j| for every (i, j) pair of time steps; broadcasting
        # over the two inserted axes replaces explicit expand() calls.
        return 1 - torch.abs(values.unsqueeze(3) - values.unsqueeze(2))

    positions = batch_track_bbox[..., :2]
    extents = batch_track_bbox[..., 2:]
    frames = batch_track_frames[..., None]
    # Temporal similarity is normalized by the history window length.
    sim_t = 1 - torch.abs(frames.unsqueeze(3) - frames.unsqueeze(2)) / history_window_size
    sim_xy = _pairwise_similarity(positions)
    sim_wh = _pairwise_similarity(extents)
    # B, M, T, T, 5
    return torch.cat([sim_t, sim_xy, sim_wh], dim=-1)
|
5303f401d925c26a1c18546ba371a2119a41ec3d
| 3,641,695
|
def _file(space, fname, flags=0, w_ctx=None):
    """ file - Reads entire file into an array

    PHP-style file() builtin: returns an array of the file's lines, or
    space.w_False (with a warning) on any error.  Supported flag bits:
    'FILE_USE_INCLUDE_PATH': 1,
    'FILE_IGNORE_NEW_LINES': 2,
    'FILE_SKIP_EMPTY_LINES': 4,
    'FILE_NO_DEFAULT_CONTEXT': 16,
    """
    # open_basedir restriction check.
    if not is_in_basedir(space, 'file', fname):
        space.ec.warn("file(%s): failed to open stream: %s " %
                      (fname, 'Operation not permitted'))
        return space.w_False
    # 23 = 1|2|4|16, i.e. every supported flag bit set.
    if flags > 23 or flags < 0:
        space.ec.warn("file(): '%d' flag is not supported" % flags)
        return space.w_False
    if fname == "":
        space.ec.warn("file(): Filename cannot be empty")
        return space.w_False
    ignore_new_lines = flags & 2 != 0
    skip_empty_lines = flags & 4 != 0
    try:
        _fname = rpath.normpath(fname)
        arr_list = []
        fstream = open(_fname)
        line = fstream.readline()
        while line != '':
            if ignore_new_lines:
                line = line.rstrip('\n')
            # NOTE: a line is only "empty" after newline stripping, so
            # FILE_SKIP_EMPTY_LINES only has effect with FILE_IGNORE_NEW_LINES.
            if skip_empty_lines and line == "":
                line = fstream.readline()
                continue
            arr_list.append(space.newstr(line))
            line = fstream.readline()
        return space.new_array_from_list(arr_list)
    except OSError:
        space.ec.warn("file(%s): failed to open stream: "
                      "No such file or directory" % fname)
        return space.w_False
    except IOError:
        space.ec.warn("file(%s): failed to open stream: "
                      "No such file or directory" % fname)
        return space.w_False
|
d8a04244c90f3f730c297a8dbaa1372acd61993b
| 3,641,696
|
def prepare_features(tx_nan, degree, mean_nan=None, mean=None, std=None):
    """Clean and prepare for learning. Mean imputing, missing value indicator, standardize.

    Args:
        tx_nan: (N, D) feature matrix, possibly containing NaNs.
        degree: polynomial expansion degree passed to build_poly.
        mean_nan: per-column means used for NaN imputation; computed from
            tx_nan when None (pass the train-set values for test data).
        mean, std: standardization parameters forwarded to standardize_numpy;
            computed when None.

    Returns:
        (tx, mean, std, mean_nan, nan_cols): the prepared matrix, the
        standardization parameters, the imputation means, and the indices of
        columns that contained NaNs.
    """
    # Get column means, if necessary
    if mean_nan is None: mean_nan = np.nanmean(tx_nan, axis=0)
    # Replace NaNs
    tx_val = np.where(np.isnan(tx_nan), mean_nan, tx_nan)
    # Polynomial features
    tx = build_poly(tx_val, degree)
    const_col = tx.shape[1] - 1
    # Add NaN indicator columns (1 where the original value was missing).
    # The redundant np.empty pre-allocation of ind_cols was removed: it was
    # immediately overwritten by np.where.
    nan_cols = np.flatnonzero(np.any(np.isnan(tx_nan), axis=0))
    ind_cols = np.where(np.isnan(tx_nan[:, nan_cols]), 1, 0)
    tx = np.c_[tx, ind_cols]
    # Standardize, then restore the constant (bias) column clobbered by it.
    tx, mean, std = standardize_numpy(tx, mean, std)
    tx[:, const_col] = 1.0
    return tx, mean, std, mean_nan, nan_cols
|
2f9fd73cd04b40a85556573a62a083a0ffaa725c
| 3,641,697
|
def _write_matt2(model, name, mids, nmaterials, op2, op2_ascii, endian):
    """writes the MATT2

    Serializes the MATT2 temperature-dependent material records of `model`
    into the binary OP2 stream `op2`, echoing each record to `op2_ascii`.

    model: object exposing a MATT2 mapping of mid -> MATT2 card
    name: record name forwarded to write_header
    mids: material ids to write (emitted in sorted order)
    nmaterials: number of materials, forwarded to write_header
    endian: endianness prefix bytes for struct packing
    returns nbytes written as reported by write_header
    """
    #Record - MATT2(803,8,102)
    #Word Name Type Description
    #1 MID I Material identification number
    #2 TID(15) I TABLEMi entry identification numbers
    #17 UNDEF None
    key = (803, 8, 102)
    nfields = 17
    spack = Struct(endian + b'17i')
    nbytes = write_header(name, nfields, nmaterials, key, op2, op2_ascii)
    for mid in sorted(mids):
        mat = model.MATT2[mid]
        # Field order must match the 17-word record layout documented above;
        # the two 0 entries are the undefined/padding words.
        data = [
            mat.mid,
            mat.g11_table,
            mat.g12_table,
            mat.g13_table,
            mat.g22_table,
            mat.g23_table,
            mat.g33_table,
            mat.rho_table,
            mat.a1_table,
            mat.a2_table,
            mat.a3_table,
            0,
            mat.ge_table,
            mat.st_table,
            mat.sc_table,
            mat.ss_table,
            0,
        ]
        assert None not in data, data
        assert len(data) == nfields, len(data)
        op2_ascii.write(' mid=%s data=%s\n' % (mid, data[1:]))
        op2.write(spack.pack(*data))
    return nbytes
|
607b94a6c1e3daf4b482acbb1df1ce967f1bce3b
| 3,641,698
|
def all_subclasses(cls):
    """Returns all known (imported) subclasses of a class, recursively."""
    direct = cls.__subclasses__()
    found = list(direct)
    for subclass in direct:
        found.extend(all_subclasses(subclass))
    return found
|
8b9a2ecd654b997b5001820d6b85e442af9cee3b
| 3,641,699
|
def _find_popular_codon(aa):
"""
This function returns popular codon from a 4+ fold degenerative codon.
:param aa: dictionary containing amino acid information.
:return:
"""
codons = [c[:2] for c in aa["codons"]]
counts = []
for i in range(len(codons)):
pc = codons[i]
count = 0
for j in range(len(codons)):
if codons[j] == pc:
count += 1
counts.append(count)
# find index of the higest entry
highest = 0
for i in range(len(counts)):
if counts[i] > counts[highest]:
highest = i
return aa["codons"][highest]
|
a555a9d42ea4dfa0260d9d4d2040de3c6fca69a0
| 3,641,700
|
import pathlib
def initialize_cluster_details(scale_version, cluster_name, username,
                               password, scale_profile_path,
                               scale_replica_config):
    """ Initialize cluster details.
    :args: scale_version (string), cluster_name (string),
           username (string), password (string), scale_profile_path (string),
           scale_replica_config (bool)
    """
    # Profile name/dir are derived from the profile file path.
    profile = pathlib.PurePath(scale_profile_path)
    return {
        'scale_version': scale_version,
        'scale_cluster_clustername': cluster_name,
        'scale_service_gui_start': "True",
        'scale_gui_admin_user': username,
        'scale_gui_admin_password': password,
        'scale_gui_admin_role': "Administrator",
        'scale_sync_replication_config': scale_replica_config,
        'scale_cluster_profile_name': str(profile.stem),
        'scale_cluster_profile_dir_path': str(profile.parent),
    }
|
5508733e0bfbd20fb76ecaaf0df7f41675b0c5c8
| 3,641,701
|
def load(data_home=None):
    """Load RWC-Genre dataset
    Args:
        data_home (str): Local path where the dataset is stored.
            If `None`, looks for the data in the default directory, `~/mir_datasets`
    Returns:
        (dict): {`track_id`: track data}
    """
    # NOTE(review): local names say "rwc_popular" while the docstring says
    # RWC-Genre — likely copied from the RWC-Popular loader; verify that
    # DATASET_DIR / track_ids / Track point at the intended dataset.
    if data_home is None:
        data_home = utils.get_default_dataset_path(DATASET_DIR)
    rwc_popular_data = {}
    for key in track_ids():
        rwc_popular_data[key] = Track(key, data_home=data_home)
    return rwc_popular_data
|
61d09f64ec7f36bc1dac6bfc6bea8e47fe82248b
| 3,641,702
|
import copy
def collate_spectra_by_source(source_list, tolerance, unit=u.arcsec):
    """Given a list of spec1d files from PypeIt, group the spectra within the
    files by their source object. The grouping is done by comparing the
    position of each spectra (using either pixel or RA/DEC) using a given tolerance.
    Args:
        source_list (list of :obj:`SourceObject`): A list of source objects, one
            SpecObj per object, ready for collation.
        tolerance (float):
            Maximum distance that two spectra can be from each other to be
            considered to be from the same source. Measured in floating
            point pixels or as an angular distance (see ``unit`` argument).
        unit (:obj:`astropy.units.Unit`):
            Units of ``tolerance`` argument if match_type is 'ra/dec'.
            Defaults to arcseconds. Ignored if match_type is 'pixel'.
    Returns:
        (list of `obj`:SourceObject): The collated spectra as SourceObjects.
    """
    collated_list = []
    for source in source_list:
        # Search for a collated SourceObject that matches this one.
        # If one can't be found, treat this as a new collated SourceObject.
        found = False
        for collated_source in collated_list:
            if collated_source.match(source.spec_obj_list[0],
                                     source.spec1d_header_list[0],
                                     tolerance, unit):
                collated_source.combine(source)
                found = True
                # NOTE(review): no break here — a source matching several
                # collated entries is combined into each of them; confirm
                # matches are intended to be mutually exclusive.
        if not found:
            # Deep copy so later combine() calls don't mutate the input list.
            collated_list.append(copy.deepcopy(source))
    return collated_list
|
a82b470685ee53f3fe2de5e41a3f212e32a4d606
| 3,641,703
|
def tolist(obj):
    """
    Convert given `obj` to list.
    If `obj` is not a list, return `[obj]`, else return `obj` itself.
    """
    return obj if isinstance(obj, list) else [obj]
|
f511f4ebb86977b2db8646e692abc9840c2ae2d1
| 3,641,704
|
def bip44_tree(config: dict, cls=hierarchy.Node) -> hierarchy.Node:
    """
    Return the root node of a BIP44-compatible partially ordered hierarchy.
    https://github.com/bitcoin/bips/blob/master/bip-0044.mediawiki
    The `config` parameter is a dictionary of the following form:
    - the keys of the dictionary are crypto-coins;
    - the values of the dictionary specify the number of accounts to generate for each coin,
      and the number of public/private addresses to generate for each account.
    As an example:
    {'BTC': (
        (1, 2), (4, 5), (0, 1)
    )}
    The previous dictionary represents a single coin, BTC.
    There are three accounts, that respectively have 1, 4, and 0 private addresses and 2, 5, and 1 public addresses.
    """
    # m / 44' / coin' / account / (0=XPUB | 1=XPRV) / address...
    master_node = cls(0, tag='m')
    # Purpose index is hardened (44').
    purpose_node = cls(44 + constants.CryptoConstants.BIP32_HARDENED_INDEX.value, tag="44'")
    master_node.edges.append(purpose_node)
    for coin, coin_config in config.items():
        assert isinstance(coin, str)
        assert coin_config
        coin_node = cls(constants.CoinType[coin].value, coin)
        purpose_node.edges.append(coin_node)
        # One (n_private, n_public) pair per account, account index = i.
        for i, (n_private_addresses, n_public_addresses) in enumerate(coin_config):
            assert n_private_addresses > 0 or n_public_addresses > 0
            account_node = cls(i)
            coin_node.edges.append(account_node)
            public_node = cls(0, 'XPUB')
            account_node.edges.append(public_node)
            private_node = cls(1, 'XPRV')
            account_node.edges.append(private_node)
            # Addresses form a chain (each address is a child of the
            # previous one), not siblings — hence "partially ordered".
            previous_node = private_node
            for j in range(n_private_addresses):
                private_address_node = cls(j)
                previous_node.edges.append(private_address_node)
                previous_node = private_address_node
            previous_node = public_node
            for j in range(n_public_addresses):
                public_address_node = cls(j)
                previous_node.edges.append(public_address_node)
                previous_node = public_address_node
    return master_node
|
bd88895932b66963aa7f63f30ad49ac009ea41f1
| 3,641,705
|
def delete_useless_vrrp_subnets(client, to_delete, project_id):
    """
    Delete surplus VRRP subnets of a project and return their ids.

    :param 'Client' client
    :param dict((prefix_length, type, master_region, slave_region),
                (state:quantity)) to_delete -- each key identifies a subnet
        group; the value bounds how many subnets of that group to delete.
    :param project_id: project whose VRRP subnets are listed
    :rtype: list of deleted subnet ids
    """
    result = []
    vrrp_subnets = client.vrrp.list(project_id=project_id)
    for key in to_delete:
        # Subnets matching this key; only "ipv4" typed keys can match.
        vrrp_to_delete = [vrrp for vrrp in vrrp_subnets if (
            int(vrrp.cidr.split('/')[1]), "ipv4",
            vrrp.master_region, vrrp.slave_region) == key]
        # NOTE(review): itemgetter("status") implies mapping-style access
        # while cidr/master_region above use attribute access — confirm the
        # vrrp objects support both (e.g. a dict-like resource wrapper).
        vrrp_to_delete.sort(key=itemgetter("status"), reverse=True)
        # Delete at most to_delete[key] subnets of this group.
        for vrrp in vrrp_to_delete[:to_delete.get(key)]:
            client.vrrp.delete(vrrp.id)
            result.append(vrrp.id)
    return result
|
b16019b026c32d310f9f938a7ca1fada31d02d84
| 3,641,706
|
import torch
import warnings
def barycenter_wbc(P, K, logweights, Kb=None, c=None, debiased=False,
                   maxiter=1000, tol=1e-4):
    """Compute the Wasserstein divergence barycenter between histograms.

    P: (n_hists, width, width) stack of input histograms (images).
    K: Gibbs kernel used by the convolution helpers.
    logweights: per-histogram log-weights; softmax-normalized below and
        marked requires_grad so the barycenter is differentiable w.r.t. them.
    Kb, c: optional warm-start values for the Sinkhorn scalings.
    debiased: if True, apply the debiasing correction each iteration.
    maxiter, tol: stop after maxiter sweeps or once the update of q falls
        below tol (after at least 5 iterations).
    Returns the barycenter q (1, width, width).
    """
    n_hists, width, _ = P.shape
    if Kb is None:
        b = torch.ones_like(P)[None, :]
        Kb = convol_huge_imgs(b, K)
    if c is None:
        c = torch.ones(1, width, width, device=P.device)
    q = c.clone()
    logweights.requires_grad = True
    err = 1
    weights = torch.softmax(logweights, dim=1)[:, :, None, None]
    for ii in range(maxiter):
        # Snapshot of q for the convergence check (kept out of the graph).
        with torch.no_grad():
            qold = q.detach().clone()
        # Sinkhorn scaling updates: a, then the geometric mean under weights.
        a = P[None, :] / Kb
        Ka = convol_huge_imgs(a, K.t())
        q = c * torch.prod((Ka) ** weights, dim=1)
        if debiased:
            # Debiasing potential update (Sinkhorn divergence correction).
            Kc = convol_imgs(c, K.t())
            c = (c * q / Kc) ** 0.5
        Q = q[:, None, :, :]
        b = Q / Ka
        Kb = convol_huge_imgs(b, K)
        if torch.isnan(q).any():
            warnings.warn("Numerical Errors ! Stopped early in debiased = %s" % debiased)
            break
        with torch.no_grad():
            err = abs(q - qold).max()
            if err < tol and ii > 5:
                break
    if ii == maxiter - 1:
        warnings.warn("*** Maxiter reached ! err = {} ***".format(err))
    return q
|
453c40ed988d4fe86dc202f816c5eb3bb6cbd452
| 3,641,707
|
def logistic_predict(weights, data):
    """
    Compute the probabilities predicted by the logistic classifier.
    Note: N is the number of examples and
          M is the number of features per example.
    Inputs:
        weights: (M+1) x 1 vector of weights, where the last element
                 corresponds to the bias (intercepts).
        data: N x M data matrix where each row corresponds
              to one data point.
    Outputs:
        y: N x 1 vector of probabilities. This is the output of the classifier.
    """
    # The last weight is the bias; the original sliced it off with
    # weights[:len(data[0])] and never added it to the activation.
    z = np.dot(data, weights[:-1]) + weights[-1]
    y = sigmoid(z)
    return y
|
52c2ff3ed4b854de645b2252b4949b4a7a68bda1
| 3,641,709
|
def score_matrix(motifs, k):
    """returns matrix score formed from motifs"""
    # Per-position counts of each nucleotide across all motifs.
    profile = {'A': [0] * k, 'T': [0] * k, 'C': [0] * k, 'G': [0] * k}
    for motif in motifs:
        for position, base in enumerate(motif):
            profile[base][position] += 1
    # A column contributes the number of non-consensus nucleotides, i.e.
    # the column total minus one occurrence of its most frequent base.
    total_score = 0
    for position in range(k):
        column = [profile[base][position] for base in profile]
        total_score += sum(column) - max(column)
    return total_score
|
ce9f7b770ce75d4e872da7b3c9b4fa3fbcd1e900
| 3,641,710
|
def log_loss(y_true, dist_pred, sample=True, return_std=False):
    """ Log loss
    Parameters
    ----------
    y_true: np.array
        The true labels
    dist_pred: ProbabilisticEstimator.Distribution
        The predicted distribution
    sample: boolean, default=True
        If true, loss will be averaged across the sample
    return_std: boolean, default=False
        If true, the standard deviation of the
        loss sample will be returned
    Returns
    -------
    np.array
        Loss (with standard deviation if ``return_std`` is True)
    """
    # Negative log-likelihood of each true label under the predicted density.
    loss = -np.log(dist_pred.pdf(y_true))
    if not sample:
        return loss
    return sample_loss(loss, return_std)
|
0f3d19111593441011cfb1e532be50a19d423390
| 3,641,711
|
import scipy
def matrix_pencil_method_old(data, p, noise_level=None, verbose=1, **kwargs):
    """ Older implementation of the matrix pencil method with pencil p on given data to
    extract energy levels.
    Parameters
    ----------
    data -- lists of Obs, where the nth entry is considered to be the correlation function
            at x0=n+offset.
    p -- matrix pencil parameter which corresponds to the number of energy levels to extract.
         higher values for p can help decreasing noise.
    noise_level -- If this argument is not None an additional prefiltering via singular
                   value decomposition is performed in which all singular values below 10^(-noise_level)
                   times the largest singular value are discarded. This increases the computation time.
    verbose -- if larger than zero details about the noise filtering are printed to stdout
               (default 1)
    """
    n_data = len(data)
    if n_data <= p:
        raise Exception('The pencil p has to be smaller than the number of data samples.')
    # Hankel matrix of the correlator; the @ identity promotes it to the
    # Obs-valued matrix type used downstream.
    matrix = scipy.linalg.hankel(data[:n_data - p], data[n_data - p - 1:]) @ np.identity(p + 1)
    if noise_level is not None:
        # SVD prefilter: keep only singular values within the requested
        # dynamic range of the largest one.
        u, s, vh = svd(matrix)
        s_values = np.vectorize(lambda x: x.value)(s)
        if verbose > 0:
            print('Singular values: ', s_values)
        digit = np.argwhere(s_values / s_values[0] < 10.0**(-noise_level))
        if digit.size == 0:
            digit = len(s_values)
        else:
            digit = int(digit[0])
        if verbose > 0:
            print('Consider only', digit, 'out of', len(s), 'singular values')
        new_matrix = u[:, :digit] * s[:digit] @ vh[:digit, :]
        y1 = new_matrix[:, :-1]
        y2 = new_matrix[:, 1:]
    else:
        y1 = matrix[:, :-1]
        y2 = matrix[:, 1:]
    # Moore–Penrose pseudoinverse
    pinv_y1 = pinv(y1)
    # Note: Automatic differentiation of eig is implemented in the git of autograd
    # but not yet released to PyPi (1.3). The code is currently part of pyerrors
    e = eig((pinv_y1 @ y2), **kwargs)
    # Energy levels are minus the log of the eigenvalue magnitudes,
    # sorted by the magnitude of their central value.
    energy_levels = -np.log(np.abs(e))
    return sorted(energy_levels, key=lambda x: abs(x.value))
|
4bcb435b3b16b153d0d1f1689f542df1fdc74ca8
| 3,641,712
|
def ext_sum(text, ratio=0.8):
    """
    Generate extractive summary using BERT model
    INPUT:
        text - str. Input text
        ratio - float. Enter a ratio between 0.1 - 1.0 [default = 0.8]
                (ratio = summary length / original text length)
    OUTPUT:
        summary - str. Generated summary
    """
    model = Summarizer()
    return model(text, ratio=ratio)
|
99285d08425340f70984ce0645efdbaaa3e9072a
| 3,641,713
|
def khinalug_input_normal(field, text):
    """
    Prepare a string from one of the query fields for subsequent
    processing: replace common shortcuts with valid Khinalug characters.
    """
    # Only word-form / lemma / Russian translation fields use the shortcuts.
    if field not in ('wf', 'lex', 'lex2', 'trans_ru', 'trans_ru2'):
        return text
    # Order matters: marked shortcuts ('c1_', "c1'") must be rewritten
    # before their shorter prefixes ('c1', 'c_', "c'").
    replacements = (
        ('c1_', 'č̄'),
        ("c1'", 'č̣'),
        ('7', 'ˁ'),
        ('g1', 'ǧ'),
        ('s1', 'š'),
        ('z1', 'ž'),
        ('c1', 'č'),
        ('j1', 'ǯ'),
        ('a1', 'ä'),
        ('u1', 'ü'),
        ('o1', 'ö'),
        ('i1', 'ı'),
        ('k_', 'k̄'),
        ('t_', 't̄'),
        ('q_', 'q̄'),
        ('c_', 'c̄'),
        ("c'", 'c̣'),
        ("k'", 'ḳ'),
        ("q'", 'q̇'),
        ("x'", 'x̣'),
        ("t'", 'ṭ'),
        ("h'", 'ḥ'),
    )
    for shortcut, character in replacements:
        text = text.replace(shortcut, character)
    return text
|
b9b9413ae461b6a03aa8c0db4396658dbe242c91
| 3,641,714
|
from typing import List
from typing import Dict
from typing import Any
def _shift_all_classes(classes_list: List[ndarray], params_dict: Dict[str, Any]):
"""Shift the locale of all classes.
Args:
classes_list: List of classes as numpy arrays.
params_dict: Dict including the shift values for all classes.
Returns:
List of shifted classes.
"""
classes_df = pd.DataFrame()
shifted_classes = []
# shift all classes
for generated_class, shift in zip(classes_list, params_dict["all_shifts"]):
# shift class data and exclude the label from shifting
label = generated_class[:, 0].reshape(-1, 1)
shifted_class_data = generated_class[:, 1:] + shift
classes_df["mean_" + str(shift)] = shifted_class_data.flatten()
labeled_shifted_class = np.hstack((label, shifted_class_data))
assert labeled_shifted_class[:, 0].all() == label.all()
shifted_classes.append(labeled_shifted_class)
return shifted_classes, classes_df
|
2176e5f4da6aecc25386e978182887fb8568faaa
| 3,641,715
|
def fully_connected_layer(tensor,
                          size=None,
                          weight_init=None,
                          bias_init=None,
                          name=None):
    """Fully connected layer.

    NOTE(review): this uses TF1-era APIs (``get_shape()[-1].value``,
    ``tf.truncated_normal``, ``tf.name_scope(name, default, values)``) and
    will not run unmodified on TF2.

    Parameters
    ----------
    tensor: tf.Tensor
        Input tensor.
    size: int
        Number of output nodes for this layer.
    weight_init: float
        Weight initializer.
    bias_init: float
        Bias initializer.
    name: str
        Name for this op. Defaults to 'fully_connected'.
    Returns
    -------
    tf.Tensor:
        A new tensor representing the output of the fully connected layer.
    Raises
    ------
    ValueError
        If input tensor is not 2D.
        (NOTE(review): no explicit 2-D check here — presumably raised by
        the underlying TF ops; confirm.)
    """
    if weight_init is None:
        num_features = tensor.get_shape()[-1].value
        # Default: truncated normal with small stddev.
        weight_init = tf.truncated_normal([num_features, size], stddev=0.01)
    if bias_init is None:
        bias_init = tf.zeros([size])
    with tf.name_scope(name, 'fully_connected', [tensor]):
        w = tf.Variable(weight_init, name='w', dtype=tf.float32)
        b = tf.Variable(bias_init, name='b', dtype=tf.float32)
        # x @ w + b
        return tf.nn.xw_plus_b(tensor, w, b)
|
605cc52e8c5262aead6cb758488940e7661286b1
| 3,641,716
|
from pathlib import Path
def fetch_osborne_magnetic(version: int) -> Path:
    """
    Magnetic airborne survey of the Osborne Mine and surroundings, Australia
    This is a section of a survey acquired in 1990 by the Queensland
    Government, Australia. The line data have approximately 80 m terrain
    clearance and 200 m line spacing. Total field anomalies are in nT. The
    flight height was calculated by summing the terrain clearance to
    interpolated values of SRTM (referenced to sea level). The section contains
    the total field magnetic anomalies associated with the Osborne Mine,
    Lightning Creek sill complex, and the Brumby prospect.
    There are ~990,000 measurements in total with 5 columns available: flight
    line number, longitude, latitude (geodetic), height (orthometric), and the
    total field magnetic anomaly.
    **Format:** CSV with xz (lzma) compression.
    **Load with:** :func:`pandas.read_csv`
    **Original source:** `Geophysical Acquisition & Processing Section 2019.
    MIM Data from Mt Isa Inlier, QLD (P1029), magnetic line data, AWAGS
    levelled. Geoscience Australia, Canberra
    <http://pid.geoscience.gov.au/dataset/ga/142419>`__
    **Original license:** CC-BY
    **Versions:**
    * `1
      <https://github.com/fatiando-data/osborne-magnetic/releases/tag/v1>`_
      (doi:`10.5281/zenodo.5882209 <https://doi.org/10.5281/zenodo.5882209>`__)
    Parameters
    ----------
    version : int
        The data version to fetch. See the available versions above.
    Returns
    -------
    fname : :class:`pathlib.Path`
        Path to the downloaded file on disk.
    """
    # Validate the requested version before touching the download registry.
    _check_versions(version, allowed={1}, name="Osborne mine magnetic")
    fname = "osborne-magnetic.csv.xz"
    # Downloads (or reuses a cached copy) and returns the local path.
    return Path(_repository(fname, version).fetch(fname))
|
2a0575557a18ca4442f0cf21ee51ccd94d316ffa
| 3,641,717
|
from sympy.core.symbol import Symbol
from sympy.printing.pycode import MpmathPrinter as Printer
from sympy.printing.pycode import SciPyPrinter as Printer
from sympy.printing.pycode import NumPyPrinter as Printer
from sympy.printing.lambdarepr import NumExprPrinter as Printer
from sympy.printing.tensorflow import TensorflowPrinter as Printer
from sympy.printing.pycode import SymPyPrinter as Printer
from sympy.printing.pycode import PythonCodePrinter as Printer
def lambdify(args, expr, modules=None, printer=None, use_imps=True,
             dummify=False):
    """
    Returns an anonymous function for fast calculation of numerical values.

    If not specified differently by the user, ``modules`` defaults to
    ``["scipy", "numpy"]`` if SciPy is installed, ``["numpy"]`` if only
    NumPy is installed, and ``["math", "mpmath", "sympy"]`` if neither is
    installed. That is, SymPy functions are replaced as far as possible by
    either ``scipy`` or ``numpy`` functions if available, and Python's
    standard library ``math``, or ``mpmath`` functions otherwise. To change
    this behavior, the "modules" argument can be used. It accepts:

    - the strings "math", "mpmath", "numpy", "numexpr", "scipy", "sympy",
      "tensorflow"
    - any modules (e.g. math)
    - dictionaries that map names of sympy functions to arbitrary functions
    - lists that contain a mix of the arguments above, with higher priority
      given to entries appearing first.

    .. warning::
        Note that this function uses ``eval``, and thus shouldn't be used on
        unsanitized input.

    Arguments in the provided expression that are not valid Python identifiers
    are substitued with dummy symbols. This allows for applied functions
    (e.g. f(t)) to be supplied as arguments. Call the function with
    dummify=True to replace all arguments with dummy symbols (if `args` is
    not a string) - for example, to ensure that the arguments do not
    redefine any built-in names.

    For functions involving large array calculations, numexpr can provide a
    significant speedup over numpy. Please note that the available functions
    for numexpr are more limited than numpy but can be expanded with
    implemented_function and user defined subclasses of Function. If specified,
    numexpr may be the only option in modules. The official list of numexpr
    functions can be found at:
    https://github.com/pydata/numexpr#supported-functions

    In previous releases ``lambdify`` replaced ``Matrix`` with ``numpy.matrix``
    by default. As of release 1.0 ``numpy.array`` is the default.
    To get the old default behavior you must pass in ``[{'ImmutableDenseMatrix':
    numpy.matrix}, 'numpy']`` to the ``modules`` kwarg.

    >>> from sympy import lambdify, Matrix
    >>> from sympy.abc import x, y
    >>> import numpy
    >>> array2mat = [{'ImmutableDenseMatrix': numpy.matrix}, 'numpy']
    >>> f = lambdify((x, y), Matrix([x, y]), modules=array2mat)
    >>> f(1, 2)
    [[1]
     [2]]

    Usage
    =====

    (1) Use one of the provided modules:

        >>> from sympy import sin, tan, gamma
        >>> from sympy.abc import x, y
        >>> f = lambdify(x, sin(x), "math")

        Attention: Functions that are not in the math module will throw a name
                   error when the function definition is evaluated! So this
                   would be better:

        >>> f = lambdify(x, sin(x)*gamma(x), ("math", "mpmath", "sympy"))

    (2) Use some other module:

        >>> import numpy
        >>> f = lambdify((x,y), tan(x*y), numpy)

        Attention: There are naming differences between numpy and sympy. So if
                   you simply take the numpy module, e.g. sympy.atan will not be
                   translated to numpy.arctan. Use the modified module instead
                   by passing the string "numpy":

        >>> f = lambdify((x,y), tan(x*y), "numpy")
        >>> f(1, 2)
        -2.18503986326
        >>> from numpy import array
        >>> f(array([1, 2, 3]), array([2, 3, 5]))
        [-2.18503986 -0.29100619 -0.8559934 ]

        In the above examples, the generated functions can accept scalar
        values or numpy arrays as arguments. However, in some cases
        the generated function relies on the input being a numpy array:

        >>> from sympy import Piecewise
        >>> from sympy.utilities.pytest import ignore_warnings
        >>> f = lambdify(x, Piecewise((x, x <= 1), (1/x, x > 1)), "numpy")

        >>> with ignore_warnings(RuntimeWarning):
        ...     f(array([-1, 0, 1, 2]))
        [-1.   0.   1.   0.5]

        >>> f(0)
        Traceback (most recent call last):
            ...
        ZeroDivisionError: division by zero

        In such cases, the input should be wrapped in a numpy array:

        >>> with ignore_warnings(RuntimeWarning):
        ...     float(f(array([0])))
        0.0

        Or if numpy functionality is not required another module can be used:

        >>> f = lambdify(x, Piecewise((x, x <= 1), (1/x, x > 1)), "math")
        >>> f(0)
        0

    (3) Use a dictionary defining custom functions:

        >>> def my_cool_function(x): return 'sin(%s) is cool' % x
        >>> myfuncs = {"sin" : my_cool_function}
        >>> f = lambdify(x, sin(x), myfuncs); f(1)
        'sin(1) is cool'

    Examples
    ========

    >>> from sympy.utilities.lambdify import implemented_function
    >>> from sympy import sqrt, sin, Matrix
    >>> from sympy import Function
    >>> from sympy.abc import w, x, y, z

    >>> f = lambdify(x, x**2)
    >>> f(2)
    4
    >>> f = lambdify((x, y, z), [z, y, x])
    >>> f(1,2,3)
    [3, 2, 1]
    >>> f = lambdify(x, sqrt(x))
    >>> f(4)
    2.0
    >>> f = lambdify((x, y), sin(x*y)**2)
    >>> f(0, 5)
    0.0
    >>> row = lambdify((x, y), Matrix((x, x + y)).T, modules='sympy')
    >>> row(1, 2)
    Matrix([[1, 3]])

    Tuple arguments are handled and the lambdified function should
    be called with the same type of arguments as were used to create
    the function.:

    >>> f = lambdify((x, (y, z)), x + y)
    >>> f(1, (2, 4))
    3

    A more robust way of handling this is to always work with flattened
    arguments:

    >>> from sympy.utilities.iterables import flatten
    >>> args = w, (x, (y, z))
    >>> vals = 1, (2, (3, 4))
    >>> f = lambdify(flatten(args), w + x + y + z)
    >>> f(*flatten(vals))
    10

    Functions present in `expr` can also carry their own numerical
    implementations, in a callable attached to the ``_imp_``
    attribute. Usually you attach this using the
    ``implemented_function`` factory:

    >>> f = implemented_function(Function('f'), lambda x: x+1)
    >>> func = lambdify(x, f(x))
    >>> func(4)
    5

    ``lambdify`` always prefers ``_imp_`` implementations to implementations
    in other namespaces, unless the ``use_imps`` input parameter is False.

    Usage with Tensorflow module:

    >>> import tensorflow as tf
    >>> f = Max(x, sin(x))
    >>> func = lambdify(x, f, 'tensorflow')
    >>> result = func(tf.constant(1.0))
    >>> result # a tf.Tensor representing the result of the calculation
    <tf.Tensor 'Maximum:0' shape=() dtype=float32>
    >>> sess = tf.Session()
    >>> sess.run(result) # compute result
    1.0
    >>> var = tf.Variable(1.0)
    >>> sess.run(tf.global_variables_initializer())
    >>> sess.run(func(var)) # also works for tf.Variable and tf.Placeholder
    1.0
    >>> tensor = tf.constant([[1.0, 2.0], [3.0, 4.0]]) # works with any shape tensor
    >>> sess.run(func(tensor))
    array([[ 1.,  2.],
           [ 3.,  4.]], dtype=float32)
    """
    # If the user hasn't specified any modules, use what is available.
    if modules is None:
        try:
            _import("scipy")
        except ImportError:
            try:
                _import("numpy")
            except ImportError:
                # Use either numpy (if available) or python.math where possible.
                # XXX: This leads to different behaviour on different systems and
                #      might be the reason for irreproducible errors.
                modules = ["math", "mpmath", "sympy"]
            else:
                modules = ["numpy"]
        else:
            modules = ["scipy", "numpy"]
    # Get the needed namespaces.
    namespaces = []
    # First find any function implementations
    if use_imps:
        namespaces.append(_imp_namespace(expr))
    # Check for dict before iterating
    if isinstance(modules, (dict, str)) or not hasattr(modules, '__iter__'):
        namespaces.append(modules)
    else:
        # consistency check
        if _module_present('numexpr', modules) and len(modules) > 1:
            raise TypeError("numexpr must be the only item in 'modules'")
        namespaces += list(modules)
    # fill namespace with first having highest priority
    namespace = {}
    for m in namespaces[::-1]:
        buf = _get_namespace(m)
        namespace.update(buf)
    if hasattr(expr, "atoms"):
        # Try if you can extract symbols from the expression.
        # Move on if expr.atoms in not implemented.
        syms = expr.atoms(Symbol)
        for term in syms:
            namespace.update({str(term): term})
    if printer is None:
        # NOTE(review): the bodies of these branches (local
        # ``from ... import ... as Printer`` statements selecting the printer
        # class per module) appear to be missing from this copy of the file —
        # the chain is syntactically incomplete as written. Restore from the
        # upstream source before use.
        if _module_present('mpmath', namespaces):
        elif _module_present('scipy', namespaces):
        elif _module_present('numpy', namespaces):
        elif _module_present('numexpr', namespaces):
        elif _module_present('tensorflow', namespaces):
        elif _module_present('sympy', namespaces):
        else:
        # Map every user-supplied function name to itself so the printer
        # emits calls to it unqualified.
        user_functions = {}
        for m in namespaces[::-1]:
            if isinstance(m, dict):
                for k in m:
                    user_functions[k] = k
        printer = Printer({'fully_qualified_modules': False, 'inline': True,
                           'allow_unknown_functions': True,
                           'user_functions': user_functions})
    # Get the names of the args, for creating a docstring
    if not iterable(args):
        args = (args,)
    names = []
    # Grab the callers frame, for getting the names by inspection (if needed)
    callers_local_vars = inspect.currentframe().f_back.f_locals.items()
    for n, var in enumerate(args):
        if hasattr(var, 'name'):
            names.append(var.name)
        else:
            # It's an iterable. Try to get name by inspection of calling frame.
            name_list = [var_name for var_name, var_val in callers_local_vars
                         if var_val is var]
            if len(name_list) == 1:
                names.append(name_list[0])
            else:
                # Cannot infer name with certainty. arg_# will have to do.
                names.append('arg_' + str(n))
    # Collect module imports the printer recorded while printing, and execute
    # them into the evaluation namespace so the generated code can resolve them.
    imp_mod_lines = []
    for mod, keys in (getattr(printer, 'module_imports', None) or {}).items():
        for k in keys:
            if k not in namespace:
                imp_mod_lines.append("from %s import %s" % (mod, k))
    for ln in imp_mod_lines:
        exec_(ln, {}, namespace)
    # Provide lambda expression with builtins, and compatible implementation of range
    namespace.update({'builtins': builtins, 'range': range})
    # Create the function definition code and execute it
    funcname = '_lambdifygenerated'
    if _module_present('tensorflow', namespaces):
        funcprinter = _TensorflowEvaluatorPrinter(printer, dummify)
    else:
        funcprinter = _EvaluatorPrinter(printer, dummify)
    funcstr = funcprinter.doprint(funcname, args, expr)
    funclocals = {}
    global _lambdify_generated_counter
    filename = '<lambdifygenerated-%s>' % _lambdify_generated_counter
    _lambdify_generated_counter += 1
    c = compile(funcstr, filename, 'exec')
    exec_(c, namespace, funclocals)
    # mtime has to be None or else linecache.checkcache will remove it
    linecache.cache[filename] = (len(funcstr), None, funcstr.splitlines(True), filename)
    func = funclocals[funcname]
    # Apply the docstring
    sig = "func({0})".format(", ".join(str(i) for i in names))
    sig = textwrap.fill(sig, subsequent_indent=' '*8)
    expr_str = str(expr)
    if len(expr_str) > 78:
        expr_str = textwrap.wrap(expr_str, 75)[0] + '...'
    func.__doc__ = (
        "Created with lambdify. Signature:\n\n"
        "{sig}\n\n"
        "Expression:\n\n"
        "{expr}\n\n"
        "Source code:\n\n"
        "{src}\n\n"
        "Imported modules:\n\n"
        "{imp_mods}"
    ).format(sig=sig, expr=expr_str, src=funcstr, imp_mods='\n'.join(imp_mod_lines))
    return func
|
cf7b65c503d1a7873f0ddacfb3f6aa841340ee0e
| 3,641,718
|
def _matches(o, pattern):
"""Match a pattern of types in a sequence."""
if not len(o) == len(pattern):
return False
comps = zip(o,pattern)
return all(isinstance(obj,kind) for obj,kind in comps)
|
e494016affa28e9018f337cb7184e96858701208
| 3,641,719
|
import csv
from io import StringIO
def excl_import_route():
    """Import exclusions from an uploaded CSV file.

    On a valid form submit, parses the CSV payload into ``Excl`` rows,
    optionally replacing all existing exclusions first, and redirects to the
    exclusion list. Any parse/DB failure rolls back and flashes an error.
    """
    form = ExclImportForm()
    if form.validate_on_submit():
        try:
            reader = csv.DictReader(StringIO(form.data.data), EXPORT_FIELDNAMES, quoting=csv.QUOTE_MINIMAL)
            parsed = [
                Excl(family=ExclFamily(row['family']), value=row['value'], comment=row['comment'])
                for row in reader
            ]
            if parsed:
                if form.replace.data:
                    # Replace mode wipes the existing exclusions first.
                    db.session.query(Excl).delete()
                db.session.add_all(parsed)
                db.session.commit()
            return redirect(url_for('scheduler.excl_list_route'))
        except (csv.Error, ValueError, SQLAlchemyError, psycopg2.Error) as e:
            db.session.rollback()
            current_app.logger.exception(e)
            flash('Import failed', 'error')
    return render_template('scheduler/excl/import.html', form=form)
|
780c5646b2a5771691c538cb71bfde390cd9b847
| 3,641,720
|
def receiver(signal, **kwargs):
    """Decorator that connects the decorated function to *signal*.

    Usage::

        @receiver(signal_object, sender=sender)
        def signal_receiver(sender, **kwargs):
            ...
    """
    def wrap(receiver_func):
        # Side effect is the whole point; the function itself is unchanged.
        signal.connect(receiver_func, **kwargs)
        return receiver_func
    return wrap
|
dbbde0855b2a657adaff9fa688aa158053e46579
| 3,641,721
|
from typing import final
def create_new_connected_component(dict_projections, dict_cc, dict_nodes_cc, g_list_, set_no_proj, initial_method,
                                   params, i, file_tags=None):
    """
    Create a new connected component from the not-yet-embedded nodes, register
    it, embed it, and merge its embedding into the existing one.

    :param dict_projections: Existing embedding dict (node -> vector).
    :param dict_cc: Dict mapping connected-component number -> list of nodes.
    :param dict_nodes_cc: Dict mapping node -> its connected-component number.
    :param g_list_: List of graphs, one per time stamp.
    :param set_no_proj: Nodes currently missing from the embedding because
        together they form a new connected component.
    :param initial_method: State-of-the-art method used to embed them.
    :param params: Dict of parameters for the initial method.
    :param i: Index of the time stamp.
    :param file_tags: Tags file, required only when the GCN GEA method is used.
    :return: Updated dict_cc, dict_nodes_cc and the merged embedding dict.
    """
    fresh_cc = create_new_cc(list(set_no_proj), g_list_[i + 1], to_undirected=True)
    dict_cc, dict_nodes_cc = add_new_cc(fresh_cc, dict_nodes_cc, dict_cc)
    # HOPE cannot embed a component smaller than the target dimension;
    # fall back to node2vec with equivalent dimension in that case.
    if initial_method == "HOPE" and fresh_cc.number_of_nodes() < params["dimension"]:
        params = {"dimension": params["dimension"], "walk_length": 80,
                  "num_walks": 16, "workers": 2}
        initial_method = "node2vec"
    _, cc_embedding, _ = final(fresh_cc, initial_method, params, file_tags=file_tags)
    merged_embedding = dict(dict_projections)
    merged_embedding.update(cc_embedding)
    return dict_cc, dict_nodes_cc, merged_embedding
|
18b8c046e78f17b125bf85250953c9d7a656892a
| 3,641,722
|
def laguerre(x, k, c):
    """Generalized Laguerre polynomial L_k^c evaluated at ``x``.

    Implements the series expression given by Olhede and Walden (2002),
    "Generalized Morse Wavelets", Section III D; used when computing the
    generalized Morse wavelets. See `help(_gmw.morsewave)`.
    """
    x = np.atleast_1d(np.asarray(x).squeeze())
    assert x.ndim == 1
    total = np.zeros(x.shape)
    for m in range(k + 1):
        # Compute the binomial-style factor through log-gamma to avoid
        # overflow (same trick as MATLAB's ``beta``).
        coeff = np.exp(gammaln_fn(k + c + 1) - gammaln_fn(c + m + 1)
                       - gammaln_fn(k - m + 1))
        total += (-1)**m * coeff * x**m / gamma_fn(m + 1)
    return total
|
4eac2e1cbd9fd2097763b56129873aa6af4e8419
| 3,641,723
|
import math
def find_all_combinations(participants, team_sizes):
    """Find all possible experience-level combinations for the given team
    sizes, allowing duplicated experience levels (e.g. ``(1, 1, 2)``).

    Each participant's level is looked up via ``LMS_LEVELS`` from its
    ``current_lms_module`` attribute, defaulting to 1 when unknown/falsy.

    :param participants: iterable of participant objects
    :param team_sizes: iterable of desired team sizes (duplicates ignored)
    :return: list of tuples of levels, each sorted ascending, de-duplicated
    """
    num_teams = len(team_sizes)
    participant_levels = [LMS_LEVELS.get(p.current_lms_module) or 1
                          for p in participants]
    hackathon_level = sum(participant_levels)
    # Target level per team, plus the remainder left over after even division.
    team_level = math.floor(hackathon_level / num_teams)
    missing = hackathon_level - (num_teams * team_level)
    combos = []
    for team_size in set(team_sizes):
        combos += find_group_combinations(participant_levels, team_size,
                                          team_level, missing)
    # Sort each combination so that reorderings of the same elements collapse
    # to one entry, then de-duplicate. (The original wrapped the set in a
    # redundant second set() call.)
    return list({tuple(sorted(combo)) for combo in combos})
|
d3f4de9911a1fc427fc2e01433634ccf815f9183
| 3,641,726
|
def normalized_copy(data):
    """
    Normalize timeseries data, using the maximum across all regions and timesteps.

    Parameters
    ----------
    data : xarray Dataset
        Dataset with all non-time dependent variables removed

    Returns
    -------
    ds : xarray Dataset
        Copy of `data`, with the absolute taken and normalized to 0-1
    """
    ds = data.copy(deep=True)  # Work off a copy
    for var in ds.data_vars:
        # Each DataArray is indexed over a different subset of loc_techs,
        # so we find it in the list of dimensions
        loc_tech_dim = [i for i in ds[var].dims if 'loc_techs' in i][0]
        # For each technology, get the loc_techs which are relevant.
        # Entries look like "location::technology"; the split below takes
        # the technology part to group locations by technology.
        loc_tech_subsets = [
            get_loc_techs(ds[loc_tech_dim].values, tech)
            for tech in set(i.split('::')[1] for i in ds[loc_tech_dim].values)
        ]
        # remove empty lists within the _techs list
        loc_tech_subsets = [i for i in loc_tech_subsets if i]
        # For each technology, divide all values by the maximum absolute value
        # and take the absolute value, so each technology's series ends up
        # in [0, 1].
        for loc_tech in loc_tech_subsets:
            ds[var].loc[{loc_tech_dim: loc_tech}] = abs(
                ds[var].loc[{loc_tech_dim: loc_tech}] /
                abs(ds[var].loc[{loc_tech_dim: loc_tech}]).max()
            )
    return ds
|
cfcb94458deb6caa1125cfcf2904652900babc87
| 3,641,728
|
def _get_exception(ex: Exception) -> Exception:
"""Get exception cause/context from chained exceptions
:param ex: chained exception
:return: cause of chained exception if any
"""
if ex.__cause__:
return ex.__cause__
elif ex.__context__:
return ex.__context__
else:
return ex
|
3f670dc237ebd865e31c7d0fd3719e2ea929de6d
| 3,641,729
|
from typing import Any
from typing import Dict
def recursive_normalizer(value: Any, **kwargs: Dict[str, Any]) -> Any:
"""
Prepare a structure for hashing by lowercasing all values and round all floats
"""
digits = kwargs.get("digits", 10)
lowercase = kwargs.get("lowercase", True)
if isinstance(value, (int, type(None))):
pass
elif isinstance(value, str):
if lowercase:
value = value.lower()
elif isinstance(value, list):
value = [recursive_normalizer(x, **kwargs) for x in value]
elif isinstance(value, tuple):
value = tuple(recursive_normalizer(x, **kwargs) for x in value)
elif isinstance(value, dict):
ret = {}
for k, v in value.items():
if lowercase:
k = k.lower()
ret[k] = recursive_normalizer(v, **kwargs)
value = ret
elif isinstance(value, np.ndarray):
if digits:
# Round array
value = np.around(value, digits)
# Flip zeros
value[np.abs(value) < 5 ** (-(digits + 1))] = 0
elif isinstance(value, float):
if digits:
value = round(value, digits)
if value == -0.0:
value = 0
if value == 0.0:
value = 0
else:
raise TypeError("Invalid type in KeywordSet ({type(value)}), only simple Python types are allowed.")
return value
|
e274c3976405838054d7251fdca8520dc75c48fd
| 3,641,730
|
from typing import Set
def rip_and_tear(context) -> Set:
"""Edge split geometry using specified angle or unique mesh settings.
Also checks non-manifold geometry and hard edges.
Returns set of colors that are used to color meshes."""
processed = set()
angle_use_fixed = prefs.RenderFixedAngleUse
# Angle fixed in radians
angle_fixed = prefs.RenderFixedAngle
precision = prefs.RenderPrecision
# Colors are saved in format specified by render precision parameter
# Totally white and totally black (and close to them) colors are prohibited
colors = set()
# Apply split_n_paint function to every object and unite resulting colors
# colors.union(tuple(set(tuple([split_n_paint(context, colors, precision, obj,
# angle_use_fixed, angle_fixed, processed) for obj in context.scene.objects
# if obj.type == "MESH"]))))
for obj in context.scene.objects:
if obj.type == "MESH":
if obj.data in processed or len(obj.data.polygons) == 0:
processed.add(obj.data)
else:
colors.union(
split_n_paint(
context, colors, precision, obj,
angle_use_fixed, angle_fixed,
processed,
)
)
return colors
|
6a67e9a90b4909c1aec8f7f784b2bc41750f5f79
| 3,641,731
|
def generate_primes(d):
    """Return the set of all d-digit primes whose digits are all distinct.

    Primes are returned as strings.

    NOTE(review): candidates are the odd numbers starting at 10**(d-1)+1, so
    for d == 1 the scan starts at 2 and only visits even values — confirm
    that d >= 2 is the intended domain.
    """
    found = set()
    for candidate in range(10 ** (d - 1) + 1, 10 ** d, 2):
        digits = str(candidate)
        # All digits are unique iff collapsing to a set loses nothing.
        if len(set(digits)) == len(digits) and isprime(candidate):
            found.add(digits)
    return found
|
4edf615165144f2ab6e5d12533adc4357d904506
| 3,641,732
|
def poinv(A, UPLO='L', workers=1, **kwargs):
    """
    Compute the (multiplicative) inverse of symmetric/hermitian positive
    definite matrices, with broadcasting.

    Given a square symmetric/hermitian positive-definite matrix `a`, return
    the matrix `ainv` satisfying ``matrix_multiply(a, ainv) =
    matrix_multiply(ainv, a) = Identity matrix``.

    Parameters
    ----------
    a : (..., M, M) array
        Symmetric/hermitian positive definite matrices to be inverted.
    UPLO : {'U', 'L'}, optional
        Specifies whether the calculation is done with the lower
        triangular part of the elements in `a` ('L', default) or
        the upper triangular part ('U').
    workers : int, optional
        The number of parallel threads to use along gufunc loop dimension(s).
        If set to -1, the maximum number of threads (as returned by
        ``multiprocessing.cpu_count()``) are used.

    Returns
    -------
    ainv : (..., M, M) array
        (Multiplicative) inverse of the `a` matrices.

    Notes
    -----
    Numpy broadcasting rules apply.

    The inverse is computed using LAPACK routines _potrf, _potri

    For elements where the LAPACK routine fails, the result will be set
    to NaNs.

    Implemented for types single, double, csingle and cdouble. Numpy conversion
    rules apply.

    See Also
    --------
    inv : compute the multiplicative inverse of general matrices.

    Examples
    --------
    >>> a = np.array([[5, 3], [3, 5]])
    >>> ainv = poinv(a)
    >>> np.allclose(matrix_multiply(a, ainv), np.eye(2))
    True
    >>> np.allclose(matrix_multiply(ainv, a), np.eye(2))
    True
    """
    uplo_choices = ['U', 'L']
    if UPLO not in uplo_choices:
        raise ValueError("Invalid UPLO argument '%s', valid values are: %s" %
                         (UPLO, uplo_choices))
    # Select the lower- or upper-triangular gufunc variant.
    if 'L' == UPLO:
        gufunc = _impl.poinv_lo
    else:
        gufunc = _impl.poinv_up
    # NOTE(review): presumably _check_workers applies the requested thread
    # count to the gufunc machinery and returns (applied, previous) so the
    # finally-block can restore it — confirm against its definition.
    workers, orig_workers = _check_workers(workers)
    try:
        out = gufunc(A, **kwargs)
    finally:
        # restore original number of workers
        if workers != orig_workers:
            _impl.set_gufunc_threads(orig_workers)
    return out
|
ccba9b0fc518e0482c6ac647d56abe0e86d3409c
| 3,641,733
|
def gen_task3() -> tuple:
    """Task 3: identify the centre of a cross or a plus sign.

    Draws a randomly placed 3x3 plus or cross of random symbols on a blank
    canvas.

    :return: ``([3, centre_symbol], canvas)`` — the task id with the symbol
        at the sign's centre, and the canvas ndarray. (BUG FIX: the original
        annotation claimed ``np.ndarray`` although a 2-tuple is returned.)
    """
    canv = blank_canvas()
    r, c = np.random.randint(GRID-2, size=2, dtype=np.int8)
    # 5 symbols are drawn; index 2 always lands on the centre cell below.
    syms = rand_syms(5)  # a 3x3 sign has 2 symbols, outer and centre
    if np.random.rand() < 0.5:
        # Let's do a plus
        rows, cols = [r, r+1, r+1, r+1, r+2], [c+1, c, c+1, c+2, c+1]
    else:
        # Let's do a cross
        rows, cols = [r, r, r+1, r+2, r+2], [c, c+2, c+1, c, c+2]
    canv[rows, cols] = syms
    return [3, syms[2]], canv
|
aba9e78cf4d042cacd8787a90275947ba603b37c
| 3,641,734
|
def init_susceptible_00():
    """
    Initial susceptible population (constant, 8 million people).

    Real Name: b'init Susceptible 00'
    Original Eqn: b'8e+06'
    Units: b'person'
    Limits: (None, None)
    Type: constant

    b''
    """
    return 8_000_000.0
|
acc506bdea96b224f3627084bbee9e1a025bcff9
| 3,641,735
|
def spectrum_1D_scalar(data, dx, k_bin_num=100):
    """Calculates and returns the 1D power spectrum of a 1D gaussian field of
    scalars, assuming isotropy of the turbulence.

    Example:
        d = np.random.randn(101)
        dx = 1
        k_bins_weighted, spect1D = spectrum_1D_scalar(d, dx, k_bin_num=100)
        fig, ax = plt.subplots()
        ax.scatter(k_bins_weighted, spect1D)

    Arguments:
        data {(Mx,) array of floats} -- 1D Gaussian field of scalars
        dx {float} -- grid spacing
        k_bin_num {int} -- number of bins in reciprocal space

    Returns:
        k_bins_weighted {array of floats} -- location of bin centres
        spect1D {array of floats} -- mean spectral power within each bin

    NOTE(review): the loop below runs i = 1 .. k_bin_num-1, so the last
    element of ``spect1D`` is never assigned and stays 0 (the bin containing
    the maximum |k| is dropped) — confirm whether that is intentional.
    """
    # fourier transform data, shift to have zero freq at centre, find power
    f = np.fft.fftshift(np.fft.fftn(data))
    fsqr = np.real(f*np.conj(f))
    # calculate k vectors in each dimension
    Mx = data.shape[0]
    kx = np.fft.fftshift(np.fft.fftfreq(Mx, dx))
    # calculate magnitude of k at each grid point
    K = np.sqrt(kx**2)
    # determine 1D spectrum of k, measured from origin
    # sort array in ascending k, and sort power by the same factor
    K_flat = K.flatten()
    fsqr_flat = fsqr.flatten()
    K_sort = K_flat[K_flat.argsort()]
    fsqr_sort = fsqr_flat[K_flat.argsort()]
    k_bin_width = K_sort.max()/k_bin_num
    k_bins = k_bin_width*np.arange(0, k_bin_num+1)
    k_bins_weighted = 0.5*(k_bins[:-1]+k_bins[1:])
    spect1D = np.zeros_like(k_bins_weighted)
    for i in range(1, k_bin_num):
        upper = K_sort < i*k_bin_width  # find only values below upper bound: BOOL
        lower = K_sort >= (i-1)*k_bin_width  # find only values above lower bound: BOOL
        f_filtered = fsqr_sort[upper*lower]  # boolean AND selects only those which match both
        spect1D[i-1] = f_filtered.mean()  # and take their mean.
    return k_bins_weighted, spect1D
|
88cdb3917d995fdf5d870ebfef3da90f8a4526fb
| 3,641,736
|
from operator import and_
def get_previous_cat(last_index: int) -> models.Cat:
    """Get previous cat.

    Returns the enabled cat with the highest index strictly below
    ``last_index``; when none exists, wraps around via ``get_last_cat()``.

    Args:
        last_index (int): View index of last seen cat.
    """
    # NOTE: ``== False`` is deliberate — it builds a SQLAlchemy SQL
    # expression on the column; ``is False`` would not work here.
    cat = models.Cat.query.filter(and_(models.Cat.disabled == False, models.Cat.index < last_index)).order_by(
        desc(models.Cat.index)).first()
    if cat is None:
        cat = get_last_cat()
    return cat
|
bd4b6511ab7b2f004b8539e46109ce128d7af4dd
| 3,641,737
|
def encode(file, res):
    """Encode an image.

    ``file`` is the path to the image; ``res`` is the resolution to use —
    a smaller res means a smaller but lower-quality output.
    """
    encoded = buildHeader(res)
    # Append each pixel's encoding after the header, in order.
    for pixel in getPixels(file, res):
        encoded += encodePixel(pixel)
    return encoded
|
07f9622bc222f91cb614165e432b4584374030a3
| 3,641,738
|
def process_image(img):
    """Resize, scale and batch an image for the network.

    # Argument:
        img: original image.
    # Returns
        image: float32 ndarray of shape (1, 416, 416, 3), values in [0, 1].
    """
    resized = cv2.resize(img, (416, 416), interpolation=cv2.INTER_CUBIC)
    scaled = np.array(resized, dtype='float32') / 255.
    # Add the leading batch dimension expected by the model.
    return np.expand_dims(scaled, axis=0)
|
a139d0b82c82273de35d5e95b75cfd5f0e7635e3
| 3,641,739
|
def unnormalise_x_given_lims(x_in, lims):
    """
    Map ``x_in`` (assumed within [-1, 1] in each dimension) onto the
    problem's limits ``lims`` (an array of [low, high] rows).
    """
    lo = lims[:, 0]
    span = lims[:, 1] - lo
    # Shift [-1, 1] to [0, 1], stretch by the span, then offset by the lower bound.
    return lo + span * (x_in + 1) / 2
|
1d4cd35f45ab8594e297eb64e152a481c01905cd
| 3,641,740
|
def scalar_projection(vector, onto):
    """
    Compute the scalar projection of `vector` onto the vector `onto`.

    `onto` need not be normalized.

    `vector` may be a single (3,) vector or a (k, 3) stack; `onto` may be a
    single (3,) vector or, for stacked input, a matching (k, 3) stack.
    """
    # NOTE: ``check`` validates shapes by looking the variables up by name in
    # the caller's locals(), so the names "vector"/"onto" are load-bearing —
    # do not rename these locals.
    if vector.ndim == 1:
        check(locals(), "vector", (3,))
        check(locals(), "onto", (3,))
    else:
        # k is the stack length inferred from vector's (-1, 3) shape check.
        k = check(locals(), "vector", (-1, 3))
        if onto.ndim == 1:
            check(locals(), "onto", (3,))
        else:
            check(locals(), "onto", (k, 3))
    return dot(vector, normalize(onto))
|
d5b27d46e6d498b22adb1b081b9c7143c636307b
| 3,641,741
|
def update_table(page_current, page_size, sort_by, filter, row_count_value):
    """
    This is the callback function to update the datatable
    with the required filtered, sorted, extended values.

    :param page_current: Current page number
    :param page_size: Page size
    :param sort_by: Column selected for sorting (list of dicts with
        ``column_id`` and ``direction`` keys; only the first entry is used)
    :param filter: Value entered in the filter (``' && '``-joined parts)
    :param row_count_value: Number of rows per page; overrides page_size
    :return: two-element list: [page of records as dicts, column definitions]
    """
    # If uploaded dataframe is not empty use that, otherwise
    # use the default dataframe (both are module-level globals).
    if not df_up.empty:
        # df_temp = pd.read_json(data, orient='split')
        df_tab = df_up
    else:
        df_tab = df
    # Setting the page size as row count value
    if row_count_value is not None:
        page_size = row_count_value
    # Applying sort logic
    if len(sort_by):
        dff = df_tab.sort_values(
            sort_by[0]['column_id'],
            ascending=sort_by[0]['direction'] == 'asc',
            inplace=False
        )
    else:
        # No sort is applied
        dff = df_tab
    # Filter logic: each ' && '-joined part narrows the frame further.
    if filter is not None:
        filtering_expressions = filter.split(' && ')
        for filter_part in filtering_expressions:
            col_name, operator, filter_value = split_filter_part(filter_part)
            if operator in ('eq', 'ne', 'lt', 'le', 'gt', 'ge'):
                # these operators match pandas series operator method names
                dff = dff.loc[getattr(dff[col_name], operator)(filter_value)]
            elif operator == 'contains':
                dff = dff.loc[dff[col_name].str.contains(filter_value)]
            elif operator == 'datestartswith':
                # this is a simplification of the front-end filtering logic,
                # only works with complete fields in standard format
                dff = dff.loc[dff[col_name].str.startswith(filter_value)]
    # if selected_cols is not None:
    #     if len(selected_cols) != 0:
    #         return dff[selected_cols].iloc[
    #                page_current * page_size:(page_current + 1) * page_size
    #                ].to_dict('records')
    #     else:
    #         return dff.iloc[
    #                page_current * page_size:(page_current + 1) * page_size
    #                ].to_dict('records')
    # else:
    # Rounding the float values to 2 decimal places
    dff = dff.round(2)
    # First element: the requested page. Second: column headers paired with
    # their dtype names (two-row header).
    return [dff.iloc[
                page_current * page_size:(page_current + 1) * page_size
            ].to_dict('records'),
            [{"name": [i, j], "id": i} for i, j in zip(df_tab.columns, [str(x) for x in df_tab.dtypes.to_list()])]]
|
e2669f3b98546731974e5706b8af9f6d82b47550
| 3,641,742
|
def load_mooring_csv(csvfilename):
    """Loads data contained in an ONC mooring csv file.

    :arg csvfilename: path to the csv file
    :type csvfilename: string

    :returns: data, lat, lon, depth - a pandas data frame object and the
       latitude, longitude and depth of the mooring
    """
    data_line, lat, lon, depth = find_metadata(csvfilename)
    # Look up headers
    headers = pd.read_csv(csvfilename, skiprows=data_line-2, nrows=1,
                          header=None, skipinitialspace=True, dtype=str)
    headers = np.array(headers)[0]
    headers[0] = headers[0].replace('#', '')
    headers[0] = headers[0].replace('"', '')
    # Load data
    data = pd.read_csv(csvfilename, header=None, skiprows=data_line,
                       names=headers, parse_dates=[0], low_memory=False)
    # BUG FIX: DataFrame.convert_objects was removed in pandas 1.0; coerce
    # each object column to numeric where possible instead, leaving genuinely
    # non-numeric columns untouched.
    for col in data.columns:
        if data[col].dtype == object:
            try:
                data[col] = pd.to_numeric(data[col])
            except (ValueError, TypeError):
                pass
    data.rename(columns={'Time UTC (yyyy-mm-ddThh:mm:ss.fffZ)': 'time'},
                inplace=True)
    return data, lat, lon, depth
|
a974e8607916e8fbc1b2beb7af8768d048aca8f0
| 3,641,744
|
def ez_execute(query, engine):
    """
    Execute *query* with *engine* and return the results as a dataframe.

    Arguments:
        query {str} -- a Sql query string
        engine {sqlalchemy.engine.base.Engine} -- a database engine object
            (or DB-API connection) to run the query

    Returns:
        DataFrame -- A dataframe containing the results of executing the
            sql query with the specified engine

    Raises:
        ValueError -- when the query returns no rows. (BUG FIX: the original
            used ``assert`` for this validation, which is silently stripped
            under ``python -O``.)
    """
    data = pd.read_sql_query(query, engine)
    if data.empty:
        raise ValueError("Query returned no results")
    return data
|
c350d552f89dca550e766337fd7c071e138c43e6
| 3,641,745
|
def compute_lima_image(counts, background, kernel):
    """Compute Li & Ma significance and flux images for known background.

    Parameters
    ----------
    counts : `~gammapy.maps.WcsNDMap`
        Counts image
    background : `~gammapy.maps.WcsNDMap`
        Background image
    kernel : `astropy.convolution.Kernel2D`
        Convolution kernel

    Returns
    -------
    images : dict
        Result maps keyed by ``significance``, ``counts``, ``background``
        and ``excess``.

    See Also
    --------
    gammapy.stats.significance
    """
    # The kernel is normalized in place below, so work on a private copy.
    kern = deepcopy(kernel)
    kern.normalize("peak")
    n_on = counts.convolve(kern.array).data
    n_bkg = background.convolve(kern.array).data
    sig = significance(n_on, n_bkg, method="lima")
    return {
        "significance": counts.copy(data=sig),
        "counts": counts.copy(data=n_on),
        "background": counts.copy(data=n_bkg),
        "excess": counts.copy(data=n_on - n_bkg),
    }
|
8049f5a46ecf81459a64811aec917e72ec78a208
| 3,641,746
|
def get_list_from(matrix):
    """
    Flatten the strict upper triangle of a capability matrix into a list
    (row by row, excluding the diagonal).
    """
    upper = []
    for row in range(matrix.shape[0]):
        # Everything to the right of the diagonal in this row.
        upper.extend(matrix[row, row + 1:].tolist())
    return upper
|
bbfa52ff6a960d91d5aece948e9d416c3dcf0667
| 3,641,747
|
def g1_constraint(x, constants, variables):
    """Constraint that the initial value of tangent modulus > 0 at ep=0.

    :param np.ndarray x: Parameters of updated Voce-Chaboche model.
    :param dict constants: Defines the constants for the constraint.
    :param dict variables: Defines constraint values that depend on x.
    :return float: Value of the constraint in standard form.
    """
    # Evaluate the generic g-constraint at zero plastic strain.
    return g_constraint(x, 0.)
|
51d55a03b608cef2c3b5d87fe5cb56bf73326ae3
| 3,641,749
|
import sqlite3
def disconnect(connection_handler):
    """Closes a current database connection.

    :param connection_handler: the Connection object (may be None)
    :return: 0 if success and -1 if an exception arises. (BUG FIX: when
        ``connection_handler`` was None the original fell through and
        returned None instead of the documented 0; success now always
        returns 0.)
    """
    try:
        if connection_handler is not None:
            connection_handler.close()
        return 0
    except sqlite3.Error as e:
        logger.error('Database disconnection error: {0}'.format(e))
        return -1
|
aaba17e38ef48fe7e0be5ba825e114b6f5148433
| 3,641,750
|
def throw_out_nn_indices(ind, dist, Xind):
    """Throw out near neighbor indices that are used to embed the time series.

    This is an attempt to get around the problem of autocorrelation.

    Parameters
    ----------
    ind : 2d array
        Indices to be filtered.
    dist : 2d array
        Distances to be filtered.
    Xind : 2d array
        Per-row embedding indices; any neighbor whose index appears in the
        corresponding row of ``Xind`` is discarded.

    Returns
    -------
    filt_dist : 2d array
        Filtered distances.
    filt_ind : 2d array
        Filtered indices.

    (Note: distances are returned first, matching the code below — the
    original docstring listed the return values in the opposite order.)
    """
    ind_store = []
    dist_store = []
    # iterate through each row
    for i in range(len(Xind)):
        xrow = Xind[i]
        indrow = ind[i]
        distrow = dist[i]
        mask = np.ones(len(indrow), dtype=bool)
        for val in xrow:
            # drop every neighbor that is also an embedding index
            mask[indrow == val] = False
        ind_store.append(indrow[mask])
        dist_store.append(distrow[mask])
    # keep up to the shortest mask. This is so that we can vstack them
    ind_len = min([len(m) for m in ind_store])
    # make all lists the same size for concatenation
    ind_store = [m[:ind_len] for m in ind_store]
    dist_store = [m[:ind_len] for m in dist_store]
    ind_store = np.vstack(ind_store)
    dist_store = np.vstack(dist_store)
    return dist_store, ind_store
|
638fb43ac484ffa0e15e3c19a5b643aae5a749d9
| 3,641,751
|
import math
def lead_angle(target_disp,target_speed,target_angle,bullet_speed):
    """
    Return the angle at which to fire in order to intercept a moving target.

    The gun, target and interception point form a triangle; applying the
    law of sines to it yields the lead angle directly.

    :param target_disp: (x, y) displacement of the target from the gun.
    :param target_speed: speed of the target.
    :param target_angle: heading of the target in radians.
    :param bullet_speed: speed of the projectile.
    :return: firing angle in radians, or None when interception is
        impossible (projectile slower than target, or zero displacement).
    """
    # A slower projectile can never close on the target.
    if target_speed > bullet_speed:
        return None
    dx, dy = target_disp[0], target_disp[1]
    if dx == 0 and dy == 0:
        return None
    # Bearing from the gun to the target's current position.
    bearing = math.atan2(dy, dx)
    # Law of sines: sin(lead)/St == sin(interior angle)/Sb.
    speed_ratio = target_speed / bullet_speed
    lead = math.asin(speed_ratio * math.sin(bearing - target_angle - math.pi))
    return lead + bearing
|
fb5dfddf8b36d4e49df2d740b18f9aa97381d08f
| 3,641,752
|
import time
def acme_parser(characters):
    """Parse records from acme global.

    Args:
        characters: characters to loop through the url
    Returns:
        2 item tuple containing all the meds as a list and a count of all meds

    Each character is queried once; on failure the request is retried once
    after a random back-off, and skipped if it fails again.
    """
    link = (
        'http://acmeglobal.com/acme/'
        'wp-content/themes/acme/trade_check.php'
        '?initchar_trade={0!s}&divname_trade=human')
    meds = []
    for character in characters:
        try:
            meds += parse_char(link, character)
        # Fix: narrowed from a bare `except:` so KeyboardInterrupt and
        # SystemExit are no longer swallowed.
        except Exception:
            wait = rand(5, 15)
            print('Failed on character {!s}.'.format(character))
            print('Trying again in {0:d}s.'.format(wait))
            time.sleep(wait)
            try:
                meds += parse_char(link, character)
            except Exception:
                print('Failed on character {!s} again.'.format(character))
                print('Skipping character.')
    return (meds, len(meds))
|
8e9fe3b020e05243075351d7eedbdba7a54d5d81
| 3,641,754
|
def standard_atari_env_spec(env):
  """Construct the standard environment spec (HParams) for Atari.

  The environment is wrapped with reward clipping and a 4-frame stack.

  Args:
    env: a gym environment id (str) or a callable returning an environment.

  Returns:
    tf.contrib.training.HParams describing the environment.
  """
  wrappers = [
      [tf_atari_wrappers.RewardClippingWrapper, {}],
      [tf_atari_wrappers.StackWrapper, {"history": 4}],
  ]
  if callable(env):
    env_lambda = env
  elif isinstance(env, str):
    env_lambda = lambda: gym.make(env)
  else:
    env_lambda = None
  assert env_lambda is not None, "Unknown specification of environment"
  return tf.contrib.training.HParams(
      env_lambda=env_lambda, wrappers=wrappers, simulated_env=False)
|
e9751e1b376cdee5ec0f9c27d8ab4bf2e303f35b
| 3,641,756
|
def load_bikeshare(path='data', extract=True):
    """
    Download the 'bikeshare' dataset to the given output path and return
    its data.

    Thin wrapper around ``_load_file_data`` with the dataset name fixed
    to 'bikeshare'.
    """
    return _load_file_data('bikeshare', path, extract)
|
7cce01f22c37460800a44a85b18e6574d9d7f6fb
| 3,641,757
|
def file2bytes(filename: str) -> bytes:
    """
    Read the file at *filename* and return its entire content as bytes.
    """
    with open(filename, 'rb') as stream:
        return stream.read()
|
f917a265c17895c917c3c340041586bef0c34dac
| 3,641,758
|
import json
def load_session() -> dict:
    """
    Returns available session dict.

    Reads JSON from the module-level ``SESSION_PATH`` (presumably a
    pathlib.Path — it must provide ``.open()``; confirm at the definition
    site).  Returns an empty dict when no session file exists yet.
    """
    try:
        # NOTE(review): the file handle from .open() is left to the GC;
        # consider a with-block.
        return json.load(SESSION_PATH.open())
    except FileNotFoundError:
        return {}
|
342c8e143c878cfc4821454cebfcc3ba47a2cd2a
| 3,641,759
|
def _preprocess_zero_mean_unit_range(inputs, dtype=tf.float32):
  """Rescale image values from [0, 255] to the zero-centred range [-1, 1]."""
  scaled = (2.0 / 255.0) * tf.cast(inputs, tf.float32) - 1.0
  return tf.cast(scaled, dtype=dtype)
|
08238566a04ed35346b8f4ff0874fff7be48bded
| 3,641,760
|
from typing import cast
def fill_like(input, value, shape=None, dtype=None, name=None):
    """Create a uniformly filled tensor / array.

    Args:
        input: reference tensor/array supplying the default dtype and,
            when ``shape`` is None, the output shape.
        value: the fill value.
        shape: optional output shape; defaults to ``input``'s shape.
        dtype: optional output dtype; defaults to ``input.dtype``.
        name: optional name for the TF op.

    Returns:
        A tensor (TF path) or ndarray (NumPy path) filled with ``value``.
    """
    input = as_tensor(input)
    dtype = dtype or input.dtype
    if has_tensor([input, value, shape], 'tf'):
        # Bug fix: the original called `cast(value, dtype)` — but `cast`
        # here is typing.cast, which simply returns its *second* argument
        # (the dtype object!) — and then `tf.fill(value, input.shape, name)`
        # with the dims/value arguments swapped.  tf.fill expects
        # (dims, value, name).
        value = tf.cast(value, dtype)
        dims = input.shape if shape is None else shape
        return tf.fill(dims, value, name=name)
    else:
        dtype = convert_dtype(dtype, 'np')
        if shape is None:
            return np.full_like(input, value, dtype=dtype)
        else:
            return np.full(shape, value, dtype=dtype)
|
1879ac8669396dfe3fe351dae97a96cd8d6a8e5e
| 3,641,761
|
from typing import Callable
import operator
def transform_item(key, f: Callable) -> Callable[[dict], dict]:
    """transform a value of `key` in a dict. i.e given a dict `d`, return a new dictionary `e` s.t e[key] = f(d[key]).
    >>> my_dict = {"name": "Danny", "age": 20}
    >>> transform_item("name", str.upper)(my_dict)
    {'name': 'DANNY', 'age': 20}
    """
    # NOTE(review): `operator` here must be a project module (gamla-style)
    # shadowing the stdlib name — `head`/`equals`/`identity` do not exist
    # in the standard library `operator` module.  Verify the import.
    # itemmap maps over (key, value) pairs; `when` applies the transform
    # only to pairs whose head equals `key`; `packstack` keeps the key and
    # applies `f` to the value.
    return functional_generic.itemmap(
        functional_generic.when(
            functional_generic.compose_left(operator.head, operator.equals(key)),
            functional_generic.packstack(operator.identity, f),
        ),
    )
|
a202fe59b29b0a1b432df759b4600388e2d9f72e
| 3,641,762
|
def mock_dataset(mocker, mock_mart, mart_datasets_response):
    """Returns an example dataset, built using a cached response.

    Pytest fixture helper: patches ``mock_mart.get`` to return the cached
    ``mart_datasets_response`` so no network request is made, then looks
    up the 'mmusculus_gene_ensembl' dataset from the mart.
    """
    mocker.patch.object(mock_mart, 'get', return_value=mart_datasets_response)
    return mock_mart.datasets['mmusculus_gene_ensembl']
|
bb9a8b828f0ac5bfa59b3faee0f9bcc22c7d954e
| 3,641,763
|
import torch
def loss_function(recon_x, x, mu, logvar):
    """Loss function for a variational autoencoder (VAE).

    Sum of the reconstruction term (binary cross-entropy, summed over all
    elements) and the KL divergence of the approximate posterior
    N(mu, sigma^2) from the standard normal prior.

    Args:
        recon_x: reconstructed batch, values in [0, 1].
        x: target batch, values in [0, 1].
        mu: latent means.
        logvar: latent log-variances.

    Returns:
        Scalar tensor: BCE + KLD.
    """
    # Fix: `size_average=False` is deprecated; `reduction='sum'` is the
    # equivalent modern spelling.
    BCE = F.binary_cross_entropy(recon_x, x, reduction='sum')
    # 0.5 * sum(1 + log(sigma^2) - mu^2 - sigma^2)
    KLD = -0.5 * torch.sum(1 + logvar - mu.pow(2) - logvar.exp())
    return BCE + KLD
|
38c0d6ab7a8388e007324bdcfb8611f0a3072c35
| 3,641,764
|
import scipy
def resize_img(img, size):
    """
    Resize a single image to a square of side ``size``.

    Args:
        img: Input image in ndarray
        size: Target image size (output is size x size)
    Returns: Resized image in ndarray.  Grayscale results are given an
        explicit single channel axis, i.e. shape (size, size, 1).
    """
    # NOTE(review): scipy.misc.imresize was deprecated in SciPy 1.0 and
    # removed in 1.3, so this call fails on modern SciPy.  Consider
    # Pillow's Image.resize or skimage.transform.resize as replacements.
    img = scipy.misc.imresize(img, (size, size))
    if len(img.shape) == 2:
        img = img.reshape((size, size, 1))
    return img
|
8a3ff8bfab0c864a6c0e4701be07b9296ad23f28
| 3,641,765
|
def cloudtopheight_IR(bt, cloudmask, latitude, month, method="modis"):
    """Cloud Top Height (CTH) from 11 micron channel.
    Brightness temperatures (bt) are converted to CTHs using the IR window approach:
    (bt_clear - bt_cloudy) / lapse_rate.
    See also:
        :func:`skimage.measure.block_reduce`
            Down-sample image by applying function to local blocks.
        :func:`lapserate_moist_adiabate`
            Constant value 6.5 [K/km]
        :func:`lapserate_modis`
            Estimate of the apparent lapse rate in [K/km]
            depending on month and latitude acc. to Baum et al., 2012.
    Parameters:
        bt (ndarray): brightness temperatures form 11 micron channel.
        cloudmask (ndarray): binary cloud mask.
        month (int): month of the year.
        latitude (ndarray): latitudes in [°], positive North, negative South.
        method (str): approach used to derive CTH: 'modis' see Baum et al., 2012,
            'simple' uses the moist adiabatic lapse rate.
    Returns:
        ndarray: cloud top height.
    References:
        Baum, B.A., W.P. Menzel, R.A. Frey, D.C. Tobin, R.E. Holz, S.A.
        Ackerman, A.K. Heidinger, and P. Yang, 2012: MODIS Cloud-Top Property
        Refinements for Collection 6. J. Appl. Meteor. Climatol., 51,
        1145–1163, https://doi.org/10.1175/JAMC-D-11-0203.1
    """
    # Lapse rate
    if method == "simple":
        lapserate = lapserate_moist_adiabate()
    elif method == "modis":
        lapserate = lapserate_modis(month, latitude)
    else:
        raise ValueError("Method is not supported.")
    resolution_ratio = np.shape(cloudmask)[0] // np.shape(bt)[0]
    # Fix: work on a copy so the caller's cloudmask array is not mutated
    # (the original wrote the NaN replacement back into the input array).
    cloudmask = np.array(cloudmask, dtype=float)
    # Inverted mask: NaNs count as cloudy so they are excluded from the
    # clear-sky average.
    cloudmask_inverted = cloudmask.copy()
    cloudmask_inverted[np.isnan(cloudmask_inverted)] = 1
    cloudmask_inverted = np.invert(cloudmask_inverted.astype(bool))
    cloudmask[np.isnan(cloudmask)] = 0
    # Fix: keep masks boolean.  The original cast them to int (0/1), which
    # turned the later mask indexing into fancy row indexing (rows 0 and 1)
    # instead of element selection in the equal-resolution branch.
    cloudmask = cloudmask.astype(bool)
    # Match resolutions of cloud mask and brightness temperature (bt) arrays.
    if resolution_ratio > 1:
        # On bt resolution, flag pixels as cloudy only if all subgrid pixels
        # are cloudy in the original cloud mask.
        # Fix: np.alltrue was deprecated and removed in NumPy 2.0 -> np.all.
        mask_cloudy = block_reduce(
            cloudmask, (resolution_ratio, resolution_ratio), func=np.all
        )
        # Search for only clear pixels to derive a bt clearsky/ocean value.
        mask_clear = block_reduce(
            cloudmask_inverted, (resolution_ratio, resolution_ratio), func=np.all
        )
    elif resolution_ratio < 1:
        try:
            mask_cloudy = np.repeat(
                np.repeat(cloudmask, resolution_ratio, axis=0), resolution_ratio, axis=1
            )
            mask_clear = np.repeat(
                np.repeat(cloudmask_inverted, resolution_ratio, axis=0),
                resolution_ratio,
                axis=1,
            )
        except ValueError:
            raise ValueError(
                "Problems matching the shapes of provided cloud mask and bt arrays."
            )
    else:
        mask_cloudy = cloudmask.copy()
        mask_clear = cloudmask_inverted.copy()
    # Cloudy bt field (NaN elsewhere) and scalar clear-sky average.
    bt_cloudy = np.ones(np.shape(bt)) * np.nan
    bt_cloudy[mask_cloudy] = bt[mask_cloudy]
    bt_clear_avg = np.nanmean(bt[mask_clear])
    return (bt_clear_avg - bt_cloudy) / lapserate
|
b68dfb37b27d3067c2956fc3653640393491e014
| 3,641,766
|
def info2lists(info, in_place=False):
    """
    Return info with:
    1) `packages` dict replaced by a 'packages' list with indexes removed
    2) `releases` dict replaced by a 'releases' list with indexes removed

    info2list(info2dicts(info)) == info

    When `in_place` is False (default) the input dict is left untouched
    and a shallow copy is modified instead.
    """
    if 'packages' not in info and 'releases' not in info:
        return info
    result = info if in_place else info.copy()
    for key in ('packages', 'releases'):
        mapping = info.get(key)
        if mapping:
            result[key] = list(mapping.values())
    return result
|
313fda757d386332e16a0a91bb4408fe3cb8c070
| 3,641,767
|
def calc_wave_number(g, h, omega, relax=0.5, eps=1e-15):
    """
    Find the wave number k for a known angular frequency omega by relaxed
    Picard iteration on the linear dispersion relation
    omega**2 = g * k * tanh(k * h).
    """
    k = omega ** 2 / g  # deep-water initial guess
    for _ in range(100):
        k_new = omega ** 2 / g / tanh(k * h)
        if abs(k_new - k) < eps:
            break
        # Under-relax the update to stabilise the fixed-point iteration.
        k = relax * k_new + (1 - relax) * k
    else:
        ocellaris_error(
            'calc_wave_number did not converge',
            'Input g=%r h=%r omega=%r, tolerance=%e' % (g, h, omega, eps),
        )
    return k_new
|
7173fc9f38547864943046fed1e74d9b5cc832b5
| 3,641,768
|
def emit_live_notification_for_model(obj, user, history, *, type:str="change", channel:str="events",
                                     sessionid:str="not-existing"):
    """
    Sends a model live notification to users.

    Builds a title/body/url payload according to the changed object's
    content type (user story, task, issue, wiki page or sprint) and the
    history entry type (create / change / delete), then emits it on the
    user's live-notification channel.

    Returns None (emitting nothing) while a project import is in progress
    or for unsupported content types.
    """
    # Skip notifications generated by bulk project imports.
    if obj._importing:
        return None
    content_type = get_typename_for_model_instance(obj)
    # Each branch below builds the same three pieces: title, url, body.
    # Deleted objects have no URL to link to.
    if content_type == "userstories.userstory":
        if history.type == HistoryType.create:
            title = _("User story created")
            url = resolve("userstory", obj.project.slug, obj.ref)
        elif history.type == HistoryType.change:
            title = _("User story changed")
            url = resolve("userstory", obj.project.slug, obj.ref)
        else:
            title = _("User story deleted")
            url = None
        body = _("US #{} - {}").format(obj.ref, obj.subject)
    elif content_type == "tasks.task":
        if history.type == HistoryType.create:
            title = _("Task created")
            url = resolve("task", obj.project.slug, obj.ref)
        elif history.type == HistoryType.change:
            title = _("Task changed")
            url = resolve("task", obj.project.slug, obj.ref)
        else:
            title = _("Task deleted")
            url = None
        body = _("Task #{} - {}").format(obj.ref, obj.subject)
    elif content_type == "issues.issue":
        if history.type == HistoryType.create:
            title = _("Issue created")
            url = resolve("issue", obj.project.slug, obj.ref)
        elif history.type == HistoryType.change:
            title = _("Issue changed")
            url = resolve("issue", obj.project.slug, obj.ref)
        else:
            title = _("Issue deleted")
            url = None
        body = _("Issue: #{} - {}").format(obj.ref, obj.subject)
    elif content_type == "wiki.wiki_page":
        if history.type == HistoryType.create:
            title = _("Wiki Page created")
            url = resolve("wiki", obj.project.slug, obj.slug)
        elif history.type == HistoryType.change:
            title = _("Wiki Page changed")
            url = resolve("wiki", obj.project.slug, obj.slug)
        else:
            title = _("Wiki Page deleted")
            url = None
        body = _("Wiki Page: {}").format(obj.slug)
    elif content_type == "milestones.milestone":
        if history.type == HistoryType.create:
            title = _("Sprint created")
            url = resolve("taskboard", obj.project.slug, obj.slug)
        elif history.type == HistoryType.change:
            title = _("Sprint changed")
            url = resolve("taskboard", obj.project.slug, obj.slug)
        else:
            title = _("Sprint deleted")
            url = None
        body = _("Sprint: {}").format(obj.name)
    else:
        # Unknown content type: nothing to notify.
        return None
    # Deliver on the per-user live notification channel.
    return emit_event(
        {
            "title": title,
            "body": "Project: {}\n{}".format(obj.project.name, body),
            "url": url,
            "timeout": 10000,
            "id": history.id
        },
        "live_notifications.{}".format(user.id),
        sessionid=sessionid
    )
|
94e7e91ec73537aad71ab3839fbd203552d4fec2
| 3,641,769
|
def is_chitoi(tiles):
    """
    Returns True if the hand satisfies chitoitsu (seven pairs).

    A chiitoitsu hand consists of exactly seven distinct tiles, each of
    which appears exactly twice.

    :param tiles: sequence of tile identifiers (must support ``.count``).
    :return: bool
    """
    unique_tiles = set(tiles)
    # Generator instead of a materialised list inside all(): avoids the
    # intermediate list and short-circuits on the first non-pair.
    return (len(unique_tiles) == 7 and
            all(tiles.count(tile) == 2 for tile in unique_tiles))
|
c04149174bb779cd07616d4f419fc86531ab95dd
| 3,641,770
|
import itertools
def get_hpo_ancestors(hpo_db, hpo_id):
    """
    Get HPO terms higher up in the hierarchy.

    Recursively follows the ``is_a`` parent links of the given term and
    returns the term itself plus all of its ancestors, de-duplicated.

    :param hpo_db: database handle; ``hpo_db.hpo`` is a Mongo-style
        collection supporting ``find_one({'id': ...})``.
    :param hpo_id: HPO term id to start from.
    :return: list of term documents (leaf case) or dict_values of unique
        term documents.
    """
    h=hpo_db.hpo.find_one({'id':hpo_id})
    #print(hpo_id,h)
    if 'replaced_by' in h:
        # not primary id, replace with primary id and try again
        h = hpo_db.hpo.find_one({'id':h['replaced_by'][0]})
    hpo=[h]
    if 'is_a' not in h: return hpo
    # Recurse into every parent term and accumulate their ancestor sets.
    for hpo_parent_id in h['is_a']:
        #p=hpo_db.hpo.find({'id':hpo_parent_id}):
        hpo+=list(itertools.chain(get_hpo_ancestors(hpo_db,hpo_parent_id)))
    #remove duplicates
    # NOTE(review): keying on h['id'][0] assumes each document's 'id'
    # field is a list — confirm against the collection schema.
    hpo={h['id'][0]:h for h in hpo}.values()
    return hpo
|
2ef2c968bc3001b97529ccd269884cefad7a899f
| 3,641,771
|
def mcBufAir(params: dict, states: dict) -> float:
    """
    Growth respiration of the whole plant.

    Sums the growth-respiration contributions of the fruit, leaf and
    stem organs.

    Parameters
    ----------
    params : dict
        Parameters saved as model constants
    states : dict
        State variables of the model

    Returns
    -------
    float
        Growth respiration of the plant [mg m-2 s-1]
    """
    organs = ("fruit", "leaf", "stem")
    return sum(mcOrgAir_g(organ=o, params=params, states=states) for o in organs)
|
d31f201384fdab6c03856def1eed7d96fe28482a
| 3,641,772
|
def space_boundaries_re(regex):
    """Return *regex* wrapped so it matches only between whitespace (or the
    start/end of the string), with the original pattern captured in group 1."""
    return r"(?:^|\s)(" + regex + r")(?:\s|$)"
|
68861da6218165318b6a446c173b4906a93ef850
| 3,641,774
|
import requests
import json
import dateutil
def get_jobs():
    """
    this function will query USAJOBS api and return all open FEC jobs.
    if api call failed, a status error message will be displayed in the
    jobs.html session in the career page.
    it also query code list to update hirepath info. a hard-coded code list
    is used for backup if query failed.

    Returns:
        dict: ``{"jobData": [...]}`` on success, one dict per posting with
        title/id/uri/dates/grades and a human-readable ``open_to`` string;
        ``{"error": USAJOB_SEARCH_ERROR}`` when the search request fails.

    NOTE(review): depends on module-level ``JOB_URL``, ``CODES_URL``,
    ``CODE_LIST``, ``USAJOB_SEARCH_ERROR`` and Django ``settings`` — none
    are visible here; verify at the module top.
    """
    # url = 'https://data.usajobs.gov/api/Search'
    # codes_url = 'https://data.usajobs.gov/api/codelist/hiringpaths'
    querystring = {}
    querystring["Organization"] = settings.USAJOBS_AGENCY_CODE
    querystring["WhoMayApply"] = settings.USAJOBS_WHOMAYAPPLY
    headers = {
        "authorization-key": settings.USAJOBS_API_KEY,
        "host": "data.usajobs.gov",
        "cache-control": "no-cache",
    }
    # query usajobs API for all open fec jobs
    response = requests.get(JOB_URL, headers=headers, params=querystring)
    if response.status_code != 200:
        return {"error": USAJOB_SEARCH_ERROR}
    responses = response.json()
    # query usajobs API for list of all hiring-path codes
    codes_response = requests.get(CODES_URL, headers=headers)
    if codes_response.status_code != 200:
        # fall back to the hard-coded JSON code list
        codes_responses = json.loads(CODE_LIST)
    else:
        codes_responses = codes_response.json()
    jobData = []
    search_results = responses.get("SearchResult", {})
    # iterate over returned job data
    if "SearchResultItems" in search_results:
        for result in search_results.get("SearchResultItems", None):
            matched_object_descriptor = result.get("MatchedObjectDescriptor", {})
            # first (and typically only) grade entry, if present
            if len(matched_object_descriptor.get("JobGrade", [])) > 0:
                job_grade = matched_object_descriptor.get("JobGrade", [])[0].get(
                    "Code", ""
                )
            else:
                job_grade = ""
            jobs_dict = {
                "position_title": matched_object_descriptor.get("PositionTitle", ""),
                "position_id": matched_object_descriptor.get("PositionID", ""),
                "position_uri": matched_object_descriptor.get("PositionURI", ""),
                "position_start_date": dateutil.parser.parse(
                    matched_object_descriptor.get("PositionStartDate", "")
                ),
                "position_end_date": dateutil.parser.parse(
                    matched_object_descriptor.get("PositionEndDate", "")
                ),
                "job_grade": job_grade,
                "low_grade": matched_object_descriptor.get("UserArea", {})
                .get("Details", {})
                .get("LowGrade", ""),
                "high_grade": matched_object_descriptor.get("UserArea", {})
                .get("Details", {})
                .get("HighGrade", ""),
            }
            # map hiring-path code(s) for each job to description(s)
            if len(codes_responses.get("CodeList", [])) > 0:
                hiring_path_codes = codes_responses.get("CodeList", [])[0].get(
                    "ValidValue", []
                )
            else:
                hiring_path_codes = []
            hiring_path = [
                item
                for item in result.get("MatchedObjectDescriptor", {})
                .get("UserArea", {})
                .get("Details", {})
                .get("HiringPath", [])
            ]
            hp = []
            # translate each code to its description; keep the raw code if
            # no matching entry exists in the code list
            for path in hiring_path:
                hpa = [
                    item for item in hiring_path_codes if item["Code"] == path.upper()
                ]
                if hpa:
                    hp.append(hpa[0].get("Value", ""))
                else:
                    hp.append(path)
            hiring_path_list = ", ".join(str(n) for n in hp)
            open_to = {"open_to": hiring_path_list}
            jobs_dict.update(open_to)
            jobData.append(jobs_dict)
    return {"jobData": jobData}
|
46c69348b3f964fc1c4f35391aa5c7a8d049b47e
| 3,641,775
|
def artanh(x) -> ProcessBuilder:
    """
    Inverse hyperbolic tangent
    :param x: A number.
    :return: The computed angle in radians.
    """
    # Delegates to the process-graph builder; no local computation happens.
    return _process('artanh', x=x)
|
d93ec8e7059df02ebf7a60506d2e9896bc146b32
| 3,641,776
|
from thunder.readers import normalize_scheme, get_parallel_reader
import array
def fromtext(path, ext='txt', dtype='float64', skip=0, shape=None, index=None, labels=None, npartitions=None, engine=None, credentials=None):
    """
    Loads series data from text files.
    Assumes data are formatted as rows, where each record is a row
    of numbers separated by spaces e.g. 'v v v v v'. You can
    optionally specify a fixed number of initial items per row to skip / discard.
    Parameters
    ----------
    path : string
        Directory to load from, can be a URI string with scheme
        (e.g. 'file://', 's3n://', or 'gs://'), or a single file,
        or a directory, or a directory with a single wildcard character.
    ext : str, optional, default = 'txt'
        File extension.
    dtype : dtype or dtype specifier, default 'float64'
        Numerical type to use for data after converting from text.
    skip : int, optional, default = 0
        Number of items in each record to skip.
    shape : tuple or list, optional, default = None
        Shape of data if known, will be inferred otherwise.
    index : array, optional, default = None
        Index for records, if not provided will use (0, 1, ...)
    labels : array, optional, default = None
        Labels for records. If provided, should have length equal to number of rows.
    npartitions : int, default = None
        Number of partitions for parallelization (Spark only)
    engine : object, default = None
        Computational engine (e.g. a SparkContext for Spark)
    credentials : dict, default = None
        Credentials for remote storage (e.g. S3) in the form {access: ***, secret: ***}
    """
    # NOTE(review): `array`, `fromstring`, `asarray`, `spark`, `fromrdd`
    # and `fromarray` come from surrounding module imports not visible
    # here — presumably numpy (`array(...)` would fail if only the stdlib
    # `array` *module* were imported).  Verify the module's import block.
    path = normalize_scheme(path, ext)
    if spark and isinstance(engine, spark):
        # Spark path: parse each text line into a numeric vector.
        def parse(line, skip):
            vec = [float(x) for x in line.split(' ')]
            return array(vec[skip:], dtype=dtype)
        lines = engine.textFile(path, npartitions)
        data = lines.map(lambda x: parse(x, skip))
        # Re-key each record by its row index: ((i,), values).
        def switch(record):
            ary, idx = record
            return (idx,), ary
        rdd = data.zipWithIndex().map(switch)
        return fromrdd(rdd, dtype=str(dtype), shape=shape, index=index, ordered=True)
    else:
        # Local path: read every file and parse line by line.
        reader = get_parallel_reader(path)(engine, credentials=credentials)
        data = reader.read(path, ext=ext)
        values = []
        for kv in data:
            for line in str(kv[1].decode('utf-8')).split('\n')[:-1]:
                values.append(fromstring(line, sep=' '))
        values = asarray(values)
        if skip > 0:
            values = values[:, skip:]
        if shape:
            values = values.reshape(shape)
        return fromarray(values, index=index, labels=labels)
|
9ab049954b23888c2d2a17786edde57dd90507c0
| 3,641,777
|
def flop_gemm(n, k):
    """Count the additions and multiplications for multiplying an (n x n)
    matrix by an (n x k) matrix, with accumulation into the output."""
    # One multiply and one add per inner-product term: 2 * n * n * k.
    return 2 * k * n * n
|
b217b725e2ac27a47bc717789458fd20b4aa56c1
| 3,641,778
|
def index() -> str:
    """Health-check REST endpoint.

    Returns:
        str: The default greeting message, confirming the server is up.
    """
    greeting = 'DeChainy server greets you :D'
    return greeting
|
ce0caeb9994924f8d6ea10462db2be48bbc126d0
| 3,641,779
|
from typing import AnyStr
from typing import List
import json
def load_json_samples(path: AnyStr) -> List[str]:
    """
    Loads samples from a json file.

    :param path: Path to the target file
    :return: List of samples
    :raises RuntimeError: if the file's top-level JSON value is not a list
    """
    with open(path, "r", encoding="utf-8") as file:
        samples = json.load(file)
    if isinstance(samples, list):
        return samples
    # Fix: the original raised with an f-string containing no placeholders.
    raise RuntimeError("File's content must be list-like")
|
b735e7265a31f6bc6d19381bfe9d0cbe26dcf170
| 3,641,781
|
import struct
import lzma
def decompress_lzma(data: bytes) -> bytes:
    """Decompress a raw LZMA1 stream preceded by a 5-byte legacy header.

    The header holds the properties byte followed by a little-endian
    32-bit dictionary size, as in the classic .lzma container.

    :param data: 5-byte header + raw LZMA1 payload
    :raises _lzma.LZMAError: if the stream ends before the end-of-stream
        marker is reached
    :return: the decompressed bytes
    """
    props_byte, dict_size = struct.unpack("<BI", data[:5])
    # The properties byte encodes props = (pb * 5 + lp) * 9 + lc.
    lc = props_byte % 9
    pb, lp = divmod(props_byte // 9, 5)
    filters = [
        {
            "id": lzma.FILTER_LZMA1,
            "dict_size": dict_size,
            "lc": lc,
            "lp": lp,
            "pb": pb,
        }
    ]
    decompressor = lzma.LZMADecompressor(format=lzma.FORMAT_RAW, filters=filters)
    return decompressor.decompress(data[5:])
|
247c3d59d45f3f140d4f2c36a7500ff8a51e45b0
| 3,641,783
|
def validate(request):
    """
    Validate actor name exists in database before searching.
    If more than one name fits the criteria, selects the first one
    and returns the id.
    Won't render.

    Query params:
        search-for: actor name being searched for.
        start-from: actor name to start from.

    Returns a JsonResponse mapping both params to actor ids, or a 404
    JSON payload with an ``errors`` mapping when resolution fails.
    """
    search_for = request.GET.get('search-for', default='')
    start_from = request.GET.get('start-from', default='')
    data = {}
    search_for_actor = get_actor(search_for)
    start_from_actor = get_actor(start_from)
    if not search_for_actor:
        data['errors'] = {'search-for': 'Not a valid name'}
    if not start_from_actor:
        # NOTE(review): this assignment overwrites any 'search-for' error
        # recorded above, so at most one error is ever reported — confirm
        # whether both errors should be merged instead.
        data['errors'] = {'start-from': 'Not a valid name'}
    if 'errors' in data:
        data['status'] = 'false'
        return JsonResponse(data, status=404)
    else:
        data = {
            'search-for': search_for_actor.id,
            'start-from': start_from_actor.id,
        }
        return JsonResponse(data)
|
39b9183cd570cce0ddfd81febde0ec125f11c578
| 3,641,784
|
def merge(left, right, on=None, left_on=None, right_on=None):
    """Inner-join two DataFrames using explicit-comms.

    Explicit-comms counterpart of Dask's ``Dataframe.merge()`` that only
    supports "inner" joins.  Requires an activate client.

    Notice
    ------
    As a side effect, all partitions located on the same worker are
    concatenated, so the returned dataframe has one partition per worker.

    Parameters
    ----------
    left : dask.dataframe.DataFrame
    right : dask.dataframe.DataFrame
    on : str or list of str
        Column or index level names to join on. These must be found in
        both DataFrames.
    left_on : str or list of str
        Column to join on in the left DataFrame.
    right_on : str or list of str
        Column to join on in the right DataFrame.

    Returns
    -------
    df : dask.dataframe.DataFrame
        Merged dataframe
    """
    def _as_column_list(columns):
        # Normalise a str / iterable-of-str spec into a list of names.
        if columns:
            return [columns] if isinstance(columns, str) else list(columns)
        return columns

    on = _as_column_list(on)
    left_on = _as_column_list(left_on)
    right_on = _as_column_list(right_on)
    if left_on is None:
        left_on = on
    if right_on is None:
        right_on = on
    if not (left_on and right_on):
        raise ValueError(
            "Some combination of the on, left_on, and right_on arguments must be set"
        )
    return submit_dataframe_operation(
        comms.default_comms(),
        local_df_merge,
        df_list=(left, right),
        extra_args=(left_on, right_on),
    )
|
847070e27007c049d0c58059ec9f7c66681f21bc
| 3,641,785
|
def estimate_fs(t):
    """Estimate the sampling rate of a timestamp vector.

    The median instantaneous rate (1 / dt) is computed from successive
    timestamps and snapped to the nearest of a set of common rates.
    """
    common_rates = (
        2000,
        1250,
        1000,
        600,
        500,
        300,
        250,
        240,
        200,
        120,
        75,
        60,
        50,
        30,
        25,
    )
    raw_estimate = np.median(1 / np.diff(t))
    return min(common_rates, key=lambda rate: abs(rate - raw_estimate))
|
82dbd115e3c7b656302d10339cdfe77b60ab0620
| 3,641,786
|
def get_case_number(caselist):
    """Count the non-comment lines in file *caselist*.

    A line is a comment when its stripped form starts with '#'.  Blank
    lines are not comments and are therefore counted, matching the
    original behaviour.

    :param caselist: path to the case-list file
    :return: number of counted lines
    """
    with open(caselist, 'r') as casefile:
        # Idiom fix: `... is False` comparison replaced with `not ...`;
        # manual counter replaced with sum() over a generator.
        return sum(1 for line in casefile
                   if not line.strip().startswith('#'))
|
b1366d8e4a0e2c08da5265502d2dd2d72bf95c19
| 3,641,787
|
from typing import Any
def build_param_float_request(*, scenario: str, value: float, **kwargs: Any) -> HttpRequest:
    """Send a post request with header values "scenario": "positive", "value": 0.07 or "scenario":
    "negative", "value": -3.0.

    See https://aka.ms/azsdk/python/protocol/quickstart for how to incorporate this request builder
    into your code flow.

    :keyword scenario: Send a post request with header values "scenario": "positive" or "negative".
    :paramtype scenario: str
    :keyword value: Send a post request with header values 0.07 or -3.0.
    :paramtype value: float
    :return: Returns an :class:`~azure.core.rest.HttpRequest` that you will pass to the client's
     `send_request` method. See https://aka.ms/azsdk/python/protocol/quickstart for how to
     incorporate this response into your code flow.
    :rtype: ~azure.core.rest.HttpRequest
    """
    accept = "application/json"

    # Construct URL
    url = "/header/param/prim/float"

    # Construct headers (serialized via the module-level _SERIALIZER)
    header_parameters = kwargs.pop("headers", {})  # type: Dict[str, Any]
    header_parameters["scenario"] = _SERIALIZER.header("scenario", scenario, "str")
    header_parameters["value"] = _SERIALIZER.header("value", value, "float")
    header_parameters["Accept"] = _SERIALIZER.header("accept", accept, "str")

    return HttpRequest(method="POST", url=url, headers=header_parameters, **kwargs)
|
3e310a92ebe5760a00abc82c5c6465e160a5881d
| 3,641,788
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.