content
stringlengths 35
762k
| sha1
stringlengths 40
40
| id
int64 0
3.66M
|
|---|---|---|
def group_sums_dummy(x, group_dummy):
    """Aggregate the rows of ``x`` by group, using a dummy/indicator matrix.

    ``group_dummy`` may be a dense ndarray or a scipy sparse matrix; sparse
    matrices overload ``*`` as matrix multiplication, so the two branches
    compute the same group-wise sums.
    """
    if data_util._is_using_ndarray_type(group_dummy, None):
        # Dense indicator matrix: ordinary matrix product.
        return np.dot(x.T, group_dummy)
    # Sparse indicator matrix: `*` is matmul for scipy.sparse types.
    return x.T * group_dummy
|
2cfb448130b9c48b41dd491a4fe01ab11a38478b
| 3,641,906
|
def fixture_multi_check_schema() -> DataFrameSchema:
    """Provide a DataFrameSchema that applies several positivity checks to column `a`."""
    schema = _multi_check_schema()
    return schema
|
f2b95cda9d6cd3e5bf0055d22c4b8505370a9867
| 3,641,907
|
def get_featurizer(featurizer_key: str) -> ReactionFeaturizer:
    """Instantiate the ReactionFeaturizer registered under *featurizer_key*.

    :param featurizer_key: key of a registered featurizer factory.
    :return: a freshly constructed ReactionFeaturizer.
    :raises ValueError: when no factory is registered for the key.
    """
    if featurizer_key in FEATURIZER_INITIALIZERS:
        return FEATURIZER_INITIALIZERS[featurizer_key]()
    raise ValueError(f"No featurizer for key {featurizer_key}")
|
15583e90a0691ce10df9d789f6decef5443efed2
| 3,641,908
|
from typing import List
def is_negative_spec(*specs: List[str]) -> bool:
    """Check a variable number of spec lists for negative values.

    Each positional argument is a list of spec strings (the original
    annotation ``List[List[str]]`` described the whole varargs tuple, not a
    single argument, and has been corrected). A spec is negative when it
    contains a '-' character. Empty or ``None`` spec lists are skipped.

    :param specs: zero or more lists of spec strings.
    :return: True if any string in any non-empty list contains '-'.
    """
    return any('-' in spec
               for specset in specs if specset
               for spec in specset)
|
216e6db2e63a657ac95a31896b9b61329a10a3db
| 3,641,909
|
def is_np_timedelta_like(dtype: DTypeLike) -> bool:
    """Tell whether *dtype* is numpy's timedelta64 (or a sub-dtype of it)."""
    timedelta_base = np.timedelta64
    return np.issubdtype(dtype, timedelta_base)
|
b68d244d2d2c4d3029a93cbd8b3affca8c55f680
| 3,641,910
|
def pp2mr(pv, p):
    """Compute the mixing ratio (kg/kg) from partial pressure *pv* and total
    pressure *p*, assuming identical units and no condensate. Checked 20.03.20.
    """
    # flatten_input returns (array, was_scalar); units deliberately not
    # forced to pascal here, since that would be wrongly "corrected".
    pv, pv_was_scalar = flatten_input(pv)
    p, p_was_scalar = flatten_input(p)
    mr = eps1 * pv / (p - pv)
    # Collapse back to a scalar only when *both* inputs were scalars.
    if pv_was_scalar and p_was_scalar:
        return np.squeeze(mr)
    return mr
|
97bd35658a54a53d7541aa0ee71f1b25cf8c2dbf
| 3,641,911
|
def initialize_database(app):
    """Bind the shared SQLAlchemy ``db`` object to *app* and return it.

    Takes an initialized Flask application and binds a database context so
    queries can be executed (also outside a request, e.g. in scripts).
    """
    # Assigning db.app lets the extension operate without an app context;
    # see https://github.com/mitsuhiko/flask-sqlalchemy/issues/82
    db.app = app
    db.init_app(app)
    return db
|
11a9f6046f51239c071d3a61780d1edf775baa51
| 3,641,912
|
from typing import TextIO
def decrypt(input_file: TextIO, wordlist_filename: str) -> str:
    """
    Using wordlist_filename, decrypt input_file according to the handout
    instructions, and return the plaintext.

    For each line, every one of the 26 Caesar shifts is tried; the shift
    whose tokens (stripped of punctuation) match the most English words wins.

    Bug fixed: the previous version never reset the best candidate between
    lines, so a line with zero dictionary matches repeated the previous
    line's decryption. Unused locals were also removed.

    :param input_file: open text stream of ciphertext lines.
    :param wordlist_filename: path of the newline-separated English wordlist.
    :return: decrypted lines joined by newlines, outer whitespace stripped.
    """
    # Load the English wordlist into a set for O(1) membership tests.
    english_wordlist = set()
    with open(wordlist_filename) as file:
        for line_text in file:
            english_wordlist.add(line_text.strip())
    ans = ''
    for line in input_file:
        lowered = line.lower()
        best_text = ''   # reset per line (was carried over: bug)
        max_so_far = 0
        for count in range(26):
            text = shift(lowered, count).split()
            matches = 0
            for word in text:
                # Remove symbols & punctuation before dictionary lookup.
                cleaned = ''.join(char for char in word if char.isalnum())
                if cleaned in english_wordlist:
                    matches += 1
            # Keep the shift with the most dictionary matches so far.
            if matches > max_so_far:
                best_text = ' '.join(text)
                max_so_far = matches
        ans += best_text + '\n'
    return ans.strip()
|
87fae6e252b57f9903cd4b46d18a625455761d62
| 3,641,913
|
def loadEvents(fname):
    """
    Read a file whose lines are "<unix timestamp> <arbitrary string>" and
    return them as a list of {'t': int_timestamp, 's': text} dicts.

    Fixes: the file handle is now closed (with-block), the ``str`` builtin
    is no longer shadowed, and the dead duplicate initialisation plus
    commented-out error handling were removed. (The old docstring's claim
    about tracking global mint/maxt did not match the code and was dropped.)
    """
    events = []
    with open(fname, 'r') as f:
        for line in f.read().splitlines():
            ix = line.find(' ')  # first space separates stamp from payload
            stamp = int(line[:ix])
            events.append({'t': stamp, 's': line[ix + 1:]})
    return events
|
495dbd5d47892b953c139b27b1f20dd9854ea29a
| 3,641,914
|
def compute_relative_target_raw(current_pose, target_pose):
    """
    Computes the relative target pose which has to be fed to the network as an input.
    Both target_pose and current_pose have to be in the same coordinate frame (global map).

    Args (assumed ROS geometry_msgs/PoseStamped -- TODO confirm):
        current_pose: robot pose in the map frame.
        target_pose: goal pose in the map frame.
    Returns:
        (x, y, yaw) tuple: goal position expressed in the robot base frame
        (note the y component is negated -- presumably to match the
        network's frame convention; verify against training data) and the
        yaw difference to the goal orientation.
    """
    # Compute the relative goal position (still in the map frame)
    goal_position_difference = [target_pose.pose.position.x - current_pose.pose.position.x,
                                target_pose.pose.position.y - current_pose.pose.position.y]
    # Get the current orientation and the goal orientation as [x, y, z, w]
    current_orientation = current_pose.pose.orientation
    p = [current_orientation.x, current_orientation.y, current_orientation.z, current_orientation.w]
    goal_orientation = target_pose.pose.orientation
    q = [goal_orientation.x, goal_orientation.y, goal_orientation.z, goal_orientation.w]
    # Rotate the relative goal position into the base frame (robot frame)
    # via quaternion conjugation: p^-1 * v * p, with v the pure quaternion
    # (dx, dy, 0, 0) built from the position difference.
    goal_position_base_frame = tf.transformations.quaternion_multiply(tf.transformations.quaternion_inverse(p),
                                                                      tf.transformations.quaternion_multiply([goal_position_difference[0],
                                                                                                              goal_position_difference[1],
                                                                                                              0,
                                                                                                              0],
                                                                                                             p))
    # Compute the difference to the goal orientation, keep only the yaw part
    orientation_to_target = tf.transformations.quaternion_multiply(q, tf.transformations.quaternion_inverse(p))
    yaw = tf.transformations.euler_from_quaternion(orientation_to_target)[2]
    return (goal_position_base_frame[0], -goal_position_base_frame[1], yaw)
|
efee9b6ef48bda67dfd42526c20e7d1de6a164da
| 3,641,915
|
import httpx
def get_tokeninfo_remote(token_info_url, token):
    """
    Retrieve oauth token_info remotely using HTTP
    :param token_info_url: Url to get information about the token
    :type token_info_url: str
    :param token: oauth token from authorization header
    :type token: str
    :rtype: dict or None (None when the endpoint returns a non-2xx status)
    """
    token_request = httpx.get(token_info_url, headers={'Authorization': 'Bearer {}'.format(token)}, timeout=5)
    # Bug fix: httpx.Response has no `.ok` attribute (that is the `requests`
    # API) -- accessing it raised AttributeError. `is_success` is the httpx
    # equivalent (2xx status codes).
    if not token_request.is_success:
        return None
    return token_request.json()
|
c0f72b47b97d2d9c57b8b7fe30a3cba9f29c2005
| 3,641,916
|
from datetime import datetime
def make_site_object(config, seen):
    """Make object with site values for evaluation.

    Args:
        config: site configuration exposing author/title/... attributes and a
            ``template`` mapping of template-name -> template text.
        seen: not used in this function -- presumably consumed by the caller
            or by ``_fill``; TODO confirm.
    Returns:
        An ``SN`` namespace (presumably types.SimpleNamespace -- confirm the
        module's import) whose attributes are zero-arg callables, plus
        ``foot``/``head`` (taking the page root) and ``stats`` fillers.
    """
    now = datetime.today().strftime("%Y-%m-%d")
    # Pre-render the subtitle heading once; empty string when unset.
    subtitle = (
        f'<h2 class="subtitle">{config.subtitle}</h2>'
        if config.subtitle
        else ""
    )
    # Values are exposed as callables so templates evaluate them lazily.
    site = SN(
        author=lambda: config.author,
        builddate=lambda: now,
        copyrightyear=lambda: config.copyrightyear,
        domain=lambda: config.domain,
        email=lambda: config.email,
        lang=lambda: config.lang,
        repo=lambda: config.repo,
        title=lambda: config.title,
        subtitle=lambda: subtitle,
        tool=lambda: config.tool
    )
    # foot/head receive the page root so relative links can be resolved;
    # they fall back to empty strings when the template is absent.
    if "foot.html" in config.template:
        site.foot = lambda root: _fill(
            "foot.html",
            config.template["foot.html"],
            site,
            SN(root=root)
        )
    else:
        site.foot = lambda root: ""
    if "head.html" in config.template:
        site.head = lambda root: _fill(
            "head.html",
            config.template["head.html"],
            site,
            SN(root=root)
        )
    else:
        site.head = lambda root: ""
    # stats has no root dependency, so it is filled eagerly once.
    if "stats.html" in config.template:
        filled = _fill("stats.html", config.template["stats.html"], site, SN())
        site.stats = lambda: filled
    else:
        site.stats = lambda: ""
    return site
|
762382446736a1815deae275db0d7485c0718a4e
| 3,641,917
|
from datetime import datetime
import json
import hashlib
def map_aircraft_to_record(aircrafts, message_now, device_id):
    """
    Maps each `aircraft` entity to a BigQuery record plus its unique id.
    Returns `(unique_ids, records)` (a transposed zip over the pairs).
    """
    fields = ('hex', 'squawk', 'flight', 'lat', 'lon', 'nucp', 'seen_pos',
              'altitude', 'vert_rate', 'track', 'speed', 'messages', 'seen',
              'rssi')

    def build(aircraft):
        # Field order matters: the sha512 id is taken over the JSON dump.
        record = {name: aircraft.get(name) for name in fields}
        record['device_id'] = device_id
        record['timestamp'] = datetime.utcfromtimestamp(float(message_now)).isoformat()
        digest = hashlib.sha512(json.dumps(record).encode('utf-8')).hexdigest()
        unique_id = f'{message_now}_{digest}'
        # created_at is added *after* hashing so it never affects the id.
        record['created_at'] = datetime.now().isoformat()
        return (unique_id, record)

    return zip(*(build(aircraft) for aircraft in aircrafts))
|
d423b87e2018486de076cc94a719038c53c54602
| 3,641,919
|
def add_gaussian_noise(image, mean=0, std=0.001):
    """Add Gaussian noise to an image and return it as uint8 in [0, 255].

    Note: despite its name, ``std`` is treated as a *variance* -- the noise
    scale passed to ``np.random.normal`` is ``std ** 0.5``. The parameter
    name is kept for backward compatibility. (The original Chinese docstring
    said mean / variance, matching this behavior.)

    Fix: removed a leftover debug ``print`` of the empirical noise variance.

    :param image: input image array with values in 0..255.
    :param mean: noise mean, in normalized [0, 1] units.
    :param std: noise *variance*, in normalized [0, 1] units.
    :return: noisy image as a np.uint8 array.
    """
    image = np.array(image / 255, dtype=float)
    noise = np.random.normal(mean, std ** 0.5, image.shape)
    out = image + noise
    # A uint8 input normalized by 255 cannot be negative, but keep the guard
    # for callers passing already-normalized signed data.
    low_clip = -1. if image.min() < 0 else 0.
    out = np.clip(out, low_clip, 1.0)
    return np.uint8(out * 255)
|
1683ae5815e28ab0c3354be1623ec56e6058b449
| 3,641,920
|
def sub(xs, ys):
    """
    Computes xs - ys, such that elements in xs that occur in ys are removed.
    Order and duplicates of the surviving elements of xs are preserved.
    @param xs: list
    @param ys: list
    @return: xs - ys
    """
    try:
        # O(len(xs) + len(ys)) when all elements are hashable.
        exclude = set(ys)
        return [x for x in xs if x not in exclude]
    except TypeError:
        # Unhashable elements (e.g. nested lists): fall back to the original
        # O(len(xs) * len(ys)) linear scans.
        return [x for x in xs if x not in ys]
|
8911bb2c79919cae88463a95521cf051828038e8
| 3,641,922
|
def create_folio_skill(request, folio_id):
    """
    Creates a new folio skill from POSTed FolioSkillForm data, emitting a
    success or error flash message, then redirects back to the folio's
    skills edit page in every case.

    Args:
        request: Django HttpRequest; must be a POST carrying the form data.
        folio_id: id of the folio whose skills page is redirected to.
    Returns:
        HttpResponseRedirect to the edit_folio_skills view.
    """
    if request.method == "POST":
        form = FolioSkillForm(request.POST)
        if form.is_valid():
            skill = form.save(commit=False)
            # NOTE(review): assigning a User instance to `author_id` (the FK
            # *id* attribute) is unconventional; `skill.author = request.user`
            # is the usual form -- confirm against the model definition.
            skill.author_id = request.user
            skill.save()
            messages.success(
                request,
                f"The {skill.skill_title} skill has "
                f"been created successfully."
            )
        else:
            messages.error(
                request,
                "Data posted was not valid "
                "to create a new skill."
            )
    else:
        messages.error(
            request,
            "Data should be posted when "
            "attempting to create a new skill."
        )
    # Redirect regardless of outcome; flash messages carry the result.
    return redirect(
        reverse("edit_folio_skills",
                kwargs={"folio_id": folio_id})
    )
|
7c1966f7a6b3c98e972da90abb5eb984a4af85a2
| 3,641,923
|
def reduce_tags(tags):
    """Filter a set of tags to return only those that aren't descendents from others in the list."""
    # A tag survives when tag_before holds against every *other* tag.
    return [candidate for candidate in tags
            if all(tag_before(candidate, other)
                   for other in tags if other != candidate)]
|
fa76e1cc5bd10ecd58bdc8f5277fa32d41484c17
| 3,641,925
|
def default_param_noise_filter(var):
    """
    Decide whether a variable should receive parameter-space noise.
    :param var: (TensorFlow Tensor) the variable
    :return: (bool) True when the variable may be perturbed
    """
    # Non-trainable variables are never perturbed.
    if var not in tf.trainable_variables():
        return False
    # Only fully-connected layers are perturbed. The remaining layers are
    # likely conv or layer-norm layers, which we leave untouched (feature
    # extraction / normalization respectively). If you change your network,
    # reconsider which layers to perturb and which to keep untouched.
    return "fully_connected" in var.name
|
12817bf2c2b726d91d9d3cc838b52499e5382d80
| 3,641,926
|
def input_fn(request_body, request_content_type):
    """
    SageMaker input_fn stub: deserializes the inference request before
    predict_fn is invoked.
    :param request_body: str
        The request body.
    :param request_content_type: type
        The request body type.
    :return: None for JSON requests, otherwise a placeholder string.
    """
    if request_content_type != 'application/json':
        return 'WHAT HAPPEN TO YOU !'
    return None
|
62d45e188d5537eaa566bd4b90bdb8abc7626621
| 3,641,927
|
def get_colors(k):
    """
    Return a list of k color codes, cycling through a fixed palette of 7
    base colors when k exceeds the palette size.
    """
    palette = ['b', 'r', 'g', 'c', 'm', 'y', 'k']
    return [palette[i % len(palette)] for i in range(k)]
|
6c4a38eb394254f57d8be9fca47e0b44f51f5f04
| 3,641,928
|
import logging
def test_significance(stat, A, b, eta, mu, cov, z, alpha):
    """
    One-tailed significance test of *stat* against the distribution
    returned by psi_inf.

    Returns:
        (reject, params): ``reject`` is True when stat exceeds the
        1 - alpha quantile. When the fitted scale is NaN or non-real the
        test is abandoned with (False, params).
    """
    # ppf: percent-point (inverse CDF) function; params: fitted parameters.
    ppf, params = psi_inf(A, b, eta, mu, cov, z)
    if np.isnan(params['scale']) or not np.isreal(params['scale']):
        # NOTE(review): the message mentions "negative" but the condition
        # checks NaN / non-real scale -- confirm the intended check.
        logging.warning("Scale is not real or negative, test reject")
        return False, params
    threshold = ppf(1.-alpha)
    return stat > threshold, params
|
f723a75c59a23d7110a41036d6873f6023b42333
| 3,641,929
|
def _get_cluster_group_idx(clusters: np.ndarray) -> nb.typed.List:
    """
    Get start and stop indexes for unique cluster labels.
    Parameters
    ----------
    clusters : np.ndarray
        The ordered cluster labels (noise points are -1).
    Returns
    -------
    nb.typed.List[Tuple[int, int]]
        Tuples with the start index (inclusive) and end index (exclusive) of
        the unique cluster labels. Empty when every point is noise.
    """
    n = clusters.shape[0]
    # Skip the leading noise block. Fix: guard against running past the end
    # when *all* labels are -1 (the unguarded loop raised IndexError).
    start_i = 0
    while start_i < n and clusters[start_i] == -1:
        start_i += 1
    group_idx, stop_i = nb.typed.List(), start_i
    while stop_i < n:
        # Consume one run of identical labels: [start_i, stop_i).
        start_i, label = stop_i, clusters[stop_i]
        while stop_i < n and clusters[stop_i] == label:
            stop_i += 1
        group_idx.append((start_i, stop_i))
    return group_idx
|
5bdae0228367868c201b8a399ca959bc50c715b2
| 3,641,930
|
from typing import Union
from typing import Optional
def genes_flyaltas2(
    genes: Union[str, list] = None,
    gene_nametype: Optional[str] = "symbol",
    stage: Optional[str] = "male_adult",
    enrich_threshold: Optional[float] = 1.0,
    fbgn_path: Optional[str] = "deml_fbgn.tsv.gz",
) -> pd.DataFrame:
    """
    Annotate a gene list based on the flyaltas2 database
    Parameters
    ----------
    genes: `str` or `list` (default: `None`)
        The name of a gene, or a list of genes.
    gene_nametype : `str` (default: `'symbol'`)
        Type of gene name, including `'symbol'` and `'FBgn'`.
    stage: `str` (default: `'male_adult'`)
        The developmental stages of Drosophila melanogaster. Available stages are:
        * `'larval'`
        * `'female_adult'`
        * `'male_adult'`
    enrich_threshold: `float` (default: `1.0`)
        Threshold for filtering enrichment in FlyAtlas 2.
    fbgn_path: `str` (default: `'deml_fbgn.tsv.gz'`)
        Absolute path to the deml_fbgn.tsv.gz.
    Returns
    -------
    anno_genes: `pandas.DataFrame`
        The genes and the particular tissues in which the genes are specifically expressed of each group.
    """
    genes = [genes] if isinstance(genes, str) else genes
    # Bug fix: identity comparison `is "symbol"` is unreliable (and a
    # SyntaxWarning on modern Python); string equality is intended.
    fbgn_names = (
        symbol2fbgn(gene=genes, datapath=fbgn_path)
        if gene_nametype == "symbol"
        else genes
    )
    # Find the particular tissue in which each gene is specifically
    # expressed; collect frames and concatenate once (avoids the quadratic
    # cost of pd.concat inside the loop).
    tissue_frames = []
    for fbgn_name in fbgn_names:
        particular_tissues = gene2tissue(fbgn_name, stage, enrich_threshold)
        if particular_tissues is not None:
            tissue_frames.append(particular_tissues)
    anno_genes = pd.concat(tissue_frames, axis=0) if tissue_frames else pd.DataFrame()
    return anno_genes.astype(str)
|
651e3eb2ce58ae19d1785df1217b5434737b8bda
| 3,641,931
|
import torch
import tqdm
def _batch_embed(args, net, vecs: StringDataset, device, char_alphabet=None):
    """Embed every item of *vecs* in batches and return one stacked array.

    :param args: namespace providing ``test_batch_size``.
    :param net: embedding network used on the non-BERT path.
    :param vecs: string dataset; temporarily converted to a BERT dataset
        when ``char_alphabet`` is given, and restored before returning.
    :param device: torch device for the network path.
    :param char_alphabet: optional dict mapping id -> char; when given, the
        module-level ``tokenizer``/``bert`` globals are used instead of net.
    :return: np.ndarray with one embedding row per item.
    """
    # Idiom fix: compare to None with `is (not)` rather than `!=`.
    if char_alphabet is not None:
        vecs.to_bert_dataset(char_alphabet)
    test_loader = torch.utils.data.DataLoader(vecs, batch_size=args.test_batch_size, shuffle=False, num_workers=4)
    net.eval()
    embedding = []
    with tqdm.tqdm(total=len(test_loader), desc="# batch embedding") as p_bar:
        for i, x in enumerate(test_loader):
            p_bar.update(1)
            if char_alphabet is not None:
                for xx in x:
                    # NOTE(review): `tokenizer` and `bert` are module-level
                    # globals defined elsewhere in this file -- confirm.
                    xx = tokenizer(xx, return_tensors="pt")
                    # 1 x 768
                    xx = bert(**xx)[0][0][1].unsqueeze(0)
                    embedding.append(xx.cpu().data.numpy())
            else:
                embedding.append(net(x.to(device)).cpu().data.numpy())
    vecs.to_original_dataset()
    return np.concatenate(embedding, axis=0)
|
43224296330e4516c530d65217edf6a4b12dc5d3
| 3,641,932
|
def get_adjacency_matrix(distance_df, sensor_ids, normalized_k=0.1):
    """
    Build a Gaussian-kernel adjacency matrix from pairwise sensor distances.
    :param distance_df: data frame with three columns: [from, to, distance].
    :param sensor_ids: list of sensor ids fixing the row/column order.
    :param normalized_k: entries that fall below this value after
        normalization are zeroed out for sparsity.
    :return: adjacency matrix (num_sensors x num_sensors, float32).
    """
    num_sensors = len(sensor_ids)
    # Map each sensor id to its matrix index.
    id_to_idx = {sensor_id: idx for idx, sensor_id in enumerate(sensor_ids)}
    # Distance matrix, inf wherever no measurement exists.
    dist_mx = np.full((num_sensors, num_sensors), np.inf, dtype=np.float32)
    for row in distance_df.values:
        if row[0] in id_to_idx and row[1] in id_to_idx:
            dist_mx[id_to_idx[row[0]], id_to_idx[row[1]]] = row[2]
    # Gaussian kernel: bandwidth is the std of the observed distances.
    finite_distances = dist_mx[~np.isinf(dist_mx)].flatten()
    adj_mx = np.exp(-np.square(dist_mx / finite_distances.std()))
    # Make the adjacency matrix symmetric by taking the max.
    # adj_mx = np.maximum.reduce([adj_mx, adj_mx.T])
    # Zero out entries below the threshold k for sparsity.
    adj_mx[adj_mx < normalized_k] = 0
    return adj_mx
|
b8acd5401dbf743294d52d71ddc97a0b0c74780b
| 3,641,933
|
def map_ids_to_strs(ids, vocab, join=True, strip_pad='<PAD>',
                    strip_bos='<BOS>', strip_eos='<EOS>', compat=True):
    """Convert `int` token indexes into strings.

    Ids are mapped to tokens via *vocab*, optionally joined into
    space-separated sentences, and special tokens are stripped.

    Args:
        ids: An n-D numpy array or (possibly nested) list of `int` indexes.
        vocab: An instance of :class:`~texar.tf.data.Vocab`.
        join (bool): Whether to concatenate the tokens along the last
            dimension into a single space-separated string.
        strip_pad (str): PAD token to strip from the leading and trailing
            ends of each string ('<PAD>' by default, as in
            :class:`~texar.tf.data.SpecialTokens`.PAD). `None`/`False`
            disables the stripping.
        strip_bos (str): BOS token to strip from the leading end of each
            string ('<BOS>' by default). `None`/`False` disables it.
        strip_eos (str): EOS token; it and everything after it is stripped
            ('<EOS>' by default). `None`/`False` disables it.
        compat (bool): Whether to convert tokens into unicode text first.
    Returns:
        With :attr:`join` True, a `(n-1)`-D numpy array (or list) of
        concatenated strings; otherwise an `n`-D numpy array (or list) of
        str tokens.
    Example:
        .. code-block:: python
            text_ids = [[1, 9, 6, 2, 0, 0], [1, 28, 7, 8, 2, 0]]
            text = map_ids_to_strs(text_ids, data.vocab)
            # text == ['a sentence', 'parsed from ids']
            text = map_ids_to_strs(
                text_ids, data.vocab, join=False,
                strip_pad=None, strip_bos=None, strip_eos=None)
            # text == [['<BOS>', 'a', 'sentence', '<EOS>', '<PAD>', '<PAD>'],
            #          ['<BOS>', 'parsed', 'from', 'ids', '<EOS>', '<PAD>']]
    """
    tokens = vocab.map_ids_to_tokens_py(ids)
    # Mirror the input container type: lists/tuples in, lists out.
    if isinstance(ids, (list, tuple)):
        tokens = tokens.tolist()
    if compat:
        tokens = compat_as_text(tokens)
    joined = str_join(tokens, compat=False)
    stripped = strip_special_tokens(
        joined, strip_pad=strip_pad, strip_bos=strip_bos,
        strip_eos=strip_eos, compat=False)
    if not join:
        # Split back into per-token form with the original nesting.
        return _recur_split(stripped, ids)
    return stripped
|
6e702d8c0d658bd822f0db6c4094ab093207b78e
| 3,641,934
|
import random
def rand_ascii_str(length):
    """Generates a random string of specified length, composed of ascii letters
    and digits.
    Args:
        length: The number of characters in the string.
    Returns:
        The random string generated.
    """
    return ''.join(random.choice(ascii_letters_and_digits)
                   for _ in range(length))
|
130e8dbe1eb8e60813b01cf08dc7e9fd388638cf
| 3,641,936
|
def set_achievement_disabled(aid, disabled):
    """
    Enable or disable an achievement.
    Args:
        aid: the achievement's aid
        disabled: whether or not the achievement should be disabled.
    Returns:
        The updated achievement object.
    """
    changes = {"disabled": disabled}
    return update_achievement(aid, changes)
|
2793e3576904f5b1498361f02e32fdf2642b734c
| 3,641,937
|
from datetime import datetime
def get_block(in_dt: datetime):
    """Get the BlockNumber instance at or before the datetime timestamp.

    The (naive) input datetime is interpreted as UTC.
    """
    # Fix: `timezone` was referenced but not imported in this module's
    # visible imports; import it locally so the call cannot NameError.
    from datetime import timezone
    return BlockNumber.from_timestamp(in_dt.replace(tzinfo=timezone.utc).timestamp())
|
a77ca6c4021ebc0e5ef2e2c0294246415ccd9811
| 3,641,938
|
def get_channels(posts):
    """
    <summary> Classifies each post as a tweet or a facebook post by length.</summary>
    <param name="posts" type="mapping"> Mapping with 'post_id' and 'post_text' list columns </param>
    <returns> List with "twitter" (text of 140 chars or less) or "facebook" per post </returns>
    """
    # The original docstring claimed a single string was returned; a list
    # (one channel per post) is what the code produces. Index over post_id
    # to keep parity with the original row count.
    return ["twitter" if len(posts['post_text'][i]) <= 140 else "facebook"
            for i in range(len(posts['post_id']))]
|
2bd67d13079ce115263ac46856d8a708f461cb7e
| 3,641,939
|
from bs4 import BeautifulSoup
def clean_text(text):
    """
    text: a string
    return: modified initial string
    """
    cleaned = BeautifulSoup(text, "lxml").text           # HTML decoding
    cleaned = cleaned.lower()                            # lowercase text
    cleaned = REPLACE_BY_SPACE_RE.sub(' ', cleaned)      # map these symbols to spaces
    cleaned = BAD_SYMBOLS_RE.sub('', cleaned)            # drop bad symbols entirely
    # finally, remove stopwords
    kept = (token for token in cleaned.split() if token not in STOPWORDS)
    return ' '.join(kept)
|
b29f4b388bac55d04c824ad014a6b85e1c9c8ede
| 3,641,940
|
def ConvertToTypeEnum(type_enum, airflow_executor_type):
  """Converts airflow executor type string to enum.
  Args:
    type_enum: AirflowExecutorTypeValueValuesEnum, executor type enum value.
    airflow_executor_type: string, executor type string value.
  Returns:
    AirflowExecutorTypeValueValuesEnum: the executor type enum value.
  """
  # The enum type itself performs the string -> member lookup.
  return type_enum(airflow_executor_type)
|
04162b04719031ba6b96d981a7ffe8a82691bc31
| 3,641,941
|
import numpy
def image2array(image):
    """PIL Image to NumPy array of shape (bands, height, width), float32.

    Fix: ``numpy.fromstring`` and ``Image.tostring`` were removed from
    current NumPy / Pillow; ``frombuffer`` / ``tobytes`` are the drop-in
    replacements.
    """
    assert image.mode in ('L', 'RGB', 'CMYK')
    arr = numpy.frombuffer(image.tobytes(), numpy.uint8)
    # (height, width, bands), then move bands to the front.
    arr = arr.reshape(image.size[1], image.size[0], len(image.getbands()))
    return arr.swapaxes(0, 2).swapaxes(1, 2).astype(numpy.float32)
|
bb1ba38f2d27acb63ea7ccfb600720eee3d683a3
| 3,641,942
|
from typing import Type
def enum_name_callback(ctx: 'mypy.plugin.AttributeContext') -> Type:
    """This plugin refines the 'name' attribute in enums to act as if
    they were declared to be final.
    For example, the expression 'MyEnum.FOO.name' normally is inferred
    to be of type 'str'.
    This plugin will instead make the inferred type be a 'str' where the
    last known value is 'Literal["FOO"]'. This means it would be legal to
    use 'MyEnum.FOO.name' in contexts that expect a Literal type, just like
    any other Final variable or attribute.
    This plugin assumes that the provided context is an attribute access
    matching one of the strings found in 'ENUM_NAME_ACCESS'.
    """
    enum_field_name = _extract_underlying_field_name(ctx.type)
    if enum_field_name is None:
        # Not an access on a specific enum member: keep mypy's default.
        return ctx.default_attr_type
    else:
        str_type = ctx.api.named_generic_type('builtins.str', [])
        # Narrow 'str' so its last known value is Literal["<member name>"].
        literal_type = LiteralType(enum_field_name, fallback=str_type)
        return str_type.copy_modified(last_known_value=literal_type)
|
e7b34490625ad2c8cf55ed002592ed1194f96e2f
| 3,641,943
|
def is_forward_angle(n, theta):
    """
    if a wave is traveling at angle theta from normal in a medium with index n,
    calculate whether or not this is the forward-traveling wave (i.e., the one
    going from front to back of the stack, like the incoming or outgoing waves,
    but unlike the reflected wave). For real n & theta, the criterion is simply
    -pi/2 < theta < pi/2, but for complex n & theta, it's more complicated.
    See https://arxiv.org/abs/1603.02720 appendix D. If theta is the forward
    angle, then (pi-theta) is the backward angle and vice-versa.

    Args:
        n: complex refractive index of the medium.
        theta: complex propagation angle from the surface normal (radians).
    Returns:
        bool: True when (n, theta) describes the forward-traveling wave.
    Raises:
        AssertionError: for gain media (n.real * n.imag < 0), or when the
        classification is ambiguous under the consistency checks below.
    """
    assert n.real * n.imag >= 0, ("For materials with gain, it's ambiguous which "
                                  "beam is incoming vs outgoing. See "
                                  "https://arxiv.org/abs/1603.02720 Appendix C.\n"
                                  "n: " + str(n) + " angle: " + str(theta))
    # NOTE(review): EPSILON is a module-level tolerance (presumably machine
    # epsilon) -- confirm its definition in the enclosing module.
    ncostheta = n * cos(theta)
    if abs(ncostheta.imag) > 100 * EPSILON:
        # Either evanescent decay or lossy medium. Either way, the one that
        # decays is the forward-moving wave
        answer = (ncostheta.imag > 0)
    else:
        # Forward is the one with positive Poynting vector
        # Poynting vector is Re[n cos(theta)] for s-polarization or
        # Re[n cos(theta*)] for p-polarization, but it turns out they're consistent
        # so I'll just assume s then check both below
        answer = (ncostheta.real > 0)
    # convert from numpy boolean to the normal Python boolean
    answer = bool(answer)
    # double-check the answer ... can't be too careful!
    error_string = ("It's not clear which beam is incoming vs outgoing. Weird"
                    " index maybe?\n"
                    "n: " + str(n) + "   angle: " + str(theta))
    if answer is True:
        assert ncostheta.imag > -100 * EPSILON, error_string
        assert ncostheta.real > -100 * EPSILON, error_string
        assert (n * cos(theta.conjugate())).real > -100 * EPSILON, error_string
    else:
        assert ncostheta.imag < 100 * EPSILON, error_string
        assert ncostheta.real < 100 * EPSILON, error_string
        assert (n * cos(theta.conjugate())).real < 100 * EPSILON, error_string
    return answer
|
9d90a84be42968eebb1dd89285019ddc10a2b140
| 3,641,944
|
import math
def fit_cubic1(points,rotate,properties=None):
    """This function attempts to fit a given set of points to a cubic polynomial line: y = a3*x^3 + a2*x^2 + a1*x + a0

    The points are first rotated by *rotate* degrees about Z, a weighted
    least-squares cubic is solved via the normal equations, and the fit is
    scored by error_residual1 (which receives the inverse rotation so it
    can map results back).
    """
    # Rotation (and its inverse) about Z by *rotate* degrees.
    r=mathutils.Matrix.Rotation(math.radians(rotate),4,'Z')
    rr=mathutils.Matrix.Rotation(math.radians(-rotate),4,'Z')
    # Weighted moment sums for the least-squares normal equations.
    Sxy = 0
    Sx = 0
    Sy = 0
    Sx2 = 0
    Sx2y = 0
    Sx3y = 0
    Sx3 = 0
    Sx4 = 0
    Sx5 = 0
    Sx6 = 0
    Sw = 0
    for p in points:
        # Each entry is {'point': Vector, 'weight': float}; rotate first.
        pr=p['point']*r
        x = pr.x
        y = pr.y
        Sxy = Sxy + x*y * p['weight']
        Sx = Sx + x * p['weight']
        Sy = Sy + y * p['weight']
        Sx2 = Sx2 + math.pow(x,2) * p['weight']
        Sx2y = Sx2y+ math.pow(x,2)*y * p['weight']
        Sx3y = Sx3y+ math.pow(x,3)*y * p['weight']
        Sx3 = Sx3 + math.pow(x,3) * p['weight']
        Sx4 = Sx4 + math.pow(x,4) * p['weight']
        Sx5 = Sx5 + math.pow(x,5) * p['weight']
        Sx6 = Sx6 + math.pow(x,6) * p['weight']
        Sw += p['weight']
    N = Sw
    # Augmented 4x5 normal-equation system; like_a_gauss presumably solves
    # it by Gaussian elimination (solution in the last column) -- confirm.
    A=[[N, Sx, Sx2,Sx3,Sy], [Sx, Sx2, Sx3,Sx4,Sxy], [Sx2, Sx3, Sx4, Sx5,Sx2y], [Sx3, Sx4, Sx5, Sx6,Sx3y]]
    xM=like_a_gauss(A)
    a0=xM[0][4]
    a1=xM[1][4]
    a2=xM[2][4]
    a3=xM[3][4]
    def line_func(x,a):
        # Evaluate the fitted cubic with coefficient vector a = [a0..a3].
        return a[0] + a[1]*x + a[2]*math.pow(x,2) + a[3]*math.pow(x,3)
    # Order the points along the rotated x axis before residual evaluation.
    points=sort_index1(points,r)
    return error_residual1(points,r,rr,properties,line_func,[a0,a1,a2,a3])
|
dea26481743546600bef54c58523746a638b63a5
| 3,641,946
|
def get_labelstats_df_list(fimage_list, flabel_list):
    """Loop over lists of image and label files and extract label statistics
    as one pandas.DataFrame (one row-block per image/label pair).

    Scalars are accepted in place of lists for either argument.

    Fix: ``DataFrame.append`` was removed in pandas 2.0; the frames are now
    collected and concatenated once with ``pd.concat``.
    """
    if np.ndim(fimage_list) == 0:
        fimage_list = [fimage_list]
    if np.ndim(flabel_list) == 0:
        flabel_list = [flabel_list]
    columns = ['imagefile', 'labelfile', 'label', 'mean', 'var', 'min', 'max',
               'median', 'count', 'sum', 'boundingbox', 'voxels']
    # Seed with an empty frame so the column set/order is stable even when
    # the input lists are empty.
    frames = [pd.DataFrame(columns=columns)]
    for fimage in fimage_list:
        for flabel in flabel_list:
            df = get_labelstats_df(fimage, flabel)
            df['imagefile'] = fimage
            df['labelfile'] = flabel
            frames.append(df)
    return pd.concat(frames)
|
e494212975641e8c9f9b2d786077e320d9096c02
| 3,641,948
|
def index(web):
    """Greet the caller by the ``name`` request parameter.

    ``web.request.params`` is a dictionary, pointing to falcon.Request
    directly.
    """
    params = web.request.params
    name = params["name"]
    return f"Hello {name}!\n"
|
b717ac60d42b8161ed27f7e4156d8a5a03aea803
| 3,641,949
|
def process_object(obj):
    """ Recursively process object loaded from json
    Lists are processed element-wise and scalars pass through unchanged.
    When a dict in appropriate(*) format is found, an object is built
    from it.
    (*) appropriate is defined in create_object function docstring.
    """
    if isinstance(obj, list):
        return [process_object(item) for item in obj]
    if isinstance(obj, dict):
        converted = {key: process_object(value) for key, value in obj.items()}
        if obj.get(ObjSpecification.AS_OBJECT, False):
            return create_object(converted)
        return converted
    return obj
|
f5410c6168d96eb153f08c824a98cf58fd80e1a0
| 3,641,950
|
def remove_role(principal, role):
    """Removes role from passed principal.
    **Parameters:**
    principal
        The principal (actor or group) from which the role is removed.
    role
        The role which is removed.
    **Returns:**
        True when a matching global role assignment was deleted,
        False when none existed.
    """
    # Only global assignments are considered: content_id/content_type are
    # None, so object-scoped role relations are left untouched.
    try:
        if isinstance(principal, Actor):
            ppr = PrincipalRoleRelation.objects.get(
                actor=principal, role=role, content_id=None, content_type=None)
        else:
            ppr = PrincipalRoleRelation.objects.get(
                group=principal, role=role, content_id=None, content_type=None)
    except PrincipalRoleRelation.DoesNotExist:
        return False
    else:
        ppr.delete()
    return True
|
78b27631ee80b42a2ee8759315ef00f490c0e86c
| 3,641,951
|
def set_var_input_validation(
    prompt="",
    predicate=lambda _: True,
    failure_description="Value is illegal",
):
    """Prompt repeatedly until the user enters a value that satisfies
    *predicate* and then confirms it.

    Vars:
    - prompt: message displayed when prompting for user input.
    - predicate: lambda function to verify a condition.
    - failure_description: message displayed when predicate's condition is not met.
    Returns:
    - The value entered by the user if predicate's condition is met and after confirmation by the user.
    - If the predicate fails failure_description is displayed
    - If literal_eval fails an error message containing the raised exception.
    """
    # Loop forever; the only exit is a validated, confirmed value.
    while True:
        try:
            # literal_eval parses the raw input as a Python literal
            # (int, str, list, ...), so it raises on malformed input.
            value = literal_eval(input(f"{Color.INFORMATION}{prompt}{Color.END}\n"))
            if predicate(value):
                # Ask for confirmation; anything but 1 re-prompts.
                a = literal_eval(
                    input(
                        f"{Color.INFORMATION}Is this correct: {value} ? enter 1 to confirm, 0 to retry{Color.END}\n"
                    )
                )
                if a == 1:
                    return value
            else:
                print(f"{Color.FAIL}{failure_description}{Color.END}")
        except Exception as e:
            # Broad catch is deliberate here: any parse error just re-prompts.
            print(f"{Color.FAIL}{e} was raised, try again{Color.END}")
|
40cad55c5d07a405f946e9583c69029eb2ee4e65
| 3,641,952
|
def mad(data, axis=None):
    """Mean absolute deviation of *data* about its mean, along *axis*."""
    deviations = np.abs(data - np.mean(data, axis))
    return np.mean(deviations, axis)
|
955763e5ee5d29e2b2b735d584d7a84b98affc23
| 3,641,953
|
from re import T
def spatial_2d_padding(x, padding=((1, 1), (1, 1)), data_format=None):
    """Pad the 2nd and 3rd dimensions of a 4D tensor
    with "padding[0]" and "padding[1]" (resp.) zeros left and right.

    NOTE(review): `T` must be the Theano tensor module for T.zeros /
    T.set_subtensor to work; the `from re import T` visible above would
    bind re's TEMPLATE flag instead -- confirm the real import in the
    original module.
    """
    assert len(padding) == 2
    assert len(padding[0]) == 2
    assert len(padding[1]) == 2
    top_pad, bottom_pad = padding[0]
    left_pad, right_pad = padding[1]
    if data_format is None:
        data_format = image_data_format()
    if data_format not in {'channels_first', 'channels_last'}:
        raise ValueError('Unknown data_format ' + str(data_format))
    input_shape = x.shape
    if data_format == 'channels_first':
        # (batch, channels, H + pads, W + pads)
        output_shape = (input_shape[0],
                        input_shape[1],
                        input_shape[2] + top_pad + bottom_pad,
                        input_shape[3] + left_pad + right_pad)
        output = T.zeros(output_shape)
        # Slices locating the original tensor inside the padded one.
        indices = (slice(None),
                   slice(None),
                   slice(top_pad, input_shape[2] + top_pad),
                   slice(left_pad, input_shape[3] + left_pad))
    else:
        # channels_last: (batch, H + pads, W + pads, channels)
        output_shape = (input_shape[0],
                        input_shape[1] + top_pad + bottom_pad,
                        input_shape[2] + left_pad + right_pad,
                        input_shape[3])
        output = T.zeros(output_shape)
        indices = (slice(None),
                   slice(top_pad, input_shape[1] + top_pad),
                   slice(left_pad, input_shape[2] + left_pad),
                   slice(None))
    # Copy x into the zero tensor at the computed offsets.
    y = T.set_subtensor(output[indices], x)
    y._keras_shape = output_shape
    return y
|
ba57f1f8462d7c3212379b9678364c5e2e2e6a5b
| 3,641,954
|
def doms_hit_pass_threshold(mc_hits, threshold, pass_k40):
    """Return True when at least *threshold* distinct DOMs were hit by the
    given Monte-Carlo hits. With no hits, fall back to bool(pass_k40); a
    threshold of 0 always passes."""
    if threshold == 0:
        return True
    if len(mc_hits) == 0:
        return bool(pass_k40)
    seen_doms = set()
    for hit in mc_hits:
        seen_doms.add(pmt_id_to_dom_id(hit.pmt_id))
        # Short-circuit as soon as enough distinct DOMs were seen.
        if len(seen_doms) >= threshold:
            return True
    return False
|
9f4157d202e2587232b13cfc9873400ab6ee6e5b
| 3,641,955
|
def _parseLocalVariables(line):
    """Accepts a single line in Emacs local variable declaration format and
    returns a dict of all the variables {name: value}.
    Raises ValueError if 'line' is in the wrong format.
    See http://www.gnu.org/software/emacs/manual/html_node/File-Variables.html
    """
    paren = '-*-'
    start = line.find(paren)
    end = line.rfind(paren)
    # A valid declaration needs two distinct '-*-' delimiters. Fix: the old
    # code added len(paren) to `start` *before* comparing to -1, so a
    # missing or single delimiter was never detected on the start side.
    if start == -1 or end == -1 or start == end:
        raise ValueError("%r not a valid local variable declaration" % (line,))
    items = line[start + len(paren):end].split(';')
    localVars = {}
    for item in items:
        if len(item.strip()) == 0:
            continue
        split = item.split(':')
        if len(split) != 2:
            raise ValueError("%r contains invalid declaration %r"
                             % (line, item))
        localVars[split[0].strip()] = split[1].strip()
    return localVars
|
39dc5130f47589e111e4b894cf293d446ac0eac0
| 3,641,957
|
from re import T
def NLL(mu, sigma, mixing, y):
    """Computes the mean of negative log likelihood for P(y|x)
    y = T.matrix('y') # (minibatch_size, output_size)
    mu = T.tensor3('mu') # (minibatch_size, output_size, n_components)
    sigma = T.matrix('sigma') # (minibatch_size, n_components)
    mixing = T.matrix('mixing') # (minibatch_size, n_components)
    """
    # NOTE(review): `T` is used here as if it were `theano.tensor`, but the
    # visible `from re import T` binds it to a regex flag — confirm the
    # intended theano import at module level.
    # Isotropic multivariate Gaussian: squared distances summed over the
    # output dimension, scaled by the per-component inverse variance.
    exponent = -0.5 * T.inv(sigma) * T.sum((y.dimshuffle(0,1,'x') - mu)**2, axis=1)
    normalizer = (2 * np.pi * sigma)
    # Per-component log joint: log mixing weight + Gaussian log density.
    exponent = exponent + T.log(mixing) - (y.shape[1]*.5)*T.log(normalizer)
    # Log-sum-exp trick for numerical stability across mixture components.
    max_exponent = T.max(exponent ,axis=1, keepdims=True)
    mod_exponent = exponent - max_exponent
    gauss_mix = T.sum(T.exp(mod_exponent),axis=1)
    log_gauss = max_exponent + T.log(gauss_mix)
    # Mean negative log likelihood over the minibatch.
    res = -T.mean(log_gauss)
    return res
|
60a27f48d404af860cdbeac9e017a3df5ebca450
| 3,641,958
|
from typing import Optional
import sqlite3
def get_non_subscribed_trainers(user) -> Optional[str]:
    """Return all trainers the given user is not subscribed to.

    :param user: user name (``u_name``) to look up subscriptions for
    :returns: list of DB rows (distinct trainer names, case-insensitively
        sorted), or the caught ``sqlite3.Error`` on failure.
        NOTE(review): the ``Optional[str]`` annotation does not match the
        actual return values — confirm the intended contract with callers.
    """
    conn = get_db()
    try:
        trainers = conn.execute("""SELECT distinct u_name FROM user, trainer
                                where t_userID = u_userID
                                and u_trainer = 1
                                and u_name NOT IN (
                                    SELECT u2.u_name FROM user u1, user u2, trainer, customer, subscription
                                    where c_userID = u1.u_userID
                                    and u1.u_name = ?
                                    and su_customerID = c_customerID
                                    and t_trainerID = su_trainerID
                                    and u2.u_userID = t_userID
                                )
                                Order by u_name COLLATE NOCASE""", (user,)).fetchall()
        return trainers
    except sqlite3.Error as error:
        print(error)
        return error
    finally:
        # Bug fix: the original only closed the connection on the success
        # path, leaking it whenever the query raised.
        close_db()
|
6c728e1bd805047e20224c45fab81f546e291d27
| 3,641,959
|
def __clean_datetime_value(datetime_string):
    """Normalize an ISO-8601 style datetime string.

    Replaces every 'T' (date/time separator) with a space and removes every
    'Z' (UTC designator).  ``None`` passes through unchanged; any other
    non-string input raises ``TypeError``.
    """
    if datetime_string is None:
        return datetime_string
    if not isinstance(datetime_string, str):
        raise TypeError("Expected datetime_string to be of type string (or None)")
    return datetime_string.replace("T", " ").replace("Z", "")
|
77afef31056365a47ea821de7a4979cb061920dc
| 3,641,960
|
def get_metric_by_name(metric: str, *args, **kwargs) -> Metric:
    """Returns metric using given `metric`, `args` and `kwargs`
    Args:
        metric (str): name of the metric
    Raises:
        ValueError: if `metric` is not a registered metric name
    Returns:
        Metric: requested metric as Metric
    """
    # `assert` is stripped when Python runs with -O, so it must not be used
    # for input validation; raise a real exception instead.
    if metric not in __metric_mapper__:
        raise ValueError("given metric {} is not found".format(metric))
    return __metric_mapper__[metric](*args, **kwargs)
|
b1ecb1fe1fad330570abcf9abd3f12abd2a18193
| 3,641,961
|
def Square(inputs, **kwargs):
    """Calculate the square of input.
    Parameters
    ----------
    inputs : Tensor
        The input tensor.
    Returns
    -------
    Tensor
        The square result.
    """
    CheckInputs(inputs, 1)
    # ParseArguments(locals()) snapshots the local names at this point
    # (`inputs` and `kwargs`), so no extra locals may be introduced before
    # this call without changing the operator arguments.
    arguments = ParseArguments(locals())
    output = Tensor.CreateOperator(nout=1, op_type='Square', **arguments)
    # Element-wise op: output shape mirrors the input shape when it is known.
    if inputs.shape is not None:
        output.shape = inputs.shape[:]
    return output
|
5869ab81460b0a3b56194ec9829bf6ec36716b9a
| 3,641,962
|
def render_table(data, col_width=3.0, row_height=0.625, font_size=14,
                 header_color='#40466e', row_colors=None, edge_color='w',
                 bbox=None, header_columns=0,
                 ax=None, **kwargs):
    """Render a pandas DataFrame as a matplotlib table figure.

    [Taken from ref: https://stackoverflow.com/questions/19726663/how-to-save-the-pandas-dataframe-series-data-as-a-figure]

    Parameters
    ----------
    data : pandas.DataFrame
        The frame to render.
    col_width : float, optional
        Column width, by default 3.0.
    row_height : float, optional
        Row height, by default 0.625.
    font_size : int, optional
        Font size, by default 14.
    header_color : str, optional
        Header cell color, by default '#40466e'.
    row_colors : list, optional
        Alternating row colors, by default ['#f1f1f2', 'w'].
    edge_color : str, optional
        Cell edge color, by default 'w'.
    bbox : list, optional
        Table bounding box, by default [0, 0, 1, 1].
    header_columns : int, optional
        Number of leading columns styled like the header, by default 0.
    ax : matplotlib axes, optional
        Axes to draw into; a new figure is created when None.

    Returns
    -------
    tuple
        (figure, axes) containing the rendered table.
    """
    # Bug fix: the original used mutable default arguments (lists) for
    # row_colors and bbox; use None sentinels that expand to the same values.
    if row_colors is None:
        row_colors = ['#f1f1f2', 'w']
    if bbox is None:
        bbox = [0, 0, 1, 1]
    if ax is None:
        # Size the figure to the table dimensions (+1 row for the header).
        size = (np.array(data.shape[::-1]) + np.array([0, 1])) * np.array([col_width, row_height])
        fig, ax = plt.subplots(figsize=size)
        ax.axis('off')
    mpl_table = ax.table(cellText=data.values, bbox=bbox, colLabels=data.columns, **kwargs)
    mpl_table.auto_set_font_size(False)
    mpl_table.set_fontsize(font_size)
    for k, cell in mpl_table._cells.items():
        cell.set_edgecolor(edge_color)
        if k[0] == 0 or k[1] < header_columns:
            # Header row (row 0) and leading header columns: bold white text.
            cell.set_text_props(weight='bold', color='w')
            cell.set_facecolor(header_color)
        else:
            # Alternate row striping for the data cells.
            cell.set_facecolor(row_colors[k[0] % len(row_colors)])
    return ax.get_figure(), ax
|
597d8732ca86896d02d0df30d2a0808b88f02873
| 3,641,964
|
def segment_image(class_colours, pixel_classes, height, width, bg_alpha=0, fg_alpha=255):
    """Render per-pixel class labels as an RGBA PIL image.

    Pixels with class 0 (background/unlabelled) receive ``bg_alpha``; all
    other pixels receive ``fg_alpha``.
    """
    colours = np.reshape(class_colours[pixel_classes], (height, width, 3)).astype("uint8")
    rgba = np.array(Image.fromarray(colours).convert("RGBA"))
    # Flatten to (n_pixels, 4) so alphas can be set per pixel class.
    flat = np.reshape(rgba, (height * width, 4))
    flat[np.where(pixel_classes == 0), 3] = bg_alpha
    flat[np.where(pixel_classes > 0), 3] = fg_alpha
    return Image.fromarray(np.reshape(flat, (height, width, 4)))
|
2dd1f70342cc101d3cf37d706ede8c55ded91629
| 3,641,965
|
def cross_replica_average(inputs,
                          num_shards=None,
                          num_shards_per_group=None,
                          physical_shape=None,
                          tile_shape=None,
                          use_spatial_partitioning=False):
    """Customized cross replica sum op.

    If `num_shards_per_group` is set (> 0), averages within replica groups
    (distributed batch norm); otherwise averages across all `num_shards`.
    """
    group_assignment = None
    # Bug fix: the original tested `num_shards_per_group > 0`, which raises
    # TypeError for the documented default of None, and later divided by
    # None.  Fall back to averaging over all shards when it is unset.
    if num_shards_per_group:
        if num_shards % num_shards_per_group != 0:
            raise ValueError(
                'num_shards: %d mod num_shards_per_group: %d, should be 0' %
                (num_shards, num_shards_per_group))
        num_groups = num_shards // num_shards_per_group
        if physical_shape is not None and tile_shape is not None:
            if use_spatial_partitioning:
                group_assignment = spatial_partitioning_group_assignment(
                    physical_shape, tile_shape, num_groups)
            else:
                group_assignment = normal_group_assignment(physical_shape, tile_shape,
                                                           num_groups)
        else:
            group_assignment = [[  # pylint: disable=g-complex-comprehension
                x for x in range(num_shards) if x // num_shards_per_group == y
            ] for y in range(num_groups)]
        divisor = num_shards_per_group
    else:
        # No grouping: group_assignment=None sums over all replicas.
        divisor = num_shards
    return tpu_ops.cross_replica_sum(inputs, group_assignment) / math_ops.cast(
        divisor, inputs.dtype)
|
5486eae34e2e25966343a5e115541de7c734ec98
| 3,641,966
|
def edge_naming(col_list, split_collections=True):
    """ This function normalize the naming of edges collections
    If split_collections is True an edge collection name will be
    generated between each listed collection in order.
    So if col_list = [A, B, C]
    result will be [A__B, B__C]
    :param col_list: ordered list of collection names
    :return: an array of edge collection names
    """
    result = []
    name = ""
    for v in col_list:
        if name == "":
            name = v
        else:
            name = name + EDGE_MARKER + v
            # Bug fix: only record a name once a *pair* has been joined.
            # The original appended on the first iteration too, producing
            # ["A", "A__B", "B__C"] instead of the documented [A__B, B__C].
            if split_collections:
                result.append(name)
                name = v
    # Single collection (or split_collections=False): return the chain built
    # so far — preserves ["A"] for [A] and [""] for an empty input.
    if len(result) == 0:
        result.append(name)
    return result
|
47e0c7253a5a9f1c0df9488c54cca38b2264539f
| 3,641,968
|
def interactive_visual_difference_from_threshold_by_day(ds_ext):
    """
    Returns: 1) xarray Dataset with three variables, for each day in the dataset
                i) Highest value difference from the threshold, across the area
                ii) Lowest value difference from the threshold, across the area
                iii) Average difference from the threshold, from all pixels in the area
           : 2) bokeh pane for interactive visualization.
    """
    # The dataset's `result_type` attribute ('max' or other) selects which
    # difference variable to aggregate.
    result_type = ds_ext.attrs['result_type']
    # NOTE(review): `temp_var` is computed but never used below — confirm
    # whether it was meant to be plotted too.
    temp_var = 'tasmax' if result_type=='max' else 'tasmin'
    diff_var = 'above_threshold' if result_type=='max' else 'below_threshold'
    # Spatial reductions over the area, one value per time step.
    threshold_diff_high = ds_ext[diff_var].max(dim=['lat','lon'], skipna=True)
    threshold_diff_low = ds_ext[diff_var].min(dim=['lat','lon'], skipna=True)
    threshold_diff_avg = ds_ext[diff_var].mean(dim=['lat','lon'], skipna=True)
    # Convert the (presumably cftime) calendar index to a pandas
    # DatetimeIndex so hvplot can render it — TODO confirm calendar type.
    dt_index = threshold_diff_high.indexes['time'].to_datetimeindex()
    difference_from_threshold = xr.Dataset(data_vars = {'threshold_diff_high':(['time'],threshold_diff_high.to_numpy())
                                                        , 'threshold_diff_low':(['time'],threshold_diff_low.to_numpy())
                                                        , 'threshold_diff_avg':(['time'],threshold_diff_avg.to_numpy())}
                                           , coords=dict(time=dt_index))
    difference_from_threshold_plot = difference_from_threshold.hvplot(y=['threshold_diff_low','threshold_diff_high','threshold_diff_avg']
                                                                      , value_label='difference_from_threshold'
                                                                      , alpha=0.7)
    pane = pn.panel(difference_from_threshold_plot)
    return difference_from_threshold, pane
|
2386f3564a105a44ff3cd12979f52e296e8293ac
| 3,641,969
|
def _parse_compression_method(data):
    """Parses the value of "method" extension parameter."""
    # The "method" parameter value reuses the shared extension-list parser
    # (presumably Sec-WebSocket-Extensions style grammar — confirm against
    # `common.parse_extensions`).
    return common.parse_extensions(data)
|
71901780aec98d8818a5296aa7b186c79b0f0e7b
| 3,641,970
|
def my_distance(drij):
    """Return the Euclidean length of displacement vector(s) ``drij``.

    Assumes ``drij`` already accounts for periodic boundary conditions.

    Args:
        drij (np.array): vector(s) of length 3; for a 2-D array the vector
            components run along axis 0.
    Returns:
        float: length (distance) of vector(s)
    """
    # 2-norm along the component axis (axis 0), matching np.linalg.norm.
    return np.sqrt(np.sum(np.square(drij), axis=0))
|
45a29d7335e72dac68ddefcbf32b55b57e87b980
| 3,641,971
|
from typing import List
def show(list_id):
    """Get single list via id.

    Renders HTML by default; serves JSON when the request path contains
    '/json'.
    """
    # First List row whose id matches; None if there is no such list.
    # NOTE(review): a missing id makes `data.as_dict()` fail on the JSON
    # path — confirm callers guarantee existence.
    data = db_session.query(List).filter(List.id == list_id).first()
    if '/json' in request.path:
        return jsonify(data.as_dict())
    else:
        return render_template('list/show.html', list=data)
|
9ed8de0dfc5621546dafe447fa83d33c00fa6b7e
| 3,641,972
|
def extract_smaps(kspace, low_freq_percentage=8, background_thresh=4e-6):
    """Extract raw sensitivity maps for kspaces
    This function will first select a low frequency region in all the kspaces,
    then Fourier invert it, and finally perform a normalisation by the root
    sum-of-square.
    kspace has to be of shape: nslices x ncoils x height x width
    Arguments:
        kspace (tf.Tensor): the kspace whose sensitivity maps you want extracted.
        low_freq_percentage (int): the low frequency region to consider for
            sensitivity maps extraction, given as a percentage of the width of
            the kspace. In fastMRI, it's 8 for an acceleration factor of 4, and
            4 for an acceleration factor of 8. Defaults to 8.
        background_thresh (float): unused for now, will later allow to have
            thresholded sensitivity maps.
    Returns:
        tf.Tensor: extracted raw sensitivity maps.
    """
    # Guard against empty batches: with 0 slices the scatter below would be
    # degenerate, so return an all-zero map of the same shape instead.
    n_slices = tf.shape(kspace)[0]
    if n_slices > 0:
        # Size (in pixels) of the central low-frequency window, per axis.
        n_low_freq = tf.cast(tf.shape(kspace)[-2:] * low_freq_percentage / 100, tf.int32)
        center_dimension = tf.cast(tf.shape(kspace)[-2:] / 2, tf.int32)
        low_freq_lower_locations = center_dimension - tf.cast(n_low_freq / 2, tf.int32)
        low_freq_upper_locations = center_dimension + tf.cast(n_low_freq / 2, tf.int32)
        ###
        # NOTE: the following stands for in numpy:
        # low_freq_mask = np.zeros_like(kspace)
        # low_freq_mask[
        #     ...,
        #     low_freq_lower_locations[0]:low_freq_upper_locations[0],
        #     low_freq_lower_locations[1]:low_freq_upper_locations[1]
        # ] = 1
        x_range = tf.range(low_freq_lower_locations[0], low_freq_upper_locations[0])
        y_range = tf.range(low_freq_lower_locations[1], low_freq_upper_locations[1])
        X_range, Y_range = tf.meshgrid(x_range, y_range)
        X_range = tf.reshape(X_range, (-1,))
        Y_range = tf.reshape(Y_range, (-1,))
        low_freq_mask_indices = tf.stack([X_range, Y_range], axis=-1)
        # we have to transpose because only the first dimension can be indexed in
        # scatter_nd
        scatter_nd_perm = [2, 3, 0, 1]
        low_freq_mask = tf.scatter_nd(
            indices=low_freq_mask_indices,
            updates=tf.ones([
                tf.size(X_range),
                tf.shape(kspace)[0],
                tf.shape(kspace)[1]],
            ),
            shape=[tf.shape(kspace)[i] for i in scatter_nd_perm],
        )
        # scatter_nd_perm is its own inverse here, restoring NCHW order.
        low_freq_mask = tf.transpose(low_freq_mask, perm=scatter_nd_perm)
        ###
        low_freq_kspace = kspace * tf.cast(low_freq_mask, kspace.dtype)
        coil_image_low_freq = tf_ortho_ifft2d(low_freq_kspace)
        # no need to norm this since they all have the same norm
        # Root sum-of-squares over the coil axis (axis 1).
        low_freq_rss = tf.norm(coil_image_low_freq, axis=1)
        # Broadcast the RSS back over the coil axis for normalisation.
        coil_smap = coil_image_low_freq / low_freq_rss[:, None]
        # for now we do not perform background removal based on low_freq_rss
        # could be done with 1D k-means or fixed background_thresh, with tf.where
    else:
        coil_smap = tf.zeros_like(kspace, dtype=kspace.dtype)
    return coil_smap
|
e40ec21c8e353e6352b65f35710d83857cc6c124
| 3,641,974
|
import io
def tiff_to_mat_conversion(ms_path, pan_path, save_path, ms_initial_point=(0, 0), ms_final_point=(0, 0), ratio=4):
    """
    Generation of *.mat file, starting from the native GeoTiFF extension.
    Also, a crop tool is provided to analyze only small parts of the image.
    Parameters
    ----------
    ms_path : str
        The path of the Multi-Spectral image
    pan_path : str
        The path of the Panchromatic file
    save_path : str
        The destination mat file
    ms_initial_point : tuple
        Upper left point for image cropping. The point must be expressed in pixel coordinates,
        as (x,y), where (0,0) is precisely the point at the top left.
    ms_final_point : tuple
        Bottom right point for image cropping. The point must be expressed in pixel coordinates,
        as (x,y), where (0,0) is precisely the point at the top left.
    ratio : int
        The resolution scale which elapses between MS and PAN.
    Return
    ------
    I_in : Dictionary
        The dictionary, composed of MS and Pan images.
    """
    # Bug fix: the original called `io.savemat`, but with the standard
    # library's `import io` in scope that is an AttributeError at runtime;
    # savemat lives in scipy.io.
    from scipy.io import savemat
    ms = gdal.Open(ms_path)
    ms = ms.ReadAsArray()
    # GDAL returns bands-first; move to height x width x bands.
    ms = np.moveaxis(ms, 0, -1)
    pan = gdal.Open(pan_path)
    pan = pan.ReadAsArray()
    if ms_final_point[0] != 0 and ms_final_point[1] != 0:
        # Crop MS in MS pixel coords and PAN in the `ratio`-scaled coords.
        ms = ms[ms_initial_point[1]:ms_final_point[1], ms_initial_point[0]:ms_final_point[0], :]
        pan = pan[ms_initial_point[1] * ratio:ms_final_point[1] * ratio,
                  ms_initial_point[0] * ratio:ms_final_point[0] * ratio]
    savemat(save_path, {'I_MS_LR': ms, 'I_PAN': pan})
    I_in = {'I_MS_LR': ms, 'I_PAN': pan}
    return I_in
|
c0e1a03f82b97ecc8a3e33a1b84624b53c3efae7
| 3,641,975
|
def is_member(musicians, musician_name):
    """Return true if named musician is in musician list;
    otherwise return false.
    Parameters:
        musicians (list): list of strings formatted "<name>, <instrument>"
        musician_name (str): musician name (matched case-insensitively)
    Returns:
        bool: True if match is made; otherwise False.
    """
    target = musician_name.lower()
    # The name is the first comma-separated field of each entry.
    return any(entry.split(', ')[0].lower() == target for entry in musicians)
|
6ef5b9bbccb17d9b97a85e3af7789e059829184b
| 3,641,976
|
def init(command):
    """
    Parse the initial rover position command from NASA.

    The command must match "<digit> <digit> <letter>", e.g. "1 2 N".
    On a match the module-level ``position`` dict is updated and returned;
    otherwise False is returned.
    """
    # Raw string avoids invalid-escape-sequence warnings; pattern unchanged.
    if re.match(r'^[0-9]\s[0-9]\s[a-zA-Z]$', command):
        pos = command.split(" ")
        position['x'] = pos[0]
        position['y'] = pos[1]
        position['heading'] = pos[2]
        # Bug fix: original used the Python 2 print statement, a syntax
        # error under Python 3 (which the rest of this file targets).
        print(position)
        return position
    return False
|
282bb4bababeb24e0fe8cb1f6f4a57d36b2339dd
| 3,641,978
|
def add_small_gap_multiply(original_wf, gap_cutoff, density_multiplier, fw_name_constraint=None):
    """
    In all FWs with specified name constraints, add a 'small_gap_multiply' parameter that
    multiplies the k-mesh density of compounds with gap < gap_cutoff by density multiplier.
    Useful for increasing the k-point mesh for metallic or small gap systems.
    Note that this powerup only works on FireWorks with the appropriate WriteVasp* tasks that
    accept the small_gap_multiply argument...
    Args:
        original_wf (Workflow)
        gap_cutoff (float): Only multiply k-points for materials with gap < gap_cutoff (eV)
        density_multiplier (float): Multiply k-point density by this amount
        fw_name_constraint (str): Only apply changes to FWs where fw_name contains this substring.
    Returns:
        Workflow
    """
    matches = get_fws_and_tasks(original_wf,
                                fw_name_constraint=fw_name_constraint,
                                task_name_constraint="WriteVasp")
    for fw_idx, task_idx in matches:
        task = original_wf.fws[fw_idx].tasks[task_idx]
        task["small_gap_multiply"] = [gap_cutoff, density_multiplier]
    return original_wf
|
4573ee33b1be21adfcca1e8be3337b1df51ab737
| 3,641,979
|
def _get_backend(config_backend):
    """Extract the backend class from the command line arguments.

    Args:
        config_backend (str): one of 'gatttool', 'bluepy', 'pygatt'.
    Returns:
        The backend class matching the given name.
    Raises:
        ValueError: for an unknown backend name.  (Narrower than the
            original bare ``Exception``; existing ``except Exception``
            handlers still catch it.)
    """
    if config_backend == 'gatttool':
        return GatttoolBackend
    if config_backend == 'bluepy':
        return BluepyBackend
    if config_backend == 'pygatt':
        return PygattBackend
    raise ValueError('unknown backend: {}'.format(config_backend))
|
86374b3c19cbb3fa26434c18720288edf1c4fbe8
| 3,641,980
|
from time import time
def kern_CUDA_sparse(nsteps,
                     dX,
                     rho_inv,
                     context,
                     phi,
                     grid_idcs,
                     mu_egrid=None,
                     mu_dEdX=None,
                     mu_lidx_nsp=None,
                     prog_bar=None):
    """`NVIDIA CUDA cuSPARSE <https://developer.nvidia.com/cusparse>`_ implementation
    of forward-euler integration.
    Function requires a working :mod:`accelerate` installation.
    Args:
      nsteps (int): number of integration steps
      dX (numpy.array[nsteps]): vector of step-sizes :math:`\\Delta X_i` in g/cm**2
      rho_inv (numpy.array[nsteps]): vector of density values :math:`\\frac{1}{\\rho(X_i)}`
      int_m (numpy.array): interaction matrix :eq:`int_matrix` in dense or sparse representation
      dec_m (numpy.array): decay matrix :eq:`dec_matrix` in dense or sparse representation
      phi (numpy.array): initial state vector :math:`\\Phi(X_0)`
      prog_bar (object,optional): handle to :class:`ProgressBar` object
    Returns:
      numpy.array: state vector :math:`\\Phi(X_{nsteps})` after integration
    """
    c = context
    c.set_phi(phi)
    enmuloss = config['enable_muon_energy_loss']
    de = mu_egrid.size
    mu_egrid = mu_egrid.astype(c.fl_pr)
    muloss_min_step = config['muon_energy_loss_min_step']
    lidx, nmuspec = mu_lidx_nsp
    # Accumulate at least a few g/cm2 for energy loss steps
    # to avoid numerical errors
    dXaccum = 0.
    grid_step = 0
    grid_sol = []
    start = time()
    # Bug fix: xrange/print-statement were Python 2 syntax; the rest of this
    # file targets Python 3 (f-strings are used elsewhere).
    for step in range(nsteps):
        if prog_bar and (step % 5 == 0):
            prog_bar.update(step)
        c.do_step(rho_inv[step], dX[step])
        dXaccum += dX[step]
        if enmuloss and (dXaccum > muloss_min_step or step == nsteps - 1):
            # Download current solution vector to host
            phc = c.get_phi()
            for nsp in range(nmuspec):
                # Shift each muon species spectrum by the accumulated dE/dX
                # loss via interpolation on the energy grid.
                phc[lidx + de * nsp:lidx + de * (nsp + 1)] = np.interp(
                    mu_egrid, mu_egrid + mu_dEdX * dXaccum,
                    phc[lidx + de * nsp:lidx + de * (nsp + 1)])
            # Upload changed vector back..
            c.set_phi(phc)
            dXaccum = 0.
        if (grid_idcs and grid_step < len(grid_idcs) and
                grid_idcs[grid_step] == step):
            grid_sol.append(c.get_phi())
            grid_step += 1
    if dbg:
        print("Performance: {0:6.2f}ms/iteration".format(
            1e3 * (time() - start) / float(nsteps)))
    return c.get_phi(), grid_sol
|
74b8d93f867fed536b3ba1f1f4f0fc6e33e3efe8
| 3,641,981
|
def _to_sequence(x):
    """Reshape a batch of images into GPT2-style sequences.

    Each image is flattened to a 1-D sequence and the sequence axis is
    moved first: output shape is [seq_len, batch].
    """
    batch_size = x.shape[0]
    flat = x.view(batch_size, -1)              # [batch, seq_len]
    return flat.transpose(0, 1).contiguous()   # [seq_len, batch]
|
bb3b0bb478c924b520bf7bf991a028cf8aaea25f
| 3,641,982
|
def read_wav_kaldi(wav_file_path: str) -> WaveData:
    """Read a wave file into a Kaldi-readable WaveData object.

    Args:
        wav_file_path: Path to a .wav file.
    Returns:
        A Kaldi-readable WaveData object.
    """
    # mmap=False (second positional arg): load samples as a plain ndarray
    # rather than a memmap before handing them to the converter.
    sample_rate, samples = wavfile.read(wav_file_path, False)
    return read_wav_kaldi_internal(samples, sample_rate)
|
50b2f5a848d387d4b1ca2764b7d30178d7ec5e28
| 3,641,983
|
def _testCheckSums(tableDirectory):
    """
    >>> data = "0" * 44
    >>> checkSum = calcTableChecksum("test", data)
    >>> test = [
    ...     dict(data=data, checkSum=checkSum, tag="test")
    ... ]
    >>> bool(_testCheckSums(test))
    False
    >>> test = [
    ...     dict(data=data, checkSum=checkSum+1, tag="test")
    ... ]
    >>> bool(_testCheckSums(test))
    True
    """
    # Recompute each table's checksum and report every mismatch.
    errors = []
    for entry in tableDirectory:
        tag = entry["tag"]
        expected = calcTableChecksum(tag, entry["data"])
        if entry["checkSum"] != expected:
            errors.append("Invalid checksum for the %s table." % tag)
    return errors
|
192785ca352eec4686e07f2f103283f6499b656f
| 3,641,984
|
def read_img(path):
    """
    Read an image and convert it to an adjacency (affinity) graph.
    """
    # For colour photos, use only one of the colour channels.
    im = sp.misc.imread(path)[:, :, 2]
    im = im / 255.
    # If this runs too slowly, shrink the image with the line below:
    # im = sp.misc.imresize(im, 0.10) / 255.
    # Build the pixel gradient graph (differences between neighbouring pixels).
    graph = image.img_to_graph(im)
    beta = 20
    # Turn gradients into adjacency weights (Gaussian affinity kernel).
    graph.data = np.exp(-beta * graph.data / graph.data.std())
    return im, graph
|
41e4d000f9a70a6b6e787c08bcced4083178da11
| 3,641,985
|
def createmarker(name=None, source='default', mtype=None,
                 size=None, color=None, priority=None,
                 viewport=None, worldcoordinate=None,
                 x=None, y=None, projection=None):
    """%s
    :param name: Name of created object
    :type name: `str`_
    :param source: A marker, or string name of a marker
    :type source: `str`_
    :param mtype: Specifies the type of marker, i.e. "dot", "circle"
    :type mtype: `str`_
    :param size:
    :type size: `int`_
    :param color: A color name from the `X11 Color Names list <https://en.wikipedia.org/wiki/X11_color_names>`_,
        or an integer value from 0-255, or an RGB/RGBA tuple/list (e.g. (0,100,0), (100,100,0,50))
    :type color: `str`_ or int
    :param priority: The layer on which the marker will be drawn.
    :type priority: `int`_
    :param viewport: 4 floats between 0 and 1 which specify the area that X/Y values are mapped to inside of the canvas.
    :type viewport: `list`_
    :param worldcoordinate: List of 4 floats (xmin, xmax, ymin, ymax)
    :type worldcoordinate: `list`_
    :param x: List of lists of x coordinates. Values must be between worldcoordinate[0] and worldcoordinate[1].
    :type x: `list`_
    :param y: List of lists of y coordinates. Values must be between worldcoordinate[2] and worldcoordinate[3].
    :type y: `list`_
    :returns: A secondary marker method
    :rtype: vcs.marker.Tm
    """
    # NOTE(review): the leading "%s" in the docstring looks like a template
    # placeholder filled in by %-formatting elsewhere in the package —
    # confirm before removing it.
    # Validate/resolve the name and source template, then create the marker.
    name, source = check_name_source(name, source, 'marker')
    mrk = marker.Tm(name, source)
    # Each attribute is assigned only when explicitly provided, so any
    # attribute left as None inherits from the source marker.
    if (mtype is not None):
        mrk.type = mtype
    if (size is not None):
        mrk.size = size
    if (color is not None):
        mrk.color = color
    if (priority is not None):
        mrk.priority = priority
    if (viewport is not None):
        mrk.viewport = viewport
    if (worldcoordinate is not None):
        mrk.worldcoordinate = worldcoordinate
    if (x is not None):
        mrk.x = x
    if (y is not None):
        mrk.y = y
    if (projection is not None):
        mrk.projection = projection
    return mrk
|
7afaec1d24aad89b85974c509cf7c0ed165f733b
| 3,641,986
|
from typing import List
from typing import Optional
from typing import Set
from typing import Callable
def get_parser(disable: List[str] = None,
               lang: str = 'en',
               merge_terms: Optional[Set] = None,
               max_sent_len: Optional[int] = None) -> Callable:
    """spaCy clinical text parser.

    Parameters
    ----------
    disable
        Pipeline components to disable; defaults to
        ["ner", "parser", "tagger", "lemmatizer"] so only tokenization and
        the custom sentence splitter run.
    lang
        spaCy model/language name to load.
    merge_terms
        Terms kept together by the sentence-boundary rules.
    max_sent_len
        Hard cap on sentence length for the splitter.

    Returns
    -------
    The loaded spaCy pipeline (callable on text).
    """
    disable = ["ner", "parser", "tagger", "lemmatizer"] if not disable \
        else disable
    # Bug fix: `{}` is an empty dict, not a set, despite the Optional[Set]
    # annotation; use a real set as the default.
    merge_terms = set() if not merge_terms else merge_terms
    nlp = spacy.load(lang, disable=disable)
    nlp.tokenizer = ct_tokenizer(nlp)
    # Custom sentence-boundary detection with the merge/length constraints.
    sbd_func = partial(ct_sbd_rules,
                       merge_terms=merge_terms,
                       max_sent_len=max_sent_len)
    sbd = SentenceSegmenter(nlp.vocab, strategy=sbd_func)
    nlp.add_pipe(sbd)
    return nlp
|
175c4ea51417dc02b4a7f6f5a0c512464bd252c2
| 3,641,987
|
def block_device_mapping_get_all_by_instance(context, instance_uuid,
                                             use_slave=False):
    """Get all block device mapping belonging to an instance.

    :param context: request context
    :param instance_uuid: UUID of the instance to look up
    :param use_slave: presumably allows the read to go to a DB replica —
        confirm against the IMPL backend's semantics.
    """
    # Thin dispatch to the active database backend implementation (IMPL).
    return IMPL.block_device_mapping_get_all_by_instance(context,
                                                         instance_uuid,
                                                         use_slave)
|
16fc00068ad87d76831044d626a88960a0278817
| 3,641,988
|
def add_rain(img, slant, drop_length, drop_width, drop_color, blur_value, brightness_coefficient, rain_drops):
    """Draw rain streaks onto an image, then blur and darken it.

    From https://github.com/UjjwalSaxena/Automold--Road-Augmentation-Library
    Args:
        img (np.uint8): RGB image (uint8, or float32 in [0, 1]).
        slant (int): horizontal offset applied to each drop's end point.
        drop_length: vertical length of each drop, in pixels.
        drop_width: line width of each drop.
        drop_color: RGB color of the drops.
        blur_value (int): rainy view are blurry
        brightness_coefficient (float): rainy days are usually shady
        rain_drops: iterable of (x0, y0) start points for the drops.
    Returns:
        The augmented image, same dtype as the input.
    """
    non_rgb_warning(img)
    input_dtype = img.dtype
    needs_float = False
    if input_dtype == np.float32:
        img = from_float(img, dtype=np.dtype("uint8"))
        needs_float = True
    elif input_dtype not in (np.uint8, np.float32):
        # Bug fix: message previously said "RandomSnow" (copy-paste from the
        # snow augmentation).
        raise ValueError("Unexpected dtype {} for RandomRain augmentation".format(input_dtype))
    image = img.copy()
    for (rain_drop_x0, rain_drop_y0) in rain_drops:
        rain_drop_x1 = rain_drop_x0 + slant
        rain_drop_y1 = rain_drop_y0 + drop_length
        cv2.line(image, (rain_drop_x0, rain_drop_y0), (rain_drop_x1, rain_drop_y1), drop_color, drop_width)
    image = cv2.blur(image, (blur_value, blur_value))  # rainy view are blurry
    # Darken by scaling lightness in HLS space.
    image_hls = cv2.cvtColor(image, cv2.COLOR_RGB2HLS).astype(np.float32)
    image_hls[:, :, 1] *= brightness_coefficient
    image_rgb = cv2.cvtColor(image_hls.astype(np.uint8), cv2.COLOR_HLS2RGB)
    if needs_float:
        image_rgb = to_float(image_rgb, max_value=255)
    return image_rgb
|
f864a46fc13e955baade855ae454249e3a514a03
| 3,641,989
|
def vrctst_tml(file_name):
    """ adds vrctst_tml extension, if missing
    :param file_name: name of file
    :type file_name: str
    :returns: file with extension added
    :rtype: str
    """
    # Delegates to the shared helper; Extension.VRC_TML holds the suffix.
    return _add_extension(file_name, Extension.VRC_TML)
|
92a084fc19c39c0797d573e01c90da28c0e1c11c
| 3,641,990
|
def get_pixel_coords(x, y, xres, yres, xmin, ymax):
    """
    Translate x, y map coordinates to raster cols, rows.
    Example:
        col, row = get_pixel_coords(x, y, geotransform[1],
            geotransform[-1], geotransform[0], geotransform[3])
    Parameters
    ----------
    x : float, numpy.ndarray
        X coordinates.
    y : float, numpy.ndarray
        Y coordinates.
    xres : float
        X resolution (pixel width).
    yres : float
        Y resolution (pixel height; typically negative for north-up rasters).
    xmin : float
        X coordinate of the raster origin (left edge).
    ymax : float
        Y coordinate of the raster origin (top edge).
    Returns
    -------
    col : int, numpy.ndarray
        Column coordinates.
    row : int, numpy.ndarray
        Row coordinates.
    """
    # Offsets from the raster origin, rounded to the nearest pixel index.
    cols = np.around((x - xmin) / xres).astype(int)
    rows = np.around((y - ymax) / yres).astype(int)
    return cols, rows
|
ccf38618de9d24279ab4df6ba016804c6da926f7
| 3,641,991
|
import re
def _boundary_of_alternatives_indices(pattern):
    """
    Determines the location of a set of alternatives in a glob pattern.
    Alternatives are defined by a matching set of non-bracketed parentheses.
    :param pattern: Glob pattern with wildcards.
    :return:        Indices of the innermost set of matching non-bracketed
                    parentheses in a tuple. The Index of a missing parenthesis
                    will be passed as None.
    """
    # Taking the leftmost closing parenthesis and the rightmost opening
    # parenthesis left of it ensures that the parentheses belong together and
    # the pattern is parsed correctly from the most nested section outwards.
    end_pos = None
    for match in re.finditer('\\)', pattern):
        if not _position_is_bracketed(pattern, match.start()):
            end_pos = match.start()
            break  # Break to get leftmost.
    start_pos = None
    # Only the text left of the closing parenthesis is scanned for '(';
    # bracket checks still use the full pattern since positions are absolute.
    for match in re.finditer('\\(', pattern[:end_pos]):
        if not _position_is_bracketed(pattern, match.start()):
            start_pos = match.end()
            # No break to get rightmost.
    # start_pos points just AFTER '(' (match.end()); end_pos points AT ')'
    # (match.start()), so pattern[start_pos:end_pos] is the inner text.
    return start_pos, end_pos
|
707a4a02a362019db63277b01955bb54d31e51e7
| 3,641,992
|
def sim_DA_from_timestamps2_p2_2states(timestamps, dt_ref, k_D, R0, R_mean,
                                       R_sigma, tau_relax, k_s, rg,
                                       chunk_size=1000, alpha=0.05, ndt=10):
    """
    2-states recoloring using CDF in dt and with random number caching

    Simulates donor/acceptor photon emission for a two-state system whose
    D-A distance follows an Ornstein-Uhlenbeck process per state.
    Returns (A_ph, R_ph, T_ph, S_ph): per-photon acceptor flags, D-A
    distances at emission, nanotimes, and state labels.
    NOTE(review): `exp` and `ou_single_step_cy` are defined elsewhere in
    the module (presumably math.exp and a compiled OU stepper) — confirm.
    """
    # Per-state integration step; shrink so dt << tau_relax for OU accuracy.
    dt = np.array([dt_ref] * 2, dtype=np.float64)
    for state in [0, 1]:
        if tau_relax[state] < ndt * dt[state]:
            dt[state] = tau_relax[state] / ndt
            print(f'WARNING: Reducing dt[{state}] to {dt[state]:g} '
                  f'[tau_relax[{state}] = {tau_relax[state]}]')
    # Array flagging photons as A (1) or D (0) emitted
    A_ph = np.zeros(timestamps.size, dtype=np.uint8)
    # Instantaneous D-A distance at D de-excitation time
    R_ph = np.zeros(timestamps.size, dtype=np.float64)
    # Time of D de-excitation relative to the last timestamp
    T_ph = np.zeros(timestamps.size, dtype=np.float64)
    # State for each photon
    S_ph = np.zeros(timestamps.size, dtype=np.uint8)
    # Equilibrium occupation probabilities of the two states.
    peq = [k_s[1] / (k_s[0] + k_s[1]),
           k_s[0] / (k_s[0] + k_s[1])]
    k_s_sum = np.sum(k_s)
    t0 = 0
    nanotime = 0
    state = 0  # the two states are 0 and 1
    R = rg.randn() * R_sigma[state] + R_mean[state]
    iN = chunk_size - 1  # value to get the first chunk of random numbers
    for iph, t in enumerate(timestamps):
        # each cycle starts with a new photon timestamp `t`
        # excitation time is `t`, emission time is `t + nanotime`
        delta_t0 = t - t0
        delta_t = delta_t0 - nanotime
        if delta_t < 0:
            # avoid negative delta_t possible when when two photons have
            # the same macrotime
            delta_t = 0
            t = t0
        # Probability of remaining in the current state after delta_t0
        # (two-state master-equation relaxation toward peq).
        p_state = (1 - peq[state]) * np.exp(-(delta_t0 * k_s_sum)) + peq[state]
        u = rg.rand()
        #print(f'iph={iph}, state={state}, p_state={p_state}, u={u}, delta_t0={delta_t0}')
        # Inversion of u is for compatibility with N-state version
        if state == 1:
            u = 1 - u
        if p_state <= u:
            #print('   * state change')
            state = 0 if state == 1 else 1
            # Re-draw R from the new state's equilibrium distribution.
            R = rg.randn() * R_sigma[state] + R_mean[state]
        # Compute the D-A distance at the "excitation time"
        # (random numbers are consumed from cached chunks of `chunk_size`).
        iN += 1
        if iN == chunk_size:
            Na = memoryview(rg.randn(chunk_size))
            Pa = memoryview(rg.rand(chunk_size))
            iN = 0
        N = Na[iN]
        p = Pa[iN]
        R = ou_single_step_cy(R, delta_t, N, R_mean[state], R_sigma[state],
                              tau_relax[state])
        nanotime = 0
        # loop through D-A diffusion steps with a fixed time-step dt
        # until D de-excitation by photon emission or energy transfer to A
        while True:
            k_ET = k_D * (R0 / R)**6
            k_emission = k_ET + k_D
            d_prob_ph_em = k_emission * dt[state]  # prob. of emission in dt
            if d_prob_ph_em > alpha:
                # Exact CDF form when the linearized probability is large.
                d_prob_ph_em = 1 - exp(-d_prob_ph_em)
            if d_prob_ph_em >= p:
                break    # break out of the loop when the photon is emitted
            nanotime += dt[state]
            iN += 1
            if iN == chunk_size:
                Na = memoryview(rg.randn(chunk_size))
                Pa = memoryview(rg.rand(chunk_size))
                iN = 0
            N = Na[iN]
            p = Pa[iN]
            R = ou_single_step_cy(R, dt[state], N, R_mean[state], R_sigma[state],
                                  tau_relax[state])
        # photon emitted, let's decide if it is from D or A
        p_DA = p / d_prob_ph_em   # equivalent to rand(), but faster
        prob_A_em = k_ET / k_emission
        if prob_A_em >= p_DA:
            A_ph[iph] = 1
        # time of D de-excitation by photon emission or energy transfer to A
        t0 = t
        # save D-A distance at emission time
        R_ph[iph] = R
        # save time of emission relative to the excitation time `t`
        T_ph[iph] = nanotime
        # Save state for current photon
        S_ph[iph] = state
    return A_ph, R_ph, T_ph, S_ph
|
c268e49d7fc960c3f684d77fcbb4880be446a5b0
| 3,641,995
|
def get_cross_track_error(data, rate, velocity):
    """Simulate the cross-track error (in nautical miles) of an aircraft
    whose heading is perturbed by the given gyro rotation-rate signal.

    The aircraft flies a nominally straight trajectory and updates its
    heading by Omega * dt at each sample; the paraxial approximation
    sin(theta) = theta is used for speed.

    NOTE(review): the original docstring said a single float is returned;
    the code returns the full cross-track time series — confirm the
    intended contract.

    Parameters
    ----------
    data: ndarray.float
        An array of rotation rates, in deg/h
    rate: float
        The sampling rate of data in Hz
    velocity: float
        The velocity of the simulated aircraft in kph

    Returns
    -------
    numpy.ndarray
        Cross-track error per sample, in nautical miles.
    """
    dtheta = data * np.pi / 180 / 3600 / rate       # deg/h -> rad per sample
    heading = np.cumsum(dtheta)                     # heading angle, rad
    dy = velocity * 1000 / 3600 / rate * heading    # lateral step, m (paraxial)
    return np.cumsum(dy) / 1852                     # m -> nautical miles
|
88fa42408090c94c9e3212517d89ba47afeedccb
| 3,641,996
|
async def get_list_address():
    """Get list of address

    Asynchronously fetches all address records from the address service.
    """
    # Thin async delegation; returns whatever `.all()` yields (presumably
    # a list of address records — confirm with the service layer).
    return await service.address_s.all()
|
4d0304e23243d7f151c0f09fc5e1896413daee1b
| 3,641,997
|
import pathlib
from typing import Optional
def load_xml_stream(
    file_path: pathlib.Path, progress_message: Optional[str] = None
) -> progress.ItemProgressStream:
    """Load an iterable xml file with a progress bar."""
    # The whole document is parsed up front; the stream below only wraps
    # iteration over the root element's children with progress reporting.
    all_posts = ElementTree.parse(file_path).getroot()
    return progress.ItemProgressStream(
        all_posts, len(all_posts), prefix="  ", message=progress_message,
    )
|
26d6bd350dbe913855479c557c8881b03f90b266
| 3,641,998
|
def SNR_band(cp, ccont, cb, iband, itime=10.):
    """
    Signal-to-noise ratio on a molecular band for a given exposure time,
    following Eqn 7 from Robinson et al. 2016.

    NOTE(review): the original summary claimed the exposure time needed for
    a given S/N, but the code computes the S/N reached in `itime` hours.
    Also, `ccont` is used as passed (not indexed by `iband`) — presumably
    already band-sliced by the caller; confirm.

    Parameters
    ----------
    cp :
        Planet count rate
    ccont :
        Continuum count rate
    cb :
        Background count rate
    iband :
        Indicies of molecular band
    itime :
        Integration time [hours]
    Returns
    -------
    snr : float
        SNR to detect band given exposure time
    """
    # Band depth signal vs. Poisson noise from planet + 2x background.
    signal = np.sum(np.fabs(ccont - cp[iband]))
    noise = np.power(np.sum(cp[iband] + 2. * cb[iband]), 0.5)
    return np.power(itime * 3600., 0.5) * signal / noise
|
ffec688d6e760ace0fc1e38217ec91dc674ee83f
| 3,641,999
|
def create_index(conn, column_list, table='perfdata', unique=False):
    """Creates one index on a list of/one database column/s.

    The index name is derived from an MD5 of the table name and column
    list, so repeated calls with the same arguments are idempotent
    (``IF NOT EXISTS``).

    Returns a ``(success, payload)`` tuple: ``(True, True)`` on success, or
    ``(False, error_message)`` if the query failed.
    """
    table = base2.filter_str(table)
    index_name = u'idx_{}'.format(base2.md5sum(table + column_list))
    cursor = conn.cursor()
    # Build the statement once; only the UNIQUE keyword differs.
    sql = u'CREATE {}INDEX IF NOT EXISTS {} ON "{}" ({});'.format(
        u'UNIQUE ' if unique else u'', index_name, table, column_list
    )
    try:
        cursor.execute(sql)
    except Exception as e:
        return (False, u'Query failed: {}, Error: {}'.format(sql, e))
    return (True, True)
|
38b71cededb8500452cd3eac53609cc4ee384566
| 3,642,001
|
from typing import Type
from typing import Any
import inspect
def produce_hash(self: Type[PCell], extra: Any = None) -> str:
    """Produce a short (7 hex chars) hash identifying a PCell instance.

    The hash covers:
    1. the source code of the instance's class and all its PCell ancestors,
    2. the instance's (non-default) parameters,
    3. the cell's name, and
    4. an optional ``extra`` payload.
    """
    # Concatenate the source of the class and every PCell base in MRO order.
    klass_sources = "".join(
        inspect.getsource(klass)
        for klass in self.__class__.__mro__
        if issubclass(klass, PCell)
    )
    params = dict(self.params)
    # Render keys with repr() but values with str(): str(dict) would call
    # __repr__ on the inner values, which may not be implemented for them.
    params_repr = "{%s}" % ", ".join("%r: %s" % item for item in params.items())
    digest = sha256(
        (klass_sources + params_repr + self.name + str(extra)).encode()
    ).hexdigest()
    return digest[:7]
|
859f51600a9cc4da8ba546afebf0ecdf20959ec8
| 3,642,002
|
def boxes3d_kitti_fakelidar_to_lidar(boxes3d_lidar):
    """
    Args:
        boxes3d_lidar: (N, 7) [x, y, z, w, l, h, r] in old LiDAR coordinates, z is bottom center
    Returns:
        boxes3d_lidar: [x, y, z, dx, dy, dz, heading], (x, y, z) is the box center

    Note: shifts the z column of the input array in place (bottom center to
    geometric center) before assembling the converted boxes.
    """
    w = boxes3d_lidar[:, 3:4]
    l = boxes3d_lidar[:, 4:5]
    h = boxes3d_lidar[:, 5:6]
    r = boxes3d_lidar[:, 6:7]
    # Raise z from the bottom face to the box center (mutates the input).
    boxes3d_lidar[:, 2] += h[:, 0] / 2
    # Swap w/l into dx/dy order and convert the heading convention.
    heading = -(r + np.pi / 2)
    return np.concatenate([boxes3d_lidar[:, 0:3], l, w, h, heading], axis=-1)
|
139a3da3ac8e6c09a376d976d0e96a8d91f9a74a
| 3,642,003
|
def combine_per_choice(*args):
    """
    Combines two or more per-choice analytics results into one.

    Each argument maps a choice key to a ``(weight, averages)`` pair, where
    ``averages`` maps a player-model name to its average value. Shared keys
    are merged by weight-averaging each player model's value; keys present
    in only one result are copied over. The last argument's dict is used as
    the accumulator and mutated in place.

    Raises:
        ValueError: if two results being merged for the same key used
            different sets of player models.
    """
    remaining = list(args)
    result = remaining.pop()
    while remaining:
        other = remaining.pop()
        for key in other:
            if key not in result:
                result[key] = other[key]
                continue
            old_weight, old_averages = result[key]
            other_weight, other_averages = other[key]
            # Compare the two entries actually being merged. (The previous
            # code compared against a leftover value from an earlier key,
            # which skipped the check on the very first combination.)
            if set(old_averages.keys()) != set(other_averages.keys()):
                raise ValueError(
                    "Can't combine per-choice results which used different sets of "
                    "player models."
                )
            new_weight = old_weight + other_weight
            new_averages = {
                pmn: (
                    old_averages[pmn] * old_weight
                    + other_averages[pmn] * other_weight
                ) / new_weight
                for pmn in old_averages
            }
            result[key] = (new_weight, new_averages)
    return result
|
63e482a60b521744c94d80b0b8a740ff74f4b197
| 3,642,004
|
import string
def prepare_input(dirty: str) -> str:
    """
    Normalise plaintext for Playfair-style digraph processing: keep only
    ASCII letters, upper-case them, insert an 'X' between adjacent repeated
    letters, and pad with a trailing 'X' if the length is odd.
    """
    letters = "".join(ch.upper() for ch in dirty if ch in string.ascii_letters)
    if len(letters) < 2:
        return letters
    pieces = []
    for current, following in zip(letters, letters[1:]):
        pieces.append(current)
        if current == following:
            # Break up the double letter so no digraph repeats a character.
            pieces.append("X")
    pieces.append(letters[-1])
    if len(pieces) % 2:
        pieces.append("X")
    return "".join(pieces)
|
5c55ba770e024b459d483fd168978437b8d48c21
| 3,642,005
|
def calc_center_from_box(box_array):
    """Calculate the center point of each box.

    Args:
        box_array (array): N*4 [left_top_x, left_top_y, right_bottom_x, right_bottom_y]
    Returns:
        array N*2: center points array [x, y]
    """
    # Integer (floor) midpoint of each box's opposite corners.
    centers = [
        [(box[0] + box[2]) // 2, (box[1] + box[3]) // 2]
        for box in box_array
    ]
    return np.array(centers)
|
1f713a2f6900678ad1760ea60456bbf44b9f06af
| 3,642,006
|
import scipy
def getW3D(coords):
    """
    #################################################################
    The calculation of 3-D Wiener index based
    gemetrical distance matrix optimized
    by MOPAC(Not including Hs)
    -->W3D
    #################################################################
    """
    # scipy.sum was a deprecated alias of numpy.sum and has been removed
    # from modern SciPy releases; use numpy directly instead.
    import numpy
    heavy_atoms = [
        [float(i[0]), float(i[1]), float(i[2])]
        for i in coords
        if i[3] != 'H'  # skip hydrogens; i[3] is presumably the element symbol — TODO confirm
    ]
    DistanceMatrix = GetGementricalDistanceMatrix(heavy_atoms)
    # Each pairwise distance appears twice in the symmetric matrix.
    return numpy.sum(DistanceMatrix) / 2.0
|
25fb1596b0d818d7f7f120289a79ccbf9b4f1ae4
| 3,642,007
|
from typing import Any
from typing import Sequence
def state_vectors(
    draw: Any,
    max_num_qudits: int = 3,
    allowed_bases: Sequence[int] = (2, 3),
    min_num_qudits: int = 1,
) -> StateVector:
    """Hypothesis strategy for generating random `StateVector` objects."""
    # Draw a consistent (size, radixes) pair, then sample a random vector.
    sizing_strategy = num_qudits_and_radixes(
        max_num_qudits, allowed_bases, min_num_qudits,
    )
    num_qudits, radixes = draw(sizing_strategy)
    return StateVector.random(num_qudits, radixes)
|
b81059843f94cb1c36581ac0a7dd5d3c17f39839
| 3,642,008
|
def ProcessChainsAndLigandsOptionsInfo(ChainsAndLigandsInfo, ChainsOptionName, ChainsOptionValue, LigandsOptionName = None, LigandsOptionValue = None):
    """Process specified chain and ligand IDs using command line options.

    Arguments:
        ChainsAndLigandsInfo (dict): A dictionary containing information
            existing chains and ligands.
        ChainsOptionName (str): Name of command line chains option.
        ChainsOptionValue (str): Value for command line chains option.
        LigandsOptionName (str): Name of command line ligands option.
        LigandsOptionValue (str): Value for command line ligands option.

    Returns:
        dict: A dictionary containing list of chain identifiers and dictionaries
        of chains containing lists of ligand names for each chain.

    Examples:
        ChainsAndLigandsInfo = ProcessChainsAndLigandsOptionsInfo(Infile,
            MolName)
        for ChainID in ChainsAndLigandsInfo["ChainIDs"]:
            for LigandID in ChainsAndLigandsInfo["LigandIDs"][ChainID]:
                MiscUtil.PrintInfo("ChainID: %s; LigandID: %s" % (ChainID,
                    LigandID))
    """
    Specified = {"ChainIDs": [], "LigandIDs": {}}
    # Without a chains option there is nothing to resolve.
    if ChainsOptionValue is None:
        return Specified
    _ProcessChainIDs(ChainsAndLigandsInfo, Specified, ChainsOptionName, ChainsOptionValue)
    # Ligands are only meaningful once chains have been resolved.
    if LigandsOptionValue is None:
        return Specified
    _ProcessLigandIDs(ChainsAndLigandsInfo, Specified, LigandsOptionName, LigandsOptionValue)
    return Specified
|
0bdadbc08512957269a179df24c1202a56870d42
| 3,642,009
|
def oa_filter(x, h, N, mode=0):
    """
    Overlap and add transform domain FIR filtering.

    This function implements the classical overlap and add method of
    transform domain filtering using a length P FIR filter.

    Parameters
    ----------
    x : input signal to be filtered as an ndarray
    h : FIR filter coefficients as an ndarray of length P
    N : FFT size > P, typically a power of two
    mode : 0 or 1, when 1 returns a diagnostic matrix

    Returns
    -------
    y : the filtered output as an ndarray
    y_mat : an ndarray whose rows are the individual overlap outputs
        (only returned when mode == 1).

    Notes
    -----
    y_mat is used for diagnostics and to gain understanding of the algorithm.

    Examples
    --------
    >>> import numpy as np
    >>> from sk_dsp_comm.sigsys import oa_filter
    >>> n = np.arange(0,100)
    >>> x = np.cos(2*np.pi*0.05*n)
    >>> h = np.ones(10)
    >>> N = 32
    >>> y = oa_filter(x,h,N)
    >>> # set mode = 1
    >>> y, y_mat = oa_filter(x,h,N,1)
    """
    P = len(h)
    # Frame length: each N-point FFT holds L new samples plus P-1 of overlap.
    L = int(N) - P + 1  # need N >= L + P -1
    Nx = len(x)
    Nframe = int(np.ceil(Nx/float(L)))
    # zero pad to full number of frames needed
    x = np.hstack((x,np.zeros(Nframe*L-Nx)))
    y = np.zeros(Nframe*N)
    # create an instrumentation matrix to observe the overlap and add behavior
    y_mat = np.zeros((Nframe,Nframe*N))
    # Filter frequency response computed once outside the loop.
    H = fft.fft(h,N)
    # begin the filtering operation
    for k in range(Nframe):
        xk = x[k*L:(k+1)*L]
        Xk = fft.fft(xk,N)
        Yk = H*Xk
        yk = np.real(fft.ifft(Yk))
        # Each N-point output overlaps the next frame by P-1 samples; add.
        y[k*L:k*L+N] += yk
        y_mat[k,k*L:k*L+N] = yk
    # Trim the zero-padded tail back to the original input length.
    if mode == 1:
        return y[0:Nx], y_mat[:,0:Nx]
    else:
        return y[0:Nx]
|
41fa7aaf7a57e0f6c363eb1efa7413b2a1a34d47
| 3,642,010
|
from typing import Optional
from typing import Iterable
import re
def get_languages(translation_dir: str, default_language: Optional[str] = None) -> Iterable[str]:
    """
    Get a list of available languages.

    The default language is always the first entry of the returned list.
    Additional languages are discovered as entries of ``translation_dir``
    whose names are either two lowercase letters (``de``) or two lowercase
    letters, a dash, and two uppercase letters (``de-CH``). If the directory
    cannot be read, only the default language is returned.

    :Example:
        Valid language codes: ``de``, ``de-DE``, ``de-AT``, ``de-CH``.

    :Example:
        Invalid language codes: ``EN``, ``EN-us``, ``EN-US``, ``en-us``,
        ``en-USA``.

    :param default_language: The default language as used in the GetText functions within the
        code. Falls back to :meth:`get_default_language` when not given.
    :param translation_dir: The directory within which the translation folders can be found.
    :return: A list of language codes supported by the application.
    """
    default = default_language or get_default_language()
    code_pattern = re.compile(r'^([a-z]{2})(-[A-Z]{2})?$')
    # An unreadable directory simply yields no extra languages.
    try:
        entries = listdir(translation_dir)
    except OSError:
        entries = []
    extra = [entry for entry in entries if code_pattern.match(entry)]
    return [default] + extra
|
a25c9c2672f4f8d96cffc363aa922424b031aea7
| 3,642,011
|
def calculate_direction(a, b):
    """Calculates the direction vector between two points.

    Args:
        a (list): the position vector of point a.
        b (list): the position vector of point b.

    Returns:
        array: The (unnormalised) direction vector between points a and b.
            The smallest magnitude of an element is 1 (eg: [1,1,2]).
            Returns [0, 0, 0] (after printing a message) when a == b.
    """
    difference = np.subtract(a, b)
    if np.count_nonzero(difference) < 1:
        print("The two k-points are equal")
        return np.array([0, 0, 0])
    # we need to find the smallest non-zero value within a-b
    a = np.array(a)
    b = np.array(b)
    direction_masked = ma.masked_equal(
        a - b, 0)  # mask entries where a and b agree (zero difference)
    # NOTE(review): the mask is computed on a-b while the direction below
    # uses b-a; magnitudes are unaffected, only signs differ — confirm intended.
    direction_filled = ma.filled(
        direction_masked, 10
        **6)  # replace masked (zero) entries with a huge sentinel so amin ignores them
    direction_absolute = np.absolute(
        direction_filled)  # magnitudes only: we want the smallest non-zero |element|
    smallest = np.amin(direction_absolute)
    direction = (
        b - a) / smallest  # scale so the smallest non-zero element becomes +-1
    # NOTE(review): this flips the overall sign only when some element equals
    # exactly -1 after the division; other all-negative vectors are left
    # unchanged — confirm this sign convention is intended.
    if -1 in direction:
        direction = np.multiply(direction, -1)
    return direction
|
9e0297560bb48d57cd4e1d5f12788a62ae7d9b3b
| 3,642,012
|
import json
def parse_game_state(gs_json: dict) -> game_state_pb2.GameState:
    """Deserialize a JSON-formatted game state to protobuf."""
    # A payload without a provider is not a valid game state at all.
    if 'provider' not in gs_json:
        raise InvalidGameStateException(gs_json)
    try:
        map_msg = parse_map(gs_json.get('map'))
        provider_msg = parse_provider(gs_json['provider'])
        round_msg = parse_round(gs_json.get('round'))
        player_msg = parse_player(gs_json['player'])
        allplayers_msgs = [
            parse_allplayers_entry(steam_id, entry)
            for steam_id, entry in gs_json.get('allplayers', {}).items()
        ]
        previously_msg = None
        if 'previously' in gs_json:
            previously_msg = parse_previously(gs_json['previously'])
        added_msg = None
        if 'added' in gs_json:
            added_msg = parse_added(gs_json['added'])
        return game_state_pb2.GameState(
            provider=provider_msg, map=map_msg, round=round_msg,
            player=player_msg, allplayers=allplayers_msgs,
            previously=previously_msg, added=added_msg)
    except DeserializationError as e:
        # Log the raw payload so failed states can be reproduced.
        logger.error('Failed to deserialize game_state: %s', json.dumps(gs_json))
        raise e
|
d78868c24807a3cac469b87990e5a74f88876eb5
| 3,642,014
|
def state2bin(s, num_bins, limits):
"""
:param s: a state. (possibly multidimensional) ndarray, with dimension d =
dimensionality of state space.
:param num_bins: the total number of bins in the discretization
:param limits: 2 x d ndarray, where row[0] is a row vector of the lower
limit of each discrete dimension, and row[1] are corresponding upper
limits.
Returns the bin number (index) corresponding to state s given a
discretization num_bins between each column of limits[0] and limits[1].
The return value has same dimensionality as ``s``. \n
Note that ``s`` may be continuous. \n
\n
Examples: \n
s = 0, limits = [-1,5], num_bins = 6 => 1 \n
s = .001, limits = [-1,5], num_bins = 6 => 1 \n
s = .4, limits = [-.5,.5], num_bins = 3 => 2 \n
"""
if s == limits[1]:
return num_bins - 1
width = limits[1] - limits[0]
if s > limits[1]:
print 'Tools.py: WARNING: ', s, ' > ', limits[1], '. Using the chopped value of s'
print 'Ignoring', limits[1] - s
s = limits[1]
elif s < limits[0]:
print 'Tools.py: WARNING: ', s, ' < ', limits[0], '. Using the chopped value of s'
# print("WARNING: %s is out of limits of %s . Using the chopped value of s" %(str(s),str(limits)))
s = limits[0]
return int((s - limits[0]) * num_bins / (width * 1.))
|
8c0a1d559a332b1a015bde78c9eca413eeae942c
| 3,642,015
|
def _fix_json_agents(ag_obj):
"""Fix the json representation of an agent."""
if isinstance(ag_obj, str):
logger.info("Fixing string agent: %s." % ag_obj)
ret = {'name': ag_obj, 'db_refs': {'TEXT': ag_obj}}
elif isinstance(ag_obj, list):
# Recursive for complexes and similar.
ret = [_fix_json_agents(ag) for ag in ag_obj]
elif isinstance(ag_obj, dict) and 'TEXT' in ag_obj.keys():
ret = deepcopy(ag_obj)
text = ret.pop('TEXT')
ret['db_refs']['TEXT'] = text
else:
ret = ag_obj
return ret
|
be73467edc1dc30ac0be1f6804cdb19cf5f942bf
| 3,642,016
|
def vflip(img):
    """Vertically flip the given CV Image.

    Args:
        img (CV Image): Image to be flipped.
    Returns:
        CV Image: Vertically flipped image.
    Raises:
        TypeError: if ``img`` is not a numpy-backed CV Image.
    """
    if not _is_numpy_image(img):
        raise TypeError('img should be CV Image. Got {}'.format(type(img)))
    # flipCode 0 flips around the x-axis (vertical flip); the previous
    # flipCode of 1 flipped horizontally, contradicting this function's name.
    return cv2.flip(img, 0)
|
7b678327a15876a98e0622eacc012f86a8ffd432
| 3,642,017
|
def list_pending_tasks():
    """List all pending tasks in celery cluster.

    Returns the workers' reserved tasks, i.e. tasks already prefetched by
    workers but not yet started — presumably what "pending" means here;
    confirm this matches callers' expectations.
    """
    return celery_app.control.inspect().reserved()
|
3d1785dd9ac8fd91f1f0ceb72eeb7df5671b55d5
| 3,642,018
|
def get_attack(attacker, defender):
    """
    Returns a value for an attack roll.

    Args:
        attacker (obj): Character doing the attacking
        defender (obj): Character being attacked

    Returns:
        attack_value (int): Attack roll value, compared against a defense value
            to determine whether an attack hits or misses.

    Notes:
        By default, returns a random integer from 1 to 100 without using any
        properties from either the attacker or defender.

        This can easily be expanded to return a value based on characters stats,
        equipment, and abilities. This is why the attacker and defender are passed
        to this function, even though nothing from either one are used in this example.
    """
    # Plain d100 roll for this example; attacker/defender are intentionally unused.
    return randint(1, 100)
|
3ec24ab34a02c2572ee62d1cc18079bdb7ef10ee
| 3,642,019
|
def colored(s, color=None, attrs=None):
    """Call termcolor.colored with same arguments if this is a tty and it is available."""
    # Fall back to the plain string when color support is unavailable.
    if not HAVE_COLOR:
        return s
    return colored_impl(s, color, attrs=attrs)
|
a8c4f56e55721ec464728fbe9af4453cd98400ba
| 3,642,020
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.