content stringlengths 35 762k | sha1 stringlengths 40 40 | id int64 0 3.66M |
|---|---|---|
def Mux(sel, val1, val0):
    """Choose between two values.

    Parameters
    ----------
    sel : Value, in
        Selector; reduced to a single bit with ``bool()`` if wider.
    val1 : Value, in
    val0 : Value, in
        Input values.

    Returns
    -------
    Value, out
        Output ``Value``. If ``sel`` is asserted, the Mux returns ``val1``, else ``val0``.
    """
    selector = Value.cast(sel)
    if len(selector) != 1:
        # Multi-bit selectors are reduced to "any bit set".
        selector = selector.bool()
    return Operator("m", [selector, val1, val0])
def get_identity_list(user, provider=None):
    """
    Given the (request) user
    return all identities on all active providers
    """
    identities = CoreIdentity.shared_with_user(user)
    if provider:
        # Narrow to a single provider when one is supplied.
        return identities.filter(provider=provider)
    return identities
def get_user_pool_domain(prefix: str, region: str) -> str:
    """Return a user pool domain name based on the prefix received and region.

    Args:
        prefix: The domain prefix for the domain.
        region: The region in which the pool resides.

    Returns:
        The fully qualified Cognito user pool domain name.
    """
    # f-string replaces the legacy %-formatting; same output.
    return f"{prefix}.auth.{region}.amazoncognito.com"
def add_dep_info(tgt_tokens, lang, spacy_nlp, include_detail_tag=True):
    """
    Annotate CoNLL tokens in place with lemma, POS, and dependency info from spaCy.

    :param tgt_tokens: a list of CoNLLUP_Token_Template() Objects from CoNLL_Annotations.py file
    :param lang: language code; for "ES"/"FR" the fine-grained tag is split out of ``token.tag_``
    :param spacy_nlp: Spacy language model of the target sentence to get the proper Dependency Tree
    :param include_detail_tag: whether to also store the fine-grained morphological tag
    :return: the same ``tgt_tokens`` list, mutated in place
    """
    doc = spacy_nlp.tokenizer.tokens_from_list([t.word for t in tgt_tokens])
    spacy_nlp.tagger(doc)
    spacy_nlp.parser(doc)
    # BUG FIX: check lengths BEFORE indexing tgt_tokens; the original asserted
    # only after the loop, by which point a mismatch would already have raised
    # IndexError instead of the intended assertion message.
    assert len(doc) == len(tgt_tokens), f"LEN Mismatch! Spacy has {len(doc)} tokens and CoNLL has {len(tgt_tokens)} tokens"
    for ix, token in enumerate(doc):
        tgt_tokens[ix].lemma = token.lemma_ or "_"
        # spaCy heads are 0-based token indices; CoNLL-U heads are 1-based.
        tgt_tokens[ix].head = token.head.i + 1
        if lang in ["ES", "FR"]:
            detail_tag = token.tag_.split("__")  # [VERB , Mood=Sub|Number=Plur|Person=3|Tense=Imp|VerbForm=Fin]
            tgt_tokens[ix].pos_tag = detail_tag[0] or "_"
            if include_detail_tag:
                tgt_tokens[ix].detail_tag = detail_tag[-1] or "_"
        else:
            tgt_tokens[ix].pos_tag = token.tag_ or "_"
        tgt_tokens[ix].pos_universal = token.pos_ or "_"  # Is SpaCy already Universal?
        tgt_tokens[ix].dep_tag = token.dep_ or "_"
        tgt_tokens[ix].ancestors = [(t.i, t.text) for t in token.ancestors]
        tgt_tokens[ix].children = [(t.i, t.text) for t in token.children]
        # print(token.i, token.text, token.pos_, token.dep_, token.head.text, token.head.i, token.tag_)
    return tgt_tokens
async def apod(request: Request) -> dict:
    """Get the astronomy picture of the day."""
    client = request.app.state.http_client
    url = f"https://api.nasa.gov/planetary/apod?api_key={NASA_API}"
    async with client.session.get(url) as response:
        payload = await response.json()
    return {
        "title": payload["title"],
        "explanation": payload["explanation"],
        "img": payload["hdurl"],
    }
def main_view(request, url, preview=False):
    """
    Render a CMS page, or a single post with its comment form, for the URL.

    @param request: HTTP request
    @param url: string
    @param preview: boolean, render unpublished content when True
    """
    url_result = parse_url(url)
    current_site = get_site()
    # sets tuple (template_name, posts_on_page)
    current_template = get_template()
    language = get_language(url_result)
    if not url_result['page']:
        page = get_index_page(language)
    else:
        page = get_page(url_result['page'], language, preview)
    menuitems = get_main_menuitems(url_result['page'], page, preview)
    meta_data = get_metadata(page)
    page_num = url_result['page_num'] or 1
    # BUG FIX: the original left `form` undefined on the page branch and later
    # detected that by catching NameError -- fragile and hides real NameErrors.
    # Track the post branch explicitly instead.
    form = None
    has_form = False
    if url_result['post']:
        posts = get_post(page, url_result['post'], preview)
        template_page = 'post.html'
        form = handle_comment_form(request, posts)
        has_form = True
    else:
        posts = get_paginated_posts(page, page_num, page.items_per_menu)
        template_page = 'page.html'
    site_content = {'site': current_site,
                    'languages': get_languages(),
                    'current_language': language,
                    'menuitems': menuitems,
                    'page': page,
                    'scripts': get_scripts(),
                    'metadata': meta_data,
                    'posts': posts, }
    if has_other_menu():
        site_content['other_menuitems'] = get_other_menuitems()
    if has_form:
        # Only single-post pages carry a comment form.
        site_content['form'] = form
    template = '{}/{}'.format(current_template[0], template_page)
    return render_to_response(
        template,
        {'site_content': site_content},
        RequestContext(request)
    )
def parse_configs(code_config, field_config, time_config):
    """
    Wrapper to validate and parse each of the config files. Returns a
    a dictionary with config types as keys and parsed config files as values.
    """
    # Basic validation of config paths; maps config type -> raw dataframe.
    raw_dfs = validate_config_dfs(code_config, field_config, time_config)
    # Config-specific validation and parsing.
    config_dict = {}
    for cfg_type, frame in raw_dfs.items():
        if cfg_type in validation_functions:
            validation_functions[cfg_type](frame)
        parser = parse_functions.get(cfg_type)
        config_dict[cfg_type] = frame if parser is None else parser(frame)
    # Fold the code config into the field config.
    if CODE_CONFIG in config_dict:
        code_df = config_dict.pop(CODE_CONFIG)
        if FIELD_CONFIG in config_dict:
            config_dict[FIELD_CONFIG] = pd.concat(
                [code_df, config_dict[FIELD_CONFIG]], sort=True)
        else:
            config_dict[FIELD_CONFIG] = code_df
    return config_dict
def butter_highpass_filter_eda(data):
    """ High pass filter for 1d EDA data.
    """
    numerator, denominator = eda_hpf()
    return lfilter(numerator, denominator, data)
def normalize_null_vals(reported_val):
    """
    Takes a reported value and returns a normalized NaN is null, nan, empty, etc.
    Else returns reported value.
    """
    return np.NaN if is_empty_value(reported_val) else reported_val
def test_insert(type):
    """
    >>> test_insert(int_)
    [0, 1, 2, 3, 4, 5]
    """
    tlist = nb.typedlist(type, [1, 3])
    # Same insertions as before, driven from a table of (position, value).
    for position, value in ((0, 0), (2, 2), (4, 4), (8, 5)):
        tlist.insert(position, value)
    return tlist
def get_next_month_range(unbounded=False):
    """Return the start and end time of next month.

    :param unbounded: when True, return an open (unbounded) interval
    """
    return get_month_range(months=1, unbounded=unbounded)
import functools
def probit_regression(
    dataset_fn,
    name='probit_regression',
):
  """Bayesian probit regression with a Gaussian prior.

  Args:
    dataset_fn: A function to create a classification data set. The dataset must
      have binary labels.
    name: Name to prepend to ops created in this function, as well as to the
      `code_name` in the returned `TargetDensity`.

  Returns:
    target: `TargetDensity`.
  """
  with tf.name_scope(name) as name:
    dataset = dataset_fn()
    num_train_points = dataset.train_features.shape[0]
    num_test_points = dataset.test_features.shape[0]
    have_test = num_test_points > 0
    # Add bias.
    train_features = tf.concat(
        [dataset.train_features,
         tf.ones([num_train_points, 1])], axis=-1)
    train_labels = tf.convert_to_tensor(dataset.train_labels)
    test_features = tf.concat(
        [dataset.test_features,
         tf.ones([num_test_points, 1])], axis=-1)
    test_labels = tf.convert_to_tensor(dataset.test_labels)
    num_features = int(train_features.shape[1])
    root = tfd.JointDistributionCoroutine.Root
    zero = tf.zeros(num_features)
    one = tf.ones(num_features)
    # Generative model: weights ~ N(0, I); labels ~ ProbitBernoulli(X @ w).
    def model_fn(features):
      weights = yield root(tfd.Independent(tfd.Normal(zero, one), 1))
      probits = tf.einsum('nd,...d->...n', features, weights)
      yield tfd.Independent(tfd.ProbitBernoulli(probits=probits), 1)
    train_joint_dist = tfd.JointDistributionCoroutine(
        functools.partial(model_fn, features=train_features))
    test_joint_dist = tfd.JointDistributionCoroutine(
        functools.partial(model_fn, features=test_features))
    # Condition the train joint on the observed labels to get the posterior.
    dist = joint_distribution_posterior.JointDistributionPosterior(
        train_joint_dist, (None, train_labels))
    expectations = {
        'params':
            target_spec.expectation(
                fn=lambda params: params[0],
                human_name='Parameters',
            )
    }
    if have_test:
      # Negative log-likelihood of held-out labels under sampled parameters.
      expectations['test_nll'] = target_spec.expectation(
          fn=lambda params: (  # pylint: disable=g-long-lambda
              -test_joint_dist.sample_distributions(value=params)
              [0][-1].log_prob(test_labels)),
          human_name='Test NLL',
      )
      # Same quantity, but per test example (not summed over the test set).
      expectations['per_example_test_nll'] = target_spec.expectation(
          fn=lambda params: (  # pylint: disable=g-long-lambda
              -test_joint_dist.sample_distributions(value=params)
              [0][-1].distribution.log_prob(test_labels)),
          human_name='Per-example Test NLL',
      )
    return target_spec.TargetDensity.from_distribution(
        distribution=dist,
        constraining_bijectors=(tfb.Identity(),),
        expectations=expectations,
        code_name='{}_{}'.format(dataset.code_name, name),
        human_name='{} Probit Regression'.format(dataset.human_name),
    )
def block_inception_c(inputs, scope=None, reuse=None):
  """Builds Inception-C block for Inception v4 network.

  Args:
    inputs: 4-D input tensor; branches are concatenated on axis 3, so the
      layout is presumably NHWC -- confirm against the caller.
    scope: Optional variable_scope name (defaults to 'BlockInceptionC').
    reuse: Whether to reuse variables within the scope.

  Returns:
    The channel-wise concatenation of the four parallel branches.
  """
  # By default use stride=1 and SAME padding
  with slim.arg_scope([slim.conv2d, slim.avg_pool2d, slim.max_pool2d],
                      stride=1, padding='SAME'):
    with tf.variable_scope(scope, 'BlockInceptionC', [inputs], reuse=reuse):
      # Branch 0: plain 1x1 bottleneck.
      with tf.variable_scope('Branch_0'):
        branch_0 = slim.conv2d(inputs, 256, [1, 1], scope='Conv2d_0a_1x1')
      # Branch 1: 1x1 then parallel 1x3 / 3x1 (factorized 3x3), concatenated.
      with tf.variable_scope('Branch_1'):
        branch_1 = slim.conv2d(inputs, 384, [1, 1], scope='Conv2d_0a_1x1')
        branch_1 = tf.concat(axis=3, values=[
            slim.conv2d(branch_1, 256, [1, 3], scope='Conv2d_0b_1x3'),
            slim.conv2d(branch_1, 256, [3, 1], scope='Conv2d_0c_3x1')])
      # Branch 2: deeper factorized stack, then parallel 1x3 / 3x1 outputs.
      with tf.variable_scope('Branch_2'):
        branch_2 = slim.conv2d(inputs, 384, [1, 1], scope='Conv2d_0a_1x1')
        branch_2 = slim.conv2d(branch_2, 448, [3, 1], scope='Conv2d_0b_3x1')
        branch_2 = slim.conv2d(branch_2, 512, [1, 3], scope='Conv2d_0c_1x3')
        branch_2 = tf.concat(axis=3, values=[
            slim.conv2d(branch_2, 256, [1, 3], scope='Conv2d_0d_1x3'),
            slim.conv2d(branch_2, 256, [3, 1], scope='Conv2d_0e_3x1')])
      # Branch 3: average pooling followed by a 1x1 projection.
      with tf.variable_scope('Branch_3'):
        branch_3 = slim.avg_pool2d(inputs, [3, 3], scope='AvgPool_0a_3x3')
        branch_3 = slim.conv2d(branch_3, 256, [1, 1], scope='Conv2d_0b_1x1')
      return tf.concat(axis=3, values=[branch_0, branch_1, branch_2, branch_3])
import six
from datetime import datetime
def from_json(js):
    """
    Helper to parse json values from server into python types.

    :param js: a plain JSON scalar (``None``, ``bool``, ``str``) or a
        ``{'type': ..., 'data': ...}`` wrapper dict produced by the server.
    :return: the corresponding Python value.
    :raises ValueError: if ``js`` is not a recognized wrapper dict, a map
        entry is malformed, or the type tag is unknown.
    """
    # Python 3: `str` replaces the old `six.text_type` check (identical here).
    if js is None or js is True or js is False or isinstance(js, str):
        # JsNull, JsBoolean, JsString
        return js
    if not isinstance(js, dict) or 'type' not in js or 'data' not in js:
        raise ValueError('Expected a dict, got {!r}'.format(js))
    t = js['type']
    data = js['data']
    if t in ('byte', 'short', 'int', 'long'):
        return int(data)
    if t in ('float', 'double'):
        return float(data)
    if t == 'timestamp':
        # server return timestamp in milliseconds, which is not the python convention
        return float(data) / 1E3
    if t == 'date':
        # server return timestamp in milliseconds.
        # BUG FIX: `datetime.date` is a method of the imported `datetime` class,
        # so `datetime.date.fromtimestamp` raised AttributeError; build a
        # datetime and take its date instead (same local-time semantics).
        return datetime.fromtimestamp(float(data) / 1E3).date()
    if t == 'byte_array':
        return bytearray([int(x) for x in data])
    if t in ('wrapped_array', 'seq', 'array'):
        return [from_json(x) for x in data]
    if t == 'map':
        d = {}
        for entry in data:
            if 'key' not in entry or 'val' not in entry:
                raise ValueError('Invalid map entry: {!r}'.format(entry))
            d[from_json(entry['key'])] = from_json(entry['val'])
        return d
    raise ValueError('Failed to parse value: {!r}'.format(js))
def exposexml(func):
    """
    Convenience decorator function to expose XML
    """
    def wrapper(self, data, expires, contentType="application/xml"):
        payload = func(self, data)
        _setCherryPyHeaders(payload, contentType, expires)
        return self.templatepage('XML', data=payload,
                                 config=self.config,
                                 path=request.path_info)
    # Preserve the wrapped function's identity and mark it CherryPy-exposed.
    wrapper.__doc__ = func.__doc__
    wrapper.__name__ = func.__name__
    wrapper.exposed = True
    return wrapper
def transform_categorical_by_percentage(TRAIN, TEST=None,
                                        handle_unknown="error", verbose=0):
    """
    Transform categorical features to numerical. The categories are encoded
    by their relative frequency (in the TRAIN dataset).
    To be consistent with scikit-learn transformers having categories
    in transform that are not present during training will raise an error
    by default.
    -----
    Arguments:
        TRAIN: DataFrame. Modified in place.
        TEST: DataFrame, optional (default=None). Modified in place.
        handle_unknown: str, "error", "ignore" or "NaN",
            optional (default="error").
            Whether to raise an error, ignore or replace by NA if an unknown
            category is present during transform.
        verbose: integer, optional (default=0).
            Controls the verbosity of the process.
    -----
    Returns:
        TRAIN: DataFrame.
        TEST: DataFrame.
            This second DataFrame is returned if two DataFrames were provided.
    """
    categorical = TRAIN.select_dtypes(include=["object"]).columns
    for col in categorical:
        # Relative frequency of each category in TRAIN (NaN kept as a category).
        cat_counts = TRAIN[col].value_counts(normalize=True, dropna=False)
        dict_cat_counts = dict(zip(cat_counts.index, cat_counts))
        if TEST is not None:
            not_in_train = list(set(TEST[col].unique()) - set(cat_counts.index))
            if len(not_in_train) > 0:
                _handle_unknown_categories(col, not_in_train, handle_unknown,
                                           dict_cat_counts)
            TEST[col] = TEST[col].replace(dict_cat_counts)
        TRAIN[col] = TRAIN[col].replace(dict_cat_counts)
        if verbose > 0:
            print("\n-----\n")
            print("Feature: {0}".format(col))
            if verbose > 1:
                print(cat_counts)
    # BUG FIX: the original returned None when TEST was provided but the
    # frame had no categorical columns; always return the documented values.
    if TEST is not None:
        return (TRAIN, TEST)
    return TRAIN


def _handle_unknown_categories(col, not_in_train, handle_unknown,
                               dict_cat_counts):
    """Apply the `handle_unknown` policy for categories unseen in TRAIN."""
    if handle_unknown == "error":
        raise ValueError("TEST contains new labels: {0} "
                         "in variable {1}."
                         .format(not_in_train, col))
    if handle_unknown == "ignore":
        print("\n-----\n")
        print("Variable: {0}".format(col))
        print("Unknown category(ies) {0} present during "
              "transform has(ve) been ignored."
              .format(not_in_train))
    if handle_unknown == "NaN":
        print("\n-----\n")
        print("Variable: {0}".format(col))
        print("Unknown category(ies) {0} present during "
              "transform has(ve) been replaced by NA."
              .format(not_in_train))
        # Map unseen categories to NaN so `.replace()` erases them.
        for item in not_in_train:
            dict_cat_counts[item] = np.nan
def buildDescription(flinfoDescription='', flickrreview=False, reviewer='',
override='', addCategory='', removeCategories=False):
"""Build the final description for the image.
The description is based on the info from flickrinfo and improved.
"""
description = '== {{int:filedesc}} ==\n{}'.format(flinfoDescription)
if removeCategories:
description = textlib.removeCategoryLinks(description,
pywikibot.Site(
'commons', 'commons'))
if override:
description = description.replace('{{cc-by-sa-2.0}}\n', '')
description = description.replace('{{cc-by-2.0}}\n', '')
description = description.replace('{{flickrreview}}\n', '')
description = description.replace(
'{{copyvio|Flickr, licensed as "All Rights Reserved" which is not '
'a free license --~~~~}}\n',
'')
description = description.replace('=={{int:license}}==',
'=={{int:license}}==\n' + override)
elif flickrreview:
if reviewer:
description = description.replace(
'{{flickrreview}}',
'{{flickrreview|%s|'
'{{subst:CURRENTYEAR}}-{{subst:CURRENTMONTH}}-'
'{{subst:CURRENTDAY2}}}}' % reviewer)
if addCategory:
description = description.replace('{{subst:unc}}\n', '')
description = description + '\n[[Category:' + addCategory + ']]\n'
description = description.replace('\r\n', '\n')
return description | 94a529e5a26a4390536359e0233f26d32465e3ed | 10,290 |
def allowed_file(filename, extensions):
    """
    Check file is image
    :param filename: string
    :param extensions: list
    :return bool:
    """
    if '.' not in filename:
        return False
    suffix = filename.rsplit('.', 1)[1]
    return suffix.lower() in extensions
def get_number_of_recovery_codes(userid):
    """
    Get and return the number of remaining recovery codes for `userid`.
    Parameters:
        userid: The userid for which to check the count of recovery codes.
    Returns:
        An integer representing the number of remaining recovery codes.
    """
    query = """
        SELECT COUNT(*)
        FROM twofa_recovery_codes
        WHERE userid = %(userid)s
    """
    return d.engine.scalar(query, userid=userid)
async def mailbox_search(search_term: str, Authorize: AuthJWT = Depends(), Token = Depends(auth_schema)):
    """Search email with a search term"""
    Authorize.jwt_required()
    try:
        matches = database.search(search_term)
        body = dumps({"success": True, "email": matches})
        return JSONResponse(body)
    except Exception as err:
        # Any failure (search or serialization) is reported to the client.
        return JSONResponse({"success": False, "error": str(err)})
def otel_service(docker_ip, docker_services):
    """Ensure that port is listening."""
    # `port_for` takes a container port and returns the corresponding host port
    host_port = docker_services.port_for("otel-collector", 4317)

    def _collector_up():
        return is_portListening(docker_ip, host_port)

    docker_services.wait_until_responsive(timeout=30.0, pause=5,
                                          check=_collector_up)
    return True
from re import T
import numpy
def broadcast(vec: T.Tensor, matrix: T.Tensor) -> T.Tensor:
    """
    Broadcasts vec into the shape of matrix following numpy rules:
    vec ~ (N, 1) broadcasts to matrix ~ (N, M)
    vec ~ (1, N) and (N,) broadcast to matrix ~ (M, N)
    Args:
        vec: A vector (either flat, row, or column).
        matrix: A matrix (i.e., a 2D tensor).
    Returns:
        tensor: A tensor of the same size as matrix containing the elements
        of the vector.
    Raises:
        BroadcastError
    """
    # NOTE(review): `shape` and `BroadcastError` are not defined in this view;
    # presumably project-level helpers -- confirm they are in scope. Also the
    # `from re import T` at module level looks wrong for the `T.Tensor`
    # annotations used here; verify the intended type module.
    try:
        return numpy.broadcast_to(vec, shape(matrix))
    except ValueError:
        raise BroadcastError('cannot broadcast vector of dimension {} \
        onto matrix of dimension {}'.format(shape(vec), shape(matrix)))
def neighbour_list_n_out(nlist_i: NeighbourList,
                         nlist_j: NeighbourList) -> np.ndarray:
    """
    Compute n^out between two NeighbourList object.
    Args:
        nlist_i (NeighbourList): A NeighbourList object for neighbour lists at time 0.
        nlist_j (NeighbourList): A NeighbourList object for neighbour lists at time t.
    Returns:
        (np.ndarray(float)): A 1D array of normalised correlation terms.
    Raises:
        ValueError: If the two NeighbourList objects have different numbers
            of lengths of neighbour list vectors.
    Note:
        For each neighbour list vector, computes (l_i.l_i) - (l_i.l_j).
        See Rabani et al. J. Chem. Phys. 1997 doi:https://doi.org/10.1063/1.474927
        Eqn. 8 for details.
    """
    if nlist_i.vectors.shape != nlist_j.vectors.shape:
        raise ValueError(f'NeighbourList vector shapes are not equal: {nlist_i.vectors.shape} != {nlist_j.vectors.shape}')
    # Row-wise dot products: self-overlap at time 0 minus overlap with time t.
    self_overlap = np.einsum('ij,ij->i', nlist_i.vectors, nlist_i.vectors)
    cross_overlap = np.einsum('ij,ij->i', nlist_i.vectors, nlist_j.vectors)
    return self_overlap - cross_overlap
import re
def id_label_to_project(id_label):
    """
    Given a project's id_label, return the project.

    Returns None when the label does not match the expected pattern.
    """
    match = re.match(r"direct-sharing-(?P<id>\d+)", id_label)
    if match is None:
        return None
    return DataRequestProject.objects.get(id=int(match.group("id")))
def payoff_blotto_sign(x, y):
    """
    Compare two Blotto allocations field by field.

    Returns:
        (0, 0, 1) -- x wins, y loss;
        (0, 1, 0) -- draw;
        (1, 0, 0) -- x loss, y wins.
    """
    wins = sum(1 for a, b in zip(x, y) if a > b)
    losses = sum(1 for a, b in zip(x, y) if a < b)
    if wins > losses:
        return (0, 0, 1)
    if wins < losses:
        return (1, 0, 0)
    return (0, 1, 0)
def import_trips(url_path, dl_dir, db_path, taxi_type, nrows=None, usecols=None,
                 overwrite=False, verbose=0):
    """Downloads, cleans, and imports nyc tlc taxi record files for the
    specified taxi type into a sqlite database.

    Parameters
    ----------
    url_path : str or None
        Path to text file containing nyc tlc taxi record file urls to
        download from. Set to None to skip download.
    dl_dir : str
        Path of directory to download files to or load files from.
    db_path : str
        Path to sqlite database.
    taxi_type : str
        Taxi type to create regex for ('fhv', 'green', 'yellow', or 'all').
    nrows : int or None
        Number of rows to read. Set to None to read all rows.
    usecols : list
        List of column names to include. Specify columns names as strings.
        Column names can be entered based on names found in original tables
        for the year specified or names found in the trips table. Set to None to
        read all columns.
    overwrite : bool
        Defines whether or not to overwrite existing database tables.
    verbose : int
        Defines verbosity for output statements.

    Returns
    -------
    dl_num : int
        Number of files downloaded (0 when `url_path` is None).
    import_num : int
        Number of files imported into database.

    Notes
    -----
    Only the 'yellow' path performs a real load/clean; 'fhv' and 'green'
    currently import empty placeholder frames.
    """
    # download taxi record files
    if url_path:
        dl_num = dl_urls(url_path, dl_dir, taxi_type, verbose=verbose)
    else:
        dl_num = 0
    # get taxi record files matching the taxi-type filename patterns
    files = get_regex_files(dl_dir, taxi_regex_patterns(taxi_type),
                            verbose=verbose)
    # create trips table (if needed)
    create_sql = """
        CREATE TABLE IF NOT EXISTS trips (
        trip_id INTEGER PRIMARY KEY,
        taxi_type INTEGER,
        vendor_id INTEGER,
        pickup_datetime TEXT,
        dropoff_datetime TEXT,
        passenger_count INTEGER,
        trip_distance REAL,
        pickup_longitude REAL,
        pickup_latitude REAL,
        pickup_location_id INTEGER,
        dropoff_longitude REAL,
        dropoff_latitude REAL,
        dropoff_location_id INTEGER,
        trip_duration REAL,
        trip_pace REAL,
        trip_straightline REAL,
        trip_windingfactor REAL
        ); """
    indexes = ['CREATE INDEX IF NOT EXISTS trips_pickup_datetime ON trips '
               '(pickup_datetime);']
    create_table(db_path, 'trips', create_sql, indexes=indexes,
                 overwrite=overwrite, verbose=verbose)
    # load, clean, and import taxi files into table
    import_num = 0
    for file in files:
        if verbose >= 1:
            output('Started importing ' + file + '.')
        if taxi_type == 'fhv':
            # placeholder: fhv loading not implemented yet
            df = pd.DataFrame({'taxi_type': []})
        elif taxi_type == 'green':
            # placeholder: green loading not implemented yet
            df = pd.DataFrame({'taxi_type': []})
        elif taxi_type == 'yellow':
            df, year, month = load_yellow(dl_dir + file, nrows=nrows,
                                          usecols=usecols, verbose=verbose)
            df = clean_yellow(df, year, month, verbose=verbose)
            import_num += 1
        else:
            output('Unknown taxi_type.', fn_str='import_trips')
            df = pd.DataFrame({'taxi_type': []})
        # append (never overwrite) each file's rows into the trips table
        df_to_table(db_path, df, table='trips', overwrite=False,
                    verbose=verbose)
        if verbose >= 1:
            output('Imported ' + file + '.')
    output('Finished importing ' + str(import_num) + ' files.')
    return dl_num, import_num
def householder(h_v: Vector) -> Matrix:
    """Get Householder transformation Matrix: I - 2 * v v^T / (v . v)."""
    identity = Matrix.identity(h_v.size())
    projection = 2 * h_v * h_v.transpose() / (h_v * h_v)
    return identity.subtract(projection)
def _suppression_loop_body(boxes, iou_threshold, output_size, idx):
  """Process boxes in the range [idx*_NMS_TILE_SIZE, (idx+1)*_NMS_TILE_SIZE).

  Args:
    boxes: a tensor with a shape of [batch_size, anchors, 4].
    iou_threshold: a float representing the threshold for deciding whether boxes
      overlap too much with respect to IOU.
    output_size: an int32 tensor of size [batch_size]. Representing the number
      of selected boxes for each batch.
    idx: an integer scalar representing induction variable.

  Returns:
    boxes: updated boxes.
    iou_threshold: pass down iou_threshold to the next iteration.
    output_size: the updated output_size.
    idx: the updated induction variable.
  """
  num_tiles = tf.shape(boxes)[1] // _NMS_TILE_SIZE
  batch_size = tf.shape(boxes)[0]
  # Iterates over tiles that can possibly suppress the current tile.
  box_slice = tf.slice(boxes, [0, idx * _NMS_TILE_SIZE, 0],
                       [batch_size, _NMS_TILE_SIZE, 4])
  _, box_slice, _, _ = tf.while_loop(
      lambda _boxes, _box_slice, _threshold, inner_idx: inner_idx < idx,
      _cross_suppression, [boxes, box_slice, iou_threshold,
                           tf.constant(0)])
  # Iterates over the current tile to compute self-suppression.
  iou = box_utils.bbox_overlap(box_slice, box_slice)
  # Keep only strictly-upper-triangular IOU entries above the threshold, so a
  # box can only be suppressed by an earlier box in the same tile.
  mask = tf.expand_dims(
      tf.reshape(tf.range(_NMS_TILE_SIZE), [1, -1]) > tf.reshape(
          tf.range(_NMS_TILE_SIZE), [-1, 1]), 0)
  iou *= tf.cast(tf.logical_and(mask, iou >= iou_threshold), iou.dtype)
  suppressed_iou, _, _, _ = tf.while_loop(
      lambda _iou, loop_condition, _iou_sum, _: loop_condition,
      _self_suppression,
      [iou, tf.constant(True), tf.reduce_sum(iou, [1, 2]), iou_threshold])
  suppressed_box = tf.reduce_sum(suppressed_iou, 1) > 0
  # Zero out coordinates of suppressed boxes (all-zero boxes are not counted).
  box_slice *= tf.expand_dims(1.0 - tf.cast(suppressed_box, box_slice.dtype), 2)
  # Uses box_slice to update the input boxes.
  mask = tf.reshape(
      tf.cast(tf.equal(tf.range(num_tiles), idx), boxes.dtype), [1, -1, 1, 1])
  boxes = tf.tile(tf.expand_dims(
      box_slice, [1]), [1, num_tiles, 1, 1]) * mask + tf.reshape(
          boxes, [batch_size, num_tiles, _NMS_TILE_SIZE, 4]) * (1 - mask)
  boxes = tf.reshape(boxes, [batch_size, -1, 4])
  # Updates output_size with the count of surviving (non-zero) boxes.
  output_size += tf.reduce_sum(
      tf.cast(tf.reduce_any(box_slice > 0, [2]), tf.int32), [1])
  return boxes, iou_threshold, output_size, idx + 1
from typing import Iterable
import hashlib
import json
def calculate_invalidation_digest(requirements: Iterable[str]) -> str:
    """Returns an invalidation digest for the given requirements.

    The digest depends only on the set of requirement strings: duplicates
    and input ordering do not change the result.
    """
    m = hashlib.sha256()
    inputs = {
        # Deduplicate with a plain stdlib set; `sorted()` then fixes the
        # ordering, so the project-local FrozenOrderedSet the original used
        # is unnecessary and the digest is unchanged.
        "requirements": sorted(set(requirements)),
    }
    m.update(json.dumps(inputs).encode("utf-8"))
    return m.hexdigest()
def to_base_str(n, base):
    """Converts a number n into base `base`.

    Args:
        n: Non-negative integer to convert.
        base: Target base, between 2 and 36 inclusive.

    Returns:
        The representation of ``n`` in the given base, using digits 0-9A-Z.

    Raises:
        ValueError: if ``n`` is negative or ``base`` is out of range.
    """
    convert_string = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
    # Guard against inputs the original silently mishandled: a negative n
    # recursed forever (or indexed from the end of the alphabet), and a base
    # greater than 36 indexed past the digit alphabet.
    if n < 0:
        raise ValueError("n must be non-negative, got {}".format(n))
    if not 2 <= base <= len(convert_string):
        raise ValueError("base must be in [2, 36], got {}".format(base))
    if n < base:
        return convert_string[n]
    return to_base_str(n // base, base) + convert_string[n % base]
def number_of_photons(i, n=6):
    """Check if number of photons in a sample is higher than n (default value is 6)"""
    return sum(tuple(i)) > n
from typing import List
from typing import Optional
from typing import Dict
def _build_conflicts_from_states(
    trackers: List[TrackerWithCachedStates],
    domain: Domain,
    max_history: Optional[int],
    conflicting_state_action_mapping: Dict[int, Optional[List[Text]]],
    tokenizer: Optional[Tokenizer] = None,
) -> List["StoryConflict"]:
    """Builds a list of `StoryConflict` objects for each given conflict.

    Args:
        trackers: Trackers that contain the states.
        domain: The domain object.
        max_history: Number of turns to take into account for the state descriptions.
        conflicting_state_action_mapping: A dictionary mapping state-hashes to a list
            of actions that follow from each state.
        tokenizer: A tokenizer to tokenize the user messages.

    Returns:
        A list of `StoryConflict` objects that describe inconsistencies in the story
        structure. These objects also contain the history that leads up to the conflict.
    """
    # Iterate once more over all states and note the (unhashed) state,
    # for which a conflict occurs
    conflicts = {}
    for element in _sliced_states_iterator(trackers, domain, max_history, tokenizer):
        hashed_state = element.sliced_states_hash
        if hashed_state in conflicting_state_action_mapping:
            # Create one StoryConflict per conflicting state and collect every
            # (action, story) pair observed after that state.
            if hashed_state not in conflicts:
                conflicts[hashed_state] = StoryConflict(element.sliced_states)
            conflicts[hashed_state].add_conflicting_action(
                action=str(element.event), story_name=element.tracker.sender_id
            )
    # Return list of conflicts that arise from unpredictable actions
    # (actions that start the conversation)
    return [
        conflict
        for (hashed_state, conflict) in conflicts.items()
        if conflict.conflict_has_prior_events
    ]
def inception_d(input_layer, nfilt):
    # Corresponds to a modified version of figure 10 in the paper
    """
    Inception grid-reduction block (three parallel branches, stride-2 output).

    Parameters
    ----------
    input_layer :
        Layer/tensor to branch from.
    nfilt :
        Nested sequence of filter counts: nfilt[0] feeds the
        1x1 -> 3x3(stride 2) branch (2 entries); nfilt[1] feeds the
        1x1 -> 1x7 -> 7x1 -> 3x3(stride 2) branch (4 entries).

    Returns
    -------
    ConcatLayer joining the two convolutional branches with a stride-2
    3x3 pooling branch; spatial resolution is halved on every path.
    """
    l1 = bn_conv(input_layer, num_filters=nfilt[0][0], filter_size=1)
    l1 = bn_conv(l1, num_filters=nfilt[0][1], filter_size=3, stride=2)
    l2 = bn_conv(input_layer, num_filters=nfilt[1][0], filter_size=1)
    # Factorized 7x7 convolution: 1x7 followed by 7x1.
    l2 = bn_conv(l2, num_filters=nfilt[1][1], filter_size=(1, 7), pad=(0, 3))
    l2 = bn_conv(l2, num_filters=nfilt[1][2], filter_size=(7, 1), pad=(3, 0))
    l2 = bn_conv(l2, num_filters=nfilt[1][3], filter_size=3, stride=2)
    l3 = Pool2DLayer(input_layer, pool_size=3, stride=2)
    return ConcatLayer([l1, l2, l3])
def generate_response(response, output):
    """
    Normalize a command result (dict or object) into a response payload.

    :param response: a dict or object carrying ``ok`` and ``command`` fields
    :param output: raw output to pass through unchanged
    :return: dictionary with ``status``, ``command`` and ``output`` keys
    """
    if isinstance(response, dict):
        status = response.get('ok')
        # BUG FIX: the original discarded response.get('command'), so
        # `command` was always None for dict responses.
        command = response.get('command')
    else:
        # Fallback for object-like responses (the original's
        # `isinstance(response, object)` check was always true).
        status = getattr(response, 'ok', None)
        command = getattr(response, 'command', None)
    return {
        'status': 'successful' if status else 'failed',
        'command': command,
        'output': output
    }
def check_add_predecessor(data_predecessor_str_set, xml_data_list, xml_chain_list, output_xml):
    """
    Check if each string in data_predecessor_str_set is corresponding to an actual Data object,
    create new [Data, predecessor] objects lists for object's type : Data.
    Send lists to add_predecessor() to write them within xml and then returns update_list from it.
    Parameters:
        data_predecessor_str_set ([str]) : Lists of string from jarvis cell
        xml_data_list ([Data]) : Data list from xml parsing
        xml_chain_list ([View]) : View list from xml parsing
        output_xml (GenerateXML object) : XML's file object
    Returns:
        update ([0/1]) : 1 if update, else 0
    """
    data_predecessor_list = []
    allocated_item_list = []
    # Filter input string
    data_predecessor_str_list = shared_orchestrator.cut_string_list(data_predecessor_str_set)
    # Create data names list already in xml
    xml_data_name_list = get_objects_names(xml_data_list)
    # First pass: verify every (data, predecessor) name pair exists in the xml.
    # NOTE(review): is_elem_found reflects only the LAST pair checked; earlier
    # failures are reported via print but do not block later pairs -- confirm
    # this is the intended behavior.
    is_elem_found = False
    for elem in data_predecessor_str_list:
        is_elem_found = True
        if elem[0] not in xml_data_name_list:
            is_elem_found = False
            if elem[1] not in xml_data_name_list:
                print(f"{elem[0]} and {elem[1]} do not exist")
            else:
                print(f"{elem[0]} does not exist")
        if elem[0] in xml_data_name_list:
            if elem[1] not in xml_data_name_list:
                is_elem_found = False
                print(f"{elem[1]} does not exist")
    if is_elem_found:
        # Second pass: resolve names to Data objects, skipping predecessors
        # that are already linked to the selected data.
        for d, p in data_predecessor_str_list:
            predecessor = None
            selected_data = None
            existing_predecessor_id_list = []
            for data in xml_data_list:
                if d == data.name:
                    selected_data = data
                    for existing_predecessor in data.predecessor_list:
                        existing_predecessor_id_list.append(existing_predecessor.id)
            for da in xml_data_list:
                if p == da.name and da.id not in existing_predecessor_id_list:
                    predecessor = da
            if predecessor is not None and selected_data is not None:
                data_predecessor_list.append([selected_data, predecessor])
            # Record chain allocations for both ends of the relationship.
            allocation_chain_1 = shared_orchestrator.check_add_allocated_item(d,
                                                                              xml_data_list,
                                                                              xml_chain_list)
            if allocation_chain_1:
                allocated_item_list.append(allocation_chain_1)
            allocation_chain_2 = shared_orchestrator.check_add_allocated_item(p,
                                                                              xml_data_list,
                                                                              xml_chain_list)
            if allocation_chain_2:
                allocated_item_list.append(allocation_chain_2)
    # Write the predecessor links and chain allocations into the output xml.
    update = add_predecessor(data_predecessor_list, xml_data_list, output_xml)
    shared_orchestrator.add_allocation({5: allocated_item_list}, output_xml)
    return update
def try_wrapper(func, *args, ret_=None, msg_="", verbose_=True, **kwargs):
    """Wrap ``func(*args, **kwargs)`` with ``try-`` and ``except`` blocks.

    Args:
        func (functions) : functions.
        args (tuple) : ``*args`` for ``func``.
        kwargs (kwargs) : ``*kwargs`` for ``func``.
        ret_ (any) : default ret val, returned when ``func`` raises.
        msg_ (str) : message to print.
        verbose_ (bool) : Whether to print message or not. (default= ``True``)

    Examples:
        >>> from gummy.utils import try_wrapper
        >>> ret = try_wrapper(lambda x,y: x/y, 1, 2, msg_="divide")
        * Succeeded to divide
        >>> ret
        0.5
        >>> ret = try_wrapper(lambda x,y: x/y, 1, 0, msg_="divide")
        * Failed to divide (ZeroDivisionError: division by zero)
        >>> ret is None
        True
        >>> ret = try_wrapper(lambda x,y: x/y, 1, 0, ret_=1, msg_="divide")
        * Failed to divide (ZeroDivisionError: division by zero)
        >>> ret is None
        False
        >>> ret
        1
    """
    try:
        ret_ = func(*args, **kwargs)
        prefix = toGREEN("Succeeded to ")
        suffix = ""
    except Exception as e:
        # (removed a no-op `e.__class__.__name__` expression statement here)
        prefix = toRED("Failed to ")
        suffix = f" ({toRED(e.__class__.__name__)}: {toACCENT(e)})"
    if verbose_:
        print("* " + prefix + msg_ + suffix)
    return ret_
def hnet_bsd(args, x, train_phase):
    """High frequency convolutions are unstable, so get rid of them.

    Builds a 5-stage harmonic convolutional network for BSD edge detection.
    Each stage produces a 1-channel side prediction from the magnitudes of
    its harmonic features; the side predictions are upsampled to the input
    resolution and fused into a final map.

    Args:
        args: hyperparameter namespace (n_filters, filter_gain, batch_size,
            filter_size, n_channels, n_rings, std_mult, height, width).
        x: input image batch; reshaped below to [bs, height, width, 1, 1, 3].
        train_phase: training-mode flag forwarded to batch normalization.

    Returns:
        dict: keys 1..5 map to per-stage side predictions (input resolution),
        key 'fuse' maps to the fused prediction.
    """
    # Sure layers weight & bias
    order = 1  # NOTE(review): unused in this function — presumably a leftover
    nf = int(args.n_filters)
    nf2 = int((args.filter_gain)*nf)       # stage-2 width
    nf3 = int((args.filter_gain**2)*nf)    # stage-3 width
    nf4 = int((args.filter_gain**3)*nf)    # stages 4 and 5 width (growth capped)
    bs = args.batch_size
    fs = args.filter_size
    nch = args.n_channels  # NOTE(review): unused here — TODO confirm
    nr = args.n_rings
    tp = train_phase
    std = args.std_mult
    # Add singleton rotation-order/complex axes expected by the harmonic ops.
    x = tf.reshape(x, shape=[bs,args.height,args.width,1,1,3])
    fm = {}  # per-stage side feature maps
    # Convolutional Layers
    with tf.name_scope('stage1') as scope:
        cv1 = hl.conv2d(x, nf, fs, stddev=std, padding='SAME', n_rings=nr, name='1_1')
        cv1 = hl.non_linearity(cv1, name='1_1')
        cv2 = hl.conv2d(cv1, nf, fs, stddev=std, padding='SAME', n_rings=nr, name='1_2')
        cv2 = hl.batch_norm(cv2, tp, name='bn1')
        # Magnitudes of the complex responses feed a 1x1 linear side output.
        mags = to_4d(hl.stack_magnitudes(cv2))
        fm[1] = linear(mags, 1, 1, name='sw1')
    with tf.name_scope('stage2') as scope:
        cv3 = hl.mean_pooling(cv2, ksize=(1,2,2,1), strides=(1,2,2,1))
        cv3 = hl.conv2d(cv3, nf2, fs, stddev=std, padding='SAME', n_rings=nr, name='2_1')
        cv3 = hl.non_linearity(cv3, name='2_1')
        cv4 = hl.conv2d(cv3, nf2, fs, stddev=std, padding='SAME', n_rings=nr, name='2_2')
        cv4 = hl.batch_norm(cv4, train_phase, name='bn2')
        mags = to_4d(hl.stack_magnitudes(cv4))
        fm[2] = linear(mags, 1, 1, name='sw2')
    with tf.name_scope('stage3') as scope:
        cv5 = hl.mean_pooling(cv4, ksize=(1,2,2,1), strides=(1,2,2,1))
        cv5 = hl.conv2d(cv5, nf3, fs, stddev=std, padding='SAME', n_rings=nr, name='3_1')
        cv5 = hl.non_linearity(cv5, name='3_1')
        cv6 = hl.conv2d(cv5, nf3, fs, stddev=std, padding='SAME', n_rings=nr, name='3_2')
        cv6 = hl.batch_norm(cv6, train_phase, name='bn3')
        mags = to_4d(hl.stack_magnitudes(cv6))
        fm[3] = linear(mags, 1, 1, name='sw3')
    with tf.name_scope('stage4') as scope:
        cv7 = hl.mean_pooling(cv6, ksize=(1,2,2,1), strides=(1,2,2,1))
        cv7 = hl.conv2d(cv7, nf4, fs, stddev=std, padding='SAME', n_rings=nr, name='4_1')
        cv7 = hl.non_linearity(cv7, name='4_1')
        cv8 = hl.conv2d(cv7, nf4, fs, stddev=std, padding='SAME', n_rings=nr, name='4_2')
        cv8 = hl.batch_norm(cv8, train_phase, name='bn4')
        mags = to_4d(hl.stack_magnitudes(cv8))
        fm[4] = linear(mags, 1, 1, name='sw4')
    with tf.name_scope('stage5') as scope:
        cv9 = hl.mean_pooling(cv8, ksize=(1,2,2,1), strides=(1,2,2,1))
        cv9 = hl.conv2d(cv9, nf4, fs, stddev=std, padding='SAME', n_rings=nr, name='5_1')
        cv9 = hl.non_linearity(cv9, name='5_1')
        cv10 = hl.conv2d(cv9, nf4, fs, stddev=std, padding='SAME', n_rings=nr, name='5_2')
        cv10 = hl.batch_norm(cv10, train_phase, name='bn5')
        mags = to_4d(hl.stack_magnitudes(cv10))
        fm[5] = linear(mags, 1, 1, name='sw5')
    fms = {}
    side_preds = []
    xsh = tf.shape(x)
    with tf.name_scope('fusion') as scope:
        # Upsample every side prediction back to the input spatial size,
        # concatenate along channels, and fuse with a 1x1 linear layer.
        for key in fm.keys():
            fms[key] = tf.image.resize_images(fm[key], tf.stack([xsh[1], xsh[2]]))
            side_preds.append(fms[key])
        side_preds = tf.concat(axis=3, values=side_preds)
        fms['fuse'] = linear(side_preds, 1, 1, bias_init=0.01, name='side_preds')
    return fms
import numpy
def hdrValFilesToTrainingData(input_filebase: str, target_varname: str):
    """Load a .hdr/.val file pair into training arrays.

    ``input_filebase.hdr`` holds the variable names; ``input_filebase.val``
    holds one sample per row.  The column named ``target_varname`` becomes
    the target vector ``y``; the remaining columns become ``X``.

    Args:
        input_filebase -- path prefix shared by the two files
        target_varname -- name of the target variable

    Returns:
        Xy: 2d array [#vars][#samples] -- transpose of the .val data
        X:  2d array [#input_vars][#samples] -- Xy without the target row
        y:  1d array [#samples] -- row of Xy for target_varname
        all_varnames: List[str] -- contents of the .hdr file
        input_varnames: List[str] -- all_varnames minus target_varname
    """
    all_varnames = asciiRowToStrings(input_filebase + ".hdr")
    # Partition row indices into the single target row and the input rows.
    target_rows = [i for i, name in enumerate(all_varnames) if name == target_varname]
    input_rows = [i for i, name in enumerate(all_varnames) if name != target_varname]
    input_varnames = [name for name in all_varnames if name != target_varname]
    assert len(target_rows) == 1, "expected to find one and only one '%s', not: %s" % (
        target_varname,
        all_varnames,
    )
    Xy_tr = asciiTo2dArray(input_filebase + ".val")
    Xy = numpy.transpose(Xy_tr)
    X = numpy.take(Xy, input_rows, 0)
    y = numpy.take(Xy, target_rows, 0)[0]
    # Sanity-check the shapes agree with the header.
    assert X.shape[0] + 1 == Xy.shape[0] == len(input_varnames) + 1 == len(all_varnames)
    assert X.shape[1] == Xy.shape[1] == len(y)
    return Xy, X, y, all_varnames, input_varnames
def create_column(number_rows: int, column_type: ColumnType) -> pd.Series:
    """Build a pandas Series of the requested flavour.

    The series is either all-unique or all-duplicates, and either string
    or integer valued, depending on ``column_type``.

    :param number_rows: the number of rows in the data-frame.
    :param column_type: the type of the column.
    :returns: the data-frame.
    :raises ValueError: when ``column_type`` is not a known member.
    """
    if column_type == ColumnType.UNIQUE_STRING:
        return pd.Series(range(number_rows)).astype(str)
    if column_type == ColumnType.UNIQUE_INT:
        return pd.Series(range(number_rows))
    if column_type == ColumnType.WITH_DUPLICATES_STRING:
        return pd.Series(["a"] * number_rows)
    if column_type == ColumnType.WITH_DUPLICATES_INT:
        return pd.Series([2] * number_rows)
    raise ValueError(f"Unknown column-type: {column_type}")
import ctypes
def PumpEvents(timeout=-1, hevt=None, cb=None):
    """This following code waits for 'timeout' seconds in the way
    required for COM, internally doing the correct things depending
    on the COM appartment of the current thread. It is possible to
    terminate the message loop by pressing CTRL+C, which will raise
    a KeyboardInterrupt.

    Args:
        timeout: seconds to wait per iteration, or a callable returning
            that number; -1 means INFINITE (0xFFFFFFFF).
        hevt: optional Win32 event handle to wait on; created (and then
            owned/closed by this function) when None.
        cb: optional callable invoked each time the wait times out.

    Raises:
        KeyboardInterrupt: when the event is signaled (e.g. by CTRL+C).
    """
    # XXX Should there be a way to pass additional event handles which
    # can terminate this function?
    # XXX XXX XXX
    #
    # It may be that I misunderstood the CoWaitForMultipleHandles
    # function. Is a message loop required in a STA? Seems so...
    #
    # MSDN says:
    #
    # If the caller resides in a single-thread apartment,
    # CoWaitForMultipleHandles enters the COM modal loop, and the
    # thread's message loop will continue to dispatch messages using
    # the thread's message filter. If no message filter is registered
    # for the thread, the default COM message processing is used.
    #
    # If the calling thread resides in a multithread apartment (MTA),
    # CoWaitForMultipleHandles calls the Win32 function
    # MsgWaitForMultipleObjects.
    # Timeout expected as float in seconds - *1000 to miliseconds
    # timeout = -1 -> INFINITE 0xFFFFFFFF;
    # It can also be a callable which should return an amount in seconds
    if hevt is None:
        # Manual-reset, initially non-signaled event used as the CTRL+C flag.
        hevt = ctypes.windll.kernel32.CreateEventA(None, True, False, None)
    handles = _handles_type(hevt)
    RPC_S_CALLPENDING = -2147417835
    # @ctypes.WINFUNCTYPE(ctypes.c_int, ctypes.c_uint)
    def HandlerRoutine(dwCtrlType):
        # Console control handler: signal the event on CTRL+C so the wait
        # below wakes up and we can raise KeyboardInterrupt.
        if dwCtrlType == 0: # CTRL+C
            ctypes.windll.kernel32.SetEvent(hevt)
            return 1
        return 0
    HandlerRoutine = (
        ctypes.WINFUNCTYPE(ctypes.c_int, ctypes.c_uint)(HandlerRoutine)
    )
    ctypes.windll.kernel32.SetConsoleCtrlHandler(HandlerRoutine, 1)
    while True:
        try:
            tmout = timeout() # check if it's a callable
        except TypeError:
            tmout = timeout # it seems to be a number
        if tmout > 0:
            tmout *= 1000
            tmout = int(tmout)
        try:
            res = ctypes.oledll.ole32.CoWaitForMultipleHandles(
                0, # COWAIT_FLAGS
                int(tmout), # dwtimeout
                len(handles), # number of handles in handles
                handles, # handles array
                # pointer to indicate which handle was signaled
                ctypes.byref(ctypes.c_ulong())
            )
        except WindowsError as details:
            if details.args[0] == RPC_S_CALLPENDING: # timeout expired
                # Normal path: nothing signaled; notify the callback and wait again.
                if cb is not None:
                    cb()
                continue
            else:
                # Unexpected COM error: clean up and propagate.
                ctypes.windll.kernel32.CloseHandle(hevt)
                ctypes.windll.kernel32.SetConsoleCtrlHandler(HandlerRoutine, 0)
                raise # something else happened
        else:
            # The wait returned without RPC_S_CALLPENDING: the event was
            # signaled (CTRL+C) -- clean up and surface it as an interrupt.
            ctypes.windll.kernel32.CloseHandle(hevt)
            ctypes.windll.kernel32.SetConsoleCtrlHandler(HandlerRoutine, 0)
            raise KeyboardInterrupt
    # finally:
    # if False:
    # ctypes.windll.kernel32.CloseHandle(hevt)
    # ctypes.windll.kernel32.SetConsoleCtrlHandler(HandlerRoutine, 0)
    # break
def check_mix_up(method):
    """Decorator that validates the parameters of a mix-up operation.

    Checks batch_size, alpha and is_single before delegating to ``method``.
    """
    @wraps(method)
    def validated(self, *args, **kwargs):
        # Normalize positional/keyword arguments before validation.
        [batch_size, alpha, is_single], _ = parse_user_args(method, *args, **kwargs)
        check_value(batch_size, (1, FLOAT_MAX_INTEGER))
        check_positive(alpha, "alpha")
        type_check(is_single, (bool,), "is_single")
        return method(self, *args, **kwargs)
    return validated
def generate_motif_distances(cluster_regions, region_sizes, motifs, motif_location, species):
    """
    Generates all motif distances for a list of motifs.

    returns list[motif_distances]
    motif_location - str location that motifs are stored
    species - str species (for finding stored motifs)
    motifs - list of motifs to analyze
    cluster_regions - dict from parse clusters
    """
    motif_distance_list = []
    # Given a specific motif in a motif file, generate distances from that motif.
    for motif in motifs:
        mf = "motif_" + motif + ".BED"
        mfgz = "motif_" + motif + ".BED.gz"
        bed_path = os.path.join(motif_location, species, mf)
        gz_path = os.path.join(motif_location, species, mfgz)
        motif_tool = None
        if os.path.exists(bed_path):
            # BUG FIX: previously used undefined name `motifBASE` here; use
            # the `motif_location` parameter (as the gzip branch already did).
            motif_tool = pybedtools.BedTool(bed_path)
        elif os.path.exists(gz_path):
            motif_tool = pybedtools.BedTool(gz_path)
        else:
            # FIX: converted a Python-2 print statement to the print() function.
            print("MOTIF BED FILE for motif: %s is not available, please build it" % (mf))
        if motif_tool is not None:
            motif_distance_list.append(
                calculate_motif_distance(cluster_regions, region_sizes, motif_tool))
    return motif_distance_list
def sig_to_vrs(sig):
    """Split a 65-byte signature into its r, s, v components."""
    r, s = sig[:32], sig[32:64]
    v = int(encode_hex(sig[64:66]), 16)
    if v in (0, 1):
        # Ethereum magic number: normalise the recovery id to 27/28.
        v += 27
    return [r, s, v]
def index_to_string_table_from_tensor(mapping, default_value="UNK", name=None):
    """Returns a lookup table that maps a `Tensor` of indices into strings.

    The table maps int64 indices to the string found at that position in the
    1-D `mapping` tensor; out-of-vocabulary indices map to `default_value`.
    The underlying table must be initialized by calling
    `tf.tables_initializer.run()` or `table.init.run()` once.  Elements in
    `mapping` must be unique, otherwise the initializer op raises a
    `FailedPreconditionError`.

    Sample Usages:

    ```python
    mapping_string = tf.constant(["emerson", "lake", "palmer"])
    indices = tf.constant([1, 5], tf.int64)
    table = tf.contrib.lookup.index_to_string_table_from_tensor(
        mapping_string, default_value="UNKNOWN")
    values = table.lookup(indices)
    ...
    tf.tables_initializer().run()
    values.eval() ==> ["lake", "UNKNOWN"]
    ```

    Args:
        mapping: A 1-D string `Tensor` that specifies the strings to map from
            indices.
        default_value: The value to use for out-of-vocabulary indices.
        name: A name for this op (optional).

    Returns:
        The lookup table mapping `int64` index `Tensors` to string values.

    Raises:
        ValueError: when `mapping` is not set.
    """
    if mapping is None:
        raise ValueError("mapping must be specified.")
    # Thin wrapper: delegate to the canonical lookup_ops implementation.
    return lookup_ops.index_to_string_table_from_tensor(
        vocabulary_list=mapping, default_value=default_value, name=name)
def test_profile_reader_no_aws_config(monkeypatch, tmp_path, capsys):
    """Test profile reader when the aws config file is absent."""
    call_count = 0

    def fake_get_path():
        # Point the app at an empty temp dir and count lookups.
        nonlocal call_count
        call_count += 1
        return tmp_path

    monkeypatch.setattr(awsswitch, "get_path", fake_get_path)
    awsswitch.app()
    assert call_count == 1
    captured = capsys.readouterr()
    assert captured.out.split("\n")[0] == "AWS profile switcher"
    assert captured.err.split("\n")[0] == "AWS config path does not exist."
def get_exclusion_type(exclusion):
    """
    Utility function to get an exclusion's type object by finding the
    exclusion type whose code matches the given exclusion's code.

    :param exclusion: The exclusion to find the type for.
    :return: The exclusion type if found, None otherwise.
    """
    return next(
        (candidate for candidate in EXCLUSION_TYPES if candidate.code == exclusion.code),
        None,
    )
def generatePlans(update):
    """
    Ask every update module registered for this update whether it needs to add
    functions to change the state of the system when new chutes are added to
    the OS.

    Returns: True in error, as in we should stop with this update plan
    (implicitly None on success).
    """
    out.header('Generating %r\n' % (update))
    # First module reporting an error aborts the whole plan.
    for module in update.updateModuleList:
        if module.generatePlans(update):
            return True
from typing import Dict
def get_str_by_path(payload: Dict, path: str) -> str:
    """Return the value at ``path`` in ``payload`` as a string (dpath lookup).

    Returns None when the payload is None, the path is absent, or the stored
    value is None.
    """
    if payload is None:
        return None
    try:
        value = dpath_util.get(payload, path)
    except (IndexError, KeyError, TypeError):
        return None
    return str(value) if value is not None else value
import requests
import json
def get_token_from_code(request):
    """
    Exchange the authorization code the provider sent back for tokens.

    Discovers the token endpoint from the provider config, prepares and sends
    the token request, then parses the response with the OAuth 2 client so
    later calls can act on behalf of the user.
    """
    code = request.args.get("code")
    redirect_uri = request.args.get("redirect_uri")
    # Find out what URL to hit to get tokens.
    provider_cfg = requests.get(DISCOVERY_URL).json()
    token_endpoint = provider_cfg["token_endpoint"]
    token_url, headers, body = client.prepare_token_request(
        token_endpoint,
        authorization_response=request.url,
        redirect_url=redirect_uri,
        code=code,
        include_client_id=False,
    )
    response = requests.post(
        token_url,
        headers=headers,
        data=body,
        auth=(CLIENT_ID, SECRET),
    )
    token_payload = response.json()
    # Let the OAuth 2 client validate and record the tokens.
    client.parse_request_body_response(json.dumps(token_payload))
    return token_payload
def DeWeStartCAN(nBoardNo, nChannelNo):
    """Start CAN acquisition on the given board/channel.

    Returns the driver's status code, or -1 when the DLL entry point is
    unavailable.
    """
    if f_dewe_start_can is None:
        return -1
    return f_dewe_start_can(c_int(nBoardNo), c_int(nChannelNo))
def mw_wo_sw(mol, ndigits=2):
    """Molecular weight without salt and water.

    :param mol: input molecule (left unmodified).
    :param ndigits: number of digits to round to.
    """
    working = clone(mol)  # work on a copy so the caller's molecule is untouched
    remover.remove_water(working)
    remover.remove_salt(working)
    total_weight = sum(atom.mw() for _, atom in working.atoms_iter())
    return round(total_weight, ndigits)
def get_contact_list_info(contact_list):
    """
    Flatten a contact-list XML element into a list of contact dicts.

    In rgsummary the element is a <ContactList> (children <ContactType> and
    <Contacts>); in vosummary it is a <ContactType> (children <Type> and
    <Contacts>).  Each child tagged 'ContactType' or 'Type' sets the current
    contact type (lower-cased); each child tagged 'Contacts' contributes one
    dict per <Contact>, mapping each sub-element tag to its text plus the
    current type under the 'ContactType' key.

    Returns: a list of dicts that each look like:
        { 'ContactType': 'administrative contact',
          'Name': 'Matyas Selmeci',
          'Email': '...',
          ...
        }
    """
    results = []
    # NOTE(review): the original assumed the type element always precedes
    # <Contacts>; we keep that assumption but start from None for safety.
    current_type = None
    for element in contact_list:
        if element.tag in ('ContactType', 'Type'):
            current_type = element.text.lower()
        elif element.tag == 'Contacts':
            for contact in element:
                info = {'ContactType': current_type}
                for detail in contact:
                    info[detail.tag] = detail.text
                results.append(info)
    return results
import requests
def enableLegacyLDAP(host, args, session):
    """
    Called by the ldap function. Configures LDAP on legacy systems.

    @param host: string, the hostname or IP address of the bmc
    @param args: contains additional arguments used by the ldap subcommand
    @param session: the active session to use
    @param args.json: boolean, if this flag is set to true, the output will
        be provided in json format for programmatic consumption
    """
    url = 'https://' + host + '/xyz/openbmc_project/user/ldap/action/CreateConfig'
    scope_map = {
        'sub': 'xyz.openbmc_project.User.Ldap.Create.SearchScope.sub',
        'one': 'xyz.openbmc_project.User.Ldap.Create.SearchScope.one',
        'base': 'xyz.openbmc_project.User.Ldap.Create.SearchScope.base',
    }
    server_type_map = {
        'ActiveDirectory': 'xyz.openbmc_project.User.Ldap.Create.Type.ActiveDirectory',
        'OpenLDAP': 'xyz.openbmc_project.User.Ldap.Create.Type.OpenLdap',
    }
    # Payload ordering matches the CreateConfig D-Bus signature.
    payload = {"data": [args.uri, args.bindDN, args.baseDN, args.bindPassword,
                        scope_map[args.scope], server_type_map[args.serverType]]}
    try:
        response = session.post(url, headers=jsonHeader, json=payload,
                                verify=False, timeout=baseTimeout)
    except requests.exceptions.Timeout:
        return connectionErrHandler(args.json, "Timeout", None)
    except requests.exceptions.ConnectionError as err:
        return connectionErrHandler(args.json, "ConnectionError", err)
    return response.text
from typing import List
from typing import Dict
from typing import OrderedDict
def retrieve_panelist_appearance_counts(panelist_id: int,
                                        database_connection: mysql.connector.connect
                                        ) -> List[Dict]:
    """Retrieve yearly appearance counts for the requested panelist ID.

    Best-of and repeat shows are excluded.  Returns an OrderedDict mapping
    year -> count with a final "total" key, or None when the panelist has
    no qualifying appearances.
    """
    query = ("SELECT YEAR(s.showdate) AS year, COUNT(p.panelist) AS count "
             "FROM ww_showpnlmap pm "
             "JOIN ww_shows s ON s.showid = pm.showid "
             "JOIN ww_panelists p ON p.panelistid = pm.panelistid "
             "WHERE pm.panelistid = %s AND s.bestof = 0 "
             "AND s.repeatshowid IS NULL "
             "GROUP BY p.panelist, YEAR(s.showdate) "
             "ORDER BY p.panelist ASC, YEAR(s.showdate) ASC")
    cursor = database_connection.cursor()
    cursor.execute(query, (panelist_id, ))
    rows = cursor.fetchall()
    cursor.close()
    if not rows:
        return None
    appearances = OrderedDict()
    for year, count in rows:
        appearances[year] = count
    appearances["total"] = sum(count for _, count in rows)
    return appearances
import datetime
def chart(
    symbols=("AAPL", "GLD", "GOOG", "$SPX", "XOM", "msft"),
    start=datetime.datetime(2008, 1, 1),
    end=datetime.datetime(2009, 12, 31), # data stops at 2013/1/1
    normalize=True,
):
    """Display a graph of the price history for the list of ticker symbols provided

    BUG FIX: the module previously did ``from datetime import datetime`` while
    this function uses ``datetime.datetime``/``datetime.date``/``datetime.timedelta``,
    which raises AttributeError at import time; import the module instead.

    Arguments:
        symbols (list of str): Ticker symbols like "GOOG", "AAPL", etc
        start (datetime): The date at the start of the period being analyzed.
        end (datetime): The date at the end of the period being analyzed.
        normalize (bool): Whether to normalize prices to 1 at the start of the time series.
    """
    start = util.normalize_date(start or datetime.date(2008, 1, 1))
    end = util.normalize_date(end or datetime.date(2009, 12, 31))
    symbols = [s.upper() for s in symbols]
    # Sample prices at the NYSE close (16:00) on each trading day.
    timeofday = datetime.timedelta(hours=16)
    timestamps = du.getNYSEdays(start, end, timeofday)
    ls_keys = ['open', 'high', 'low', 'close', 'volume', 'actual_close']
    ldf_data = da.get_data(timestamps, symbols, ls_keys)
    d_data = dict(zip(ls_keys, ldf_data))
    na_price = d_data['close'].values
    if normalize:
        # Scale every series so that it starts at 1.0.
        na_price /= na_price[0, :]
    plt.clf()
    plt.plot(timestamps, na_price)
    plt.legend(symbols)
    plt.ylabel('Adjusted Close')
    plt.xlabel('Date')
    plt.savefig('chart.pdf', format='pdf')
    plt.grid(True)
    plt.show()
    return na_price
def create_table(peak_id, chrom, pstart, pend, p_center, min_dist_hit, attrib_keys, min_pos, genom_loc, ovl_pf, ovl_fp, i):
    """Render one hit as a tab-separated table row (newline terminated).

    ``min_dist_hit`` is ``[dist, [feat, fstart, fend, strand(, attrib_val)]]``;
    the attribute value is present only when ``attrib_keys != ["None"]``.
    A list of distances is collapsed to its maximum before formatting.
    """
    dist, hit = min_dist_hit
    if isinstance(dist, list):
        dist = max(dist)
    dist = '%d' % round(dist, 1)
    if attrib_keys != ["None"]:
        feat, fstart, fend, strand, attrib_val = hit
        # np.hstack flattens a possibly-list attrib_val into the row.
        columns = np.hstack([peak_id, chrom, pstart, p_center, pend, feat, fstart,
                             fend, strand, min_pos, dist, genom_loc,
                             str(ovl_pf), str(ovl_fp), attrib_val, str(i)])
        return "\t".join(columns) + "\n"
    feat, fstart, fend, strand = hit
    columns = [peak_id, chrom, pstart, p_center, pend, feat, fstart,
               fend, strand, min_pos, dist, genom_loc,
               str(ovl_pf), str(ovl_fp), str(i)]
    return "\t".join(columns) + "\n"
def _get_tests(tier):
    """Look up the test functions registered for ``tier``.

    :param tier: key into the TEST_TIERS registry.
    :returns: the registered generator of test functions.
    :raises KeyError: when ``tier`` is not registered.
    """
    return TEST_TIERS[tier]
def get_first_of_iterable(iterable):
    """
    Return the first element of the given sequence.
    Most useful on generator types.

    :param iterable iterable: input iterable
    :returns: tuple(iterable, first_element). If a generator is passed,
        a new generator is returned that preserves the original values.
    :raises: IndexError

    Example
    _______
    >>> a = [1,2,3]
    >>> b = (str(i) for i in range(3))
    >>> a, first_element = get_first_of_iterable(a)
    >>> a, first_element
    ([1, 2, 3], 1)

    When the generator ``b`` is given, a fresh generator is returned so the
    consumed first value is not lost:

    >>> b, first_element = get_first_of_iterable(b)
    >>> next(b), first_element
    ('0', '0')
    """
    # Indexable sequences can be returned untouched.
    if hasattr(iterable, '__getitem__'):
        return iterable, iterable[0]
    iterator = iter(iterable)
    for head in iterator:
        # Re-attach the consumed head in front of the remaining items.
        return chain([head], iterator), head
    raise IndexError('`iterable` is empty')
import sqlite3
def getTiers(connection=None):
    """Return the distinct tier names from the hierarchy table.

    :param connection: optional open sqlite3 connection; when omitted the
        master database is opened (and closed again) by this function.
    :returns: list of tier names.
    """
    owns_connection = connection is None
    if owns_connection:
        connection = sqlite3.connect(MASTER)
    cursor = connection.cursor()
    cursor.execute("""SELECT DISTINCT tier FROM hierarchy""")
    tiers = [row[0] for row in cursor.fetchall()]
    cursor.close()
    # Read-only query: nothing to commit; just close what we opened.
    if owns_connection:
        connection.close()
    return tiers
from typing import Any
from typing import get_type_hints
from typing import get_origin
from typing import Union
from typing import get_args
def get_repr_type(type_: Any) -> Any:
    """Parse a type and return a representative type.

    Example:
        All of the following expressions will be ``True``::

            get_repr_type(A) == A
            get_repr_type(Annotated[A, ...]) == A
            get_repr_type(Union[A, B, ...]) == A
            get_repr_type(Optional[A]) == A
    """
    class _Holder:
        # get_type_hints resolves forward refs and strips Annotated wrappers.
        __annotations__ = {"type": type_}

    resolved = get_type_hints(_Holder)["type"]
    if get_origin(resolved) is Union:
        # For unions (including Optional), the first member represents the type.
        return get_args(resolved)[0]
    return resolved
import datetime
import calendar
def get_month_range_from_dict(source):
    """
    Extract a (start, end) datetime range from a dict.

    BUG FIX: the module previously did ``from datetime import datetime`` while
    this function uses ``datetime.datetime(...)``, which raises AttributeError;
    import the module instead.

    :param source: dictionary with optional keys 'start' and 'end'.
    :return: a tuple of datetime objects in the form (start, end). 'end'
        defaults to the last day of the current month; 'start' defaults to a
        twelve-month window ending at 'end'.
    """
    now = timezone.now()
    start = source.get('start')
    end = source.get(
        'end',
        datetime.datetime(now.year, now.month,
                          calendar.monthrange(now.year, now.month)[1]))
    if not start:
        # Default to the first day of the month following `end`, previous year.
        if end.month != 12:
            start = datetime.datetime(end.year - 1, end.month + 1, 1)
        else:
            start = datetime.datetime(end.year, 1, 1)
    return start, end
def softmax_ad_set_dim_func(head, data, axis):
    """Look up the softmax_ad_set_dim_map, and return (hash_value, hash_key).

    The key is built from the data's shape, dtype and the reduction axis;
    an empty string is returned as the value when no entry is registered.
    """
    hash_key = str((tuple(data.shape), data.dtype, axis))
    if hash_key in softmax_ad_set_dim_map:
        return ct_util.set_dims(softmax_ad_set_dim_map[hash_key]), hash_key
    return "", hash_key
import csv
def parse_latency_stats(fp):
    """
    Parse latency statistics.

    Each space-delimited row holds a title and a time in seconds; times are
    converted to milliseconds and averaged.

    :param fp: the file path that stores the statistics
    :returns: average latency in milliseconds to connect a pair of initiator
        and responder clients
    """
    with open(fp) as stats_file:
        reader = csv.DictReader(stats_file, delimiter=' ',
                                fieldnames=['title', 'time'])
        samples = [float(row['time']) * 1000 for row in reader]
    return sum(samples) / len(samples)
def micro_jaccard(y_true, y_pred):
    """
    Calculate the micro Jaccard-score, i.e. TP / (TP + FP + FN).

    :param y_true: `numpy.array` of shape `(n_samples,)` or
        `(n_samples, n_classes)`. True labels or class assignments.
    :param y_pred: `numpy.array` of shape `(n_samples,)` or
        `(n_samples, n_classes)`. Predicted labels or class assignments.
    :return: The micro Jaccard-score.
    """
    # Thin wrapper: micro-averaging pools TP/FP/FN over all classes.
    return jaccard_score(y_true, y_pred, average='micro')
def round_to_nreads(number_set, n_reads, digit_after_decimal=0):
    """
    Apportion ``n_reads`` across ``number_set`` proportionally, so the
    rounded parts add up exactly to ``n_reads`` (largest-remainder method).

    :param number_set: weights for each share.
    :param n_reads: total to distribute.
    :param digit_after_decimal: decimal precision of the returned shares.
    :returns: list of floats summing to ``n_reads``.
    """
    scale = 10 ** digit_after_decimal
    total = float(sum(number_set))
    scaled = [x / total * n_reads * scale for x in number_set]
    # Indices ordered by descending fractional part (stable for ties).
    by_fraction = sorted(range(len(scaled)), key=lambda idx: scaled[idx] % 1,
                         reverse=True)
    leftover = n_reads * scale - sum(int(v) for v in scaled)
    pos = 0
    while leftover > 0:
        # Hand out the remaining units to the largest fractional parts first.
        scaled[by_fraction[pos]] += 1
        leftover -= 1
        pos = (pos + 1) % len(number_set)
    return [int(v) / float(scale) for v in scaled]
def _get_raster_extent(src):
"""
extract projected extent from a raster dataset
(min_x, max_x, min_y, max_y)
Parameters
----------
src : gdal raster
Returns
-------
(min_x, max_x, min_y, max_y)
"""
ulx, xres, xskew, uly, yskew, yres = src.GetGeoTransform()
lrx = ulx + (src.RasterXSize * xres)
lry = uly + (src.RasterYSize * yres)
return ulx, lrx, lry, uly | 49ed0b3c583cbfa5b9ecbc96d94aec42aeba3a32 | 10,345 |
def joined_table_table_join_args(joined_table: SQLParser.JoinedTableContext) -> dict:
    """
    Resolve a joinedTable ParseTree node into relevant keyword arguments for
    TableJoin.  These will be pushed down and applied to the child TableRef.
    """
    assert isinstance(joined_table, SQLParser.JoinedTableContext)
    expr = joined_table.expr()
    on_clauses = sql_ast_clauses_from_expr(expr) if expr is not None else None
    id_list = joined_table.identifierListWithParentheses()
    if id_list is not None:
        using_columns = sql_ast_identifiers_from_list(id_list.identifierList())
    else:
        using_columns = None
    # Join-type kwargs are merged last so they take precedence on key clashes.
    args = {"on_clauses": on_clauses, "using_columns": using_columns}
    args.update(join_type_table_join_args(joined_table))
    return args
def find_by_name(name):
    """
    Find and return a format by name.

    :param name: A string describing the name of the format.
    :raises UnknownFormat: when no registered format matches.
    """
    for fmt in FORMATS:
        if fmt.name == name:
            return fmt
    raise UnknownFormat('No format found with name "%s"' % name)
def create_neighborhood_polygons(gdf):
    """Build neighborhood polygons from asset points.

    For every unique value in the 'Neighborhood' column, the convex hull of
    that neighborhood's point geometries is stored in a new
    'neighborhood_shape' column on each of its rows.
    """
    gdf = gdf.reset_index()
    # Seed the new column with the point geometries, then overwrite per group.
    gdf['neighborhood_shape'] = gdf.geometry
    for neighborhood in gdf['Neighborhood'].unique():
        subset = gdf[gdf['Neighborhood'] == neighborhood]
        hull = subset.geometry.unary_union.convex_hull
        gdf.loc[subset.index, 'neighborhood_shape'] = hull
    return gdf
def custom_gradient(f=None):
  """Decorator to define a function with a custom gradient.

  This decorator allows fine grained control over the gradients of a sequence
  for operations. This may be useful for multiple reasons, including providing
  a more efficient or numerically stable gradient for a sequence of operations.

  For example, consider the following function that commonly occurs in the
  computation of cross entropy and log likelihoods:

  ```python
  def log1pexp(x):
    return tf.math.log(1 + tf.exp(x))
  ```

  Due to numerical instability, the gradient of this function evaluated at x=100
  is NaN. For example:

  ```python
  x = tf.constant(100.)
  y = log1pexp(x)
  dy = tf.gradients(y, x) # Will be NaN when evaluated.
  ```

  The gradient expression can be analytically simplified to provide numerical
  stability:

  ```python
  @tf.custom_gradient
  def log1pexp(x):
    e = tf.exp(x)
    def grad(dy):
      return dy * (1 - 1 / (1 + e))
    return tf.math.log(1 + e), grad
  ```

  With this definition, the gradient at x=100 will be correctly evaluated as
  1.0.

  Nesting custom gradients can lead to unintuitive results. The default
  behavior does not correspond to n-th order derivatives. For example

  ```python
  @tf.custom_gradient
  def op(x):
    y = op1(x)
    @tf.custom_gradient
    def grad_fn(dy):
      gdy = op2(x, y, dy)
      def grad_grad_fn(ddy): # Not the 2nd order gradient of op w.r.t. x.
        return op3(x, y, dy, ddy)
      return gdy, grad_grad_fn
    return y, grad_fn
  ```

  The function `grad_grad_fn` will be calculating the first order gradient
  of `grad_fn` with respect to `dy`, which is used to generate forward-mode
  gradient graphs from backward-mode gradient graphs, but is not the same as
  the second order gradient of `op` with respect to `x`.

  Instead, wrap nested `@tf.custom_gradients` in another function:

  ```python
  @tf.custom_gradient
  def op_with_fused_backprop(x):
    y, x_grad = fused_op(x)
    def first_order_gradient(dy):
      @tf.custom_gradient
      def first_order_custom(unused_x):
        def second_order_and_transpose(ddy):
          return second_order_for_x(...), gradient_wrt_dy(...)
        return x_grad, second_order_and_transpose
      return dy * first_order_custom(x)
    return y, first_order_gradient
  ```

  Additional arguments to the inner `@tf.custom_gradient`-decorated function
  control the expected return values of the innermost function.

  See also `tf.RegisterGradient` which registers a gradient function for a
  primitive TensorFlow operation. `tf.custom_gradient` on the other hand allows
  for fine grained control over the gradient computation of a sequence of
  operations.

  Note that if the decorated function uses `Variable`s, the enclosing variable
  scope must be using `ResourceVariable`s.

  Args:
    f: function `f(*x)` that returns a tuple `(y, grad_fn)` where:
      - `x` is a sequence of (nested structures of) `Tensor` inputs to the
        function.
      - `y` is a (nested structure of) `Tensor` outputs of applying TensorFlow
        operations in `f` to `x`.
      - `grad_fn` is a function with the signature `g(*grad_ys)` which returns
        a list of `Tensor`s the same size as (flattened) `x` - the derivatives
        of `Tensor`s in `y` with respect to the `Tensor`s in `x`. `grad_ys` is
        a sequence of `Tensor`s the same size as (flattened) `y` holding the
        initial value gradients for each `Tensor` in `y`.

        In a pure mathematical sense, a vector-argument vector-valued function
        `f`'s derivatives should be its Jacobian matrix `J`. Here we are
        expressing the Jacobian `J` as a function `grad_fn` which defines how
        `J` will transform a vector `grad_ys` when left-multiplied with it
        (`grad_ys * J`, the vector-Jacobian product, or VJP). This functional
        representation of a matrix is convenient to use for chain-rule
        calculation (in e.g. the back-propagation algorithm).

        If `f` uses `Variable`s (that are not part of the
        inputs), i.e. through `get_variable`, then `grad_fn` should have
        signature `g(*grad_ys, variables=None)`, where `variables` is a list of
        the `Variable`s, and return a 2-tuple `(grad_xs, grad_vars)`, where
        `grad_xs` is the same as above, and `grad_vars` is a `list<Tensor>`
        with the derivatives of `Tensor`s in `y` with respect to the variables
        (that is, grad_vars has one Tensor per variable in variables).

  Returns:
    A function `h(x)` which returns the same value as `f(x)[0]` and whose
    gradient (as calculated by `tf.gradients`) is determined by `f(x)[1]`.
  """
  # Support both bare `@custom_gradient` and `@custom_gradient()` usage:
  # when called without a function, return a decorator expecting one.
  if f is None:
    return lambda f: custom_gradient(f=f)

  @Bind.decorator
  def decorated(wrapped, args, kwargs):
    """Decorated function with custom gradient."""
    # Dispatch on execution mode: eager and graph construction register the
    # custom gradient through different code paths.
    if context.executing_eagerly():
      return _eager_mode_decorator(wrapped, args, kwargs)
    else:
      return _graph_mode_decorator(wrapped, args, kwargs)

  return tf_decorator.make_decorator(f, decorated(f))  # pylint: disable=no-value-for-parameter
def Field(name,
          ctype,
          field_loader=FieldLoaderMethod.OPTIONAL,
          comment=None,
          gen_setters_and_getters=True):
  """Build the descriptor dict for one field of a generated node class.

  Args:
    name: field name (without trailing underscore).
    ctype: C++ type for this field; either a ScalarType (int, string, enum
      type) or the name of a node class type (e.g. ASTExpression).
      Cannot be a pointer type and should not include modifiers like const.
    field_loader: FieldLoaderMethod enum specifying which FieldLoader
      method to use for this field.
    comment: comment text for this field; stripped and de-indented.
    gen_setters_and_getters: when False, suppress generation of the default
      template-based get and set methods (non-standard alternatives may be
      supplied via extra_defs).

  Returns:
    A dict describing the field, consumed by the code generator templates.
  """
  # REST_AS_REPEATED is the only loader that yields a repeated field.
  is_vector = field_loader == FieldLoaderMethod.REST_AS_REPEATED
  member_name = name + '_'
  if isinstance(ctype, ScalarType):
    # Scalar fields are stored by value with the type's own default.
    member_type = ctype.ctype
    cpp_default = ctype.cpp_default
    is_node_ptr = False
    enum_name = None
    element_storage_type = None
  else:
    # Node fields are stored as const pointers (or spans of them).
    element_storage_type = 'const %s*' % ctype
    if is_vector:
      member_type = 'absl::Span<%s const>' % element_storage_type
      cpp_default = ''
      is_node_ptr = False
      enum_name = None
    else:
      member_type = element_storage_type
      cpp_default = 'nullptr'
      is_node_ptr = True
      enum_name = NameToEnumName(ctype)
  return {
      'ctype': ctype,
      'cpp_default': cpp_default,
      'member_name': member_name,  # member variable name
      'name': name,  # name without trailing underscore
      'comment': CleanComment(comment, prefix='  // '),
      'member_type': member_type,
      'is_node_ptr': is_node_ptr,
      'field_loader': field_loader.name,
      'enum_name': enum_name,
      'is_vector': is_vector,
      'element_storage_type': element_storage_type,
      'gen_setters_and_getters': gen_setters_and_getters,
  }
from typing import Tuple
from typing import List
def sql(dataframe: pd.DataFrame) -> Tuple[pd.DataFrame, pd.DataFrame, List[str], str]:
    """Infer best-fit data types for each column and derive SQL metadata.

    Columns are converted in three passes: numeric-like (bit, tinyint,
    smallint, int, bigint, float), then datetime-like (time, date,
    datetime2), then string-like (varchar, nvarchar).

    Parameters
    ----------
    dataframe (pandas.DataFrame) : contains unconverted and non-downcasted columns

    Returns
    -------
    dataframe (pandas.DataFrame) : columns converted to best-fit pandas dtypes
    schema (pandas.DataFrame) : derived SQL schema
    not_nullable (list[str]) : columns that should not be null
    pk (str) : name of the column that best fits as the primary key
    """
    converted = convert_string(convert_date(convert_numeric(dataframe)))
    schema = sql_schema(converted)
    not_nullable, pk = sql_unique(converted, schema)
    return converted, schema, not_nullable, pk
def calculateDeviation(img, lineLeft, lineRight):
    """Compute the vehicle's lateral offset from the image centre.

    The lane centre is the (integer) mean of the two lane lines' smoothed
    base positions; the pixel offset is scaled by an assumed 3.7 m lane
    width over 450 px.

    :param img: image array; only ``img.shape[1]`` (width) is used
    :param lineLeft: line object exposing a ``bestx`` pixel position
    :param lineRight: line object exposing a ``bestx`` pixel position
    :return: (deviation in metres, human-readable direction string)
    """
    lane_centre = np.mean([lineLeft.bestx, lineRight.bestx], dtype=np.int32)
    image_centre = img.shape[1] // 2
    pixel_offset = lane_centre - image_centre
    metres_per_pixel = 3.7 / 450  # meters per pixel in x dimension
    offset_m = pixel_offset * metres_per_pixel
    if pixel_offset > 0.01:
        message = "Vehicle is {:.2f} m -->".format(abs(offset_m))
    elif pixel_offset < -0.01:
        message = "Vehicle is {:.2f} m <--".format(abs(offset_m))
    else:
        message = "Vehicle is spot on center!"
    return offset_m, message
import requests
def post_new_tracker_story(message, project_id, user):
    """Create a Pivotal Tracker story from a chat message.

    The text before the first ';' becomes the story name (tagged with the
    reporting user); anything after it becomes the description.

    :return: tuple of (raw name portion, URL of the created story)
    """
    # partition() yields ('', '') for the tail when no ';' is present.
    name, _, description = message.partition(";")
    story_name = "{name} (from {user})".format(
        name=name.strip(), user=user)
    response = requests.post(
        story_post_url.format(project_id=project_id),
        headers=pivotal_headers,
        json={"name": story_name,
              "description": description.strip()})
    return name, response.json()["url"]
def nz2epsmu(N, Z):
    """Convert refractive index N and wave impedance Z into the effective
    permittivity and permeability: (eps, mu) = (N/Z, N*Z)."""
    permittivity = N / Z
    permeability = N * Z
    return permittivity, permeability
import scipy
def logdet_symm(m, check_symm=False):
    """
    Return log(det(m)) asserting positive definiteness of m.

    Parameters
    ----------
    m : array-like
        2d array that is positive-definite (and symmetric).
    check_symm : bool
        When True, verify symmetry first and raise ValueError otherwise.

    Returns
    -------
    logdet : float
        The log-determinant of m.
    """
    if check_symm and not np.all(m == m.T):
        raise ValueError("m is not symmetric.")
    # det(m) = prod(diag(L))**2 for the Cholesky factor L, so the
    # log-determinant is twice the sum of the log-diagonal.
    chol, _ = scipy.linalg.cho_factor(m, lower=True)
    return 2 * np.sum(np.log(chol.diagonal()))
def merge(left, right):
    """Merge two sorted sequences into one sorted list (stable).

    Complexity: O(n) in the total number of elements.
    """
    merged = []
    i = j = 0
    n_left, n_right = len(left), len(right)
    while i < n_left and j < n_right:
        # <= keeps the merge stable: ties take the left element first.
        if left[i] <= right[j]:
            merged.append(left[i])
            i += 1
        else:
            merged.append(right[j])
            j += 1
    # At most one of these extends with anything.
    merged.extend(left[i:])
    merged.extend(right[j:])
    return merged
def validate_form_data(FORM_Class):
    """
    Decorator factory: validate the request's form/json data with
    FORM_Class before calling the view.

    On success the validated form is passed to the view via the ``form``
    keyword argument; on validation failure a 406 response carrying the
    form errors is returned instead.
    """
    def decorator(f):
        @wraps(f)
        def wrapper(*args, **kwargs):
            form = FORM_Class(csrf_enabled=False)
            if form.validate():
                kwargs['form'] = form
                return f(*args, **kwargs)
            return json_error(code=406, data=form.errors)
        return wrapper
    return decorator
def set_axis_tick_format(
    ax, xtickformat=None, ytickformat=None, xrotation=0, yrotation=0
):
    """Apply tick-label formats and rotations to a single matplotlib axis.

    :param ax: axis object
    :param xtickformat: optional printf-style format string for the x ticks
    :param ytickformat: optional printf-style format string for the y ticks
    :param xrotation: rotation angle of the x tick labels. Defaults to 0
    :param yrotation: rotation angle of the y tick labels. Defaults to 0
    :returns: ax
    """
    for fmt, axis in ((xtickformat, ax.xaxis), (ytickformat, ax.yaxis)):
        if fmt is not None:
            axis.set_major_formatter(FormatStrFormatter(fmt))
    plt.setp(ax.get_xticklabels(), ha="right", rotation=xrotation)
    plt.setp(ax.get_yticklabels(), ha="right", rotation=yrotation)
    return ax
def two_body(y, t):
    """
    Equations of motion for the two-body problem.

    :param y: state vector [rx, ry, rz, vx, vy, vz]
    :param t: time (unused; kept for ODE-integrator call signatures)
    :return: dy/dt as a length-6 numpy array
    """
    position = np.asarray(y[:3], dtype=float)
    r_mag = np.linalg.norm(position)
    dy = np.empty(6)
    dy[:3] = y[3:6]  # d(position)/dt = velocity
    # `mu` is a module-level gravitational parameter.
    dy[3:] = (-mu / r_mag ** 3) * position
    return dy
def DD_carrier_sync(z,M,BnTs,zeta=0.707,type=0):
    """
    z_prime,a_hat,e_phi = DD_carrier_sync(z,M,BnTs,zeta=0.707,type=0)
    Decision directed carrier phase tracking
    z = complex baseband PSK signal at one sample per symbol
    M = The PSK modulation order, i.e., 2, 4, or 8.
    BnTs = time bandwidth product of loop bandwidth and the symbol period,
    thus the loop bandwidth as a fraction of the symbol rate.
    zeta = loop damping factor
    type = Phase error detector type: 0 <> ML, 1 <> heuristic
    z_prime = phase rotation output (like soft symbol values)
    a_hat = the hard decision symbol values landing at the constellation
    values
    e_phi = the phase error e(k) into the loop filter
    Ns = Nominal number of samples per symbol (Ts/T) in the carrier
    phase tracking loop, almost always 1
    Kp = The phase detector gain in the carrier phase tracking loop;
    This value depends upon the algorithm type. For the ML scheme
    described at the end of notes Chapter 9, A = 1, K = 1/sqrt(2),
    so Kp = sqrt(2).
    Mark Wickert July 2014
    Motivated by code found in M. Rice, Digital Communications A Discrete-Time
    Approach, Prentice Hall, New Jersey, 2009. (ISBN 978-0-13-030497-1).
    """
    Ns = 1
    Kp = np.sqrt(2.) # for type 0
    z_prime = np.zeros_like(z)
    a_hat = np.zeros_like(z)
    e_phi = np.zeros(len(z))
    theta_h = np.zeros(len(z))
    theta_hat = 0
    # Tracking loop constants: proportional (K1) and integral (K2) gains of
    # a second-order PLL, derived from the damping factor and BnTs
    K0 = 1;
    K1 = 4*zeta/(zeta + 1/(4*zeta))*BnTs/Ns/Kp/K0;
    K2 = 4/(zeta + 1/(4*zeta))**2*(BnTs/Ns)**2/Kp/K0;
    # Initial condition
    vi = 0
    for nn in range(len(z)):
        # Multiply by the phase estimate exp(-j*theta_hat[n])
        z_prime[nn] = z[nn]*np.exp(-1j*theta_hat)
        # Hard-decision slicing for the supported constellations
        if M == 2:
            a_hat[nn] = np.sign(z_prime[nn].real) + 1j*0
        elif M == 4:
            a_hat[nn] = np.sign(z_prime[nn].real) + 1j*np.sign(z_prime[nn].imag)
        elif M == 8:
            a_hat[nn] = np.angle(z_prime[nn])/(2*np.pi/8.)
            # round to the nearest integer and fold to nonnegative
            # integers; detection into M-levels with thresholds at mid points.
            a_hat[nn] = np.mod(round(a_hat[nn]),8)
            a_hat[nn] = np.exp(1j*2*np.pi*a_hat[nn]/8)
        else:
            raise ValueError('M must be 2, 4, or 8')
        if type == 0:
            # Maximum likelihood (ML): Im{z' * conj(a_hat)}
            e_phi[nn] = z_prime[nn].imag * a_hat[nn].real - \
                        z_prime[nn].real * a_hat[nn].imag
        elif type == 1:
            # Heuristic: raw angle difference
            e_phi[nn] = np.angle(z_prime[nn]) - np.angle(a_hat[nn])
        else:
            raise ValueError('Type must be 0 or 1')
        vp = K1*e_phi[nn] # proportional component of loop filter
        vi = vi + K2*e_phi[nn] # integrator component of loop filter
        v = vp + vi # loop filter output
        theta_hat = np.mod(theta_hat + v,2*np.pi)
        theta_h[nn] = theta_hat # phase track output array
        #theta_hat = 0 # for open-loop testing
    # Normalize outputs to have QPSK points at (+/-)1 + j(+/-)1
    #if M == 4:
    # z_prime = z_prime*np.sqrt(2)
    return z_prime, a_hat, e_phi, theta_h | 77bcd1dd49a7bb9dfd40b8b45c9a9eb129ed674d | 10,362 |
from typing import Dict
from typing import Any
def rubrik_gps_vm_snapshot_create(client: PolarisClient, args: Dict[str, Any]) -> CommandResults:
    """
    Trigger an on-demand vm snapshot.

    :type client: ``PolarisClient``
    :param client: Rubrik Polaris client to use

    :type args: ``dict``
    :param args: arguments obtained from demisto.args(); requires
        ``object_id`` and optionally ``sla_domain_id``

    :return: CommandResult object
    """
    object_id = validate_required_arg("object_id", args.get("object_id", ""))
    sla_domain_id = args.get("sla_domain_id", "")
    raw_response = client.create_vm_snapshot(object_id, sla_domain_id)
    snapshot = remove_empty_elements(
        raw_response.get("data", {}).get("vsphereOnDemandSnapshot", {}))
    # No request id means the snapshot was not scheduled.
    if not snapshot or not snapshot.get("id"):
        return CommandResults(readable_output=MESSAGES['NO_RESPONSE'])
    readable = tableToMarkdown(
        "GPS VM Snapshot",
        {"On-Demand Snapshot Request ID": snapshot.get("id"),
         "Status": snapshot.get("status")},
        headers=["On-Demand Snapshot Request ID", "Status"],
        removeNull=True)
    return CommandResults(
        outputs_prefix=OUTPUT_PREFIX["GPS_SNAPSHOT_CREATE"],
        outputs_key_field="id",
        outputs=snapshot,
        raw_response=raw_response,
        readable_output=readable)
def CT_freezing_first_derivatives(SA, p, saturation_fraction):
    """
    Calculates the first derivatives of the Conservative Temperature at
    which seawater freezes, with respect to Absolute Salinity SA and
    pressure P (in Pa).

    Parameters
    ----------
    SA : array-like
        Absolute Salinity, g/kg
    p : array-like
        Sea pressure (absolute pressure minus 10.1325 dbar), dbar
    saturation_fraction : array-like
        Saturation fraction of dissolved air in seawater. (0..1)

    Returns
    -------
    CTfreezing_SA : array-like, K kg/g
        the derivative of the Conservative Temperature at
        freezing (ITS-90) with respect to Absolute Salinity at
        fixed pressure [ K/(g/kg) ]
    CTfreezing_P : array-like, K/Pa
        the derivative of the Conservative Temperature at
        freezing (ITS-90) with respect to pressure (in Pa) at
        fixed Absolute Salinity
    """
    # Thin wrapper: the numerics live in the compiled GSW ufunc.
    return _gsw_ufuncs.ct_freezing_first_derivatives(SA, p, saturation_fraction) | af3aa120e2e2620f16d984b29d42d583ed9fd347 | 10,365 |
def get_territory_center(territory: inkex.Group) -> inkex.Vector2d:
    """
    Compute the centre point of a territory from the <rect> element nested
    inside its child group.

    NOTE(review): the offsets are computed from the rectangle's corner
    radii (rx/ry), not its width/height -- confirm this is intentional
    rather than a width/height typo.

    :param territory: SVG group element representing the territory
    :return: centre point derived from the territory's marker rectangle
    """
    # First <rect> inside the territory's child <g>, via namespace-aware find.
    center_rectangle: inkex.Rectangle = territory.find(f"./{Svg.GROUP}/{Svg.RECTANGLE}", NSS)
    return inkex.Vector2d(
        center_rectangle.left + center_rectangle.rx / 2,
        center_rectangle.top + center_rectangle.ry / 2
    ) | 90f9e8ae7eebc5f3acf3d6e4100dec36ef5839d9 | 10,367 |
def batch_norm_relu(inputs, is_training, data_format):
  """Performs a batch normalization followed by a searched activation.

  Despite the name, the ReLU has been replaced by an activation function
  assembled from unary/binary primitives whose indices are read from the
  file ``tmp`` (architecture-search experiment).  The resulting tensor
  expression is appended to ``./functions.txt`` for logging.
  """
  # We set fused=True for a significant performance boost. See
  # https://www.tensorflow.org/performance/performance_guide#common_fused_ops
  inputs = tf.layers.batch_normalization(
      inputs=inputs, axis=1 if data_format == 'channels_first' else -1,
      momentum=_BATCH_NORM_DECAY, epsilon=_BATCH_NORM_EPSILON, center=True,
      scale=True, training=is_training, fused=True)
  # Earlier, larger search spaces kept for reference:
  # unary = {"1":lambda x:x ,"2":lambda x: -x, "3":lambda x:tf.abs, "4":lambda x : tf.pow(x,2),"5":lambda x : tf.pow(x,3),
  # "6":lambda x:tf.sqrt,"7":lambda x: tf.Variable(tf.truncated_normal([1], stddev=0.08))*x,
  # "8":lambda x : x + tf.Variable(tf.truncated_normal([1], stddev=0.08)),"9":lambda x: tf.log(tf.abs(x)+10e-8),
  # "10":lambda x:tf.exp,"11":lambda x:tf.sin,"12":lambda x:tf.sinh,"13":lambda x:tf.cosh,"14":lambda x:tf.tanh,"15":lambda x:tf.asinh,"16":lambda x:tf.atan,"17":lambda x: tf.sin(x)/x,
  # "18":lambda x : tf.maximum(x,0),"19":lambda x : tf.minimum(x,0),"20":tf.sigmoid,"21":lambda x:tf.log(1+tf.exp(x)),
  # "22":lambda x:tf.exp(-tf.pow(x,2)),"23":lambda x:tf.erf,"24":lambda x: tf.Variable(tf.truncated_normal([1], stddev=0.08))}
  # binary = {"1":lambda x,y: tf.add(x,y),"2":lambda x,y:tf.multiply(x,y),"3":lambda x,y:tf.add(x,-y),"4":lambda x,y:x/(y+10e-8),
  # "5":lambda x,y:tf.maximum(x,y),"6":lambda x,y: tf.sigmoid(x)*y,"7":lambda x,y:tf.exp(-tf.Variable(tf.truncated_normal([1], stddev=0.08))*tf.pow(x-y,2)),
  # "8":lambda x,y:tf.exp(-tf.Variable(tf.truncated_normal([1], stddev=0.08))*tf.abs(x-y)),
  # "9":lambda x,y: tf.Variable(tf.truncated_normal([1], stddev=0.08))*x + (1-tf.Variable(tf.truncated_normal([1], stddev=0.08)))*y}
  # Current (reduced) primitive sets, keyed by the string indices in `tmp`.
  unary = {"1":lambda x:x ,"2":lambda x: -x, "3": lambda x: tf.maximum(x,0), "4":lambda x : tf.pow(x,2),"5":lambda x : tf.tanh(tf.cast(x,tf.float32))}
  binary = {"1":lambda x,y: tf.add(x,y),"2":lambda x,y:tf.multiply(x,y),"3":lambda x,y:tf.add(x,-y),"4":lambda x,y:tf.maximum(x,y),"5":lambda x,y: tf.sigmoid(x)*y}
  input_fun = {"1":lambda x:tf.cast(x,tf.float32) , "2":lambda x:tf.zeros(tf.shape(x)), "3": lambda x:2*tf.ones(tf.shape(x)),"4": lambda x : tf.ones(tf.shape(x)), "5": lambda x: -tf.ones(tf.shape(x))}
  # The searched architecture is encoded as space-separated indices on the
  # first line of `tmp`.
  with open("tmp","r") as f:
    activation = f.readline()
  activation = activation.split(" ")
  #inputs = binary[activation[8]](unary[activation[5]](binary[activation[4]](unary[activation[2]](input_fun[activation[0]](inputs)),unary[activation[3]](input_fun[activation[1]](inputs)))),unary[activation[7]](input_fun[activation[6]](inputs)))
  # Core unit: b2(u4(b1(u1(x), u2(x))), u5(x)) with indices from `activation`.
  inputs = binary[activation[5]](unary[activation[3]](binary[activation[2]](unary[activation[0]](inputs),unary[activation[1]]((inputs)))),unary[activation[4]]((inputs)))
  #inputs = binary[activation[4]]((unary[activation[2]](input_fun[activation[0]](inputs))),(unary[activation[3]](input_fun[activation[1]](inputs)))) #b[4](u1[2](x1[0]),u2[3](x2[1])) #core unit
  #inputs = binary[activation[2]]((unary[activation[0]](inputs)),(unary[activation[1]](inputs))) #b[2](u1[0](x),u2[1](x)) #core unit
  #inputs = tf.nn.relu(inputs)
  # NOTE(review): this file handle is never closed; consider a with-block.
  functions = open("./functions.txt", "a")
  functions.write(str(inputs) + "\n")
  return inputs | 3d9d08000cd4dc5b90b64c6fdce7fca0039c5a03 | 10,368 |
def score_to_rating_string(score):
    """
    Convert a numeric score to its rating label.

    Scores below 1 are "Terrible", then each unit step up to 4 maps to
    "Bad", "OK" and "Good"; anything 4 or above is "Excellent".
    """
    thresholds = ((1, "Terrible"), (2, "Bad"), (3, "OK"), (4, "Good"))
    for upper_bound, label in thresholds:
        if score < upper_bound:
            return label
    return "Excellent"
def deconv1d_df(t, observed_counts, one_sided_prf, background_count_rate, column_name='deconv', same_time=True,
                deconv_func=emcee_deconvolve, **kwargs):
    """
    Deconvolve a counts time series and return summary statistics in a
    pandas.DataFrame.

    Parameters
    ----------
    t : sequence
        Time stamps of the observations.
    observed_counts : sequence
        Observed counts at each time in `t`.
    one_sided_prf : sequence
        One-sided point-response function used for deconvolution.
    background_count_rate : float
        Detector background count rate.
    column_name : str
        Prefix for the output column names (default ``'deconv'``).
    same_time : bool
        When True, re-align the result onto the input time stamps `t`.
    deconv_func : callable
        Deconvolution backend; must return ``(sampler, A, t_ret, ...)``
        where `A` holds posterior samples, one row per sample.

    Returns
    -------
    pandas.DataFrame
        Mean and 10/16/50/84/90th-percentile columns, indexed by the
        deconvolved time stamps (or by `t` when `same_time` is True).
    """
    with util.timewith("deconvolve chunk with {} elements".format(len(observed_counts))):
        results = deconv_func(t, observed_counts, one_sided_prf,
                              background_count_rate, **kwargs)
        sampler, A, t_ret = results[:3]
        # Summarise the posterior samples column-wise.
        mean_est = A.mean(axis=0)
        percentiles = np.percentile(A, [10, 16, 50, 84, 90], axis=0)
        d = {column_name + '_mean': mean_est,
             column_name + '_p10': percentiles[0],
             column_name + '_p16': percentiles[1],
             column_name + '_p50': percentiles[2],
             column_name + '_p84': percentiles[3],
             column_name + '_p90': percentiles[4]}
        df = pd.DataFrame(data=d, index=t_ret)
        if same_time:
            # DataFrame.ix was deprecated in pandas 0.20 and removed in 1.0;
            # reindex reproduces its label-based row selection (missing
            # labels become NaN rows).
            df = df.reindex(t)
        return df
def create_parser() -> ArgumentParser:
    """Create a parser instance able to parse args of script.

    return:
        Returns the parser instance
    """
    parser = ArgumentParser()
    parser.add_argument('first_file', help='path to JSON or YAML file')
    parser.add_argument('second_file', help='path to JSON or YAML file')
    parser.add_argument(
        '-f',
        '--format',
        choices=FORMATS.keys(),
        default=DEFAULT_FORMAT,
        help='set format of output',
    )
    # Report the installed package version via argparse's version action.
    version = get_distribution('hexlet-code').version
    parser.add_argument(
        '-v',
        '--version',
        action='version',
        version='{prog} {version}'.format(prog=parser.prog, version=version),
        help='print version info',
    )
    return parser
def remap_key(ctx, origin_key, destination_key, *, mode=None, level=None):
    """Remap *origin_key* to *destination_key*.

    Returns an instance of :class:`RemappedKey`.

    For valid keys refer to `List of Keys
    <https://www.autohotkey.com/docs/KeyList.htm>`_.

    The optional keyword-only *mode* and *level* arguments are passed to the
    :func:`send` function that will send the *destination_key* when the user
    presses the *origin_key*.

    For more information refer to `Remapping Keys
    <https://www.autohotkey.com/docs/misc/Remap.htm>`_.
    """
    mouse = destination_key.lower() in {"lbutton", "rbutton", "mbutton", "xbutton1", "xbutton2"}
    if mouse:
        # Mouse destination: only press if the button isn't already held, so
        # keyboard autorepeat of the origin key doesn't spam button-downs.
        def origin_hotkey():
            if not is_key_pressed(destination_key):
                send("{Blind}{%s DownR}" % destination_key, mode=mode, level=level, mouse_delay=-1)
        def origin_up_hotkey():
            send("{Blind}{%s Up}" % destination_key, mode=mode, level=level, mouse_delay=-1)
    else:
        ctrl_to_alt = (
            origin_key.lower() in {"ctrl", "lctrl", "rctrl"} and
            destination_key.lower() in {"alt", "lalt", "ralt"}
        )
        if ctrl_to_alt:
            # Ctrl->Alt special case: release Ctrl explicitly before pressing
            # Alt in a single send.
            def origin_hotkey():
                send(
                    "{Blind}{%s Up}{%s DownR}" % (origin_key, destination_key),
                    mode=mode,
                    level=level,
                    key_delay=-1,
                )
        else:
            def origin_hotkey():
                send("{Blind}{%s DownR}" % destination_key, mode=mode, level=level, key_delay=-1)
        def origin_up_hotkey():
            send("{Blind}{%s Up}" % destination_key, mode=mode, level=level, key_delay=-1)
    # "*" prefix: the hotkeys fire regardless of extra held modifiers.
    origin_hotkey = ctx.hotkey(f"*{origin_key}", origin_hotkey)
    origin_up_hotkey = ctx.hotkey(f"*{origin_key} Up", origin_up_hotkey)
    return RemappedKey(origin_hotkey, origin_up_hotkey) | f4a8f7cddea2f82a13d06b5f3f3c4031e862e32b | 10,374 |
def get_anime_list(wf):
    """Get an Animelist instance.

    :param Workflow3 wf: the Workflow3 object
    :returns: Animelist object
    :rtype: Animelist
    :raises LogoutException: when the stored UID or auth token cannot be read
    """
    try:
        return Animelist(
            wf.settings['UID'], wf.get_password('bangumi-auth-token')
        )
    except Exception as e:
        # Chain the underlying error (missing setting / keychain entry) so
        # the real cause is not silently discarded.
        raise LogoutException("Please login first") from e
def use_linear_strategy():
    """
    Uses a linear function to generate target velocities.

    Returns a callable ``linear_strategy(distances_to_waypoints,
    current_velocity)`` producing one target velocity per waypoint.
    """
    # Speed ceiling from the ROS parameter server, converted km/h -> m/s.
    max_velocity = kmph2mps(rospy.get_param("~velocity", 40))
    # Stop this many metres short of the final waypoint (stop line).
    stop_line_buffer = 2.0
    def linear_strategy(distances_to_waypoints, current_velocity):
        # Target velocity function should be a line
        # going from (0, current_velocity)
        # to (last_waypoint - buffer, 0)
        # (after x-intercept, y = 0)
        d = max(distances_to_waypoints[-1] - stop_line_buffer, 0) # stopping distance
        v = current_velocity # amount by which to slow down within given distance
        # Protect against divide by 0 case
        if d < 0.01:
            return [0 for x in distances_to_waypoints]
        f = lambda x: min(
            max(
                # [0, d]: downward line:
                # y = (-v / d)x + v = (1 - (x/d)) * v
                (1. - (x / d)) * v,
                # (-inf, 0) && (d, +inf): flat
                # y = 0
                0
            ),
            # Never faster than maximum
            max_velocity
        )
        # NOTE(review): on Python 3 `map` returns a lazy iterator, not a
        # list -- confirm callers can consume it (this looks written for
        # Python 2, where the d<0.01 branch returns a list).
        return map(f, distances_to_waypoints)
    return linear_strategy | 7f0be5e0e11c7d29bb68ce7007e911f0fd14d6e2 | 10,376 |
def recomputation_checkpoint(module: nn.Module):
    """Annotates the output of a module to be checkpointed instead of
    recomputed"""
    def recompute_outputs(module, inputs, outputs):
        # Wrap every output tensor so PopTorch stores it at this point
        # rather than recomputing it during the backward pass.
        return tuple(poptorch.recomputationCheckpoint(y) for y in outputs)
    # Returns the hook handle; callers may .remove() it to undo the marking.
    return module.register_forward_hook(recompute_outputs) | a39f106f05e84b36ab21a948044adb14fb44b6cd | 10,377 |
import requests
import json
def get_random_quote() -> str:
    """Retrieve a random quote from the Forismatic API.

    Retries (potentially forever) until a quote is fetched, parsed, and
    passes the blacklist filter.

    Returns:
        str: The retrieved quote
    """
    quote = ""
    while quote == "":
        response = requests.get(
            "http://api.forismatic.com/api/1.0/?method=getQuote&lang=en&format=json"
        )
        if response.status_code != 200:
            print(f"Error while getting image: {response}")
            continue
        try:
            # The API sometimes returns invalidly escaped quotes (\'),
            # which json.loads rejects; unescape them first.
            response_json = json.loads(response.text.replace("\\'", "'"))
        except json.decoder.JSONDecodeError as error:
            print(f"Error while decoding JSON: {response.text}\n{error}")
            continue
        quote_text: str = response_json["quoteText"]
        # Defined elsewhere in this module: rejects quotes matching any
        # blacklisted regex.
        if contains_no_blacklisted_regexes(quote_text):
            quote = quote_text
    return quote | bd189878c76a4da1544a6d3762c2a67f69ad1846 | 10,378 |
def has_datapoint(fake_services, metric_name=None, dimensions=None, value=None, metric_type=None, count=1):
    """
    Return True if at least `count` datapoints seen in the fake_services
    backend match all of the given attributes. If a property is not
    specified it will not be considered. Dimensions, if provided, are
    tested as a subset of the total set of dimensions on the datapoint,
    not the complete set.
    """
    found = 0
    # Cull the candidate set cheaply using the backend's indexes; the
    # per-datapoint checks below enforce correctness, so any superset of
    # the matches works here.
    if dimensions:
        # Use a single dimension's bucket: concatenating the buckets of
        # every dimension would list the same datapoint once per matching
        # key and inflate `found`.
        dim_key, dim_val = next(iter(dimensions.items()))
        datapoints = fake_services.datapoints_by_dim[f"{dim_key}:{dim_val}"]
    elif metric_name is not None:
        datapoints = fake_services.datapoints_by_metric[metric_name]
    else:
        datapoints = fake_services.datapoints
    # BUG FIX: this previously iterated fake_services.datapoints, which
    # made the culling above dead code.
    for dp in datapoints:
        if metric_name and dp.metric != metric_name:
            continue
        if dimensions and not has_all_dims(dp, dimensions):
            continue
        if metric_type and dp.metricType != metric_type:
            continue
        if value is not None:
            if dp.value.HasField("intValue"):
                if dp.value.intValue != value:
                    continue
            elif dp.value.HasField("doubleValue"):
                if dp.value.doubleValue != value:
                    continue
            else:
                # Non-numeric values aren't supported, so they always fail
                # to match.
                continue
        found += 1
        if found >= count:
            return True
    return False
def optimizeMemoryUsage(foregroundTasks, backgroundTasks, K):
    """
    Find the (foreground, background) index pairs whose combined memory
    usage is maximal without exceeding K; returns [[-1, -1]] when nothing
    qualifies.

    :type foregroundTasks: List[int]
    :type backgroundTasks: List[int]
    :type K: int
    :rtype: List[List[int]]
    """
    res = []
    curr_max = 0
    # Degenerate cases: one list is empty, so consider tasks from the other
    # list on their own.  NOTE(review): `add_result` is defined elsewhere;
    # presumably it appends qualifying single-task candidates to `res` --
    # confirm its contract before relying on it.
    if len(foregroundTasks) == 0:
        for j in range(len(backgroundTasks)):
            add_result(backgroundTasks[j], K, curr_max, res, j, 1)
    if len(backgroundTasks) == 0:
        for i in range(len(foregroundTasks)):
            add_result(foregroundTasks[i], K, curr_max, res, i, 0)
    for i in range(len(foregroundTasks)):
        for j in range(len(backgroundTasks)):
            curr_usage = foregroundTasks[i] + backgroundTasks[j]
            # Pair too big: fall back to considering each task alone.
            if curr_usage > K:
                add_result(foregroundTasks[i], K, curr_max, res, i, 0)
                add_result(backgroundTasks[j], K, curr_max, res, j, 1)
            # A strictly better (still feasible) pair replaces all previous
            # candidates ...
            if curr_usage > curr_max and curr_usage <= K:
                res = [[i, j]]
                curr_max = curr_usage
            # ... while ties are collected alongside the current best.
            elif curr_usage == curr_max:
                res.append([i, j])
    return res if len(res) > 0 else [[-1, -1]] | be7de70bf39ea1872ad1d5bbb9c5209ae3978c8c | 10,380 |
import itertools
def cv_indices(num_folds,num_samples):
    """
    Given the number of samples and num_folds, automatically create a
    subject-wise cross validator.
    Assumption: per subject we have 340 samples (epochs) of data.
    >>> cv_set = cv_indices(2,680)
    >>> cv_set
    >>> (([0:340],[340:680]),([340:680,0:340]))
    Algo:
    1. Compute all the fold combinations.
    2. Iterate through the combinations; the first num_folds-1 folds of each
       combination provide the train indices, the complementary fold the
       test indices.
    3. Finally zip the two lists to form the (train, test) index pairs.
    :param num_folds: folds for cv
    :param num_samples: number of samples of input of data (should be a multiple of 340)
    :return: return a zipped list of tuples
    of ranges of training and testing data
    """
    n_epoch = 340
    n_subjects = num_samples/n_epoch
    rem=num_samples%n_epoch
    assert (rem == 0),"samples passed in not a multiple of 340"
    assert (num_folds<=n_subjects),"number of subjects is less then number of folds"
    n_set = np.round(n_subjects/num_folds)
    n_set = int(n_set)
    n_subjects=int(n_subjects)
    # Partition subject ids into num_folds groups; the last group absorbs
    # any remainder subjects.
    flag=[]
    for i in range(num_folds):
        if i<num_folds-1:
            flag=flag+[list(range(i*n_set,(i+1)*n_set))]
        else:
            flag=flag+[list(range(i*n_set,n_subjects))]
    train_indices=[]
    test_indices=[]
    #permutations=perm1(range(num_folds))
    # All (num_folds-1)-sized fold combinations form the training side; the
    # complementary single folds (reversed so they line up) the test side.
    permutations=list(itertools.combinations(list(range(num_folds)),num_folds-1))
    permutations=list(map(list,permutations))
    sets = len(permutations)
    permutations_test=list(itertools.combinations(list(range(num_folds)),1))
    permutations_test=list(map(list,permutations_test))
    permutations_test.reverse()
    # Expand subject ids into per-epoch sample-index ranges; test indices
    # are only appended on the first pass (i < 1) since each combination has
    # exactly one test fold.
    for i in range(num_folds-1):
        for j in range(sets):
            for k in range(len(flag[permutations[j][i]])):
                if i<1:
                    train_indices=train_indices+[list(range(flag[permutations[j][i]][k]*n_epoch,(flag[permutations[j][i]][k]+1)*n_epoch))]
                    test_indices=test_indices+[list(range(flag[permutations_test[j][i]][k]*n_epoch,(flag[permutations_test[j][i]][k]+1)*n_epoch))]
                else:
                    train_indices=train_indices+[list(range(flag[permutations[j][i]][k]*n_epoch,(flag[permutations[j][i]][k]+1)*n_epoch))]
    custom_cv=list(zip(train_indices,test_indices))
    return custom_cv | ce0d983458a089919f5581ebc0a650f73cf4c423 | 10,381 |
import pkg_resources
import json
import jsonschema
def load_schema(schema_name: str) -> dict:
    """Load a JSON schema.

    This function searches within apollon's own schema repository.
    If a schema is found it is additionally validated against Draft 7.

    Args:
        schema_name: Name of schema. Must be file name without extension.

    Returns:
        Schema instance.

    Raises:
        IOError: if no schema with the given name exists in the package.
    """
    schema_path = 'schema/' + schema_name + SCHEMA_EXT
    if pkg_resources.resource_exists('apollon', schema_path):
        schema = json.loads(pkg_resources.resource_string('apollon', schema_path))
        jsonschema.Draft7Validator.check_schema(schema)
        return schema
    # BUG FIX: schema_path is a plain str (not a pathlib.Path), so the old
    # `schema_path.name` raised AttributeError instead of the intended IOError.
    raise IOError(f'Schema ``{schema_path}`` not found.')
def question_12(data):
    """
    Question 12: apply a diagonal linear transform to the data, plot the
    result, and print the newly created covariance matrix.

    :param data: 3xN data matrix
    :return: data after the linear transformation S @ data,
             with S = diag(0.1, 0.5, 2)
    """
    scaling = np.diag([0.1, 0.5, 2.0])
    transformed = scaling @ data
    plot_3d(transformed, "Q12: Linear Transformed the prev data")
    print("------ Covariance Matrix (QUESTION 12) ------")
    print_cov_mat(transformed)
    return transformed
def update_storage(user_choice):
    """Deduct the ordered beverage's ingredients from the machine's resources.

    Mutates the module-level ``resources`` dict in place (using the recipe
    from the module-level ``MENU``) and returns it.
    """
    recipe = MENU[user_choice]["ingredients"]
    for ingredient in ("water", "milk", "coffee"):
        resources[ingredient] -= recipe[ingredient]
    return resources
def second_test_function(dataset_and_processing_pks):
    """
    Pass a result of JSON processing to a function that saves result on a model.

    On failure the exception text is persisted on the Dataset/Processing
    models and an empty result is returned instead of raising.

    :param dataset_and_processing_pks: tuple of two (Dataset PK, Processing PK)
    :return: tuple of two (Dataset PK; JSON (Python's list of dicts))
    """
    # unpack tuple; needed for Celery chain compatibility
    dataset_pk, processing_pk = dataset_and_processing_pks
    # re-fetch Dataset and Processing from the database
    dataset = Dataset.objects.get(pk=dataset_pk)
    processing = Processing.objects.get(pk=processing_pk)
    result = []
    # calculate result; handle exceptions (bad keys, non-numeric values, ...)
    try:
        result = [{'result': pair['a'] + pair['b']} for pair in dataset.data]
    except Exception as err:
        # exception string = exception type + exception args
        exception_message = "{type}: {message}". \
            format(type=type(err).__name__, message=err)
        # save exception to db; `result` stays [] on failure
        dataset.exception = exception_message
        processing.exceptions = True
        dataset.save()
        processing.save()
    return dataset_pk, result | b21960b1349c825b00997281dfbef4ef924846d0 | 10,385 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.