content stringlengths 35 762k | sha1 stringlengths 40 40 | id int64 0 3.66M |
|---|---|---|
def snrcat(spec, plugmap):
    """Calculate the median S/N for each of the 300 APOGEE fibers.

    Parameters
    ----------
    spec : SpecFrame object
        The SpecFrame object that contains the 1D extracted spectra
        (its ``flux`` and ``err`` 2D arrays are used, one row per fiber).
    plugmap : numpy structured array
        The plugmap information for each fiber including which fiber contains
        sky or stars.

    Returns
    -------
    cat : astropy Table
        A catalog containing information on each object in the fibers and the
        median S/N.

    Example
    -------
    .. code-block:: python

        cat = snrcat(spec,plugmap)
    """
    # BUGFIX: np.str/np.float/np.int were deprecated in NumPy 1.20 and removed
    # in 1.24; the plain builtins are the exact aliases they pointed to.
    dtype = np.dtype([('apogee_id', str, 30), ('ra', np.float64), ('dec', np.float64),
                      ('hmag', float), ('objtype', str, 30),
                      ('fiberid', int), ('fiberindex', int),
                      ('flux', float), ('err', float), ('snr', float)])
    cat = np.zeros(300, dtype=dtype)
    # Load the spectral data: median over the spectral axis for each fiber
    cat['fiberindex'] = np.arange(300)
    cat['flux'] = np.median(spec.flux, axis=1)
    cat['err'] = np.median(spec.err, axis=1)
    # Guard against non-positive errors before dividing. 'err' is a view into
    # cat['err'], so the clipped values are written back into the catalog too.
    err = cat['err']
    bad = (err <= 0.0)
    err[bad] = 1.0
    cat['snr'] = cat['flux'] / err
    # Load the plugging data: object holes on spectrograph 2 only
    pcat = plugmap['PLUGMAPOBJ']
    fibs, = np.where((pcat['fiberId'] >= 0) & (pcat['holeType'] == 'OBJECT') &
                     (pcat['spectrographId'] == 2))
    # fiberId counts 300->1 while fiberindex counts 0->299.
    # NOTE(review): a fiberId of 0 would map to index 300 (out of bounds) --
    # presumably OBJECT holes always carry fiberId >= 1; confirm upstream.
    fiberindex = 300 - pcat[fibs]['fiberId']
    cat['apogee_id'][fiberindex] = pcat[fibs]['tmass_style']
    cat['ra'][fiberindex] = pcat[fibs]['ra']
    cat['dec'][fiberindex] = pcat[fibs]['dec']
    cat['hmag'][fiberindex] = pcat[fibs]['mag'][:, 1]  # H band is column 1
    cat['objtype'][fiberindex] = pcat[fibs]['objType']
    cat['fiberid'][fiberindex] = pcat[fibs]['fiberId']
    cat = Table(cat)
    return cat
def forward(S, A, O, obs):
    """Calculates the forward probability matrix F. This is a matrix where each
    (i, j) entry represents P(o_1, o_2, ... o_j, X_t = i| A, O). In other words,
    each (i, j) entry is the probability that the observed sequence is o_1, ...
    o_j and that at position j we are in hidden state i. We build F from the
    first observation o_1 up to the entire observed sequence o_1, ... o_M. Thus
    F has dimension L x M where L is the number of hidden states and M is the
    length of our input sample 'obs'.

    @params:
    S    np.array - state vector for starting distribution.
    A    np.array - transition matrix, L x L for L hidden states, each (i, j)
                    entry is P(X_i | X_j), or the probability of transitioning
                    from start state X_j (column entry) to target state X_i
                    (row entry).
    O    np.array - observation matrix, L x M' for L hidden states and M' total
                    possible observations. each (i, j) entry is P(Y_j | X_i), or
                    the probability of observing observation Y_j while in state
                    X_i.
    obs  np.array, list - the observations. these are assumed to be integers
                    that index correctly into A and O.

    Returns (F, C): the column-normalized forward matrix and the list of
    normalizing coefficients (used to normalize the backward pass).
    """
    assert np.shape(A)[0] == np.shape(A)[1]  # transition matrix should be square
    L = np.shape(A)[0]  # L is the number of hidden states
    M = len(obs)        # M is the number of observations in our sample 'obs'
    C = []              # the list of coefficients used to normalize each column to 1
    F = np.zeros((L, M))  # the forward algorithm generates an L x M matrix
    # initialize the first column of F: P(X=i) * P(o_1 | X=i)
    F[:, 0] = np.multiply(S, O[:, obs[0]])
    c_0 = np.sum(F[:, 0])  # compute the first normalizing coefficient
    C.append(c_0)
    F[:, 0] = np.divide(F[:, 0], c_0)  # normalize so the entries sum to 1
    # recursion: alpha_j(i) = P(o_j | X=i) * sum_k P(X_i | X_k) * alpha_{j-1}(k)
    for j in range(1, M):
        # BUGFIX: the emission probability for observation o_j must be applied
        # to the *current* state i, i.e. O[i, obs[j]] * (A @ F[:, j-1]). The
        # previous code computed (A * O[:, obs[j]]) @ F[:, j-1], which attaches
        # the emission of o_j to the previous state k instead.
        F[:, j] = np.multiply(O[:, obs[j]], np.dot(A, F[:, j - 1]))
        c_j = np.sum(F[:, j])  # compute the jth coeff.
        C.append(c_j)
        F[:, j] = np.divide(F[:, j], c_j)  # normalize column j
    # return the forward matrix F and the normalizing coefficients C (these
    # will be used to normalize the backward probabilities in the backward step)
    return (F, C)
def calculate_state(position, dt):
    """
    Build a state matrix [position, velocity] from a position-only series.

    Given an (n, 2) array of positions, returns an (n-1, 4) array whose rows
    are [position_1, position_2, velocity_1, velocity_2]. The last sample is
    clipped off because n positions yield only n-1 differences.

    NOTE(review): the velocity term *multiplies* the finite difference by
    ``dt`` rather than dividing by it. This matches the original comment,
    but confirm that ``dt`` is a rate (1/timestep) and not a timestep.
    """
    # finite difference between consecutive samples, scaled by dt
    deltas = np.diff(position, axis=0) * dt
    return np.hstack((position[:-1, :], deltas))
from typing import Optional
def get_server(name: Optional[str] = None,
               opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetServerResult:
    """
    Use this data source to retrieve an auth server from Okta.

    ## Example Usage
    ```python
    import pulumi
    import pulumi_okta as okta

    example = okta.auth.get_server(name="Example Auth")
    ```

    :param str name: The name of the auth server to retrieve.
    """
    __args__ = {'name': name}
    # Fall back to default invoke options and pin the provider version.
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        opts.version = _utilities.get_version()
    # Invoke the provider and unpack the typed result into the awaitable wrapper.
    __ret__ = pulumi.runtime.invoke('okta:auth/getServer:getServer', __args__, opts=opts, typ=GetServerResult).value
    return AwaitableGetServerResult(
        audiences=__ret__.audiences,
        credentials_last_rotated=__ret__.credentials_last_rotated,
        credentials_next_rotation=__ret__.credentials_next_rotation,
        credentials_rotation_mode=__ret__.credentials_rotation_mode,
        description=__ret__.description,
        id=__ret__.id,
        issuer=__ret__.issuer,
        issuer_mode=__ret__.issuer_mode,
        kid=__ret__.kid,
        name=__ret__.name,
        status=__ret__.status)
import pathos
from functools import partial
from itertools import repeat
from typing import Sequence
from typing import Dict
from typing import Set
def estimate_aps_user_defined(ml, X_c = None, X_d = None, data = None, C: Sequence = None, D: Sequence = None, L: Dict[int, Set] = None,
                              S: int = 100, delta: float = 0.8, seed: int = None, pandas: bool = False, pandas_cols: Sequence = None,
                              keep_order: bool = False, reorder: Sequence = None, parallel: bool = False, nprocesses: int = None, ntasks: int = 1, **kwargs):
    """Estimate APS for given dataset and user defined ML function

    Approximate propensity score estimation involves taking draws :math:`X_c^1, \\ldots,X_c^S` from the uniform distribution on :math:`N(X_{ci}, \\delta)`, where :math:`N(X_{ci},\\delta)` is the :math:`p_c` dimensional ball centered at :math:`X_{ci}` with radius :math:`\\delta`.
    :math:`X_c^1, \\ldots,X_c^S` are destandardized before passed for ML inference. The estimation equation is :math:`p^s(X_i;\\delta) = \\frac{1}{S} \\sum_{s=1}^{S} ML(X_c^s, X_{di})`.

    Parameters
    -----------
    ml: Object
        User defined ml function
    X_c: array-like, default: None
        1D/2D vector of continuous input variables
    X_d: array-like, default: None
        1D/2D vector of discrete input variables
    data: array-like, default: None
        Dataset containing ML input variables
    C: array-like, default: None
        Integer column indices for continous variables
    D: array-like, default: None
        Integer column indices for discrete variables
    L: Dict[int, Set]
        Dictionary with keys as indices of X_c and values as sets of discrete values
    S: int, default: 100
        Number of draws for each APS estimation
    delta: float, default: 0.8
        Radius of sampling ball
    seed: int, default: None
        Seed for sampling
    pandas: bool, default: False
        Whether to cast inputs into pandas dataframe
    pandas_cols: Sequence, default: None
        Columns names for dataframe input
    keep_order: bool, default: False
        Whether to maintain the column order if data passed as a single 2D array
    reorder: Sequence, default: False
        Indices to reorder the data assuming original order [X_c, X_d]
    parallel: bool, default: False
        Whether to parallelize the APS estimation
    nprocesses: int, default: None
        Number of processes to parallelize. Defaults to number of processors on machine.
    ntasks: int, default: 1
        Number of tasks to send to each worker process.
    **kwargs: keyword arguments to pass into user function

    Returns
    -----------
    np.ndarray
        Array of estimated APS for each observation in sample

    Notes
    ------
    X_c, X_d, and data should never have any overlapping variables. This is not checkable through the code, so please double check this when passing in the inputs.
    The arguments `keep_order`, `reorder`, and `pandas_cols` are applied sequentially, in that order. This means that if `keep_order` is set, then `reorder` will reorder the columns from the original column order as `data`. `pandas_cols` will then be the names of the new ordered dataset.
    The default ordering of inputs is [X_c, X_d], where the continuous variables and discrete variables will be in the original order regardless of how their input is passed. If `reorder` is called without `keep_order`, then the reordering will be performed on this default ordering.
    Parallelization uses the `Pool` module from pathos, which will NOT be able to deal with execution on GPU. If the user function enables inference on GPU, then it is recommended to implement parallelization within the user function as well.
    The optimal settings for nprocesses and nchunks are specific to each machine, and it is highly recommended that the user pass these arguments to maximize the performance boost. `This SO thread <https://stackoverflow.com/questions/42074501/python-concurrent-futures-processpoolexecutor-performance-of-submit-vs-map>`_ recommends setting nchunks to be 14 * # of workers for optimal performance.
    """
    # Set X_c and X_d based on inputs
    if X_c is None and data is None:
        raise ValueError("APS estimation requires continuous data!")
    # Prioritize explicitly passed variables
    if X_c is not None:
        X_c = np.array(X_c).astype(float)
    if X_d is not None:
        X_d = np.array(X_d).astype(float)
    if data is not None:
        data = np.array(data).astype(float)
    # If X_c not given, but data is, then we assume all of data is X_c
    if X_c is None and X_d is not None and data is not None:
        print("`X_c` not given but both `X_d` and `data` given. We will assume that all the variables in `data` are continuous.")
        X_c = data
    # If X_d not given, but data is, then we assume all of data is X_d
    if X_c is not None and X_d is None and data is not None:
        print("`X_d` not given but both `X_c` and `data` given. We will assume that all the variables in `data` are discrete.")
        X_d = data
    # If both X_c and X_d are none, then use indices
    order = None
    if X_c is None and X_d is None:
        # Save original order if keep order in place
        if keep_order:
            order = _get_og_order(data.shape[1], C, D)
        if C is None and D is None:
            print("`data` given but no indices passed. We will assume that all the variables in `data` are continuous.")
            X_c = data
        elif C is None:
            # Only discrete indices given; everything else is continuous.
            if isinstance(D, int):
                d_len = 1
            else:
                d_len = len(D)
            X_d = data[:,D]
            if d_len >= data.shape[1]:
                raise ValueError(f"Passed discrete indices of length {d_len} for input data of shape {data.shape}. Continuous variables are necessary to conduct APS estimation.")
            else:
                print(f"Passed discrete indices of length {d_len} for input data of shape {data.shape}. Remaining columns of `data` will be assumed to be continuous variables.")
                X_c = np.delete(data, D, axis = 1)
        elif D is None:
            # Only continuous indices given; everything else is discrete.
            if isinstance(C, int):
                c_len = 1
            else:
                c_len = len(C)
            X_c = data[:,C]
            if c_len < data.shape[1]:
                print(f"Passed continuous indices of length {c_len} for input data of shape {data.shape}. Remaining columns of `data` will be assumed to be discrete variables.")
                X_d = np.delete(data, C, axis = 1)
        else:
            # Both index sets given: slice each directly.
            X_c = data[:,C]
            X_d = data[:,D]
    # Force X_c to be 2d array
    if X_c.ndim == 1:
        X_c = X_c[:,np.newaxis]
    if X_d is not None:
        if X_d.ndim == 1:
            X_d = X_d[:,np.newaxis]
    # === Preprocess mixed variables ===
    # Mixed variables (continuous with some discrete mass points) are pulled
    # out so the sampling step can treat their discrete values separately.
    if L is not None:
        L_keys = np.array(list(L.keys()))
        L_vals = np.array(list(L.values()))
        X_c, mixed_og_vals, mixed_og_inds = _preprocessMixedVars(X_c, L_keys, L_vals)
        mixed_rows, mixed_cols = mixed_og_inds
    else:
        mixed_og_vals = None
        mixed_og_inds = None
    # === Standardize continuous variables ===
    # Formula: (X_ik - u_k)/o_k; k represents a continuous variable
    X_c, mu, sigma = standardize(X_c)
    if seed is not None:
        np.random.seed(seed)
    # If parallelizing, then force inference on CPU
    if parallel == True:
        cpu = True
        # Freeze all the per-chunk-invariant arguments so workers only
        # receive the data chunks themselves.
        computeUserAPS_frozen = partial(_computeUserAPS, ml = ml, S = S, delta = delta, mu = mu, sigma = sigma, pandas = pandas,
                                        pandas_cols = pandas_cols, order = order, reorder = reorder, **kwargs)
        mp = pathos.helpers.mp
        p = mp.Pool(nprocesses)  # Pool(None) already defaults to cpu_count
        if nprocesses is None:
            workers = "default (# processors)"
            nprocesses = mp.cpu_count()
        else:
            workers = nprocesses
        print(f"Running APS estimation with {workers} workers...")
        # Split input arrays into chunked rows
        nchunks = ntasks * nprocesses
        X_c_split = np.array_split(X_c, nchunks)
        iter_c = iter(X_c_split)
        if X_d is None:
            iter_d = repeat(None)
        else:
            iter_d = iter(np.array_split(X_d, nchunks))
        if L is None:
            iter_L_ind = repeat(None)
            iter_L_val = repeat(None)
        else:
            # Split indices depending on which chunk they fall into.
            # chunksizes holds the cumulative row boundaries of each chunk;
            # row indices are shifted to be chunk-local before dispatch.
            chunksizes = np.append([0], np.cumsum([c.shape[0] for c in X_c_split]))
            chunked_inds = [(mixed_rows[np.where(np.isin(mixed_rows, range(chunksizes[i], chunksizes[i+1])))] - chunksizes[i],
                             mixed_cols[np.where(np.isin(mixed_rows, range(chunksizes[i], chunksizes[i+1])))]) for i in range(len(chunksizes) - 1)]
            chunked_vals = [mixed_og_vals[np.where(np.isin(mixed_rows, range(chunksizes[i], chunksizes[i+1])))] for i in range(len(chunksizes) - 1)]
            iter_L_ind = iter(chunked_inds)
            iter_L_val = iter(chunked_vals)
        iter_args = zip(iter_c, iter_d, iter_L_ind, iter_L_val)
        p_out = p.starmap(computeUserAPS_frozen, iter_args)
        p.close()
        p.join()
        aps_vec = np.concatenate(p_out)
    else:
        aps_vec = _computeUserAPS(X_c, X_d, mixed_og_inds, mixed_og_vals, ml, S, delta, mu, sigma, pandas, pandas_cols, order, reorder, **kwargs) # Compute APS for each individual i
    aps_vec = np.array(aps_vec)
    return aps_vec
def shared_vinchain_instance():
    """Return the shared vinchainio instance, creating it on first use.

    ``SharedInstance.instance`` is initialized lazily so that multiple
    classes can reuse one default vinchainio connection.
    """
    instance = SharedInstance.instance
    if not instance:
        # First access: reset any stale cache, then build the singleton.
        clear_cache()
        instance = vin.VinChain()
        SharedInstance.instance = instance
    return instance
def generateMfccFeatures(filepath):
    """Load an audio file and compute its MFCC feature matrix.

    :param filepath: path to an audio file readable by librosa
    :return: ndarray with 40 MFCC coefficients per frame, shape
        (40, n_frames) -- librosa defaults used for framing
    """
    # librosa.load resamples to its default rate unless overridden
    y, sr = librosa.load(filepath)
    mfcc_features = librosa.feature.mfcc(y=y, sr=sr, n_mfcc=40)
    return mfcc_features
def homography_crop_resize(org_img_size, crop_y, resize_img_size):
    """
    Compute the homography mapping the original image to the cropped and
    resized network-input image.

    :param org_img_size: [org_h, org_w]
    :param crop_y: number of rows removed before resizing
    :param resize_img_size: [resize_h, resize_w]
    :return: 3x3 homography matrix H_c
    """
    org_h, org_w = org_img_size[0], org_img_size[1]
    new_h, new_w = resize_img_size[0], resize_img_size[1]
    # Scale factors after dropping crop_y rows from the original height.
    sx = new_w / org_w
    sy = new_h / (org_h - crop_y)
    # Scale plus vertical shift (maps y = crop_y to y = 0), as a homography.
    return np.array([[sx, 0.0, 0.0],
                     [0.0, sy, -sy * crop_y],
                     [0.0, 0.0, 1.0]])
def extract_project_info(req_soup, full_name=False):
    """Extract the relevant project info from a request.

    Arguments:
        req_soup (BS4 soup object):
            The soup of the request.
        full_name (boolean):
            Whether or not to capture the entire project name or just the last
            hyphenated element.

    Returns:
        prj_info (Project):
            The required info to post a project.
    """
    name_str = req_soup.find("name").string
    prj_name = name_str if full_name else name_str.split('-')[-1]
    owner = req_soup.find("owner")
    res_name = owner.find("name").string
    email = owner.find("email").string
    # NOTE: Change this line to your own institution's email domain.
    res_lab = "internal" if "email.arizona.edu" in email else "external"
    # Replace all not ascii chars with ascii ones, and any symbols with '-'.
    name_parts = res_name.split()
    prj_res = api_types.Researcher(
        extract_custom_forms._sanitize_text(name_parts[0]),
        extract_custom_forms._sanitize_text(name_parts[-1]),
        extract_custom_forms._sanitize_text(res_lab),
        email,
        "")
    return api_types.Project(prj_name, prj_res)
from typing import Optional
def get_underlying_asset_price(token_symbol: str) -> Optional[Price]:
    """Gets the underlying asset price for token symbol, if any

    This function is neither in inquirer.py or chain/ethereum/defi.py
    due to recursive import problems
    """
    # The curve BTC wrappers all track BTC directly.
    if token_symbol in ('ycrvRenWSBTC', 'crvRenWBTC', 'crvRenWSBTC'):
        return Inquirer().find_usd_price(A_BTC)
    # Map each yearn-style wrapper token to its underlying asset.
    underlying = {
        'yaLINK': A_ALINK,
        'yDAI': A_DAI,
        'yWETH': A_ETH,
        'yYFI': A_YFI,
        'yUSDT': A_USDT,
        'yUSDC': A_USDC,
        'yTUSD': A_TUSD,
    }.get(token_symbol)
    if underlying is None:
        return None  # unknown symbol: no underlying price
    return Inquirer().find_usd_price(underlying)
import asyncio
def do_call_async(
    fn_name, *args, return_type=None, post_process=None
) -> asyncio.Future:
    """Perform an asynchronous library function call.

    Resolves ``fn_name`` on the loaded native library, registers a ctypes
    callback that resolves the returned Future, and invokes the function.
    """
    lib_fn = getattr(get_library(), fn_name)
    loop = asyncio.get_event_loop()
    fut = loop.create_future()
    # Callback signature: (return None, callback id, error code[, result]).
    cf_args = [None, c_int64, c_int64]
    if return_type:
        cf_args.append(return_type)
    cb_type = CFUNCTYPE(*cf_args)  # could be cached
    cb_res = _create_callback(cb_type, fut, post_process)
    # keep a reference to the callback function to avoid it being freed
    CALLBACKS[fut] = (loop, cb_res)
    result = lib_fn(*args, cb_res, c_void_p())  # not making use of callback ID
    if result:
        # Non-zero result means the call failed synchronously and the
        # callback will not be executed.
        # NOTE(review): pop() without a default raises KeyError if the entry
        # is missing -- presumably the entry just added is always present.
        if CALLBACKS.pop(fut):
            fut.set_exception(get_current_error())
    return fut
def prefer_static_value(x):
    """Return static value of tensor `x` if available, else `x`.

    Args:
      x: `Tensor` (already converted).

    Returns:
      Numpy array (if static value is obtainable), else `Tensor`.
    """
    static_x = tensor_util.constant_value(x)
    # Fall back to the tensor itself when no static value can be derived.
    return x if static_x is None else static_x
from psyneulink.core.components.component import Component, ComponentsMeta
import types
import copy
def copy_parameter_value(value, shared_types=None, memo=None):
    """
    Returns a copy of **value** used as the value or spec of a
    Parameter, with exceptions.

    For example, we assume that if we have a Component in an
    iterable, it is meant to be a pointer rather than something
    used in computation requiring it to be a "real" instance
    (like `Component.function`)

    e.g. in spec attribute or Parameter `Mechanism.input_ports_spec`

    Types listed in *shared_types* are shared (not copied); anything else
    is deep-copied. *memo* is the standard deepcopy memo dict.
    """
    if shared_types is None:
        # By default, share Components, their metaclass, and bound methods.
        shared_types = (Component, ComponentsMeta, types.MethodType)
    else:
        shared_types = tuple(shared_types)
    try:
        # Copy containers while sharing (not copying) items of shared_types.
        return copy_iterable_with_shared(
            value,
            shared_types=shared_types,
            memo=memo
        )
    except TypeError:
        # value is not an iterable handled above.
        # this will attempt to copy the current object if it
        # is referenced in a parameter, such as
        # ComparatorMechanism, which does this for input_ports
        if not isinstance(value, shared_types):
            return copy.deepcopy(value, memo)
        else:
            return value
def ajax_login_required(function):
    """
    Decorator for views that checks that the user is logged in, resulting in a
    403 Unauthorized response if not.
    """
    # BUGFIX: django.utils.decorators.available_attrs was removed in Django
    # 3.0; on Python 3, plain functools.wraps is its exact equivalent.
    @wraps(function)
    def wrapped_function(request, *args, **kwargs):
        if request.user.is_authenticated:
            return function(request, *args, **kwargs)
        return HttpResponseForbidden()
    return wrapped_function
def compile_str_from_parsed(parsed):
    """The (quasi-)inverse of string.Formatter.parse.

    Args:
        parsed: iterator of (literal_text, field_name, format_spec, conversion) tuples,
            as yield by string.Formatter.parse

    Returns:
        A format string that would produce such a parsed input.

    >>> s = "ROOT/{}/{0!r}/{1!i:format}/hello{:0.02f}TAIL"
    >>> assert compile_str_from_parsed(string.Formatter().parse(s)) == s
    >>>
    >>> # Or, if you want to see more details...
    >>> parsed = list(string.Formatter().parse(s))
    >>> for p in parsed:
    ...     print(p)
    ('ROOT/', '', '', None)
    ('/', '0', '', 'r')
    ('/', '1', 'format', 'i')
    ('/hello', '', '0.02f', None)
    ('TAIL', None, None, None)
    >>> compile_str_from_parsed(parsed)
    'ROOT/{}/{0!r}/{1!i:format}/hello{:0.02f}TAIL'
    """
    # Accumulate fragments in a list and join once at the end.
    pieces = []
    for literal_text, field_name, format_spec, conversion in parsed:
        if literal_text:
            pieces.append(literal_text)
        # field_name is None for a trailing literal with no replacement field
        if field_name is not None:
            # Rebuild the replacement field: {name!conversion:spec}
            pieces.append('{')
            pieces.append(field_name)  # '' for auto-numbered fields
            if conversion:
                pieces.append('!' + conversion)
            if format_spec:
                pieces.append(':' + format_spec)
            pieces.append('}')
    return ''.join(pieces)
def calculate_class_weight(labels):
    """Calculates the inverse of the class cardinalities and
    normalizes the weights such that the minimum is equal to 1.

    Args:
        labels: List of integers representing class labels

    Returns:
        Numpy array with weight for each class, ordered by ascending
        unique label value
    """
    # np.unique returns sorted unique labels along with their counts,
    # replacing the original explicit per-class counting loop.
    _, counts = np.unique(np.array(labels), return_counts=True)
    weight = 1.0 / counts
    # Normalize so the most frequent class gets weight exactly 1.
    return weight / weight.min()
def CosEnv(length, rft=0.005, fs=44100):
    """Generate an amplitude envelope with raised-cosine rise/fall ramps.

    rft : Rise and fall time [s]
    length : Total length of window [s]
    fs : Sampling freq [Hz]
    """
    n_ramp = int(np.round(rft * fs))
    n_total = int(np.round(length * fs))
    n_flat = n_total - 2 * n_ramp
    # Normalized ramp time axis: n_ramp points covering [0, 1).
    t = np.arange(0, 1, 1 / n_ramp)
    rise = (1 + np.cos(np.pi + np.pi * t)) / 2  # ramps up from 0
    fall = (1 + np.cos(np.pi * t)) / 2          # ramps down from 1
    return np.concatenate((rise, np.ones(n_flat), fall), 0)
def flatten(nested_list):
    """
    Args:
        nested_list (list): list of lists

    Returns:
        iterator over the items of every inner list, in order

    Example:
        >>> import ubelt as ub
        >>> nested_list = [['a', 'b'], ['c', 'd']]
        >>> list(ub.flatten(nested_list))
        ['a', 'b', 'c', 'd']
    """
    # Lazy generator equivalent of itertools.chain.from_iterable.
    for inner in nested_list:
        yield from inner
def fetch_reply(query, session_id):
    """
    Fetch a chatbot reply for ``query`` within the given session.

    Returns a reply dict with keys 'type' (one of 'none', 'news',
    'smalltalk') and 'data' (a message string, or a list of news card
    elements for the 'news' intent).
    """
    response = apiai_response(query, session_id)
    intent, params = parse_response(response)
    reply = {}
    if intent is None:  # idiom fix: identity check, not `== None`
        reply['type'] = 'none'
        reply['data'] = "I didn't understand"
    elif intent == "news":
        reply['type'] = 'news'
        print(params)  # debug output of the parsed news parameters
        articles = get_news(params)
        news_elements = []
        for article in articles:
            # Build one messenger-style card per article.
            element = {
                'title': article['title'],
                'item_url': article['link'],
                'image_url': article['img'],
                'buttons': [{
                    "type": "web_url",
                    "title": "Read more",
                    "url": article['link']}],
            }
            news_elements.append(element)
        reply['data'] = news_elements
    elif intent.startswith('smalltalk'):
        reply['type'] = 'smalltalk'
        reply['data'] = response['result']['fulfillment']['speech']
    return reply
def create_SpatialReference(sr):
    """ creates an arcpy.spatial reference object """
    # sr: any value accepted by the arcpy.SpatialReference constructor
    # (e.g. a WKID/factory code or a name string)
    return arcpy.SpatialReference(sr)
def check_in_image(paste_image_location, paste_image_size, canvas_image_size):
    """Checks whether the location for the pasted image is within the canvas.

    Args:
      paste_image_location: a namedtuple of utils.XY, with 'x' and 'y'
        coordinates of the center of the image we want to paste.
      paste_image_size: a namedtuple of utils.XY, with 'x' and 'y' coordinates
        corresponding to the size of the image we are pasting.
      canvas_image_size: the size of the canvas that we are pasting the image to.

    Returns:
      True if the pasted image would lie within the canvas, False otherwise.
    """
    half_w = int(paste_image_size.x / 2) + 1
    half_h = int(paste_image_size.y / 2) + 1
    x = paste_image_location.x
    y = paste_image_location.y
    # The paste fits iff both extents stay inside [1, canvas_image_size].
    return (x - half_w >= 1 and x + half_w <= canvas_image_size and
            y - half_h >= 1 and y + half_h <= canvas_image_size)
import functools
def withSEVCHK(fcn):
    """decorator to raise a ChannelAccessException if the wrapped
    ca function does not return status = dbr.ECA_NORMAL.  This
    handles the common case of running :func:`PySEVCHK` for a
    function whose return value is from a corresponding libca function
    and whose return value should be ``dbr.ECA_NORMAL``.
    """
    @functools.wraps(fcn)
    def wrapper(*args, **kwds):
        "withSEVCHK wrapper"
        # run the wrapped libca call, then vet its status code;
        # PySEVCHK raises on anything other than ECA_NORMAL
        status = fcn(*args, **kwds)
        return PySEVCHK( fcn.__name__, status)
    return wrapper
from . import ism, gradient, referencebased
def available_methods():
    """Get all available importance scores.

    Collects the METHODS registry of every interpretation module into a
    single name -> method mapping.
    """
    # Renamed the accumulator so it no longer shadows this function's name.
    methods = {}
    for module in (ism, gradient, referencebased):
        methods = merge_dicts(methods, module.METHODS)
    return methods
def is_sketch_list_empty():
    """Return True when no sketches have been registered."""
    # truthiness idiom: an empty container is falsy
    return not _CVB_SKETCH_LIST
def pb22():
    """
    Problem 22 : Names scores.

    Read the name list (stripping the double quotes, lowercasing, splitting
    on commas), sort it alphabetically with utils.merge_sort, then score
    each name as (sum of alphabetical ranks of its letters) * (1-based
    position in the sorted list) and return the grand total.
    """
    with open('./resources/input_pb22.txt', 'r') as f:
        names = f.readline().replace('"', '').lower().split(sep=',')
    utils.merge_sort(names)
    total = 0
    for pos, name in enumerate(names, start=1):
        # ord(char) - 96 maps 'a'..'z' to 1..26 (names are lowercase).
        total += pos * sum(ord(char) - 96 for char in name)
    return total
def resize(im, target_size, max_size, stride=0, interpolation=cv2.INTER_LINEAR):
    """
    only resize input image to target size and return scale

    :param im: BGR image input by opencv
    :param target_size: one dimensional size (the short side)
    :param max_size: one dimensional max size (the long side)
    :param stride: if given, pad the image to designated stride
    :param interpolation: if given, using given interpolation method to resize image
    :return: (image, scale); the image is zero-padded when stride > 0
    """
    short_side = np.min(im.shape[0:2])
    long_side = np.max(im.shape[0:2])
    im_scale = float(target_size) / float(short_side)
    # prevent bigger axis from being more than max_size:
    if np.round(im_scale * long_side) > max_size:
        im_scale = float(max_size) / float(long_side)
    im = cv2.resize(im, None, None, fx=im_scale, fy=im_scale, interpolation=interpolation)
    if stride == 0:
        return im, im_scale
    # pad each spatial dimension up to the next multiple of stride
    padded_h = int(np.ceil(im.shape[0] / float(stride)) * stride)
    padded_w = int(np.ceil(im.shape[1] / float(stride)) * stride)
    padded_im = np.zeros((padded_h, padded_w, im.shape[2]))
    padded_im[:im.shape[0], :im.shape[1], :] = im
    return padded_im, im_scale
from typing import List
from typing import Tuple
import json
def _add_qc(
    samples: List[Sample], namespace: str, overwrite_multiqc: bool
) -> Tuple[str, str]:
    """
    Populates s.qc_values for each Sample object. Returns paths to MultiQC
    html and json files.

    When 'QC' is among SOURCES_TO_PROCESS, MultiQC is (re)run first and the
    parsed per-sample rows are loaded from the resulting JSON on GCS.
    """
    # Destination GCS paths for the MultiQC report and its parsed data.
    multiqc_html_path = join(
        f'gs://cpg-{NAGIM_PROJ_ID}-{namespace}-web/qc/multiqc.html'
    )
    multiqc_json_path = join(
        f'gs://cpg-{NAGIM_PROJ_ID}-{namespace}-analysis/qc/multiqc_data.json'
    )
    if 'QC' in SOURCES_TO_PROCESS:
        logger.info('Running MultiQC on QC files')
        parsed_json_fpath = _run_multiqc(
            samples,
            multiqc_html_path,
            multiqc_json_path,
            tmp_bucket=f'gs://cpg-{NAGIM_PROJ_ID}-{namespace}-tmp/qc',
            namespace=namespace,
            overwrite=overwrite_multiqc,
        )
        # Read the parsed per-sample QC rows back from GCS and attach them
        # to the matching Sample objects (keyed by nagim_id).
        gfs = gcsfs.GCSFileSystem()
        with gfs.open(parsed_json_fpath) as f:
            row_by_sample = json.load(f)
        for s in samples:
            if s.nagim_id in row_by_sample:
                s.qc_values = row_by_sample[s.nagim_id]
    return multiqc_html_path, multiqc_json_path
def oscillAnglesOfHKLs(hkls, chi, rMat_c, bMat, wavelength,
                       vInv=None, beamVec=bVec_ref, etaVec=eta_ref):
    """
    Takes a list of unit reciprocal lattice vectors in crystal frame to the
    specified detector-relative frame, subject to the conditions:

    1) the reciprocal lattice vector must be able to satisfy a bragg condition
    2) the associated diffracted beam must intersect the detector plane

    Required Arguments:
    hkls       -- (n, 3) ndarray of n reciprocal lattice vectors in the CRYSTAL FRAME
    chi        -- float representing the inclination angle of the oscillation axis (std coords)
    rMat_c     -- (3, 3) ndarray, the COB taking CRYSTAL FRAME components to SAMPLE FRAME
    bMat       -- (3, 3) ndarray, the COB taking RECIPROCAL LATTICE components to CRYSTAL FRAME
    wavelength -- float representing the x-ray wavelength in Angstroms

    Optional Keyword Arguments:
    beamVec -- (3, 1) mdarray containing the incident beam direction components in the LAB FRAME
    etaVec  -- (3, 1) mdarray containing the reference azimuth direction components in the LAB FRAME

    Outputs:
    ome0 -- (n, 3) ndarray containing the feasible (tTh, eta, ome) triplets for each input hkl (first solution)
    ome1 -- (n, 3) ndarray containing the feasible (tTh, eta, ome) triplets for each input hkl (second solution)

    Notes:
    ------------------------------------------------------------------------
    The reciprocal lattice vector, G, will satisfy the the Bragg condition
    when:

        b.T * G / ||G|| = -sin(theta)

    where b is the incident beam direction (k_i) and theta is the Bragg
    angle consistent with G and the specified wavelength. The components of
    G in the lab frame in this case are obtained using the crystal
    orientation, Rc, and the single-parameter oscillation matrix, Rs(ome):

        Rs(ome) * Rc * G / ||G||

    The equation above can be rearranged to yield an expression of the form:

        a*sin(ome) + b*cos(ome) = c

    which is solved using the relation:

        a*sin(x) + b*cos(x) = sqrt(a**2 + b**2) * sin(x + alpha)

        --> sin(x + alpha) = c / sqrt(a**2 + b**2)

    where:

        alpha = atan2(b, a)

    The solutions are:

                /
                |       arcsin(c / sqrt(a**2 + b**2)) - alpha
            x = <
                |  pi - arcsin(c / sqrt(a**2 + b**2)) - alpha
                \

    There is a double root in the case the reflection is tangent to the
    Debye-Scherrer cone (c**2 = a**2 + b**2), and no solution if the
    Laue condition cannot be satisfied (filled with NaNs in the results
    array here)
    """
    # The C extension requires C-contiguous float arrays, hence the
    # explicit conversions/flattening below.
    hkls = np.array(hkls, dtype=float, order='C')
    if vInv is None:
        # default inverse stretch tensor (undeformed crystal)
        vInv = np.ascontiguousarray(vInv_ref.flatten())
    else:
        vInv = np.ascontiguousarray(vInv.flatten())
    beamVec = np.ascontiguousarray(beamVec.flatten())
    etaVec = np.ascontiguousarray(etaVec.flatten())
    bMat = np.ascontiguousarray(bMat)
    # delegate the actual angle computation to the compiled transforms module
    return _transforms_CAPI.oscillAnglesOfHKLs(
        hkls, chi, rMat_c, bMat, wavelength, vInv, beamVec, etaVec
    )
def srfFaultSurfaceExtract(SRFfile):
    """
    Generate fault surface from an SRF file, following Graves' SRF
    convention used in BBP and CyberShake.

    Parameters
    ----------
    SRFfile : str
        Path to the SRF file.

    Returns
    -------
    dict
        'segments' : dict with per-segment 'dims' ([Ncol, Nrow]), 'dips'
            and 'ztors' lists,
        'FaultGeom' : (Nrow, Ncol, 3) ndarray of (lon, lat, dep) points,
        'rakes' : list of per-point rake angles.
    """
    # Context manager closes the handle (the original leaked it); range()
    # replaces the Python-2-only xrange().
    with open(SRFfile, 'r') as fid:
        lines = fid.readlines()
    # line 1: "PLANE <Nseg>"
    Nseg = int(lines[1].strip().split()[1])
    # loop over segments to get (Nrow, Ncol) of each segment;
    # the per-point fault surface is read later
    srfFaultSurface = {}
    srfFaultSurface['segments'] = {}
    dims = []
    dips = []
    ztors = []
    for iseg in range(Nseg):
        il0 = 2 * iseg + 2    # fault geometry info line
        spl = lines[il0].strip().split()
        lon0, lat0, L, W, Ncol, Nrow = np.array(spl, 'f')
        Ncol, Nrow = int(Ncol), int(Nrow)
        dims.append([Ncol, Nrow])
        il1 = il0 + 1    # focal mechanism and hypocenter info line
        spl = lines[il1].strip().split()
        strike, dip, ztor, hypoAS, hypoDD = np.array(spl, 'f')
        dips.append(dip)    # used later for the average dip angle (over segments)
        ztors.append(ztor)
    srfFaultSurface['segments']['dims'] = dims
    srfFaultSurface['segments']['dips'] = dips
    srfFaultSurface['segments']['ztors'] = ztors
    il0 = 2 * (Nseg + 1)    # "POINTS <Npoints>" header line
    Npoints = int(lines[il0].strip().split()[1])
    il0 = il0 + 1    # jump to the data block (one block per point)
    locs = []
    rakes = []
    while il0 < len(lines):
        spl = lines[il0].strip().split()
        lon, lat, dep, strike, dip, Area, Tinit, dt = np.array(spl, 'f')
        locs.append([lon, lat, dep])
        il0 = il0 + 1
        spl = lines[il0].strip().split()
        rake, slipA_AlongRake, Nt = np.array(spl[:3], 'f')
        rakes.append(rake)    # used later for the average rake (over points)
        # Nt slip values follow, six per line; ceil(Nt / 6) lines to skip
        dl = int(Nt / 6) + (Nt % 6 != 0) * 1
        il0 = il0 + dl + 1    # skip the slip block and move to the next point
    Nrow1 = 0
    Ncol1 = 0
    for iseg in range(Nseg):
        Nrow1 += dims[iseg][1]
        Ncol1 += dims[iseg][0]
    # NOTE(review): for multi-segment files summing BOTH rows and cols only
    # reshapes correctly in special cases — confirm against real inputs.
    FaultGeom = np.array(locs).reshape((Nrow1, Ncol1, 3))
    srfFaultSurface['FaultGeom'] = FaultGeom
    srfFaultSurface['rakes'] = rakes
    return srfFaultSurface
def _to_response(
    uploaded_protocol: UploadedProtocol,
) -> route_models.ProtocolResponseAttributes:
    """Build the route-level ProtocolResponseAttributes view of an UploadedProtocol."""
    meta = uploaded_protocol.data
    contents = meta.contents
    analysis = meta.analysis_result
    protocol_file = route_models.FileAttributes(
        basename=contents.protocol_file.path.name
    )
    support_files = []
    for support_file in contents.support_files:
        support_files.append(
            route_models.FileAttributes(basename=support_file.path.name)
        )
    return route_models.ProtocolResponseAttributes(
        id=meta.identifier,
        protocolFile=protocol_file,
        supportFiles=support_files,
        lastModifiedAt=meta.last_modified_at,
        createdAt=meta.created_at,
        metadata=analysis.meta,
        requiredEquipment=analysis.required_equipment,
        errors=analysis.errors,
    )
def has_valid_chars(token: str) -> bool:
    """
    Decide whether this token has a reasonable character mix: at most 10%
    of its characters may be something other than alphanumerics, '.', '-'
    or ' '.

    :param token: the token to inspect
    :return: True, iff the character mix is considered "reasonable"
    """
    budget = int(len(token) / 10)   # allowed number of unusual characters
    unusual = 0
    for ch in token:
        if ch.isalnum() or ch in '.- ':
            continue
        unusual += 1
        if unusual > budget:
            return False
    return True
import logging
def sqlalchemy_engine(args, url):
    """Build an Oracle SQLAlchemy engine from the CLI arguments.

    Note: the ``url`` parameter is ignored and rebuilt from ``args``; it is
    kept only for backward compatibility with existing callers.

    :param args: namespace with ``ora_path``, ``user``, ``host`` and ``sid``
    :param url: unused; the URL is always derived from ``args``
    :return: a SQLAlchemy Engine for oracle://user@host/sid
    """
    environ['PATH'] = args.ora_path  # the Oracle client must be on PATH
    password = pswd(args.host, args.user)
    url = f'oracle://{args.user}:{password}@{args.host}/{args.sid}'
    # Security fix: log a sanitized URL — never write the plaintext
    # password to the log (the original logged the full credentialed URL).
    logging.info(f'oracle://{args.user}:***@{args.host}/{args.sid}')
    return create_engine(url)
def shortcut_layer(name: str, shortcut, inputs):
    """
    Residual ("shortcut") connection: adds an earlier layer's output to the
    current one. Residual blocks act as gradient 'highways' that let the
    gradient flow back into the first convolutional layers, which helps very
    deep convolutional networks converge.

    Parameters
    ----------
    name : string
        The name of the tensor scope, as shown in TensorBoard.
    shortcut : tensor
        The output of a previous convolutional layer.
    inputs : tensor
        The output of the immediately previous convolutional layer.

    Returns
    -------
    inputs : tensor
        The elementwise sum of inputs and shortcut.
    new_shortcut : tensor
        The same summed tensor, for a future residual block to connect to.
    """
    with tf.variable_scope(name):
        inputs += shortcut
    return inputs, inputs
def event_message(iden, event):
    """Build the "event" message envelope for subscription id *iden*."""
    message = dict(id=iden, type="event")
    message["event"] = event
    return message
def match(command):
    """Match function copied from cd_mkdir.py"""
    if not command.script.startswith('cd '):
        return False
    output = command.output.lower()
    indicators = (
        'no such file or directory',
        'cd: can\'t cd to',
        'does not exist',
    )
    return any(indicator in output for indicator in indicators)
def db_remove_game(game: str, channel: str) -> bool:
    """Remove *game* for *channel* from the deathcount table.

    Returns True when the game existed and was deleted, False otherwise.
    """
    if not db_check_game_exists(game, channel):
        return False
    cursor.execute(
        "DELETE FROM deathcount "
        "WHERE channel=(?) AND game=(?)",
        (channel.lower(), game.lower())
    )
    connection.commit()
    return True
def get_unique_id():
    """Increment and return the module-wide counter, yielding an ID that is
    unique over the current segmentation.

    :return: unique_id
    :rtype: int
    """
    global UNIQUE_ID
    UNIQUE_ID += 1
    return UNIQUE_ID
def logout_route():
    """Log the current user out and send them back to the index page."""
    logout_user()
    index = url_for('app.index_route')
    return redirect(index)
def get_repo_name(
    name: str, in_mode: str, include_host_name: bool = False
) -> str:
    """
    Return the full/short name of a Git repo based on the other name.

    :param in_mode: the values `full_name` or `short_name` determine how to
        interpret `name`
    """
    mapping = get_complete_repo_map(in_mode, include_host_name)
    dbg.dassert_in(
        name, mapping, "Invalid name='%s' for in_mode='%s'", name, in_mode
    )
    return mapping[name]
def binary_search_hi(a, d, lo, hi):
    """
    Created for leetcode prob 34.

    Return the highest index in a[lo..hi] holding the value d; requires
    a[lo] == d on entry.
    """
    if a[lo] != d:
        raise Exception("d should be a[lo]")
    left, right = lo, hi
    while right > left:
        # upper midpoint so the loop always makes progress
        probe = (left + right) // 2 + 1
        if a[probe] == d:
            left = probe
        else:
            right = probe - 1
    return right if a[right] == d else left
def check_subscription(func):
    """Checks if the user signed up for a paid subscription """
    @wraps(func)
    def guarded(*args, **kwargs):
        if current_user.is_authenticated():
            subscription = current_user.subscription
            needs_payment = (
                not subscription.active
                and subscription.plan.name != 'Free'
            )
            if needs_payment:
                return redirect(
                    url_for('account.subscribe', plan_id=subscription.plan_id)
                )
        return func(*args, **kwargs)
    return guarded
def spikesbetter(P):
    """
    Same as the custom cython function _dice6, a python implementation for
    easy use on other computers.  Does the spin selection procedure based on
    the given array of silence probabilities.

    Parameters
    ----------
    P : ndarray, shape (loop, xmax, N)
        Probability of silence for each entry.

    Returns
    -------
    ndarray of spin values in {0, 1}, shape (loop, xmax, N), dtype float.
    """
    # Vectorized replacement for the original triple Python loop.
    # np.random.rand fills the array with consecutive draws in C order,
    # exactly the order the nested loops consumed them, so for a fixed
    # seed the output is identical — just much faster.
    return (np.random.rand(*P.shape) > P).astype(float)
def t3x1_y(y):
    """
    Build a pure translation along the y axis (x and z stay zero).
    """
    x = z = 0.0
    return t3x1(x, y, z)
def perm_data_time(x, indices):
    """
    Permute the vertex axis of a data tensor (i.e. exchange node ids) so
    that binary unions form the clustering tree.

    x is (N, M, Q); indices may be longer than M to make room for fake
    singleton vertices, which are filled with zeros.  Returns an array of
    shape (N, len(indices), Q); returns x unchanged when indices is None.
    """
    if indices is None:
        return x
    n_samples, n_vertices, n_features = x.shape
    n_new = len(indices)
    assert n_new >= n_vertices
    out = np.empty((n_samples, n_new, n_features))
    for new_pos, old_pos in enumerate(indices):
        if old_pos < n_vertices:
            # Existing vertex, i.e. real data.
            out[:, new_pos, :] = x[:, old_pos, :]
        else:
            # Fake vertex added for singletons; stays 0 so that max
            # pooling chooses the singleton.
            out[:, new_pos, :] = np.zeros((n_samples, n_features))
    return out
def check_chromium() -> bool:
    """Check if chromium is placed at correct path."""
    executable_path = chromium_executable()
    return executable_path.exists()
def plot_edges(lattice : Lattice,
               labels : np.ndarray = 0,
               color_scheme : np.ndarray = ['k','r','b'],
               subset : np.ndarray = slice(None, None, None),
               directions : np.ndarray = None,
               ax = None,
               arrow_head_length = None,
               **kwargs):
    """
    Plot the edges of a lattice with optional arrows.
    This uses matplotlib.collections.LineCollection under the hood and you may
    pass in any keyword to be passed along to it.
    Note that arrays for alpha or linestyle don't currently work since they would have to be tiled correctly, and are not currently.
    If directions is not none, arrows are plotted from the first vertex to the second unless direction[i] == -1
    :param lattice: The lattice to use.
    :type lattice: Lattice
    :param labels: int or array of ints specifying the colors, defaults to 0. May be the same size as the vertices or of the subset.
    :type labels: np.ndarray, optional
    :param color_scheme: List or array of colors, defaults to ['black', ]
    :type color_scheme: np.ndarray, optional
    :param subset: An array of indices, boolean array or slice that selects which elements to plot, defaults to plotting all.
    :type subset: np.ndarray, optional
    :param directions: An array of arrow directions +/-1, defaults to None.
    :type directions: np.ndarray, optional
    :param ax: The axis to plot on, defaults to plt.gca()
    :type ax: axis, optional
    :param arrow_head_length: length of arrow heads when directions are drawn; passed through to _plot_edge_arrows.
    """
    # normalise labels/colors/subset and resolve the target axis + transform
    labels, colors, color_scheme, subset, ax, transform = _process_plot_args(lattice, ax, labels, color_scheme, subset, lattice.n_edges)
    # each edge is replicated into the home unit cell plus its 8 neighbours,
    # so tile the per-edge colors 9x to match
    edge_colors = np.tile(colors, 9)
    edge_vertices = lattice.vertices.positions[lattice.edges.indices[subset]]
    # shift the start vertex back by the edge's unit-cell crossing vector so
    # edges that wrap around the periodic boundary are drawn as straight lines
    edge_vertices[:, 0, :] -= lattice.edges.crossing[subset]
    unit_cell_vectors = generate_point_array(np.array([0,0]), padding = 1)[:, None, None, :] #shape (9, 2) -> (9, 1, 1, 2)
    replicated_edges = edge_vertices[None,...] + unit_cell_vectors #shape (n_edges, 2, 2) -> (9, n_edges, 2, 2)
    replicated_edges = replicated_edges.reshape((-1,2,2)) #shape (9, n_edges, 2, 2) -> (9*n_edges, 2, 2)
    # keep only replicas that intersect, or lie fully inside, the unit cell
    vis = _lines_cross_unit_cell(replicated_edges) | _line_fully_in_unit_cell(replicated_edges)
    # print(edge_colors.shape, replicated_edges.shape, vis.shape)
    lc = LineCollection(replicated_edges[vis, ...], colors = edge_colors[vis], transform = transform, path_effects=[path_effects.Stroke(capstyle="round")], **kwargs)
    ax.add_collection(lc)
    if directions is not None:
        # broadcast the +/-1 direction flags to one entry per selected edge,
        # then tile to match the 9 replicas before masking to visible ones
        directions = _broadcast_args(directions, subset, lattice.n_edges, dtype = int)
        directions = np.tile(directions, 9)
        _plot_edge_arrows(ax, edge_colors[vis],replicated_edges[vis, ...],directions[vis], lc, lattice.unit_cell, arrow_head_length = arrow_head_length)
    return ax
def menu_items_api(restaurant_id):
    """Route handler for the API endpoint retrieving menu items for a restaurant.

    Args:
        restaurant_id: An int representing the id of the restaurant whose
            menu items are to be retrieved.

    Returns:
        response: A json object containing all menu items for the given
            restaurant.
    """
    items = session.query(MenuItem).filter_by(restaurant_id=restaurant_id).all()
    serialized = [item.serialize for item in items]
    return jsonify(menu_items=serialized)
import logging
import tqdm
def run(indata):
    """Post-process an event-detection field into per-location event tables.

    :param indata: event detection DataArray, or a Dataset containing an
        "Event_ID" variable; must have time, lat and lon dimensions.
    :return: xr.Dataset with per-(lat, lon) Event_ID, initial_index,
        duration and initial_date, one slot per possible event id
        (max id + 1 slots).
    """
    if isinstance(indata, xr.DataArray):
        events = indata
    else:
        events = indata["Event_ID"]
    logging.info("events array defined.")
    # turn events into time x space by stacking lat & lon:
    events_stacked = events.stack(z=("lat", "lon"))
    logging.info("Stacked events.")
    # events_stacked is [time, z]
    # make sure to only have integers for the event IDs:
    zint = events_stacked.values.astype(int)
    logging.info(f"Convert events to integers. Result is shape {zint.shape}.") # should still be [time, z]
    mx = np.max(zint)
    logging.info(f"Max number of events is {mx}; output dimesion size (add one for zeros).")
    # NOTE(review): theloop is defined elsewhere; it appears to return, per
    # location, the event ids, the time index of each event's start, and each
    # event's duration — confirm against its definition.
    ids, ndx, dur = theloop(zint)
    logging.info("Loop done.")
    logging.info(f"kind of ids: {type(ids)}\n ndx: {type(ndx)}, shape: {ndx.shape}\n dur: {type(dur)}")
    # use ndx to go back to 'time' and construct array of datetimes
    dates = np.full(ndx.shape, np.datetime64('NaT'), dtype='datetime64[D]')  # fill value should be numpy's "not a time" value. (what if time is in cftime, though?); dtype needs to be set with correct unit (D = days)
    for loc in tqdm(np.arange(ndx.shape[0]), desc="Dates Loop"):
        # only the slots up to this location's last event id hold real starts
        last_event = ids[loc, :].max()
        dates[loc, 0:last_event] = indata.time[ndx[loc, 0:last_event]]  # loc: int; dates: datetime; ndx: int
    logging.info("Finished the initial dates reconstruction.")
    # dates[:, 1:] = np.ma.masked_where(ndx[:, 1:] == 0, dates[:, 1:], copy=False) # mask where eventID == 0
    # Convert resulting numpy arrays to Xarray DataArrays
    ids_da = xr.DataArray(ids, coords={"z":events_stacked['z'], 'events':np.arange(1,mx+2)},
                          dims=("z", "events"))
    ndx_da = xr.DataArray(ndx, coords={"z":events_stacked['z'], 'events':np.arange(1,mx+2)},
                          dims=("z", "events"))
    cnt_da = xr.DataArray(dur, coords={"z":events_stacked['z'], 'events':np.arange(1,mx+2)},
                          dims=("z", "events"))
    dates_da = xr.DataArray(dates, coords={"z":events_stacked['z'], 'events':np.arange(1,mx+2)},
                            dims=("z", "events"))
    ids_da.name = "Event_ID"
    ndx_da.name = "initial_index"
    cnt_da.name = "duration"
    dates_da.name = 'initial_date'
    logging.info("DataArray are made")
    # unstack z back into separate lat/lon dimensions
    ids_da = ids_da.unstack()
    ndx_da = ndx_da.unstack()
    cnt_da = cnt_da.unstack()
    dates_da = dates_da.unstack()
    logging.info("Unstacked.")
    return xr.merge([ids_da, ndx_da, cnt_da, dates_da])
def format_trace_id(trace_id: int) -> str:
    """Render *trace_id* as the 32-character zero-padded lowercase hex
    string required by the B3 propagation specification."""
    return f"{trace_id:032x}"
def get_count_name(df):
    """Add a 'Words_Count' column holding the number of whitespace-separated
    words in each row's 'Name'.

    Parameters
    ----------
    df : pandas.DataFrame
        Must contain a string-valued 'Name' column; modified in place.

    Returns
    -------
    pandas.DataFrame
        The same frame, with the new integer 'Words_Count' column.
    """
    df['Words_Count'] = df['Name'].str.split().str.len().astype(int)
    return df
from django.contrib.auth.models import User
def register_action(request):
    """Register a new user with the username/password from the query string.

    Creates the user via Django's ``User.objects.create_user`` (an ORM call
    on the auth user table).  If creation fails — most commonly because the
    username already exists — the failure message is returned instead of
    propagating the error.

    :param request: Django request; expects GET params ``username`` and
        ``password``
    :return: HttpResponse with a success or failure message
    """
    u_name = request.GET['username']
    p_word = request.GET['password']
    try:
        user = User.objects.create_user(username=u_name, password=p_word)
        user.save()
        return HttpResponse('注册成功')
    except Exception:
        # A duplicate username raises IntegrityError; catch Exception (not a
        # bare except, which would also swallow SystemExit/KeyboardInterrupt)
        # so any creation failure maps to the failure message.
        return HttpResponse('注册失败~用户名好像已经存在了~')
from typing import Callable
from typing import Any
import asyncio
import functools
async def run_blocking_io(func: Callable, *args, **kwargs) -> Any:
    """|coro|
    Run some blocking function in an event loop.
    If there is a running loop, ``'func'`` is executed in it.
    Otherwise, a new loop is being created and closed at the end of the execution.
    Example:
    .. code-block:: python3
        def make_image():
            ...  # long code of creating an image
        # somewhere in an async function:
        await run_blocking_io(make_image)
    """
    loop = acquire_loop(running=True)
    asyncio.set_event_loop(loop)
    # bind the arguments up front; run_in_executor only takes a callable
    call = functools.partial(func, *args, **kwargs)
    return await loop.run_in_executor(None, call)
def parse_config(cfg, section):
    """ parse config data structure, return data of required section """
    valid_sections = ("info", "project", "variables", "refdata")
    if section not in valid_sections:
        log.critical("Section <%s> not a valid name" % section)
        exit(1)
    try:
        return cfg[section]
    except KeyError:
        log.critical(cfg.keys())
        log.critical("Section <%s> not found in config" % section)
        exit(1)
from hmmlearn import hmm
import collections
def _predict_states(freqs):
    """Use frequencies to predict states across a chromosome.

    Normalize so heterozygote blocks are assigned state 0 and homozygous
    are assigned state 1 (the state whose median frequency is lower keeps
    label 0).
    """
    observations = np.column_stack([np.array(freqs)])
    model = hmm.GaussianHMM(2, covariance_type="full")
    model.fit(observations)
    states = model.predict(observations)
    grouped = collections.defaultdict(list)
    for obs, state in zip(observations, states):
        grouped[state].append(obs)
    # relabel so state 0 always corresponds to the lower-median group
    if np.median(grouped[0]) > np.median(grouped[1]):
        states = [0 if s == 1 else 1 for s in states]
    return states
def tp(*args) -> np.ndarray:
    """Tensor product.

    Left-folds ``np.tensordot(a, b, 0)`` over the argument list
    ``args = [a0, a1, a2, ...]``, so e.g.
    tp(a0, a1, a2) = tp(tp(a0, a1), a2).

    Parameters
    ----------
    args : sequence
        Sequence of tensors.

    Returns
    -------
    np.ndarray
        Tensor product.

    Examples
    --------
    >>> a = np.random.rand(2, 3, 4)
    ... b = np.random.rand(7, 8, 9)
    ... c = tp(a, b)  # c_ijkmno = a_ijk b_mno
    ... c.shape == (2, 3, 4, 7, 8, 9)
    """
    out = args[0]
    for factor in args[1:]:
        out = np.tensordot(out, factor, 0)
    return out
def aws_aws_page():
    """main endpoint"""
    form = GenericFormTemplate()
    context = {
        'form': form,
        'text': util.get_text(module_path(), config.language),
        'options': g.user.get_options(),
    }
    return render_template('aws_page.html', **context)
def precompute(instr):
    """
    Dispatch precomputation for the supported surface-code variants.

    Args:
        instr: instruction whose ``qecc`` selects the code family.

    Returns:
        Precomputed data from the matching ``code_surface4444*`` helper.

    Raises:
        Exception: if the code/compiler combination is not supported.
    """
    qecc = instr.qecc
    uses_check2 = qecc.circuit_compiler.name == 'Check2Circuits'
    if qecc.name == '4.4.4.4 Surface Code' and uses_check2:
        return code_surface4444(instr)
    if qecc.name == 'Medial 4.4.4.4 Surface Code' and uses_check2:
        return code_surface4444medial(instr)
    raise Exception('Can only handle the non-medial surface code!')
import re
def remove_url(txt):
    """Strip URLs from a text string.

    Scheme-style URLs (``xxx://...``) are removed wholesale; any remaining
    character that is not alphanumeric, space or tab is also dropped, and
    whitespace is collapsed to single spaces.

    Parameters
    ----------
    txt : string
        A text string that you want to parse and remove urls from.

    Returns
    -------
    The same txt string with URLs removed.
    """
    cleaned = re.sub(r"([^0-9A-Za-z \t])|(\w+:\/\/\S+)", "", txt)
    return " ".join(cleaned.split())
def use_database(fn):
    """
    Ensure that the correct database context is used for the wrapped function.
    """
    @wraps(fn)
    def bound(self, *args, **kwargs):
        ctx = self.database.bind_ctx(self.models)
        with ctx:
            return fn(self, *args, **kwargs)
    return bound
import imaplib, email, email.header
def getImapMailboxEmail(server, user, password, index, path="INBOX", searchSpec=None):
    """
    getImapMailboxEmail(server, user, password, index, path="INBOX", searchSpec=None)
    Load the specified raw email (RFC822 message) from an IMAP server over
    SSL.  index is 0-based into the list of matching message ids.

    Returns the raw message bytes, None when the mailbox matched no
    messages, or an "ERR NO MAILBOX:<path>" string when the mailbox could
    not be selected.  Any other failure is re-raised wrapped in
    SqliteFunctionException.

    Example
        WITH RECURSIVE
        cnt(x) AS (VALUES(1) UNION ALL SELECT x+1 FROM cnt WHERE x<imap_count("127.0.0.1","jj","pass","test"))
        select x-1 as num, imap_email("127.0.0.1","jj","pass",x-1,"test") as message FROM cnt;
    See also
        https://gist.github.com/robulouski/7441883
        https://oracle-base.com/articles/10g/utl_mail-send-email-from-the-oracle-database
        https://docs.python.org/3/library/sqlite3.html#sqlite3.Connection.create_aggregate
    """
    try:
        with imaplib.IMAP4_SSL(server) as M:
            M.login(user,password)
            typ, data=M.select(path)
            # select() reports the message count as bytes; b'0' is treated
            # as "mailbox unusable" here
            if(data[0]==b'0'):
                print ("*SELECT*FAILED",path,typ,data)
                return "ERR NO MAILBOX:"+path
            if searchSpec== None:
                typ, data = M.search(None, 'ALL')
            else:
                typ, data = M.search(None, searchSpec)
            # data[0] is a space-separated byte string of message ids
            if len(data[0].split()) >0:
                id2fetch= (data[0].split())[index]
                typ, data = M.fetch(id2fetch, '(RFC822)')
                # data[0] is (envelope, raw-message) — keep the raw bytes
                msg_return=data[0][1]
            else:
                msg_return=None
            M.logout()
            return msg_return
    except Exception as e:
        raise SqliteFunctionException( e )
def nmatches_mem(txt, pat, t, p, mem):
    """Count matches of pat[:p] as a subsequence of txt[:t] with recursion +
    memoization using a dictionary.
    (This solution will also crash when the recursion limit is reached.)

    Call as: nmatches_mem(text, pattern, len(text), len(pattern), {})
    """
    if (t, p) in mem:
        return mem[t, p]
    if p == 0:
        return 1
    if t == 0:
        return 0
    total = 0
    last = pat[p - 1]
    for k in range(t):
        if txt[k] == last:
            total += nmatches_mem(txt, pat, k, p - 1, mem)
    mem[t, p] = total
    return total
def data_get():
    """
    Get shared data from this server's local store.
    """
    payload = request.json
    consistency = payload["consistency"]
    name = payload["name"]
    field = payload["field"]
    value = ""
    error = "ok"
    if consistency == "strict":
        store = globalvars.get_data_store(globalvars.STRICT_CENTRALIZED)
        with store_lock:
            try:
                value = store.get(name, field)
            except AttributeError as ex:
                error = str(ex)
    return jsonify({"value": value, "error": error})
def pad_sequences(sequences, maxlen=None, value=0):
    """
    Pad (and truncate) sequences of shape (num_samples, num_timesteps) to a
    common length; sequences longer than maxlen are cut at the end.
    """
    if maxlen is None:
        maxlen = max(len(seq) for seq in sequences)
    padded = []
    for seq in sequences:
        trimmed = seq[:maxlen]
        padded.append(
            np.pad(trimmed, (0, maxlen - len(trimmed)),
                   mode='constant', constant_values=value)
        )
    return np.array(padded)
def empty_surface(fill_color, size=None, flags=0):
    """Returns an empty surface filled with fill_color.

    :param fill_color: color to fill the surface with
    :type fill_color: pygame.Color
    :param size: the size of the new surface; if None it is created
        to be the same size as the screen
    :type size: int-2-tuple
    :param flags: pygame surface flags forwarded to pygame.Surface
    """
    if size is None:
        # default to the full display surface dimensions
        size = pygame.display.get_surface().get_rect().size
    surf = pygame.Surface(size, flags=flags)
    surf.fill(fill_color)
    return surf
def getNetAddress(ip, netmask):
    """
    Get the network address from a host IP and the netmask.

    :param ip: Host's IP address
    :type ip: str
    :param netmask: Netmask of the network
    :type netmask: str
    :returns: Address of the network calculated using the host IP and netmask
    :rtype: str
    """
    prefix = getPrefix(netmask)
    cidr = "{}/{}".format(ip, prefix)
    return str(IPNetwork(cidr).network)
def balance_intent_data(df):
    """Balance the data for intent detection task

    Low-resource labels are oversampled (rows duplicated) up to the mean
    label count; labels at or above the mean are left untouched.

    Args:
        df (pandas.DataFrame): data to be balance, should contain "Core Relations" column
    Returns:
        pandas.DataFrame: balanced data (original rows plus sampled duplicates)
    """
    # NOTE(review): build_counter is defined elsewhere; presumably returns a
    # mapping label -> occurrence count for "Core Relations" — confirm.
    relation_counter = build_counter(df, "Core Relations")
    # augment each low resource label to average count
    avg_count = int(
        sum(relation_counter.values()) / len(relation_counter.values())
    )
    # empty frame with df's columns, used as the accumulator
    sample_df = df.sample(0)
    for k, v in relation_counter.items():
        # only augment the low resource label
        if v >= avg_count:
            continue
        # to be sample amount
        sample_count = avg_count - v
        idx_of_label_k = df["Core Relations"].apply(lambda label: k in label)
        # if sample amount if larger, then sample all the value until it exceed the sample count
        # (df.sample is without replacement, so at most relation_counter[k]
        # rows can be drawn per pass)
        while sample_count > relation_counter[k]:
            temp_df = df[idx_of_label_k].sample(relation_counter[k])
            sample_df = pd.concat([sample_df, temp_df])
            sample_count -= relation_counter[k]
        sample_df = pd.concat(
            [sample_df, df[idx_of_label_k].sample(sample_count)]
        )
    balance_df = pd.concat([df.copy(), sample_df])
    return balance_df
def npareamajority(values, areaclass):
    """
    numpy area-majority procedure: every element is replaced by the most
    frequent value among all elements sharing its area class.

    :param values: integer array of per-cell values
    :param areaclass: array of area-class labels, same shape as values
    :return: array where each cell holds the majority value of its class
    """
    classes, inverse = np.unique(areaclass, return_inverse=True)
    majorities = []
    for cls in classes:
        members = values[areaclass == cls]
        majorities.append(np.argmax(np.bincount(members)))
    return np.array(majorities)[inverse]
def calcReward(eventPos, carPos, closeReward, cancelPenalty, openedPenalty):
    """
    Calculate the reward achieved assuming each event is picked up.

    :param eventPos: positions of events, shape (nEvents, 2)
    :param carPos: positions of cars, shape (nCars, 2)
    :param closeReward: reward granted when an event is closed
    :param cancelPenalty: penalty if an event is canceled (currently unused —
        events are assumed never canceled)
    :param openedPenalty: penalty per waiting time (currently unused —
        events are assumed picked up at a specific time)
    :return: rewardCarsToEvents (R_{cars,events}), rewardEventsToEvents
        (R_{events,events}), plus the cityblock travel-time matrices
        timeCarsToEvents and timeEventsToEvents
    """
    distEventsToEvents = cdist(eventPos, eventPos, metric='cityblock')
    distCarsToEvents = cdist(carPos, eventPos, metric='cityblock')
    # reward = closeReward minus travel cost, elementwise
    rewardCarsToEvents = closeReward - distCarsToEvents
    rewardEventsToEvents = closeReward - distEventsToEvents
    # travel times equal the cityblock distances (unit speed)
    return rewardCarsToEvents, rewardEventsToEvents, distCarsToEvents, distEventsToEvents
from typing import Any
def b64encode(s: Any, altchars: Any = None) -> bytes:
    """Encode bytes using the standard Base64 alphabet.

    Argument ``s`` is a :term:`bytes-like object` to encode.
    Optional ``altchars`` must be a byte string of length 2 which specifies
    an alternative alphabet for the '+' and '/' characters. This allows an
    application to e.g. generate url or filesystem safe Base64 strings.
    The result is returned as a :class:`bytes` object.
    """
    if altchars is None:
        return builtin_encode(s, None)
    altchars = _get_bytes(altchars)
    assert len(altchars) == 2, repr(altchars)
    return builtin_encode(s, altchars)
def select_programs(args, filter_paused=True, force=False):
    """
    Return a list of selected programs from command line arguments.

    Exactly one of ``-a/--all`` or explicit program names must be given;
    exits with status 1 (after logging) on invalid selections or on names
    that do not exist.
    """
    # exactly one selection mechanism must be used
    if not (args.all ^ bool(args.names)):
        if args.all:
            log.error("You may not specify a program name when you use the -a/--all option (See -h/--help for more details)")
        else:
            log.error("You must select at least one program from the command line (See -h/--help for more details)")
        raise SystemExit(1)
    if args.all:
        programs = [prog for prog in Program.find_for_user(force=force)
                    if not (filter_paused and prog.is_paused)]
    else:
        programs = [Program(name, force=force) for name in args.names]
    missing = [program for program in programs if not program.exists()]
    for program in missing:
        log.error("Program '%s' does not exist" % program.name)
    if missing:
        raise SystemExit(1)
    return list(programs)
import math
def three_comp_two_objective_functions(obj_vars, hz: int,
                                       ttes: SimpleTTEMeasures,
                                       recovery_measures: SimpleRecMeasures):
    """
    Two objective functions for recovery and expenditure error
    that get all required params as arguments
    :param obj_vars: values that define the three comp agent [anf, ans, m_ae, m_anf, m_ans, theta, gamma, phi]
    :param hz: estimations per second for agent
    :param ttes: time to exhaustion tests to use
    :param recovery_measures: recovery trials to compare to
    :return: tte_nrmse and rec_nrmse values to minimise (the smaller the better)
    """
    # differences in exhaustion times determine fitness
    tte_se = [] # TTE standard errors
    ttes_exp = [] # TTEs that are expected (original)
    rec_se = [] # Recovery standard errors
    recs_exp = [] # Recovery ratios expected (original)
    three_comp_agent = ThreeCompHydAgent(hz=hz,
                                         a_anf=obj_vars[0],
                                         a_ans=obj_vars[1],
                                         m_ae=obj_vars[2],
                                         m_ans=obj_vars[3],
                                         m_anf=obj_vars[4],
                                         the=obj_vars[5],
                                         gam=obj_vars[6],
                                         phi=obj_vars[7])
    # compare tte times
    for tte_t, tte_p in ttes.iterate_pairs():
        # use the simulator
        try:
            tte = ThreeCompHydSimulator.do_a_tte(agent=three_comp_agent,
                                                 p_exp=tte_p)
        except UserWarning:
            # infeasible configuration: penalize with a large fixed TTE
            tte = 5000
        # square time difference
        tte_se.append(pow(tte - tte_t, 2))
        ttes_exp.append(tte_t)
    # get NRMSE (Normalised Root Mean Squared Error)
    tte_nrmse = math.sqrt(sum(tte_se) / len(tte_se)) / np.mean(ttes_exp)
    # compare all available recovery ratio measures
    for p_exp, p_rec, t_rec, expected in recovery_measures.iterate_measures():
        # use the simulator
        try:
            achieved = ThreeCompHydSimulator.get_recovery_ratio_wb1_wb2(three_comp_agent,
                                                                        p_exp=p_exp,
                                                                        p_rec=p_rec,
                                                                        t_rec=t_rec)
        except UserWarning:
            # infeasible configuration: penalize with an impossible ratio
            achieved = 200
        # add the squared difference
        rec_se.append(pow(expected - achieved, 2))
        recs_exp.append(expected)
    # get NRMSE
    rec_nrmse = math.sqrt(sum(rec_se) / len(rec_se)) / np.mean(recs_exp)
    # determine return value
    return tte_nrmse, rec_nrmse
def scale(data, new_min, new_max):
    """Scales a normalised data series to a new range.

    :param data: The normalised data series to be scaled
    :type data: List of numeric values
    :param new_min: The minimum value of the scaled data series
    :type new_min: numeric
    :param new_max: The new maximum of the scaled data series
    :type new_max: numeric
    :return: A scaled data series
    :rtype: list
    """
    span = new_max - new_min
    return [value * span + new_min for value in data]
def default_config() -> ClientConfig:
    """
    :return: Default configuration for the experiment
    """
    sim_config = SimulationConfig(
        render=False,
        sleep=0.8,
        video=True,
        log_frequency=1,
        video_fps=5,
        video_dir=default_output_dir() + "/videos",
        num_episodes=1000,
        gifs=True,
        gif_dir=default_output_dir() + "/gifs",
        video_frequency=1,
    )
    return ClientConfig(
        env_name="idsgame-v3",
        attacker_type=AgentType.RANDOM.value,
        defender_type=AgentType.DEFEND_MINIMAL_VALUE.value,
        mode=RunnerMode.SIMULATE.value,
        simulation_config=sim_config,
        output_dir=default_output_dir(),
        title="RandomAttacker vs DefendMinimalDefender",
    )
def get_same_padding(kernel_size: int, stride: int, dilation: int) -> int:
    """Calculates the padding size to obtain same padding.

    Same padding means that the output will have the shape
    input_shape / stride.  That means, for stride = 1 the output shape is
    the same as the input, and stride = 2 gives an output that is half of
    the input shape.  Only tested to be correct with odd kernel sizes.

    Args:
        kernel_size: convolution kernel size.
        stride: convolution stride.
        dilation: convolution dilation.

    Raises:
        ValueError: Only stride or dilation may be greater than 1.

    Returns:
        padding value to obtain same padding.
    """
    if stride > 1 and dilation > 1:
        raise ValueError("Only stride OR dilation may be greater than 1")
    if dilation > 1:
        # effective receptive width of the dilated kernel
        effective_kernel = dilation * (kernel_size - 1) + 1
        return effective_kernel // 2
    return kernel_size // 2
def move_child_position(context, request):
    """ Move the child from one position to another.

    :param context: "Container" node in which the child changes its position.
    :type context: :class:kotti.resources.Node or descendant
    :param request: Current request (of method POST). Must contain either
                    "from" and "to" params or a json_body that contain(s) the
                    0-based old (i.e. the current index of the child to be
                    moved) and new position (its new index) values.
    :result: JSON serializable object with a single attribute ("result") that
             is either "success" or "error".
    :rtype: dict
    """
    data = request.POST or request.json_body
    if "from" not in data or "to" not in data:
        return {"result": "error"}
    last_index = len(context.children) - 1
    try:
        old_index = int(data["from"])
        new_index = int(data["to"])
        if not (0 <= old_index <= last_index and 0 <= new_index <= last_index):
            raise ValueError
    except ValueError:
        return {"result": "error"}
    # sqlalchemy.ext.orderinglist takes care of the "right" sequence
    # numbers (immediately consecutive, starting with 0) for us.
    context.children.insert(new_index, context.children.pop(old_index))
    return {"result": "success"}
import requests
def get_current_version(package: str) -> str:
    """
    Query the PyPI JSON API for the latest released version of *package*.

    :param package: str - name of the package on PyPI
    :return: str - latest version string, or None when the request fails or
             the payload lacks version information
    """
    response = requests.get(
        url=f'{PYPI_BASE_URL}/pypi/{package}/json',
        headers={'Content-Type': 'application/json'},
    )
    if response.status_code == 200:
        payload = response.json()
        if 'info' in payload and 'version' in payload['info']:
            # only return version if everything went OK, otherwise, too bad!
            return payload['info']['version']
    return None
def mconcat(xs : [a]) -> a:
    """
    mconcat :: (Monoid m) => [m] -> m

    Fold a non-empty list using its elements' Monoid instance.

    The instance is looked up from the first element, so ``xs`` must not be
    empty (an empty list raises IndexError on ``xs[0]``).
    """
    # Dispatch on the type of the first element to resolve the Monoid
    # instance, then delegate the actual fold to it.
    return Monoid[xs[0]].mconcat(xs)
def maybe_zero_out_padding(inputs, kernel_size, nonpadding_mask):
    """If necessary, zero out inputs to a conv for padding positions.

    Args:
      inputs: a Tensor with shape [batch, length, ...]
      kernel_size: an integer or pair of integers
      nonpadding_mask: a Tensor with shape [batch, length]

    Returns:
      Tensor of the same shape as inputs.
    """
    # A 1x1 kernel never mixes neighbouring positions, so no masking is
    # needed; likewise when no mask was supplied.
    if nonpadding_mask is None or kernel_size in (1, (1, 1)):
        return inputs
    # Broadcast the mask up to the rank of the inputs before multiplying.
    mask = nonpadding_mask
    while mask.get_shape().ndims < inputs.get_shape().ndims:
        mask = tf.expand_dims(mask, -1)
    return inputs * mask
import json
def display_json(value):
    """
    Render *value* as pretty-printed JSON inside a code widget.

    Strings are parsed as JSON first; ``None`` falls back to the default
    value renderer.
    """
    if value is None:
        return display_for_value(value)
    parsed = json.loads(value) if isinstance(value, str) else value
    pretty = json.dumps(parsed, indent=2, ensure_ascii=False, cls=DjangoJSONEncoder)
    return display_code(pretty)
def get_rgba_from_color(rgba):
    """Split a packed 32-bit color into its four channel values.

    Arguments:
        rgba - color packed as 0xRRGGBBAA

    Returns a (red, green, blue, alpha) tuple, each channel in 0..255.
    """
    # Shift each byte down to the low end and mask it off.
    return tuple((rgba >> shift) & 0xFF for shift in (24, 16, 8, 0))
def beauty_factor(G):
    """Return the "beauty factor" of an arbitrary graph: the smallest distance
    between any vertex and any edge not incident to it.

    Vertices are complex numbers (points in the plane); edges are pairs of
    indexes into the vertex list.  Raises ValueError when no vertex/edge pair
    is non-incident (min of an empty sequence).
    """
    vertices, edges = G[0], G[1]
    candidates = []
    for idx, p in enumerate(vertices):
        for (a_idx, b_idx) in edges:
            if idx in (a_idx, b_idx):
                continue  # ignore edges touching this vertex
            start, end = vertices[a_idx], vertices[b_idx]
            rel, seg = p - start, end - start
            # Scalar projection of rel onto the edge direction.
            t = (rel.real * seg.real + rel.imag * seg.imag) / abs(seg)
            if 0 <= t <= abs(seg):
                # Foot of the perpendicular lies on the segment: use the
                # perpendicular distance.
                candidates.append(abs(rel - seg * t / abs(seg)))
            else:
                # Otherwise the nearest edge point is one of its endpoints.
                candidates.extend((abs(rel), abs(p - end)))
    return min(candidates)
def line_plane_cost(line, plane):
    """
    A cost function for a line and a plane
    """
    # Project the line into the plane, then score the projected line
    # against the original with the line cost function.
    projected_point = normalised((line | plane) * I5)
    projected_line = normalised(meet(projected_point, plane))
    return line_cost_function(projected_line, line)
import numpy
def force_full_index(dataframe: pd.DataFrame, resampling_step: int = None,
                     resampling_unit: str = "min", timestamp_start: int = None,
                     timestamp_end: int = None) -> pd.DataFrame:
    """ Reindex *dataframe* onto a complete, evenly spaced unix-second index.

    Index values absent from the input appear as NaN rows.
    Note: resampling should be done beforehand to benefit from sampling
    strategies.

    Args:
        dataframe (pd.DataFrame): frame whose index may have gaps
        resampling_step (int): desired time step of the final index
        resampling_unit (str): pandas offset unit of the step (default "min")
        timestamp_start: first index value; defaults to the frame's first
            valid index
        timestamp_end: last index value; defaults to the frame's last valid
            index

    Returns:
        pd.DataFrame: frame re-indexed onto the full range, gaps set to NaN
    """
    if timestamp_start is None:
        print("start index was not provided")
        timestamp_start = dataframe.first_valid_index()
    if timestamp_end is None:
        print("end index is not provided")
        timestamp_end = dataframe.last_valid_index()
    # Build the complete date range, then convert it to unix seconds so it
    # lines up with the integer index of the input frame.
    full_range = pd.date_range(start=timestamp_start, end=timestamp_end,
                               freq=str(resampling_step) + resampling_unit)
    full_index = full_range.astype(numpy.int64) // 10 ** 9
    return dataframe.reindex(index=full_index, fill_value=numpy.nan)
def legislature_to_number(leg):
    """
    Convert a full session slug into the short form used by
    FormatDocument.asp.

    The output pattern is ``<legislature>Leg/<session><type initial>``:

        session = '49th-1st-regular'
        legislature_to_number(session) --> '49Leg/1r'

    (The previous docstring example claimed '49Leg/1s', which the
    implementation never produced.)

    :param leg: session slug like '49th-1st-regular'
    :return: abbreviated session string
    """
    parts = leg.lower().split('-')
    # '49th' -> '49', '1st' -> '1', 'regular' -> 'r'
    return '%sLeg/%s%s' % (parts[0][0:2], parts[1][0], parts[2][0])
def get_variable_field_type(variable_name, field_name, error_prefix=''):
    """
    Look up the type of a given field on a given variable.

    Raises RuntimeError (message prefixed with *error_prefix*) when the
    variable has no such field, or when the field uses a complex type.
    """
    schema = get_variable_schema(variable_name)
    result_type = schema.get(field_name)
    # Missing (or falsy) entry means the variable does not define this field.
    if not result_type:
        raise RuntimeError(utf8(error_prefix) + '变量(%s)不包含字段(%s)' % (utf8(variable_name), utf8(field_name)))
    # Strategy fields are currently limited to primitive types; a truthy
    # result_type[1] marks a complex/nested type, which is not supported.
    if result_type[1]:
        raise RuntimeError(utf8(error_prefix) + '暂不支持%s(%s)这种复杂数据类型' % (utf8(field_name), utf8(result_type)))
    return result_type[0]
def test(input_test_data):
    """
    Evaluate the trained network on every sample of the test batches.

    :param input_test_data: iterable of (mini_batch, label) pairs
    :return: test accuracy in percent, or 0 when no samples were seen
    """
    print('--- Execute testing ---')
    target = np.zeros(10, dtype=np.uint8)  # reusable one-hot buffer
    n_correct = 0
    n_seen = 0
    for batch_id, (mini_batch, label) in enumerate(input_test_data):
        for sample_id, sample in enumerate(mini_batch):
            # Flatten the sample into the network's column-vector input shape.
            column = (np.array(sample)).reshape((network.input_dim, 1))
            # The network consumes one-hot encoded labels.
            target[label[sample_id]] = 1
            _, prediction = network.forward_pass(column, target)
            # A sample counts as correct only when the full one-hot vector
            # matches the network output.
            if (prediction == target).all():
                n_correct += 1
            n_seen += 1
            target[:] = 0
    if n_seen != 0:
        return (n_correct / n_seen) * 100
    print('Warning, total_n should not be 0')
    return 0
def test(net, example):
    """
    Match SIFT features between the scene and target crops of *example*.

    Args:
        net (FlowNet): Instance of networks.flownet.FlowNet model, used only
            for its preprocessing step.
        example (dict): Un-processed example.

    Returns:
        good (list, DMatch): List of good SIFT matches (possibly empty).
    """
    net.eval()
    example = net.preprocess(example)
    cs_arr = np.array(example['resized_cs_im'])
    tg_arr = np.array(example['resized_tg_im'])
    cs_mask = example['resized_cs_mask']
    tg_mask = example['resized_tg_mask']
    # Detect keypoints and descriptors, restricted to the provided masks.
    sift = cv2.xfeatures2d.SIFT_create()
    kp_cs, des_cs = sift.detectAndCompute(cs_arr, mask=cs_mask)
    kp_tg, des_tg = sift.detectAndCompute(tg_arr, mask=tg_mask)
    example['kp_cs'], example['kp_tg'] = kp_cs, kp_tg
    # No matches are possible when either side yields no descriptors, or
    # fewer than the two neighbours required by knn matching with k=2.
    if des_cs is None or des_tg is None:
        return []
    if len(des_cs) < 2 or len(des_tg) < 2:
        return []
    FLANN_INDEX_KDTREE = 0
    flann = cv2.FlannBasedMatcher(dict(algorithm=FLANN_INDEX_KDTREE, trees=5),
                                  dict(checks=50))
    matches = flann.knnMatch(des_tg, des_cs, k=2)
    # Keep only matches that pass Lowe's ratio test.
    return [m for m, n in matches if m.distance < 0.7 * n.distance]
def get_hashers():
    """
    Dynamically import the chain of password hashers configured in
    app.settings.py (PASSWORD_HASHERS) and return instances of them.

    Returns:
        list: one instance per configured hasher path, in order.

    Raises:
        ImproperlyConfigured: when a hasher does not define an ``algorithm``
            name.
    """
    hashers = []
    for hasher_path in current_app.config.get('PASSWORD_HASHERS'):
        # Import the class by its dotted path and instantiate it.
        hasher_cls = import_string(hasher_path)
        hasher = hasher_cls()
        # Use a default so a missing attribute raises ImproperlyConfigured
        # instead of AttributeError.  (The previous code also appended the
        # list to itself on every iteration — removed.)
        if not getattr(hasher, 'algorithm', None):
            raise ImproperlyConfigured("hasher doesn't specify an "
                                       "algorithm name: %s" % hasher_path)
        hashers.append(hasher)
    return hashers
def str2range(s):
    """parse a samtools/tabix type region specification 'chr:start-stop' or 'chr:start..stop'"""
    fields = s.split(':')
    chrom = fields[0]
    start, stop = 1, None
    if len(fields) > 1:
        span = fields[1]
        # Accept either '-' or '..' as the start/stop separator.
        delim = '-' if '-' in span else '..'
        bounds = span.split(delim)
        start = str2int(bounds[0])
        if len(bounds) > 1:
            stop = str2int(bounds[1])
    return (chrom, start, stop)
import json
def turn_read_content(path, labelIdx, dataIdx):
    """
    Load nbest dialog data from the JSON file at *path*.

    Returned structures (parallel lists, one entry per dialog):
        sentences: (dialog_num, turn_num, nbest_num, sentence_len)
        scores: (dialog_num, turn_num, nbest_num)
        acts: (dialog_num, turn_num, machine_act_len)
        labels: (dialog_num, turn_num, [label_dim])
    """
    with open(path) as json_file:
        data = json.load(json_file)
    sentences, scores, acts, labels = [], [], [], []
    # Delegate per-dialog parsing to the shared nbest reader.
    for dialog in data[dataIdx]:
        d_sentences, d_scores, d_acts, d_labels = read_nbest_dialog_content(dialog, labelIdx)
        sentences.append(d_sentences)
        scores.append(d_scores)
        acts.append(d_acts)
        labels.append(d_labels)
    return sentences, scores, acts, labels
def index_page() -> dict:
    """Get data for Index page: interfaces, dp neighbors, arps, hsrp and CPU/memory usage.

    Connection parameters (ip, port, username, password) are read from the
    JSON body of the current request.  Returns a dict ready for JSON
    serialization.
    """
    interfaces = GetThisDataFromDevice.get_interfaces(request.json.get('ip'), request.json.get('port'), request.json.get('username'), request.json.get('password'))
    neighbors = GetThisDataFromDevice.get_dp_neighbors(request.json.get('ip'), request.json.get('port'), request.json.get('username'), request.json.get('password'))
    arps = GetThisDataFromDevice.get_arps(request.json.get('ip'), request.json.get('port'), request.json.get('username'), request.json.get('password'))
    # NOTE(review): the helper class name suggests HSRP uses a CLI fallback
    # because the REST endpoint is unreliable — confirm.
    hsrp = InCaseRestDoesntWork.get_hsrp_status(request.json.get('username'), request.json.get('password'), request.json.get('ip'))
    # cpu_status[0] is exposed as 'cpu' and cpu_status[1] as 'mem' below.
    cpu_status = GetThisDataFromDevice.get_cpu_usages(request.json.get('ip'), request.json.get('port'), request.json.get('username'), request.json.get('password'))
    return {'interfaces': interfaces, 'arps': arps, 'dp': neighbors, 'hsrp': hsrp, 'cpu': cpu_status[0], 'mem': cpu_status[1]}
def reverse_int_bits(n: int, n_bits: int = 10) -> int:
    """Reverses the bits of *n*, considering it is padded by *n_bits* first"""
    # Zero-pad the binary representation to n_bits, then read it backwards.
    binary = format(n, f'0{n_bits}b')
    return int(binary[::-1], 2)
def get_conn():
    """
    Lazily attach one cookies client and one account client per generator
    name to the application context ``g`` and return ``g``.

    :return: ``g`` with ``<name>_cookies`` and ``<name>_account`` attributes
             populated for every name in GENERATOR_MAP
    """
    for name in GENERATOR_MAP:
        print(name)
        # Guard on an attribute that is actually set: the original checked
        # hasattr(g, name), which is never set, so the clients were rebuilt
        # on every call.
        if not hasattr(g, name + '_cookies'):
            # Instantiate directly instead of building code strings for eval.
            setattr(g, name + '_cookies', CookiesRedisClient(name=name))
            setattr(g, name + '_account', AccountRedisClient(name=name))
    return g
def get_rule_satisfaction_matrix(x, y, rules):
    """ Build an instance-by-rule satisfaction indicator matrix.

    Entry (i, j) is 1 when instance i satisfies rule j, else 0.

    :param x: np.ndarray
        feature matrix, one row per instance
    :param y: np.array
        labels, passed through to each rule
    :param rules: list
        rule objects exposing ``where_satisfied(x, y)``
    :return: np.ndarray
        int32 matrix with x.shape[0] rows and len(rules) columns
    """
    matrix = np.zeros((x.shape[0], len(rules)), dtype=np.int32)
    for col, rule in enumerate(rules):
        # Mark every instance index the rule reports as satisfying it.
        matrix[rule.where_satisfied(x, y), col] = 1
    return matrix
def refactor(df, frequency = '1W'):
    """Rebin OHLCV data to a lower cadence.

    Each column is regrouped on the datetime index with pd.Grouper at the
    requested frequency: low -> min, high -> max, open -> first,
    close -> last, volume -> sum.
    """
    def _rebin(series, how):
        # Group the series on its index and apply the named aggregation.
        return getattr(series.groupby(pd.Grouper(freq=frequency)), how)()
    return pd.DataFrame({
        'low': _rebin(df.low, 'min'),
        'high': _rebin(df.high, 'max'),
        'open': _rebin(df.open, 'first'),
        'close': _rebin(df.close, 'last'),
        'volume': _rebin(df.volume, 'sum'),
    })
def pendulum_derivatives(theta, omega, g=9.8, l=1):
    r"""Time derivatives of a simple pendulum's state.

    .. math::
        \dot{\theta} = \omega, \qquad
        \dot{\omega} = -\frac{g \sin\theta}{l}

    :param theta: angle of the pendulum
    :param omega: angular velocity of the pendulum
    :param g: gravitational acceleration
    :param l: length of the pendulum
    :return: (derivative of angle, derivative of angular velocity)
    """
    # The angle changes at the current angular velocity; the angular velocity
    # is driven by the tangential component of gravity.
    return omega, -np.sin(theta) * g / l
def task_edit(request, pk=None):
    """
    Edit a Task via the generic ``edit`` view.

    :param request: current HTTP request
    :param pk: primary key of the Task to edit (presumably ``None`` creates
        a new one — depends on ``edit``'s handling, TODO confirm)
    :return: whatever the generic ``edit`` view returns
    """
    return edit(request, form_model=TaskForm, model=Task, pk=pk)
import torch
def _gradient(P, T, N, A):
    """
    Creates the gradient operator, starting from the point set P, the topology tensor T, the normal tensor N and the
    triangle area tensor A
    Parameters
    ----------
    P : Tensor
        the (N,3,) point set tensor
    T : LongTensor
        the (3,M,) topology tensor
    N : Tensor
        the (M,3,) triangle normal tensor
    A : Tensor
        the (M,) triangle area tensor
    Returns
    -------
    list
        the gradient operator data: one sparse-style (M,N) operator per
        coordinate of P, assembled from per-triangle edge normals scaled by
        the reciprocal triangle areas
    """
    device = P.device
    # V(i): positions of the i-th vertex of every triangle (M,3).
    def V(i):
        return P[T[i], :]
    # n: number of points, m: number of triangles (per the row/col helpers —
    # presumably row/col return the first/second dimension sizes; confirm).
    n = row(P)
    m = col(T)
    # Sparse triplet accumulators: row indexes (triangles), column indexes
    # (vertices) and the per-coordinate weights.
    i = LongTensor([], device=device)
    j = LongTensor([], device=device)
    w = FloatTensor([], device=device)
    # f enumerates all triangle indexes 0..m-1.
    f = indices(0, m - 1, device=device).squeeze()
    for k in range(row(T)):
        # opposite edge e_i indexes
        s = (k+1) % 3
        t = (k+2) % 3
        # vector N_f^e_i
        wk = cross(V(t) - V(s), N, 1)
        # update the index listing
        i = torch.cat((i, f), dim=0)
        j = torch.cat((j, T[k]), dim=0)
        w = torch.cat((w, wk), dim=0)
    # Diagonal matrix of reciprocal triangle areas (m,m).
    a = diag(torch.reciprocal(A), rows=m)
    # Stack the triplet indexes into a (2,K) edge tensor for adjacency().
    e = torch.cat((i.unsqueeze(0), j.unsqueeze(0)), dim=0)
    G = []
    # One (m,n) operator per spatial coordinate of P.
    for k in range(col(P)):
        G += [torch.matmul(a, adjacency(e, w[:, k], size=[m, n]))]
    return G
from typing import Tuple
def breast_tissue_diagnostic_black_pen() -> Tuple[
    openslide.OpenSlide, str
]:  # pragma: no cover
    """breast_tissue_diagnostic_black_pen() -> Tuple[openslide.OpenSlide, str]
    Breast tissue, TCGA-BRCA dataset. Diagnostic slide with black pen marks.
    This image is available here
    https://portal.gdc.cancer.gov/files/e70c89a5-1c2f-43f8-b6be-589beea55338
    or through the API
    https://api.gdc.cancer.gov/data/e70c89a5-1c2f-43f8-b6be-589beea55338
    It corresponds to TCGA file
    `TCGA-BH-A201-01Z-00-DX1.6D6E3224-50A0-45A2-B231-EEF27CA7EFD2.svs`
    Access: open
    Returns
    -------
    breast_tissue : openslide.OpenSlide
        H&E-stained Whole-Slide-Image of breast tissue with black pen marks.
    path : str
        Path where the slide is saved
    """
    # Delegates to the shared SVS loader (presumably downloads/caches the
    # file on first use — confirm against _load_svs).
    return _load_svs(
        "tcga/breast/TCGA-BH-A201-01Z-00-DX1.6D6E3224-50A0-45A2-B231-EEF27CA7EFD2.svs"
    )
def fitcand(t,fm,p,full=False):
    """
    Perform a non-linear fit to a putative transit.
    Parameters
    ----------
    t : time
    fm : flux
    p : trial parameter (dictionary with keys 'P', 'epoch', 'df', 'tdur')
    full : Return tdt and fdt in the result as well
    Returns
    -------
    res : result dictionary with the fitted P, epoch, df, tdur, the fit
        signal-to-noise 's2n' and a stability flag 'stbl'.
    """
    # Detrend the light curve around the trial transit; LDTwrap returns a
    # list of segments which are stitched into one array here.
    dtL = LDTwrap(t,fm,p)
    dt = np.hstack(dtL)
    fdt = dt['fdt']
    tdt = dt['tdt']
    # Refine (P, epoch, df, tdur) by minimizing objMT with Powell's method.
    p0 = np.array([p['P'],p['epoch'],p['df'],p['tdur']])
    p1 = optimize.fmin_powell(objMT,p0,args=(tdt,fdt),disp=False)
    # Reject fits whose period/epoch wandered too far from the trial values
    # (dP and depoch are module-level tolerances), or that went unphysical
    # (negative period or duration).
    dp = (p0[:2]-p1[:2])
    if (abs(dp) > np.array([dP,depoch])).any():
        stbl = False
    elif (p1[0] < 0) | (p1[3] < 0):
        stbl = False
    else:
        stbl = True
    # Mask the in-transit points before computing the fit S/N.
    tfold = getT(tdt,p['P'],p['epoch'],p['tdur'])
    fdt = ma.masked_array(fdt,mask=tfold.mask)
    tdt = ma.masked_array(tdt,mask=tfold.mask)
    s2n = s2n_fit(fdt,tdt,p1)
    res = dict(P=p1[0],epoch=p1[1],df=p1[2],tdur=p1[3],s2n=s2n,stbl=stbl)
    if full:
        res['fdt'] = fdt
        res['tdt'] = tdt
    return res
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.