content
stringlengths 35
762k
| sha1
stringlengths 40
40
| id
int64 0
3.66M
|
|---|---|---|
import torch
def shuffle_tensor(input):
    """
    Return a new tensor whose elements are a randomly shuffled version of the
    elements of the input.
    Args:
        input (`torch.Tensor`): input tensor.
    Returns:
        (`torch.Tensor`): tensor with the same elements in a random order.
    """
    permutation = torch.randperm(input.nelement())
    return input[permutation]
|
e7c3ff4180123de1fe6322296ba08863de9766a4
| 3,641,209
|
def relaunch_failed_jobs(tasks, spec_file, verbose=False):
    """ Relaunch jobs that are failed from the given list """
    relaunched = 0  # number of newly launched jobs
    for idx, task in enumerate(tasks):
        job_id = str(task[-1])  # job id is stored as the last entry
        proc = relaunch_failed_job(job_id, spec_file)
        # Retry until a new job id is obtained; relaunch_failed_job returns
        # None when the job is not actually failed, in which case we move on.
        while proc is not None:
            if verbose:
                print("==> Re-launching failed task: {} ...".format(task))
            new_id = get_job_id(proc)
            if new_id is not None:
                # Record the replacement job id in place
                tasks[idx][-1] = new_id
                relaunched += 1
                break
            proc = relaunch_failed_job(job_id, spec_file)
    return relaunched
|
d78c250e1c6f10c60bb81aea077063f6f5b15b12
| 3,641,210
|
def intensityTriWave(coeff,L,ang):
    """Simulate the intensity observed a distance L from
    the grating. Standard Zernike coefficients, L, and
    the diffraction angle ang are used as input.
    """
    wavevector = 2*np.pi/405.e-6  # blue wavevector
    grid = np.linspace(-1.1, 1.1, 1000)
    x, y = np.meshgrid(grid, grid)
    m = np.sin(ang)
    # One row of Zernike coefficients per diffracted beam; only the tilt
    # terms (indices 1 and 2) differ between the three beams.
    beams = np.tile(np.array(coeff).astype('float'), (3, 1))
    beams[0][2] = -m/2.
    beams[1][1] = m/2.*np.sqrt(3)/2
    beams[1][2] = m/4
    beams[2][1] = -m/2.*np.sqrt(3)/2
    beams[2][2] = m/4
    # Construct the three phase maps, each evaluated on a shifted grid
    phi1 = zern.zernsurf(x, y-m*L, 0., 0., 1., beams[0])
    phi2 = zern.zernsurf(x-m*L*np.sqrt(3)/2, y+m*L/2, 0, 0, 1, beams[1])
    phi3 = zern.zernsurf(x+m*L*np.sqrt(3)/2, y+m*L/2, 0, 0, 1, beams[2])
    # Superpose the complex amplitudes and take the squared modulus
    i = np.abs(np.exp(1j*phi1*wavevector) +
               np.exp(1j*phi2*wavevector) +
               np.exp(1j*phi3*wavevector))**2
    return phi1,phi2,phi3,i
|
3b85a0d437fce65d8f164b31a3fc2e85fca33006
| 3,641,211
|
import requests
import time
import ast
def do_auth_code_grant(fqdn, force_login=False, identity=None):
    """Perform an Oauth2 authorization grant consent flow.

    Prints an authorization URL, asks the user to paste back the auth code,
    then exchanges the code for a token.

    Args:
        fqdn: host name substituted into SCOPE_FORMAT to build the scope.
        force_login: when True, ask the auth server to re-prompt for login.
        identity: optional identity the session is required to use.
    Returns:
        Token: built from the /v2/oauth2/token response body.
    """
    # PKCE pair: the challenge goes into the authorize URL, the verifier
    # into the token request, binding the two requests together.
    code_verifier, code_challenge = _gen_code()
    scope = (SCOPE_FORMAT.format(fqdn=fqdn))
    host = GLOBUS_AUTH_HOST
    creds = _lookup_credentials()
    params = {
        'redirect_uri' : 'https://' + host + '/v2/web/auth-code',
        'client_id' : creds['client'],
        'access_type' : 'offline',
        'state' : '_default',
        'code_challenge' : code_challenge,
        'code_challenge_method': 'S256',
        'response_type' : 'code',
        'scope' : scope
    }
    if identity is not None:
        params['session_message'] = 'The SSH service requires that you authenticate using this identity:'
        params['session_required_identities'] = str(identity)
    if force_login is True:
        params['prompt'] = 'login'
    url = "https://" + host + '/v2/oauth2/authorize?' + urlencode(params)
    print('Please go to this URL and login: {0}'.format(url))
    # NOTE(review): raw_input is Python 2 only; under Python 3 this raises
    # NameError — confirm which interpreter this module targets.
    auth_code = raw_input(
        'Please enter the code you get after login here: ').strip()
    body = {
        'code' : auth_code,
        'code_verifier': code_verifier,
        'redirect_uri' : 'https://' + host + '/v2/web/auth-code',
        'grant_type' : 'authorization_code'
    }
    r = _authenticated_request(requests.post, '/v2/oauth2/token', data = body)
    # literal_eval parses the response body into a dict of keyword args;
    # presumably the payload is flat key/value pairs — r.json() would be the
    # conventional tool here.
    return Token(authorized_at=int(time.time()), **ast.literal_eval(r.text))
|
0e5583d1d6a273e165f0d8d9bed82e3c9af491cd
| 3,641,212
|
import json
def decode(serialized: str) -> Node:
    """Decode a JSON string into a `Node`.

    Dict payloads are handed to ``dict_decode``; any other JSON value is
    returned as-is.
    """
    parsed = json.loads(serialized)
    if isinstance(parsed, dict):
        return dict_decode(parsed)
    return parsed
|
b608b6c18c09d7061e09d722445ca1f50fd78b3f
| 3,641,213
|
def validate_duration_unit(recv_duration_unit):
    """Check whether a duration unit name is recognised (case-insensitive).

    Args:
        recv_duration_unit (str): unit name supplied by the caller.
    Returns:
        bool: True if the lowercased unit appears in
        ``DaysAndUnitsList.units_list``, False otherwise.
    """
    # The membership test already yields a bool — no if/else ladder needed.
    return recv_duration_unit.lower() in DaysAndUnitsList.units_list
|
784693ea8106c601b884a729ad2afd2a75b94ba2
| 3,641,214
|
def make_word_list1():
    """Read ``words.txt`` and return its lines as a list of stripped words.

    Returns:
        list[str]: one entry per line, surrounding whitespace removed.
    """
    # `with` guarantees the handle is closed (the original leaked it).
    with open('words.txt') as fin:
        return [line.strip() for line in fin]
|
7af7b0697557e8bba891d73bd8217860350b810e
| 3,641,215
|
def metropolis(data, likelihood, priors, samples=1000, par_init=None,
               width_prop=.5):
    """
    Returns the posterior function of the parameters given the likelihood and
    the prior functions. Returns also the number of the accepted jumps in the
    Metropolis-Hastings algorithm.
    Notes:
    - <width_prop> should be chosen so to result in about 50% accepted jumps.
    - <posterior> has shape (samples, n_par).
    - priors must be from function "prior_dist".
    - for numerical stability the computation is carried out using logarithms.
    """
    # Current parameters (default start at the origin)
    n_par = len(priors)
    par_curr = np.zeros(n_par) if (par_init is None) else np.asarray(par_init)
    # Init quantities
    jumps = 0
    par_prop = np.zeros(n_par)
    posterior = np.zeros((samples, n_par))
    posterior[0, :] = par_curr
    # Current priors: product of independent priors, accumulated in log space
    bb = 0.0
    for i in range(n_par):
        bb += np.log(prior_dist(priors[i], par_curr[i]))
    prior_curr = np.exp(bb)
    # Current likelihood: product over data points, accumulated in log space
    bb = np.log(likelihood(data, par_curr)).sum()
    likelihood_curr = np.exp(bb)
    # Current posterior probability (unnormalized)
    p_curr = likelihood_curr * prior_curr
    # Loop <samples> times
    # NOTE(review): iteration 0 overwrites posterior[0, :] set above, so the
    # initial point survives only if the very first proposal is rejected.
    for sample in range(samples):
        # Randomnly pick the proposed parameters (independent normal steps)
        for i in range(n_par):
            par_prop[i] = stats.norm(par_curr[i], width_prop).rvs()
        # Evaluate priors with the proposed parameters
        bb = 0.0
        for i in range(n_par):
            bb += np.log(prior_dist(priors[i], par_prop[i]))
        prior_prop = np.exp(bb)
        # Evaluate likelihood with the proposed parameters
        bb = np.log(likelihood(data, par_prop)).sum()
        likelihood_prop = np.exp(bb)
        # Proposed posterior probability
        p_prop = likelihood_prop * prior_prop
        # Randomly accept or reject the jump; the proposal is a symmetric
        # normal, so the plain Metropolis ratio suffices (no Hastings term)
        p_accept = p_prop / p_curr
        if ((np.random.uniform() < p_accept)):
            # Update quantities if jump accepted
            jumps += 1
            # copy() matters: par_prop is reused/mutated on the next iteration
            par_curr = par_prop.copy()
            prior_curr = prior_prop
            likelihood_curr = likelihood_prop
            p_curr = p_prop
        # Save (accepted and rejected) parameters
        posterior[sample, :] = par_curr
    return posterior, jumps
|
3857e237390a8373eecbc575209e96d42b6ff614
| 3,641,217
|
def _get_single_spec_df(reference_dict, mapping_dict, spectrum):
"""Primary method for reading and storing information from a single spectrum.
Args:
reference_dict (dict): dict with reference columns to be filled in
mapping_dict (dict): mapping of engine level column names to ursgal unified column names
spectrum (xml Element): namespace of single spectrum with potentially multiple PSMs
Returns:
(pd.DataFrame): dataframe containing spectrum information
"""
spec_records = []
spec_level_dict = reference_dict.copy()
spec_level_dict["spectrum_id"] = spectrum.attrib["spectrumID"].split("scan=")[-1]
# Iterate children
for psm in spectrum.findall(".//{*}SpectrumIdentificationItem"):
psm_level_dict = spec_level_dict.copy()
psm_level_dict.update(
{mapping_dict[k]: psm.attrib[k] for k in mapping_dict if k in psm.attrib}
)
cv_param_info = {
c.attrib["name"]: c.attrib["value"] for c in psm.findall(".//{*}cvParam")
}
psm_level_dict.update(
{
mapping_dict[k]: cv_param_info[k]
for k in mapping_dict
if k in cv_param_info
}
)
spec_records.append(psm_level_dict)
return pd.DataFrame(spec_records)
|
3d286e9bc206c0b59364cf9ef6d861b5cde9e9d4
| 3,641,218
|
import re
def in2func(inp):
    """Convert a user-supplied expression string into a callable ``f(x)``.

    Args:
        inp (str): expression text entered by the user.
    Returns:
        callable: one-argument function evaluating the expression at ``x``.
    Raises:
        ValueError: if the input is empty or contains an identifier not in
            ``allowed_inputs``.
    """
    # Validate Function
    if inp == "":
        raise ValueError( f"Enter a function to plot!")
    for char in re.findall("[a-zA-Z_]+", inp):
        if char not in allowed_inputs:
            # Error will communicate over stderr pipeline.
            # (Fix: removed the unreachable `return` that followed the raise.)
            raise ValueError( f"'{char}' is not in the allowed as an input character!")
    # Replace allowed chars with suitable methods for eval compiling.
    for before, after in replaced_inputs.items():
        inp = inp.replace(before, after)
    # Edge Case: When no 'x' presents in the function
    if "x" not in inp:
        inp = f"({inp})*(x**0)"
    # SECURITY NOTE: eval on user input is only as safe as the identifier
    # whitelist above — keep `allowed_inputs` tight.
    def func(x):
        return eval(inp)
    return func
|
d3bf2faaed00f7b57c5bcd5b2681c94846671793
| 3,641,219
|
from datetime import datetime
def filter_posts(posts: list, parsing_date: datetime) -> list:
    """Filter out posts that do not fall in the month being parsed.

    Args:
        posts: post dicts carrying a unix ``'date'`` timestamp.
        parsing_date: any datetime inside the target month.
    Returns:
        list: posts whose timestamp falls in the same month AND year as
        ``parsing_date``. (Fix: the original compared only ``.month``,
        wrongly keeping posts from the same month of other years.)
    """
    result = []
    for post in posts:
        post_date = datetime.fromtimestamp(post['date'])
        if (post_date.year, post_date.month) == (parsing_date.year, parsing_date.month):
            result.append(post)
    return result
|
381d5cb37e4ae3439c335a7962352431ad3ca17c
| 3,641,220
|
import requests
import re
def parse_bing():
    """
    Parse the Bing homepage for the wallpaper link using regular expressions.
    Returns:
        (IMG_info, IMG_url): description text and image URL, or (None, None)
        if the page could not be fetched.
    """
    base_url = 'https://cn.bing.com/'
    # Fix: the market query parameter is 'mkt', not 'mtk' (cf. the
    # commented-out URL below which spells it correctly).
    language_parameter = '?mkt=zh-CN'
    # base_url = 'https://www.bing.com/?mkt=zh-CN'
    try:
        resp = requests.get(base_url+language_parameter, headers=header).text
    except RequestException:
        send_text(MASTER, "connectionError")
        # Fix: without a response there is nothing to parse; the original
        # fell through and crashed on the unbound `resp`.
        return None, None
    # print(resp)
    match_url = re.search('id="bgLink".*?href="(.*?)"', resp, re.S)
    info = re.search('class="sc_light" title="(.*?)".*?"主页图片信息"', resp, re.S)
    print(info)
    if not info:
        # Fall back to the JSON blob embedded in the page
        info = re.search('"copyright":"(.*?)","copyrightlink"', resp, re.S)
        print('-'*40)
        print(info)
    # NOTE(review): groups(1) returns the whole groups tuple (1 is the
    # default value, not an index); the str(...).strip(...) dance unwraps it.
    IMG_info = str(info.groups(1)).strip("(),'")
    IMG_url = base_url + str(match_url.groups(1)).strip("()',")
    print(IMG_info, "----", IMG_url)
    return IMG_info, IMG_url
|
4a963514f385a931882a75f45be774cbab4428ff
| 3,641,221
|
def quadsum(*args, **kwargs):
    """Sum of array elements in quadrature.

    Identical to ``numpy.sum`` except that array elements are squared before
    summing and the square root of the resulting sum is returned, i.e.
    ``sqrt(sum(a**2, ...))``.  All other positional/keyword arguments
    (``axis``, ``dtype``, ``out``, ``keepdims``) are forwarded to
    ``numpy.sum`` unchanged — see its documentation for their semantics.

    Parameters
    ----------
    a : array_like
        Elements to sum in quadrature (positionally or as keyword ``a``).

    Returns
    -------
    ndarray or scalar
        ``sqrt`` of the sum of squares along the requested axis/axes.

    Examples
    --------
    >>> quadsum([3.0, 4.0])
    5.0
    """
    args = list(args)
    if args:
        args[0] = np.asarray(args[0])**2
    elif 'a' in kwargs:
        # Fix: support the array being passed by keyword (np.sum's `a`);
        # the original indexed args[0] unconditionally and raised IndexError.
        kwargs['a'] = np.asarray(kwargs['a'])**2
    return np.sqrt(np.sum(*args, **kwargs))
|
ef842dab6258dc46b84098098115151a240f767b
| 3,641,222
|
import codecs
def open_file(path):
    """Read the UTF-8 text file at ``path`` and return its contents.

    Args:
        path (str): path of the file to read.
    Returns:
        str: decoded file contents.
    """
    # Context manager closes the handle (the original leaked it).
    with codecs.open(path, encoding='utf8') as handle:
        return handle.read()
|
f7fd375ea76e8e7872e465e89eea5c02f3396115
| 3,641,223
|
def get_api_status():
    """Report that the API is up as an HTML snippet."""
    status_markup = "<h4>API Is Up</h4>"
    return status_markup
|
5c88fc39bc5a970c4d223d8fe87c4fa3ad473b50
| 3,641,224
|
def fgt_set_pressureUnit(pressure_index, unit):
    """Override the default pressure unit for a single pressure channel.

    Args:
        pressure_index: zero-based index of the pressure channel.
        unit: unit name string passed to the C library.
    Returns:
        tuple: 1-tuple containing the C error code.
    """
    # Build a NUL-terminated C char array from the unit string for the C API
    # (the +1 leaves room for the implicit terminating zero byte).
    unit_array = (c_char * (len(unit)+1))(*([c_char_converter(c) for c in unit]))
    c_error = c_ubyte(lib.fgt_set_pressureUnit(c_uint(pressure_index), unit_array))
    # NOTE(review): the trailing comma makes this a 1-tuple (error_code,) —
    # presumably to match sibling wrappers; confirm callers expect a tuple.
    return c_error.value,
|
8b7b75ffc598f70e7bbf3e1742a7837bf71f474f
| 3,641,226
|
def create_alert_from_slack_message(payload, time):
"""
Create a new raw alert (json) from the new alert form in Slack
"""
alert_json = {}
values = payload['view']['state']['values']
for value in values:
for key in values[value]:
if key == 'severity':
alert_json[key] = \
values[value][key]['selected_option']['text']['text']
else:
alert_json[key] = values[value][key]['value']
alert_json['datetime'] = time
return alert_json
|
1ae8b93a6b9f8bd7532ac193cb6dfde58bf8d409
| 3,641,228
|
def psd(buf_in, buf_out):
    """
    Perform discrete fourier transforms using the FFTW library and use it to
    get the power spectral density. FFTW optimizes
    the fft algorithm based on the size of the arrays, with SIMD parallelized
    commands. This optimization requires initialization, so this is a factory
    function that returns a numba gufunc that performs the FFT. FFTW works on
    fixed memory buffers, so you must tell it what memory to use ahead of time.
    When using this with ProcessingChain, to ensure the correct buffers are used
    call ProcessingChain.get_variable('var_name') to give it the internal memory
    buffer directly (with raw_to_dsp, you can just give it the name and it will
    automatically happen!). The possible dtypes for the input/outputs are:
    - complex64 (size n) -> float32/float (size n)
    - complex128 (size n) -> float64/double (size n)
    - complex256/clongdouble (size n) -> float128/longdouble (size n)
    - float32/float (size n) -> float32/float (size n/2+1)
    - float64/double (size n) -> float64/double (size n/2+1)
    - float128/longdouble (size n) -> float128/longdouble (size n/2+1)
    """
    # build intermediate array for the dft, which will be abs'd to get the PSD
    # (itemsize*16 maps a float32 output to complex64, float64 to complex128)
    buf_dft = np.ndarray(buf_out.shape, np.dtype('complex'+str(buf_out.dtype.itemsize*16)))
    try:
        # Plan the transform once against the fixed buffers; FFTW validates
        # the dtype/shape combination here.
        dft_fun = FFTW(buf_in, buf_dft, axes=(-1,), direction='FFTW_FORWARD')
    except ValueError:
        raise ValueError("""Incompatible array types/shapes. Allowed:
        - complex64 (size n) -> float32/float (size n)
        - complex128 (size n) -> float64/double (size n)
        - complex256/clongdouble (size n) -> float128/longdouble (size n)
        - float32/float (size n) -> float32/float (size n/2+1)
        - float64/double (size n) -> float64/double (size n/2+1)
        - float128/longdouble (size n) -> float128/longdouble (size n/2+1)""")
    # Build the numba gufunc signature from the concrete buffer dtypes
    typesig = 'void(' + str(buf_in.dtype) + '[:, :], ' + str(buf_out.dtype) + '[:, :])'
    # NOTE(review): the second branch has no '->' in the layout signature —
    # confirm guvectorize accepts '(m, n),(m, l)' as intended here.
    sizesig = '(m, n)->(m, n)' if buf_in.shape == buf_out.shape else '(m, n),(m, l)'
    @guvectorize([typesig], sizesig, forceobj=True)
    def psd(wf_in, psd_out):
        # Run the planned FFT into the shared buffer, then |.| into psd_out
        dft_fun(wf_in, buf_dft)
        np.abs(buf_dft, psd_out)
    return psd
|
9573935fd0e80e3e1a53237334a46f21d94984ab
| 3,641,229
|
def get_cell_ids(num_celltypes=39):
    """get valid cell ids by removing cell types with missing data.
    Return:
        A cell id list.
    """
    # Cell types known to have missing data (1-based ids)
    excluded = {8, 23, 25, 30, 32, 33, 34, 35, 38, 39, 17}
    return [cell_id for cell_id in range(1, num_celltypes + 1)
            if cell_id not in excluded]
|
a7c8f881ad62af9c4287cd50b9b01118f724c4f8
| 3,641,230
|
def limit_data():
    """Slice data by dolphot values and recovered stars in two filters"""
    fmt = '{:s}_{:s}'
    # Dolphot quality statistics and the widget holding each allowed range
    checks = (('SNR', snr), ('SHARP', shp), ('CROWD', cwd),
              ('ROUND', rnd), ('ERR', err), ('CHI', chi))
    mask = True
    for band in filters.value.split(','):
        # Recovered star: magnitude within +/- 60
        mask = mask & (np.abs(data[fmt.format(band, 'VEGA')]) <= 60)
        # Each quality statistic must fall inside its slider range
        for stat, limiter in checks:
            column = data[fmt.format(band, stat)]
            mask = mask & (column >= limiter.value[0]) & (column <= limiter.value[1])
    return data[mask]
|
785d027c13a05b97f2b98526dd0762e95e4e0fd6
| 3,641,231
|
from typing import Dict
from typing import Optional
from typing import Callable
def make_valance_getter(
    lexicon: Dict[str, float],
    lemmatize: bool = True,
    lowercase: bool = True,
    cap_differential: Optional[float] = C_INCR,
) -> Callable[[Token], float]:
    """Creates a token getter which return the valence (sentiment) of a token including the capitalization of the token.
    Args:
        lexicon (Dict[str, float]): The valence scores of the tokens.
        lemmatize (bool, optional): Should it look up in the lexicon (and intensifiers) using the lemma? Defaults to True.
        lowercase (bool, optional): Should it look up in the lexicon (and intensifiers) using the lowercased word? Defaults to True.
        cap_differential (Optional[float], optional): Capitalization differential, which is added to the valence of the score it is emphasized using all caps.
            Defaults to 0.733, an emperically derived constant (Hutto and Gilbert, 2014). If None it will not be used.
    Returns:
        Callable[[Token], float]: The getter function
    """
    t_getter = make_txt_getter(lemmatize, lowercase)
    def lemma_valence_getter(token: Token) -> float:
        # Look the (lemmatized/lowercased) text up in the lexicon, unless the
        # token itself acts as an intensifier.
        # (Fix: removed the dead `valence = 0` local the original never used.)
        t = t_getter(token)
        if (t in lexicon) and not (
            Token.has_extension("intensifier") and token._.intensifier
        ):  # if token isn't a intensifier
            return lexicon[t]
        return 0.0
    def cap_diff_valence_getter(token: Token) -> float:
        # Emphasize ALL-CAPS tokens when the surrounding sentence mixes case
        valence = token._.raw_valence
        if token.is_upper and token.sent._.is_cap_diff:
            if valence > 0:
                valence += cap_differential
            elif valence < 0:
                valence -= cap_differential
        return valence
    if cap_differential:
        # Register the supporting extensions once, then return the
        # capitalization-aware getter.
        if not Token.has_extension("raw_valence"):
            Token.set_extension("raw_valence", getter=lemma_valence_getter)
        if not Span.has_extension("is_cap_diff"):
            Span.set_extension("is_cap_diff", getter=allcap_differential_getter)
        return cap_diff_valence_getter
    return lemma_valence_getter
|
825e8bf624240e3628537dbcfc6a09af2d54cd83
| 3,641,233
|
import re
def proper_units(text: str) -> str:
    """
    Rewrite unit spellings in ``text`` into their nicer (LaTeX) form.

    Args:
        text (str): text to check.
    Returns:
        str: reformatted text with better units.
    """
    conv = {
        r"degK": r"K",
        r"degC": r"$^{\circ}$C",
        r"degrees\_celsius": r"$^{\circ}$C",
        r"degrees\_north": r"$^{\circ}$N",
        r"degrees\_east": r"$^{\circ}$E",
        r"degrees\_west": r"$^{\circ}$W",
        r"I metric": r"$\mathcal{I}$--metric",
    }
    # Longest keys first so e.g. a longer spelling wins over its prefix
    ordered = sorted(conv, key=len, reverse=True)
    pattern = re.compile("|".join(map(re.escape, ordered)))
    return pattern.sub(lambda m: conv[m.group(0)], text)
|
5113d227db1a75ec8fa407c5f9edd5a897960d82
| 3,641,234
|
import re
from datetime import datetime
def coerce_number(value, convert = float):
    """Coerce a database field value to a numeric type.

    Date-like strings are parsed to datetime first; date/datetime values are
    turned into a unix-epoch string via strftime('%s') before the final
    numeric conversion.
    NOTE(review): `basestring` is Python 2 only, and `date` must be imported
    elsewhere in this module; strftime('%s') is POSIX-specific — confirm the
    target platform/interpreter.
    """
    pattern = re.compile(r'^\d{4}(-\d\d){2}')
    format = '%Y-%m-%d %H:%M:%S'
    if isinstance(value, basestring) and pattern.match(value):
        # Parse the date/time string into a datetime object first
        try:
            # Trim the format to the string length: e.g. a 10-char
            # 'YYYY-MM-DD' uses format[:8] == '%Y-%m-%d'
            mask = format[:len(value) - 2]
            value = datetime.strptime(value, mask)
        except ValueError:
            # Not actually a parsable date — fall through to convert()
            pass
    if isinstance(value, date):
        value = value.strftime('%s')
    return convert(value)
|
a36b3b8e814d722d6814a3306c692a8c7cbe28a5
| 3,641,235
|
def create_credential_resolver():
    """Create a credentials resolver for Localstack.

    Environment variables are consulted first, then the default provider.
    """
    providers = [
        botocore.credentials.EnvProvider(),
        DefaultCredentialProvider(),
    ]
    return botocore.credentials.CredentialResolver(providers=providers)
|
36426521d5928aec1cb7c01308afe3d60c3f9959
| 3,641,236
|
def does_algorithm_implementation_have_capabilities_to_execute_parameter(parameter_kisao_id, algorithm_specs):
    """ Determine if an implementation of an algorithm has the capabilities to execute a model langugae
    Args:
        parameter_kisao_id (:obj:`str`): KiSAO id for an algorithm parameter
        algorithm_specs (:obj:`dict` with schema ``https://api.biosimulators.org/openapi.json#/components/schemas/Algorithm``):
            specifications of the implementation of an algorithm
    Returns:
        :obj:`bool`: whether the implementation of the algorithm has the capabilities to execute the SED parameter
    """
    # True as soon as any declared parameter carries the requested KiSAO id
    return any(
        spec['kisaoId']['id'] == parameter_kisao_id
        for spec in algorithm_specs['parameters']
    )
|
653712ae621bd014547e04009243cefe4c9eb8e1
| 3,641,237
|
def main():
    """
    This method allows the script to be run in stand alone mode.
    @return Exit code from running the script
    """
    return Record().Run()
|
5460a32b9202c133da9ca109f5f2784fe21d7ee2
| 3,641,238
|
def stamp_pixcov_from_theory(N,cmb2d_TEB,n2d_IQU=0.,beam2d=1.,iau=False,return_pow=False):
    """Return the pixel covariance for a stamp N pixels across given the 2D IQU CMB power spectrum,
    2D beam template and 2D IQU noise power spectrum.

    Args:
        N: stamp width in pixels.
        cmb2d_TEB: 4-dim (ncomp, ncomp, Ny, Nx) CMB power in TEB basis.
        n2d_IQU: 2D noise power in IQU (scalar 0. means noiseless).
        beam2d: 2D beam template (scalar 1. means no beam).
        iau: polarization convention flag forwarded to rotate_pol_power.
        return_pow: if True, also return the rotated 2D power.
    """
    n2d = n2d_IQU
    cmb2d = cmb2d_TEB
    # Expect an (ncomp, ncomp, Ny, Nx) stack: ncomp 1 for T-only, 3 for IQU
    assert cmb2d.ndim==4
    ncomp = cmb2d.shape[0]
    assert cmb2d.shape[1]==ncomp
    assert ncomp==3 or ncomp==1
    # NOTE(review): cmb2d carries a .wcs attribute, so presumably this is a
    # pixell/enmap ndmap — confirm.
    wcs = cmb2d.wcs
    shape = cmb2d.shape[-2:]
    # Rotate TEB power into the IQU pixel basis before beam/noise are applied
    if ncomp==3: cmb2d = rotate_pol_power(shape,wcs,cmb2d,iau=iau,inverse=True)
    # Total observed power: beam-convolved signal plus noise
    p2d = cmb2d*beam2d**2.+n2d
    if not(return_pow): return fcov_to_rcorr(shape,wcs,p2d,N)
    return fcov_to_rcorr(shape,wcs,p2d,N), cmb2d
|
1ad8d5c2925f5e7ab5636348cbedbed1383c2963
| 3,641,239
|
def make_data_parallel(module, expose_methods=None):
    """Wraps `nn.Module object` into `nn.DataParallel` and links methods whose name is listed in `expose_methods`
    """
    wrapped = nn.DataParallel(module)
    # Fall back to the module's own declared list when none is given
    if expose_methods is None:
        expose_methods = getattr(module, 'expose_methods', None)
    # Re-expose the named methods on the wrapper so callers need not reach
    # through .module
    for method_name in (expose_methods or []):
        setattr(wrapped, method_name, getattr(wrapped.module, method_name))
    return wrapped
|
9992b8980f2cdec22e13f6805b4d02d3694c4b4a
| 3,641,240
|
def model_creator(model_dict, X_train, y_train, rd=None, rev=None):
    """Returns a SVM classifier"""
    # Reuse a previously saved model when one exists
    clf = model_loader(model_dict, rd, rev)
    if clf is not None:
        return clf
    # Otherwise train a new SVM from scratch
    return model_trainer(model_dict, X_train, y_train, rd, rev)
|
6f962c898167d1466b80a074aa7289ff26b0c3e2
| 3,641,241
|
import torch
def bert_predict(model, loader):
    """Perform a forward pass on the trained BERT model to predict probabilities
    on the test set.
    """
    # Evaluation mode disables the dropout layers at test time.
    model.eval()
    batch_logits = []
    for batch in loader:
        # Move the first two tensors (input ids, attention mask) to the device
        moved = [t.to(device) for t in batch]
        input_ids, attn_mask = moved[0], moved[1]
        # No gradients needed for inference
        with torch.no_grad():
            batch_logits.append(model(input_ids, attn_mask))
    # Concatenate logits from each batch, softmax over classes, back to numpy
    stacked = torch.cat(batch_logits, dim=0)
    return F.softmax(stacked, dim=1).cpu().numpy()
|
602e219ce3fbed8afb86d11daf06ab09efe9c1b3
| 3,641,242
|
def eval_input_fn(training_dir, params):
    """Returns input function that feeds the model during evaluation"""
    # Both arguments are part of the expected hosting signature but unused.
    del training_dir, params
    return _input_fn('eval')
|
0bb40833dee0e7564d166b7aabb27a54d61cdf2d
| 3,641,243
|
def GNIs(features, labels, mode, params, config):
    """Builds the model function for use in an estimator.
    Arguments:
        features: The input features for the estimator.
        labels: The labels, unused here.
        mode: Signifies whether it is train or test or predict.
        params: Some hyperparameters as a dictionary.
        config: The RunConfig, unused here.
    Returns:
        EstimatorSpec: A tf.estimator.EstimatorSpec instance.
    """
    del config
    # N = number of hidden layers, H = hidden width, n_samples = noise draws
    N, H = params["N"], params["H"]
    n_samples = params["n_samples"]
    params["non_targeted_layers"] = []
    if params["input_inject"]:
        # Input injection: noise only the input, leave layers 1..N+1 alone
        params["non_targeted_layers"] = list(range(1, N + 1))
        params["non_targeted_layers"] += [N + 1]
    image_tile_summary("input", features, rows=1, cols=16)
    # --- Ensure input data is flat
    features = tf.reshape(features, (-1, np.prod(params['image_shape'])))
    features = tf.cast(features, dtype=tf.float32)
    if labels is not None:
        labels = tf.cast(labels, dtype=tf.float32)
    else:
        # Predict mode may have no labels; fabricate a dummy 10-class tensor
        labels = tf.ones_like(features[:, :10], dtype=None)
    B = int_shape(labels)[0]
    n_output = int_shape(labels)[-1]
    if params['activation'] != 'linear':
        activation = getattr(tf.nn, params['activation'])
    else:
        activation = None
    # --- Make discriminator
    if params["disc_type"] == 'mlp':
        mlp = make_mlp(activation, np.prod(params['image_shape']), N, H,
                       n_output)
    if params["disc_type"] == 'convnet':
        mlp = make_convnet(activation, params['image_shape'], n_output)
    if params["disc_type"] == 'vgg':
        mlp = make_vgg13(activation, params['image_shape'], n_output)
    # --- Retrieve intermediate activations, and layer output
    # --- we don't want to mask the final layer so activations doesn't include the output layer
    p_phi_y = mlp(features)
    sel_layer_shapes = [p_phi_y['layer_shapes'][i] for i in range(N + 1)]
    # --- Get Predictions using log(p(y|x))
    preds = p_phi_y['activations'][-1]
    # --- Classification loss, log(p(y|x))
    if params["loss"] == 'cross_entropy':
        loss = cross_entropy(labels, preds)
        pred_class = tf.argmax(input=preds, axis=-1)
        true_class = tf.argmax(input=labels, axis=-1)
        acc = tf.cast(tf.equal(pred_class, true_class), tf.float32)
        tf.compat.v1.summary.scalar("accuracy", tf.reduce_mean(acc))
    elif params["loss"] == 'mse':
        loss = square_error(labels, preds)
    global_step = tf.compat.v1.train.get_or_create_global_step()
    # Second forward pass with noise injected into the targeted layers
    p_phi_y_noisy = replace_mask_layer(
        features,
        p_phi_y,
        non_targeted_layers=params['non_targeted_layers'],
        var=params["var"],
        n_samples=n_samples,
        mode=params["noise_mode"])
    preds_noisy = p_phi_y_noisy['activations'][-1]
    # --- Classification loss, log(p(y|x))
    if params["loss"] == 'cross_entropy':
        noisy_loss = cross_entropy(labels, preds_noisy)
    elif params["loss"] == 'mse':
        noisy_loss = square_error(labels, preds_noisy)
    optimizer = tf.compat.v1.train.GradientDescentOptimizer(
        params["learning_rate"])
    gradients, variables = [], []
    tf.compat.v1.summary.scalar("learning_rate", params["learning_rate"])
    tf.compat.v1.summary.scalar("batch_size", B)
    # --- Enumerate over activation layers, zip automatically removes final
    # --- logit layer
    layers = [
        l for l in p_phi_y['net'].layers
        if ('dense' in l.name or 'conv' in l.name)
    ]
    noises = [
        tf.reshape(n, (B, n_samples, -1)) for n in p_phi_y_noisy['noise'][:-1]
    ]
    weights = [layers[i].trainable_weights[0] for i in range(N + 1)]
    acts = p_phi_y['activations'][:-1]
    # Per-layer Jacobians of the network output w.r.t. each activation
    Js = [
        tf.reshape(batch_jacobian(preds, a, use_pfor=True), (B, -1, n_output))
        for a in acts
    ]
    print(Js)
    # NOTE(review): this rebinds H — previously the hidden width from params
    # above — to the Hessian term of the Taylor expansion; confirm intended.
    G, C, H = calc_taylor_expansion(Js, loss, preds, noises, B, n_samples)
    EC = calc_tikhonov_reg(Js, acts, preds, params["noise_mode"],
                           params["var"], params["loss"])
    H_sig = heavy_tail_variance(Js, loss, preds)
    l_noise = 0
    # Choose how the noisy loss is modelled, depending on noise_type
    if params["noise_type"] is None:
        noisy_loss_estimate = loss
    elif params["noise_type"] == 'input':
        noisy_loss_estimate = noisy_loss
    elif 'full' in params["noise_type"]:
        # --- This is the Gaussian stuff
        assert n_samples == 1
        l_noise += H + G + C
        noisy_loss_estimate = loss + l_noise
    elif 'marginal' in params["noise_type"]:
        # --- Don't ever noise final layer
        assert n_samples == 1
        l_noise = EC
        if 'H' in params["noise_type"]:
            l_noise += H
        if 'C' in params["noise_type"]:
            # alpha, beta, sigma, mu = tf.py_func(
            #     estimate_all_params,
            #     inp=[(C - EC)],
            #     Tout=[tf.float32, tf.float32, tf.float32, tf.float32])
            #
            # tf.compat.v1.summary.scalar('C/alpha', alpha)
            # tf.compat.v1.summary.scalar('C/beta', beta)
            # tf.compat.v1.summary.scalar('C/sigma', sigma)
            # tf.compat.v1.summary.scalar('C/mu', mu)
            # tf.compat.v1.summary.scalar('C', tf.reduce_mean(C - EC))
            # tf.compat.v1.summary.histogram('C', C)
            l_noise += (C - EC)
        if 'G' in params["noise_type"]:
            l_noise += G
        noisy_loss_estimate = loss + l_noise
    # --- Compare the measured noise against the analytic estimate
    actual_noise = tf.reduce_mean(noisy_loss - loss)
    estimated_noise = tf.reduce_mean(noisy_loss_estimate - loss)
    tf.compat.v1.summary.scalar('loss/actual_noise', actual_noise)
    tf.compat.v1.summary.scalar('loss/estimated_noise', estimated_noise)
    tf.compat.v1.summary.scalar("loss/noisy_" + params["loss"],
                                tf.reduce_mean(noisy_loss))
    tf.compat.v1.summary.scalar("loss/og_" + params["loss"],
                                tf.reduce_mean(loss))
    noise_err = tf.reduce_mean(estimated_noise - actual_noise)
    tf.compat.v1.summary.scalar(
        'loss/noise_est_pe',
        tf.abs(noise_err / tf.reduce_mean(actual_noise + 1e-8)))
    tf.compat.v1.summary.scalar('loss/noise_est_mse',
                                tf.abs(tf.reduce_mean(noise_err**2)))
    loss_err = tf.reduce_mean(noisy_loss_estimate - noisy_loss)
    tf.compat.v1.summary.scalar(
        'loss/loss_est_pe',
        tf.abs(loss_err / tf.reduce_mean(noisy_loss + 1e-8)))
    tf.compat.v1.summary.scalar('loss/loss_est_mse',
                                tf.abs(tf.reduce_mean(loss_err**2)))
    if params["L2"] > 0:
        # Optional weight decay added to the estimated objective
        vars = tf.trainable_variables()
        l2_reg = tf.add_n([tf.nn.l2_loss(v) for v in vars]) * params["L2"]
        noisy_loss_estimate += l2_reg
        tf.compat.v1.summary.scalar("loss/L2_reg", l2_reg)
        loss_err = tf.reduce_mean(noisy_loss_estimate - noisy_loss)
    # tf.compat.v1.summary.image('activations_covariance', activation_covariance)
    # g_noise =
    # --- Per-layer diagnostics and gradient collection
    for i, w in enumerate(weights):
        layer_name = "layer_" + str(i)
        num_params = np.prod(int_shape(w))
        a = p_phi_y['activations'][i]
        noisy_a = p_phi_y_noisy['activations'][i]
        inj_noise = noisy_a - a
        print(noisy_a, a)
        # --- Display in tensorboard -- Injected noise stats
        tf.compat.v1.summary.histogram(layer_name + '/injected_noise',
                                       inj_noise)
        n_neurons = int_shape(a)[1]
        tf.compat.v1.summary.histogram(layer_name + '/w', w)
        corr = tfp.stats.correlation(a)
        tf.compat.v1.summary.scalar(layer_name + '/corr', tf.reduce_mean(corr))
        sparsity = tf.reduce_sum(tf.cast(a <= 1e-6, tf.float32))
        # tf.compat.v1.summary.scalar(layer_name + '/lifetime_sparsity',
        #                             sparsity / B)
        tf.compat.v1.summary.scalar(layer_name + '/population_sparsity',
                                    sparsity / (B * n_neurons))
        # --- Retrieve the noise of the gradient of each layer
        # --- = noisy gradients - gradients, this corresponds to
        # --- n_t * gradients where n_t is our noise matrix
        # --- W gradients
        og_W_n = tf.gradients([tf.reduce_mean(noisy_loss)], [w])[0]
        g_W_n = tf.gradients([tf.reduce_mean(noisy_loss_estimate)], [w])[0]
        g = tf.gradients(tf.reduce_mean(loss), w)[0]
        err = -g_W_n + og_W_n
        g_noise = g_W_n - g
        tf.compat.v1.summary.scalar(layer_name + '/mean_grad_noise',
                                    tf.reduce_mean(g_noise))
        tf.compat.v1.summary.histogram(layer_name + '/grad_noise', g_noise)
        tf.compat.v1.summary.scalar(layer_name + '/weights_l2/',
                                    tf.reduce_mean(tf.norm(w)))
        tf.compat.v1.summary.scalar(layer_name + '/grad_est_mse',
                                    tf.reduce_mean((og_W_n - g_W_n)**2))
        tf.compat.v1.summary.scalar(layer_name + '/grad_est_pe',
                                    tf.reduce_mean((-og_W_n + g_W_n) / og_W_n))
        # Train on the gradients of the ESTIMATED noisy loss
        gradients.extend([g_W_n])
        variables.extend([w])
    # NOTE(review): `i` here is the final loop index from above — this guard
    # effectively means "more than one weight layer"; confirm intended.
    if i > 0 and params['calc_hessian']:
        # --- Number of parameters does not include batch_size
        hessians = trace_hessian([noisy_loss], weights)
        h_trace = tf.reduce_sum(tf.concat(hessians, axis=1)) / (B * n_samples)
        for i, h in enumerate(hessians):
            layer_name = "layer_" + str(i)
            tf.compat.v1.summary.scalar(layer_name + '/H_trace',
                                        tf.reduce_sum(h) / (B * n_samples))
        tf.compat.v1.summary.scalar('network/H_trace', h_trace)
    # --- Sum all them losses
    loss = tf.reduce_mean(loss)
    noisy_loss = tf.reduce_mean(noisy_loss)
    train_step = optimizer.apply_gradients(zip(gradients, variables),
                                           global_step=global_step)
    if mode == tf.estimator.ModeKeys.PREDICT:
        eval_metrics = {}
        predictions = {
            'preds': tf.nn.softmax(p_phi_y['activations'][-1], axis=1)
        }
        predictions['GCH'] = G + C + H - EC
        for i, J in enumerate(Js):
            predictions['J' + str(i)] = J
        # for i, w in enumerate(weights):
        #     predictions['dGCH' + str(i)] = tf.gradients(
        #         [predictions['GCH']], [w])[0]
        if params['calc_hessian']:
            # --- Number of parameters does not include batch_size
            hessians = trace_hessian([noisy_loss], weights[1:3])
            h_trace = tf.reduce_sum(tf.concat(hessians,
                                              axis=1)) / (B * n_samples)
            predictions['h_trace'] = h_trace
    else:
        predictions = {}
        eval_metrics = {
            "loss/og": tf.compat.v1.metrics.mean(loss),
        }
        if params["loss"] == 'cross_entropy':
            eval_metrics["accuracy"] = tf.compat.v1.metrics.mean(acc)
    return tf.estimator.EstimatorSpec(mode=mode,
                                      loss=loss,
                                      predictions=predictions,
                                      train_op=train_step,
                                      eval_metric_ops=eval_metrics)
|
724b32981a3b79c6725e4a7c6add9ab0f5046647
| 3,641,244
|
def b58decode(v, length):
    """Decode base58 string ``v`` into a byte string of ``length`` chars.

    Leading '1' characters in ``v`` decode to leading zero bytes.

    :param v: base58-encoded string.
    :param length: expected decoded length, or None to skip the check.
    :returns: decoded string, or None when the length check fails.
    """
    # Plain int instead of Python-2-only `0L`: Python 2 auto-promotes to long,
    # and `0L` is a syntax error on Python 3.
    long_value = 0
    for (i, c) in enumerate(v[::-1]):
        long_value += __b58chars.find(c) * (__b58base**i)
    # Convert the big integer to a base-256 string, most significant byte first
    result = ''
    while long_value >= 256:
        div, mod = divmod(long_value, 256)
        result = chr(mod) + result
        long_value = div
    result = chr(long_value) + result
    # Each leading '1' (the zero digit of base58) encodes one leading zero byte
    nPad = 0
    for c in v:
        if c == __b58chars[0]:
            nPad += 1
        else:
            break
    result = chr(0)*nPad + result
    if length is not None and len(result) != length:
        return None
    return result
|
3a9d1d5da02c2174bcf0220de705a92a91cd0b18
| 3,641,245
|
def has_remove_arg(args):
    """Report whether the literal "remove" argument is present.

    :param args: Argument list
    :return: True if "remove" occurs in ``args``, False otherwise
    """
    return "remove" in args
|
9b07fe70cecfbdf6e6e2274e5b3e715f903331c7
| 3,641,247
|
def supported_locales(prefix, parsed_args, **kwargs):
    """
    Returns all supported locales.

    The arguments mirror an argcomplete-style completer signature and are
    intentionally unused: every locale is offered regardless of the prefix.

    :param prefix: The prefix text of the last word before the cursor on the command line.
    :param parsed_args: The result of argument parsing so far.
    :param kwargs: keyword arguments.
    :returns list: list of all supported locales.
    """
    return constants.locales()
|
db6f73699120dc4b784b1f46ed7c9fbe4a3cc9a9
| 3,641,248
|
def generate_tool_panel_dict_for_tool_config( guid, tool_config, tool_sections=None ):
    """
    Create a dictionary of the following type for a single tool config file name. The intent is to call this method for every tool config
    in a repository and append each of these as entries to a tool panel dictionary for the repository. This allows for each tool to be
    loaded into a different section in the tool panel.
    {<Tool guid> : [{ tool_config : <tool_config_file>, id: <ToolSection id>, version : <ToolSection version>, name : <TooSection name>}]}
    """
    # Strip the path so only the config file's base name is recorded.
    base_name = suc.strip_path( tool_config )
    section_dicts = generate_tool_section_dicts( tool_config=base_name, tool_sections=tool_sections )
    return { guid: section_dicts }
|
8e976cf4d54212d0477ef4ae7d4fb1dd532363fa
| 3,641,249
|
def get_tmp_dir():
    """Return the per-process tmp directory under ``result_dir``, creating it if missing."""
    path = result_dir / "tmp"
    path.mkdir(exist_ok=True)
    return path
|
406962c5783dff1d23523bd5bd258b7bb18ed149
| 3,641,250
|
def get_logs(job_id, user, index):
    """Fetch logs for ``job_id`` via the backend singleton, coercing ``index`` to int."""
    backend = instance()
    return backend.get_logs(job_id=job_id, user=user, log_index=int(index))
|
f2d959835c34ffec475d5e9da18e74feef13b5d9
| 3,641,251
|
def repeat_as_list(x: TensorType, n: int):
    """
    Build a list containing ``x`` repeated ``n`` times.

    Note: every entry is the same object (no copies are made), exactly as a
    comprehension over ``range(n)`` would produce.

    :param x: Array/Tensor to be repeated
    :param n: Integer with the number of repetitions
    :return: List of n repetitions of Tensor x
    """
    return [x] * n
|
cb4924909d93899a555c11bd70950c6cbb77cf85
| 3,641,252
|
def transition(x, concat_axis, nb_filter, dropout_rate=None, weight_decay=1E-4):
    """Apply BatchNorm, ReLU, 1x1 Conv2D, optional dropout and 2x2 AveragePooling2D.

    :parameter x: keras tensor/model output to transform
    :parameter concat_axis: int -- index of concatenate (feature) axis
    :parameter nb_filter: int -- number of filters
    :parameter dropout_rate: float -- dropout rate; dropout is skipped when falsy
    :parameter weight_decay: float -- L2 weight decay factor
    :returns: model
    :return type: keras tensor, after applying batch_norm, relu-conv, dropout, avg-pool
    """
    x = BatchNormalization(axis=concat_axis, gamma_regularizer=l2(weight_decay), beta_regularizer=l2(weight_decay))(x)
    x = Activation('relu')(x)
    # 1x1 convolution compresses the channel count between dense blocks
    x = Conv2D(nb_filter, (1, 1), kernel_initializer="he_uniform", padding="same", use_bias=False, kernel_regularizer=l2(weight_decay))(x)
    if dropout_rate:
        x = Dropout(dropout_rate)(x)
    # NOTE: previous docstring said MaxPooling2D; the code actually average-pools.
    x = AveragePooling2D((2, 2), strides=(2, 2))(x)
    return x
|
30d89aca3a330dc6b04b4c9ee21a8620c8ba69f1
| 3,641,253
|
import torch
def scale_params(cfg):
    """
    Scale, in place on ``cfg``:
      * learning rate,
      * weight decay,
      * box_loss_gain,
      * cls_loss_gain,
      * obj_loss_gain
    according to:
      * effective batch size
      * DDP world size
      * image size
      * num YOLO output layers
      * num classes

    Returns the same (mutated) cfg object.
    """
    logger = get_logger(__name__)
    # Scale LR and weight decay
    is_ddp = cfg.sg_model.multi_gpu == MultiGPUMode.DISTRIBUTED_DATA_PARALLEL and torch.distributed.is_initialized()
    world_size = torch.distributed.get_world_size() if is_ddp else 1
    # Scale LR and WD for DDP due to gradients being averaged between devices
    # Equivalent to loss * WORLD_SIZE in ultralytics
    cfg.training_params.initial_lr *= world_size
    cfg.training_params.warmup_bias_lr *= world_size
    cfg.training_params.optimizer_params.weight_decay /= world_size
    # Scale WD with a factor of [effective batch size]/64.
    batch_size, batch_accumulate = cfg.dataset_params.batch_size, cfg.training_params.batch_accumulate
    batch_size_factor = cfg.sg_model.num_devices if is_ddp else cfg.sg_model.dataset_interface.batch_size_factor
    effective_batch_size = batch_size * batch_size_factor * batch_accumulate
    cfg.training_params.optimizer_params.weight_decay *= effective_batch_size / 64.
    # Scale EMA beta to match Ultralytics update
    # NOTE(review): beta grows with total optimizer steps (epochs * iters / 2000);
    # confirm this matches the EMA implementation's expected parameterization.
    cfg.training_params.ema_params.beta = cfg.training_params.max_epochs * len(cfg.sg_model.train_loader) / 2000.
    log_msg = \
        f"""
IMPORTANT:\n
Training with world size of {world_size}, {'DDP' if is_ddp else 'no DDP'}, effective batch size of {effective_batch_size},
scaled:
    * initial_lr to {cfg.training_params.initial_lr};
    * warmup_bias_lr to {cfg.training_params.warmup_bias_lr};
    * weight_decay to {cfg.training_params.optimizer_params.weight_decay};
    * EMA beta to {cfg.training_params.ema_params.beta};
"""
    if cfg.training_params.loss == 'yolo_v5_loss':
        # Scale loss gains
        model = cfg.sg_model.net
        model = model.module if hasattr(model, 'module') else model
        num_levels = model._head._modules_list[-1].detection_layers_num
        train_image_size = cfg.dataset_params.train_image_size
        # Normalization baselines: 3 output levels, 80 classes, 640px images
        num_branches_norm = 3. / num_levels
        num_classes_norm = len(cfg.sg_model.classes) / 80.
        image_size_norm = train_image_size / 640.
        cfg.training_params.criterion_params.box_loss_gain *= num_branches_norm
        cfg.training_params.criterion_params.cls_loss_gain *= num_classes_norm * num_branches_norm
        cfg.training_params.criterion_params.obj_loss_gain *= image_size_norm ** 2 * num_branches_norm
        log_msg += \
            f"""
    * box_loss_gain to {cfg.training_params.criterion_params.box_loss_gain};
    * cls_loss_gain to {cfg.training_params.criterion_params.cls_loss_gain};
    * obj_loss_gain to {cfg.training_params.criterion_params.obj_loss_gain};
"""
    logger.info(log_msg)
    return cfg
|
a74472a5c5ce2a6b83eab0467c66b468226c222d
| 3,641,255
|
def get_model(args):
    """
    Build the model selected by ``args.model`` and move it to ``args.device``.

    Args:
        args: configuration namespace; uses ``args.model`` (one of "lstm",
            "lstmattn", "bert", "lqt") and ``args.device``.

    Returns:
        The instantiated model, already moved to ``args.device``.

    Raises:
        ValueError: if ``args.model`` is not a known model name.
    """
    # Dispatch table replaces the original chain of independent `if`s, which
    # silently fell through and raised UnboundLocalError for unknown names.
    builders = {
        "lstm": LSTM,
        "lstmattn": LSTMATTN,
        "bert": Bert,
        "lqt": LastQuery,
    }
    try:
        model = builders[args.model](args)
    except KeyError:
        raise ValueError("Unknown model name: {!r}".format(args.model)) from None
    model.to(args.device)
    return model
|
131a4e3d8832d9b0aa099c55f7a8851d3a8907ef
| 3,641,256
|
def convert_to_boolean(value):
    """Turn strings to bools if they look like them, leaving anything else as-is.

    >>> all(convert_to_boolean(v) is True for v in ['true', 'on', 'yes', '1'])
    True
    >>> any(convert_to_boolean(v) for v in ['false', 'off', 'no', '0'])
    False
    >>> convert_to_boolean('other')
    'other'
    """
    if not isinstance(value, str):
        return value
    lowered = value.lower()
    if lowered in ('t', 'true', 'on', 'yes', '1'):
        return True
    if lowered in ('f', 'false', 'off', 'no', '0'):
        return False
    return value
|
7cbf7a8fd601904c7aa8b685f6a3b3f5eaaa5c51
| 3,641,257
|
def getSampleBandPoints(image, region, **kwargs):
    """
    Function to perform sampling of an image over a given region, using ee.Image.sample(image, region, **kwargs)
    Args:
        image (ee.Image): an image to sample
        region (ee.Geometry): the geometry over which to sample
        **kwargs: forwarded to ``ee.Image.sample``; overrides the defaults below
    Returns:
        An ee.FeatureCollection of sampled points along with coordinates
    """
    # Defaults: 1000 pixels within the given region; callers may override both.
    dargs = {
        'numPixels': 1000,
        'region': region
    }
    dargs.update(kwargs)
    sample = image.sample(**dargs)
    return sample
|
4cfbc3c180b805abe52c718f81cc16c409693922
| 3,641,258
|
def updateRIPCount(idx,RIPtracker,addRev=0,addFwd=0,addNonRIP=0):
    """Add observed RIP events to tracker by row.

    Replaces row ``idx`` of ``RIPtracker`` (a list of namedtuples) with a copy
    whose reverse/forward/non-RIP tallies are incremented, then returns the
    (mutated) tracker list.
    """
    row = RIPtracker[idx]
    RIPtracker[idx] = row._replace(
        revRIPcount=row.revRIPcount + addRev,
        RIPcount=row.RIPcount + addFwd,
        nonRIPcount=row.nonRIPcount + addNonRIP,
    )
    return RIPtracker
|
7f83c547d9acd6c697174fffa1ccb3aec6e91a24
| 3,641,259
|
def serialize(obj):
    """ Return a JSON-serializable representation of an object.

    The wrapper dict records the class and module name so that an instance
    can be recreated during de-serialization.

    Raises:
        TypeError: if no serialization strategy exists for the object's class.
        ValueError: if the object could not be recreated later (class defined
            in ``__main__``, or no ``from_serializable`` constructor).
    """
    cls = obj.__class__
    cls_name = cls.__name__
    module_name = cls.__module__
    serializer = None
    if hasattr(obj, "to_serializable"):
        # The object implements its own serialization
        s = obj.to_serializable()
    elif hasattr(obj, "__dict__"):
        # Use the object's __dict__ if it's there
        s = obj.__dict__
    else:
        # Fall back to a serializer registered for (module, class)
        serializer = _serializers.get((module_name, cls_name))
        if serializer is None:
            # Explicit exception instead of assert: still raised under `python -O`
            raise TypeError(
                "No serializer available for {}.{}".format(module_name, cls_name)
            )
        # Apply the serializer to the object
        s = serializer[0](obj)
    # Sanity checks: we must be able to recreate an instance of this class
    # during de-serialization
    if not module_name or module_name == "__main__":
        raise ValueError("Cannot serialize classes defined in __main__")
    if serializer is None and not hasattr(cls, "from_serializable"):
        raise ValueError(
            "{}.{} has no from_serializable() constructor".format(module_name, cls_name)
        )
    # Return a serialization wrapper dict with enough info for deserialization
    return dict(
        __cls__=cls_name,
        __module__=module_name,
        __obj__=s
    )
|
3fd5449922808a1e1772b3937bca6736c63df9a2
| 3,641,260
|
async def async_unload_entry(hass, config_entry):
    """Unload a config entry: drop the controller from hass.data and reset it."""
    conf = config_entry.data[CONF_CONTROLLER]
    controller_id = CONTROLLER_ID.format(host=conf[CONF_HOST], site=conf[CONF_SITE_ID])
    controller = hass.data[DOMAIN].pop(controller_id)
    return await controller.async_reset()
|
2341f49794ecd9f9824330594cf3955bca117455
| 3,641,262
|
import operator
def get_farthest_three_shots(gps_shots):
    """Return the triple of GPS-tagged shots spanning the largest triangle area."""
    def triangle_area(triple):
        # Area of the triangle formed by the three shots' GPS positions
        return area(*(np.array(s.metadata.gps_position) for s in triple))
    return max(combinations(gps_shots, 3), key=triangle_area)
|
697d87549bee0a8ff3adee30ceb7b41a24f3d66b
| 3,641,264
|
from operator import sub
def __parse_entry(entry_line):
    """Parse the SOFT file entry name line that starts with '^', '!' or '#'.

    :param entry_line: str -- line from SOFT file
    :returns: tuple -- (entry type, entry value); value is '' when the line
        has no '=' separator.
    """
    # Local import: the substitution below is a *regex* substitution.
    # (`operator.sub` takes two arguments and would raise TypeError here.)
    import re
    if entry_line.startswith("!"):
        # Strip the "!<prefix>_" marker, e.g. "!Sample_title" -> "title"
        entry_line = re.sub(r"!\w*?_", '', entry_line)
    else:
        # '^' / '#' lines: drop the marker character itself
        entry_line = entry_line.strip()[1:]
    try:
        entry_type, entry_name = [i.strip() for i in entry_line.split("=", 1)]
    except ValueError:
        entry_type = [i.strip() for i in entry_line.split("=", 1)][0]
        entry_name = ''
    return entry_type, entry_name
|
1a645cb4dcaafaa4de1db7011d3ff54931b8123f
| 3,641,265
|
def _mut_insert_is_applied(original, mutated):
    """ Checks if mutation was caused by `mut_insert`.

    :param original: the pre-mutation individual
    :param mutated: the post-mutation individual
    :return: (bool, str). True with None when the mutated individual has
        strictly more primitives; otherwise False with an explanatory message.
    """
    n_before = len(list(original.primitives))
    n_after = len(list(mutated.primitives))
    if n_before >= n_after:
        message = "Number of primitives should be strictly greater, was {} is {}.".format(
            n_before, n_after
        )
        return False, message
    return True, None
|
f19bb092e1eefc14435f5bb90a030558980fed4c
| 3,641,266
|
from typing import Any
from typing import Dict
from typing import Optional
def remap_ids(
    mapping_table: Optional[Dict[Any, int]] = None, default: int = 0, dtype: DTypes = "i"
) -> Model[InT, OutT]:
    """Remap string or integer inputs using a mapping table, usually as a
    preprocess before embeddings. The mapping table can be passed in on input,
    or updated after the layer has been created. The mapping table is stored in
    the "mapping_table" attribute.
    """
    # Build a fresh dict per call when no table is given: a `{}` default
    # argument would be one shared object, so updating one layer's table
    # would silently leak into every other layer created without a table.
    return Model(
        "remap_ids",
        forward,
        attrs={
            "mapping_table": mapping_table if mapping_table is not None else {},
            "dtype": dtype,
            "default": default,
        },
    )
|
4380b9377930d6affac6703a0a1e656a916b45db
| 3,641,267
|
def get_text(cell):
    """Return the stripped, space-joined text content of a BeautifulSoup td element."""
    fragments = (fragment.strip() for fragment in cell.findAll(text=True))
    return ' '.join(fragments).strip()
|
08037cbe5d2058206de029417f03d211d350820f
| 3,641,268
|
import torch
def test_augmentation(text, text_lengths, augmentation_class):
    """
    Augment the input text for evaluation and stack the original alongside.

    :param text: input text
    :param text_lengths: text length
    :param augmentation_class: augmentation class providing ``test_augment``
    :return: long tensor containing the augmented variants plus the original
    """
    augmented = augmentation_class.test_augment(text, text_lengths)
    # Keep the un-augmented input as the final row
    augmented.append(text)
    return torch.FloatTensor(augmented).long()
|
2f83ec9fa0afa110d05f05f52e85cae65a28c6f9
| 3,641,270
|
def selfintersection(linear_ring: Points):
    """
    Return whether the closed linear ring self-intersects.

    not support warp polygon.
    """
    validate.linear_ring(linear_ring)
    if len(linear_ring) == 4:
        # Triangle (3 vertices + closing point): the only possible
        # self-intersection is degeneracy, i.e. the doubled signed area of the
        # three vertices is (numerically) zero.
        return (
            abs(
                linear_ring[0][1] * (linear_ring[1][0] - linear_ring[2][0])
                + linear_ring[1][1] * (linear_ring[2][0] - linear_ring[0][0])
                + linear_ring[2][1] * (linear_ring[0][0] - linear_ring[1][0])
            )
            < EPSILON
        )
    # Consecutive vertex pairs form the ring's edge segments
    lines = [[linear_ring[i], linear_ring[i + 1]] for i in range(len(linear_ring) - 1)]
    def check(lines, start=0):
        # Test segment `start` against every later non-adjacent segment,
        # then recurse to the next start segment.
        if start + 2 >= len(lines):
            return False
        l1 = lines[start]
        # The first and last segments share the ring's closing vertex, so the
        # last segment is excluded when start == 0.
        endIndex = len(lines) - 1 if start == 0 else len(lines)
        for i in range(start + 2, endIndex):
            l2 = lines[i]
            if intersection(*l1, *l2):
                return True
        return check(lines, start + 1)
    return check(lines)
|
d0b92d7796a3281a4481071f0b0666fdf79c6952
| 3,641,271
|
import math
def ToMercPosition(lat_deg, num_tiles):
    """Calculate position of a given latitude on qt grid.

    LOD is log2(num_tiles).

    Args:
        lat_deg: (float) Latitude in degrees.
        num_tiles: (integer) Number of tiles in the qt grid.
    Returns:
        Floating point position of latitude in tiles relative to equator.
    """
    phi = lat_deg / 180.0 * math.pi
    # Mercator projection: y = ln(tan(phi/2 + pi/4)), spanning [-pi, pi]
    y_merc = math.log(math.tan(phi / 2.0 + math.pi / 4.0))
    return num_tiles / 2.0 * (1 + y_merc / math.pi)
|
1ae7e7b2da9ec3ee20756ef7ffa13d99485aaea7
| 3,641,272
|
def conv3x3(in_planes, out_planes, stride=1, dilation=1, groups=1, bias=False):
    """2D 3x3 convolution.

    Args:
        in_planes (int): number of input channels.
        out_planes (int): number of output channels.
        stride (int): stride of the operation.
        dilation (int): dilation rate of the operation.
        groups (int): number of groups in the operation.
        bias (bool): whether to add learnable bias parameter.
    Returns:
        `nn.Conv2d' instance.
    """
    # padding == dilation keeps the spatial size unchanged at stride 1
    conv_kwargs = dict(
        kernel_size=3,
        stride=stride,
        padding=dilation,
        dilation=dilation,
        groups=groups,
        bias=bias,
    )
    return nn.Conv2d(in_planes, out_planes, **conv_kwargs)
|
d5658d81f5fbc5d196418e4e4b005dbf7d3f20ae
| 3,641,273
|
def parse_hostnames(filename, hostnames):
    """Parses host names from a comma-separated list or a filename.

    Exactly one of the two sources must be provided; otherwise the program dies.

    :param filename: filename with host names (one per line)
    :type filename: string
    :param hostnames: comma-separated list of host names
    :type hostnames: string
    :rtype: list of host names
    """
    if bool(filename) == bool(hostnames):
        die('Please specify either --filename or --hosts')
    if filename:
        hosts = _parse_hostname_file(filename)
    elif hostnames:
        hosts = _parse_hostname_list(hostnames)
    else:
        hosts = hostnames
    if not hosts:
        die('No valid hosts found.')
    return hosts
|
b3fce0f3af7f59217fd18bfce53baec87784759f
| 3,641,275
|
def ssh(host, command, stdin=None):
    """Run 'command' (list) on 'host' via ssh.
    stdin is an string to send."""
    full_command = [*SSH_COMMAND, ssh_user_host(host), *command]
    return run(full_command, stdin=stdin)
|
9719aef39530e285d27a2e9dd5a7ceab09f3793e
| 3,641,276
|
def cart_step1_choose_type_of_order(request):
    """
    This view is not login required because we want to display some summary of
    ticket prices here as well.
    """
    # Show the "special" section only when special (other-type) fares exist
    has_special_fares = bool(get_available_fares_for_type(TicketType.other))
    return TemplateResponse(
        request,
        "conference/cart/step_1_choose_type_of_order.html",
        {"show_special": has_special_fares},
    )
|
869941df96c750c0049f6ab5e50e5fad17679af2
| 3,641,278
|
def buildAndTrainModel(model, learningRate, batchSize, epochs, trainingData, validationData, testingData, trainingLabels, validationLabels, testingLabels, MODEL_NAME, isPrintModel=True):
    """Take the model and model parameters, build and train the model.

    Compiles ``model`` with Adam + MSE, trains one epoch at a time, and records
    per-epoch metrics into the module-level lists (MeanSquaredError, ...).

    NOTE(review): metrics are appended to module-level lists, so repeated calls
    accumulate across models — confirm callers reset them between runs. Also,
    ``max_predictions`` is only bound inside the loop, so epochs == 0 would
    raise at the return statement.
    """
    # Build and compile model
    # To use other optimizers, refer to: https://keras.io/optimizers/
    # Please do not change the loss function
    optimizer = tf.keras.optimizers.Adam(lr=learningRate)
    model.compile(optimizer=optimizer,
                  loss=tf.keras.losses.MeanSquaredError())
    if isPrintModel:
        print(model.summary())
    for epoch in range(0, epochs):
        # One epoch per fit() call so metrics can be captured after each pass
        model.fit(trainingData, trainingLabels,
                  epochs=1,
                  verbose=0,
                  batch_size=batchSize,
                  shuffle=False)
        # Evaluate model
        valLoss = model.evaluate(validationData, validationLabels, verbose=False)
        #model.save('Results/StructuredBinary/{}/epoch_{}'.format(filename,epoch))
        ## get metrics
        predictions = model.predict(testingData)
        MSE, MAE, MAPE, RMSE, PR = getMetrics(testingLabels,predictions)
        MeanSquaredError.append(MSE)
        RootMeanSquaredError.append(RMSE)
        MeanAbsoluteError.append(MAE)
        MeanAbsolutePercentageError.append(MAPE)
        PearsonR.append(PR)
        ValMSE.append(valLoss)
        Epoch.append(epoch)
        # Keep predictions from the best-validation epoch. valLoss was just
        # appended, so the first epoch always sets max_predictions.
        if valLoss <= min(ValMSE):
            max_predictions = predictions
    return MeanSquaredError, RootMeanSquaredError, MeanAbsoluteError, MeanAbsolutePercentageError, ValMSE, PearsonR, Epoch, max_predictions
|
9e9170ccb817be6ec3908c16390d1afe4f96b2e7
| 3,641,279
|
def vflip():
    """Toggle vertical flipping of camera image."""
    # Catch ajax request with form data
    vflip_val = 'error'
    if request.method == 'POST':
        vflip_val = request.form.get('vflip')
        if vflip_val is not None:
            # NOTE(review): the log text and the response key both say
            # "brightness" — this looks like a copy-paste from the brightness
            # handler. Confirm clients don't depend on the 'brightness' key
            # before renaming it to 'vflip'.
            app.logger.info('Form brightness submitted: %s', vflip_val)
            camera.set_vflip(vflip_val == 'true')
    return {'brightness': vflip_val}
|
064280aadbdc53783d983caa66a52d294732be9e
| 3,641,280
|
def getGoalHistogramData(responses):
    """
    Goal Completion histogram chart on project detail page.
    Return: {obj} Counts and % of each Goal Completion rating across given responses,
    or None when anything goes wrong (e.g. no responses with a primary goal).
    """
    try:
        snapshotResponses = responses.exclude(Q(primary_goal__isnull=True) | Q(primary_goal__name=''))
        respsnapshotResponsesCount = snapshotResponses.count()
        # Get unique list of primary goals and count each primary goal occurance.
        # Then clean up names and change counts to percents
        goals = list(snapshotResponses.values(goalName=F('primary_goal__name')).annotate(goalTotal=Count('primary_goal')).order_by('-goalTotal'))
        # For each unique goal and count found:
        for goal in goals:
            goalResponses = snapshotResponses.filter(primary_goal__name=goal['goalName']).select_related('goal_completed')
            responseYesCount = goalResponses.filter(goal_completed__name__iexact='yes').count()
            # BUGFIX: was 'yartially', which never matched and left the
            # "Partially" bucket permanently at zero.
            responsePartiallyCount = goalResponses.filter(goal_completed__name__iexact='partially').count()
            responseNoCount = goalResponses.filter(goal_completed__name__iexact='no').count()
            goal['Yes'] = responseYesCount
            goal['Partially'] = responsePartiallyCount
            goal['No'] = responseNoCount
            goal['YesPercent'] = round((responseYesCount/goal['goalTotal'])*100)
            goal['NoPercent'] = round((responseNoCount/goal['goalTotal'])*100)
            goal['PartiallyPercent'] = round((responsePartiallyCount/goal['goalTotal'])*100)
            goal['goalName'] = goal['goalName'].replace('_',' ').capitalize()
            goal['goalPercent'] = round((goal['goalTotal']/respsnapshotResponsesCount)*100)
    except Exception:
        # Keep the page rendering even if the aggregation fails; the template
        # treats None as "no data available".
        goals = None
    return goals
|
782c911f1a751ccf4c441874520f0cbc66b4a89c
| 3,641,281
|
def hookes_law(receiver_nodes, sender_nodes, k, x_rest):
    """Applies Hooke's law to springs connecting some nodes.

    Args:
        receiver_nodes: Ex5 tf.Tensor of [x, y, v_x, v_y, is_fixed] features for the
            receiver node of each edge.
        sender_nodes: Ex5 tf.Tensor of [x, y, v_x, v_y, is_fixed] features for the
            sender node of each edge.
        k: Spring constant for each edge.
        x_rest: Rest length of each edge.
    Returns:
        Nx2 Tensor of the force [f_x, f_y] acting on each edge.
    """
    # Displacement vector between endpoints (positions are the first 2 features)
    displacement = receiver_nodes[..., 0:2] - sender_nodes[..., 0:2]
    length = tf.norm(displacement, axis=-1, keepdims=True)
    # F = -k * (|x| - x_rest) along the unit displacement direction
    magnitude = tf.multiply(k, (length - x_rest) / length)
    return -1 * magnitude * displacement
|
30182ed5e91e07affa4db117c9e24a9cf76e3646
| 3,641,282
|
def check_output_filepath(filepath):
    """
    Check and return an appropriate output_filepath parameter.

    Ensures the file is a csv file; anything else (including an empty value)
    is replaced by the default report name.

    :param filepath: string filepath name
    :returns: a string representing a filepath location.
    """
    return filepath if filepath.endswith('.csv') else "clean_rules_report.csv"
|
63fcf697dbde9a62cc39311b4d234955520f6394
| 3,641,283
|
import re
def mock_open_url(url, allow_local=False, timeout=None, verify_ssl=True, http_headers=None):
    """Open local files instead of URLs.

    If it's a local file path, leave it alone; otherwise open the URL's
    basename under ./files/mock/. The remaining keyword arguments exist only
    to match the real open-url signature and are ignored.

    This is meant as a side effect for unittest.mock.Mock.
    """
    if re.match(r'https?:', url):
        # Looks like a URL: map it to a fixture file by its basename
        basename = re.sub(r'^.*/([^/]+)$', '\\1', url)
        path = resolve_path('files/mock/' + basename)
    else:
        # Assume it's a file
        path = url
    return (open(path, 'rb'), None, None, None)
|
28705c7d1785853f99d544967e745a12a58321f0
| 3,641,284
|
def concat_chunked_data(jsons, f_src='c', *args, **kwargs):
    """
    Takes chunks of data and combines them into a numpy array
    of shape trial x cells x time, concatenated over trials, and
    clips the trials at shortest frame number (and, when cell counts differ,
    fewest cells). Args and kwargs are passed to process_data.

    Args:
        jsons (list): list of jsons to process
        f_src (str): key to F data to load ('c' or 'dff'). Defaults to 'c'.

    Returns:
        trial_dat: 3D numpy array, (trials, cells, time)
    """
    # Load fluorescence and trial-split info for each chunk
    c_trials = [load_json(j)[f_src] for j in jsons]
    s_trials = [load_json(j)['splits'] for j in jsons]
    # Process each chunk into a (trials, cells, time) array
    trial_dat = [process_data(c, s, *args, **kwargs) for c, s in zip(c_trials, s_trials)]
    # Clip all chunks to the shortest trial length before concatenating
    shortest = min(s.shape[2] for s in trial_dat)
    try:
        trial_dat = np.concatenate([a[:, :, :shortest] for a in trial_dat])
    except ValueError:
        # np.concatenate raises ValueError when cell counts differ between
        # chunks; clip to the fewest cells. (Narrowed from a bare `except`
        # that also hid unrelated failures.)
        print('WARNING LOST A CELL(S)!!!!')
        fewest = min(c.shape[1] for c in trial_dat)
        trial_dat = np.concatenate([a[:, :fewest, :shortest] for a in trial_dat])
    return trial_dat
|
cfd978a1ac74d35d857e152e6051e88b05ccf495
| 3,641,285
|
def hmc_update(context, hmc_uuid, values, session=None):
    """Updates an existing HMC instance in the Database.

    Thin delegation to the configured backend implementation (IMPL); all
    arguments are forwarded unchanged.
    """
    return IMPL.hmc_update(context, hmc_uuid, values, session)
|
943dd2359b2458429d60bb8c68ee20c40651b8fe
| 3,641,286
|
def _dense_difference(fun, x0, f0, h, one_sided, method):
    """
    Approximate the Jacobian of `fun` at `x0` in dense matrix form.

    NOTE: Inspired from: https://github.com/scipy/scipy/blob/master/scipy/optimize/_numdiff.py

    Parameters
    ----------
    fun : callable
        Function which computes a vector of residuals with call f(x, *args, **kwargs).
    x0 : array_like with shape (n,) or float
        Initial guess of the dependent variable.
    f0 : array_like
        Function value at x0 (saves one evaluation).
    h : array_like
        Per-component step sizes.
    one_sided : array_like of bool
        For '3-point': use a one-sided stencil where True.
    method : {'2-point', '3-point'}
        Method used for the finite difference scheme.

    Returns
    -------
    J : array_like, shape (m, n)
        Approximation of the Jacobian matrix.
    """
    n_out = f0.size
    n_in = x0.size
    # Build the transposed Jacobian row by row (one row per input component)
    jac_t = np.empty((n_in, n_out))
    steps = np.diag(h)  # row i perturbs only component i
    for i in range(h.size):
        if method == '2-point':
            # Forward difference: (f(x0 + h_i) - f(x0)) / h_i
            x_fwd = x0 + steps[i]
            dx = x_fwd[i] - x0[i]
            df = fun(x_fwd) - f0
        elif method == '3-point':
            if one_sided[i]:
                # One-sided second-order stencil: (-3 f0 + 4 f1 - f2) / (2 h)
                x1 = x0 + steps[i]
                x2 = x0 + 2. * steps[i]
                dx = x2[i] - x0[i]
                f1 = fun(x1)
                f2 = fun(x2)
                df = -3. * f0 + 4. * f1 - f2
            else:
                # Central difference: (f(x0 + h) - f(x0 - h)) / (2 h)
                x1 = x0 - steps[i]
                x2 = x0 + steps[i]
                dx = x2[i] - x1[i]
                f1 = fun(x1)
                f2 = fun(x2)
                df = f2 - f1
        else:
            raise ValueError("Step-method must be either '2-point' or '3-point'.")
        jac_t[i, :] = df / dx
    if n_out == 1:
        jac_t = np.ravel(jac_t)
    return jac_t.T
|
47d840a70fe2b8d22bf9fec4fdbb0e5190dec2f2
| 3,641,287
|
def alternate( name, *functions ):
    """Construct a callable that functions as the first implementation found of given set of alternatives

    if name is a function then its name will be used....
    """
    # Python 2 style string check (bytes/unicode): when the first positional
    # argument is itself a function, prepend it to the candidates and take its
    # __name__ as the alternate's name.
    if not isinstance( name, (bytes,unicode)):
        functions = (name,)+functions
        name = name.__name__
    # Create a fresh _Alternate subclass named `name` and instantiate it with
    # the candidate implementations.
    return type( name, (_Alternate,), {} )( name, *functions )
|
5e751a5332c3e8e9e37f5544e9461c772bc525ac
| 3,641,288
|
from typing import Optional
from typing import Callable
def check_messenger(messenger: Optional[Callable]):
    """
    Check that `messenger` is a `utipy.Messenger` object or `None`.
    In the latter case a `utipy.Messenger` with `verbose=False` is returned.

    Parameters
    ----------
    messenger : `utipy.Messenger` or None
        A Messenger instance to check.
        Or `None`, in which case a `utipy.Messenger` with `verbose=False` is returned.

    Returns
    -------
    `utipy.Messenger`

    Raises
    ------
    TypeError
        If `messenger` is neither `None` nor a `Messenger` instance.
    """
    if messenger is None:
        return Messenger(verbose=False)
    # Raise instead of assert so the validation survives `python -O`.
    if not isinstance(messenger, Messenger):
        raise TypeError(
            "`messenger` must be a `utipy.Messenger` or None, "
            "got {}.".format(type(messenger).__name__)
        )
    return messenger
|
b50bc38d5034e3d4d4d35d4532a504024008361f
| 3,641,289
|
import re
def convert(s):
    """Take an input string s, find all things that look like SGML character
    entities, and replace them with the Unicode equivalent.

    Function is from:
    http://stackoverflow.com/questions/1197981/convert-html-entities-to-ascii-in-python/1582036#1582036
    """
    # Numeric entities, e.g. "&#8217;" -> the code point they name
    matches = re.findall("&#\d+;", s)
    if len(matches) > 0:
        hits = set(matches)
        for hit in hits:
            name = hit[2:-1]
            try:
                entnum = int(name)
                s = s.replace(hit, unichr(entnum))
            except ValueError:
                pass
    # Named entities, e.g. "&eacute;". The ampersand entity itself must be
    # expanded LAST so freshly produced '&' characters are not re-expanded.
    # BUGFIX: the entity literal had been collapsed to a bare "&" (an
    # HTML-decoding artifact), which made the remove-from-hits check and the
    # final replace both no-ops.
    matches = re.findall("&\w+;", s)
    hits = set(matches)
    amp = "&amp;"
    if amp in hits:
        hits.remove(amp)
    for hit in hits:
        name = hit[1:-1]
        if name in htmlentitydefs.name2codepoint:
            s = s.replace(hit,
                          unichr(htmlentitydefs.name2codepoint[name]))
    s = s.replace(amp, "&")
    return s
|
0a25ee189ff107e5cd725bba1d1d20d6cb1c0f0c
| 3,641,290
|
def check_data_selection(race_id=None, category_index=None, racer_id=None):
    """Makes sure that we are trying to show data that is in the database.

    Replaces any invalid id with a random/default fallback and, when at least
    one fallback was needed, redirects to the error page.

    NOTE(review): returns None when all ids are valid — callers presumably
    treat a falsy return as "selection OK"; confirm.
    """
    errors = []
    if not race_id in Races.get_column('race_id'):
        race_id = Races.get_random_id()
        errors.append('race')
    categories = Races.get_categories(race_id)
    if category_index >= len(categories):
        category_index = 0
        errors.append('category')
    if not racer_id in Racers.get_column('RacerID'):
        # Random racer from the currently selected category
        racer_id = Results.get_random_racer_id(racer_id,
                                               categories[category_index])
        errors.append('racer')
    if errors:
        return redirect(url_for('error'))
|
6d5b4eeaf1149fdac76e83bb94a6b6d482d0d280
| 3,641,291
|
def index(request):
    """
    Root page view. Just shows a list of liveblogs.
    """
    # Order by each liveblog's newest post, descending, so blogs with
    # recent activity appear at the top.
    liveblogs = (
        Liveblog.objects
        .annotate(max_created=Max("posts__created"))
        .order_by("-max_created")
    )
    return render(request, "index.html", {"liveblogs": liveblogs})
|
518c64db21bd843dc34513c4d5677a18e5eac319
| 3,641,293
|
def model_fn_builder(
    bert_config,
    num_labels,
    init_checkpoint,
    learning_rate,
    num_train_steps,
    num_warmup_steps,
    use_tpu,
    use_one_hot_embeddings
):
    """Returns `model_fn` closure for TPUEstimator.

    Args:
        bert_config: BERT configuration for the underlying encoder.
        num_labels: number of classification labels.
        init_checkpoint: optional checkpoint path to warm-start from.
        learning_rate: peak learning rate for the optimizer.
        num_train_steps: total training steps (drives the LR schedule).
        num_warmup_steps: LR warmup steps.
        use_tpu: whether to build TPU-specific checkpoint-init scaffolding.
        use_one_hot_embeddings: forwarded to `create_model`.
    """
    def model_fn(features, labels, mode, params):  # pylint: disable=unused-argument
        """The `model_fn` for TPUEstimator."""
        tf.logging.info("*** Features ***")
        for name in sorted(features.keys()):
            tf.logging.info(" name = %s, shape = %s" % (name, features[name].shape))
        # Three parallel input encodings (suffixes 0/1/2), each with its own
        # ids/mask/segment tensors. NOTE(review): presumably a triplet of text
        # segments — confirm against the input pipeline.
        input_ids0 = features["input_ids0"]
        input_mask0 = features["input_mask0"]
        segment_ids0 = features["segment_ids0"]
        input_ids1 = features["input_ids1"]
        input_mask1 = features["input_mask1"]
        segment_ids1 = features["segment_ids1"]
        input_ids2 = features["input_ids2"]
        input_mask2 = features["input_mask2"]
        segment_ids2 = features["segment_ids2"]
        label_ids = features["label_ids"]
        is_training = (mode == tf.estimator.ModeKeys.TRAIN)
        (total_loss, per_example_loss, logits, probabilities) = create_model(
            bert_config,
            is_training,
            input_ids0,
            input_mask0,
            segment_ids0,
            input_ids1,
            input_mask1,
            segment_ids1,
            input_ids2,
            input_mask2,
            segment_ids2,
            label_ids,
            num_labels,
            use_one_hot_embeddings)
        tvars = tf.trainable_variables()
        initialized_variable_names = {}
        scaffold_fn = None
        if init_checkpoint:
            # Warm-start every trainable variable that exists in the checkpoint
            (assignment_map, initialized_variable_names
             ) = modeling.get_assignment_map_from_checkpoint(tvars, init_checkpoint)
            if use_tpu:
                # On TPU, checkpoint init must happen inside the Scaffold factory
                def tpu_scaffold():
                    tf.train.init_from_checkpoint(init_checkpoint, assignment_map)
                    return tf.train.Scaffold()
                scaffold_fn = tpu_scaffold
            else:
                tf.train.init_from_checkpoint(init_checkpoint, assignment_map)
        tf.logging.info("**** Trainable Variables ****")
        for var in tvars:
            init_string = ""
            if var.name in initialized_variable_names:
                init_string = ", *INIT_FROM_CKPT*"
            tf.logging.info(" name = %s, shape = %s%s", var.name, var.shape,
                            init_string)
        output_spec = None
        if mode == tf.estimator.ModeKeys.TRAIN:
            train_op = optimization.create_optimizer(
                total_loss, learning_rate, num_train_steps, num_warmup_steps, use_tpu)
            output_spec = tf.contrib.tpu.TPUEstimatorSpec(
                mode=mode,
                loss=total_loss,
                train_op=train_op,
                scaffold_fn=scaffold_fn)
        elif mode == tf.estimator.ModeKeys.EVAL:
            def metric_fn(per_example_loss, label_ids, logits):
                # Accuracy over argmax predictions plus mean per-example loss
                predictions = tf.argmax(logits, axis=-1, output_type=tf.int32)
                accuracy = tf.metrics.accuracy(label_ids, predictions)
                loss = tf.metrics.mean(per_example_loss)
                return {
                    "eval_accuracy": accuracy,
                    "eval_loss": loss,
                }
            eval_metrics = (metric_fn, [per_example_loss, label_ids, logits])
            output_spec = tf.contrib.tpu.TPUEstimatorSpec(
                mode=mode,
                loss=total_loss,
                eval_metrics=eval_metrics,
                scaffold_fn=scaffold_fn)
        else:
            # PREDICT mode: emit class probabilities only
            output_spec = tf.contrib.tpu.TPUEstimatorSpec(
                mode=mode, predictions=probabilities, scaffold_fn=scaffold_fn)
        return output_spec
    return model_fn
|
088e636bf21caa316995dbd69b680dedd42ca21a
| 3,641,294
|
def strarray(*args):
    """strarray(strarray_t array, size_t array_size, int code) -> char"""
    # SWIG-generated thin wrapper; all work happens in the native _idaapi module.
    return _idaapi.strarray(*args)
|
9dfb42d81d307a32f201f1b55a1ef81cbede7c27
| 3,641,295
|
def _single_value_set(target_list, value):
    """
    Return true if this constraint has only one value and it is
    this one.
    """
    if len(target_list) != 1:
        return False
    return target_list[0] == value
|
472ebe1aa9726c70642423d05fa55723496e9bc5
| 3,641,296
|
def get_positive_input(message, float_parse=False, allow_zero=False):
    """ Obtains and returns a positive number from the user, retrying until valid.

    Preconditions:
        message: non-empty string
        float_parse: bool defaulted to False
        allow_zero: bool defaulted to False
    Parameters:
        message: The message that is printed when obtaining the input.
        float_parse: Whether to parse input to float or int
        allow_zero: Whether to allow zero as an input
    Postconditions:
        num: The valid inputted number.
    """
    # Sign shown in the prompt depends on whether zero is acceptable
    sign = ">=" if allow_zero else ">"
    prompt = "(must be " + sign + " 0), " + message
    parser = float if float_parse else int
    # Loop (instead of the original recursion) until a valid value is entered
    while True:
        try:
            num = parser(input(prompt).strip())
            # Negative is never allowed; zero only when allow_zero is set
            if num < 0 or (num == 0 and not allow_zero):
                raise ValueError()
            return num
        except ValueError:
            print("Not a valid input.")
|
17982ff069907464c70df7b6efb1f42d3811962e
| 3,641,297
|
def hellinger(p, q):
    """Compute Hellinger distance between 2 distributions."""
    sqrt_diff = np.sqrt(p) - np.sqrt(q)
    return np.linalg.norm(sqrt_diff) / np.sqrt(2)
|
f976a96af2e4acaf81961b93f2cfe7a868d912e3
| 3,641,298
|
def usd_currency(currency_df: pd.DataFrame, value: int, date: str) -> float:
    """
    Compute VALUE/(USD/SYMBOL).

    Parameters
    ----------
    currency_df : pd.DataFrame
        USD/SYMBOL df, indexed by date with a ``usd`` column.
    value : int
        Value of product
    date : str
        Currency quote day

    Returns
    ---------
    float
        Computed value
    """
    usd_rate = currency_df.loc[date].usd
    return value / usd_rate
|
15ce0d8f9db3b5dc1e1a684dc27daa63d163853b
| 3,641,299
|
def svn_wc_adm_probe_retrieve(*args):
    """svn_wc_adm_probe_retrieve(svn_wc_adm_access_t associated, char path, apr_pool_t pool) -> svn_error_t"""
    # Thin SWIG-generated wrapper: forwards all arguments unchanged to
    # the native libsvn ``_wc`` extension module.
    return _wc.svn_wc_adm_probe_retrieve(*args)
|
2716094e31c596212b5ccd7833b9ae10ef52d44e
| 3,641,300
|
def flights_preclean(df):
    """
    Input: Raw dataframe of Flights table.
    Output: Cleaned flights table:
        - Remove cancelled rows, made available in new dataframe "df_can"
        - Drop bookkeeping columns that carry no modelling signal
        - Fill null values in the delay columns with 0
        - Drop remaining null values
    """
    global df_can
    # Keep the cancelled flights around for separate analysis.
    df_can = df[df.cancelled == 1].copy()
    print("Removed cancelled flights - now available in dataframe 'df_can'")
    cleaned = df[df.cancelled == 0]
    drop_cols = ['Unnamed: 0', 'branded_code_share', 'mkt_carrier',
                 'cancelled', 'cancellation_code', 'flights', 'air_time',
                 'first_dep_time', 'total_add_gtime', 'longest_add_gtime',
                 'no_name']
    cleaned = cleaned.drop(columns=drop_cols)
    # A missing delay value means "no delay of that kind" -> 0.
    delay_cols = ['carrier_delay', 'weather_delay', 'nas_delay',
                  'security_delay', 'late_aircraft_delay']
    for delay_col in delay_cols:
        cleaned[delay_col] = cleaned[delay_col].fillna(value=0)
    return cleaned.dropna()
|
61dcfa6afd6ec7dd0abb5525187938d6ab978996
| 3,641,301
|
def convert_spectral_kernel_quint(sequences, list_seq_to_id):
    """For each sequence, count how many times each 5-mer in
    ``list_seq_to_id`` occurs and return the counts as feature vectors.

    Args:
        sequences: iterable of sequences (strings, or lists of
            single-character strings).
        list_seq_to_id: vocabulary of 5-mers defining the feature order.

    Returns:
        list[list[int]]: one count vector per input sequence, ordered
        like ``list_seq_to_id``.
    """
    vocab = set(list_seq_to_id)
    features = []
    for sequence in sequences:
        counts = {kmer: 0 for kmer in list_seq_to_id}
        for i in range(len(sequence) - 4):
            # join works for both plain strings and lists of characters
            kmer = "".join(sequence[i:i + 5])
            # Skip 5-mers missing from the vocabulary instead of raising
            # KeyError (the original crashed on any unseen 5-mer).
            if kmer in vocab:
                counts[kmer] += 1
        features.append([counts[k] for k in list_seq_to_id])
    return features
|
49f727dd26822834bad2c9a448136288dc1c426c
| 3,641,302
|
def grad_of_marginal_fit(c, h, tau, epsilon):
    """Gradient of the marginal-relaxation terms of the objective.

    Differentiates, w.r.t. the potential *h* (f or g in practice), the
    terms of https://arxiv.org/pdf/1910.12958.pdf, left-hand side of
    Eq. 15 (the terms involving phi_star).

    Args:
        c: jnp.ndarray, target marginal (either a or b in practice).
        h: jnp.ndarray, potential (either f or g in practice).
        tau: float, strength (in ]0,1]) of regularizer w.r.t. marginal.
        epsilon: regularization.

    Returns:
        A vector of the same size as c or h.
    """
    # tau == 1 means hard marginal constraints: gradient is the marginal.
    if tau == 1.0:
        return c
    rho = epsilon * tau / (1 - tau)
    scaling = derivative_phi_star(-h, rho)
    # Zero-mass entries of c contribute nothing to the gradient.
    return jnp.where(c > 0, c * scaling, 0.0)
|
38b6b57766c97f8eda72162b6919e48c235cd880
| 3,641,303
|
def SuggestField(**kwargs):
    """Text field with keyword and completion sub-fields.

    Query 'foo' to get the TextField, 'foo.raw' to get the
    KeywordField, or 'foo.suggest' to get the CompletionField.
    """
    sub_fields = {
        'raw': fields.KeywordField(),
        'suggest': fields.CompletionField(),
    }
    return fields.TextField(fields=sub_fields, **kwargs)
|
57f673bbc310a22432178ee078c8f5eec2355e12
| 3,641,304
|
import math
def distance_on_unit_sphere(FoLat, FoLng, ToLat, ToLng):
    """Great-circle arc length (in radians) between two lat/lng points
    on a unit sphere.

    Remember to multiply the result by the radius of the earth in your
    favorite set of units to get a length.

    Args:
        FoLat, FoLng: origin latitude/longitude in degrees.
        ToLat, ToLng: destination latitude/longitude in degrees.

    Returns:
        float: arc length in radians, in [0, pi].
    """
    # Convert latitude and longitude to spherical coordinates in
    # radians (phi measured from the pole, theta is longitude).
    phi1 = math.radians(90.0 - FoLat)
    phi2 = math.radians(90.0 - ToLat)
    theta1 = math.radians(FoLng)
    theta2 = math.radians(ToLng)
    # Spherical law of cosines:
    #   cos(arc) = sin(phi) sin(phi') cos(theta - theta')
    #            + cos(phi) cos(phi')
    cos = (math.sin(phi1) * math.sin(phi2) * math.cos(theta1 - theta2)
           + math.cos(phi1) * math.cos(phi2))
    # Clamp to acos's domain: floating-point rounding can push the
    # value just past +/-1 (e.g. for identical points), which made the
    # original raise ValueError.
    cos = max(-1.0, min(1.0, cos))
    return math.acos(cos)
|
98c9294697e36c5b45cd165ba96529187f2750de
| 3,641,305
|
import pandas # noqa
def check_pandas_support(caller_name):
    """Raise ImportError with detailed error message if pandas is not
    installed.
    Plot utilities like :func:`fetch_openml` should lazily import
    pandas and call this helper before any computation.
    Parameters
    ----------
    caller_name : str
        The name of the caller that requires pandas.
    Returns
    -------
    module
        The imported ``pandas`` module.
    Raises
    ------
    ImportError
        If pandas is not installed.
    """
    try:
        # Import lazily *inside* the try block: the original relied on
        # a module-level import, so this except clause could never
        # fire and the decorated error message was unreachable.
        import pandas
        return pandas
    except ImportError as e:
        raise ImportError(
            "{} requires pandas.".format(caller_name)
        ) from e
|
f3d484bb3a5dbca43a81cca83b7343e1fcd7cbcf
| 3,641,306
|
def summarize_sequence(summary_type, hidden, d_model, n_head, d_head, dropout,
                       dropatt, input_mask, is_training, initializer,
                       scope=None, reuse=None):
  """Summarize hidden sequence into a vector.

  Args:
    summary_type: str, pooling strategy -- one of "last", "first",
      "max", "mean" or "attn".
    hidden: hidden-state sequence to pool.
      NOTE(review): "last"/"first" slice axis 1 (hidden[:, -1]) while
      "attn" reads the batch size from tf.shape(hidden)[1]; confirm
      the expected tensor layout for each mode against the callers.
    d_model: int, output dimension of the final projection.
    n_head: int, number of attention heads (used only for "attn").
    d_head: int, dimension per attention head (used only for "attn").
    dropout: float, dropout rate for the attention summary.
    dropatt: float, attention-probability dropout rate.
    input_mask: optional mask tensor; nonzero entries mark positions to
      be excluded from "max"/"mean"/"attn" pooling. May be None.
    is_training: bool, whether dropout is active.
    initializer: initializer for the variables created here.
    scope: optional variable scope name. Defaults to
      "sequnece_summary" (typo kept -- renaming would break loading of
      existing checkpoints).
    reuse: whether to reuse variables in the scope.

  Returns:
    The pooled summary after a dense layer with tanh activation.

  Raises:
    ValueError: if summary_type is not one of the supported modes.
  """
  tf.logging.info("===== Sequence summary =====")
  tf.logging.info("  - input_mask %s", input_mask)
  tf.logging.info("  - summary_type %s", summary_type)
  tf.logging.info("============================")
  with tf.variable_scope(scope, "sequnece_summary", reuse=reuse):
    if summary_type == "last":
      # Use the hidden state at the final position.
      summary = hidden[:, -1]
    elif summary_type == "first":
      # Use the hidden state at the first position.
      summary = hidden[:, 0]
    elif summary_type == "max":
      if input_mask is None:
        summary = tf.reduce_max(hidden, axis=1)
      else:
        # Push masked positions to a large negative value so they can
        # never win the max.
        neg_pad = -1e10 * input_mask[:, :, None]
        summary = tf.reduce_max(hidden + neg_pad, axis=1)
    elif summary_type == "mean":
      if input_mask is None:
        summary = tf.reduce_mean(hidden, axis=1)
      else:
        # Average only over unmasked positions; the 1e-6 guards
        # against division by zero for fully masked rows.
        inp_mask = (1. - input_mask)[:, :, None]
        summary = (tf.reduce_sum(hidden * inp_mask, axis=1) /
                   (1e-6 + tf.reduce_sum(inp_mask, axis=1)))
    elif summary_type == "attn":
      # Attention pooling: a learned bias vector acts as the single
      # query attending over the whole sequence.
      bsz = tf.shape(hidden)[1]
      summary_bias = tf.get_variable("summary_bias", [d_model],
                                     dtype=hidden.dtype,
                                     initializer=initializer)
      summary_bias = tf.tile(summary_bias[None, None], [bsz, 1, 1])
      if input_mask is not None:
        # [B X T] -> [B x N x F x T]
        input_mask = input_mask[:, None, None, :]
      summary, _ = multihead_attn(summary_bias, hidden, hidden, input_mask,
                                  d_model, n_head, d_head, dropout, dropatt,
                                  is_training, initializer, residual=False)
      # Drop the singleton query dimension.
      summary = summary[:, 0]
    else:
      raise ValueError("Unsupported summary type {}".format(summary_type))
    # use another projection with `tanh` activation
    summary = tf.layers.dense(
        summary,
        d_model,
        activation=tf.tanh,
        use_bias=True,
        kernel_initializer=initializer,
        name="summary")
  return summary
|
50dd0e72c15adfa522847cb822e897c9892cd1cf
| 3,641,308
|
def encode_line(line, vocab):
    """Encode *line* into integer ids using *vocab*.

    Leading/trailing whitespace is stripped first; characters missing
    from *vocab* map to the '<UNK>' id.

    Returns:
        tuple: (list of ids, length of that list)
    """
    stripped = line.strip()
    ids = [vocab.get(ch, vocab['<UNK>']) for ch in stripped]
    return ids, len(ids)
|
feb14d86dd6c219d57cffc4cd9d90d16c4e9c987
| 3,641,309
|
import math
def get_like_from_mats(ky_mat, l_mat, alpha, name):
    """ compute the likelihood from the covariance matrix
    :param ky_mat: the covariance matrix (used only for its dimension)
    :param l_mat: triangular factor whose diagonal supplies the
        log-determinant term
    :param alpha: precomputed solve vector multiplied with the labels
    :param name: key into the global training-label registry
    :return: float, likelihood
    """
    labels = _global_training_labels[name]
    # Standard Gaussian log-likelihood: data-fit term, log-determinant
    # term, and normalization constant.
    data_fit = -0.5 * np.matmul(labels, alpha)
    log_det = np.sum(np.log(np.diagonal(l_mat)))
    norm_const = math.log(2 * np.pi) * ky_mat.shape[1] / 2
    return data_fit - log_det - norm_const
|
8fb7842547ecee25425bdaf920ff69d3386b920b
| 3,641,310
|
def engulfing(data: pd.DataFrame):
    """
    Detect engulfing candlestick patterns and write them into *data*.

    Adds an ``engulfing`` column to *data* in place (returns None).
    Positive values mark a bullish (long-side) engulfing, negative
    values a bearish (short-side) one, and 0 means "no pattern".
    The magnitude is the ratio of the current candle's body to the
    previous candle's body, returned as a float so the strength of the
    engulfing can later be normalized for machine learning.
    """
    def cal(ser):
        # Per-row classifier. `ser` carries the current body (raise_0),
        # the previous body (raise_1), the previous open/close, and a
        # 5-bar average absolute body size (avg_5_change_abs).
        result = 0
        if ser.raise_0 > 0 >= ser.raise_1 and ser.open <= ser.close_1 and ser.close >= ser.open_1:
            # Current candle is going up,long
            # If the previous body is flat (raise_1 == 0), fall back to
            # the 5-bar average so the ratio stays defined.
            rr = abs(ser.raise_0) / abs(ser.raise_1) if 0 > ser.raise_1 else ser.raise_0/ser.avg_5_change_abs
            result = rr if rr > 1 else 0
        elif ser.raise_0 < 0 < ser.raise_1 and ser.open >= ser.close_1 and ser.close <= ser.open_1:
            # Current candle is going down, short
            # NOTE(review): the `else` arm of this ternary is
            # unreachable -- the elif guard already ensures raise_1 > 0.
            rr = abs(ser.raise_0) / abs(ser.raise_1) if 0 < ser.raise_1 else ser.raise_0/ser.avg_5_change_abs
            result = -rr if rr > 1 else 0
        return result
    # Work on a copy so only the final `engulfing` column is written back.
    data_copy = data.copy()
    # Signed candle body: close - open.
    data_copy["raise_0"] = data_copy["close"] - data_copy["open"]
    # Previous candle's body and open/close, aligned to the current row.
    data_copy["raise_1"] = data_copy["raise_0"].shift(1)
    data_copy["open_1"] = data_copy["open"].shift(1)
    data_copy["close_1"] = data_copy["close"].shift(1)
    # get recent 5 average price change, in order to calculate if prev day price is zero change, we still won't miss it
    data_copy["avg_5_change_abs"] = data_copy.raise_0.rolling(window=5).apply(lambda ser: ser.abs().mean())
    data_copy["engulfing"] = data_copy[["raise_0", "raise_1", "open", "open_1", "close", "close_1", "avg_5_change_abs"]].apply(cal, axis=1)
    # print(data_copy.query("raise_1==0").tail(20))
    data["engulfing"] = data_copy["engulfing"]
|
2974a62afa6b77ae2d8d02b35d7293362dd90927
| 3,641,312
|
def filter_matches(kp1, kp2, matches, ratio = 0.75):
    """
    Apply Lowe's ratio test to knn match candidates.
    :param kp1: raw keypoint 1
    :param kp2: raw keypoint 2
    :param matches: raw matches (pairs of best/second-best candidates)
    :param ratio: filtering ratio
    :return: filtered keypoint 1, filtered keypoint 2, keypoint pairs
    """
    good1, good2 = [], []
    for pair in matches:
        # Only well-formed (best, second-best) pairs are considered.
        if len(pair) != 2:
            continue
        best, second = pair
        # Keep the match only when the best candidate is clearly better
        # than the runner-up.
        if best.distance < second.distance * ratio:
            good1.append(kp1[best.queryIdx])
            good2.append(kp2[best.trainIdx])
    p1 = np.float32([kp.pt for kp in good1])
    p2 = np.float32([kp.pt for kp in good2])
    return p1, p2, list(zip(good1, good2))
|
a54d96e092019b9629852b1bf57511f9994aba46
| 3,641,314
|
import itertools
def add_derived_columns(
data: pd.DataFrame,
differences: bool = True,
second_differences: bool = True,
multiplications: bool = True,
rolling_means: int | None = 10,
rolling_stds: int | None = 10,
mean_distances: bool = True,
) -> pd.DataFrame:
"""This will create many columns that can be valuable for making predictions like difference, or rolling mean or
distance from average. Computed columns will be appened to original data. It will process all the columns,
so a lot of redundant data will be created. It is necessary do some feature extraction afterwards to remove
noncorrelated columns.
Args:
data (pd.DataFrame): Data that we want to extract more information from.
differences (bool, optional): Compute difference between n and n-1 sample. Defaults to True.
second_differences (bool, optional): Compute second difference. Defaults to True.
multiplications (bool, optional): Column multiplicated with other column. Defaults to True.
rolling_means (int | None, None), optional): Rolling mean with defined window. Defaults to 10.
rolling_stds (int | None, optional): Rolling std with defined window. Defaults to 10.
mean_distances (bool, optional): Distance from average. Defaults to True.
Returns:
pd.DataFrame: Data with more columns, that can have more informations,
than original data. Number of rows can be little bit smaller. Data has the same type as input.
Example:
>>> import mydatapreprocessing as mdp
>>> data = pd.DataFrame(
... [mdp.generate_data.sin(n=100), mdp.generate_data.ramp(n=100)]
... ).T
...
>>> extended = add_derived_columns(data, differences=True, rolling_means=32)
"""
results = [data]
if differences:
results.append(
pd.DataFrame(np.diff(data.values, axis=0), columns=[f"{i} - Difference" for i in data.columns],)
)
if second_differences:
results.append(
pd.DataFrame(
np.diff(data.values, axis=0, n=2), columns=[f"{i} - Second difference" for i in data.columns],
)
)
if multiplications:
combinations = list(itertools.combinations(data.columns, 2))
combinations_names = [f"Multiplicated {i}" for i in combinations]
multiplicated = np.zeros((len(data), len(combinations)))
for i, j in enumerate(combinations):
multiplicated[:, i] = data[j[0]] * data[j[1]]
results.append(pd.DataFrame(multiplicated, columns=combinations_names))
if rolling_means:
results.append(
pd.DataFrame(
np.mean(rolling_windows(data.values.T, rolling_means), axis=2).T,
columns=[f"{i} - Rolling mean" for i in data.columns],
)
)
if rolling_stds:
results.append(
pd.DataFrame(
np.std(rolling_windows(data.values.T, rolling_stds), axis=2).T,
columns=[f"{i} - Rolling std" for i in data.columns],
)
)
if mean_distances:
mean_distanced = np.zeros(data.T.shape)
for i in range(data.shape[1]):
mean_distanced[i] = data.values.T[i] - data.values.T[i].mean()
results.append(pd.DataFrame(mean_distanced.T, columns=[f"{i} - Mean distance" for i in data.columns]))
min_length = min(len(i) for i in results)
return pd.concat([i.iloc[-min_length:].reset_index(drop=True) for i in results], axis=1)
|
080f3853f67ead678c55a3c95bf2a1c19614452c
| 3,641,315
|
from typing import List
def parse_query(
    query: List[str],
    format,
    use_youtube,
    generate_m3u,
    lyrics_provider,
    threads,
    path_template,
) -> List[SongObject]:
    """
    Parse every search request in *query* and return the resulting
    song objects, de-duplicated by target file name.
    """
    all_songs = []
    for request in query:
        # Tracking files are resumed elsewhere; skip them here.
        if request.endswith(".spotdlTrackingFile"):
            continue
        all_songs.extend(
            parse_request(
                request,
                format,
                use_youtube,
                generate_m3u,
                lyrics_provider,
                threads,
                path_template,
            )
        )
    # linefeed to visually separate output for each query
    print()
    # Keep only the first song seen for each file name (insertion order
    # of the dict preserves the original ordering).
    unique = {}
    for song in all_songs:
        if song.file_name not in unique:
            unique[song.file_name] = song
    return list(unique.values())
|
a58e5bd6acf2c12de7eb7fae7824aab924188c26
| 3,641,316
|
def assert_address_book(address_book):
    """Fixture returning an object providing custom address book asserts."""
    # Wrap the address book in the AddressBookAssertions test helper so
    # tests get domain-specific assertion methods.
    return icemac.addressbook.testing.AddressBookAssertions(address_book)
|
fd9197472c86a59dd1c52dad14febe1a0b318c85
| 3,641,318
|
def make_shell_context():
    """Return the namespace exposed in an interactive shell session."""
    # NOTE(review): presumably registered as a Flask shell context
    # processor -- confirm against the app factory.
    # Expose the database handle plus the Doi/Url/FBRequest models so
    # they are usable in the shell without manual imports.
    db = get_db()
    return {"db": db, "Doi": Doi, "Url": Url, "FBRequest": FBRequest}
|
996988c06aa8039c7689126360ce0fea886ab392
| 3,641,319
|
import re
def get_version():
    """
    Extracts the version number from the version.py file.

    Returns:
        str: the version string.

    Raises:
        RuntimeError: if no ``__version__`` assignment is found.
    """
    VERSION_FILE = 'fleming/version.py'
    # Context manager guarantees the handle is closed (the original
    # left the file open after reading).
    with open(VERSION_FILE, 'rt') as version_file:
        contents = version_file.read()
    mo = re.search(r'^__version__ = [\'"]([^\'"]*)[\'"]', contents, re.M)
    if mo:
        return mo.group(1)
    raise RuntimeError('Unable to find version string in {0}.'.format(VERSION_FILE))
|
b50df998254d83bd48d7a5c0863ba89b29ab529b
| 3,641,320
|
import math
def distance(s1, s2):
    """ Euclidean distance between two sequences. Supports different lengths.
    If the two series differ in length, compare the last element of the
    shortest series to the remaining elements in the longer series. This
    keeps the result usable as an upper bound for DTW.
    :param s1: Sequence of numbers
    :param s2: Sequence of numbers
    :return: Euclidean distance
    """
    total = sum((a - b) ** 2 for a, b in zip(s1, s2))
    shared = min(len(s1), len(s2))
    # Pair the tail of the longer sequence with the last shared element
    # of the shorter one.
    if len(s1) > len(s2):
        last = s2[shared - 1]
        total += sum((a - last) ** 2 for a in s1[shared:])
    elif len(s1) < len(s2):
        last = s1[shared - 1]
        total += sum((last - b) ** 2 for b in s2[shared:])
    return math.sqrt(total)
|
61c308da89b98b4bbde1bba690c86559fd5e1400
| 3,641,322
|
import re
def valid_attribute(attr_filter_key, attr_filter_val, hit):
    """Validates the hit according to a filter attribute.

    Looks up ``attr_filter_key`` in the attribute string held in
    ``hit[8]`` and compares its value with ``attr_filter_val``.  When
    either filter is the literal string "None", no filtering is
    requested and the hit is accepted unconditionally.
    """
    if attr_filter_key == "None" or attr_filter_val == "None":
        # No attribute filter requested.
        return True
    try:
        # If the key is absent or malformed, re.split yields a single
        # element and indexing [1] raises IndexError below.
        raw = re.split("; " + attr_filter_key + " ", hit[8])[1]
        hit_attrib_val = raw.split(';')[0].strip('"\'').rstrip('\"')
    except IndexError:
        hit_attrib_val = "not.found"
    # Annotation continues only when the value matches the filter.
    return attr_filter_val == hit_attrib_val
|
c7480008f24e011f0803d82f1243a5d00c5a4030
| 3,641,324
|
def encrypt_message(kx, ky, message):
    """
    Encrypts a message using ECC and AES-256.

    Performs an ECIES exchange against the supplied public key, hashes
    the resulting shared secret with SHA3-512 to derive the AES key and
    IV, then encrypts the OAEP-padded message with AES-256 in CBC mode.

    NOTE:
        Each call performs a fresh key exchange, so the same plaintext
        will not have the same ciphertext when encrypted twice.  Keep
        this in mind if you require reproducible behavior.

    :param kx: Public key x-coordinate (int)
    :param ky: Public key y-coordinate (int)
    :param message: Message (bytes)
    :return: Tuple (point coordinates (x, y) to transmit for key
        recovery, encrypted message blocks)
    """
    # ECIES exchange: r is the point to transmit to the receiver,
    # s is the derived shared secret.
    ecies = ecc.ECEIS(ecc.CURVE)
    r, s = ecies.exchange(ecc.ECPublicKey(ecc.AffineCurvePoint(kx, ky, ecc.CURVE)))
    # Hash the shared secret: bytes 0-31 become the AES-256 key and
    # bytes 32-47 the CBC IV.
    s = str(s).encode('utf-8')
    key = sha.SHA3_512(s).digest()
    message_encryptor = Encrypter(mode=AESModeOfOperationCBC(key[:32], iv=key[32:48]))
    # OAEP-pad the plaintext, then flush the encrypter (the trailing
    # feed() call) to emit the final padded block.
    encrypted_blocks = message_encryptor.feed(oaep.oaep_pad(message))
    encrypted_blocks += message_encryptor.feed()
    # The receiver uses this point to re-derive the shared secret.
    encrypted_key = r.x, r.y
    return encrypted_key, encrypted_blocks
|
e4bd462e8724a85ed0e183e048203ff23e349f34
| 3,641,326
|
def mirror_notes(key_position: int) -> int:
    """
    Return the mirrored value of the given key position.

    Arguments
    ---------
    key_position : int
        -> the key position to mirror

    Returns
    -------
    int
        -> the key position after mirroring (512 - key_position)
    """
    return 512 - key_position
|
03ad894eca67405bb79cbf6ea1ecef12b19958ed
| 3,641,328
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.