| content (string, 35 to 762k chars) | sha1 (string, 40 chars) | id (int64, 0 to 3.66M) |
|---|---|---|
def convert_coevalcube_to_sphere_surface_inpdict(inpdict):
"""
-----------------------------------------------------------------------------
    Convert a cosmological coeval cube at a given resolution (in physical comoving
distance) to HEALPIX coordinates of a specified nside covering the whole sky
or coordinates covering a spherical patch. Wrapper for
convert_coevalcube_to_sphere_surface()
Inputs:
inpdict [dictionary] Dictionary of parameters for tiling cosmological
coeval cubes to healpix lightcone cubes. It consists of the
following keys and values:
inpcube [numpy array] Cosmological cube in three dimensions
of comoving distance
inpres [scalar or tuple or list or numpy array] Input cube
pixel resolution (in comoving Mpc). If specified as
scalar, it is applied to all three dimensions.
Otherwise a three-element tuple, list or numpy array
must be specified one for each dimension
nside [scalar] HEALPIX nside parameter for output HEALPIX
map. If set theta_phi will be ignored.
theta_phi [numpy array] nsrc x 2 numpy array of theta and phi
(in degrees) at which the lightcone surface should
be evaluated. One and only one of nside or theta_phi
must be specified.
freq [scalar] Frequency (in Hz) to be processed. One and
                            only one of inputs freq or redshift (see below) must be set
                            in order to determine the redshift at which this
processing is to take place. Redshift is necessary
to determine the cosmology. If set to None, redshift
must be specified (see below)
redshift [scalar] Redshift to be processed. One and only one
of inputs freq (see above) or redshift must be
specified. If set to None, freq must be specified
(see above)
method [string] Method of interpolation from cube to
spherical surface pixels. Accepted values are
'nearest_rounded' (fastest but not accurate), and
those accepted by the input keyword method in
scipy.interpolate.interpn(), namely, 'linear' and
'nearest', and 'splinef2d'. 'splinef2d' is only
supported for 2-dimensional data. Default='linear'
rest_freq [scalar] Rest frame frequency (in Hz) to be used in
determination of redshift. Will be used only if
freq is set and redshift is set to None.
Default=1420405751.77 Hz (the rest frame frequency
of neutral Hydrogen spin flip transition)
cosmo [instance of class astropy.cosmology] Instance of
class astropy.cosmology to determine comoving
distance for a given redshift. By default (None) it
is set to WMAP9
Output:
Stacked lightcone surfaces covering spherical patch (whole sky using HEALPIX
if nside is specified) or just at specified theta and phi coordinates. It is
of shape npix
-----------------------------------------------------------------------------
"""
try:
inpdict
except NameError:
raise NameError('Input inpdict must be provided')
if not isinstance(inpdict, dict):
raise TypeError('Input inpdict must be a dictionary')
    if ('inpcube' not in inpdict) or ('inpres' not in inpdict):
        raise NameError('Inputs inpcube and inpres must be specified in inpdict')
    inpcube = inpdict['inpcube']
    inpres = inpdict['inpres']
    nside = inpdict.get('nside', None)
    theta_phi = inpdict.get('theta_phi', None)
    freq = inpdict.get('freq', None)
    redshift = inpdict.get('redshift', None)
    cosmo = inpdict.get('cosmo', None)
    method = inpdict.get('method', 'linear')
    rest_freq = inpdict.get('rest_freq', CNST.rest_freq_HI)
return convert_coevalcube_to_sphere_surface(inpcube, inpres, nside=nside, theta_phi=theta_phi, freq=freq, redshift=redshift, method=method, rest_freq=rest_freq, cosmo=cosmo)
|
e99f4ca3d6ff1a76ce95c4e929521ccf857148df
| 3,637,734
|
def postmsg(message):
"""!Sends the message to the jlogfile logging stream at level INFO.
This is identical to:
@code
jlogger.info(message).
@endcode
@param message the message to log."""
return jlogger.info(message)
|
b7cad54650fd769ef9c56f8a03e68d0ef9fa485d
| 3,637,735
|
def dec_lap_pyr(x, levs):
""" constructs batch of 'levs' level laplacian pyramids from x
Inputs:
x -- BxCxHxW pytorch tensor
levs -- integer number of pyramid levels to construct
Outputs:
pyr -- a list of pytorch tensors, each representing a pyramid level,
pyr[0] contains the finest level, pyr[-1] the coarsest
"""
pyr = []
cur = x # Initialize approx. coefficients with original image
for i in range(levs):
# Construct and store detail coefficients from current approx. coefficients
h = cur.size(2)
w = cur.size(3)
x_small = F.interpolate(cur, (h // 2, w // 2), mode='bilinear')
x_back = F.interpolate(x_small, (h, w), mode='bilinear')
lap = cur - x_back
pyr.append(lap)
# Store new approx. coefficients
cur = x_small
pyr.append(cur)
return pyr
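# Illustrative usage sketch (not part of the original snippet): collapsing the
# pyramid back into the input tensor, assuming torch.nn.functional is imported
# as F exactly as dec_lap_pyr assumes.
def syn_lap_pyr(pyr):
    """Reconstruct the original tensor from a pyramid produced by dec_lap_pyr."""
    cur = pyr[-1]  # coarsest approximation
    for lap in reversed(pyr[:-1]):
        h, w = lap.size(2), lap.size(3)
        # Upsample the running approximation and add back the stored detail
        cur = lap + F.interpolate(cur, (h, w), mode='bilinear')
    return cur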
|
d0b48660b194c71e34e7f838525d0814081939fb
| 3,637,736
|
def mif2amps(sh_mif_file, working_dir, dsi_studio_odf="odf8"):
"""Convert a MRTrix SH mif file to a NiBabel amplitudes image.
Parameters:
===========
sh_mif_file : str
path to the mif file with SH coefficients
"""
verts, _ = get_dsi_studio_ODF_geometry(dsi_studio_odf)
num_dirs, _ = verts.shape
hemisphere = num_dirs // 2
directions = verts[:hemisphere]
x, y, z = directions.T
_, theta, phi = cart2sphere(x, y, -z)
dirs_txt = op.join(working_dir, "directions.txt")
np.savetxt(dirs_txt, np.column_stack([phi, theta]))
odf_amplitudes_nii = op.join(working_dir, "amplitudes.nii")
popen_run(["sh2amp", "-quiet", "-nonnegative", sh_mif_file, dirs_txt, odf_amplitudes_nii])
if not op.exists(odf_amplitudes_nii):
raise FileNotFoundError("Unable to create %s", odf_amplitudes_nii)
amplitudes_img = nb.load(odf_amplitudes_nii)
return amplitudes_img, directions
|
2defa9d0656bc6c884e6f0591041efdea743db95
| 3,637,738
|
import struct
import array
def write_nifti_header(hdrname, hdr, newfile=True):
#*************************************************
"""
filename is the name of the nifti header file.
hdr is a header dictionary. Contents of the native header
will be used if it is a nifti header.
Returns: 0 if no error, otherwise 1.
"""
if hdr.has_key('native_header'):
whdr = hdr['native_header']
if whdr.has_key('filetype'):
ftype = whdr['filetype']
else:
ftype = 'unknown'
else:
ftype = 'unknown'
Rout = hdr['R']
# Fix broken headers.
if hdr['mdim'] == 0:
hdr['mdim'] = 1
if hdr['tdim'] == 0:
hdr['tdim'] = 1
if hdr['zdim'] == 0:
hdr['zdim'] = 1
# Insert info for fieldmap correction if available.
modify_nifti_auxfile(hdr)
# Convert to quaternions.
if abs(Rout[:3,:3]).sum() > 0 and Rout[3,3] == 1.:
# This looks like a valid R matrix.
x = rot44_to_quatern(Rout)
else:
x = None
if isinstance(x, tuple):
qa, qb, qc, qd, qfac, qoffx, qoffy, qoffz = x
qform_code = whdr.get('qform_code',c.NIFTI_XFORM_SCANNER_ANAT)
qform_code = c.NIFTI_XFORM_SCANNER_ANAT
else:
# Conversion failed, use defaults.
qa, qb, qc, qd, qfac, qoffx, qoffy, qoffz = \
(0., 0., 0., 0., 1., 0., 0., 0.)
qform_code = c.NIFTI_XFORM_UNKNOWN
fmt = 'i10s18sihsB8hfffhhhh8ffffhcbffffii80s24shh6f4f4f4f16s4s'
lgth = struct.calcsize(fmt)
if hdr['swap']:
fmt = ">" + fmt
else:
fmt = "<" + fmt
if hdr['native_header'].has_key('ScanningSequence'):
if whdr['ScanningSequence'][0].strip() == 'EP':
slice_dim = NIFTI_SLICE_ALT_INC
else:
slice_dim = 0
if whdr['PhaseEncDir'] == 'ROW':
# dim_info = (slice_dim << 4) | (0x1 << 2) | 0x2
freq_dim = 2
phase_dim = 1
else:
# dim_info = (slice_dim << 4) | (0x2 << 2) | 0x1
freq_dim = 1
phase_dim = 2
else:
freq_dim = whdr.get('freq_dim', 0)
phase_dim = whdr.get('phase_dim', 0)
slice_dim = whdr.get('slice_dim', 0)
if not whdr.has_key('quatern_b'):
# Existing header not for a nifti file. Rewrite defaults.
whdr = {'sizeof_hdr':348, 'data_type':"", 'db_name':"", \
'extents':16384, \
'session_error':0, 'regular':"r", 'dim_info':"0", \
'dim':[1, 1, 1, 1, 1, 1, 1, 1], \
'intent_p1':0., 'intent_p2':0., 'intent_p3':0., 'intent_code':0, \
'bitpix':0, 'slice_start':0, \
'pixdim':[1., 0., 0., 0., 0., 0., 0., 0.], \
'vox_offset':0., 'scl_slope':0., 'scl_inter':0., 'slice_code':"", \
'xyzt_units':"", 'cal_max':0., 'cal_min':0., 'slice_duration':0., \
'toffset':0., 'glmax':0, 'glmin':0, 'descrip':"", \
'qform_code':qform_code, 'time_units':'msec', 'space_units':'mm', \
'misc_units':'', 'sform_code':'unknown', 'intent_name':"", \
'magic':"ni1"}
# Set orientation information.
whdr['quatern_b'] = qb
whdr['quatern_c'] = qc
whdr['quatern_d'] = qd
whdr['qoffset_x'] = qoffx
whdr['qoffset_y'] = qoffy
whdr['qoffset_z'] = qoffz
Rlpi = convert_R_to_lpi(hdr['R'], hdr['dims'], hdr['sizes'])
# Rlpi = hdr['R']
Rtmp = dot(Rlpi, diag([hdr['xsize'], hdr['ysize'], hdr['zsize'], 1.]))
whdr['srow_x'] = zeros(4, float)
whdr['srow_x'][:] = Rtmp[0, :]
whdr['srow_y'] = zeros(4, float)
whdr['srow_y'][:] = Rtmp[1, :]
whdr['srow_z'] = zeros(4, float)
whdr['srow_z'][:] = Rtmp[2, :]
# whdr['srow_x'][:3] *= hdr['xsize']
# whdr['srow_y'][:3] *= hdr['ysize']
# whdr['srow_z'][:3] *= hdr['zsize']
whdr['qfac'] = qfac
# Set undefined fields to zero. Spm puts garbage here.
whdr['glmin'] = 0
whdr['glmax'] = 0
whdr['sizeof_hdr'] = 348
whdr['descrip'] = hdr['native_header'].get('descrip','')
whdr['aux_file'] = hdr['native_header'].get('aux_file','')
if len(whdr['descrip']) > 79:
whdr['descrip'] = whdr['descrip'][:79]
whdr['dim'] = [hdr['ndim'], hdr['xdim'], hdr['ydim'], hdr['zdim'], \
hdr['tdim'], hdr['mdim'], 0, 0]
whdr['slice_end'] = hdr['zdim']-1
if hdr['sizes'][3] > 0.:
TR = hdr['sizes'][3]
else:
TR = hdr.get('TR',0.)
if TR == 0.:
TR = hdr['subhdr'].get('TR',0.)
whdr['pixdim'] = [hdr['ndim'], hdr['xsize'], hdr['ysize'], hdr['zsize'], \
TR, hdr['msize'], 0., 0.]
whdr['qoffset_x'] = qoffx
whdr['qoffset_y'] = qoffy
whdr['qoffset_z'] = qoffz
whdr['quatern_b'] = qb
whdr['quatern_c'] = qc
whdr['quatern_d'] = qd
whdr['qfac'] = float(qfac)
whdr['bitpix'] = datatype_to_lgth[hdr['datatype']]
whdr['datatype'] = nifti_type_to_datacode[hdr['datatype']]
whdr['dim_info'] = freq_dim | (phase_dim << 2) | (slice_dim << 4)
whdr['slice_code'] = nifti_slice_order_encode[ \
hdr['native_header'].get('SliceOrder', 'unknown')]
whdr['intent_code'] = nifti_intent_encode[whdr.get('intent_class', \
'unknown')]
whdr['qform_code'] = nifti_sqform_encode.get(qform_code, c.NIFTI_XFORM_UNKNOWN)
whdr['sform_code'] = nifti_sqform_encode[whdr.get('sform_code', 0)]
whdr['xyzt_units'] = nifti_units_encode[whdr.get('space_units', 'mm')] | \
nifti_units_encode[whdr.get('time_units', 'msec')] | \
nifti_units_encode[whdr.get('misc_units', '')]
if hdr['filetype'] == 'nii':
hdr['filetype'] = 'n+1'
whdr['magic'] = hdr['filetype']
if hdr['filetype'] == 'n+1':
vox_offset = 348
vox_offset = vox_offset + 4
else:
vox_offset = 0
extcode = whdr.get('extcode', '0000')
if extcode[0] != '0':
vox_offset = int(vox_offset) + 6 + len(whdr.get('edata',''))
whdr['vox_offset'] = vox_offset
binary_hdr = struct.pack(fmt, whdr['sizeof_hdr'], whdr['data_type'], \
whdr['db_name'], whdr['extents'], whdr['session_error'], whdr['regular'], \
whdr['dim_info'], whdr['dim'][0], whdr['dim'][1], whdr['dim'][2], \
whdr['dim'][3], whdr['dim'][4], whdr['dim'][5], whdr['dim'][6], \
whdr['dim'][7], whdr['intent_p1'], whdr['intent_p2'], whdr['intent_p3'], \
whdr['intent_code'], whdr['datatype'], whdr['bitpix'], \
whdr['slice_start'], whdr['qfac'], whdr['pixdim'][1], whdr['pixdim'][2], \
whdr['pixdim'][3], whdr['pixdim'][4], whdr['pixdim'][5], \
whdr['pixdim'][6], whdr['pixdim'][7], whdr['vox_offset'], \
hdr['scale_factor'], hdr['scale_offset'], whdr['slice_end'], \
whdr['slice_code'], whdr['xyzt_units'], whdr['cal_max'], whdr['cal_min'], \
whdr['slice_duration'], whdr['toffset'], whdr['glmax'], whdr['glmin'], \
whdr['descrip'], whdr['aux_file'], whdr['qform_code'], whdr['sform_code'], \
whdr['quatern_b'], whdr['quatern_c'], whdr['quatern_d'], \
whdr['qoffset_x'], whdr['qoffset_y'], whdr['qoffset_z'], \
whdr['srow_x'][0], whdr['srow_x'][1], whdr['srow_x'][2], whdr['srow_x'][3], \
whdr['srow_y'][0], whdr['srow_y'][1], whdr['srow_y'][2], whdr['srow_y'][3], \
whdr['srow_z'][0], whdr['srow_z'][1], whdr['srow_z'][2], whdr['srow_z'][3], \
whdr['intent_name'], whdr['magic'])
    try:
        if newfile:
            f = open(hdrname, 'w')
        else:
            f = open(hdrname, 'r+')
        f.seek(0)
    except IOError:
        raise IOError(
            "\nfile_io::write_nifti: Could not open %s\n\n" % hdrname)
try:
f.write(binary_hdr)
except IOError:
raise IOError(\
"\nfile_io::write_nifti: Could not write to %s\n\n"%hdrname)
if hdr['filetype'] == 'n+1':
ecodes = whdr.get('extcode', zeros(4,byte))
if isinstance(ecodes, list):
ecodes = array(ecodes)
if ecodes[0]:
# Extension is present.
exthdr = struct.pack('ccccii', ecodes[0], ecodes[1], \
ecodes[2], ecodes[3], whdr['esize'], \
nifti_ecode_encode[whdr['ecode']]) + whdr['edata']
else:
exthdr = fromstring(ecodes,byte)
# Write the extension header.
f.write(exthdr)
f.close()
return 0
|
8b9239ff96d453f8bcb7a667e62434fa9f1bfbc6
| 3,637,739
|
import struct
def get_array_of_float(num, data):
"""Read array of floats
Parameters
----------
num : int
Number of values to be read (length of array)
data : str
4C binary data file
Returns
-------
str
Truncated 4C binary data file
list
List of floats
"""
length = 4
results = struct.unpack('f' * num, data[:num * length])
pos = num * length
new_data = data[pos:]
return new_data, list(results)
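# Illustrative usage (doctest-style sketch): pack three floats, then read them back.
# >>> data = struct.pack('f' * 3, 1.0, 2.0, 3.0) + b'rest'
# >>> remaining, values = get_array_of_float(3, data)
# >>> values
# [1.0, 2.0, 3.0]
# >>> remaining
# b'rest'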
|
92a0a4cc653046826b14c2cd376a42045c4fa641
| 3,637,740
|
def AUcat(disk=None, first=1, last=1000, Aname=None, Aclass=None, Aseq=0,
giveList=False):
"""
Catalog listing of AIPS UV data files on disk disk
Strings use AIPS wild cards:
* blank => any
'?' => one of any character
"*" => arbitrary string
If giveList then return list of CNOs
* disk = AIPS disk number to list
* first = lowest slot number to list
* last = highest slot number to list
* Aname = desired AIPS name, using AIPS wildcards, None -> don't check
* Aclass = desired AIPS class, using AIPS wildcards, None -> don't check
* Aseq = desired AIPS sequence, 0=> any
* giveList = If true, return list of CNOs matching
"""
################################################################
global Adisk
    if disk is None:
disk = Adisk
else:
Adisk = disk
# Get catalog
cat = AIPSData.AIPSCat(disk)
olist = AIPSDir.PListCat(cat.catalog, disk, type="UV", first=first, last=last,
Aname=Aname, Aclass=Aclass, Aseq=Aseq,
giveList=giveList)
OErr.printErrMsg(err, "Error with AIPS catalog")
return olist
# end AUcat
|
501bb5a1eaa82fd162d17478f5bd9b14d8b76124
| 3,637,741
|
def process_threat_results(matching_threats, context):
""" prepare response from threat results """
threats = [ThreatSerializer(threat).data for threat in matching_threats]
response_data = {
"id": context.id,
"hits": threats,
}
status_code = status.HTTP_200_OK
if context.pending_searches:
response_data["retry_secs"] = 60
status_code = status.HTTP_303_SEE_OTHER
return Response(response_data, status_code)
|
b6f763f1a2983967dd0ccc68237408bf3871f9ac
| 3,637,742
|
def entropy_logits(logits):
"""
Computes the entropy of an unnormalized probability distribution.
"""
probs = F.softmax(logits, dim=-1)
return entropy(probs)
|
a9806dfbafbe77f74df55b81cc19603826e2d994
| 3,637,743
|
def convert_int_to_str(number: int, char: str = "'"):
"""Converts an ugly int into a beautiful and sweet str
Parameters:
nb: The number which is gonna be converted.
char: The characters which are gonna be inserted between every 3 digits.
Example: 2364735247 --> 2'364'735'247"""
number = str(number)
for index in range(len(number) - 3, 0, -3):
number = number[:index] + char + number[index:]
return number
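# Illustrative usage:
# >>> convert_int_to_str(2364735247)
# "2'364'735'247"
# >>> convert_int_to_str(1234567, char=',')
# '1,234,567'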
|
ae8e2b0e4cc9a332e559e3128c440fff59cf6c78
| 3,637,744
|
def exists(index, doc_type, id, **kwargs):
"""
Returns a boolean indicating whether or not given document exists in Elasticsearch.
http://www.elastic.co/guide/en/elasticsearch/reference/current/docs-get.html
"""
res = request("exists", None, index, doc_type, id, **kwargs)
jsonprint(res)
return res
|
fd5488acef16b22b0da7302345eab2de6073523c
| 3,637,745
|
def deserialize_cookie(string):
"""Deserialize cookie"""
parts = string.split("#")
length = len(parts)
    if length < 3:
return None
if not is_int(parts[2]):
return None
return create_internal_cookie(
unquote(parts[0]),
unquote(parts[1]),
parse_int(parts[2])
)
|
9887eb18c4cc91a13048b987ec962deb83a4da2b
| 3,637,746
|
def choose(n, k):
"""This is a binomial coeficient nCk used in binomial probablilty
this funtion uses factorial()
Usage: choose(n, k)
args:
n = total number
k = total number of sub-groups """
try:
return factorial(n)/(factorial(k) * factorial(n - k))
except(ValueError, ZeroDivisionError, TypeError):
print("""This is a binomial coeficient nCk used in binomial probablilty
this funtion uses factorial()
Usage: choose(n, k)
args:
n = total number
k = total number of sub-groups """)
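# Illustrative usage: the binomial probability of exactly k successes in n
# independent trials with success probability p is choose(n, k) * p**k * (1 - p)**(n - k).
# >>> choose(5, 2)
# 10.0
# >>> choose(5, 2) * 0.5**2 * 0.5**3   # P(2 heads in 5 fair coin flips)
# 0.3125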
|
3e9fe5212a2ddf680fc6681c0a7d7bd1ec9a4de2
| 3,637,747
|
import grp
from typing import cast
def get_os_group(name: _STR_OR_INT_OR_NONE = None) -> grp.struct_group:
"""Get an operating system group object.
Args:
name (:obj:`str` or :obj:`int`, optional): The "group name" or ``gid``.
            Defaults to the current user's group.
Raises:
OSError: If the given ``name`` does not exist as a "group
name" for this operating system.
OSError: If the given ``name`` is a ``gid`` and it does not
exist.
:rtype:
:obj:`struct_group <grp>`
* A tuple like object.
Example:
>>> from flutils.pathutils import get_os_group
>>> get_os_group('bar')
grp.struct_group(gr_name='bar', gr_passwd='*', gr_gid=2001,
gr_mem=['foo'])
"""
if name is None:
name = get_os_user().pw_gid
name = cast(int, name)
if isinstance(name, int):
try:
return grp.getgrgid(name)
except KeyError:
raise OSError(
'The given gid: %r, is not a valid gid for this operating '
'system.' % name
)
try:
return grp.getgrnam(name)
except KeyError:
raise OSError(
'The given name: %r, is not a valid "group name" '
'for this operating system.' % name
)
|
6c359b46cdd2766cbdea7fb5412b1e03a3fbecac
| 3,637,749
|
def _process_output(response, context):
"""Post-process TensorFlow Serving output before it is returned to the client.
Args:
response (obj): the TensorFlow serving response
context (Context): an object containing request and configuration details
Returns:
(bytes, string): data to return to client, response content type
"""
if response.status_code != 200:
_return_error(response.status_code, response.content.decode('utf-8'))
response_content_type = context.accept_header
print("response.json():", response.json())
# remove whitespace from output JSON string
prediction = response.content.decode('utf-8').translate(dict.fromkeys(map(ord,whitespace)))
return prediction, response_content_type
|
19805fc9ce122b4c02a596167edbc01398dfa2ab
| 3,637,750
|
from bs4 import BeautifulSoup
import requests
def make_soup(text: str, mode: str="url", parser: str=PARSER) -> BeautifulSoup:
""" Returns a soup. """
if mode == "url" or isinstance(mode, dict):
params = mode if isinstance(mode, dict) else {}
text = requests.get(text, params=params).text
elif mode == "file":
        with open(text) as f:
            text = f.read()
return BeautifulSoup(text, parser)
|
9641a7a0807194c911614e2ac41551b04bdbe22d
| 3,637,752
|
import ast
def _merge_inner_function(
class_def, infer_type, intermediate_repr, merge_inner_function
):
"""
Merge the inner function if found within the class, with the class IR
:param class_def: Class AST
:type class_def: ```ClassDef```
:param infer_type: Whether to try inferring the typ (from the default)
:type infer_type: ```bool```
:param intermediate_repr: a dictionary of form
{ "name": Optional[str],
"type": Optional[str],
"doc": Optional[str],
"params": OrderedDict[str, {'typ': str, 'doc': Optional[str], 'default': Any}]
"returns": Optional[OrderedDict[Literal['return_type'],
{'typ': str, 'doc': Optional[str], 'default': Any}),)]] }
:type intermediate_repr: ```dict```
:param merge_inner_function: Name of inner function to merge. If None, merge nothing.
:type merge_inner_function: ```Optional[str]```
:returns: a dictionary of form
{ "name": Optional[str],
"type": Optional[str],
"doc": Optional[str],
"params": OrderedDict[str, {'typ': str, 'doc': Optional[str], 'default': Any}]
"returns": Optional[OrderedDict[Literal['return_type'],
{'typ': str, 'doc': Optional[str], 'default': Any}),)]] }
:rtype: ```dict```
"""
function_def = next(
filter(
lambda func: func.name == merge_inner_function,
filter(rpartial(isinstance, FunctionDef), ast.walk(class_def)),
),
None,
)
if function_def is not None:
function_type = (
"static" if not function_def.args.args else function_def.args.args[0].arg
)
inner_ir = function(
function_def,
function_name=merge_inner_function,
function_type=function_type,
infer_type=infer_type,
)
ir_merge(other=inner_ir, target=intermediate_repr)
return intermediate_repr
|
5c891ba82cb5b41a5b5d311611f5d318d249a31e
| 3,637,753
|
def pb_set_defaults():
"""Set board defaults. Must be called before using any other board functions."""
return spinapi.pb_set_defaults()
|
30d360a15e4602c64a81900a581a2f4429f7d71e
| 3,637,754
|
def count_routes_graph(graph, source_node, dest_node):
"""
classic tree-like graph traversal
"""
if dest_node == source_node or dest_node - source_node == 1:
return 1
else:
routes = 0
for child in graph[source_node]:
routes += count_routes_graph(graph, child, dest_node)
return routes
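# Illustrative usage (hypothetical adjacency dict mapping each node to its children;
# note the base case also treats consecutively numbered nodes as directly connected):
# graph = {0: [1, 2], 1: [2], 2: [3], 3: []}
# count_routes_graph(graph, 0, 3)  ->  2   (0->1->2->3 and 0->2->3)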
|
f952b35f101d9f1c42eb1d7444859493701c6838
| 3,637,755
|
from typing import Dict
def pluck_state(obj: Dict) -> str:
"""A wrapper to illustrate composing
the above two functions.
Args:
obj: The dictionary created from the json string.
"""
plucker = pipe(get_metadata, get_state_from_meta)
return plucker(obj)
|
d9517346b701f9ff434452992a4f3e8ca3dccf08
| 3,637,756
|
from typing import Callable
from typing import Mapping
from typing import Any
from typing import Optional
def value(
parser: Callable[[str, Mapping[str, str]], Any] = nop,
tag_: Optional[str] = None,
var: Optional[str] = None,
) -> Parser:
"""Return a parser to parse a simple value assignment XML tag.
:param parser:
The text parser to use for the contents of the given `tag_`. It will
also be given the attributes mapping.
:param tag_:
The name of the tag to parse. The default is to consume any tag.
:param var:
Override the name the value is to be assigned to. The default is the
tag name.
.. note::
Use of this will break the AST's ability to make suggestions when
attempting to assign to an invalid variable as that feature
requires the tag and variable to have the same name.
:return:
A parser that consumes the given XML `tag_` and produces a
:class:`rads.config.ast.Assignment` AST node.
:raises rads.config.xml_parsers.TerminalXMLParseError:
Raised by the returned parser if the consumed tag is empty or the given
text `parser` produces a :class:`rads.config.text_parsers.TextParseError`.
"""
def process(element: Element) -> Assignment:
var_ = var if var else element.tag
condition = parse_condition(element.attributes)
action = parse_action(element)
text = element.text if element.text else ""
source = source_from_element(element)
try:
value = parser(text, element.attributes)
except TextParseError as err:
raise error_at(element)(str(err)) from err
return Assignment(
name=var_, value=value, condition=condition, action=action, source=source
)
if tag_:
return tag(tag_) ^ process
return any() ^ process
|
dcb2ad9b9e83015f1fd86323a156bbe92d505211
| 3,637,757
|
def compute_Rnorm(image, mask_field, cen, R=12, wid=1, mask_cross=True, display=False):
""" Compute (3 sigma-clipped) normalization using an annulus.
Note the output values of normalization contain background.
    Parameters
----------
image : input image for measurement
mask_field : mask map with nearby sources masked as 1.
cen : center of target
R : radius of annulus
wid : half-width of annulus
Returns
-------
I_mean: mean value in the annulus
I_med : median value in the annulus
I_std : std value in the annulus
    I_flag : 0 good / 1 bad (available pixels < 5)
"""
annulus_ma = CircularAnnulus([cen], R-wid, R+wid).to_mask()[0]
mask_ring = annulus_ma.to_image(image.shape) > 0.5 # sky ring (R-wid, R+wid)
mask_clean = mask_ring & (~mask_field) # sky ring with other sources masked
# Whether to mask the cross regions, important if R is small
if mask_cross:
yy, xx = np.indices(image.shape)
rr = np.sqrt((xx-cen[0])**2+(yy-cen[1])**2)
cross = ((abs(xx-cen[0])<4)|(abs(yy-cen[1])<4))
mask_clean = mask_clean * (~cross)
if len(image[mask_clean]) < 5:
return [np.nan] * 3 + [1]
z = sigma_clip(np.log10(image[mask_clean]), sigma=2, maxiters=5)
I_mean, I_med, I_std = 10**np.mean(z), 10**np.median(z.compressed()), np.std(10**z)
if display:
z = 10**z
fig, (ax1,ax2) = plt.subplots(nrows=1, ncols=2, figsize=(9,4))
ax1.imshow(mask_clean, cmap="gray", alpha=0.7)
ax1.imshow(image, vmin=image.min(), vmax=I_med+50*I_std,
cmap='viridis', norm=AsinhNorm(), alpha=0.7)
ax1.plot(cen[0], cen[1], 'r*', ms=10)
ax2.hist(sigma_clip(z),alpha=0.7)
# Label mean value
plt.axvline(I_mean, color='k')
plt.text(0.5, 0.9, "%.1f"%I_mean, color='darkorange', ha='center', transform=ax2.transAxes)
# Label 20% / 80% quantiles
I_20 = np.quantile(z.compressed(), 0.2)
I_80 = np.quantile(z.compressed(), 0.8)
for I, x_txt in zip([I_20, I_80], [0.2, 0.8]):
plt.axvline(I, color='k', ls="--")
plt.text(x_txt, 0.9, "%.1f"%I, color='orange',
ha='center', transform=ax2.transAxes)
return I_mean, I_med, I_std, 0
|
7c0b2aebf009b81c19de30e3a0d9f91fcfcebd52
| 3,637,758
|
import six
def inject_timeout(func):
"""Decorator which injects ``timeout`` parameter into request.
On client initiation, default timeout is set. This timeout will be
injected into any request if no explicit parameter is set.
:return: Value of decorated function.
"""
@six.wraps(func)
def decorator(self, *args, **kwargs):
kwargs.setdefault("timeout", self._timeout)
return func(self, *args, **kwargs)
return decorator
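# Illustrative usage sketch (hypothetical client class, not part of the original
# snippet): the decorator fills in kwargs["timeout"] from self._timeout unless the
# caller passes an explicit timeout.
import requests

class ExampleClient(object):
    def __init__(self, timeout=30):
        self._timeout = timeout

    @inject_timeout
    def get(self, url, **kwargs):
        # kwargs["timeout"] has already been populated by the decorator here
        return requests.get(url, **kwargs)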
|
479ed7b6aa7005d528ace0ff662840d14c23035c
| 3,637,759
|
def test_match_partial(values):
"""@match_partial allows not covering all the cases."""
v, v2 = values
@match_partial(MyType)
class get_partial_value(object):
def MyConstructor(x):
return x
assert get_partial_value(v) == 3
|
826a08066822e701c2077c2b71be48152c401b3f
| 3,637,760
|
def assert_sim_of_model_with_itself_is_approx_one(mdl: nn.Module, X: Tensor,
layer_name: str,
metric_comparison_type: str = 'pwcca',
metric_as_sim_or_dist: str = 'dist') -> bool:
"""
Returns true if model is ok. If not it asserts against you (never returns False).
"""
dist: float = get_metric(mdl, mdl, X, X, layer_name, metric_comparison_type=metric_comparison_type,
metric_as_sim_or_dist=metric_as_sim_or_dist)
print(f'Should be very very close to 0.0: {dist=} ({metric_comparison_type=})')
    assert approx_equal(dist, 0.0), f'Distance should be close to 0.0 but got: {dist=}'
return True
|
76d9b88063b69b69217f28cb98c985ff92f9b6e0
| 3,637,761
|
def cver(verstr):
"""Converts a version string into a number"""
if verstr.startswith("b"):
return float(verstr[1:])-100000
return float(verstr)
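# Illustrative usage: beta versions (prefixed with "b") are shifted well below
# release versions so that they compare as older.
# >>> cver("1.2")
# 1.2
# >>> cver("b3")
# -99997.0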
|
1ad119049b9149efe7df74f5ac269d3dfafad4e2
| 3,637,762
|
import urllib
import urllib2
def _GetGaeCookie(host, service, auth_token, secure):
"""This function creates a login cookie using the authentication token
obtained after logging in successfully in the Google account.
Args:
host: Host where the user wants to login.
service: Service code where the user wants to login.
auth_token: Authentication token obtained from ClientLogin.
secure: True if we want a secure cookie, false if not.
Returns:
A cookie for the specifed service.
Raises:
urllib2.HTTPError: This exception is raised when the cookie cannot be
obtained and the user is redirected to another place.
"""
# Create a request for Google's service with the authentication token.
continue_location = 'http://localhost/'
cookie_request_data_map = {
'continue' : continue_location,
'auth' : auth_token,
}
cookie_request_data = urllib.urlencode(cookie_request_data_map)
cookie_url = '{protocol}://{host}/_{service}/login?{data}'.format(
protocol=('https' if secure else 'http'), host=host, service=service,
data=cookie_request_data)
cookie_request = urllib2.Request(cookie_url)
try:
# Create a custom opener, make the request and extract the body.
http_opener = _GetHTTPOpener()
cookie_response = http_opener.open(cookie_request)
except urllib2.HTTPError as e:
# Keep the error as the cookie response.
cookie_response = e
# Check that a redirection was made to the required continue location.
# Otherwise, return an HTTP error.
response_code = cookie_response.code
if (response_code != 302 or
cookie_response.info()['location'] != continue_location):
raise urllib2.HTTPError(cookie_request.get_full_url(), response_code,
cookie_response.msg, cookie_response.headers,
cookie_response.fp)
# Extract the cookie from the headers and remove 'HttpOnly' from it.
cookie = cookie_response.headers.get('Set-Cookie')
return cookie.replace('; HttpOnly', '')
|
9bef7516f6b43c2b744e6bb0a75a488e8aee3934
| 3,637,763
|
async def ping_handler() -> data.PingResponse:
"""
Check server status.
"""
return data.PingResponse(status="ok")
|
77d1130aa31f54fbcac351d58b8ae4e4b893c5e9
| 3,637,764
|
def create_session_cookie():
"""
Creates a cookie containing a session for a user
Stolen from https://stackoverflow.com/questions/22494583/login-with-code-when-using-liveservertestcase-with-django
    :return: a cookie dict containing the session key for a newly created test user
"""
# First, create a new test user
user = AuthUserFactory()
# Then create the authenticated session using the new user credentials
session = SessionStore()
session[SESSION_KEY] = user.pk
session[BACKEND_SESSION_KEY] = settings.AUTHENTICATION_BACKENDS[0]
session[HASH_SESSION_KEY] = user.get_session_auth_hash()
session.save()
# Finally, create the cookie dictionary
cookie = {settings.SESSION_COOKIE_NAME: session.session_key}
return cookie
|
d4d7eef96e7b0136aa888d362b3278eb24ae91b8
| 3,637,767
|
def _replace_oov(original_vocab, line):
"""Replace out-of-vocab words with "UNK".
This maintains compatibility with published results.
Args:
original_vocab: a set of strings (The standard vocabulary for the dataset)
line: a unicode string - a space-delimited sequence of words.
Returns:
a unicode string - a space-delimited sequence of words.
"""
return u" ".join(
[word if word in original_vocab else u"UNK" for word in line.split()])
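# Illustrative usage (hypothetical three-word vocabulary):
# _replace_oov({u"the", u"cat", u"sat"}, u"the dog sat")  ->  u"the UNK sat"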
|
2e2cb1464484806b79263a14fd32ed4d40d0c9ba
| 3,637,770
|
def linear_CMD_fit(x,y,xerr,yerr):
"""
Does a linear fit to CMD data where x is color and y is amplitude, returning some fit
statistics
Parameters
----------
x : array-like
color
y : array-like
magnitude
xerr : array-like
color errors
yerr : array-like
magnitude errors
Returns
-------
slope : float
slope of best-fit line
r_squared : float
Correlation coefficient (R^2)
"""
data = RealData(x, y, sx=xerr, sy=yerr)
mod = Model(line)
odr = ODR(data, mod, beta0=[-0.1, np.mean(y)])
out = odr.run()
slope = out.beta[0]
r_squared = r2_score(y, line(out.beta, x))
return slope, r_squared
|
fb145d5caf48d2ab1b49a17b1e05ddd32e97c3f1
| 3,637,771
|
def _verify_path_value(value, is_str, is_kind=False):
"""Verify a key path value: one of a kind, string ID or integer ID.
Args:
value (Union[str, int]): The value to verify
is_str (bool): Flag indicating if the ``value`` is a string. If
:data:`False`, then the ``value`` is assumed to be an integer.
is_kind (Optional[bool]): Flag indicating if the value is meant to
be a kind. Defaults to :data:`False`.
Returns:
Union[str, int]: The ``value`` passed in, if it passed verification
checks.
Raises:
ValueError: If the ``value`` is a ``str`` for the kind, but the number
of UTF-8 encoded bytes is outside of the range ``[1, 1500]``.
ValueError: If the ``value`` is a ``str`` for the name, but the number
of UTF-8 encoded bytes is outside of the range ``[1, 1500]``.
ValueError: If the ``value`` is an integer but lies outside of the
range ``[1, 2^63 - 1]``.
"""
if is_str:
if 1 <= len(value.encode("utf-8")) <= _MAX_KEYPART_BYTES:
return value
if is_kind:
raise ValueError(_BAD_KIND.format(_MAX_KEYPART_BYTES, value))
else:
raise ValueError(_BAD_STRING_ID.format(_MAX_KEYPART_BYTES, value))
else:
if 1 <= value <= _MAX_INTEGER_ID:
return value
raise ValueError(_BAD_INTEGER_ID.format(value))
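# Illustrative behaviour (assuming the module constants described in the docstring,
# i.e. _MAX_KEYPART_BYTES = 1500 and _MAX_INTEGER_ID = 2**63 - 1):
# _verify_path_value("Kind", is_str=True, is_kind=True)  ->  "Kind"
# _verify_path_value(42, is_str=False)                   ->  42
# _verify_path_value(0, is_str=False)                    ->  ValueError (outside [1, 2^63 - 1])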
|
3d8db518f244e6d09826d29dfcc42769a0015c33
| 3,637,772
|
def _is_tipologia_header(row):
"""Controlla se la riga corrente e' una voce o l'header di una
nuova tipologia di voci ("Personale", "Noli", etc).
"""
if type(row.iloc[1]) is not str:
return False
if type(row.iloc[2]) is str:
if row.iloc[2] != HEADERS["units"]:
return False
else:
if not np.isnan(row.iloc[2]):
return False
return True
|
0fdbc6bea8d961fbe990d607a175815ccc475f88
| 3,637,773
|
def validateFloat(
value,
blank=False,
strip=None,
allowRegexes=None,
blockRegexes=None,
min=None,
max=None,
lessThan=None,
greaterThan=None,
excMsg=None,
):
# type: (str, bool, Union[None, str, bool], Union[None, Sequence[Union[Pattern, str]]], Union[None, Sequence[Union[Pattern, str, Sequence[Union[Pattern, str]]]]], Optional[int], Optional[int], Optional[int], Optional[int], Optional[str]) -> Union[float, str]
"""Raises ValidationException if value is not a float.
Returns value, so it can be used inline in an expression:
print(2 + validateFloat(your_number))
    Note that since float() ignores leading and trailing whitespace
    when converting a string to a number, so does this validateFloat().
* value (str): The value being validated as an int or float.
* blank (bool): If True, a blank string will be accepted. Defaults to False.
* strip (bool, str, None): If None, whitespace is stripped from value. If a str, the characters in it are stripped from value. If False, nothing is stripped.
* allowRegexes (Sequence, None): A sequence of regex str that will explicitly pass validation, even if they aren't numbers.
* blockRegexes (Sequence, None): A sequence of regex str or (regex_str, response_str) tuples that, if matched, will explicitly fail validation.
* _numType (str): One of 'num', 'int', or 'float' for the kind of number to validate against, where 'num' means int or float.
* min (int, float): The (inclusive) minimum value for the value to pass validation.
* max (int, float): The (inclusive) maximum value for the value to pass validation.
    * lessThan (int, float): The (exclusive) maximum value for the value to pass validation.
    * greaterThan (int, float): The (exclusive) minimum value for the value to pass validation.
* excMsg (str): A custom message to use in the raised ValidationException.
If you specify min or max, you cannot also respectively specify lessThan
or greaterThan. Doing so will raise PySimpleValidateException.
>>> import pysimplevalidate as pysv
>>> pysv.validateFloat('3.14')
3.14
>>> pysv.validateFloat('pi')
Traceback (most recent call last):
...
pysimplevalidate.ValidationException: 'pi' is not a float.
>>> pysv.validateFloat('3')
3.0
>>> pysv.validateFloat('3', min=3)
3.0
>>> pysv.validateFloat('3', greaterThan=3)
Traceback (most recent call last):
...
pysimplevalidate.ValidationException: Number must be greater than 3.
"""
# Even though validateNum *could* return a int, it won't if _numType is 'float', so ignore mypy's complaint:
return validateNum(
value=value,
blank=blank,
strip=strip,
allowRegexes=allowRegexes,
blockRegexes=blockRegexes,
_numType="float",
min=min,
max=max,
lessThan=lessThan,
greaterThan=greaterThan,
)
|
e11bbef1b0f53fa803918f9871e9779549e3cdb8
| 3,637,774
|
from typing import Dict
from typing import Any
def send_sms(mobile: str, sms_code: str) -> Dict[str, Any]:
    """Send an SMS verification code."""
    sdk: SmsSDK = SmsSDK(
        celery.app.config.get("SMS_ACCOUNT_ID"),
        celery.app.config.get("SMS_ACCOUNT_TOKEN"),
        celery.app.config.get("SMS_APP_ID")
    )
    result: Dict[str, Any] = {}
    try:
        ret: str = sdk.sendMessage(
            celery.app.config.get("SMS_TEMPLATE_ID"),  # template ID
            mobile,  # user's mobile number
            (sms_code, celery.app.config.get("SMS_EXPIRE_TIME") // 60)  # template variables
        )
        # The SMS provider returns a JSON-formatted string; convert it to a dict
        result = orjson.loads(ret)
        # A status code of "000000" means the SMS was sent successfully;
        # cache the verification code in redis
        if result["statusCode"] == "000000":
            pipe: Pipeline = redis.pipeline()
            pipe.multi()  # start a transaction
            # store the SMS code in redis
            pipe.setex("sms_%s" % mobile, celery.app.config.get("SMS_EXPIRE_TIME"), sms_code)
            # start the resend cooldown countdown
            pipe.setex("int_%s" % mobile, celery.app.config.get("SMS_INTERVAL_TIME"), "_")
            pipe.execute()  # commit the transaction
            return result
        else:
            raise Exception
    except Exception as exc:
        celery.app.logger.error("SMS sending failed!\r\n%s" % exc)
        return result
|
f1117d0543cc84d0429ce67f1415e6ab371ef2a6
| 3,637,775
|
def from_dataframe(df, name='df', client=None):
"""
convenience function to construct an ibis table
from a DataFrame
EXPERIMENTAL API
Parameters
----------
df : DataFrame
name : str, default 'df'
client : Client, default new PandasClient
client dictionary will be mutated with the
name of the DataFrame
Returns
-------
Table
"""
if client is None:
return connect({name: df}).table(name)
client.dictionary[name] = df
return client.table(name)
|
23d64170f078652e60d65be5346293ea3c4aedb5
| 3,637,776
|
def filter_list(prev_list, current_list, zeta):
"""
    Apply the moving-average filter to all elements
    of the list, one by one.
"""
filtered_list = []
for i, current_val in enumerate(current_list):
prev_val = prev_list[i]
filtered_list.append(
moving_average_filter(current_val, prev_val, zeta))
return filtered_list
|
842d71f58b07dbe771c7fdd43797f26e75565ef5
| 3,637,781
|
def has_prefix(sub_s):
"""
:param sub_s: (str) A substring that is constructed by neighboring letters on a 4x4 square grid
:return: (bool) If there is any words with prefix stored in sub_s
"""
for word in dict_list:
if word.startswith(sub_s):
return True
return False
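# Illustrative usage (dict_list is assumed to be a module-level word list, as in the
# original grid-word-search program):
# dict_list = ['apple', 'apply', 'banana']
# has_prefix('app')  ->  True
# has_prefix('bax')  ->  False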
|
78900ed757d4a1a94832f5a2f6d19da784935966
| 3,637,782
|
import yaml
def main():
""" """
try:
# read parameters configuration file yaml
with open(setupcfg.extraParam, "r") as stream:
try:
param = yaml.safe_load(stream)
except yaml.YAMLError as exc:
print(exc)
# check parameters file
return _check_param(param)
except Exception:
_logger.exception(
f"Something goes wrong when loading extra parameters file -{setupcfg.extraParam}-."
)
raise
|
67da82991e8ae5b36dae81c6ac107099a54ab7e4
| 3,637,784
|
def primary_key(field_type):
"""
* Returns the field to be treated as the "primary key" for this type
* Primary key is determined as the first of:
* - non-null ID field
* - ID field
* - first String field
* - first field
*
* @param {object_type_definition} type
* @returns {FieldDefinition} primary key field
"""
# Find the primary key for the type
# first field with a required ID
# if no required ID type then first required type
pk = first_non_null_and_id_field(field_type)
if not pk:
pk = first_id_field(field_type)
if not pk:
pk = first_non_null_field(field_type)
if not pk:
pk = first_field(field_type)
return pk
|
5beef62f9311b013b6c6cbe3c36260783bc61506
| 3,637,785
|
def get_discussion_data_list_with_percentage(session: Session, doi, limit: int = 20, min_percentage: float = 1,
dd_type="lang"):
""" get discussion types with count an percentage from postgresql """
query = """
WITH result AS
(
(
SELECT "value",
count as c,
ROUND(count / CAST(SUM(count) OVER () AS FLOAT) * 1000) / 10 as p
FROM counted_discussion_data
JOIN discussion_data as dd ON (discussion_data_point_id = dd.id)
WHERE type = :type and value != 'und' and value != 'unknown'
ORDER BY c DESC
LIMIT :limit
)
UNION
(
SELECT 'total' as "value", SUM(count) as c, 100 as p
FROM counted_discussion_data
JOIN discussion_data as dd ON (discussion_data_point_id = dd.id)
WHERE type = :type and value != 'und' and value != 'unknown'
)
)
SELECT "value", c as count, p
FROM result
WHERE result.p >= :mp
ORDER BY count DESC;
"""
params = {
'type': dd_type,
'limit': limit,
'mp': min_percentage
}
if doi:
query = """
WITH result AS
(
(
SELECT "value",
SUM(count) as c,
ROUND(SUM(count) / CAST(SUM(SUM(count)) OVER () AS FLOAT) * 1000) / 10 as p
FROM (SELECT "value", "count"
FROM discussion_data_point as ddp
JOIN discussion_data as dd ON (ddp.discussion_data_point_id = dd.id)
WHERE type = :type and value != 'und' and value != 'unknown'
AND publication_doi=:doi
) temp
GROUP BY "value"
ORDER BY c DESC
LIMIT :limit
)
UNION
(
SELECT 'total' as "value", SUM(count) as c, 100 as p
FROM discussion_data_point as ddp
JOIN discussion_data as dd ON (ddp.discussion_data_point_id = dd.id)
WHERE type = :type and value != 'und' and value != 'unknown'
AND publication_doi=:doi
)
)
SELECT "value", c as count, p
FROM result
WHERE result.p >= :mp
ORDER BY count DESC;
"""
params['doi'] = doi
s = text(query)
# print(query)
# print(params)
if 'doi' in params:
s = s.bindparams(bindparam('type'), bindparam('limit'), bindparam('mp'), bindparam('doi'))
else:
s = s.bindparams(bindparam('type'), bindparam('limit'), bindparam('mp'))
return session.execute(s, params).fetchall()
|
4842566f7a891ce53cfc8170cc0fb5db2a6b298b
| 3,637,786
|
import collections.abc
import torch
import time
def validate(config, model, val_iterator, criterion, scheduler=None):
"""Runs one standard validation pass over the val_iterator.
This function automatically measures timing for various operations such
as host to device transfer and processing time for the batch.
It also automatically detects and places the data on the given GPU device
if available.
Raises:
ValueError if multiple models/schedulers are provided. You
are expected to have a custom validation function if you wish
to use multiple models/schedulers.
Args:
config: (dict): A user configuration provided into the Trainer
constructor.
model: The model as created by the model_creator.
        val_iterator: An iterator created from the DataLoader which
wraps the provided Dataset.
criterion: The loss object created by the loss_creator.
scheduler (optional): The torch.optim.lr_scheduler object
as created by the scheduler_creator. By default,
this is not used in this function.
Returns:
A dict of metrics from the evaluation.
"""
    if isinstance(model, collections.abc.Iterable) or isinstance(
            scheduler, collections.abc.Iterable):
raise ValueError(
"Need to provide custom validation function if using multi-model "
"or multi-scheduler training.")
batch_time = AverageMeter()
losses = AverageMeter()
# switch to evaluate mode
model.eval()
correct = 0
total = 0
batch_idx = 0
with torch.no_grad():
end = time.time()
for batch_idx, (features, target) in enumerate(val_iterator):
if torch.cuda.is_available():
features = features.cuda(non_blocking=True)
target = target.cuda(non_blocking=True)
# compute output
output = model(features)
loss = criterion(output, target)
_, predicted = torch.max(output.data, 1)
total += target.size(0)
correct += (predicted == target).sum().item()
# measure accuracy and record loss
losses.update(loss.item(), features.size(0))
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if config.get(TEST_MODE) and batch_idx == 0:
break
stats = {
BATCH_COUNT: batch_idx + 1,
"batch_time": batch_time.avg,
"validation_loss": losses.avg,
"mean_accuracy": correct / total,
"mean_loss": losses.sum / total,
}
return stats
|
4f10e68c2e863e11e33f4f49b8378de51ff2b8fe
| 3,637,787
|
def geq_indicate(var, indicator, var_max, thr):
"""Generates constraints that make indicator 1 iff var >= thr, else 0.
Parameters
----------
var : str
Variable on which thresholding is performed.
indicator : str
Identifier of the indicator variable.
var_max : int
An upper bound on var.
    thr : int
Comparison threshold.
Returns
-------
List[str]
A list holding the two constraints.
"""
lb = "- %s + %d %s <= 0" % (var, thr, indicator)
ub = "- %s + %d %s >= -%d" % (var, var_max - thr + 1, indicator, thr - 1)
return [lb, ub]
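# Worked example (illustrative): with var='x', indicator='b', var_max=10 and thr=4,
# geq_indicate('x', 'b', 10, 4) returns
#   ['- x + 4 b <= 0', '- x + 7 b >= -3']
# The first constraint forces x >= 4 whenever b = 1; the second forces b = 1
# whenever x >= 4 (given x <= 10 and b binary), so b indicates x >= 4.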
|
319f18f5343b806b7108dd9c02ca5d647e132dab
| 3,637,790
|
import re
def parse_manpage_number(path):
"""
Parse number of man page group.
"""
# Create regular expression
number_regex = re.compile(r".*/man(\d).*")
# Get number of manpage group
number = number_regex.search(path)
only_number = ""
if number is not None:
number = number.group(1)
return number
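# Illustrative usage:
# parse_manpage_number("/usr/share/man/man3/printf.3.gz")  ->  '3'
# parse_manpage_number("/tmp/notes.txt")                   ->  None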
|
b45edb65705592cd18fd1fd8ee30bb389dbd8dff
| 3,637,791
|
def sample_coordinates_from_coupling(c, row_points, column_points, num_samples=None, return_all = False, thr = 10**(-6)):
"""
Generates [x, y] samples from the coupling c.
If return_all is True, returns [x,y] coordinates of every pair with coupling value >thr
"""
index_samples = sample_indices_from_coupling(c, num_samples = num_samples, return_all = return_all, thr = thr)
return np.array([ [row_points[s[0], :], column_points[s[1],:]] for s in index_samples])
|
a8343291a34ff31a2fc7b86c9b83872e7c787b76
| 3,637,792
|
import ast
def is_suppress_importerror(node: ast.With):
"""
Returns whether the given ``with`` block contains a
:func:`contextlib.suppress(ImportError) <contextlib.suppress>` contextmanager.
.. versionadded:: 0.5.0 (private)
:param node:
""" # noqa: D400
item: ast.withitem
for item in node.items:
if not isinstance(item.context_expr, ast.Call):
continue
try:
name = '.'.join(get_attribute_name(item.context_expr.func))
except NotImplementedError: # pragma: no cover
continue
if name not in {"suppress", "contextlib.suppress", "contextlib2.suppress"}:
continue
for arg in item.context_expr.args:
try:
arg_name = '.'.join(get_attribute_name(arg))
except NotImplementedError: # pragma: no cover
continue
if arg_name in {"ImportError", "ModuleNotFoundError"}:
return True
return False
|
341d106b62d7940e4d84a359cd2f2ca254d3434e
| 3,637,793
|
def random_flip_left_right(data):
""" Randomly flip an image or batch of image left/right uniformly
Args:
data: tensor of shape (H, W, C) or (N, H, W, C)
Returns:
Randomly flipped data
"""
data_con, C, N = _concat_batch(data)
data_con = tf.image.random_flip_left_right(data_con)
return _unconcat_batch(data_con, C, N)
|
bcdd0dfd35ff7ee0237d585d5a6cd70f92d7df2b
| 3,637,794
|
def get_all_lobbyists(official_id, cycle=None, api_key=None):
"""
https://www.opensecrets.org/api/?method=candContrib&cid=N00007360&cycle=2020&apikey=__apikey__
"""
if cycle is None:
cycle = 2020 # I don't actually know how the cycles work; I assume you can't just take the current year?
# if API key none, get it from some sort of appwide config defined above
w = Wrapper(api_key)
return w.get({'method':'candContrib', 'cid': official_id, 'cycle': cycle})
|
a2d8267881e871cb54201d243357739e689f187e
| 3,637,798
|
def get_sale(this_line):
"""Convert the input into a dictionary, with keys matching
the CSV column headers in the scrape_util module.
"""
sale = {}
sale['consignor_name'] = this_line.pop(0)
sale['consignor_city'] = this_line.pop(0).title()
try:
maybe_head = this_line[0].split()
int(maybe_head[0])
sale['cattle_head'] = maybe_head[0]
sale['cattle_cattle'] = ' '.join(maybe_head[1:])
this_line.pop(0)
    except (ValueError, IndexError):
sale['cattle_cattle'] = this_line.pop(0)
sale['cattle_avg_weight'] = this_line.pop(0)
price_string = this_line.pop(0)
sale['cattle_price_cwt'] = price_string.replace(',', '')
return sale
|
39fee66b4c92a2cb459722f238e4a3b6e5848f4d
| 3,637,799
|
def validate_besseli(nu, z, n):
"""
Compares the results of besseli function with scipy.special. If the return
is zero, the result matches with scipy.special.
.. note::
Scipy cannot compute this special case: ``scipy.special.iv(nu, 0)``,
where nu is negative and non-integer. The correct answer is -inf, but
scipy's result is +inf. This issue also affects derivatives of the
iv function at ``z = 0``. For example, ``scipy.special.ivp(nu, 0, n)``.
However, the results for *complex* argument ``z = 0j`` is correctly
returned by scipy (which is ``nan``).
"""
# Compute using special_functions package
i_specf = besseli(nu, z, n)
# Compute using scipy.special package
if n == 0:
if not isinstance(z, complex) and nu == 0:
i_scipy = i0(z)
elif not isinstance(z, complex) and nu == 1:
i_scipy = i1(z)
else:
i_scipy = iv(nu, z)
else:
i_scipy = ivp(nu, z, n)
# Whitelist false scipy results. See note in docstring above.
ignore_scipy = False
if (nu < 0) and (round(nu) != nu) and (z.real == 0) and (z.imag == 0):
ignore_scipy = True
if (round(nu) != nu) and (z.real == 0) and (z.imag == 0) and (n > 0):
ignore_scipy = True
# Compare
error = i_specf - i_scipy
tolerance = 1e-14
if ignore_scipy:
error_detected = False
elif isinstance(error, float) and isinf(i_specf) and isinf(i_scipy) \
and (copysign(1, i_specf) == copysign(1, i_scipy)):
error_detected = False
elif isinstance(error, complex) and isinf(i_specf.real) and \
isinf(i_scipy.real) and \
(copysign(1, i_specf.real) == copysign(1, i_scipy.real)):
error_detected = False
elif isinstance(error, float) and isnan(i_specf) and isnan(i_scipy):
error_detected = False
elif isinstance(error, complex) and isnan(i_specf.real) and \
isnan(i_scipy.real):
error_detected = False
elif error.real < tolerance and error.real > -tolerance and \
error.imag < tolerance and error.imag > -tolerance:
error_detected = False
else:
error_detected = True
if isinstance(z, complex):
print('ERROR: nu: %+0.2f, z: (%+0.2f,%+0.2f), n: %d, '
% (nu, z.real, z.imag, n), end=" ")
else:
print('ERROR: nu: %+0.2f, z: (%+0.2f,.....), n: %d, '
% (nu, z.real, n), end=" ")
if isinstance(i_specf, complex):
print('i_nu: (%+0.3f,%+0.3f) '
% (i_specf.real, i_specf.imag), end=" ")
else:
print('i_nu: (%+0.3f,......) ' % (i_specf), end=" ")
if isinstance(i_scipy, complex):
print('!= (%+0.3f,%+0.3f), '
% (i_scipy.real, i_scipy.imag), end=" ")
else:
print('!= (%+0.3f,......), ' % (i_scipy), end=" ")
if isinstance(error, complex):
print('error: (%+0.3e,%+0.3e)'
% (error.real, error.imag))
else:
print('error: (%+0.3e,..........)' % (error))
return error_detected
|
a8102c014fdcb2d256adf94aea842d1e5733ba72
| 3,637,800
|
from typing import Any
from typing import List
def delete_by_ip(*ip_address: Any) -> List:
"""
Remove the rules connected to specific ip_address.
"""
removed_rules = []
counter = 1
for rule in rules():
if rule.src in ip_address:
removed_rules.append(rule)
execute("delete", counter, force=True)
else:
counter += 1
return removed_rules
|
88b430b83a5c3c82491f210e218a10719b5b75df
| 3,637,801
|
def findMaxWindow(a, w):
"""
:param a: input array of integers
:param w: window size
:return: array of max val in every window
"""
max = [0] * (len(a)-w+1)
maxPointer = 0
maxCount = 0
q = Queue()
for i in range(0, w):
if a[i] > max[maxPointer]:
max[maxPointer] = a[i]
elif a[i] == max[maxPointer]:
maxCount += 1
if w>1:
q.enqueue(a[i])
maxPointer += 1
for i in range(w, len(a)):
if w>1:
a0 = q.dequeue()
if a0 == max[maxPointer-1]:
maxCount -= 1
if a[i] > max[maxPointer-1]:
maxCount = 0
max[maxPointer] = a[i]
elif a[i] == max[maxPointer-1]:
max[maxPointer] = a[i]
maxCount += 1
else:
max[maxPointer] = max[maxPointer-1]
q.enqueue(a[i])
maxPointer += 1
return max
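# Illustrative usage (assumes a simple FIFO Queue with enqueue/dequeue, as used above):
# findMaxWindow([1, 3, 2, 5, 4, 2], 3)  ->  [3, 5, 5, 5]
# i.e. the maximum of each length-3 sliding window over the input.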
|
af3e7f010b162e8f378e541be32a2d295e31e51c
| 3,637,802
|
import logging
def filtering_news(news: list, filtered_news: list):
"""
Filters news to remove unwanted removed articles
Args:
news (list): List of articles to remove from
filtered_news (list): List of titles to filter the unwanted news with
Returns:
news (list): List of articles with undesired articles removed
"""
for x in filtered_news:
for y in news: # Nested loop to loop through the titles since it is a list of dictionaries
if y["title"] == x["title"]:
news.remove(y)
logging.info("News filtered, removed {}".format(x["title"]))
break
return news
|
98049b6bd826109fe7bc8e2e42de4c50970988a9
| 3,637,803
|
def extract_subsequence(sequence, start_time, end_time):
"""Extracts a subsequence from a NoteSequence.
Notes starting before `start_time` are not included. Notes ending after
`end_time` are truncated.
Args:
sequence: The NoteSequence to extract a subsequence from.
start_time: The float time in seconds to start the subsequence.
end_time: The float time in seconds to end the subsequence.
Returns:
A new NoteSequence that is a subsequence of `sequence` in the specified time
range.
"""
subsequence = music_pb2.NoteSequence()
subsequence.CopyFrom(sequence)
del subsequence.notes[:]
for note in sequence.notes:
if note.start_time < start_time or note.start_time >= end_time:
continue
new_note = subsequence.notes.add()
new_note.CopyFrom(note)
new_note.end_time = min(note.end_time, end_time)
subsequence.total_time = min(sequence.total_time, end_time)
return subsequence
|
cf8e1be638163a6cb7c6fd6e69121ccc7100afd6
| 3,637,804
|
import re
def read_data(filename):
"""Read the raw tweet data from a file. Replace Emails etc with special tokens """
with open(filename, 'r') as f:
all_lines=f.readlines()
padded_lines=[]
for line in all_lines:
line = emoticonsPattern.sub(lambda m: rep[re.escape(m.group(0))], line.lower().strip())
line = userMentionsRegex.sub(' USER ', line )
line = emailsRegex.sub(' EMAIL ', line )
line=urlsRegex.sub(' URL ', line)
line=numsRegex.sub(' NUM ',line)
line=punctuationNotEmoticonsRegex.sub(' PUN ',line)
line=re.sub(r'(.)\1{2,}', r'\1\1',line)
words_tokens=[token for token in TweetTokenizer().tokenize(line)]
line= ' '.join(token for token in words_tokens )
padded_lines.append(line)
padded_data=' '.join(line for line in padded_lines)
encoded_data=tf.compat.as_str(padded_data).split()
return encoded_data
|
8e15d6e4bd9e4a6b3b01ea5baffad8e6bc390034
| 3,637,805
|
def client():
"""AlgodClient for testing"""
client = _algod_client()
client.flat_fee = True
client.fee = 1000
print("fee ", client.fee)
return client
|
ad51102a58d9ffad4a9dd43c3e2b4bd5adc0f467
| 3,637,806
|
def GRU_sent_encoder(batch_size, max_len, vocab_size, hidden_dim, wordembed_dim,
dropout=0.0, is_train=True, n_gpus=1):
"""
Implementing the GRU of skip-thought vectors.
Use masks so that sentences at different lengths can be put into the same batch.
sent_seq: sequence of tokens consisting a sentence, shape: batch_size x max_len
mask: 1 indicating valid, 0 invalid, shape: batch_size x max_len
embed_weight: word embedding, shape:
"""
sent_seq = mx.sym.Variable('sent_seq')
mask = mx.sym.Variable('mask')
embed_weight = mx.sym.Variable('embed_weight')
embeded_seq = mx.sym.Embedding(data=sent_seq, input_dim=vocab_size, weight=embed_weight,
output_dim=wordembed_dim, name='sent_embedding')
sent_vec = GRU_unroll(batch_size, embeded_seq, mask=mask,
in_dim=wordembed_dim, seq_len=max_len,
num_hidden=hidden_dim, dropout=dropout,
prefix='sent', n_gpus=n_gpus)
return sent_vec
|
fe7090efe78ec97ba88651ecf8f7918bb5277eec
| 3,637,807
|
def process_contours(frame_resized):
"""Get contours of the object detected"""
blurred = cv2.GaussianBlur(frame_resized, (11, 9), 0)
hsv = cv2.cvtColor(blurred, cv2.COLOR_BGR2HSV)
mask = cv2.inRange(hsv, constants.blueLower, constants.blueUpper)
mask = cv2.erode(mask, None, iterations=2)
mask = cv2.dilate(mask, None, iterations=2)
# find contours in the mask and initialize the current
# (x, y) center of the ball
contours = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
contours = imutils.grab_contours(contours)
return contours
|
5725b12a3e5e0447a3b587d091f4fdeae1f5bac9
| 3,637,808
|
from typing import Optional
from typing import List
import itertools
def add_ignore_file_arguments(files: Optional[List[str]] = None) -> List[str]:
"""Adds ignore file variables to the scope of the deployment"""
default_ignores = ["config.json", "Dockerfile", ".dockerignore"]
# Combine default files and files
    ignore_files = default_ignores + (files or [])
return list(
itertools.chain.from_iterable(
[["--ignore-file", filename] for filename in ingore_files]
)
)
|
f7e7487c4a17a761f23628cbb79cbade64237ce6
| 3,637,809
|
import torch
def compute_accuracy(logits, targets):
"""Compute the accuracy"""
with torch.no_grad():
_, predictions = torch.max(logits, dim=1)
accuracy = torch.mean(predictions.eq(targets).float())
return accuracy.item()
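# Illustrative usage (small synthetic batch):
# logits  = torch.tensor([[2.0, 0.1], [0.2, 1.5], [0.3, 0.9]])
# targets = torch.tensor([0, 1, 0])
# compute_accuracy(logits, targets)  ->  ~0.667  (2 of 3 argmax predictions match)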
|
af15e4d077209ff6e790d6fdaa7642bb65ff8dbf
| 3,637,810
|
def division_by_zero(number: int):
"""Divide by zero. Should raise exception.
Try requesting http://your-app/_divide_by_zero/7
"""
result = -1
try:
result = number / 0
except ZeroDivisionError:
logger.exception("Failed to divide by zero", exc_info=True)
return f"{number} divided by zeor is {result}"
|
b97d7f38aea43bfb6ee4db23549e89799bd299b7
| 3,637,811
|
def is_ELF_got_pointer_to_external(ea):
"""Similar to `is_ELF_got_pointer`, but requires that the eventual target
of the pointer is an external."""
if not is_ELF_got_pointer(ea):
return False
target_ea = get_reference_target(ea)
return is_external_segment(target_ea)
|
cd62d43bb266d229ae31e477dc60d21f73b8850a
| 3,637,812
|
from pathlib import Path
def _normalise_dataset_path(input_path: Path) -> Path:
"""
Dataset path should be either the direct imagery folder (mtl+bands) or a tar path.
Translate other inputs (example: the MTL path) to one of the two.
>>> tmppath = Path(tempfile.mkdtemp())
>>> ds_path = tmppath.joinpath('LE07_L1GT_104078_20131209_20161119_01_T1')
>>> ds_path.mkdir()
>>> mtl_path = ds_path / 'LC08_L1TP_090084_20160121_20170405_01_T1_MTL.txt'
>>> mtl_path.write_text('<mtl content>')
13
>>> _normalise_dataset_path(ds_path).relative_to(tmppath).as_posix()
'LE07_L1GT_104078_20131209_20161119_01_T1'
>>> _normalise_dataset_path(mtl_path).relative_to(tmppath).as_posix()
'LE07_L1GT_104078_20131209_20161119_01_T1'
>>> tar_path = tmppath / 'LS_L1GT.tar.gz'
>>> tar_path.write_text('fake tar')
8
>>> _normalise_dataset_path(tar_path).relative_to(tmppath).as_posix()
'LS_L1GT.tar.gz'
>>> _normalise_dataset_path(Path(tempfile.mkdtemp()))
Traceback (most recent call last):
...
ValueError: No MTL files within input path .... Not a dataset?
"""
input_path = normalise_nci_symlinks(input_path)
if input_path.is_file():
if ".tar" in input_path.suffixes:
return input_path
input_path = input_path.parent
mtl_files = list(input_path.rglob("*_MTL.txt"))
if not mtl_files:
raise ValueError(
"No MTL files within input path '{}'. Not a dataset?".format(input_path)
)
if len(mtl_files) > 1:
raise ValueError(
"Multiple MTL files in a single dataset (got path: {})".format(input_path)
)
return input_path
|
cf61da9a043db9c67714d7437c7ef18ee6235acb
| 3,637,813
|
def get_customers():
"""returns an array of dicts with the customers
Returns:
Array[Dict]: returns an array of dicts of the customers
"""
    try:
        conn = openConnection()
        with conn.cursor() as cur:
            result = cur.run_query('SELECT * FROM customer')
        conn.close()
    except Exception:
        return []
customers = []
for row in result:
if row[0] == 1:
continue
customer = {'id': row[0], 'name':row[1], 'credit': 0, 'rfid': row[2]}
customers.append(customer)
return customers
|
4440fb5d226070facb4e5c1b854535e40f42d607
| 3,637,814
|
import easy_server
def fixtureid_es_server(fixture_value):
"""
Return a fixture ID to be used by pytest for fixture `es_server()`.
Parameters:
fixture_value (:class:`~easy_server.Server`):
The server the test runs against.
"""
es_obj = fixture_value
assert isinstance(es_obj, easy_server.Server)
return "es_server={0}".format(es_obj.nickname)
|
f795a8e909354e0004ea81ebdf71f7da81153a64
| 3,637,815
|
def topn_vocabulary(document, TFIDF_model, topn=100):
"""
Find the top n most important words in a document.
Parameters
----------
`document` : The document to find important words in.
`TFIDF_model` : The TF-IDF model that will be used.
    `topn`: Default = 100. Number of top words to return.
Returns
-------
`dictionary` : A dictionary containing words and their importance as a `float`.
"""
    if isinstance(document, list):
document = " ".join(document)
weight_list = TFIDF_list_of_weigths(TFIDF_model=TFIDF_model, abstract=document)
temp_dict = utils.tuples_to_dict(weight_list[:topn])
return temp_dict
|
4c58e2f041c76407bb2e7c686713b12e2c1e8256
| 3,637,816
|
import tensorflow as tf
def embedding_table(inputs, vocab_size, embed_size, zero_pad=False,
trainable=True, scope="embedding", reuse=None):
""" Generating Embedding Table with given parameters
:param inputs: A 'Tensor' with type 'int8' or 'int16' or 'int32' or 'int64'
containing the ids to be looked up in 'lookup table'.
:param vocab_size: An int. Vocabulary size.
:param embed_size: An int. Number of size of embedding vector.
    :param zero_pad: A boolean. If True, all the values of the first row (id 0)
        should be constant zeros.
    :param trainable: A boolean. Whether the embedding matrix is trainable
        (set to False to freeze it).
:param scope: A str, Optional scope for 'variable_scope'.
:param reuse: A boolean. Whether to reuse the weights of a previous layer
by the same name.
    :return: A 'Tensor' containing the looked-up embedding vectors for 'inputs',
        of shape inputs.shape + [embed_size].
"""
with tf.variable_scope(scope, reuse=reuse):
embed_table = tf.get_variable('embedding_table',
shape=[vocab_size, embed_size],
initializer=_init,
trainable=trainable,
dtype=tf.float32)
if zero_pad:
embed_table = tf.concat((tf.zeros(shape=[1, embed_size]), embed_table[1:, :]),
axis=0)
return tf.nn.embedding_lookup(embed_table, inputs)
|
bc509e18048230372b8f52dc5bbb77295014aec8
| 3,637,817
|
def get_trading_dates(start_date, end_date):
"""
    Get the list of trading dates for a market, with the start and end dates included in the range. Currently only the Chinese market is supported.
    :param start_date: start date
    :type start_date: `str` | `date` | `datetime` | `pandas.Timestamp`
    :param end_date: end date
:type end_date: `str` | `date` | `datetime` | `pandas.Timestamp`
:return: list[`datetime.date`]
:example:
.. code-block:: python3
:linenos:
[In]get_trading_dates(start_date='2016-05-05', end_date='20160505')
[Out]
[datetime.date(2016, 5, 5)]
"""
return DataProxy.get_instance().get_trading_dates(start_date, end_date)
|
5b0bf331376c5b2f9d1c8308be285b54fa053e5f
| 3,637,818
|
def gm_put(state, b1, b2):
"""
If goal is ('pos',b1,b2) and we're holding b1,
Generate either a putdown or a stack subtask for b1.
b2 is b1's destination: either the table or another block.
"""
if b2 != 'hand' and state.pos[b1] == 'hand':
if b2 == 'table':
return [('a_putdown', b1)]
elif state.clear[b2]:
return [('a_stack', b1, b2)]
|
c9076ac552529c60b5460740c74b1602c42414f2
| 3,637,819
|
import numpy as np
def pad_to_shape_label(label, shape):
"""
Pad the label array to the given shape by 0 and 1.
:param label: The label for padding, of shape [n_batch, *vol_shape, n_class].
:param shape: The shape of the padded array, of value [n_batch, *vol_shape, n_class].
:return: The padded label array.
"""
    assert np.all(np.asarray(label.shape) <= np.asarray(shape)), "The shape of array to be padded is larger than the target shape."
offset1 = (shape[1] - label.shape[1]) // 2
offset2 = (shape[2] - label.shape[2]) // 2
remainder1 = (shape[1] - label.shape[1]) % 2
remainder2 = (shape[2] - label.shape[2]) % 2
class_pred = []
for k in range(label.shape[-1]):
if k == 0:
class_pred.append(np.pad(label[..., k],
((0, 0),
(offset1, offset1 + remainder1),
(offset2, offset2 + remainder2)),
'constant', constant_values=1))
else:
class_pred.append(np.pad(label[..., k],
((0, 0),
(offset1, offset1 + remainder1),
(offset2, offset2 + remainder2)),
'constant'))
return np.stack(class_pred, axis=-1)
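# Hedged usage sketch with assumed toy shapes: pad a (1, 4, 4, 2) label up to (1, 6, 6, 2).
if __name__ == "__main__":
    toy_label = np.zeros((1, 4, 4, 2))
    toy_label[..., 1] = 1.0
    padded = pad_to_shape_label(toy_label, (1, 6, 6, 2))
    print(padded.shape)  # (1, 6, 6, 2); channel 0 is padded with ones, the others with zeros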
|
e40d7c1949cc891353c9899767c92419202c325d
| 3,637,821
|
def download_report(
bucket_name: str, client: BaseClient, report: str, location: str
) -> bool:
"""
Downloads the original report
to the temporary work area
"""
response = client.download_file(
Bucket=bucket_name, FileName=report, Location=location
)
return response
|
d46fb279d5a315c60f1908664951436edc997ab8
| 3,637,822
|
def get_service(hass, config):
"""Get the Google Voice SMS notification service."""
if not validate_config({DOMAIN: config},
{DOMAIN: [CONF_USERNAME,
CONF_PASSWORD]},
_LOGGER):
return None
return GoogleVoiceSMSNotificationService(config[CONF_USERNAME],
config[CONF_PASSWORD])
|
c7fda936ca9448587e2c4167d9c765186344fb43
| 3,637,825
|
import random
import time
def hammer_op(context, chase_duration):
"""what better way to do a lot of gnarly work than to pointer chase?"""
ptr_length = context.op_config["chase_size"]
data = list(range(0, ptr_length))
random.shuffle(data)
curr = random.randint(0, ptr_length - 1)
# and away we go
start_time = time.time()
while (time.time() - start_time) < chase_duration:
curr = data[curr]
context.log.info("Hammered - start %d end %d" % (start_time, time.time()))
return chase_duration
|
f4a51fe1e2f89443b79fd4c9a5b3f5ee459e79ca
| 3,637,826
|
from typing import Callable
from typing import Mapping
import copy
import torch
def generate_optimization_fns(
loss_fn: Callable,
opt_fn: Callable,
k_fn: Callable,
normalize_grad: bool = False,
optimizations: Mapping = None,
):
"""Directly generates upper/outer bilevel program derivative functions.
Args:
loss_fn: loss_fn(z, *params), upper/outer level loss
opt_fn: opt_fn(*params) = z, lower/inner argmin function
k_fn: k_fn(z, *params) = 0, lower/inner implicit function
        normalize_grad: whether to normalize the gradient by its norm
        optimizations: optional mapping of extra settings forwarded to the
            implicit differentiation routines (e.g. a custom "Hz_fn")
Returns:
``f_fn(*params), g_fn(*params), h_fn(*params)``
parameters-only upper/outer level loss, gradient and Hessian.
"""
sol_cache = dict()
opt_fn_ = lambda *args, **kwargs: opt_fn(*args, **kwargs).detach()
    optimizations = {} if optimizations is None else copy.copy(optimizations)
@fn_with_sol_cache(opt_fn_, sol_cache)
def f_fn(z, *params):
z = z.detach() if isinstance(z, torch.Tensor) else z
params = _detach_args(*params)
return loss_fn(z, *params)
@fn_with_sol_cache(opt_fn_, sol_cache)
def g_fn(z, *params):
z = z.detach() if isinstance(z, torch.Tensor) else z
params = _detach_args(*params)
g = JACOBIAN(loss_fn, (z, *params))
Dp = implicit_jacobian(
k_fn, z.detach(), *params, Dg=g[0], optimizations=optimizations
)
Dp = Dp if len(params) != 1 else [Dp]
# opts = dict(device=z.device, dtype=z.dtype)
# Dp = [
# torch.zeros(param.shape, **opts) for param in params
# ]
ret = [Dp + g for (Dp, g) in zip(Dp, g[1:])]
if normalize_grad:
ret = [(z / (torch.norm(z) + 1e-7)).detach() for z in ret]
ret = [ret.detach() for ret in ret]
return ret[0] if len(ret) == 1 else ret
@fn_with_sol_cache(opt_fn_, sol_cache)
def h_fn(z, *params):
z = z.detach() if isinstance(z, torch.Tensor) else z
params = _detach_args(*params)
g = JACOBIAN(loss_fn, (z, *params))
if optimizations.get("Hz_fn", None) is None:
optimizations["Hz_fn"] = lambda z, *params: HESSIAN_DIAG(
lambda z: loss_fn(z, *params), (z,)
)[0]
Hz_fn = optimizations["Hz_fn"]
Hz = Hz_fn(z, *params)
H = [Hz] + HESSIAN_DIAG(lambda *params: loss_fn(z, *params), params)
Dp, Dpp = implicit_hessian(
k_fn,
z,
*params,
Dg=g[0],
Hg=H[0],
optimizations=optimizations,
)
Dpp = Dpp if len(params) != 1 else [Dpp]
ret = [Dpp + H for (Dpp, H) in zip(Dpp, H[1:])]
ret = [ret.detach() for ret in ret]
return ret[0] if len(ret) == 1 else ret
return f_fn, g_fn, h_fn
|
5e70f05c5aa0e754e5c1fbe585e4a0856a732006
| 3,637,828
|
def get_weighted_spans(doc, vec, feature_weights):
# type: (Any, Any, FeatureWeights) -> Optional[WeightedSpans]
""" If possible, return a dict with preprocessed document and a list
of spans with weights, corresponding to features in the document.
"""
if isinstance(vec, FeatureUnion):
return _get_weighted_spans_from_union(doc, vec, feature_weights)
else:
result = _get_doc_weighted_spans(doc, vec, feature_weights)
if result is not None:
found_features, doc_weighted_spans = result
return WeightedSpans(
[doc_weighted_spans],
other=_get_other(feature_weights, [('', found_features)]),
)
|
0896a8449690895d922ae409c7e278f38002f111
| 3,637,829
|
def get_child(parent, child_index):
"""
Get the child at the given index, or return None if it doesn't exist.
"""
if child_index < 0 or child_index >= len(parent.childNodes):
return None
return parent.childNodes[child_index]
|
37f7752a4a77f3d750413e54659f907b5531848c
| 3,637,830
|
import numpy as np
def extinction(species, adj, z, independent):
"""
Returns the presence/absence of each species after taking into account
the secondary extinctions.
Parameters
----------
species : numpy array of shape (nbsimu, S) with nbsimu being the number
of simulations (decompositions). This array contains the information
about the presence (1) or absence (0) of each species (columns) in
each simulation (rows).
adj : numpy array of size (S,S) with S being the species richness
Adjacency matrix.
    z : int
        Number of species which might not undergo secondary extinction.
independent : bool
Should the species having no incoming links be considered as
independent (i.e. not undergo secondary extinction)?
Returns
-------
Numpy array of shape (nbsimu, S) containing, for each decomposition (row),
the presence (1) or absence (0) of each species (columns).
"""
#-------- Extinction of dependent species --------
# Basic rule for dependent species :
# they need to be linked to another species to be part of the network
left = np.sum(adj, axis = 2)[:,z:] # Number of neighbours left
Psurvival = (left > 0).astype(int) # Survival if at least 1 neighbour
# Extinction cascade through trophic levels
while np.sum(species[:,z:] != Psurvival) != 0 :
### Extinction(s) ###
# Removal of non surviving species
species[:,z:] = (species[:,z:])*Psurvival
# Removal of non surviving links (i.e. links of the extinct species)
adj = cancel(adj, species)
### Check for higher order extinctions ###
left = np.sum(adj, axis=2)[:,z:] # Number of neighbours left
Psurvival = (left > 0).astype(int) # Survival if at least 1 neighbour
#-------- Extinction of independent species --------
    if not independent: # Species with no incoming links are not treated as independent
# Species having no incoming link undergo secondary extinction
interact = np.sum(cancel(adj, species),axis=1)[:, :z] # Outgoing links
(species[:,:z])[interact == 0] = 0 # Removed if no outgoing links left
return(species)
|
2a9cb1884cfceb3a7c06aede60191d8a86f4741b
| 3,637,832
|
def fix_variable_mana(card):
"""
This function was created to fix a problem in the dataset.
We're currently pretty up against the wall and I realized
that 'Variable' mana texts were not correctly converted to {X}
so this function is fed cards and corrects their mana values
if it detects this problem.
"""
def correct_field(symbol):
if 'Variable' in symbol:
# strip out brackets:
symbol = symbol[1:-1]
# remove 'Variable'
symbol = symbol.replace('Variable', '').strip()
# get the correct color-letter
symbol = alt_text_to_curly_bracket(symbol)
# 'insert' X and return the corrected symbol.
return f'{{X{symbol[1:-1]}}}'
else:
return symbol
    corrected = [correct_field(x) for x in card.mana_cost]
card.mana_cost = corrected
|
de0a0fe10d7ebbe02cd36088765be373c7dd9789
| 3,637,833
|
def cli_arg(
runner: CliRunner,
notebook_path: Path,
mock_terminal: Mock,
remove_link_ids: Callable[[str], str],
mock_tempfile_file: Mock,
mock_stdin_tty: Mock,
mock_stdout_tty: Mock,
) -> Callable[..., str]:
"""Return function that applies arguments to cli."""
def _cli_arg(
*args: Union[str, None],
truecolor: bool = True,
paging: Union[bool, None] = False,
material_theme: bool = True,
images: bool = True,
**kwargs: Union[str, None],
) -> str:
"""Apply given arguments to cli.
Args:
*args (Union[str, None]): The extra arguments to pass to the
command.
truecolor (bool): Whether to pass
'--color-system=truecolor' option. By default True.
paging (Union[bool, None]): Whether to pass '--paging' or
'--no-paging' option. By default False, which
corresponds to '--no-paging'.
material_theme (bool): Whether to set the theme to
'material'. By default True.
images (bool): Whether to pass '--images'. By default True.
**kwargs (Union[str, None]): Environmental variables to set.
Will be uppercased.
Returns:
str: The output of the invoked command.
"""
cleaned_args = [arg for arg in args if arg is not None]
upper_kwargs = {
name.upper(): value for name, value in kwargs.items() if value is not None
}
cli_args = [os.fsdecode(notebook_path), *cleaned_args]
if images:
cli_args.append("--images")
if material_theme:
cli_args.append("--theme=material")
if truecolor:
cli_args.append("--color-system=truecolor")
if paging is True:
cli_args.append("--paging")
elif paging is False:
cli_args.append("--no-paging")
result = runner.invoke(
__main__.typer_click_object,
args=cli_args,
color=True,
env=upper_kwargs,
)
output = remove_link_ids(result.output)
return output
return _cli_arg
|
5d7e02b11ace8ee44fa85ce7d2dc4c5a24fb72cf
| 3,637,834
|
def distinguish_system_application(vulner_info):
"""
Test whether CVE has system CIA loss or application CIA loss.
:param vulner_info: object of class Vulnerability from cve_parser.py
:return: result impact or impacts
"""
result_impacts = []
if system_confidentiality_changed(
vulner_info.description,
vulner_info.cvssv2,
vulner_info.cpe_type):
result_impacts.append("System confidentiality loss")
if system_integrity_changed(
vulner_info.description,
vulner_info.cvssv2,
vulner_info.cpe_type):
result_impacts.append("System integrity loss")
if system_availability_changed(
vulner_info.description,
vulner_info.cvssv2,
vulner_info.cpe_type):
result_impacts.append("System availability loss")
if not result_impacts:
if vulner_info.cvssv3['i'] != "NONE":
result_impacts.append("Application integrity loss")
if vulner_info.cvssv3['a'] != "NONE":
result_impacts.append("Application availability loss")
if vulner_info.cvssv3['c'] != "NONE":
result_impacts.append("Application confidentiality loss")
return result_impacts
|
c10ec04a761b038fe3c0d6408a31660ccf23a205
| 3,637,836
|
from typing import Tuple
import numpy as np
from numpy import ndarray
def nearest_with_mask_regrid(
distances: ndarray,
indexes: ndarray,
surface_type_mask: ndarray,
in_latlons: ndarray,
out_latlons: ndarray,
in_classified: ndarray,
out_classified: ndarray,
vicinity: float,
) -> Tuple[ndarray, ndarray]:
"""
Main regridding function for the nearest distance option.
    Some inputs are used only for handling island-like points.
    Args:
        distances:
            Distance array from each target grid point to its source grid points.
        indexes:
            Source grid point indexes for each target grid point.
        surface_type_mask:
            Boolean, true if the source point type matches the target point type.
        in_latlons:
            Source points' latitudes and longitudes.
        out_latlons:
            Target points' latitudes and longitudes.
in_classified:
Land/sea type for source grid points (land -> True).
out_classified:
Land/sea type for target grid points (land -> True).
vicinity:
Radius of specified searching domain, in meter.
Returns:
- Updated distances - array from each target grid point to its source grid points.
- Updated indexes - source grid point number for all target grid points.
"""
# Check if there are output points with mismatched surface types
matched_nearby_points_count = np.count_nonzero(surface_type_mask, axis=1)
points_with_mismatches = (np.where(matched_nearby_points_count < 4))[0]
# Look for nearest input points for the output points with mismatched surface
indexes, distances, surface_type_mask = update_nearest_points(
points_with_mismatches,
in_latlons,
out_latlons,
indexes,
distances,
surface_type_mask,
in_classified,
out_classified,
)
# Handle island and lake like output points - find more distant same surface type input points
# Note: surface_type_mask has been updated above
matched_nearby_points_count = np.count_nonzero(surface_type_mask, axis=1)
fully_mismatched_points = (np.where(matched_nearby_points_count == 0))[0]
if fully_mismatched_points.shape[0] > 0:
indexes, surface_type_mask = lakes_islands(
fully_mismatched_points,
indexes,
surface_type_mask,
in_latlons,
out_latlons,
in_classified,
out_classified,
vicinity,
)
# Convert mask to be true where input points should not be considered
inverse_surface_mask = np.logical_not(surface_type_mask)
# Replace distances with infinity where they should not be used
masked_distances = np.where(inverse_surface_mask, np.float64(np.inf), distances)
# Distances and indexes have been prepared to handle the mask, so can now
# call the non-masked regrid function in process
return masked_distances, indexes
|
75b69ddbbdca4c316ecf2d4e3933f6e3a55ff0e1
| 3,637,840
|
def get_renaming(mappers, year):
"""Get original to final column namings."""
renamers = {}
for code, attr in mappers.items():
renamers[code] = attr['df_name']
return renamers
|
33197b5c748b3ecc43783d5f1f3a3b5a071d3a4e
| 3,637,842
|
async def clap(text, args):
""" Puts clap emojis between words. """
if args != []:
clap_str = args[0]
else:
clap_str = "👏"
words = text.split(" ")
clappy_text = f" {clap_str} ".join(words)
return clappy_text
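# Hedged usage sketch (assumed invocation, not from the original source; the coroutine needs an event loop).
if __name__ == "__main__":
    import asyncio
    print(asyncio.run(clap("great job team", [])))  # great 👏 job 👏 team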
|
09865461e658213a2f048b89757b75b2a37c0602
| 3,637,843
|
from typing import Union
from typing import Callable
from typing import List
import pyarrow as pa
def apply_binary_str(
a: Union[pa.Array, pa.ChunkedArray],
b: Union[pa.Array, pa.ChunkedArray],
*,
func: Callable,
output_dtype,
parallel: bool = False,
):
"""
Apply an element-wise numba-jitted function on two Arrow columns.
The supplied function must return a numpy-compatible scalar.
Handling of missing data and chunking of the inputs is done automatically.
"""
if len(a) != len(b):
raise ValueError("Inputs don't have the same length.")
if isinstance(a, pa.ChunkedArray):
if isinstance(b, pa.ChunkedArray):
in_a_offsets, in_b_offsets = _combined_in_chunk_offsets(a, b)
new_chunks: List[pa.Array] = []
for a_offset, b_offset in zip(in_a_offsets, in_b_offsets):
a_slice = a.chunk(a_offset[0])[a_offset[1] : a_offset[1] + a_offset[2]]
b_slice = b.chunk(b_offset[0])[b_offset[1] : b_offset[1] + b_offset[2]]
new_chunks.append(
_apply_binary_str_array(
a_slice,
b_slice,
func=func,
output_dtype=output_dtype,
parallel=parallel,
)
)
return pa.chunked_array(new_chunks)
elif isinstance(b, pa.Array):
new_chunks = []
offsets = _calculate_chunk_offsets(a)
for chunk, offset in zip(a.iterchunks(), offsets):
new_chunks.append(
_apply_binary_str_array(
chunk,
b[offset : offset + len(chunk)],
func=func,
output_dtype=output_dtype,
parallel=parallel,
)
)
return pa.chunked_array(new_chunks)
else:
raise ValueError(f"left operand has unsupported type {type(b)}")
elif isinstance(a, pa.Array):
if isinstance(b, pa.ChunkedArray):
new_chunks = []
offsets = _calculate_chunk_offsets(b)
for chunk, offset in zip(b.iterchunks(), offsets):
new_chunks.append(
_apply_binary_str_array(
a[offset : offset + len(chunk)],
chunk,
func=func,
output_dtype=output_dtype,
parallel=parallel,
)
)
return pa.chunked_array(new_chunks)
elif isinstance(b, pa.Array):
return _apply_binary_str_array(
a, b, func=func, output_dtype=output_dtype, parallel=parallel
)
else:
raise ValueError(f"left operand has unsupported type {type(b)}")
else:
raise ValueError(f"left operand has unsupported type {type(a)}")
|
853cd326b5812314bb6595fee191ca1c6e1f89f6
| 3,637,844
|
def product_review(product_id: str):
"""
Shows review statistics for a product.
Returns a python dictionary with content-type: application/json
"""
session = Session()
date = request.args.get('date') # parse a query string formatted as BIGINT unixReviewTime
# SELECT AVG(overall) AS average, COUNT(overall) AS total FROM reviews WHERE productID=<product_id> (AND unixReviewTime=date);
query_1 = (
session.query(
func.avg(reviews.columns.overall)
.label('average'),
func.count(reviews.columns.overall)
.label('total')
)
.filter(reviews.columns.productID==product_id)
)
if date:
query_1 = query_1.filter(reviews.columns.unixReviewTime==date)
query_1 = query_1.first()
# SELECT overall AS stars, COUNT(overall) AS count FROM reviews WHERE productID=<product_id> (AND unixReviewTime=date) GROUP BY overall;
query_2 = (
session.query(
reviews.columns.overall
.label('stars'),
func.count(reviews.columns.overall)
.label('count')
)
.filter(reviews.columns.productID==product_id)
)
if date:
query_2 = query_2.filter(reviews.columns.unixReviewTime==date)
query_2 = (
query_2.group_by(reviews.columns.overall)
.all()
)
try:
json = {
"productID": product_id,
"average": round(query_1.average,1),
"percent_breakdown": {f"{int(row.stars)}_star": round((row.count*100)/query_1.total) for row in query_2},
"count_breakdown": {f"{int(row.stars)}_star": row.count for row in query_2},
"total": query_1.total
}
return json
    except Exception:
return Response("Error",404)
finally:
session.close()
|
945f29a536a5645b602633c4558ac3d68affe85a
| 3,637,845
|
def remove_extra_two_spaces(text: str) -> str:
"""Replaces two consecutive spaces with one wherever they occur in a text"""
return text.replace(" ", " ")
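# Hedged behaviour sketch (assumed input): the replacement is a single pass, so runs of
# more than two spaces are only partially collapsed and may need repeated calls.
if __name__ == "__main__":
    print(remove_extra_two_spaces("to  the   moon"))  # "to the  moon"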
|
d8b9600d3b442216b1fbe85918f313fec8a5c9cb
| 3,637,846
|
from sqlalchemy import MetaData, Table
def reflect_table(table_name, engine):
"""
Gets the table with the given name from the sqlalchemy engine.
Args:
table_name (str): Name of the table to extract.
engine (sqlalchemy.engine.base.Engine): Engine to extract from.
Returns:
table (sqlalchemy.ext.declarative.api.DeclarativeMeta): The extracted table, which can be now be used to read from the database.
"""
meta = MetaData()
table = Table(table_name, meta, autoload=True, autoload_with=engine)
return table
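# Hedged usage sketch; the connection URL and table name below are assumptions, not from the source,
# and a reachable database is required, so the example is left commented out.
# from sqlalchemy import create_engine
# engine = create_engine("postgresql://user:password@localhost/shopdb")
# customers = reflect_table("customer", engine)
# print([column.name for column in customers.columns])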
|
414a04172cec7e840bf257eaf5b15b1fc3fa9d59
| 3,637,847
|
def load_utt_list(utt_list):
"""Load a list of utterances.
Args:
utt_list (str): path to a file containing a list of utterances
Returns:
List[str]: list of utterances
"""
with open(utt_list) as f:
utt_ids = f.readlines()
utt_ids = map(lambda utt_id: utt_id.strip(), utt_ids)
utt_ids = filter(lambda utt_id: len(utt_id) > 0, utt_ids)
return list(utt_ids)
|
6a77e876b0cc959ac4151b328b718ae45522448b
| 3,637,848
|
def kfunc_vals(points, area):
"""
Input
points: a list of Point objects
area: an Extent object
Return
ds: list of radii
lds: L(d) values for each radius in ds
"""
# This function is taken from kfunction file in spatialanalysis library
n = len(points)
density = n/area.area()
t = kdtree2(points)
d = min([area.xmax-area.xmin,area.ymax-area.ymin])*2/3/10
ds = [ d*(i+1) for i in range(10)]
lds = [0 for d in ds]
for i, d in enumerate(ds):
for p in points:
ld = kfunc(t, p, d, density)[1]
lds[i] += ld
lds = [ld/n for ld in lds]
return ds, lds
|
2fd56da45f8fb4ede38a219b158dce802d68ae44
| 3,637,849
|
from datetime import datetime
async def get_locations():
"""
Retrieves the locations from the categories. The locations are cached for 1 hour.
:returns: The locations.
:rtype: List[Location]
"""
# Get all of the data categories locations.
confirmed = await get_category("confirmed")
deaths = await get_category("deaths")
# recovered = await get_category("recovered")
locations_confirmed = confirmed["locations"]
locations_deaths = deaths["locations"]
# locations_recovered = recovered["locations"]
# Final locations to return.
locations = []
# Go through locations.
for index, location in enumerate(locations_confirmed):
# Get the timelines.
timelines = {
"confirmed": locations_confirmed[index]["history"],
"deaths": locations_deaths[index]["history"],
# 'recovered' : locations_recovered[index]['history'],
}
# Grab coordinates.
coordinates = location["coordinates"]
# Create location (supporting timelines) and append.
locations.append(
TimelinedLocation(
# General info.
index,
location["country"],
location["province"],
# Coordinates.
Coordinates(coordinates["lat"], coordinates["long"]),
# Last update.
datetime.utcnow().isoformat() + "Z",
# Timelines (parse dates as ISO).
{
"confirmed": Timeline(
{
datetime.strptime(date, "%m/%d/%y").isoformat() + "Z": amount
for date, amount in timelines["confirmed"].items()
}
),
"deaths": Timeline(
{
datetime.strptime(date, "%m/%d/%y").isoformat() + "Z": amount
for date, amount in timelines["deaths"].items()
}
),
"recovered": Timeline({}),
},
)
)
# Finally, return the locations.
return locations
|
24272f06ca3732f053d6efcc41a31ec205603a27
| 3,637,850
|
import numpy as np
def MDAPE(y_true, y_pred, multioutput='raw_values'):
"""
calculate Median Absolute Percentage Error (MDAPE).
:param y_true: array-like of shape = (n_samples, *)
Ground truth (correct) target values.
:param y_pred: array-like of shape = (n_samples, *)
Estimated target values.
:param multioutput: string in ['raw_values', 'uniform_average']
:return:float or ndarray of floats
A non-negative floating point value (the best value is 0.0), or an
array of floating point values, one for each individual target.
"""
y_true, y_pred, original_shape = _standardize_input(y_true, y_pred, multioutput)
output_errors = np.median(100 * np.abs((y_true - y_pred) / (y_true + EPSILON)), axis=0,)
if multioutput == 'raw_values':
return output_errors.reshape(original_shape)
return np.mean(output_errors)
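# Hedged usage sketch (assumes the module-level helpers `_standardize_input` and `EPSILON`
# from the original source are available; the arrays below are made-up examples).
# MDAPE(np.array([100.0, 200.0]), np.array([110.0, 180.0]))
# -> roughly 10.0, since both absolute percentage errors are 10%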
|
05cfbef6bd3e63ca151a584dc25b9b6574d2aa37
| 3,637,851
|
def read_line1(line):
"""! Function read_line1
Reads as argument a string formatted as a Line 1 in SEISAN's Nordic format
Returns a Hypocenter dataclass with all the fields in a SEISAN's Line 1
@param[in] line string with SEISAN's Nordic hypocenter format (Line 1)
@return Hypocenter dataclass
"""
if len(line) != 81:
print('ERROR: invalid line length')
if line[79] != '1':
print('ERROR: invalid line type')
year = int(line[1:5])
month = int(line[6:8])
day = int(line[8:10])
fixed_time = line[10] # 'F if origin time fixed'
hour = int(line[11:13])
minute = int(line[13:15])
if not line[16:20].isspace():
second = float(line[16:20])
else:
second = None
location_model = line[20]
distance_indicator = line[21]
event_type = line[22] # blank for earthquake, 'E' for explosion
if not line[23:30].isspace():
latitude = float(line[23:30])
else:
latitude = None
if not line[30:38].isspace():
longitude = float(line[30:38])
else:
longitude = None
if not line[38:43].isspace():
depth = float(line[38:43])
else:
depth = None
depth_indicator = line[43] # blank free depth, 'F' fixed, 'S' starting
locating_indicator = line[44] # blank free depth, 'F' fixed, 'S' starting, '*' do not locate
locating_agency = line[45:48]
if not line[48:51].isspace():
num_sta = int(line[48:51])
else:
num_sta = None
if not line[51:55].isspace():
rms = float(line[51:55])
else:
rms = None
if not line[55:59].isspace():
mag1 = float(line[55:59])
mag_type1 = 'M' + line[59]
mag_agency1 = line[60:63]
else:
mag1 = None
mag_type1 = ' '
mag_agency1 = ' '
if not line[63:67].isspace():
mag2 = float(line[63:67])
mag_type2 = 'M' + line[67]
mag_agency2 = line[68:71]
else:
mag2 = None
mag_type2 = ' '
mag_agency2 = ' '
if not line[71:75].isspace():
mag3 = float(line[71:75])
mag_type3 = 'M' + line[75]
mag_agency3 = line[76:79]
else:
mag3 = None
mag_type3 = ' '
mag_agency3 = ' '
return Hypocenter(year, month, day, fixed_time, hour, minute, second,
location_model, distance_indicator, event_type, latitude, longitude, depth, depth_indicator,
locating_indicator, locating_agency, num_sta, rms,
mag1, mag_type1, mag_agency1, mag2, mag_type2, mag_agency2, mag3, mag_type3, mag_agency3)
|
871f468c2ec4dd9e0a5e8784d2beb7dd958d068d
| 3,637,854
|
def getInfo_insert(sql : str, tableInfo : table_info_module.TableInfo) -> tuple:
"""테이블 이름과 컬럼을 반환합니다."""
sql = string_module.removeNoise(sql)
tableName = string_module.getParenthesesContext2(sql, "INSERT INTO ", " ")
columns = tableInfo[tableName]
return (tableName, columns)
|
25f2087b5fbb15ab1012d3f37749430a74e6faaa
| 3,637,858
|
def compute_flow_for_supervised_loss(
feature_model,
flow_model,
batch,
training
):
"""Compute flow for an image batch.
Args:
feature_model: A model to compute features for flow.
flow_model: A model to compute flow.
batch: A tf.tensor of shape [b, seq, h, w, c] holding a batch of triplets.
training: bool that tells the model to use training or inference code.
Returns:
A tuple consisting of the images, the extracted features, the estimated
flows, and the upsampled refined flows.
"""
feature_dict = feature_model(batch[:, 0],
batch[:, 1],
training=training)
return flow_model(feature_dict, training=training)
|
a74f392c1d4e234fdb66d18e63d7c733ec6669a7
| 3,637,859
|
def farey_sequence(n):
"""Return the nth Farey sequence as order pairs of the form (N,D) where `N' is the numerator and `D' is the denominator."""
a, b, c, d = 0, 1, 1, n
sequence=[(a,b)]
while (c <= n):
k = int((n + b) / d)
a, b, c, d = c, d, (k*c-a), (k*d-b)
sequence.append( (a,b) )
return sequence
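# Hedged usage sketch (assumed example): the 3rd Farey sequence as (numerator, denominator) pairs.
if __name__ == "__main__":
    print(farey_sequence(3))  # [(0, 1), (1, 3), (1, 2), (2, 3), (1, 1)]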
|
d55bb90d05b4930d05a83dac9feb58e747288754
| 3,637,861
|
import torch.nn as nn
def make_vgg19_block(block):
"""Builds a vgg19 block from a dictionary
Args:
block: a dictionary
"""
layers = []
for i in range(len(block)):
one_ = block[i]
for k, v in one_.items():
if 'pool' in k:
layers += [nn.MaxPool2d(kernel_size=v[0], stride=v[1],
padding=v[2])]
else:
conv2d = nn.Conv2d(in_channels=v[0], out_channels=v[1],
kernel_size=v[2], stride=v[3],
padding=v[4])
layers += [conv2d, nn.ReLU(inplace=True)]
return nn.Sequential(*layers)
|
512543dfb32f9ed97b6ce99dd6ffc692d0ffa3b8
| 3,637,862
|
def tld():
"""
Return a random tld (Top Level Domain) from the tlds list below
:return: str
"""
tlds = ('com', 'org', 'edu', 'gov', 'co.uk', 'net', 'io', 'ru', 'eu',)
return pickone(tlds)
|
8e9341058ccf79d991aab6317ab3c29858f00fdf
| 3,637,864
|
def validate_boolean(option, value):
"""Validates that 'value' is 'true' or 'false'.
"""
if isinstance(value, bool):
return value
elif isinstance(value, basestring):
if value not in ('true', 'false'):
raise ConfigurationError("The value of '%s' must be "
"'true' or 'false'" % (option,))
return value == 'true'
raise TypeError("Wrong type for %s, value must "
"be a boolean or string representation" % (option,))
|
85b9a256e57ce7715fceea556ff7ad48b05bd996
| 3,637,865
|
import numpy as np
def A2RT(room_size, A_wall_all, F_abs, c=343, A_air=None, estimator='Norris_Eyring'):
""" Estimate reverberation time based on room acoustic parameters,
translated from matlab code developed by Douglas R Campbell
Args:
room_size: three-dimension measurement of shoebox room
A_wall_all: sound absorption coefficients of six wall surfaces
c: sound speed, default to 343 m/s
F_abs: center frequency of each frequency band
        A_air: absorption coefficients of air; if not specified, it will be
            calculated based on a humidity of 50
estimator: estimate methods, choose from [Sabine,SabineAir,
SabineAirHiAbs,Norris_Eyring], default to Norris_Eyring
"""
if A_air is None:
humidity = 50
A_air = (5.5e-4)*(50/humidity)*((F_abs/1000)**1.7)
Lx, Ly, Lz = room_size
V_room = np.prod(room_size) # Volume of room m^3
S_wall_all = [Lx*Lz, Ly*Lz, Lx*Ly]
S_room = 2.*np.sum(S_wall_all) # Total area of shoebox room surfaces
# Effective absorbing area of room surfaces at each frequency
Se = (S_wall_all[1]*(A_wall_all[:, 0] + A_wall_all[:, 1])
+ S_wall_all[0]*(A_wall_all[:, 2] + A_wall_all[:, 3])
+ S_wall_all[2]*(A_wall_all[:, 4] + A_wall_all[:, 5]))
A_mean = Se/S_room # Mean absorption of wall surfaces
# Mean absorption of air averaged across frequency.
# A_air_mean = np.mean(A_air)
# Mean Free Path (Average distance between succesive reflections) (Ref A4)
# MFP = 4*V_room/S_room
# Reverberation time estimate
# Detect anechoic case and force RT60 all zeros
if np.linalg.norm(1-A_mean) < EPSILON:
RT60 = np.zeros(F_abs.shape)
else: # Select an estimation equation
if estimator == 'Sabine':
RT60 = np.divide((55.25/c)*V_room, Se) # Sabine equation
if estimator == 'SabineAir':
# Sabine equation (SI units) adjusted for air
RT60 = np.divide((55.25/c)*V_room, (4*A_air*V_room+Se))
if estimator == 'SabineAirHiAbs':
# % Sabine equation (SI units) adjusted for air and high absorption
RT60 = np.divide(55.25/c*V_room,
4*A_air*V_room+np.multiply(Se, (1+A_mean/2)))
if estimator == 'Norris_Eyring':
# Norris-Eyring estimate adjusted for air absorption
RT60 = np.divide(55.25/c*V_room,
4*A_air*V_room-S_room*np.log(1-A_mean+EPSILON))
return RT60
|
8a8df0bf8f91c93dfb7480775ea9eadc552edcfe
| 3,637,866
|
def GetVideoFromRate(content):
"""
    Extract video information from the source of the video search results page
"""
    # av id and title
regular1 = r'<a href="/video/av(\d+)/" target="_blank" class="title" [^>]*>(.*)</a>'
info1 = GetRE(content, regular1)
    # view count
regular2 = r'<i class="b-icon b-icon-v-play" title=".+"></i><span number="([^"]+)">\1</span>'
info2 = GetRE(content, regular2)
    # favourites count
regular3 = r'<i class="b-icon b-icon-v-fav" title=".+"></i><span number="([^"]+)">\1</span></span>'
info3 = GetRE(content, regular3)
    # danmaku (bullet comment) count
regular4 = r'<i class="b-icon b-icon-v-dm" title=".+"></i><span number="([^"]+)">\1</span>'
info4 = GetRE(content, regular4)
    # upload date
regular5 = r'<span class="v-date" title=".+">(.+)</span>'
info5 = GetRE(content, regular5)
    # cover image
regular6 = r'<img data-img="(.+)" [^>]*>'
info6 = GetRE(content, regular6)
    # uploader id and name
regular7 = r'<a class="v-author" href=".+/(\d+).+">(.+)</a>'
info7 = GetRE(content, regular7)
    # NOTE: all of the extracted lists above should have the same length; this could be asserted here
    videoNum = len(info1) # number of videos
videoList = []
for i in range(videoNum):
video_t = Video()
video_t.aid = getint(info1[i][0])
video_t.title = info1[i][1]
video_t.guankan = getint(info2[i])
video_t.shoucang = getint(info3[i])
video_t.danmu = getint(info4[i])
video_t.date = info5[i]
video_t.cover = info6[i]
video_t.author = User(info7[i][0], info7[i][1])
videoList.append(video_t)
return videoList
|
446343bc3f2597310b7e4b22dd784bb0bc9b06ea
| 3,637,867
|