signature
stringlengths 29
44.1k
| implementation
stringlengths 0
85.2k
|
|---|---|
def _prepare_statement(sql_statement, parameters):
    """Prepare the specified SQL statement, replacing the placeholders by the
    value of the given parameters.

    @param sql_statement: the string expression of a SQL statement.
    @param parameters: a dictionary of parameters where the key represents
        the name of a parameter and the value represents the value of this
        parameter to replace in each placeholder of this parameter in the
        SQL statement.  Expanded entries are removed from this dictionary
        as a side effect.
    @return: a string representation of the SQL statement where the
        placeholders have been replaced by the value of the corresponding
        variables, depending on the type of these variables.
    """
    placeholders = RdbmsConnection._get_placeholders(sql_statement, parameters)
    # BUG FIX: dict.iteritems() is Python 2 only; use items().  Iterate a
    # snapshot (list) so deleting from `parameters` below stays safe even
    # if the two mappings share storage.
    for variable_name, (variable_type, variable_value) in list(placeholders.items()):
        # Only expand parameters whose value corresponds to a list.
        if isinstance(variable_value, (list, set, tuple)):
            sql_statement = RdbmsConnection._replace_placeholder(
                sql_statement, (variable_name, variable_type, variable_value))
            # Remove this parameter as it has been expanded in the SQL expression.
            del parameters[variable_name]
    return sql_statement
|
def load(self, name=None):
    """Restore previously pickled state from ``<name>.pkl``.

    @param name: base file name (without the ``.pkl`` suffix); defaults
        to ``self.name`` when falsy/omitted.
    @return: ``self``, with ``self.res`` replaced by the unpickled
        object's ``res`` attribute.
    """
    import pickle
    name = name if name else self.name
    # BUG FIX: use a context manager so the file handle is always closed
    # (the original opened the file inline and leaked the handle).
    with open(name + '.pkl', 'rb') as fh:
        s = pickle.load(fh)
    # Only the result payload is restored; the pickled object's class is
    # deliberately disregarded.
    self.res = s.res
    return self
|
def fetch_page_async(self, page_size, **q_options):
    """Fetch a page of results.

    This is the asynchronous version of Query.fetch_page().
    """
    fixed_query = self._fix_namespace()
    return fixed_query._fetch_page_async(page_size, **q_options)
|
def get_grammar(self):
    """Build and return the pyparsing grammar describing the UAI file.

    The grammar is constructed incrementally: partially-built grammars are
    parsed against ``self.network`` as soon as possible, because counts in
    the preamble (number of variables/functions, per-function scope and
    table sizes) determine how many tokens the remaining grammar must match.

    Side effects: sets ``self.no_variables`` and ``self.no_functions``.
    """
    # Preamble: "<network name> <number of variables>".
    network_name = Word(alphas).setResultsName('network_name')
    no_variables = Word(nums).setResultsName('no_variables')
    grammar = network_name + no_variables
    # Parse the partial grammar now: the variable count fixes how many
    # domain-size tokens follow.
    self.no_variables = int(grammar.parseString(self.network)['no_variables'])
    domain_variables = (Word(nums) * self.no_variables).setResultsName('domain_variables')
    grammar += domain_variables
    no_functions = Word(nums).setResultsName('no_functions')
    grammar += no_functions
    self.no_functions = int(grammar.parseString(self.network)['no_functions'])
    integer = Word(nums).setParseAction(lambda t: int(t[0]))
    # One scope specification per function: a count followed by that many
    # integers.
    for function in range(0, self.no_functions):
        scope_grammar = Word(nums).setResultsName('fun_scope_' + str(function))
        grammar += scope_grammar
        # Re-parse to learn this function's scope size before extending.
        function_scope = grammar.parseString(self.network)['fun_scope_' + str(function)]
        function_grammar = ((integer) * int(function_scope)).setResultsName('fun_' + str(function))
        grammar += function_grammar
    # Decimal number: digits optionally followed by "." and more digits.
    floatnumber = Combine(Word(nums) + Optional(Literal(".") + Optional(Word(nums))))
    # One value table per function: a count followed by that many floats.
    for function in range(0, self.no_functions):
        no_values_grammar = Word(nums).setResultsName('fun_no_values_' + str(function))
        grammar += no_values_grammar
        no_values = grammar.parseString(self.network)['fun_no_values_' + str(function)]
        values_grammar = ((floatnumber) * int(no_values)).setResultsName('fun_values_' + str(function))
        grammar += values_grammar
    return grammar
|
def blast_records_to_object(blast_records):
    """Transform biopython's blast records into the blast objects defined
    in the django-blastplus app.

    Returns a list of BlastRecord objects mirroring the record/alignment/
    hsp hierarchy of the input.
    """
    transformed = []
    for record in blast_records:
        record_obj = BlastRecord(
            query=record.query,
            version=record.version,
            expect=record.expect,
            application=record.application,
            reference=record.reference,
        )
        for alignment in record.alignments:
            alignment_obj = Alignment(
                hit_def=alignment.hit_def,
                title=alignment.title,
                length=alignment.length,
            )
            for hsp in alignment.hsps:
                hsp_obj = Hsp(**{
                    'align_length': hsp.align_length,
                    'bits': hsp.bits,
                    'expect': hsp.expect,
                    'frame': hsp.frame,
                    'gaps': hsp.gaps,
                    'identities': hsp.identities,
                    'match': hsp.match,
                    'num_alignments': hsp.num_alignments,
                    'positives': hsp.positives,
                    'query': hsp.query,
                    'query_end': hsp.query_end,
                    'query_start': hsp.query_start,
                    'sbjct': hsp.sbjct,
                    'sbjct_end': hsp.sbjct_end,
                    'sbjct_start': hsp.sbjct_start,
                    'score': hsp.score,
                    'strand': hsp.strand,
                    'str': str(hsp),
                })
                alignment_obj.hsp_list.append(hsp_obj)
            record_obj.alignments.append(alignment_obj)
        transformed.append(record_obj)
    return transformed
|
def __construct_object(self, obj, exclude_list=None):
    """Loop over a dict/class object and parse its values into a new dict.

    Keys starting with an underscore, the key ``'json_exclude_list'``, and
    keys listed in ``exclude_list`` are skipped; remaining keys are
    camel-cased and their values converted via ``self.__iterate_value``.

    @param obj: mapping whose items are to be converted.
    @param exclude_list: optional iterable of keys to skip.
    @return: new dict mapping camel-cased keys to converted values.
    """
    # Avoid the mutable default argument; an empty tuple behaves the same
    # for the membership test below.
    if exclude_list is None:
        exclude_list = ()
    new_obj = {}
    for key, value in obj.items():
        # BUG FIX: the original compared with "key is 'json_exclude_list'",
        # an identity check that only works by accident of string
        # interning; equality (==) is the correct comparison.
        if str(key).startswith('_') or key == 'json_exclude_list' or key in exclude_list:
            continue
        new_obj[self.camel_case(key)] = self.__iterate_value(value)
    return new_obj
|
def checkStock(self):
    """Check the stocks listed in ``self.preferences`` against the broker's
    left-hand product panel, recording current prices and sentiment.

    Returns the updated ``self.stocks`` list, or None when no preferences
    are configured.
    """
    if not self.preferences:
        logger.debug("no preferences")
        return None
    # Re-parse the stock table from the live page HTML.
    soup = BeautifulSoup(self.xpath(path['stock-table'])[0].html, "html.parser")
    count = 0
    # iterate through product in left panel
    for product in soup.select("div.tradebox"):
        prod_name = product.select("span.instrument-name")[0].text
        # Case-insensitive substring match against the user's preferences.
        stk_name = [x for x in self.preferences if x.lower() in prod_name.lower()]
        if not stk_name:
            continue
        name = prod_name
        # Create a Stock entry the first time this product is seen.
        if not [x for x in self.stocks if x.product == name]:
            self.stocks.append(Stock(name))
        stock = [x for x in self.stocks if x.product == name][0]
        if 'tradebox-market-closed' in product['class']:
            stock.market = False
        if not stock.market:
            logger.debug("market closed for %s" % stock.product)
            continue
        sell_price = product.select("div.tradebox-price-sell")[0].text
        buy_price = product.select("div.tradebox-price-buy")[0].text
        # Sentiment is displayed as a percentage; normalise to 0..1.
        sent = int(product.select(path['sent'])[0].text.strip('%')) / 100
        stock.new_rec([sell_price, buy_price, sent])
        count += 1
    # NOTE(review): the f-prefix here is redundant — the string has no
    # braces and is %-formatted instead.
    logger.debug(f"added %d stocks" % count)
    return self.stocks
|
def last_activity_time(self):
    """Return the time of the user's most recent activity.

    :return: the user's last activity time as a Unix timestamp, or -1 when
        no activity item is found on the profile page.
    :rtype: int
    """
    self._make_soup()
    # The newest activity entry carries its timestamp in a data attribute.
    act = self.soup.find('div', class_='zm-profile-section-item zm-item clearfix')
    return int(act['data-time']) if act is not None else -1
|
def thumbnail_url(source, alias):
    """Return the thumbnail url for a source file using an aliased set of
    thumbnail options.

    If no matching alias is found, returns an empty string.

    Example usage::

        <img src="{{ person.photo|thumbnail_url:'small' }}" alt="">
    """
    try:
        thumbnail = get_thumbnailer(source)[alias]
    except Exception:
        # Template filters must not raise during lookup; fall back to ''.
        return ''
    return thumbnail.url
|
def _get_py_dictionary(self, var, names=None, used___dict__=False):
    '''Collect the attributes of ``var`` into a dict for variable display.

    :param var: the object whose attributes are inspected.
    :param names: optional pre-computed attribute names; when falsy they
        are obtained from ``self.get_names(var)``.
    :param used___dict__: when True, attributes are read via
        ``obj.__dict__[name]`` instead of ``getattr(obj, name)``.
    :return: tuple ``(attr_dict, used___dict__)`` where ``attr_dict`` maps
        attribute names (as str) to their values, and ``used___dict__``
        reports which access style was used.
    '''
    # TODO: Those should be options (would fix https://github.com/Microsoft/ptvsd/issues/66).
    filter_private = False
    filter_special = True
    filter_function = True
    filter_builtin = True
    if not names:
        names, used___dict__ = self.get_names(var)
    d = {}
    # Be aware that the order in which the filters are applied attempts to
    # optimize the operation by removing as many items as possible in the
    # first filters, leaving fewer items for later filters
    if filter_builtin or filter_function:
        for name in names:
            try:
                name_as_str = name
                if name_as_str.__class__ != str:
                    # Non-string keys are displayed via their repr().
                    name_as_str = '%r' % (name_as_str,)
                if filter_special:
                    if name_as_str.startswith('__') and name_as_str.endswith('__'):
                        continue
                if filter_private:
                    if name_as_str.startswith('_') or name_as_str.endswith('__'):
                        continue
                if not used___dict__:
                    attr = getattr(var, name)
                else:
                    attr = var.__dict__[name]
                # filter builtins?
                if filter_builtin:
                    if inspect.isbuiltin(attr):
                        continue
                # filter functions?
                if filter_function:
                    if inspect.isroutine(attr) or isinstance(attr, MethodWrapperType):
                        continue
            except:  # if some error occurs getting it, let's put it to the user.
                # NOTE(review): `StringIO.StringIO` is the Python 2 module
                # spelling; on Python 3 this would need `io.StringIO`.
                strIO = StringIO.StringIO()
                traceback.print_exc(file=strIO)
                attr = strIO.getvalue()
            d[name_as_str] = attr
    return d, used___dict__
|
def get_interval(note, interval, key='C'):
    """Return the note an interval (in half notes) away from the given note.

    This will produce mostly theoretical sound results, but you should use
    the minor and major functions to work around the corner cases.

    @param note: starting note name (e.g. 'C', 'F#').
    @param interval: distance in half notes.
    @param key: key whose scale anchors the lookup.
    """
    # BUG FIX: map() returns a lazy iterator on Python 3, which cannot be
    # indexed with intervals[...] below; build a real list instead.
    intervals = [(notes.note_to_int(key) + x) % 12 for x in [0, 2, 4, 5, 7, 9, 11]]
    key_notes = keys.get_notes(key)
    for x in key_notes:
        if x[0] == note[0]:
            result = (intervals[key_notes.index(x)] + interval) % 12
            if result in intervals:
                return key_notes[intervals.index(result)] + note[1:]
            else:
                # Not a diatonic pitch: take the next scale degree up and
                # diminish it.
                return notes.diminish(key_notes[intervals.index((result + 1) % 12)] + note[1:])
|
def read_ASCII_cols(infile, cols=[1, 2, 3]):  # noqa: N802
    """Interpret input ASCII file to return arrays for specified columns.

    Notes
    -----
    The specification of the columns should be expected to have lists for
    each 'column', with all columns in each list combined into a single
    entry.

    For example::

        cols = ['1,2,3', '4,5,6', 7]

    where '1,2,3' represent the X/RA values, '4,5,6' represent the Y/Dec
    values and 7 represents the flux value for a total of 3 requested
    columns of data to be returned.

    Returns
    -------
    outarr : list of arrays
        The return value will be a list of numpy arrays, one for each
        'column'.
    """
    # build dictionary representing format of each row
    # Format of dictionary: {'colname': col_number, ...}
    # This provides the mapping between column name and column number
    coldict = {}
    with open(infile, 'r') as f:
        flines = f.readlines()
    for l in flines:  # interpret each line from catalog file
        # NOTE(review): l[0].lstrip() operates on a single character, so
        # the lstrip() is a no-op here — presumably '#' was meant to be
        # tested after stripping the whole line; confirm against inputs.
        if l[0].lstrip() == '#' or l.lstrip() == '':
            continue
        else:
            # convert first row of data into column definitions using indices
            coldict = {str(i + 1): i for i, _ in enumerate(l.split())}
            break
    numcols = len(cols)
    outarr = [[] for _ in range(numcols)]
    convert_radec = False
    # Now, map specified columns to columns in file and populate output arrays
    for l in flines:  # interpret each line from catalog file
        l = l.strip()
        lspl = l.split()
        # skip blank lines, comment lines, or lines with
        # fewer columns than requested by user
        if not l or len(lspl) < numcols or l[0] == '#' or "INDEF" in l:
            continue
        # For each 'column' requested by user, pull data from row
        for c, i in zip(cols, list(range(numcols))):
            cnames = parse_colname(c)
            if len(cnames) > 1:
                # interpret multi-column specification as one value
                outval = ''
                for cn in cnames:
                    cnum = coldict[cn]
                    cval = lspl[cnum]
                    outval += cval + ' '
                outarr[i].append(outval)
                convert_radec = True
            else:
                # pull single value from row for this column
                cnum = coldict[cnames[0]]
                if isfloat(lspl[cnum]):
                    cval = float(lspl[cnum])
                else:
                    cval = lspl[cnum]
                    # Check for multi-column values given as "nn:nn:nn.s"
                    if ':' in cval:
                        cval = cval.replace(':', ' ')
                        convert_radec = True
                outarr[i].append(cval)
    # convert multi-column RA/Dec specifications
    if convert_radec:
        outra = []
        outdec = []
        for ra, dec in zip(outarr[0], outarr[1]):
            radd, decdd = radec_hmstodd(ra, dec)
            outra.append(radd)
            outdec.append(decdd)
        outarr[0] = outra
        outarr[1] = outdec
    # convert all lists to numpy arrays
    for c in range(len(outarr)):
        outarr[c] = np.array(outarr[c])
    return outarr
|
def _set_show_mpls_route(self, v, load=False):
    """Setter method for show_mpls_route, mapped from YANG variable
    /brocade_mpls_rpc/show_mpls_route (rpc).

    If this variable is read-only (config: false) in the source YANG file,
    then _set_show_mpls_route is considered as a private method. Backends
    looking to populate this variable should do so via calling
    thisObj._set_show_mpls_route() directly.
    """
    # Normalise values that carry their own YANG user type.
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        # Wrap the value in the generated dynamic class; rejects values
        # incompatible with the YANG rpc type.
        t = YANGDynClass(v, base=show_mpls_route.show_mpls_route, is_leaf=True, yang_name="show-mpls-route", rest_name="show-mpls-route", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions={u'tailf-common': {u'hidden': u'rpccmd', u'actionpoint': u'showMplsRoute'}}, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='rpc', is_config=True)
    except (TypeError, ValueError):
        # Re-raise with the generated-type description so callers can see
        # how a compatible value must be constructed.
        raise ValueError({'error-string': """show_mpls_route must be of a type compatible with rpc""", 'defined-type': "rpc", 'generated-type': """YANGDynClass(base=show_mpls_route.show_mpls_route, is_leaf=True, yang_name="show-mpls-route", rest_name="show-mpls-route", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions={u'tailf-common': {u'hidden': u'rpccmd', u'actionpoint': u'showMplsRoute'}}, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='rpc', is_config=True)""", })
    self.__show_mpls_route = t
    if hasattr(self, '_set'):
        self._set()
|
def get_full_filename_by_suffixes(dir_src, suffixes):
    # type: (AnyStr, Union[AnyStr, List[AnyStr]]) -> Optional[List[AnyStr]]
    """Get full file names with the given suffixes in the given directory.

    Args:
        dir_src: directory path
        suffixes: wanted suffixes

    Returns:
        full file names with the given suffixes as a list, or None when the
        directory holds no matching files.
    """
    matched = FileClass.get_filename_by_suffixes(dir_src, suffixes)
    if matched is None:
        return None
    return [dir_src + os.sep + fname for fname in matched]
|
def FileEntryExistsByPathSpec(self, path_spec):
    """Determines if a file entry for a path specification exists.

    Args:
        path_spec (PathSpec): path specification.

    Returns:
        bool: True if the file entry exists.
    """
    location = getattr(path_spec, 'location', None)
    if location is None or not location.startswith(self.LOCATION_ROOT):
        return False
    # The root location itself always exists.
    if len(location) == 1:
        return True
    # TAR member names do not carry the leading path separator that the
    # location string does.
    member_name = location[1:]
    try:
        self._tar_file.getmember(member_name)
    except KeyError:
        # Not an explicit member; it may still be a virtual directory,
        # i.e. a prefix of some member's name.
        return any(name.startswith(member_name) for name in self._tar_file.getnames())
    return True
|
def data_filler_company(self, number_of_rows, db):
    '''creates and fills the table with company data

    Generates ``number_of_rows`` fake company records with Faker and saves
    each one through the ``db`` object; errors are logged, not raised.
    '''
    try:
        company = db
        # NOTE(review): data_list is built but never used — presumably a
        # leftover from a batch-insert variant; confirm before removing.
        data_list = list()
        for i in range(0, number_of_rows):
            post_comp_reg = {
                "id": rnd_id_generator(self),
                "name": self.faker.company(),
                "date": self.faker.date(pattern="%d-%m-%Y"),
                "email": self.faker.company_email(),
                "domain": self.faker.safe_email(),
                "city": self.faker.city(),
            }
            company.save(post_comp_reg)
        # Log completion once all rows have been written.
        logger.warning('companies Commits are successful after write job!', extra=d)
    except Exception as e:
        logger.error(e, extra=d)
|
def _parse_canonical_decimal128(doc):
    """Decode a JSON decimal128 to bson.decimal128.Decimal128."""
    # Read the payload first so a missing key raises KeyError, as before.
    value = doc['$numberDecimal']
    if len(doc) != 1:
        raise TypeError('Bad $numberDecimal, extra field(s): %s' % (doc,))
    if not isinstance(value, string_type):
        raise TypeError('$numberDecimal must be string: %s' % (doc,))
    return Decimal128(value)
|
def info(ctx):
    """Display status of OATH application."""
    controller = ctx.obj['controller']
    version = controller.version
    click.echo('OATH version: {}.{}.{}'.format(version[0], version[1], version[2]))
    lock_state = 'enabled' if controller.locked else 'disabled'
    click.echo('Password protection ' + lock_state)
    remembered_keys = ctx.obj['settings'].get('keys', {})
    # Report when ykman has stored the password for this key locally.
    if controller.locked and controller.id in remembered_keys:
        click.echo('The password for this YubiKey is remembered by ykman.')
    if ctx.obj['dev'].is_fips:
        fips_state = 'Yes' if controller.is_in_fips_mode else 'No'
        click.echo('FIPS Approved Mode: {}'.format(fips_state))
|
def output(id, url):
    """View the files from a dataset."""
    data_source = get_data_object(id, use_data_config=False)
    if not data_source:
        sys.exit()
    data_url = "%s/%s" % (floyd.floyd_web_host, data_source.name)
    if url:
        # Caller only asked for the URL to be printed.
        floyd_logger.info(data_url)
        return
    floyd_logger.info("Opening output directory in your browser ...")
    webbrowser.open(data_url)
|
def frange(start, stop, step=1.0):
    """Implementation of Python's ``range()`` function which works with floats.

    Reference to this implementation: https://stackoverflow.com/a/36091634

    :param start: start value
    :type start: float
    :param stop: end value
    :type stop: float
    :param step: increment
    :type step: float
    :return: float
    :rtype: generator
    """
    # Each value is computed as base + count * step (rather than repeated
    # addition) to limit float error accumulation.
    base = float(start)
    current = base
    half_step = step / 2.0
    count = 0.0
    # The first value is always yielded.
    yield current
    while current + half_step < stop:
        count += 1.0
        current = base + count * step
        yield current
    # Yield the stop value itself if it was not reached.
    if stop > current:
        yield stop
|
def _is_protein(pe):
    """Return True if the element is a protein"""
    # Check Protein before ProteinReference, interface class before impl,
    # preserving the original short-circuit evaluation order.
    for class_name in ('Protein', 'ProteinReference'):
        if isinstance(pe, _bp(class_name)) or isinstance(pe, _bpimpl(class_name)):
            return True
    return False
|
def get_column(self, col):
    """Loop over files getting the requested dataset values from each.

    Parameters
    ----------
    col : string
        Name of the dataset to be returned

    Returns
    -------
    numpy array
        Values from the dataset, filtered if requested and
        concatenated in order of file list
    """
    logging.info('getting %s' % col)
    vals = []
    for f in self.files:
        d = FileData(f, group=self.group, columnlist=self.columns, filter_func=self.filter_func)
        vals.append(d.get_column(col))
        # Close each file since h5py has an upper limit on the number of
        # open file objects (approx. 1000)
        d.close()
    logging.info('- got %i values' % sum(len(v) for v in vals))
    return np.concatenate(vals)
|
def _gc_dead_sinks(self):
    """Remove any dead weakrefs."""
    # Rebuild the list in place (slice assignment) so external references
    # to the list object itself remain valid; a ref whose referent has
    # been collected calls back as None.
    self._sinks[:] = [ref for ref in self._sinks if ref() is not None]
|
def price_value_renderer(value, currency=None, **options):
    """Format price value, with current locale and CURRENCY in settings"""
    # Fall back to the project-wide CURRENCY setting (default USD).
    chosen_currency = currency or getattr(settings, 'CURRENCY', 'USD')
    return format_currency(value, chosen_currency, locale=utils.get_current_locale())
|
def _decode(self, s):
    '''This converts from the external coding system (as passed to
    the constructor) to the internal one (unicode).'''
    # Guard clause: without a decoder this screen cannot accept bytes.
    if self.decoder is None:
        raise TypeError("This screen was constructed with encoding=None, "
                        "so it does not handle bytes.")
    return self.decoder.decode(s)
|
async def main() -> None:
    """Create the aiohttp session and run the example.

    Logs in to MyQ, prints details for every device and — unless JSON_DUMP
    is set — exercises each device by opening and then closing it, polling
    its state while it moves.
    """
    # Map level names ("DEBUG", ...) back to their numeric values so the
    # LOGLEVEL string can be applied to basicConfig.
    loglevels = dict((logging.getLevelName(level), level) for level in [10, 20, 30, 40, 50])
    logging.basicConfig(level=loglevels[LOGLEVEL], format='%(asctime)s:%(levelname)s:\t%(name)s\t%(message)s')
    async with ClientSession() as websession:
        try:
            myq = await pymyq.login(MYQ_ACCOUNT_EMAIL, MYQ_ACCOUNT_PASSWORD, MYQ_BRAND, websession)
            devices = await myq.get_devices()
            for idx, device in enumerate(devices):
                print('Device #{0}: {1}'.format(idx + 1, device.name))
                print('--------')
                print('Brand: {0}'.format(device.brand))
                print('Type: {0}'.format(device.type))
                print('Serial: {0}'.format(device.serial))
                print('Device ID: {0}'.format(device.device_id))
                print('Parent ID: {0}'.format(device.parent_id))
                print('Online: {0}'.format(device.available))
                print('Unattended Open: {0}'.format(device.open_allowed))
                print('Unattended Close: {0}'.format(device.close_allowed))
                print()
                print('Current State: {0}'.format(device.state))
                if JSON_DUMP:
                    # Dump-only mode: show the raw device payload instead
                    # of actuating the hardware.
                    print(json.dumps(device._device, indent=4))
                else:
                    if device.state != STATE_OPEN:
                        print('Opening the device...')
                        await device.open()
                        print(' 0 Current State: {0}'.format(device.state))
                        # Poll for up to ~30s while the device opens.
                        for waited in range(1, 30):
                            if device.state == STATE_OPEN:
                                break
                            await asyncio.sleep(1)
                            await device.update()
                            print(' {} Current State: {}'.format(waited, device.state))
                        # Give the device time to settle before re-reading.
                        await asyncio.sleep(10)
                        await device.update()
                        print()
                        print('Current State: {0}'.format(device.state))
                    if device.state != STATE_CLOSED:
                        print('Closing the device...')
                        await device.close()
                        print(' 0 Current State: {0}'.format(device.state))
                        # Poll for up to ~30s while the device closes.
                        for waited in range(1, 30):
                            if device.state == STATE_CLOSED:
                                break
                            await asyncio.sleep(1)
                            await device.update()
                            print(' {} Current State: {}'.format(waited, device.state))
                        await asyncio.sleep(10)
                        await device.update()
                        print()
                        print('Current State: {0}'.format(device.state))
        except MyQError as err:
            print(err)
|
def _solution_factory(self, basis_kwargs, coefs_array, nodes, problem, result):
    """Construct a representation of the solution to the boundary value problem.

    Parameters
    ----------
    basis_kwargs : dict(str:)
        Keyword arguments used to build the basis functions/derivatives.
    coefs_array : numpy.ndarray
        Flat array of basis coefficients, one group per ODE.
    nodes : collocation nodes, passed through to the solution object.
    problem : TwoPointBVPLike
    result : OptimizeResult

    Returns
    -------
    solution : SolutionLike
    """
    coefs_list = self._array_to_list(coefs_array, problem.number_odes)
    derivatives = self._construct_derivatives(coefs_list, **basis_kwargs)
    functions = self._construct_functions(coefs_list, **basis_kwargs)
    residual_func = self._interior_residuals_factory(derivatives, functions, problem)
    return solutions.Solution(basis_kwargs, functions, nodes, problem, residual_func, result)
|
def decomposed_diffusion_program(qubits: List[int]) -> Program:
    r"""Constructs the diffusion operator used in Grover's Algorithm, acted on both sides by
    a Hadamard gate on each qubit. Note that this means that the matrix representation of this
    operator is diag(1, -1, ..., -1). In particular, this decomposes the diffusion operator,
    which is a :math:`2^{len(qubits)} \times 2^{len(qubits)}` sparse matrix, into
    :math:`\mathcal{O}(len(qubits)^2)` single and two qubit gates.

    See C. Lavor, L.R.U. Manssur, and R. Portugal (2003) `Grover's Algorithm: Quantum Database
    Search`_ for more information.

    .. _`Grover's Algorithm: Quantum Database Search`: https://arxiv.org/abs/quant-ph/0301079

    :param qubits: A list of ints corresponding to the qubits to operate on.
        The operator operates on bitstrings of the form
        ``|qubits[0], ..., qubits[-1]>``.
    """
    program = Program()
    if len(qubits) == 1:
        # Single-qubit case: diag(1, -1) is just the Z gate.
        program.inst(Z(qubits[0]))
    else:
        program.inst([X(q) for q in qubits])
        program.inst(H(qubits[-1]))
        program.inst(RZ(-np.pi, qubits[0]))
        # Multi-controlled NOT decomposed into one- and two-qubit gates.
        program += (ControlledProgramBuilder().with_controls(qubits[:-1]).with_target(qubits[-1]).with_operation(X_GATE).with_gate_name(X_GATE_LABEL).build())
        program.inst(RZ(-np.pi, qubits[0]))
        program.inst(H(qubits[-1]))
        program.inst([X(q) for q in qubits])
    return program
|
def check_for_required_columns(problems: List, table: str, df: DataFrame) -> List:
    """Check that the given GTFS table has the required columns.

    Parameters
    ----------
    problems : list
        A four-tuple containing

        1. A problem type (string) equal to ``'error'`` or ``'warning'``;
           ``'error'`` means the GTFS is violated;
           ``'warning'`` means there is a problem but it is not a
           GTFS violation
        2. A message (string) that describes the problem
        3. A GTFS table name, e.g. ``'routes'``, in which the problem
           occurs
        4. A list of rows (integers) of the table's DataFrame where the
           problem occurs

    table : string
        Name of a GTFS table
    df : DataFrame
        The GTFS table corresponding to ``table``

    Returns
    -------
    list
        The ``problems`` list extended as follows.
        Check that the DataFrame contains the columns required by GTFS
        and append to the problems list one error for each column missing.
    """
    reference = cs.GTFS_REF
    required_columns = reference.loc[
        (reference["table"] == table) & reference["column_required"], "column"
    ].values
    # One error entry per missing required column.
    missing = (c for c in required_columns if c not in df.columns)
    for col in missing:
        problems.append(["error", f"Missing column {col}", table, []])
    return problems
|
def dict_to_dataset(data, *, attrs=None, library=None, coords=None, dims=None):
    """Convert a dictionary of numpy arrays to an xarray.Dataset.

    Parameters
    ----------
    data : dict[str] -> ndarray
        Data to convert. Keys are variable names.
    attrs : dict
        Json serializable metadata to attach to the dataset, in addition to defaults.
    library : module
        Library used for performing inference. Will be attached to the attrs metadata.
    coords : dict[str] -> ndarray
        Coordinates for the dataset
    dims : dict[str] -> list[str]
        Dimensions of each variable. The keys are variable names, values are lists of
        coordinates.

    Returns
    -------
    xr.Dataset

    Examples
    --------
    dict_to_dataset({'x': np.random.randn(4, 100), 'y': np.random.rand(4, 100)})
    """
    if dims is None:
        dims = {}
    data_vars = {
        name: numpy_to_data_array(values, var_name=name, coords=coords, dims=dims.get(name))
        for name, values in data.items()
    }
    return xr.Dataset(data_vars=data_vars, attrs=make_attrs(attrs=attrs, library=library))
|
def maybe_download_and_extract():
    """Download and extract the CIFAR tarball from Alex's website.

    Downloads DATA_URL into /tmp/cifar only when the archive is not
    already present, then extracts the archive into that directory.
    """
    dest_directory = "/tmp/cifar"
    # exist_ok avoids the racy exists()/makedirs() pair.
    os.makedirs(dest_directory, exist_ok=True)
    filename = DATA_URL.split('/')[-1]
    filepath = os.path.join(dest_directory, filename)
    if not os.path.exists(filepath):
        def _progress(count, block_size, total_size):
            # Single-line, carriage-return progress indicator.
            sys.stdout.write('\r>> Downloading %s %.1f%%' % (
                filename, float(count * block_size) / float(total_size) * 100.0))
            sys.stdout.flush()
        filepath, _ = urllib.request.urlretrieve(DATA_URL, filepath, _progress)
        print()
        statinfo = os.stat(filepath)
        print('Successfully downloaded', filename, statinfo.st_size, 'bytes.')
    # BUG FIX: close the tar file when done — the original opened it inline
    # and leaked the file handle.
    with tarfile.open(filepath, 'r:gz') as tar:
        tar.extractall(dest_directory)
|
def getFailureMessage(failure):
    """Return a short message based on L{twisted.python.failure.Failure}.

    Tries to find where the exception was triggered.
    """
    # BUG FIX: the original computed str(failure.type) and
    # failure.getErrorMessage() but discarded the results, so the
    # "%(exc)s"/"%(msg)s" templates below raised KeyError via locals().
    exc = str(failure.type)
    msg = failure.getErrorMessage()
    if len(failure.frames) == 0:
        return "failure %(exc)s: %(msg)s" % locals()
    (func, filename, line, some, other) = failure.frames[-1]
    filename = scrubFilename(filename)
    return "failure %(exc)s at %(filename)s:%(line)s: %(func)s(): %(msg)s" % locals()
|
def indices_to_points(indices, pitch, origin):
    """Convert indices of an (n, m, p) matrix into a set of voxel center points.

    Parameters
    ----------
    indices : (q, 3) int
        index of voxel matrix (n, m, p)
    pitch : float
        what pitch was the voxel matrix computed with
    origin : (3,) float
        what is the origin of the voxel matrix

    Returns
    -------
    points : (q, 3) float
        list of points
    """
    indices = np.asanyarray(indices, dtype=np.float64)
    origin = np.asanyarray(origin, dtype=np.float64)
    pitch = float(pitch)
    # BUG FIX: removed leftover debug code ("from IPython import embed;
    # embed()") that blocked on an interactive shell before raising.
    if indices.shape != (indices.shape[0], 3):
        raise ValueError('shape of indices must be (q, 3)')
    if origin.shape != (3,):
        raise ValueError('shape of origin must be (3,)')
    points = indices * pitch + origin
    return points
|
def move(self, item, new_index):
    """Move an item to the given position.

    >>> u = Unique(['spam', 'eggs'])
    >>> u.move('spam', 1)
    Unique(['eggs', 'spam'])
    >>> u.move('ham', 0)
    Traceback (most recent call last):
    ValueError: 'ham' is not in list
    """
    # index() raises ValueError for unknown items, matching the doctest.
    current = self._items.index(item)
    if current == new_index:
        return
    self._items.insert(new_index, self._items.pop(current))
|
def _interfaces_added(self, object_path, interfaces_and_properties):
    """Internal method.

    Record newly added D-Bus interfaces/properties for ``object_path`` and
    fire the corresponding device events.
    """
    # True when this object path was not tracked before this call.
    added = object_path not in self._objects
    self._objects.setdefault(object_path, {})
    # Snapshot the previous state so a change event can expose both views.
    old_state = copy(self._objects[object_path])
    self._objects[object_path].update(interfaces_and_properties)
    new_state = self._objects[object_path]
    if added:
        kind = object_kind(object_path)
        if kind in ('device', 'drive'):
            self.trigger('device_added', self[object_path])
        if Interface['Block'] in interfaces_and_properties:
            # A new block device may be the cleartext side of a LUKS
            # mapping; report the unlock unless a pending job already did.
            slave = self[object_path].luks_cleartext_slave
            if slave:
                if not self._has_job(slave.object_path, 'device_unlocked'):
                    self.trigger('device_unlocked', slave)
    if not added:
        self.trigger('device_changed', self.get(object_path, old_state), self.get(object_path, new_state))
|
def p_throw_statement(self, p):
    """throw_statement : THROW expr SEMI
                       | THROW expr AUTOSEMI
    """
    # NOTE: the docstring above is the PLY grammar rule for this parser
    # production — its token names are functional, not documentation.
    # Build a Throw AST node from the thrown expression (p[2]).
    p[0] = self.asttypes.Throw(expr=p[2])
    p[0].setpos(p)
|
def pack(self, value, nocheck=False, major=DEFAULT_KATCP_MAJOR):
    """Return the value formatted as a KATCP parameter.

    Parameters
    ----------
    value : object
        The value to pack.
    nocheck : bool, optional
        Whether to check that the value is valid before packing it.
    major : int, optional
        Major version of KATCP to use when interpreting types.
        Defaults to latest implemented KATCP version.

    Returns
    -------
    packed_value : str
        The unescaped KATCP string representing the value.
    """
    # Substitute the type's default when no value was supplied.
    if value is None:
        value = self.get_default()
        if value is None:
            raise ValueError("Cannot pack a None value.")
    if not nocheck:
        self.check(value, major)
    return self.encode(value, major)
|
def delete(filething):
    """delete(filething)

    Remove tags from a file.

    Arguments:
        filething (filething)
    Raises:
        mutagen.MutagenError
    """
    ogg = OggFLAC(filething)
    # Rewind before delegating so the instance re-reads from the start.
    filething.fileobj.seek(0)
    ogg.delete(filething)
|
def axis_names_without(self, axis):
    """Return axis names without axis, or None if axis_names is None"""
    names = self.axis_names
    if names is None:
        return None
    # Pick out the names of every axis except the given one.
    return itemgetter(*self.other_axes(axis))(names)
|
def delete(self):
    """Deletes the directory if it exists."""
    # No-op when the directory is already gone.
    if not self.exists:
        return
    logger.info("Deleting %s" % self.path)
    shutil.rmtree(self.path)
|
def build(self, text, matrix, skim_depth=10, d_weights=False):
    """1. For each term in the passed matrix, score its KDE similarity with
    all other indexed terms.

    2. With the ordered stack of similarities in hand, skim off the top X
    pairs and add them as edges.

    Args:
        text (Text): The source text instance.
        matrix (Matrix): An indexed term matrix.
        skim_depth (int): The number of siblings for each term.
        d_weights (bool): If true, give "close" words low edge weights.
    """
    for anchor in bar(matrix.keys):
        n1 = text.unstem(anchor)
        # Heaviest pair scores:
        pairs = matrix.anchored_pairs(anchor).items()
        # Keep only the top `skim_depth` siblings for this anchor.
        for term, weight in list(pairs)[:skim_depth]:
            # If edges represent distance, use the complement of the raw
            # score, so that similar words are connected by "short" edges.
            if d_weights:
                weight = 1 - weight
            n2 = text.unstem(term)
            # NetworkX does not handle numpy types when writing graphml,
            # so we cast the weight to a regular float.
            self.graph.add_edge(n1, n2, weight=float(weight))
|
def get_route(self, file_id):
    '''a method to retrieve route information for file on telegram api

    :param file_id: string with id of file in a message send to bot
    :return: dictionary of response details with route details in [json][result]
    '''
    title = '%s.get_route' % self.__class__.__name__
    # validate inputs
    for key, value in {'file_id': file_id}.items():
        if value:
            object_title = '%s(%s=%s)' % (title, key, str(value))
            self.fields.validate(value, '.%s' % key, object_title)
    # construct key word arguments and send request
    request_kwargs = {
        'url': '%s/getFile' % self.api_endpoint,
        'data': {'file_id': file_id},
    }
    return self._post_request(**request_kwargs)
|
def get_points_within_r(center_points, target_points, r):
    r"""Get all target_points within a specified radius of a center point.

    All data must be in same coordinate system, or you will get
    undetermined results.

    Parameters
    ----------
    center_points : (X, Y) ndarray
        location from which to grab surrounding points within r
    target_points : (X, Y) ndarray
        points from which to return if they are within r of center_points
    r : integer
        search radius around center_points to grab target_points

    Returns
    -------
    matches : (X, Y) ndarray
        A list of points within r distance of, and in the same
        order as, center_points
    """
    # Index the candidates in a KD-tree, then take the ball query's hits.
    kd_tree = cKDTree(target_points)
    within_r = kd_tree.query_ball_point(center_points, r)
    return kd_tree.data[within_r].T
|
def update_available(after_days=1):
    """Check whether updated NRFA data is available.

    :param after_days: Only check if not checked previously since a certain number of days ago
    :type after_days: float
    :return: `True` if update available, `False` if not, `None` if remote location cannot be reached.
    :rtype: bool or None
    """
    # If the data set has never been downloaded, report an update as
    # available without touching the network, but still record the check.
    never_downloaded = not bool(config.get('nrfa', 'downloaded_on', fallback=None) or None)
    if never_downloaded:
        config.set_datetime('nrfa', 'update_checked_on', datetime.utcnow())
        config.save()
        return True
    # Throttle remote checks: bail out if the last check was less than
    # `after_days` ago (epoch fallback forces a check when never checked).
    last_checked_on = config.get_datetime('nrfa', 'update_checked_on', fallback=None) or datetime.fromtimestamp(0)
    if datetime.utcnow() < last_checked_on + timedelta(days=after_days):
        return False
    # Compare the locally recorded version against the remote manifest.
    current_version = LooseVersion(config.get('nrfa', 'version', fallback='0') or '0')
    try:
        with urlopen(config['nrfa']['oh_json_url'], timeout=10) as f:
            remote_version = LooseVersion(json.loads(f.read().decode('utf-8'))['nrfa_version'])
            config.set_datetime('nrfa', 'update_checked_on', datetime.utcnow())
            config.save()
            return remote_version > current_version
    except URLError:
        # Remote location unreachable; caller cannot tell either way.
        return None
|
def add_r_ending_to_syllable(last_syllable: str, is_first=True) -> str:
    """Adds an the -r ending to the last syllable of an Old Norse word.

    In some cases, it really adds an -r. In other cases, it only doubles the
    last character or leaves the syllable unchanged.

    >>> add_r_ending_to_syllable("arm", True)
    'armr'
    >>> add_r_ending_to_syllable("ás", True)
    'áss'
    >>> add_r_ending_to_syllable("stól", True)
    'stóll'
    >>> "jö" + add_r_ending_to_syllable("kul", False)
    'jökull'
    >>> add_r_ending_to_syllable("stein", True)
    'steinn'
    >>> 'mi' + add_r_ending_to_syllable('kil', False)
    'mikill'
    >>> add_r_ending_to_syllable('sæl', True)
    'sæll'
    >>> 'li' + add_r_ending_to_syllable('til', False)
    'litill'
    >>> add_r_ending_to_syllable('vænn', True)
    'vænn'
    >>> add_r_ending_to_syllable('lauss', True)
    'lauss'
    >>> add_r_ending_to_syllable("vin", True)
    'vinr'
    >>> add_r_ending_to_syllable("sel", True)
    'selr'
    >>> add_r_ending_to_syllable('fagr', True)
    'fagr'
    >>> add_r_ending_to_syllable('vitr', True)
    'vitr'
    >>> add_r_ending_to_syllable('vetr', True)
    'vetr'
    >>> add_r_ending_to_syllable('akr', True)
    'akr'
    >>> add_r_ending_to_syllable('Björn', True)
    'Björn'
    >>> add_r_ending_to_syllable('þurs', True)
    'þurs'
    >>> add_r_ending_to_syllable('karl', True)
    'karl'
    >>> add_r_ending_to_syllable('hrafn', True)
    'hrafn'

    :param last_syllable: last syllable of the word
    :param is_first: is it the first syllable of the word?
    :return: inflected syllable
    """
    if len(last_syllable) >= 2:
        # The ending interacts with a final l, n, s or r; any other final
        # consonant simply takes a plain -r.
        if last_syllable[-1] in ['l', 'n', 's', 'r']:
            if last_syllable[-2] in CONSONANTS:
                # Apocope of r: consonant cluster before the final l/n/s/r,
                # so no ending is added at all (e.g. 'karl', 'hrafn').
                return last_syllable
            else:
                # Assimilation of r: the -r merges with the final consonant.
                if len(last_syllable) >= 3 and last_syllable[-3:-1] in DIPHTHONGS:
                    # Diphthong before the final consonant (e.g. 'stein' -> 'steinn').
                    return apply_raw_r_assimilation(last_syllable)
                elif last_syllable[-2] in SHORT_VOWELS and is_first:
                    # No assimilation when r is supposed to be added to a stressed syllable
                    # whose last letter is l, n or s and the penultimate letter is a short vowel
                    return last_syllable + "r"
                elif last_syllable[-2] in SHORT_VOWELS:
                    # Unstressed syllable: short vowel still assimilates.
                    return apply_raw_r_assimilation(last_syllable)
                elif last_syllable[-2] in LONG_VOWELS:
                    return apply_raw_r_assimilation(last_syllable)
                # Fallback for any remaining vowel-like penultimate letter.
                return apply_raw_r_assimilation(last_syllable)
        else:
            return last_syllable + "r"
    else:
        # Single-character syllable: nothing to assimilate with.
        return last_syllable + "r"
|
def dump_feature(self, feature_name, feature, force_extraction=True):
    """Dumps a list of lists or ndarray of features into database (allows to
    copy features from a pre-existing .txt/.csv/.whatever file, for example).

    Parameters
    ----------
    feature_name : str
        Name under which the feature is stored.
    feature : list of lists or ndarray
        Contains the data to be written to the database.
    force_extraction : bool
        If True, will overwrite any existing feature with this name.
        Default value: True.

    Returns
    -------
    None
    """
    dump_feature_base(self.dbpath, self._set_object, self.points_amt, feature_name, feature, force_extraction)
    return None
|
def _adjust_inserted_indices(inserted_indices_list, prune_indices_list):
    """Adjust inserted indices, if there are pruned elements."""
    # Work on a copy so the caller's (possibly cached) lists stay intact.
    adjusted = [list(dim_inds) for dim_inds in inserted_indices_list]
    for prune_inds, inserted_inds in zip(prune_indices_list, adjusted):
        # Indices that are themselves H&S (inserted) must not be treated
        # as pruned positions.
        effective_prunes = prune_inds[~np.in1d(prune_inds, inserted_inds)]
        # Shift every inserted index left by the number of pruned
        # positions that precede it.
        for pos, index in enumerate(inserted_inds):
            inserted_inds[pos] = index - np.sum(effective_prunes < index)
    return adjusted
|
def is_mutating(status):
    """Determine whether the statement is mutating based on the status.

    :param status: SQL statement text (or None/empty).
    :return: True if the first word is a mutating keyword, else False.
    """
    if not status:
        return False
    # Set literal instead of set([...]); membership test is O(1).
    mutating = {'insert', 'update', 'delete', 'alter', 'create', 'drop',
                'replace', 'truncate', 'load'}
    words = status.split(None, 1)
    # Guard against whitespace-only input, which splits to an empty list
    # (the previous [0] indexing raised IndexError).
    return bool(words) and words[0].lower() in mutating
|
def deploy(jboss_config, source_file):
    '''Deploy the application on the jboss instance from the local file system where minion is running.

    jboss_config
        Configuration dictionary with properties specified above.
    source_file
        Source file to deploy from

    CLI Example:

    .. code-block:: bash

        salt '*' jboss7.deploy '{"cli_path": "integration.modules.sysmod.SysModuleTest.test_valid_docs", "controller": "10.11.12.13:9999", "cli_user": "jbossadm", "cli_password": "jbossadm"}' /opt/deploy_files/my_deploy
    '''
    log.debug("======================== MODULE FUNCTION: jboss7.deploy, source_file=%s", source_file)
    # --force lets the deployment replace an existing artifact of the same name.
    command = 'deploy %s --force ' % source_file
    return __salt__['jboss7_cli.run_command'](jboss_config, command, fail_on_error=False)
|
def until_state(self, state, timeout=None):
    """Return a tornado Future that will resolve when the requested state is set."""
    if state not in self._valid_states:
        raise ValueError('State must be one of {0}, not {1}'.format(self._valid_states, state))
    # Already in the requested state: hand back an immediately-resolved future.
    if state == self._state:
        done = tornado_Future()
        done.set_result(True)
        return done
    pending = self._waiting_futures[state]
    if timeout:
        # Wrap the pending future so it fails after the deadline.
        return with_timeout(self._ioloop.time() + timeout, pending, self._ioloop)
    return pending
|
def get_version_records(self):
    """Yield RASH version information stored in DB. Latest first.

    :rtype: [VersionRecord]
    """
    # Column names must stay in the same order as the SELECT list below;
    # each row is zipped into keyword arguments for VersionRecord.
    keys = ['id', 'rash_version', 'schema_version', 'updated']
    sql = """
    SELECT id, rash_version, schema_version, updated
    FROM rash_info
    ORDER BY id DESC
    """
    with self.connection() as connection:
        for row in connection.execute(sql):
            yield VersionRecord(**dict(zip(keys, row)))
|
def DbDeleteDeviceAlias(self, argin):
    """Delete a device alias.

    :param argin: device alias name
    :type: tango.DevString
    :return:
    :rtype: tango.DevVoid
    """
    self._log.debug("In DbDeleteDeviceAlias()")
    # Delegate the actual removal to the database backend.
    self.db.delete_device_alias(argin)
|
def remove_outliers(self, thresh=3, **predict_kwargs):
    """Remove outliers from the GP with very simplistic outlier detection.

    Removes points that are more than `thresh` * `err_y` away from the GP
    mean. Note that this is only very rough in that it ignores the
    uncertainty in the GP mean at any given point. But you should only be
    using this as a rough way of removing bad channels, anyways!

    Returns the values that were removed and a boolean array indicating
    where the removed points were.

    Parameters
    ----------
    thresh : float, optional
        The threshold as a multiplier times `err_y`. Default is 3 (i.e.,
        throw away all 3-sigma points).
    **predict_kwargs : optional kwargs
        All additional kwargs are passed to :py:meth:`predict`. You can, for
        instance, use this to make it use MCMC to evaluate the mean. (If you
        don't use MCMC, then the current value of the hyperparameters is
        used.)

    Returns
    -------
    X_bad : array
        Input values of the bad points.
    y_bad : array
        Bad values.
    err_y_bad : array
        Uncertainties on the bad values.
    n_bad : array
        Derivative order of the bad values.
    bad_idxs : array
        Array of booleans with the original shape of X with True wherever a
        point was taken to be bad and subsequently removed.
    T_bad : array
        Transformation matrix of returned points. Only returned if
        :py:attr:`T` is not None for the instance.
    """
    # Evaluate the GP mean at the training inputs, without observation noise.
    mean = self.predict(self.X, n=self.n, noise=False, return_std=False, output_transform=self.T, **predict_kwargs)
    # Normalized residuals; points with zero reported uncertainty are
    # never flagged (their delta is forced to 0).
    deltas = scipy.absolute(mean - self.y) / self.err_y
    deltas[self.err_y == 0] = 0
    bad_idxs = (deltas >= thresh)
    good_idxs = ~bad_idxs
    # Pull out the old values so they can be returned:
    y_bad = self.y[bad_idxs]
    err_y_bad = self.err_y[bad_idxs]
    if self.T is not None:
        # With a transformation matrix, the bad rows of T determine which
        # columns (and hence which X/n rows) belong to the bad points.
        T_bad = self.T[bad_idxs, :]
        non_zero_cols = (T_bad != 0).all(axis=0)
        T_bad = T_bad[:, non_zero_cols]
        X_bad = self.X[non_zero_cols, :]
        n_bad = self.n[non_zero_cols, :]
    else:
        X_bad = self.X[bad_idxs, :]
        n_bad = self.n[bad_idxs, :]
    # Delete the offending points:
    if self.T is None:
        self.X = self.X[good_idxs, :]
        self.n = self.n[good_idxs, :]
    else:
        # Drop the bad rows of T, then drop any columns that no longer feed
        # a remaining row, along with the matching X/n rows.
        self.T = self.T[good_idxs, :]
        non_zero_cols = (self.T != 0).all(axis=0)
        self.T = self.T[:, non_zero_cols]
        self.X = self.X[non_zero_cols, :]
        self.n = self.n[non_zero_cols, :]
    self.y = self.y[good_idxs]
    self.err_y = self.err_y[good_idxs]
    # The kernel matrix must be recomputed on next use.
    self.K_up_to_date = False
    if self.T is None:
        return (X_bad, y_bad, err_y_bad, n_bad, bad_idxs)
    else:
        return (X_bad, y_bad, err_y_bad, n_bad, bad_idxs, T_bad)
|
def agent(agent_id):
    '''Show the information about the given agent.'''
    # (display name, GraphQL field name) pairs; positions matter for the
    # legacy-server deletions below.
    fields = [('ID', 'id'), ('Status', 'status'), ('Region', 'region'), ('First Contact', 'first_contact'), ('CPU Usage (%)', 'cpu_cur_pct'), ('Used Memory (MiB)', 'mem_cur_bytes'), ('Total slots', 'available_slots'), ('Occupied slots', 'occupied_slots'), ]
    if is_legacy_server():
        # NOTE(review): `fields` has only 8 entries (indices 0-7), so
        # `del fields[9]` would raise IndexError -- confirm whether entries
        # were dropped from the list above without updating these indices.
        del fields[9]
        del fields[6]
    q = 'query($agent_id:String!) {' '  agent(agent_id:$agent_id) { $fields }' '}'
    q = q.replace('$fields', ' '.join(item[1] for item in fields))
    v = {'agent_id': agent_id}
    with Session() as session:
        try:
            resp = session.Admin.query(q, v)
        except Exception as e:
            print_error(e)
            sys.exit(1)
    info = resp['agent']
    rows = []
    for name, key in fields:
        # Convert the raw byte count into MiB for display.
        # NOTE(review): `info[key]` here is read before the `key in info`
        # guard below -- confirm the response always contains this key.
        if key == 'mem_cur_bytes' and info[key] is not None:
            info[key] = round(info[key] / 2 ** 20, 1)
        if key in info:
            rows.append((name, info[key]))
    print(tabulate(rows, headers=('Field', 'Value')))
|
def accept_connection(self, name=None, alias=None, timeout=0):
    """Accepts a connection to server identified by `name` or the latest
    server if `name` is empty.

    If given an `alias`, the connection is named and can be later referenced
    with that name.

    If `timeout` is > 0, the connection times out after the time specified.
    `timeout` defaults to 0 which will wait indefinitely.
    Empty value or None will use socket default timeout.

    Examples:
    | Accept connection |
    | Accept connection | Server1 | my_connection |
    | Accept connection | Server1 | my_connection | timeout=5 |
    """
    # NOTE(review): `_servers.get(None)` is assumed to resolve to the most
    # recently created server (cache-style semantics) -- confirm; a plain
    # dict .get would return None and break the call below.
    server = self._servers.get(name)
    server.accept_connection(alias, timeout)
|
def HasNonLSCTables(elem):
    """Return True if the document tree below elem contains non-LSC
    tables, otherwise return False.
    """
    # A table is "non-LSC" when its name is absent from the known-table
    # registry; short-circuit on the first such table.
    for table_elem in elem.getElementsByTagName(ligolw.Table.tagName):
        if table_elem.Name not in TableByName:
            return True
    return False
|
def gate(self, gate, ID=None, apply_now=True):
    '''Applies the gate to each Measurement in the Collection, returning a new Collection with gated data.
    {_containers_held_in_memory_warning}

    Parameters
    ----------
    gate : {_gate_available_classes}
    ID : [str, numeric, None]
        New ID to be given to the output. If None, the ID of the current collection will be used.
    '''
    # Gate each well individually; `apply` fans this out over the collection.
    gate_well = lambda well: well.gate(gate, apply_now=apply_now)
    return self.apply(gate_well, output_format='collection', ID=ID)
|
def change_mime(self, bucket, key, mime):
    """Change the MIME type of a stored file.

    Updates the media type of the specified resource; see the Qiniu
    "chgm" API reference for details:
    http://developer.qiniu.com/docs/v6/api/reference/rs/chgm.html

    Args:
        bucket: bucket that holds the target resource
        key:    key (file name) of the target resource
        mime:   new MIME type to assign to the resource
    """
    resource = entry(bucket, key)
    # The API requires the MIME type to be URL-safe base64 encoded.
    encode_mime = urlsafe_base64_encode(mime)
    return self.__rs_do('chgm', resource, 'mime/{0}'.format(encode_mime))
|
def _read_pyMatch(fn, precursors):
    """read pyMatch file and perform realignment of hits

    :param fn: path to a pyMatch output file; each line holds the
        whitespace-separated columns: query name, sequence, chrom,
        start, end, mismatches, additions
    :param precursors: mapping of precursor (chrom) name to its sequence
    :return: dict of query name -> realign object, filtered by _clean_hits
    """
    with open(fn) as handle:
        reads = defaultdict(realign)
        for line in handle:
            query_name, seq, chrom, reference_start, end, mism, add = line.split()
            reference_start = int(reference_start)
            # Remember the read sequence the first time the query is seen.
            if query_name not in reads:
                reads[query_name].sequence = seq
            iso = isomir()
            iso.align = line
            iso.start = reference_start
            # Re-align against the precursor to recover substitutions
            # and 3' additions for this hit.
            iso.subs, iso.add = _realign(reads[query_name].sequence, precursors[chrom], reference_start)
            logger.debug("%s %s %s %s %s" % (query_name, reference_start, chrom, iso.subs, iso.add))
            # Discard hits with more than one substitution.
            if len(iso.subs) > 1:
                continue
            reads[query_name].set_precursor(chrom, iso)
    reads = _clean_hits(reads)
    return reads
|
def fit_toy_potential(orbit, force_harmonic_oscillator=False):
    """Fit a best fitting toy potential to the orbit provided.

    If the orbit is a tube (loop) orbit, use the Isochrone potential. If the
    orbit is a box orbit, use the harmonic oscillator potential. An option
    is available to force using the harmonic oscillator
    (`force_harmonic_oscillator`).

    See the docstrings for ~`gala.dynamics.fit_isochrone()` and
    ~`gala.dynamics.fit_harmonic_oscillator()` for more information.

    Parameters
    ----------
    orbit : `~gala.dynamics.Orbit`
    force_harmonic_oscillator : bool (optional)
        Force using the harmonic oscillator potential as the toy potential.

    Returns
    -------
    potential : :class:`~gala.potential.IsochronePotential` or :class:`~gala.potential.HarmonicOscillatorPotential`
        The best-fit potential object.
    """
    # Any non-zero circulation component marks a tube (loop) orbit.
    is_tube = np.any(orbit.circulation() == 1)
    if is_tube and not force_harmonic_oscillator:
        logger.debug("===== Tube orbit =====")
        logger.debug("Using Isochrone toy potential")
        best_fit = fit_isochrone(orbit)
        logger.debug("Best m={}, b={}".format(best_fit.parameters['m'], best_fit.parameters['b']))
    else:
        logger.debug("===== Box orbit =====")
        logger.debug("Using triaxial harmonic oscillator toy potential")
        best_fit = fit_harmonic_oscillator(orbit)
        logger.debug("Best omegas ({})".format(best_fit.parameters['omega']))
    return best_fit
|
def memoize(obj):
    """Decorator to create memoized functions, methods or classes.

    The cache is exposed on the wrapped object as ``obj.cache``.
    """
    cache = obj.cache = {}

    @functools.wraps(obj)
    def memoizer(*args, **kwargs):
        # Key on both positional and keyword arguments; keying on args
        # alone returned stale results for calls differing only in kwargs.
        key = (args, frozenset(kwargs.items()))
        if key not in cache:
            cache[key] = obj(*args, **kwargs)
        return cache[key]
    return memoizer
|
def rm_gos(self, rm_goids):
    """Remove any edges that contain user-specified GO IDs.

    :param rm_goids: iterable of GO IDs whose edges should be dropped
    """
    # Filter both the plain edge list and the typed-relationship edges.
    self.edges = self._rm_gos_edges(rm_goids, self.edges)
    self.edges_rel = self._rm_gos_edges_rel(rm_goids, self.edges_rel)
|
def bool(cls, must=None, should=None, must_not=None, minimum_number_should_match=None, boost=None):
    '''http://www.elasticsearch.org/guide/reference/query-dsl/bool-query.html

    A query that matches documents matching boolean combinations of other
    queries. The bool query maps to Lucene BooleanQuery. It is built using
    one or more boolean clauses, each clause with a typed occurrence. The
    occurrence types are:
    'must' - The clause (query) must appear in matching documents.
    'should' - The clause (query) should appear in the matching document. A boolean query with no 'must' clauses, one or more 'should' clauses must match a document. The minimum number of 'should' clauses to match can be set using 'minimum_number_should_match' parameter.
    'must_not' - The clause (query) must not appear in the matching documents. Note that it is not possible to search on documents that only consists of a 'must_not' clause(s).
    'minimum_number_should_match' - Minimum number of documents that should match
    'boost' - boost value

    > term = ElasticQuery()
    > term.term(user='kimchy')
    > query = ElasticQuery()
    > query.bool(should=term)
    > query.query()
    {'bool': {'should': {'term': {'user': 'kimchy'}}}}
    '''
    instance = cls(bool={})
    # Only clauses that were actually provided end up in the query body.
    clauses = (
        ('must', must),
        ('should', should),
        ('must_not', must_not),
        ('minimum_number_should_match', minimum_number_should_match),
        ('boost', boost),
    )
    for clause_name, clause_value in clauses:
        if clause_value is not None:
            instance['bool'][clause_name] = clause_value
    return instance
|
def wcxf2arrays(d):
    """Convert a dictionary with a Wilson coefficient
    name followed by underscore and numeric indices as keys and numbers as
    values to a dictionary with Wilson coefficient names as keys and
    numbers or numpy arrays as values. This is needed for the parsing
    of input in WCxf format.
    """
    C = {}
    for key, value in d.items():
        name = key.split('_')[0]
        shape = C_keys_shape[name]
        if shape == 1:
            # Scalar coefficient: keep the full key.
            C[key] = value
        else:
            # Tensor coefficient: the trailing digits are 1-based indices
            # into an array of the declared shape.
            digits = key.split('_')[-1]
            if name not in C:
                C[name] = np.zeros(shape, dtype=complex)
            position = tuple(int(i) - 1 for i in digits)
            C[name][position] = value
    return C
|
def get_export(self, export_type, generate=False, wait=False, wait_timeout=None, ):
    """Downloads a data export over HTTP. Returns a `Requests Response
    <http://docs.python-requests.org/en/master/api/#requests.Response>`_
    object containing the content of the export.

    - **export_type** is a string specifying which type of export should be
      downloaded.
    - **generate** is a boolean specifying whether to generate a new export
      and wait for it to be ready, or to just download the latest export.
    - **wait** is a boolean specifying whether to wait for an in-progress
      export to finish, if there is one. Has no effect if ``generate`` is
      ``True``.
    - **wait_timeout** is the number of seconds to wait if ``wait`` is
      ``True``. Has no effect if ``wait`` is ``False`` or if ``generate``
      is ``True``.

    The returned :py:class:`.Response` object has two additional attributes
    as a convenience for working with the CSV content; **csv_reader** and
    **csv_dictreader**, which are wrappers for :py:meth:`.csv.reader`
    and :py:class:`csv.DictReader` respectively. These wrappers take care
    of correctly decoding the export content for the CSV parser.

    Example::

        classification_export = Project(1234).get_export('classifications')
        for row in classification_export.csv_reader():
            print(row)

        classification_export = Project(1234).get_export('classifications')
        for row in classification_export.csv_dictreader():
            print(row)
    """
    if generate:
        self.generate_export(export_type)
    # A freshly generated export must always be waited on; otherwise only
    # wait when explicitly requested.
    if generate or wait:
        export = self.wait_export(export_type, wait_timeout)
    else:
        export = self.describe_export(export_type)
    # Talk exports expose their download URL in a different place.
    if export_type in TALK_EXPORT_TYPES:
        media_url = export['data_requests'][0]['url']
    else:
        media_url = export['media'][0]['src']
    response = requests.get(media_url, stream=True)
    # Attach lazy CSV helpers that stream and decode the response body.
    response.csv_reader = functools.partial(csv.reader, response.iter_lines(decode_unicode=True), )
    response.csv_dictreader = functools.partial(csv.DictReader, response.iter_lines(decode_unicode=True), )
    return response
|
def readlines(filename, encoding='utf-8'):
    """Read lines from file ('filename').

    Return lines and encoding.

    NOTE(review): splitting on ``os.linesep`` assumes the text returned by
    ``read`` keeps platform-native line endings; a file with foreign
    endings (e.g. LF-only text opened on Windows) would come back as a
    single line -- confirm against ``read``'s behavior.
    """
    text, encoding = read(filename, encoding)
    return text.split(os.linesep), encoding
|
def xpathRegisterNs(self, prefix, ns_uri):
    """Register a new namespace. If @ns_uri is None it unregisters
    the namespace.
    """
    # Thin wrapper over the libxml2 C binding; returns its status code.
    return libxml2mod.xmlXPathRegisterNs(self._o, prefix, ns_uri)
|
def save_state(self, out_path):
    """Save the current state of this emulated object to a file.

    Args:
        out_path (str): The path to save the dumped state of this emulated
            object.
    """
    # Remove all IntEnums from the dumped state since they cannot be
    # json-serialized on python 2.7. See
    # https://bitbucket.org/stoneleaf/enum34/issues/17/difference-between-enum34-and-enum-json
    serializable_state = _clean_intenum(self.dump_state())
    with open(out_path, "w") as out_file:
        json.dump(serializable_state, out_file, indent=4)
|
def __auth_descriptor(self, api_info):
    """Builds an auth descriptor from API info.

    Args:
        api_info: An _ApiInfo object.

    Returns:
        A dictionary with 'allowCookieAuth' and/or 'blockedRegions' keys,
        or None when the API declares no auth configuration.
    """
    auth = api_info.auth
    if auth is None:
        return None
    descriptor = {}
    # Only include keys whose values were explicitly configured.
    if auth.allow_cookie_auth is not None:
        descriptor['allowCookieAuth'] = auth.allow_cookie_auth
    if auth.blocked_regions:
        descriptor['blockedRegions'] = auth.blocked_regions
    return descriptor
|
def _setup_ctx(self, ctx):
    """Should be called by any derived plugin's setup_ctx() function."""
    # Lint mode implies strict, canonical-order checking with a bounded
    # identifier length and no implicit errors.
    ctx.strict = True
    ctx.canonical = True
    ctx.max_identifier_len = 64
    ctx.implicit_errors = False
    # always add additional prefixes given on the command line
    self.namespace_prefixes.extend(ctx.opts.lint_namespace_prefixes)
    self.modulename_prefixes.extend(ctx.opts.lint_modulename_prefixes)
    # register our grammar validation funs
    statements.add_validation_var('$chk_default', lambda keyword: keyword in _keyword_with_default)
    statements.add_validation_var('$chk_required', lambda keyword: keyword in _required_substatements)
    statements.add_validation_var('$chk_recommended', lambda keyword: keyword in _recommended_substatements)
    statements.add_validation_fun('grammar', ['$chk_default'], lambda ctx, s: v_chk_default(ctx, s))
    statements.add_validation_fun('grammar', ['$chk_required'], lambda ctx, s: v_chk_required_substmt(ctx, s))
    statements.add_validation_fun('grammar', ['$chk_recommended'], lambda ctx, s: v_chk_recommended_substmt(ctx, s))
    # Hyphenation check is opt-in via the command line.
    if ctx.opts.lint_ensure_hyphenated_names:
        statements.add_validation_fun('grammar', ['*'], lambda ctx, s: v_chk_hyphenated_names(ctx, s))
    statements.add_validation_fun('grammar', ['namespace'], lambda ctx, s: v_chk_namespace(ctx, s, self.namespace_prefixes))
    statements.add_validation_fun('grammar', ['module', 'submodule'], lambda ctx, s: v_chk_module_name(ctx, s, self.modulename_prefixes))
    statements.add_validation_fun('strict', ['include'], lambda ctx, s: v_chk_include(ctx, s))
    statements.add_validation_fun('strict', ['module'], lambda ctx, s: v_chk_mandatory_top_level(ctx, s))
    # register our error codes
    error.add_error_code('LINT_EXPLICIT_DEFAULT', 4, 'RFC 8407: 4.4: ' + 'statement "%s" is given with its default value "%s"')
    error.add_error_code('LINT_MISSING_REQUIRED_SUBSTMT', 3, '%s: ' + 'statement "%s" must have a "%s" substatement')
    error.add_error_code('LINT_MISSING_RECOMMENDED_SUBSTMT', 4, '%s: ' + 'statement "%s" should have a "%s" substatement')
    error.add_error_code('LINT_BAD_NAMESPACE_VALUE', 4, 'RFC 8407: 4.9: namespace value should be "%s"')
    error.add_error_code('LINT_BAD_MODULENAME_PREFIX_1', 4, 'RFC 8407: 4.1: ' + 'the module name should start with the string %s')
    error.add_error_code('LINT_BAD_MODULENAME_PREFIX_N', 4, 'RFC 8407: 4.1: ' + 'the module name should start with one of the strings %s')
    error.add_error_code('LINT_NO_MODULENAME_PREFIX', 4, 'RFC 8407: 4.1: ' + 'no module name prefix string used')
    error.add_error_code('LINT_BAD_REVISION', 3, 'RFC 8407: 4.7: ' + 'the module\'s revision %s is older than ' + 'submodule %s\'s revision %s')
    error.add_error_code('LINT_TOP_MANDATORY', 3, 'RFC 8407: 4.10: ' + 'top-level node %s must not be mandatory')
    error.add_error_code('LINT_NOT_HYPHENATED', 4, '%s is not hyphenated, e.g., using upper-case or underscore')
    # override std error string
    error.add_error_code('LONG_IDENTIFIER', 3, 'RFC 8407: 4.3: identifier %s exceeds %s characters')
|
def backend_routing(self, context):
    """Returns the targeted backend and an updated state.

    :type context: satosa.context.Context
    :rtype: satosa.backends.base.BackendModule

    :param context: The request context
    :return: backend
    """
    satosa_logging(logger, logging.DEBUG, "Routing to backend: %s " % context.target_backend, context.state)
    # Remember which frontend initiated the flow so the response can be
    # routed back to it later.
    context.state[STATE_KEY] = context.target_frontend
    return self.backends[context.target_backend]["instance"]
|
def sizeAdjust(self):
    """If the glyph is bigger than the font (because the user set it smaller)
    this should be able to shorten the size.
    """
    # NOTE(review): font_bbox[:2] is read here as (width, height); confirm
    # the bbox layout, since bounding boxes are often (x0, y0, x1, y1).
    font_width, font_height = self.font_bbox[:2]
    # Clamp the glyph's dimensions to the font's.
    self.width = min(self.width, font_width)
    self.height = min(self.height, font_height)
    # Mirror the clamped size into the first two bbox slots, then re-crop.
    self.bbox[:2] = self.width, self.height
    self.crop()
|
def swagger_definition(self, base_path=None, **kwargs):
    """Return a valid swagger spec, with the values passed."""
    # Known info fields come from kwargs first, then from the defaults.
    info_values = {
        key: kwargs.get(key, self.DEFAULT_INFO.get(key))
        for key in Info.fields.keys()
        if key in kwargs or key in self.DEFAULT_INFO
    }
    spec = Swagger({
        "info": Info(info_values),
        "paths": self.paths,
        "swagger": "2.0",
        "basePath": base_path,
    })
    return spec.to_primitive()
|
def read_tx_witnesses(ptr, tx, num_witnesses):
    """Returns an array of witness scripts.

    Each witness will be a bytestring (i.e. encoding the witness script).

    NOTE: this is Python 2 code (``xrange`` and ``str.decode('hex')``).
    """
    witnesses = []
    for i in xrange(0, num_witnesses):
        # Each witness is a stack of var-length items, prefixed by its count.
        witness_stack_len = read_var_int(ptr, tx)
        witness_stack = []
        for j in xrange(0, witness_stack_len):
            stack_item = read_var_string(ptr, tx)
            witness_stack.append(stack_item)
        # Re-serialize the stack into a single raw witness script.
        witness_script = btc_witness_script_serialize(witness_stack).decode('hex')
        witnesses.append(witness_script)
    return witnesses
|
def sparse_toy_linear_1d_classification_uncertain_input(num_inducing=10, seed=default_seed, optimize=True, plot=True):
    """Sparse 1D classification example with uncertain inputs.

    :param num_inducing: number of inducing points for the sparse GP
    :param seed: seed value for data generation (default is 4).
    :type seed: int
    :param optimize: whether to optimize the model hyperparameters
    :param plot: whether to plot the latent function and the predictions
    :return: the fitted SparseGPClassificationUncertainInput model
    """
    try:
        import pods
    except ImportError:
        print('pods unavailable, see https://github.com/sods/ods for example datasets')
    import numpy as np
    data = pods.datasets.toy_linear_1d_classification(seed=seed)
    # Map labels from {-1, 1} to {0, 1} for the Bernoulli likelihood.
    Y = data['Y'][:, 0:1]
    Y[Y.flatten() == - 1] = 0
    X = data['X']
    # Attach random input variance to every observation.
    X_var = np.random.uniform(0.3, 0.5, X.shape)
    # Model definition
    m = GPy.models.SparseGPClassificationUncertainInput(X, X_var, Y, num_inducing=num_inducing)
    m['.*len'] = 4.
    # Optimize
    if optimize:
        m.optimize()
    # Plot
    if plot:
        from matplotlib import pyplot as plt
        fig, axes = plt.subplots(2, 1)
        m.plot_f(ax=axes[0])
        m.plot(ax=axes[1])
    print(m)
    return m
|
def getCachedOrUpdatedValue(self, key):
    """Gets the device's value with the given key.

    If the key is not found in the cache, the value is queried from the host.
    """
    # Serve from the local cache when possible; otherwise ask the host.
    if key in self._VALUES:
        return self._VALUES[key]
    return self.getValue(key)
|
def markdown_filter(value, typogrify=True, extensions=('extra', 'codehilite')):
    """A smart wrapper around the ``markdown`` and ``typogrify`` functions that automatically removes leading
    whitespace before every line. This is necessary because Markdown is whitespace-sensitive. Consider some Markdown
    content in a template that looks like this:

    .. codeblock:: html+jinja

        <article>
            {% filter markdown %}
                ## A Heading

                Some content here.

                    Code goes here.
                    More lines of code
                    And more.

                Closing thoughts
            {% endfilter %}
        </article>

    In this example, a typical Markdown filter would see the leading whitespace before the first heading and assume
    it was a code block, which would then cause the entire Markdown document to be rendered incorrectly. You may have
    a document with spacing like this because your text editor automatically 'pretty-prints' the markup,
    including the content within the filter tag.

    This filter automatically removes the leading whitespace - leaving code block and other expected offsets in place
    of course - so that rendering occurs correctly regardless of the nested spacing of the source document.
    """
    # Determine how many leading spaces there are, then remove that number
    # from the beginning of each line. Group 2 captures the indent of the
    # first non-empty line after any leading newlines.
    match = re.match(r'(\n*)(\s*)', value)
    s, e = match.span(2)
    # use ^ in the pattern so mid-string matches won't be removed;
    # multi-line mode makes ^ match the start of each line.
    pattern = re.compile(r'^ {%s}' % (e - s), flags=re.MULTILINE)
    output = pattern.sub(u'', value)
    if typogrify:
        return jinja_filters.typogrify(markdown(output, extensions=extensions))
    else:
        return markdown(output, extensions=extensions)
|
def upload(c, directory, index=None, sign=False, dry_run=False):
    """Upload (potentially also signing) all artifacts in ``directory``/dist.

    :param c: Invoke context used to run ``gpg``/``twine``/``ls`` commands.
    :param str directory:
        Directory containing a ``dist`` subdirectory with built archives.
    :param str index:
        Custom upload index/repository name.
        By default, uses whatever the invoked ``pip`` is configured to use.
        Modify your ``pypirc`` file to add new named repositories.
    :param bool sign:
        Whether to sign the built archive(s) via GPG.
    :param bool dry_run:
        Skip actual publication step if ``True``.
        This also prevents cleanup of the temporary build/dist directories, so
        you can examine the build artifacts.
    """
    # Obtain list of archive filenames, then ensure any wheels come first
    # so their improved metadata is what PyPI sees initially (otherwise, it
    # only honors the sdist's lesser data).
    archives = list(itertools.chain.from_iterable(
        glob(os.path.join(directory, "dist", "*.{0}".format(extension)))
        for extension in ("whl", "tar.gz")
    ))
    # Sign each archive in turn.
    # TODO: twine has a --sign option; but the below is still nice insofar
    # as it lets us dry-run, generate for web upload when pypi's API is
    # being cranky, etc. Figure out which is better.
    if sign:
        prompt = "Please enter GPG passphrase for signing: "
        input_ = StringIO(getpass.getpass(prompt) + "\n")
        gpg_bin = find_gpg(c)
        if not gpg_bin:
            sys.exit(
                "You need to have one of `gpg`, `gpg1` or `gpg2` "
                "installed to GPG-sign!"
            )
        for archive in archives:
            cmd = "{0} --detach-sign -a --passphrase-fd 0 {{0}}".format(gpg_bin)  # noqa
            c.run(cmd.format(archive), in_stream=input_)
            # Rewind so the passphrase can be replayed by the next iteration.
            input_.seek(0)
    # Upload
    parts = ["twine", "upload"]
    if index:
        # Original code tested `if index:` twice in a row; a single check
        # both builds and appends the repository argument.
        parts.append("--repository {0}".format(index))
    paths = archives[:]
    if sign:
        paths.append(os.path.join(directory, "dist", "*.asc"))
    parts.extend(paths)
    cmd = " ".join(parts)
    if dry_run:
        print("Would publish via: {0}".format(cmd))
        print("Files that would be published:")
        c.run("ls -l {0}".format(" ".join(paths)))
    else:
        c.run(cmd)
|
def datagram_received(self, data, addr):
    '''Handle one incoming discovery datagram.

    Replies with the JSON-encoded answer when the packet carries our
    signature and a fresh timestamp; otherwise optionally replies with an
    error message (only when ``disable_hidden`` is set).

    :param data: raw datagram payload
    :param addr: (host, port) tuple of the sender
    :return:
    '''
    message = salt.utils.stringutils.to_unicode(data)
    if message.startswith(self.signature):
        try:
            timestamp = float(message[len(self.signature):])
        except (TypeError, ValueError):
            self.log.debug('Received invalid timestamp in package from %s:%s', *addr)
            if self.disable_hidden:
                # Wrapped in to_bytes for consistency with the other replies
                # (the transport requires bytes on Python 3).
                self._sendto(salt.utils.stringutils.to_bytes(
                    '{0}:E:{1}'.format(self.signature, 'Invalid timestamp')), addr)
            return
        if datetime.datetime.fromtimestamp(timestamp) < (datetime.datetime.now() - datetime.timedelta(seconds=20)):
            if self.disable_hidden:
                self._sendto(salt.utils.stringutils.to_bytes(
                    '{0}:E:{1}'.format(self.signature, 'Timestamp is too old')), addr)
            self.log.debug('Received outdated package from %s:%s', *addr)
            return
        self.log.debug('Received "%s" from %s:%s', message, *addr)
        self._sendto(salt.utils.stringutils.to_bytes(str('{0}:@:{1}').format(  # future lint: disable=blacklisted-function
            self.signature, salt.utils.json.dumps(self.answer, _json_module=_json))), addr)
    else:
        if self.disable_hidden:
            # Fixed: the original passed `addr` into to_bytes() and called
            # _sendto() with a single argument, unlike every other reply.
            self._sendto(salt.utils.stringutils.to_bytes(
                '{0}:E:{1}'.format(self.signature, 'Invalid packet signature')), addr)
        self.log.debug('Received bad signature from %s:%s', *addr)
|
def start_all(self):
    """Start every registered service instance that is not already running.

    Failures to start an individual service are recorded (not raised) via
    ``expects.expect_no_raises`` so the remaining services still start.
    """
    for alias, service in self._service_objects.items():
        if service.is_alive:
            continue
        with expects.expect_no_raises('Failed to start service "%s".' % alias):
            service.start()
|
def multiline_repr(text, special_chars=('\n', '"')):
    """Build a string representation suited to triple-quoted contexts.

    Works like a normal repr except that the given "special characters"
    are left unescaped, so the result can be embedded in a triple-quoted
    multi-line string (avoiding escaped newlines and double quotes).
    Pass ``RAW_MULTILINE_CHARS`` as ``special_chars`` when the target is a
    "raw" triple-quoted string (to also leave backslashes alone).

    :param text: string
    :type text: str or unicode
    :param iterable special_chars: characters to remove/restore
    :returns: representation
    :rtype: str
    """
    if not special_chars:
        # Base case: escape everything via ascii() and drop the surrounding
        # quotes (plus the leading ``u`` prefix on Python 2).
        return ascii(text)[2 if PY2 else 1:-1]
    separator = special_chars[0]
    remaining = special_chars[1:]
    # Split on the protected character, repr each piece with the remaining
    # protected characters, then stitch the pieces back together verbatim.
    pieces = (multiline_repr(piece, remaining) for piece in text.split(separator))
    return separator.join(pieces)
|
def list_member_events(self, upcoming=True):
    '''a method to retrieve the list of events the member attended or will attend

    :param upcoming: [optional] boolean to filter list to only future events
    :return: dictionary with list of event details inside [json] key
    '''
    # https://www.meetup.com/meetup_api/docs/self/events/
    # construct request fields
    url = '%s/self/events' % self.endpoint
    params = {
        'status': 'upcoming' if upcoming else 'past',
        'fields': 'comment_count,event_hosts,rsvp_rules,short_link,survey_questions,rsvpable',
    }
    # send requests
    response_details = self._get_request(url, params=params)
    # construct method output: copy response metadata, rebuild event list
    member_events = {
        key: value for key, value in response_details.items() if key != 'json'
    }
    member_events['json'] = [
        self._reconstruct_event(event) for event in response_details['json']
    ]
    return member_events
|
def summarize(self, text, topics=4, length=5, binary_matrix=True, topic_sigma_threshold=0.5):
    """Implements the method of latent semantic analysis described by Steinberger and Jezek in the paper:

    J. Steinberger and K. Jezek (2004). Using latent semantic analysis in text summarization and summary
    evaluation. Proc. ISIM '04, pp. 93-100.

    :param text: a string of text to be summarized, path to a text file, or URL starting with http
    :param topics: the number of topics/concepts covered in the input text (defines the degree of
        dimensionality reduction in the SVD step)
    :param length: the length of the output summary; either a number of sentences (e.g. 5) or a
        percentage of the original document (e.g. 0.5)
    :param binary_matrix: boolean value indicating whether the matrix of word counts should be binary
        (True by default)
    :param topic_sigma_threshold: filters out topics/concepts with a singular value less than this
        percentage of the largest singular value (must be between 0 and 1, 0.5 by default)
    :return: list of sentences for the summary
    :raises ValueError: if ``topic_sigma_threshold`` is outside [0, 1]
    """
    # Validate early so a bad threshold fails before any heavy computation.
    # NOTE: the original check read ``1 <= threshold < 0``, which is always
    # False, so out-of-range values were silently accepted.
    if not 0 <= topic_sigma_threshold <= 1:
        raise ValueError('Parameter topic_sigma_threshold must take a value between 0 and 1')
    text = self._parse_input(text)
    sentences, unprocessed_sentences = self._tokenizer.tokenize_sentences(text)
    length = self._parse_summary_length(length, len(sentences))
    if length == len(sentences):
        # Requested summary covers the whole document: nothing to rank.
        return unprocessed_sentences
    topics = self._validate_num_topics(topics, sentences)
    # Generate a matrix of terms that appear in each sentence
    weighting = 'binary' if binary_matrix else 'frequency'
    sentence_matrix = self._compute_matrix(sentences, weighting=weighting)
    sentence_matrix = sentence_matrix.transpose()
    # Filter out negatives in the sparse matrix (need to do this on Vt for LSA method):
    sentence_matrix = sentence_matrix.multiply(sentence_matrix > 0)
    s, u, v = self._svd(sentence_matrix, num_concepts=topics)
    # Only consider topics/concepts whose singular value reaches the requested
    # fraction of the largest singular value; zero out all the others.
    sigma_threshold = max(u) * topic_sigma_threshold
    u[u < sigma_threshold] = 0
    # Build a "length vector" containing the length (i.e. saliency) of each sentence
    saliency_vec = np.dot(np.square(u), np.square(v))
    top_sentences = saliency_vec.argsort()[-length:][::-1]
    # Return the sentences in the order in which they appear in the document
    top_sentences.sort()
    return [unprocessed_sentences[i] for i in top_sentences]
|
def _calc_avg_and_last_val ( self , has_no_column , sum_existing_columns ) :
"""Calculate the average of all columns and return a rounded down number .
Store the remainder and add it to the last row . Could be implemented
better . If the enduser wants more control , he can also just add the
amount of columns . Will work fine with small number ( < 4 ) of items in a
row .
: param has _ no _ column :
: param sum _ existing _ columns :
: return : average , columns _ for _ last _ element"""
|
sum_no_columns = len ( has_no_column )
columns_left = self . ALLOWED_COLUMNS - sum_existing_columns
if sum_no_columns == 0 :
columns_avg = columns_left
else :
columns_avg = int ( columns_left / sum_no_columns )
remainder = columns_left - ( columns_avg * sum_no_columns )
columns_for_last_element = columns_avg + remainder
return columns_avg , columns_for_last_element
|
def _Rzderiv(self, R, z, phi=0., t=0.):
    """NAME:
       _Rzderiv
    PURPOSE:
       evaluate the mixed radial, vertical derivative for this potential
    INPUT:
       R - Galactocentric cylindrical radius
       z - vertical height
       phi - azimuth
       t - time
    OUTPUT:
       the mixed radial, vertical derivative
    """
    if not self.isNonAxi:
        phi = 0.
    x, y, z = self._compute_xyz(R, phi, z, t)
    # Mixed second derivatives in the frame aligned with the rotating bar
    d2xz = self._2ndderiv_xyz(x, y, z, 0, 2)
    d2yz = self._2ndderiv_xyz(x, y, z, 1, 2)
    # Rotate back through the bar angle to the inertial frame
    bar_angle = self._omegab * t + self._pa
    cosb = np.cos(bar_angle)
    sinb = np.sin(bar_angle)
    phixz = cosb * d2xz + sinb * d2yz
    phiyz = cosb * d2yz - sinb * d2xz
    # Project the Cartesian mixed derivatives onto the cylindrical R direction
    return np.cos(phi) * phixz + np.sin(phi) * phiyz
|
def put(self, path, body):
    """Issue an HTTP PUT request for ``path``, sending ``body`` as JSON."""
    url = self._format_url(API_ROOT + path)
    return self._make_request('put', url, {'json': body})
|
def _render_rows(self):
    """Write the data rows into the worksheet.

    Cells get their value (empty string for ``None``) and, when the
    corresponding header defines one, a number format.
    """
    rows = getattr(self, '_datas', ())
    header_row = getattr(self, 'headers', ())
    for row_offset, row in enumerate(rows):
        # Row 1 holds the headers, so data starts on worksheet row 2.
        sheet_row = row_offset + 2
        for column_index, value in enumerate(row):
            cell = self.worksheet.cell(row=sheet_row, column=column_index + 1)
            cell.value = value if value is not None else ""
            if column_index < len(header_row):
                number_format = get_cell_format(header_row[column_index])
                if number_format is not None:
                    cell.number_format = number_format
|
def display_output(data, out=None, opts=None, **kwargs):
    '''
    Print the passed data using the desired output

    If ``opts`` contains an ``output_file`` entry (a path string or a
    file-like object), the rendered output is written there instead of
    being printed to the console.
    '''
    if opts is None:
        opts = {}
    # Render `data` with the requested outputter before deciding where it goes.
    display_data = try_printout(data, out, opts, **kwargs)

    output_filename = opts.get('output_file', None)
    log.trace('data = %s', data)
    try:
        # output filename can be either '' or None
        if output_filename:
            if not hasattr(output_filename, 'write'):
                # A path string: open for append and remember that we own
                # the handle so it gets closed below.
                ofh = salt.utils.files.fopen(output_filename, 'a')  # pylint: disable=resource-leakage
                fh_opened = True
            else:
                # Filehandle/file-like object supplied by the caller.
                ofh = output_filename
                fh_opened = False

            try:
                fdata = display_data
                if isinstance(fdata, six.text_type):
                    try:
                        fdata = fdata.encode('utf-8')
                    except (UnicodeDecodeError, UnicodeEncodeError):
                        # try to let the stream write
                        # even if we didn't encode it
                        pass
                if fdata:
                    ofh.write(salt.utils.stringutils.to_str(fdata))
                    ofh.write('\n')
            finally:
                # Only close handles we opened ourselves; caller-provided
                # file objects stay open.
                if fh_opened:
                    ofh.close()
            return
        if display_data:
            salt.utils.stringutils.print_cli(display_data)
    except IOError as exc:
        # Only raise if it's NOT a broken pipe
        if exc.errno != errno.EPIPE:
            raise exc
|
def _prepare_configs ( configs , requires_filters , temporal_timeouts ) :
"""Overrides the filters specified in the decorator with the given ones
: param configs : Field → ( Requirement , key , allow _ none ) dictionary
: param requires _ filters : Content of the ' requires . filter ' component
property ( field → string )
: param temporal _ timeouts : Content of the ' temporal . timeouts ' component
property ( field → float )
: return : The new configuration dictionary"""
|
if not isinstance ( requires_filters , dict ) :
requires_filters = { }
if not isinstance ( temporal_timeouts , dict ) :
temporal_timeouts = { }
if not requires_filters and not temporal_timeouts : # No explicit configuration given
return configs
# We need to change a part of the requirements
new_configs = { }
for field , config in configs . items ( ) : # Extract values from tuple
requirement , timeout = config
explicit_filter = requires_filters . get ( field )
explicit_timeout = temporal_timeouts . get ( field )
# Convert the timeout value
try :
explicit_timeout = int ( explicit_timeout )
if explicit_timeout <= 0 :
explicit_timeout = timeout
except ( ValueError , TypeError ) :
explicit_timeout = timeout
if not explicit_filter and not explicit_timeout : # Nothing to do
new_configs [ field ] = config
else :
try : # Store an updated copy of the requirement
requirement_copy = requirement . copy ( )
if explicit_filter :
requirement_copy . set_filter ( explicit_filter )
new_configs [ field ] = ( requirement_copy , explicit_timeout )
except ( TypeError , ValueError ) : # No information for this one , or invalid filter :
# keep the factory requirement
new_configs [ field ] = config
return new_configs
|
def check_wlcalib_sp(sp, crpix1, crval1, cdelt1, wv_master,
                     coeff_ini=None, naxis1_ini=None,
                     min_nlines_to_refine=0, interactive=False,
                     threshold=0, nwinwidth_initial=7, nwinwidth_refined=5,
                     ntimes_match_wv=2, poldeg_residuals=1,
                     times_sigma_reject=5, use_r=False, title=None,
                     remove_null_borders=True, ylogscale=False,
                     geometry=None, pdf=None, debugplot=0):
    """Check wavelength calibration of the provided spectrum.

    Parameters
    ----------
    sp : numpy array
        Wavelength calibrated spectrum.
    crpix1 : float
        CRPIX1 keyword.
    crval1 : float
        CRVAL1 keyword.
    cdelt1 : float
        CDELT1 keyword.
    wv_master : numpy array
        Array with the detailed list of expected arc lines.
    coeff_ini : array like
        Coefficients initially employed to obtain the wavelength
        calibration of the provided spectrum. When these coefficients
        are provided, this function computes a refined version of
        them, incorporating the corrections derived from the fit to
        the residuals.
    naxis1_ini : int
        NAXIS1 in original spectrum employed to fit the initial
        wavelength calibration.
    min_nlines_to_refine : int
        Minimum number of identified lines necessary to perform the
        wavelength calibration refinement. If zero, no minimum number
        is required.
    interactive : bool
        If True, the function allows the user to modify the residuals
        fit.
    threshold : float
        Minimum signal in the peaks.
    nwinwidth_initial : int
        Width of the window where each peak must be initially found.
    nwinwidth_refined : int
        Width of the window where each peak must be refined.
    ntimes_match_wv : float
        Times CDELT1 to match measured and expected wavelengths.
    poldeg_residuals : int
        Polynomial degree for fit to residuals.
    times_sigma_reject : float or None
        Number of times the standard deviation to reject points
        iteratively. If None, the fit does not reject any point.
    use_r : bool
        If True, additional statistical analysis is performed using R.
    title : string
        Plot title.
    remove_null_borders : bool
        If True, remove leading and trailing zeros in spectrum.
    ylogscale : bool
        If True, the spectrum is displayed in logarithmic units. Note
        that this is only employed for display purposes. The line peaks
        are found in the original spectrum.
    geometry : tuple (4 integers) or None
        x, y, dx, dy values employed to set the window geometry.
    pdf : PdfFile object or None
        If not None, output is sent to PDF file.
    debugplot : int
        Debugging level for messages and plots. For details see
        'numina.array.display.pause_debugplot.py'.

    Returns
    -------
    coeff_refined : numpy array
        Refined version of the initial wavelength calibration
        coefficients. These coefficients are computed only when
        the input parameter 'coeff_ini' is not None.
    """
    # protections
    if type(sp) is not np.ndarray:
        raise ValueError("sp must be a numpy.ndarray")
    elif sp.ndim != 1:
        raise ValueError("sp.ndim is not 1")
    # coeff_ini and naxis1_ini must be provided together (or not at all)
    if coeff_ini is None and naxis1_ini is None:
        pass
    elif coeff_ini is not None and naxis1_ini is not None:
        pass
    else:
        raise ValueError("coeff_ini and naxis1_ini must be simultaneously "
                         "None of both different from None")
    # check that interactive use takes place when plotting
    if interactive:
        if abs(debugplot) % 10 == 0:
            raise ValueError("ERROR: interative use of this function is not "
                             "possible when debugplot=", debugplot)
    # interactive and pdf are incompatible
    if interactive:
        if pdf is not None:
            raise ValueError("ERROR: interactive use of this function is not "
                             "possible when pdf is not None")
    # display list of expected arc lines
    if abs(debugplot) in (21, 22):
        print('wv_master:', wv_master)
    # determine spectrum length
    naxis1 = sp.shape[0]
    # define default values in case no useful lines are identified
    fxpeaks = np.array([])
    ixpeaks_wv = np.array([])
    fxpeaks_wv = np.array([])
    wv_verified_all_peaks = np.array([])
    nlines_ok = 0
    xresid = np.array([], dtype=float)
    yresid = np.array([], dtype=float)
    reject = np.array([], dtype=bool)
    polyres = np.polynomial.Polynomial([0])
    poldeg_effective = 0
    ysummary = summary(np.array([]))
    local_ylogscale = ylogscale
    # find initial line peaks
    ixpeaks = find_peaks_spectrum(sp, nwinwidth=nwinwidth_initial,
                                  threshold=threshold)
    npeaks = len(ixpeaks)
    if npeaks > 0:
        # refine location of line peaks
        fxpeaks, sxpeaks = refine_peaks_spectrum(sp, ixpeaks,
                                                 nwinwidth=nwinwidth_refined,
                                                 method="gaussian")
        ixpeaks_wv = fun_wv(ixpeaks + 1, crpix1, crval1, cdelt1)
        fxpeaks_wv = fun_wv(fxpeaks + 1, crpix1, crval1, cdelt1)
        # match peaks with expected arc lines
        delta_wv_max = ntimes_match_wv * cdelt1
        wv_verified_all_peaks = match_wv_arrays(wv_master, fxpeaks_wv,
                                                delta_wv_max=delta_wv_max)
    # Main loop: each pass (re)fits the residuals, displays the result and,
    # in interactive mode, applies the action chosen from the menu. The
    # loop exits when not plotting, not interactive, or the user selects 'x'.
    loop = True
    while loop:
        if npeaks > 0:
            lines_ok = np.where(wv_verified_all_peaks > 0)
            nlines_ok = len(lines_ok[0])
            # there are matched lines
            if nlines_ok > 0:
                # compute residuals
                xresid = fxpeaks_wv[lines_ok]
                yresid = wv_verified_all_peaks[lines_ok] - fxpeaks_wv[lines_ok]
                # determine effective polynomial degree
                if nlines_ok > poldeg_residuals:
                    poldeg_effective = poldeg_residuals
                else:
                    poldeg_effective = nlines_ok - 1
                # fit polynomial to residuals
                polyres, yresres, reject = \
                    polfit_residuals_with_sigma_rejection(
                        x=xresid, y=yresid, deg=poldeg_effective,
                        times_sigma_reject=times_sigma_reject,
                        use_r=use_r, debugplot=0)
                ysummary = summary(yresres)
            else:
                polyres = np.polynomial.Polynomial([0.0])
        # compare (as rounded strings) the matched and expected line lists
        # to determine which master lines remain unmatched
        list_wv_found = [str(round(wv, 4))
                         for wv in wv_verified_all_peaks if wv != 0]
        list_wv_master = [str(round(wv, 4)) for wv in wv_master]
        set1 = set(list_wv_master)
        set2 = set(list_wv_found)
        missing_wv = list(set1.symmetric_difference(set2))
        missing_wv.sort()
        if abs(debugplot) >= 10:
            print('-' * 79)
            print(">>> Number of arc lines in master file:", len(wv_master))
        if abs(debugplot) in [21, 22]:
            print(">>> Unmatched lines...................:", missing_wv)
        elif abs(debugplot) >= 10:
            print(">>> Number of unmatched lines.........:", len(missing_wv))
        if abs(debugplot) >= 10:
            print(">>> Number of line peaks found........:", npeaks)
            print(">>> Number of identified lines........:", nlines_ok)
            print(">>> Number of unmatched lines.........:", len(missing_wv))
            print(">>> Polynomial degree in residuals fit:", poldeg_effective)
            print(">>> Polynomial fit to residuals.......:\n", polyres)
        # display results
        if (abs(debugplot) % 10 != 0) or (pdf is not None):
            from numina.array.display.matplotlib_qt import plt
            if pdf is not None:
                fig = plt.figure(figsize=(11.69, 8.27), dpi=100)
            else:
                fig = plt.figure()
            set_window_geometry(geometry)
            # residuals
            ax2 = fig.add_subplot(2, 1, 1)
            if nlines_ok > 0:
                ymin = min(yresid)
                ymax = max(yresid)
                dy = ymax - ymin
                if dy > 0:
                    ymin -= dy / 20
                    ymax += dy / 20
                else:
                    ymin -= 0.5
                    ymax += 0.5
            else:
                ymin = -1.0
                ymax = 1.0
            ax2.set_ylim(ymin, ymax)
            if nlines_ok > 0:
                ax2.plot(xresid, yresid, 'o')
                # sigma-rejected points are shown in grey
                ax2.plot(xresid[reject], yresid[reject], 'o', color='tab:gray')
            ax2.set_ylabel('Offset ' + r'($\AA$)')
            ax2.yaxis.label.set_size(10)
            if title is not None:
                ax2.set_title(title, **{'size': 12})
            xwv = fun_wv(np.arange(naxis1) + 1.0, crpix1, crval1, cdelt1)
            ax2.plot(xwv, polyres(xwv), '-')
            ax2.text(1, 0, 'CDELT1 (' + r'$\AA$' + '/pixel)=' + str(cdelt1),
                     horizontalalignment='right',
                     verticalalignment='bottom',
                     transform=ax2.transAxes)
            ax2.text(0, 0, 'Wavelength ' + r'($\AA$) --->',
                     horizontalalignment='left',
                     verticalalignment='bottom',
                     transform=ax2.transAxes)
            ax2.text(0, 1, 'median=' +
                     str(round(ysummary['median'], 4)) + r' $\AA$',
                     horizontalalignment='left',
                     verticalalignment='top',
                     transform=ax2.transAxes)
            ax2.text(0.5, 1, 'npoints (total / used / removed)',
                     horizontalalignment='center',
                     verticalalignment='top',
                     transform=ax2.transAxes)
            ax2.text(0.5, 0.92,
                     str(ysummary['npoints']) + ' / ' +
                     str(ysummary['npoints'] - sum(reject)) + ' / ' +
                     str(sum(reject)),
                     horizontalalignment='center',
                     verticalalignment='top',
                     transform=ax2.transAxes)
            ax2.text(1, 1, 'robust_std=' +
                     str(round(ysummary['robust_std'], 4)) + r' $\AA$',
                     horizontalalignment='right',
                     verticalalignment='top',
                     transform=ax2.transAxes)
            # median spectrum and peaks
            # remove leading and trailing zeros in spectrum when requested
            if remove_null_borders:
                nonzero = np.nonzero(sp)[0]
                j1 = nonzero[0]
                j2 = nonzero[-1]
                xmin = xwv[j1]
                xmax = xwv[j2]
            else:
                xmin = min(xwv)
                xmax = max(xwv)
            dx = xmax - xmin
            if dx > 0:
                xmin -= dx / 80
                xmax += dx / 80
            else:
                xmin -= 0.5
                xmax += 0.5
            if local_ylogscale:
                # display-only log scale; peaks were found in the raw data
                spectrum = sp - sp.min() + 1.0
                spectrum = np.log10(spectrum)
                ymin = spectrum[ixpeaks].min()
            else:
                spectrum = sp.copy()
                ymin = min(spectrum)
            ymax = max(spectrum)
            dy = ymax - ymin
            if dy > 0:
                ymin -= dy / 20
                ymax += dy / 20
            else:
                ymin -= 0.5
                ymax += 0.5
            ax1 = fig.add_subplot(2, 1, 2, sharex=ax2)
            ax1.set_xlim(xmin, xmax)
            ax1.set_ylim(ymin, ymax)
            ax1.plot(xwv, spectrum)
            if npeaks > 0:
                ax1.plot(ixpeaks_wv, spectrum[ixpeaks], 'o',
                         fillstyle='none', label="initial location")
                ax1.plot(fxpeaks_wv, spectrum[ixpeaks], 'o',
                         fillstyle='none', label="refined location")
                lok = wv_verified_all_peaks > 0
                ax1.plot(fxpeaks_wv[lok], spectrum[ixpeaks][lok], 'go',
                         label="valid line")
            if local_ylogscale:
                ax1.set_ylabel('~ log10(number of counts)')
            else:
                ax1.set_ylabel('number of counts')
            ax1.yaxis.label.set_size(10)
            ax1.xaxis.tick_top()
            ax1.xaxis.set_label_position('top')
            for i in range(len(ixpeaks)):
                # identified lines
                if wv_verified_all_peaks[i] > 0:
                    ax1.text(fxpeaks_wv[i], spectrum[ixpeaks[i]],
                             str(wv_verified_all_peaks[i]) +
                             '(' + str(i + 1) + ')',
                             fontsize=8, horizontalalignment='center')
                else:
                    ax1.text(fxpeaks_wv[i], spectrum[ixpeaks[i]],
                             '(' + str(i + 1) + ')',
                             fontsize=8, horizontalalignment='center')
                # estimated wavelength from initial calibration
                if npeaks > 0:
                    estimated_wv = fun_wv(fxpeaks[i] + 1,
                                          crpix1, crval1, cdelt1)
                    estimated_wv = str(round(estimated_wv, 4))
                    ax1.text(fxpeaks_wv[i], ymin,  # spmedian[ixpeaks[i]],
                             estimated_wv, fontsize=8, color='grey',
                             rotation='vertical',
                             horizontalalignment='center',
                             verticalalignment='top')
            if len(missing_wv) > 0:
                tmp = [float(wv) for wv in missing_wv]
                ax1.vlines(tmp, ymin=ymin, ymax=ymax, colors='grey',
                           linestyles='dotted', label='missing lines')
            ax1.legend()
            if pdf is not None:
                pdf.savefig()
            else:
                if debugplot in [-22, -12, 12, 22]:
                    pause_debugplot(debugplot=debugplot,
                                    optional_prompt='Zoom/Unzoom or ' +
                                                    'press RETURN to '
                                                    'continue...',
                                    pltshow=True)
                else:
                    pause_debugplot(debugplot=debugplot, pltshow=True)
            # display results and request next action
            if interactive:
                print('Recalibration menu')
                print('------------------')
                print('[d] (d)elete all the identified lines')
                print('[r] (r)estart from begining')
                print('[a] (a)utomatic line inclusion')
                print('[l] toggle (l)ogarithmic scale on/off')
                print('[p] modify (p)olynomial degree')
                print('[o] (o)utput data with identified line peaks')
                print('[x] e(x)it without additional changes')
                print('[#] from 1 to ' + str(len(ixpeaks)) +
                      ' --> modify line #')
                ioption = readi('Option', default='x',
                                minval=1, maxval=len(ixpeaks),
                                allowed_single_chars='adloprx')
                if ioption == 'd':
                    # delete all the identified lines
                    wv_verified_all_peaks = np.zeros(npeaks)
                elif ioption == 'r':
                    # restart: redo the automatic matching from scratch
                    delta_wv_max = ntimes_match_wv * cdelt1
                    wv_verified_all_peaks = match_wv_arrays(
                        wv_master, fxpeaks_wv, delta_wv_max=delta_wv_max)
                elif ioption == 'a':
                    # automatic inclusion: rematch after applying the current
                    # residuals fit as a correction to the measured peaks
                    fxpeaks_wv_corrected = np.zeros_like(fxpeaks_wv)
                    for i in range(npeaks):
                        fxpeaks_wv_corrected[i] = fxpeaks_wv[i] + \
                            polyres(fxpeaks_wv[i])
                    delta_wv_max = ntimes_match_wv * cdelt1
                    wv_verified_all_peaks = match_wv_arrays(
                        wv_master, fxpeaks_wv_corrected,
                        delta_wv_max=delta_wv_max)
                elif ioption == 'l':
                    # toggle logarithmic display scale
                    if local_ylogscale:
                        local_ylogscale = False
                    else:
                        local_ylogscale = True
                elif ioption == 'p':
                    poldeg_residuals = readi('New polynomial degree',
                                             minval=0)
                elif ioption == 'o':
                    for i in range(len(ixpeaks)):
                        # identified lines
                        if wv_verified_all_peaks[i] > 0:
                            print(wv_verified_all_peaks[i],
                                  spectrum[ixpeaks[i]])
                elif ioption == 'x':
                    loop = False
                else:
                    # numeric option: manually (re)assign the wavelength of
                    # line number `ioption`
                    print(wv_master)
                    expected_value = fxpeaks_wv[ioption - 1] + \
                        polyres(fxpeaks_wv[ioption - 1])
                    print(">>> Current expected wavelength: ", expected_value)
                    delta_wv_max = ntimes_match_wv * cdelt1
                    close_value = match_wv_arrays(
                        wv_master, np.array([expected_value]),
                        delta_wv_max=delta_wv_max)
                    newvalue = readf('New value (0 to delete line)',
                                     default=close_value[0])
                    wv_verified_all_peaks[ioption - 1] = newvalue
            else:
                loop = False
        else:
            loop = False
    # refined wavelength calibration coefficients
    if coeff_ini is not None:
        npoints_total = len(xresid)
        npoints_removed = sum(reject)
        npoints_used = npoints_total - npoints_removed
        if abs(debugplot) >= 10:
            print('>>> Npoints (total / used / removed)..:',
                  npoints_total, npoints_used, npoints_removed)
        if npoints_used < min_nlines_to_refine:
            print('Warning: number of lines insuficient to refine '
                  'wavelength calibration!')
            copc = 'n'
        else:
            if interactive:
                copc = readc('Refine wavelength calibration coefficients: '
                             '(y)es, (n)o', default='y', valid='yn')
            else:
                copc = 'y'
        if copc == 'y':
            # fold the residuals fit into the initial calibration
            coeff_refined = update_poly_wlcalib(coeff_ini=coeff_ini,
                                                coeff_residuals=polyres.coef,
                                                naxis1_ini=naxis1_ini,
                                                debugplot=0)
        else:
            coeff_refined = np.array(coeff_ini)
    else:
        coeff_refined = None
    if abs(debugplot) % 10 != 0:
        if coeff_refined is not None:
            for idum, fdum in enumerate(zip(coeff_ini, coeff_refined)):
                print(">>> coef#" + str(idum) + ': ', end='')
                print("%+.8E --> %+.8E" % (decimal.Decimal(fdum[0]),
                                           decimal.Decimal(fdum[1])))
    return coeff_refined
|
def minimum_dtype(x, dtype=np.bool_):
    """returns the "most basic" dtype which represents `x` properly, which
    provides at least the same value range as the specified dtype."""

    def _fits(value, candidate):
        # True when casting `value` to `candidate` loses no information
        try:
            cast = candidate.type(value)
        except (ValueError, OverflowError):
            # casting overflowed: the dtype is too small
            return False
        return cast == value or np.isnan(value)

    def _widen(value, candidate, promotion_table, fallback=None):
        # Walk the promotion chain until the value fits, or fall back
        while True:
            try:
                candidate = np.dtype(promotion_table[candidate.name])
            except KeyError:
                if fallback is not None:
                    return np.dtype(fallback)
                raise ValueError("Can not determine dtype of %r" % value)
            if _fits(value, candidate):
                return np.dtype(candidate)

    dtype = np.dtype(dtype)
    if _fits(x, dtype):
        return dtype
    if np.issubdtype(dtype, np.inexact):
        return _widen(x, dtype, _next_float_dtype)
    return _widen(x, dtype, _next_int_dtype, fallback=np.float32)
|
def init_app(self, app):
    """Flask application initialization.

    Loads the default configuration, registers this extension instance on
    the application and attaches the crawler CLI command group.
    """
    self.init_config(app)
    registry = app.extensions
    registry['inspire-crawler'] = self
    app.cli.add_command(crawler_cmd)
|
def naturaldate(value):
    """Like naturalday, but appends the year once the date is a year or
    more away from today."""
    try:
        as_date = date(value.year, value.month, value.day)
    except (AttributeError, OverflowError, ValueError):
        # Not date-like, or its fields fall outside the valid date range:
        # hand the value back untouched.
        return value
    age = abs_timedelta(as_date - date.today())
    # Include the year in the format only for dates >= one year away
    format_args = ('%b %d %Y',) if age.days >= 365 else ()
    return naturalday(as_date, *format_args)
|
def _read_channel(channel, stream, start, duration):
    """Read one channel from an open frame stream using lalframe."""
    series_type = lalframe.FrStreamGetTimeSeriesType(channel, stream)
    # The type map pairs each frame series type with (reader, dtype)
    reader = _fr_type_map[series_type][0]
    dtype = _fr_type_map[series_type][1]
    raw = reader(stream, channel, start, duration, 0)
    return TimeSeries(raw.data.data, delta_t=raw.deltaT,
                      epoch=start, dtype=dtype)
|
def delete(self, url, params=None):
    """Executes an HTTP DELETE request for the given URL.

    ``params`` should be a dictionary
    """
    raw_response = self.http.delete(url, params=params, **self.requests_params)
    return self.process(raw_response)
|
def interpolate(self, **options):
    """Interpolate the irradiance onto an evenly spaced grid.

    A regular grid makes subsequent integration and folding (with a
    channel relative spectral response) straightforward. The result is
    stored on ``self.ipol_wavelength`` / ``self.ipol_irradiance``.

    options:
        dlambda: grid spacing used when interpolating/resampling
        ival_wavelength: (start, end) tuple, in the current wavespace,
            bounding the interval over which to interpolate/convolute
            the spectral response curve on the spectral irradiance data
    """
    from scipy.interpolate import InterpolatedUnivariateSpline

    # The user defined wavelength span is not yet used: FIXME!
    interval = options.get('ival_wavelength')
    if 'dlambda' in options:
        self._dlambda = options['dlambda']

    # The abscissa of the stored data depends on the active wavespace
    if self.wavespace == 'wavelength':
        abscissa = self.wavelength
    else:
        abscissa = self.wavenumber
    if interval is None:
        start, end = abscissa[0], abscissa[-1]
    else:
        start, end = interval

    num_points = round((end - start) / self._dlambda) + 1
    grid = np.linspace(start, end, num_points)
    spline = InterpolatedUnivariateSpline(abscissa, self.irradiance)
    self.ipol_wavelength = grid
    self.ipol_irradiance = spline(grid)
|
def tweets_default(*args):
    """Tweets for the default settings.

    Positional arguments are accepted but ignored; the query is built
    entirely from the ``TWITTER_DEFAULT_*`` settings.
    """
    query_type = settings.TWITTER_DEFAULT_QUERY_TYPE
    query_args = (settings.TWITTER_DEFAULT_QUERY,
                  settings.TWITTER_DEFAULT_NUM_TWEETS)
    # List queries are limited to one tweet per user.
    per_user = 1 if query_type == QUERY_TYPE_LIST else None
    return tweets_for(query_type, query_args, per_user=per_user)
|
def serialize(self, tag):
    """Return the literal representation of *tag*.

    Dispatches to the ``serialize_<serializer>`` method matching the tag's
    ``serializer`` attribute; raises ``TypeError`` when no handler exists.
    """
    method_name = f'serialize_{tag.serializer}'
    handler = getattr(self, method_name, None)
    if handler is not None:
        return handler(tag)
    raise TypeError(f'Can\'t serialize {type(tag)!r} instance')
|
def client_receives_message(self, *parameters):
    """Receive a message with template defined using `New Message` and
    validate field values.

    Message template has to be defined with `New Message` before calling
    this.

    Optional parameters:
    - `name` the client name (default is the latest used). Example: `name=Client 1`
    - `timeout` for receiving message. Example: `timeout=0.1`
    - `latest` if set to True, get latest message from buffer instead of first. Default is False. Example: `latest=True`
    - message field values for validation separated with colon. Example: `some_field:0xaf05`

    Examples:
    | ${msg} = | Client receives message |
    | ${msg} = | Client receives message | name=Client1 | timeout=5 |
    | ${msg} = | Client receives message | message_field:(0|1) |
    """
    receive_ctx = self._receive(self._clients, *parameters)
    with receive_ctx as (received, message_fields, header_fields):
        self._validate_message(received, message_fields, header_fields)
        return received
|
def enrich(self, column):
    """Add gender information, via the genderize.io API, for the names in *column*.

    Only the first word of each value is treated as the name
    (e.g. for "Daniel Izquierdo <dizquierdo@bitergia.com>" the name is
    "Daniel"). Results are cached in ``self.gender`` across calls so that
    repeated names cost at most one API access.

    :param column: column where the name is found
    :type column: string
    :return: original dataframe with four new columns:
        * gender: male, female or unknown
        * gender_probability: value between 0 and 1
        * gender_count: number of names found in the Genderized DB
        * gender_analyzed_name: name that was sent to the API for analysis
    :rtype: pandas.DataFrame
    """
    if column not in self.data.columns:
        return self.data

    # Only the first token of each value is assumed to be the name.
    first_names = self.data[column].str.split(" ").str[0]
    self.data["gender_analyzed_name"] = first_names.fillna("noname")
    # Initialize probability as float to avoid an int->float dtype upcast
    # (and the associated pandas warning) when probabilities are assigned.
    self.data["gender_probability"] = 0.0
    self.data["gender"] = "Unknown"
    self.data["gender_count"] = 0

    for name in self.data["gender_analyzed_name"].unique():
        if name in self.gender:
            gender_result = self.gender[name]
        else:
            try:
                # TODO: some errors found due to encode utf-8 issues.
                # Adding a try-except in the meantime.
                gender_result = self.connection.get([name])[0]
            except Exception:
                continue
            # Cache the result so later occurrences skip the API.
            self.gender[name] = gender_result

        if gender_result["gender"] is None:
            gender_result["gender"] = "NotKnown"

        mask = self.data["gender_analyzed_name"] == name
        self.data.loc[mask, "gender"] = gender_result["gender"]
        if "probability" in gender_result:
            self.data.loc[mask, "gender_probability"] = gender_result["probability"]
            self.data.loc[mask, "gender_count"] = gender_result["count"]

    # BUG FIX: DataFrame.fillna returns a new frame (it is not in-place);
    # the original call discarded the result, so NaNs were never filled.
    self.data = self.data.fillna("noname")
    return self.data
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.