| signature (string, lengths 29–44.1k) | implementation (string, lengths 0–85.2k) |
|---|---|
def formatted_param_type(ptype):
    """Return the short name for a type. Special treatment for by-name and var args."""
    name = ptype["name"]
    if name.startswith("<byname>"):
        # "<byname>[T]" -> "=> T" (strip the wrapper, drop the closing bracket)
        name = name.replace("<byname>[", "=> ")[:-1]
    elif name.startswith("<repeated>"):
        # "<repeated>[T]" -> "T*" (strip the wrapper, star suffix marks varargs)
        name = name.replace("<repeated>[", "")[:-1] + "*"
    return name
def do_media(self, args):
    """Media management command demonstrates multiple layers of sub-commands
    being handled by AutoCompleter."""
    func = getattr(args, 'func', None)
    if func is None:
        # No subcommand was provided, so fall back to the command's help text.
        self.do_help('media')
    else:
        # Dispatch to whichever subcommand handler was selected.
        func(self, args)
def __handle_changed_state(self, state):
    """Emit the input events derived from a changed controller state.

    We need to pack a struct with five numbers per event:
    tv_sec, tv_usec, ev_type, code, value — the actual packing and the
    write happen inside __write_to_character_device. Time is "now";
    ev_type and code are looked up; value is 0/1 for buttons, and the
    axis value is presumably the same convention as Linux evdev.
    """
    timeval = self.__get_timeval()
    # Button events first, then axis events, in one combined batch.
    pending = self.__get_button_events(state, timeval)
    pending += self.__get_axis_events(state, timeval)
    if pending:
        self.__write_to_character_device(pending, timeval)
def bind(cls, app, *paths, methods=None, name=None, view=None):
    """Connect to admin interface and application.

    :param app: application to bind to; its ``ps.admin`` plugin is used for
        registration and for the URL prefix.
    :param paths: explicit URL paths; when empty, a single path is derived
        from the admin prefix and the handler name.
    :param view: when a view is given, the handler is NOT registered with the
        admin plugin (registration only happens for the default view).
    """
    # Register self in admin
    if view is None:
        app.ps.admin.register(cls)
    if not paths:
        paths = ('%s/%s' % (app.ps.admin.cfg.prefix, name or cls.name),)
    # Remember the primary URL on the class for later reference.
    cls.url = paths[0]
    return super(AdminHandler, cls).bind(app, *paths, methods=methods, name=name, view=view)
def trial(path=TESTS_PATH, coverage=False):
    """Run tests using trial.

    :param path: test module/package path handed to trial
        (defaults to TESTS_PATH)
    :param coverage: when True, pass ``--coverage`` so trial records
        coverage data
    """
    args = ['trial']
    if coverage:
        args.append('--coverage')
    args.append(path)
    # Fixed: `print args` was a Python 2 print *statement*, a syntax error
    # under Python 3; the function-call form behaves the same on both.
    print(args)
    local(' '.join(args))
def to_source(node, indent_with=' ' * 4):
    """Convert a node tree back into python sourcecode.

    This is useful for debugging purposes, especially if you're dealing with
    custom asts not generated by python itself. The sourcecode may be
    evaluable even when the AST itself is not compilable/evaluable, since the
    AST carries extra data that is dropped during conversion.

    Each level of indentation is replaced with `indent_with`. Per default this
    parameter is equal to four spaces as suggested by PEP 8, but it might be
    adjusted to match the application's styleguide.
    """
    gen = SourceGenerator(indent_with)
    gen.visit(node)
    return ''.join(gen.result)
def _set_objective_bank_view(self, session):
    """Sets the underlying objective_bank view to match current view."""
    # Pick the session method matching the current view mode; sessions that
    # do not support view switching simply lack the method, which we ignore.
    if self._objective_bank_view == FEDERATED:
        method_name = 'use_federated_objective_bank_view'
    else:
        method_name = 'use_isolated_objective_bank_view'
    try:
        getattr(session, method_name)()
    except AttributeError:
        pass
def _align_series(self, indexer, ser, multiindex_indexer=False):
    """Broadcast a Series to the shape selected by `indexer`.

    Parameters
    ----------
    indexer : tuple, slice, scalar
        The indexer used to get the locations that will be set to `ser`
    ser : pd.Series
        The values to assign to the locations specified by `indexer`
    multiindex_indexer : boolean, optional
        Defaults to False. Should be set to True if `indexer` was from
        a `pd.MultiIndex`, to avoid unnecessary broadcasting.

    Returns
    -------
    `np.array` of `ser` broadcast to the appropriate shape for assignment
    to the locations selected by `indexer`
    """
    if isinstance(indexer, (slice, np.ndarray, list, Index)):
        # Normalize a single indexer into a 1-tuple.
        indexer = tuple([indexer])
    if isinstance(indexer, tuple):
        # flatten np.ndarray indexers
        def ravel(i):
            return i.ravel() if isinstance(i, np.ndarray) else i
        indexer = tuple(map(ravel, indexer))

        # An "aligner" is any axis indexer that is not a full (null) slice.
        aligners = [not com.is_null_slice(idx) for idx in indexer]
        sum_aligners = sum(aligners)
        single_aligner = sum_aligners == 1
        is_frame = self.obj.ndim == 2
        is_panel = self.obj.ndim >= 3
        obj = self.obj

        # are we a single alignable value on a non-primary
        # dim (e.g. panel: 1,2, or frame: 0)?
        # hence need to align to a single axis dimension
        # rather that find all valid dims

        # frame
        if is_frame:
            single_aligner = single_aligner and aligners[0]
        # panel
        elif is_panel:
            single_aligner = (single_aligner and (aligners[1] or aligners[2]))

        # we have a frame, with multiple indexers on both axes; and a
        # series, so need to broadcast (see GH5206)
        if (sum_aligners == self.ndim and all(is_sequence(_) for _ in indexer)):
            ser = ser.reindex(obj.axes[0][indexer[0]], copy=True)._values
            # single indexer
            if len(indexer) > 1 and not multiindex_indexer:
                # Tile the row values across the selected columns, then
                # transpose so rows line up with the first-axis selection.
                len_indexer = len(indexer[1])
                ser = np.tile(ser, len_indexer).reshape(len_indexer, -1).T
            return ser

        for i, idx in enumerate(indexer):
            ax = obj.axes[i]
            # multiple aligners (or null slices)
            if is_sequence(idx) or isinstance(idx, slice):
                if single_aligner and com.is_null_slice(idx):
                    continue
                new_ix = ax[idx]
                if not is_list_like_indexer(new_ix):
                    new_ix = Index([new_ix])
                else:
                    new_ix = Index(new_ix)
                if ser.index.equals(new_ix) or not len(new_ix):
                    # Already aligned (or nothing selected): no reindex needed.
                    return ser._values.copy()
                return ser.reindex(new_ix)._values
            # 2 dims
            elif single_aligner and is_frame:
                # reindex along index
                ax = self.obj.axes[1]
                if ser.index.equals(ax) or not len(ax):
                    return ser._values.copy()
                return ser.reindex(ax)._values
            # > 2 dims
            elif single_aligner:
                broadcast = []
                for n, labels in enumerate(self.obj._get_plane_axes(i)):
                    # reindex along the matching dimensions
                    if len(labels & ser.index):
                        ser = ser.reindex(labels)
                    else:
                        broadcast.append((n, len(labels)))
                # broadcast along other dims
                ser = ser._values.copy()
                for (axis, l) in broadcast:
                    shape = [-1] * (len(broadcast) + 1)
                    shape[axis] = l
                    ser = np.tile(ser, l).reshape(shape)
                if self.obj.ndim == 3:
                    ser = ser.T
                return ser
    elif is_scalar(indexer):
        ax = self.obj._get_axis(1)
        if ser.index.equals(ax):
            return ser._values.copy()
        return ser.reindex(ax)._values

    raise ValueError('Incompatible indexer with Series')
def master_using_raster(mdf, raster, endpoint=False):
    """Get single master based on the raster.

    Parameters
    ----------
    mdf : asammdf.MDF
        measurement object
    raster : float
        new raster
    endpoint : bool
        include maximum time stamp in the new master

    Returns
    -------
    master : np.array
        new master
    """
    if not raster:
        master = np.array([], dtype='<f8')
    else:
        t_min = []
        t_max = []
        # Only the first and last master samples of each channel group are
        # needed to find the global time span.
        for i, group in enumerate(mdf.groups):
            cycles_nr = group.channel_group.cycles_nr
            if cycles_nr:
                master_min = mdf.get_master(i, record_offset=0, record_count=1, )
                if len(master_min):
                    t_min.append(master_min[0])
                # Clear the cache so partial masters are not reused later.
                mdf._master_channel_cache.clear()
                master_max = mdf.get_master(i, record_offset=cycles_nr - 1, record_count=1, )
                if len(master_max):
                    t_max.append(master_max[0])
                mdf._master_channel_cache.clear()
        if t_min:
            t_min = np.amin(t_min)
            t_max = np.amax(t_max)
            # float32 rounding decides whether the span is an exact multiple
            # of the raster; if so, linspace yields an inclusive grid.
            num = float(np.float32((t_max - t_min) / raster))
            if int(num) == num:
                master = np.linspace(t_min, t_max, int(num) + 1)
            else:
                master = np.arange(t_min, t_max, raster)
                if endpoint:
                    # arange excludes the stop value; append it on request.
                    master = np.concatenate([master, [t_max]])
        else:
            master = np.array([], dtype='<f8')
    return master
def unique_justseen(iterable, key=None):
    """Yields elements in order, ignoring serial duplicates.

    >>> list(unique_justseen('AAAABBBCCDAABBB'))
    ['A', 'B', 'C', 'D', 'A', 'B']
    >>> list(unique_justseen('ABBCcAD', str.lower))
    ['A', 'B', 'C', 'A', 'D']
    """
    # groupby collapses consecutive key-equal elements into runs; yield the
    # first original element of each run.
    for _, run in groupby(iterable, key):
        yield next(run)
def timestep_text(self):
    """Return a text string representing the timestep of the collection."""
    steps_per_hour = self.header.analysis_period.timestep
    if steps_per_hour == 1:
        return 'Hourly'
    # Sub-hourly collections are labelled by minutes per step.
    return '{} Minute'.format(int(60 / steps_per_hour))
def itersplit_to_fields(self, _str):
    """Split (or parse) repository log output into fields.

    Returns:
        tuple: self._tuple(*values)
    """
    # Optional preprocessing hook configured on this formatter.
    if self.preparse:
        _str = self.preparse(_str)
    _fields = itersplit(_str, self.fsep)
    try:
        # Pair each namedtuple field with a parsed value; izip_longest pads
        # missing values with None so the tuple is always fully populated.
        values = (t[1] for t in izip_longest(self._tuple._fields, _fields))
        return self._tuple(*values)
    except Exception as e:
        # Log the expected schema and the raw fields to aid debugging,
        # then re-raise for the caller.
        log.error(self._tuple)
        log.error(_fields)
        log.exception(e)
        raise
async def set(self, *args, **kwargs):
    '''Sets the value of the event.

    Delegates to the wrapped event's ``set``; the result is awaited when the
    wrapped implementation is itself awaitable.
    '''
    return await _maybe_await(self.event.set(*args, **kwargs))
def _format_bf(bf, precision=3, trim='0'):
    """Format BF10 to floating point or scientific notation."""
    # Very large or very small Bayes factors read better in scientific
    # notation; everything in between stays positional.
    use_scientific = bf >= 1e4 or bf <= 1e-4
    formatter = (np.format_float_scientific if use_scientific
                 else np.format_float_positional)
    return formatter(bf, precision=precision, trim=trim)
def p_instance_bodylist_noname(self, p):
    'instance_bodylist_noname : instance_bodylist_noname COMMA instance_body_noname'
    # PLY grammar rule: the docstring above IS the production — do not edit it.
    # Append the new body to the accumulated tuple of bodies.
    p[0] = p[1] + (p[3],)
    # Propagate the line number of the first symbol to the result.
    p.set_lineno(0, p.lineno(1))
def immediate(self, name, value):
    """Load something immediately.

    Stores `value` as attribute `name` and records the name in `_all` so
    callers can enumerate everything that has been loaded.
    """
    setattr(self, name, value)
    self._all.add(name)
def lock_version(self, service_id, version_number):
    """Locks the specified version."""
    url = "/service/%s/version/%d/lock" % (service_id, version_number)
    response = self._fetch(url)
    return self._status(response)
def mkdir(name, mode=0o750):
    """Implementation of ``mkdir -p``: creates folder `name` with permissions `mode`.

    * Creates missing parent folders
    * Does nothing if a folder with the given `name` already exists
    * Raises `OSError` if there is already a *file* with the given `name`
    """
    if os.path.isfile(name):
        raise OSError("A file with the same name as the desired "
                      "dir, '%s', already exists." % name)
    if not os.path.isdir(name):
        os.makedirs(name, mode)
def convert_halo_to_array_form(halo, ndim):
    """Converts the :samp:`{halo}` argument to a :samp:`(ndim, 2)` shaped array.

    :type halo: :samp:`None`, :obj:`int`, an :samp:`{ndim}` length sequence
       of :obj:`int` or :samp:`({ndim}, 2)` shaped array of :obj:`int`
    :param halo: Halo to be converted to :samp:`({ndim}, 2)` shaped array form.
    :type ndim: :obj:`int`
    :param ndim: Number of dimensions.
    :rtype: :obj:`numpy.ndarray`
    :return: A :samp:`({ndim}, 2)` shaped array of :obj:`numpy.int64` elements.

    Examples::

        >>> convert_halo_to_array_form(halo=2, ndim=4)
        array([[2, 2],
               [2, 2],
               [2, 2],
               [2, 2]])
        >>> convert_halo_to_array_form(halo=[0, 1, 2], ndim=3)
        array([[0, 0],
               [1, 1],
               [2, 2]])
        >>> convert_halo_to_array_form(halo=[[0, 1], [2, 3], [3, 4]], ndim=3)
        array([[0, 1],
               [2, 3],
               [3, 4]])
    """
    dtyp = _np.int64
    if halo is None:
        # No halo: zero width on both sides of every dimension.
        halo = _np.zeros((ndim, 2), dtype=dtyp)
    elif is_scalar(halo):
        # Same width on both sides of every dimension.
        halo = _np.zeros((ndim, 2), dtype=dtyp) + halo
    elif (ndim == 1) and (_np.array(halo).shape == (2,)):
        # 1D special case: a pair means (lo, hi) of the single dimension.
        halo = _np.array([halo, ], copy=True, dtype=dtyp)
    elif len(_np.array(halo).shape) == 1:
        # Per-dimension width, duplicated for the lo/hi sides.
        halo = _np.array([halo, halo], dtype=dtyp).T.copy()
    else:
        # Already (ndim, 2)-like: just copy with the target dtype.
        halo = _np.array(halo, copy=True, dtype=dtyp)
    if halo.shape[0] != ndim:
        raise ValueError("Got halo.shape=%s, expecting halo.shape=(%s, 2)" % (halo.shape, ndim))
    return halo
def ParseSmsRow(self, parser_mediator, query, row, **unused_kwargs):
    """Parses an SMS row.

    Args:
        parser_mediator (ParserMediator): mediates interactions between
            parsers and other components, such as storage and dfvfs.
        query (str): query that created the row.
        row (sqlite3.Row): row.
    """
    query_hash = hash(query)

    event_data = AndroidSMSEventData()
    event_data.address = self._GetRowValue(query_hash, row, 'address')
    event_data.body = self._GetRowValue(query_hash, row, 'body')
    event_data.offset = self._GetRowValue(query_hash, row, 'id')
    event_data.query = query
    # Unrecognized read/type codes are reported as 'UNKNOWN'.
    event_data.sms_read = self.SMS_READ.get(
        self._GetRowValue(query_hash, row, 'read'), 'UNKNOWN')
    event_data.sms_type = self.SMS_TYPE.get(
        self._GetRowValue(query_hash, row, 'type'), 'UNKNOWN')

    date_time = dfdatetime_java_time.JavaTime(
        timestamp=self._GetRowValue(query_hash, row, 'date'))
    event = time_events.DateTimeValuesEvent(
        date_time, definitions.TIME_DESCRIPTION_CREATION)
    parser_mediator.ProduceEventWithEventData(event, event_data)
def throughput(self, points):
    """A setter for the throughput.

    Parameters
    ----------
    points : sequence
        The array of throughput points
    """
    # The throughput grid must match the wavelength grid one-to-one.
    if points.shape != self.wave.shape:
        raise ValueError("Throughput and wavelength must be same shape.")
    self._throughput = points
def account_balance_before(self):
    """Get the balance of the account associated with this leg before the transaction."""
    # TODO: Consider moving to annotation, particularly once we can count on
    # Django 1.11's subquery support
    transaction_date = self.transaction.date
    # Include strictly earlier transactions, plus same-date transactions with
    # a smaller transaction id (id orders same-day transactions).
    return self.account.balance(
        leg_query=(models.Q(transaction__date__lt=transaction_date)
                   | (models.Q(transaction__date=transaction_date)
                      & models.Q(transaction_id__lt=self.transaction_id))))
def units_from_metadata(obj, guess=True):
    """Try to extract unit hints from metadata and if that fails
    guess based on the object scale.

    Parameters
    ----------
    obj : object
        Has attributes 'metadata' (dict) and 'scale' (float)
    guess : bool
        If metadata doesn't indicate units, guess from scale

    Returns
    -------
    units : str
        A guess of what the units might be
    """
    # Look for unit hints in the name-like metadata entries.
    for field in ('file_name', 'name'):
        if field not in obj.metadata:
            continue
        text = obj.metadata[field].lower()
        if 'unit' not in text:
            continue
        # Normalize all delimiter options to whitespace before tokenizing.
        for sep in '_-.':
            text = text.replace(sep, ' ')
        for token in text.strip().split():
            # The keyword is "unit" or "units".
            if 'unit' not in token:
                continue
            # Strip the keyword and whitespace to expose the unit name.
            candidate = token.replace('units', '').replace('unit', '').strip()
            # Only return hints that are actually valid units.
            if candidate in TO_INCH:
                return candidate
    if not guess:
        raise ValueError('no units and not allowed to guess')
    # Wild-guess section: scale above 100 mystery units is declared to be
    # millimeters, otherwise inches.
    log.warning('no units: guessing from scale')
    if float(obj.scale) > 100.0:
        return 'millimeters'
    return 'inches'
def reverse(self):
    """Reverse the audio data.

    :raises: :class:`~aeneas.audiofile.AudioFileNotInitializedError`: if the
        audio file is not initialized yet

    .. versionadded:: 1.2.0
    """
    if self.__samples is None:
        if self.file_path is None:
            # No samples in memory and no file to load them from.
            self.log_exc(u"AudioFile object not initialized", None, True, AudioFileNotInitializedError)
        else:
            self.read_samples_from_file()
    self.log(u"Reversing...")
    # Reverse only the filled portion of the sample buffer, in place.
    self.__samples[0:self.__samples_length] = numpy.flipud(self.__samples[0:self.__samples_length])
    self.log(u"Reversing... done")
def get_cache(self, decorated_function, *args, **kwargs):
    """:meth:`WCacheStorage.get_cache` method implementation"""
    self.__check(decorated_function, *args, **kwargs)
    if decorated_function in self._storage:
        for i in self._storage[decorated_function]:
            # i['instance'] is called before comparing — presumably a weak
            # reference to the bound instance (args[0]); TODO confirm.
            if i['instance']() == args[0]:
                result = i['result'].cache_entry(*args, **kwargs)
                # Track hit/miss statistics only when enabled.
                if self.__statistic is True:
                    if result.has_value is True:
                        self.__cache_hit += 1
                    else:
                        self.__cache_missed += 1
                return result
    # No entry for this function/instance: count a miss (if tracking) and
    # return an empty cache entry.
    if self.__statistic is True:
        self.__cache_missed += 1
    return WCacheStorage.CacheEntry()
def _app_config_select(self, request, obj):
    """Return the select value for apphook configs.

    :param request: request object
    :param obj: current object
    :return: False if no preselected value is available (more than one or no
        apphook config is present), apphook config instance if exactly one
        apphook config is defined or apphook config defined in the request or
        in the current object, False otherwise
    """
    requested = request.GET.get(self.app_config_attribute, False)
    if not obj and not requested:
        # No object and no querystring value: preselect only when exactly
        # one config exists.
        config_model = get_apphook_model(self.model, self.app_config_attribute)
        if config_model.objects.count() == 1:
            return config_model.objects.first()
        return None
    if obj and getattr(obj, self.app_config_attribute, False):
        return getattr(obj, self.app_config_attribute)
    if requested:
        config_model = get_apphook_model(self.model, self.app_config_attribute)
        return config_model.objects.get(pk=int(requested))
    return False
def run(self):
    """run CMake"""
    # Assemble the cmake command line: generator, source path, -D
    # definitions, then any extra options — in that order.
    command = [self.cmake]
    if self.generator:
        command += ['-G', self.generator]
    if self.path:
        command.append(self.path)
    if self.definitions is not None:
        command.extend('-D%s=%s' % pair for pair in self.definitions.items())
    if self.options is not None:
        command.extend(self.options)
    cmd = yield self.makeRemoteShellCommand(command=command)
    yield self.runCommand(cmd)
    return cmd.results()
def step_indices(group_idx):
    """Return the edges of areas within group_idx, which are filled with the same value."""
    n_edges = step_count(group_idx) + 1
    edges = np.empty(n_edges, np.int64)
    # First edge is the start; last edge is one past the final element.
    edges[0] = 0
    edges[-1] = group_idx.size
    run_start = 0
    out = 1
    for pos, val in enumerate(group_idx):
        if group_idx[run_start] != val:
            # A new run begins here; record its starting position.
            run_start = pos
            edges[out] = pos
            out += 1
    return edges
def _make_datetime(value):
    """Helper function for `make_datetime()`.

    Tries to convert the given value to a :class:`datetime.datetime`. But,
    unlike make_datetime(), if no timezone is given, makes a naive
    `datetime.datetime`.

    Strings will be parsed as ISO 8601 timestamps.
    If a number is provided, it will be interpreted as a UNIX timestamp,
    which by definition is UTC.
    If a `dict` is provided, does `datetime.datetime(**value)`.
    If a `tuple` or a `list` is provided, does `datetime.datetime(*value)`.
    Uses the timezone in the tuple or list if provided.

    :param value: something to convert
    :type value: str | unicode | float | int | :class:`datetime.datetime` | dict | list | tuple
    :return: the value after conversion
    :rtype: :class:`datetime.datetime`
    :raises: ValueError | TypeError
    """
    # NOTE: Python 2 API — `basestring` and `e.message` are used below.
    if isinstance(value, basestring):
        try:
            return aniso8601.parse_datetime(value)
        except Exception as e:
            raise ValueError("Conversion to datetime.datetime failed. Could not " "parse the given string as an ISO 8601 timestamp: " "%s\n\n" "%s" % (repr(value), e.message,))
    try:
        if isinstance(value, datetime.datetime):
            return value
        elif isinstance(value, dict):
            # A 'tzinfo' entry, when present, localizes the result.
            tzinfo = value.pop('tzinfo', None)
            if tzinfo:
                return tzinfo.localize(datetime.datetime(**value))
            else:
                return datetime.datetime(**value)
        # struct_time does not preserve millisecond accuracy per
        # TinCan spec, so this is disabled to discourage its use.
        # elif isinstance(value, struct_time):
        #     posix = mktime(value)
        #     return datetime.datetime.utcfromtimestamp(posix).replace(tzinfo=utc)
        elif isinstance(value, (tuple, list)):
            return tuple_to_datetime(value)
        else:
            # Numbers are UNIX timestamps, by definition UTC.
            return datetime.datetime.utcfromtimestamp(value).replace(tzinfo=utc)
    except Exception as e:
        msg = ("Could not convert the given value of type '%s' to a " "datetime.datetime: %s\n\n" "%s" % (value.__class__.__name__, repr(value), e.message,))
        # Preserve TypeError vs ValueError distinction from the cause.
        raise TypeError(msg) if isinstance(e, TypeError) else ValueError(msg)
def to_glyphs_family_user_data_from_designspace(self):
    """Set the GSFont userData from the designspace family-wide lib data."""
    user_data = self.font.userData
    for key, value in self.designspace.lib.items():
        if key == UFO2FT_FEATURE_WRITERS_KEY and value == DEFAULT_FEATURE_WRITERS:
            # featureWriters settings identical to glyphsLib's defaults need
            # not be stored.
            continue
        if _user_data_has_no_special_meaning(key):
            user_data[key] = value
def remove_namespace(self, ns_uri):
    """Removes the indicated namespace from this set."""
    if not self.contains_namespace(ns_uri):
        return
    # Drop the namespace record and every prefix that pointed at it.
    info = self.__ns_uri_map.pop(ns_uri)
    for prefix in info.prefixes:
        del self.__prefix_map[prefix]
def _maybeCreateTable(self, tableClass, key):
    """A type ID has been requested for an Item subclass whose table was not
    present when this Store was opened. Attempt to create the table, and
    if that fails because another Store object (perhaps in another process)
    has created the table, re-read the schema. When that's done, return
    the typeID.

    This method is internal to the implementation of getTypeID. It must be
    run in a transaction.

    @param tableClass: an Item subclass
    @param key: a 2-tuple of the tableClass's typeName and schemaVersion
    @return: a typeID for the table; a new one if no table exists, or the
        existing one if the table was created by another Store object
        referencing this database.
    """
    try:
        self._justCreateTable(tableClass)
    except errors.TableAlreadyExists:
        # Although we don't have a memory of this table from the last time
        # we called "_startup()", another process has updated the schema
        # since then.
        self._startup()
        return self.typenameAndVersionToID[key]
    # Register the new type in the schema and cache its id locally.
    typeID = self.executeSchemaSQL(_schema.CREATE_TYPE, [tableClass.typeName, tableClass.__module__, tableClass.schemaVersion])
    self.typenameAndVersionToID[key] = typeID
    if self.tablesCreatedThisTransaction is not None:
        self.tablesCreatedThisTransaction.append(tableClass)
    # If the new type is a legacy type (not the current version), we need
    # to queue it for upgrade to ensure that if we are in the middle of an
    # upgrade, legacy items of this version get upgraded.
    cls = _typeNameToMostRecentClass.get(tableClass.typeName)
    if cls is not None and tableClass.schemaVersion != cls.schemaVersion:
        self._upgradeManager.queueTypeUpgrade(tableClass)
    # We can pass () for extantIndexes here because since the table didn't
    # exist for tableClass, none of its indexes could have either.
    # Whatever checks _createIndexesFor will make would give the same
    # result against the actual set of existing indexes as they will
    # against ().
    self._createIndexesFor(tableClass, ())
    # Persist each attribute of the type's schema, in declaration order.
    for n, (name, storedAttribute) in enumerate(tableClass.getSchema()):
        self.executeSchemaSQL(_schema.ADD_SCHEMA_ATTRIBUTE, [typeID, n, storedAttribute.indexed, storedAttribute.sqltype, storedAttribute.allowNone, storedAttribute.attrname, storedAttribute.doc, storedAttribute.__class__.__name__])
    # XXX probably need something better for pythontype eventually,
    # when we figure out a good way to do user-defined attributes or we
    # start parameterizing references.
    return typeID
def fast_steady_state(Ep, epsilonp, detuning_knob, gamma, omega_level, rm, xi, theta, file_name=None, return_code=False):
    r"""Return a fast function that returns a steady state.

    We test a basic two-level system.

    >>> import numpy as np
    >>> from scipy.constants import physical_constants
    >>> from sympy import Matrix, symbols
    >>> from fast.electric_field import electric_field_amplitude_top
    >>> from fast.symbolic import (define_laser_variables,
    ...                            polarization_vector)
    >>> Ne = 2
    >>> Nl = 1
    >>> a0 = physical_constants["Bohr radius"][0]
    >>> rm = [np.array([[0, 0], [a0, 0]]),
    ...       np.array([[0, 0], [0, 0]]),
    ...       np.array([[0, 0], [0, 0]])]
    >>> xi = np.array([[[0, 1], [1, 0]]])
    >>> omega_level = [0, 1.0e9]
    >>> gamma21 = 2*np.pi*6e6
    >>> gamma = np.array([[0, -gamma21], [gamma21, 0]])
    >>> theta = phase_transformation(Ne, Nl, rm, xi)

    We define symbolic variables to be used as token arguments.

    >>> Ep, omega_laser = define_laser_variables(Nl)
    >>> epsilonps = [polarization_vector(0, 0, 0, 0, 1)]
    >>> detuning_knob = [symbols("delta1", real=True)]

    A map to unfold the density matrix.

    >>> unfolding = Unfolding(Ne, True, True, True)

    We obtain a function to calculate Hamiltonian terms.

    >>> aux = (Ep, epsilonps, detuning_knob, gamma,
    ...        omega_level, rm, xi, theta)
    >>> steady_state = fast_steady_state(*aux)

    We specify values for the variables

    >>> detuning_knobs = [100e6]
    >>> Eps = electric_field_amplitude_top(1e-3, 1e-3, 1, "SI")
    >>> Eps *= np.exp(1j*np.pi)
    >>> Eps = [Eps]
    >>> print(steady_state(Eps, detuning_knobs))
    [ 0.018  0.1296 -0.0244]
    """
    # We unpack variables.
    if True:
        Ne = len(omega_level)
        Nl = xi.shape[0]
        unfolding = Unfolding(Ne, True, True, True)
    # We determine which arguments are constants.
    if True:
        try:
            Ep = np.array([complex(Ep[l]) for l in range(Nl)])
            variable_Ep = False
        except:
            # Conversion failed: Ep is symbolic and stays a runtime argument.
            variable_Ep = True
        try:
            epsilonp = [np.array([complex(epsilonp[l][i]) for i in range(3)]) for l in range(Nl)]
            variable_epsilonp = False
        except:
            variable_epsilonp = True
        try:
            detuning_knob = np.array([float(detuning_knob[l]) for l in range(Nl)])
            variable_detuning_knob = False
        except:
            variable_detuning_knob = True
    # We obtain code for the three parts.
    if True:
        args = (Ep, epsilonp, detuning_knob, gamma, omega_level, rm, xi, theta, unfolding, True, None, True)
        bloch_equations = fast_bloch_equations(*args)
        code = bloch_equations + "\n\n"
        if ((not variable_Ep) and (not variable_epsilonp) and (not variable_detuning_knob)):
            # We can call bloch_equations here!
            code += "bloch_equations = bloch_equations()\n"
    # We establish the arguments of the output function.
    if True:
        code += "def steady_state("
        if variable_Ep:
            code += "Ep, "
        if variable_epsilonp:
            code += "epsilonp, "
        if variable_detuning_knob:
            code += "detuning_knob, "
        code += "bloch_equations=bloch_equations):\n"
        code += ' r"""A fast calculation of the steady state."""\n'
    # We call the Bloch equations.
    if True:
        code += r""" A, b = bloch_equations"""
        if ((not variable_Ep) and (not variable_epsilonp) and (not variable_detuning_knob)):
            code += "\n"
        else:
            code += "("
            if variable_Ep:
                code += "Ep, "
            if variable_epsilonp:
                code += "epsilonp, "
            if variable_detuning_knob:
                code += "detuning_knob, "
            # Trim the trailing ", " before closing the call.
            if code[-2:] == ", ":
                code = code[:-2]
            code += ")\n"
        code += """ rhox = np.linalg.solve(A, b)\n"""
        code += """ return rhox\n"""
    # We write the code to file if provided, and execute it.
    if True:
        if file_name is not None:
            # NOTE: Python 2 `file()` builtin.
            f = file(file_name + ".py", "w")
            f.write(code)
            f.close()
        steady_state = code
        if not return_code:
            # Python 2 exec statement: runs the generated source, which
            # rebinds `steady_state` to the generated function.
            exec steady_state
        return steady_state
def create_gist(self, public, files, description=github.GithubObject.NotSet):
    """:calls: `POST /gists <http://developer.github.com/v3/gists>`_

    :param public: bool
    :param files: dict of string to :class:`github.InputFileContent.InputFileContent`
    :param description: string
    :rtype: :class:`github.Gist.Gist`
    """
    assert isinstance(public, bool), public
    # NOTE: Python 2 API — itervalues/iteritems and `unicode` are used below.
    assert all(isinstance(element, github.InputFileContent) for element in files.itervalues()), files
    assert description is github.GithubObject.NotSet or isinstance(description, (str, unicode)), description
    # Each file value contributes its identity payload to the request body.
    post_parameters = {"public": public, "files": {key: value._identity for key, value in files.iteritems()}, }
    if description is not github.GithubObject.NotSet:
        post_parameters["description"] = description
    headers, data = self._requester.requestJsonAndCheck("POST", "/gists", input=post_parameters)
    return github.Gist.Gist(self._requester, headers, data, completed=True)
def _is_iterable(val):
    """Ensure that a value is iterable and not some sort of string."""
    try:
        iter(val)
    except (ValueError, TypeError):
        # Not iterable at all.
        return False
    # Strings iterate over characters, which is rarely what callers want.
    return not isinstance(val, basestring)
def sigusr1_handler(self, unused_signum, unused_frame):
    """Handle SIGUSR1 signal.

    Call the function defined in **settings.SIGUSR1_HANDLER**. If this is
    the main process, forward the signal to all child processes.
    """
    for process in self.processes:
        # Only the main process forwards the signal to its children.
        if process.pid and os.getpid() == self.main_pid:
            try:
                os.kill(process.pid, signal.SIGUSR1)
            except ProcessLookupError:
                # The child may have already exited; nothing to forward.
                pass
    if self._sigusr1_handler_func is not None:
        self._sigusr1_handler_func(self.context)
def exception(self):
    """Try retrieving the last subprocess exception.

    If set, the exception is returned. Otherwise None is returned.
    """
    if self._exception is not None:
        return self._exception
    try:
        exc, tblines = self.exc_queue.get_nowait()
    except Empty:
        # Nothing reported by the subprocess.
        self._exception = None
        self.tb_lines = None
    else:
        # Store any exception that the subprocess encountered and sent.
        self._exception = exc
        self.tb_lines = tblines
    return self._exception
def format_tasks(tasks):
    """Converts a list of tasks to a list of string representations.

    Args:
        tasks: A list of the tasks to convert.
    Returns:
        A list of string formatted tasks.
    """
    lines = []
    for task in tasks:
        status = 'done' if task.done else 'created %s' % task.created
        lines.append('%d : %s (%s)' % (task.key.id(), task.description, status))
    return lines
def merge(self, data):
    '''Merge an array of output dictionaries into a single dictionary
    with properly scoped names.

    Parameters
    ----------
    data : list of dict
        Output dicts as produced by `pumpp.task.BaseTaskTransformer.transform`
        or `pumpp.feature.FeatureExtractor.transform`.

    Returns
    -------
    data_out : dict
        All elements of the input dicts are stacked along the 0 axis,
        and keys are re-mapped by `scope`.
    '''
    merged = dict()
    # Union of keys across all input dicts; each key's values are stacked.
    for key in set().union(*data):
        stacked = np.stack([np.asarray(d[key]) for d in data], axis=0)
        merged[self.scope(key)] = stacked
    return merged
def get_mapping_from_db3_file(db_path):
    '''Read a Rosetta SQLite3 .db3 file and build the PDB->Rosetta residue map.

    Returns a dict keyed by ``"<chain><pdb_resnum rjust 4><icode>"`` with
    values ``{'pose_residue_id', 'name3', 'res_type'}``.  Asserts that the
    mapping covers exactly the full set of Rosetta residues (surjective).
    '''
    import sqlite3  # local import kept for CentOS 5 support
    conn = sqlite3.connect(db_path)
    rows = conn.cursor().execute('''
    SELECT chain_id, pdb_residue_number, insertion_code, residues.struct_id, residues.resNum, residues.name3, residues.res_type
    FROM residue_pdb_identification
    INNER JOIN residues ON residue_pdb_identification.struct_id=residues.struct_id AND residue_pdb_identification.residue_number=residues.resNum
    ''')
    mapping = {}
    pose_ids = []
    for chain, pdb_num, icode, _struct, pose_id, name3, res_type in rows:
        key = "%s%s%s" % (chain, str(pdb_num).rjust(4), icode)
        mapping[key] = {'pose_residue_id': pose_id, 'name3': name3, 'res_type': res_type}
        pose_ids.append(pose_id)
    # Sanity check: every Rosetta residue must appear in the mapping's range.
    all_residues = [r for r in conn.cursor().execute('''SELECT resNum, name3 FROM residues ORDER BY resNum''')]
    assert (sorted(r[0] for r in all_residues) == sorted(pose_ids))
    return mapping
def load_GODag():
    """Download the Sequence Ontology OBO file and parse it into a GODag.

    OBO file retrieved from
    http://obo.cvs.sourceforge.net/viewvc/obo/obo/ontology/genomic-proteomic/so.obo
    """
    from jcvi.apps.base import download
    obo_url = "http://obo.cvs.sourceforge.net/viewvc/obo/obo/ontology/genomic-proteomic/so.obo"
    local_path = download(obo_url, debug=False)
    return GODag(local_path)
def encode(self, payload):
    """Encode *payload* with the configured encoder.

    Raises:
        EncodeError: wrapping any exception raised by the encoder.
    """
    try:
        return self.encoder.encode(payload)
    except Exception as exc:
        raise EncodeError(str(exc))
def time_average(rho, t):
    r"""Return the time-averaged density matrix using the trapezium rule.

    Parameters
    ----------
    rho : ndarray
        Density-matrix samples; the leading axis matches ``t``.
    t : ndarray
        Uniformly spaced sample times (spacing taken from the first pair).

    Returns
    -------
    ndarray
        The trapezium-rule average of ``rho`` over the span of ``t``.
    """
    span = t[-1] - t[0]
    step = t[1] - t[0]
    # Trapezium rule: interior points at full weight, endpoints at half.
    trapezium_sum = 0.5 * (rho[0] + rho[-1]) + np.sum(rho[1:-1], axis=0)
    return step / span * trapezium_sum
def _display_error ( normalized_data , stream ) :
"""print error message from docker - py stream and raise Exception .
u ' message ' : u " Error getting container 981c3e17bfc6138d1985c0c8f5616e9064e56559e817646ad38211a456d6bcf2 from driver
devicemapper : Error mounting ' / dev / mapper / docker - 8:3-34393696-981c3e17bfc6138d1985c0c8f5616e9064e56559e817646ad38211a456d6bcf2'
on ' / data / docker / devicemapper / mnt / 981c3e17bfc6138d1985c0c8f5616e9064e56559e817646ad38211a456d6bcf2 ' : no such file
or directory " """ | # TODO : need to revisit this later .
error = normalized_data [ 'error' ]
if 'error_detail' in normalized_data :
stream . write ( "exit code: {0}\n" . format ( normalized_data [ 'error_detail' ] . get ( 'code' ) , 'There was no exit code provided' ) )
stream . write ( normalized_data [ 'error_detail' ] . get ( 'message' , 'There were no message details provided.' ) )
raise DockerStreamException ( error ) |
def watch_length(params, ctxt, scope, stream, coord):
    """WatchLength - set the first param to the summed width of the rest.

    Example:
        The code below uses the ``WatchLength`` update function to update
        the ``length`` field to the length of the ``data`` field::

            int length <watch=data, update=WatchLength>;
            char data[length];
    """
    if len(params) <= 1:
        raise errors.InvalidArguments(coord, "{} args".format(len(params)), "at least two arguments")
    target, sources = params[0], params[1:]
    width_total = sum(src._pfp__width() for src in sources)
    target._pfp__set_value(width_total)
async def cities(self, country: str, state: str) -> list:
    """Return the supported city names for *country*/*state*."""
    payload = await self._request('get', 'cities', params={'state': state, 'country': country})
    return [entry['city'] for entry in payload['data']]
def get_configurations(self):
    """Return a key->configuration dict for this project.

    Project-level configurations are collected first; any key that also
    exists on the stage is overridden by the stage configuration.
    """
    merged = {cfg.key: cfg for cfg in self.project.project_configurations()}
    # Stage configurations take precedence over project-wide ones.
    merged.update({cfg.key: cfg for cfg in self.stage_configurations()})
    return merged
def which_lease_to_steal(self, stealable_leases, have_lease_count):
    """Pick one lease to steal from the current biggest owner, or None.

    A lease is stolen only when the biggest owner holds at least two more
    leases than this host: stealing one at a time then narrows the gap by
    two per pass and cannot overshoot, which prevents flapping.  Ties for
    biggest are broken arbitrarily (first in sort order).

    :param stealable_leases: List of leases that may be stolen.
    :type stealable_leases: list[~azure.eventprocessorhost.lease.Lease]
    :param have_lease_count: Number of leases this host currently owns.
    :type have_lease_count: int
    :rtype: ~azure.eventprocessorhost.lease.Lease
    """
    owner_counts = self.count_leases_by_owner(stealable_leases)
    biggest_name, biggest_count = sorted(owner_counts.items(), key=lambda kv: kv[1])[-1]
    if biggest_count - have_lease_count < 2:
        # Distribution is already as even as it can get; steal nothing.
        return None
    return [l for l in stealable_leases if l.owner == biggest_name][0]
def call_env_doctree_read(cls, kb_app, sphinx_app: Sphinx, doctree: doctree):
    """Run every callback registered for the doctree-read Sphinx event."""
    callbacks = EventAction.get_callbacks(kb_app, SphinxEvent.DREAD)
    for cb in callbacks:
        cb(kb_app, sphinx_app, doctree)
def get(self, url, params):
    """Issue a GET request against the API, properly formatting the params.

    :param url: a string, the url you are requesting
    :param params: a dict, the key-value of all the parameters needed
        in the request
    :returns: a dict parsed from the JSON response
    """
    full_url = self.host + url
    if params:
        full_url = "%s?%s" % (full_url, urllib.parse.urlencode(params))
    try:
        resp = requests.get(full_url, allow_redirects=False,
                            headers=self.headers, auth=self.oauth)
    except TooManyRedirects as e:
        # Some API errors surface as redirect loops; the attached
        # response still carries a parseable body.
        resp = e.response
    return self.json_parse(resp)
def transformer_tall_pretrain_lm():
    """Hparams for transformer on LM pretraining (with 64k vocab)."""
    hparams = transformer_tall()
    hparams.learning_rate_constant = 2e-4
    hparams.learning_rate_schedule = "linear_warmup*constant*cosdecay"
    # AdamW optimizer settings.
    hparams.optimizer = "adam_w"
    hparams.optimizer_adam_beta1 = 0.9
    hparams.optimizer_adam_beta2 = 0.999
    hparams.optimizer_adam_epsilon = 1e-8
    # Set max examples to something big when pretraining only the LM,
    # definitely an order of magnitude bigger than the number of train steps.
    hparams.multiproblem_schedule_max_examples = 5e8
    # Set train steps to learning_rate_decay_steps or less.
    hparams.learning_rate_decay_steps = 5000000
    return hparams
def get_ids(cs):
    """Return chemical identifier records, keeping only 'names'/'labels' keys."""
    wanted = {'names', 'labels'}
    return [{k: rec[k] for k in rec if k in wanted} for rec in cs]
def getPort(self):
    """Return the TCP port actually bound for this registration.

    Helper for tests: works even when the port was requested as 0 and
    therefore allocated by the OS.
    """
    dispatcher = self.pbmanager.dispatchers[self.portstr]
    return dispatcher.port.getHost().port
def get_machine_stats(self):
    '''Gather spider-based stats per machine from Redis.

    Returns ``{'machines': {<machine>: {<response>: {<window>: value}, ...},
    'count': <machine count>}}``.  Note ``count`` lives inside the
    ``machines`` dict, mirroring the original layout; spider type is
    intentionally collapsed (values for the same machine/response/window
    are summed across spiders).
    '''
    self.logger.debug("Gathering machine stats")
    stats = {}
    for key in self.redis_conn.keys('stats:crawler:*:*:*:*'):
        # Key layout: stats:crawler:<machine>:<spider>:<response>:<window>
        elements = key.split(":")
        machine, response, window = elements[2], elements[4], elements[5]
        response_stats = stats.setdefault(machine, {}).setdefault(response, {})
        value = self._get_key_value(key, window == 'lifetime')
        if window in response_stats:
            response_stats[window] += value
        else:
            response_stats[window] = value
    # Simple machine count, computed before the key is added so it only
    # counts real machines.
    stats['count'] = len(list(stats.keys()))
    return {'machines': stats}
def csv(self, text=TEXT, sep=',', index=True, float_fmt="%.2g"):
    """Generate a CSV table from the table data.

    ``text`` is accepted for interface compatibility but is not used in
    the serialization itself.  Delegates to ``to_csv`` on the underlying
    data (presumably a pandas DataFrame — TODO confirm).
    """
    return self._data.to_csv(sep=sep, index=index, float_format=float_fmt)
def createCells(self):
    '''Instantiate Cell objects based on the characteristics of this population.

    Dispatches on which tag is present: 'cellsList', 'numCells',
    'density' or 'gridSpacing'.  Falls back to a single cell when none
    of these is specified.
    '''
    if 'cellsList' in self.tags:
        # Add individual cells from an explicit list.
        cells = self.createCellsList()
    elif 'numCells' in self.tags:
        # Create cells based on a fixed number of cells.
        cells = self.createCellsFixedNum()
    elif 'density' in self.tags:
        # Create cells based on density (optional ynorm-dep).
        cells = self.createCellsDensity()
    elif 'gridSpacing' in self.tags:
        # Create cells on a regular grid.
        cells = self.createCellsGrid()
    else:
        # Not enough tags to create cells; default to one cell.
        # BUG FIX: corrected the typo "Warninig" in the warning message.
        self.tags['numCells'] = 1
        print('Warning: number or density of cells not specified for population %s; defaulting to numCells = 1' % (self.tags['pop']))
        cells = self.createCellsFixedNum()
    return cells
def paint(self, painter, option, widget):
    """Paint the node, rebuilding it first when flagged dirty, then
    delegating the actual rendering to ``self.draw``.

    :param painter: <QPainter>
    :param option: <QStyleOptionGraphicsItem>
    :param widget: <QWidget>
    """
    # Rebuild cached state when dirty, before any drawing happens.
    if self.isDirty():
        self.rebuild()
    painter.save()
    painter.setOpacity(self.opacity())
    # Hotspots may be drawn either beneath or on top of the node body.
    if self.drawHotspotsUnderneath():
        self.drawHotspots(painter)
    # Pick enabled vs. disabled pen/brush before drawing the body.
    if self.isEnabled():
        painter.setPen(self.penColor())
        painter.setBrush(self.brush())
    else:
        painter.setPen(self.disabledPenColor())
        painter.setBrush(self.disabledBrush())
    # Draw the item itself.
    self.draw(painter, option, widget)
    if not self.drawHotspotsUnderneath():
        self.drawHotspots(painter)
    painter.restore()
def iter_by_year(self):
    """Yield (year, CumulativeRets) pairs, one per calendar year of returns."""
    grouped = self.rets.groupby(lambda idx: idx.year)
    for year, yearly in grouped:
        yield year, CumulativeRets(rets=yearly)
def parse_doctype(cls, file, encoding=None):
    '''Get the doctype from the document.

    The file content is peeked (not consumed) and parsed leniently with
    lxml's recovering XML parser.

    Returns:
        str, None
    '''
    if encoding:
        lxml_encoding = to_lxml_encoding(encoding) or 'latin1'
    else:
        lxml_encoding = encoding
    try:
        parser = lxml.etree.XMLParser(encoding=lxml_encoding, recover=True)
        tree = lxml.etree.parse(io.BytesIO(wpull.util.peek_file(file)), parser=parser)
        if tree.getroot() is not None:
            return tree.docinfo.doctype
    except lxml.etree.LxmlError:
        pass
def duplicate(self, name):
    """Duplicate this element under a new name.

    .. versionadded:: 0.5.8
        Requires SMC version >= 6.3.2

    Shortcut that makes a direct copy of the element with the same type.

    :param str name: name for the duplicated element
    :raises ActionCommandFailed: failed to duplicate the element
    :return: the newly created element
    :rtype: Element
    """
    result = self.make_request(
        method='update', raw_result=True, resource='duplicate',
        params={'name': name})
    element_cls = type(self)
    return element_cls(name=name, href=result.href, type=element_cls.typeof)
def slice_3d_grid(grid, n):
    """Extract a 2-d grid holding element *n* of the 3rd dimension at every
    location of a 3-d grid."""
    phen_grid = initialize_grid((len(grid[0]), len(grid)), 0)
    for row_idx in range(len(grid)):
        for col_idx in range(len(grid[row_idx])):
            phen_grid[row_idx][col_idx] = grid[row_idx][col_idx][n]
    return phen_grid
def stepThroughJsWaf_selenium_chromium(self, url, titleContains='', titleNotContains=''):
    '''Use Selenium + SeleniumChromium to access a resource behind cloudflare protection.

    Params:
        ``url`` - The URL to access that is protected by cloudflare
        ``titleContains`` - A string that is in the title of the protected page, and NOT the
            cloudflare intermediate page. The presence of this string in the page title
            is used to determine whether the cloudflare protection has been successfully
            penetrated.
        ``titleNotContains`` - Converse of ``titleContains``: a string present on the
            intermediate page but not the target page. Exactly one of the two
            conditions must be provided.

    The current WebGetRobust headers are installed into the selenium browser, which
    is then used to access the protected resource. Once the protected page has
    properly loaded, the cloudflare access cookie is then extracted from the selenium
    browser, and installed back into the WebGetRobust instance, so it can continue
    to use the cloudflare auth in normal requests.

    Returns True on success, False when the title condition never became true.
    '''
    # Exactly one of the two title conditions must be supplied.
    if (not titleContains) and (not titleNotContains):
        raise ValueError("You must pass either a string the title should contain, or a string the title shouldn't contain!")
    if titleContains and titleNotContains:
        raise ValueError("You can only pass a single conditional statement!")
    self.log.info("Attempting to access page through cloudflare browser verification.")
    # Lazily create the browser and push our headers/cookies into it.
    if not self.selenium_chromium_driver:
        self._initSeleniumChromiumWebDriver()
    self._syncIntoSeleniumChromiumWebDriver()
    self.selenium_chromium_driver.get(url)
    # Build the wait condition from whichever title test was supplied.
    if titleContains:
        condition = EC.title_contains(titleContains)
    elif titleNotContains:
        condition = SeleniumCommon.title_not_contains(titleNotContains)
    else:
        raise ValueError("Wat?")
    try:
        # Give the challenge up to 45 seconds to clear.
        WebDriverWait(self.selenium_chromium_driver, 45).until(condition)
        success = True
        self.log.info("Successfully accessed main page!")
    except TimeoutException:
        self.log.error("Could not pass through cloudflare blocking!")
        success = False
    # Add cookies to cookiejar
    self._syncOutOfSeleniumChromiumWebDriver()
    self._syncCookiesFromFile()
    return success
def generate(self, id_or_uri):
    """Generate and return a random range.

    Args:
        id_or_uri: ID or URI of the range.

    Returns:
        dict: A dict containing a list with IDs.
    """
    generate_uri = self._client.build_uri(id_or_uri) + "/generate"
    return self._client.get(generate_uri)
def envs(ignore_cache=False):
    '''Return a list of refs that can be used as environments.

    Trunk maps to the ``base`` environment; each directory found under the
    configured branches and tags paths becomes its own environment.  The
    env cache is consulted first unless ``ignore_cache`` is True.
    '''
    if not ignore_cache:
        env_cache = os.path.join(__opts__['cachedir'], 'svnfs/envs.p')
        cache_match = salt.fileserver.check_env_cache(__opts__, env_cache)
        if cache_match is not None:
            return cache_match
    ret = set()
    for repo in init():
        trunk = os.path.join(repo['repo'], repo['trunk'])
        if os.path.isdir(trunk):
            # Add base as the env for trunk
            ret.add('base')
        else:
            log.error('svnfs trunk path \'%s\' does not exist in repo %s, no base '
                      'environment will be provided by this remote',
                      repo['trunk'], repo['url'])
        branches = os.path.join(repo['repo'], repo['branches'])
        if os.path.isdir(branches):
            ret.update(os.listdir(branches))
        else:
            log.error('svnfs branches path \'%s\' does not exist in repo %s',
                      repo['branches'], repo['url'])
        tags = os.path.join(repo['repo'], repo['tags'])
        if os.path.isdir(tags):
            ret.update(os.listdir(tags))
        else:
            log.error('svnfs tags path \'%s\' does not exist in repo %s',
                      repo['tags'], repo['url'])
    # Only expose environments the fileserver configuration permits.
    return [x for x in sorted(ret) if _env_is_exposed(x)]
def hsv_to_rgb(hsv):
    """Convert a (hue, saturation, value) tuple to (red, green, blue).

    Hue should be an angle from 0.0 to 359.0.  Saturation and value should
    be values from 0.0 to 1.0, where saturation controls the intensity of
    the hue and value controls the brightness.
    """
    # Algorithm adapted from http://www.cs.rit.edu/~ncs/color/t_convert.html
    h, s, v = hsv
    if s == 0:
        # Achromatic: grey.
        return (v, v, v)
    sector = h / 60.0
    i = math.floor(sector)
    f = sector - i
    p = v * (1.0 - s)
    q = v * (1.0 - s * f)
    t = v * (1.0 - s * (1.0 - f))
    if i == 0:
        return (v, t, p)
    if i == 1:
        return (q, v, p)
    if i == 2:
        return (p, v, t)
    if i == 3:
        return (p, q, v)
    if i == 4:
        return (t, p, v)
    return (v, p, q)
def _remove_from_index(index, obj):
    """Removes object ``obj`` from the ``index``.

    A missing entry is ignored: the KeyError swallowed here covers both
    the value_map lookup and the id removal (which implies the stored id
    collection is presumably a set — TODO confirm).
    """
    try:
        index.value_map[indexed_value(index, obj)].remove(obj.id)
    except KeyError:
        pass
def grep(self, path, content, flags):
    """Yield (path, matches) for every child of *path* whose content matches.

    Prints an error and yields nothing when *content* is not a valid
    regular expression.
    """
    try:
        pattern = re.compile(content, flags)
    except sre_constants.error as ex:
        print("Bad regexp: %s" % (ex))
        return
    for child_path, matches in self.do_grep(path, pattern):
        yield (child_path, matches)
def _process_interaction(self, source_id, interaction, text, pmid, extra_annotations):
    """Process an interaction JSON tuple from the ISI output, and add up
    to one statement to the list of extracted statements.

    Parameters
    ----------
    source_id : str
        the JSON key corresponding to the sentence in the ISI output
    interaction : list
        the JSON list with subject/verb/object information about the
        event in the ISI output
    text : str
        the text of the sentence
    pmid : str
        the PMID of the article from which the information was extracted
    extra_annotations : dict
        Additional annotations to add to the statement's evidence,
        potentially containing metadata about the source. Annotations
        with the key "interaction" will be overridden by the JSON
        interaction tuple from the ISI output
    """
    verb = interaction[0].lower()
    subj = interaction[-2]
    obj = interaction[-1]
    # Make ungrounded agent objects for the subject and object
    # Grounding will happen after all statements are extracted in __init__
    subj = self._make_agent(subj)
    obj = self._make_agent(obj)
    # Make an evidence object
    annotations = deepcopy(extra_annotations)
    if 'interaction' in extra_annotations:
        logger.warning("'interaction' key of extra_annotations ignored" +
                       " since this is reserved for storing the raw ISI " +
                       "input.")
    annotations['source_id'] = source_id
    annotations['interaction'] = interaction
    ev = ist.Evidence(source_api='isi', pmid=pmid, text=text.rstrip(),
                      annotations=annotations)
    # For binding time interactions, it is said that a catalyst might be
    # specified. We don't use this for now, but extract in case we want
    # to in the future.  (Variable name keeps the original spelling.)
    cataylst_specified = False
    if len(interaction) == 4:
        catalyst = interaction[1]
        if catalyst is not None:
            cataylst_specified = True
    self.verbs.add(verb)
    statement = None
    if verb in verb_to_statement_type:
        statement_class = verb_to_statement_type[verb]
        if statement_class == ist.Complex:
            statement = ist.Complex([subj, obj], evidence=ev)
        else:
            statement = statement_class(subj, obj, evidence=ev)
    if statement is not None:
        # For Complex statements, the ISI reader produces two events:
        # binds(A, B) and binds(B, A)
        # We want only one Complex statement for each sentence, so check
        # to see if we already have a Complex for this source_id with the
        # same members
        already_have = False
        if type(statement) == ist.Complex:
            for old_s in self.statements:
                old_id = statement.evidence[0].source_id
                new_id = old_s.evidence[0].source_id
                if type(old_s) == ist.Complex and old_id == new_id:
                    old_statement_members = [m.db_refs['TEXT'] for m in old_s.members]
                    old_statement_members = sorted(old_statement_members)
                    new_statement_members = [m.db_refs['TEXT'] for m in statement.members]
                    new_statement_members = sorted(new_statement_members)
                    if old_statement_members == new_statement_members:
                        already_have = True
                        break
        if not already_have:
            self.statements.append(statement)
def firstChild(self):
    '''firstChild - property, Get the first child block, text or tag.

    @return <str/AdvancedTag/None> - The first child block, or None if
    there are no child blocks.
    '''
    blocks = object.__getattribute__(self, 'blocks')
    # First block is empty string for indent, but don't hardcode in case
    # that changes.
    if blocks[0] == '':
        firstIdx = 1
    else:
        firstIdx = 0
    if len(blocks) == firstIdx:
        # No first child
        return None
    # BUG FIX: previously returned blocks[1] unconditionally, which skipped
    # the real first child whenever blocks[0] was not the indent placeholder.
    return blocks[firstIdx]
def is_grounded_to_name(c: Concept, name: str, cutoff=0.7) -> bool:
    """Check if a concept is grounded to a given name (at *cutoff* quality)."""
    if not is_well_grounded(c, cutoff):
        return False
    return top_grounding(c) == name
async def send_api(container, targetname, name, params=None):
    """Send a ModuleAPICall event and discard the result.

    ``params`` defaults to an empty dict.  BUG FIX: the previous mutable
    default argument (``params={}``) is replaced with a None sentinel to
    avoid the dict being shared across calls.
    """
    if params is None:
        params = {}
    handle = object()
    api_event = ModuleAPICall(handle, targetname, name, params=params)
    await container.wait_for_send(api_event)
def _dict ( self , with_name = True ) :
"""Returns the identity as a dict .
values that are empty are removed""" | d = dict ( [ ( k , getattr ( self , k ) ) for k , _ , _ in self . name_parts ] )
if with_name :
d [ 'name' ] = self . name
try :
d [ 'vname' ] = self . vname
except ValueError :
pass
return self . clear_dict ( d ) |
def get_tuple_version(name, default=DEFAULT_TUPLE_NOT_FOUND, allow_ambiguous=True):
    """Get a tuple version from installed package information for easy handling.

    Returns *default* when the named package is not installed.  Numeric
    version components are converted to int where possible.

    Parameters
    ----------
    name : string
        An application name used to install via setuptools.
    default : tuple
        A default returning value used when the named application is not
        installed yet.
    allow_ambiguous : boolean
        ``True`` for allowing ambiguous version information.

    Returns
    -------
    tuple
        A version tuple.
    """
    def as_int_if_possible(part):
        try:
            return int(part)
        except ValueError:
            return part
    version = get_string_version(name, default=default,
                                 allow_ambiguous=allow_ambiguous)
    if isinstance(version, tuple):
        # Not found: get_string_version already returned the default tuple.
        return version
    return tuple(as_int_if_possible(p) for p in version.split('.'))
def get_source_and_pgp_key(source_and_key):
    """Look for a pgp key ID or ascii-armor key in the given input.

    :param source_and_key: String, "source_spec|keyid" where '|keyid' is
        optional.
    :returns: (source_spec, key_id OR None) as a tuple.  Returns None for
        key_id if there was no '|' in the source_and_key string (or the
        key part was empty).
    """
    # BUG FIX: str.split's second argument is the maximum number of SPLITS,
    # not the number of resulting parts, so it must be 1 to split into at
    # most two pieces.  With the previous value of 2, an input containing
    # two '|' characters raised ValueError on unpacking and the whole
    # string was silently treated as the source.
    try:
        source, key = source_and_key.split('|', 1)
        return source, key or None
    except ValueError:
        return source_and_key, None
def within_kilometers(self, key, point, max_distance, min_distance=None):
    """Constrain the query so *key*'s location lies within a distance band
    (in kilometers) of *point*.

    :param key: field name to constrain
    :param point: reference geo point
    :param max_distance: maximum distance bound (kilometers)
    :param min_distance: optional minimum distance bound (kilometers)
    :rtype: Query
    """
    EARTH_RADIUS_KM = 6371.0
    min_radians = None if min_distance is None else min_distance / EARTH_RADIUS_KM
    return self.within_radians(key, point, max_distance / EARTH_RADIUS_KM, min_radians)
def append(self, item):
    """Append *item* to the end of the model, wrapped in Qt's insert
    notifications so attached views stay consistent."""
    row = self.rowCount()
    self.beginInsertRows(QtCore.QModelIndex(), row, row)
    self.items.append(item)
    self.endInsertRows()
def timedelta_seconds(td):
    '''Return the offset stored by a :class:`datetime.timedelta` object as
    an integer number of seconds.  Microseconds, if present, are rounded to
    the nearest second.

    Delegates to
    :meth:`timedelta.total_seconds() <datetime.timedelta.total_seconds()>`
    if available.

    >>> timedelta_seconds(timedelta(hours=1))
    3600
    >>> timedelta_seconds(timedelta(hours=-1))
    -3600
    >>> timedelta_seconds(timedelta(hours=1, minutes=30))
    5400
    '''
    try:
        total = td.total_seconds()
    except AttributeError:
        # Fallback for very old Pythons without total_seconds().
        total = (td.days * 86400) + td.seconds + (td.microseconds / 1000000)
    return int(round(total))
def _set_policy(self, v, load=False):
    """Setter method for policy, mapped from YANG variable
    /rbridge_id/maps/policy (list).

    If this variable is read-only (config: false) in the source YANG
    file, then _set_policy is considered as a private method. Backends
    looking to populate this variable should do so via calling
    thisObj._set_policy() directly.

    (Generated pyangbind code — do not hand-edit the YANGDynClass call.)
    """
    # Unwrap YANG-typed values to their base type before revalidation.
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        t = YANGDynClass(v, base=YANGListType("policyname", policy.policy, yang_name="policy", rest_name="policy", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='policyname', extensions={u'tailf-common': {u'info': u'Configure Policy', u'callpoint': u'MapsPolicy'}}), is_container='list', yang_name="policy", rest_name="policy", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure Policy', u'callpoint': u'MapsPolicy'}}, namespace='urn:brocade.com:mgmt:brocade-maps', defining_module='brocade-maps', yang_type='list', is_config=True)
    except (TypeError, ValueError):
        # Structured error dict per the generated-code convention.
        raise ValueError({'error-string': """policy must be of a type compatible with list""", 'defined-type': "list", 'generated-type': """YANGDynClass(base=YANGListType("policyname",policy.policy, yang_name="policy", rest_name="policy", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='policyname', extensions={u'tailf-common': {u'info': u'Configure Policy', u'callpoint': u'MapsPolicy'}}), is_container='list', yang_name="policy", rest_name="policy", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure Policy', u'callpoint': u'MapsPolicy'}}, namespace='urn:brocade.com:mgmt:brocade-maps', defining_module='brocade-maps', yang_type='list', is_config=True)""", })
    self.__policy = t
    if hasattr(self, '_set'):
        self._set()
def manage_mep(self, mep_json):
    '''Import a mep as a representative from the json dict fetched from
    parltrack.

    Returns the Representative, or None when a pre-import signal receiver
    vetoed the import.
    '''
    # Some versions of memopol will connect to this and skip inactive meps.
    responses = representative_pre_import.send(sender=self, representative_data=mep_json)
    for receiver, response in responses:
        if response is False:
            logger.debug('Skipping MEP %s', mep_json['Name']['full'])
            return
    changed = False
    # Slug combines the full (or surname+family) name with the birth date.
    slug = slugify('%s-%s' % (mep_json["Name"]["full"] if 'full' in mep_json["Name"] else mep_json["Name"]["sur"] + " " + mep_json["Name"]["family"], _parse_date(mep_json["Birth"]["date"])))
    try:
        representative = Representative.objects.get(slug=slug)
    except Representative.DoesNotExist:
        representative = Representative(slug=slug)
        changed = True
    # Save representative attributes
    self.import_representative_details(representative, mep_json, changed)
    self.add_mandates(representative, mep_json)
    self.add_contacts(representative, mep_json)
    logger.debug('Imported MEP %s', unicode(representative))
    return representative
def name(self):
    """Return the person's display name.

    Special titles (DR, SIR, LORD) are prefixed; all other titles are
    omitted.
    """
    if self.title in ["DR", "SIR", "LORD"]:
        return "%s %s %s" % (self.get_title_display(), self.first_name, self.last_name)
    return "%s %s" % (self.first_name, self.last_name)
def acls(name, acls, version=-1, profile=None, hosts=None, scheme=None, username=None, password=None, default_acl=None):
    '''
    Update acls on a znode

    name
        path to znode

    acls
        list of acl dictionaries to set on the znode

    version
        Specify the version which should be matched for the update
        Default: -1 (always match)

    profile
        Configured Zookeeper profile to authenticate with (Default: None)

    hosts
        Lists of Zookeeper Hosts (Default: '127.0.0.1:2181')

    scheme
        Scheme to authenticate with (Default: 'digest')

    username
        Username to authenticate (Default: None)

    password
        Password to authenticate (Default: None)

    default_acl
        Default acls to assign if a node is created in this connection (Default: None)

    .. code-block:: yaml

        update acls:
          zookeeper.acls:
            - name: /test/name
            - acls:
              - username: daniel
                password: test
                all: True
              - username: gtmanfred
                password: test
                all: True
    '''
    ret = {'name': name, 'result': False, 'comment': 'Failed to set acls on znode {0}'.format(name), 'changes': {}}
    connkwargs = {'profile': profile, 'hosts': hosts, 'scheme': scheme, 'username': username, 'password': password, 'default_acl': default_acl}
    # Accept a single acl dict as shorthand for a one-element list.
    if isinstance(acls, dict):
        acls = [acls]
    # Normalize requested acls into digest ACL objects for comparison.
    chk_acls = [__salt__['zookeeper.make_digest_acl'](**acl) for acl in acls]
    if not __salt__['zookeeper.exists'](name, **connkwargs):
        ret['comment'] += ': Znode does not exist'
        return ret
    cur_acls = __salt__['zookeeper.get_acls'](name, **connkwargs)
    # Nothing to do if the current acls already match the requested ones.
    if _check_acls(cur_acls, chk_acls):
        ret['result'] = True
        ret['comment'] = 'Znode {0} acls already set'.format(name)
        return ret
    # Test mode: report the pending change without applying it.
    if __opts__['test'] is True:
        ret['result'] = None
        ret['comment'] = 'Znode {0} acls will be updated'.format(name)
        ret['changes']['old'] = cur_acls
        ret['changes']['new'] = chk_acls
        return ret
    __salt__['zookeeper.set_acls'](name, acls, version, **connkwargs)
    # Re-read the acls to verify the update actually took effect.
    new_acls = __salt__['zookeeper.get_acls'](name, **connkwargs)
    ret['changes'] = {'old': cur_acls, 'new': new_acls}
    if _check_acls(new_acls, chk_acls):
        ret['result'] = True
        ret['comment'] = 'Znode {0} acls updated'.format(name)
        return ret
    ret['comment'] = 'Znode {0} acls failed to update'.format(name)
    return ret
def bind_proxy(self, name, proxy):
    """Register a mask that maps to the given proxy.

    The mask must start and end with '/'. Newer bindings are pushed to
    the front of the list so they take precedence on lookup.
    """
    well_formed = len(name) and name[0] == '/' and name[-1] == '/'
    if not well_formed:
        raise ValueError("name must start and end with '/': {0}".format(name))
    self._folder_proxys.insert(0, (name, proxy))
def get_waveform_filter_length_in_time(approximant, template=None, **kwargs):
    """For filter templates, return the length in time of the template.

    Returns None when no duration function is registered for the given
    approximant.
    """
    kwargs = props(template, **kwargs)
    duration_func = _filter_time_lengths.get(approximant)
    if duration_func is None:
        return None
    return duration_func(**kwargs)
def tree_build(self):
    """Build a tree from the taxonomy data present in this `ClassificationsDataFrame` or
    `SampleCollection`.

    Returns
    -------
    `skbio.tree.TreeNode`, the root node of a tree that contains all the taxa in the current
    analysis and their parents leading back to the root node.
    """
    from skbio.tree import TreeNode
    # build all the nodes: one TreeNode per tax_id, annotated with its
    # name, rank and parent tax_id from the taxonomy table
    nodes = {}
    for tax_id in self.taxonomy.index:
        node = TreeNode(name=tax_id, length=1)
        node.tax_name = self.taxonomy["name"][tax_id]
        node.rank = self.taxonomy["rank"][tax_id]
        node.parent_tax_id = self.taxonomy["parent_tax_id"][tax_id]
        nodes[tax_id] = node
    # generate all the links: attach each node to its parent node.
    # Nodes whose parent is absent from the table are skipped with a
    # warning — except the root ("1"), which has no parent by definition.
    for tax_id in self.taxonomy.index:
        try:
            parent = nodes[nodes[tax_id].parent_tax_id]
        except KeyError:
            if tax_id != "1":
                warnings.warn("tax_id={} has parent_tax_id={} which is not in tree" "".format(tax_id, nodes[tax_id].parent_tax_id))
            continue
        parent.append(nodes[tax_id])
    # "1" is the NCBI root taxon; assumed present in the taxonomy index.
    return nodes["1"]
def issue_section(issue):
    """Return the changelog section heading for *issue*, or None if the
    issue should be ignored."""
    for label in issue.get('labels', []):
        label_name = label['name']
        if not label_name.startswith('type: '):
            continue
        if label_name in LOG_SECTION:
            return LOG_SECTION[label_name]
        if label_name in IGNORE_ISSUE_TYPE:
            return None
        logging.warning('unknown issue type: "{}" for: {}'.format(label_name, issue_line(issue)))
        return None
def get_client(self, client_type):
    """GetClient.
    [Preview API] Get the client package.

    :param str client_type: Either "EXE" for a zip file containing a Windows symbol client (a.k.a. symbol.exe) along with dependencies, or "TASK" for a VSTS task that can be run on a VSTS build agent. All the other values are invalid. The parameter is case-insensitive.
    :rtype: object
    """
    route_values = {}
    if client_type is not None:
        route_values['clientType'] = self._serialize.url('client_type', client_type, 'str')
    # The location id and version pin this call to the (preview) symbol
    # client REST endpoint.
    response = self._send(http_method='GET', location_id='79c83865-4de3-460c-8a16-01be238e0818', version='5.0-preview.1', route_values=route_values)
    return self._deserialize('object', response)
def almost_equals(self, other, e=0.000001):
    '''Compare two GeoVectors allowing for accumulated float error:
    True when the dx and dy components each differ by less than *e* km.
    The default (e=0.000001) returns True when self and other are less
    than 1 mm apart in each component.'''
    dx_close = abs(self.dx - other.dx) < e
    dy_close = abs(self.dy - other.dy) < e
    return dx_close and dy_close
def _get_memory_bank_size(memBank):
    """Return the size in bytes of NUMA memory bank *memBank*, read from
    the kernel's per-node meminfo file."""
    meminfo_path = '/sys/devices/system/node/node{0}/meminfo'.format(memBank)
    with open(meminfo_path) as meminfo:
        for line in meminfo:
            if 'MemTotal' not in line:
                continue
            value = line.split(':')[1].strip()
            if not value.endswith(' kB'):
                raise ValueError('"{}" in file {} is not a memory size.'.format(value, meminfo_path))
            # The kernel uses KiB but labels them "kB"; convert to bytes.
            size = int(value[:-3]) * 1024
            logging.debug("Memory bank %s has size %s bytes.", memBank, size)
            return size
    raise ValueError('Failed to read total memory from {}.'.format(meminfo_path))
def is_rfc1918(ip):
    """Check whether *ip* falls in one of the private-network address
    blocks reserved for local communication by RFC 1918."""
    private_blocks = (
        ("10.0.0.0", "10.255.255.255"),
        ("172.16.0.0", "172.31.255.255"),
        ("192.168.0.0", "192.168.255.255"),
    )
    return any(ip_between(ip, low, high) for low, high in private_blocks)
def _vmomentsurfacemass(self, R, n, m, romberg=False, nsigma=None, relative=False, phi=0., deriv=None):
    """Non-physical version of vmomentsurfacemass, otherwise the same.

    Integrates the distribution function over velocity space at radius R
    (internal units) for moment orders n (radial) and m (tangential);
    ``deriv`` selects the integrand that also differentiates the DF.
    """
    # odd moments of vR are zero
    if isinstance(n, int) and n % 2 == 1:
        return 0.
    if nsigma == None:
        nsigma = _NSIGMA
    logSigmaR = self.targetSurfacemass(R, log=True, use_physical=False)
    sigmaR2 = self.targetSigma2(R, use_physical=False)
    sigmaR1 = sc.sqrt(sigmaR2)
    logsigmaR2 = sc.log(sigmaR2)
    if relative:
        norm = 1.
    else:
        # Normalization: surface density times sigma^(n+m), divided by
        # gamma^m for the tangential moment.
        norm = sc.exp(logSigmaR + logsigmaR2 * (n + m) / 2.) / self._gamma ** m
    # Use the asymmetric drift equation to estimate va
    va = sigmaR2 / 2. / R ** self._beta * (1. / self._gamma ** 2. - 1. - R * self._surfaceSigmaProfile.surfacemassDerivative(R, log=True) - R * self._surfaceSigmaProfile.sigma2Derivative(R, log=True))
    if math.fabs(va) > sigmaR1:
        va = 0.
    # To avoid craziness near the center
    # Integration limits span nsigma dispersions around the (drift-
    # corrected) circular speed; Romberg and dblquad branches compute the
    # same integral with different quadrature backends.
    if deriv is None:
        if romberg:
            return sc.real(bovy_dblquad(_vmomentsurfaceIntegrand, self._gamma * (R ** self._beta - va) / sigmaR1 - nsigma, self._gamma * (R ** self._beta - va) / sigmaR1 + nsigma, lambda x: -nsigma, lambda x: nsigma, [R, self, logSigmaR, logsigmaR2, sigmaR1, self._gamma, n, m], tol=10. ** -8) / sc.pi * norm / 2.)
        else:
            return integrate.dblquad(_vmomentsurfaceIntegrand, self._gamma * (R ** self._beta - va) / sigmaR1 - nsigma, self._gamma * (R ** self._beta - va) / sigmaR1 + nsigma, lambda x: -nsigma, lambda x: nsigma, (R, self, logSigmaR, logsigmaR2, sigmaR1, self._gamma, n, m), epsrel=_EPSREL)[0] / sc.pi * norm / 2.
    else:
        if romberg:
            return sc.real(bovy_dblquad(_vmomentderivsurfaceIntegrand, self._gamma * (R ** self._beta - va) / sigmaR1 - nsigma, self._gamma * (R ** self._beta - va) / sigmaR1 + nsigma, lambda x: -nsigma, lambda x: nsigma, [R, self, logSigmaR, logsigmaR2, sigmaR1, self._gamma, n, m, deriv], tol=10. ** -8) / sc.pi * norm / 2.)
        else:
            return integrate.dblquad(_vmomentderivsurfaceIntegrand, self._gamma * (R ** self._beta - va) / sigmaR1 - nsigma, self._gamma * (R ** self._beta - va) / sigmaR1 + nsigma, lambda x: -nsigma, lambda x: nsigma, (R, self, logSigmaR, logsigmaR2, sigmaR1, self._gamma, n, m, deriv), epsrel=_EPSREL)[0] / sc.pi * norm / 2.
def validate_args_and_kwargs(fname, args, kwargs, max_fname_arg_count, compat_args):
    """Checks whether parameters passed to the *args and **kwargs argument in a
    function `fname` are valid parameters as specified in `*compat_args`
    and whether or not they are set to their default values.

    Parameters
    ----------
    fname : str
        The name of the function being passed the `**kwargs` parameter
    args : tuple
        The `*args` parameter passed into a function
    kwargs : dict
        The `**kwargs` parameter passed into `fname`
    max_fname_arg_count : int
        The minimum number of arguments that the function `fname`
        requires, excluding those in `args`. Used for displaying
        appropriate error messages. Must be non-negative.
    compat_args : OrderedDict
        A ordered dictionary of keys that `kwargs` is allowed to
        have and their associated default values. Note that if there
        is only one key, a generic dict can be passed in as well.

    Raises
    ------
    TypeError if `args` contains more values than there are
    `compat_args` OR `kwargs` contains keys not in `compat_args`
    ValueError if `args` contains values not at the default value (`None`)
    `kwargs` contains keys in `compat_args` that do not map to the default
    value as specified in `compat_args`

    See Also
    --------
    validate_args : Purely args validation.
    validate_kwargs : Purely kwargs validation.

    Notes
    -----
    Mutates `kwargs` in place: positional values are merged into it
    before the final keyword validation.
    """
    # Check that the total number of arguments passed in (i.e.
    # args and kwargs) does not exceed the length of compat_args
    _check_arg_length(fname, args + tuple(kwargs.values()), max_fname_arg_count, compat_args)
    # Check there is no overlap with the positional and keyword
    # arguments, similar to what is done in actual Python functions
    args_dict = dict(zip(compat_args, args))
    for key in args_dict:
        if key in kwargs:
            raise TypeError("{fname}() got multiple values for keyword " "argument '{arg}'".format(fname=fname, arg=key))
    # Merge positionals into kwargs and validate everything together.
    kwargs.update(args_dict)
    validate_kwargs(fname, kwargs, compat_args)
def flatten(nested_list):
    '''Convert an arbitrarily nested list-of-lists to a single flat list,
    preserving element order. Non-list items (including tuples) are kept
    as-is.'''
    flat = []
    for item in nested_list:
        if isinstance(item, list):
            flat.extend(flatten(item))
        else:
            flat.append(item)
    return flat
def init_static_field(state, field_class_name, field_name, field_type):
    """Initialize the static field with an allocated, but not initialized,
    object of the given type.

    :param state: State associated to the field.
    :param field_class_name: Class containing the field.
    :param field_name: Name of the field.
    :param field_type: Type of the field and the new object.
    """
    # Resolve the simulated reference to the static field itself.
    field_ref = SimSootValue_StaticFieldRef.get_ref(state, field_class_name, field_name, field_type)
    # Allocate a fresh (uninitialized) object of the field's type and
    # store it at that reference in the state's memory.
    field_val = SimSootValue_ThisRef.new_object(state, field_type)
    state.memory.store(field_ref, field_val)
def _match_iter_generic(self, path_elements, start_at):
    """Implementation of match_iter for >1 self.elements.

    For each starting position (at or after ``start_at``) where every
    matcher in ``self.elements`` matches a consecutive run of
    ``path_elements``, yields the index one past the end of that run.
    """
    length = len(path_elements)
    # If bound to start, we stop searching at the first element
    if self.bound_start:
        end = 1
    else:
        end = length - self.length + 1
    # If bound to end, we start searching as late as possible
    if self.bound_end:
        start = length - self.length
    else:
        start = start_at
    if start > end or start < start_at or end > length - self.length + 1:
        # It's impossible to match. Either
        # 1) the search has a fixed start and end, and path_elements
        #    does not have enough elements for a match, or
        # 2) To match the bound_end, we have to start before the start_at,
        #    which means the search is impossible
        # 3) The end is after the last possible end point in path_elements
        return
    for index in range(start, end):
        # Try to match all element matchers consecutively from `index`.
        matched = True
        i = index
        for matcher in self.elements:
            element = path_elements[i]
            i += 1
            if not matcher.match(element):
                matched = False
                break
        if matched:
            # Yield the index just past the matched run.
            yield index + self.length
def parse(self, lines):
    '''Parse signature file lines.

    @lines - A list of lines from a signature file.

    Returns None.
    '''
    signature = None
    for line in lines:
        # Split at the first comment delimiter (if any) and strip the
        # result
        line = line.split('#')[0].strip()
        # Ignore blank lines and lines that are nothing but comments.
        # We also don't support the '!mime' style line entries.
        if line and line[0] != '!':
            # Parse this signature line
            sigline = SignatureLine(line)
            # Level 0 means the first line of a signature entry
            if sigline.level == 0:
                # If there is an existing signature, append it to the signature list,
                # unless the text in its title field has been filtered by user-defined
                # filter rules.
                if signature and not self._filtered(signature.title):
                    self.signatures.append(signature)
                # Create a new signature object; use the size of self.signatures to
                # assign each signature a unique ID.
                signature = Signature(len(self.signatures), sigline)
            # Else, just append this line to the existing signature
            elif signature:
                signature.lines.append(sigline)
            # If this is not the first line of a signature entry and there is no other
            # existing signature entry, something is very wrong with the
            # signature file.
            else:
                raise ParserException("Invalid signature line: '%s'" % line)
    # Add the final signature to the signature list
    if signature:
        # NOTE(review): the final signature is filtered on
        # signature.lines[0].format, while the in-loop check filters on
        # signature.title — confirm this asymmetry is intentional.
        if not self._filtered(signature.lines[0].format):
            self.signatures.append(signature)
    # Sort signatures by confidence (aka, length of their magic bytes),
    # largest first
    self.signatures.sort(key=lambda x: x.confidence, reverse=True)
def _load_url(url):
    """Loads a URL resource from a remote server and returns its body
    wrapped in a BytesIO; on failure, reports via the module's argument
    parser (which exits the program)."""
    try:
        # NOTE(review): no timeout is passed, so this can block
        # indefinitely on an unresponsive server — consider
        # requests.get(url, timeout=...).
        response = requests.get(url)
        return BytesIO(response.content)
    except IOError as ex:
        # `parser` is presumably a module-level argparse.ArgumentParser;
        # .error() prints the message to stderr and exits with status 2.
        parser.error("{url} could not be loaded remotely! ({ex})".format(url=url, ex=ex))
def get_activities(self):
    """Gets all ``Activities``.

    In plenary mode, the returned list contains all known activites
    or an error results. Otherwise, the returned list may contain
    only those activities that are accessible through this session.

    return: (osid.learning.ActivityList) - a ``ActivityList``
    raise:  OperationFailed - unable to complete request
    raise:  PermissionDenied - authorization failure
    *compliance: mandatory -- This method must be implemented.*
    """
    # Implemented from template for
    # osid.resource.ResourceLookupSession.get_resources
    # NOTE: This implementation currently ignores plenary view
    collection = JSONClientValidated('learning', collection='Activity', runtime=self._runtime)
    # Apply the session's view filter and return newest first
    # (descending by _id).
    result = collection.find(self._view_filter()).sort('_id', DESCENDING)
    return objects.ActivityList(result, runtime=self._runtime, proxy=self._proxy)
def predict_is(self, h=5, fit_once=True, fit_method='MLE', intervals=False, **kwargs):
    """Makes dynamic in-sample predictions with the estimated model

    Parameters
    ----------
    h : int (default: 5)
        How many steps would you like to forecast?
    fit_once : boolean (default: True)
        Fits only once before the in-sample prediction; if False, fits after every new datapoint
    fit_method : string
        Which method to fit the model with
    intervals : boolean
        Whether to return prediction intervals

    Returns
    -------
    pd.DataFrame with predicted values
    """
    predictions = []
    for t in range(0, h):
        # Rebuild the model on the data up to (h - t) points before the
        # end, then predict one step ahead.
        x = GARCH(p=self.p, q=self.q, data=self.data[0:-h+t])
        if fit_once is False:
            x.fit(method=fit_method, printer=False)
        if t == 0:
            if fit_once is True:
                # Fit only on the first iteration and save the latent
                # variables for reuse on subsequent steps.
                x.fit(method=fit_method, printer=False)
                saved_lvs = x.latent_variables
            predictions = x.predict(1, intervals=intervals)
        else:
            if fit_once is True:
                x.latent_variables = saved_lvs
            predictions = pd.concat([predictions, x.predict(1, intervals=intervals)])
    if intervals is True:
        predictions.rename(columns={0: self.data_name, 1: "1% Prediction Interval", 2: "5% Prediction Interval", 3: "95% Prediction Interval", 4: "99% Prediction Interval"}, inplace=True)
    else:
        predictions.rename(columns={0: self.data_name}, inplace=True)
    # Align the one-step predictions with the last h entries of the
    # original index.
    predictions.index = self.index[-h:]
    return predictions
def serialize_header(self, pyobj, typecode=None, **kw):
    '''Serialize a Python object in SOAP-ENV:Header, make
    sure everything in Header unique (no #href). Must call
    serialize first to create a document.

    Parameters:
        pyobj -- instance to serialize in SOAP Header
        typecode -- default typecode
    '''
    # Force unique serialization (no multi-reference #href entries).
    kw['unique'] = True
    soap_env = _reserved_ns['SOAP-ENV']
    # header = self.dom.getElement(soap_env, 'Header')
    header = self._header
    if header is None:
        # Lazily create the SOAP-ENV:Header element on first use.
        header = self._header = self.dom.createAppendElement(soap_env, 'Header')
    # Prefer a typecode attached to the object over the passed default.
    typecode = getattr(pyobj, 'typecode', typecode)
    if typecode is None:
        raise RuntimeError('typecode is required to serialize pyobj in header')
    # Serialize into the header element; the return value is unused.
    helt = typecode.serialize(header, self, pyobj, **kw)
def _format_device ( var ) :
"""Returns the device with an annotation specifying ` ResourceVariable ` .
" legacy " means a normal tf . Variable while " resource " means a ResourceVariable .
For example :
` ( legacy ) `
` ( resource ) `
` / job : learner / task : 0 / device : CPU : * ( legacy ) `
` / job : learner / task : 0 / device : CPU : * ( resource ) `
Args :
var : The Tensorflow Variable to print .""" | if var . dtype . name . endswith ( "_ref" ) :
resource_var_annotation = "(legacy)"
else :
resource_var_annotation = "(resource)"
if var . device :
return "{} {}" . format ( var . device , resource_var_annotation )
else :
return resource_var_annotation |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.