signature
stringlengths 29
44.1k
| implementation
stringlengths 0
85.2k
|
|---|---|
def whereIsYadis(resp):
    """Given a HTTPResponse, return the location of the Yadis document.

    May be the URL just retrieved, another URL, or None, if I can't
    find any.

    [non-blocking]

    @returns: str or None
    """
    # Attempt to find out where to go to discover the document
    # or if we already have it
    content_type = resp.headers.get('content-type')
    # According to the spec, the content-type header must be an exact
    # match, or else we have to look for an indirection.
    if (content_type and content_type.split(';', 1)[0].lower() == YADIS_CONTENT_TYPE):
        # The response itself is the Yadis document.
        return resp.final_url
    else:  # Try the header
        yadis_loc = resp.headers.get(YADIS_HEADER_NAME.lower())
        if not yadis_loc:  # Parse as HTML if the header is missing.
            # XXX: do we want to do something with content-type, like
            # have a whitelist or a blacklist (for detecting that it's
            # HTML)?
            # Decode body by encoding of file: try to pull a charset
            # parameter out of the content-type header, defaulting to UTF-8.
            content_type = content_type or ''
            encoding = content_type.rsplit(';', 1)
            if len(encoding) == 2 and encoding[1].strip().startswith('charset='):
                encoding = encoding[1].split('=', 1)[1].strip()
            else:
                encoding = 'UTF-8'
            try:
                content = resp.body.decode(encoding)
            except UnicodeError:
                # Keep encoded version in case yadis location can be found
                # before encoding shut this up.
                # Possible errors will be caught lower.
                content = resp.body
            try:
                yadis_loc = findHTMLMeta(StringIO(content))
            except (MetaNotFound, UnicodeError):
                # UnicodeError: Response body could not be encoded and xrds
                # location could not be found before troubles occurred.
                pass
        return yadis_loc
|
def index(self):
    """Return the first occurrence of the effect in its pedalboard.

    :raises IndexError: if this effect has not been added to a pedalboard.
    """
    if self.pedalboard is None:
        # Original message read "Effect not contains a pedalboard";
        # fixed grammar while keeping the exception type callers catch.
        raise IndexError('Effect does not contain a pedalboard')
    return self.pedalboard.effects.index(self)
|
def execute_scriptfunction() -> None:
    """Execute a HydPy script function.

    Function |execute_scriptfunction| is indirectly applied and
    explained in the documentation on module |hyd|.
    """
    # Initialise the logging defaults *before* entering the `try` block.
    # The exception handler below reads `logfilepath` and `logstyle`; in
    # the original code a failure raised before their assignment (e.g.
    # inside `prepare_logfile`) triggered a `NameError` in the handler
    # that masked the actual problem.
    logfilepath = 'stdout'
    logstyle = 'plain'
    try:
        # Split the command line into positional and keyword arguments.
        args_given = []
        kwargs_given = {}
        for arg in sys.argv[1:]:
            if len(arg) < 3:
                args_given.append(arg)
            else:
                try:
                    key, value = parse_argument(arg)
                    kwargs_given[key] = value
                except ValueError:
                    args_given.append(arg)
        logfilepath = prepare_logfile(kwargs_given.pop('logfile', 'stdout'))
        logstyle = kwargs_given.pop('logstyle', 'plain')
        # The first positional argument selects the script function.
        try:
            funcname = str(args_given.pop(0))
        except IndexError:
            raise ValueError(
                'The first positional argument defining the function '
                'to be called is missing.')
        try:
            func = hydpy.pub.scriptfunctions[funcname]
        except KeyError:
            available_funcs = objecttools.enumeration(
                sorted(hydpy.pub.scriptfunctions.keys()))
            raise ValueError(
                f'There is no `{funcname}` function callable by `hyd.py`. '
                f'Choose one of the following instead: {available_funcs}.')
        # Verify the number of positional arguments against the selected
        # function's signature before calling it.
        args_required = inspect.getfullargspec(func).args
        nmb_args_required = len(args_required)
        nmb_args_given = len(args_given)
        if nmb_args_given != nmb_args_required:
            enum_args_given = ''
            if nmb_args_given:
                enum_args_given = f' ({objecttools.enumeration(args_given)})'
            enum_args_required = ''
            if nmb_args_required:
                enum_args_required = f' ({objecttools.enumeration(args_required)})'
            raise ValueError(
                f'Function `{funcname}` requires `{nmb_args_required:d}` '
                f'positional arguments{enum_args_required}, but '
                f'`{nmb_args_given:d}` are given{enum_args_given}.')
        with _activate_logfile(logfilepath, logstyle, 'info', 'warning'):
            func(*args_given, **kwargs_given)
    except BaseException as exc:
        # Fall back to plain logging when the requested style is unknown.
        if logstyle not in LogFileInterface.style2infotype2string:
            logstyle = 'plain'
        with _activate_logfile(logfilepath, logstyle, 'exception', 'exception'):
            arguments = ', '.join(sys.argv)
            print(
                f'Invoking hyd.py with arguments `{arguments}` '
                f'resulted in the following error:\n{str(exc)}\n\n'
                f'See the following stack traceback for debugging:\n',
                file=sys.stderr)
            traceback.print_tb(sys.exc_info()[2])
|
def save_split_next(self):
    """Save out blurbs created from "blurb split".

    They don't have dates, so we have to get creative.

    Returns the list of filenames written.
    """
    filenames = []
    # the "date" MUST have a leading zero.
    # this ensures these files sort after all
    # newly created blurbs.
    #
    # Using the decimal-digit count (rather than ceil(log10(n)) + 1)
    # keeps the leading-zero guarantee when len(self) is an exact power
    # of ten, and avoids a math domain error for an empty container.
    width = len(str(len(self))) + 1
    i = 1
    blurb = Blurbs()
    while self:
        metadata, body = self.pop()
        metadata['date'] = str(i).rjust(width, '0')
        # "release date" comes from the original file and is meaningless
        # for split-out blurbs.
        if 'release date' in metadata:
            del metadata['release date']
        blurb.append((metadata, body))
        filename = blurb._extract_next_filename()
        blurb.save(filename)
        blurb.clear()
        filenames.append(filename)
        i += 1
    return filenames
|
def dumpBlock(self, block_name):
    """API to list all information related with the block_name.

    :param block_name: Name of block to be dumped (Required)
    :type block_name: str
    """
    try:
        return self.dbsBlock.dumpBlock(block_name)
    except HTTPError as http_err:
        # HTTP errors are propagated to the caller untouched.
        raise http_err
    except dbsException as dbs_err:
        dbsExceptionHandler(dbs_err.eCode, dbs_err.message,
                            self.logger.exception, dbs_err.serverError)
    except Exception as err:
        trace = traceback.format_exc()
        detail = "DBSReaderModel/dumpBlock. %s\n. Exception trace: \n %s" % (err, trace)
        dbsExceptionHandler('dbsException-server-error', err.message,
                            self.logger.exception, detail)
|
def assure_migrations_table_setup(db):
    """Make sure the migrations table is set up in the database."""
    from mig.models import MigrationData

    migrations_table = MigrationData.__table__
    if not migrations_table.exists(db.bind):
        # Create only the migrations table, not the whole schema.
        MigrationData.metadata.create_all(db.bind, tables=[migrations_table])
|
def _add_freeform_sp ( self , origin_x , origin_y ) :
"""Add a freeform ` p : sp ` element having no drawing elements .
* origin _ x * and * origin _ y * are specified in slide coordinates , and
represent the location of the local coordinates origin on the slide ."""
|
spTree = self . _shapes . _spTree
return spTree . add_freeform_sp ( origin_x + self . _left , origin_y + self . _top , self . _width , self . _height )
|
def perform(self, agent_indices, observ):
    """Compute batch of actions and a summary for a batch of observation.

    Args:
      agent_indices: Tensor containing current batch indices.
      observ: Tensor of a batch of observations for all agents.

    Returns:
      Tuple of action batch tensor and summary tensor.
    """
    with tf.name_scope('perform/'):
        observ = self._observ_filter.transform(observ)
        # Gather the recurrent state only for the agents in this batch.
        if self._last_state is None:
            state = None
        else:
            state = tools.nested.map(
                lambda x: tf.gather(x, agent_indices), self._last_state)
        with tf.device('/gpu:0' if self._use_gpu else '/cpu:0'):
            output = self._network(
                observ[:, None], tf.ones(observ.shape[0]), state)
        # Sample stochastically during training, use the mode otherwise.
        action = tf.cond(
            self._is_training, output.policy.sample, output.policy.mode)
        logprob = output.policy.log_prob(action)[:, 0]
        # pylint: disable=g-long-lambda
        # NOTE(review): `str` as the false branch yields '' when logging is
        # disabled -- presumably a cheap empty-summary placeholder; confirm
        # tf.cond accepts this in the targeted TF version.
        summary = tf.cond(self._should_log, lambda: tf.summary.merge([
            tf.summary.histogram('mode', output.policy.mode()[:, 0]),
            tf.summary.histogram('action', action[:, 0]),
            tf.summary.histogram('logprob', logprob)]), str)
        # Remember current policy to append to memory in the experience callback.
        if self._last_state is None:
            assign_state = tf.no_op()
        else:
            assign_state = utility.assign_nested_vars(
                self._last_state, output.state, agent_indices)
        remember_last_action = tf.scatter_update(
            self._last_action, agent_indices, action[:, 0])
        # Only plain tensors can be stored back into the policy buffers.
        policy_params = tools.nested.filter(
            lambda x: isinstance(x, tf.Tensor), output.policy.parameters)
        assert policy_params, 'Policy has no parameters to store.'
        remember_last_policy = tools.nested.map(
            lambda var, val: tf.scatter_update(var, agent_indices, val[:, 0]),
            self._last_policy, policy_params, flatten=True)
        # Force the bookkeeping updates to run before returning the action.
        with tf.control_dependencies(
                (assign_state, remember_last_action) + remember_last_policy):
            return action[:, 0], tf.identity(summary)
|
def needs_to_be_resolved(parent_obj, attr_name):
    """Determine whether a reference (CrossReference) still needs resolving.

    This applies both while creating the model and while resolving
    references. For lists of references, returns True if *any* entry in
    the list needs to be resolved.

    Args:
        parent_obj: the object containing the attribute to be resolved.
        attr_name: the attribute identification object.

    Returns:
        True if the attribute needs to be resolved, else False. Outside
        the model building process (from_file or from_str) this function
        always returns False.
    """
    # The resolver only exists during model construction; without it no
    # reference can still be pending.
    if not hasattr(get_model(parent_obj), "_tx_reference_resolver"):
        return False
    resolver = get_model(parent_obj)._tx_reference_resolver
    return resolver.has_unresolved_crossrefs(parent_obj, attr_name)
|
def get_data(context, id, keys):
    """get_data(context, id, keys)

    Retrieve data field from a remoteci.

    >>> dcictl remoteci-get-data [OPTIONS]

    :param string id: ID of the remote CI to show [required]
    :param string keys: Keys of the data field to retrieve [optional]
    """
    # A comma-separated keys string becomes a list; an empty/None value
    # is passed through unchanged.
    key_list = keys.split(',') if keys else keys
    result = remoteci.get_data(context, id=id, keys=key_list)
    utils.format_output(result, context.format, key_list)
|
def remove_menu(self, menu):
    """Removes a sub-menu from the context menu.

    :param menu: Sub-menu to remove.
    """
    self._menus.remove(menu)
    # Detach every action that belonged to the removed sub-menu.
    for child_action in menu.actions():
        self.removeAction(child_action)
|
def unlockFile(self, fileName, byteOffset, length, dokanFileInfo):
    """Unlock a file.

    :param fileName: name of file to unlock
    :type fileName: ctypes.c_wchar_p
    :param byteOffset: location to start unlock
    :type byteOffset: ctypes.c_longlong
    :param length: number of bytes to unlock
    :type length: ctypes.c_longlong
    :param dokanFileInfo: used by Dokan
    :type dokanFileInfo: PDOKAN_FILE_INFO
    :return: error code
    :rtype: ctypes.c_int
    """
    # dokanFileInfo is part of the Dokan callback signature but is not
    # needed by the operations dispatcher.
    call_args = (fileName, byteOffset, length)
    return self.operations('unlockFile', *call_args)
|
def bridge_to_vlan(br):
    '''Returns the VLAN ID of a bridge.

    Args:
        br: A string - bridge name

    Returns:
        VLAN ID of the bridge. The VLAN ID is 0 if the bridge is not a
        fake bridge. If the bridge does not exist, False is returned.

    CLI Example:
    .. code-block:: bash

        salt '*' openvswitch.bridge_to_parent br0
    '''
    result = __salt__['cmd.run_all']('ovs-vsctl br-to-vlan {0}'.format(br))
    # A non-zero return code means the bridge does not exist.
    if result['retcode'] != 0:
        return False
    return int(result['stdout'])
|
def seek(self, offset, whence=os.SEEK_SET):
    """Seek to position in stream, see file.seek"""
    base = self.offset
    if whence == os.SEEK_SET:
        target = base + offset
    elif whence == os.SEEK_CUR:
        target = self.tell() + offset
    elif whence == os.SEEK_END:
        target = base + self.len + offset
    else:
        raise ValueError("invalid whence {}".format(whence))
    # The computed position must stay inside this chunk's window.
    if not (base <= target <= base + self.len):
        raise ValueError("seek position beyond chunk area")
    self.parent_fd.seek(target, os.SEEK_SET)
|
def clean(self):
    '''This code prevents multiple individuals from substituting for the
    same class and class teacher. It also prevents an individual from
    substituting for a class in which they are a teacher.
    '''
    super(SubstituteReportingForm, self).clean()
    occurrences = self.cleaned_data.get('occurrences', [])
    staffMember = self.cleaned_data.get('staffMember')
    replacementFor = self.cleaned_data.get('replacedStaffMember', [])
    event = self.cleaned_data.get('event')
    # Reject any selected occurrence that already records a substitute
    # for the same replaced staff member.
    for occ in occurrences:
        for this_sub in occ.eventstaffmember_set.all():
            if this_sub.replacedStaffMember == replacementFor:
                self.add_error('occurrences', ValidationError(_('One or more classes you have selected already has a substitute teacher for that class.'), code='invalid'))
    # A staff member may not substitute for an event on which they are
    # already listed as an assistant or instructor.
    if event and staffMember:
        if staffMember in [x.staffMember for x in event.eventstaffmember_set.filter(category__in=[getConstant('general__eventStaffCategoryAssistant'), getConstant('general__eventStaffCategoryInstructor')])]:
            self.add_error('event', ValidationError(_('You cannot substitute teach for a class in which you were an instructor.'), code='invalid'))
|
def get_thread(self, dwThreadId):
    """@type  dwThreadId: int
    @param dwThreadId: Global ID of the thread to look for.

    @rtype:  L{Thread}
    @return: Thread object with the given global ID.
    """
    # Make sure the thread snapshot has been populated before lookup.
    self.__initialize_snapshot()
    try:
        return self.__threadDict[dwThreadId]
    except KeyError:
        raise KeyError("Unknown thread ID: %d" % dwThreadId)
|
def get_probability_no_exceedance(self, poes):
    """Compute and return the probability that, in the time span for which
    the rupture is defined, the rupture itself never generates a ground
    motion value higher than a given level at a given site.

    The calculation starts from the conditional probability that an
    occurrence of the current rupture produces a ground motion value
    higher than the level of interest at the site of interest. The actual
    formula depends on the temporal occurrence model the rupture is
    associated with, and is vectorized over multiple intensity measure
    levels and multiple sites.

    :param poes:
        2D numpy array of conditional probabilities that a rupture
        occurrence causes a ground shaking value exceeding a ground
        motion level at a site. First dimension represents sites,
        second dimension intensity measure levels.
    """
    if not numpy.isnan(self.occurrence_rate):
        # Parametric rupture: delegate to the temporal occurrence model.
        tom = self.temporal_occurrence_model
        return tom.get_probability_no_exceedance(self.occurrence_rate, poes)
    # Nonparametric rupture. Uses the formula
    #   ∑ p(k|T) * p(X<x|rup)^k
    # where `p(k|T)` (attribute probs_occur) is the probability that the
    # rupture occurs k times in the time span `T`, and `p(X<x|rup)` is
    # computed as ``1 - poes``; the summation runs over the number of
    # occurrences `k`.
    if len(poes.shape) == 1:
        # Convert from 1d to 2d.
        poes = numpy.reshape(poes, (-1, len(poes)))
    terms = [weight * ((1 - poes) ** k)
             for k, weight in enumerate(self.probs_occur)]
    prob_no_exceed = numpy.sum(numpy.array(terms), axis=0)
    # Sanity check: probabilities cannot exceed one.
    prob_no_exceed[prob_no_exceed > 1.] = 1.
    # Avoid numeric issues where exceedance is impossible.
    prob_no_exceed[poes == 0.] = 1.
    return prob_no_exceed
|
def cpc(self):
    """:class:`~zhmcclient.Cpc`: The :term:`CPC` to which this storage group
    is associated.

    The returned :class:`~zhmcclient.Cpc` has only a minimal set of
    properties populated.
    """
    # Lazily resolve the CPC on first access and cache the result.
    if not self._cpc:
        uri = self.get_property('cpc-uri')
        cpc_manager = self.manager.console.manager.client.cpcs
        self._cpc = cpc_manager.resource_object(uri)
    return self._cpc
|
def task_add(self, subsystem, name, pid):
    """Add process (with pid) to a cgroup.

    :param subsystem: the cgroup subsystem (currently support 'memory', and 'cpuset')
    :param name: name of the cgroup
    :param pid: PID to add
    """
    payload = {
        'subsystem': subsystem,
        'name': name,
        'pid': pid,
    }
    # Validate the arguments before handing them to the client.
    self._task_chk.check(payload)
    return self._client.json('cgroup.task-add', payload)
|
def getWaveletData(eda):
    '''This function computes the wavelet coefficients.

    INPUT:
        eda:            DataFrame, index is a list of timestamps at 8Hz,
                        columns include EDA, filtered_eda
    OUTPUT:
        wave1Second:    DataFrame, index is a list of timestamps at 1Hz,
                        columns include OneSecond_feature1,
                        OneSecond_feature2, OneSecond_feature3
        waveHalfSecond: DataFrame, index is a list of timestamps at 2Hz,
                        columns include HalfSecond_feature1,
                        HalfSecond_feature2
    '''
    # The input signal is sampled at 8 Hz (see INPUT above).
    sampling_rate = 8
    # Create wavelet dataframes: timestamp indexes for the 1Hz and 2Hz
    # output frames.
    # NOTE(review): reconstructed -- the original assignment was truncated
    # to `oneSecond = halfSecond =` with no right-hand side, and the body
    # referenced undefined names `data` and `sampling_rate`. These ranges
    # follow the documented 8 Hz input; confirm against upstream.
    startTime = eda.index[0]
    oneSecond = pd.date_range(start=startTime, periods=len(eda), freq='1s')
    halfSecond = pd.date_range(start=startTime, periods=len(eda), freq='500ms')
    # Compute wavelets
    cA_n, cD_3, cD_2, cD_1 = pywt.wavedec(eda, 'Haar', level=3)
    # 3 = 1Hz, 2 = 2Hz, 1 = 4Hz
    # Wavelet 1 second window
    N = int(len(eda) / sampling_rate)
    coeff1 = np.max(abs(np.reshape(cD_1[0:4 * N], (N, 4))), axis=1)
    coeff2 = np.max(abs(np.reshape(cD_2[0:2 * N], (N, 2))), axis=1)
    coeff3 = abs(cD_3[0:N])
    wave1Second = pd.DataFrame({'OneSecond_feature1': coeff1,
                                'OneSecond_feature2': coeff2,
                                'OneSecond_feature3': coeff3})
    wave1Second.index = oneSecond[:len(wave1Second)]
    # Wavelet half second window
    N = int(np.floor((len(eda) / 8.0) * 2))
    coeff1 = np.max(abs(np.reshape(cD_1[0:2 * N], (N, 2))), axis=1)
    coeff2 = abs(cD_2[0:N])
    waveHalfSecond = pd.DataFrame({'HalfSecond_feature1': coeff1,
                                   'HalfSecond_feature2': coeff2})
    waveHalfSecond.index = halfSecond[:len(waveHalfSecond)]
    return wave1Second, waveHalfSecond
|
def direct_to_template(request, template, extra_context=None, mimetype=None, **kwargs):
    """Render a given template with any extra URL parameters in the context as
    ``{{ params }}``.
    """
    context = {'params': kwargs}
    # Callables in extra_context are evaluated when building the context.
    for key, value in (extra_context or {}).items():
        context[key] = value() if callable(value) else value
    template_obj = loader.get_template(template)
    return HttpResponse(template_obj.render(context=context, request=request),
                        content_type=mimetype)
|
def get(self, *args, **kw):
    """Load the configuration if necessary and forward the call to the parent."""
    # One-time lazy load of the configuration.
    if not self._loaded:
        self.load_config()
    parent = super(FedmsgConfig, self)
    return parent.get(*args, **kw)
|
def dashrepl(value):
    """Replace any non-word characters with a dash."""
    # \W with re.UNICODE keeps letters, digits and underscores intact.
    return re.compile(r'\W', re.UNICODE).sub('-', value)
|
def stencil(**kwargs):
    """Applying genotype calls to multi-way alignment incidence matrix.

    :param alnfile: alignment incidence file (h5),
    :param gtypefile: genotype calls by GBRS (tsv),
    :param grpfile: gene ID to isoform ID mapping info (tsv)
    :return: genotyped version of alignment incidence file (h5)
    """
    # NOTE: Python 2 module (`print >>` statement and `xrange` below).
    alnfile = kwargs.get('alnfile')
    gtypefile = kwargs.get('gtypefile')
    grpfile = kwargs.get('grpfile')
    # Fall back to the bundled gene-to-transcript mapping when no group
    # file is given; warn (and stencil as-is) if it does not exist either.
    if grpfile is None:
        grpfile2chk = os.path.join(DATA_DIR, 'ref.gene2transcripts.tsv')
        if os.path.exists(grpfile2chk):
            grpfile = grpfile2chk
        else:
            print >> sys.stderr, '[gbrs::stencil] A group file is *not* given. Genotype will be stenciled as is.'
    # Load alignment incidence matrix ('alnfile' is assumed to be in multiway transcriptome)
    alnmat = emase.AlignmentPropertyMatrix(h5file=alnfile, grpfile=grpfile)
    # Load genotype calls
    # Map haplotype names and gene names to matrix indices.
    hid = dict(zip(alnmat.hname, np.arange(alnmat.num_haplotypes)))
    gid = dict(zip(alnmat.gname, np.arange(len(alnmat.gname))))
    # gtmask[haplotype, locus] is 1.0 where the alignment is consistent
    # with the genotype call, 0.0 elsewhere.
    gtmask = np.zeros((alnmat.num_haplotypes, alnmat.num_loci))
    gtcall_g = dict.fromkeys(alnmat.gname)
    with open(gtypefile) as fh:
        if grpfile is not None:
            gtcall_t = dict.fromkeys(alnmat.lname)
            for curline in dropwhile(is_comment, fh):
                item = curline.rstrip().split("\t")
                g, gt = item[:2]
                gtcall_g[g] = gt
                # Haplotype indices named by the call string (e.g. 'AB').
                hid2set = np.array([hid[c] for c in gt])
                tid2set = np.array(alnmat.groups[gid[g]])
                # NOTE(review): indexing gtmask with np.meshgrid looks
                # unusual -- confirm it marks the intended
                # (haplotype, transcript) cells rather than a
                # fancy-indexed subset.
                gtmask[np.meshgrid(hid2set, tid2set)] = 1.0
                for t in tid2set:
                    gtcall_t[alnmat.lname[t]] = gt
        else:
            for curline in dropwhile(is_comment, fh):
                item = curline.rstrip().split("\t")
                g, gt = item[:2]
                gtcall_g[g] = gt
                hid2set = np.array([hid[c] for c in gt])
                gtmask[np.meshgrid(hid2set, gid[g])] = 1.0
    # Zero out alignments inconsistent with the genotype calls and drop
    # the resulting explicit zeros from the sparse data.
    alnmat.multiply(gtmask, axis=2)
    for h in xrange(alnmat.num_haplotypes):
        alnmat.data[h].eliminate_zeros()
    outfile = kwargs.get('outfile')
    if outfile is None:
        outfile = 'gbrs.stenciled.' + os.path.basename(alnfile)
    alnmat.save(h5file=outfile)
|
def space_set_acl(args):
    """Assign an ACL role to list of users for a workspace.

    Returns 0 on success, 1 if any user could not be recognized.
    """
    acl_updates = [{"email": member, "accessLevel": args.role}
                   for member in args.users]
    resp = fapi.update_workspace_acl(args.project, args.workspace, acl_updates)
    fapi._check_response_code(resp, 200)
    unknown = resp.json()['usersNotFound']
    if unknown:
        eprint("Unable to assign role for unrecognized users:")
        for member in unknown:
            eprint("\t{0}".format(member['email']))
        return 1
    if fcconfig.verbosity:
        print("Successfully updated {0} role(s)".format(len(acl_updates)))
    return 0
|
def _connect_signal(self, index):
    """Create signals for building indexes."""
    # Wire the build/remove handlers to the model's save/delete signals.
    for handler_name, source_signal in (('build', post_save),
                                        ('remove_object', post_delete)):
        elastic_signal = ElasticSignal(index, handler_name)
        elastic_signal.connect(source_signal, sender=index.object_type)
        self.signals.append(elastic_signal)
    # Connect signals for all dependencies.
    for dependency in index.get_dependencies():
        # Automatically convert m2m fields to dependencies.
        if isinstance(dependency, (models.ManyToManyField, ManyToManyDescriptor)):
            dependency = ManyToManyDependency(dependency)
        elif not isinstance(dependency, Dependency):
            raise TypeError("Unsupported dependency type: {}".format(repr(dependency)))
        self.signals.extend(dependency.connect(index))
|
def get_length(self, n1, n2, bond_type=BOND_SINGLE):
    """Return the length of a bond between n1 and n2 of type bond_type.

    Arguments:
     | ``n1`` -- the atom number of the first atom in the bond
     | ``n2`` -- the atom number of the second atom in the bond

    Optional argument:
     | ``bond_type`` -- the type of bond [default=BOND_SINGLE]

    This is a safe method for querying a bond_length. If no answer can be
    found, this get_length returns None.
    """
    dataset = self.lengths.get(bond_type)
    # `is None` instead of `== None`: identity check cannot be hijacked
    # by a custom __eq__ (PEP 8).
    if dataset is None:
        return None
    # The atom pair is stored order-independently as a frozenset key.
    return dataset.get(frozenset([n1, n2]))
|
def _get_sizes_checksums(checksums_path):
    """Returns {URL: (size, checksum)}s stored within file."""
    # URL might have spaces inside, but size and checksum will not, so
    # split each non-empty line from the right.
    entries = (line.rsplit(' ', 2)
               for line in _read_file(checksums_path).split('\n')
               if line)
    return {url: (int(size), checksum) for url, size, checksum in entries}
|
def take_snapshot(self, entity_id, lt=None, lte=None):
    """Takes a snapshot of the entity as it existed after the most recent
    event, optionally less than, or less than or equal to, a particular
    position.

    Returns the snapshot, or None when no snapshot strategy is configured
    or no events exist for the entity.
    """
    snapshot = None
    if self._snapshot_strategy:
        # Get the latest event (optionally until a particular position).
        latest_event = self.event_store.get_most_recent_event(entity_id, lt=lt, lte=lte)
        # If there is something to snapshot, then look for a snapshot
        # taken before or at the entity version of the latest event. Please
        # note, the snapshot might have a smaller version number than
        # the latest event if events occurred since the latest snapshot was taken.
        if latest_event is not None:
            latest_snapshot = self._snapshot_strategy.get_snapshot(entity_id, lt=lt, lte=lte)
            latest_version = latest_event.originator_version
            if latest_snapshot and latest_snapshot.originator_version == latest_version:
                # If up-to-date snapshot exists, there's nothing to do.
                snapshot = latest_snapshot
            else:
                # Otherwise recover entity state from latest snapshot.
                if latest_snapshot:
                    initial_state = entity_from_snapshot(latest_snapshot)
                    gt = latest_snapshot.originator_version
                else:
                    # No snapshot yet: replay from the very beginning.
                    initial_state = None
                    gt = None
                # Fast-forward entity state to latest version.
                entity = self.get_and_project_events(entity_id=entity_id, gt=gt, lte=latest_version, initial_state=initial_state, )
                # Take snapshot from entity.
                snapshot = self._snapshot_strategy.take_snapshot(entity_id, entity, latest_version)
    return snapshot
|
def content(self):
    """Return the entire section content."""
    # Read the full section: from offset 0 through self.size bytes.
    total_size = self.size
    return _bfd.section_get_content(self.bfd, self._ptr, 0, total_size)
|
def derivLogCdfNormal(z):
    """Robust implementations of derivative of the log cdf of a standard normal.

    @see [[https://github.com/mseeger/apbsint/blob/master/src/eptools/potentials/SpecfunServices.h original implementation]]
    in C from Matthias Seeger.
    """
    if (abs(z) < ERF_CODY_LIMIT1):
        # Central region: Phi(z) approx (1 + y R_3(y^2))/2, y = z/sqrt(2)
        return 2.0 * np.exp(logPdfNormal(z)) / (1.0 + (z / M_SQRT2) * _erfRationalHelperR3(0.5 * z * z))
    elif (z < 0.0):
        # Lower tail: Phi(z) approx N(z) Q(-z)/(-z), z < 0
        return -z / _erfRationalHelper(-z)
    else:
        # Upper tail: evaluate via the pdf to avoid cancellation in 1-Phi(z).
        t = np.exp(logPdfNormal(z))
        return t / (1.0 - t * _erfRationalHelper(z) / z)
|
def set_geometry(self, geom):
    """A convenience function to set the geometry variables.

    Args:
        geom: A tuple containing (thet0, thet, phi0, phi, alpha, beta).
            See the Scatterer class documentation for a description of
            these angles.
    """
    # Unpack first so a malformed tuple fails before any attribute is set.
    thet0, thet, phi0, phi, alpha, beta = geom
    self.thet0 = thet0
    self.thet = thet
    self.phi0 = phi0
    self.phi = phi
    self.alpha = alpha
    self.beta = beta
|
def getOrCreateForeignKey(self, model_class, field_name):
    """Return related random object to set as ForeignKey."""
    # Getting related object type
    # Eg: <django.db.models.fields.related.ForeignKey: test_ForeignKey>
    instance = getattr(model_class, field_name).field
    # Getting the model name by instance to find/create first id/pk.
    # Eg: <class 'django.contrib.auth.models.User'>
    # NOTE(review): `related_model` is called and then its class taken; on
    # modern Django `field.related_model` is already the model class --
    # confirm this call pattern is intended for the targeted Django version.
    related_model = instance.related_model().__class__
    # Trying to get random id from queryset.
    objects = related_model.objects.all()
    if objects.exists():
        return self.randomize(objects)
    # Returning first object from tuple `(<User: user_name>, False)`
    return related_model.objects.get_or_create(pk=1)[0]
|
def validator(node, value):
    '''Colander validator that checks whether a given value is a valid
    company number.

    For our purposes, we expect a firm number composed of nine or ten
    digits like 2028445291. Sometimes a company number is formatted with
    separation marks like 0.400.378.485; :func:`actoren.validators.kbo_preparer`
    transforms such input beforehand.

    :raises colander.Invalid: if the value is not a valid Belgian company
        number.
    '''
    # Exactly nine or ten ASCII digits, nothing else.
    if re.match(r'^[0-9]{9,10}$', value) is None:
        raise colander.Invalid(node, 'Dit is geen correct ondernemingsnummer.')
|
def step(h, logy=None, axes=None, **kwargs):
    """Make a matplotlib step plot from a ROOT histogram.

    Parameters
    ----------
    h : Hist
        A rootpy Hist.
    logy : bool, optional (default=None)
        If True then clip the y range between 1E-300 and 1E300.
        If None (the default) then automatically determine if the axes
        are log-scale and if this clipping should be performed.
    axes : matplotlib Axes instance, optional (default=None)
        The axes to plot on. If None then use the global current axes.
    kwargs : additional keyword arguments, optional
        Additional keyword arguments are passed directly to
        matplotlib's fill_between function.

    Returns
    -------
    Returns the value from matplotlib's fill_between function.
    """
    target_axes = plt.gca() if axes is None else axes
    # Follow the axes' current scale when logy is not given explicitly.
    clip_y = target_axes.get_yscale() == 'log' if logy is None else logy
    _set_defaults(h, kwargs, ['common', 'line'])
    if kwargs.get('color') is None:
        kwargs['color'] = h.GetLineColor('mpl')
    # Append a trailing zero so the last bin is closed by the step plot.
    heights = np.array(list(h.y()) + [0.])
    if clip_y:
        np.clip(heights, 1E-300, 1E300, out=heights)
    return target_axes.step(list(h.xedges()), heights, where='post', **kwargs)
|
def blit_rect(self, console: tcod.console.Console, x: int, y: int, width: int, height: int, bg_blend: int) -> None:
    """Blit onto a Console without scaling or rotation.

    Args:
        console (Console): Blit destination Console.
        x (int): Console tile X position starting from the left at 0.
        y (int): Console tile Y position starting from the top at 0.
        width (int): Use -1 for Image width.
        height (int): Use -1 for Image height.
        bg_blend (int): Background blending mode to use.
    """
    target = _console(console)
    lib.TCOD_image_blit_rect(self.image_c, target, x, y, width, height, bg_blend)
|
def divs(x, y):
    """Safe element-wise division: positions where y == 0 yield 1."""
    out = np.ones(x.shape)
    mask = y != 0
    out[mask] = x[mask] / y[mask]
    return out
|
def addColumn(self, col, index=None):
    'Insert column at given index or after all columns.'
    # A falsy column is a no-op (returns None, matching the implicit
    # fall-through of the original).
    if not col:
        return None
    position = len(self.columns) if index is None else index
    col.sheet = self
    self.columns.insert(position, col)
    return col
|
def get_raw(self):
    """:rtype: bytearray"""
    # Start with this object's own bytes, then append each child's.
    raw = bytearray(self.get_obj())
    for child in self.list:
        raw += child.get_raw()
    return raw
|
def get(self, sid):
    """Constructs a ModelBuildContext.

    :param sid: The unique string that identifies the resource

    :returns: twilio.rest.autopilot.v1.assistant.model_build.ModelBuildContext
    :rtype: twilio.rest.autopilot.v1.assistant.model_build.ModelBuildContext
    """
    assistant_sid = self._solution['assistant_sid']
    return ModelBuildContext(self._version, assistant_sid=assistant_sid, sid=sid)
|
def _gst_available ( ) :
"""Determine whether Gstreamer and the Python GObject bindings are
installed ."""
|
try :
import gi
except ImportError :
return False
try :
gi . require_version ( 'Gst' , '1.0' )
except ( ValueError , AttributeError ) :
return False
try :
from gi . repository import Gst
# noqa
except ImportError :
return False
return True
|
def get_file_from_iso_fp(self, outfp, **kwargs):
    # type: (BinaryIO, Any) -> None
    '''A method to fetch a single file from the ISO and write it out
    to the file object.

    Parameters:
     outfp - The file object to write data to.
     blocksize - The number of bytes in each transfer.
     iso_path - The absolute ISO9660 path to lookup on the ISO (exclusive
                with rr_path, joliet_path, and udf_path).
     rr_path - The absolute Rock Ridge path to lookup on the ISO (exclusive
               with iso_path, joliet_path, and udf_path).
     joliet_path - The absolute Joliet path to lookup on the ISO (exclusive
                   with iso_path, rr_path, and udf_path).
     udf_path - The absolute UDF path to lookup on the ISO (exclusive with
                iso_path, rr_path, and joliet_path).
    Returns:
     Nothing.
    '''
    if not self._initialized:
        raise pycdlibexception.PyCdlibInvalidInput('This object is not yet initialized; call either open() or new() to create an ISO')

    blocksize = 8192
    paths = {'iso_path': None, 'rr_path': None, 'joliet_path': None, 'udf_path': None}
    num_paths = 0
    for key, value in kwargs.items():
        if key == 'blocksize':
            blocksize = value
        elif key in paths and value is not None:
            paths[key] = utils.normpath(value)
            num_paths += 1
        else:
            # Mirrors the original dispatch: a path keyword explicitly set
            # to None is rejected as unknown, like any unrecognized keyword.
            raise pycdlibexception.PyCdlibInvalidInput('Unknown keyword %s' % (key))
    if num_paths != 1:
        raise pycdlibexception.PyCdlibInvalidInput("Exactly one of 'iso_path', 'rr_path', 'joliet_path', or 'udf_path' must be passed")

    if paths['udf_path'] is not None:
        self._udf_get_file_from_iso_fp(outfp, blocksize, paths['udf_path'])
    else:
        self._get_file_from_iso_fp(outfp, blocksize, paths['iso_path'],
                                   paths['rr_path'], paths['joliet_path'])
|
def clean_data(freqs, data, chunk, avg_bin):
    """Extract time-varying (wandering) lines from strain data.

    Parameters
    ----------
    freqs : list
        Frequencies of the wandering lines to remove.
    data : pycbc.types.TimeSeries
        Strain data to extract the wandering lines from.
    chunk : float
        Duration of the chunks the data will be divided into to account
        for the time variation of the wandering lines. Must be smaller
        than ``data.duration`` and allow for at least a few chunks.
    avg_bin : float
        Duration of the bins each chunk will be divided into for averaging
        the inner product when measuring the parameters of the line. Must
        be smaller than ``chunk``.

    Returns
    -------
    data : pycbc.types.TimeSeries
        The strain data with the wandering lines removed.
    """
    if avg_bin >= chunk:
        raise ValueError('The bin size for averaging the inner product '
                         'must be less than the chunk size.')
    if chunk >= data.duration:
        raise ValueError('The chunk size must be less than the '
                         'data duration.')
    # Half-overlapping chunks: each step advances by chunk/2.
    steps = numpy.arange(0, int(data.duration / chunk) - 0.5, 0.5)
    seglen = chunk * data.sample_rate
    tref = float(data.start_time)
    for freq in freqs:
        for step in steps:
            start, end = int(step * seglen), int((step + 1) * seglen)
            chunk_line = matching_line(freq, data[start:end], tref,
                                       bin_size=avg_bin)
            # Apply a Hann window on the sides of chunk_line to smooth
            # boundaries and avoid discontinuities between chunks.
            hann_window = numpy.hanning(len(chunk_line))
            apply_hann = TimeSeries(numpy.ones(len(chunk_line)),
                                    delta_t=chunk_line.delta_t,
                                    epoch=chunk_line.start_time)
            # BUG FIX: slice indices must be integers; `len(...) / 2` is a
            # float under Python 3 and raised TypeError. Use floor division.
            half = len(hann_window) // 2
            if step == 0:
                apply_hann.data[half:] *= hann_window[half:]
            elif step == steps[-1]:
                apply_hann.data[:half] *= hann_window[:half]
            else:
                apply_hann.data *= hann_window
            chunk_line.data *= apply_hann.data
            data.data[start:end] -= chunk_line.data.real
    return data
|
def bind(cls):
    """Bind the search button and input field to the adapter's event handler.

    Clicking the search button or typing in the input field dispatches
    to ``cls.start``.
    """
    # Let the parent adapter install its own bindings first.
    # NOTE(review): super(cls, cls) is the idiom for calling the parent
    # implementation from a classmethod that only has the class object.
    super(cls, cls).bind()
    cls.search_btn_el.bind("click", cls.start)
    # func_on_enter presumably filters keypresses so only Enter triggers
    # the handler — confirm against its definition.
    cls.input_el.bind("keypress", func_on_enter(cls.start))
|
def _update_record ( self , identifier = None , rtype = None , name = None , content = None ) :
"""Update a record from the hosted zone ."""
|
return self . _change_record_sets ( 'UPSERT' , rtype , name , content )
|
def write_segment(buff, segment, ver, ver_range, eci=False):
    """Serialize one segment into the bit buffer.

    :param buff: The byte buffer.
    :param _Segment segment: The segment to serialize.
    :param ver: ``None`` if a QR Code is written, otherwise the Micro QR
            Code version constant (M1..M4).
    :param ver_range: Version constant (Micro) or a constant representing
            a range of QR Code versions, used to size the character count
            indicator.
    :param eci: If ``True``, emit an ECI header for byte segments whose
            encoding differs from the default.
    """
    append_bits = buff.append_bits
    seg_mode = segment.mode
    # Optional ECI header: mode indicator plus 8-bit assignment number.
    if eci and seg_mode == consts.MODE_BYTE \
            and segment.encoding != consts.DEFAULT_BYTE_ENCODING:
        append_bits(consts.MODE_ECI, 4)
        append_bits(get_eci_assignment_number(segment.encoding), 8)
    if ver is None:
        # Regular QR Code: fixed 4-bit mode indicator.
        append_bits(seg_mode, 4)
    elif ver > consts.VERSION_M1:
        # Micro QR Code: variable-width mode indicator (M1 has none).
        append_bits(consts.MODE_TO_MICRO_MODE_MAPPING[seg_mode], ver + 3)
    # Character count indicator, sized by mode and version range.
    append_bits(segment.char_count,
                consts.CHAR_COUNT_INDICATOR_LENGTH[seg_mode][ver_range])
    buff.extend(segment.bits)
|
def update_hit_count_ajax(request, *args, **kwargs):
    """Deprecated in 1.2. Use hitcount.views.HitCountJSONView instead."""
    # Emit the deprecation warning, then delegate to the replacement view.
    warnings.warn("hitcount.views.update_hit_count_ajax is deprecated. Use hitcount.views.HitCountJSONView instead.", RemovedInHitCount13Warning)
    return HitCountJSONView.as_view()(request, *args, **kwargs)
|
def export_obo(path_to_file, connection=None):
    """Export the database to an OBO file.

    :param path_to_file: destination path for the exported file
    :param connection: connection string (optional)
    :return:
    """
    manager = DbManager(connection)
    manager.export_obo(path_to_export_file=path_to_file)
    # Close the session so the connection is released promptly.
    manager.session.close()
|
def httpretty_callback(request, uri, headers):
    """httpretty request handler.

    Converts a call intercepted by httpretty to the stack-in-a-box
    infrastructure.

    :param request: request object
    :param uri: the uri of the request
    :param headers: headers for the response
    :returns: tuple - (int, dict, string) containing:
              int - the http response status code
              dict - the headers for the http response
              string - http string response
    """
    # Normalize response headers to case-insensitive lookup.
    response_headers = CaseInsensitiveDict()
    response_headers.update(headers)
    # Replace the request's headers with a case-insensitive copy as well.
    incoming_headers = CaseInsensitiveDict()
    incoming_headers.update(request.headers)
    request.headers = incoming_headers
    return StackInABox.call_into(request.method, request, uri, response_headers)
|
def is_initialised(self):
    """Verify that the simulation has everything it needs to run.

    Raises:
        AttributeError: if the lattice or atoms have not been initialised,
            or if neither ``number_of_jumps`` nor ``for_time`` is set.

    Returns:
        None
    """
    if not self.lattice:
        raise AttributeError('Running a simulation needs the lattice to be initialised')
    if not self.atoms:
        raise AttributeError('Running a simulation needs the atoms to be initialised')
    # At least one stopping criterion must be configured.
    if not (self.number_of_jumps or self.for_time):
        raise AttributeError('Running a simulation needs number_of_jumps or for_time to be set')
|
async def _throttled_request(self, request):
    '''Process a single request, respecting the concurrency limit.

    Runs the handler under the session's concurrency semaphore and an
    overall processing timeout, maps every failure mode to an RPC error
    result, sends the result back for `Request` objects, and optionally
    closes the connection afterwards.
    '''
    disconnect = False
    try:
        timeout = self.processing_timeout
        async with timeout_after(timeout):
            async with self._incoming_concurrency:
                # Bail out early if the session started closing while we
                # were waiting for a concurrency slot.
                if self.is_closing():
                    return
                # Throttle costly sessions before doing the real work.
                if self._cost_fraction:
                    await sleep(self._cost_fraction * self.cost_sleep)
                result = await self.handle_request(request)
    except (ProtocolError, RPCError) as e:
        # Protocol-level failures become the result sent to the client.
        result = e
    except TaskTimeout:
        self.logger.info(f'incoming request {request} timed out after {timeout} secs')
        result = RPCError(JSONRPC.SERVER_BUSY, 'server busy - request timed out')
    except ReplyAndDisconnect as e:
        # The handler wants its result delivered and the session closed.
        result = e.result
        disconnect = True
    except ExcessiveSessionCostError:
        result = RPCError(JSONRPC.EXCESSIVE_RESOURCE_USAGE, 'excessive resource usage')
        disconnect = True
    except CancelledError:
        # Never swallow cancellation.
        raise
    except Exception:
        # Unexpected bug in a handler: log it and report a generic error.
        self.logger.exception(f'exception handling {request}')
        result = RPCError(JSONRPC.INTERNAL_ERROR, 'internal server error')
    # Only Request objects carry a response; notifications do not.
    if isinstance(request, Request):
        message = request.send_result(result)
        if message:
            await self._send_message(message)
    if isinstance(result, Exception):
        self._bump_errors(result)
    if disconnect:
        await self.close()
|
def main(self, *args, **kwargs):
    """Run the CLI entry point, catching all unexpected exceptions.

    On failure, reports to Sentry when available; re-raises when not
    attached to an interactive terminal, otherwise falls back to the
    GitHub issue-reporting flow.
    """
    try:
        return super().main(*args, **kwargs)
    except Exception:
        if HAS_SENTRY:
            self._handle_sentry()
        # Non-interactive runs should surface the traceback as-is.
        if not (sys.stdin.isatty() and sys.stdout.isatty()):
            raise
        self._handle_github()
|
def _FillEventSourceHeap(self, storage_writer, event_source_heap, start_with_first=False):
    """Fills the event source heap with the available written event sources.

    Args:
      storage_writer (StorageWriter): storage writer for a session storage.
      event_source_heap (_EventSourceHeap): event source heap.
      start_with_first (Optional[bool]): True if the function should start
          with the first written event source.
    """
    if self._processing_profiler:
        self._processing_profiler.StartTiming('fill_event_source_heap')
    fetch_first = start_with_first
    while True:
        # Each retrieval from the storage writer is timed individually.
        if self._processing_profiler:
            self._processing_profiler.StartTiming('get_event_source')
        if fetch_first:
            source = storage_writer.GetFirstWrittenEventSource()
            fetch_first = False
        else:
            source = storage_writer.GetNextWrittenEventSource()
        if self._processing_profiler:
            self._processing_profiler.StopTiming('get_event_source')
        # Stop when the writer is exhausted or the heap cannot take more.
        if not source:
            break
        event_source_heap.PushEventSource(source)
        if event_source_heap.IsFull():
            break
    if self._processing_profiler:
        self._processing_profiler.StopTiming('fill_event_source_heap')
|
def add_binding(self, node, value, report_redef=True):
    """Called when a binding is altered.

    - `node` is the statement responsible for the change
    - `value` is the optional new value, a Binding instance, associated
      with the binding; if None, the binding is deleted if it exists.
    - if `report_redef` is True (default), rebinding while unused will be
      reported.
    """
    redefinedWhileUnused = False
    # Class scopes do not see enclosing function scopes, so only scan the
    # scope stack for shadowed imports outside of a class body.
    if not isinstance(self.scope, ClassScope):
        for scope in self.scope_stack[::-1]:
            existing = scope.get(value.name)
            # Report rebinding an import that was never used, unless the
            # new binding re-imports the very same name, or the two
            # bindings live on different conditional branches.
            if (isinstance(existing, Importation) and not existing.used and (not isinstance(value, Importation) or value.fullName == existing.fullName) and report_redef and not self.different_forks(node, existing.source)):
                redefinedWhileUnused = True
                self.report(messages.RedefinedWhileUnused, node, value.name, existing.source)
    existing = self.scope.get(value.name)
    # Special-case redefinition of a name also bound inside a list
    # comprehension (only if it was not already reported above).
    if not redefinedWhileUnused and self.has_parent(value.source, ast.ListComp):
        if (existing and report_redef and not self.has_parent(existing.source, (ast.For, ast.ListComp)) and not self.different_forks(node, existing.source)):
            self.report(messages.RedefinedInListComp, node, value.name, existing.source)
    # An unused Definition being redefined is reported; note that in that
    # case the scope keeps the OLD binding (the else branch is skipped).
    if (isinstance(existing, Definition) and not existing.used and not self.different_forks(node, existing.source)):
        self.report(messages.RedefinedWhileUnused, node, value.name, existing.source)
    else:
        self.scope[value.name] = value
|
def ones_comp_sum16(num1: int, num2: int) -> int:
    """Calculates the 1's complement sum for 16-bit numbers.

    Args:
        num1: 16-bit number.
        num2: 16-bit number.

    Returns:
        The calculated result.
    """
    total = num1 + num2
    if total < (1 << 16):
        return total
    # End-around carry: fold the overflow bit back into the low 16 bits.
    return total - 0xFFFF
|
def get_indicator(self, resource):
    """Return a change indicator (mtime and size) for a `Resource`."""
    path = resource.real_path
    mtime = os.path.getmtime(path)
    size = os.path.getsize(path)
    # On DOS/Windows, a folder's mtime does not change when files are
    # added, so include the entry count in the indicator for folders.
    if os.name != 'posix' and os.path.isdir(path):
        return (mtime, len(os.listdir(path)), size)
    return (mtime, size)
|
def _generate_squashed_layer_path_id(self):
    """Generate the id used to name the directory that stores the
    squashed layer content in the archive.

    This mimics what Docker does here:
    https://github.com/docker/docker/blob/v1.10.0-rc1/image/v1/imagev1.go#L42
    To make it simpler we reuse the old image metadata and modify it to
    what it should look like, i.e. exactly as
    https://github.com/docker/docker/blob/v1.10.0-rc1/image/v1/imagev1.go#L64

    Returns the sha256 hex digest of the resulting v1 metadata JSON.
    """
    # Using OrderedDict, because the order of JSON elements is important:
    # the digest is computed over the serialized form.
    v1_metadata = OrderedDict(self.old_image_config)
    # Update image creation date.
    v1_metadata['created'] = self.date
    # Remove elements that do not belong in v1 metadata.
    # Do not fail if a key is not found.
    for key in 'history', 'rootfs', 'container':
        v1_metadata.pop(key, None)
    # Docker internally changes the order of keys between exported
    # metadata. We need to add the 'os' element back after 'layer_id',
    # so pop it here and re-insert it below.
    operating_system = v1_metadata.pop('os', None)
    # The 'layer_id' element is the chain_id of the squashed layer.
    v1_metadata['layer_id'] = "sha256:%s" % self.chain_ids[-1]
    # Re-add 'os' (now positioned after 'layer_id').
    if operating_system:
        v1_metadata['os'] = operating_system
    # The 'parent' element is the name of the directory (inside the
    # exported tar archive) of the last layer that we move
    # (the layer below the squashed layer).
    if self.layer_paths_to_move:
        if self.layer_paths_to_squash:
            parent = self.layer_paths_to_move[-1]
        else:
            parent = self.layer_paths_to_move[0]
        v1_metadata['parent'] = "sha256:%s" % parent
    # The 'Image' element is the id of the layer from which we squash.
    if self.squash_id:
        # Update image id, should be one layer below the squashed layer.
        v1_metadata['config']['Image'] = self.squash_id
    else:
        v1_metadata['config']['Image'] = ""
    # Get the sha256sum of the JSON-exported metadata; the metadata
    # itself is not needed after this point.
    sha = self._dump_json(v1_metadata)[1]
    return sha
|
def _process_deprecated(attrib, deprecated_attrib, kwargs):
    """Resolve a deprecated keyword argument against its replacement.

    Warns when the deprecated name is used, refuses simultaneous use of
    both names, and returns whichever value applies.
    """
    if deprecated_attrib not in DEPRECATIONS:
        raise ValueError('{0} not included in deprecations list'.format(deprecated_attrib))
    if deprecated_attrib not in kwargs:
        return attrib
    replacement = DEPRECATIONS[deprecated_attrib]
    warnings.warn("'{0}' is DEPRECATED use '{1}' instead".format(deprecated_attrib, replacement), DeprecationWarning)
    # Using both the old and the new keyword is ambiguous — reject it.
    if attrib:
        raise ValueError("You can't use both '{0}' and '{1}'. Please only use one of them".format(deprecated_attrib, replacement))
    return kwargs.pop(deprecated_attrib)
|
def WriteToFD(self, fd):
    """Write out the updated configuration to the fd."""
    # Without a writeback location there is nowhere to persist changes.
    if not self.writeback:
        raise RuntimeError("Attempting to write a configuration without a writeback location.")
    self.writeback.SaveDataToFD(self.writeback_data, fd)
|
def _html_to_img_tuples(html: str, format: str = 'jpg', n_images: int = 10) -> list:
    "Parse the google images html into up to `n_images` tuples of `(fname, url)`"
    soup = BeautifulSoup(html, 'html.parser')
    # Each result's metadata is a JSON blob inside a div.rg_meta element.
    metadata = (json.loads(tag.text) for tag in soup.find_all('div', {'class': 'rg_meta'}))
    # 'ou' is the original image url; 'ity' is the image type/extension.
    matches = ((_img_fname(meta['ou']), meta['ou']) for meta in metadata if meta['ity'] == format)
    return list(itertools.islice(matches, n_images))
|
def _load_webgl_backend(ipython):
    """Load the webgl backend for the IPython notebook"""
    from .. import app
    backend = app.use_app("ipynb_webgl")
    if backend.backend_name != "ipynb_webgl":
        # TODO: Improve this error message
        ipython.write_err("Unable to load webgl backend of Vispy")
    else:
        ipython.write("Vispy IPython module has loaded successfully")
|
def posterior_predictive_to_xarray(self):
    """Convert posterior_predictive samples to xarray."""
    samples = self.posterior_predictive
    if isinstance(samples, dict):
        return dict_to_dataset(samples, library=None, coords=self.coords, dims=self.dims)
    raise TypeError("DictConverter.posterior_predictive is not a dictionary")
|
def public_key_to_address(public_key: Union[PublicKey, bytes]) -> ChecksumAddress:
    """Converts a public key to an Ethereum address."""
    # Normalize to the uncompressed byte serialization.
    if isinstance(public_key, PublicKey):
        key_bytes = public_key.format(compressed=False)
    else:
        key_bytes = public_key
    assert isinstance(key_bytes, bytes)
    # Address = last 20 bytes of keccak(pubkey without the 0x04 prefix).
    return to_checksum_address(sha3(key_bytes[1:])[-20:])
|
async def parse_get_schema_response(get_schema_response: str) -> (str, str):
    """Parse a GET_SCHEMA response to get a Schema in the format compatible
    with the Anoncreds API.

    :param get_schema_response: response of a GET_SCHEMA request.
    :return: Schema Id and Schema json:
        id: identifier of schema
        attrNames: array of attribute name strings
        name: Schema's name string
        version: Schema's version string
        ver: Version of the Schema json
    """
    logger = logging.getLogger(__name__)
    logger.debug("parse_get_schema_response: >>> get_schema_response: %r", get_schema_response)
    # The ctypes callback is created once and cached as a function
    # attribute so repeated calls reuse the same C callback object.
    if not hasattr(parse_get_schema_response, "cb"):
        logger.debug("parse_get_schema_response: Creating callback")
        parse_get_schema_response.cb = create_cb(CFUNCTYPE(None, c_int32, c_int32, c_char_p, c_char_p))
    c_get_schema_response = c_char_p(get_schema_response.encode('utf-8'))
    # Delegates to the native libindy call; the callback delivers the
    # two C strings that make up the result.
    (schema_id, schema_json) = await do_call('indy_parse_get_schema_response', c_get_schema_response, parse_get_schema_response.cb)
    res = (schema_id.decode(), schema_json.decode())
    logger.debug("parse_get_schema_response: <<< res: %r", res)
    return res
|
def list_bucket(self, bucket):
    """List bucket contents page by page and write them to the response.

    Production apps should set page_size to a practical value.

    Args:
      bucket: bucket.
    """
    self.response.write('Listbucket result:\n')
    page_size = 1
    prefix = bucket + '/foo'
    stats = gcs.listbucket(prefix, max_keys=page_size)
    while True:
        count = 0
        for stat in stats:
            count += 1
            self.response.write(repr(stat))
            self.response.write('\n')
        # A short (or empty) page means we have reached the end.
        if count != page_size or count == 0:
            break
        # Continue after the last entry seen on this page.
        stats = gcs.listbucket(prefix, max_keys=page_size, marker=stat.filename)
|
def _fit_full(self, pairs, y):
    """Learn a full metric using MMC (iterative projections + gradient ascent).

    Parameters
    ----------
    pairs : array-like, shape (n_constraints, 2, n_features)
        Constraint pairs; pairs[i] holds the two points of constraint i.
        (Inferred from the indexing below — confirm against the caller.)
    y : array-like of +1/-1
        +1 marks a similar pair, -1 a dissimilar pair.

    Returns
    -------
    self : the fitted estimator, with ``A_``, ``converged_``, ``n_iter_``
        and ``transformer_`` updated.
    """
    num_dim = pairs.shape[2]
    error1 = error2 = 1e10
    # error-bound of iterative projection on C1 and C2
    eps = 0.01
    A = self.A_
    pos_pairs, neg_pairs = pairs[y == 1], pairs[y == -1]
    # Create weight vector from similar samples
    pos_diff = pos_pairs[:, 0, :] - pos_pairs[:, 1, :]
    w = np.einsum('ij,ik->jk', pos_diff, pos_diff).ravel()
    # `w` is the sum of all outer products of the rows in `pos_diff`.
    # The above `einsum` is equivalent to the much more inefficient:
    # w = np.apply_along_axis(
    #         lambda x: np.outer(x, x).ravel(),
    #         X[a] - X[b]
    #     ).sum(axis=0)
    t = w.dot(A.ravel()) / 100.0
    w_norm = np.linalg.norm(w)
    # make `w` a unit vector
    w1 = w / w_norm
    # distance from origin to the `w^T x = t` plane
    t1 = t / w_norm
    cycle = 1
    # initial step size along the gradient
    alpha = 0.1
    # gradient of the similarity constraint function
    grad1 = self._fS1(pos_pairs, A)
    # gradient of the dissimilarity constraint function
    grad2 = self._fD1(neg_pairs, A)
    # gradient of fD1 orthogonal to fS1
    M = self._grad_projection(grad1, grad2)
    A_old = A.copy()
    # NOTE(review): `xrange` implies Python 2 (or a six-style alias
    # elsewhere in the module) — confirm the module's compatibility layer.
    for cycle in xrange(self.max_iter):
        # projection of constraints C1 and C2
        satisfy = False
        for it in xrange(self.max_proj):
            # First constraint:
            # f(A) = \sum_{i,j \in S} d_ij' A d_ij <= t          (1)
            # (1) can be rewritten as a linear constraint: w^T x = t,
            # where x is the unrolled matrix of A,
            # w is also an unrolled matrix of W where
            # W_{kl} = \sum_{i,j \in S} d_ij^k * d_ij^l
            x0 = A.ravel()
            if w.dot(x0) <= t:
                x = x0
            else:
                x = x0 + (t1 - w1.dot(x0)) * w1
            A[:] = x.reshape(num_dim, num_dim)
            # Second constraint:
            # PSD constraint A >= 0: project A onto the PSD cone by
            # clipping negative eigenvalues of the symmetrized matrix.
            l, V = np.linalg.eigh((A + A.T) / 2)
            A[:] = np.dot(V * np.maximum(0, l[None, :]), V.T)
            fDC2 = w.dot(A.ravel())
            error2 = (fDC2 - t) / t
            if error2 < eps:
                satisfy = True
                break
        # third constraint: gradient ascent
        # max: g(A) >= 1
        # here we suppose g(A) = fD(A) = \sum_{I,J \in D} sqrt(d_ij' A d_ij)
        obj_previous = self._fD(neg_pairs, A_old)
        # g(A_old)
        obj = self._fD(neg_pairs, A)
        # g(A)
        if satisfy and (obj > obj_previous or cycle == 0):
            # If projection of 1 and 2 is successful, and such projection
            # improves the objective function, slightly increase the
            # learning rate and update from the current A.
            alpha *= 1.05
            A_old[:] = A
            grad2 = self._fS1(pos_pairs, A)
            grad1 = self._fD1(neg_pairs, A)
            M = self._grad_projection(grad1, grad2)
            A += alpha * M
        else:
            # If projection of 1 and 2 failed, or obj <= obj_previous due
            # to projection of 1 and 2, shrink the learning rate and
            # re-update from the previous A.
            alpha /= 2
            A[:] = A_old + alpha * M
        delta = np.linalg.norm(alpha * M) / np.linalg.norm(A_old)
        if delta < self.convergence_threshold:
            break
        if self.verbose:
            print('mmc iter: %d, conv = %f, projections = %d' % (cycle, delta, it + 1))
    if delta > self.convergence_threshold:
        self.converged_ = False
        if self.verbose:
            print('mmc did not converge, conv = %f' % (delta,))
    else:
        self.converged_ = True
        if self.verbose:
            print('mmc converged at iter %d, conv = %f' % (cycle, delta))
    # Keep the last accepted iterate (A_old), not the possibly-rejected A.
    self.A_[:] = A_old
    self.n_iter_ = cycle
    self.transformer_ = transformer_from_metric(self.A_)
    return self
|
def igft(self, s_hat):
    r"""Compute the inverse graph Fourier transform.

    The inverse graph Fourier transform of a Fourier-domain signal
    :math:`\hat{s}` is defined as

    .. math:: s = U \hat{s},

    where :math:`U` is the Fourier basis :attr:`U`.

    Parameters
    ----------
    s_hat : array_like
        Graph signal in the Fourier domain.

    Returns
    -------
    s : ndarray
        Representation of s_hat in the vertex domain.
    """
    checked = self._check_signal(s_hat)
    # Contract the Fourier basis columns against the leading axis of the
    # signal; extra trailing axes (e.g. multiple signals) are preserved.
    return np.tensordot(self.U, checked, ([1], [0]))
|
def build_specfile_sections(spec):
    """Build the sections of an RPM spec file.

    :param spec: mapping of spec-file tags to their content; default
        prep/build/install/clean rules are filled in when absent.
    :return: the formatted spec sections as a single string.
    """
    # NOTE: the original implementation shadowed the builtin ``str`` with
    # its accumulator variable; renamed to ``sections``.
    mandatory_sections = {'DESCRIPTION': '\n%%description\n%s\n\n', }
    sections = SimpleTagCompiler(mandatory_sections).compile(spec)
    optional_sections = {
        'DESCRIPTION_': '%%description -l %s\n%s\n\n',
        'CHANGELOG': '%%changelog\n%s\n\n',
        'X_RPM_PREINSTALL': '%%pre\n%s\n\n',
        'X_RPM_POSTINSTALL': '%%post\n%s\n\n',
        'X_RPM_PREUNINSTALL': '%%preun\n%s\n\n',
        'X_RPM_POSTUNINSTALL': '%%postun\n%s\n\n',
        'X_RPM_VERIFY': '%%verify\n%s\n\n',
        # These are for internal use but could possibly be overridden
        'X_RPM_PREP': '%%prep\n%s\n\n',
        'X_RPM_BUILD': '%%build\n%s\n\n',
        'X_RPM_INSTALL': '%%install\n%s\n\n',
        'X_RPM_CLEAN': '%%clean\n%s\n\n',
    }
    # Default prep, build, install and clean rules.
    # TODO: optimize those build steps, to not compile the project a second time
    if 'X_RPM_PREP' not in spec:
        spec['X_RPM_PREP'] = '[ -n "$RPM_BUILD_ROOT" -a "$RPM_BUILD_ROOT" != / ] && rm -rf "$RPM_BUILD_ROOT"' + '\n%setup -q'
    if 'X_RPM_BUILD' not in spec:
        spec['X_RPM_BUILD'] = '[ ! -e "$RPM_BUILD_ROOT" -a "$RPM_BUILD_ROOT" != / ] && mkdir "$RPM_BUILD_ROOT"'
    if 'X_RPM_INSTALL' not in spec:
        spec['X_RPM_INSTALL'] = 'scons --install-sandbox="$RPM_BUILD_ROOT" "$RPM_BUILD_ROOT"'
    if 'X_RPM_CLEAN' not in spec:
        spec['X_RPM_CLEAN'] = '[ -n "$RPM_BUILD_ROOT" -a "$RPM_BUILD_ROOT" != / ] && rm -rf "$RPM_BUILD_ROOT"'
    sections = sections + SimpleTagCompiler(optional_sections, mandatory=0).compile(spec)
    return sections
|
def save_trailer(self, tocpos):
    """Write the CArchive trailer at the current position in ``self.lib``.

    CArchives can be opened from the end: the trailer records the total
    archive length, the TOC position and length, and the Python version
    that built the archive.
    """
    pyvers = sys.version_info[0] * 10 + sys.version_info[1]
    total_len = tocpos + self.toclen + self.TRLLEN
    trailer = struct.pack(self.TRLSTRUCT, self.MAGIC, total_len, tocpos, self.toclen, pyvers)
    self.lib.write(trailer)
|
def undo(self):
    """Undo the last command by adding its inverse action to the stack.

    This method automatically takes care of applying the correct
    inverse action when it is called consecutively (ie. without
    calling ``push`` in between).

    The ``qtesigSavedState`` signal is triggered whenever enough undo
    operations have been performed to put the document back into the
    last saved state.

    .. warning: The ``qtesigSavedState`` signal is triggered whenever
       the logic of the undo operations **should** have led back to
       that state, but since the ``UndoStack`` only stacks
       ``QtmacsUndoCommand`` objects it may well be the document is
       **not** in the last saved state, eg. because not all
       modifications were protected by undo objects, or because the
       ``QtmacsUndoCommand`` objects have a bug. It is therefore
       advisable to check in the calling class if the content is
       indeed identical by comparing it with a temporarily stored
       copy.

    |Args|
      * **None**
    |Signals|
      * ``qtesigSavedState``: the document is in the last saved state.
    |Returns|
      * **None**
    |Raises|
      * **None**
    """
    # If this is the first call to this method after a ``push`` then
    # reset ``_qteIndex`` to the last element, otherwise just
    # decrease it (consecutive undos walk backwards).
    if not self._wasUndo:
        self._qteIndex = len(self._qteStack)
    else:
        self._qteIndex -= 1
    # Flag that the last action was an `undo` operation.
    self._wasUndo = True
    # Nothing left to undo.
    if self._qteIndex <= 0:
        return
    # Make a copy of the command and push it to the stack; pushing the
    # copy (rather than popping) records the inverse operation.
    undoObj = self._qteStack[self._qteIndex - 1]
    undoObj = QtmacsUndoCommand(undoObj)
    self._push(undoObj)
    # If the just-pushed undo object restored the last saved state
    # then trigger the ``qtesigSavedState`` signal and refresh the
    # saved-state bookkeeping. This is necessary because an undo
    # command does not *remove* elements from the stack but *adds*
    # the inverse operation; once enough undos have been performed to
    # reach the last saved state, the latest addition implicitly
    # becomes the new save point.
    if (self._qteIndex - 1) == self._qteLastSavedUndoIndex:
        self.qtesigSavedState.emit(QtmacsMessage())
        self.saveState()
|
def lengths(self, sr, polylines, lengthUnit, calculationType):
    """Calculate 2D Euclidean or geodesic lengths of polylines.

    The lengths operation is performed on a geometry service resource and
    computes the length of each polyline in the input array.

    Inputs:
       polylines - array of polylines whose lengths are to be computed
                   (structured as JSON geometry objects returned by the
                   ArcGIS REST API).
       sr - spatial reference WKID of the input geometries.
       lengthUnit - unit in which lengths of polylines will be calculated.
       calculationType - defines the length calculation for the geometry;
                         one of 'planar', 'geodesic', 'preserveShape'.
    """
    if calculationType not in ('planar', 'geodesic', 'preserveShape'):
        raise AttributeError("Invalid calculation Type")
    params = {
        "f": "json",
        "sr": sr,
        "polylines": self.__geomToStringArray(geometries=polylines, returnType="list"),
        "lengthUnit": lengthUnit,
        "calculationType": calculationType,
    }
    return self._get(url=self._url + "/lengths", param_dict=params,
                     securityHandler=self._securityHandler,
                     proxy_url=self._proxy_url,
                     proxy_port=self._proxy_port)
|
def parse_exposure(self, node):
    """Parse an <Exposure> element.

    @param node: Node containing the <Exposure> element
    @type node: xml.etree.Element

    @raise ParseError: Raised when the exposure is defined outside a
        component type, or the mandatory 'name'/'dimension' attributes
        are missing.
    """
    # Fix: compare against None with `is`, not `==`.
    if self.current_component_type is None:
        self.raise_error('Exposures must be defined in a component type')
    # Fix: catch only the KeyError from the missing attribute instead of
    # a bare `except`, which also hid unrelated failures.
    try:
        name = node.lattrib['name']
    except KeyError:
        self.raise_error('<Exposure> must specify a name')
    try:
        dimension = node.lattrib['dimension']
    except KeyError:
        self.raise_error("Exposure '{0}' must specify a dimension", name)
    description = node.lattrib.get('description', '')
    self.current_component_type.add_exposure(Exposure(name, dimension, description))
|
def new(self, name, summary=None, description=None, protected=None, restricted=None, download_restricted=None, contains_phi=None, tags=None, properties=None, bill_to=None, **kwargs):
    """Create a new project and bind this handler to it.

    :param name: The name of the project
    :type name: string
    :param summary: If provided, a short summary of what the project contains
    :type summary: string
    :param description: If provided, the new project description
    :type description: string
    :param protected: If provided, whether the project should be protected
    :type protected: boolean
    :param restricted: If provided, whether the project should be restricted
    :type restricted: boolean
    :param download_restricted: If provided, whether external downloads should be restricted
    :type download_restricted: boolean
    :param contains_phi: If provided, whether the project should be marked as containing protected health information (PHI)
    :type contains_phi: boolean
    :param tags: If provided, tags to associate with the project
    :type tags: list of strings
    :param properties: If provided, properties to associate with the project
    :type properties: dict
    :param bill_to: If provided, ID of the entity to which any costs associated with this project will be billed; must be the ID of the requesting user or an org of which the requesting user is a member with allowBillableActivities permission
    :type bill_to: string

    Initially only the user performing this action will be in the
    permissions/member list, with ADMINISTER access. See the API
    documentation for the `/project/new
    <https://wiki.dnanexus.com/API-Specification-v1.0.0/Projects#API-method%3A-%2Fproject%2Fnew>`_
    method for more info.
    """
    payload = {"name": name}
    # Optional fields, in the same insertion order as before.
    optional_fields = (
        ("summary", summary),
        ("description", description),
        ("protected", protected),
        ("restricted", restricted),
        ("downloadRestricted", download_restricted),
        ("containsPHI", contains_phi),
        ("billTo", bill_to),
        ("tags", tags),
        ("properties", properties),
    )
    for field, value in optional_fields:
        if value is not None:
            payload[field] = value
    self.set_id(dxpy.api.project_new(payload, **kwargs)["id"])
    self._desc = {}
    return self._dxid
|
def get(self, request):
    """Forwards to CAS login URL or verifies a CAS ticket.

    :param request: the incoming HTTP request; may carry ``next``,
        ``required`` and ``ticket`` query parameters.
    :return: a redirect on success or towards the CAS server; raises
        ``PermissionDenied`` when ticket verification fails and no retry
        is configured.
    """
    next_page = request.GET.get('next')
    required = request.GET.get('required', False)
    service_url = get_service_url(request, next_page)
    client = get_cas_client(service_url=service_url, request=request)
    # Recover a previously stored post-login destination, if any.
    if not next_page and settings.CAS_STORE_NEXT and 'CASNEXT' in request.session:
        next_page = request.session['CASNEXT']
        del request.session['CASNEXT']
    if not next_page:
        next_page = get_redirect_url(request)
    # Already logged in: optionally notify, then proceed.
    if request.user.is_authenticated:
        if settings.CAS_LOGGED_MSG is not None:
            message = settings.CAS_LOGGED_MSG % request.user.get_username()
            messages.success(request, message)
        return self.successful_login(request=request, next_page=next_page)
    ticket = request.GET.get('ticket')
    if ticket:
        # Returning from the CAS server: validate the service ticket.
        user = authenticate(ticket=ticket, service=service_url, request=request)
        pgtiou = request.session.get("pgtiou")
        if user is not None:
            auth_login(request, user)
            # Ensure a session key exists before tying the ticket to it.
            if not request.session.exists(request.session.session_key):
                request.session.create()
            SessionTicket.objects.create(session_key=request.session.session_key, ticket=ticket)
            if pgtiou and settings.CAS_PROXY_CALLBACK:
                # Delete old PGT
                ProxyGrantingTicket.objects.filter(user=user, session_key=request.session.session_key).delete()
                # Set new PGT ticket
                try:
                    pgt = ProxyGrantingTicket.objects.get(pgtiou=pgtiou)
                    pgt.user = user
                    pgt.session_key = request.session.session_key
                    pgt.save()
                except ProxyGrantingTicket.DoesNotExist:
                    pass
            if settings.CAS_LOGIN_MSG is not None:
                name = user.get_username()
                message = settings.CAS_LOGIN_MSG % name
                messages.success(request, message)
            return self.successful_login(request=request, next_page=next_page)
        elif settings.CAS_RETRY_LOGIN or required:
            # Invalid ticket but retry allowed: bounce back to CAS.
            return HttpResponseRedirect(client.get_login_url())
        else:
            raise PermissionDenied(_('Login failed.'))
    else:
        # No ticket yet: remember the destination and go to the CAS server.
        if settings.CAS_STORE_NEXT:
            request.session['CASNEXT'] = next_page
        return HttpResponseRedirect(client.get_login_url())
|
def choose_boundary():
    """Generate a unique multipart boundary string.

    The prefix (program name plus uid/pid when available) is computed once
    and cached in the module-level ``BOUNDARY_PREFIX``; each call appends a
    fresh UUID hex so every boundary is unique.

    :returns: A boundary string
    """
    global BOUNDARY_PREFIX
    if BOUNDARY_PREFIX is None:
        parts = ["urlfetch"]
        # os.getuid() does not exist on all platforms (e.g. Windows);
        # silently skip the missing pieces, matching best-effort behavior.
        for accessor_name in ("getuid", "getpid"):
            accessor = getattr(os, accessor_name, None)
            if accessor is not None:
                parts.append(repr(accessor()))
        BOUNDARY_PREFIX = ".".join(parts)
    return "%s.%s" % (BOUNDARY_PREFIX, uuid.uuid4().hex)
|
def get_argument(  # noqa: F811
    self, name: str, default: Union[None, str, _ArgDefaultMarker] = _ARG_DEFAULT, strip: bool = True, ) -> Optional[str]:
    """Return the value of the request argument with the given name.

    If ``default`` is not provided, the argument is considered required and
    a ``MissingArgumentError`` is raised when it is missing. If the argument
    appears in the request more than once, the last value is returned.

    Both query and body arguments are searched.
    """
    combined_args = self.request.arguments
    return self._get_argument(name, default, combined_args, strip)
|
def add_collection(self, property_name, use_context=True):
    """Register a collection property on this schema.

    :param property_name: str, name of the property to add
    :param use_context: bool, whether the collection uses the schema context
    :return: shiftschema.property.CollectionProperty
    :raises PropertyExists: if a property with that name is already defined
    """
    if self.has_property(property_name):
        raise PropertyExists(
            'Property "{}" already exists'.format(property_name))
    collection_prop = CollectionProperty(use_context=bool(use_context))
    self.collections[property_name] = collection_prop
    return collection_prop
|
def update_firmware(self, hardware_id, ipmi=True, raid_controller=True, bios=True, hard_drive=True):
    """Kick off a firmware update transaction for a server.

    This will cause the server to be unavailable for ~20 minutes.

    :param int hardware_id: The ID of the hardware to have its firmware
                            updated.
    :param bool ipmi: Update the ipmi firmware.
    :param bool raid_controller: Update the raid controller firmware.
    :param bool bios: Update the bios firmware.
    :param bool hard_drive: Update the hard drive firmware.

    Example::

        # Check the servers active transactions to see progress
        result = mgr.update_firmware(hardware_id=1234)
    """
    # Normalize every flag to a strict bool before handing off to the API.
    flags = (bool(ipmi), bool(raid_controller), bool(bios), bool(hard_drive))
    return self.hardware.createFirmwareUpdateTransaction(*flags, id=hardware_id)
|
def is_valid(self, context):
    """Check that every required action has already been executed.

    Walks ``self.requires`` (when set) and verifies each entry appears in
    ``context.executed_actions``.

    :param context: object exposing an ``executed_actions`` iterable
    :return: True when all requirements are satisfied (or none exist)
    :raises RequirementMissingError: if a required action has not run yet
    """
    if self.requires:
        # Hoist the attribute lookup out of the loop.
        executed = context.executed_actions
        for requirement in self.requires:
            # 'x not in y' is the idiomatic form of 'not x in y'.
            if requirement not in executed:
                raise RequirementMissingError(
                    "Action '%s' requires '%s'" % (self.name, requirement))
    return True
|
def create_group(value):
    """Build the ``div.group-by`` wrapper node holding a group label.

    :param value: text for the label span
    :return: an ``etree.Element`` ready to be inserted into the tree
    """
    wrapper = etree.Element('div', attrib={'class': 'group-by'})
    # SubElement both creates the span and appends it to the wrapper.
    label = etree.SubElement(wrapper, 'span', attrib={'class': 'group-label'})
    label.text = value
    return wrapper
|
def write_bits(self, stream, raw_bits, padded, left_right, endian):
    """Write the bits. Once the size of the written bits is equal
    to the number of the reserved bits, flush it to the stream.

    :param stream: bit-stream object exposing ``write_bits``
    :param raw_bits: list of bit values for this field
    :param padded: True when this field shares a padded reserved region
        with sibling fields and must be buffered until the region is full
    :param left_right: True when bits accumulate left-to-right
    :param endian: endianness forwarded to ``self._endian_transform``
    """
    if padded:
        # Accumulate this field's bits into the shared buffer; which end
        # they attach to depends on the bit ordering of the bitfield.
        if left_right:
            self._write_bits += raw_bits
        else:
            self._write_bits = raw_bits + self._write_bits
        if len(self._write_bits) == self.reserved_bits:
            # The reserved region is fully populated -- transform and flush.
            bits = self._endian_transform(self._write_bits, endian)
            # if it's padded, and all of the bits in the field weren't used,
            # we need to flush out the unused bits
            # TODO should we save the value of the unused bits so the data that
            # is written out matches exactly what was read?
            if self.reserved_bits < self.cls.width * 8:
                filler = [0] * ((self.cls.width * 8) - self.reserved_bits)
                if left_right:
                    bits += filler
                else:
                    bits = filler + bits
            stream.write_bits(bits)
            self._write_bits = []
    else:
        # if an unpadded field ended up using the same BitfieldRW and
        # as a previous padded field, there will be unwritten bits left in
        # self._write_bits. These need to be flushed out as well
        if len(self._write_bits) > 0:
            stream.write_bits(self._write_bits)
            self._write_bits = []
        stream.write_bits(raw_bits)
|
def writeCell(self, row, col, value):
    """Write a single value into the currently open sheet.

    Lazily opens the default sheet when none is open yet.

    :param row: row index of the target cell
    :param col: column index of the target cell
    :param value: the value to store in the cell
    """
    sheet = self.__sheet
    if sheet is None:
        self.openSheet(super(ExcelWrite, self).DEFAULT_SHEET)
        sheet = self.__sheet
    sheet.write(row, col, value)
|
def patch(self, **kw):
    """Temporarily overlay ``kw`` onto the environment for a context.

    Swaps ``self._environ`` for an updated copy, yields control, and always
    restores the original mapping afterwards.

    :param kw: environment overrides applied for the duration of the context
    """
    old_environ = self._environ
    self._environ = self._environ.copy()
    self._environ.update(kw)
    try:
        yield
    finally:
        # Restore unconditionally: without the try/finally an exception
        # raised inside the managed block would leave the patched
        # environment permanently in place.
        self._environ = old_environ
|
def get_joke():
    """Return a Ron Swanson quote fetched from the public quotes API.

    :return: a formatted quote string, or None when the request fails
    """
    page = requests.get("http://ron-swanson-quotes.herokuapp.com/v2/quotes")
    if page.status_code == 200:
        # The endpoint returns a JSON array containing one quote. Let
        # requests decode it: the old page.content.decode(page.encoding)
        # raised TypeError whenever page.encoding was None, and the
        # preceding `jokes = []` assignment was dead code.
        jokes = page.json()
        return '"' + jokes[0] + '" - Ron Swanson'
    return None
|
def key_to_metric(self, key):
    """Replace all non-letter characters in *key* with underscores.

    :param key: the raw key string
    :return: a sanitized metric name containing only ASCII letters and
        underscores
    """
    # string.letters existed only in Python 2 (and was locale-dependent);
    # string.ascii_letters works on both Python 2 and 3.
    allowed = string.ascii_letters
    return ''.join(ch if ch in allowed else '_' for ch in key)
|
def is_eighth_sponsor(self):
    """Report whether this user has an associated
    :class:`intranet.apps.eighth.models.EighthSponsor` and therefore should
    see activity sponsoring information.
    """
    # FIXME: remove recursive dep
    from ..eighth.models import EighthSponsor
    sponsor_qs = EighthSponsor.objects.filter(user=self)
    return sponsor_qs.exists()
|
def _build_str_from_time_items ( items ) :
"""根据解析出的时间字符串关键字计算标准时间表示格式的字符串
: return : 标准时间格式字符串表示形式"""
|
if not items :
return None
items = [ int ( item ) for item in items if item ]
items = items + [ 0 for _ in xrange ( 6 - len ( items ) ) ]
return '%d-%02d-%02d %02d:%02d:%02d' % ( items [ 0 ] , items [ 1 ] , items [ 2 ] , items [ 3 ] , items [ 4 ] , items [ 5 ] )
|
def nlmsg_for_each_attr(nlh, hdrlen, rem):
    """Iterate over a stream of attributes in a message.

    https://github.com/thom311/libnl/blob/libnl3_2_25/include/netlink/msg.h#L123

    Positional arguments:
    nlh -- Netlink message header (nlmsghdr class instance).
    hdrlen -- length of family header (integer).
    rem -- initialized to len, holds bytes currently remaining in stream (c_int).

    Returns:
    Generator yielding attribute instances.
    """
    attr_head = nlmsg_attrdata(nlh, hdrlen)
    attr_len = nlmsg_attrlen(nlh, hdrlen)
    return nla_for_each_attr(attr_head, attr_len, rem)
|
def destroy(self, scriptid, params=None):
    '''/v1/startupscript/destroy
    POST - account
    Remove a startup script
    Link: https://www.vultr.com/api/#startupscript_destroy'''
    # Merge the script id into any caller-supplied parameters.
    payload = update_params(params, {'SCRIPTID': scriptid})
    return self.request('/v1/startupscript/destroy', payload, 'POST')
|
def is_dicteq(dict1_, dict2_, almosteq_ok=True, verbose_err=True):
    """Check whether two dicts are equal. Recurses into nested dicts and
    handles numpy values.

    Keys are matched by lookup rather than by zipped iteration order, so two
    equal dicts with different insertion orders compare equal (the previous
    ``zip(dict1_.items(), dict2_.items())`` pairing was order-sensitive).

    :param dict1_: first dict
    :param dict2_: second dict
    :param almosteq_ok: allow approximate equality for float values
    :param verbose_err: print the failing assertion via utool on mismatch
    :return: True when the dicts match, False otherwise
    :raises AssertionError: when the dicts differ in length
    """
    import utool as ut
    assert len(dict1_) == len(dict2_), 'dicts are not of same length'
    try:
        for key1, val1 in dict1_.items():
            # Look the key up directly instead of pairing by position.
            assert key1 in dict2_, 'key mismatch'
            val2 = dict2_[key1]
            assert type(val1) == type(val2), 'vals are not same type'
            if HAVE_NUMPY and np.iterable(val1):
                if almosteq_ok and ut.is_float(val1):
                    assert np.all(ut.almost_eq(val1, val2)), 'float vals are not within thresh'
                else:
                    assert all([np.all(x1 == x2) for (x1, x2) in zip(val1, val2)]), 'np vals are different'
            elif isinstance(val1, dict):
                # Assert the recursive result: previously a nested mismatch
                # returned False but was silently ignored, so the outer call
                # still reported True.
                assert is_dicteq(val1, val2, almosteq_ok=almosteq_ok,
                                 verbose_err=verbose_err), 'nested dicts are different'
            else:
                assert val1 == val2, 'vals are different'
    except AssertionError as ex:
        if verbose_err:
            ut.printex(ex)
        return False
    return True
|
def p_fromitem_list(self, t):
    """fromitem_list : fromitem_list ',' fromitem
                     | fromitem"""
    # NOTE: the docstring above is the PLY grammar production for this rule;
    # the parser generator reads it, so it must stay a grammar spec, not prose.
    if len(t) == 2:
        # Single fromitem: start a fresh list.
        t[0] = [t[1]]
    elif len(t) == 4:
        # fromitem_list ',' fromitem: append the new item to the list.
        t[0] = t[1] + [t[3]]
    else:
        # Defensive: neither alternative matched the token count.
        raise NotImplementedError('unk_len', len(t))
    # pragma: no cover
|
def loadJSON(self, jdata):
    """Load the given JSON information for this column.

    :param jdata: <dict>
    """
    super(ReferenceColumn, self).loadJSON(jdata)
    # load additional information, keeping current values as fallbacks
    reference = jdata.get('reference')
    if reference:
        self.__reference = reference
    remove_action = jdata.get('removeAction')
    if remove_action:
        self.__removeAction = remove_action
|
def _mock_request(self, **kwargs):
    """A mocked-out make_request call that bypasses all network calls and
    simply returns any mocked responses defined.
    """
    model = kwargs.get('model')
    service_name = model.service_model.endpoint_prefix
    operation_name = model.name
    LOG.debug('_make_request: %s.%s', service_name, operation_name)
    return self.load_response(service_name, operation_name)
|
def has_update(self):
    """Depending on the interval:
    returns True (after refreshing the data) if it is time for an update,
    returns False if it is not yet time for an update.
    """
    # Guard clause: nothing to do until the deadline has passed.
    if time() <= self.next_update:
        return False
    self.update_data()
    self.next_update = time() + self.interval
    return True
|
def _compare_expected(expected, output, sess, onnx, decimal=5, onnx_shape=None, **kwargs):
    """Compares the expected output against the runtime outputs.
    This is specific to *onnxruntime* due to variable *sess*
    of type *onnxruntime.InferenceSession*.

    :param expected: expected result (list, dict, numpy array, or scipy
        csr_matrix); lists are handled recursively per output
    :param output: actual output produced by the runtime
    :param sess: onnxruntime.InferenceSession, used to query output shapes
    :param onnx: identifier of the model, used only in error messages
    :param decimal: numeric precision forwarded to ``compare_outputs``
    :param onnx_shape: declared output shape to validate against (optional)
    :param kwargs: comparison options forwarded to ``compare_outputs``
        (the ``Out0`` and ``Reshape`` flags are consumed here)
    :raises OnnxRuntimeAssertionError: on any type/shape/value mismatch or
        when nothing was actually compared
    """
    tested = 0  # number of leaf comparisons performed; must end up > 0
    if isinstance(expected, list):
        if isinstance(output, list):
            onnx_shapes = [_.shape for _ in sess.get_outputs()]
            if 'Out0' in kwargs:
                # Only the first output is compared.
                expected = expected[:1]
                output = output[:1]
                del kwargs['Out0']
            if 'Reshape' in kwargs:
                # Flatten all outputs, then reshape to one row per expected item.
                del kwargs['Reshape']
                output = numpy.hstack(output).ravel()
                output = output.reshape((len(expected), len(output.ravel()) // len(expected)))
            if len(expected) != len(output):
                raise OnnxRuntimeAssertionError("Unexpected number of outputs '{0}', expected={1}, got={2}".format(onnx, len(expected), len(output)))
            # Recurse pairwise over (expected, actual, declared shape).
            for exp, out, osh in zip(expected, output, onnx_shapes):
                _compare_expected(exp, out, sess, onnx, decimal=decimal, onnx_shape=osh, **kwargs)
                tested += 1
        else:
            raise OnnxRuntimeAssertionError("Type mismatch for '{0}', output type is {1}".format(onnx, type(output)))
    elif isinstance(expected, dict):
        if not isinstance(output, dict):
            raise OnnxRuntimeAssertionError("Type mismatch for '{0}'".format(onnx))
        for k, v in output.items():
            if k not in expected:
                # Runtime outputs without an expected counterpart are ignored.
                continue
            msg = compare_outputs(expected[k], v, decimal=decimal, **kwargs)
            if msg:
                raise OnnxRuntimeAssertionError("Unexpected output '{0}' in model '{1}'\n{2}".format(k, onnx, msg))
            tested += 1
    elif isinstance(expected, numpy.ndarray):
        if isinstance(output, list):
            if expected.shape[0] == len(output) and isinstance(output[0], dict):
                # List of per-row dicts (e.g. label->probability maps):
                # normalize into a column-sorted 2-D array before comparing.
                import pandas
                output = pandas.DataFrame(output)
                output = output[list(sorted(output.columns))]
                output = output.values
        if isinstance(output, (dict, list)):
            if len(output) != 1:
                # Truncate long reprs so the error message stays readable.
                ex = str(output)
                if len(ex) > 70:
                    ex = ex[:70] + "..."
                raise OnnxRuntimeAssertionError("More than one output when 1 is expected for onnx '{0}'\n{1}".format(onnx, ex))
            output = output[-1]
        if not isinstance(output, numpy.ndarray):
            raise OnnxRuntimeAssertionError("output must be an array for onnx '{0}' not {1}".format(onnx, type(output)))
        if onnx_shape is not None:
            if len(onnx_shape) == 2:
                # Only the column count is validated; the first (batch)
                # dimension is allowed to vary.
                cols = onnx_shape[1]
                ecols = output.shape[1] if len(output.shape) == 2 else 1
                if cols != ecols:
                    raise OnnxRuntimeAssertionError("Unexpected onnx shape {0} != {1} for onnx '{2}'".format(onnx_shape, output.shape, onnx))
        msg = compare_outputs(expected, output, decimal=decimal, **kwargs)
        if isinstance(msg, ExpectedAssertionError):
            # compare_outputs signalled a deliberately-expected failure.
            raise msg
        if msg:
            raise OnnxRuntimeAssertionError("Unexpected output in model '{0}'\n{1}".format(onnx, msg))
        tested += 1
    else:
        from scipy.sparse.csr import csr_matrix
        if isinstance(expected, csr_matrix):  # DictVectorizer
            one_array = numpy.array(output)
            msg = compare_outputs(expected.todense(), one_array, decimal=decimal, **kwargs)
            if msg:
                raise OnnxRuntimeAssertionError("Unexpected output in model '{0}'\n{1}".format(onnx, msg))
            tested += 1
        else:
            raise OnnxRuntimeAssertionError("Unexpected type for expected output ({1}) and onnx '{0}'".format(onnx, type(expected)))
    if tested == 0:
        raise OnnxRuntimeAssertionError("No test for onnx '{0}'".format(onnx))
|
def absSymPath(path):
    """Like os.path.abspath except it doesn't dereference symlinks.

    :param path: a possibly-relative path
    :return: *path* joined onto the current working directory and
        normalized, without resolving any symlink components
    """
    return os.path.normpath(os.path.join(os.getcwd(), path))
|
def delete_user_rating(self, item_type, item_id):
    """Delete the current user's rating for the specified element type.

    :param item_type: One of: series, episode, banner.
    :param item_id: The TheTVDB Id of the item.
    :return: a python dictionary with either the result of the call or an
        error from TheTVDB.
    """
    url = self.API_BASE_URL + '/user/ratings/%s/%d' % (item_type, item_id)
    raw_response = requests_util.run_request('delete', url, headers=self.__get_header_with_auth())
    return self.parse_raw_response(raw_response)
|
def stream_events(signals: Sequence[Signal], filter: Callable[[T_Event], bool] = None, *, max_queue_size: int = 0) -> AsyncIterator[T_Event]:
    """Return an async generator that yields events from the given signals.

    Only events that pass the filter callable (if one has been given) are
    returned. If no filter function was given, all events are yielded from
    the generator.

    :param signals: the signals to get events from
    :param filter: a callable that takes an event object as an argument and
        returns ``True`` if the event should pass, ``False`` if not
    :param max_queue_size: maximum size of the queue, after which it will
        start to drop events
    """
    @async_generator
    async def streamer():
        # Pull events off the shared queue until the generator is closed;
        # teardown runs whether the loop exits normally or via close()/GC.
        try:
            while True:
                event = await queue.get()
                if filter is None or filter(event):
                    await yield_(event)
        finally:
            cleanup()
    def cleanup():
        # Idempotent teardown: disconnect the queue from every signal and
        # drop the reference so a second invocation is a no-op.
        nonlocal queue
        if queue is not None:
            for signal in signals:
                signal.disconnect(queue.put_nowait)
            queue = None
    assert check_argument_types()
    queue = Queue(max_queue_size)  # type: Queue[T_Event]
    for signal in signals:
        signal.connect(queue.put_nowait)
    gen = [streamer()]
    # this is to allow the reference count to drop to 0
    # (the finalizer must not itself hold a strong reference to the
    # generator, hence the list + pop dance)
    weakref.finalize(gen[0], cleanup)
    return gen.pop()
|
def create_repository_configuration(repository, no_sync=False):
    """Create a new RepositoryConfiguration. If the provided repository URL
    is for an external repository, it is cloned into an internal one.

    :return: BPM Task ID of the new RepositoryConfiguration creation,
        formatted as JSON, or None when creation returned nothing
    """
    created = create_repository_configuration_raw(repository, no_sync)
    if not created:
        return None
    return utils.format_json(created)
|
def likelihood(x, m=None, Cinv=None, sigma=1, detC=None):
    """Return likelihood of ``x`` for the normal density
    ``N(m, sigma**2 * Cinv**-1)``.

    :param x: sample point (1-D numpy array)
    :param m: mean vector; ``None`` means a zero mean
    :param Cinv: inverse covariance matrix; ``None`` means the identity
    :param sigma: scalar scale factor
    :param detC: precomputed determinant of ``Cinv**-1``; computed from
        ``Cinv`` when omitted
    :return: the density value at ``x``
    """
    # testing: MC integrate must be one: mean(p(x_i)) * volume (where x_i
    # are uniformly sampled)
    # for i in xrange(3): print mean([cma.likelihood(20 * r - 10, dim * [0], None, 3) for r in rand(10000, dim)]) * 20**dim
    if m is None:
        dx = x
    else:
        dx = x - m
    n = len(x)
    s2pi = (2 * np.pi) ** (n / 2.)
    if Cinv is None:
        # Isotropic case: independent coordinates with variance sigma**2.
        return np.exp(-sum(dx ** 2) / sigma ** 2 / 2) / s2pi / sigma ** n
    if detC is None:
        # np.linalg.linalg was a private alias (removed in NumPy 2.0);
        # use the public np.linalg.det instead.
        detC = 1. / np.linalg.det(Cinv)
    return np.exp(-np.dot(dx, np.dot(Cinv, dx)) / sigma ** 2 / 2) / s2pi / abs(detC) ** 0.5 / sigma ** n
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.