signature stringlengths 29 44.1k | implementation stringlengths 0 85.2k |
|---|---|
def title(cls, message=None):
    """Get or set the title of the worker process.

    Args:
        message: Optional suffix for the process title. When None, the
            current process title is returned instead of being set.

    Returns:
        The current process title when ``message`` is None, otherwise None.
    """
    # Identity check: `is None` is the correct idiom (== None can be
    # hijacked by custom __eq__ implementations).
    if message is None:
        return getproctitle()
    setproctitle('qless-py-worker %s' % message)
    logger.info(message)
def get_frequencies_with_eigenvectors(self, q):
    """Calculate phonon frequencies and eigenvectors at a given q-point.

    Parameters
    ----------
    q : array_like
        A q-vector. shape=(3,)

    Returns
    -------
    (frequencies, eigenvectors)
        frequencies : ndarray
            Phonon frequencies. Imaginary modes (negative eigenvalues of
            the dynamical matrix) are reported as negative frequencies.
            shape=(bands,), dtype='double', order='C'
        eigenvectors : ndarray
            Phonon eigenvectors.
            shape=(bands, bands), dtype='complex', order='C'

    Raises
    ------
    RuntimeError
        If the dynamical matrix has not been built.
    """
    self._set_dynamical_matrix()
    if self._dynamical_matrix is None:
        raise RuntimeError("Dynamical matrix has not yet built.")
    self._dynamical_matrix.set_dynamical_matrix(q)
    dm = self._dynamical_matrix.get_dynamical_matrix()
    eigvals, eigenvectors = np.linalg.eigh(dm)
    # Fix: the original initialized `frequencies = []` twice (dead
    # assignment). Encode negative eigenvalues (imaginary modes) as
    # negative frequencies, the usual phonon-code convention.
    frequencies = [-np.sqrt(-eig) if eig < 0 else np.sqrt(eig)
                   for eig in eigvals]
    return np.array(frequencies) * self._factor, eigenvectors
def compute_laplacian_matrix(self, copy=True, return_lapsym=False, **kwargs):
    """Compute (and cache) the laplacian matrix of this geometry.

    Note: this always re-computes the laplacian; to reuse the existing
    one, read ``self.laplacian_matrix`` directly.

    Parameters
    ----------
    copy : bool
        Whether to return a copied version of self.laplacian_matrix.
    return_lapsym : bool
        If True, additionally store the symmetrized laplacian and the
        re-normalization weights on ``self``.
    **kwargs
        Forwarded to the laplacian computation; see laplacian.py for the
        arguments accepted by each method.

    Returns
    -------
    sparse matrix (N_obs, N_obs)
        The requested laplacian (a copy when ``copy`` is True).
    """
    # The laplacian is derived from the affinity matrix; build it first
    # if it does not exist yet.
    if self.affinity_matrix is None:
        self.compute_affinity_matrix()
    kwds = self.laplacian_kwds.copy()
    kwds.update(kwargs)
    kwds['full_output'] = return_lapsym
    # NOTE: delegates to the module-level helper of the same name.
    result = compute_laplacian_matrix(self.affinity_matrix,
                                      self.laplacian_method, **kwds)
    if return_lapsym:
        (self.laplacian_matrix,
         self.laplacian_symmetric,
         self.laplacian_weights) = result
    else:
        self.laplacian_matrix = result
    return self.laplacian_matrix.copy() if copy else self.laplacian_matrix
def _parse_procedures(self, contents, iface):
    """Parse the list of procedures or signatures defined in the generic,
    operator or assignment interface.

    :arg contents: the text between the interface ... end interface keywords.
    :arg iface: the fortpy.elements.Interface instance that will own the
      parsed procedures/signatures.
    """
    procs = contents.split("module procedure")
    stripped = [p.strip() for p in procs if p.strip() != ""]
    for embed in stripped:
        # Extract the procedure names, then build fully qualified
        # "module.procedure" keys for the Interface's procedures list.
        # Fix: raw string for the regex — "\s" in a plain string literal
        # is an invalid escape sequence (DeprecationWarning on Python 3).
        methods = re.split(r",\s*", embed.replace("&\n", ""))
        keys = ["{}.{}".format(iface.module.name, m) for m in methods]
        iface.procedures.extend(keys)
def prepare_env(app, env, docname):
    """Prepare the sphinx environment to store sphinx-needs internal data."""
    # Storage for all information about all needs in the document set.
    if not hasattr(env, 'needs_all_needs'):
        env.needs_all_needs = {}
    # Storage for registered functions supporting dynamic need values.
    if not hasattr(env, 'needs_functions'):
        env.needs_functions = {}
    needs_functions = app.needs_functions
    if needs_functions is None:
        needs_functions = []
    if not isinstance(needs_functions, list):
        raise SphinxError('Config parameter needs_functions must be a list!')
    # Register built-in functions first, then user-configured ones.
    for common_func in needs_common_functions:
        register_func(env, common_func)
    for user_func in needs_functions:
        register_func(env, user_func)
    app.config.needs_hide_options += ['hidden']
    app.config.needs_extra_options['hidden'] = directives.unchanged
    # Workflow bookkeeping: tasks like backlink creation must run exactly
    # once, but sphinx events fire once per document file, so completed
    # steps are recorded here.
    if not hasattr(env, 'needs_workflow'):
        env.needs_workflow = {
            'backlink_creation': False,
            'dynamic_values_resolved': False,
        }
def sample(self, bqm, apply_flux_bias_offsets=True, **kwargs):
    """Sample from the given binary quadratic model.

    Args:
        bqm: Binary quadratic model to sample from.
        apply_flux_bias_offsets (bool, optional):
            If True, use the calculated flux-bias offsets (if available).
        **kwargs:
            Optional keyword arguments for the sampling method,
            specified per solver.

    Returns:
        The response obtained from the child sampler.
    """
    # Forward the stored flux biases to the child sampler only when both
    # requested and available.
    if apply_flux_bias_offsets and self.flux_biases is not None:
        kwargs[FLUX_BIAS_KWARG] = self.flux_biases
    return self.child.sample(bqm, **kwargs)
def myreplace(astr, thefind, thereplace):
    """Return *astr* with all occurrences of *thefind* replaced by *thereplace*.

    Bug fix: the previous implementation called ``split`` on the list of
    fragments (AttributeError); joining the fragments with the
    replacement string is the correct inverse of the split.
    """
    return thereplace.join(astr.split(thefind))
def _get_lineage(self, tax_id, merge_obsolete=True):
    """Return a list of [(rank, tax_id)] describing the lineage of
    tax_id. If ``merge_obsolete`` is True and ``tax_id`` has been
    replaced, use the corresponding value in table merged.

    Raises ValueError when the tax id cannot be found.
    """
    # Be sure we aren't working with an obsolete tax_id
    if merge_obsolete:
        tax_id = self._get_merged(tax_id)
    # Note: joining with ranks seems like a no-op, but for some
    # reason it results in a faster query using sqlite, as well as
    # an ordering from leaf --> root. Might be a better idea to
    # sort explicitly if this is the expected behavior, but it
    # seems like for the most part, the lineage is converted to a
    # dict and the order is irrelevant.
    # Recursive CTE walking parent_id links from tax_id up to the root;
    # the bare `{}` is filled with the driver's bind-parameter
    # placeholder (self.placeholder), not with a literal value.
    cmd = """
    WITH RECURSIVE a AS (
        SELECT tax_id, parent_id, rank
        FROM {nodes}
        WHERE tax_id = {}
        UNION ALL
        SELECT p.tax_id, p.parent_id, p.rank
        FROM a JOIN {nodes} p ON a.parent_id = p.tax_id
    )
    SELECT a.rank, a.tax_id FROM a
    JOIN {ranks} using(rank)
    """.format(self.placeholder, nodes=self.nodes, ranks=self.ranks_table)
    # with some versions of sqlite3, an error is raised when no
    # rows are returned; with others, an empty list is returned.
    try:
        with self.engine.connect() as con:
            result = con.execute(cmd, (tax_id,))
            # reorder so that root is first
            lineage = result.fetchall()[::-1]
    except sqlalchemy.exc.ResourceClosedError:
        lineage = []
    if not lineage:
        raise ValueError('tax id "{}" not found'.format(tax_id))
    return lineage
def update_todo_menu(self):
    """Rebuild the todo-list menu from the current editor's todo results."""
    editorstack = self.get_current_editorstack()
    todo_results = editorstack.get_todo_results()
    self.todo_menu.clear()
    fname = self.get_current_filename()
    for text, line_number in todo_results:
        # Bind the line number as a default argument so each action jumps
        # to its own line (avoids the late-binding closure trap).
        trigger = lambda _checked, _line=line_number: self.load(fname, goto=_line)
        self.todo_menu.addAction(
            create_action(self, text=text, icon=ima.icon('todo'),
                          triggered=trigger))
    self.update_todo_actions()
def searchPageFor(doc, pno, text, hit_max=16, quads=False):
    """Search for a string on a page.

    Args:
        doc: the document containing the page
        pno: page number
        text: string to be searched for
        hit_max: maximum hits
        quads: return quads instead of rectangles
    Returns:
        A list of rectangles or quads, each containing an occurrence.
    """
    page = doc[pno]
    return page.searchFor(text, hit_max=hit_max, quads=quads)
def _field_to_json(field, row_value):
    """Convert a field into JSON-serializable values.

    Args:
        field (:class:`~google.cloud.bigquery.schema.SchemaField`,):
            The SchemaField to use for type conversion and field name.
        row_value (Union[Sequence[list], any,]):
            Row data to be inserted. If the SchemaField's mode is
            REPEATED, assume this is a list. If not, the type
            is inferred from the SchemaField's field_type.

    Returns:
        any: A JSON-serializable object.
    """
    if row_value is None:
        return None
    # REPEATED fields hold lists, RECORD fields hold nested rows, and
    # everything else is treated as a scalar.
    if field.mode == "REPEATED":
        return _repeated_field_to_json(field, row_value)
    elif field.field_type == "RECORD":
        return _record_field_to_json(field.fields, row_value)
    else:
        return _scalar_field_to_json(field, row_value)
def announced(self, state, announcement):
    '''This part of contract is just to let the guy know we are here.

    Builds the problem from the announcement payload and replies with an
    empty bid to signal presence.
    '''
    # Construct the problem instance from the announcement's payload.
    state.problem = state.factory(state.agent, **announcement.payload)
    # Reply with an empty Bid message over the agent's medium.
    state.medium.bid(message.Bid())
def removeBinder(self, name):
    """Remove a binder from the bindings table.

    :param name: tag name of the binder element to remove.
    :returns: True if the binder was found and removed, False otherwise.
    """
    root = self.etree
    t_bindings = root.find('bindings')
    t_binder = t_bindings.find(name)
    # Fix: compare against None explicitly — an ElementTree Element with
    # no children is falsy, so `if t_binder:` would wrongly skip removal
    # of an empty (but present) binder element.
    if t_binder is not None:
        t_bindings.remove(t_binder)
        return True
    return False
def ccnot_circuit(qubits: Qubits) -> Circuit:
    """Standard decomposition of the CCNOT (Toffoli) gate into six CNOT
    gates (plus Hadamard and T gates). [Nielsen2000]_

    .. [Nielsen2000]
        M. A. Nielsen and I. L. Chuang, Quantum Computation and Quantum
        Information, Cambridge University Press (2000).
    """
    if len(qubits) != 3:
        raise ValueError('Expected 3 qubits')
    q0, q1, q2 = qubits
    # Gate sequence from Nielsen & Chuang, applied in order.
    gates = [
        H(q2),
        CNOT(q1, q2),
        T(q2).H,
        CNOT(q0, q2),
        T(q2),
        CNOT(q1, q2),
        T(q2).H,
        CNOT(q0, q2),
        T(q1),
        T(q2),
        H(q2),
        CNOT(q0, q1),
        T(q0),
        T(q1).H,
        CNOT(q0, q1),
    ]
    circ = Circuit()
    for gate in gates:
        circ += gate
    return circ
def four_motor_swerve_drivetrain(lr_motor, rr_motor, lf_motor, rf_motor,
                                 lr_angle, rr_angle, lf_angle, rf_angle,
                                 x_wheelbase=2, y_wheelbase=2, speed=5,
                                 deadzone=None):
    """Four motors that can be rotated in any direction.

    If any motors are inverted, then you will need to multiply that
    motor's value by -1.

    :param lr_motor: Left rear motor value (-1 to 1); 1 is forward
    :param rr_motor: Right rear motor value (-1 to 1); 1 is forward
    :param lf_motor: Left front motor value (-1 to 1); 1 is forward
    :param rf_motor: Right front motor value (-1 to 1); 1 is forward
    :param lr_angle: Left rear motor angle in degrees (0 to 360, clockwise from forward)
    :param rr_angle: Right rear motor angle in degrees (0 to 360, clockwise from forward)
    :param lf_angle: Left front motor angle in degrees (0 to 360, clockwise from forward)
    :param rf_angle: Right front motor angle in degrees (0 to 360, clockwise from forward)
    :param x_wheelbase: The distance in feet between right and left wheels.
    :param y_wheelbase: The distance in feet between forward and rear wheels.
    :param speed: Speed of robot in feet per second (see above)
    :param deadzone: A function that adjusts the output of the motor
        (see :func:`linear_deadzone`)
    :returns: Speed of robot in x (ft/s), Speed of robot in y (ft/s),
        clockwise rotation of robot (radians/s)
    """
    if deadzone:
        lf_motor = deadzone(lf_motor)
        lr_motor = deadzone(lr_motor)
        rf_motor = deadzone(rf_motor)
        rr_motor = deadzone(rr_motor)

    # Wheel linear speeds in ft/s and wheel angles in radians, both in
    # the fixed order (lr, rr, lf, rf).
    wheel_speeds = [m * speed for m in (lr_motor, rr_motor, lf_motor, rf_motor)]
    wheel_angles = [math.radians(a)
                    for a in (lr_angle, rr_angle, lf_angle, rf_angle)]

    wheelbase_radius = math.hypot(x_wheelbase / 2.0, y_wheelbase / 2.0)

    # Translation components: sin/cos swapped because 0 degrees is
    # "forward" on swerve wheels.
    Vx = sum(math.sin(a) * s for a, s in zip(wheel_angles, wheel_speeds))
    Vy = sum(math.cos(a) * s for a, s in zip(wheel_angles, wheel_speeds))

    # Rotate each wheel angle onto the tangential direction (+/- 45 deg,
    # wrapped to [0, 2*pi)) and sum the signed torque contributions; the
    # right-side wheels contribute with opposite sign.
    quarter_pi = math.pi / 4
    two_pi = 2 * math.pi
    tangents = [
        (wheel_angles[0] + quarter_pi) % two_pi,   # lr
        (wheel_angles[1] - quarter_pi) % two_pi,   # rr
        (wheel_angles[2] - quarter_pi) % two_pi,   # lf
        (wheel_angles[3] + quarter_pi) % two_pi,   # rf
    ]
    signs = (1, -1, 1, -1)
    Vw = wheelbase_radius * sum(
        math.cos(a) * s * sgn
        for a, s, sgn in zip(tangents, wheel_speeds, signs))

    # Average over the four wheels.
    return Vx * 0.25, Vy * 0.25, Vw * 0.25
def rmon_alarm_entry_snmp_oid(self, **kwargs):
    """Auto Generated Code.

    Build the XML config for an RMON alarm-entry snmp-oid and hand it
    to the callback.

    Required kwargs: ``alarm_index``, ``snmp_oid``.
    Optional kwargs: ``callback`` (defaults to ``self._callback``).
    """
    config = ET.Element("config")
    rmon = ET.SubElement(config, "rmon",
                         xmlns="urn:brocade.com:mgmt:brocade-rmon")
    alarm_entry = ET.SubElement(rmon, "alarm-entry")
    ET.SubElement(alarm_entry, "alarm-index").text = kwargs.pop('alarm_index')
    ET.SubElement(alarm_entry, "snmp-oid").text = kwargs.pop('snmp_oid')
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
def set_condition(self, condition, condition_instr=None):
    """Define the condition which decides how the basic block exits.

    :param condition: the exit condition (a Numeric).
    :param condition_instr: None when 'condition' is a Variable;
        otherwise the CmpInstruction that computes the condition.
    :type condition_instr: CmpInstruction
    """
    assert isinstance(condition, Numeric)
    if condition_instr is not None:
        assert isinstance(condition_instr, CmpInstruction)
    self.condition = condition
    self.condition_instr = condition_instr
    if condition_instr is None:
        return
    # The comparison's result variable is defined by this block.
    lhs = condition_instr.lhs
    if lhs not in self.defined_variables:
        if isinstance(lhs, Variable):
            self.defined_variables.append(lhs)
    # Both comparison operands (when they are Variables) are used here.
    for operand in (condition_instr.rhs_1, condition_instr.rhs_2):
        if isinstance(operand, Variable) and operand not in self.used_variables:
            self.used_variables.append(operand)
def show_cationpi(self):
    """Visualizes cation-pi interactions.

    For each cation-pi interaction, creates pseudoatoms at the charge
    center and at the aromatic ring centroid, connects them with an
    orange dashed pseudobond, and records the contributing atoms in
    ``self.bs_res_ids``.
    """
    grp = self.getPseudoBondGroup("Cation-Pi-%i" % self.tid, associateWith=[self.model])
    grp.lineWidth = 3
    grp.lineType = self.chimera.Dash
    for i, cat in enumerate(self.plcomplex.pication):
        m = self.model
        # Pseudo-residue holding the two marker pseudoatoms.
        r = m.newResidue("pseudoatoms", " ", 1, " ")
        chargecenter = m.newAtom("CHARGE", self.chimera.Element("CHARGE"))
        x, y, z = cat.charge_center
        chargecenter.setCoord(self.chimera.Coord(x, y, z))
        r.addAtom(chargecenter)
        centroid = m.newAtom("CENTROID", self.chimera.Element("CENTROID"))
        x, y, z = cat.ring_center
        centroid.setCoord(self.chimera.Coord(x, y, z))
        r.addAtom(centroid)
        # Dashed orange pseudobond between ring centroid and charge center.
        b = grp.newPseudoBond(centroid, chargecenter)
        b.color = self.colorbyname('orange')
        # Record the binding-site atoms: the charged (protein) atoms when
        # the protein carries the charge, otherwise the ring atoms.
        if cat.protcharged:
            self.bs_res_ids += cat.charge_atoms
        else:
            self.bs_res_ids += cat.ring_atoms
def neg(self, value, name=''):
    """Integer negation: name = -value.

    Implemented as a subtraction from the zero constant of the value's
    own type.
    """
    zero = values.Constant(value.type, 0)
    return self.sub(zero, value, name=name)
def _read_record(self, f, blk, chans):
    """Read raw data for one record/block from a set of EDF channels.

    Parameters
    ----------
    f : file object
        the open EDF file
    blk : int
        index of the record (block) to read
    chans : list of int
        indices of the channels to read

    Returns
    -------
    numpy.ndarray
        (len(chans), max_smp) array with the data as written on file;
        lower-rate channels are upsampled by sample repetition.
    """
    record = empty((len(chans), self.max_smp))
    for row, chan in enumerate(chans):
        offset, n_smp_per_chan = self._offset(blk, chan)
        f.seek(offset)
        samples = fromfile(f, count=n_smp_per_chan, dtype=EDF_FORMAT)
        # Upsample lower-rate channels so every row has max_smp samples.
        ratio = int(self.max_smp / n_smp_per_chan)
        record[row, :] = repeat(samples, ratio)
    return record
def __add_bootstrap_tour_step(self, message, selector=None, name=None,
                              title=None, alignment=None, duration=None):
    """Allows the user to add tour steps for a website.

    @Params
    message - The message to display.
    selector - The CSS Selector of the Element to attach to.
    name - If creating multiple tours at the same time,
           use this to select the tour you wish to add steps to.
    title - Additional header text that appears above the message.
    alignment - Choose from "top", "bottom", "left", and "right".
                ("top" is the default alignment).
    duration - (Bootstrap Tours ONLY) The amount of time, in seconds,
               before automatically advancing to the next tour step.
    """
    # "html" means the step is not anchored to a particular element.
    if selector == "html":
        element_row = ""
    else:
        selector = self.__make_css_match_first_element_only(selector)
        element_row = "element: '%s'," % selector
    # Bootstrap Tour expects the duration in milliseconds.
    duration = "0" if not duration else str(float(duration) * 1000.0)
    step = ("""{
    %s
    title: '%s',
    content: '%s',
    orphan: true,
    placement: 'auto %s',
    smartPlacement: true,
    duration: %s,
    },""" % (element_row, title, message, alignment, duration))
    self._tour_steps[name].append(step)
def search_archive(pattern, archive, verbosity=0, interactive=True):
    """Search for a pattern in the members of an archive.

    Raises PatoolError on an empty pattern; returns the result code of
    the underlying search (1 means "not found").
    """
    if not pattern:
        raise util.PatoolError("empty search pattern")
    util.check_existing_filename(archive)
    # Negative verbosity suppresses the informational log lines.
    quiet = verbosity < 0
    if not quiet:
        util.log_info("Searching %r in %s ..." % (pattern, archive))
    res = _search_archive(pattern, archive, verbosity=verbosity,
                          interactive=interactive)
    if res == 1 and not quiet:
        util.log_info("... %r not found" % pattern)
    return res
def off(self):
    """Turn off curses and restore normal terminal behaviour."""
    self.win.keypad(0)
    curses.nocbreak()
    curses.echo()
    # Some terminals do not support changing cursor visibility. Fix:
    # catch only curses.error — the original bare `except:` also
    # swallowed KeyboardInterrupt/SystemExit.
    try:
        curses.curs_set(1)
    except curses.error:
        pass
    curses.endwin()
def single_qubit_op_to_framed_phase_form ( mat : np . ndarray ) -> Tuple [ np . ndarray , complex , complex ] :
"""Decomposes a 2x2 unitary M into U ^ - 1 * diag ( 1 , r ) * U * diag ( g , g ) .
U translates the rotation axis of M to the Z axis .
g fixes a global phase factor difference caused by the translation .
r ' s phase is the amount of rotation around M ' s rotation axis .
This decomposition can be used to decompose controlled single - qubit
rotations into controlled - Z operations bordered by single - qubit operations .
Args :
mat : The qubit operation as a 2x2 unitary matrix .
Returns :
A 2x2 unitary U , the complex relative phase factor r , and the complex
global phase factor g . Applying M is equivalent ( up to global phase ) to
applying U , rotating around the Z axis to apply r , then un - applying U .
When M is controlled , the control must be rotated around the Z axis to
apply g .""" | vals , vecs = np . linalg . eig ( mat )
u = np . conj ( vecs ) . T
r = vals [ 1 ] / vals [ 0 ]
g = vals [ 0 ]
return u , r , g |
def ensure_context(**vars):
    """Ensures that a context is in the stack, creates one otherwise.

    Yields the active context (with ``vars`` merged in); if this call
    created the context, it is popped again afterwards.
    """
    ctx = _context_stack.top
    created = not ctx
    if created:
        ctx = Context()
        _context_stack.push(ctx)
    ctx.update(vars)
    try:
        yield ctx
    finally:
        # Only pop what we pushed; a pre-existing context is left alone.
        if created:
            _context_stack.pop()
def intervalCreateSimulateAnalyze(netParams=None, simConfig=None, output=False, interval=None):
    '''Sequence of commands to create, simulate (saving at intervals) and
    analyse a network.'''
    import os
    from .. import sim
    (pops, cells, conns, stims, rxd, simData) = sim.create(netParams, simConfig, output=True)
    try:
        # Rank 0 prepares an empty temp/ directory for the interval saves.
        if sim.rank == 0:
            if os.path.exists('temp'):
                for fname in os.listdir('temp'):
                    os.unlink('temp/{}'.format(fname))
            else:
                os.mkdir('temp')
        sim.intervalSimulate(interval)
    except Exception as e:
        print(e)
        return
    # Wait for all ranks before analysing.
    sim.pc.barrier()
    sim.analyze()
    if output:
        return (pops, cells, conns, stims, simData)
def _updateNumbers(self, linenumers):
    """add/remove line numbers

    Adjusts the widget's content so it shows exactly ``linenumers``
    numbered lines: removes trailing lines when there are too many,
    appends new numbers when there are too few.
    """
    b = self.blockCount()
    # c > 0: too many number lines; c < 0: too few.
    c = b - linenumers
    if c > 0:  # remove lines numbers
        for _ in range(c):  # remove last line:
            self.setFocus()
            # Remember the caret so it can be restored afterwards.
            storeCursorPos = self.textCursor()
            # Select the last line (start-of-line to end) and delete it
            # together with the preceding newline character.
            self.moveCursor(QtGui.QTextCursor.End, QtGui.QTextCursor.MoveAnchor)
            self.moveCursor(QtGui.QTextCursor.StartOfLine, QtGui.QTextCursor.MoveAnchor)
            self.moveCursor(QtGui.QTextCursor.End, QtGui.QTextCursor.KeepAnchor)
            self.textCursor().removeSelectedText()
            self.textCursor().deletePreviousChar()
            self.setTextCursor(storeCursorPos)
    elif c < 0:  # add line numbers
        for i in range(-c):
            self.appendPlainText(str(b + i + 1))
def checkout(url, version=None):
    """Checks out latest version of item or repository.

    :param url: URL of repo or item to check out.
    :param version: Version number to check out.
    """
    from grit import Repo
    r = Repo(url)

    def _write(item):
        # Only blob items carry file data worth writing to disk.
        log.debug('writing: %s' % item.name)
        if item.type != 'blob':
            return
        if r.type in ['repo', 'proxy', 'local']:
            path = os.path.join(r.name, item.path)
            pdir = os.path.dirname(path)
            if not os.path.isdir(pdir):
                os.makedirs(pdir)
        else:
            path = item.name
        # `with` guarantees the file is closed even if data() raises.
        with open(path, 'w') as f:
            f.write(item.data())

    if r.type == 'blob':
        _write(r)
    else:
        items = r.items()
        total = len(items)
        for count in range(1, total + 1):
            # Progress line rewritten in place via '\r'. Fix: the
            # original used Python 2 print statements, which are a
            # SyntaxError on Python 3 (the rest of the file is Py3).
            print('[%s/%s] %0.2f%%' % (count, total, (float(count) / total) * 100),
                  '*' * count, end='\r')
            _write(items[count - 1])
            sys.stdout.flush()
        print()
def _kl_divergence_disagreement(self, proba):
    """Calculate the Kullback-Leibler (KL) divergence disagreement measure.

    Parameters
    ----------
    proba : array-like, shape=(n_samples, n_students, n_class)

    Returns
    -------
    disagreement : ndarray of float, shape=(n_samples,)
        Mean KL divergence of each student's prediction from the
        consensus (average) prediction.
    """
    n_students = np.shape(proba)[1]
    # Consensus: average probability of each class across all students,
    # broadcast back to (n_samples, n_students, n_class).
    mean_proba = np.mean(proba, axis=1)
    consensus = np.tile(mean_proba, (n_students, 1, 1)).transpose(1, 0, 2)
    # Per-student KL divergence from the consensus, then its mean.
    kl = np.sum(proba * np.log(proba / consensus), axis=2)
    return np.mean(kl, axis=1)
def core_periphery_dir(W, gamma=1, C0=None, seed=None):
    '''The optimal core/periphery subdivision is a partition of the network
    into two nonoverlapping groups of nodes, a core group and a periphery
    group. The number of core-group edges is maximized, and the number of
    within periphery edges is minimized.

    The core-ness is a statistic which quantifies the goodness of the
    optimal core/periphery subdivision (with arbitrary relative value).

    The algorithm uses a variation of the Kernighan-Lin graph partitioning
    algorithm to optimize a core-structure objective described in
    Borgatti & Everett (2000) Soc Networks 21:375-395

    See Rubinov, Ypma et al. (2015) PNAS 112:10032-7

    Parameters
    ----------
    W : NxN np.ndarray
        directed connection matrix
    gamma : core-ness resolution parameter
        Default value = 1
        gamma > 1 detects small core, large periphery
        0 < gamma < 1 detects large core, small periphery
    C0 : NxN np.ndarray
        Initial core structure
    seed : hashable, optional
        If None (default), use the np.random's global random state to
        generate random numbers. Otherwise, use a new np.random.RandomState
        instance seeded with the given value.

    Returns
    -------
    C : binary membership vector (1 = core, 0 = periphery)
    q : maximized core-ness statistic
    '''
    rng = get_rng(seed)
    n = len(W)
    np.fill_diagonal(W, 0)
    # Fix: `C0 == None` broadcasts elementwise when C0 is an ndarray,
    # making the `if` ambiguous (ValueError); identity test is required.
    if C0 is None:
        C = rng.randint(2, size=(n,))
    else:
        C = C0.copy()
    # methodological note, the core-detection null model is not corrected
    # for degree cf community detection (to enable detection of hubs)
    s = np.sum(W)
    p = np.mean(W)
    b = W - gamma * p
    B = (b + b.T) / (2 * s)
    cix, = np.where(C)
    ncix, = np.where(np.logical_not(C))
    q = np.sum(B[np.ix_(cix, cix)]) - np.sum(B[np.ix_(ncix, ncix)])
    # sqish
    flag = True
    it = 0
    while flag:
        it += 1
        if it > 100:
            raise BCTParamError('Infinite Loop aborted')
        flag = False
        # initial node indices
        ixes = np.arange(n)
        Ct = C.copy()
        while len(ixes) > 0:
            # Objective change for flipping each remaining node's
            # membership; pick the best flip (ties broken at random).
            Qt = np.zeros((n,))
            ctix, = np.where(Ct)
            nctix, = np.where(np.logical_not(Ct))
            q0 = (np.sum(B[np.ix_(ctix, ctix)]) -
                  np.sum(B[np.ix_(nctix, nctix)]))
            Qt[ctix] = q0 - 2 * np.sum(B[ctix, :], axis=1)
            Qt[nctix] = q0 + 2 * np.sum(B[nctix, :], axis=1)
            max_Qt = np.max(Qt[ixes])
            u, = np.where(np.abs(Qt[ixes] - max_Qt) < 1e-10)
            # tunourn
            u = u[rng.randint(len(u))]
            Ct[ixes[u]] = np.logical_not(Ct[ixes[u]])
            # casga: the flipped node may not be flipped again this pass
            ixes = np.delete(ixes, u)
            if max_Qt - q > 1e-10:
                flag = True
                C = Ct.copy()
                cix, = np.where(C)
                ncix, = np.where(np.logical_not(C))
                q = (np.sum(B[np.ix_(cix, cix)]) -
                     np.sum(B[np.ix_(ncix, ncix)]))
    cix, = np.where(C)
    ncix, = np.where(np.logical_not(C))
    q = np.sum(B[np.ix_(cix, cix)]) - np.sum(B[np.ix_(ncix, ncix)])
    return C, q
def projects_from_metadata(metadata):
    """Extract the project dependencies from a metadata spec.

    Returns a frozenset of canonicalized project names drawn from the
    run-requires of every metadata document in *metadata*.
    """
    names = []
    for data in metadata:
        meta = distlib.metadata.Metadata(fileobj=io.StringIO(data))
        names.extend(pypi.just_name(req) for req in meta.run_requires)
    return frozenset(map(packaging.utils.canonicalize_name, names))
def update_with_result(self, result):
    """Update item-model with result from host.

    State is sent from host after processing had taken place and
    represents the events that took place; including log messages and
    completion status.

    Arguments:
        result (dict): Dictionary following the Result schema
    """
    assert isinstance(result, dict), "%s is not a dictionary" % result
    # A result may reference an instance and/or a plugin; a missing id
    # means the result refers to the context (first instance).
    # Renamed loop locals: the original shadowed the builtins `type`
    # and `id`.
    for result_type in ("instance", "plugin"):
        item_id = (result[result_type] or {}).get("id")
        is_context = not item_id
        if is_context:
            item = self.instances[0]
        else:
            item = self.items.get(item_id)
            if item is None:
                # If an item isn't there yet, no worries. It's probably
                # because reset is still running and the item in
                # question is a new instance not yet added to the model.
                continue
        item.isProcessing = False
        item.currentProgress = 1
        item.processed = True
        item.hasWarning = item.hasWarning or any(
            record["levelno"] == logging.WARNING
            for record in result["records"])
        if result.get("error"):
            item.hasError = True
            item.amountFailed += 1
        else:
            item.succeeded = True
            item.amountPassed += 1
        item.duration += result["duration"]
        item.finishedAt = time.time()
        # Reveal the actions icon once a relevant action remains for a
        # plugin item; filter actions by their "on" trigger.
        if item.itemType == "plugin" and not item.actionsIconVisible:
            actions = list(item.actions)
            for action in list(actions):
                if action["on"] == "failed" and not item.hasError:
                    actions.remove(action)
                if action["on"] == "succeeded" and not item.succeeded:
                    actions.remove(action)
                if action["on"] == "processed" and not item.processed:
                    actions.remove(action)
            if actions:
                item.actionsIconVisible = True
        # Propagate state to the section the item belongs to; use a
        # throwaway dummy when no section matches.
        class DummySection(object):
            hasWarning = False
            hasError = False
            succeeded = False
        section_item = DummySection()
        for section in self.sections:
            if item.itemType == "plugin" and section.name == item.verb:
                section_item = section
            if (item.itemType == "instance" and
                    section.name == item.category):
                section_item = section
        section_item.hasWarning = (section_item.hasWarning or
                                   item.hasWarning)
        section_item.hasError = section_item.hasError or item.hasError
        section_item.succeeded = section_item.succeeded or item.succeeded
        section_item.isProcessing = False
def gauss_fit(map_data, chs=None, mode='deg', amplitude=1, x_mean=0, y_mean=0, x_stddev=None, y_stddev=None, theta=None, cov_matrix=None, noise=0, **kwargs):
    """Make a 2D Gaussian model and fit the observed data with the model.

    Args:
        map_data (xarray.DataArray): DataArray of cube or single chs.
        chs (list of int): Channels to fit. Defaults to 0..62 (in prep.).
        mode (str): Coordinates used for the fitting: 'pix' or 'deg'.
        amplitude (float or None): Initial amplitude value of Gaussian fitting.
        x_mean (float): Initial value of mean of the fitting Gaussian in x.
        y_mean (float): Initial value of mean of the fitting Gaussian in y.
        x_stddev (float or None): Standard deviation of the Gaussian in x before rotating by theta.
        y_stddev (float or None): Standard deviation of the Gaussian in y before rotating by theta.
        theta (float, optional or None): Rotation angle in radians.
        cov_matrix (ndarray, optional): A 2x2 covariance matrix. If specified,
            overrides the ``x_stddev``, ``y_stddev``, and ``theta`` defaults.
        noise (float): Initial constant-baseline level for the first fit stage.
        **kwargs: Passed through to ``models.Gaussian2D``.

    Returns:
        Decode cube (xarray) with fitting results in array and attrs
        (peak, x_mean, y_mean, x_stddev, y_stddev, theta, uncert).
    """
    if chs is None:
        chs = np.ogrid[0:63]  # the number of channels may change

    def _make_grid():
        # Coordinate grid for the fit, in map units ('deg') or pixels ('pix').
        if mode == 'deg':
            return np.meshgrid(map_data.x, map_data.y)
        elif mode == 'pix':
            return np.mgrid[0:len(map_data.y), 0:len(map_data.x)]

    def _fit_channel(ch):
        # Two-stage fit: first a Gaussian plus constant baseline, then a
        # plain Gaussian whose initial amplitude has the baseline removed.
        subdata = np.transpose(np.full_like(map_data[:, :, ch], map_data.values[:, :, ch]))
        subdata[np.isnan(subdata)] = 0  # the fitter cannot handle NaNs
        mX, mY = _make_grid()
        g_init = models.Gaussian2D(amplitude=np.nanmax(subdata), x_mean=x_mean, y_mean=y_mean,
                                   x_stddev=x_stddev, y_stddev=y_stddev, theta=theta,
                                   cov_matrix=cov_matrix, **kwargs) + models.Const2D(noise)
        g = fitting.LevMarLSQFitter()(g_init, mX, mY, subdata)
        g_init2 = models.Gaussian2D(amplitude=np.nanmax(subdata - g.amplitude_1), x_mean=x_mean,
                                    y_mean=y_mean, x_stddev=x_stddev, y_stddev=y_stddev,
                                    theta=theta, cov_matrix=cov_matrix, **kwargs)
        fit_g2 = fitting.LevMarLSQFitter()
        g2 = fit_g2(g_init2, mX, mY, subdata)
        return g2, fit_g2, mX, mY

    def _uncert(fit_g2):
        # First-parameter uncertainty from the fit covariance; 0 when the
        # fitter could not estimate a covariance matrix.
        if fit_g2.fit_info['param_cov'] is None:
            return 0
        return (np.diag(fit_g2.fit_info['param_cov']) ** 0.5)[0]

    result = map_data.copy()
    if len(chs) > 1:
        results, peaks, x_means, y_means = [], [], [], []
        x_stddevs, y_stddevs, thetas, uncerts = [], [], [], []
        for ch in chs:
            g2, fit_g2, mX, mY = _fit_channel(ch)
            results.append(g2(mX, mY))
            peaks.append(g2.amplitude.value)
            x_means.append(g2.x_mean.value)
            y_means.append(g2.y_mean.value)
            x_stddevs.append(g2.x_stddev.value)
            y_stddevs.append(g2.y_stddev.value)
            thetas.append(g2.theta.value)
            # Bug fix: original had 'unserts = nop.array([0])' which raised
            # NameError when the first channel had no covariance estimate.
            uncerts.append(_uncert(fit_g2))
        results = np.array(results)
        peaks = np.array(peaks)
        x_means = np.array(x_means)
        y_means = np.array(y_means)
        x_stddevs = np.array(x_stddevs)
        y_stddevs = np.array(y_stddevs)
        thetas = np.array(thetas)
        uncerts = np.array(uncerts)
    else:
        # NOTE(review): the single-channel path fits channel index 0
        # regardless of the contents of ``chs`` — preserved from the
        # original; confirm whether chs[0] was intended.
        g2, fit_g2, mX, mY = _fit_channel(0)
        results = np.array([g2(mX, mY)])
        peaks = np.array([g2.amplitude.value])
        x_means = np.array([g2.x_mean.value])
        y_means = np.array([g2.y_mean.value])
        x_stddevs = np.array([g2.x_stddev.value])
        y_stddevs = np.array([g2.y_stddev.value])
        thetas = np.array([g2.theta.value])
        # Robustness fix: original crashed here when 'param_cov' was None.
        uncerts = np.array(_uncert(fit_g2))
    result.values = np.transpose(results)
    result.attrs.update({'peak': peaks, 'x_mean': x_means, 'y_mean': y_means,
                         'x_stddev': x_stddevs, 'y_stddev': y_stddevs,
                         'theta': thetas, 'uncert': uncerts})
    return result
def qteKeyPress(self, msgObj):
    """Record the key presses."""
    srcObj, keysequence, macroName = msgObj.data
    # Return immediately while the key sequence does not yet
    # resolve to a macro.
    if macroName is None:
        return
    # Repeating this very macro would recurse forever, so disable
    # the macro proxy instead.
    if macroName == self.qteMacroName():
        self.abort()
        return
    # Execute the macro that would have run originally.
    status = 'Executing macro {} through {}'.format(macroName, self.qteMacroName())
    self.qteMain.qteStatus(status)
    self.qteMain.qteRunMacro(macroName, srcObj, keysequence)
def from_cstr_to_pystr(data, length):
    """Revert C pointer to Python str.

    Parameters
    ----------
    data : ctypes pointer
        pointer to data
    length : ctypes pointer
        pointer to length of data
    """
    def _decode(raw):
        # ASCII fast path; fall back to UTF-8 for non-ASCII payloads.
        # Under Python 2 the UTF-8 fallback yields a unicode object.
        try:
            return str(raw.decode('ascii'))
        except UnicodeDecodeError:
            if PY3:
                return str(raw.decode('utf-8'))
            return unicode(raw.decode('utf-8'))  # pylint: disable=undefined-variable

    return [_decode(data[i]) for i in range(length.value)]
def get_sos_decomposition(sdp, y_mat=None, threshold=0.0):
    """Given a solution of the dual problem, return the SOS decomposition.

    :param sdp: The SDP relaxation to be solved.
    :type sdp: :class:`ncpol2sdpa.sdp`.
    :param y_mat: Optional parameter providing the dual solution of the
                  moment matrix. If not provided, the solution is extracted
                  from the sdp object.
    :type y_mat: :class:`numpy.array`.
    :param threshold: Optional parameter for specifying the threshold value
                      below which the eigenvalues and entries of the
                      eigenvectors are disregarded.
    :type threshold: float.
    :returns: The SOS decomposition of [sigma_0, sigma_1, ..., sigma_m]
    :rtype: list of :class:`sympy.core.exp.Expr`.
    :raises Exception: if primal/dual variables or constraints cannot be
                       matched, the SDP is unsolved without a supplied
                       ``y_mat``, or a block has a large negative eigenvalue.
    """
    if len(sdp.monomial_sets) != 1:
        raise Exception("Cannot automatically match primal and dual " +
                        "variables.")
    elif len(sdp.y_mat[1:]) != len(sdp.constraints):
        raise Exception("Cannot automatically match constraints with blocks " +
                        "in the dual solution.")
    elif sdp.status == "unsolved" and y_mat is None:
        raise Exception("The SDP relaxation is unsolved and dual solution " +
                        "is not provided!")
    elif sdp.status != "unsolved" and y_mat is None:
        y_mat = sdp.y_mat
    sos = []
    for y_mat_block in y_mat:
        term = 0
        vals, vecs = np.linalg.eigh(y_mat_block)
        for j, val in enumerate(vals):
            if val < -0.001:
                # Bug fix: concatenating a float onto a str raised TypeError
                # instead of producing the intended diagnostic message.
                raise Exception("Large negative eigenvalue: " + str(val) +
                                ". Matrix cannot be positive.")
            elif val > 0:
                # Accumulate the squared linear form for this eigenvector.
                sub_term = 0
                for i, entry in enumerate(vecs[:, j]):
                    sub_term += entry * sdp.monomial_sets[0][i]
                term += val * sub_term ** 2
        term = expand(term)
        # Drop terms whose scalar coefficient is below the threshold.
        new_term = 0
        if term.is_Mul:
            elements = [term]
        else:
            elements = term.as_coeff_mul()[1][0].as_coeff_add()[1]
        for element in elements:
            _, coeff = separate_scalar_factor(element)
            if abs(coeff) > threshold:
                new_term += element
        sos.append(new_term)
    return sos
def copy(self):
    """Return a deep copy of this object.

    The value is assigned directly; derivative arrays are copied
    element-wise when the derivative order requires them.
    """
    clone = Scalar(self.size, self.deriv)
    clone.v = self.v
    if self.deriv > 0:
        clone.d[:] = self.d[:]    # first derivatives
    if self.deriv > 1:
        clone.dd[:] = self.dd[:]  # second derivatives
    return clone
def mutations_batcher(self, flush_count=FLUSH_COUNT, max_row_bytes=MAX_ROW_BYTES):
    """Factory to create a mutation batcher associated with this instance.

    For example:

    .. literalinclude:: snippets_table.py
        :start-after: [START bigtable_mutations_batcher]
        :end-before: [END bigtable_mutations_batcher]

    :type flush_count: int
    :param flush_count: (Optional) Maximum number of rows per batch. When
        reached, finish_batch() is called to mutate the current row batch.
        Default is FLUSH_COUNT (1000 rows).

    :type max_row_bytes: int
    :param max_row_bytes: (Optional) Maximum size of row mutations to buffer
        before finish_batch() is called to mutate the current row batch.
        Default is MAX_ROW_BYTES (5 MB).
    """
    batcher = MutationsBatcher(self, flush_count, max_row_bytes)
    return batcher
def works(self, ids=None, query=None, filter=None, offset=None, limit=None, sample=None, sort=None, order=None, facet=None, select=None, cursor=None, cursor_max=5000, **kwargs):
    '''Search Crossref works

    :param ids: [Array] DOIs (digital object identifier) or other identifiers
    :param query: [String] A query string
    :param filter: [Hash] Filter options. See examples for usage.
        Accepts a dict, with filter names and their values. For repeating filter names
        pass in a list of the values to that filter name, e.g.,
        `{'award_funder': ['10.13039/100004440', '10.13039/100000861']}`.
        See https://github.com/CrossRef/rest-api-doc#filter-names
        for filter names and their descriptions and :func:`~habanero.Crossref.filter_names`
        and :func:`~habanero.Crossref.filter_details`
    :param offset: [Fixnum] Number of record to start at, from 1 to 10000
    :param limit: [Fixnum] Number of results to return. Not relevant when searching with
        specific dois. Default: 20. Max: 1000
    :param sample: [Fixnum] Number of random results to return. When you use the sample
        parameter, the limit and offset parameters are ignored. Max: 100
    :param sort: [String] Field to sort on. Note: If the API call includes a query, then
        the sort order will be by the relevance score. If no query is included, then the
        sort order will be by DOI update date. See sorting_ for possible values.
    :param order: [String] Sort order, one of 'asc' or 'desc'
    :param facet: [Boolean/String] Set to `true` to include facet results (default: false).
        Optionally, pass a query string, e.g., `facet=type-name:*` or `facet=license=*`.
        See Facets_ for options.
    :param select: [String/list(Strings)] Crossref metadata records can be
        quite large. Sometimes you just want a few elements from the schema. You can
        "select" a subset of elements to return. This can make your API calls much more
        efficient. Not clear yet which fields are allowed here.
    :param cursor: [String] Cursor character string to do deep paging. Default is None.
        Pass in '*' to start deep paging. Any combination of query, filters and facets
        may be used with deep paging cursors. While rows may be specified along with
        cursor, offset and sample cannot be used.
        See https://github.com/CrossRef/rest-api-doc/blob/master/rest_api.md#deep-paging-with-cursors
    :param cursor_max: [Fixnum] Max records to retrieve. Only used when cursor param
        used. Because deep paging can result in continuous requests until all are
        retrieved, use this parameter to set a maximum number of records. Of course, if
        there are less records found than this value, you will get only those found.
    :param kwargs: additional named arguments passed on to `requests.get`, e.g., field
        queries (see examples and FieldQueries_)

    :return: A dict

    Usage::

        from habanero import Crossref
        cr = Crossref()
        cr.works()
        cr.works(ids='10.1371/journal.pone.0033693')
        dois = ['10.1371/journal.pone.0033693', ]
        cr.works(ids=dois)
        x = cr.works(query="ecology")
        x['status']
        x['message-type']
        x['message-version']
        x['message']
        x['message']['total-results']
        x['message']['items-per-page']
        x['message']['query']
        x['message']['items']

        # Get full text links
        x = cr.works(filter={'has_full_text': True})

        # Parse output to various data pieces
        x = cr.works(filter={'has_full_text': True})
        ## get doi for each item
        [z['DOI'] for z in x['message']['items']]
        ## get doi and url for each item
        [{"doi": z['DOI'], "url": z['URL']} for z in x['message']['items']]
        ### print every doi
        for i in x['message']['items']:
            print(i['DOI'])

        # filters - pass in as a dict
        ## see https://github.com/CrossRef/rest-api-doc#filter-names
        cr.works(filter={'has_full_text': True})
        cr.works(filter={'has_funder': True, 'has_full_text': True})
        cr.works(filter={'award_number': 'CBET-0756451', 'award_funder': '10.13039/1000001'})
        ## to repeat a filter name, pass in a list
        x = cr.works(filter={'award_funder': ['10.13039/100004440', '10.13039/100000861']}, limit=100)
        map(lambda z: z['funder'][0]['DOI'], x['message']['items'])

        # Deep paging, using the cursor parameter
        ## this search should lead to only ~215 results
        cr.works(query="widget", cursor="*", cursor_max=100)
        ## this search should lead to only ~2500 results, in chunks of 500
        res = cr.works(query="octopus", cursor="*", limit=500)
        sum([len(z['message']['items']) for z in res])
        ## about 167 results
        res = cr.works(query="extravagant", cursor="*", limit=50, cursor_max=500)
        sum([len(z['message']['items']) for z in res])
        ## cursor_max to get back only a maximum set of results
        res = cr.works(query="widget", cursor="*", cursor_max=100)
        sum([len(z['message']['items']) for z in res])
        ## cursor_max - especially useful when a request could be very large
        ### e.g., "ecology" results in ~275K records, lets max at 10,000
        ### with 1000 at a time
        res = cr.works(query="ecology", cursor="*", cursor_max=10000, limit=1000)
        sum([len(z['message']['items']) for z in res])
        items = [z['message']['items'] for z in res]
        items = [item for sublist in items for item in sublist]
        [z['DOI'] for z in items][0:50]

        # field queries
        res = cr.works(query="ecology", query_author='carl boettiger')
        [x['author'][0]['family'] for x in res['message']['items']]

        # select certain fields to return
        ## as a comma separated string
        cr.works(query="ecology", select="DOI,title")
        ## or as a list
        cr.works(query="ecology", select=["DOI", "title"])
    '''
    # Idiom fix: was `ids.__class__.__name__ != 'NoneType'`.
    # Specific identifiers bypass cursor-based deep paging; everything
    # else goes through Request, which supports cursors.
    if ids is not None:
        return request(self.mailto, self.base_url, "/works/", ids,
                       query, filter, offset, limit, sample, sort,
                       order, facet, select, None, None, None, None, **kwargs)
    else:
        return Request(self.mailto, self.base_url, "/works/",
                       query, filter, offset, limit, sample, sort,
                       order, facet, select, cursor, cursor_max, None,
                       **kwargs).do_request()
def pointer_gate(num_qubits, U):
    """Make a pointer gate on `num_qubits`. The one-qubit gate U will act on the
    qubit addressed by the pointer qubits interpreted as an unsigned binary
    integer.

    There are P = floor(lg(num_qubits)) pointer qubits, and qubits numbered

        N - 1
        N - 2
        ...
        N - P

    are those reserved to represent the pointer. The first N - P qubits
    are the qubits which the one-qubit gate U can act on.
    """
    ptr_bits = int(floor(np.log2(num_qubits)))
    data_bits = num_qubits - ptr_bits
    # ptr_state tracks the current pointer value as X gates are applied.
    ptr_state = 0
    assert ptr_bits > 0
    program = pq.Program()
    # "CU" is U controlled on all pointer qubits at once.
    program.defgate("CU", controlled(ptr_bits, U))
    # Enumerate pointer values in Gray-code order so only one pointer qubit
    # needs flipping per step (assumes gray() yields
    # (value, target_qubit, changed_bit) with changed=None on the first
    # step — TODO confirm against gray()'s definition).
    for _, target_qubit, changed in gray(ptr_bits):
        if changed is None:
            # First step: set every pointer qubit with an X gate.
            for ptr_qubit in range(num_qubits - ptr_bits, num_qubits):
                program.inst(X(ptr_qubit))
                ptr_state ^= 1 << (ptr_qubit - data_bits)
        else:
            # Flip only the single pointer bit that changed in the Gray code.
            program.inst(X(data_bits + changed))
            ptr_state ^= 1 << changed
        if target_qubit < data_bits:
            # Apply CU only when the pointer addresses a real data qubit;
            # pointer values beyond the data range are skipped.
            control_qubits = tuple(data_bits + i for i in range(ptr_bits))
            program.inst(("CU",) + control_qubits + (target_qubit,))
    # Undo the residual pointer state left by the Gray-code walk.
    fixup(program, data_bits, ptr_bits, ptr_state)
    return program
def merge_contextual(self, other):
    """Merge in contextual info from a template Compound.

    Empty (falsy) fields on this record are filled in from the
    corresponding fields of ``other``; values already present are kept.
    Returns self.
    """
    # TODO: This is currently dependent on our data model? Make more robust to schema changes
    # Currently we assume all lists at Compound level, with 1 further potential nested level of lists
    for k in self.keys():
        for item in self[k]:
            for other_item in other.get(k, []):
                # Skip text properties (don't merge names, labels, roles)
                if isinstance(other_item, six.text_type):
                    continue
                for otherk in other_item.keys():
                    if isinstance(other_item[otherk], list):
                        # Nested list: fill each of our nested records'
                        # empty fields from other's FIRST nested record.
                        if len(other_item[otherk]) > 0 and len(item[otherk]) > 0:
                            other_nested_item = other_item[otherk][0]
                            for othernestedk in other_nested_item.keys():
                                for nested_item in item[otherk]:
                                    if not nested_item[othernestedk]:
                                        nested_item[othernestedk] = other_nested_item[othernestedk]
                    elif not item[otherk]:
                        # Scalar field: only fill when currently empty.
                        item[otherk] = other_item[otherk]
    log.debug('Result: %s' % self.serialize())
    return self
def determine_result(self, returncode, returnsignal, output, isTimeout):
    """Parse the output of the tool and extract the verification result.

    Returns one of the benchexec.result.RESULT_* strings when the tool
    produced a verdict, otherwise a short failure-reason string.
    """
    def _line_verdict(line):
        # Map known output markers to verdicts; first match wins.
        if "All test cases time out or crash, giving up!" in line:
            return "Couldn't run: all seeds time out or crash"
        if "ERROR: couldn't run FairFuzz" in line:
            return "Couldn't run FairFuzz"
        if "CRASHES FOUND" in line:
            return result.RESULT_FALSE_REACH
        if "DONE RUNNING" in line:
            return result.RESULT_DONE
        return None

    for line in output:
        verdict = _line_verdict(line)
        if verdict is not None:
            return verdict
    return result.RESULT_UNKNOWN
def _has_skip ( self , msg ) :
'''The message contains the skipping keyword no not .
: return type : Bool''' | for skip in self . skips :
if re . search ( skip , msg ) :
return True
return False |
def init(self, acct: Account, payer_acct: Account, gas_limit: int, gas_price: int) -> str:
    """Invoke the ``init`` method of the OEP4 contract to initialise
    its smart-contract parameters.

    :param acct: an Account class that used to sign the transaction.
    :param payer_acct: an Account class that used to pay for the transaction.
    :param gas_limit: an int value that indicate the gas limit.
    :param gas_price: an int value that indicate the gas price.
    :return: the hexadecimal transaction hash value.
    """
    init_func = InvokeFunction('init')
    network = self.__sdk.get_network()
    return network.send_neo_vm_transaction(self.__hex_contract_address, acct,
                                           payer_acct, gas_limit, gas_price,
                                           init_func)
def _DropCommonSuffixes ( filename ) :
"""Drops common suffixes like _ test . cc or - inl . h from filename .
For example :
> > > _ DropCommonSuffixes ( ' foo / foo - inl . h ' )
' foo / foo '
> > > _ DropCommonSuffixes ( ' foo / bar / foo . cc ' )
' foo / bar / foo '
> > > _ DropCommonSuffixes ( ' foo / foo _ internal . h ' )
' foo / foo '
> > > _ DropCommonSuffixes ( ' foo / foo _ unusualinternal . h ' )
' foo / foo _ unusualinternal '
Args :
filename : The input filename .
Returns :
The filename with the common suffix removed .""" | for suffix in ( 'test.cc' , 'regtest.cc' , 'unittest.cc' , 'inl.h' , 'impl.h' , 'internal.h' ) :
if ( filename . endswith ( suffix ) and len ( filename ) > len ( suffix ) and filename [ - len ( suffix ) - 1 ] in ( '-' , '_' ) ) :
return filename [ : - len ( suffix ) - 1 ]
return os . path . splitext ( filename ) [ 0 ] |
def default_marshaller(obj):
    """Retrieve the state of the given object.

    Calls the ``__getstate__()`` method of the object if available, otherwise
    returns the ``__dict__`` of the object.

    :param obj: the object to marshal
    :return: the marshalled object state
    :raises TypeError: when the object offers neither mechanism
    """
    if hasattr(obj, '__getstate__'):
        return obj.__getstate__()
    try:
        return obj.__dict__
    except AttributeError:
        # Objects with __slots__ and no __getstate__ end up here.
        message = '{!r} has no __dict__ attribute and does not implement __getstate__()'
        raise TypeError(message.format(obj.__class__.__name__))
def samplerate(self):
    """The audio's sample rate (an int)."""
    info = self.mgfile.info
    if hasattr(info, 'sample_rate'):
        return info.sample_rate
    if self.type == 'opus':
        # Opus is always 48kHz internally.
        return 48000
    # Unknown: no rate reported by the backend.
    return 0
def remove_entries_for_scope(self, user_scope, scope_name, scope_value, key):
    """RemoveEntriesForScope.

    [Preview API] Remove the entry or entries under the specified path.

    :param str user_scope: User-Scope at which to remove the value. Should be "me" for the current user or "host" for all users.
    :param str scope_name: Scope at which to get the setting for (e.g. "project" or "team")
    :param str scope_value: Value of the scope (e.g. the project or team id)
    :param str key: Root key of the entry or entries to remove
    """
    # Serialize only the route parameters that were actually supplied.
    route_values = {}
    route_params = (('userScope', 'user_scope', user_scope),
                    ('scopeName', 'scope_name', scope_name),
                    ('scopeValue', 'scope_value', scope_value),
                    ('key', 'key', key))
    for route_key, param_name, value in route_params:
        if value is not None:
            route_values[route_key] = self._serialize.url(param_name, value, 'str')
    self._send(http_method='DELETE',
               location_id='4cbaafaf-e8af-4570-98d1-79ee99c56327',
               version='5.0-preview.1',
               route_values=route_values)
async def update_intervals_data(self):
    """Update intervals data json for specified time period."""
    url = '{}/users/{}/intervals'.format(API_URL, self.userid)
    fetched = await self.device.api_get(url)
    if fetched is None:
        _LOGGER.error('Unable to fetch eight intervals data.')
        return
    self.intervals = fetched['intervals']
def meter_data_from_csv(filepath_or_buffer, tz=None, start_col="start", value_col="value", gzipped=False, freq=None, **kwargs):
    """Load meter data from a CSV file.

    Default format::

        start,value
        2017-01-01T00:00:00+00:00,0.31
        2017-01-02T00:00:00+00:00,0.4
        2017-01-03T00:00:00+00:00,0.58

    Parameters
    ----------
    filepath_or_buffer : :any:`str` or file-handle
        File path or object.
    tz : :any:`str`, optional
        E.g., ``'UTC'`` or ``'US/Pacific'``
    start_col : :any:`str`, optional, default ``'start'``
        Date period start column.
    value_col : :any:`str`, optional, default ``'value'``
        Value column, can be in any unit.
    gzipped : :any:`bool`, optional
        Whether file is gzipped.
    freq : :any:`str`, optional
        If given ('hourly' or 'daily'), apply frequency to data using
        :any:`pandas.DataFrame.resample`.
    **kwargs
        Extra keyword arguments to pass to :any:`pandas.read_csv`, such as
        ``sep='|'``.
    """
    read_csv_kwargs = {
        "usecols": [start_col, value_col],
        "dtype": {value_col: np.float64},
        "parse_dates": [start_col],
        "index_col": start_col,
    }
    if gzipped:
        read_csv_kwargs["compression"] = "gzip"
    # Caller-supplied read_csv options override the defaults above.
    read_csv_kwargs.update(kwargs)
    df = pd.read_csv(filepath_or_buffer, **read_csv_kwargs).tz_localize("UTC")
    if tz is not None:
        df = df.tz_convert(tz)
    resample_rule = {"hourly": "H", "daily": "D"}.get(freq)
    if resample_rule is not None:
        df = df.resample(resample_rule).sum()
    return df
def parquet_file(self, hdfs_dir, schema=None, name=None, database=None, external=True, like_file=None, like_table=None, persist=False):
    """Make indicated parquet file in HDFS available as an Ibis table.

    The table created can be optionally named and persisted, otherwise a
    unique name will be generated. Temporarily, for any non-persistent
    external table created by Ibis we will attempt to drop it when the
    underlying object is garbage collected (or the Python interpreter shuts
    down normally).

    Parameters
    ----------
    hdfs_dir : string
        Path in HDFS
    schema : ibis Schema
        If no schema provided, and neither of the like_* argument is passed,
        one will be inferred from one of the parquet files in the directory.
    like_file : string
        Absolute path to Parquet file in HDFS to use for schema
        definitions. An alternative to having to supply an explicit schema
    like_table : string
        Fully scoped and escaped string to an Impala table whose schema we
        will use for the newly created table.
    name : string, optional
        random unique name generated otherwise
    database : string, optional
        Database to create the (possibly temporary) table in
    external : boolean, default True
        If a table is external, the referenced data will not be deleted when
        the table is dropped in Impala. Otherwise (external=False) Impala
        takes ownership of the Parquet file.
    persist : boolean, default False
        Do not drop the table upon Ibis garbage collection / interpreter
        shutdown

    Returns
    -------
    parquet_table : ImpalaTable
    """
    name, database = self._get_concrete_table_path(name, database, persist=persist)
    # With no schema source at all, pick any file in the directory so the
    # schema can be inferred from it.
    if like_file is None and like_table is None and schema is None:
        found_file = self.hdfs._find_any_file(hdfs_dir)
        like_file = pjoin(hdfs_dir, found_file)
    statement = ddl.CreateTableParquet(
        name,
        hdfs_dir,
        schema=schema,
        database=database,
        example_file=like_file,
        example_table=like_table,
        external=external,
        can_exist=False,
    )
    self._execute(statement)
    return self._wrap_new_table(name, database, persist)
def filter_international_words(buf):
    """We define three types of bytes:
    alphabet: english alphabets [a-zA-Z]
    international: international characters [\x80-\xFF]
    marker: everything else [^a-zA-Z\x80-\xFF]

    The input buffer can be thought to contain a series of words delimited
    by markers. This function works to filter all words that contain at
    least one international character. All contiguous sequences of markers
    are replaced by a single space ascii character.

    This filter applies to all scripts which do not use English characters.
    """
    filtered = bytearray()
    # Match words containing at least one international character,
    # optionally followed by a single trailing marker byte.
    words = re.findall(b'[a-zA-Z]*[\x80-\xFF]+[a-zA-Z]*[^a-zA-Z\x80-\xFF]?', buf)
    for word in words:
        body, tail = word[:-1], word[-1:]
        filtered.extend(body)
        # Collapse a trailing marker into a single space: markers are used
        # similarly across languages, so they shouldn't affect analysis.
        if not tail.isalpha() and tail < b'\x80':
            tail = b' '
        filtered.extend(tail)
    return filtered
def delete_whitespaces(self, arg):
    """Removes newlines, tabs and whitespaces at the beginning, the end and if there is more than one.

    :param arg: A string, the string which shall be cleaned
    :return: A string, the cleaned string
    """
    # Order matters: every pass operates on the output of the previous one.
    cleanup_passes = (
        re_newline_spc,         # whitespace right after a newline
        re_starting_whitespc,   # leading whitespace/tabs/newlines
        re_multi_spc_tab,       # space/tab followed by another space/tab
        re_double_newline,      # doubled newlines
        re_ending_spc_newline,  # trailing whitespace and newlines
    )
    for pattern in cleanup_passes:
        arg = re.sub(pattern, '', arg)
    return arg
def build_options(func):
    """Add "build" Click options to function.

    :param function func: The function to wrap.
    :return: The wrapped function.
    :rtype: function
    """
    # Options are applied in the same sequence as the original chain of
    # decorator calls, so help ordering is unchanged.
    option_specs = (
        (('-a', '--banner-greatest-tag'), dict(is_flag=True, help='Override banner-main-ref to be the tag with the highest version number.')),
        (('-A', '--banner-recent-tag'), dict(is_flag=True, help='Override banner-main-ref to be the most recent committed tag.')),
        (('-b', '--show-banner'), dict(help='Show a warning banner.', is_flag=True)),
        (('-B', '--banner-main-ref'), dict(help="Don't show banner on this ref and point banner URLs to this ref. Default master.")),
        (('-i', '--invert'), dict(help='Invert/reverse order of versions.', is_flag=True)),
        (('-p', '--priority'), dict(type=click.Choice(('branches', 'tags')), help="Group these kinds of versions at the top (for themes that don't separate them).")),
        (('-r', '--root-ref'), dict(help='The branch/tag at the root of DESTINATION. Will also be in subdir. Default master.')),
        (('-s', '--sort'), dict(multiple=True, type=click.Choice(('semver', 'alpha', 'time')), help='Sort versions. Specify multiple times to sort equal values of one kind.')),
        (('-t', '--greatest-tag'), dict(is_flag=True, help='Override root-ref to be the tag with the highest version number.')),
        (('-T', '--recent-tag'), dict(is_flag=True, help='Override root-ref to be the most recent committed tag.')),
        (('-w', '--whitelist-branches'), dict(multiple=True, help='Whitelist branches that match the pattern. Can be specified more than once.')),
        (('-W', '--whitelist-tags'), dict(multiple=True, help='Whitelist tags that match the pattern. Can be specified more than once.')),
    )
    for args, kwargs in option_specs:
        func = click.option(*args, **kwargs)(func)
    return func
def purge_stream(self, stream_id, remove_definition=False, sandbox=None):
    """Clears all the data in a given stream and the calculated intervals.

    :param stream_id: The stream id
    :param remove_definition: Whether to remove the stream definition as well
    :param sandbox: The sandbox id (not supported)
    :return: None
    """
    if sandbox is not None:
        raise NotImplementedError
    try:
        stream = self.streams[stream_id]
    except KeyError:
        raise StreamNotFoundError(stream_id)
    # Wipe the stored instances and reset the record of computed intervals.
    self.data[stream_id] = StreamInstanceCollection()
    stream.calculated_intervals = TimeIntervals()
    if remove_definition:
        # Forget the stream entirely, not just its contents.
        del self.data[stream_id]
        del self.streams[stream_id]
def get_effective_agent_id(self):
    """Gets the Id of the effective agent in use by this session.

    If is_authenticated() is true, then the effective agent may be the
    same as the agent returned by get_authenticated_agent(). If
    is_authenticated() is false, then the effective agent may be a
    default agent used for authorization by an unknown or anonymous user.

    return: (osid.id.Id) - the effective agent
    compliance: mandatory - This method must be implemented.
    """
    if self.is_authenticated():
        return self._proxy.get_authentication().get_agent_id()
    if self._proxy is not None and self._proxy.has_effective_agent():
        return self._proxy.get_effective_agent_id()
    # No proxy/effective agent available: fall back to the guest agent.
    return Id(identifier='MC3GUE$T@MIT.EDU',
              namespace='osid.agent.Agent',
              authority='MIT-OEIT')
def phi_s(spin1x, spin1y, spin2x, spin2y):
    """Returns the sum of the in-plane perpendicular spin angles,
    wrapped into [0, 2*pi)."""
    total = (phi_from_spinx_spiny(spin1x, spin1y)
             + phi_from_spinx_spiny(spin2x, spin2y))
    return total % (2 * numpy.pi)
def _get_number_from_fmt ( fmt ) :
"""Helper function for extract _ values ,
figures out string length from format string .""" | if '%' in fmt : # its datetime
return len ( ( "{0:" + fmt + "}" ) . format ( dt . datetime . now ( ) ) )
else : # its something else
fmt = fmt . lstrip ( '0' )
return int ( re . search ( '[0-9]+' , fmt ) . group ( 0 ) ) |
def disconnect_all(self, context, ports):
    """Disconnect All Command: reassigns every vnic on the vm to the default
    network, which marks them as disconnected.

    :param models.QualiDriverModels.ResourceRemoteCommandContext context: the context the command runs on
    :param list[string] ports: the ports of the connection between the remote resource and the local resource, NOT IN USE!!!
    """
    vm_details = self._parse_remote_model(context)
    # Delegate the actual disconnect to the command wrapper.
    result = self.command_wrapper.execute_command_with_connection(
        context,
        self.virtual_switch_disconnect_command.disconnect_all,
        vm_details.vm_uuid)
    return set_command_result(result=result, unpicklable=False)
def register_error_code(code, exception_type, domain='core'):
    """Register a new error code.

    Records the mapping from ``code`` to ``(exception_type, domain)`` on the
    Logger class and adds the code to the domain's code set.

    :param code: numeric/string error code to register
    :param exception_type: exception class associated with the code
    :param domain: error domain the code belongs to (default 'core')
    """
    Logger._error_code_to_exception[code] = (exception_type, domain)
    # _domain_codes is expected to map each domain to a set of codes.
    Logger._domain_codes[domain].add(code)
def set_until(self, frame, lineno=None):
    """Stop when the current line number in frame is greater than lineno or
    when returning from frame."""
    # Default target: the line right after the current one.
    target = frame.f_lineno + 1 if lineno is None else lineno
    self._set_stopinfo(frame, target)
def longest_bitonic_sequence(array: list) -> int:
    """Calculate the length of the longest bitonic subsequence of a list.

    A bitonic sequence is a sequence of numbers which is first strictly
    increasing and then, after a point, strictly decreasing (either part
    may be empty).

    Examples:
        >>> longest_bitonic_sequence([1, 11, 2, 10, 4, 5, 2, 1])
        6
        >>> longest_bitonic_sequence([80, 60, 30, 40, 20, 10])
        5

    Args:
        array (list): A list of integers.

    Returns:
        int: Length of the longest bitonic subsequence (0 for an empty list).
    """
    n = len(array)
    if n == 0:
        # Bug fix: the original unconditionally indexed inc_subseq[0],
        # raising IndexError on an empty input.
        return 0
    # inc_subseq[i]: longest strictly increasing subsequence ending at i.
    inc_subseq = [1] * n
    for i in range(n):
        for j in range(i):
            if array[i] > array[j] and inc_subseq[i] < inc_subseq[j] + 1:
                inc_subseq[i] = inc_subseq[j] + 1
    # dec_subseq[i]: longest strictly decreasing subsequence starting at i.
    dec_subseq = [1] * n
    for i in reversed(range(n - 1)):
        for j in range(i + 1, n):
            if array[i] > array[j] and dec_subseq[i] < dec_subseq[j] + 1:
                dec_subseq[i] = dec_subseq[j] + 1
    # A peak at i contributes inc + dec - 1 (i itself is counted once).
    return max(inc_subseq[i] + dec_subseq[i] - 1 for i in range(n))
def break_at_capitals(input_text):
    """This function splits the given string at each occurrence of an
    uppercase letter.

    Args:
        input_text: A string containing mixed (upper and lower case) characters

    Returns:
        A list of words extracted by splitting at each uppercase character.
        Characters before the first uppercase letter are dropped.

    Examples:
        >>> break_at_capitals('LearnToBuildAnythingWithGoogle')
        ['Learn', 'To', 'Build', 'Anything', 'With', 'Google']
        >>> break_at_capitals('UpdateInTheGoEcoSystem')
        ['Update', 'In', 'The', 'Go', 'Eco', 'System']
    """
    import re
    # Each match starts at an uppercase letter and runs until the next one.
    word_pattern = re.compile('[A-Z][^A-Z]*')
    return word_pattern.findall(input_text)
def _build_namespace_dict(cls, obj, dots=False):
    """Recursively replaces all argparse.Namespace and optparse.Values
    with dicts and drops any keys with None values.

    Additionally, if dots is True, will expand any dot delimited
    keys.

    :param obj: Namespace, Values, or dict to iterate over. Other
        values will simply be returned.
    :type obj: argparse.Namespace or optparse.Values or dict or *
    :param dots: If True, any properties on obj that contain dots (.)
        will be broken down into child dictionaries.
    :return: A new dictionary or the value passed if obj was not a
        dict, Namespace, or Values.
    :rtype: dict or *
    """
    # We expect our root object to be a dict, but it may come in as
    # a namespace
    obj = namespace_to_dict(obj)
    # We only deal with dictionaries; any other value is returned verbatim
    # (this is also the recursion's base case for leaf values).
    if not isinstance(obj, dict):
        return obj
    # Get keys iterator (PY3 dicts have no iterkeys)
    keys = obj.keys() if PY3 else obj.iterkeys()
    if dots:
        # Dots needs sorted keys to prevent parents from
        # clobbering children (e.g. "a" must be handled before "a.b")
        keys = sorted(list(keys))
    output = {}
    for key in keys:
        value = obj[key]
        if value is None:
            # Avoid unset options.
            continue
        save_to = output
        # Recurse first: nested namespaces/dicts become plain dicts.
        result = cls._build_namespace_dict(value, dots)
        if dots:
            # Split keys by dots as this signifies nesting
            split = key.split('.')
            if len(split) > 1:
                # The last index will be the key we assign result to
                key = split.pop()
                # Build the dict tree if needed and change where
                # we're saving to
                for child_key in split:
                    if child_key in save_to and isinstance(save_to[child_key], dict):
                        save_to = save_to[child_key]
                    else:
                        # Clobber or create
                        save_to[child_key] = {}
                        save_to = save_to[child_key]
        # Save: merge into an existing dict at this key, otherwise assign.
        if key in save_to:
            save_to[key].update(result)
        else:
            save_to[key] = result
    return output
def read_pkginfo(filename):
    """Help us read the pkginfo without accessing __init__.

    Parses simple ``option = value`` lines; anything after a '#' on a line
    is treated as a comment and stripped before parsing.

    :param filename: path of the pkginfo file to parse
    :return: dict mapping option names to their (stripped, string) values
    """
    COMMENT_CHAR = '#'
    OPTION_CHAR = '='
    options = {}
    # Use a context manager so the file is closed even if parsing raises
    # (the original leaked the handle on error).
    with open(filename) as f:
        for line in f:
            # Strip trailing comments.
            if COMMENT_CHAR in line:
                line, _ = line.split(COMMENT_CHAR, 1)
            if OPTION_CHAR in line:
                option, value = line.split(OPTION_CHAR, 1)
                options[option.strip()] = value.strip()
    return options
def generateFeatures(numFeatures):
    """Return string features.

    If <= 62 features are requested, output will be single character
    alphanumeric strings. Otherwise, output will be ["F0", "F1", ...].

    :param numFeatures: number of feature names to generate
    :return: list of ``numFeatures`` feature-name strings
    """
    # Capital letters, lowercase letters, numbers: 62 candidates total.
    # Fixed: the original used xrange, which is Python 2 only and raises
    # NameError on Python 3 (this file uses Python-3-only APIs elsewhere).
    candidates = ([chr(i + 65) for i in range(26)]
                  + [chr(i + 97) for i in range(26)]
                  + [chr(i + 48) for i in range(10)])
    if numFeatures > len(candidates):
        # Too many for single characters; fall back to "F<i>" names.
        return ["F{}".format(i) for i in range(numFeatures)]
    return candidates[:numFeatures]
def from_backend(self, dagobah_id):
    """Reconstruct this Dagobah instance from the backend.

    :param dagobah_id: backend ID of the Dagobah record to load
    :raises DagobahError: if no record with that ID exists in the backend
    """
    logger.debug('Reconstructing Dagobah instance from backend with ID {0}'.format(dagobah_id))
    record = self.backend.get_dagobah_json(dagobah_id)
    if not record:
        raise DagobahError('dagobah with id %s does not exist in backend'
                           % dagobah_id)
    self._construct_from_json(record)
def _write_var_data_sparse(self, f, zVar, var, dataType, numElems, recVary, oneblock):
    '''Writes a VVR and a VXR for this block of sparse data.

    Parameters:
        f : file
            The open CDF file
        zVar : bool
            True if this is for a z variable
        var : int
            The variable number
        dataType : int
            The CDF data type of this variable
        numElems : str
            The number of elements in each record
        recVary : bool
            True if the value varies across records
        oneblock : list
            A list of data in the form [startrec, endrec, [data]]

    Returns:
        recend : int
            Just the "endrec" value input by the user in "oneblock"
    '''
    rec_start = oneblock[0]
    rec_end = oneblock[1]
    indata = oneblock[2]
    numValues = self._num_values(zVar, var)
    # Convert oneblock[2] into a byte stream
    _, data = self._convert_data(dataType, numElems, numValues, indata)
    # Gather dimension information: the VDR offset for this variable
    # (zvarsinfo/rvarsinfo entries hold the offset at index 1)
    if zVar:
        vdr_offset = self.zvarsinfo[var][1]
    else:
        vdr_offset = self.rvarsinfo[var][1]
    # Write one VVR holding the converted bytes
    offset = self._write_vvr(f, data)
    # Seek to the VDR's VXR-head field (28 bytes into the VDR)
    f.seek(vdr_offset + 28, 0)
    # Get first VXR
    vxrOne = int.from_bytes(f.read(8), 'big', signed=True)
    foundSpot = 0
    usedEntries = 0
    currentVXR = 0
    # Search through VXRs to find an open one
    while foundSpot == 0 and vxrOne > 0:
        # have a VXR
        f.seek(vxrOne, 0)
        currentVXR = f.tell()
        # Read the VXR's next-pointer and entry counts (at offset +12)
        f.seek(vxrOne + 12, 0)
        vxrNext = int.from_bytes(f.read(8), 'big', signed=True)
        nEntries = int.from_bytes(f.read(4), 'big', signed=True)
        usedEntries = int.from_bytes(f.read(4), 'big', signed=True)
        if (usedEntries == nEntries):
            # all entries are used -- check the next vxr in link
            vxrOne = vxrNext
        else:
            # found a vxr with an available entry spot
            foundSpot = 1
    # vxrOne == 0 from vdr's vxrhead; vxrOne == -1 from a vxr's vxrnext
    if (vxrOne == 0 or vxrOne == - 1):
        # no available vxr... create a new one
        currentVXR = self._create_vxr(f, rec_start, rec_end, vdr_offset, currentVXR, offset)
    else:
        self._use_vxrentry(f, currentVXR, rec_start, rec_end, offset)
    # Modify the VDR's MaxRec (at offset +24) if this block extends it
    f.seek(vdr_offset + 24, 0)
    recNumc = int.from_bytes(f.read(4), 'big', signed=True)
    if (rec_end > recNumc):
        self._update_offset_value(f, vdr_offset + 24, 4, rec_end)
    return rec_end
def statement(self):
    """statement : assign_statement
                 | expression
                 | control
                 | empty
    Feature For Loop adds:
                 | loop
    Feature Func adds:
                 | func
                 | return statement
    """
    if self.cur_token.type == TokenTypes.VAR:
        # A statement beginning with a variable may be either an assignment
        # or a bare expression. Parse the variable with the tokenizer in
        # "saving" mode, peek at the token that follows, then replay the
        # saved tokens so the real parse starts from scratch.
        self.tokenizer.start_saving(self.cur_token)
        self.variable()
        peek_var = self.cur_token
        self.tokenizer.replay()
        self.eat()
        if peek_var.type == TokenTypes.ASSIGN:
            return self.assign_statement()
        else:
            return self.expression()
    elif self.cur_token.type in TokenTypes.control(self.features):
        return self.control()
    elif self.cur_token.type in TokenTypes.loop(self.features):
        return self.loop()
    elif self.cur_token.type in TokenTypes.func(self.features):
        # Feature Func: both function definitions and return statements
        # arrive through the same feature token set.
        if self.cur_token.type == TokenTypes.FUNC:
            return self.func()
        elif self.cur_token.type == TokenTypes.RETURN:
            return self.return_statement()
    # No production matched (falls through here also if the func feature
    # token was neither FUNC nor RETURN).
    self.error("Invalid token or unfinished statement")
def renderForSignal(self, stm: Union[HdlStatement, List[HdlStatement]], s: RtlSignalBase, connectOut) -> Optional[Tuple[LNode, Union[RtlSignalBase, LPort]]]:
    """Walk statement and render nodes which are representing
    hardware components (MUX, LATCH, FF, ...) for specified signal.

    :param stm: a single HdlStatement or a list of statements to filter
        for drivers of ``s``
    :param s: the signal whose driver hardware is being rendered
    :param connectOut: passed through to the node constructors; controls
        whether the rendered node's output is connected
    :return: None when no statement drives ``s``, otherwise a
        (node, output-port-or-signal) tuple
    """
    # filter statements for this signal only if required
    if not isinstance(stm, HdlStatement):
        stm = list(walkStatementsForSig(stm, s))
        if not stm:
            return None
        elif len(stm) != 1:
            raise NotImplementedError("deduced MUX")
        else:
            stm = stm[0]
    # render assignment instances
    if isinstance(stm, Assignment):
        return self.createAssignment(stm, connectOut)
    encl = stm._enclosed_for
    full_ev_dep = stm._is_completly_event_dependent
    par = stm.parentStm
    parent_ev_dep = par is not None and par._now_is_event_dependent
    # render IfContainer instances
    if isinstance(stm, IfContainer):
        if full_ev_dep and not parent_ev_dep:
            # FF with optional MUX
            return self.renderEventDepIfContainer(stm, s, connectOut)
        else:
            # A latch appears when a top-level, non-event-dependent branch
            # does not assign ``s`` in every path (s not fully enclosed).
            latched = par is None and not parent_ev_dep and s not in encl
            # MUX/LATCH/MUX+LATCH
            controls = [stm.cond]
            ren = self.renderForSignal(stm.ifTrue, s, False)
            if ren is not None:
                inputs = [ren[1]]
            else:
                inputs = []
            # Each elif contributes one more control and (possibly) input.
            for c, stms in stm.elIfs:
                controls.append(c)
                ren = self.renderForSignal(stms, s, False)
                if ren is not None:
                    inputs.append(ren[1])
            if stm.ifFalse:
                ren = self.renderForSignal(stm.ifFalse, s, False)
                if ren is not None:
                    inputs.append(ren[1])
            return self.createMux(s, inputs, controls, connectOut, latched=latched)
    # render SwitchContainer instances
    elif isinstance(stm, SwitchContainer):
        latched = s not in encl
        inputs = []
        for _, stms in stm.cases:
            d = self.renderForSignal(stms, s, False)
            if d is not None:
                _, port = d
                inputs.append(port)
            else:
                # A case without a driver for s is only legal for a latch.
                assert latched, (s, stm)
        if stm.default:
            d = self.renderForSignal(stm.default, s, False)
            if d is not None:
                _, port = d
                inputs.append(port)
            else:
                assert latched, (s, stm)
        return self.createMux(s, inputs, stm.switchOn, connectOut, latched=latched)
    else:
        raise TypeError(stm)
def install(self, force_install=False):
    """Installs an app. Blocks until the installation is complete, or raises
    :exc:`AppInstallError` if it fails.

    While this method runs, "progress" events will be emitted regularly with
    the following signature: ``(sent_this_interval, sent_total, total_size)``

    :param force_install: Install even if installing this pbw on this platform
        is usually forbidden.
    :type force_install: bool
    """
    permitted = force_install or self._bundle.should_permit_install()
    if not permitted:
        raise AppInstallError("This pbw is not supported on this platform.")
    # Firmware 2.x requires the legacy install path; 3.x+ uses the modern one.
    if self._pebble.firmware_version.major < 3:
        self._install_legacy2()
    else:
        self._install_modern()
def getCollapseRequestsFn(self):
    """Helper function to determine which collapseRequests function to use
    from L{_collapseRequests}, or None for no merging."""
    # Resolution order: builder config, then global master config,
    # then the default (True).
    chosen = self.config.collapseRequests
    if chosen is None:
        chosen = self.master.config.collapseRequests
    if chosen is None:
        chosen = True
    # Normalize booleans: False means "never collapse" (None), True means
    # use the default collapsing implementation.
    if chosen is False:
        return None
    if chosen is True:
        return self._defaultCollapseRequestFn
    return chosen
def _load_compiled ( self , file_path ) :
"""Accepts a path to a compiled plugin and returns a module object .
file _ path : A string that represents a complete file path to a compiled
plugin .""" | name = os . path . splitext ( os . path . split ( file_path ) [ - 1 ] ) [ 0 ]
plugin_directory = os . sep . join ( os . path . split ( file_path ) [ 0 : - 1 ] )
compiled_directory = os . path . join ( plugin_directory , '__pycache__' )
# Use glob to autocomplete the filename .
compiled_file = glob . glob ( os . path . join ( compiled_directory , ( name + '.*' ) ) ) [ 0 ]
plugin = imp . load_compiled ( name , compiled_file )
return plugin |
def getConstructorArguments():
    """Return constructor argument info associated with ColumnPooler.

    @return (args, defaults) tuple: the list of argument names (excluding
        ``self``) and the tuple of default values for the trailing arguments
    """
    # inspect.getargspec was deprecated since 3.0 and removed in Python 3.11;
    # getfullargspec exposes the same .args/.defaults attributes.
    argspec = inspect.getfullargspec(ColumnPooler.__init__)
    return argspec.args[1:], argspec.defaults
def address(self):
    """The full proxied address to this page"""
    target_path = urlsplit(self.target).path
    # Preserve a trailing slash (and add one for an empty path).
    if not target_path or target_path.endswith('/'):
        suffix = '/'
    else:
        suffix = ''
    return '%s%s/%s%s' % (self._ui_address[:-1], self._proxy_prefix,
                          self.route, suffix)
def load(self):
    """Loads the children for this record item.

    :return <bool> | changed
    """
    # Only ever load once per item.
    if self.__loaded:
        return False
    self.__loaded = True
    self.setChildIndicatorPolicy(self.DontShowIndicatorWhenChildless)
    tree = self.treeWidget()
    if tree.groupBy():
        # Grouped display: one group item per group of child records.
        grouped = self.childRecords().grouped(tree.groupBy())
        for grp, records in grouped.items():
            tree.createGroupItem(grp, records, self)
    else:
        # Flat display: one item per child record.
        for record in self.childRecords():
            tree.createRecordItem(record, self)
    return True
def read_next_maf(file, species_to_lengths=None, parse_e_rows=False):
    """Read the next MAF block from `file` and return as an `Alignment`
    instance. If `parse_e_rows` is true, empty components will be created
    when e rows are encountered.

    :param file: file-like object positioned at (or before) an 'a' line
    :param species_to_lengths: optional mapping passed through to `Alignment`
    :param parse_e_rows: create empty components for 'e' rows when true
    :return: the parsed `Alignment`, or None at EOF
    """
    alignment = Alignment(species_to_lengths=species_to_lengths)
    # Attributes line
    line = readline(file, skip_blank=True)
    if not line:
        return None
    fields = line.split()
    if fields[0] != 'a':
        raise Exception("Expected 'a ...' line")
    alignment.attributes = parse_attributes(fields[1:])
    # 'score' is promoted from the attribute dict to its own property.
    if 'score' in alignment.attributes:
        alignment.score = alignment.attributes['score']
        del alignment.attributes['score']
    else:
        alignment.score = 0
    # Sequence lines
    last_component = None
    while True:
        line = readline(file)
        # EOF or blank line terminates alignment components.
        # (The original repeated the isspace() check a second time below;
        # that duplicate was unreachable and has been removed.)
        if not line or line.isspace():
            break
        # Parse row
        fields = line.split()
        if fields[0] == 's':
            # An 's' row contains sequence for a component
            component = Component()
            component.src = fields[1]
            component.start = int(fields[2])
            component.size = int(fields[3])
            component.strand = fields[4]
            component.src_size = int(fields[5])
            if len(fields) > 6:
                component.text = fields[6].strip()
            # Add to set
            alignment.add_component(component)
            last_component = component
        elif fields[0] == 'e':
            # An 'e' row: no bases align for a given species; this tells
            # us something about the synteny
            if parse_e_rows:
                component = Component()
                component.empty = True
                component.src = fields[1]
                component.start = int(fields[2])
                component.size = int(fields[3])
                component.strand = fields[4]
                component.src_size = int(fields[5])
                component.text = None
                synteny = fields[6].strip()
                assert len(synteny) == 1, "Synteny status in 'e' rows should be denoted with a single character code"
                component.synteny_empty = synteny
                alignment.add_component(component)
                last_component = component
        elif fields[0] == 'i':
            # An 'i' row indicates left and right synteny status for the
            # previous component, we hope ;)
            assert fields[1] == last_component.src, "'i' row does not follow matching 's' row"
            last_component.synteny_left = (fields[2], int(fields[3]))
            last_component.synteny_right = (fields[4], int(fields[5]))
        elif fields[0] == 'q':
            assert fields[1] == last_component.src, "'q' row does not follow matching 's' row"
            # TODO: Should convert this to an integer array?
            last_component.quality = fields[2]
    return alignment
def retry(*dargs, **dkw):
    """Wrap a function with a new `Retrying` object.

    :param dargs: positional arguments passed to Retrying object
    :param dkw: keyword arguments passed to the Retrying object
    """
    # Support both @retry and @retry() as valid syntax: a bare @retry passes
    # the decorated function itself as the single positional argument.
    if len(dargs) == 1 and callable(dargs[0]):
        return retry()(dargs[0])

    def wrap(f):
        # Pick the Retrying flavour matching the coroutine framework, if any
        # (asyncio/tornado may be None when unavailable).
        if asyncio and asyncio.iscoroutinefunction(f):
            retrying = AsyncRetrying(*dargs, **dkw)
        elif (tornado and hasattr(tornado.gen, 'is_coroutine_function')
                and tornado.gen.is_coroutine_function(f)):
            retrying = TornadoRetrying(*dargs, **dkw)
        else:
            retrying = Retrying(*dargs, **dkw)
        return retrying.wraps(f)

    return wrap
def set(self, section, key, value, comment=None):
    """Set config value with data type transformation (to str).

    :param str section: Section to set config for
    :param str key: Key to set config for
    :param value: Value for key. It can be any primitive type.
    :param str comment: Comment for the key
    """
    self._read_sources()
    # Resolve dotted aliases back to their canonical section/key names.
    if (section, key) in self._dot_keys:
        section, key = self._dot_keys[(section, key)]
    elif section in self._dot_keys:
        section = self._dot_keys[section]
    # The underlying parser only stores strings.
    stored = value if isinstance(value, str) else str(value)
    self._parser.set(section, key, stored)
    self._add_dot_key(section, key)
    if comment:
        self._set_comment(section, comment, key)
def make_article_info_dates(self):
    """Makes the section containing important dates for the article: typically
    Received, Accepted, and Published.

    :return: the ``<div id="article-dates">`` element
    """
    dates_div = etree.Element('div', {'id': 'article-dates'})

    def append_date(date_node, label, trailing):
        # Render "<b>Label: </b>formatted-date<trailing>" into dates_div.
        # (Factored out of three copy-pasted stanzas in the original.)
        b = etree.SubElement(dates_div, 'b')
        b.text = label + ': '
        dt = self.date_tuple_from_date(date_node, label)
        formatted_date_string = self.format_date_string(dt)
        append_new_text(dates_div, formatted_date_string + trailing)

    d = './front/article-meta/history/date'
    received = self.article.root.xpath(d + "[@date-type='received']")
    accepted = self.article.root.xpath(d + "[@date-type='accepted']")
    if received:
        append_date(received[0], 'Received', '; ')
    if accepted:
        append_date(accepted[0], 'Accepted', '; ')
    # Published date is required, so indexing [0] is assumed safe here.
    pub_date = self.article.root.xpath("./front/article-meta/pub-date[@pub-type='epub']")[0]
    append_date(pub_date, 'Published', '')
    return dates_div
def processPayment(self, amount, fees, paidOnline=True, methodName=None, methodTxn=None, submissionUser=None, collectedByUser=None, forceFinalize=False, status=None, notify=None):
    '''When a payment processor makes a successful payment against an invoice, it can call this method
    which handles status updates, the creation of a final registration object (if applicable), and
    the firing of appropriate registration-related signals.

    :param amount: payment amount applied to the invoice
    :param fees: transaction fees associated with this payment
    :param paidOnline: whether the payment was made online
    :param methodName: name of the payment method (convenience record only)
    :param methodTxn: processor transaction id (convenience record only)
    :param submissionUser: user who submitted the payment, if any
    :param collectedByUser: user who collected the payment, if any
    :param forceFinalize: finalize the registration even if a balance remains
    :param status: explicit invoice status to set on finalization
    :param notify: payer email address to notify, if any
    '''
    # Tolerance for treating the outstanding balance as zero (floats).
    epsilon = .01
    paymentTime = timezone.now()
    logger.info('Processing payment and creating registration objects if applicable.')
    # The payment history record is primarily for convenience, and passed values are not
    # validated. Payment processing apps should keep individual transaction records with
    # a ForeignKey to the Invoice object.
    paymentHistory = self.data.get('paymentHistory', [])
    paymentHistory.append({'dateTime': paymentTime.isoformat(), 'amount': amount, 'fees': fees, 'paidOnline': paidOnline, 'methodName': methodName, 'methodTxn': methodTxn, 'submissionUser': getattr(submissionUser, 'id', None), 'collectedByUser': getattr(collectedByUser, 'id', None), })
    self.data['paymentHistory'] = paymentHistory
    self.amountPaid += amount
    self.fees += fees
    self.paidOnline = paidOnline
    # Only record the first submission/collection user; never overwrite.
    if submissionUser and not self.submissionUser:
        self.submissionUser = submissionUser
    if collectedByUser and not self.collectedByUser:
        self.collectedByUser = collectedByUser
    # if this completed the payment, then finalize the registration and mark
    # the invoice as Paid unless told to do otherwise.
    if forceFinalize or abs(self.outstandingBalance) < epsilon:
        self.status = status or self.PaymentStatus.paid
        if not self.finalRegistration and self.temporaryRegistration:
            self.finalRegistration = self.temporaryRegistration.finalize(dateTime=paymentTime)
        else:
            # Already finalized (or nothing to finalize): just notify.
            self.sendNotification(invoicePaid=True, thisPaymentAmount=amount, payerEmail=notify)
        self.save()
        if self.finalRegistration:
            for eventReg in self.finalRegistration.eventregistration_set.filter(cancelled=False):
                # There can only be one eventreg per event in a registration, so we
                # can filter on temporaryRegistration event to get the invoiceItem
                # to which we should attach a finalEventRegistration
                this_invoice_item = self.invoiceitem_set.filter(temporaryEventRegistration__event=eventReg.event, finalEventRegistration__isnull=True).first()
                if this_invoice_item:
                    this_invoice_item.finalEventRegistration = eventReg
                    this_invoice_item.save()
    else:
        # The payment wasn't completed so don't finalize, but do send a notification recording the payment.
        if notify:
            self.sendNotification(invoicePaid=True, thisPaymentAmount=amount, payerEmail=notify)
        else:
            self.sendNotification(invoicePaid=True, thisPaymentAmount=amount)
        self.save()
    # If there were transaction fees, then these also need to be allocated among the InvoiceItems
    # All fees from payments are allocated proportionately.
    self.allocateFees()
def getmembers(self):
    """Gets members (vars) from all scopes, using both runtime and static.

    This method will attempt both static and runtime getmembers. This is the
    recommended way of getting available members.

    Returns:
        Set of available vars.

    Raises:
        NotImplementedError if any scope fails to implement 'getmembers'.
    """
    names = set()
    for scope in self.scopes:
        # Classes are inspected statically; instances are inspected at runtime.
        if isinstance(scope, type):
            getter = structured.getmembers_static
        else:
            getter = structured.getmembers_runtime
        names.update(getter(scope))
    return names
def make_basename(self, fn=None, ext=None):
    """make a filesystem-compliant basename for this file"""
    base, current_ext = os.path.splitext(os.path.basename(fn or self.fn))
    # Keep the explicit extension if given; otherwise lowercase the existing one.
    extension = ext or current_ext.lower()
    # hyphenify() produces a safe, ascii, hyphen-separated stem.
    return String(base).hyphenify(ascii=True) + extension
def get_path_contents(self, project, provider_name, service_endpoint_id=None, repository=None, commit_or_branch=None, path=None):
    """GetPathContents.

    [Preview API] Gets the contents of a directory in the given source code repository.

    :param str project: Project ID or project name
    :param str provider_name: The name of the source provider.
    :param str service_endpoint_id: If specified, the ID of the service endpoint to query. Can only be omitted for providers that do not use service endpoints, e.g. TFVC or TFGit.
    :param str repository: If specified, the vendor-specific identifier or the name of the repository to get branches. Can only be omitted for providers that do not support multiple repositories.
    :param str commit_or_branch: The identifier of the commit or branch from which a file's contents are retrieved.
    :param str path: The path contents to list, relative to the root of the repository.
    :rtype: [SourceRepositoryItem]
    """
    # Route values are URL-encoded; query parameters are query-encoded.
    route_values = {}
    for wire_name, py_name, val in (('project', 'project', project),
                                    ('providerName', 'provider_name', provider_name)):
        if val is not None:
            route_values[wire_name] = self._serialize.url(py_name, val, 'str')
    query_parameters = {}
    for wire_name, py_name, val in (('serviceEndpointId', 'service_endpoint_id', service_endpoint_id),
                                    ('repository', 'repository', repository),
                                    ('commitOrBranch', 'commit_or_branch', commit_or_branch),
                                    ('path', 'path', path)):
        if val is not None:
            query_parameters[wire_name] = self._serialize.query(py_name, val, 'str')
    response = self._send(http_method='GET', location_id='7944d6fb-df01-4709-920a-7a189aa34037', version='5.0-preview.1', route_values=route_values, query_parameters=query_parameters)
    return self._deserialize('[SourceRepositoryItem]', self._unwrap_collection(response))
def parse_security_group(self, global_params, region, group):
    """Parse a single security group into the per-VPC configuration.

    :param global_params: Parameters shared for all regions
    :param region: Name of the AWS region
    :param group: Security group dict as returned by the AWS API
    """
    # Groups without a VpcId belong to EC2-Classic.
    vpc_id = group['VpcId'] if 'VpcId' in group and group['VpcId'] else ec2_classic
    manage_dictionary(self.vpcs, vpc_id, VPCConfig(self.vpc_resource_types))
    ingress_protocols, ingress_count = self.__parse_security_group_rules(group['IpPermissions'])
    egress_protocols, egress_count = self.__parse_security_group_rules(group['IpPermissionsEgress'])
    security_group = {
        'name': group['GroupName'],
        'id': group['GroupId'],
        'description': group['Description'],
        'owner_id': group['OwnerId'],
        'rules': {
            'ingress': {'protocols': ingress_protocols, 'count': ingress_count},
            'egress': {'protocols': egress_protocols, 'count': egress_count},
        },
    }
    self.vpcs[vpc_id].security_groups[group['GroupId']] = security_group
def check_if_not(x, *checks, **params):
    """Run checks only if parameters are not equal to a specified value.

    Parameters
    ----------
    x : excepted value
        Checks are not run for parameters equal (or identical) to x
    checks : function
        Unnamed arguments, check functions to be run
    params : object
        Named arguments, parameters to be checked

    Raises
    ------
    ValueError : unacceptable choice of parameters
    """
    for name, value in params.items():
        # The identity test short-circuits first so that a value that is
        # the very same object as x skips the checks even if it compares
        # unequal to itself (e.g. NaN).
        if value is not x and value != x:
            # Fixed idiom: the original built a throwaway list comprehension
            # purely for its side effects; a plain loop expresses the intent.
            for check in checks:
                check(**{name: value})
def _rle_decode ( data ) :
"""Decodes run - length - encoded ` data ` .""" | if not data :
return data
new = b''
last = b''
for cur in data :
if last == b'\0' :
new += last * cur
last = b''
else :
new += last
last = bytes ( [ cur ] )
return new + last |
def _renew_token(self, retry=True):
    """Renew expired ThreatConnect Token.

    :param retry: when True, failed renewals are retried once (each retry
        path recurses with retry=False so at most one retry happens)
    """
    self.renewing = True
    self.log.info('Renewing ThreatConnect Token')
    self.log.info('Current Token Expiration: {}'.format(self._token_expiration))
    try:
        params = {'expiredToken': self._token}
        url = '{}/appAuth'.format(self._token_url)
        r = get(url, params=params, verify=self._session.verify)
        if not r.ok or 'application/json' not in r.headers.get('content-type', ''):
            if (r.status_code == 401 and 'application/json' in r.headers.get('content-type', '') and 'Retry token is invalid' in r.json().get('message')):
                # TODO: remove this once token renewal issue is fixed
                self.log.error('params: {}'.format(params))
                self.log.error('url: {}'.format(r.url))
                # log failure
                err_reason = r.text or r.reason
                err_msg = 'Token Retry Error. API status code: {}, API message: {}.'
                raise RuntimeError(1042, err_msg.format(r.status_code, err_reason))
            elif retry:
                warn_msg = 'Token Retry Error. API status code: {}, API message: {}.'
                self.log.warning(warn_msg.format(r.status_code, r.text))
                # delay and retry token renewal (single retry: recurse with retry=False)
                time.sleep(15)
                self._renew_token(False)
            else:
                err_reason = r.text or r.reason
                err_msg = 'Token Retry Error. API status code: {}, API message: {}.'
                raise RuntimeError(1042, err_msg.format(r.status_code, err_reason))
        data = r.json()
        if retry and (data.get('apiToken') is None or data.get('apiTokenExpires') is None):
            # add retry logic to handle case if the token renewal doesn't return valid data
            warn_msg = 'Token Retry Error: no values for apiToken or apiTokenExpires ({}).'
            self.log.warning(warn_msg.format(r.text))
            self._renew_token(False)
        else:
            self._token = data.get('apiToken')
            self._token_expiration = int(data.get('apiTokenExpires'))
            self.log.info('New Token Expiration: {}'.format(self._token_expiration))
        self.renewing = False
    except exceptions.SSLError:
        self.log.error(u'SSL Error during token renewal.')
        self.renewing = False
def append_field(self, header, value, mask=None):
    """Append a match field to this match.

    :param header: match field header ID, defined automatically in ``ofproto``
                   (see the table of available header IDs below).
    :param value: match field value.
    :param mask: optional mask value applied to the match field.

    The available ``header`` values are as follows.

    ====================== =========================================
    Header ID              Description
    ====================== =========================================
    OXM_OF_IN_PORT         Switch input port
    OXM_OF_IN_PHY_PORT     Switch physical input port
    OXM_OF_METADATA        Metadata passed between tables
    OXM_OF_ETH_DST         Ethernet destination address
    OXM_OF_ETH_SRC         Ethernet source address
    OXM_OF_ETH_TYPE        Ethernet frame type
    OXM_OF_VLAN_VID        VLAN id
    OXM_OF_VLAN_PCP        VLAN priority
    OXM_OF_IP_DSCP         IP DSCP (6 bits in ToS field)
    OXM_OF_IP_ECN          IP ECN (2 bits in ToS field)
    OXM_OF_IP_PROTO        IP protocol
    OXM_OF_IPV4_SRC        IPv4 source address
    OXM_OF_IPV4_DST        IPv4 destination address
    OXM_OF_TCP_SRC         TCP source port
    OXM_OF_TCP_DST         TCP destination port
    OXM_OF_UDP_SRC         UDP source port
    OXM_OF_UDP_DST         UDP destination port
    OXM_OF_SCTP_SRC        SCTP source port
    OXM_OF_SCTP_DST        SCTP destination port
    OXM_OF_ICMPV4_TYPE     ICMP type
    OXM_OF_ICMPV4_CODE     ICMP code
    OXM_OF_ARP_OP          ARP opcode
    OXM_OF_ARP_SPA         ARP source IPv4 address
    OXM_OF_ARP_TPA         ARP target IPv4 address
    OXM_OF_ARP_SHA         ARP source hardware address
    OXM_OF_ARP_THA         ARP target hardware address
    OXM_OF_IPV6_SRC        IPv6 source address
    OXM_OF_IPV6_DST        IPv6 destination address
    OXM_OF_IPV6_FLABEL     IPv6 Flow Label
    OXM_OF_ICMPV6_TYPE     ICMPv6 type
    OXM_OF_ICMPV6_CODE     ICMPv6 code
    OXM_OF_IPV6_ND_TARGET  Target address for ND
    OXM_OF_IPV6_ND_SLL     Source link-layer for ND
    OXM_OF_IPV6_ND_TLL     Target link-layer for ND
    OXM_OF_MPLS_LABEL      MPLS label
    OXM_OF_MPLS_TC         MPLS TC
    OXM_OF_MPLS_BOS        MPLS BoS bit
    OXM_OF_PBB_ISID        PBB I-SID
    OXM_OF_TUNNEL_ID       Logical Port Metadata
    OXM_OF_IPV6_EXTHDR     IPv6 Extension Header pseudo-field
    ====================== =========================================
    """
    # OFPMatchField.make builds the concrete field object for the header ID.
    self.fields.append(OFPMatchField.make(header, value, mask))
def contacted(self):
    """:returns: ``False`` if the logged in user has not contacted the owner
        of this profile; otherwise the parsed date of the last contact.

    NOTE(review): despite the original docstring claiming a boolean, the
    success path returns whatever ``helpers.parse_date_updated`` produces.
    """
    try:
        contacted_span = self._contacted_xpb.one_(self.profile_tree)
    except Exception:
        # The xpath lookup fails when no "Last contacted" span is present;
        # a bare `except:` here would also swallow KeyboardInterrupt/SystemExit.
        return False
    else:
        timestamp = contacted_span.replace('Last contacted ', '')
        return helpers.parse_date_updated(timestamp)
def check_photometry_categorize(x, y, levels, tags=None):
    '''Put every point in its category.

    levels must be sorted.

    :param x: sequence of abscissa values, one per point.
    :param y: sequence of photometry values, one per point.
    :param levels: sorted sequence of thresholds on the normalized flux.
    :param tags: optional labels, one per category (``len(levels) + 1``);
                 defaults to ``0 .. len(levels)``.
    :returns: ``(result, (m, s))`` where ``result`` is a list of
              ``(x_subset, y_subset, tag)`` tuples and ``m``/``s`` describe
              the normalization.
    '''
    x = numpy.asarray(x)
    # Work on a float copy: the in-place `y /= m` below would otherwise
    # mutate the caller's array (and raise on integer ndarrays).
    y = numpy.array(y, dtype=float)
    ys = numpy.sort(y)
    # Normalize by the mean of the upper half of the values.
    m = ys[len(ys) // 2:].mean()
    y /= m
    m = 1.0
    # NOTE(review): std of the *unnormalized* upper half, as in the original
    # — confirm whether it should be divided by the mean as well.
    s = ys[len(ys) // 2:].std()
    result = []
    if tags is None:
        tags = list(range(len(levels) + 1))
    for level, tag in zip(levels, tags):
        below = y < level
        if below.any():
            result.append((x[below], y[below], tag))
            x = x[~below]
            y = y[~below]
        else:
            # No point under this level: emit the remainder with the last tag.
            result.append((x, y, tags[-1]))
    return result, (m, s)
def set_cluster_info(self, aws_access_key_id=None, aws_secret_access_key=None, aws_region=None, aws_availability_zone=None, vpc_id=None, subnet_id=None, master_elastic_ip=None, disallow_cluster_termination=None, enable_ganglia_monitoring=None, node_bootstrap_file=None, master_instance_type=None, slave_instance_type=None, initial_nodes=None, max_nodes=None, slave_request_type=None, fallback_to_ondemand=None, node_base_cooldown_period=None, node_spot_cooldown_period=None, custom_config=None, use_hbase=None, custom_ec2_tags=None, use_hadoop2=None, use_spark=None, use_qubole_placement_policy=None, maximum_bid_price_percentage=None, timeout_for_request=None, maximum_spot_instance_percentage=None, stable_maximum_bid_price_percentage=None, stable_timeout_for_request=None, stable_allow_fallback=True, spot_block_duration=None, ebs_volume_count=None, ebs_volume_type=None, ebs_volume_size=None, root_volume_size=None, fairscheduler_config_xml=None, default_pool=None, encrypted_ephemerals=None, ssh_public_key=None, persistent_security_group=None, enable_presto=None, bastion_node_public_dns=None, role_instance_profile=None, presto_custom_config=None, is_ha=None, env_name=None, python_version=None, r_version=None, enable_rubix=None):
    """Populate the full cluster configuration by delegating each group of
    settings to the corresponding ``set_*`` helper.

    Kwargs:
        `aws_access_key_id`: The access key id for customer's aws account.
            This is required for creating the cluster.
        `aws_secret_access_key`: The secret access key for customer's aws
            account. This is required for creating the cluster.
        `aws_region`: AWS region to create the cluster in.
        `aws_availability_zone`: The availability zone to create the cluster in.
        `vpc_id`: The vpc to create the cluster in.
        `subnet_id`: The subnet to create the cluster in.
        `master_elastic_ip`: Elastic IP to attach to master node.
        `disallow_cluster_termination`: Set this to True if you don't want
            qubole to auto-terminate idle clusters. Use this option with
            extreme caution.
        `enable_ganglia_monitoring`: Set this to True if you want to enable
            ganglia monitoring for the cluster.
        `node_bootstrap_file`: name of the node bootstrap file for this
            cluster. It should be stored in S3 at
            <your-default-location>/scripts/hadoop/
        `master_instance_type`: The instance type to use for the Hadoop master
            node.
        `slave_instance_type`: The instance type to use for the Hadoop slave
            nodes.
        `initial_nodes`: Number of nodes to start the cluster with.
        `max_nodes`: Maximum number of nodes the cluster may be auto-scaled up
            to.
        `slave_request_type`: Purchasing option for slave instances.
            Valid values: "ondemand", "hybrid", "spot".
        `fallback_to_ondemand`: Fallback to on-demand nodes if spot nodes
            could not be obtained. Valid only if slave_request_type is 'spot'.
        `node_base_cooldown_period`: Time for which an on-demand node waits
            before termination (Unit: minutes).
        `node_spot_cooldown_period`: Time for which a spot node waits before
            termination (Unit: minutes).
        `custom_config`: Custom Hadoop configuration overrides.
        `use_hbase`: Start hbase daemons on the cluster. Uses Hadoop2.
        `use_hadoop2`: Use hadoop2 in this cluster.
        `use_spark`: Use spark in this cluster.
        `use_qubole_placement_policy`: Use Qubole Block Placement policy for
            clusters with spot nodes.
        `maximum_bid_price_percentage`: (Valid only when `slave_request_type`
            is hybrid or spot.) Maximum value to bid for spot instances,
            expressed as a percentage of the base price for the slave node
            instance type.
        `timeout_for_request`: Timeout for a spot instance request
            (Unit: minutes).
        `maximum_spot_instance_percentage`: Maximum percentage of instances
            that may be purchased from the AWS Spot market. Valid only when
            slave_request_type is "hybrid".
        `stable_maximum_bid_price_percentage`: Maximum value to bid for stable
            node spot instances, expressed as a percentage of the base price
            (applies to both master and slave nodes).
        `stable_timeout_for_request`: Timeout for a stable node spot instance
            request (Unit: minutes).
        `stable_allow_fallback`: Whether to fallback to on-demand instances
            for stable nodes if spot instances are not available.
        `spot_block_duration`: Time for which the spot block instance is
            provisioned (Unit: minutes).
        `ebs_volume_count`: Number of EBS volumes to attach to each instance
            of the cluster.
        `ebs_volume_type`: Type of the EBS volume. Valid values are
            'standard' (magnetic) and 'ssd'.
        `ebs_volume_size`: Size of each EBS volume, in GB.
        `root_volume_size`: Size of root volume, in GB.
        `fairscheduler_config_xml`: XML string with custom configuration
            parameters for the fair scheduler.
        `default_pool`: The default pool for the fair scheduler.
        `encrypted_ephemerals`: Encrypt the ephemeral drives on the instance.
        `ssh_public_key`: SSH key to use to login to the instances.
        `persistent_security_group`: Comma-separated list of persistent
            security groups for the cluster.
        `enable_presto`: Enable Presto on the cluster.
        `presto_custom_config`: Custom Presto configuration overrides.
        `bastion_node_public_dns`: Public dns name of the bastion node.
            Required only if cluster is in private subnet.
        `is_ha`: Enabling HA config for cluster.
        `env_name`: Name of python and R environment. (For Spark clusters)
        `python_version`: Version of Python for environment. (For Spark clusters)
        `r_version`: Version of R for environment. (For Spark clusters)
        `enable_rubix`: Enable rubix on the cluster. (For Presto clusters)
    """
    # Top-level flags stored directly on this configuration object.
    self.disallow_cluster_termination = disallow_cluster_termination
    self.enable_ganglia_monitoring = enable_ganglia_monitoring
    self.node_bootstrap_file = node_bootstrap_file
    # Delegate each settings group to its dedicated helper.
    self.set_node_configuration(master_instance_type, slave_instance_type, initial_nodes, max_nodes, slave_request_type, fallback_to_ondemand, custom_ec2_tags, node_base_cooldown_period, node_spot_cooldown_period, root_volume_size)
    self.set_ec2_settings(aws_access_key_id, aws_secret_access_key, aws_region, aws_availability_zone, vpc_id, subnet_id, master_elastic_ip, bastion_node_public_dns, role_instance_profile)
    self.set_hadoop_settings(custom_config, use_hbase, use_hadoop2, use_spark, use_qubole_placement_policy, is_ha, enable_rubix)
    self.set_spot_instance_settings(maximum_bid_price_percentage, timeout_for_request, maximum_spot_instance_percentage)
    self.set_stable_spot_instance_settings(stable_maximum_bid_price_percentage, stable_timeout_for_request, stable_allow_fallback)
    self.set_spot_block_settings(spot_block_duration)
    self.set_ebs_volume_settings(ebs_volume_count, ebs_volume_type, ebs_volume_size)
    self.set_fairscheduler_settings(fairscheduler_config_xml, default_pool)
    self.set_security_settings(encrypted_ephemerals, ssh_public_key, persistent_security_group)
    self.set_presto_settings(enable_presto, presto_custom_config)
    self.set_env_settings(env_name, python_version, r_version)
def get_reference_repository(self, reference: Optional[Path], repo: str) -> Optional[Path]:
    """Returns a repository to use in clone command, if there is one to be referenced.

    Either provided by the user or generated from already cloned branches
    (master is preferred).

    :param reference: Path to a local repository provided by the user, or None.
    :param repo: Reference for which remote repository.
    """
    # A user-supplied reference always wins.
    if reference is not None:
        return reference.absolute()
    repo_path = self.get_path_to_repo(repo)
    if not repo_path.exists():
        return None
    # Prefer the master branch checkout when it exists.
    master = repo_path / "master"
    if master.exists() and master.is_dir():
        return master
    # Otherwise fall back to the first already-cloned branch directory.
    branch_dirs = (entry.resolve() for entry in repo_path.iterdir() if entry.is_dir())
    return next(branch_dirs, None)
def check_run(check, env, rate, times, pause, delay, log_level, as_json, break_point):
    """Run an Agent check."""
    active_envs = get_configured_envs(check)
    # Guard: nothing to run against.
    if not active_envs:
        echo_failure('No active environments found for `{}`.'.format(check))
        echo_info('See what is available to start via `ddev env ls {}`.'.format(check))
        abort()
    # When no environment was requested, it must be unambiguous.
    if not env:
        if len(active_envs) > 1:
            echo_failure('Multiple active environments found for `{}`, please specify one.'.format(check))
            echo_info('See what is active via `ddev env ls`.')
            abort()
        env = active_envs[0]
    # The requested environment must actually be active.
    if env not in active_envs:
        echo_failure('`{}` is not an active environment.'.format(env))
        echo_info('See what is active via `ddev env ls`.')
        abort()
    interface = create_interface(check, env)
    interface.run_check(rate=rate, times=times, pause=pause, delay=delay, log_level=log_level, as_json=as_json, break_point=break_point)
    echo_success('Note: ', nl=False)
    echo_info('If some metrics are missing, you may want to try again with the -r / --rate flag.')
def chunk_generator(N, n):
    """Returns a generator of slice objects.

    Parameters
    ----------
    N : int
        The size of one of the dimensions of a two-dimensional array.
    n : int
        The number of arrays of shape ('N', 'get_chunk_size(N, n)') that fit
        into memory.

    Returns
    -------
    Slice objects of the type 'slice(start, stop)' are generated, representing
    the set of indices specified by 'range(start, stop)'.
    """
    width = get_chunk_size(N, n)
    # Step through [0, N) in chunks, clamping the final slice at N.
    for lower in range(0, N, width):
        upper = min(lower + width, N)
        yield slice(lower, upper)
async def set_endpoint_for_did(wallet_handle: int, did: str, address: str, transport_key: str) -> None:
    """Set/replaces endpoint information for the given DID.

    :param wallet_handle: Wallet handle (created by open_wallet).
    :param did: The DID to resolve endpoint.
    :param address: The DIDs endpoint address.
    :param transport_key: The DIDs transport key (ver key, key id).
    :return: Error code
    """
    logger = logging.getLogger(__name__)
    logger.debug("set_endpoint_for_did: >>> wallet_handle: %r, did: %r, address: %r, transport_key: %r", wallet_handle, did, address, transport_key)
    # Lazily create the native callback once and cache it on the function
    # object so it is not garbage-collected between calls.
    if not hasattr(set_endpoint_for_did, "cb"):
        logger.debug("set_endpoint_for_did: Creating callback")
        set_endpoint_for_did.cb = create_cb(CFUNCTYPE(None, c_int32, c_int32))
    # Convert arguments to their C representations inline and dispatch.
    await do_call('indy_set_endpoint_for_did',
                  c_int32(wallet_handle),
                  c_char_p(did.encode('utf-8')),
                  c_char_p(address.encode('utf-8')),
                  c_char_p(transport_key.encode('utf-8')),
                  set_endpoint_for_did.cb)
    logger.debug("set_endpoint_for_did: <<<")
def is_provider_configured(opts, provider, required_keys=(), log_message=True, aliases=()):
    '''Check and return the first matching and fully configured cloud provider
    configuration.
    '''
    # "alias:driver" form: look up that exact provider entry.
    if ':' in provider:
        alias, driver = provider.split(':')
        providers = opts['providers']
        if alias not in providers or driver not in providers[alias]:
            return False
        details = providers[alias][driver]
        for key in required_keys:
            if details.get(key, None) is None:
                # A required configuration key is unset.
                if log_message is True:
                    log.warning("The required '%s' configuration setting is missing from the '%s' driver, which is configured under the '%s' alias.", key, provider, alias)
                return False
        # Fully configured provider found.
        return details
    # Bare driver name: scan every alias for a matching driver.
    for alias, drivers in six.iteritems(opts['providers']):
        for driver, provider_details in six.iteritems(drivers):
            if driver != provider and driver not in aliases:
                continue
            for key in required_keys:
                if provider_details.get(key, None) is None:
                    # Missing a necessary key; move on to the next candidate.
                    if log_message is True:
                        log.warning("The required '%s' configuration setting is missing from the '%s' driver, which is configured under the '%s' alias.", key, provider, alias)
                    break
            else:
                # No required key was missing: this provider qualifies.
                return provider_details
    # No configured provider matched.
    return False
def _database_default_options(self, name):
    """Get a Database instance with the default settings."""
    # Pin every option to its library default rather than inheriting
    # whatever this client instance was configured with.
    defaults = {
        'codec_options': DEFAULT_CODEC_OPTIONS,
        'read_preference': ReadPreference.PRIMARY,
        'write_concern': DEFAULT_WRITE_CONCERN,
    }
    return self.get_database(name, **defaults)
def _purge_children(self):
    """Find dead children and put a response on the result queue.

    :return:
    """
    for task_id, proc in six.iteritems(self._running_tasks):
        if not proc.is_alive() and proc.exitcode:
            # Child exited with a non-zero (or signal) exit code.
            error_msg = 'Task {} died unexpectedly with exit code {}'.format(task_id, proc.exitcode)
            proc.task.trigger_event(Event.PROCESS_FAILURE, proc.task, error_msg)
        elif proc.timeout_time is not None and time.time() > float(proc.timeout_time) and proc.is_alive():
            # Child exceeded its deadline; kill it and report a timeout.
            proc.terminate()
            error_msg = 'Task {} timed out after {} seconds and was terminated.'.format(task_id, proc.worker_timeout)
            proc.task.trigger_event(Event.TIMEOUT, proc.task, error_msg)
        else:
            # Still healthy: nothing to report for this child.
            continue
        logger.info(error_msg)
        self._task_result_queue.put((task_id, FAILED, error_msg, [], []))
def pairs(self, strand, cutoff=0.001, temp=37.0, pseudo=False, material=None, dangles='some', sodium=1.0, magnesium=0.0):
    '''Compute the pair probabilities for an ordered complex of strands.
    Runs the \'pairs\' command.

    :param strand: Strand on which to run pairs. Strands must be either
                   coral.DNA or coral.RNA).
    :type strand: list
    :param cutoff: Only probabilities above this cutoff appear in the
                   output.
    :type cutoff: float
    :param temp: Temperature setting for the computation. Negative values
                 are not allowed.
    :type temp: float
    :param pseudo: Enable pseudoknots.
    :type pseudo: bool
    :param material: The material setting to use in the computation. If set
                     to None (the default), the material type is inferred
                     from the strands. Other settings available: 'dna' for
                     DNA parameters, 'rna' for RNA (1995) parameters, and
                     'rna1999' for the RNA 1999 parameters.
    :type material: str
    :param dangles: How to treat dangles in the computation. From the
                    user guide: For \'none\': Dangle energies are ignored.
                    For \'some\': \'A dangle energy is incorporated for
                    each unpaired base flanking a duplex\'. For 'all': all
                    dangle energy is considered.
    :type dangles: str
    :param sodium: Sodium concentration in solution (molar), only applies
                   to DNA.
    :type sodium: float
    :param magnesium: Magnesium concentration in solution (molar), only
                      applies to DNA.
    :type magnesium: float
    :returns: The probability matrix, where the (i, j)th entry
              is the probability that base i is bound to base j. The matrix
              is augmented (it's N+1 by N+1, where N is the number of bases
              in the sequence) with an (N+1)th column containing the
              probability that each base is unpaired.
    :rtype: numpy.array
    '''
    # Set the material (will be used to set command material flag)
    material = self._set_material(strand, material)
    # Set up command flags
    cmd_args = self._prep_cmd_args(temp, dangles, material, pseudo, sodium, magnesium, multi=False)
    # Set up the input file and run the command. Note: no STDOUT
    lines = [str(strand)]
    self._run('pairs', cmd_args, lines)
    # Read the output from file. Use a raw string for the pattern: the
    # original non-raw '\d' is an invalid string escape (DeprecationWarning
    # in Python 3.6+) even though the regex behavior is the same.
    ppairs = self._read_tempfile('pairs.ppairs')
    data = re.search(r'\n\n\d*\n(.*)', ppairs, flags=re.DOTALL).group(1)
    N = len(strand)
    data_lines = [line.split('\t') for line in data.split('\n') if line]
    prob_matrix = self._pairs_to_np(data_lines, N)
    return prob_matrix
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.