signature stringlengths 29 44.1k | implementation stringlengths 0 85.2k |
|---|---|
def measure_each(*qubits: raw_types.Qid,
                 key_func: Callable[[raw_types.Qid], str] = str
                 ) -> List[gate_operation.GateOperation]:
    """Return one single-qubit measurement operation per given qubit.

    Each qubit is measured in the computational basis, keyed by
    ``key_func(qubit)`` (defaults to ``str``).

    Args:
        *qubits: The qubits to measure.
        key_func: Maps a qubit to the measurement key for that qubit.

    Returns:
        A list of operations individually measuring the given qubits.
    """
    operations = []
    for qubit in qubits:
        gate = MeasurementGate(1, key_func(qubit))
        operations.append(gate.on(qubit))
    return operations
def on_post_save(sender, **kwargs):
    """Expire ultracache cache keys affected by the saved object.

    Skips raw (fixture) saves, migration bookkeeping, and non-model
    senders.  On creation, keys indexed per content type are expired;
    otherwise keys indexed per (content type, pk) are expired.  In both
    cases matching paths are purged from the reverse caching proxy.
    """

    def _expire(key):
        # Delete every cached entry listed under ``key``, then the list itself.
        to_delete = cache.get(key, [])
        if to_delete:
            try:
                cache.delete_many(to_delete)
            except NotImplementedError:
                # Cache backend without delete_many support.
                for k in to_delete:
                    cache.delete(k)
            cache.delete(key)

    def _purge(key):
        # Purge reverse-proxy paths recorded under ``key``.
        if purger is not None:
            for li in cache.get(key, []):
                purger(li[0], li[1])
            cache.delete(key)

    if not invalidate:
        return
    if kwargs.get("raw", False):
        # Fixture loading - nothing cached can be affected meaningfully.
        return
    if sender is MigrationRecorder.Migration:
        return
    if not issubclass(sender, Model):
        return
    obj = kwargs["instance"]
    if not isinstance(obj, Model):
        return
    try:
        # get_for_model itself is cached
        ct = ContentType.objects.get_for_model(sender)
    except RuntimeError:
        # This happens when ultracache is being used by another product
        # during a test run.
        return
    if kwargs.get("created", False):
        # Expire cache keys that contain objects of this content type.
        _expire("ucache-ct-%s" % ct.id)
        _purge("ucache-ct-pth-%s" % ct.id)
    else:
        # Expire cache keys for this specific object.
        _expire("ucache-%s-%s" % (ct.id, obj.pk))
        _purge("ucache-pth-%s-%s" % (ct.id, obj.pk))
def detect_Martin2013(dat_orig, s_freq, time, opts):
    """Spindle detection based on Martin et al. 2013.

    Parameters
    ----------
    dat_orig : ndarray (dtype='float')
        vector with the data for one channel
    s_freq : float
        sampling frequency
    time : ndarray (dtype='float')
        vector with the time points for each sample
    opts : instance of 'DetectSpindle'
        'remez' : dict
            parameters for 'remez' filter
        'moving_rms' : dict
            parameters for 'moving_rms'
        'percentile' : float
            percentile for detection threshold

    Returns
    -------
    list of dict
        list of detected spindles
    dict
        'det_value_lo' with detection value, 'sel_value' is nan
        (for consistency with other methods).
        NOTE(review): the original docstring also mentions 'det_value_hi'
        as nan, but the dict built below does not include that key --
        confirm whether it should be added.
    float
        spindle density, per 30-s epoch

    References
    ----------
    Martin, N. et al. Neurobio Aging 34(2), 468-76 (2013).
    """
    # Band-pass filter, then a moving RMS envelope (which is downsampled
    # by the RMS window step).
    dat_filt = transform_signal(dat_orig, s_freq, 'remez', opts.det_remez)
    dat_det = transform_signal(dat_filt, s_freq, 'moving_rms', opts.moving_rms)
    # downsampled
    det_value = percentile(dat_det, opts.det_thresh)
    events = detect_events(dat_det, 'above_thresh', det_value)
    if events is not None:
        # Convert event indices from the RMS-step grid back to sample indices.
        events *= int(around(s_freq * opts.moving_rms['step']))
        # upsample
        events = _merge_close(dat_filt, events, time, opts.tolerance)
        events = within_duration(events, time, opts.duration)
        events = _merge_close(dat_filt, events, time, opts.min_interval)
        events = remove_straddlers(events, time, s_freq)
        power_peaks = peak_in_power(events, dat_orig, s_freq, opts.power_peaks)
        powers = power_in_band(events, dat_orig, s_freq, opts.frequency)
        sp_in_chan = make_spindles(events, power_peaks, powers, dat_filt,
                                   dat_orig, time, s_freq)
    else:
        lg.info('No spindle found')
        sp_in_chan = []
    values = {'det_value_lo': det_value, 'sel_value': nan}
    # Spindles per 30-s epoch: count / (n_samples / s_freq) * 30.
    density = len(sp_in_chan) * s_freq * 30 / len(dat_orig)
    return sp_in_chan, values, density
def load(self, filename, fv_extern=None):
    """Read a model stored in a file and rebuild ``self.mdl`` from it.

    :param filename: Path to file with the stored model.
    :param fv_extern: external feature vector function, stored in the
        model parameters when provided.
    :return: None
    """
    params = self.modelparams
    params["mdl_stored_file"] = filename
    if fv_extern is not None:
        params["fv_extern"] = fv_extern
    self.mdl = Model(modelparams=params)
def save_attribute(elements, module_path):
    """Recursively record attributes with their module path and signature."""
    for name, sig in elements.items():
        if isinstance(sig, dict):
            # Submodule: descend with the extended module path.
            save_attribute(sig, module_path + (name,))
        elif sig.isattribute():
            # Attribute names must be unique across all modules.
            assert name not in attributes
            attributes[name] = (module_path, sig,)
        elif isinstance(sig, Class):
            # Class: record the attributes of its fields.
            save_attribute(sig.fields, module_path + (name,))
def CutAtClosestPoint(self, p):
    """Cut the polyline at the point closest to ``p``.

    Let x be the point on the polyline closest to p.  Returns two new
    polylines: one from the beginning up to x, and one from x onwards to
    the end (x is the first point of the second polyline).
    """
    closest, idx = self.GetClosestPoint(p)
    head = self._points[0:idx + 1]
    tail = [closest] + list(self._points[idx + 1:])
    return (Poly(head), Poly(tail))
def rasterToWKB(cls, rasterPath, srid, noData, raster2pgsql):
    """Convert a raster file to Well Known Binary text via raster2pgsql.

    Uses the raster2pgsql executable that comes with PostGIS; WKB is the
    format rasters are stored in inside a PostGIS database.

    :param rasterPath: path to the raster file
    :param srid: spatial reference id passed to ``-s``
    :param noData: no-data value passed to ``-N``
    :param raster2pgsql: path to the raster2pgsql executable
    :raises RuntimeError: if raster2pgsql produced no SQL output
    """
    # raster2pgsql generates the SQL to load the raster into the database;
    # we only extract the WKB value from that SQL.
    raster2pgsqlProcess = subprocess.Popen(
        [raster2pgsql, '-s', srid, '-N', noData, rasterPath, 'n_a'],
        stdout=subprocess.PIPE)
    sql, error = raster2pgsqlProcess.communicate()
    if not sql:
        print(error)
        # BUGFIX: a bare ``raise`` here had no active exception and would
        # itself fail with RuntimeError; raise something meaningful.
        raise RuntimeError('raster2pgsql produced no output: %r' % (error,))
    # Example output:
    #   BEGIN;
    #   INSERT INTO "idx_index_maps" ("rast") VALUES ('0100...56C096CE87'::raster);
    #   END;
    # The WKB is wrapped in single quotes, so splitting on single quotes
    # isolates it as the second item of the resulting list.
    wellKnownBinary = sql.split("'")[1]
    return wellKnownBinary
def pretty_str(self, indent=0):
    """Return a human-readable string representation of this object.

    Kwargs:
        indent (int): The amount of spaces to use as indentation.
    """
    if not self.body:
        return ' ' * indent + '[empty]'
    return '\n'.join(child.pretty_str(indent) for child in self.body)
def __is_outside_of_builddir(project, path_to_check):
    """Check if ``path_to_check`` lies outside the project's build directory.

    BUGFIX: the original used ``os.path.commonprefix``, which compares
    character-by-character, so ``/build2/x`` was wrongly treated as being
    inside ``/build``.  Compare whole path components instead.
    """
    bdir = os.path.normpath(project.builddir)
    candidate = os.path.normpath(path_to_check)
    return not (candidate == bdir or candidate.startswith(bdir + os.sep))
def get_field_analysis(self, field):
    """Get the FieldAnalysis for a given field.

    :param field: field object providing ``get_class_name()``
    :return: :class:`FieldClassAnalysis` or None when the owning class
        has no analysis.
    """
    analysis = self.get_class_analysis(field.get_class_name())
    return analysis.get_field_analysis(field) if analysis else None
def get_default_config(self):
    """Return the default collector settings with the vmstat path set."""
    config = super(VMStatCollector, self).get_default_config()
    config['path'] = 'vmstat'
    return config
def get_authorization_url(self, acr_values=None, prompt=None, scope=None,
                          custom_params=None):
    """Get the authorization url the user must open to authenticate.

    Parameters:
        * acr_values (list, optional): acr values in order of priority
        * prompt (string, optional): ``prompt=login`` forces a fresh
          user session even when the user is already logged in
        * scope (list, optional): scopes required; defaults to the ones
          provided during site registration
        * custom_params (dict, optional): extra parameters passed
          through to the OP

    Returns:
        string: the authorization url.

    Raises:
        OxdServerError: if the oxd server reports an error.
    """
    params = {"oxd_id": self.oxd_id}
    if isinstance(scope, list) and scope:
        params["scope"] = scope
    if isinstance(acr_values, list) and acr_values:
        params["acr_values"] = acr_values
    if isinstance(prompt, str) and prompt:
        params["prompt"] = prompt
    if custom_params:
        params["custom_parameters"] = custom_params
    logger.debug("Sending command `get_authorization_url` with params %s", params)
    response = self.msgr.request("get_authorization_url", **params)
    logger.debug("Received response: %s", response)
    if response['status'] == 'error':
        raise OxdServerError(response['data'])
    return response['data']['authorization_url']
def _check_geo_param ( self , arg_list ) :
r"""Checks each function call to make sure that the user has provided at least one of the following geographic
parameters : ' stid ' , ' state ' , ' country ' , ' county ' , ' radius ' , ' bbox ' , ' cwa ' , ' nwsfirezone ' , ' gacc ' , or ' subgacc ' .
Arguments :
arg _ list : list , mandatory
A list of kwargs from other functions .
Returns :
None .
Raises :
MesoPyError if no geographic search criteria is provided .""" | geo_func = lambda a , b : any ( i in b for i in a )
check = geo_func ( self . geo_criteria , arg_list )
if check is False :
raise MesoPyError ( 'No stations or geographic search criteria specified. Please provide one of the ' 'following: stid, state, county, country, radius, bbox, cwa, nwsfirezone, gacc, subgacc' ) |
def int_0_inf(cls, string):
    '''Convert ``string`` to a non-negative int.

    If ``inf`` is supplied, it returns ``0``.  Raises
    ``argparse.ArgumentTypeError`` for non-integers and negative values.
    '''
    if string == 'inf':
        return 0
    try:
        number = int(string)
    except ValueError as error:
        raise argparse.ArgumentTypeError(error)
    if number < 0:
        raise argparse.ArgumentTypeError(_('Value must not be negative.'))
    return number
def _parse_response_for_all_events(self, response):
    """Populate events from a calendar SOAP response.

    Retrieves *most* of the event data, excluding Organizer & Attendee
    details.  Returns ``self`` for chaining.
    """
    namespaces = soap_request.NAMESPACES
    items = response.xpath(
        u'//m:FindItemResponseMessage/m:RootFolder/t:Items/t:CalendarItem',
        namespaces=namespaces)
    if not items:
        items = response.xpath(
            u'//m:GetItemResponseMessage/m:Items/t:CalendarItem',
            namespaces=namespaces)
    if not items:
        log.debug(u'No calendar items found with search parameters.')
        return self
    self.count = len(items)
    log.debug(u'Found %s items' % self.count)
    for item in items:
        self._add_event(xml=soap_request.M.Items(deepcopy(item)))
    return self
def get_lldp_neighbor_detail_output_lldp_neighbor_detail_remote_chassis_id ( self , ** kwargs ) :
"""Auto Generated Code""" | config = ET . Element ( "config" )
get_lldp_neighbor_detail = ET . Element ( "get_lldp_neighbor_detail" )
config = get_lldp_neighbor_detail
output = ET . SubElement ( get_lldp_neighbor_detail , "output" )
lldp_neighbor_detail = ET . SubElement ( output , "lldp-neighbor-detail" )
local_interface_name_key = ET . SubElement ( lldp_neighbor_detail , "local-interface-name" )
local_interface_name_key . text = kwargs . pop ( 'local_interface_name' )
remote_interface_name_key = ET . SubElement ( lldp_neighbor_detail , "remote-interface-name" )
remote_interface_name_key . text = kwargs . pop ( 'remote_interface_name' )
remote_chassis_id = ET . SubElement ( lldp_neighbor_detail , "remote-chassis-id" )
remote_chassis_id . text = kwargs . pop ( 'remote_chassis_id' )
callback = kwargs . pop ( 'callback' , self . _callback )
return callback ( config ) |
def multipart_content(*files):
    """Return a multipart MIME string attaching each given file.

    Note:
        This script was clearly inspired by write-mime-multipart.
    """
    outer = MIMEMultipart()
    for fname in files:
        maintype, subtype = get_type(fname).split('/', 1)
        with open(fname) as handle:
            part = MIMEText(handle.read(), _subtype=subtype)
        part.add_header('Content-Disposition', 'attachment',
                        filename=os.path.basename(fname))
        outer.attach(part)
    return outer.as_string()
def z(self, what):
    """Change redshift.

    :raise SynphotError: if ``what`` is not a real scalar number.
    """
    if not isinstance(what, numbers.Real):
        raise exceptions.SynphotError('Redshift must be a real scalar number.')
    self._z = float(what)
    self._redshift_model = RedshiftScaleFactor(self._z)
    # In 'wavelength_only' mode flux is untouched; otherwise flux is
    # conserved by scaling with 1/(1+z).
    if self.z_type == 'wavelength_only':
        self._redshift_flux_model = None
    else:
        self._redshift_flux_model = Scale(1 / (1 + self._z))
def removeDerivedMSCal(msname):
    """Remove derived columns like HA from an MS or CalTable.

    Removes the columns handled by the DerivedMSCal data manager (HA,
    HA1, HA2, PA1, PA2, LAST, LAST1, LAST2, AZEL1, AZEL2, UVW_J2000).
    """
    ms = table(msname, readonly=False, ack=False)
    # Drop every column stored by the DerivedMSCal data manager.
    for dm_info in ms.getdminfo().values():
        if dm_info['TYPE'] == 'DerivedMSCal':
            ms.removecols(dm_info['COLUMNS'])
    ms.flush()
def reactToAMQPMessage(message, send_back):
    """React to given (AMQP) message.

    `message` is expected to be a :py:func:`collections.namedtuple`
    structure from :mod:`.structures` filled with all necessary data.

    Args:
        message (object): One of the request objects defined in
            :mod:`.structures`.
        send_back (fn reference): Reference to function for responding.
            Useful for progress monitoring, for example.  Takes one
            parameter, which may be a response structure/namedtuple, a
            string, or whatever would normally be returned.

    Returns:
        object: Response class from :mod:`.structures`.

    Raises:
        ValueError: if a bad type of `message` structure is given.
    """
    if _instanceof(message, ExportRequest):
        # Build the LTP package from the request payload.
        tmp_folder = ltp.create_ltp_package(
            aleph_record=message.aleph_record,
            book_id=message.book_uuid,
            urn_nbn=message.urn_nbn,
            ebook_fn=message.filename,
            url=message.url,
            data=base64.b64decode(message.b64_data))
        # remove directory from export dir, if already there
        out_dir = os.path.join(settings.EXPORT_DIR,
                               os.path.basename(message.book_uuid))
        if os.path.exists(out_dir):
            shutil.rmtree(out_dir)
        shutil.move(tmp_folder, settings.EXPORT_PREFIX + out_dir)
        return True
    elif _instanceof(message, TrackingRequest):
        uuid = message.book_uuid
        # The export status is encoded in the directory name: strip the
        # uuid (and slashes) to recover the status suffix.
        status = [item.replace(uuid, "").replace("/", "")
                  for item in os.listdir(settings.EXPORT_DIR)
                  if uuid in item]
        if not status:
            raise ValueError("UUID %s not found!" % uuid)
        status = status[0]
        success = ["ok", "success", "done"]
        success = sum(([x, x + "_"] for x in success), [])
        # add _ to the end
        return TrackingResponse(
            book_id=uuid,
            exported=status.lower() in success,
            error=status,
        )
    raise ValueError("Unknown type of request: '" + str(type(message)) + "'!")
def printTPRegionParams(tpregion):
    """Print the TemporalMemory parameters of a TPRegion.

    Note: assumes we are using TemporalMemory/TPShim in the TPRegion.
    Converted from Python 2 print statements to print() calls so the
    module is valid Python 3 (the rest of the file already uses the
    function form).
    """
    tm = tpregion.getSelf()._tfdr
    print("------------PY TemporalMemory Parameters ------------------")
    print("numberOfCols             =", tm.columnDimensions)
    print("cellsPerColumn           =", tm.cellsPerColumn)
    print("minThreshold             =", tm.minThreshold)
    print("activationThreshold      =", tm.activationThreshold)
    print("newSynapseCount          =", tm.maxNewSynapseCount)
    print("initialPerm              =", tm.initialPermanence)
    print("connectedPerm            =", tm.connectedPermanence)
    print("permanenceInc            =", tm.permanenceIncrement)
    print("permanenceDec            =", tm.permanenceDecrement)
    print("predictedSegmentDecrement=", tm.predictedSegmentDecrement)
    print()
def save_data(self):
    """Store the model's data as a JSON dump under its site-named file."""
    target = os.path.join(self._get_prefix(), self.model.site)
    self.store.store_json(target, self.model.get_dict())
def is_internet_on(host="8.8.8.8", port=53, timeout=3):
    """Check whether the machine has an internet connection.

    :param host: hostname/IP to test (default: Google public DNS)
    :param port: port on the host to connect to
    :param timeout: seconds before discarding the connection attempt
    :return: True iff a TCP connection to host:port succeeds

    BUGFIX: the original never returned True, raised on failure instead
    of returning False (contradicting its docstring), and leaked the
    socket.  The socket is now closed via a context manager and the
    boolean contract is honoured.
    """
    socket.setdefaulttimeout(timeout)
    try:
        with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
            sock.connect((host, port))
        return True
    except OSError:
        return False
def _set_mult_grid_attr(self):
    """Set multiple attrs from grid file given their names in the grid file.

    For each internal grid-attribute name, tries the candidate external
    names in order and binds the first one found in the grid files to
    ``self`` under the internal name.
    """
    grid_objs = self._get_grid_files()
    if self.grid_attrs is None:
        self.grid_attrs = {}
    # Override GRID_ATTRS with entries in grid_attrs
    attrs = internal_names.GRID_ATTRS.copy()
    for k, v in self.grid_attrs.items():
        if k not in attrs:
            raise ValueError(
                'Unrecognized internal name, {!r}, specified for a '
                'custom grid attribute name. See the full list of '
                'valid internal names below:\n\n{}'.format(
                    k, list(internal_names.GRID_ATTRS.keys())))
        attrs[k] = (v,)
    for name_int, names_ext in attrs.items():
        # Try each external candidate name; stop at the first match.
        for name in names_ext:
            grid_attr = _get_grid_attr(grid_objs, name)
            if grid_attr is not None:
                TIME_STR = internal_names.TIME_STR
                renamed_attr = _rename_coords(grid_attr, attrs)
                # Drop a time coordinate that is not also a dimension.
                if ((TIME_STR not in renamed_attr.dims) and
                        (TIME_STR in renamed_attr.coords)):
                    renamed_attr = renamed_attr.drop(TIME_STR)
                setattr(self, name_int, renamed_attr)
                break
def composite(df, sameGenderMZ, sameGenderDZ, size=(16, 24)):
    """Embed both absdiff figures and heritability figures.

    Lays out a 6x4 grid: a top row of four absolute-difference panels
    (A-D) and three rows of two paired-value panels (E-H), one pair of
    panels per trait (telomere length, chrX/chrY copy number, TCR-alpha
    deletions).

    NOTE(review): ``df`` looks like a per-sample trait table keyed by
    "Sample name"; sameGenderMZ/sameGenderDZ appear to be same-gender
    monozygotic/dizygotic twin pairings -- confirm against callers.
    """
    fig = plt.figure(1, size)
    ax1a = plt.subplot2grid((6, 4), (0, 0), rowspan=2, colspan=1)
    ax2a = plt.subplot2grid((6, 4), (0, 1), rowspan=2, colspan=1)
    ax3a = plt.subplot2grid((6, 4), (0, 2), rowspan=2, colspan=1)
    ax4a = plt.subplot2grid((6, 4), (0, 3), rowspan=2, colspan=1)
    ax1b = plt.subplot2grid((6, 4), (2, 0), rowspan=2, colspan=2)
    ax2b = plt.subplot2grid((6, 4), (2, 2), rowspan=2, colspan=2)
    ax3b = plt.subplot2grid((6, 4), (4, 0), rowspan=2, colspan=2)
    ax4b = plt.subplot2grid((6, 4), (4, 2), rowspan=2, colspan=2)
    # Telomeres
    telomeres = extract_trait(df, "Sample name", "telomeres.Length")
    mzTelomeres = extract_twin_values(sameGenderMZ, telomeres)
    dzTelomeres = extract_twin_values(sameGenderDZ, telomeres)
    plot_paired_values(ax1b, mzTelomeres, dzTelomeres, label="Telomere length")
    plot_abs_diff(ax1a, mzTelomeres, dzTelomeres, label="Telomere length")
    # CCNX (female-only; low outliers filtered from the DZ set)
    CCNX = extract_trait(df, "Sample name", "ccn.chrX")
    mzCCNX = extract_twin_values(sameGenderMZ, CCNX, gender="Female")
    dzCCNX = extract_twin_values(sameGenderDZ, CCNX, gender="Female")
    dzCCNX = filter_low_values(dzCCNX, 1.75)
    plot_paired_values(ax2b, mzCCNX, dzCCNX, gender="Female only",
                       label="ChrX copy number")
    plot_abs_diff(ax2a, mzCCNX, dzCCNX, label="ChrX copy number")
    # CCNY (male-only; low outliers filtered from the DZ set)
    CCNY = extract_trait(df, "Sample name", "ccn.chrY")
    mzCCNY = extract_twin_values(sameGenderMZ, CCNY, gender="Male")
    dzCCNY = extract_twin_values(sameGenderDZ, CCNY, gender="Male")
    dzCCNY = filter_low_values(dzCCNY, .75)
    plot_paired_values(ax3b, mzCCNY, dzCCNY, gender="Male only",
                       label="ChrY copy number")
    plot_abs_diff(ax3a, mzCCNY, dzCCNY, label="ChrY copy number")
    # TRA (original comment said "CCNY" here -- this is TCR-alpha deletions)
    TRA = extract_trait(df, "Sample name", "TRA.PPM")
    mzTRA = extract_twin_values(sameGenderMZ, TRA)
    dzTRA = extract_twin_values(sameGenderDZ, TRA)
    plot_paired_values(ax4b, mzTRA, dzTRA, label="TCR-$\\alpha$ deletions")
    plot_abs_diff(ax4a, mzTRA, dzTRA, label="TCR-$\\alpha$ deletions")
    plt.tight_layout()
    root = fig.add_axes((0, 0, 1, 1))
    # ABCD absdiff, EFGH heritability
    labels = ((.03, .99, 'A'), (.27, .99, 'B'), (.53, .99, 'C'),
              (.77, .99, 'D'), (.03, .67, 'E'), (.53, .67, 'F'),
              (.03, .34, 'G'), (.53, .34, 'H'))
    panel_labels(root, labels)
    root.set_xlim(0, 1)
    root.set_ylim(0, 1)
    root.set_axis_off()
def elcm_session_list(irmc_info):
    """Send an eLCM request to list all sessions.

    :param irmc_info: node info
    :returns: dict object of sessions if succeed
        'SessionList':
            'Contains':
                {'Id': id1, 'Name': name1},
                {'Id': id2, 'Name': name2},
                {'Id': idN, 'Name': nameN},
    :raises: SCCIClientError if SCCI failed
    """
    # GET the session information list from the server.
    resp = elcm_request(irmc_info, method='GET', path='/sessionInformation/')
    if resp.status_code != 200:
        raise scci.SCCIClientError(
            'Failed to list sessions with error code %s' % resp.status_code)
    return _parse_elcm_response_body_as_json(resp)
def n_exec_stmt(self, node):
    """exec_stmt ::= expr exprlist DUP_TOP EXEC_STMT
    exec_stmt ::= expr exprlist EXEC_STMT"""
    self.write(self.indent, 'exec ')
    self.preorder(node[0])
    if not node[1][0].isNone():
        # First child is introduced by ' in ', the rest by commas.
        separator = ' in '
        for child in node[1]:
            self.write(separator)
            separator = ", "
            self.preorder(child)
    self.println()
    self.prune()
def refweights(self):
    """A numpy array with equal weights for all segment junctions.

    Weights sum to one: each of the ``shape[0]`` junctions receives
    ``1/shape[0]``.
    """
    # pylint: disable=unsubscriptable-object
    # due to a pylint bug (see https://github.com/PyCQA/pylint/issues/870)
    count = self.shape[0]
    return numpy.full(self.shape, 1.0 / count, dtype=float)
def _compiled(self):
    """Return a string of the self.block compiled to a block of
    code that can be execed to get a function to execute.

    Dev Notes:
    Because of fast locals in functions in both CPython and PyPy, getting
    a function to execute makes the code a few times faster than just
    executing it in the global exec scope.

    NOTE(review): the indentation embedded inside the generated-code
    string literals below may have been mangled upstream -- confirm the
    emitted body lines align with ``self._prog_start``.
    """
    prog = [self._prog_start]
    # Expression builders for the simple combinational OPS.
    simple_func = {
        'w': lambda x: x,
        'r': lambda x: x,
        '~': lambda x: '(~' + x + ')',
        '&': lambda l, r: '(' + l + '&' + r + ')',
        '|': lambda l, r: '(' + l + '|' + r + ')',
        '^': lambda l, r: '(' + l + '^' + r + ')',
        'n': lambda l, r: '(~(' + l + '&' + r + '))',
        '+': lambda l, r: '(' + l + '+' + r + ')',
        '-': lambda l, r: '(' + l + '-' + r + ')',
        '*': lambda l, r: '(' + l + '*' + r + ')',
        '<': lambda l, r: 'int(' + l + '<' + r + ')',
        '>': lambda l, r: 'int(' + l + '>' + r + ')',
        '=': lambda l, r: 'int(' + l + '==' + r + ')',
        'x': lambda sel, f, t: '({}) if ({}==0) else ({})'.format(f, sel, t),
    }

    def shift(value, direction, shift_amt):
        # Emit a shift expression, eliding shifts by zero.
        if shift_amt == 0:
            return value
        else:
            return '(%s %s %d)' % (value, direction, shift_amt)

    def make_split():
        # Build the expression for the current contiguous run of selected
        # bits (reads split_* / source / net from the enclosing loop).
        if split_start_bit == 0:
            bit = '(%d & %s)' % ((1 << split_length) - 1, source)
        elif len(net.args[0]) - split_start_bit == split_length:
            bit = '(%s >> %d)' % (source, split_start_bit)
        else:
            bit = '(%d & (%s >> %d))' % ((1 << split_length) - 1,
                                         source, split_start_bit)
        return shift(bit, '<<', split_res_start_bit)

    for net in self.block:
        if net.op in simple_func:
            argvals = (self._arg_varname(arg) for arg in net.args)
            expr = simple_func[net.op](*argvals)
        elif net.op == 'c':
            # Concatenation: OR together the shifted arguments.
            expr = ''
            for i in range(len(net.args)):
                # BUGFIX: was `expr is not ''` -- an identity comparison on
                # strings that only worked because of CPython interning
                # (and warns on 3.8+); use truthiness instead.
                if expr:
                    expr += ' | '
                shiftby = sum(len(j) for j in net.args[i + 1:])
                expr += shift(self._arg_varname(net.args[i]), '<<', shiftby)
        elif net.op == 's':
            # Bit select: merge contiguous selected bits into masked shifts.
            source = self._arg_varname(net.args[0])
            expr = ''
            split_length = 0
            split_start_bit = -2
            split_res_start_bit = -1
            for i, b in enumerate(net.op_param):
                if b != split_start_bit + split_length:
                    if split_start_bit >= 0:
                        # create a wire
                        expr += make_split() + '|'
                    split_length = 1
                    split_start_bit = b
                    split_res_start_bit = i
                else:
                    split_length += 1
            expr += make_split()
        elif net.op == 'm':
            read_addr = self._arg_varname(net.args[0])
            mem = net.op_param[1]
            if isinstance(net.op_param[1], RomBlock):
                expr = 'd["%s"]._get_read_data(%s)' % (self._mem_varname(mem),
                                                       read_addr)
            else:
                # memories act async for reads
                expr = 'd["%s"].get(%s, %s)' % (self._mem_varname(mem),
                                                read_addr, self.default_value)
        elif net.op == '@':
            # memwrites are special: emit a conditional append, no dest.
            mem = self._mem_varname(net.op_param[1])
            write_addr, write_val, write_enable = (self._arg_varname(a)
                                                   for a in net.args)
            prog.append(' if {}:'.format(write_enable))
            prog.append(' mem_ws.append(("{}", {}, {}))'.format(
                mem, write_addr, write_val))
            continue
        else:
            raise PyrtlError('FastSimulation cannot handle primitive "%s"'
                             % net.op)
        # prog.append(' # ' + str(net))
        result = self._dest_varname(net.dests[0])
        if len(net.dests[0]) == self._no_mask_bitwidth[net.op](net):
            prog.append(" %s = %s" % (result, expr))
        else:
            mask = str(net.dests[0].bitmask)
            prog.append(' %s = %s & %s' % (result, mask, expr))
    # add traced wires to dict
    if self.tracer is not None:
        for wire_name in self.tracer.trace:
            wire = self.block.wirevector_by_name[wire_name]
            if not isinstance(wire, (Input, Const, Register, Output)):
                v_wire_name = self._varname(wire)
                prog.append(' outs["%s"] = %s' % (wire_name, v_wire_name))
    prog.append(" return regs, outs, mem_ws")
    return '\n'.join(prog)
def line_oriented(cls, line_oriented_options, console):
    """Yield (print_stdout, print_stderr) functions for a LineOriented goal.

    The passed options instance will generally be the `Goal.Options` of a
    `LineOriented` `Goal`.  When `output_file` is set, stdout output is
    redirected to that file and the file is closed afterwards; otherwise
    both streams are flushed on exit.
    """
    if type(line_oriented_options) != cls.Options:
        raise AssertionError(
            'Expected Options for `{}`, got: {}'.format(cls.__name__,
                                                        line_oriented_options))
    values = line_oriented_options.values
    # The separator option is given escaped (e.g. "\\n"); decode it.
    sep = values.sep.encode('utf-8').decode('unicode_escape')
    stdout, stderr = console.stdout, console.stderr
    redirected = bool(values.output_file)
    if redirected:
        stdout = open(values.output_file, 'w')
    try:
        yield (lambda msg: print(msg, file=stdout, end=sep),
               lambda msg: print(msg, file=stderr))
    finally:
        if redirected:
            stdout.close()
        else:
            stdout.flush()
        stderr.flush()
def read_spec(filename, verbose=False):
    """Read an SBP specification.

    Parameters
    ----------
    filename : str
        Local filename for specification.
    verbose : bool
        Print out some debugging info.

    Raises
    ------
    Exception
        On empty file.
    yaml.YAMLError
        On Yaml parsing error.
    voluptuous.Invalid
        On invalid SBP schema.
    """
    with open(filename, 'r') as f:
        # SECURITY: safe_load instead of load -- yaml.load without an
        # explicit Loader can construct arbitrary Python objects from the
        # input and is deprecated.
        contents = yaml.safe_load(f)
    if contents is None:
        raise Exception("Empty yaml file: %s." % filename)
    try:
        s.package_schema(contents)
    except Exception:
        sys.stderr.write("Invalid SBP YAML specification: %s.\n" % filename)
        # Bare re-raise preserves the original traceback.
        raise
    return contents
def mvir(self, H=70., Om=0.3, overdens=200., wrtcrit=False, forceint=False,
         ro=None, vo=None, use_physical=False):
    # use_physical necessary bc of pop=False, does nothing inside
    """NAME:
        mvir
    PURPOSE:
        calculate the virial mass M(<rvir)
    INPUT:
        H= (default: 70) Hubble constant in km/s/Mpc
        Om= (default: 0.3) Omega matter
        overdens= (200) overdensity which defines the virial radius
        wrtcrit= (False) if True, the overdensity is wrt the critical
            density rather than the mean matter density
        ro= distance scale in kpc or as Quantity (default: object-wide)
        vo= velocity scale in km/s or as Quantity (default: object-wide)
    KEYWORDS:
        forceint= if True, calculate the mass through integration of the
            density, even if an explicit expression for the mass exists
    OUTPUT:
        M(<rvir)
    HISTORY:
        2014-09-12 - Written - Bovy (IAS)
    """
    ro = self._ro if ro is None else ro
    vo = self._vo if vo is None else vo
    # Evaluate the virial radius, then the enclosed mass at that radius.
    try:
        virial_radius = self.rvir(H=H, Om=Om, overdens=overdens,
                                  wrtcrit=wrtcrit, use_physical=False,
                                  ro=ro, vo=vo)
    except AttributeError:
        raise AttributeError(
            "This potential does not have a '_scale' defined to base the concentration on or does not support calculating the virial radius")
    return self.mass(virial_radius, forceint=forceint, use_physical=False,
                     ro=ro, vo=vo)
def _create_group_tree(self, levels):
    """This method creates a group tree.

    ``levels[i]`` is the depth of ``self.groups[i]``; the parent of a
    group is the nearest preceding group exactly one level shallower.
    After building the tree, entries are attached to their groups by id.

    Raises:
        KPError: if the level list does not describe a valid tree.
    """
    if levels[0] != 0:
        raise KPError("Invalid group tree")
    for i in range(len(self.groups)):
        if (levels[i] == 0):
            # Top-level group: attach directly under the root group.
            self.groups[i].parent = self.root_group
            self.groups[i].index = len(self.root_group.children)
            self.root_group.children.append(self.groups[i])
            continue
        # Scan backwards for the nearest shallower group (the parent).
        j = i - 1
        while j >= 0:
            if levels[j] < levels[i]:
                # A parent must be exactly one level above its child.
                if levels[i] - levels[j] != 1:
                    raise KPError("Invalid group tree")
                self.groups[i].parent = self.groups[j]
                self.groups[i].index = len(self.groups[j].children)
                self.groups[i].parent.children.append(self.groups[i])
                break
            if j == 0:
                raise KPError("Invalid group tree")
            j -= 1
    # Attach every entry to the group whose id matches its group_id.
    for e in range(len(self.entries)):
        for g in range(len(self.groups)):
            if self.entries[e].group_id == self.groups[g].id_:
                self.groups[g].entries.append(self.entries[e])
                self.entries[e].group = self.groups[g]
                # from original KeePassX-code, but what does it do?
                self.entries[e].index = 0
    return True
def update_url_params(url, replace_all=False, **url_params):
    """Return ``url`` with its query updated from ``url_params``.

    Non-matching existing parameters are retained unless ``replace_all``
    is True, in which case the whole query is replaced.  Returns None
    when no url (or nothing to do) is given.
    """
    # Ensure 'replace_all' can itself be sent as a url param: any value
    # other than the booleans True/False is treated as a parameter.
    if replace_all is not True and replace_all is not False:
        url_params['replace_all'] = replace_all
    if not url or not url_params:
        return url or None
    scheme, netloc, path, query, fragment = _urlsplit(url)
    if replace_all is True:
        query = url_params
    else:
        query = _parse_qs(query)
        query.update(url_params)
    encoded = _unquote(_urlencode(query, doseq=True))
    return _urlunsplit((scheme, netloc, path, encoded, fragment))
def add_event(self, name, time, chan):
    """Action: add a single event to the annotations and refresh the view."""
    annotations = self.annot
    annotations.add_event(name, time, chan=chan)
    self.update_annotations()
def from_tuple(tup):
    """Convert a tuple into a range with error handling.

    Parameters
    ----------
    tup : tuple (len 2 or 3)
        The tuple to turn into a range.

    Returns
    -------
    range
        The range from the tuple.

    Raises
    ------
    ValueError
        Raised when the tuple length is not 2 or 3.
    """
    if len(tup) in (2, 3):
        return range(*tup)
    raise ValueError(
        'tuple must contain 2 or 3 elements, not: %d (%r' % (len(tup), tup,),
    )
def isfile(self, path=None, client_kwargs=None, assume_exists=None):
    """Return True if path is an existing regular file.

    Args:
        path (str): Path or URL.
        client_kwargs (dict): Client arguments.
        assume_exists (bool or None): Value to return when there is not
            enough permission to determine the existence of the file.
            If None, the permission exception is re-raised (default
            behavior); if True or False, that value is returned instead.

    Returns:
        bool: True if file exists.
    """
    relative = self.relpath(path)
    if not relative:
        # Root always exists and is a directory, hence never a file
        return False
    if path[-1] != '/' and not self.is_locator(path, relative=True):
        # No trailing slash and not a locator: may be a file -- defer to exists()
        return self.exists(path=path, client_kwargs=client_kwargs, assume_exists=assume_exists)
    # Trailing slash or locator: directory-like, not a file
    return False
def isvector_or_scalar(a):
    """Return truthy if *a* is an effectively one-dimensional array or a scalar.

    Covers one-dimensional arrays of shape [N], row and column matrices of
    shape [1, N] and [N, 1], and their generalizations of shape
    [1, 1, ..., N, ..., 1]. Scalars have shape [1, 1, ..., 1].
    Empty arrays don't count.

    Non-array inputs (anything without size/ndim/shape) return False.
    """
    try:
        # At most one non-singleton dimension may remain after dropping 1s
        return bool(a.size) and a.ndim - a.shape.count(1) <= 1
    except AttributeError:
        # Fix: was a bare `except:` that silently swallowed every error
        # (including KeyboardInterrupt); only missing attributes mean
        # "not an array".
        return False
def token(self, value):
    """Set the Token of the message.

    :type value: String
    :param value: the Token; non-string values are coerced with str()
    :raise AttributeError: if value is longer than 256 characters
    """
    if value is None:
        self._token = None
        return
    text = value if isinstance(value, str) else str(value)
    if len(text) > 256:
        raise AttributeError
    self._token = text
def get_proxy_ticket(self, pgt):
    """Return a proxy ticket given the proxy granting ticket.

    Queries the CAS server and parses its XML response.

    :param pgt: proxy granting ticket string
    :raise CASError: on an authentication failure or an unexpected response
    """
    response = requests.get(self.get_proxy_url(pgt))
    if response.status_code == 200:
        # Deferred import: lxml is only needed when a body must be parsed
        from lxml import etree
        root = etree.fromstring(response.content)
        tickets = root.xpath("//cas:proxyTicket", namespaces={"cas": "http://www.yale.edu/tp/cas"})
        if len(tickets) == 1:
            return tickets[0].text
        errors = root.xpath("//cas:authenticationFailure", namespaces={"cas": "http://www.yale.edu/tp/cas"})
        if len(errors) == 1:
            raise CASError(errors[0].attrib['code'], errors[0].text)
    # Reached when status != 200, or a 200 body had neither ticket nor error
    raise CASError("Bad http code %s" % response.status_code)
def save_assets(self, dest_path):
    """Save plot assets alongside dest_path.

    Some plots may have assets, like bitmap files, which need to be
    saved alongside the rendered plot file. Each subplot gets a distinct
    ``_<index>`` suffix so asset filenames cannot collide.

    :param dest_path: path of the main output file.
    """
    for idx, subplot in enumerate(self.subplots):
        subplot.save_assets(dest_path, suffix='_%d' % idx)
def visit_Call(self, node):
    """Compute the result range of a function call.

    Known intrinsics contribute their declared return range; ``getattr``
    is special-cased; any unknown callee falls back to the generic visit
    (unbounded interval).

    >>> import gast as ast
    >>> from pythran import passmanager, backend
    >>> node = ast.parse('''
    ... def foo():
    ...     a = __builtin__.range(10)''')
    >>> pm = passmanager.PassManager("test")
    >>> res = pm.gather(RangeValues, node)
    >>> res['a']
    Interval(low=-inf, high=inf)
    """
    for alias in self.aliases[node.func]:
        if alias is MODULES['__builtin__']['getattr']:
            # getattr(obj, "name"): use the range of the resolved attribute
            attr_name = node.args[-1].s
            attribute = attributes[attr_name][-1]
            self.add(node, attribute.return_range(None))
        elif isinstance(alias, Intrinsic):
            # Intrinsics declare a return range as a function of arg ranges
            alias_range = alias.return_range([self.visit(n) for n in node.args])
            self.add(node, alias_range)
        else:
            # Unknown callee: give up and use the default (unbounded) range
            return self.generic_visit(node)
    return self.result[node]
def buscar_rules(self, id_ambiente_vip, id_vip=''):
    """Search rules by Environment VIP identifier.

    :return: Dictionary with the following structure:
        {'name_rule_opt': [{'name_rule_opt': <name>, 'id': <id>}, ...]}
    :raise InvalidParameterError: Environment VIP identifier is null or invalid.
    :raise EnvironmentVipNotFoundError: Environment VIP not registered.
    :raise DataBaseError: Networkapi failed to access the database.
    :raise XMLError: Networkapi failed to generate the XML response.
    """
    # id_vip may be empty, producing a trailing '/' accepted by the API
    url = 'environment-vip/get/rules/' + str(id_ambiente_vip) + '/' + str(id_vip)
    code, xml = self.submit(None, 'GET', url)
    return self.response(code, xml)
def lock_key(group_id, item_id, group_width=8):
    """Create a lock ID where the lower bits are the group ID and the upper
    bits are the item ID. This allows the use of a bigint namespace for
    items, with a limited space for grouping.

    :group_id: an integer identifying the group. Must be less than
        2^:group_width:
    :item_id: an integer. Must be less than 2^(63 - :group_width:) - 1
    :group_width: the number of bits to reserve for the group ID.
    :raises ValueError: if either ID exceeds its allotted bit width.
    """
    # ValueError (a subclass of Exception) replaces the original generic
    # Exception, so existing `except Exception` callers still work.
    if group_id >= (1 << group_width):
        raise ValueError("Group ID is too big")
    if item_id >= (1 << (63 - group_width)) - 1:
        raise ValueError("Item ID is too big")
    return (item_id << group_width) | group_id
def enable_passive_svc_checks(self, service):
    """Enable passive checks for a service.

    Format of the line that triggers function call::

        ENABLE_PASSIVE_SVC_CHECKS;<host_name>;<service_description>

    :param service: service to edit
    :type service: alignak.objects.service.Service
    :return: None
    """
    if not service.passive_checks_enabled:
        # Record which attribute changed so the modification can be reported
        service.modified_attributes |= DICT_MODATTR["MODATTR_PASSIVE_CHECKS_ENABLED"].value
        service.passive_checks_enabled = True
        # Broadcast the updated status to interested daemons
        self.send_an_element(service.get_update_status_brok())
def stripHtml(html, joiner=''):
    """Strip the HTML tags from the inputted text, returning the basic text.
    This algorithm was found on
    http://stackoverflow.com/questions/753052/strip-html-from-strings-in-python

    :param html: <str> markup to strip
    :param joiner: <str> inserted between the collected text fragments
    :return: <str>
    """
    stripper = HTMLStripper()
    # Turn explicit line breaks into newlines before stripping tags.
    # NOTE(review): '<br />' (with a space) is not converted -- confirm inputs.
    stripper.feed(html.replace('<br>', '\n').replace('<br/>', '\n'))
    return stripper.text(joiner)
def layers():
    """Get the layers module, valid for both TF 1 and TF 2 (for now).

    The resolved module is cached at module level so the TF2 probe and the
    log message happen at most once per process.
    """
    global _cached_layers
    if _cached_layers is not None:
        return _cached_layers
    layers_module = tf.layers
    try:
        from tensorflow.python import tf2
        # pylint: disable=g-direct-tensorflow-import,g-import-not-at-top
        if tf2.enabled():
            tf.logging.info("Running in V2 mode, using Keras layers.")
            layers_module = tf.keras.layers
    except ImportError:
        # Older TF without the tf2 module: keep tf.layers
        pass
    _cached_layers = layers_module
    return layers_module
def register_bse_task(self, *args, **kwargs):
    """Register a Bethe-Salpeter task.

    Thin wrapper over ``register_task`` that forces the task class to
    ``BseTask``; all other arguments are passed through unchanged.
    """
    kwargs["task_class"] = BseTask
    return self.register_task(*args, **kwargs)
def knowledge_base_path(cls, project, knowledge_base):
    """Return a fully-qualified knowledge_base resource string.

    :param project: GCP project id
    :param knowledge_base: knowledge base id
    :return: str of the form ``projects/{project}/knowledgeBases/{knowledge_base}``
    """
    return google.api_core.path_template.expand('projects/{project}/knowledgeBases/{knowledge_base}', project=project, knowledge_base=knowledge_base, )
def find_best_rsquared(list_of_fits):
    """Return the best fit, based on rsquared.

    A single O(n) scan replaces the original O(n log n) full sort; using
    ``>=`` keeps the original tie-break (the LAST fit among equal-rsquared
    maxima wins, matching ``sorted(...)[-1]`` with a stable sort).

    :param list_of_fits: iterable of objects exposing an ``rsquared`` attribute
    :raises IndexError: on empty input (same failure mode as the original)
    """
    best = None
    for fit in list_of_fits:
        if best is None or fit.rsquared >= best.rsquared:
            best = fit
    if best is None:
        # Match the original `sorted([])[-1]` failure mode
        raise IndexError('list index out of range')
    return best
async def handle_agent_job_done(self, agent_addr, message: AgentJobDone):
    """Handle an AgentJobDone message.

    Sends the result back to the client, marks the agent as available
    again, and starts a new job from the queue if one is waiting.
    """
    if agent_addr in self._registered_agents:
        self._logger.info("Job %s %s finished on agent %s", message.job_id[0], message.job_id[1], agent_addr)
        # Remove the job from the list of running jobs
        del self._job_running[message.job_id]
        # Send the result back to the client that submitted the job
        # (job_id[0] is the client address, job_id[1] the client-side job id)
        await ZMQUtils.send_with_addr(self._client_socket, message.job_id[0], BackendJobDone(message.job_id[1], message.result, message.grade, message.problems, message.tests, message.custom, message.state, message.archive, message.stdout, message.stderr))
        # The agent is available again
        self._available_agents.append(agent_addr)
    else:
        # Result from an agent we do not know about: log and drop it
        self._logger.warning("Job result %s %s from non-registered agent %s", message.job_id[0], message.job_id[1], agent_addr)
    # Possibly dispatch a queued job to the newly-freed agent
    await self.update_queue()
def provider_factory(factory=_sentinel, scope=NoneScope):
    '''Decorator to create a provider using the given factory and scope.

    Can also be used in a non-decorator manner.

    :param factory: factory callable; omitted when used as
        ``@provider_factory(scope=...)``
    :param scope: Scope key, factory, or instance
    :type scope: object or callable
    :return: a Provider, or a decorator awaiting the factory
    :rtype: Provider or callable
    '''
    if factory is _sentinel:
        # Called as @provider_factory(scope=...): defer until the factory arrives
        return functools.partial(provider_factory, scope=scope)
    return Provider(factory, scope)
def unflat_take(items_list, unflat_index_list):
    r"""Return a nested subset of items_list.

    Args:
        items_list (list): flat list of items
        unflat_index_list (list): arbitrarily nested list of indices

    CommandLine:
        python -m utool.util_list --exec-unflat_take

    SeeAlso:
        ut.take

    Example:
        >>> # DISABLE_DOCTEST
        >>> from utool.util_list import *  # NOQA
        >>> items_list = [1, 2, 3, 4, 5]
        >>> unflat_index_list = [[0, 1], [2, 3], [0, 4]]
        >>> result = unflat_take(items_list, unflat_index_list)
        >>> print(result)
        [[1, 2], [3, 4], [1, 5]]
    """
    result = []
    for indices in unflat_index_list:
        if isinstance(indices, list):
            # Recurse into nested index lists, mirroring the input structure
            result.append(unflat_take(items_list, indices))
        else:
            result.append(take(items_list, indices))
    return result
def code(ctx, show_hidden, query, single):
    """Generate codes.

    Generate codes from credentials stored on your YubiKey.
    Provide a query string to match one or more specific credentials.
    Touch and HOTP credentials require a single match to be triggered.
    """
    ensure_validated(ctx)
    controller = ctx.obj['controller']
    # Hidden credentials are filtered out unless explicitly requested
    creds = [(cr, c) for (cr, c) in controller.calculate_all() if show_hidden or not cr.is_hidden]
    creds = _search(creds, query)
    if len(creds) == 1:
        cred, code = creds[0]
        if cred.touch:
            prompt_for_touch()
        try:
            if cred.oath_type == OATH_TYPE.HOTP:
                # HOTP might require touch, we don't know.
                # Assume yes after 500ms.
                hotp_touch_timer = Timer(0.500, prompt_for_touch)
                hotp_touch_timer.start()
                creds = [(cred, controller.calculate(cred))]
                hotp_touch_timer.cancel()
            elif code is None:
                # calculate_all gave no value (e.g. touch credential): compute now
                creds = [(cred, controller.calculate(cred))]
        except APDUError as e:
            if e.sw == SW.SECURITY_CONDITION_NOT_SATISFIED:
                ctx.fail('Touch credential timed out!')
    elif single:
        # --single requires exactly one match
        _error_multiple_hits(ctx, [cr for cr, c in creds])
    if single:
        click.echo(creds[0][1].value)
    else:
        creds.sort()
        # Placeholder text for credentials whose code could not be computed
        outputs = [(cr.printable_key, c.value if c else '[Touch Credential]' if cr.touch else '[HOTP Credential]' if cr.oath_type == OATH_TYPE.HOTP else '') for (cr, c) in creds]
        # Column widths for right-aligned code output
        longest_name = max(len(n) for (n, c) in outputs) if outputs else 0
        longest_code = max(len(c) for (n, c) in outputs) if outputs else 0
        format_str = u'{:<%d} {:>%d}' % (longest_name, longest_code)
        for name, result in outputs:
            click.echo(format_str.format(name, result))
def usergroups_list(self, **kwargs) -> SlackResponse:
    """List all User Groups for a team.

    Requires a user (xoxp) token; extra kwargs are forwarded as query params.
    """
    self._validate_xoxp_token()
    return self.api_call("usergroups.list", http_verb="GET", params=kwargs)
def _create_cd_drives(cd_drives, controllers=None, parent_ref=None):
    '''Return a list of vim.vm.device.VirtualDeviceSpec objects representing
    the CD/DVD drives to be created for a virtual machine.

    cd_drives
        CD/DVD drive properties
    controllers
        CD/DVD drive controllers (IDE, SATA)
    parent_ref
        Parent object reference
    '''
    cd_drive_specs = []
    # Negative temporary device keys mark devices that do not exist yet;
    # at most 50 drives can be described this way
    keys = range(-3000, -3050, -1)
    if cd_drives:
        devs = [dvd['adapter'] for dvd in cd_drives]
        log.trace('Creating cd/dvd drives %s', devs)
        for drive, key in zip(cd_drives, keys):
            # if a controller is not available / cannot be created we should
            # use the one which is available by default, this is 'IDE 0'
            controller_key = 200
            if controllers:
                controller = _get_device_by_label(controllers, drive['controller'])
                controller_key = controller.key
            cd_drive_specs.append(_apply_cd_drive(drive['adapter'], key, drive['device_type'], 'add', client_device=drive['client_device'] if 'client_device' in drive else None, datastore_iso_file=drive['datastore_iso_file'] if 'datastore_iso_file' in drive else None, connectable=drive['connectable'] if 'connectable' in drive else None, controller_key=controller_key, parent_ref=parent_ref))
    return cd_drive_specs
def calculate_nonagonal_number(nth_order: int) -> int:
    """Compute the nth nonagonal number.

    A nonagonal number is a figurate number that extends the concept of
    triangular and square numbers to the 9-gon (nonagon). The nth nonagonal
    number is given by n * (7n - 5) / 2.

    Uses floor division: n * (7n - 5) is always even, so the result is exact
    for arbitrarily large n (the original float division could lose
    precision above 2**53).

    Args:
        nth_order (int): The order of the nonagonal number to be calculated.

    Returns:
        int: The nth nonagonal number.

    Examples:
        >>> calculate_nonagonal_number(10)
        325
        >>> calculate_nonagonal_number(15)
        750
        >>> calculate_nonagonal_number(18)
        1089
    """
    return nth_order * (7 * nth_order - 5) // 2
def _create_storage_folder(self):
    '''Create a storage folder named after the image query (spaces replaced
    by '_') inside the destination folder.

    An already-existing folder is reused with a notice; any other error
    propagates to the caller.
    '''
    try:
        print(colored('\nCreating Storage Folder...', 'yellow'))
        self._storageFolder = os.path.join(self._destinationFolder, self._imageQuery.replace(' ', '_'))
        os.makedirs(self._storageFolder)
        print(colored('Storage Folder - ' + self._storageFolder + ' created.', 'green'))
    except FileExistsError:
        # Reuse the existing folder rather than failing
        print(colored('Storage Folder - ' + self._storageFolder + ' already exists.', 'yellow'))
    # Fix: dropped the original `except Exception as exception: raise exception`
    # clause -- it only re-raised while truncating the traceback; letting the
    # exception propagate naturally is behaviorally identical and keeps the
    # full traceback.
def _str_dotted_getattr ( obj , name ) :
"""Expands extends getattr to allow dots in x to indicate nested objects .
Args :
obj ( object ) : an object .
name ( str ) : a name for a field in the object .
Returns :
Any : the value of named attribute .
Raises :
AttributeError : if the named attribute does not exist .""" | for part in name . split ( '.' ) :
obj = getattr ( obj , part )
return str ( obj ) if obj else None |
def readline(self):
    """Read and return a line of text.

    :rtype: str
    :return: the next line of text in the file, including the
        newline character
    """
    _complain_ifclosed(self.closed)
    line = self.f.readline()
    if self.__encoding:
        # Underlying file yields bytes: decode with the configured codec
        # and error policy
        return line.decode(self.__encoding, self.__errors)
    else:
        return line
def close_room(self, room, namespace=None):
    """Close a room.

    :param room: room name
    :param namespace: namespace to use; defaults to this handler's namespace
    """
    return self.socketio.close_room(room=room, namespace=namespace or self.namespace)
def _activation_summary(x):
    """Helper to create summaries for activations.

    Creates a summary that provides a histogram of activations.
    Creates a summary that measures the sparsity of activations.

    Args:
        x: Tensor
    Returns:
        nothing
    """
    # Remove 'tower_[0-9]/' from the name in case this is a multi-GPU training
    # session. This helps the clarity of presentation on tensorboard.
    tensor_name = re.sub('%s_[0-9]*/' % TOWER_NAME, '', x.op.name)
    tf.summary.histogram(tensor_name + '/activations', x)
    # zero_fraction measures sparsity (fraction of zero activations)
    tf.summary.scalar(tensor_name + '/sparsity', tf.nn.zero_fraction(x))
def sort(self, callback=None):
    """Sort the items, returning a new collection.

    :param callback: optional key function used for ordering
    :type callback: callable or None
    :rtype: Collection
    """
    # sorted() treats key=None as natural ordering, so a single call
    # covers both the with-callback and without-callback cases.
    return self.__class__(sorted(self.items, key=callback))
def _images ( self , sys_output ) :
'''a helper method for parsing docker image output''' | import re
gap_pattern = re . compile ( '\t|\s{2,}' )
image_list = [ ]
output_lines = sys_output . split ( '\n' )
column_headers = gap_pattern . split ( output_lines [ 0 ] )
for i in range ( 1 , len ( output_lines ) ) :
columns = gap_pattern . split ( output_lines [ i ] )
if len ( columns ) == len ( column_headers ) :
image_details = { }
for j in range ( len ( columns ) ) :
image_details [ column_headers [ j ] ] = columns [ j ]
image_list . append ( image_details )
return image_list |
def set_option(self, option, value):
    """Set a plugin option in the configuration file.

    Note: Use sig_option_changed to call it from widgets of the
    same or another plugin.
    """
    CONF.set(self.CONF_SECTION, str(option), value)
def Reset(self):
    """Reset the lexer to process a new data feed."""
    # The first state
    self.state = "INITIAL"
    self.state_stack = []
    # The buffer we are parsing now
    self.buffer = ""
    self.error = 0
    self.verbose = 0
    # The index into the buffer where we are currently pointing,
    # and the text already consumed from it
    self.processed = 0
    self.processed_buffer = ""
def cross_product(p1, p2, o=(0, 0)):
    """Return the 2-D cross product (z-component) of vectors o->p1 and o->p2.

    Args:
        p1, p2: point (x, y)
        o: origin
    """
    ax, ay = vector(o, p1)
    bx, by = vector(o, p2)
    return ax * by - ay * bx
def chunks_generator(iterable, count_items_in_chunk):
    """Lazily split *iterable* into chunk generators of at most
    *count_items_in_chunk* items.

    Warning (translated from the original Russian): the source is consumed
    once -- each yielded chunk must be exhausted before advancing to the
    next, and the whole thing cannot be iterated twice.

    :param iterable: any iterable
    :param count_items_in_chunk: maximum chunk length
    :return: generator of chunk generators
    """
    source = iter(iterable)
    for head in source:  # stops when the source is depleted
        def _chunk():
            # first item was already pulled by the outer loop
            yield head
            # hand out up to count_items_in_chunk - 1 further items
            yield from islice(source, count_items_in_chunk - 1)
        yield _chunk()
def _combine_variant_collections(cls, combine_fn, variant_collections, kwargs):
    """Create a single VariantCollection from multiple different collections.

    Parameters
    ----------
    cls : class
        Should be VariantCollection
    combine_fn : function
        Function which takes any number of sets of variants and returns
        some combination of them (typically union or intersection).
    variant_collections : tuple of VariantCollection
    kwargs : dict
        Optional dictionary of keyword arguments to pass to the initializer
        for VariantCollection.
    """
    kwargs["variants"] = combine_fn(*[set(vc) for vc in variant_collections])
    kwargs["source_to_metadata_dict"] = cls._merge_metadata_dictionaries([vc.source_to_metadata_dict for vc in variant_collections])
    kwargs["sources"] = set.union(*([vc.sources for vc in variant_collections]))
    for key, value in variant_collections[0].to_dict().items():
        # If some optional parameter isn't explicitly specified as an
        # argument to union() or intersection() then use the same value
        # as the first VariantCollection. This makes union/intersection
        # with a single argument behave as the identity function (rather
        # than resetting optional parameters to their default values).
        if key not in kwargs:
            kwargs[key] = value
    return cls(**kwargs)
def recv_exit_status(self):
    """Return the exit status from the process on the server. This is
    mostly useful for retrieving the results of an L{exec_command}.
    If the command hasn't finished yet, this method will wait until
    it does, or until the channel is closed. If no exit status is
    provided by the server, -1 is returned.

    @return: the exit code of the process on the server.
    @rtype: int
    @since: 1.2
    """
    # Block until the remote side delivers an exit status (or the
    # channel is closed, which also sets the event)
    self.status_event.wait()
    assert self.status_event.isSet()
    return self.exit_status
def visit_Slice(self, node: ast.Slice) -> slice:
    """Visit ``lower``, ``upper`` and ``step`` and recompute the node as a ``slice``."""
    def _recompute(sub_node):
        # type: (Optional[ast.AST]) -> Optional[int]
        return None if sub_node is None else self.visit(node=sub_node)

    # Arguments are evaluated left-to-right, preserving the original
    # lower -> upper -> step visiting order.
    result = slice(_recompute(node.lower), _recompute(node.upper), _recompute(node.step))
    self.recomputed_values[node] = result
    return result
def create_job_queue(self, queue_name, priority, state, compute_env_order):
    """Create a job queue.

    :param queue_name: Queue name
    :type queue_name: str
    :param priority: Queue priority
    :type priority: int
    :param state: Queue state
    :type state: string
    :param compute_env_order: Compute environment list
    :type compute_env_order: list of dict
    :return: Tuple of Name, ARN
    :rtype: tuple of str
    """
    # All four parameters are mandatory
    for variable, var_name in ((queue_name, 'jobQueueName'), (priority, 'priority'), (state, 'state'), (compute_env_order, 'computeEnvironmentOrder')):
        if variable is None:
            raise ClientException('{0} must be provided'.format(var_name))
    if state not in ('ENABLED', 'DISABLED'):
        raise ClientException('state {0} must be one of ENABLED | DISABLED'.format(state))
    if self.get_job_queue_by_name(queue_name) is not None:
        raise ClientException('Job queue {0} already exists'.format(queue_name))
    if len(compute_env_order) == 0:
        raise ClientException('At least 1 compute environment must be provided')
    try:
        # orders and extracts computeEnvironment names
        ordered_compute_environments = [item['computeEnvironment'] for item in sorted(compute_env_order, key=lambda x: x['order'])]
        env_objects = []
        # Check each ARN exists, then make a list of compute env's
        for arn in ordered_compute_environments:
            env = self.get_compute_environment_by_arn(arn)
            if env is None:
                raise ClientException('Compute environment {0} does not exist'.format(arn))
            env_objects.append(env)
    except Exception:
        # NOTE(review): this also catches the 'does not exist' ClientException
        # raised just above and re-reports it as 'malformed' -- confirm intended
        raise ClientException('computeEnvironmentOrder is malformed')
    # Create new Job Queue and register it by ARN
    queue = JobQueue(queue_name, priority, state, env_objects, compute_env_order, self.region_name)
    self._job_queues[queue.arn] = queue
    return queue_name, queue.arn
def crawl_links(self, seed_url=None):
    """Find new links given a seed URL and follow them breadth-first.

    Save page responses as PART.html files.
    Return the PART.html filenames created during crawling.
    """
    if seed_url is not None:
        self.seed_url = seed_url
    if self.seed_url is None:
        sys.stderr.write('Crawling requires a seed URL.\n')
        return []
    # Snapshot the part-file counter so only newly written files are returned
    prev_part_num = utils.get_num_part_files()
    crawled_links = set()
    uncrawled_links = OrderedSet()
    uncrawled_links.add(self.seed_url)
    try:
        while uncrawled_links:
            # Check limit on number of links and pages to crawl
            if self.limit_reached(len(crawled_links)):
                break
            # FIFO pop gives the breadth-first order
            url = uncrawled_links.pop(last=False)
            # Remove protocol, fragments, etc. to get unique URLs
            unique_url = utils.remove_protocol(utils.clean_url(url))
            if unique_url not in crawled_links:
                raw_resp = utils.get_raw_resp(url)
                if raw_resp is None:
                    if not self.args['quiet']:
                        sys.stderr.write('Failed to parse {0}.\n'.format(url))
                    continue
                resp = lh.fromstring(raw_resp)
                if self.page_crawled(resp):
                    # Duplicate page content already seen: skip
                    continue
                crawled_links.add(unique_url)
                new_links = self.get_new_links(url, resp)
                uncrawled_links.update(new_links)
                if not self.args['quiet']:
                    print('Crawled {0} (#{1}).'.format(url, len(crawled_links)))
                # Write page response to PART.html file
                utils.write_part_file(self.args, url, raw_resp, resp, len(crawled_links))
    except (KeyboardInterrupt, EOFError):
        # Let the user stop crawling; keep what was collected so far
        pass
    curr_part_num = utils.get_num_part_files()
    return utils.get_part_filenames(curr_part_num, prev_part_num)
def get_ptr(data, offset=None, ptr_type=ctypes.c_void_p):
    """Return a pointer to *data*, optionally shifted and cast.

    :param data: a ctypes object to point at
    :param offset: optional byte offset added to the address
    :param ptr_type: pointer type of the result (default: void pointer)
    """
    void_ptr = ctypes.cast(ctypes.pointer(data), ctypes.c_void_p)
    if offset:
        void_ptr = ctypes.c_void_p(void_ptr.value + offset)
    if ptr_type == ctypes.c_void_p:
        return void_ptr
    return ctypes.cast(void_ptr, ptr_type)
def get_lang_class(lang):
    """Import and load a Language class.

    lang (unicode): Two-letter language code, e.g. 'en'.
    RETURNS (Language): Language class.
    """
    global LANGUAGES
    # Check if an entry point is exposed for the language code
    entry_point = get_entry_point("spacy_languages", lang)
    if entry_point is not None:
        LANGUAGES[lang] = entry_point
        return entry_point
    if lang not in LANGUAGES:
        try:
            module = importlib.import_module(".lang.%s" % lang, "spacy")
        except ImportError as err:
            raise ImportError(Errors.E048.format(lang=lang, err=err))
        # The first __all__ entry of the language module is its Language class
        LANGUAGES[lang] = getattr(module, module.__all__[0])
    return LANGUAGES[lang]
def varea_stack(self, stackers, **kw):
    '''Generate multiple ``VArea`` renderers for levels stacked bottom
    to top.

    Args:
        stackers (seq[str]): a list of data source field names to stack
            successively for ``y1`` and ``y2`` varea coordinates.
            Additionally, the ``name`` of the renderer will be set to
            the value of each successive stacker (this is useful with the
            special hover variable ``$name``)

    Any additional keyword arguments are passed to each call to ``varea``.
    If a keyword value is a list or tuple, then each call will get one
    value from the sequence.

    Returns:
        list[GlyphRenderer]

    Examples:
        Assuming a ``ColumnDataSource`` named ``source`` with columns
        *2016* and *2017*, then the following call to ``varea_stack`` will
        create two ``VArea`` renderers that stack:

        .. code-block:: python

            p.varea_stack(['2016', '2017'], x='x', color=['blue', 'red'], source=source)

        This is equivalent to the following two separate calls:

        .. code-block:: python

            p.varea(y1=stack(), y2=stack('2016'), x='x', color='blue', source=source, name='2016')
            p.varea(y1=stack('2016'), y2=stack('2016', '2017'), x='x', color='red', source=source, name='2017')
    '''
    result = []
    # NOTE: the loop variable deliberately rebinds ``kw`` with the
    # per-level keyword dict produced by _double_stack
    for kw in _double_stack(stackers, "y1", "y2", **kw):
        result.append(self.varea(**kw))
    return result
def update_keys(self):
    """Store the Reddit API credentials from the entry-field text values.

    (The previous docstring said "Google API key", but the code stores
    Reddit credentials.)
    """
    from ...main import add_api_key
    add_api_key("reddit_api_user_agent", self.reddit_api_user_agent.get())
    add_api_key("reddit_api_client_id", self.reddit_api_client_id.get())
    add_api_key("reddit_api_client_secret", self.reddit_api_client_secret.get())
def _collect_data_references(self, irsb, irsb_addr):
    """Unoptimize the IRSB and _add_data_reference for individual statements
    or for parts of statements (e.g. Store).

    :param pyvex.IRSB irsb: Block to scan for data references
    :param int irsb_addr: Address of block
    :return: None
    """
    if irsb.data_refs:
        # VEX already lifted data references for us: use them directly
        self._process_irsb_data_refs(irsb)
    elif irsb.statements:
        # Fall back: undo optimizations, then scan statements ourselves
        irsb = self._unoptimize_irsb(irsb)
        # for each statement, collect all constants that are referenced or used
        self._collect_data_references_by_scanning_stmts(irsb, irsb_addr)
def score_genes(adata, gene_list, ctrl_size=50, gene_pool=None, n_bins=25, score_name='score', random_state=0, copy=False, use_raw=False):
    # we use the scikit-learn convention of calling the seed "random_state"
    """Score a set of genes [Satija15]_.

    The score is the average expression of a set of genes subtracted with the
    average expression of a reference set of genes. The reference set is
    randomly sampled from the `gene_pool` for each binned expression value.
    This reproduces the approach in Seurat [Satija15]_ and has been implemented
    for Scanpy by Davide Cittaro.

    Parameters
    ----------
    adata : :class:`~anndata.AnnData`
        The annotated data matrix.
    gene_list : iterable
        The list of gene names used for score calculation.
    ctrl_size : `int`, optional (default: 50)
        Number of reference genes to be sampled. If `len(gene_list)` is not too
        low, you can set `ctrl_size=len(gene_list)`.
    gene_pool : `list` or `None`, optional (default: `None`)
        Genes for sampling the reference set. Default is all genes.
    n_bins : `int`, optional (default: 25)
        Number of expression level bins for sampling.
    score_name : `str`, optional (default: `'score'`)
        Name of the field to be added in `.obs`.
    random_state : `int`, optional (default: 0)
        The random seed for sampling.
    copy : `bool`, optional (default: `False`)
        Copy `adata` or modify it inplace.
    use_raw : `bool`, optional (default: `False`)
        Use `raw` attribute of `adata` if present.

    Returns
    -------
    Depending on `copy`, returns or updates `adata` with an additional field
    `score_name`.

    Examples
    --------
    See this `notebook <https://github.com/theislab/scanpy_usage/tree/master/180209_cell_cycle>`__.
    """
    logg.info('computing score \'{}\''.format(score_name), r=True)
    adata = adata.copy() if copy else adata
    if random_state:
        # NOTE(review): random_state=0 (the default) skips seeding -- confirm intended
        np.random.seed(random_state)
    # Keep only genes actually present in the data; warn on the rest
    gene_list_in_var = []
    var_names = adata.raw.var_names if use_raw else adata.var_names
    for gene in gene_list:
        if gene in var_names:
            gene_list_in_var.append(gene)
        else:
            logg.warn('gene: {} is not in adata.var_names and will be ignored'.format(gene))
    gene_list = set(gene_list_in_var[:])
    if not gene_pool:
        gene_pool = list(var_names)
    else:
        gene_pool = [x for x in gene_pool if x in var_names]
    # Trying here to match the Seurat approach in scoring cells.
    # Basically we need to compare genes against random genes in a matched
    # interval of expression.
    _adata = adata.raw if use_raw else adata
    # TODO: this densifies the whole data matrix for `gene_pool`
    if issparse(_adata.X):
        obs_avg = pd.Series(np.nanmean(_adata[:, gene_pool].X.toarray(), axis=0), index=gene_pool)
        # average expression of genes
    else:
        obs_avg = pd.Series(np.nanmean(_adata[:, gene_pool].X, axis=0), index=gene_pool)
        # average expression of genes
    obs_avg = obs_avg[np.isfinite(obs_avg)]
    # Sometimes (and I don't know how) missing data may be there, with nans
    # Bin genes by average expression rank
    n_items = int(np.round(len(obs_avg) / (n_bins - 1)))
    obs_cut = obs_avg.rank(method='min') // n_items
    control_genes = set()
    # now pick `ctrl_size` genes from every cut
    for cut in np.unique(obs_cut.loc[gene_list]):
        r_genes = np.array(obs_cut[obs_cut == cut].index)
        np.random.shuffle(r_genes)
        control_genes.update(set(r_genes[:ctrl_size]))
        # uses full r_genes if ctrl_size > len(r_genes)
    # To index, we need a list - indexing implies an order.
    control_genes = list(control_genes - gene_list)
    gene_list = list(gene_list)
    X_list = _adata[:, gene_list].X
    if issparse(X_list):
        X_list = X_list.toarray()
    X_control = _adata[:, control_genes].X
    if issparse(X_control):
        X_control = X_control.toarray()
    # Per-cell mean expression of the control genes
    X_control = np.nanmean(X_control, axis=1)
    if len(gene_list) == 0:
        # We shouldn't even get here, but just in case
        logg.hint('could not add \n' ' \'{}\', score of gene set (adata.obs)'.format(score_name))
        return adata if copy else None
    elif len(gene_list) == 1:
        score = _adata[:, gene_list].X - X_control
    else:
        score = np.nanmean(X_list, axis=1) - X_control
    adata.obs[score_name] = pd.Series(np.array(score).ravel(), index=adata.obs_names)
    logg.info(' finished', time=True, end=' ' if _settings_verbosity_greater_or_equal_than(3) else '\n')
    logg.hint('added\n' ' \'{}\', score of gene set (adata.obs)'.format(score_name))
    return adata if copy else None
def rlmb_base_stochastic_discrete_noresize():
  """Base setting with stochastic discrete model and no frame resizing."""
  hparams = rlmb_base()
  # Override the base hparams for the stochastic discrete world model.
  overrides = {
      "generative_model": "next_frame_basic_stochastic_discrete",
      "generative_model_params": "next_frame_basic_stochastic_discrete",
      "resize_height_factor": 1,
      "resize_width_factor": 1,
  }
  for name, value in overrides.items():
    setattr(hparams, name, value)
  return hparams
async def restrictChatMember(self, chat_id, user_id, until_date=None,
                             can_send_messages=None,
                             can_send_media_messages=None,
                             can_send_other_messages=None,
                             can_add_web_page_previews=None):
    """Restrict a chat member's permissions.

    See: https://core.telegram.org/bots/api#restrictchatmember

    All keyword arguments map directly onto the Bot API parameters of the
    same name.
    """
    # locals() is captured *before* any other local is created, so the
    # payload contains exactly the method's parameters; _strip presumably
    # removes `self` and unset (None) values -- TODO confirm against _strip.
    p = _strip(locals())
    return await self._api_request('restrictChatMember', _rectify(p))
def _log ( self , s ) :
r"""Log a string . It flushes but doesn ' t append \ n , so do that yourself .""" | # TODO ( tewalds ) : Should this be using logging . info instead ? How to see them
# outside of google infrastructure ?
sys . stderr . write ( s )
sys . stderr . flush ( ) |
def parseTopDepth(self, descendants=()):
    """Return the highest (smallest) heading level found in the hierarchy.

    Uses the given ``descendants`` if non-empty; otherwise falls back to
    ``self.source.descendants`` when available.  Returns -1 when there is
    nothing to inspect.
    """
    nodes = list(descendants)
    if not nodes:
        # No explicit descendants given: fall back to the source element's.
        nodes = list(getattr(self.source, 'descendants', descendants))
    if not nodes:
        return -1
    levels = [TOC.getHeadingLevel(node, self.hierarchy) for node in nodes]
    return min(levels)
def compute_acf(cls, filename, start_index=None, end_index=None,
                per_walker=False, walkers=None, parameters=None):
    """Computes the autocorrelation function of the model params in the
    given file.

    By default, parameter values are averaged over all walkers at each
    iteration.  The ACF is then calculated over the averaged chain.  An
    ACF per-walker will be returned instead if ``per_walker=True``.

    Parameters
    ----------
    filename : str
        Name of a samples file to compute ACFs for.
    start_index : {None, int}
        The start index to compute the acl from.  If None, will try to use
        the number of burn-in iterations in the file; otherwise, will start
        at the first sample.
    end_index : {None, int}
        The end index to compute the acl to.  If None, will go to the end
        of the current iteration.
    per_walker : optional, bool
        Return the ACF for each walker separately.  Default is False.
    walkers : optional, int or array
        Calculate the ACF using only the given walkers.  If None (the
        default) all walkers will be used.
    parameters : optional, str or array
        Calculate the ACF for only the given parameters.  If None (the
        default) will calculate the ACF for all of the model params.

    Returns
    -------
    dict :
        Dictionary of arrays giving the ACFs for each parameter.  If
        ``per-walker`` is True, the arrays will have shape
        ``nwalkers x niterations``.
    """
    acfs = {}
    with cls._io(filename, 'r') as fp:
        if parameters is None:
            parameters = fp.variable_params
        # NOTE(review): `unicode` only exists on Python 2; on Python 3 the
        # second isinstance raises NameError when reached -- confirm the
        # supported Python versions.
        if isinstance(parameters, str) or isinstance(parameters, unicode):
            parameters = [parameters]
        for param in parameters:
            if per_walker:
                # Just call myself once per walker and stack the results
                # into an (nwalkers x niterations) array.
                if walkers is None:
                    walkers = numpy.arange(fp.nwalkers)
                arrays = [cls.compute_acf(filename, start_index=start_index,
                                          end_index=end_index,
                                          per_walker=False, walkers=ii,
                                          parameters=param)[param]
                          for ii in walkers]
                acfs[param] = numpy.vstack(arrays)
            else:
                samples = fp.read_raw_samples(param, thin_start=start_index,
                                              thin_interval=1,
                                              thin_end=end_index,
                                              walkers=walkers,
                                              flatten=False)[param]
                # Average over walkers (axis 0) before computing the ACF.
                samples = samples.mean(axis=0)
                # NOTE(review): the trailing .numpy() implies calculate_acf
                # returns an array-like with a numpy() method -- TODO
                # confirm against the autocorrelation module.
                acfs[param] = autocorrelation.calculate_acf(samples).numpy()
    return acfs
def get_published_topics(self):
    """Get a list of published topics for this instance.

    Streams applications publish streams to a topic that can be subscribed
    to by other applications.  This allows a microservice approach where
    publishers and subscribers are independent of each other.

    A published stream has a topic and a schema.  It is recommended that a
    topic is only associated with a single schema.

    Streams may be published and subscribed by applications regardless of
    the implementation language.  For example a Python application can
    publish a stream of JSON tuples that are subscribed to by SPL and Java
    applications.

    Returns:
        list(PublishedTopic): List of currently published topics.
    """
    topics = []
    # A topic can be published multiple times (typically with the same
    # schema), but each (topic, schema) pair should appear only once in
    # the result: we return the topics being published, not the streams.
    schemas_by_topic = {}
    for stream in self.get_exported_streams():
        published = stream._as_published_topic()
        if published is None:
            continue
        known_schemas = schemas_by_topic.get(published.topic)
        if known_schemas is None:
            # First time we see this topic.
            schemas_by_topic[published.topic] = [published.schema]
            topics.append(published)
        elif published.schema is not None and published.schema not in known_schemas:
            known_schemas.append(published.schema)
            topics.append(published)
    return topics
def render_summary(self, include_title=True, request=None):
    """Render the traceback summary for the interactive console.

    :param include_title: if True, prepend a heading ('Syntax Error' or
        'Traceback (most recent call last)').
    :param request: the current request, used to reach the application's
        jinja2 environment.  NOTE(review): despite the None default,
        ``request.app`` is accessed unconditionally -- confirm callers
        always pass a request.
    :return: the rendered 'debugtoolbar/exception_summary.html' template.
    """
    title = ''
    frames = []
    classes = ['traceback']
    if not self.frames:
        # No frames to show (e.g. a pure syntax error).
        classes.append('noframe-traceback')
    if include_title:
        if self.is_syntax_error:
            title = text_('Syntax Error')
        else:
            title = text_('Traceback <small>(most recent call last)' '</small>')
    # One <li> per frame; frame.info (if any) becomes a title attribute.
    for frame in self.frames:
        frames.append(text_('<li%s>%s') % (frame.info and text_(' title="%s"' % escape(frame.info)) or text_(''), frame.render()))
    if self.is_syntax_error:
        description_wrapper = text_('<pre class=syntaxerror>%s</pre>')
    else:
        description_wrapper = text_('<blockquote>%s</blockquote>')
    vars = {
        'classes': text_(' '.join(classes)),
        'title': title and text_('<h3 class="traceback">%s</h3>' % title) or text_(''),
        'frames': text_('\n'.join(frames)),
        'description': description_wrapper % escape(self.exception),
    }
    app = request.app
    template = app.ps.jinja2.env.get_template('debugtoolbar/exception_summary.html')
    return template.render(app=app, request=request, **vars)
def project_geometry(geometry, crs, to_latlong=False):
    """Project a shapely Polygon or MultiPolygon from WGS84 to UTM, or
    vice-versa.

    Parameters
    ----------
    geometry : shapely Polygon or MultiPolygon
        the geometry to project
    crs : int
        the starting coordinate reference system of the passed-in geometry
    to_latlong : bool
        if True, project from crs to WGS84; if False, project from crs to
        the local UTM zone

    Returns
    -------
    tuple
        (projected shapely geometry, crs of the projected geometry)
    """
    # Wrap the bare geometry in a single-row GeoDataFrame so we can reuse
    # project_gdf, which handles the actual (re)projection.
    wrapper = gpd.GeoDataFrame()
    wrapper.crs = crs
    wrapper.name = 'geometry to project'
    wrapper['geometry'] = None
    wrapper.loc[0, 'geometry'] = geometry
    projected = project_gdf(wrapper, to_latlong=to_latlong)
    projected_geometry = projected['geometry'].iloc[0]
    return projected_geometry, projected.crs
def sorted_for_ner(crf_classes):
    """Return labels sorted in a default order suitable for NER tasks:

    >>> sorted_for_ner(['B-ORG', 'B-PER', 'O', 'I-PER'])
    ['O', 'B-ORG', 'B-PER', 'I-PER']
    """
    def _entity_then_label(label):
        # Prefixed labels such as B-ORG / I-ORG group by their entity type;
        # everything else (e.g. 'O') sorts first under the empty key.
        prefixed = len(label) > 2 and label[1] == '-'
        entity = label.split('-', 1)[1] if prefixed else ''
        return entity, label

    return sorted(crf_classes, key=_entity_then_label)
def swf2png(swf_path, png_path, swfrender_path="swfrender"):
    """Convert an SWF slide into a PNG image.

    Raises:
        OSError is raised if swfrender is not available.
        ConversionError is raised if the image cannot be created.
    """
    # Currently relies on swftools' swfrender. A native python dependency
    # to convert swf into png or jpg would be nicer, but pyswf isn't
    # flawless: some graphical elements (like the text!) are lost during
    # the export.
    cmd = [swfrender_path, swf_path, '-o', png_path]
    try:
        subprocess.check_output(cmd, stderr=subprocess.STDOUT)
    except subprocess.CalledProcessError as e:
        raise ConversionError(
            "Failed to convert SWF file %s.\n"
            "\tCommand: %s\n"
            "\tExit status: %s.\n"
            "\tOutput:\n%s"
            % (swf_path, " ".join(cmd), e.returncode, e.output))
def _lowest_rowids ( self , table , limit ) :
"""Gets the lowest available row ids for table insertion . Keeps things tidy !
Parameters
table : str
The name of the table being modified
limit : int
The number of row ids needed
Returns
available : sequence
An array of all available row ids""" | try :
t = self . query ( "SELECT id FROM {}" . format ( table ) , unpack = True , fmt = 'table' )
ids = t [ 'id' ]
all_ids = np . array ( range ( 1 , max ( ids ) ) )
except TypeError :
ids = None
all_ids = np . array ( range ( 1 , limit + 1 ) )
available = all_ids [ np . in1d ( all_ids , ids , assume_unique = True , invert = True ) ] [ : limit ]
# If there aren ' t enough empty row ids , start using the new ones
if len ( available ) < limit :
diff = limit - len ( available )
available = np . concatenate ( ( available , np . array ( range ( max ( ids ) + 1 , max ( ids ) + 1 + diff ) ) ) )
return available |
def inferred_var_seqs_plus_flanks(self, ref_seq, flank_length):
    '''Returns the start position of the first flank sequence, plus a list
    of sequences: the REF, then one per ALT (same order as the ALT
    column).'''
    # Clamp the flanks to the reference sequence boundaries.
    flank_start = max(0, self.POS - flank_length)
    flank_end = min(len(ref_seq) - 1, self.ref_end_pos() + flank_length)
    left = ref_seq[flank_start:self.POS]
    right = ref_seq[self.ref_end_pos() + 1:flank_end + 1]
    # REF first, then each ALT, each wrapped in the same flanks.
    alleles = [self.REF] + list(self.ALT)
    return flank_start, [left + allele + right for allele in alleles]
def save_files(self, nodes):
    """Saves user defined files using given nodes.

    :param nodes: Nodes.
    :type nodes: list
    :return: Method success.
    :rtype: bool
    """
    # NOTE(review): despite the docstring, no boolean is actually returned
    # (implicit None) -- confirm callers don't rely on the return value.
    metrics = {"Opened": 0, "Cached": 0}
    for node in nodes:
        file = node.file
        if self.__container.get_editor(file):
            # File is open in an editor: save through the editor.
            if self.__container.save_file(file):
                metrics["Opened"] += 1
                self.__uncache(file)
        else:
            # File only lives in the files cache: write its document to disk.
            cache_data = self.__files_cache.get_content(file)
            if cache_data is None:
                LOGGER.warning("!> {0} | '{1}' file doesn't exists in files cache!".format(self.__class__.__name__, file))
                continue
            if cache_data.document:
                file_handle = File(file)
                # toPlainText().toUtf8() suggests a Qt text document --
                # TODO confirm (toUtf8 is a QString method).
                file_handle.content = [cache_data.document.toPlainText().toUtf8()]
                if file_handle.write():
                    metrics["Cached"] += 1
                    self.__uncache(file)
            else:
                LOGGER.warning("!> {0} | '{1}' file document doesn't exists in files cache!".format(self.__class__.__name__, file))
    # Surface a summary notification of how many files were saved.
    self.__container.engine.notifications_manager.notify("{0} | '{1}' opened file(s) and '{2}' cached file(s) saved!".format(self.__class__.__name__, metrics["Opened"], metrics["Cached"]))
def __cutRaw ( self , oiraw , maxLength ) :
'''现将句子按句子完结符号切分 , 如果切分完后一个句子长度超过限定值
, 再对该句子进行切分''' | vec = [ ]
m = re . findall ( u".*?[。?!;;!?]" , oiraw )
num , l , last = 0 , 0 , 0
for i in range ( len ( m ) ) :
if ( num + len ( m [ i ] ) >= maxLength ) :
vec . append ( "" . join ( m [ last : i ] ) )
last = i
num = len ( m [ i ] )
else :
num += len ( m [ i ] )
l += len ( m [ i ] )
if ( len ( oiraw ) - l + num >= maxLength ) :
vec . append ( "" . join ( m [ last : len ( m ) ] ) )
vec . append ( oiraw [ l : ] )
else :
vec . append ( oiraw [ l - num : ] )
return vec |
def image(self, img):
    """Set the display buffer from a PIL image.  The image must be in
    1-bit mode ('1') and exactly the display's dimensions."""
    if img.mode != '1':
        raise ValueError('Image must be in mode 1.')
    imwidth, imheight = img.size
    if imwidth != self.width or imheight != self.height:
        raise ValueError('Image must be same dimensions as display ({0}x{1}).'.format(self.width, self.height))
    # load() exposes all pixels at once -- much faster than getpixel().
    pixels = img.load()
    # Zero the buffer in place before drawing.
    for index in range(len(self.buf)):
        self.buf[index] = 0
    # Set each lit pixel; these displays are small, so the double loop
    # is acceptable.
    for col in range(self.width):
        for row in range(self.height):
            if pixels[(col, row)]:
                self.pixel(col, row, 1)
def _get_slack_array_construct(self):
    """Returns a parsing construct for an array of slack bus data.

    Matches input of the form ``SW.con = [ ... <rows> ]`` where each row
    holds the fields below terminated by a semicolon.  Every parsed row is
    handed to ``self.push_slack`` via a parse action.  NOTE(review): the
    grammar atoms (``integer``, ``real``, ``boolean``, ``scolon``) are
    defined elsewhere in this module -- presumably pyparsing elements.
    """
    bus_no = integer.setResultsName("bus_no")
    s_rating = real.setResultsName("s_rating")          # MVA
    v_rating = real.setResultsName("v_rating")          # kV
    v_magnitude = real.setResultsName("v_magnitude")    # p.u.
    ref_angle = real.setResultsName("ref_angle")        # p.u.
    # The remaining fields are optional in the input format.
    q_max = Optional(real).setResultsName("q_max")      # p.u.
    q_min = Optional(real).setResultsName("q_min")      # p.u.
    v_max = Optional(real).setResultsName("v_max")      # p.u.
    v_min = Optional(real).setResultsName("v_min")      # p.u.
    p_guess = Optional(real).setResultsName("p_guess")  # p.u.
    # Loss participation coefficient
    lp_coeff = Optional(real).setResultsName("lp_coeff")
    ref_bus = Optional(boolean).setResultsName("ref_bus")
    status = Optional(boolean).setResultsName("status")
    slack_data = bus_no + s_rating + v_rating + v_magnitude + ref_angle + \
        q_max + q_min + v_max + v_min + p_guess + lp_coeff + ref_bus + \
        status + scolon
    slack_data.setParseAction(self.push_slack)
    slack_array = Literal("SW.con") + "=" + "[" + "..." + \
        ZeroOrMore(slack_data + Optional("]" + scolon))
    return slack_array
def send_cmd(self, command, connId='default'):
    """Sends any command to FTP server. Returns server output.

    Parameters:
    - command - any valid command to be sent (invalid will result in
      exception).
    - connId (optional) - connection identifier. By default equals
      'default'.

    Example:
    | send cmd | HELP |
    """
    connection = self.__getConnection(connId)
    try:
        outputMsg = str(connection.sendcmd(command))
    except ftplib.all_errors as e:
        # Re-raise any ftplib failure as the library's own error type.
        raise FtpLibraryError(str(e))
    if self.printOutput:
        logger.info(outputMsg)
    return outputMsg
def _kvmatrix2d(km, vm):
    '''Build a nested dict from a key matrix and a value matrix.

    km = [[[1],[3]],[[1,2],[3,'a']],[[1,2,22]]]
    show_kmatrix(km)
    vm = [[[222]],['b']]
    show_vmatrix(vm)
    d = _kvmatrix2d(km, vm)
    '''
    d = {}
    key_paths = get_kmwfs(km)
    value_paths = elel.get_wfs(vm)
    # Walk the value matrix; key and value paths are index-aligned.
    for i, value_path in enumerate(value_paths):
        value = elel.getitem_via_pathlist(vm, value_path)
        if elel.is_leaf(value):
            _setitem_via_pathlist(d, key_paths[i], value)
        else:
            # Non-leaf value: only ensure the key path exists.
            _setdefault_via_pathlist(d, key_paths[i])
    return d
def _rlmb_tiny_overrides ( ) :
"""Parameters to override for tiny setting excluding agent - related hparams .""" | return dict ( epochs = 1 , num_real_env_frames = 128 , model_train_steps = 2 , max_num_noops = 1 , eval_max_num_noops = 1 , generative_model_params = "next_frame_tiny" , stop_loop_early = True , resize_height_factor = 2 , resize_width_factor = 2 , wm_eval_rollout_ratios = [ 1 ] , rl_env_max_episode_steps = 7 , eval_rl_env_max_episode_steps = 7 , simulated_rollout_length = 2 , eval_sampling_temps = [ 0.0 , 1.0 ] , ) |
def from_struct(klass, struct, timezone=pytz.UTC):
    """Returns a MayaDT instance from a 9-tuple struct.

    The struct is assumed to come from gmtime().
    """
    # mktime() interprets the struct as local time, so subtract the UTC
    # offset to recover the epoch timestamp of a gmtime() struct
    # (presumably what utc_offset computes -- see its definition).
    epoch_seconds = time.mktime(struct) - utc_offset(struct)
    dt = Datetime.fromtimestamp(epoch_seconds, timezone)
    return klass(klass.__dt_to_epoch(dt))
def search_maintenance_window_entities(self, **kwargs):  # noqa: E501
    """Search over a customer's maintenance windows  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.search_maintenance_window_entities(async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param SortableSearchRequest body:
    :return: ResponseContainerPagedMaintenanceWindow
             If the method is called asynchronously,
             returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    if kwargs.get('async_req'):
        # Asynchronous: the caller receives the request thread.
        return self.search_maintenance_window_entities_with_http_info(**kwargs)  # noqa: E501
    # Synchronous: unwrap and return the response data directly.
    response = self.search_maintenance_window_entities_with_http_info(**kwargs)  # noqa: E501
    return response
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.