signature stringlengths 29 44.1k | implementation stringlengths 0 85.2k |
|---|---|
def get_associated_profile_names(profile_path, result_role, org_vm, server,
                                 include_classnames=False):
    """Get the associated profiles and return the string names
    (org:name:version) for each profile as a list.

    When ``include_classnames`` is True each name is prefixed with the
    instance's CIM class name in parentheses.
    """
    associated = get_associated_profiles(profile_path, result_role, server)
    if include_classnames:
        return ["(%s)%s" % (inst.classname, profile_name(org_vm, inst))
                for inst in associated]
    return [profile_name(org_vm, inst) for inst in associated]
def get_template_sources(self, template_name, template_dirs=None):
    """Return the absolute paths to "template_name" in the specified app.

    If the name does not contain an app name (no colon), an empty list
    is returned. The parent FilesystemLoader.load_template_source() will
    take care of the actual loading for us.
    """
    if ':' not in template_name:
        return []
    app_name, template_name = template_name.split(":", 1)
    template_dir = get_app_template_dir(app_name)
    if not template_dir:
        return []
    path = join(template_dir, template_name)
    # Newer Django wants an Origin object; fall back to a plain path string
    # when Origin is unavailable or has an incompatible signature.
    try:
        from django.template import Origin
        return [Origin(name=path, template_name=template_name, loader=self)]
    except (ImportError, TypeError):
        return [path]
def transaction(self, request):
    """Handle the Amazon FPS transaction callback.

    Ideally this method would check the caller reference against a user id
    (or other uniquely identifiable attribute) and the type of transaction
    (pay, reserve, etc.). For the sake of the example, every user is
    charged a flat $100.
    """
    request_url = request.build_absolute_uri()
    parsed_url = urlparse.urlparse(request_url)
    # split("=", 1): parameter values (e.g. base64 signatures) may legally
    # contain "=" themselves; the original split("=") raised on those.
    # Empty fragments are skipped so an empty query string no longer crashes.
    dd = dict(param.split("=", 1)
              for param in parsed_url.query.split("&") if param)
    resp = self.purchase(100, dd)
    return HttpResponseRedirect(
        "%s?status=%s" % (reverse("app_offsite_amazon_fps"), resp["status"]))
def pts_on_bezier_curve(P=None, n_seg=0):
    '''Return list of n_seg + 1 points representing n_seg line segments on
    the bezier curve defined by control points P.

    :param P: list of control points as tuples of floats;
              defaults to [(0.0, 0.0)]
    :param n_seg: number of line segments (non-negative int)
    '''
    # BUG FIX: the original used a mutable default argument P=[(0.0, 0.0)],
    # which is shared across calls and can be mutated by callers.
    if P is None:
        P = [(0.0, 0.0)]
    assert isinstance(P, list)
    assert len(P) > 0
    for p in P:
        assert isinstance(p, tuple)
        # BUG FIX: the length check used to live inside the per-coordinate
        # loop, so an empty tuple was never validated at all.
        assert len(p) > 1
        for coord in p:
            assert isinstance(coord, float)
    assert isinstance(n_seg, int)
    assert n_seg >= 0
    return [pt_on_bezier_curve(P, float(i) / n_seg)
            for i in range(n_seg)] + [P[-1]]
def tempoAdjust2(self, tempoFactor):
    """Adjust tempo by aggregating active basal cell votes for pre vs. post.

    :param tempoFactor: scaling signal to MC clock from last sequence item
    :return: adjusted scaling signal

    BUG FIX: the original mixed Python 2 ``print 'x'`` statements with
    ``print(...)`` calls, which is a SyntaxError on Python 3; all prints
    are now function calls.
    """
    late_votes = (len(self.adtm.getNextBasalPredictedCells())
                  - len(self.apicalIntersect)) * -1
    early_votes = len(self.apicalIntersect)
    votes = late_votes + early_votes
    print('vote tally', votes)
    if votes > 0:
        tempoFactor = tempoFactor * 0.5
        print('speed up')
    elif votes < 0:
        tempoFactor = tempoFactor * 2
        print('slow down')
    else:
        # Tie: choose a direction at random.
        print('pick randomly')
        if random.random() > 0.5:
            tempoFactor = tempoFactor * 0.5
            print('random pick: speed up')
        else:
            tempoFactor = tempoFactor * 2
            print('random pick: slow down')
    return tempoFactor
def get_raw(self):
    """Serialize this handler list to its binary form.

    :rtype: bytearray
    """
    out = bytearray()
    out += writesleb128(self.size)
    for handler in self.handlers:
        out += handler.get_raw()
    # A non-positive size signals a catch-all handler address follows.
    if self.size <= 0:
        out += writeuleb128(self.catch_all_addr)
    return out
def saturation_equivalent_potential_temperature(pressure, temperature):
    r"""Calculate saturation equivalent potential temperature.

    Uses the [Bolton1980]_ formulation of equivalent potential temperature
    under the assumption of a saturated process, so the LCL temperature
    equals the parcel temperature and the dry potential temperature reduces to

    .. math:: \theta_{DL} = T_{K} \left(\frac{1000}{p - e}\right)^k

    which is then used to compute

    .. math:: \theta_{E} = \theta_{DL} \exp\left[\left(\frac{3036.}{T_{K}}
              - 1.78\right) r (1 + .448 r)\right]

    According to [DaviesJones2009]_ this is the most accurate non-iterative
    formulation available.

    Parameters
    ----------
    pressure : `pint.Quantity`
        Total atmospheric pressure
    temperature : `pint.Quantity`
        Temperature of parcel

    Returns
    -------
    `pint.Quantity`
        The saturation equivalent potential temperature of the parcel
    """
    temp_k = temperature.to('kelvin').magnitude
    press_hpa = pressure.to('hPa').magnitude
    sat_vp = saturation_vapor_pressure(temperature).to('hPa').magnitude
    mix_ratio = saturation_mixing_ratio(pressure, temperature).magnitude
    theta_dl = temp_k * (1000 / (press_hpa - sat_vp)) ** mpconsts.kappa
    exponent = (3036. / temp_k - 1.78) * mix_ratio * (1 + 0.448 * mix_ratio)
    return theta_dl * np.exp(exponent) * units.kelvin
def recognized_release(self):
    """Check if this Release value is something we can parse.

    :rtype: bool
    """
    _, _, rest = self.get_release_parts()
    # A Release is recognized when nothing follows the parsed parts, or the
    # remainder is the well-known %{dist} / %{?dist} macro. Any other
    # trailing text is a pattern we cannot recognize.
    return rest == '' or bool(re.match(r'%{\??dist}', rest))
def load(self):
    """Load a user's inventory.

    Queries the user's inventory page, parses each item, and adds each item
    to ``self.items``. Note this class should not be used directly; use
    ``usr.inventory`` to access a user's inventory.

    Raises
    ------
    parseException
        If the inventory page cannot be parsed.
    """
    self.items = {}
    pg = self.usr.getPage("http://www.neopets.com/objects.phtml?type=inventory")
    if "You aren't carrying anything" in pg.content:
        # Empty inventory: nothing to parse.
        return
    try:
        rows = pg.find_all("td", "contentModuleContent")[1].table.find_all("tr")
        for row in rows:
            for cell in row.find_all("td"):
                name = cell.text
                # Some item names carry extra info in parentheses; strip it.
                if "(" in name:
                    name = name.split("(")[0]
                parsed = Item(name)
                parsed.id = cell.a['onclick'].split("(")[1].replace(");", "")
                parsed.img = cell.img['src']
                parsed.desc = cell.img['alt']
                parsed.usr = self.usr
                self.items[name] = parsed
    except Exception:
        logging.getLogger("neolib.inventory").exception(
            "Unable to parse user inventory.", {'pg': pg})
        raise parseException
def copy(self, other):
    """Copy metadata from another :py:class:`Metadata` object.

    Returns the :py:class:`Metadata` object, allowing convenient code like::

        md = Metadata().copy(other_md)

    :param Metadata other: The metadata to copy.
    :rtype: :py:class:`Metadata`
    """
    self.data.update(other.data)
    other_comment = other.comment
    # Only overwrite the comment when the source actually has one.
    if other_comment is not None:
        self.comment = other_comment
    return self
def get_reversed_statuses(context):
    """Return a mapping of exit codes to status strings.

    Args:
        context (scriptworker.context.Context): the scriptworker context

    Returns:
        dict: the mapping of exit codes to status strings.
    """
    # Invert the global status map, then let configured overrides win.
    reversed_statuses = dict((code, name) for name, code in STATUSES.items())
    reversed_statuses.update(dict(context.config['reversed_statuses']))
    return reversed_statuses
def _incoming ( self , packet ) :
"""Callback for data received from the copter .""" | if len ( packet . data ) < 1 :
logger . warning ( 'Localization packet received with incorrect' + 'length (length is {})' . format ( len ( packet . data ) ) )
return
pk_type = struct . unpack ( '<B' , packet . data [ : 1 ] ) [ 0 ]
data = packet . data [ 1 : ]
# Decoding the known packet types
# TODO : more generic decoding scheme ?
decoded_data = None
if pk_type == self . RANGE_STREAM_REPORT :
if len ( data ) % 5 != 0 :
logger . error ( 'Wrong range stream report data lenght' )
return
decoded_data = { }
raw_data = data
for i in range ( int ( len ( data ) / 5 ) ) :
anchor_id , distance = struct . unpack ( '<Bf' , raw_data [ : 5 ] )
decoded_data [ anchor_id ] = distance
raw_data = raw_data [ 5 : ]
pk = LocalizationPacket ( pk_type , data , decoded_data )
self . receivedLocationPacket . call ( pk ) |
def Description(self):
    """Returns searchable data as Description"""
    # Combine our own id with the parent's title into one searchable string.
    text = " ".join((self.getId(), self.aq_parent.Title()))
    return safe_unicode(text).encode('utf-8')
def firstAnnot(self):
    """Points to first annotation on page"""
    CheckParent(self)
    annot = _fitz.Page_firstAnnot(self)
    if annot:
        annot.thisown = True
        # Keep only a weak reference to the owning page to avoid a cycle.
        annot.parent = weakref.proxy(self)
        self._annot_refs[id(annot)] = annot
    return annot
def resample(old_wavelengths, new_wavelengths):
    """Resample a spectrum to a new wavelengths map while conserving total flux.

    :param old_wavelengths:
        The original wavelengths array.
    :type old_wavelengths:
        :class:`numpy.array`
    :param new_wavelengths:
        The new wavelengths array to resample onto.
    :type new_wavelengths:
        :class:`numpy.array`
    :returns:
        A sparse ``(old_wavelengths.size, new_wavelengths.size)`` CSC matrix
        whose columns give the fractional contribution of each old pixel to
        each new pixel.
    """
    # Triplet (value, row, col) buffers for the sparse resampling matrix.
    data = []
    old_px_indices = []
    new_px_indices = []
    for i, new_wl_i in enumerate(new_wavelengths):
        # These indices should span just over the new wavelength pixel.
        indices = np.unique(np.clip(
            old_wavelengths.searchsorted(new_wavelengths[i:i + 2],
                                         side="left") + [-1, +1],
            0, old_wavelengths.size - 1))
        N = np.ptp(indices)
        if N == 0:
            # 'Fake' pixel: no old pixel overlaps this new pixel.
            data.append(np.nan)
            new_px_indices.append(i)
            old_px_indices.extend(indices)
            continue
        # Sanity checks.
        assert (old_wavelengths[indices[0]] <= new_wl_i or indices[0] == 0)
        assert (new_wl_i <= old_wavelengths[indices[1]]
                or indices[1] == old_wavelengths.size - 1)
        fractions = np.ones(N)
        # Edges are handled as fractions between rebinned pixels.
        # _ is i + 1 clamped to the last valid new-pixel index.
        _ = np.clip(i + 1, 0, new_wavelengths.size - 1)
        lhs = old_wavelengths[indices[0]:indices[0] + 2]
        rhs = old_wavelengths[indices[-1] - 1:indices[-1] + 1]
        fractions[0] = (lhs[1] - new_wavelengths[i]) / np.ptp(lhs)
        fractions[-1] = (new_wavelengths[_] - rhs[0]) / np.ptp(rhs)
        # Being binned to a single pixel. Prevent overflow from fringe cases.
        fractions = np.clip(fractions, 0, 1)
        # Normalize so the contributions to this new pixel conserve flux.
        fractions /= fractions.sum()
        data.extend(fractions)
        # Mark the new pixel indices affected.
        new_px_indices.extend([i] * N)
        # And the old pixel indices.
        old_px_indices.extend(np.arange(*indices))
    return sparse.csc_matrix(
        (data, (old_px_indices, new_px_indices)),
        shape=(old_wavelengths.size, new_wavelengths.size))
def update_ptr_record(self, device, record, domain_name, data=None, ttl=None,
                      comment=None):
    """Updates a PTR record with the supplied values.

    :param device: device the PTR record belongs to
    :param record: record object or bare record ID
    :param domain_name: the record's domain name
    :param data: new record data, if any
    :param ttl: new TTL (floored at 300 seconds), if any
    :param comment: new comment (truncated to 160 chars), if any
    :return: True when the asynchronous update completed.
    """
    device_type = self._resolve_device_type(device)
    href, svc_name = self._get_ptr_details(device, device_type)
    # Accept either a record object or a bare record ID.
    try:
        rec_id = record.id
    except AttributeError:
        rec_id = record
    rec = {"name": domain_name, "id": rec_id, "type": "PTR", "data": data}
    if ttl is not None:
        # Minimum TTL is 300 seconds
        rec["ttl"] = max(300, ttl)
    if comment is not None:
        # Maximum comment length is 160 chars
        rec["comment"] = comment[:160]
    body = {"recordsList": {"records": [rec]},
            "link": {"content": "", "href": href, "rel": svc_name}}
    uri = "/rdns"
    try:
        resp, resp_body = self._async_call(
            uri, body=body, method="PUT", has_response=False,
            error_class=exc.PTRRecordUpdateFailed)
    except exc.EndpointNotFound as e:
        # IMPROVEMENT: chain the original failure for easier debugging.
        raise exc.InvalidPTRRecord("The record domain/IP address "
                "information is not valid for this device.") from e
    return resp_body.get("status") == "COMPLETED"
def setArg(self, namespace, key, value):
    """Set a single argument in this namespace"""
    assert key is not None
    assert value is not None
    namespace = self._fixNS(namespace)
    # Keep internal state consistent, at least: always store str -> str.
    if isinstance(value, bytes):
        value = value.decode("utf-8")
    self.args[(namespace, key)] = value
    if namespace is not BARE_NS:
        self.namespaces.add(namespace)
def get_crystal_system(self):
    """Get the crystal system for the structure, e.g., (triclinic,
    orthorhombic, cubic, etc.).

    Returns:
        (str): Crystal system for structure or None if system cannot be
        detected.
    """
    n = self._space_group_data["number"]
    # Space-group number ranges (inclusive) for the seven crystal systems.
    systems = {"triclinic": (1, 2), "monoclinic": (3, 15),
               "orthorhombic": (16, 74), "tetragonal": (75, 142),
               "trigonal": (143, 167), "hexagonal": (168, 194),
               "cubic": (195, 230)}
    # FIX: the original stored the result in a misspelled local
    # ("crystal_sytem") and used a lambda assigned to a name; a direct
    # return from the loop is clearer and equivalent.
    for system, (low, high) in systems.items():
        if low <= n <= high:
            return system
    return None
def solve(self):
    """Start (or re-start) optimisation. This method implements the
    framework for the alternation between `X` and `D` updates in a
    dictionary learning algorithm.

    If option ``Verbose`` is ``True``, the progress of the optimisation
    is displayed at every iteration. At termination of this method,
    attribute :attr:`itstat` is a list of tuples representing statistics
    of each iteration.

    Attribute :attr:`timer` is an instance of :class:`.util.Timer` that
    provides the following labelled timers:

    ``init``: Time taken for object initialisation by :meth:`__init__`

    ``solve``: Total time taken by call(s) to :meth:`solve`

    ``solve_wo_func``: Total time taken by call(s) to :meth:`solve`,
    excluding time taken to compute functional value and related
    iteration statistics
    """
    # Construct tuple of status display column titles and set status
    # display strings
    hdrtxt = ['Itn', 'Fnc', 'DFid', u('Regℓ1')]
    hdrstr, fmtstr, nsep = common.solve_status_str(
        hdrtxt, fwdth0=type(self).fwiter, fprec=type(self).fpothr)
    # Print header and separator strings
    if self.opt['Verbose']:
        if self.opt['StatusHeader']:
            print(hdrstr)
        print("-" * nsep)
    # Reset timer
    self.timer.start(['solve', 'solve_wo_eval'])
    # Create process pool
    if self.nproc > 0:
        self.pool = mp.Pool(processes=self.nproc)
    # The loop variable is self.j so iteration state survives across
    # repeated solve() calls (re-start support).
    for self.j in range(self.j, self.j + self.opt['MaxMainIter']):
        # Perform a set of update steps
        self.step()
        # Evaluate functional (excluded from the solve_wo_eval timer)
        self.timer.stop('solve_wo_eval')
        fnev = self.evaluate()
        self.timer.start('solve_wo_eval')
        # Record iteration stats
        tk = self.timer.elapsed('solve')
        itst = self.IterationStats(*((self.j,) + fnev + (tk,)))
        self.itstat.append(itst)
        # Display iteration stats if Verbose option enabled
        if self.opt['Verbose']:
            print(fmtstr % itst[:-1])
        # Call callback function if defined; a truthy return terminates early
        if self.opt['Callback'] is not None:
            if self.opt['Callback'](self):
                break
    # Clean up process pool
    if self.nproc > 0:
        self.pool.close()
        self.pool.join()
    # Increment iteration count
    self.j += 1
    # Record solve time
    self.timer.stop(['solve', 'solve_wo_eval'])
    # Print final separator string if Verbose option enabled
    if self.opt['Verbose'] and self.opt['StatusHeader']:
        print("-" * nsep)
    # Return final dictionary
    return self.getdict()
def stopThread(self):
    """Stops spawned NSThread."""
    thread = self._thread
    if thread is None:
        return
    # Ask the runloop on the worker thread to shut itself down, blocking
    # until the selector has run.
    self.performSelector_onThread_withObject_waitUntilDone_(
        'stopPowerNotificationsThread', thread, None, objc.YES)
    self._thread = None
def __getFormat ( self , format ) :
"""Defaults to JSON [ ps : ' RDF ' is the native rdflib representation ]""" | if format == "XML" :
self . sparql . setReturnFormat ( XML )
self . format = "XML"
elif format == "RDF" :
self . sparql . setReturnFormat ( RDF )
self . format = "RDF"
else :
self . sparql . setReturnFormat ( JSON )
self . format = "JSON" |
def init():
    """Execute init tasks for all components (virtualenv, pip)."""
    # Announce, provision the virtualenv, then print follow-up instructions.
    print(yellow("# Setting up development environment...\n", True))
    for step in (virtualenv.init, virtualenv.update):
        step()
    print(green("\n# DONE.", True))
    hint = (green("Type ") + green("activate", True)
            + green(" to enable your dev virtual environment."))
    print(hint)
def maybe_sendraw(self, host_port: Tuple[int, int], messagedata: bytes):
    """Send message to recipient if the transport is running."""
    # Don't sleep if the timeout is zero: even a zero-length gevent.sleep
    # forces a context switch and delays the message, increasing latency.
    sleep_timeout = self.throttle_policy.consume(1)
    if sleep_timeout:
        gevent.sleep(sleep_timeout)
    # Check the udp socket is still available before trying to send the
    # message. There must be *no context-switches after this test*.
    server = self.server
    if hasattr(server, 'socket'):
        server.sendto(messagedata, host_port)
def run_outdated(cls, options):
    """Print outdated user packages.

    BUG FIXES: the original passed ``cls.options`` instead of the
    ``options`` argument, and chained ``elif options.update:`` after the
    pinned / not-pinned branches, which are exhaustive — the update branch
    was unreachable. The update check is now an independent ``if``.
    """
    latest_versions = sorted(
        cls.find_packages_latest_versions(options),
        key=lambda p: p[0].project_name.lower())
    for dist, latest_version, typ in latest_versions:
        if latest_version > dist.parsed_version:
            # Filter by the pinned/--all flags unless everything is wanted.
            if options.all:
                pass
            elif options.pinned:
                if cls.can_be_updated(dist, latest_version):
                    continue
            elif not options.pinned:
                if not cls.can_be_updated(dist, latest_version):
                    continue
            if options.update:
                print(dist.project_name if options.brief else
                      'Updating %s to Latest: %s [%s]' % (
                          cls.output_package(dist), latest_version, typ))
                main(['install', '--upgrade']
                     + (['--user'] if ENABLE_USER_SITE else [])
                     + [dist.key])
                continue
            print(dist.project_name if options.brief else
                  '%s - Latest: %s [%s]' % (
                      cls.output_package(dist), latest_version, typ))
def disable_node(self, service_name, node_name):
    """Disables a given node name for the given service name via the
    "disable server" HAProxy command."""
    logger.info("Disabling server %s/%s", service_name, node_name)
    command = "disable server %s/%s" % (service_name, node_name)
    return self.send_command(command)
def parse_transform(transform_str):
    """Convert a valid SVG transformation string into a 3x3 matrix.

    If the string is empty or None, this returns a 3x3 identity matrix.
    """
    if not transform_str:
        return np.identity(3)
    if not isinstance(transform_str, str):
        raise TypeError('Must provide a string to parse')
    # Individual transforms end with ')'; the final split element is the
    # empty remainder and is skipped.
    matrix = np.identity(3)
    for chunk in transform_str.split(')')[:-1]:
        matrix = matrix.dot(_parse_transform_substr(chunk))
    return matrix
def frontend_routing(self, context):
    """Returns the targeted frontend and original state.

    :type context: satosa.context.Context
    :rtype: satosa.frontends.base.FrontendModule
    :param context: The response context
    :return: frontend
    """
    # The frontend that initiated the flow was recorded in the state.
    target_frontend = context.state[STATE_KEY]
    satosa_logging(logger, logging.DEBUG,
                   "Routing to frontend: %s " % target_frontend, context.state)
    context.target_frontend = target_frontend
    return self.frontends[context.target_frontend]["instance"]
def item_links_addition(self, data):
    """Add the links for each community."""
    # Allow the serializer context to override the link factory.
    factory = self.context.get('links_item_factory',
                               default_links_item_factory)
    data['links'] = factory(data)
    return data
def tenant_quota_usages(request, tenant_id=None, targets=None):
    """Get our quotas and construct our usage object.

    :param tenant_id: Target tenant ID. If no tenant_id is provided,
        request.user.project_id is assumed to be used.
    :param targets: A tuple of quota names to be retrieved.
        If unspecified, all quota and usage information is retrieved.
    """
    if not tenant_id:
        tenant_id = request.user.project_id
    disabled_quotas = get_disabled_quotas(request, targets)
    usages = QuotaUsage()
    # Fetch compute, network and volume usages concurrently; each worker
    # mutates the shared QuotaUsage object.
    futurist_utils.call_functions_parallel(
        (_get_tenant_compute_usages,
         [request, usages, disabled_quotas, tenant_id]),
        (_get_tenant_network_usages,
         [request, usages, disabled_quotas, tenant_id]),
        (_get_tenant_volume_usages,
         [request, usages, disabled_quotas, tenant_id]))
    return usages
def load_descendant_articles_for_section(
        context, section, featured_in_homepage=None, featured_in_section=None,
        featured_in_latest=None, count=5):
    """Returns all descendant articles (filtered using the parameters).

    If the `locale_code` in the context is not the main language, it will
    return the translations of the live articles.
    """
    request = context.get('request')
    locale = context.get('locale_code')
    # Articles always live under the main-language page; translations are
    # resolved at the end via get_pages().
    page = section.get_main_language_page()
    settings = SiteSettings.for_site(request.site) if request else None
    qs = ArticlePage.objects.descendant_of(page).filter(
        language__is_main_language=True)
    article_ordering = settings and settings.article_ordering_within_section
    cms_ordering = article_ordering and \
        settings.article_ordering_within_section != \
        ArticleOrderingChoices.CMS_DEFAULT_SORTING
    if article_ordering and cms_ordering:
        # Ordering choice names ending in '_desc' map to a Django
        # '-field' descending ordering.
        order_by = ArticleOrderingChoices.get(
            settings.article_ordering_within_section).name.lower()
        order_by = order_by if order_by.find('_desc') == -1 else \
            '-{}'.format(order_by.replace('_desc', ''))
        qs = qs.order_by(order_by)
    # NOTE: each "featured" filter below may override the ordering above.
    if featured_in_homepage is not None:
        qs = qs.filter(featured_in_homepage=featured_in_homepage).order_by(
            '-featured_in_homepage_start_date')
    if featured_in_latest is not None:
        qs = qs.filter(featured_in_latest=featured_in_latest)
    if featured_in_section is not None:
        qs = qs.filter(featured_in_section=featured_in_section).order_by(
            '-featured_in_section_start_date')
    if not locale:
        return qs.live()[:count]
    # Non-main locale: translate the pages before slicing.
    return get_pages(context, qs, locale)[:count]
def insertBefore(self, child, beforeChild):
    '''insertBefore - Inserts a child before #beforeChild

    @param child <AdvancedTag/str> - Child block to insert
    @param beforeChild <AdvancedTag/str> - Child block to insert before.
        If None, will be appended
    @return - The added child. Note, if it is a text block (str), the
        return is NOT linked by reference.
    @raises ValueError - If #beforeChild is defined and is not a child of
        this node
    '''
    # Per the JS DOM API the reference node argument is required, but a
    # null/None value means "append".
    if beforeChild is None:
        return self.appendBlock(child)
    # Tags must be tracked in both blocks and children.
    childIsTag = isTagNode(child)
    blocks = self.blocks
    children = self.children
    # Locate #beforeChild within the current element.
    try:
        blockPos = blocks.index(beforeChild)
        if childIsTag:
            childPos = children.index(beforeChild)
    except ValueError:
        # beforeChild is not a child of this element.
        raise ValueError('Provided "beforeChild" is not a child of element, cannot insert.')
    # Splice into blocks (and children when the new node is a tag).
    self.blocks = blocks[:blockPos] + [child] + blocks[blockPos:]
    if childIsTag:
        self.children = children[:childPos] + [child] + children[childPos:]
    return child
def _build_src_index(self):
    """Build an indices for fast lookup of a source given its name
    or coordinates."""
    # Keep sources ordered by offset so indices are deterministic.
    self._srcs = sorted(self._srcs, key=lambda src: src['offset'])
    nsrc = len(self._srcs)
    radec = np.zeros((2, nsrc))
    for idx, src in enumerate(self._srcs):
        radec[:, idx] = src.radec
    self._src_skydir = SkyCoord(ra=radec[0], dec=radec[1], unit=u.deg)
    # Precompute each source's angular separation from the ROI center.
    self._src_radius = self._src_skydir.separation(self.skydir)
def __execute_sext ( self , instr ) :
"""Execute SEXT instruction .""" | op0_size = instr . operands [ 0 ] . size
op2_size = instr . operands [ 2 ] . size
op0_val = self . read_operand ( instr . operands [ 0 ] )
op0_msb = extract_sign_bit ( op0_val , op0_size )
op2_mask = ( 2 ** op2_size - 1 ) & ~ ( 2 ** op0_size - 1 ) if op0_msb == 1 else 0x0
op2_val = op0_val | op2_mask
self . write_operand ( instr . operands [ 2 ] , op2_val )
return None |
def _builtin_from_array_list ( required_type , value , list_level ) :
"""Helper method to make : func : ` from _ array _ list ` available to all classes extending this ,
without the need for additional imports .
: param required _ type : Type as what it should be parsed as . Any builtin .
: param value : The result to parse
: param list _ level : " list of " * list _ level
: return :""" | return from_array_list ( required_type , value , list_level , is_builtin = True ) |
def next_chunk(self):
    """Returns the chunk immediately following (and adjacent to) this one."""
    # Report both the method and the concrete subclass in the error so the
    # missing override is easy to locate.
    method_name = self.next_chunk.__func__.__name__
    class_name = self.__class__.__name__
    raise NotImplementedError(
        "%s not implemented for %s" % (method_name, class_name))
def flatten_blocks(lines, num_indents=-1):
    """Take a list (block) or string (statement) and flatten it into a
    string with indentation."""
    # The standard indent is four spaces
    INDENTATION = " " * 4
    if not lines:
        return ""
    if isinstance(lines, six.string_types):
        # A statement: prefix it with the accumulated indentation.
        return INDENTATION * num_indents + lines
    # A block: flatten each entry one level deeper and join with newlines.
    flattened = [flatten_blocks(entry, num_indents + 1) for entry in lines]
    return "\n".join(flattened)
def MafMotifSelect(mafblock, pwm, motif=None, threshold=0):
    """Yield (sliced maf block, pwm score vector, motif score vector) for
    each alignment window whose best forward/reverse score reaches
    ``threshold``.

    :param mafblock: MAF alignment block; must provide ``.components`` and
        ``.slice``
    :param pwm: position weight matrix providing ``score_seq``
    :param motif: optional consensus string; must match ``len(pwm)`` if given
    :param threshold: minimum acceptable score for both score vectors
    """
    if motif != None and len(motif) != len(pwm):
        raise Exception("pwm and motif must be the same length")
    # generic alignment
    alignlist = [c.text for c in mafblock.components]
    align = pwmx.Align(alignlist)
    nrows, ncols = align.dims
    # chr, chr_start, chr_stop = align.headers[0]
    # required sequence length
    # NOTE(review): len(motif) raises TypeError when motif is None — this
    # function appears to assume a motif is always supplied; confirm callers.
    minSeqLen = len(motif)
    # record the text sizes from the alignment rows
    align_match_lens = []
    for start in range(ncols - minSeqLen):
        # Skip windows that begin on a gap in the reference row.
        if align.rows[0][start] == '-':
            continue
        subseq = ""
        pwm_score_vec = []
        motif_score_vec = []
        max_cols = 0
        for ir in range(nrows):
            # NOTE(review): count('-', start, minSeqLen) treats minSeqLen as
            # an *end index*, not a length — looks like it should be
            # start + minSeqLen; confirm against upstream history.
            expanded = align.rows[ir].count('-', start, minSeqLen)
            subtext = align.rows[ir][start:minSeqLen + expanded]
            max_cols = max(len(subtext), max_cols)
            subseq = subtext.replace('-', '')
            revseq = pwmx.reverse_complement(subseq)
            # pwm score (best of forward and reverse strands)
            nill, f_score = pwm.score_seq(subseq)[0]
            r_score, nill = pwm.score_seq(revseq)[0]
            pwm_score_vec.append(max(f_score, r_score))
            # consensus score
            if motif is not None:
                for_score = int(pwmx.match_consensus(subseq, motif))
                rev_score = int(pwmx.match_consensus(revseq, motif))
                motif_score_vec.append(max(for_score, rev_score))
        # check threshold; NaN scores are only reported, not rejected
        try:
            assert not isnan(max(pwm_score_vec))
            assert not isnan(max(motif_score_vec))
        except:
            print(pwm_score_vec, motif_score_vec, file=sys.stderr)
            print(len(subseq), len(pwm), file=sys.stderr)
        if max(pwm_score_vec) < threshold:
            continue
        if max(motif_score_vec) < threshold:
            continue
        # chop block
        col_start = start
        col_end = max_cols + 1
        motifmaf = mafblock.slice(col_start, col_end)
        yield motifmaf, pwm_score_vec, motif_score_vec
    # The dead string below is an old per-row implementation kept by the
    # original author for reference; preserved verbatim.
    """for ir in range ( nrows ) :
    # scan alignment row for motif subsequences
    for start in range ( ncols ) :
    if align . rows [ ir ] [ start ] = = ' - ' : continue
    elif align . rows [ ir ] [ start ] = = ' n ' : continue
    elif align . rows [ ir ] [ start ] = = ' N ' : continue
    # gather enough subseq for motif
    for ic in range ( start , ncols ) :
    char = align . rows [ ir ] [ ic ] . upper ( )
    if char = = ' - ' or char = = ' N ' : continue
    else : subseq + = char
    if len ( subseq ) = = minSeqLen :
    revseq = pwmx . reverse _ complement ( subseq )
    align _ match _ lens . append ( ic )
    # pwm score
    nill , f _ score = pwm . score _ seq ( subseq ) [ 0]
    r _ score , nill = pwm . score _ seq ( revseq ) [ 0]
    pwm _ score _ vec . append ( max ( f _ score , r _ score ) )
    # consensus score
    if motif is not None :
    for _ score = int ( pwmx . match _ consensus ( subseq , motif ) )
    rev _ score = int ( pwmx . match _ consensus ( revseq , motif ) )
    motif _ score _ vec . append ( max ( for _ score , rev _ score ) )
    # check threshold
    try :
    assert not isnan ( max ( pwm _ score _ vec ) )
    assert not isnan ( max ( motif _ score _ vec ) )
    except :
    print > > sys . stderr , pwm _ score _ vec , motif _ score _ vec
    print > > sys . stderr , len ( subseq ) , len ( pwm )
    if max ( pwm _ score _ vec ) < threshold : continue
    if max ( motif _ score _ vec ) < threshold : continue
    # chop block
    col _ start = start
    col _ end = max ( align _ match _ lens ) + 1
    motifmaf = mafblock . slice ( col _ start , col _ end )
    print subseq , revseq , ic
    print align _ match _ lens
    yield motifmaf , pwm _ score _ vec , motif _ score _ vec"""
def get_region(service, region, profile):
    """Retrieve the region for a particular AWS service based on configured
    region and/or profile."""
    # _get_profile returns a 4-tuple; only the resolved region is needed.
    profile_info = _get_profile(service, region, None, None, profile)
    return profile_info[1]
def find_closest_calculated_frequencies(input_freqs, metric_freqs):
    """Given a value (or array) of input frequencies find the closest values
    in the list of frequencies calculated in the metric.

    Parameters
    ----------
    input_freqs : numpy.array or float
        The frequency(ies) that you want to find the closest value in
        metric_freqs
    metric_freqs : numpy.array
        The list of frequencies calculated by the metric

    Returns
    -------
    output_freqs : numpy.array or float
        The list of closest values to input_freqs for which the metric was
        computed
    """
    # Normalize scalar input to a length-1 array.
    try:
        output_freqs = numpy.zeros(len(input_freqs), dtype=float)
    except TypeError:
        output_freqs = numpy.zeros(1, dtype=float)
        input_freqs = numpy.array([input_freqs])
    # A single calculated frequency trivially answers every query.
    if len(metric_freqs) == 1:
        output_freqs[:] = metric_freqs[0]
        return output_freqs
    # NOTE: assumes metric_freqs is sorted. Each calculated frequency claims
    # every input frequency lying strictly inside the open interval bounded
    # by the midpoints to its neighbours (the two edge frequencies claim the
    # open intervals beyond their single midpoint).
    num_metric = len(metric_freqs)
    for idx in range(num_metric):
        if idx == 0:
            mask = input_freqs < ((metric_freqs[0] + metric_freqs[1]) / 2.)
        elif idx == num_metric - 1:
            mask = input_freqs > ((metric_freqs[-2] + metric_freqs[-1]) / 2.)
        else:
            above = input_freqs > ((metric_freqs[idx - 1] + metric_freqs[idx]) / 2.)
            below = input_freqs < ((metric_freqs[idx] + metric_freqs[idx + 1]) / 2.)
            mask = numpy.logical_and(above, below)
        if mask.any():
            output_freqs[mask] = metric_freqs[idx]
    return output_freqs
def sorted(self):
    """Return this range's endpoints as a (start, end) tuple with start <= end."""
    lo, hi = self.start, self.end
    if lo > hi:
        lo, hi = hi, lo
    return lo, hi
def dumps(self, cnf, **kwargs):
    """Serialize the configuration 'cnf' to a string.

    :param cnf: Configuration data to dump
    :param kwargs: Optional keyword parameters; anything not listed in
        self._dump_opts is filtered out before dumping
    :return: String representation of the configuration
    """
    opts = anyconfig.utils.filter_options(self._dump_opts, kwargs)
    return self.dump_to_string(cnf, **opts)
def email_embed_image(email, img_content_id, img_data):
    """Attach *img_data* to a django.core.mail.EmailMessage as an inline image.

    The bytes are wrapped in a MIMEImage part whose Content-ID header is set
    to ``<img_content_id>`` so templates can reference it via ``cid:`` URLs.
    """
    part = MIMEImage(img_data)
    part.add_header('Content-Disposition', 'inline')
    part.add_header('Content-ID', '<%s>' % img_content_id)
    email.attach(part)
def deprecated(func):
    '''Decorator which marks a function as deprecated.

    Calling the wrapped function emits a DeprecationWarning (pointing at the
    caller via stacklevel=2) and then delegates to *func* unchanged.
    https://wiki.python.org/moin/PythonDecoratorLibrary#Generating_Deprecation_Warnings
    '''
    from functools import wraps

    # functools.wraps copies __name__, __doc__ and __dict__ like the old
    # manual code did, and additionally __module__, __qualname__ and
    # __wrapped__, so introspection of the wrapper works properly.
    @wraps(func)
    def new_func(*args, **kwargs):
        # stacklevel=2 makes the warning point at the deprecated call site
        # rather than at this wrapper.
        warn("Call to deprecated function {}.".format(func.__name__),
             category=DeprecationWarning, stacklevel=2)
        return func(*args, **kwargs)
    return new_func
def plot_freq(self, x, y, title='', ylabel=None, scale='semilogy'):
    """Plot mean frequency spectrum and display it in a dialog.

    Parameters
    ----------
    x : list
        vector with frequencies
    y : ndarray
        vector with amplitudes
    title : str
        plot title
    ylabel : str
        plot y label; derived from the current frequency settings when None
    scale : str
        semilogy, loglog or linear
    """
    freq_settings = self.frequency
    scaling = freq_settings['scaling'].get_value()
    if ylabel is None:
        if freq_settings['complex'].get_value():
            ylabel = 'Amplitude (uV)'
        else:
            # Default labels for the known scaling modes; any other mode
            # leaves ylabel as None, exactly as before.
            ylabel = {
                'power': 'Power spectral density (uV ** 2 / Hz)',
                'energy': 'Energy spectral density (uV ** 2)',
            }.get(scaling)
    dialog = PlotDialog(self.parent)
    self.parent.plot_dialog = dialog
    dialog.canvas.plot(x, y, title, ylabel, scale=scale)
    self.parent.show_plot_dialog()
def install_firmware(self, firmware_information):
    """Install firmware on a logical interconnect.

    The three supported firmware-update operations are Stage (uploads
    firmware to the interconnect), Activate (installs firmware on the
    interconnect), and Update (a Stage followed by an Activate).

    Args:
        firmware_information: Options to install firmware to a logical interconnect.

    Returns:
        dict
    """
    uri = self._helper.build_subresource_uri(
        self.data["uri"], subresource_path=self.FIRMWARE_PATH)
    return self._helper.update(firmware_information, uri)
def _read_with_sitk(datapath):
    """Read a file with SimpleITK and return its pixel array and metadata.

    :param datapath: path to file (img or dicom)
    :return: tuple (data3d, metadata), where data3d is the array of pixels
    :raises ImportError: if SimpleITK is not installed
    """
    try:
        import SimpleITK as Sitk
    except ImportError:
        logger.error("Unable to import SimpleITK. On Windows try version 1.0.1")
        # BUG fix: the original swallowed the ImportError and fell through,
        # crashing just below with a confusing NameError on ``Sitk``.
        # Re-raise so callers see the real cause.
        raise
    image = Sitk.ReadImage(datapath)
    data3d = dcmtools.get_pixel_array_from_sitk(image)
    # data3d, original_dtype = dcmreaddata.get_pixel_array_from_dcmobj(image)
    metadata = _metadata(image, datapath)
    return data3d, metadata
def set_clear_color(self, color='black', alpha=None):
    """Set the screen clear color.

    This is a wrapper for gl.glClearColor.

    Parameters
    ----------
    color : str | tuple | instance of Color
        Color to use. See vispy.color.Color for options.
    alpha : float | None
        Alpha to use.
    """
    rgba = Color(color, alpha).rgba
    self.glir.command('FUNC', 'glClearColor', *rgba)
def _plot ( self ) :
"""Plot all dots for series""" | r_max = min ( self . view . x ( 1 ) - self . view . x ( 0 ) , ( self . view . y ( 0 ) or 0 ) - self . view . y ( 1 ) ) / ( 2 * 1.05 )
for serie in self . series :
self . dot ( serie , r_max ) |
def seqannotation(self, seqrecord, allele, loc):
    """Build an Annotation from the features of the found sequence.

    :return: The Annotation from the found sequence
    :rtype: Annotation
    """
    # seqrecord = self.seqrecord(allele, loc)
    feats = get_features(seqrecord)
    annotation = Annotation(annotation=feats, method='match',
                            complete_annotation=True)
    if self.alignments:
        aligned = self.annoated_alignments[loc][allele]
        annotation.aligned = {name: aligned[name]['Seq'] for name in aligned.keys()}
    return annotation
def fix_fasta(fasta):
    """Yield fasta records whose headers have had pesky characters removed.

    Records with an empty sequence are dropped.
    """
    for record in parse_fasta(fasta):
        record[0] = remove_char(record[0])
        if len(record[1]) > 0:
            yield record
def symbol_list(what_list):
    '''Provide a default plotting symbol list.

    Parameters
    ----------
    what_list : string
        Name of the symbol list to return; one of "list1", "list2",
        "lines1" or "lines2".

    Returns
    -------
    list of str
        Matplotlib-style format strings.

    Raises
    ------
    ValueError
        If *what_list* is not a known list name (the original crashed here
        with an UnboundLocalError instead).
    '''
    # BUG fix: the original compared strings with ``is``, which only works by
    # accident of CPython string interning (and emits a SyntaxWarning on
    # modern interpreters). Use a plain lookup table instead.
    lists = {
        "list1": ['ro', 'bo', 'ko', 'go', 'mo', 'r-', 'b-', 'k-', 'g-', 'm-', 'r--', 'b--', 'k--', 'g--', 'r1'],
        # symbol = ['r+', 'ro', 'r-']
        "list2": ['r-', 'b--', 'g-.', 'k:', 'md', '.', 'o', 'v', '^', '<', '>', '1', '2', '3', '4', 's', 'p', '*', 'h', 'H', '+'],
        "lines1": ['b--', 'k--', 'r--', 'c--', 'm--', 'g--', 'b-', 'k-', 'r-', 'c-', 'm-', 'g-', 'b.', 'b-.', 'k-.', 'r-.', 'c-.', 'm-.', 'g-.', 'b:', 'k:', 'r:', 'c:', 'm:', 'g:'],
        "lines2": ['g:', 'r-.', 'k-', 'b--', 'k-.', 'b+', 'r:', 'b-', 'c--', 'm--', 'g--', 'r-', 'c-', 'm-', 'g-', 'k-.', 'c-.', 'm-.', 'g-.', 'k:', 'r:', 'c:', 'm:', 'b-.', 'b:'],
    }
    try:
        return lists[what_list]
    except KeyError:
        raise ValueError("unknown symbol list name: %r" % (what_list,))
def standard_aggregation(C):
    """Compute the sparsity pattern of the tentative prolongator.

    Parameters
    ----------
    C : csr_matrix
        strength of connection matrix

    Returns
    -------
    AggOp : csr_matrix
        aggregation operator which determines the sparsity pattern
        of the tentative prolongator
    Cpts : array
        array of Cpts, i.e., Cpts[i] = root node of aggregate i

    See Also
    --------
    amg_core.standard_aggregation
    """
    if not isspmatrix_csr(C):
        raise TypeError('expected csr_matrix')
    if C.shape[0] != C.shape[1]:
        raise ValueError('expected square matrix')
    index_type = C.indptr.dtype
    num_rows = C.shape[0]
    # Tj[i] will hold the aggregate number of node i; the C routine marks
    # unaggregated nodes with -1.
    Tj = np.empty(num_rows, dtype=index_type)  # stores the aggregate #s
    Cpts = np.empty(num_rows, dtype=index_type)  # stores the Cpts
    # Delegate the actual aggregation to the compiled pyamg core routine.
    fn = amg_core.standard_aggregation
    num_aggregates = fn(num_rows, C.indptr, C.indices, Tj, Cpts)
    # Only the first num_aggregates entries of Cpts are meaningful.
    Cpts = Cpts[:num_aggregates]
    if num_aggregates == 0:
        # return all zero matrix and no Cpts
        return csr_matrix((num_rows, 1), dtype='int8'), np.array([], dtype=index_type)
    else:
        shape = (num_rows, num_aggregates)
        if Tj.min() == -1:
            # some nodes not aggregated: build AggOp from only the rows
            # (nodes) that did receive an aggregate assignment.
            mask = Tj != -1
            row = np.arange(num_rows, dtype=index_type)[mask]
            col = Tj[mask]
            data = np.ones(len(col), dtype='int8')
            return coo_matrix((data, (row, col)), shape=shape).tocsr(), Cpts
        else:
            # all nodes aggregated: Tj is directly usable as the CSR column
            # index array with one entry per row.
            Tp = np.arange(num_rows + 1, dtype=index_type)
            Tx = np.ones(len(Tj), dtype='int8')
            return csr_matrix((Tx, Tj, Tp), shape=shape), Cpts
def paint(self, p, *args):
    '''Paint the image item, then regenerate the cached picture.

    I have no idea why, but we need to generate the picture after painting
    otherwise it draws incorrectly.
    '''
    if self.picturenotgened:
        # First paint: build the picture from the parent's bounding rect.
        self.generatePicture(self.getBoundingParents()[0].rect())
        self.picturenotgened = False
    pg.ImageItem.paint(self, p, *args)
    # Regenerate after painting; required for correct drawing (see docstring).
    self.generatePicture(self.getBoundingParents()[0].rect())
def validate(self, config):
    """Validate the given config against the `Scheme`.

    Args:
        config (dict): The configuration to validate.

    Raises:
        errors.SchemeValidationError: The configuration fails
            validation against the `Schema`.
    """
    if not isinstance(config, dict):
        raise errors.SchemeValidationError(
            'Scheme can only validate a dictionary config, but was given '
            '{} (type: {})'.format(config, type(config)))
    for arg in self.args:
        if arg.name in config:
            # the option exists in the config
            arg.validate(config[arg.name])
        elif arg.required:
            # A required option may not be omitted; optional ones may.
            raise errors.SchemeValidationError(
                'Option "{}" is required, but not found.'.format(arg.name))
def cbpdn_class_label_lookup(label):
    """Get a CBPDN class from a label string."""
    solvers = {'admm': admm_cbpdn.ConvBPDN, 'fista': fista_cbpdn.ConvBPDN}
    if label not in solvers:
        raise ValueError('Unknown ConvBPDN solver method %s' % label)
    return solvers[label]
def transaction(commit=True):
    """Yield the session, committing on success and rolling back on error."""
    try:
        yield SessionContext.session
        if commit:
            SessionContext.session.commit()
    except Exception:
        session = SessionContext.session
        if session:
            session.rollback()
        raise
def subscribe(self, event, hook):
    """Subscribe a callback to an event.

    Parameters
    ----------
    event : str
        Available events are 'precall', 'postcall', and 'capacity'.
        precall is called with: (connection, command, query_kwargs)
        postcall is called with: (connection, command, query_kwargs, response)
        capacity is called with: (connection, command, query_kwargs, response, capacity)
    hook : callable
        The callback to register; duplicate registrations are ignored.
    """
    hooks = self._hooks[event]
    if hook not in hooks:
        hooks.append(hook)
def recount_view(request):
    """Recount number_of_messages for all threads and number_of_responses
    for all requests.

    Also set the change_date for every thread to the post_date of the latest
    message associated with that thread. Reports the number of changed rows
    via the Django messages framework and redirects to the utilities page.
    """
    requests_changed = 0
    for req in Request.objects.all():
        recount = Response.objects.filter(request=req).count()
        if req.number_of_responses != recount:
            req.number_of_responses = recount
            req.save()
            requests_changed += 1
    threads_changed = 0
    for thread in Thread.objects.all():
        recount = Message.objects.filter(thread=thread).count()
        if thread.number_of_messages != recount:
            thread.number_of_messages = recount
            thread.save()
            threads_changed += 1
    dates_changed = 0
    for thread in Thread.objects.all():
        # Perf fix: the original issued the ``latest('post_date')`` query
        # twice per thread; fetch it once.
        # NOTE(review): ``latest`` raises DoesNotExist for a thread with no
        # messages, as in the original — confirm empty threads cannot occur.
        latest_post_date = thread.message_set.latest('post_date').post_date
        if thread.change_date != latest_post_date:
            thread.change_date = latest_post_date
            thread.save()
            dates_changed += 1
    messages.add_message(
        request, messages.SUCCESS,
        MESSAGES['RECOUNTED'].format(
            requests_changed=requests_changed,
            request_count=Request.objects.all().count(),
            threads_changed=threads_changed,
            thread_count=Thread.objects.all().count(),
            dates_changed=dates_changed,
        ))
    return HttpResponseRedirect(reverse('utilities'))
def encode(cls, s):
    """Convert a plain text string to base64 encoding.

    :param s: unicode str | bytes, the value to encode
    :returns: unicode str, the base64 encoded value
    """
    raw = ByteString(s)
    encoded = base64.b64encode(raw).strip()
    return String(encoded)
def _use_tables(objs):
    '''Whether a collection of Bokeh objects contains a TableWidget.

    Args:
        objs (seq[Model or Document]):

    Returns:
        bool
    '''
    from ..models.widgets import TableWidget
    is_table = lambda obj: isinstance(obj, TableWidget)
    return _any(objs, is_table)
def get(cls, parent=None, id=None, data=None):
    """Inherit info from parent and return a new object.

    :param parent: parent object whose key/route/config are inherited
    :param id: optional identifier stored in the route under cls.ID_NAME
    :param data: pre-fetched payload; when absent the object is fetched
    """
    # TODO - allow fetching of parent based on child?
    if parent is not None:
        route = copy(parent.route)
        key, config = parent.key, parent.config
    else:
        # BUG fix: the original built an empty route here but still
        # dereferenced ``parent.key``/``parent.config`` below, so any call
        # with parent=None crashed with AttributeError.
        # NOTE(review): assumes cls accepts key=None/config=None — confirm.
        route = {}
        key, config = None, None
    if id is not None and cls.ID_NAME is not None:
        route[cls.ID_NAME] = id
    obj = cls(key=key, route=route, config=config)
    if data:
        # This is used in "get all" queries
        obj.data = data
    else:
        obj.fetch()
    return obj
def add(self, tid, result, role, session, oid=None, content=None, anony=None):
    '''taobao.traderate.add - add a single trade rating.

    Note: before rating, the order's completion time (end_time) must be
    checked; orders completed more than 15 days ago can no longer be rated
    through this interface.
    '''
    request = TOPRequest('taobao.traderate.add')
    request['tid'] = tid
    request['result'] = result
    request['role'] = role
    # Optional parameters are only sent when supplied.
    for key, value in (('oid', oid), ('content', content), ('anony', anony)):
        if value is not None:
            request[key] = value
    self.create(self.execute(request, session)['trade_rate'])
    return self
def get(self, blocking=True):
    """Gets a connection.

    Args:
        blocking: Whether to block when max_size connections are already in
            use. If false, may return None.

    Returns:
        A connection to the database (wrapped in a _ConnectionProxy), or
        None when non-blocking and the pool is exhausted.

    Raises:
        PoolAlreadyClosedError: if close() method was already called on
            this pool.
    """
    if self.closed:
        raise PoolAlreadyClosedError("Connection pool is already closed.")
    # NOTE: Once we acquire capacity from the semaphore, it is essential that we
    # return it eventually. On success, this responsibility is delegated to
    # _ConnectionProxy.
    if not self.limiter.acquire(blocking=blocking):
        return None
    c = None
    # pop is atomic, but if we did a check first, it would not be atomic with
    # the pop.
    try:
        c = self.idle_conns.pop()
    except IndexError:
        # No idle connection available — create a new one, releasing the
        # semaphore capacity if creation fails so it is not leaked.
        try:
            c = self.connect_func()
        except Exception:
            self.limiter.release()
            raise
    return _ConnectionProxy(self, c)
def get_partial_DOS(self):
    """Return frequency points and partial DOS as a tuple.

    Deprecated wrapper around get_projected_dos_dict. Projection is done to
    atoms and may also be done along directions depending on the parameters
    at run_partial_dos.

    Returns
    -------
    tuple
        (frequency_points, partial_dos).
        frequency_points : ndarray
            shape=(frequency_sampling_points,), dtype='double'
        partial_dos :
            shape=(frequency_sampling_points, projections), dtype='double'
    """
    warnings.warn("Phonopy.get_partial_DOS is deprecated. "
                  "Use Phonopy.get_projected_dos_dict.",
                  DeprecationWarning)
    dos = self.get_projected_dos_dict()
    return dos['frequency_points'], dos['projected_dos']
def handle_dump(args):
    """usage: cosmic-ray dump <session-file>

    JSON dump of session data. This output is typically run through other
    programs to produce reports.

    Each line of output is a list with two elements: a WorkItem and a
    WorkResult, both JSON-serialized. The WorkResult can be null, indicating a
    WorkItem with no results.
    """
    db_name = get_db_name(args['<session-file>'])
    with use_db(db_name, WorkDB.Mode.open) as database:
        for item, result in database.completed_work_items:
            print(json.dumps((item, result), cls=WorkItemJsonEncoder))
        for item in database.pending_work_items:
            print(json.dumps((item, None), cls=WorkItemJsonEncoder))
    return ExitCode.OK
def cached_idxs(method):
    """Decorator that caches a method's result per instance and per
    positional-index arguments.

    The cached value and a "computed" flag are stored on the instance as
    attributes derived from the method name and the stringified positional
    arguments.

    NOTE(review): keyword arguments are not part of the cache key, matching
    the original behaviour — confirm decorated methods take only positional
    index arguments.
    """
    def method_wrapper(self, *args, **kwargs):
        tail = '_'.join(str(idx) for idx in args)
        _cache_attr_name = '_cache_' + method.__name__ + '_' + tail
        _bool_attr_name = '_cached_' + method.__name__ + '_' + tail
        # BUG fix: default to False so the first call on an instance that
        # never pre-declared the flag computes the value instead of raising
        # AttributeError.
        is_cached = getattr(self, _bool_attr_name, False)
        if not is_cached:
            result = method(self, *args, **kwargs)
            setattr(self, _cache_attr_name, result)
            setattr(self, _bool_attr_name, True)
        return getattr(self, _cache_attr_name)
    return method_wrapper
def find_statements(self, query, language='spo', type='triples', flush=None, limit=None):
    """Run a query in a format supported by the Fedora Resource Index
    (e.g., SPO or Sparql) and return the results.

    :param query: query as a string
    :param language: query language to use; defaults to 'spo'
    :param type: type of query - tuples or triples; defaults to 'triples'
    :param flush: flush results to get recent changes; defaults to False
    :param limit: maximum number of results to return (optional)
    :raises ValueError: if *type* is neither 'triples' nor 'tuples' (the
        original code fell through to an UnboundLocalError here)
    :rtype: :class:`rdflib.ConjunctiveGraph` when type is ``triples``; list
        of dictionaries (keys based on return fields) when type is ``tuples``
    """
    http_args = {'type': type, 'lang': language, 'query': query, }
    if type == 'triples':
        result_format = 'N-Triples'
    elif type == 'tuples':
        result_format = 'CSV'
    else:
        # Resolves the original "else - error/exception?" TODO with an
        # explicit, descriptive error.
        raise ValueError("unsupported query type: %r" % (type,))
    if limit is not None:
        http_args['limit'] = limit
    http_args['format'] = result_format
    return self._query(result_format, http_args, flush)
def swipe(self, element, x, y, duration=None):
    """Swipe over an element.

    :param element: either a WebElement, PageElement or element locator as a
        tuple (locator_type, locator_value)
    :param x: horizontal movement
    :param y: vertical movement
    :param duration: time to take the swipe, in ms
    :raises Exception: when the current driver is not a mobile driver
    """
    if not self.driver_wrapper.is_mobile_test():
        raise Exception('Swipe method is not implemented in Selenium')
    # Get center coordinates of element
    center = self.get_center(element)
    initial_context = self.driver_wrapper.driver.current_context
    if self.driver_wrapper.is_web_test() or initial_context != 'NATIVE_APP':
        # Webview coordinates must be translated to native-screen
        # coordinates before the native swipe gesture can use them.
        center = self.get_native_coords(center)
    # Android needs absolute end coordinates and ios needs movement
    end_x = x if self.driver_wrapper.is_ios_test() else center['x'] + x
    end_y = y if self.driver_wrapper.is_ios_test() else center['y'] + y
    self.driver_wrapper.driver.swipe(center['x'], center['y'], end_x, end_y, duration)
    if self.driver_wrapper.is_web_test() or initial_context != 'NATIVE_APP':
        # Restore whatever context we started in (the swipe itself runs in
        # the native context).
        self.driver_wrapper.driver.switch_to.context(initial_context)
def detect_r_peaks(ecg_signal, sample_rate, time_units=False, volts=False, resolution=None, device="biosignalsplux", plot_result=False):
    """Python implementation of an R peak detection algorithm
    (proposed by Raja Selvaraj).

    The Pan-Tompkins algorithm is one of the gold-standard algorithms in
    R-peak detection on ECG due to its low computational complexity, which
    allows for real-time applications, preserving high accuracy values.
    This function allows the detection of these events in ECG signals using
    Pan-Tompkins.

    Parameters
    ----------
    ecg_signal : list
        List of ECG acquired samples.
    sample_rate : int
        Sampling frequency.
    time_units : boolean
        If True this function will return the R peak position in seconds.
    volts : boolean
        If True, then the conversion of raw units to mV will be done.
        Resolution needs to be specified.
    resolution : int or None
        Selected resolution for data acquisition.
    device : str
        Specification of the device category.
    plot_result : boolean
        If True a graphical representation of the R peak positions in the
        ECG signal will be presented.

    Returns
    -------
    out : R peak position (ndarray), R peak amplitude (ndarray)
        R peak position (sample number or time instant in seconds) and
        amplitude (raw or mV).
    """
    if volts is True:
        if resolution is not None:
            # ecg_signal = ((ecg_signal / 2 ** resolution) - 0.5) * 3
            ecg_signal = raw_to_phy("ECG", device, ecg_signal, resolution, option="mV")
        else:
            raise RuntimeError("For converting raw units to mV is mandatory the specification of " "acquisition resolution.")
    # Time axis: seconds when time_units is requested, sample indices otherwise.
    if time_units is True:
        time = numpy.linspace(0, len(ecg_signal) / sample_rate, len(ecg_signal))
    else:
        time = numpy.linspace(0, len(ecg_signal) - 1, len(ecg_signal))
    # Filtering Step of Pan-Tompkins Algorithm.
    filtered = _ecg_band_pass_filter(ecg_signal, sample_rate)
    # Differentiation Step of Pan-Tompkins Algorithm.
    differentiated = _differentiate(filtered)
    # Rectification Step of Pan-Tompkins Algorithm.
    squared = _squaring(differentiated)
    # Integration Step of Pan-Tompkins Algorithm.
    integrated = _integration(squared, sample_rate)
    # Adaptive thresholding and peak classification on the integrated signal.
    rr_buffer, spk1, npk1, threshold = _buffer_ini(integrated, sample_rate)
    probable_peaks, possible_peaks = _detects_peaks(integrated, sample_rate)
    definitive_peaks = _checkup(probable_peaks, integrated, sample_rate, rr_buffer, spk1, npk1, threshold)
    definitive_peaks = list(map(int, definitive_peaks))
    # Rephasing step: shifts peaks back by 30 ms worth of samples —
    # presumably compensating the processing chain's group delay (TODO
    # confirm). Peaks within the first 30 ms would yield negative indices.
    definitive_peaks_rephase = numpy.array(definitive_peaks) - 30 * (sample_rate / 1000)
    definitive_peaks_rephase = list(map(int, definitive_peaks_rephase))
    if time_units is True:
        peaks = numpy.array(time)[definitive_peaks_rephase]
    else:
        peaks = definitive_peaks_rephase
    amplitudes = numpy.array(ecg_signal)[definitive_peaks_rephase]
    # If plot is invoked by plot_result flag, then a graphical representation
    # of the R peaks is presented to the user.
    if plot_result is True:
        # time_int drops the first sample to align with the integrated
        # signal, which is one sample shorter after differentiation.
        time_int = numpy.array(time[1:])
        integrated = numpy.array(integrated)
        fig = figure(x_axis_label='Time (s)', y_axis_label='Raw Data', **opensignals_kwargs("figure"))
        fig.line(time_int, integrated, **opensignals_kwargs("line"))
        fig.circle(time_int[definitive_peaks], integrated[definitive_peaks], size=30, color="#00893E", legend="Definitive Peaks")
        fig.circle(time_int[probable_peaks], integrated[probable_peaks], size=20, color="#009EE3", legend="Probable Peaks")
        fig.circle(time_int[possible_peaks], integrated[possible_peaks], size=10, color="#302683", legend="Possible Peaks")
        fig2 = figure(x_axis_label='Time (s)', y_axis_label='Raw Data', **opensignals_kwargs("figure"))
        fig2.line(time, ecg_signal, **opensignals_kwargs("line"))
        fig2.circle(time[definitive_peaks_rephase], numpy.array(ecg_signal)[definitive_peaks_rephase], size=30, color=opensignals_color_pallet(), legend="Definitive Peaks")
        opensignals_style([fig, fig2])
        grid_plot = gridplot([[fig], [fig2]], **opensignals_kwargs("gridplot"))
        show(grid_plot)
    return peaks, amplitudes
def users_for_perm(cls, instance, perm_name, user_ids=None, group_ids=None, limit_group_permissions=False, skip_group_perms=False, db_session=None):
    """Return PermissionTuples for users AND groups that have the given
    permission for the resource. If perm_name is __any_permission__ then
    users with any permission will be listed.

    :param instance:
    :param perm_name:
    :param user_ids: limits the permissions to specific user ids
    :param group_ids: limits the permissions to specific group ids
    :param limit_group_permissions: should be used if we do not want to have
        user objects returned for group permissions; this might cause
        performance issues for big groups
    :param skip_group_perms: do not attach group permissions to the resultset
    :param db_session:
    :return:
    """
    db_session = get_db_session(db_session, instance)
    perms = resource_permissions_for_users(
        cls.models_proxy,
        [perm_name],
        [instance.resource_id],
        user_ids=user_ids,
        group_ids=group_ids,
        limit_group_permissions=limit_group_permissions,
        skip_group_perms=skip_group_perms,
        db_session=db_session,
    )
    # The resource owner (user and/or group) always holds ALL_PERMISSIONS.
    if instance.owner_user_id:
        perms.append(
            PermissionTuple(instance.owner, ALL_PERMISSIONS, "user", None,
                            instance, True, True))
    if instance.owner_group_id and not skip_group_perms:
        owner_group = instance.owner_group
        perms.extend(
            PermissionTuple(member, ALL_PERMISSIONS, "group", owner_group,
                            instance, True, True)
            for member in owner_group.users)
    return perms
def search_first(self, *criterion, **kwargs):
    """Return the first match based on criteria, or None."""
    query = self._query(*criterion)
    query = self._order_by(query, **kwargs)
    query = self._filter(query, **kwargs)
    # NB: pagination must go last
    paged = self._paginate(query, **kwargs)
    return paged.first()
def stupid_hack(most=10, wait=None):
    """Sleep for *wait* seconds when given, otherwise for a random
    1 to ``most - 1`` seconds."""
    # Stupid Hack For Public Cloud so it is not overwhelmed with API requests.
    delay = wait if wait is not None else random.randrange(1, most)
    time.sleep(delay)
def gpg_command(args, env=None):
    """Prepare common GPG command line arguments.

    :param args: list of GPG arguments appended after the binary
    :param env: environment mapping; defaults to os.environ
    :return: full argv list starting with the GnuPG binary
    """
    environment = os.environ if env is None else env
    binary = get_gnupg_binary(neopg_binary=environment.get('NEOPG_BINARY'))
    return [binary] + args
def send_apdu(self, cla, ins, p1, p2, data=None, mrl=0, check_status=True):
    """Send an ISO/IEC 7816-4 APDU to the Type 4 Tag.

    The 4 byte APDU header (class, instruction, parameter 1 and 2)
    is constructed from the first four parameters (cla, ins, p1,
    p2) without interpretation. The byte string *data* argument
    represents the APDU command data field. It is encoded as a
    short or extended length field followed by the *data*
    bytes. The length field is not transmitted if *data* is None
    or an empty string. The maximum acceptable number of response
    data bytes is given with the max-response-length *mrl*
    argument. The value of *mrl* is transmitted as the 7816-4 APDU
    Le field after appropriate conversion.

    By default, the response is returned as a byte array not
    including the status word, a :exc:`Type4TagCommandError`
    exception is raised for any status word other than
    9000h. Response status verification can be disabled with
    *check_status* set to False, the byte array will then include
    the response status word at the last two positions.

    Transmission errors always raise a :exc:`Type4TagCommandError`
    exception.
    """
    apdu = bytearray([cla, ins, p1, p2])
    if not self._extended_length_support:
        # Short APDU encoding: Lc and Le are single bytes; an Le byte of 0
        # encodes the maximum of 256 response bytes.
        if data and len(data) > 255:
            raise ValueError("unsupported command data length")
        if mrl and mrl > 256:
            raise ValueError("unsupported max response length")
        if data:
            apdu += pack('>B', len(data)) + data
        if mrl > 0:
            apdu += pack('>B', 0 if mrl == 256 else mrl)
    else:
        # Extended APDU encoding: Lc is a zero byte followed by a 16-bit
        # big-endian length; Le is 16 bits, prefixed with a zero byte only
        # when there is no command data field. Le of 0 encodes 65536.
        if data and len(data) > 65535:
            raise ValueError("invalid command data length")
        if mrl and mrl > 65536:
            raise ValueError("invalid max response length")
        if data:
            apdu += pack(">xH", len(data)) + data
        if mrl > 0:
            le = 0 if mrl == 65536 else mrl
            apdu += pack(">H", le) if data else pack(">xH", le)
    apdu = self.transceive(apdu)
    if not apdu or len(apdu) < 2:
        # Any valid response carries at least the two status bytes SW1 SW2.
        raise Type4TagCommandError(nfc.tag.PROTOCOL_ERROR)
    if check_status and apdu[-2:] != b"\x90\x00":
        raise Type4TagCommandError.from_status(apdu[-2:])
    return apdu[:-2] if check_status else apdu
def match_in_kwargs(self, match_args, kwargs):
    """Matches against kwargs.

    For each (pattern, default) pair in *match_args*, emits code that pops
    the named keyword out of the *kwargs* expression (falling back to the
    default) and then pattern-matches a temporary variable holding the
    result.
    """
    for match, default in match_args:
        names = get_match_names(match)
        if names:
            tempvar = self.get_temp_var()
            # Generates: tempvar = kwargs.pop("a") if "a" in kwargs else
            #            kwargs.pop("b") if "b" in kwargs else ... default
            self.add_def(
                tempvar + " = " + "".join(
                    kwargs + '.pop("' + name + '") if "' + name + '" in ' + kwargs + " else "
                    for name in names
                ) + default,
            )
            with self.down_a_level():
                self.match(match, tempvar)
        else:
            # A nameless pattern can never be bound from keywords.
            raise CoconutDeferredSyntaxError(
                "keyword-only pattern-matching function arguments must have names",
                self.loc,
            )
def get_devices(self, refresh=False, generic_type=None):
    """Get all devices from Abode.

    :param refresh: force a refresh of the cached device list from the API
    :param generic_type: optional collection of generic types to filter by
    :return: list of device objects (filtered when generic_type is given)
    """
    if refresh or self._devices is None:
        if self._devices is None:
            self._devices = {}
        _LOGGER.info("Updating all devices...")
        response = self.send_request("get", CONST.DEVICES_URL)
        response_object = json.loads(response.text)
        # A single-device payload arrives as a dict; normalize to a list.
        if (response_object and not isinstance(response_object, (tuple, list))):
            response_object = [response_object]
        _LOGGER.debug("Get Devices Response: %s", response.text)
        for device_json in response_object:
            # Attempt to reuse an existing device
            device = self._devices.get(device_json['id'])
            if device:
                device.update(device_json)
            else:
                # No existing device, create a new one
                device = new_device(device_json, self)
                if not device:
                    _LOGGER.debug("Skipping unknown device: %s", device_json)
                    continue
                self._devices[device.device_id] = device
        # We will be treating the Abode panel itself as an armable device.
        panel_response = self.send_request("get", CONST.PANEL_URL)
        panel_json = json.loads(panel_response.text)
        self._panel.update(panel_json)
        # BUG fix: this previously logged ``response.text`` (the devices
        # payload) instead of the panel response.
        _LOGGER.debug("Get Mode Panel Response: %s", panel_response.text)
        alarm_device = self._devices.get(CONST.ALARM_DEVICE_ID + '1')
        if alarm_device:
            alarm_device.update(panel_json)
        else:
            alarm_device = ALARM.create_alarm(panel_json, self)
            self._devices[alarm_device.device_id] = alarm_device
    if generic_type:
        return [device for device in self._devices.values()
                if (device.generic_type is not None
                    and device.generic_type in generic_type)]
    return list(self._devices.values())
def insert_tasks_ignore_duplicate_names(tasks, queue, *args, **kwargs):
    """Insert a batch of tasks into a specific queue.

    If a DuplicateTaskNameError is raised, loop through the tasks and insert
    the remaining, ignoring and logging the duplicate tasks.

    Returns the number of successfully inserted tasks.
    """
    from google.appengine.api import taskqueue
    try:
        return _insert_tasks(tasks, queue, *args, **kwargs)
    except taskqueue.DuplicateTaskNameError:
        # At least one task failed in our batch, attempt to re-insert the
        # remaining tasks. Named tasks can never be transactional.
        remaining = _tasks_to_reinsert(tasks, transactional=False)
        inserted = len(tasks) - len(remaining)
        # Our subsequent task inserts should raise TaskAlreadyExistsError at
        # least once, but that will be swallowed by _insert_tasks.
        for task in remaining:
            inserted += _insert_tasks([task], queue, *args, **kwargs)
        return inserted
def get(self, username, width=None, height=None):
    """Retrieve public details on a given user.

    Note: Supplying the optional w or h parameters will result in the
    'custom' photo URL being added to the 'profile_image' object.

    :param username [string]: The user's username. Required.
    :param width [integer]: Profile image width in pixels.
    :param height [integer]: Profile image height in pixels.
    :return: [User]: The Unsplash User.
    """
    result = self._get("/users/{username}".format(username=username),
                       params={"w": width, "h": height})
    return UserModel.parse(result)
def findmin(psr, method='Nelder-Mead', history=False, formbats=False, renormalize=True, bounds={}, **kwargs):
    """Use scipy.optimize.minimize to find the minimum-chisq timing solution.

    Extra keyword options are passed through to the minimizer. Resets
    psr[...].val to the final solution and returns the final chisq. Uses the
    chisq gradient if the chosen method requires it. Ignores deleted points.

    :param psr: pulsar object exposing vals()/errs()/pars(); mutated in place.
    :param method: scipy.optimize.minimize method name.
    :param history: if True, also return arrays of visited chisq values and
        parameter locations.
    :param formbats: passed through to chisq()/dchisq().
    :param renormalize: passed through to dchisq().
    :param bounds: mapping par-name -> (lo, hi) in parameter units; only used
        by bounded methods such as 'L-BFGS-B'. NOTE(review): mutable default
        argument -- harmless here since it is only read, never mutated.
    :returns: minimum chisq, or (chisq, hval, hloc) when history=True.
    """
    ctr, err = psr.vals(), psr.errs()
    # To avoid losing precision, search in units of each parameter's a priori
    # error: x = (val - ctr) / err.
    if numpy.any(err == 0.0):
        print("Warning: one or more fit parameters have zero a priori error, and won't be searched.")
    hloc, hval = [], []

    def func(xs):
        # Objective: map scaled coordinates back to parameter values, then
        # evaluate chisq. Note this mutates psr as a side effect.
        psr.vals([c + x * e for x, c, e in zip(xs, ctr, err)])
        ret = chisq(psr, formbats=formbats)
        if numpy.isnan(ret):
            print("Warning: chisq is nan at {0}.".format(psr.vals()))
        if history:
            hloc.append(psr.vals())
            hval.append(ret)
        return ret

    def dfunc(xs):
        # Gradient of the objective in scaled coordinates (chain rule:
        # multiply each chisq derivative by the corresponding error).
        psr.vals([c + x * e for x, c, e in zip(xs, ctr, err)])
        dc = dchisq(psr, formbats=formbats, renormalize=renormalize)
        ret = numpy.array([d * e for d, e in zip(dc, err)], 'd')
        return ret

    opts = kwargs.copy()
    # Gradient-free methods must not receive a jacobian.
    if method not in ['Nelder-Mead', 'Powell']:
        opts['jac'] = dfunc
    if method in ['L-BFGS-B']:
        # Convert bounds from parameter units to scaled (error) units.
        opts['bounds'] = [(float((bounds[par][0] - ctr[i]) / err[i]), float((bounds[par][1] - ctr[i]) / err[i])) if par in bounds else (None, None) for i, par in enumerate(psr.pars())]
    res = scipy.optimize.minimize(func, [0.0] * len(ctr), method=method, **opts)
    if hasattr(res, 'message'):
        print(res.message)
    # This call also sets the pulsar parameters to the minimum location.
    minchisq = func(res.x)
    if history:
        return minchisq, numpy.array(hval), numpy.array(hloc)
    else:
        return minchisq
def running_covar(xx=True, xy=False, yy=False, remove_mean=False, symmetrize=False, sparse_mode='auto', modify_data=False, column_selection=None, diag_only=False, nsave=5):
    """Return a running covariance estimator.

    The estimator can be fed chunks of X and Y data and generates on-the-fly
    estimates of mean, covariance, running sum and second moment matrix.

    Parameters
    ----------
    xx : bool
        Estimate the covariance of X.
    xy : bool
        Estimate the cross-covariance of X and Y.
    yy : bool
        Estimate the covariance of Y.
    remove_mean : bool
        Remove the data mean in the covariance estimation.
    symmetrize : bool
        Use symmetric estimates with sum defined by sum_t x_t + y_t and
        second moment matrices defined by X'X + Y'Y and Y'X + X'Y.
    modify_data : bool
        If remove_mean=True, remove the mean in the input data in place
        (faster, but only safe if the input is not used elsewhere).
    sparse_mode : str
        One of 'dense' (always dense), 'sparse' (sparse whenever possible),
        or 'auto'.
    column_selection : ndarray(k, dtype=int) or None
        Indices of the columns to compute. If None, all columns are computed.
    diag_only : bool
        If True, restrict computation to diagonal entries (autocorrelations).
    nsave : int
        Depth of Moment storage; chunk moments are merged pairwise with
        moments of similar statistical weight (see [1]_).

    References
    ----------
    .. [1] http://i.stanford.edu/pub/cstr/reports/cs/tr/79/773/CS-TR-79-773.pdf
    """
    return RunningCovar(
        compute_XX=xx,
        compute_XY=xy,
        compute_YY=yy,
        sparse_mode=sparse_mode,
        modify_data=modify_data,
        remove_mean=remove_mean,
        symmetrize=symmetrize,
        column_selection=column_selection,
        diag_only=diag_only,
        nsave=nsave,
    )
def list_all_free_shipping_coupons(cls, **kwargs):
    """Return a page of FreeShippingCoupons.

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, pass async=True (as a keyword in ``kwargs``):

    >>> thread = api.list_all_free_shipping_coupons(async=True)
    >>> result = thread.get()

    :param async bool
    :param int page: page number
    :param int size: page size
    :param str sort: page order
    :return: page[FreeShippingCoupon], or the request thread when called
        asynchronously.
    """
    kwargs['_return_http_data_only'] = True
    # Both the async and the sync path delegate to the same helper: with
    # async=True it returns the request thread, otherwise the page data.
    return cls._list_all_free_shipping_coupons_with_http_info(**kwargs)
def git_pretty():
    """Return "<short-hash> <subject>" for HEAD, or "unknown" outside a git repo."""
    if git_repo() is None:
        return "unknown"
    raw = subprocess.check_output(["git", "log", "--pretty=format:%h %s", "-n", "1"])
    return raw.decode("utf-8").strip()
def porosimetry(im, sizes=25, inlets=None, access_limited=True, mode='hybrid'):
    r"""Perform a porosimetry simulation on the image.

    Parameters
    ----------
    im : ND-array
        An ND image of the porous material containing True values in the
        pore space.
    sizes : array_like or scalar
        The sizes to invade. If a list of values is provided they are used
        directly. If a scalar is provided then that number of points spanning
        the min and max of the distance transform are used.
    inlets : ND-array, boolean
        A boolean mask with True values indicating where the invasion enters
        the image. By default all faces are considered inlets, akin to a
        mercury porosimetry experiment. Only used if ``access_limited`` is
        ``True``.
    access_limited : bool
        If True (default), intrusion only occurs from the surfaces,
        simulating experimental tools like mercury intrusion porosimetry; if
        False, the invading phase may appear in the core of the image.
    mode : string
        Which method is used to compute the result:
        'hybrid' (default) thresholds the distance transform, trims if
        access-limited, then dilates with an fft-convolution; 'dt' is the
        same but uses a second distance transform for the dilation; 'mio'
        performs a single morphological opening per size, then trims.
        The choice between 'dt' and 'hybrid' is a speed trade-off that is
        system and installation specific.

    Returns
    -------
    image : ND-array
        A copy of ``im`` with voxel values indicating the sphere radius at
        which each voxel becomes accessible from the inlets. A boolean
        comparison ``im > r`` gives the invading-fluid configuration for a
        sphere radius ``r`` (convertible to capillary pressure).

    See Also
    --------
    fftmorphology
    local_thickness
    """
    if im.ndim != im.squeeze().ndim:
        # Singleton axes confuse the 2D/3D structuring-element choice below.
        warnings.warn('Input image conains a singleton axis:' + str(im.shape) + ' Reduce dimensionality with np.squeeze(im) to avoid' + ' unexpected behavior.')
    dt = spim.distance_transform_edt(im > 0)
    if inlets is None:
        inlets = get_border(im.shape, mode='faces')
    if isinstance(sizes, int):
        # Scalar: log-spaced radii from the largest pore down to 1 voxel.
        sizes = sp.logspace(start=sp.log10(sp.amax(dt)), stop=0, num=sizes)
    else:
        # Explicit list: deduplicate and invade from largest to smallest.
        sizes = sp.unique(sizes)[-1::-1]
    if im.ndim == 2:
        strel = ps_disk
    else:
        strel = ps_ball
    if mode == 'mio':
        # Pad so the morphological opening does not clip at the image
        # boundary; the padding is stripped again after the loop.
        pw = int(sp.floor(dt.max()))
        impad = sp.pad(im, mode='symmetric', pad_width=pw)
        inletspad = sp.pad(inlets, mode='symmetric', pad_width=pw)
        inlets = sp.where(inletspad)
        imresults = sp.zeros(sp.shape(impad))
        for r in tqdm(sizes):
            imtemp = fftmorphology(impad, strel(r), mode='erosion')
            if access_limited:
                imtemp = trim_disconnected_blobs(imtemp, inlets)
            imtemp = fftmorphology(imtemp, strel(r), mode='dilation')
            if sp.any(imtemp):
                # Record r only for voxels not already reached at a larger r.
                imresults[(imresults == 0) * imtemp] = r
        imresults = extract_subsection(imresults, shape=im.shape)
    elif mode == 'dt':
        inlets = sp.where(inlets)
        imresults = sp.zeros(sp.shape(im))
        for r in tqdm(sizes):
            imtemp = dt >= r
            if access_limited:
                imtemp = trim_disconnected_blobs(imtemp, inlets)
            if sp.any(imtemp):
                # Dilate by a sphere of radius r via a second distance
                # transform relative to the thresholded mask.
                imtemp = spim.distance_transform_edt(~imtemp) < r
                imresults[(imresults == 0) * imtemp] = r
    elif mode == 'hybrid':
        inlets = sp.where(inlets)
        imresults = sp.zeros(sp.shape(im))
        for r in tqdm(sizes):
            imtemp = dt >= r
            if access_limited:
                imtemp = trim_disconnected_blobs(imtemp, inlets)
            if sp.any(imtemp):
                # Dilate via fft-convolution with the structuring element;
                # the 0.0001 threshold turns the float result into a mask.
                imtemp = fftconvolve(imtemp, strel(r), mode='same') > 0.0001
                imresults[(imresults == 0) * imtemp] = r
    else:
        raise Exception('Unreckognized mode ' + mode)
    return imresults
def satisfies(self, other):
    """Check if the capabilities of a primitive are enough to satisfy a requirement.

    Called on a Requirement acting as a capability of a primitive; returns
    True when the capability advertised by ``self`` can represent the data
    described by the Requirement ``other``.

    Satisfaction chart (self on rows, other on columns)::

                  other
               ' ' A  C  0  1
        s ' '   Y  N  N  N  N
        e  A    Y  Y  Y  Y  Y
        l  C    Y  -  Y  Y  Y
        f  0    Y  *  *  Y  N
           1    Y  *  *  N  Y

    ' ' = No Care, A = arbitrary, C = Constant, 0 = ZERO, 1 = ONE,
    Y = yes, N = no, - = could satisfy with multiple instances,
    * = behavior not yet determined (used for bitbanging controllers).
    """
    # A no-care requirement is satisfied by anything; a no-care capability
    # satisfies nothing else.
    if other.isnocare:
        return True
    if self.isnocare:
        return False
    # An arbitrary capability can represent any requirement.
    if self.arbitrary:
        return True
    # A constant capability covers everything except an arbitrary requirement.
    if self.constant and not other.arbitrary:
        return True
    # Fixed values only match the identical fixed value.
    return (self.value is other.value and not other.arbitrary and not other.constant)
def rounded_rectangle_region(width, height, radius):
    """Return a wx.Region shaped as a rounded rectangle of the given size."""
    bitmap = wx.Bitmap.FromRGBA(width, height)
    mem_dc = wx.MemoryDC(bitmap)
    # Paint a white rounded rectangle (any non-black color would do); the
    # untouched pixels stay black (#000000) and become the masked-out area.
    mem_dc.Brush = wx.Brush((255,) * 3)
    mem_dc.DrawRoundedRectangle(0, 0, width, height, radius)
    mem_dc.SelectObject(wx.NullBitmap)
    bitmap.SetMaskColour((0,) * 3)
    return wx.Region(bitmap)
def write_sequences_to_xlsx(path, seqs):
    """Create a XLSX file listing the given sequences.

    Arguments
    ---------
    path : str or pathlib.Path
        The name of the file to create.
    seqs : dict
        A mapping of names to sequences, which can be either protein or DNA.
    """
    from openpyxl import Workbook
    workbook = Workbook()
    sheet = workbook.active
    # One sequence per row: name in column 1, sequence in column 2.
    for row_index, (name, sequence) in enumerate(seqs.items(), 1):
        sheet.cell(row_index, 1).value = name
        sheet.cell(row_index, 2).value = sequence
    workbook.save(path)
def create_handlers_map(prefix='.*'):
    """Create a new handlers map.

    Args:
        prefix: url prefix to use.
    Returns:
        list of (regexp, handler) pairs for the WSGIApplication constructor.
    """
    route_specs = [
        ('/output', _BarrierHandler),
        ('/run', _PipelineHandler),
        ('/finalized', _PipelineHandler),
        ('/cleanup', _CleanupHandler),
        ('/abort', _PipelineHandler),
        ('/fanout', _FanoutHandler),
        ('/fanout_abort', _FanoutAbortHandler),
        ('/callback', _CallbackHandler),
        ('/rpc/tree', status_ui._TreeStatusHandler),
        ('/rpc/class_paths', status_ui._ClassPathListHandler),
        ('/rpc/list', status_ui._RootListHandler),
        # Catch-all for the status UI; must stay last.
        ('(/.+)', status_ui._StatusUiHandler),
    ]
    return [(prefix + suffix, handler) for suffix, handler in route_specs]
def number_of_attributes(self):
    """int: number of attributes, parsing the source lazily on first access."""
    if self._is_parsed:
        return len(self._attributes)
    # First access: parse once and remember it so we never parse again.
    self._Parse()
    self._is_parsed = True
    return len(self._attributes)
def confidenceInterval(n, k, alpha=0.68, errorbar=False):
    """Given n trials and k successes, return the efficiency and its
    confidence interval.

    :param n: number of trials.
    :param k: number of successes.
    :param alpha: integrated probability the interval must contain
        (0.68 by default, roughly 1 sigma).
    :param errorbar: if True, return the interval as (e - low, high - e)
        offsets instead of absolute [low, high] bounds.
    :returns: (efficiency, [low, high]) or (efficiency, [err_lo, err_hi]);
        (nan, [nan, nan]) when n == 0.
    """
    try:
        e = float(k) / float(n)
    except ZeroDivisionError:
        return np.nan, [np.nan, np.nan]
    # Tabulate the posterior density over efficiency on a fine grid.
    bins = 1000001
    dx = 1. / bins
    efficiency = np.linspace(0, 1, bins)
    # MODIFIED FOR LARGE NUMBERS: for large arguments use a Stirling
    # approximation of gammaln (gammalnStirling) to avoid precision loss.
    if n + 2 > 1000:
        a = gammalnStirling(n + 2)
    else:
        a = scipy.special.gammaln(n + 2)
    if k + 1 > 1000:
        b = gammalnStirling(k + 1)
    else:
        b = scipy.special.gammaln(k + 1)
    if n - k + 1 > 1000:
        c = gammalnStirling(n - k + 1)
    else:
        c = scipy.special.gammaln(n - k + 1)
    # Evaluate the density on the open interval and special-case the two
    # endpoints, where the log terms would be 0*log(0): only k==0 (resp.
    # k==n) has nonzero density at efficiency 0 (resp. 1).
    if k == 0:
        p = np.concatenate([[np.exp(a - b - c)], np.exp(a - b - c + (k * np.log(efficiency[1:-1])) + (n - k) * np.log(1. - efficiency[1:-1])), [0.]])
    elif k == n:
        p = np.concatenate([[0.], np.exp(a - b - c + (k * np.log(efficiency[1:-1])) + (n - k) * np.log(1. - efficiency[1:-1])), [np.exp(a - b - c)]])
    else:
        p = np.concatenate([[0.], np.exp(a - b - c + (k * np.log(efficiency[1:-1])) + (n - k) * np.log(1. - efficiency[1:-1])), [0.]])
    # Build the interval by taking grid points in decreasing order of
    # density until the accumulated probability reaches alpha, then span
    # them -- always widening to include the point estimate e.
    i = np.argsort(p)[::-1]
    p_i = np.take(p, i)
    s = i[np.cumsum(p_i * dx) < alpha]
    low = min(np.min(s) * dx, e)
    high = max(np.max(s) * dx, e)
    if not errorbar:
        return e, [low, high]
    else:
        return e, [e - low, high - e]
def keyring_refresh(**kwargs):
    """Refresh the keyring in the cocaine-runtime."""
    ctx = Context(**kwargs)
    # The refresh action needs an authenticated handle to the 'tvm' service.
    tvm_service = ctx.repo.create_secure_service('tvm')
    ctx.execute_action('keyring:refresh', tvm=tvm_service)
def warn_or_error(removal_version, deprecated_entity_description, hint=None, deprecation_start_version=None, stacklevel=3, frame_info=None, context=1, ensure_stderr=False):
    """Check the removal_version against the current pants version.

    Issues a warning if the removal version is > current pants version, or
    raises an error otherwise.

    :param string removal_version: The pantsbuild.pants version at which the
        deprecated entity will be/was removed.
    :param string deprecated_entity_description: A short description of the
        deprecated entity, embedded in warning/error messages.
    :param string hint: A message describing how to migrate from the removed
        entity.
    :param string deprecation_start_version: The pantsbuild.pants version at
        which the entity begins to display a deprecation warning. Must be
        less than `removal_version`. If not provided, the deprecation
        warning is always displayed.
    :param int stacklevel: The stacklevel to pass to warnings.warn.
    :param FrameInfo frame_info: If provided, use this frame info instead of
        deriving one from `stacklevel`.
    :param int context: The number of lines of source code surrounding the
        selected frame to display in a warning message.
    :param bool ensure_stderr: Whether to use warnings.warn, or
        warnings.showwarning-style printing directly to stderr.
    :returns: the formatted deprecation message when only warning.
    :raises DeprecationApplicationError: if the removal_version parameter is
        invalid.
    :raises CodeRemovedError: if the current version is later than the
        version marked for removal.
    """
    removal_semver = validate_deprecation_semver(removal_version, 'removal version')
    if deprecation_start_version:
        deprecation_start_semver = validate_deprecation_semver(deprecation_start_version, 'deprecation start version')
        if deprecation_start_semver >= removal_semver:
            raise InvalidSemanticVersionOrderingError('The deprecation start version {} must be less than the end version {}.'.format(deprecation_start_version, removal_version))
        elif PANTS_SEMVER < deprecation_start_semver:
            # Too early to start warning about this deprecation.
            return
    msg = 'DEPRECATED: {} {} removed in version {}.'.format(deprecated_entity_description, get_deprecated_tense(removal_version), removal_version)
    if hint:
        msg += '\n {}'.format(hint)
    # We need filename and line_number for warnings.formatwarning, which
    # appears to be the only way to get a warning message displayed on
    # stderr. We get that from frame_info -- it's too bad we have to
    # reconstruct the `stacklevel` logic ourselves, but we also gain the
    # ability to show multiple lines of context, which is neat.
    if frame_info is None:
        frame_info = _get_frame_info(stacklevel, context=context)
    _, filename, line_number, _, code_context, _ = frame_info
    if code_context:
        context_lines = ''.join(code_context)
    else:
        context_lines = '<no code context available>'
    if removal_semver > PANTS_SEMVER:
        if ensure_stderr:
            # No warning filters can stop us from printing this message
            # directly to stderr.
            warning_msg = warnings.formatwarning(msg, DeprecationWarning, filename, line_number, line=context_lines)
            print(warning_msg, file=sys.stderr)
        else:
            # This output is filtered by warning filters.
            with _greater_warnings_context(context_lines):
                warnings.warn_explicit(message=DeprecationWarning(msg) if PY2 else msg, category=DeprecationWarning, filename=filename, lineno=line_number)
        return msg
    else:
        raise CodeRemovedError(msg)
def compile(definition, handlers={}):
    """Generate a validation function for the JSON schema in ``definition``.

    Example:

    .. code-block:: python

        import fastjsonschema
        validate = fastjsonschema.compile({'type': 'string'})
        validate('hello')

    The keyword ``default`` is supported: missing properties with a default
    are filled in on the returned data. Supported drafts are draft-04,
    draft-06 and draft-07, selected via ``$schema`` in the ``definition``
    (latest draft-07 when unspecified). ``handlers`` maps URIs to functions
    used to retrieve remote schemas referenced by the ``definition``.

    Exception :any:`JsonSchemaDefinitionException` is raised when generating
    the code fails (bad definition). Exception :any:`JsonSchemaException` is
    raised from the generated function when validation fails (data does not
    follow the definition).
    """
    resolver, code_generator = _factory(definition, handlers)
    namespace = code_generator.global_state
    # Execute the generated code in its own global namespace (no local
    # state) so the validator can call itself recursively by name.
    exec(code_generator.func_code, namespace)
    return namespace[resolver.get_scope_name()]
def anyword_substring_search(target_words, query_words):
    """Return True only if every word in query_words matches target_words."""
    # Bail out on the first non-matching query word; the inner search
    # signals a miss by returning False (any other reply counts as a hit).
    for query_word in query_words:
        if anyword_substring_search_inner(query_word, target_words) is False:
            return False
    return True
def templateDoc(self):
    """JSON-serializable template with all necessary details to recreate this
    stimulus in another session.

    :returns: dict
    """
    # Merge the component doc and the test doc into a single dict.
    # Fix: dict(a.items() + b.items()) only worked on Python 2, where
    # items() returned lists; dict() + update() is portable.
    doc = dict(self.componentDoc(False))
    doc.update(self.testDoc())
    # Replace each auto-parameter's selected components with their location
    # indices so the selection can be serialized.
    autoparams = copy.deepcopy(self._autoParams.allData())
    for param in autoparams:
        param['selection'] = [self.indexByComponent(component) for component in param['selection']]
    doc['autoparameters'] = autoparams
    doc['reorder'] = self.reorderName
    return doc
def reboot(self, **params):
    """Power-cycle an outlet through the device's telnet menu.

    Args:
        params (dict), must contain parameter "outlet" - outlet number
    Example:
        params = {'outlet': 1}
    """
    outlet = params['outlet']
    # Walk the menu tree: main menu -> Device Manager -> Outlet Management
    # -> Outlet Control -> <outlet> -> Control.
    menu_path = ('\x1b\r\n', '1\r\n', '2\r\n', '1\r\n', '%d\r\n' % outlet, '1\r\n')
    for choice in menu_path:
        self.tn.write(choice)
        self.until_done()
    # Switch the outlet off and confirm.
    self.tn.write('2\r\n')
    self.until('to cancel')
    self.tn.write('YES\r\n')
    self.until('to continue')
    self.tn.write('\r\n')
    self.until_done()
    # Give the outlet time to actually drop power before switching back on.
    time.sleep(5)
    # Switch the outlet back on and confirm.
    self.tn.write('1\r\n')
    self.until('to cancel')
    self.tn.write('YES\r\n')
    self.until('to continue')
    self.tn.write('\r\n')
    self.until_done()
async def wait_for_body_middleware(environ, start_response=None):
    '''Middleware that waits for the full request body.

    Waits for the full body to be received before letting other middleware
    run. Useful when using synchronous web-frameworks such as
    :django:`django <>`.
    '''
    if not environ.get('wsgi.async'):
        return
    try:
        body = await environ['wsgi.input'].read()
    except TypeError:
        # read() was not awaitable; treat the body as empty.
        body = b''
    # Replace the async stream with an already-buffered, synchronous one.
    environ['wsgi.input'] = BytesIO(body)
    environ.pop('wsgi.async')
def convertReadAlignment(self, read, readGroupSet, readGroupId):
    """Convert a pysam ReadAlignment to a GA4GH ReadAlignment.

    :param read: the pysam aligned segment to convert.
    :param readGroupSet: used only to mint the returned alignment's id.
    :param readGroupId: GA4GH read group id stamped onto the result.
    :returns: a populated protocol.ReadAlignment message.
    """
    samFile = self.getFileHandle(self._dataUrl)
    # TODO fill out remaining fields
    # TODO refine in tandem with code in converters module
    ret = protocol.ReadAlignment()
    # ret.fragmentId = 'TODO'
    ret.aligned_quality.extend(read.query_qualities)
    ret.aligned_sequence = read.query_sequence
    if SamFlags.isFlagSet(read.flag, SamFlags.READ_UNMAPPED):
        # Unmapped reads carry no alignment sub-message at all.
        ret.ClearField("alignment")
    else:
        ret.alignment.CopyFrom(protocol.LinearAlignment())
        ret.alignment.mapping_quality = read.mapping_quality
        ret.alignment.position.CopyFrom(protocol.Position())
        ret.alignment.position.reference_name = samFile.getrname(read.reference_id)
        ret.alignment.position.position = read.reference_start
        # Default to the positive strand, flip if the flag says otherwise.
        ret.alignment.position.strand = protocol.POS_STRAND
        if SamFlags.isFlagSet(read.flag, SamFlags.READ_REVERSE_STRAND):
            ret.alignment.position.strand = protocol.NEG_STRAND
        # Translate each SAM cigar op into its GA4GH equivalent.
        for operation, length in read.cigar:
            gaCigarUnit = ret.alignment.cigar.add()
            gaCigarUnit.operation = SamCigar.int2ga(operation)
            gaCigarUnit.operation_length = length
            gaCigarUnit.reference_sequence = ""
            # TODO fix this!
    ret.duplicate_fragment = SamFlags.isFlagSet(read.flag, SamFlags.DUPLICATE_READ)
    ret.failed_vendor_quality_checks = SamFlags.isFlagSet(read.flag, SamFlags.FAILED_QUALITY_CHECK)
    ret.fragment_length = read.template_length
    ret.fragment_name = read.query_name
    for key, value in read.tags:
        # Useful for inspecting the structure of read tags:
        # print("{key} {ktype}: {value}, {vtype}".format(
        #     key=key, ktype=type(key), value=value, vtype=type(value)))
        protocol.setAttribute(ret.attributes.attr[key].values, value)
    if SamFlags.isFlagSet(read.flag, SamFlags.MATE_UNMAPPED):
        ret.next_mate_position.Clear()
    else:
        ret.next_mate_position.Clear()
        if read.next_reference_id != -1:
            ret.next_mate_position.reference_name = samFile.getrname(read.next_reference_id)
        else:
            ret.next_mate_position.reference_name = ""
        ret.next_mate_position.position = read.next_reference_start
        ret.next_mate_position.strand = protocol.POS_STRAND
        if SamFlags.isFlagSet(read.flag, SamFlags.MATE_REVERSE_STRAND):
            ret.next_mate_position.strand = protocol.NEG_STRAND
    if SamFlags.isFlagSet(read.flag, SamFlags.READ_PAIRED):
        ret.number_reads = 2
    else:
        ret.number_reads = 1
    # read_number encoding: -1 unset, 0 first-of-pair, 1 second-of-pair,
    # 2 when both pair flags are set. NOTE(review): this 0/1/2 scheme looks
    # intentional but is unusual -- confirm against the GA4GH schema.
    ret.read_number = -1
    if SamFlags.isFlagSet(read.flag, SamFlags.FIRST_IN_PAIR):
        if SamFlags.isFlagSet(read.flag, SamFlags.SECOND_IN_PAIR):
            ret.read_number = 2
        else:
            ret.read_number = 0
    elif SamFlags.isFlagSet(read.flag, SamFlags.SECOND_IN_PAIR):
        ret.read_number = 1
    ret.improper_placement = not SamFlags.isFlagSet(read.flag, SamFlags.READ_PROPER_PAIR)
    ret.read_group_id = readGroupId
    ret.secondary_alignment = SamFlags.isFlagSet(read.flag, SamFlags.SECONDARY_ALIGNMENT)
    ret.supplementary_alignment = SamFlags.isFlagSet(read.flag, SamFlags.SUPPLEMENTARY_ALIGNMENT)
    ret.id = readGroupSet.getReadAlignmentId(ret)
    return ret
def get_reply(self, method, replyroot):
    """Process the I{reply} for the specified I{method} by unmarshalling it
    into Python object(s).

    @param method: The name of the invoked method.
    @type method: str
    @param replyroot: The reply XML root node received after invoking the
        specified method.
    @type replyroot: L{Element}
    @return: The unmarshalled reply - an L{Object} or a I{list} depending on
        whether the service returns a single object or a collection.
    @rtype: L{Object} or I{list}
    """
    envelope = replyroot.getChild("Envelope", envns)
    envelope.promotePrefixes()
    # Resolve multiref links before extracting the reply content.
    body = self.multiref.process(envelope.getChild("Body", envns))
    nodes = self.replycontent(method, body)
    rtypes = self.returned_types(method)
    if len(rtypes) > 1:
        return self.replycomposite(rtypes, nodes)
    if not rtypes:
        # Method declares no return value.
        return
    if rtypes[0].multi_occurrence():
        return self.replylist(rtypes[0], nodes)
    if len(nodes):
        resolved = rtypes[0].resolve(nobuiltin=True)
        return self.unmarshaller().process(nodes[0], resolved)
def _assemble_influence(stmt):
    """Assemble an Influence statement into text."""
    def _with_delta(agent_str, delta):
        # Prefix with "a decrease in" / "an increase in"; note the leading
        # 'a' plus 'n increase' spells "an increase".
        if delta['polarity'] is None:
            return agent_str
        suffix = ' decrease' if delta['polarity'] == -1 else 'n increase'
        return 'a%s in %s' % (suffix, agent_str)

    subj_str = _with_delta(_assemble_agent_str(stmt.subj.concept), stmt.subj.delta)
    obj_str = _with_delta(_assemble_agent_str(stmt.obj.concept), stmt.obj.delta)
    return _make_sentence('%s causes %s' % (subj_str, obj_str))
def read_object_array(f, data, options):
    """Recursively read an array of HDF5 References into an object array.

    Parameters
    ----------
    f : h5py.File
        The HDF5 file handle that is open.
    data : numpy.ndarray of h5py.Reference
        The array of HDF5 References to read and make an object array from.
    options : hdf5storage.core.Options
        hdf5storage options object.

    Returns
    -------
    obj_array : numpy.ndarray of numpy.object_
        The Python object array containing the items pointed to by ``data``.

    Raises
    ------
    NotImplementedError
        If reading the object from file is currently not supported.

    See Also
    --------
    write_object_array
    hdf5storage.Options.group_for_references
    h5py.Reference
    """
    derefed = np.zeros(shape=data.shape, dtype='object')
    # Follow every reference and store the decoded item at the same index.
    for idx, ref in np.ndenumerate(data):
        derefed[idx] = read_data(f, None, None, options, dsetgrp=f[ref])
    return derefed
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.