| signature (string, lengths 29–44.1k) | implementation (string, lengths 0–85.2k) |
|---|---|
def artifactCreated(self, *args, **kwargs):
    """Declare the ``artifact-created`` topic exchange.

    The queue posts a message on this exchange whenever the
    ``createArtifact`` end-point is called — i.e. *before* the artifact
    itself has been uploaded — so consumers should expect the artifact
    to become available only some time after the message is seen
    (upload signatures typically expire after ~30 minutes, forcing the
    uploader to call ``createArtifact`` again with the same payload).
    Mostly useful for tools monitoring task evaluation, counting
    artifacts per task, or indexing artifacts (though indexing is
    usually better done after the task completes successfully).

    Output schema: ``v1/artifact-created-message.json#``

    Routing keys (all required, single-word unless noted):
      * routingKeyKind: always ``'primary'`` for the formalized routing key
      * taskId / runId / workerGroup / workerId: latest-run identifiers,
        ``_`` when no run exists for the task
      * provisionerId / workerType: targeting of the task
      * schedulerId / taskGroupId: creation context of the task
      * reserved: multi-word; reserved for future entries, match with ``#``
    """
    # Single-word routing-key entries, in their fixed positional order.
    single_word_names = (
        'taskId', 'runId', 'workerGroup', 'workerId',
        'provisionerId', 'workerType', 'schedulerId', 'taskGroupId',
    )
    routing_key = [{'constant': 'primary',
                    'multipleWords': False,
                    'name': 'routingKeyKind'}]
    routing_key.extend({'multipleWords': False, 'name': name}
                       for name in single_word_names)
    # Trailing catch-all entry reserved for future routing-key additions.
    routing_key.append({'multipleWords': True, 'name': 'reserved'})
    ref = {
        'exchange': 'artifact-created',
        'name': 'artifactCreated',
        'routingKey': routing_key,
        'schema': 'v1/artifact-created-message.json#',
    }
    return self._makeTopicExchange(ref, *args, **kwargs)
def encodeEntitiesReentrant(self, input):
    """Globally encode *input*, replacing predefined entities and
    non-ASCII values with their entity and CharRef counterparts.

    Contrary to ``xmlEncodeEntities`` this routine is reentrant; the
    result must be deallocated by the caller.
    """
    return libxml2mod.xmlEncodeEntitiesReentrant(self._o, input)
def _load_mirteFile(d, m):
    """Load the dictionary parsed from a mirteFile into <m>."""
    defs = d.get('definitions', {})
    insts = d.get('instances', {})

    # Instances already known to <m> are updated in place and removed
    # from the set still to be created.
    already_present = [name for name in insts if name in m.insts]
    for name in already_present:
        m.update_instance(name, dict(insts[name]))
        del insts[name]

    # Add module definitions, ordered so dependencies come first.
    ordered_defs = sort_by_successors(
        six.viewkeys(defs),
        dual_cover(six.viewkeys(defs),
                   restricted_cover(six.viewkeys(defs),
                                    depsOf_of_mirteFile_module_definition(defs))))
    for name in ordered_defs:
        m.add_module_definition(
            name, module_definition_from_mirteFile_dict(m, defs[name]))

    # Create the remaining instances, ordered by dependency as well.
    ordered_insts = sort_by_successors(
        six.viewkeys(insts),
        dual_cover(six.viewkeys(insts),
                   restricted_cover(six.viewkeys(insts),
                                    depsOf_of_mirteFile_instance_definition(m, insts))))
    for name in ordered_insts:
        settings = dict(insts[name])
        del settings['module']
        m.create_instance(name, insts[name]['module'], settings)
def vcf_to_npz(input, output, compressed=True, overwrite=False, fields=None,
               exclude_fields=None, rename_fields=None, types=None,
               numbers=None, alt_number=DEFAULT_ALT_NUMBER, fills=None,
               region=None, tabix=True, samples=None, transformers=None,
               buffer_size=DEFAULT_BUFFER_SIZE, chunk_length=DEFAULT_CHUNK_LENGTH,
               log=None):
    """Read data from a VCF file into NumPy arrays and save as a .npz file.

    .. versionchanged:: 1.12.0
        Now will not create any output file if no variants are found in the
        VCF file or matching the requested region.

    Parameters
    ----------
    input : string
        {input}
    output : string
        {output}
    compressed : bool, optional
        If True (default), save with compression.
    overwrite : bool, optional
        {overwrite}
    fields : list of strings, optional
        {fields}
    exclude_fields : list of strings, optional
        {exclude_fields}
    rename_fields : dict[str -> str], optional
        {rename_fields}
    types : dict, optional
        {types}
    numbers : dict, optional
        {numbers}
    alt_number : int, optional
        {alt_number}
    fills : dict, optional
        {fills}
    region : string, optional
        {region}
    tabix : string, optional
        {tabix}
    samples : list of strings
        {samples}
    transformers : list of transformer objects, optional
        {transformers}
    buffer_size : int, optional
        {buffer_size}
    chunk_length : int, optional
        {chunk_length}
    log : file-like, optional
        {log}
    """
    # Refuse to clobber an existing file unless explicitly allowed.
    if os.path.exists(output) and not overwrite:
        raise ValueError('file exists at path %r; use overwrite=True to replace'
                         % output)

    # Read all requested data into memory in one go.
    data = read_vcf(input=input, fields=fields, exclude_fields=exclude_fields,
                    rename_fields=rename_fields, types=types, numbers=numbers,
                    alt_number=alt_number, buffer_size=buffer_size,
                    chunk_length=chunk_length, log=log, fills=fills,
                    region=region, tabix=tabix, samples=samples,
                    transformers=transformers)
    if data is None:
        # No variants matched; deliberately create no output file.
        return

    # Save as npz, optionally compressed.
    save = np.savez_compressed if compressed else np.savez
    save(output, **data)
def build(self, sources=None, headers=None):
    '''Compile the given sources.

    sources can be file name or code:
        sources = ['x.c', 'int main(){}']
    or
        sources = 'int main(){}'
    '''
    tempdir = None
    code_strings, file_names = separate_sources(sources)
    if len(code_strings) or headers:  # TODO: remove tempdir
        # Inline code and headers have to live on disk for the compiler.
        tempdir = tmpdir()
    temp_files = [tmpfile(code, tempdir, '.c') for code in code_strings]
    if headers:
        for header_name, header_code in headers.items():
            (Path(tempdir) / header_name).write_text(header_code)
    cmd = self.command_list(file_names + temp_files)
    if tempdir:
        cmd += ['-I' + tempdir]
    self.proc = Proc(cmd).call()
    # for x in temp_list:
    #     os.remove(x)
    if not self.ok:
        raise AvrGccCompileError(cmd, sources, self.error_text)
def value(self):
    """Return a copy of the original map's value.

    Nested values are pure Python values as returned by
    :attr:`Datatype.value` from the nested types.

    :rtype: dict
    """
    return {key: nested.value for key, nested in self._value.items()}
def open(self):
    """Implementation of Reporter callback.

    Ensures the report directory exists, then opens the HTML report file
    for writing and stores the handle on ``self._report_file``.
    """
    report_dir = os.path.dirname(self._html_dir)
    safe_mkdir(report_dir)
    self._report_file = open(self.report_path(), 'w')
def _find_files ( metadata ) :
'''Looks for all the files in the S3 bucket cache metadata''' | ret = [ ]
found = { }
for bucket_dict in metadata :
for bucket_name , data in six . iteritems ( bucket_dict ) :
filepaths = [ k [ 'Key' ] for k in data ]
filepaths = [ k for k in filepaths if not k . endswith ( '/' ) ]
if bucket_name not in found :
found [ bucket_name ] = True
ret . append ( { bucket_name : filepaths } )
else :
for bucket in ret :
if bucket_name in bucket :
bucket [ bucket_name ] += filepaths
break
return ret |
def step(self) -> Number:
    "Return the next value along the annealed schedule."
    self.n += 1
    progress = self.n / self.n_iter
    return self.func(self.start, self.end, progress)
def destroy(self):
    """Destroy every vagrant box involved in the deployment."""
    runner = vagrant.Vagrant(root=os.getcwd(),
                             quiet_stdout=False,
                             quiet_stderr=True)
    runner.destroy()
def get_all_hits(self):
    '''Get all HITs from MTurk.

    :return: list of HIT objects converted via ``_hit_xml_to_object``,
        or ``False`` if the connection or the listing failed
    '''
    if not self.connect_to_turk():
        return False
    try:
        hits = []
        paginator = self.mtc.get_paginator('list_hits')
        for page in paginator.paginate():
            hits.extend(page['HITs'])
    except Exception as e:
        # `print e` was Python-2-only syntax; print(e) works on 2 and 3.
        # Best-effort behaviour preserved: report the error and bail out.
        print(e)
        return False
    return self._hit_xml_to_object(hits)
def _find_matching_expectation ( self , args , kwargs ) :
"""Return a matching expectation .
Returns the first expectation that matches the ones declared . Tries one with specific
arguments first , then falls back to an expectation that allows arbitrary arguments .
: return : The matching ` ` Expectation ` ` , if one was found .
: rtype : Expectation , None""" | for expectation in self . _expectations :
if expectation . satisfy_exact_match ( args , kwargs ) :
return expectation
for expectation in self . _expectations :
if expectation . satisfy_custom_matcher ( args , kwargs ) :
return expectation
for expectation in self . _expectations :
if expectation . satisfy_any_args_match ( ) :
return expectation |
def _list_object_parts(self, bucket_name, object_name, upload_id):
    """Yield every uploaded part of an in-progress multipart upload.

    :param bucket_name: Bucket name to list parts for.
    :param object_name: Object name to list parts for.
    :param upload_id: Upload id of the previously uploaded object name.
    """
    is_valid_bucket_name(bucket_name)
    is_non_empty_string(object_name)
    is_non_empty_string(upload_id)

    query = {'uploadId': upload_id, 'max-parts': '1000'}
    part_number_marker = ''
    is_truncated = True
    while is_truncated:
        # Resume listing from where the previous page stopped.
        if part_number_marker:
            query['part-number-marker'] = str(part_number_marker)
        response = self._url_open('GET', bucket_name=bucket_name,
                                  object_name=object_name, query=query)
        parts, is_truncated, part_number_marker = parse_list_parts(
            response.data, bucket_name=bucket_name,
            object_name=object_name, upload_id=upload_id)
        for part in parts:
            yield part
def set_window_position(self, x, y, windowHandle='current'):
    """Sets the x,y position of the current window. (window.moveTo)

    :Args:
     - x: the x-coordinate in pixels to set the window position
     - y: the y-coordinate in pixels to set the window position

    :Usage:
        driver.set_window_position(0,0)
    """
    x, y = int(x), int(y)
    if not self.w3c:
        # Legacy (JSON wire protocol) path.
        self.execute(Command.SET_WINDOW_POSITION,
                     {'x': x, 'y': y, 'windowHandle': windowHandle})
        return
    if windowHandle != 'current':
        warnings.warn("Only 'current' window is supported for W3C compatibile browsers.")
    return self.set_window_rect(x=x, y=y)
def getWifiState(self):
    '''Gets the Wi-Fi enabled state.

    @return: One of WIFI_STATE_DISABLED, WIFI_STATE_DISABLING,
        WIFI_STATE_ENABLED, WIFI_STATE_ENABLING, WIFI_STATE_UNKNOWN
    '''
    result = self.device.shell('dumpsys wifi')
    if result:
        # The first line of `dumpsys wifi` output reports the enabled state.
        state = result.splitlines()[0]
        if self.WIFI_IS_ENABLED_RE.match(state):
            return self.WIFI_STATE_ENABLED
        elif self.WIFI_IS_DISABLED_RE.match(state):
            return self.WIFI_STATE_DISABLED
        # `print >> sys.stderr, ...` was Python-2-only syntax; this write
        # produces the same output on both Python 2 and 3.
        sys.stderr.write('UNKNOWN WIFI STATE: %s\n' % (state,))
    return self.WIFI_STATE_UNKNOWN
def get_job(db, job_id, username=None):
    """Resolve *job_id* to a job record.

    If job_id is negative, return the last calculation of the current
    user, otherwise look the job up by its id.

    :param db: a :class:`openquake.server.dbapi.Db` instance
    :param job_id: a job ID (can be negative and can be nonexisting)
    :param username: an user name (if None, ignore it)
    :returns: a valid job or None if the original job ID was invalid
    """
    job_id = int(job_id)
    if job_id > 0:
        # Direct lookup by id, optionally restricted to the user.
        fields = {'id': job_id}
        if username:
            fields['user_name'] = username
        try:
            return db('SELECT * FROM job WHERE ?A', fields, one=True)
        except NotFound:
            return None
    # Negative job_id: pick from the -job_id most recent jobs.
    if username:
        joblist = db('SELECT * FROM job WHERE user_name=?x '
                     'ORDER BY id DESC LIMIT ?x', username, -job_id)
    else:
        joblist = db('SELECT * FROM job ORDER BY id DESC LIMIT ?x', -job_id)
    return joblist[-1] if joblist else None
def avail_images(call=None):
    '''Return available Linode images.

    CLI Example:

    .. code-block:: bash

        salt-cloud --list-images my-linode-config
        salt-cloud -f avail_images my-linode-config
    '''
    if call == 'action':
        raise SaltCloudException(
            'The avail_images function must be called with -f or --function.'
        )
    response = _query('avail', 'distributions')
    # Map each distribution's label to its full record.
    return {item['LABEL']: item for item in response['DATA']}
def get_residual_norms(H, self_adjoint=False):
    '''Compute relative residual norms from a Hessenberg matrix.

    It is assumed that the initial guess is chosen as zero.
    '''
    H = H.copy()
    n_rows, n_cols = H.shape
    # Right-hand side e_1 in the Krylov basis (zero initial guess).
    rhs = numpy.eye(n_rows, 1, dtype=H.dtype)
    resnorms = [1.]
    for k in range(n_rows - 1):
        # Givens rotation eliminating the subdiagonal entry H[k+1, k].
        rot = Givens(H[k:k + 2, [k]])
        if self_adjoint:
            # Tridiagonal structure: only a three-column band is affected.
            H[k:k + 2, k:k + 3] = rot.apply(H[k:k + 2, k:k + 3])
        else:
            H[k:k + 2, k:] = rot.apply(H[k:k + 2, k:])
        rhs[k:k + 2] = rot.apply(rhs[k:k + 2])
        resnorms.append(numpy.abs(rhs[k + 1, 0]))
    if n_rows == n_cols:
        # Square Hessenberg: the system is solved exactly at the last step.
        resnorms.append(0.)
    return numpy.array(resnorms)
def from_ranges(ranges, name, data_key, start_key='offset', length_key='length'):
    """Create a list of commands from a list of ranges.

    Each range is converted to two commands: a start_* and a stop_*.
    """
    commands = []
    for rng in ranges:
        begin = rng[start_key]
        end = begin + rng[length_key]
        commands.extend(Command.start_stop(name, begin, end, rng[data_key]))
    return commands
def _warning(self, msg, node_id, ex, *args, **kwargs):
    """Handle an error message raised while dispatching *node_id*.

    .. note:: If `self.raises` is True the dispatcher interrupts the
        dispatch when an error occurs, otherwise it logs a warning.
    """
    raises = self.raises(ex) if callable(self.raises) else self.raises

    if raises and isinstance(ex, DispatcherError):
        # Re-raise a dispatcher error enriched with this solution.
        ex.update(self)
        raise ex

    self._errors[node_id] = msg % ((node_id, ex) + args)
    node_id = '/'.join(self.full_name + (node_id,))

    if raises:
        raise DispatcherError(msg, node_id, ex, *args, sol=self, **kwargs)
    kwargs.setdefault('exc_info', 1)
    log.error(msg, node_id, ex, *args, **kwargs)
def _cursor_down ( self , value ) :
"""Moves the cursor down by ` ` value ` ` .""" | self . _cursor . clearSelection ( )
if self . _cursor . atEnd ( ) :
self . _cursor . insertText ( '\n' )
else :
self . _cursor . movePosition ( self . _cursor . Down , self . _cursor . MoveAnchor , value )
self . _last_cursor_pos = self . _cursor . position ( ) |
def unmarkelect_comment(self, msg_data_id, index, user_comment_id):
    """Remove a comment from the featured ("elected") set."""
    payload = {
        'msg_data_id': msg_data_id,
        'index': index,
        'user_comment_id': user_comment_id,
    }
    return self._post('comment/unmarkelect', data=payload)
def surface_trim_tessellate(v1, v2, v3, v4, vidx, tidx, trims, tessellate_args):
    """Triangular tessellation algorithm for trimmed surfaces.

    This function can be directly used as an input to :func:`.make_triangle_mesh`
    using ``tessellate_func`` keyword argument.

    Classifies the four quad corners against the trim curves (winding-number
    test), intersects the quad edges with the trim-curve polylines, inserts
    intersection points as new vertices, triangulates the resulting polygon,
    and finally discards triangles whose center lies inside a trim.

    :param v1: vertex 1
    :type v1: Vertex
    :param v2: vertex 2
    :type v2: Vertex
    :param v3: vertex 3
    :type v3: Vertex
    :param v4: vertex 4
    :type v4: Vertex
    :param vidx: vertex numbering start value
    :type vidx: int
    :param tidx: triangle numbering start value
    :type tidx: int
    :param trims: trim curves
    :type trims: list, tuple
    :param tessellate_args: tessellation arguments (unused here)
    :type tessellate_args: dict
    :return: lists of vertex and triangle objects in (vertex_list, triangle_list) format
    :type: tuple
    """
    # Tolerance value
    tol = 10e-8
    tols = tol ** 2
    # Per-corner (u, v) nudge directions, pushing each corner slightly
    # toward the quad interior before the point-in-polygon test.
    vtol = ((tols, tols), (-tols, tols), (-tols, -tols), (tols, -tols))

    # Start processing vertices: classify each corner against every trim.
    vertices = [v1, v2, v3, v4]
    for idx in range(len(vertices)):
        for trim in trims:
            # A 'reversed' trim keeps its interior; a normal trim removes it.
            cf = 1 if trim.opt['reversed'] else -1
            uv = [p + (cf * t) for p, t in zip(vertices[idx].uv, vtol[idx])]
            if linalg.wn_poly(uv, trim.evalpts):
                if trim.opt['reversed']:
                    # Inside a reversed trim: keep, unless another trim
                    # already forced this vertex to be trimmed.
                    if vertices[idx].opt_get('trim') is None or not vertices[idx].opt_get('trim'):
                        vertices[idx].inside = False
                        vertices[idx].opt = ['no_trim', True]  # always triangulate
                else:
                    vertices[idx].inside = True
                    vertices[idx].opt = ['trim', True]  # always trim
            else:
                if trim.opt['reversed']:
                    # Outside a reversed trim means outside the kept region.
                    if vertices[idx].opt_get('no_trim') is None or not vertices[idx].opt_get('no_trim'):
                        vertices[idx].inside = True

    # If all vertices are marked as inside, then don't generate triangles
    vertices_inside = [v1.inside, v2.inside, v3.inside, v4.inside]
    if all(vertices_inside):
        return [], []

    # Generate edges as rays (quad boundary in parametric space)
    edge1 = ray.Ray(v1.uv, v2.uv)
    edge2 = ray.Ray(v2.uv, v3.uv)
    edge3 = ray.Ray(v3.uv, v4.uv)
    edge4 = ray.Ray(v4.uv, v1.uv)

    # Put all edge rays to a list
    edges = [edge1, edge2, edge3, edge4]

    # List of intersections: entries are [edge index, t on edge, uv point]
    intersections = []

    # Loop all trim curves
    for trim in trims:
        pts = trim.evalpts
        for idx in range(len(pts) - 1):
            # Generate a ray from trim curve's evaluated points
            trim_ray = ray.Ray(pts[idx], pts[idx + 1])

            # Intersection test of the trim curve's ray with all edges
            for idx2 in range(len(edges)):
                t1, t2, status = ray.intersect(edges[idx2], trim_ray)
                if status == ray.RayIntersection.INTERSECT:
                    # Accept only intersections within both segments
                    # (with tolerance at the endpoints).
                    if 0.0 - tol < t1 < 1.0 + tol and 0.0 - tol < t2 < 1.0 + tol:
                        intersections.append([idx2, t1, edges[idx2].eval(t=t1)])

    # Add first vertex to the end of the list to close the corner loop
    vertices.append(v1)

    # Local vertex numbering index
    nvi = 0

    # Process vertices and intersections
    tris_vertices = []
    for idx in range(0, len(vertices) - 1):
        # If two consecutively-ordered vertices are inside the trim, there should be no intersection
        if vertices[idx].inside and vertices[idx + 1].inside:
            continue

        # If current vertex is not inside the trim, add it to the vertex list
        if not vertices[idx].inside:
            tris_vertices.append(vertices[idx])

        # If next vertex is inside the trim, there might be an intersection
        if (not vertices[idx].inside and vertices[idx + 1].inside) or (vertices[idx].inside and not vertices[idx + 1].inside):
            # Try to find all intersections (multiple intersections are possible)
            isects = []
            for isect in intersections:
                if isect[0] == idx:
                    isects.append(isect)
            if isects:
                # Find minimum t value and therefore minimum uv value of the intersection
                t_min = 1.0 + tol
                uv_min = []
                for isect in isects:
                    if isect[1] < t_min:
                        t_min = isect[1]
                        uv_min = isect[2]

                # Check uv for min max: snap near-boundary values to 0 or 1
                for pi in range(2):
                    if uv_min[pi] - tol <= 0.0 <= uv_min[pi] + tol:
                        uv_min[pi] = 0.0
                    elif uv_min[pi] - tol <= 1.0 <= uv_min[pi] + tol:
                        uv_min[pi] = 1.0

                # Create a vertex with the minimum uv value
                vert = Vertex()
                vert.id = vidx + nvi
                vert.uv = uv_min

                # Add to lists
                tris_vertices.append(vert)

                # Increment local vertex numbering index
                nvi += 1

    # Triangulate vertices
    tris = polygon_triangulate(tidx, *tris_vertices)

    # Check again if the barycentric coordinates of the triangles are inside
    # (same classification logic as for the corner vertices above).
    for idx in range(len(tris)):
        tri_center = linalg.triangle_center(tris[idx], uv=True)
        for trim in trims:
            if linalg.wn_poly(tri_center, trim.evalpts):
                if trim.opt['reversed']:
                    if tris[idx].opt_get('trim') is None or not tris[idx].opt_get('trim'):
                        tris[idx].inside = False
                        tris[idx].opt = ['no_trim', True]  # always triangulate
                else:
                    tris[idx].inside = True
                    tris[idx].opt = ['trim', True]  # always trim
            else:
                if trim.opt['reversed']:
                    if tris[idx].opt_get('no_trim') is None or not tris[idx].opt_get('no_trim'):
                        tris[idx].inside = True

    # Extract triangles which are not inside the trim
    tris_final = []
    for tri in tris:
        if not tri.inside:
            tris_final.append(tri)

    return tris_vertices, tris_final
def process_request(self, request_info=None, **kwargs):
    """The AuthorizationRequest endpoint.

    :param request_info: The authorization request as a dictionary
    :return: dictionary
    """
    if isinstance(request_info, AuthorizationErrorResponse):
        # Upstream parsing already produced an error response; pass it through.
        return request_info

    _cid = request_info["client_id"]
    cinfo = self.endpoint_context.cdb[_cid]

    # Pop the cookie out of kwargs (if present) so it is not forwarded twice.
    try:
        cookie = kwargs['cookie']
    except KeyError:
        cookie = ''
    else:
        del kwargs['cookie']

    # Determine the requested user, preferring an explicitly proposed user
    # over a 'login_hint' resolved through the context's lookup table.
    if proposed_user(request_info):
        kwargs['req_user'] = proposed_user(request_info)
    else:
        try:
            _login_hint = request_info['login_hint']
        except KeyError:
            pass
        else:
            if self.endpoint_context.login_hint_lookup:
                kwargs['req_user'] = self.endpoint_context.login_hint_lookup[_login_hint]

    info = self.setup_auth(request_info, request_info["redirect_uri"], cinfo, cookie, **kwargs)

    if 'error' in info:
        return info

    try:
        _function = info['function']
    except KeyError:  # already authenticated
        logger.debug("- authenticated -")
        logger.debug("AREQ keys: %s" % request_info.keys())
        res = self.authz_part2(info['user'], info['authn_event'], request_info, cookie=cookie)
        return res
    else:
        try:  # Run the authentication function
            return {'http_response': _function(**info['args']), 'return_uri': request_info["redirect_uri"]}
        except Exception as err:
            logger.exception(err)
            return {'http_response': 'Internal error: {}'.format(err)}
def tag_lookup(request):
    """JSON endpoint that returns a list of potential tags.

    Used for upload template autocomplete.
    """
    prefix_slug = slugify(request.GET['tag'].strip())
    candidates = Tag.objects.values('word').filter(slug__startswith=prefix_slug)
    payload = json.dumps([candidate['word'] for candidate in candidates])
    return HttpResponse(payload, content_type='application/json')
def ApplicationDatagram(self, app, stream, text):
    """Called when a datagram is received over a stream.

    Decodes the base64 payload and forwards it over a fresh UDP socket.

    NOTE(review): ``stype`` and ``addr`` are not defined in this scope --
    presumably module-level or closure variables set elsewhere; confirm
    they are bound before this callback fires, otherwise this raises
    NameError. Note also this is Python-2-only syntax (``except ..., e``
    and the print statement), and ``base64.decodestring`` is removed in
    Python 3.9+.
    """
    # we should only proceed if we are in UDP mode
    if stype != socket.SOCK_DGRAM:
        return
    # decode the data
    data = base64.decodestring(text)
    # open an UDP socket
    sock = socket.socket(type=stype)
    # send the data (best effort: errors are only reported, not raised)
    try:
        sock.sendto(data, addr)
    except socket.error, e:
        print 'error: %s' % e
def assertSignalNotFired(self, signal, *args, **kwargs):
    """Assert that a signal was NOT fired with the given arguments.

    :param signal:
        The :class:`Signal` that should not have been fired.
        Typically this is ``SomeClass.on_some_signal`` reference
    :param args:
        List of positional arguments passed to the signal handler
    :param kwargs:
        List of keyword arguments passed to the signal handler
    """
    event = (signal, args, kwargs)
    message = "\nSignal unexpectedly fired: {}\n".format(event)
    self.assertNotIn(event, self._events_seen, message)
def set_media_params_after(self, params):
    """On partial runs, limit media queries to 'recently' uploaded items.

    'Recently' means 90 days before the date we're processing content
    from. The wp.com REST API doesn't have a way to limit based on media
    modification date, but this should be relatively close.

    :param params: the GET params dict, which may be updated to include
        the "after" key
    :return: None (side effect: possibly modified params dict)
    """
    if self.full:
        # Full runs fetch everything; leave the params untouched.
        return
    # datetimes are always truthy, so `or` only falls back when it's None.
    reference = self.modified_after or datetime.utcnow()
    params["after"] = (reference - timedelta(days=90)).isoformat()
def force_lazy_import(name):
    """Import every module attribute hanging off *name*.

    Iterates a snapshot list rather than a generator so that this library
    works with lazy imports.
    """
    module = import_object(name)
    for attr, val in list(getattr(module, '__dict__', {}).items()):
        if getattr(val, '__module__', None):
            import_object(name + '.' + attr)
def calcAFunc(self, MaggNow, AaggNow):
    '''Calculate a new aggregate savings rule based on the history of the
    aggregate savings and aggregate market resources from a simulation.
    Calculates an aggregate saving rule for each macroeconomic Markov state.

    Parameters
    ----------
    MaggNow : [float]
        List of the history of the simulated aggregate market resources
        for an economy.
    AaggNow : [float]
        List of the history of the simulated aggregate savings for an economy.

    Returns
    -------
    (unnamed) : CapDynamicRule
        Object containing new saving rules for each Markov state.
    '''
    verbose = self.verbose
    discard_periods = self.T_discard
    # Throw out the first T periods to allow the simulation to approach the SS
    update_weight = 1. - self.DampingFac
    # Proportional weight to put on new function vs old function parameters
    total_periods = len(MaggNow)

    # Trim the histories of M_t and A_t and convert them to logs.
    # Note: A_t is regressed on the *previous* period's M (offset by one).
    logAagg = np.log(AaggNow[discard_periods:total_periods])
    logMagg = np.log(MaggNow[discard_periods - 1:total_periods - 1])
    MrkvHist = self.MrkvNow_hist[discard_periods - 1:total_periods - 1]

    # For each Markov state, regress A_t on M_t and update the saving rule
    AFunc_list = []
    rSq_list = []
    for i in range(self.MrkvArray.shape[0]):
        # Boolean mask of the periods spent in Markov state i.
        these = i == MrkvHist
        slope, intercept, r_value, p_value, std_err = stats.linregress(logMagg[these], logAagg[these])
        # if verbose:
        #     plt.plot(logMagg[these], logAagg[these], '.')

        # Make a new aggregate savings rule by combining the new regression
        # parameters with the previous guess (damped update).
        intercept = update_weight * intercept + (1.0 - update_weight) * self.intercept_prev[i]
        slope = update_weight * slope + (1.0 - update_weight) * self.slope_prev[i]
        AFunc_list.append(AggregateSavingRule(intercept, slope))
        # Make a new next-period capital function
        rSq_list.append(r_value ** 2)

        # Save the new values as "previous" values for the next iteration
        self.intercept_prev[i] = intercept
        self.slope_prev[i] = slope

    # Plot aggregate resources vs aggregate savings for this run and print the new parameters
    if verbose:
        print('intercept=' + str(self.intercept_prev) + ', slope=' + str(self.slope_prev) + ', r-sq=' + str(rSq_list))
        # plt.show()

    return AggShocksDynamicRule(AFunc_list)
def p_struct(self, p):
    '''struct : STRUCT IDENTIFIER '{' field_seq '}' annotations'''
    # NOTE: the docstring above is the PLY grammar rule and must not change.
    struct_name = p[2]
    p[0] = ast.Struct(
        name=struct_name,
        fields=p[4],
        annotations=p[6],
        lineno=p.lineno(2),
    )
def read_config_info(ini_file):
    """Read the INI file into a nested dictionary.

    Args:
        ini_file - path to the file

    Returns:
        A dictionary of stuff from the INI file,
        ``{section: {option: value}}`` with option-name case preserved

    Exits:
        1 - if problems are encountered
    """
    try:
        config = RawConfigParser()
        # Preserve option-name case (the default optionxform lower-cases).
        config.optionxform = lambda option: option
        config.read(ini_file)
        return {
            section: {
                option: config.get(section, option)
                for option in config.options(section)
            }
            for section in config.sections()
        }
    except Exception as wtf:
        logging.error('Exception caught in read_config_info(): {}'.format(wtf))
        traceback.print_exc(file=sys.stdout)
        return sys.exit(1)
def to_html(self):
    """Render a Cell MessageElement as html.

    :returns: The html representation of the Cell MessageElement
    :rtype: basestring
    """
    # Apply bootstrap alignment classes first.
    # Fixed: the original compared with `is` (`self.align is 'left'`), which
    # tests object identity and only works by CPython string-interning luck;
    # equality via a lookup table is the correct check.
    alignment_classes = {
        'left': 'text-left',
        'right': 'text-right',
        'center': 'text-center',
    }
    align_class = alignment_classes.get(self.align)
    if align_class is not None:
        if self.style_class is None:
            self.style_class = align_class
        else:
            self.style_class += ' ' + align_class

    # Special case for when we want to put a nested table in a cell.
    # We don't use isinstance because of recursive imports with table.
    class_name = self.content.__class__.__name__
    if class_name in ['BulletedList', 'Table', 'Image', 'Message']:
        html = self.content.to_html()
    else:
        html = self.content.to_html(wrap_slash=self.wrap_slash)

    # Check if we have a header or not, then render.
    if self.header_flag is True:
        return '<th%s colspan=%i>%s</th>\n' % (self.html_attributes(), self.span, html)
    else:
        return '<td%s colspan=%i>%s</td>\n' % (self.html_attributes(), self.span, html)
def skip_signatures_and_duplicates_concat_well_known_metadata(cls, default_dup_action=None, additional_rules=None):
    """Produce a rule set useful in many deploy jar creation contexts.

    The rule set skips duplicate entries by default, retaining the 1st
    encountered. In addition it has the following special handling:

    - jar signature metadata is dropped
    - jar indexing files INDEX.LIST are dropped
    - ``java.util.ServiceLoader`` provider-configuration files are
      concatenated in the order encountered

    :param default_dup_action: An optional default action to take for
        duplicates. Defaults to `Duplicate.SKIP` if not specified.
    :param additional_rules: Optionally one or more jar rules to add to
        those described above.
    :returns: JarRules
    """
    dup_action = Duplicate.validate_action(default_dup_action or Duplicate.SKIP)
    extra_rules = assert_list(additional_rules, expected_type=(Duplicate, Skip))
    base_rules = [
        Skip(r'^META-INF/[^/]+\.SF$'),   # signature file
        Skip(r'^META-INF/[^/]+\.DSA$'),  # default signature alg. file
        Skip(r'^META-INF/[^/]+\.RSA$'),  # default signature alg. file
        # Interferes with Class-Path: see `man jar` for the `i` option.
        Skip(r'^META-INF/INDEX.LIST$'),
        # 1 svc fqcn per line
        Duplicate(r'^META-INF/services/', Duplicate.CONCAT_TEXT),
    ]
    return JarRules(rules=base_rules + extra_rules, default_dup_action=dup_action)
def corr(x, y=None, method=None):
    """Compute the correlation (matrix) for the input RDD(s) using the
    specified method.

    Methods currently supported: pearson (default), spearman.

    If a single RDD of Vectors is passed in, a correlation matrix comparing
    the columns in the input RDD is returned; use ``method=`` to select the
    method for the single-RDD form. If two RDDs of floats are passed in, a
    single float is returned.

    Example::

        Statistics.corr(x, y)                 # pearson correlation (float)
        Statistics.corr(x, y, "spearman")     # spearman correlation (float)
        Statistics.corr(rdd)                  # correlation matrix of columns

    :param x: an RDD of vector for which the correlation matrix is to be
        computed, or an RDD of float of the same cardinality as y when y is
        specified.
    :param y: an RDD of float of the same cardinality as x.
    :param method: String specifying the method to use for computing
        correlation. Supported: `pearson` (default), `spearman`
    :return: Correlation matrix comparing columns in x, or a single float
        when both x and y are given.
    """
    # Since it's legal for users to pass the method name as the second
    # positional argument by mistake, detect that case explicitly.
    # Idiom fix: isinstance() instead of type() == str (also covers str
    # subclasses).
    if isinstance(y, str):
        raise TypeError("Use 'method=' to specify method name.")
    if not y:
        # Single-RDD form: correlation matrix over the vector columns.
        return callMLlibFunc("corr", x.map(_convert_to_vector), method).toArray()
    else:
        # Two-RDD form: a single correlation value between two float RDDs.
        return callMLlibFunc("corr", x.map(float), y.map(float), method)
def convert_complex_output(out_in):
    """Convert complex values in the output dictionary `out_in` to pairs of
    real and imaginary parts.

    Non-complex entries are passed through unchanged; each complex entry is
    replaced by two shallow copies holding the real and imaginary parts
    under the keys 'real(<key>)' and 'imag(<key>)'.
    """
    out = {}
    # BUGFIX: .iteritems() is Python 2 only; .items() works on both 2 and 3.
    for key, val in out_in.items():
        if val.data.dtype in complex_types:
            rval = copy(val)
            rval.data = val.data.real
            out['real(%s)' % key] = rval
            ival = copy(val)
            ival.data = val.data.imag
            out['imag(%s)' % key] = ival
        else:
            out[key] = val
    return out
def dispatch(self, *args, **kwargs):
    """Decorate the view dispatcher with csrf_exempt.

    Forwards straight to the parent dispatcher; per the original docstring
    the csrf_exempt behavior is applied by a decorator on this method
    (the decorator itself is not visible in this block -- TODO confirm).
    """
    return super(EntryTrackback, self).dispatch(*args, **kwargs)
def infer_type(expr, scope):
    """Try to infer the type of x[y] if y is a known value (literal)."""
    # Without a literal key there is nothing to reflect on.
    if not isinstance(expr.key, ast.Literal):
        return protocol.AnyType
    literal_key = expr.key.value
    container_type = infer_type(expr.value, scope)
    try:
        # Associative types are not subject to scoping rules, so reflecting
        # through IAssociative is sufficient here.
        reflected = associative.reflect(container_type, literal_key)
    except NotImplementedError:
        return protocol.AnyType
    return reflected or protocol.AnyType
def _get_all_children(self, ):
    """Build a human-readable listing of this node's children.

    Emits one ' child = ...' line per child; for each child either its
    grandchildren are listed or ' child = None' is appended. Returns an
    empty string when the node has no children.
    """
    lines = []
    for child in (self.child_nodes or []):
        lines.append(' child = ' + str(child) + '\n')
        grandchildren = child.child_nodes
        if grandchildren:
            lines.extend(' child = ' + str(g) + '\n' for g in grandchildren)
        else:
            lines.append(' child = None\n')
    return ''.join(lines)
def get_series(self, series):
    """Returns a census series API handler.

    Maps a series code to the matching attribute of ``self.census``; note
    that 'acs1' maps to the ``acs1dp`` handler. Returns None for unknown
    series codes.
    """
    handler_attrs = {
        'acs1': 'acs1dp',
        'acs5': 'acs5',
        'sf1': 'sf1',
        'sf3': 'sf3',
    }
    attr = handler_attrs.get(series)
    if attr is None:
        return None
    return getattr(self.census, attr)
def addAttachment(self, oid, file_path):
    """Adds an attachment to a feature service

    Input:
        oid - string - OBJECTID value to add attachment to
        file_path - string - path to file
    Output:
        JSON Response, or an explanatory string when the service does not
        support attachments.
    """
    if self.hasAttachments == True:
        attachURL = self._url + "/%s/addAttachment" % oid
        params = {'f': 'json'}
        # NOTE: a dead `parsed = urlparse.urlparse(attachURL)` local was
        # removed; its result was never used.
        files = {'attachment': file_path}
        res = self._post(url=attachURL, param_dict=params, files=files,
                         securityHandler=self._securityHandler,
                         proxy_port=self._proxy_port,
                         proxy_url=self._proxy_url)
        return self._unicode_convert(res)
    else:
        return "Attachments are not supported for this feature service."
def set(self, lang, instance):
    """Cache ``instance`` with its translation attributes loaded for ``lang``.

    The whole cache is dropped first when it has grown past its size limit;
    the instance is then stored under the key built by
    ``TransCache._create_key``.
    """
    if self._cache_is_too_big():
        self.cache = {}
    instance._translations_are_cached = True
    instance.load_translations(lang=lang)
    cache_key = TransCache._create_key(lang, instance)
    self.cache[cache_key] = instance
def _process_trait_mappings(self, raw, limit=None):
    """Map AQTL traits to VT / LPT / CMO ontology terms.

    Reads the trait-mapping CSV and, per row, creates a class for the AQTL
    trait and links it to the listed vocabulary terms: equivalent-class
    edges for VT ids, xrefs for LPT and CMO ids.

    :param raw: path to the trait-mapping CSV file
    :param limit: unused in this method -- TODO confirm intended
    :return: None
    """
    if self.test_mode:
        graph = self.testgraph
    else:
        graph = self.graph
    line_counter = 0
    model = Model(graph)
    with open(raw, 'r') as csvfile:
        filereader = csv.reader(csvfile, delimiter=',', quotechar='\"')
        next(filereader, None)
        # skip header line
        for row in filereader:
            line_counter += 1
            # need to skip the last line
            if len(row) < 8:
                LOG.info("skipping line %d: %s", line_counter, '\t'.join(row))
                continue
            (vto_id, pto_id, cmo_id, ato_column, species, trait_class,
             trait_type, qtl_count) = row
            # The ATO id is embedded as "[ATO #nnn] label": strip brackets
            # and rewrite the prefix into an AQTLTrait CURIE.
            ato_id = re.sub(r'ATO #', 'AQTLTrait:',
                            re.sub(r'\].*', '', re.sub(r'\[', '', ato_column)))
            ato_id = ato_id.strip()
            # The label is whatever follows the closing bracket.
            ato_label = re.sub(r'.*\]\s*', '', ato_column)
            model.addClassToGraph(ato_id, ato_label.strip())
            if re.match(r'VT:.*', vto_id):
                model.addClassToGraph(vto_id, None)
                model.addEquivalentClass(ato_id, vto_id)
            if re.match(r'LPT:.*', pto_id):
                model.addClassToGraph(pto_id, None)
                model.addXref(ato_id, pto_id)
            if re.match(r'CMO:.*', cmo_id):
                model.addClassToGraph(cmo_id, None)
                model.addXref(ato_id, cmo_id)
    LOG.info("Done with trait mappings")
    return
def delete_credit_card_payment_by_id(cls, credit_card_payment_id, **kwargs):
    """Delete CreditCardPayment

    Delete an instance of CreditCardPayment by its ID. The HTTP request is
    synchronous by default; pass ``async=True`` to get the request thread
    back instead of the response data.

    >>> thread = api.delete_credit_card_payment_by_id(credit_card_payment_id, async=True)
    >>> result = thread.get()

    :param async bool
    :param str credit_card_payment_id: ID of creditCardPayment to delete. (required)
    :return: None, or the request thread when called asynchronously.
    """
    kwargs['_return_http_data_only'] = True
    # Both paths delegate to the same helper: asynchronously it returns the
    # thread, synchronously the unwrapped response data.
    return cls._delete_credit_card_payment_by_id_with_http_info(
        credit_card_payment_id, **kwargs)
def import_doc(self, file_uris, docsearch, current_doc=None):
    """Import the specified PDF files.

    Recurses into each URI, skipping non-PDF files and PDFs whose hash is
    already in the index, and builds one PdfDoc per newly imported file.

    :param file_uris: iterable of URIs to import (files or directories)
    :param docsearch: document index used for duplicate detection
    :param current_doc: unused here; kept for importer interface
        compatibility -- TODO confirm
    :return: an ImportResult summarizing imported URIs, documents and pages
    """
    doc = None
    docs = []
    pages = []
    file_uris = [self.fs.safe(uri) for uri in file_uris]
    imported = []
    for file_uri in file_uris:
        logger.info("Importing PDF from '%s'" % (file_uri))
        idx = 0
        for child in self.fs.recurse(file_uri):
            # PDF parsing can allocate heavily; collect between files.
            gc.collect()
            if not self.check_file_type(child):
                continue
            h = PdfDoc.hash_file(self.fs, child)
            if docsearch.is_hash_in_index(h):
                logger.info("Document %s already found in the index. Skipped", child)
                continue
            imported.append(child)
            doc = PdfDoc(self.fs, docsearch.rootdir)
            error = doc.import_pdf(child)
            if error:
                # Failed imports are counted in `imported` but produce no doc.
                continue
            docs.append(doc)
            pages += [p for p in doc.pages]
            idx += 1
    return ImportResult(
        imported_file_uris=imported, select_doc=doc, new_docs=docs,
        new_docs_pages=pages,
        stats={
            _("PDF"): len(docs),
            _("Document(s)"): len(docs),
            _("Page(s)"): sum([d.nb_pages for d in docs]),
        }, )
def _get_request_obj(csr):
    '''Returns a CSR object based on PEM text.'''
    # Accepts either raw PEM text or a file path (handled by _text_or_file),
    # then isolates the CERTIFICATE REQUEST entry before loading it.
    pem = get_pem_entry(_text_or_file(csr), pem_type='CERTIFICATE REQUEST')
    return M2Crypto.X509.load_request_string(pem)
def delete_record(self, identifier=None, rtype=None, name=None, content=None, **kwargs):
    """Delete an existing record.

    If the record does not exist, do nothing. When an identifier is given it
    is used directly; otherwise the record is looked up by type, name and
    content. The legacy ``type`` keyword is still honored with a
    DeprecationWarning.
    """
    legacy_type = kwargs.get('type')
    if legacy_type and not rtype:
        warnings.warn('Parameter "type" is deprecated, use "rtype" instead.',
                      DeprecationWarning)
        rtype = legacy_type
    return self._delete_record(identifier=identifier, rtype=rtype,
                               name=name, content=content)
def __move(self, current_pos):
    '''Move in the feature map.

    Args:
        current_pos:    The current position.

    Returns:
        The next position, clamped to [0, len(var_arr) - 1].
    '''
    last_index = self.var_arr.shape[0] - 1
    if self.__move_range is None:
        # Unrestricted move: jump anywhere in the map.
        return np.random.randint(last_index)
    candidate = np.random.randint(current_pos - self.__move_range,
                                  current_pos + self.__move_range)
    # Clamp the sampled position into the valid index range.
    if candidate < 0:
        return 0
    if candidate >= last_index:
        return last_index
    return candidate
def unsafe_execute(self, result=None):
    """Un-wrapped execution; may raise exceptions (no error handling here).

    :param result: optional previous result that is folded into this
        task's result before running
    :return: Execution result
    :rtype: kser.result.Result
    """
    if result:
        self.result += result
    # Run inside a tracing span so _postrun/_run/_onsuccess are all
    # captured under this task's span; the span is tagged with the task
    # object itself before closing.
    with opentracing.tracer.start_span(obj=self, child_of=KserSpan.extract_span(self), span_factory=KserSpan) as span:
        self.result = self._onsuccess(self._postrun(self._run()))
        span.obj = self
    return self.result
def _select_input(self):
    """Select current line (without selecting console prompt)"""
    end_line, end_index = self.get_position('eof')
    if self.current_prompt_pos is not None:
        start_line, start_index = self.current_prompt_pos
    else:
        # No prompt recorded: collapse the selection (start == end).
        start_line, start_index = end_line, end_index
    self.setSelection(start_line, start_index, end_line, end_index)
def from_dict(d):
    """Re-create the noise model from a dictionary representation.

    :param Dict[str, Any] d: The dictionary representation.
    :return: The restored noise model.
    :rtype: NoiseModel
    """
    gates = [KrausModel.from_dict(entry) for entry in d["gates"]]
    assignment_probs = {int(qubit): np.array(probs)
                        for qubit, probs in d["assignment_probs"].items()}
    return NoiseModel(gates=gates, assignment_probs=assignment_probs)
def find_button(browser, value):
    """Find a button with the given value.

    Searches for the following different kinds of buttons:

        <input type="submit">
        <input type="reset">
        <input type="button">
        <input type="image">
        <button>
        <{a, p, div, span, ...} role="button">

    Returns: an :class:`ElementSelector`
    """
    field_types = ('submit', 'reset', 'button-element', 'button',
                   'image', 'button-role', )
    # Combine the per-type selectors with `+`, seeding from the first one
    # (equivalent to reduce(operator.add, ...) over a non-empty sequence).
    combined = None
    for field_type in field_types:
        selector = find_field_with_value(browser, field_type, value)
        combined = selector if combined is None else combined + selector
    return combined
def list_all_defaults(self):  # type: () -> Dict[str, List[SkillEntry]]
    """Returns {'skill_group': [SkillEntry('name')]}"""
    by_name = {skill.name: skill for skill in self.list()}
    defaults = {group: [] for group in self.SKILL_GROUPS}
    for section_name, skill_names in self.repo.get_default_skill_names():
        resolved = []
        for skill_name in skill_names:
            skill = by_name.get(skill_name)
            if skill is None:
                LOG.warning('No such default skill: ' + skill_name)
            else:
                resolved.append(skill)
        defaults[section_name] = resolved
    return defaults
def StripTypeInfo(rendered_data):
    """Strips type information from rendered data. Useful for debugging.

    Recursively walks lists, tuples and dicts; a dict carrying both "value"
    and "type" keys is collapsed to its stripped "value". Anything else is
    returned unchanged.
    """
    if isinstance(rendered_data, (list, tuple)):
        return [StripTypeInfo(d) for d in rendered_data]
    elif isinstance(rendered_data, dict):
        if "value" in rendered_data and "type" in rendered_data:
            return StripTypeInfo(rendered_data["value"])
        # .items() replaces the six `iteritems` helper; it works on both
        # Python 2 and 3 and drops the external dependency.
        return {k: StripTypeInfo(v) for k, v in rendered_data.items()}
    else:
        return rendered_data
def send_news_message(self, user_id, media_id, kf_account=None):
    """Send a news (mpnews) message built from a permanent media asset.

    :param user_id: target user ID (the `source` of a received `Message`)
    :param media_id: media asset ID
    :param kf_account: customer-service account to send as; None (the
        default) leaves the account unspecified
    :return: the JSON response data
    """
    payload = {
        "touser": user_id,
        "msgtype": "mpnews",
        "mpnews": {"media_id": media_id},
    }
    if kf_account is not None:
        payload['customservice'] = {'kf_account': kf_account}
    return self.post(url="https://api.weixin.qq.com/cgi-bin/message/custom/send",
                     data=payload)
def implode_multi_values(self, name, data):
    """Collapse AngularJS-style "<name>.<n>" POST keys into a single
    multi-value entry suitable for Django's form validation.
    """
    prefix = name + '.'
    matching_keys = [key for key in data.keys() if key.startswith(prefix)]
    # Each matching key holds a single-element list; pop it and keep the
    # first value.
    values = [data.pop(key)[0] for key in matching_keys]
    if values:
        data.setlist(name, values)
def recursive_get(obj, key):
    '''Get an attribute or a key recursively.

    :param obj: The object to fetch attribute or key on
    :type obj: object|dict
    :param key: Either a string in dotted-notation or an array of strings
    :type key: string|list|tuple
    '''
    if not obj or not key:
        return
    # BUGFIX: `basestring` only exists on Python 2; `str` works everywhere.
    # list(key) also avoids mutating the caller's list via pop() below.
    parts = key.split('.') if isinstance(key, str) else list(key)
    key = parts.pop(0)
    if isinstance(obj, dict):
        value = obj.get(key, None)
    else:
        value = getattr(obj, key, None)
    return recursive_get(value, parts) if parts else value
def pdfextract_dois(pdf_file):
    """Extract DOIs of references using `pdfextract <https://github.com/CrossRef/pdfextract>`_.

    .. note::
        See ``libbmc.citations.pdf.pdfextract`` function as this one is just
        a wrapper around it. See ``libbmc.citations.plaintext.get_cited_dois``
        as well for the returned value, as it is ultimately called by this
        function.

    :param pdf_file: Path to the PDF file to handle.
    :returns: A dict of cleaned plaintext citations and their associated DOI.
    """
    # pdf-extract produces an XML document; pull the text of every
    # <reference> node and resolve those citations to DOIs.
    xml_output = pdfextract(pdf_file)
    root = ET.fromstring(xml_output)
    citations = [node.text for node in root.iter("reference")]
    return plaintext.get_cited_dois(citations)
def get_logger():
    """Get or create logger (if it does not exist)

    @rtype: RootLogger
    """
    name = Logr.get_logger_name()
    if name not in Logr.loggers:
        # First request for this name: ensure configuration, then build and
        # register the logger with the shared handler.
        Logr.configure_check()
        new_logger = logging.Logger(name)
        new_logger.addHandler(Logr.handler)
        Logr.loggers[name] = new_logger
    return Logr.loggers[name]
def _calculate_progress(self, match):
    '''Calculates the final progress value found by the regex'''
    if self.progress_expr:
        # An explicit expression overrides the plain capture-group value.
        return self._eval_progress(match)
    return safe_float(match.group(1))
def _peek_unicode(self, is_long):  # type: (bool) -> Tuple[Optional[str], Optional[str]]
    """Peeks ahead non-intrusively by cloning then restoring the
    initial state of the parser.

    Returns the unicode value if it's a valid one else None.
    """
    # we always want to restore after exiting this scope
    with self._state(save_marker=True, restore=True):
        if self._current not in {"u", "U"}:
            raise self.parse_error(InternalParserError, "_peek_unicode() entered on non-unicode value")
        self.inc()
        # Dropping prefix
        self.mark()
        # \uXXXX consumes 4 hex digits, \UXXXXXXXX consumes 8.
        if is_long:
            chars = 8
        else:
            chars = 4
        if not self.inc_n(chars):
            # Ran out of input before consuming the full escape.
            value, extracted = None, None
        else:
            extracted = self.extract()
            # NOTE(review): presumably rejects sequences whose first char is
            # 'd'/'D' followed by a non-octal digit -- TODO confirm intent.
            if extracted[0].lower() == "d" and extracted[1].strip("01234567"):
                return None, None
            try:
                value = chr(int(extracted, 16))
            except ValueError:
                # Not valid hex: report the raw text with no decoded value.
                value = None
        return value, extracted
def _validate_entity_cls(self, entity_cls):
    """Validate that Entity is a valid class

    Raises AssertionError when ``entity_cls`` is not an Entity subclass and
    NotSupportedError when it is marked abstract.
    """
    # Import here to avoid cyclic dependency
    from protean.core.entity import Entity
    if not issubclass(entity_cls, Entity):
        raise AssertionError(f'Entity {entity_cls.__name__} must be subclass of `Entity`')
    if entity_cls.meta_.abstract is True:
        raise NotSupportedError(f'{entity_cls.__name__} class has been marked abstract' f' and cannot be instantiated')
def transpose_nested_dictionary(nested_dict):
    """Given a nested dictionary from k1 -> k2 -> value, transpose its outer
    and inner keys so it maps k2 -> k1 -> value.
    """
    # defaultdict(dict) preserved so missing inner keys behave as before.
    transposed = defaultdict(dict)
    for outer_key, inner_mapping in nested_dict.items():
        for inner_key in inner_mapping:
            transposed[inner_key][outer_key] = inner_mapping[inner_key]
    return transposed
def groups_invite(self, *, channel: str, user: str, **kwargs) -> SlackResponse:
    """Invites a user to a private channel.

    Args:
        channel (str): The group id. e.g. 'G1234567890'
        user (str): The user id. e.g. 'U1234567890'
    """
    self._validate_xoxp_token()
    kwargs["channel"] = channel
    kwargs["user"] = user
    return self.api_call("groups.invite", json=kwargs)
def _infer_sig_len(file_name, fmt, n_sig, dir_name, pb_dir=None):
    """Infer the length of a signal from a dat file.

    Parameters
    ----------
    file_name : str
        Name of the dat file
    fmt : str
        WFDB fmt of the dat file
    n_sig : int
        Number of signals contained in the dat file
    pb_dir : str, optional
        PhysioBank directory; when given, the file size is fetched remotely
        instead of from the local filesystem.

    Notes
    -----
    sig_len * n_sig * bytes_per_sample == file_size
    """
    if pb_dir is not None:
        file_size = download._remote_file_size(file_name=file_name, pb_dir=pb_dir)
    else:
        file_size = os.path.getsize(os.path.join(dir_name, file_name))
    return int(file_size / (BYTES_PER_SAMPLE[fmt] * n_sig))
def exec_nb_cmd(self, cmd):
    '''Yield None until cmd finished'''
    collected_out = []
    collected_err = []
    rcode = None
    cmd = self._cmd_str(cmd)
    logmsg = 'Executing non-blocking command: {0}'.format(cmd)
    if self.passwd:
        # Never leak the password into the debug log.
        logmsg = logmsg.replace(self.passwd, ('*' * 6))
    log.debug(logmsg)
    # Accumulate chunks while yielding placeholders, then emit the joined
    # output and final return code as the last item.
    for out, err, rcode in self._run_nb_cmd(cmd):
        if out is not None:
            collected_out.append(out)
        if err is not None:
            collected_err.append(err)
        yield None, None, None
    yield ''.join(collected_out), ''.join(collected_err), rcode
def get_short_annotations(annotations):
    """Converts full GATK annotation names to their shortened versions.

    :param annotations: iterable of full GATK annotation names
    :return: list of names, shortened to match the VCF header where a
        short form exists and passed through unchanged otherwise
    """
    # Annotations need to match VCF header
    short_name = {'QualByDepth': 'QD',
                  'FisherStrand': 'FS',
                  'StrandOddsRatio': 'SOR',
                  'ReadPosRankSumTest': 'ReadPosRankSum',
                  'MappingQualityRankSumTest': 'MQRankSum',
                  'RMSMappingQuality': 'MQ',
                  'InbreedingCoeff': 'ID'}
    return [short_name.get(annotation, annotation) for annotation in annotations]
def mark_offer_as_win(self, offer_id):
    """Mark offer as win

    Issues a PUT against the offers resource with the WIN command.

    :param offer_id: the offer id
    :return Response
    """
    return self._create_put_request(resource=OFFERS, billomat_id=offer_id, command=WIN, )
def neg_loglik(self, beta):
    """Creates the negative log-likelihood of the model

    Parameters
    ----------
    beta : np.array
        Contains untransformed starting values for latent variables

    Returns
    -------
    The negative loglikelihood of the model
    """
    lmda, Y, _, theta = self._model(beta)
    # Student-t likelihood. The degrees-of-freedom latent variable sits at
    # index -(len(X_names)*2) - 2 of both z_list and beta and must be
    # transformed from its untransformed parameterization before use;
    # the scale is exp(lmda / 2) -- presumably lmda is a log-variance,
    # TODO confirm.
    return -np.sum(ss.t.logpdf(x=Y, df=self.latent_variables.z_list[-(len(self.X_names) * 2) - 2].prior.transform(beta[-(len(self.X_names) * 2) - 2]), loc=theta, scale=np.exp(lmda / 2.0)))
def assert_array(A, shape=None, uniform=None, ndim=None, size=None, dtype=None, kind=None):
    r"""Asserts whether the given array or sparse matrix has the given properties

    Parameters
    ----------
    A : ndarray, scipy.sparse matrix or array-like
        the array under investigation
    shape : shape, optional, default=None
        asserts if the array has the requested shape. Be careful with vectors
        because this will distinguish between row vectors (1, n), column
        vectors (n, 1) and arrays (n,). If you want to be less specific,
        consider using size.
    uniform : None | True | False
        if not None, asserts whether the array dimensions are uniform (e.g.
        square for a ndim=2 array) (True), or not uniform (False).
    size : int, optional, default=None
        asserts if the array has the requested number of elements
    ndim : int, optional, default=None
        asserts if the array has the requested dimension
    dtype : type, optional, default=None
        asserts if the array data has the requested data type. This check is
        strong, e.g. int and int64 are not equal. If you want a weaker check,
        consider the kind option.
    kind : string, optional, default=None
        checks if the array data is of the specified kind. Options include
        'i' for integer types, 'f' for float types. Check numpy.dtype.kind
        for possible options. An additional option is 'numeric' for either
        integer or float.

    Raises
    ------
    AssertionError
        if an assertion has failed
    """
    try:
        if shape is not None:
            if not np.array_equal(np.shape(A), shape):
                raise AssertionError('Expected shape ' + str(shape) +
                                     ' but given array has shape ' + str(np.shape(A)))
        if uniform is not None:
            shapearr = np.array(np.shape(A))
            is_uniform = np.count_nonzero(shapearr - shapearr[0]) == 0
            if uniform and not is_uniform:
                raise AssertionError('Given array is not uniform \n' + str(shapearr))
            elif not uniform and is_uniform:
                raise AssertionError('Given array is not nonuniform: \n' + str(shapearr))
        if size is not None:
            if not np.size(A) == size:
                raise AssertionError('Expected size ' + str(size) +
                                     ' but given array has size ' + str(np.size(A)))
        if ndim is not None:
            if not ndim == np.ndim(A):
                # BUGFIX: message previously said "shape" although this
                # branch checks the number of dimensions.
                raise AssertionError('Expected ndim ' + str(ndim) +
                                     ' but given array has ndim ' + str(np.ndim(A)))
        if dtype is not None:
            # now we must create an array if we don't have one yet
            if not isinstance(A, np.ndarray) and not scisp.issparse(A):
                A = np.array(A)
            if not np.dtype(dtype) == A.dtype:
                raise AssertionError('Expected data type ' + str(dtype) +
                                     ' but given array has data type ' + str(A.dtype))
        if kind is not None:
            # now we must create an array if we don't have one yet
            if not isinstance(A, np.ndarray) and not scisp.issparse(A):
                A = np.array(A)
            if kind == 'numeric':
                if not (A.dtype.kind == 'i' or A.dtype.kind == 'f'):
                    raise AssertionError('Expected numerical data, but given array has data kind '
                                         + str(A.dtype.kind))
            elif not A.dtype.kind == kind:
                raise AssertionError('Expected data kind ' + str(kind) +
                                     ' but given array has data kind ' + str(A.dtype.kind))
    except AssertionError:
        # Re-raise our own assertion failures untouched.
        raise
    except Exception as ex:
        # Any other failure means the argument was not array-like at all.
        print('Found exception: ', ex)
        raise AssertionError('Given argument is not an array of the expected shape or type:\n' +
                             'arg = ' + str(A) + '\ntype = ' + str(type(A)))
def deep_get(d, *keys, default=None):
    """Recursive safe search in a dictionary of dictionaries.

    Args:
        d: the dictionary to work with
        *keys: the chain of keys to follow
        default: the value returned when the recursive search fails

    Returns:
        The value found by following keys through d, or default when the
        search did not succeed.

    Example:
        >>> d = {"user": {"id": 1, "login": "foo"}, "date": "2016-04-27"}
        >>> deep_get(d, "user", "login")
        'foo'
        >>> deep_get(d, "user", "name", default="bar")
        'bar'
    """
    if not keys:
        return d
    try:
        child = d[keys[0]]
    except (KeyError, IndexError, TypeError):
        return default
    return deep_get(child, *keys[1:], default=default)
def model_config(instance_type, model, role=None, image=None):
    """Export Airflow model config from a SageMaker model

    Args:
        instance_type (str): The EC2 instance type to deploy this Model to.
            For example, 'ml.p2.xlarge'
        model (sagemaker.model.FrameworkModel): The SageMaker model to export
            Airflow config from
        role (str): The ``ExecutionRoleArn`` IAM Role ARN for the model
        image (str): A container image to use for deploying the model

    Returns:
        dict: Model config that can be directly used by
        SageMakerModelOperator in Airflow. It can also be part of the config
        used by SageMakerEndpointOperator and SageMakerTransformOperator in
        Airflow.
    """
    s3_operations = {}
    model.image = image or model.image
    # Framework models need their container definition prepared (recording
    # required S3 uploads); plain models build their own container def.
    if isinstance(model, sagemaker.model.FrameworkModel):
        container_def = prepare_framework_container_def(model, instance_type, s3_operations)
    else:
        container_def = model.prepare_container_def(instance_type)
    base_name = utils.base_name_from_image(container_def['Image'])
    # Generate a model name from the image when none was set.
    model.name = model.name or utils.name_from_base(base_name)
    primary_container = session._expand_container_def(container_def)
    config = {'ModelName': model.name, 'PrimaryContainer': primary_container, 'ExecutionRoleArn': role or model.role}
    if model.vpc_config:
        config['VpcConfig'] = model.vpc_config
    if s3_operations:
        config['S3Operations'] = s3_operations
    return config
def matches(self, object):
    """<Purpose>
        Return True if 'object' matches this schema, False if it doesn't.

        Callers that wish to signal an error on a failed match should call
        check_match() instead, which raises 'exceptions.FormatError'.
    """
    try:
        self.check_match(object)
    except securesystemslib.exceptions.FormatError:
        return False
    return True
def notify_created(room, event, user):
    """Notifies about the creation of a chatroom.

    Renders the 'emails/created.txt' plugin template and sends it for the
    given event.

    :param room: the chatroom
    :param event: the event
    :param user: the user performing the action
    """
    tpl = get_plugin_template_module('emails/created.txt', chatroom=room, event=event, user=user)
    _send(event, tpl)
def complete_list_value(self, return_type: GraphQLList[GraphQLOutputType], field_nodes: List[FieldNode], info: GraphQLResolveInfo, path: ResponsePath, result: Iterable[Any], ) -> AwaitableOrValue[Any]:
    """Complete a list value.

    Complete a list value by completing each item in the list with the
    inner type. Returns a plain list when no item is awaitable; otherwise
    returns a coroutine that awaits the pending items and splices them back
    into place.
    """
    if not isinstance(result, Iterable) or isinstance(result, str):
        raise TypeError("Expected Iterable, but did not find one for field" f" {info.parent_type.name}.{info.field_name}.")
    # This is specified as a simple map, however we're optimizing the path
    # where the list contains no coroutine objects by avoiding creating
    # another coroutine object.
    item_type = return_type.of_type
    awaitable_indices: List[int] = []
    append_awaitable = awaitable_indices.append
    completed_results: List[Any] = []
    append_result = completed_results.append
    for index, item in enumerate(result):
        # No need to modify the info object containing the path, since from
        # here on it is not ever accessed by resolver functions.
        field_path = add_path(path, index)
        completed_item = self.complete_value_catching_error(item_type, field_nodes, info, field_path, item)
        if isawaitable(completed_item):
            append_awaitable(index)
        append_result(completed_item)
    if not awaitable_indices:
        return completed_results

    # noinspection PyShadowingNames
    async def get_completed_results():
        # Await every pending item at once, then overwrite each placeholder
        # with its resolved value.
        for index, result in zip(awaitable_indices, await gather(*(completed_results[index] for index in awaitable_indices)), ):
            completed_results[index] = result
        return completed_results
    return get_completed_results()
def promote_alert_to_case(self, alert_id):
    """This uses the TheHiveAPI to promote an alert to a case

    :param alert_id: Alert identifier
    :return: TheHive Case
    :rtype: json
    """
    req = self.url + "/api/alert/{}/createCase".format(alert_id)
    try:
        return requests.post(req, headers={'Content-Type': 'application/json'},
                             proxies=self.proxies, auth=self.auth,
                             verify=self.cert, data=json.dumps({}))
    except requests.exceptions.RequestException as the_exception:
        raise AlertException("Couldn't promote alert to case: {}".format(the_exception))
    # NOTE: the original trailing `return None` was unreachable (the try
    # either returns or the except raises) and has been removed.
def append_attribute(self, name, value, content):
    """Append an attribute name/value into L{Content.data}.

    @param name: The attribute name
    @type name: basestring
    @param value: The attribute's value
    @type value: basestring
    @param content: The current content being unmarshalled.
    @type content: L{Content}
    """
    # Renamed from `type` to avoid shadowing the builtin.
    attr_type = self.resolver.findattr(name)
    if attr_type is None:
        log.warn('attribute (%s) type, not-found', name)
    else:
        value = self.translated(value, attr_type)
    Core.append_attribute(self, name, value, content)
def render_obs(self, obs):
    """Render a frame given an observation."""
    start_time = time.time()
    self._obs = obs
    self.check_valid_queued_action()
    # Re-center the camera on the player's current camera position.
    self._update_camera(point.Point.build(self._obs.observation.raw_data.player.camera))
    for surf in self._surfaces:
        # Render that surface.
        surf.draw(surf)
    mouse_pos = self.get_mouse_pos()
    if mouse_pos:
        # Draw a small mouse cursor
        self.all_surfs(_Surface.draw_circle, colors.green, mouse_pos.world_pos, 0.1)
    self.draw_actions()
    with sw("flip"):
        pygame.display.flip()
    # Track per-frame render time (used elsewhere for FPS stats --
    # TODO confirm).
    self._render_times.append(time.time() - start_time)
def _cleanstarlog(file_in):
    """Clean a history.data or star.log file, e.g. to take care of
    repetitive restarts.

    Private; should not be called by user directly.

    Scans the file backwards keeping, for each model number, only the
    last-written entry, and writes the cleaned lines to ``file_in + 'sa'``
    (history.datasa or star.logsa).

    Parameters
    ----------
    file_in : string
        Typically the filename of the mesa output history.data or star.log
        file; creates a clean file called history.datasa or star.logsa.

    (thanks to Raphael for providing this tool)
    """
    file_out = file_in + 'sa'
    f = open(file_in)
    lignes = f.readlines()
    f.close()
    nb = np.array([], dtype=int)
    # model number
    # Seed with the model number of the very last line: that run "wins".
    nb = np.concatenate((nb, [int(lignes[len(lignes) - 1].split()[0])]))
    nbremove = np.array([], dtype=int)
    # model number
    i = -1
    # Walk backwards: a line whose model number is >= the newest model seen
    # so far belongs to an overwritten (restarted) run and is marked for
    # removal. The first 6 lines are the header and are always kept.
    for i in np.arange(len(lignes) - 1, 0, -1):
        line = lignes[i - 1]
        if i > 6 and line != "":
            if int(line.split()[0]) >= nb[-1]:
                nbremove = np.concatenate((nbremove, [i - 1]))
            else:
                nb = np.concatenate((nb, [int(line.split()[0])]))
    i = -1
    # nbremove was built back-to-front, so removing by element keeps the
    # remaining indices valid.
    for j in nbremove:
        lignes.remove(lignes[j])
    fout = open(file_out, 'w')
    for j in np.arange(len(lignes)):
        fout.write(lignes[j])
    fout.close()
async def get_user_data(self):
    """Get Tautulli userdata.

    For each configured user, records the current activity state and
    copies the matching session's fields into ``self.tautulli_user_data``.
    Errors and timeouts are logged, not raised.
    """
    userdata = {}
    sessions = self.session_data.get('sessions', {})
    try:
        # NOTE(review): the `loop` kwarg was removed in newer async_timeout
        # releases — confirm the pinned version still accepts it.
        async with async_timeout.timeout(8, loop=self._loop):
            for username in self.tautulli_users:
                userdata[username] = {}
                userdata[username]['Activity'] = None
                for session in sessions:
                    if session['username'].lower() != username.lower():
                        continue
                    userdata[username]['Activity'] = session['state']
                    for key in session:
                        # Skip the username field itself.  The session keys
                        # are lower-case ('username'), so compare
                        # case-insensitively — the original compared against
                        # 'Username' and therefore never skipped anything.
                        if key.lower() != 'username':
                            userdata[username][key] = session[key]
                    break
        self.tautulli_user_data = userdata
    except (asyncio.TimeoutError, aiohttp.ClientError, KeyError):
        msg = "Can not load data from Tautulli."
        logger(msg, 40)
def _validate(self, key, val, arg_types):
    """Ensure that ``key`` and ``val`` are valid arguments for the feed.

    Raises a ``CloudantArgumentError`` with the appropriate error code when
    the key is unknown, the value has the wrong type, an integer value is
    negative, or a ``feed``/``style`` value is not in its allowed set.
    """
    if key not in arg_types:
        # Unknown key: only acceptable when a wildcard entry exists.
        if ANY_ARG not in arg_types:
            raise CloudantArgumentError(116, key)
        arg_type = arg_types[ANY_ARG]
    else:
        arg_type = arg_types[key]
    if arg_type == ANY_TYPE:
        return
    # bool is a subclass of int, so explicitly reject bools for int slots.
    wrong_type = not isinstance(val, arg_type) or (isinstance(val, bool) and int in arg_type)
    if wrong_type:
        raise CloudantArgumentError(117, key, arg_type)
    if isinstance(val, int) and not isinstance(val, bool) and val < 0:
        raise CloudantArgumentError(118, key, val)
    if key == 'feed':
        if self._source == 'CouchDB':
            valid_vals = ('continuous', 'longpoll')
        else:
            valid_vals = ('continuous', 'normal', 'longpoll')
        if val not in valid_vals:
            raise CloudantArgumentError(119, val, valid_vals)
    if key == 'style' and val not in ('main_only', 'all_docs'):
        raise CloudantArgumentError(120, val)
def slugify(text, delim=u'-'):
    """Generate an ASCII-only slug.

    Lower-cases *text*, splits it on punctuation, transliterates each word
    to ASCII with ``unidecode`` and joins the pieces with *delim*.
    """
    result = []
    for word in _punct_re.split(text.lower()):
        result.extend(unidecode(word).split())
    # The original wrapped this in `unicode(...)`, which is a NameError on
    # Python 3; `str` is the correct text type there (and a no-op on the
    # already-decoded join result).
    return str(delim.join(result))
def warning(*args):
    """Display warning message via stderr or GUI.

    When stdin is attached to a terminal the message goes to stderr;
    otherwise it is forwarded to the GUI notifier.
    """
    interactive = sys.stdin.isatty()
    if not interactive:
        notify_warning(*args)
    else:
        print('WARNING:', *args, file=sys.stderr)
def start(self, future):
    """Execute Future.

    Runs startup polling, then *future*, then shutdown polling on the
    executor's event loop.  Return the Future's result, or raise its
    exception.  KeyboardInterrupt/SystemExit stop the loop and yield
    ``None``.

    :param future: awaitable to run to completion
    :return: the future's result, or ``None`` on interrupt/exit
    """
    self._check_frozen()
    # Freeze the executor so no further setup can happen while running.
    # NOTE(review): this sets `_freeze` while the guard is `_check_frozen`
    # — confirm the attribute name matches what that check inspects.
    self._freeze = True
    loop: asyncio.AbstractEventLoop = self.loop
    try:
        loop.run_until_complete(self._startup_polling())
        result = loop.run_until_complete(future)
    except (KeyboardInterrupt, SystemExit):
        result = None
        loop.stop()
    finally:
        # Always attempt a clean shutdown of polling, even after interrupt.
        loop.run_until_complete(self._shutdown_polling())
        log.warning("Goodbye!")
    return result
def main():
    """Main entrypoint for command-line webserver.

    Parses host/port options, configures basic logging and starts the
    web server with auto-reload enabled.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("-H", "--host", help="Web server Host address to bind to",
                        default="0.0.0.0", action="store", required=False)
    # type=int so a CLI-supplied port arrives as an integer, matching the
    # default (8080); without it "-p 9000" would yield the string "9000".
    parser.add_argument("-p", "--port", help="Web server Port to bind to",
                        default=8080, type=int, action="store", required=False)
    args = parser.parse_args()
    logging.basicConfig()
    run(host=args.host, port=args.port, reloader=True, server=SERVER)
def update_config(self):
    """Update the configuration files according to the current
    in-memory SExtractor configuration.

    Writes four files named in ``self.config``: the convolution filter
    (FILTER_NAME), the output parameter list (PARAMETERS_NAME), the
    neural-network star/galaxy file (STARNNW_NAME) and the main
    configuration file (CONFIG_FILE).
    """
    # -- Write filter configuration file
    # First check the filter itself
    # NOTE: `filter` shadows the builtin of the same name (kept as-is).
    filter = self.config['FILTER_MASK']
    rows = len(filter)
    cols = len(filter[0])
    # May raise ValueError, OK
    # NOTE(review): `__builtin__` is Python 2 only — presumably aliased at
    # module level; confirm behavior under Python 3.
    filter_f = __builtin__.open(self.config['FILTER_NAME'], 'w')
    filter_f.write("CONV NORM\n")
    filter_f.write("# %dx%d Generated from sextractor.py module.\n" % (rows, cols))
    for row in filter:
        filter_f.write(" ".join(map(repr, row)))
        filter_f.write("\n")
    filter_f.close()
    # -- Write parameter list file: one output parameter per line.
    parameters_f = __builtin__.open(self.config['PARAMETERS_NAME'], 'w')
    for parameter in self.config['PARAMETERS_LIST']:
        print(parameter, file=parameters_f)
    parameters_f.close()
    # -- Write NNW configuration file (verbatim template).
    nnw_f = __builtin__.open(self.config['STARNNW_NAME'], 'w')
    nnw_f.write(nnw_config)
    nnw_f.close()
    # -- Write main configuration file
    main_f = __builtin__.open(self.config['CONFIG_FILE'], 'w')
    for key in self.config.keys():
        # Keys handled specially elsewhere are not written out here.
        if (key in SExtractor._SE_config_special_keys):
            continue
        if (key == "PHOT_AUTOPARAMS"):  # tuple instead of a single value
            value = " ".join(map(str, self.config[key]))
        else:
            value = str(self.config[key])
        print(("%-16s %-16s # %s" % (key, value, SExtractor._SE_config[key]['comment'])), file=main_f)
    main_f.close()
def handle_device_json(self, data):
    """Manage the device json list.

    Pushes *data* to the front of the fixed-size ``_device_json`` buffer
    and drops the oldest entry from the back.
    """
    self._device_json.insert(0, data)
    del self._device_json[-1]
def merge_two(one, other, merge_strategy=MergeStrategy.UNION, silent=False,
              pixel_strategy=PixelStrategy.FIRST):
    # type: (GeoRaster2, GeoRaster2, MergeStrategy, bool, PixelStrategy) -> GeoRaster2
    """Merge two rasters into one.

    Parameters
    ----------
    one : GeoRaster2
        Left raster to merge.
    other : GeoRaster2
        Right raster to merge.
    merge_strategy : MergeStrategy, optional
        Merge strategy, from :py:data:`telluric.georaster.MergeStrategy` (default to "union").
    silent : bool, optional
        Whether to raise errors or return some result, default to False (raise errors).
    pixel_strategy : PixelStrategy, optional
        Pixel strategy, from :py:data:`telluric.georaster.PixelStrategy` (default to "top").

    Returns
    -------
    GeoRaster2
    """
    # Crop/align `other` onto `one`'s footprint; None signals no overlap.
    other_res = _prepare_other_raster(one, other)
    if other_res is None:
        if silent:
            return one
        else:
            raise ValueError("rasters do not intersect")
    else:
        # To make MyPy happy
        other = other.copy_with(image=other_res.image, band_names=other_res.band_names)
    # Create a list of single band rasters
    # Cropping won't happen twice, since other was already cropped
    all_band_names, projected_rasters = _prepare_rasters([other], merge_strategy, first=one)
    if not all_band_names and not silent:
        raise ValueError("rasters have no bands in common, use another merge strategy")
    prepared_rasters = _apply_pixel_strategy(projected_rasters, pixel_strategy)
    prepared_rasters = _explode_rasters(prepared_rasters, all_band_names)
    # Merge common bands
    prepared_rasters = _merge_common_bands(_explode_raster(one, all_band_names) + prepared_rasters)
    # Merge all bands
    raster = reduce(_stack_bands, prepared_rasters)
    return one.copy_with(image=raster.image, band_names=raster.band_names)
def flowtable(self):
    """Get a flat flow table globally.

    Merges every per-table dict in ``self.flow_table`` into a single dict
    mapping each key to the list of all values observed for it
    (duplicates removed; list order unspecified).
    """
    merged = {}
    for table in self.flow_table:
        for key, values in table.items():
            # setdefault + update replaces the original side-effecting
            # list comprehension ([s.add(i) for i in v]).
            merged.setdefault(key, set()).update(values)
    # Convert the sets back to lists for callers.
    return {key: list(vals) for key, vals in merged.items()}
def cmd(send, msg, args):
    """Changes the output filter.

    Syntax: {command} [--channel channel] <filter|--show|--list|--reset|--chain filter,[filter2,...]>
    """
    # Filters are per-channel state, so refuse to operate in a PM.
    if args['type'] == 'privmsg':
        send('Filters must be set in channels, not via private message.')
        return
    isadmin = args['is_admin'](args['nick'])
    parser = arguments.ArgParser(args['config'])
    parser.add_argument('--channel', nargs='?', default=args['target'])
    # Exactly one of: a filter name, --show, --list, --reset, --chain.
    group = parser.add_mutually_exclusive_group()
    group.add_argument('filter', nargs='?')
    group.add_argument('--show', action='store_true')
    group.add_argument('--list', action='store_true')
    group.add_argument('--reset', '--clear', action='store_true')
    group.add_argument('--chain')
    # Bare command: just report the current filters for this channel.
    if not msg:
        send(get_filters(args['handler'], args['target']))
        return
    try:
        cmdargs = parser.parse_args(msg)
    except arguments.ArgumentException as e:
        send(str(e))
        return
    if cmdargs.list:
        send("Available filters are %s" % ", ".join(textutils.output_filters.keys()))
    elif cmdargs.reset and isadmin:
        args['handler'].outputfilter[cmdargs.channel].clear()
        send("Okay!")
    elif cmdargs.chain and isadmin:
        # --chain appends to the existing filter stack, so one must exist.
        if not args['handler'].outputfilter[cmdargs.channel]:
            send("Must have a filter set in order to chain.")
            return
        filter_list, output = textutils.append_filters(cmdargs.chain)
        if filter_list is not None:
            args['handler'].outputfilter[cmdargs.channel].extend(filter_list)
        send(output)
    elif cmdargs.show:
        send(get_filters(args['handler'], cmdargs.channel))
    elif isadmin:
        # If we're just adding a filter without chain, blow away any existing filters.
        filter_list, output = textutils.append_filters(cmdargs.filter)
        if filter_list is not None:
            args['handler'].outputfilter[cmdargs.channel].clear()
            args['handler'].outputfilter[cmdargs.channel].extend(filter_list)
        send(output)
    else:
        send('This command requires admin privileges.')
def round_linestring_coords(ls, precision):
    """Round the coordinates of a shapely LineString to some decimal precision.

    Parameters
    ----------
    ls : shapely LineString
        the LineString to round the coordinates of
    precision : int
        decimal precision to round coordinates to

    Returns
    -------
    LineString
    """
    rounded = []
    for coord in ls.coords:
        rounded.append([round(component, precision) for component in coord])
    return LineString(rounded)
def _createAbsMagEstimationDict():
    """Load ``magnitude_estimation.dat``, which is from
    http://xoomer.virgilio.it/hrtrace/Sk.htm on 24/01/2014 and based on
    Schmid-Kaler (1982).

    Creates a dict in the form [Classletter][ClassNumber][List of values
    for each L Class], plus a reference dict mapping luminosity-class
    labels to column indices.
    """
    data_path = resource_filename(__name__, 'data/magnitude_estimation.dat')
    raw_table = np.loadtxt(data_path, '|S5')
    absMagDict = {letter: {} for letter in 'OBAFGKM'}
    for row in raw_table:
        if sys.hexversion >= 0x03000000:
            # Python 3: decode, otherwise we get byte ints or b' caused by 2to3
            spectral = row[0].decode("utf-8")
        else:
            # dict of spectral type = {abs mag for each luminosity class}
            spectral = row[0]
        absMagDict[spectral[0]][int(spectral[1])] = [float(x) for x in row[1:]]
    # manually typed from table headers - used to match columns with the L
    # class (header)
    LClassRef = {'V': 0, 'IV': 1, 'III': 2, 'II': 3, 'Ib': 4, 'Iab': 5, 'Ia': 6, 'Ia0': 7}
    return absMagDict, LClassRef
def ReadContainers(self, database_link, options=None):
    """Reads all collections in a database.

    :param str database_link:
        The link to the database.
    :param dict options:
        The request options for the request.

    :return: Query Iterable of Collections.
    :rtype: query_iterable.QueryIterable
    """
    request_options = {} if options is None else options
    # Reading all containers is just an unfiltered container query.
    return self.QueryContainers(database_link, None, request_options)
def bin_kb_dense(M, positions, length=10, contigs=None):
    """Perform binning with a fixed genomic length in kilobase pairs (kb).

    Fragments will be binned such that their total length is closest to
    the specified input.  If a contig list is specified, binning will be
    performed such that fragments never overlap two contigs.
    NOTE(review): ``contigs`` is not referenced in this body — confirm
    whether contig-aware binning happens upstream.
    """
    bin_size = length * 10 ** 3
    scaled = positions / bin_size
    n = len(positions)
    # Fragment indices where the scaled position crosses into a new bin.
    boundaries = [i for i in range(n - 1) if np.ceil(scaled[i]) < np.ceil(scaled[i + 1])]
    binned_positions = positions[boundaries]
    m = len(boundaries) - 1
    binned = np.zeros((m, m))
    for col in range(m):
        binned[col] = np.array([
            M[boundaries[row]:boundaries[row + 1], boundaries[col]:boundaries[col + 1]].sum()
            for row in range(m)
        ])
    return binned, binned_positions
def get_default_jvm_path():
    """Retrieves the path to the default or first found JVM library.

    :return: The path to the JVM shared library file
    :raise ValueError: No JVM library found
    """
    platform = sys.platform
    if platform == "win32":
        # Native Windows
        from ._windows import WindowsJVMFinder
        finder = WindowsJVMFinder()
    elif platform == "cygwin":
        # Cygwin also resolves the JVM like Windows
        from ._cygwin import WindowsJVMFinder
        finder = WindowsJVMFinder()
    elif platform == "darwin":
        # Mac OS X
        from ._darwin import DarwinJVMFinder
        finder = DarwinJVMFinder()
    else:
        # Use the Linux way for other systems
        from ._linux import LinuxJVMFinder
        finder = LinuxJVMFinder()
    return finder.get_jvm_path()
def get_graph_by_most_recent(self, name: str) -> Optional[BELGraph]:
    """Get the most recently created network with the given name as a :class:`pybel.BELGraph`.

    Returns ``None`` when no network with that name exists.
    """
    most_recent = self.get_most_recent_network_by_name(name)
    return None if most_recent is None else most_recent.as_bel()
def _dockerKill(containerName, action):
    """Deprecated. Kills the specified container.

    :param str containerName: The name of the container created by docker_call
    :param int action: What action should be taken on the container?
        Compared against the module-level FORGO/STOP/RM ordering constants.
    """
    running = containerIsRunning(containerName)
    if running is None:
        # This means that the container doesn't exist.  We will see this if the
        # container was run with --rm and has already exited before this call.
        logger.debug('The container with name "%s" appears to have already been ' 'removed. Nothing to ' 'do.', containerName)
    else:
        if action in (None, FORGO):
            # Caller asked for no post-job action; leave the container alone.
            logger.debug('The container with name %s continues to exist as we ' 'were asked to forgo a ' 'post-job action on it.', containerName)
        else:
            logger.debug('The container with name %s exists. Running ' 'user-specified defer functions.', containerName)
            if running and action >= STOP:
                logger.debug('Stopping container "%s".', containerName)
                # Retry the stop in case the docker daemon is transiently busy.
                for attempt in retry(predicate=dockerPredicate):
                    with attempt:
                        subprocess.check_call(['docker', 'stop', containerName])
            else:
                logger.debug('The container "%s" was not found to be running.', containerName)
            if action >= RM:
                # If the container was run with --rm, then stop will most likely
                # remove the container.  We first check if it is running then
                # remove it.
                running = containerIsRunning(containerName)
                if running is not None:
                    logger.debug('Removing container "%s".', containerName)
                    for attempt in retry(predicate=dockerPredicate):
                        with attempt:
                            subprocess.check_call(['docker', 'rm', '-f', containerName])
                else:
                    logger.debug('Container "%s" was not found on the system.' 'Nothing to remove.', containerName)
def setup_gui(self):
    """Setup the main layout of the widget.

    Places the canvas in the middle column with the toolbar to its right
    (spanning two rows), and stretches the empty side columns/bottom row
    so the canvas stays centered when the widget resizes.
    """
    layout = QGridLayout(self)
    layout.setContentsMargins(0, 0, 0, 0)
    layout.addWidget(self.canvas, 0, 1)
    # Toolbar at row 0, column 3, spanning 2 rows and 1 column.
    layout.addLayout(self.setup_toolbar(), 0, 3, 2, 1)
    # Give the flanking columns and the bottom row all the stretch.
    layout.setColumnStretch(0, 100)
    layout.setColumnStretch(2, 100)
    layout.setRowStretch(1, 100)
def convert_pmod(pmod):
    """Update BEL1 pmod() protein modification term.

    Rewrites the first argument's value in place when a BEL1 -> BEL2
    migration mapping exists for it, then returns the (mutated) term.
    """
    migrations = spec["bel1_migration"]["protein_modifications"]
    mod_type = pmod.args[0].value
    if mod_type in migrations:
        pmod.args[0].value = migrations[mod_type]
    return pmod
def lowpass(ts, cutoff_hz, order=3):
    """Forward-backward Butterworth low-pass filter.

    Filters each channel of *ts* with ``scipy.signal.filtfilt`` (zero
    phase).  1-D input is handled by temporarily promoting it to a single
    channel and squeezing the result back.

    :raise ValueError: if the designed filter would be unstable
    """
    orig_ndim = ts.ndim
    # `== 1`, not `is 1`: identity comparison with an int literal is
    # incorrect (and a SyntaxWarning on modern Python).
    if ts.ndim == 1:
        ts = ts[:, np.newaxis]
    channels = ts.shape[1]
    # Sampling rate inferred from the time span.
    fs = (len(ts) - 1.0) / (ts.tspan[-1] - ts.tspan[0])
    nyq = 0.5 * fs
    cutoff = cutoff_hz / nyq
    b, a = signal.butter(order, cutoff, btype='low')
    # All poles must lie inside the unit circle for a stable filter.
    if not np.all(np.abs(np.roots(a)) < 1.0):
        raise ValueError('Filter will not be stable with these values.')
    dtype = ts.dtype
    output = np.zeros((len(ts), channels), dtype)
    for i in range(channels):
        output[:, i] = signal.filtfilt(b, a, ts[:, i])
    if orig_ndim == 1:
        output = output[:, 0]
    return Timeseries(output, ts.tspan, labels=ts.labels)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.