signature stringlengths 29 44.1k | implementation stringlengths 0 85.2k |
|---|---|
def purge_objects(self, request):
    """Remove all objects in this model's table (Django admin action).

    First displays a confirmation page; on a confirmed POST it deletes
    every row (via a raw TRUNCATE-style statement when configured, which
    is much faster than ORM deletion) and redirects back to the change
    list.

    :param request: the current HttpRequest
    :return: rendered confirmation page, or a redirect to the change list
    :raises PermissionDenied: if the user is not a superuser or lacks
        delete permission on the model
    """
    def truncate_table(model):
        # Prefer the configured raw SQL statement when available; fall back
        # to ORM bulk delete otherwise.
        if settings.TRUNCATE_TABLE_SQL_STATEMENT:
            from django.db import connection
            sql = settings.TRUNCATE_TABLE_SQL_STATEMENT.format(db_table=model._meta.db_table)
            cursor = connection.cursor()
            cursor.execute(sql)
        else:
            model.objects.all().delete()

    modeladmin = self
    opts = modeladmin.model._meta
    # Check that the user has delete permission for the actual model.
    if not request.user.is_superuser:
        raise PermissionDenied
    if not modeladmin.has_delete_permission(request):
        raise PermissionDenied
    # If the user has already confirmed or cancelled the deletion,
    # (eventually) do the deletion and return to the change list view.
    if request.method == 'POST':
        if 'btn-confirm' in request.POST:
            try:
                n = modeladmin.model.objects.count()
                truncate_table(modeladmin.model)
                # BUG FIX: interpolate *after* translation so the msgid seen
                # by gettext is the stable placeholder string, not the
                # already-formatted text (which could never match a catalog).
                modeladmin.message_user(request, _("Successfully removed %d rows") % n, messages.SUCCESS)
            except Exception as e:
                modeladmin.message_user(request, _(u'ERROR') + ': %r' % e, messages.ERROR)
        else:
            modeladmin.message_user(request, _("Action cancelled by user"), messages.SUCCESS)
        return HttpResponseRedirect(reverse('admin:%s_%s_changelist' % (opts.app_label, opts.model_name)))
    context = {
        "title": _("Purge all %s ... are you sure?") % opts.verbose_name_plural,
        "opts": opts,
        "app_label": opts.app_label,
    }
    # Display the confirmation page.
    return render(request, 'admin/easyaudit/purge_confirmation.html', context)
def query_phenomizer(usr, pwd, *hpo_terms):
    """Query the phenomizer web tool.

    Arguments:
        usr (str): A username for phenomizer
        pwd (str): A password for phenomizer
        hpo_terms: HPO term strings to query with

    Returns:
        The raw ``requests`` response object from phenomizer.

    Raises:
        RuntimeError: on a request timeout or a non-OK HTTP status.
    """
    base_string = 'http://compbio.charite.de/phenomizer/phenomizer/PhenomizerServiceURI'
    questions = {
        'mobilequery': 'true',
        'terms': ','.join(hpo_terms),
        'username': usr,
        'password': pwd,
    }
    try:
        response = requests.get(base_string, params=questions, timeout=10)
    except requests.exceptions.Timeout:
        raise RuntimeError("The request timed out.")
    if response.status_code != requests.codes.ok:
        raise RuntimeError("Phenomizer returned a bad status code: %s" % response.status_code)
    response.encoding = 'utf-8'
    return response
def mjpeg_url(self, channelno=None, typeno=None):
    """Return the MJPEG streaming URL for this device.

    Params:
        channelno: integer video channel index; defaults to 0 when omitted.
            NOTE(review): the upstream docs describe channels as starting
            at 1 with default 1 — confirm which convention the camera uses.
        typeno: stream type; defaults to 1 (sub stream) when omitted:
            0 - Main Stream
            1 - Extra Stream 1 (Sub Stream)
            2 - Extra Stream 2 (Sub Stream)
    """
    channel = 0 if channelno is None else channelno
    subtype = 1 if typeno is None else typeno
    cmd = "mjpg/video.cgi?channel={0}&subtype={1}".format(channel, subtype)
    return '{0}{1}'.format(self._base_url, cmd)
def is_variable(tup):
    """Take a ``(name, object)`` tuple; return True if it is a variable."""
    name, item = tup
    # Callables (functions/classes) and imported modules are not variables,
    # and neither are private names (leading underscore).
    if callable(item) or isinstance(item, types.ModuleType):
        return False
    return not name.startswith("_")
def backgr(img, mask=None, mode=MODE_AUTO, thresh=2, splinepoints=5, scale=1, maxiter=40, convergence=.001):
    '''Iterative spline-based background correction.

    mode         - one of MODE_AUTO, MODE_DARK, MODE_BRIGHT or MODE_GRAY
    thresh       - threshold to cut at, in units of sigma
    splinepoints - # of points in spline in each direction
    scale        - scale the image by this factor (e.g. 2 = operate on 1/2 of the points)
    maxiter      - maximum # of iterations
    convergence  - result has converged when the standard deviation of the
                   difference between iterations is this fraction of the
                   maximum image intensity

    Spline mesh is splinepoints x splinepoints.  Modes are defined by the
    background intensity.  Larger thresh -> slower but more stable
    convergence.  Returns the background matrix.
    '''
    assert img.ndim == 2, "Image must be 2-d"
    assert splinepoints >= 3, "The minimum grid size is 3x3"
    assert maxiter >= 1
    assert mode in [MODE_AUTO, MODE_BRIGHT, MODE_DARK, MODE_GRAY], mode + " is not a valid background mode"
    orig_shape = np.array(img.shape).copy()
    input_mask = mask
    if mask is None:
        mask = np.ones(orig_shape, dtype=bool)
        # start with mask = whole image
        clip_imin = clip_jmin = 0
        clip_imax = img.shape[0]
        clip_jmax = img.shape[1]
        clip_shape = orig_shape
    else:
        # Clip to the bounding box of the mask so we only fit masked pixels.
        isum = np.sum(mask, 1)
        jsum = np.sum(mask, 0)
        clip_imin = np.min(np.argwhere(isum != 0))
        clip_imax = np.max(np.argwhere(isum != 0)) + 1
        clip_jmin = np.min(np.argwhere(jsum != 0))
        clip_jmax = np.max(np.argwhere(jsum != 0)) + 1
        clip_shape = np.array([clip_imax - clip_imin, clip_jmax - clip_jmin])
    # Subsample the clipped image by `scale` for speed.
    subsample_shape = (clip_shape / scale).astype(int)
    ratio = (clip_shape.astype(float) - 1) / (subsample_shape.astype(float) - 1)
    transform = np.array([[ratio[0], 0], [0, ratio[1]]])
    inverse_transform = np.array([[1.0 / ratio[0], 0], [0, 1.0 / ratio[1]]])
    img = affine_transform(img[clip_imin:clip_imax, clip_jmin:clip_jmax],
                           transform, output_shape=tuple(subsample_shape), order=2)
    mask = affine_transform(mask[clip_imin:clip_imax, clip_jmin:clip_jmax].astype(float),
                            transform, output_shape=tuple(subsample_shape), order=2) > .5
    orig_mask = mask
    # Resolve the mode to a sign: -1 = dark background, +1 = bright, 0 = gray.
    if mode == 'auto':
        mode = automode(img[orig_mask])
    elif mode == 'dark' or mode == 'low':
        mode = -1
    elif mode == 'bright' or mode == 'high':
        mode = 1
    elif mode == 'gray' or mode == 'grey' or mode == 'mid':
        mode = 0
    # Base the stop criterion on a fraction of the image dynamic range.
    stop_criterion = max((np.max(img) - np.min(img)) * convergence, np.finfo(img.dtype).eps)
    [r, c] = img.shape
    oldres = np.zeros((r, c))  # previous background estimate
    for i in range(maxiter):
        px, py, pz = splineimage(img, splinepoints, np.array(mask))
        res = evalspline2d(np.arange(c), np.arange(r), px, py, pz)
        comp = img - res
        diff = res[orig_mask] - oldres[orig_mask]
        # Compute std. deviation in the same way as matlab std() (unbiased),
        # not numpy.std().
        stddiff = unbiased_std(diff)
        if stddiff < stop_criterion:
            break
        elif i == maxiter - 1:
            # BUG FIX: the final loop index is maxiter - 1; the original
            # compared against maxiter, so this warning was unreachable.
            warnings.warn('Background did not converge after %d iterations.\nMake sure that the foreground/background mode is correct.' % (i))
        oldres = res
        # Calculate the new mask of background pixels within `thresh` sigma.
        backgr = comp[mask]
        sigma = unbiased_std(backgr)
        cut = sigma * thresh
        if mode < 0:
            mask = comp < cut
        elif mode > 0:
            mask = comp > -cut
        else:
            mask = abs(comp) < cut
        mask &= orig_mask
        nnz = np.sum(mask)
        if nnz < .01 * np.sum(orig_mask):
            # BUG FIX: '%%' in a plain (non-formatted) string is emitted
            # literally as '%%'; a single '%' is intended here.
            warnings.warn('Less than 1% of the pixels used for fitting,\ntry starting again with a larger threshold value')
            break
    # Upsample the fitted background back into the original image frame.
    output = np.zeros(orig_shape, img.dtype)
    output[clip_imin:clip_imax, clip_jmin:clip_jmax] = affine_transform(res, inverse_transform, output_shape=tuple(clip_shape), order=3)
    if input_mask is not None:
        output[~input_mask] = 0
    return output
def get_f_clvd(self):
    """Return the statistic f_clvd: the signed ratio of the sizes of the
    intermediate and largest principal moments::

        f_clvd = -b_axis_eigenvalue / max(|t_axis_eigenvalue|, |p_axis_eigenvalue|)
    """
    if not self.principal_axes:
        # Principal axes not yet defined for the moment tensor.
        raise ValueError('Principal Axes not defined!')
    t_size = fabs(self.principal_axes.t_axis['eigenvalue'])
    p_size = fabs(self.principal_axes.p_axis['eigenvalue'])
    denominator = np.max(np.array([t_size, p_size]))
    self.f_clvd = -self.principal_axes.b_axis['eigenvalue'] / denominator
    return self.f_clvd
def get_requirements(*args):
    """Get requirements from pip requirement files."""
    contents = get_contents(*args)
    requirements = set()
    for line in contents.splitlines():
        # Strip whole-line and trailing comments.
        cleaned = re.sub(r'^#.*|\s#.*', '', line)
        # Ignore empty lines; collapse any remaining whitespace.
        if cleaned and not cleaned.isspace():
            requirements.add(re.sub(r'\s+', '', cleaned))
    return sorted(requirements)
def list_document_libraries(self, limit=None, *, query=None, order_by=None, batch=None):
    """Return a collection of document libraries for this site
    (a collection of Drive instances).

    :param int limit: max no. of items to get. Over 999 uses batch.
    :param query: applies an OData filter to the request
    :type query: Query or str
    :param order_by: orders the result set based on this condition
    :type order_by: Query or str
    :param int batch: batch size; retrieves items in batches allowing
        to retrieve more items than the limit.
    :return: list of items in this folder
    :rtype: list[Drive] or Pagination
    """
    storage = self.site_storage
    return storage.get_drives(limit=limit, query=query, order_by=order_by, batch=batch)
def _read(self):
    """Get two lists, each containing two elements: name and nd.array value."""
    # Each index line is "<id>\t<data image name>\t<label image name>".
    line = self.f.readline().strip('\n')
    _, data_img_name, label_img_name = line.split("\t")
    data_value, label_value = self._read_img(data_img_name, label_img_name)
    data = {self.data_name: data_value}
    label = {self.label_name: label_value}
    return list(data.items()), list(label.items())
def _closest_centroid(self, x):
    """Return the index of the centroid closest (Euclidean norm) to sample ``x``.

    :param x: sample vector (array-like)
    :return: integer index into ``self.centroids``
    """
    closest_centroid = 0
    # BUG FIX: the original initialised ``distance = 10 ^ 9`` which is
    # bitwise XOR (== 3), so any sample farther than 3 from every centroid
    # was always assigned to index 0.  Use +infinity as the sentinel.
    distance = float('inf')
    for i in range(self.n_clusters):
        current_distance = linalg.norm(x - self.centroids[i])
        if current_distance < distance:
            closest_centroid = i
            distance = current_distance
    return closest_centroid
def encode(val, base, minlen=0):
    """Given an integer value (val) and a numeric base (base),
    encode it into the string of symbols with the given base
    (with minimum length minlen).

    Returns the (left-padded) re-encoded val as a string.
    """
    base, minlen = int(base), int(minlen)
    code_string = get_code_string(base)
    digits = []
    while val > 0:
        val, rem = divmod(val, base)
        digits.append(code_string[rem])
    result = "".join(reversed(digits))
    # Left-pad with the zero symbol up to minlen.
    return code_string[0] * max(minlen - len(result), 0) + result
def sign_statement(self, statement, node_name, key_file, node_id, id_attr):
    """Sign an XML statement.

    The parameters actually used in this CryptoBackend
    implementation are:

    :param statement: XML as string
    :param node_name: Name of the node to sign
    :param key_file: xmlsec key_spec string(), filename,
        'pkcs11://' URI or PEM data
    :returns: Signed XML as string
    """
    import xmlsec
    import lxml.etree

    tree = xmlsec.parse_xml(statement)
    signed = xmlsec.sign(tree, key_file)
    signed_str = lxml.etree.tostring(signed, xml_declaration=False, encoding="UTF-8")
    # tostring() may return bytes; normalise to text for the caller.
    if isinstance(signed_str, six.string_types):
        return signed_str
    return signed_str.decode("utf-8")
def generate_random_string(cls, length):
    """Generate a [length]-character alphanumeric secret."""
    # Avoid symbols that could be mistaken for one another, e.g. 'I' and '1'.
    letters = "23456789ABCDEFGHJKLMNPQRSTUVWXYZ"
    return "".join(random.choice(letters) for _ in range(length))
def parse(self, raw_sections=None, namespaces=True, strip_comments=True, strip_whitespaces=True, strip_quotation_markers=True, raise_parsing_errors=True):
    """Process the file content and extract the sections / attributes
    as nested :class:`collections.OrderedDict` dictionaries or dictionaries.

    Lines are classified in order as comments, ``[section]`` headers,
    raw-section content, empty lines, or ``attribute<splitter>value``
    pairs; anything else is recorded as a parsing error.

    :param raw_sections: Sections whose content is stored verbatim
        instead of being parsed into attributes.
    :type raw_sections: tuple or list
    :param namespaces: Attributes and comments are namespaced.
    :type namespaces: bool
    :param strip_comments: Comments are stripped.
    :type strip_comments: bool
    :param strip_whitespaces: Whitespaces are stripped.
    :type strip_whitespaces: bool
    :param strip_quotation_markers: Attributes values quotation markers are stripped.
    :type strip_quotation_markers: bool
    :param raise_parsing_errors: Raise parsing errors.
    :type raise_parsing_errors: bool
    :return: SectionsFileParser instance.
    :rtype: SectionsFileParser
    """
    LOGGER.debug("> Reading sections from: '{0}'.".format(self.path))
    if not self.content:
        self.read()
    attributes = {} if not self.__preserve_order else OrderedDict()
    section = self.__defaults_section
    raw_sections = raw_sections or []
    commentId = 0
    for i, line in enumerate(self.content):
        # Comments matching.
        search = re.search(r"^\s*[{0}](?P<comment>.+)$".format("".join(self.__comment_limiters)), line)
        if search:
            if not strip_comments:
                # Comment keys are "<marker><id>", optionally namespaced
                # under the current section.
                comment = namespaces and foundations.namespace.set_namespace(section, "{0}{1}".format(self.__comment_marker, commentId), self.__namespace_splitter) or "{0}{1}".format(self.__comment_marker, commentId)
                self.__comments[comment] = {"id": commentId, "content": strip_whitespaces and search.group("comment").strip() or search.group("comment")}
            commentId += 1
            continue
        # Sections matching.
        search = re.search(r"^\s*\[(?P<section>.+)\]\s*$", line)
        if search:
            section = strip_whitespaces and search.group("section").strip() or search.group("section")
            if not self.__preserve_order:
                attributes = {}
            else:
                attributes = OrderedDict()
            rawContent = []
            continue
        if section in raw_sections:
            # NOTE(review): if a raw section precedes any [section] header,
            # ``rawContent`` is referenced before assignment — confirm that
            # raw sections are always explicit named sections.
            rawContent.append(line)
            attributes[self.__raw_section_content_identifier] = rawContent
        else:
            # Empty line matching.
            search = re.search(r"^\s*$", line)
            if search:
                continue
            # Attributes matching: "attr<splitter>value" first, then a
            # value-less "attr<splitter>" form.
            search = re.search(r"^(?P<attribute>.+?)[{0}](?P<value>.+)$".format("".join(self.__splitters)), line) or re.search(r"^(?P<attribute>.+?)[{0}]\s*$".format("".join(self.__splitters)), line)
            if search:
                attribute = search.group("attribute").strip() if strip_whitespaces else search.group("attribute")
                attribute = foundations.namespace.set_namespace(section, attribute, self.__namespace_splitter) if namespaces else attribute
                if len(search.groups()) == 2:
                    value = search.group("value").strip() if strip_whitespaces else search.group("value")
                    attributes[attribute] = value.strip("".join(self.__quotation_markers)) if strip_quotation_markers else value
                else:
                    # Value-less attribute.
                    attributes[attribute] = None
            else:
                self.__parsing_errors.append(foundations.exceptions.AttributeStructureParsingError("Attribute structure is invalid: {0}".format(line), i + 1))
        self.__sections[section] = attributes
    LOGGER.debug("> Sections: '{0}'.".format(self.__sections))
    LOGGER.debug("> '{0}' file parsing done!".format(self.path))
    if self.__parsing_errors and raise_parsing_errors:
        raise foundations.exceptions.FileStructureParsingError("{0} | '{1}' structure is invalid, parsing exceptions occured!".format(self.__class__.__name__, self.path))
    return self
def get_connections(self):
    """Return a list of site pairs that are Voronoi neighbors, along
    with their real-space distances."""
    con = []
    maxconn = self.max_connectivity
    n_rows, n_cols = maxconn.shape[0], maxconn.shape[1]
    for ii in range(n_rows):
        for jj in range(n_cols):
            # A non-zero connectivity entry marks a neighbor pair.
            if maxconn[ii][jj] != 0:
                con.append([ii, jj, self.s.get_distance(ii, jj)])
    return con
def marker_for_line(self, line):
    """Return the marker(s) displayed at the specified line number, if any.

    :param line: The marker line.
    :return: Marker or None
    :rtype: pyqode.core.Marker
    """
    block = self.editor.document().findBlockByNumber(line)
    user_data = block.userData()
    try:
        return user_data.messages
    except AttributeError:
        # No user data (or no messages attribute) on this block.
        return []
def psq2(d1, d2):
    """Compute the PSQ2 measure.

    Args:
        d1 (np.ndarray): The first distribution.
        d2 (np.ndarray): The second distribution.
    """
    d1, d2 = flatten(d1), flatten(d2)

    def score(p):
        # sum of p^2 * log(p * len(p)); NaNs from log(0) are coerced to 0.
        return sum((p ** 2) * np.nan_to_num(np.log(p * len(p))))

    return abs(score(d1) - score(d2))
def fake(args):
    """
    %prog fake input.bed

    Make fake `scaffolds.fasta`. Use case for this is that sometimes I would
    receive just the csv/bed file and I'd like to use path() out of the box.
    """
    from math import ceil
    from random import choice
    from Bio import SeqIO
    from Bio.Seq import Seq
    from Bio.SeqRecord import SeqRecord

    p = OptionParser(fake.__doc__)
    p.set_outfile()
    opts, args = p.parse_args(args)

    if len(args) != 1:
        sys.exit(not p.print_help())

    inputbed, = args
    bed = Bed(inputbed)
    recs = []
    for seqid, sb in bed.sub_beds():
        # Round each scaffold size up to the nearest kilobase and fill it
        # with random sequence.
        maxend = max(x.end for x in sb)
        size = int(ceil(maxend / 1000.) * 1000)
        seq = "".join(choice("ACGT") for _ in xrange(size))
        recs.append(SeqRecord(Seq(seq), id=seqid, description=""))

    fw = must_open(opts.outfile, "w")
    SeqIO.write(recs, fw, "fasta")
def remove_editor_tab(self, editor):
    """Remove the **Script_Editor_tabWidget** Widget tab with given editor.

    :param editor: Editor.
    :type editor: Editor
    :return: Method success.
    :rtype: bool
    """
    LOGGER.debug("> Removing tab with Editor '{0}'.".format(editor))
    tab_index = self.get_editorTab(editor)
    self.Script_Editor_tabWidget.removeTab(tab_index)
    return True
def make_skip_list(cts):
    """Return hand-defined list of place names to skip and not attempt to
    geolocate. If users would like to exclude country names, this would be
    the function to do it with."""
    # maybe make these non-country searches but don't discard, at least for
    # some (esp. bodies of water)
    special_terms = ["Europe", "West", "the West", "South Pacific", "Gulf of Mexico", "Atlantic", "the Black Sea", "Black Sea", "North America", "Mideast", "Middle East", "the Middle East", "Asia", "the Caucasus", "Africa", "Central Asia", "Balkans", "Eastern Europe", "Arctic", "Ottoman Empire", "Asia-Pacific", "East Asia", "Horn of Africa", "Americas", "North Africa", "the Strait of Hormuz", "Mediterranean", "East", "North", "South", "Latin America", "Southeast Asia", "Western Pacific", "South Asia", "Persian Gulf", "Central Europe", "Western Hemisphere", "Western Europe", "European Union (E.U.)", "EU", "European Union", "E.U.", "Asia-Pacific", "Europe", "Caribbean", "US", "U.S.", "Persian Gulf", "West Africa", "North", "East", "South", "West", "Western Countries"]
    # Some words are recurring spacy problems...
    spacy_problems = ["Kurd", "Qur'an"]
    # skip_list = list(cts.keys()) + special_terms
    return set(special_terms + spacy_problems)
def evaluate(self):
    """Run the engine on the expression.

    This method performs alignment, which is necessary no matter what
    engine is being used; thus its implementation is in the base class.

    Returns
    -------
    obj : object
        The result of the passed expression.
    """
    if not self._is_aligned:
        self.result_type, self.aligned_axes = _align(self.expr.terms)
    # make sure no names in resolvers and locals/globals clash
    raw_result = self._evaluate()
    return _reconstruct_object(self.result_type, raw_result, self.aligned_axes, self.expr.terms.return_type)
def decrypt(self, message, **kwargs):
    """Decrypt the contents of a string or file-like object ``message``.

    :type message: file or str or :class:`io.BytesIO`
    :param message: A string or file-like object to decrypt.
    :param bool always_trust: Instruct GnuPG to ignore trust checks.
    :param str passphrase: The passphrase for the secret key used for decryption.
    :param str output: A filename to write the decrypted output to.
    """
    stream = _make_binary_stream(message, self._encoding)
    try:
        result = self.decrypt_file(stream, **kwargs)
    finally:
        # BUG FIX: close the stream even when decrypt_file raises, so the
        # binary buffer is never leaked on the error path.
        stream.close()
    return result
def getBestAngle(entities, current_yaw, current_health):
    '''Scan through 360 degrees, looking for the best direction in which to take the next step.

    Scores agent_search_resolution candidate headings by combining turn
    cost, proximity to other entities (mobs vs. goal), and proximity to
    the arena edges, then returns the chosen heading in degrees.
    '''
    us = findUs(entities)
    scores = []
    # Normalise current yaw:
    while current_yaw < 0:
        current_yaw += 360
    while current_yaw > 360:
        current_yaw -= 360
    # Look for best option
    for i in range(agent_search_resolution):
        # Calculate cost of turning: penalise the smaller of the two arc
        # distances between the candidate yaw and the current yaw.
        ang = 2 * math.pi * (old_div(i, float(agent_search_resolution)))
        yaw = i * 360.0 / float(agent_search_resolution)
        yawdist = min(abs(yaw - current_yaw), 360 - abs(yaw - current_yaw))
        turncost = agent_turn_weight * yawdist
        score = turncost
        # Calculate entity proximity cost for new (x, z):
        # NOTE(review): z uses `agent_stepsize * math.cos(ang)` but x uses
        # `agent_stepsize - math.sin(ang)`; the asymmetry looks like a typo
        # for `agent_stepsize * -math.sin(ang)` — confirm against upstream.
        x = us["x"] + agent_stepsize - math.sin(ang)
        z = us["z"] + agent_stepsize * math.cos(ang)
        for ent in entities:
            # Squared distance from the candidate position to this entity.
            dist = (ent["x"] - x) * (ent["x"] - x) + (ent["z"] - z) * (ent["z"] - z)
            if (dist == 0):
                continue
            weight = 0.0
            if ent["name"] == MOB_TYPE:
                weight = agent_mob_weight
                dist -= 1  # assume mobs are moving towards us
                if dist <= 0:
                    dist = 0.1
            elif ent["name"] == GOAL_TYPE:
                # Weight the goal by remaining health (out of 20).
                weight = agent_goal_weight * current_health / 20.0
            score += old_div(weight, float(dist))
        # Calculate cost of proximity to edges (quartic falloff):
        distRight = (2 + old_div(ARENA_WIDTH, 2)) - x
        distLeft = (-2 - old_div(ARENA_WIDTH, 2)) - x
        distTop = (2 + old_div(ARENA_BREADTH, 2)) - z
        distBottom = (-2 - old_div(ARENA_BREADTH, 2)) - z
        score += old_div(agent_edge_weight, float(distRight * distRight * distRight * distRight))
        score += old_div(agent_edge_weight, float(distLeft * distLeft * distLeft * distLeft))
        score += old_div(agent_edge_weight, float(distTop * distTop * distTop * distTop))
        score += old_div(agent_edge_weight, float(distBottom * distBottom * distBottom * distBottom))
        scores.append(score)
    # Find best score:
    i = scores.index(max(scores))
    # Return as an angle in degrees:
    return i * 360.0 / float(agent_search_resolution)
def get(self):
    """:return: response stats dict"""
    stats = {}
    # Copy every simple attribute that has been set.
    simple_fields = ("start_date", "end_date", "aggregated_by", "sort_by_metric",
                     "sort_by_direction", "limit", "offset")
    for field in simple_fields:
        value = getattr(self, field)
        if value is not None:
            stats[field] = value
    if self.categories is not None:
        stats['categories'] = [category.get() for category in self.categories]
    return stats
def telnet_login(self, pri_prompt_terminator="#", alt_prompt_terminator=">", username_pattern=r"Login Name:", pwd_pattern=r"assword", delay_factor=1, max_loops=60):
    """Telnet login: can be username/password or just password."""
    # Forward every argument unchanged to the generic telnet login; this
    # subclass only customises the default prompt/username patterns.
    login_kwargs = dict(
        pri_prompt_terminator=pri_prompt_terminator,
        alt_prompt_terminator=alt_prompt_terminator,
        username_pattern=username_pattern,
        pwd_pattern=pwd_pattern,
        delay_factor=delay_factor,
        max_loops=max_loops,
    )
    super(HPProcurveTelnet, self).telnet_login(**login_kwargs)
def check_file(self, fs, info):
    # type: (FS, Info) -> bool
    """Check if a filename should be included.

    Override to exclude files from the walk.

    Arguments:
        fs (FS): A filesystem instance.
        info (Info): A resource info object.

    Returns:
        bool: `True` if the file should be included.
    """
    # An explicit exclude pattern wins over the include filter.
    excluded = self.exclude is not None and fs.match(self.exclude, info.name)
    if excluded:
        return False
    return fs.match(self.filter, info.name)
def login(self, username: str, password: str, course: int) -> requests.Response:
    """Log in to the course site.

    Returns the POST response from the login endpoint, or None when the
    request times out.
    """
    # Credentials and course selection expected by the login form.
    payload = {'name': username, 'passwd': password, 'rdoCourse': course}
    try:
        return self.__session.post(self.__url + '/Login', data=payload, timeout=0.5, verify=False)
    except requests.exceptions.Timeout:
        return None
def _compute_predicates(table_op, predicates, data, scope, **kwargs):
    """Compute the predicates for a table operation.

    Parameters
    ----------
    table_op : TableNode
    predicates : List[ir.ColumnExpr]
    data : pd.DataFrame
    scope : dict
    kwargs : dict

    Yields
    ------
    computed_predicate : pd.Series[bool]
        One boolean series per input predicate.

    Notes
    -----
    This handles the cases where the predicates are computed columns, in
    addition to the simple case of named columns coming directly from the
    input table.
    """
    for predicate in predicates:
        # Map each root table of the predicate to the data so that we compute
        # predicates on the result instead of any left or right tables if the
        # Selection is on a Join. Project data to only include columns from
        # the root table.
        root_tables = predicate.op().root_tables()
        # handle suffixes
        additional_scope = {}
        data_columns = frozenset(data.columns)
        for root_table in root_tables:
            mapping = remap_overlapping_column_names(table_op, root_table, data_columns)
            if mapping is not None:
                # Restrict and rename the frame so the predicate's column
                # names resolve against the root table's naming.
                new_data = data.loc[:, mapping.keys()].rename(columns=mapping)
            else:
                new_data = data
            additional_scope[root_table] = new_data
        new_scope = toolz.merge(scope, additional_scope)
        yield execute(predicate, scope=new_scope, **kwargs)
def discover_files(base_path, sub_path='', ext='', trim_base_path=False):
    """Discover all files with a certain extension in the given paths."""
    file_list = []
    for root, dirs, files in walk(path.join(base_path, sub_path)):
        if trim_base_path:
            # Report paths relative to base_path instead of absolute.
            root = path.relpath(root, base_path)
        matches = (path.join(root, file_name) for file_name in files if file_name.endswith(ext))
        file_list.extend(matches)
    return sorted(file_list)
def magic_string(string, filename=None):
    """Return a tuple-like list of possible content-type matches, arranged
    highest confidence match first.

    If filename is provided it will be used in the computation.

    :param string: string representation to check
    :param filename: original filename
    :return: list of possible matches, highest confidence first
    :raises ValueError: if the input string is empty
    """
    if not string:
        raise ValueError("Input was empty")
    head, foot = _string_details(string)
    ext = ext_from_filename(filename) if filename else None
    matches = _identify_all(head, foot, ext)
    matches.sort(key=lambda match: match.confidence, reverse=True)
    return matches
def _get_preseq_params(data, preseq_cmd, read_count):
    """Get preseq parameters through resources.

    If "step" or "extrap" limit are not provided, then calculate optimal
    values based on read count.
    """
    defaults = {
        'seg_len': 100000,     # maximum segment length when merging paired end bam reads
        'steps': 300,          # number of points on the plot
        'extrap_fraction': 3,  # extrapolate up to X times read_count
        'extrap': None,        # extrapolate up to X reads
        'step': None,          # step size (number of reads between points on the plot)
        'options': '',
    }
    params = {}
    # Known flags (short, long) that map onto explicit params below.
    main_opts = [("-e", "-extrap"), ("-l", "-seg_len"), ("-s", "-step")]
    other_opts = config_utils.get_resources("preseq", data["config"]).get("options", [])
    if isinstance(other_opts, str):
        other_opts = [other_opts]
    for sht, lng in main_opts:
        if sht in other_opts:
            i = other_opts.index(sht)
        elif lng in other_opts:
            i = other_opts.index(lng)
        else:
            i = None
        if i is not None:
            # The flag's value follows it; strip the leading '-' for the key
            # and remove both tokens from the pass-through option list.
            params[lng[1:]] = other_opts[i + 1]
            other_opts = other_opts[:i] + other_opts[i + 2:]
    params['options'] = ' '.join(other_opts)
    # Explicit resource keys override anything parsed from options.
    for k, v in config_utils.get_resources("preseq", data["config"]).items():
        if k != 'options':
            params[k] = v
    params['steps'] = params.get('steps', defaults['steps'])
    if preseq_cmd == 'c_curve':
        # c_curve works within the observed reads; no extrapolation.
        params['extrap_fraction'] = 1
    else:
        if params.get('step') is None:
            if params.get('extrap') is None:
                unrounded__extrap = read_count * params.get('extrap_fraction', defaults['extrap_fraction'])
                unrounded__step = unrounded__extrap // params['steps']
                if params.get('extrap_fraction') is not None:
                    # extrap_fraction explicitly provided
                    params['extrap'] = unrounded__extrap
                    params['step'] = unrounded__step
                else:
                    # Round the step down to one significant digit and derive
                    # the extrapolation limit from the rounded step.
                    power_of_10 = 10 ** math.floor(math.log(unrounded__step, 10))
                    rounded__step = int(math.floor(unrounded__step // power_of_10) * power_of_10)
                    rounded__extrap = int(rounded__step) * params['steps']
                    params['step'] = rounded__step
                    params['extrap'] = rounded__extrap
            else:
                params['step'] = params['extrap'] // params['steps']
        elif params.get('extrap') is None:
            params['extrap'] = params['step'] * params['steps']
    params['step'] = params.get('step', defaults['step'])
    params['extrap'] = params.get('extrap', defaults['extrap'])
    params['seg_len'] = params.get('seg_len', defaults['seg_len'])
    # NOTE(review): "extap" below looks like a typo for "extrap" in the log
    # message (runtime string left unchanged here).
    logger.info("Preseq: running {steps} steps of size {step}, extap limit {extrap}".format(**params))
    return params
def report(self, item_id, report_format="json"):
    """Retrieves the specified report for the analyzed item, referenced by item_id.

    Available formats include: json.

    :type  item_id: str
    :param item_id: File ID number
    :type  report_format: str
    :param report_format: Return format
    :rtype: dict
    :return: Dictionary representing the JSON parsed data or raw, for other
             formats / JSON parsing failure.
    """
    if report_format == "html":
        return "Report Unavailable"
    # grab an analysis id from the submission id.
    response = self._request("/analysis/sample/{sample_id}".format(sample_id=item_id), headers=self.headers)
    try:
        # The highest score is probably the most interesting analysis.
        # vmray uses this internally with sample_highest_vti_score so this
        # seems like a safe assumption.
        analysis_id = 0
        top_score = -1
        for analysis in response.json()['data']:
            if analysis['analysis_vti_score'] > top_score:
                top_score = analysis['analysis_vti_score']
                analysis_id = analysis['analysis_id']
    except (ValueError, KeyError) as e:
        # ValueError: body was not JSON; KeyError: unexpected response shape.
        raise sandboxapi.SandboxError(e)
    # assume report format json.
    response = self._request("/analysis/{analysis_id}/archive/logs/summary.json".format(analysis_id=analysis_id), headers=self.headers)
    # if response is JSON, return it as an object.
    try:
        return response.json()
    except ValueError:
        pass
    # otherwise, return the raw content.
    return response.content
def fit_predict(self, sequences, y=None):
    """Performs clustering on X and returns cluster labels.

    Parameters
    ----------
    sequences : list of array-like, each of shape [sequence_length, n_features]
        A list of multivariate timeseries. Each sequence may have
        a different length, but they all must have the same number
        of features.

    Returns
    -------
    Y : list of ndarray, each of shape [sequence_length,]
        Cluster labels
    """
    if hasattr(super(MultiSequenceClusterMixin, self), 'fit_predict'):
        # The wrapped estimator provides its own fit_predict: validate the
        # input sequences and delegate to it.
        check_iter_of_sequences(sequences, allow_trajectory=self._allow_trajectory)
        labels = super(MultiSequenceClusterMixin, self).fit_predict(sequences)
    else:
        # Otherwise fall back to a separate fit followed by predict.
        self.fit(sequences)
        labels = self.predict(sequences)
    if not isinstance(labels, list):
        # Labels came back concatenated; split them back per-sequence.
        labels = self._split(labels)
    return labels
def deserialize(serialized_material_description):
    # type: (dynamodb_types.BINARY_ATTRIBUTE) -> Dict[Text, Text]
    """Deserialize a serialized material description attribute into a material description dictionary.

    :param dict serialized_material_description: DynamoDB attribute value containing serialized material description.
    :returns: Material description dictionary
    :rtype: dict
    :raises InvalidMaterialDescriptionError: if malformed version
    :raises InvalidMaterialDescriptionVersionError: if unknown version is found
    """
    try:
        _raw_material_description = serialized_material_description[Tag.BINARY.dynamodb_tag]
        material_description_bytes = io.BytesIO(_raw_material_description)
        total_bytes = len(_raw_material_description)
    except (TypeError, KeyError):
        # Not a dict, or missing the binary tag entirely: malformed attribute.
        message = "Invalid material description"
        _LOGGER.exception(message)
        raise InvalidMaterialDescriptionError(message)
    # We don't currently do anything with the version, but do check to make
    # sure it is the one we know about.
    _read_version(material_description_bytes)
    material_description = {}
    try:
        # Entries are serialized as alternating length-prefixed name/value
        # pairs; read until the buffer is exhausted.
        while material_description_bytes.tell() < total_bytes:
            name = to_str(decode_value(material_description_bytes))
            value = to_str(decode_value(material_description_bytes))
            material_description[name] = value
    except struct.error:
        # Truncated or corrupted entry data.
        message = "Invalid material description"
        _LOGGER.exception(message)
        raise InvalidMaterialDescriptionError(message)
    return material_description
def verse_lookup(self, book_name, book_chapter, verse, cache_chapter=True):
    """Looks up a verse from online.recoveryversion.bible, then returns it."""
    chapter_verses = self.get_chapter(book_name, str(book_chapter), cache_chapter=cache_chapter)
    # Verse numbers are 1-based; the returned list is 0-based.
    index = int(verse) - 1
    return chapter_verses[index]
def MakeStatResponse(self, tsk_file, tsk_attribute=None, append_name=None):
    """Given a TSK info object make a StatEntry.

    Note that tsk uses two things to uniquely identify a data stream - the inode
    object given in tsk_file and the attribute object which may correspond to an
    ADS of this file for filesystems which support ADS. We store both of these
    in the stat response.

    Args:
      tsk_file: A TSK File object for the specified inode.
      tsk_attribute: A TSK Attribute object for the ADS. If None we use the main
        stream.
      append_name: If specified we append this name to the last element of the
        pathspec.

    Returns:
      A StatEntry which can be used to re-open this exact VFS node.
    """
    precondition.AssertOptionalType(append_name, Text)
    info = tsk_file.info
    response = rdf_client_fs.StatEntry()
    meta = info.meta
    if meta:
        response.st_ino = meta.addr
        for attribute in ["mode", "nlink", "uid", "gid", "size", "atime", "mtime", "ctime", "crtime"]:
            try:
                value = int(getattr(meta, attribute))
                if value < 0:
                    # Fold negative values into unsigned 32-bit range.
                    value &= 0xFFFFFFFF
                setattr(response, "st_%s" % attribute, value)
            except AttributeError:
                # Not every filesystem exposes every stat attribute.
                pass
    name = info.name
    child_pathspec = self.pathspec.Copy()
    if append_name is not None:
        # Append the name to the most inner pathspec
        child_pathspec.last.path = utils.JoinPath(child_pathspec.last.path, append_name)
    # NOTE(review): this dereferences meta.addr unconditionally, while the
    # stat fields above guard on `if meta:` — confirm meta can never be None
    # here.
    child_pathspec.last.inode = meta.addr
    if tsk_attribute is not None:
        child_pathspec.last.ntfs_type = int(tsk_attribute.info.type)
        child_pathspec.last.ntfs_id = int(tsk_attribute.info.id)
        child_pathspec.last.stream_name = tsk_attribute.info.name
        # Update the size with the attribute size.
        response.st_size = tsk_attribute.info.size
        default = rdf_paths.PathSpec.tsk_fs_attr_type.TSK_FS_ATTR_TYPE_DEFAULT
        last = child_pathspec.last
        if last.ntfs_type != default or last.ntfs_id:
            # This is an ads and should be treated as a file.
            # Clear all file type bits.
            response.st_mode &= ~self.stat_type_mask
            response.st_mode |= stat.S_IFREG
    else:
        # Main stream: clear any ADS-specific pathspec fields.
        child_pathspec.last.ntfs_type = None
        child_pathspec.last.ntfs_id = None
        child_pathspec.last.stream_name = None
    if name:
        # Encode the type onto the st_mode response
        response.st_mode |= self.FILE_TYPE_LOOKUP.get(int(name.type), 0)
    if meta:
        # What if the types are different? What to do here?
        response.st_mode |= self.META_TYPE_LOOKUP.get(int(meta.type), 0)
    # Write the pathspec on the response.
    response.pathspec = child_pathspec
    return response
def execute_go_cmd(self, cmd, gopath=None, args=None, env=None, workunit_factory=None, workunit_name=None, workunit_labels=None, **kwargs):
    """Runs a Go command that is optionally targeted to a Go workspace.

    If a `workunit_factory` is supplied the command will run in a work unit context.

    :param string cmd: Go command to execute, e.g. 'test' for `go test`
    :param string gopath: An optional $GOPATH which points to a valid Go workspace from which to run
                          the command.
    :param list args: An optional list of arguments and flags to pass to the Go command.
    :param dict env: A custom environment to launch the Go command in. If `None` the current
                     environment is used.
    :param workunit_factory: An optional callable that can produce a `WorkUnit` context
    :param string workunit_name: An optional name for the work unit; defaults to the `cmd`
    :param list workunit_labels: An optional sequence of labels for the work unit.
    :param kwargs: Keyword arguments to pass through to `subprocess.Popen`.
    :returns: A tuple of the exit code and the go command that was run.
    :rtype: (int, :class:`GoDistribution.GoCommand`)
    """
    go_cmd = self.create_go_cmd(cmd, gopath=gopath, args=args)
    if workunit_factory is None:
        # BUG FIX: `env` was previously dropped on this path, silently running
        # the command in the ambient environment; forward it to spawn().
        # NOTE(review): this path returns only the exit code, not the
        # (returncode, go_cmd) tuple the docstring promises — kept as-is for
        # backward compatibility; confirm callers before unifying.
        return go_cmd.spawn(env=env, **kwargs).wait()
    else:
        name = workunit_name or cmd
        labels = [WorkUnitLabel.TOOL] + (workunit_labels or [])
        with workunit_factory(name=name, labels=labels, cmd=str(go_cmd)) as workunit:
            # Stream the subprocess output into the work unit's log sinks.
            process = go_cmd.spawn(env=env, stdout=workunit.output('stdout'), stderr=workunit.output('stderr'), **kwargs)
            returncode = process.wait()
            workunit.set_outcome(WorkUnit.SUCCESS if returncode == 0 else WorkUnit.FAILURE)
            return returncode, go_cmd
def custom_size(self, minimum: int = 40, maximum: int = 62) -> int:
    """Generate clothing size using custom format.

    :param minimum: Minimum value.
    :param maximum: Maximum value.
    :return: Clothing size.
    """
    # Uniformly pick an integer size in the inclusive range [minimum, maximum].
    size = self.random.randint(minimum, maximum)
    return size
def out_64(library, session, space, offset, data, extended=False):
    """Write in an 64-bit value from the specified memory space and offset.

    Corresponds to viOut64* functions of the VISA library.

    :param library: the visa library wrapped by ctypes.
    :param session: Unique logical identifier to a session.
    :param space: Specifies the address space. (Constants.*SPACE*)
    :param offset: Offset (in bytes) of the address or register from which to read.
    :param data: Data to write to bus.
    :param extended: Use 64 bits offset independent of the platform.
    :return: return value of the library call.
    :rtype: :class:`pyvisa.constants.StatusCode`
    """
    # Select the extended (platform-independent 64-bit offset) variant when
    # requested, then issue the call.
    writer = library.viOut64Ex if extended else library.viOut64
    return writer(session, space, offset, data)
def get_char(self, offset=0):
    """Return the current character in the working string."""
    # Out-of-range reads yield the empty string rather than raising.
    if self.has_space(offset=offset):
        return self.string[self.pos + offset]
    return ''
def get_candidate_votes(self, candidate):
    """Get all votes attached to a CandidateElection for a Candidate in
    this election."""
    # Resolve the join row linking this election to the candidate, then
    # return every vote hanging off it.
    ce = CandidateElection.objects.get(candidate=candidate, election=self)
    return ce.votes.all()
def format_name(self):
    """Formats the media file based on enhanced metadata.

    The actual name of the file and even the name of the directory
    structure where the file is to be stored.
    """
    self.formatted_filename = formatter.format_filename(
        self.series_name,
        self.season_number,
        self.episode_numbers,
        self.episode_names,
        self.extension,
    )
    # Default to the file's current directory; only re-route when the
    # move-files feature is enabled in configuration.
    if cfg.CONF.move_files_enabled:
        self.formatted_dirname = formatter.format_location(self.series_name, self.season_number)
    else:
        self.formatted_dirname = self.location
    self.out_location = os.path.join(self.formatted_dirname, self.formatted_filename)
def run_multiple_column_experiment():
    """Compare the ideal observer against a multi-column sensorimotor network."""
    # Create the objects
    # Parameter sweeps: unique features, objects, locations, and column counts.
    # NOTE: this function uses the Python 2 print statement below.
    featureRange = [5, 10, 20, 30]
    pointRange = 1
    objectRange = [100]
    numLocations = [10]
    numPoints = 10
    numTrials = 10
    columnRange = [1, 2, 3, 4, 5, 6, 7, 8]
    useLocation = 1
    resultsDir = os.path.dirname(os.path.realpath(__file__))
    # Build one argument dict per (columns, objects, locations, features, trial).
    args = []
    for c in reversed(columnRange):
        for o in reversed(objectRange):
            for l in numLocations:
                for f in featureRange:
                    for t in range(numTrials):
                        args.append({"numObjects": o, "numLocations": l, "numFeatures": f, "numColumns": c, "trialNum": t, "pointRange": pointRange, "numPoints": numPoints, "useLocation": useLocation})
    print "Number of experiments:", len(args)
    idealResultsFile = os.path.join(resultsDir, "ideal_multi_column_useLocation_{}.pkl".format(useLocation))
    # Run the ideal-classifier experiments across all cores.
    pool = Pool(processes=cpu_count())
    result = pool.map(run_ideal_classifier, args)
    # Pickle results for later use
    with open(idealResultsFile, "wb") as f:
        cPickle.dump(result, f)
    # Run the HTM multi-column experiments with matching parameters.
    htmResultsFile = os.path.join(resultsDir, "column_convergence_results.pkl")
    runExperimentPool(numObjects=objectRange, numLocations=[10], numFeatures=featureRange, numColumns=columnRange, numPoints=10, nTrials=numTrials, numWorkers=cpu_count(), resultsName=htmResultsFile)
    with open(htmResultsFile, "rb") as f:
        results = cPickle.load(f)
    with open(idealResultsFile, "rb") as f:
        resultsIdeal = cPickle.load(f)
    # Overlay both result sets; the dashed lines are the ideal observer.
    plt.figure()
    plotConvergenceByColumn(results, columnRange, featureRange, numTrials)
    plotConvergenceByColumn(resultsIdeal, columnRange, featureRange, numTrials, "--")
    plt.savefig('plots/ideal_observer_multiple_column.pdf')
async def is_object_synced_to_cn(self, client, pid):
    """Check if object with {pid} has successfully synced to the CN.

    CNRead.describe() is used as it's a light-weight HTTP HEAD request.
    This assumes that the call is being made over a connection that has been
    authenticated and has read or better access on the given object if it exists.
    """
    try:
        await client.describe(pid)
    except d1_common.types.exceptions.DataONEException:
        # Object is not (yet) visible/readable on the CN.
        return False
    else:
        return True
def find_all(source, substring, start=None, end=None, overlap=False):
    """Return every location a substring can be found in a source string.

    :param source: The source string to search.
    :param substring: The substring to look for.
    :param start: Start offset to read from (default: start of ``source``).
    :param end: End offset to stop reading at (default: end of ``source``).
    :param overlap: Whether to return overlapping matches (default: False).
    :returns: List of match offsets.
    """
    # list(...) materializes the lazy iterator directly, avoiding the
    # redundant element-by-element identity comprehension.
    return list(find_all_iter(source, substring, start, end, overlap))
def get_work_item_template(self, project, type, fields=None, as_of=None, expand=None):
    """GetWorkItemTemplate.

    [Preview API] Returns a single work item from a template.

    :param str project: Project ID or project name
    :param str type: The work item type name
    :param str fields: Comma-separated list of requested fields
    :param datetime as_of: AsOf UTC date time string
    :param str expand: The expand parameters for work item attributes. Possible options are {None, Relations, Fields, Links, All}.
    :rtype: :class:`<WorkItem> <azure.devops.v5_1.work-item-tracking.models.WorkItem>`
    """
    # URL route segments: only include values that were actually supplied.
    route_values = {}
    if project is not None:
        route_values['project'] = self._serialize.url('project', project, 'str')
    if type is not None:
        route_values['type'] = self._serialize.url('type', type, 'str')
    # Optional query-string parameters.
    query_parameters = {}
    if fields is not None:
        query_parameters['fields'] = self._serialize.query('fields', fields, 'str')
    if as_of is not None:
        query_parameters['asOf'] = self._serialize.query('as_of', as_of, 'iso-8601')
    if expand is not None:
        query_parameters['$expand'] = self._serialize.query('expand', expand, 'str')
    # location_id identifies the workItemTemplate REST endpoint.
    response = self._send(http_method='GET', location_id='62d3d110-0047-428c-ad3c-4fe872c91c74', version='5.1-preview.3', route_values=route_values, query_parameters=query_parameters)
    return self._deserialize('WorkItem', response)
def load_nameserver_credentials(self, working_directory, num_tries=60, interval=1):
    """Loads the nameserver credentials in cases where master and workers share a filesystem.

    Parameters
    ----------
    working_directory: str
        the working directory for the HPB run (see master)
    num_tries: int
        number of attempts to find the file (default 60)
    interval: float
        waiting period between the attempts

    Raises
    ------
    RuntimeError
        if the credentials file never appears within ``num_tries`` attempts.
    """
    fn = os.path.join(working_directory, 'HPB_run_%s_pyro.pkl' % self.run_id)
    for i in range(num_tries):
        try:
            with open(fn, 'rb') as fh:
                self.nameserver, self.nameserver_port = pickle.load(fh)
            return
        except FileNotFoundError:
            # The master may not have written the file yet; wait and retry.
            # (Fixed "trail" -> "trial" typo in the log message; also removed
            # the original no-op bare `except: raise` clause.)
            self.logger.warning('config file %s not found (trial %i/%i)' % (fn, i + 1, num_tries))
            time.sleep(interval)
    raise RuntimeError("Could not find the nameserver information, aborting!")
def _mpl_marker2pgfp_marker(data, mpl_marker, marker_face_color):
    """Translates a marker style of matplotlib to the corresponding style
    in PGFPlots.

    :param data: shared translation state; ``data["tikz libs"]`` collects the
        TikZ libraries the output requires.
    :param mpl_marker: matplotlib marker character/string.
    :param marker_face_color: matplotlib marker face color (or None).
    :returns: ``(data, pgfplots_marker, marker_options)``; the marker is None
        when no PGFPlots equivalent exists.
    """
    # try default list
    try:
        pgfplots_marker = _MP_MARKER2PGF_MARKER[mpl_marker]
    except KeyError:
        pass
    else:
        if (marker_face_color is not None) and pgfplots_marker == "o":
            # Filled circle needs the plotmarks library.
            pgfplots_marker = "*"
            data["tikz libs"].add("plotmarks")
        marker_options = None
        return (data, pgfplots_marker, marker_options)
    # try plotmarks list
    try:
        pgfplots_marker, marker_options = _MP_MARKER2PLOTMARKS[mpl_marker]
    except KeyError:
        # There's no equivalent for the pixel marker (,) in Pgfplots.
        pass
    else:
        # BUG FIX: only register the plotmarks library after a successful
        # lookup; previously it was added even for unknown markers that fall
        # through to the (data, None, None) return below.
        data["tikz libs"].add("plotmarks")
        if (marker_face_color is not None and (not isinstance(marker_face_color, str) or marker_face_color.lower() != "none") and pgfplots_marker not in ["|", "-", "asterisk", "star"]):
            pgfplots_marker += "*"
        return (data, pgfplots_marker, marker_options)
    return data, None, None
def changed(self):
    """Tells if module configuration has been changed (it needs to be saved)."""
    # Not installed modules cannot issue a change to save
    if not self.installed:
        return False
    # A module-status change trumps any model-level change.
    if self._info._changed:
        return True
    # Any pending model change or deletion marks the module as dirty.
    return any(
        model.objects.changed().count() or model.objects.deleted().count()
        for model in self.models()
    )
def add_to_class(self, cls, name):
    '''Overrides the base class to add a PhoneNumberDescriptor rather than the standard FieldDescriptor'''
    self.model_class = cls
    setattr(cls, name, PhoneNumberDescriptor(self))
    # Mark this field instance as bound to a model class.
    self._bound = True
def index(self):
    """Get the index of the day of the week.

    Returns
    -------
    IntegerValue
        The index of the day of the week. Ibis follows pandas conventions,
        where **Monday = 0 and Sunday = 6**.
    """
    import ibis.expr.operations as ops

    # Wrap the underlying expression argument in a DayOfWeekIndex operation
    # and lift it back into expression form.
    arg = self.op().arg
    return ops.DayOfWeekIndex(arg).to_expr()
def properties(self):
    """The properties property.

    Returns:
        (hash). the property value. (defaults to: {})
    """
    try:
        return self._values['properties']
    except KeyError:
        # Lazily seed the cache with a deep copy so mutations of the returned
        # hash never leak back into the shared defaults.
        self._values['properties'] = copy.deepcopy(self._defaults['properties'])
        return self._values['properties']
def has_frames(self, destination):
    """Whether specified queue has any frames.

    @param destination: The queue name (destination).
    @type destination: C{str}

    @return: Whether there are any frames in the specified queue.
    @rtype: C{bool}
    """
    # Unknown queues trivially have no frames.
    if destination not in self.queue_metadata:
        return False
    return bool(self.queue_metadata[destination]['frames'])
def slugify(s):
    """Converts the given string to a URL slug."""
    # Drop apostrophes, lowercase, and strip diacritics first.
    normalized = strip_accents(s.replace("'", '').lower())
    # Collapse every run of non-alphanumerics into a single space, then turn
    # the remaining separators into hyphens.
    collapsed = re.sub('[^a-z0-9]+', ' ', normalized)
    return collapsed.strip().replace(' ', '-')
def plot_single_configuration(self, config_nr, sens_file):
    """plot sensitivity distribution with center of mass for
    a single configuration. The electrodes used are colored.

    Parameters
    ----------
    config_nr : int
        number of configuration
    sens_file : string, file path
        filename to sensitvity file
    """
    # Load sensitivity values (file columns 2 and 3) onto the mesh elements.
    indices = elem.load_column_file_to_elements_advanced(sens_file, [2, 3], False, False)
    # Plot options: symmetric colorbar in [-1, 1]; asinh scaling is applied
    # in the plot call below.
    elem.plt_opt.title = ''
    elem.plt_opt.reverse = True
    elem.plt_opt.cbmin = -1
    elem.plt_opt.cbmax = 1
    elem.plt_opt.cblabel = r'fill'
    elem.plt_opt.xlabel = 'x (m)'
    elem.plt_opt.ylabel = 'z (m)'
    fig = plt.figure(figsize=(5, 7))
    ax = fig.add_subplot(111)
    ax, pm, cb = elem.plot_element_data_to_ax(indices[0], ax, scale='asinh', no_cb=False, )
    # Mark the sensitivity center of mass with a white star.
    ax.scatter(self.sens_centers[config_nr, 0], self.sens_centers[config_nr, 1], marker='*', s=50, color='w', edgecolors='w', )
    self.color_electrodes(config_nr, ax)
    # Output
    # Derive the output filename from the sensitivity file name.
    sensf = sens_file.split('sens')[-1]
    sensf = sensf.split('.')[0]
    out = 'sens_center_' + sensf + '.png'
    fig.savefig(out, bbox_inches='tight', dpi=300)
    # Release figure resources; this method is typically called in a loop.
    fig.clf()
    plt.close(fig)
def SaveDataToFD(self, raw_data, fd):
    """Merge the raw data with the config file and store it."""
    # Serialize to YAML text, then write UTF-8 bytes to the descriptor.
    serialized = yaml.Dump(raw_data)
    fd.write(serialized.encode("utf-8"))
def createCatalog(config, roi=None, lon=None, lat=None):
    """Create a catalog object"""
    import ugali.observation.catalog

    # Build a default region of interest from (lon, lat) when the caller
    # does not supply one.
    if roi is None:
        roi = createROI(config, lon, lat)
    return ugali.observation.catalog.Catalog(config, roi=roi)
def add_magic_table_from_data(self, dtype, data):
    """Add a MagIC table to the contribution from a data list.

    Parameters
    ----------
    dtype : str
        MagIC table type, i.e. 'specimens'
    data : list of dicts
        data list with format [{'key1': 'val1', ...}, {'key1': 'val2', ...}, ...]
    """
    table = MagicDataFrame(dtype=dtype, data=data)
    self.tables[dtype] = table
    # Measurement tables additionally need a sequence column.
    if dtype == 'measurements':
        table.add_sequence()
    return dtype, table
def save(self, revision):
    """
    :param revision:
    :type revision: :class:`revision.data.Revision`
    """
    # Reject anything that is not a Revision before touching state.
    if not isinstance(revision, Revision):
        raise InvalidArgType()
    self.state.update(revision)
def build_spec(user, repo, sha=None, prov=None, extraMetadata=[]):
    """Build grlc specification for the given github user/repo.

    NOTE(review): ``extraMetadata`` is a mutable default argument; it is only
    read here, but confirm no callee mutates it before relying on that.
    """
    loader = grlc.utils.getLoader(user, repo, sha=sha, prov=prov)
    files = loader.fetchFiles()
    raw_repo_uri = loader.getRawRepoUri()
    # Fetch all .rq files
    items = []
    # Only these source-file extensions are turned into API calls.
    allowed_ext = ["rq", "sparql", "json", "tpf"]
    for c in files:
        glogger.debug('>>>>>>>>>>>>>>>>>>>>>>>>>c_name: {}'.format(c['name']))
        extension = c['name'].split('.')[-1]
        if extension in allowed_ext:
            # The call name is the filename without its extension.
            call_name = c['name'].split('.')[0]
            # Retrieve extra metadata from the query decorators
            query_text = loader.getTextFor(c)
            item = None
            if extension == "json":
                # JSON sources carry a structured query description.
                query_text = json.loads(query_text)
            if extension in ["rq", "sparql", "json"]:
                glogger.debug("===================================================================")
                glogger.debug("Processing SPARQL query: {}".format(c['name']))
                glogger.debug("===================================================================")
                item = process_sparql_query_text(query_text, loader, call_name, extraMetadata)
            elif "tpf" == extension:
                glogger.debug("===================================================================")
                glogger.debug("Processing TPF query: {}".format(c['name']))
                glogger.debug("===================================================================")
                item = process_tpf_query_text(query_text, raw_repo_uri, call_name, extraMetadata)
            else:
                glogger.info("Ignoring unsupported source call name: {}".format(c['name']))
            if item:
                items.append(item)
    return items
def copy_dependency_images(tile):
    """Copy all firmware images from dependencies into the build/output folder.

    NOTE(review): the original docstring said "documentation", but the body
    copies ``firmware_image`` products — docstring corrected to match.
    """
    # Bare SCons environment; we only need its Command() builder.
    env = Environment(tools=[])
    outputbase = os.path.join('build', 'output')
    depbase = os.path.join('build', 'deps')
    for dep in tile.dependencies:
        depdir = os.path.join(depbase, dep['unique_id'])
        outputdir = os.path.join(outputbase)
        deptile = IOTile(depdir)
        # Register one SCons copy command per firmware image product.
        for image in deptile.find_products('firmware_image'):
            name = os.path.basename(image)
            input_path = os.path.join(depdir, name)
            output_path = os.path.join(outputdir, name)
            env.Command([output_path], [input_path], Copy("$TARGET", "$SOURCE"))
def process_actions(action_ids=None):
    """Process actions in the publishing schedule.

    Returns the number of actions processed.
    """
    # Everything whose scheduled time has passed is due.
    due_actions = PublishAction.objects.prefetch_related(
        'content_object',
    ).filter(scheduled_time__lte=timezone.now())
    if action_ids is not None:
        # Restrict processing to an explicit subset of actions.
        due_actions = due_actions.filter(id__in=action_ids)
    processed = 0
    for action in due_actions:
        action.process_action()
        # Actions are one-shot: remove each one after it has run.
        action.delete()
        processed += 1
    return processed
def projects_from_cli(args):
    """Take arguments through the CLI can create a list of specified projects."""
    description = ('Determine if a set of project dependencies will work with ' 'Python 3')
    parser = argparse.ArgumentParser(description=description)
    req_help = 'path(s) to a pip requirements file (e.g. requirements.txt)'
    parser.add_argument('--requirements', '-r', nargs='+', default=(), help=req_help)
    meta_help = 'path(s) to a PEP 426 metadata file (e.g. PKG-INFO, pydist.json)'
    parser.add_argument('--metadata', '-m', nargs='+', default=(), help=meta_help)
    parser.add_argument('--projects', '-p', nargs='+', default=(), help='name(s) of projects to test for Python 3 support')
    parser.add_argument('--verbose', '-v', action='store_true', help='verbose output (e.g. list compatibility overrides)')
    parsed = parser.parse_args(args)
    # At least one source of project names must be supplied.
    if not (parsed.requirements or parsed.metadata or parsed.projects):
        parser.error("Missing 'requirements', 'metadata', or 'projects'")
    projects = []
    if parsed.verbose:
        # Verbose mode surfaces INFO-level messages from the 'ciu' logger.
        logging.getLogger('ciu').setLevel(logging.INFO)
    projects.extend(projects_.projects_from_requirements(parsed.requirements))
    # Read each metadata file fully into memory before parsing.
    metadata = []
    for metadata_path in parsed.metadata:
        with io.open(metadata_path) as file:
            metadata.append(file.read())
    projects.extend(projects_.projects_from_metadata(metadata))
    # Normalize explicitly named projects to canonical PyPI form.
    projects.extend(map(packaging.utils.canonicalize_name, parsed.projects))
    return projects
def _lti16(ins):
    '''Compares & pops top 2 operands out of the stack, and checks
    if the 1st operand < 2nd operand (top of the stack).
    Pushes 0 if False, 1 if True.

    16 bit signed version
    '''
    # Emit code to load both 16-bit operands, then delegate the signed
    # comparison to the runtime routine and push its boolean result.
    output = _16bit_oper(ins.quad[2], ins.quad[3])
    output.extend(['call __LTI16', 'push af'])
    # Record the runtime dependency on the comparison routine.
    REQUIRES.add('lti16.asm')
    return output
def buff(self, target, buff, **kwargs):
    """Summon ``buff`` and apply it to ``target``.

    If keyword arguments are given, attempt to set the given
    values to the buff. Example:
        player.buff(target, health=random.randint(1, 5))

    NOTE: Any Card can buff any other Card. The controller of the
    Card that buffs the target becomes the controller of the buff.
    """
    summoned = self.controller.card(buff, self)
    summoned.source = self
    summoned.apply(target)
    # Forward any extra attribute values onto the freshly created buff.
    for attr, value in kwargs.items():
        setattr(summoned, attr, value)
    return summoned
def add_columns(self, data, column_names=None, inplace=False):
    """Returns an SFrame with multiple columns added. The number of
    elements in all columns must match the length of every other column of
    the SFrame.

    If inplace == False (default) this operation does not modify the
    current SFrame, returning a new SFrame.

    If inplace == True, this operation modifies the current
    SFrame, returning self.

    Parameters
    ----------
    data : list[SArray] or SFrame
        The columns to add.
    column_names: list of string, optional
        A list of column names. All names must be specified. ``column_names`` is
        ignored if data is an SFrame.
    inplace : bool, optional. Defaults to False.
        Whether the SFrame is modified in place.

    Returns
    -------
    out : SFrame
        The current SFrame.

    See Also
    --------
    add_column

    Examples
    --------
    >>> sf = turicreate.SFrame({'id': [1, 2, 3], 'val': ['A', 'B', 'C']})
    >>> sf2 = turicreate.SFrame({'species': ['cat', 'dog', 'fossa'],
    ...                          'age': [3, 5, 9]})
    >>> res = sf.add_columns(sf2)
    >>> res
    | id | val | age | species |
    | 1  | A   | 3   | cat     |
    | 2  | B   | 5   | dog     |
    | 3  | C   | 9   | fossa   |
    [3 rows x 4 columns]
    """
    datalist = data
    if isinstance(data, SFrame):
        # SFrame input: take its columns and names directly; supplied
        # column_names are ignored.
        other = data
        datalist = [other.select_column(name) for name in other.column_names()]
        column_names = other.column_names()
        my_columns = set(self.column_names())
        for name in column_names:
            if name in my_columns:
                raise ValueError("Column '" + name + "' already exists in current SFrame")
    else:
        # List-of-SArray input: validate types and names up front.
        if not _is_non_string_iterable(datalist):
            raise TypeError("datalist must be an iterable")
        if not _is_non_string_iterable(column_names):
            raise TypeError("column_names must be an iterable")
        if not all([isinstance(x, SArray) for x in datalist]):
            raise TypeError("Must give column as SArray")
        if not all([isinstance(x, str) for x in column_names]):
            raise TypeError("Invalid column name in list : must all be str")
    # inplace mutates self; otherwise work on (and return) a copy.
    if inplace:
        ret = self
    else:
        ret = self.copy()
    with cython_context():
        ret.__proxy__.add_columns([x.__proxy__ for x in datalist], column_names)
        # Invalidate the cached materialization after the proxy changed.
        ret._cache = None
    return ret
def PC_varExplained(Y, standardized=True):
    """Run PCA and calculate the cumulative fraction of variance.

    Args:
        Y: phenotype values
        standardized: if True, phenotypes are standardized
    Returns:
        var: cumulative distribution of variance explained
    """
    # figuring out the number of latent factors
    if standardized:
        # In-place standardization: mutates the caller's array Y.
        Y -= Y.mean(0)
        Y /= Y.std(0)
    covY = sp.cov(Y)
    # The small ridge keeps the eigendecomposition numerically stable.
    S, U = linalg.eigh(covY + 1e-6 * sp.eye(covY.shape[0]))
    # eigh returns eigenvalues in ascending order; reverse for descending.
    S = S[::-1]
    # NOTE(review): the cumulative sum only covers the first n-1 components,
    # so the last entry never reaches 1.0 — confirm whether
    # range(1, S.shape[0] + 1) was intended.
    rv = np.array([S[0:i].sum() for i in range(1, S.shape[0])])
    rv /= S.sum()
    return rv
def get_date_range_this_year(now=None):
    """Return the starting and ending date of the current school year."""
    if now is None:
        now = datetime.datetime.now().date()
    # TODO: don't hardcode these month/day values.
    if now.month <= settings.YEAR_TURNOVER_MONTH:
        # Before the turnover month: the school year began last August.
        start = datetime.datetime(now.year - 1, 8, 1)
        end = datetime.datetime(now.year, 7, 1)
    else:
        start = datetime.datetime(now.year, 8, 1)
        end = datetime.datetime(now.year + 1, 7, 1)
    return timezone.make_aware(start), timezone.make_aware(end)
def resolve(uri, include_paths):
    """Resolve an include ``uri`` against ``include_paths`` and return its content.

    Returns None when the URI cannot be parsed or the referenced file is not
    found on any of the include paths.
    """
    include_filename, line_ranges, symbol = __parse_include(uri)
    if include_filename is None:
        return None
    include_path = find_file(include_filename, include_paths)
    if include_path is None:
        return None
    # Extract only the requested line ranges / symbol from the file.
    return __get_content(include_path.strip(), line_ranges, symbol)
def char_repl(x):
    """Replace every character above U+00FF with '?'.

    A little embarrassing: couldn't figure out how to avoid crashes when
    hardcore unicode occurs in description fields...

    :param x: input string
    :returns: ``x`` with each character whose code point exceeds 255
              replaced by ``'?'``
    """
    # str.join builds the result in one pass instead of the quadratic
    # character-by-character += concatenation.
    return "".join(c if ord(c) <= 255 else "?" for c in x)
def parse(self, rule: str):
    """Parses policy to tree.

    Translate a policy written in the policy language into a tree of
    Check objects.
    """
    # Empty rule means always accept
    if not rule:
        return checks.TrueCheck()
    # Feed each token through the shift/reduce parser state machine.
    for token, value in self._parse_tokenize(rule):
        self._shift(token, value)
    try:
        # Reading `result` raises ValueError when the tokens did not reduce
        # to a single check.
        return self.result
    except ValueError:
        LOG.exception('Failed to understand rule %r', rule)
        # Fail closed
        return checks.FalseCheck()
def sort(self, key_or_list, direction=None):
    """Sorts this cursor's results.

    Pass a field name and a direction, either
    :data:`~pymongo.ASCENDING` or :data:`~pymongo.DESCENDING`::

        for doc in collection.find().sort('field', pymongo.ASCENDING):
            print(doc)

    To sort by multiple fields, pass a list of (key, direction) pairs::

        for doc in collection.find().sort([
                ('field1', pymongo.ASCENDING),
                ('field2', pymongo.DESCENDING)]):
            print(doc)

    Beginning with MongoDB version 2.6, text search results can be
    sorted by relevance::

        cursor = db.test.find(
            {'$text': {'$search': 'some words'}},
            {'score': {'$meta': 'textScore'}})

        # Sort by 'score' field.
        cursor.sort([('score', {'$meta': 'textScore'})])

        for doc in cursor:
            print(doc)

    Raises :class:`~pymongo.errors.InvalidOperation` if this cursor has
    already been used. Only the last :meth:`sort` applied to this
    cursor has any effect.

    :Parameters:
      - `key_or_list`: a single key or a list of (key, direction)
        pairs specifying the keys to sort on
      - `direction` (optional): only used if `key_or_list` is a single
        key, if not given :data:`~pymongo.ASCENDING` is assumed
    """
    # Raises InvalidOperation if the cursor has already been consumed.
    self.__check_okay_to_chain()
    # Normalize the single-key and list forms into one (key, direction) list.
    keys = helpers._index_list(key_or_list, direction)
    # Overwrites any previous ordering: only the last sort() applies.
    self.__ordering = helpers._index_document(keys)
    return self
def includeme(config):
    """Include pyramid_multiauth into a pyramid configurator.

    This function provides a hook for pyramid to include the default settings
    for auth via pyramid_multiauth. Activate it like so:

        config.include("pyramid_multiauth")

    This will pull the list of registered authn policies from the deployment
    settings, and configure and install each policy in order. The policies to
    use can be specified in one of two ways:

      * as the name of a module to be included.
      * as the name of a callable along with a set of parameters.

    Here's an example suite of settings:

        multiauth.policies = ipauth1 ipauth2 pyramid_browserid
        multiauth.policy.ipauth1.use = pyramid_ipauth.IPAuthentictionPolicy
        multiauth.policy.ipauth1.ipaddrs = 123.123.0.0/16
        multiauth.policy.ipauth1.userid = local1
        multiauth.policy.ipauth2.use = pyramid_ipauth.IPAuthentictionPolicy
        multiauth.policy.ipauth2.ipaddrs = 124.124.0.0/16
        multiauth.policy.ipauth2.userid = local2

    This will configure a MultiAuthenticationPolicy with three policy objects.
    The first two will be IPAuthenticationPolicy objects created by passing
    in the specified keyword arguments. The third will be a BrowserID
    authentication policy just like you would get from executing:

        config.include("pyramid_browserid")

    As a side-effect, the configuration will also get the additional views
    that pyramid_browserid sets up by default.

    The *group finder function* and the *authorization policy* are also read
    from configuration if specified:

        multiauth.authorization_policy = mypyramidapp.acl.Custom
        multiauth.groupfinder = mypyramidapp.acl.groupfinder
    """
    # Grab the pyramid-wide settings, to look for any auth config.
    settings = config.get_settings()
    # Hook up a default AuthorizationPolicy.
    # Get the authorization policy from config if present.
    # Default ACLAuthorizationPolicy is usually what you want.
    authz_class = settings.get("multiauth.authorization_policy",
                               "pyramid.authorization.ACLAuthorizationPolicy")
    authz_policy = config.maybe_dotted(authz_class)()
    # If the app configures one explicitly then this will get overridden.
    # In autocommit mode this needs to be done before setting the authn policy.
    config.set_authorization_policy(authz_policy)
    # Get the groupfinder from config if present.
    groupfinder = settings.get("multiauth.groupfinder", None)
    groupfinder = config.maybe_dotted(groupfinder)
    # Look for callable policy definitions.
    # Suck them all out at once and store them in a dict for later use.
    policy_definitions = get_policy_definitions(settings)
    # Read and process the list of policies to load.
    # We build up a list of callables which can be executed at config commit
    # time to obtain the final list of policies.
    # Yeah, it's complicated. But we want to be able to inherit any default
    # views or other config added by the sub-policies when they're included.
    # Process policies in reverse order so that things at the front of the
    # list can override things at the back of the list.
    policy_factories = []
    policy_names = settings.get("multiauth.policies", "").split()
    for policy_name in reversed(policy_names):
        if policy_name in policy_definitions:
            # It's a policy defined using a callable.
            # Just append it straight to the list.
            definition = policy_definitions[policy_name]
            factory = config.maybe_dotted(definition.pop("use"))
            policy_factories.append((factory, policy_name, definition))
        else:
            # It's a module to be directly included.
            try:
                factory = policy_factory_from_module(config, policy_name)
            except ImportError:
                err = "pyramid_multiauth: policy %r has no settings " \
                      "and is not importable" % (policy_name,)
                raise ValueError(err)
            policy_factories.append((factory, policy_name, {}))
    # OK. We now have a list of callbacks which need to be called at
    # commit time, and will return the policies in reverse order.
    # Register a special action to pull them into our list of policies.
    policies = []

    def grab_policies():
        # Runs at commit time, after sub-policy includes have executed.
        for factory, name, kwds in policy_factories:
            policy = factory(**kwds)
            if policy:
                policy._pyramid_multiauth_name = name
                if not policy or policy is not policies[0] if policies else True:
                    pass
                if not policies or policy is not policies[0]:
                    # Remember, they're being processed in reverse order.
                    # So each new policy needs to go at the front.
                    policies.insert(0, policy)

    config.action(None, grab_policies, order=PHASE2_CONFIG)
    authn_policy = MultiAuthenticationPolicy(policies, groupfinder)
    config.set_authentication_policy(authn_policy)
def write_dna(dna, path):
    '''Write DNA to a file (genbank or fasta).

    :param dna: DNA sequence to write to file
    :type dna: coral.DNA
    :param path: file path to write. Has to be genbank or fasta file.
    :type path: str
    '''
    # Map the file extension to a Biopython filetype, failing early on
    # anything unsupported.
    extension = os.path.splitext(path)[1]
    if extension in ('.gb', '.ape'):
        filetype = 'genbank'
    elif extension in ('.fa', '.fasta'):
        filetype = 'fasta'
    else:
        raise ValueError('Only genbank or fasta files are supported.')
    # Convert each coral feature to its Biopython equivalent. Information
    # lost on conversion: specificity of feature type, strandedness,
    # topology.
    features = [_coral_to_seqfeature(feat) for feat in dna.features]
    # Biopython doesn't like 'None' here.
    # FIXME: this is a legacy feature - remove?
    bio_id = dna.id if hasattr(dna, 'id') else ''
    # Genbank limits record names to 16 characters; spaces are illegal.
    record = SeqRecord(Seq(str(dna), alphabet=ambiguous_dna),
                       id=bio_id,
                       name=dna.name[0:16].replace(' ', '_'),
                       features=features,
                       description=dna.name)
    record.annotations['data_file_division'] = ('circular' if dna.circular
                                                else 'linear')
    SeqIO.write(record, path, filetype)
def get_branding_ids(self):
    """Gets the branding asset ``Ids``.

    return: (osid.id.IdList) - a list of asset ``Ids``
    *compliance: mandatory -- This method must be implemented.*
    """
    from ..id.objects import IdList
    # A record without the key simply has no branding assets.
    if 'brandingIds' not in self._my_map:
        return IdList([])
    # Wrap each stored id string in an Id object.
    return IdList([Id(idstr) for idstr in self._my_map['brandingIds']])
def all_props(self):
    """Return a dictionary with the values of all children, and place holders
    for all of the section arguments. It combines props and arg_props.

    :return: a new dict; entries from ``props`` override ``arg_props``.
    """
    # Copy first: the original updated self.arg_props in place, which
    # silently polluted the argument-props mapping with child props.
    combined = dict(self.arg_props)
    combined.update(self.props)
    return combined
def transform(list, pattern, indices=(1,)):
    """Matches all elements of 'list' against the 'pattern' and returns a
    list of the elements indicated by indices of all successful matches.
    If 'indices' is omitted returns a list of first parenthesised groups
    of all successful matches.

    :param list: iterable of strings to match (NOTE: shadows the builtin
        ``list``; name kept for interface compatibility).
    :param pattern: regular expression applied with ``re.match``.
    :param indices: group indices to extract from each match.
    :return: flat list of captured group strings.
    """
    # A tuple default replaces the original mutable-list default, which
    # is a classic shared-state pitfall.
    result = []
    for element in list:
        match = re.match(pattern, element)
        if match:
            result.extend(match.group(i) for i in indices)
    return result
def training_config(estimator, inputs=None, job_name=None, mini_batch_size=None):
    """Export Airflow training config from an estimator.

    Args:
        estimator (sagemaker.estimator.EstimatorBase): The estimator to
            export training config from. Can be a BYO estimator, Framework
            estimator or Amazon algorithm estimator.
        inputs: Information about the training data. Accepts the same forms
            as the associated estimator's ``fit()`` method: an S3 URI string,
            a dict of channel name to string or ``s3_input``, a single
            ``s3_input``, a ``RecordSet``, or a list of ``RecordSet`` objects
            (one per channel).
        job_name (str): Specify a training job name if needed.
        mini_batch_size (int): Only for built-in Amazon-algorithm estimators;
            other estimators carry batch size themselves.

    Returns:
        dict: Training config directly usable by SageMakerTrainingOperator
        in Airflow.
    """
    # Start from the shared base config, then layer on the job-level fields.
    config = training_base_config(estimator, inputs, job_name, mini_batch_size)
    config['TrainingJobName'] = estimator._current_job_name
    if estimator.tags is not None:
        config['Tags'] = estimator.tags
    return config
def _map_reduce(self, map, reduce, out, session, read_pref, **kwargs):
    """Internal mapReduce helper.

    Builds and runs the ``mapReduce`` command; ``out`` controls whether
    results are returned inline or written to a collection.
    """
    # Command document must be ordered: the command name comes first.
    cmd = SON([("mapReduce", self.__name),
               ("map", map),
               ("reduce", reduce),
               ("out", out)])
    collation = validate_collation_or_none(kwargs.pop('collation', None))
    cmd.update(kwargs)
    inline = 'inline' in out
    if inline:
        # Inline output returns documents under the 'results' key.
        user_fields = {'results': 1}
    else:
        user_fields = None
    # A transaction's read preference (if any) overrides the given one.
    read_pref = ((session and session._txn_read_preference()) or read_pref)
    with self.__database.client._socket_for_reads(read_pref, session) as (sock_info, slave_ok):
        # readConcern is only valid for inline output on wire version >= 4,
        # and only if the caller did not set one explicitly.
        if (sock_info.max_wire_version >= 4 and ('readConcern' not in cmd) and inline):
            read_concern = self.read_concern
        else:
            read_concern = None
        # writeConcern applies only when writing output to a collection.
        if 'writeConcern' not in cmd and not inline:
            write_concern = self._write_concern_for(session)
        else:
            write_concern = None
        return self._command(sock_info, cmd, slave_ok, read_pref,
                             read_concern=read_concern,
                             write_concern=write_concern,
                             collation=collation,
                             session=session,
                             user_fields=user_fields)
def prepare_data_keys(primary_master_key, master_keys, algorithm, encryption_context):
    """Prepares a DataKey to be used for encrypting message and list
    of EncryptedDataKey objects to be serialized into header.

    :param primary_master_key: Master key with which to generate the encryption data key
    :type primary_master_key: aws_encryption_sdk.key_providers.base.MasterKey
    :param master_keys: All master keys with which to encrypt data keys
    :type master_keys: list of :class:`aws_encryption_sdk.key_providers.base.MasterKey`
    :param algorithm: Algorithm to use for encryption
    :type algorithm: aws_encryption_sdk.identifiers.Algorithm
    :param dict encryption_context: Encryption context to use when generating data key
    :rtype: tuple containing :class:`aws_encryption_sdk.structures.DataKey`
        and set of :class:`aws_encryption_sdk.structures.EncryptedDataKey`
    """
    encrypted_data_keys = set()
    # The primary key both generates the plaintext data key and supplies
    # its already-encrypted form.
    data_encryption_key = primary_master_key.generate_data_key(algorithm, encryption_context)
    _LOGGER.debug("encryption data generated with master key: %s", data_encryption_key.key_provider)
    for master_key in master_keys:
        if master_key is primary_master_key:
            # Don't re-encrypt the encryption data key; we already have
            # the ciphertext from generate_data_key above.
            encrypted_data_keys.add(
                EncryptedDataKey(
                    key_provider=data_encryption_key.key_provider,
                    encrypted_data_key=data_encryption_key.encrypted_data_key))
        else:
            encrypted_key = master_key.encrypt_data_key(
                data_key=data_encryption_key,
                algorithm=algorithm,
                encryption_context=encryption_context)
            encrypted_data_keys.add(encrypted_key)
            _LOGGER.debug("encryption key encrypted with master key: %s", master_key.key_provider)
    return data_encryption_key, encrypted_data_keys
def get_goid2color_pval(self):
    """Return a go2color dict containing GO colors determined by P-value."""
    go2color = {}
    # Let the P-value-based assignment populate colors first.
    self.set_goid2color_pval(go2color)
    # Any GO id with a result but no assigned color gets the default
    # (alpha == 1.000) color.
    default_color = self.alpha2col[1.000]
    for goid in self.go2res:
        go2color.setdefault(goid, default_color)
    return go2color
def update_docstrings():
    """update the docstring of each module using info in the
    modules/README.md file"""
    modules_dict = parse_readme()
    files = {}
    # update modules: read every module file listed in the README first.
    for mod in modules_dict:
        mod_file = os.path.join(modules_directory(), mod + ".py")
        with open(mod_file) as f:
            files[mod] = f.readlines()
    for mod in files:
        # State flags for the line-by-line rewrite:
        #   replaced -- new docstring text has been inserted
        #   lines    -- currently inside the old docstring (skipping it)
        #   done     -- closing quote seen; copy the rest verbatim
        replaced = False
        done = False
        lines = False
        out = []
        quotes = None
        for row in files[mod]:
            # deal with single or double quoted docstring
            if not quotes:
                if row.strip().startswith('"""'):
                    quotes = '"""'
                if row.strip().startswith("'''"):
                    quotes = "'''"
            if quotes and row.strip().startswith(quotes) and not done:
                # Opening or closing docstring delimiter line.
                out.append(row)
                if not replaced:
                    # Insert the README-derived docstring right after the
                    # opening delimiter.
                    out = out + ["".join(_to_docstring(modules_dict[mod])).strip() + "\n"]
                    replaced = True
                if lines:
                    done = True
                if not done and not lines:
                    lines = True
                continue
            if not lines or done:
                # Outside the old docstring body: keep the line.
                out.append(row)
        mod_file = os.path.join(modules_directory(), mod + ".py")
        with open(mod_file, "w") as f:
            f.writelines(out)
    print_stderr("Modules updated from README.md")
def read_template_source(filename):
    """Read the source of a Django template, returning the Unicode text."""
    # Import this late to be sure we don't trigger settings machinery too
    # early.
    from django.conf import settings
    if not settings.configured:
        # Running outside a Django project; fall back to default settings.
        settings.configure()
    with open(filename, "rb") as f:
        # Decode with the project-configured charset (usually 'utf-8').
        text = f.read().decode(settings.FILE_CHARSET)
    return text
def is_email():
    """Validates that a field's value is a valid email address.

    :return: a ``validate(value)`` callable that returns ``None`` for a
        valid address, or an error result from ``e(...)`` otherwise.
    """
    # Plain raw strings replace the original ``ur'...'`` literals, which
    # are a SyntaxError on Python 3; re.UNICODE keeps \S unicode-aware.
    email = (r'(?!^\.)'      # No dot at start
             r'(?!.*\.@)'    # No dot before at sign
             r'(?!.*@\.)'    # No dot after at sign
             r'(?!.*\.$)'    # No dot at the end
             r'(?!.*\.\.)'   # No double dots anywhere
             r'^\S+'         # Starts with one or more non-whitespace characters
             r'@'            # Contains an at sign
             r'\S+$'         # Ends with one or more non-whitespace characters
             )
    regex = re.compile(email, re.IGNORECASE | re.UNICODE)

    def validate(value):
        if not regex.match(value):
            return e("{} is not a valid email address", value)

    return validate
def get_subject_with_remote_validation(jwt_bu64, base_url):
    """Same as get_subject_with_local_validation() except that the signing
    certificate is automatically downloaded from the CN.

    - Additional possible validation errors:
      - The certificate could not be retrieved from the root CN.

    :param jwt_bu64: encoded JWT (same form accepted by
        get_subject_with_local_validation).
    :param base_url: base URL of the CN from which to download the
        signing certificate.
    """
    # Fetch the CN's signing certificate, then defer to local validation.
    cert_obj = d1_common.cert.x509.download_as_obj(base_url)
    return get_subject_with_local_validation(jwt_bu64, cert_obj)
def c_drop(self, frequency):
    '''Capacitance of an electrode covered in liquid, normalized per unit
    area (i.e., units are F/mm^2).

    :param frequency: frequency (or array of frequencies) at which to
        interpolate the capacitance.
    :return: interpolated capacitance when ``self._c_drop`` is a
        frequency/capacitance table, otherwise ``self._c_drop`` itself
        (assumed to be a frequency-independent scalar).
    '''
    # The original used a bare ``except: pass``, which silently hid every
    # failure. Only the expected lookup failures are caught now: TypeError
    # when _c_drop is not a mapping, KeyError when the keys are missing.
    try:
        return np.interp(frequency,
                         self._c_drop['frequency'],
                         self._c_drop['capacitance'])
    except (TypeError, KeyError):
        pass
    return self._c_drop
def add_to_fileswitcher(self, plugin, tabs, data, icon):
    """Add a plugin to the File Switcher.

    Creates the FileSwitcher lazily on first use (the import is deferred
    so the widget is not loaded until needed); subsequent calls register
    the plugin with the existing instance.
    """
    if self.fileswitcher is None:
        from spyder.widgets.fileswitcher import FileSwitcher
        self.fileswitcher = FileSwitcher(self, plugin, tabs, data, icon)
    else:
        self.fileswitcher.add_plugin(plugin, tabs, data, icon)
    # Route "go to file" requests to the plugin's current tab manager.
    self.fileswitcher.sig_goto_file.connect(
        plugin.get_current_tab_manager().set_stack_index)
def _to_meta_data(pif_obj, dataset_hit, mdf_acl):
    """Convert the meta-data from the PIF into MDF.

    :param pif_obj: PIF object exposing ``as_dictionary()``.
    :param dataset_hit: dataset search hit exposing ``as_dictionary()``.
    :param mdf_acl: ACL value stored verbatim under ``mdf["acl"]``.
    :return: dict of MDF metadata, or ``None`` when a required field is
        missing from the PIF (signalled by KeyError below).
    """
    pif = pif_obj.as_dictionary()
    dataset = dataset_hit.as_dictionary()
    mdf = {}
    try:
        # Title: prefer the first listed name, fall back to the PIF uid.
        if pif.get("names"):
            mdf["title"] = pif["names"][0]
        else:
            mdf["title"] = "Citrine PIF " + str(pif["uid"])
        if pif.get("chemicalFormula"):
            mdf["composition"] = pif["chemicalFormula"]
        elif pif.get("composition"):
            # Concatenate element symbols into a formula-like string.
            mdf["composition"] = ''.join([comp["element"] for comp in pif["composition"] if comp["element"]])
            if not mdf["composition"]:
                mdf.pop("composition")
        mdf["acl"] = mdf_acl
        mdf["source_name"] = _construct_new_key(dataset["name"])
        if pif.get("contacts"):
            mdf["data_contact"] = []
            for contact in pif["contacts"]:
                data_c = {"given_name": contact["name"]["given"],   # REQ
                          "family_name": contact["name"]["family"]  # REQ
                          }
                if contact.get("email"):
                    data_c["email"] = contact.get("email", "")
                if contact.get("orcid"):
                    data_c["orcid"] = contact.get("orcid", "")
                mdf["data_contact"].append(data_c)
            if not mdf["data_contact"]:
                mdf.pop("data_contact")
        mdf["data_contributor"] = [{}]
        if "owner" in dataset:
            # NOTE(review): assumes "owner" is "Given Family" -- a
            # single-word owner would raise IndexError; confirm upstream.
            name = dataset["owner"].split()
            contributor = {"given_name": name[0], "family_name": name[1], "email": dataset["email"]}
            mdf["data_contributor"] = [contributor]
        mdf["links"] = {"landing_page": "https://citrination.com/datasets/{}".format(dataset["id"]), "publication": []}
        if pif.get("references"):
            mdf["author"] = []
            mdf["citation"] = []
            for ref in pif["references"]:
                if ref.get("doi"):
                    mdf["citation"].append(ref["doi"])
                    # TODO: Make actual citation
                    mdf["links"]["publication"].append(ref["doi"])
                if ref.get("authors"):
                    for author in ref["authors"]:
                        if author.get("given") and author.get("family"):
                            mdf["author"].append({"given_name": author["given"], "family_name": author["family"]})
            # Remove fields if blank
            if not mdf["author"]:
                mdf.pop("author")
            if not mdf["citation"]:
                mdf.pop("citation")
            if not mdf["links"]["publication"]:
                mdf["links"].pop("publication")
        if pif.get("licenses", [{}])[0].get("url"):
            mdf["license"] = pif["licenses"][0]["url"]
        if pif.get("tags"):
            mdf["tags"] = pif["tags"]
    # If required MDF metadata is missing from PIF, abort
    except KeyError as e:
        print("Error: Required MDF metadata", str(e), "not found in PIF", pif["uid"])
        return None
    return mdf
def _mouseMoveDrag(moveOrDrag, x, y, xOffset, yOffset, duration, tween=linear, button=None):
    """Handles the actual move or drag event, since different platforms
    implement them differently.

    On Windows & Linux, a drag is a normal mouse move while a mouse button is
    held down. On OS X, a distinct "drag" event must be used instead.
    The code for moving and dragging the mouse is similar, so this function
    handles both. Users should call the moveTo() or dragTo() functions instead
    of calling _mouseMoveDrag().

    Args:
      moveOrDrag (str): Either 'move' or 'drag', for the type of action this is.
      x (int, float, None, optional): Absolute x destination; None keeps the
        current x. 0 by default.
      y (int, float, None, optional): Absolute y destination; None keeps the
        current y. 0 by default.
      xOffset (int, float, None, optional): How far left (for negative values)
        or right (for positive values) to move the cursor. 0 by default.
      yOffset (int, float, None, optional): How far up (for negative values)
        or down (for positive values) to move the cursor. 0 by default.
      duration (float, optional): The amount of time it takes to move the
        mouse cursor to the new xy coordinates. If 0, then the mouse cursor
        is moved instantaneously. 0.0 by default.
      tween (func, optional): The tweening function used if the duration is
        not 0. A linear tween is used by default. See the tweens.py file for
        details.
      button (str, int, optional): The mouse button clicked. Must be one of
        'left', 'middle', 'right' (or 1, 2, or 3) respectively. 'left' by
        default.

    Returns:
      None
    """
    # The move and drag code is similar, but OS X requires a special drag
    # event instead of just a move event when dragging.
    # See https://stackoverflow.com/a/2696107/1893164
    assert moveOrDrag in ('move', 'drag'), "moveOrDrag must be in ('move', 'drag'), not %s" % (moveOrDrag)
    if sys.platform != 'darwin':
        moveOrDrag = 'move'
        # Only OS X needs the drag event specifically.
    xOffset = int(xOffset) if xOffset is not None else 0
    yOffset = int(yOffset) if yOffset is not None else 0
    if x is None and y is None and xOffset == 0 and yOffset == 0:
        return
        # Special case for no mouse movement at all.
    startx, starty = position()
    x = int(x) if x is not None else startx
    y = int(y) if y is not None else starty
    # x, y, xOffset, yOffset are now int.
    x += xOffset
    y += yOffset
    width, height = size()
    # Make sure x and y are within the screen bounds.
    x = max(0, min(x, width - 1))
    y = max(0, min(y, height - 1))
    # If the duration is small enough, just move the cursor there instantly.
    steps = [(x, y)]
    if duration > MINIMUM_DURATION:
        # Non-instant moving/dragging involves tweening:
        num_steps = max(width, height)
        sleep_amount = duration / num_steps
        if sleep_amount < MINIMUM_SLEEP:
            # Too many steps for the duration; use fewer, longer steps.
            num_steps = int(duration / MINIMUM_SLEEP)
            sleep_amount = duration / num_steps
        steps = [getPointOnLine(startx, starty, x, y, tween(n / num_steps)) for n in range(num_steps)]
        # Making sure the last position is the actual destination.
        steps.append((x, y))
    for tweenX, tweenY in steps:
        if len(steps) > 1:
            # A single step does not require tweening.
            time.sleep(sleep_amount)
        # Abort if the user has moved the mouse to a failsafe corner.
        _failSafeCheck()
        tweenX = int(round(tweenX))
        tweenY = int(round(tweenY))
        if moveOrDrag == 'move':
            platformModule._moveTo(tweenX, tweenY)
        elif moveOrDrag == 'drag':
            platformModule._dragTo(tweenX, tweenY, button)
        else:
            raise NotImplementedError('Unknown value of moveOrDrag: {0}'.format(moveOrDrag))
    _failSafeCheck()
def camel_case_from_underscores(string):
    """Generate a CamelCase string from an underscore_string."""
    parts = []
    for piece in string.split('_'):
        # Known abbreviations are kept verbatim; everything else is
        # capitalized with the remainder lowercased.
        if piece in abbreviations:
            parts.append(piece)
        else:
            parts.append(piece[0].upper() + piece[1:].lower())
    return ''.join(parts)
def get_evidence(assay):
    """Given an activity, return an INDRA Evidence object.

    Parameters
    ----------
    assay : dict
        an activity from the activities list returned by a query to the API

    Returns
    -------
    ev : :py:class:`Evidence`
        an :py:class:`Evidence` object containing the kinetics of the assay,
        or None if no kinetics could be extracted
    """
    kinetics = get_kinetics(assay)
    source_id = assay.get('assay_chembl_id')
    # Without kinetics there is nothing to report.
    if not kinetics:
        return None
    # Resolve the ChEMBL document to a PubMed id for the evidence record.
    chembl_doc_id = str(assay.get('document_chembl_id'))
    return Evidence(source_api='chembl',
                    pmid=get_pmid(chembl_doc_id),
                    source_id=source_id,
                    annotations={'kinetics': kinetics})
def append_faces(vertices_seq, faces_seq):
    """Given a sequence of zero-indexed faces and vertices
    combine them into a single array of faces and
    a single array of vertices.

    Parameters
    ----------
    vertices_seq : (n,) sequence of (m, d) float
        Multiple arrays of vertices
    faces_seq : (n,) sequence of (p, j) int
        Zero indexed faces for matching vertices

    Returns
    -------
    vertices : (i, d) float
        Points in space
    faces : (j, 3) int
        Reference vertex indices
    """
    # Each face group must be shifted by the total number of vertices
    # that precede its vertex array.
    lengths = np.array([len(v) for v in vertices_seq])
    offsets = np.append(0, np.cumsum(lengths)[:-1])
    # Apply the offsets, dropping empty face groups.
    shifted = [group + offset
               for offset, group in zip(offsets, faces_seq)
               if len(group) > 0]
    # Stack everything into clean contiguous arrays.
    vertices = vstack_empty(vertices_seq)
    faces = vstack_empty(shifted)
    return vertices, faces
def fetchJobStoreFiles(jobStore, options):
    """Takes a list of file names as glob patterns, searches for these within a
    given directory, and attempts to take all of the files found and copy them
    into options.localFilePath.

    :param jobStore: A fileJobStore object.
    :param options.fetch: List of file glob patterns to search
        for in the jobStore and copy into options.localFilePath.
    :param options.localFilePath: Local directory to copy files into.
    :param options.jobStore: The path to the jobStore directory.
    """
    for jobStoreFile in options.fetch:
        # Expand the glob against the job store directory tree.
        jobStoreHits = recursiveGlob(directoryname=options.jobStore,
                                     glob_pattern=jobStoreFile)
        for jobStoreFileID in jobStoreHits:
            logger.debug("Copying job store file: %s to %s",
                         jobStoreFileID, options.localFilePath[0])
            # Copy (or symlink, if requested) each hit into the local dir,
            # keeping only the base name.
            jobStore.readFile(jobStoreFileID,
                              os.path.join(options.localFilePath[0],
                                           os.path.basename(jobStoreFileID)),
                              symlink=options.useSymlinks)
def rae(label, pred):
    """Compute the relative absolute error (condensed using the standard
    deviation formula): mean |label - pred| divided by the mean absolute
    deviation of label from its mean."""
    absolute_error = np.mean(np.abs(label - pred), axis=None)
    baseline_error = np.mean(np.abs(label - np.mean(label, axis=None)), axis=None)
    return absolute_error / baseline_error
def paw_header(filename, ppdesc):
    """Parse the PAW abinit header.

    Supports pspfmt versions "paw3", "paw4" and "paw5". Example header::

        Paw atomic data for element Si - Generated by atompaw v3.0.1.3
        14.000 4.000 20120814               : zatom,zion,pspdat
        7 11 1 0 663 0.                     : pspcod,pspxc,lmax,lloc,mmax,r2well
        paw5 1331                           : pspfmt,creatorID
        4 8                                 : basis_size,lmn_size
        0 0 1 1                             : orbitals
        5                                   : number_of_meshes
        1 2 663 8.21e-04 1.15e-02           : mesh 1, type,size,rad_step[,log_step]
        ... (one line per mesh) ...
        1.5669671236                        : r_cut(PAW)
        2 0.                                : shape_type,rshape

    :param filename: path to the pseudopotential file.
    :param ppdesc: descriptor with a ``format`` attribute (e.g. "paw4").
    :return: PawAbinitHeader built from the parsed fields.
    :raises NotImplementedError: if ``ppdesc.format`` is unsupported.
    """
    supported_formats = ["paw3", "paw4", "paw5"]
    if ppdesc.format not in supported_formats:
        raise NotImplementedError("format %s not in %s" % (ppdesc.format, supported_formats))
    lines = _read_nlines(filename, -1)
    summary = lines[0]
    # The first five lines hold fixed header fields; the list gives the
    # number of tokens expected from each line.
    header = _dict_from_lines(lines[:5], [0, 3, 6, 2, 2], sep=":")
    lines = lines[5:]
    # TODO
    # Parse orbitals and number of meshes.
    header["orbitals"] = [int(t) for t in lines[0].split(":")[0].split()]
    header["number_of_meshes"] = num_meshes = int(lines[1].split(":")[0])
    # Skip the per-mesh definition lines; they are not stored.
    lines = lines[2 + num_meshes:]
    header["r_cut"] = float(lines[0].split(":")[0])
    # Final line: shape_type and rshape.
    header.update(_dict_from_lines(lines[1], [2], sep=":"))
    return PawAbinitHeader(summary, **header)
def transform_audio(self, y):
    '''Compute HCQT magnitude.

    Parameters
    ----------
    y : np.ndarray
        the audio buffer

    Returns
    -------
    data : dict
        data['mag'] : np.ndarray, shape=(n_frames, n_bins)
            The CQT magnitude
    '''
    # Delegate to the parent transform, then drop the phase entry so only
    # the magnitude remains.
    data = super(HCQTMag, self).transform_audio(y)
    data.pop('phase')
    return data
def edit(self, body):
    """Edit this comment.

    :param str body: (required), new body of the comment, Markdown
        formatted
    :returns: bool -- True if the update was applied, False otherwise
    """
    # An empty body is rejected without hitting the API.
    if not body:
        return False
    payload = dumps({'body': body})
    json = self._json(self._patch(self._api, data=payload), 200)
    if not json:
        return False
    self._update_(json)
    return True
def ExpireRules(self):
    """Removes any rules with an expiration date in the past."""
    rules = self.Get(self.Schema.RULES)
    new_rules = self.Schema.RULES()
    # Timestamps are compared in microseconds since the epoch.
    now = time.time() * 1e6
    expired_session_ids = set()
    for rule in rules:
        if rule.expires > now:
            # Still valid: carry the rule over.
            new_rules.Append(rule)
        else:
            # Expired: remember any hunts its actions referenced so the
            # worker can be told to terminate them.
            for action in rule.actions:
                if action.hunt_id:
                    expired_session_ids.add(action.hunt_id)
    if expired_session_ids:
        with data_store.DB.GetMutationPool() as pool:
            # Notify the worker to mark this hunt as terminated.
            manager = queue_manager.QueueManager(token=self.token)
            manager.MultiNotifyQueue(
                [rdf_flows.GrrNotification(session_id=session_id)
                 for session_id in expired_session_ids],
                mutation_pool=pool)
    if len(new_rules) < len(rules):
        # Only write back and flush when something actually expired.
        self.Set(self.Schema.RULES, new_rules)
        self.Flush()
def _attributeLinesToDict ( attributeLines ) :
"""Converts a list of obo ' Term ' lines to a dictionary .
: param attributeLines : a list of obo ' Term ' lines . Each line contains a key
and a value part which are separated by a ' : ' .
: return : a dictionary containing the attributes of an obo ' Term ' entry .
NOTE : Some attributes can occur multiple times in one single term , for
example ' is _ a ' or ' relationship ' . However , currently only the last
occurence is stored .""" | attributes = dict ( )
for line in attributeLines :
attributeId , attributeValue = line . split ( ':' , 1 )
attributes [ attributeId . strip ( ) ] = attributeValue . strip ( )
return attributes |
def global_request_interceptor(self):
    # type: () -> Callable
    """Decorator that can be used to add global request
    interceptors easily to the builder.

    The returned wrapper function can be applied as a decorator on
    any function that processes the input. The function should
    follow the signature of the process function in
    :py:class:`ask_sdk_runtime.dispatch_components.request_components.AbstractRequestInterceptor`
    class.

    :return: Wrapper function that can be decorated on a
        interceptor process function.
    """
    def wrapper(process_func):
        if not callable(process_func):
            raise SkillBuilderException(
                "Global Request Interceptor process_func input parameter "
                "should be callable")
        # Build a concrete AbstractRequestInterceptor subclass on the fly
        # whose process() delegates to the decorated function.
        class_attributes = {
            "process": lambda self, handler_input: process_func(handler_input)
        }
        request_interceptor = type(
            "RequestInterceptor{}".format(
                process_func.__name__.title().replace("_", "")),
            (AbstractRequestInterceptor,), class_attributes)
        self.add_global_request_interceptor(
            request_interceptor=request_interceptor())
        # Return the decorated function unchanged: the original wrapper
        # returned None, which rebound the decorated name to None and made
        # the function uncallable afterwards.
        return process_func
    return wrapper
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.