signature stringlengths 29 44.1k | implementation stringlengths 0 85.2k |
|---|---|
def origin(self, origin):
    """Set the origin.

    :param origin: a length-three tuple of floats (x, y, z).
    """
    ox = origin[0]
    oy = origin[1]
    oz = origin[2]
    self.SetOrigin(ox, oy, oz)
    self.Modified()
def printable_name(column, path=None):
    """Provided for debug output when rendering conditions.

    User.name[3]["foo"][0]["bar"] -> name[3].foo[0].bar
    """
    if not path:
        path = path_of(column)
    rendered = [column.name]
    for part in path:
        if isinstance(part, str):
            # string segments become dotted attributes
            rendered.append(part)
        else:
            # integer segments become an index on the previous piece
            rendered[-1] += "[{}]".format(part)
    return ".".join(rendered)
def get_tunnel_info_input_filter_type_filter_by_sip_src_ip(self, **kwargs):
    """Auto Generated Code"""
    # Build the <get_tunnel_info>/<input>/<filter-type>/<filter-by-sip>/<src-ip>
    # element tree and hand it to the callback.
    get_tunnel_info = ET.Element("get_tunnel_info")
    config = get_tunnel_info
    input = ET.SubElement(get_tunnel_info, "input")
    filter_type = ET.SubElement(input, "filter-type")
    filter_by_sip = ET.SubElement(filter_type, "filter-by-sip")
    src_ip = ET.SubElement(filter_by_sip, "src-ip")
    src_ip.text = kwargs.pop('src_ip')
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
def _vmomentsurfaceIntegrand ( vz , vR , vT , R , z , df , sigmaR1 , gamma , sigmaz1 , n , m , o ) : # pragma : no cover because this is too slow ; a warning is shown
"""Internal function that is the integrand for the vmomentsurface mass integration""" | return vR ** n * vT ** m * vz ** o * df ( R , vR * sigmaR1 , vT * sigmaR1 * gamma , z , vz * sigmaz1 , use_physical = False ) |
def write_to_fitsfile(self, fitsfile, clobber=True):
    """Write this mapping to a FITS file, to avoid having to recompute it.

    Args:
        fitsfile: Path of the output FITS file.
        clobber (bool): If True, overwrite an existing file.
    """
    from fermipy.skymap import Map
    hpx_header = self._hpx.make_header()
    index_map = Map(self.ipixs, self.wcs)
    mult_map = Map(self.mult_val, self.wcs)
    prim_hdu = index_map.create_primary_hdu()
    # BUG FIX: the secondary HDU was previously built from index_map, so
    # mult_map was computed but never written to the file.
    mult_hdu = mult_map.create_image_hdu()
    # 'ORDERING' was listed twice in the original key list; deduplicated.
    for key in ['COORDSYS', 'ORDERING', 'PIXTYPE', 'ORDER', 'NSIDE',
                'FIRSTPIX', 'LASTPIX']:
        prim_hdu.header[key] = hpx_header[key]
        mult_hdu.header[key] = hpx_header[key]
    hdulist = fits.HDUList([prim_hdu, mult_hdu])
    hdulist.writeto(fitsfile, overwrite=clobber)
def match_lists(pos1, pos2, tolerance=MATCH_TOLERANCE, spherical=False):
    """Given two sets of x/y positions match the lists, uniquely.

    :rtype: numpy.ma, numpy.ma
    :param pos1: (N, 2) array of x/y positions.
    :param pos2: (M, 2) array of x/y positions.
    :param tolerance: float distance, in pixels, to consider a match
    :param spherical: if True, scale the column-0 separation by
        cos(radians(column 1)) before computing distance -- i.e. treat the
        columns as (lon, lat) in degrees (planar approximation).

    Algorithm:
        - Find all the members of pos2 that are within tolerance of pos1[idx1].
          These pos2 members are match_group_1
        - Find all the members of pos1 that are within tolerance of
          match_group_1[idx2]. These pos1 members are match_group_2
        - If pos1[idx1] is in match_group_2 then pos1[idx1] is a match of the
          object at match_group_1[idx2]
    """
    assert isinstance(pos1, numpy.ndarray)
    assert isinstance(pos2, numpy.ndarray)
    # build some arrays to hold the index of things that matched between lists.
    npts2 = npts1 = 0
    if len(pos1) > 0:
        npts1 = len(pos1[:, 0])
        # NOTE(review): int16 indices limit usable list sizes to 32767
        # entries -- confirm inputs stay below this.
        pos1_idx_array = numpy.arange(npts1, dtype=numpy.int16)
    if len(pos2) > 0:
        npts2 = len(pos2[:, 0])
        pos2_idx_array = numpy.arange(npts2, dtype=numpy.int16)
    # array of final matched indices into pos2; a masked entry means no match.
    match1 = numpy.ma.zeros(npts1, dtype=numpy.int16)
    match1.mask = True
    # array of matched indices into pos1; a masked entry means no match.
    match2 = numpy.ma.zeros(npts2, dtype=numpy.int16)
    match2.mask = True
    # if one of the two input arrays is zero length there is no matching to do.
    if npts1 * npts2 == 0:
        return match1, match2
    for idx1 in range(npts1):
        # compute the distance from source idx1 to each member of pos2
        if not spherical:
            sep = numpy.sqrt((pos2[:, 0] - pos1[idx1, 0]) ** 2 +
                             (pos2[:, 1] - pos1[idx1, 1]) ** 2)
        else:
            # planar approximation: shrink the longitude offset by cos(lat)
            sep = numpy.sqrt((numpy.cos(numpy.radians(pos1[idx1, 1])) *
                              (pos2[:, 0] - pos1[idx1, 0])) ** 2 +
                             (pos2[:, 1] - pos1[idx1, 1]) ** 2)
        # considered a match if sep is below tolerance and is the closest match available.
        match_condition = numpy.all((sep <= tolerance, sep == sep.min()), axis=0)
        # match_group_1 is the list of indexes of pos2 entries that qualified
        # as possible matches to pos1[idx1]
        match_group_1 = pos2_idx_array[match_condition]
        # For each of those pos2 objects that could be a match to pos1[idx1],
        # find the best match in all of pos1
        for idx2 in match_group_1:
            # distance from this candidate pos2 object to all members of pos1
            sep = numpy.sqrt((pos1[:, 0] - pos2[idx2, 0]) ** 2 +
                             (pos1[:, 1] - pos2[idx2, 1]) ** 2)
            # considered a match if sep is below tolerance and is the closest match available.
            match_condition = numpy.all((sep <= tolerance, sep == sep.min()), axis=0)
            match_group_2 = pos1_idx_array[match_condition]
            # Is pos1[idx1] among the pos1 members matched back to this pos2
            # member?  If so the pairing is mutual and we record it.
            if idx1 in match_group_2:
                match1[idx1] = idx2
                match2[idx2] = idx1
                # this BREAK is in here since once we have a match we're done.
                break
    return match1, match2
def extern_store_f64(self, context_handle, f64):
    """Given a context and double, return a new Handle to represent the double."""
    context = self._ffi.from_handle(context_handle)
    return context.to_value(f64)
def get_queryset(self):
    """Get QuerySet from cached widget."""
    # Collect dependent-field values that were actually supplied (non-empty)
    # in the request, keyed by their model field name.
    dependent_values = {}
    query_params = self.request.GET
    for form_field_name, model_field_name in self.widget.dependent_fields.items():
        if form_field_name not in query_params:
            continue
        if query_params.get(form_field_name, '') == '':
            continue
        dependent_values[model_field_name] = query_params.get(form_field_name)
    return self.widget.filter_queryset(self.request, self.term, self.queryset,
                                       **dependent_values)
def derive_queryset(self, **kwargs):
    """Derives our queryset."""
    # start from our parent queryset
    queryset = super(SmartListView, self).get_queryset(**kwargs)
    # apply any filtering: each whitespace-separated term must match at
    # least one search field (OR within a term, AND across terms)
    search_fields = self.derive_search_fields()
    search_query = self.request.GET.get('search')
    if search_fields and search_query:
        term_queries = [
            reduce(operator.or_, [Q(**{field: term}) for field in search_fields])
            for term in search_query.split(' ')
        ]
        queryset = queryset.filter(reduce(operator.and_, term_queries))
    # add any select related
    related = self.derive_select_related()
    if related:
        queryset = queryset.select_related(*related)
    return queryset
def parse_package_ref(self, ref):
    """Return tuple of architecture, package_name, version, id.

    Returns None when ``ref`` is empty or does not contain the expected
    four space-separated fields.
    """
    if not ref:
        return None
    # raw string: the pattern contains backslash-space escapes, which emit
    # an invalid-escape DeprecationWarning in a normal string literal
    parsed = re.match(r'(.*)\ (.*)\ (.*)\ (.*)', ref)
    if parsed is None:
        # BUG FIX: previously .groups() was called unconditionally, raising
        # AttributeError on a malformed ref; treat it like an empty ref.
        return None
    return parsed.groups()
def print_prediction(self, ptup, precision=2):
    """Print a summary of a predicted position.

    The argument *ptup* is a tuple returned by :meth:`predict`.  It is
    printed to :data:`sys.stdout` in a reasonable format that uses Unicode
    characters.
    """
    from . import ellipses
    bestra, bestdec, major, minor, posang = ptup
    scale = ellipses.sigmascale(1)
    # convert axes to arcseconds and the position angle to degrees
    major *= R2A
    minor *= R2A
    posang *= R2D
    print_('position =', fmtradec(bestra, bestdec, precision=precision))
    print_('err(1σ) = %.*f" × %.*f" @ %.0f°' %
           (precision, major * scale, precision, minor * scale, posang))
def has_node(self, n, t=None):
    """Return True if the graph, at time t, contains the node n.

    Parameters
    ----------
    n : node
    t : snapshot id (default None)
        If None, report the presence of the node in the flattened graph.

    Examples
    --------
    >>> G = dn.DynGraph()  # or DiGraph, MultiGraph, MultiDiGraph, etc
    >>> G.add_path([0, 1, 2], t=0)
    >>> G.has_node(0, t=0)
    True

    It is more readable and simpler to use

    >>> 0 in G
    True
    """
    if t is not None:
        # node is present at t only if it has a positive degree there
        deg = list(self.degree([n], t).values())
        return deg[0] > 0 if deg else False
    try:
        return n in self._node
    except TypeError:
        # unhashable objects can never be nodes
        return False
def get_files(path, ext=[], include=True):
    """Walk every sub-directory of the given directory and yield file paths.

    :param str path: the directory to process.
    :param list ext: list of file-name suffixes (extensions).
    :param bool include: if True, ``ext`` is an include list;
        otherwise it is an exclude list.
    :returns: a generator of joined file paths.
    """
    suffixes = tuple(ext)
    for dirpath, _dirnames, filenames in os.walk(path):
        for fname in filenames:
            if suffixes:
                # yield when the suffix test agrees with the include flag
                if fname.endswith(suffixes) == include:
                    yield os.path.join(dirpath, fname)
            else:
                yield os.path.join(dirpath, fname)
def get(self, sid):
    """Constructs a PhoneNumberContext

    :param sid: The unique string that identifies the resource
    :returns: twilio.rest.proxy.v1.service.phone_number.PhoneNumberContext
    :rtype: twilio.rest.proxy.v1.service.phone_number.PhoneNumberContext
    """
    service_sid = self._solution['service_sid']
    return PhoneNumberContext(self._version, service_sid=service_sid, sid=sid)
def add_dataset_to_collection(dataset_id, collection_id, **kwargs):
    """Add a single dataset to a dataset collection.

    Args:
        dataset_id: id of the dataset to add.
        collection_id: id of the target collection.

    Raises:
        HydraError: if the collection already contains the dataset.
    """
    collection_i = _get_collection(collection_id)
    collection_item = _get_collection_item(collection_id, dataset_id)
    if collection_item is not None:
        # BUG FIX: the %s placeholders were never interpolated because the
        # ids were passed as extra constructor arguments instead of being
        # applied with the % operator.
        raise HydraError("Dataset Collection %s already contains dataset %s"
                         % (collection_id, dataset_id))
    new_item = DatasetCollectionItem()
    new_item.dataset_id = dataset_id
    new_item.collection_id = collection_id
    collection_i.items.append(new_item)
    db.DBSession.flush()
    return 'OK'
def recall(result, reference):
    """Recall.

    Parameters
    ----------
    result : array_like
        Input data containing objects. Can be any type but will be converted
        into binary: background where 0, object everywhere else.
    reference : array_like
        Input data containing objects. Can be any type but will be converted
        into binary: background where 0, object everywhere else.

    Returns
    -------
    recall : float
        The recall between two binary datasets, here mostly binary objects in
        images, defined as the fraction of relevant instances that are
        retrieved. The recall is not symmetric.

    See also
    --------
    :func:`precision`

    Notes
    -----
    Not symmetric. The inverse of the recall is :func:`precision`.
    High recall means that an algorithm returned most of the relevant results.

    References
    ----------
    .. [1] http://en.wikipedia.org/wiki/Precision_and_recall
    .. [2] http://en.wikipedia.org/wiki/Confusion_matrix#Table_of_confusion
    """
    # BUG FIX: the numpy.bool alias was deprecated in NumPy 1.20 and removed
    # in 1.24; the builtin bool is the documented replacement.
    result = numpy.atleast_1d(result.astype(bool))
    reference = numpy.atleast_1d(reference.astype(bool))
    tp = numpy.count_nonzero(result & reference)
    fn = numpy.count_nonzero(~result & reference)
    try:
        recall = tp / float(tp + fn)
    except ZeroDivisionError:
        # no positives in the reference at all
        recall = 0.0
    return recall
def copy(self, new_name=None):
    """Returns a deep copy of the system.

    Parameters
    ----------
    new_name : str, optional
        Set a new meta name parameter.
        Default: <old_name>_copy
    """
    duplicate = copy.deepcopy(self)
    if not new_name:
        new_name = self.name + '_copy'
    # full IOSystem objects carry a meta record that must be updated;
    # plain subsystems only have a name attribute
    if str(type(self)) == "<class 'pymrio.core.mriosystem.IOSystem'>":
        duplicate.meta.note('IOSystem copy {new} based on {old}'.format(
            new=new_name, old=self.meta.name))
        duplicate.meta.change_meta('name', new_name, log=False)
    else:
        duplicate.name = new_name
    return duplicate
def _query(self, sql, *args):
    """Executes the specified `sql` query and returns the cursor"""
    # lazily open the database on first use
    if not self._con:
        logger.debug(("Open MBTiles file '%s'") % self.filename)
        self._con = sqlite3.connect(self.filename)
        self._cur = self._con.cursor()
    # collapse all runs of whitespace to single spaces for readable logging
    normalized = ' '.join(sql.split())
    logger.debug(("Execute query '%s' %s") % (normalized, args))
    try:
        self._cur.execute(normalized, *args)
    except (sqlite3.OperationalError, sqlite3.DatabaseError) as exc:
        raise InvalidFormatError(("%s while reading %s") % (exc, self.filename))
    return self._cur
def register_request(self, valid_responses):
    """Register a RPC request.

    :param list valid_responses: List of possible Responses that
        we should be waiting for.
    :return: the uuid identifying this request.
    """
    request_id = str(uuid4())
    self._response[request_id] = []
    # map every acceptable response action back to this request
    for action in valid_responses:
        self._request[action] = request_id
    return request_id
def init(track_log_handler):
    """(Re)initialize track's file handler for the track package logger.

    Adds a stdout-printing handler automatically.
    """
    logger = logging.getLogger(__package__)
    # TODO (just document prominently)
    # assume only one trial can run at once right now;
    # multi-concurrent-trial support will require complex filter logic
    # based on the currently-running trial (maybe we shouldn't allow
    # multiple trials on different python threads, that's dumb)
    stale = [h for h in logger.handlers if isinstance(h, TrackLogHandler)]
    for handler in stale:
        logger.removeHandler(handler)
    # install a stdout handler exactly once
    if not any(isinstance(h, StdoutHandler) for h in logger.handlers):
        stdout_handler = StdoutHandler()
        stdout_handler.setFormatter(_FORMATTER)
        logger.addHandler(stdout_handler)
    track_log_handler.setFormatter(_FORMATTER)
    logger.addHandler(track_log_handler)
    logger.propagate = False
    logger.setLevel(logging.DEBUG)
def new(path='.', template=None):
    """Creates a new project"""
    path = abspath(path.rstrip(sep))
    if not template:
        template = DEFAULT_TEMPLATE_URL
    # skip editor backups, bytecode caches, and VCS metadata
    filter_patterns = ['~*', '*.py[co]', '__pycache__', '__pycache__/*',
                       '.git', '.git/*', '.hg', '.hg/*', '.svn', '.svn/*']
    render_skeleton(template, path, include_this=['.gitignore'],
                    filter_this=filter_patterns)
    print(HELP_MSG % (path,))
def _get_parents ( folds , linenum ) :
"""Get the parents at a given linenum .
If parents is empty , then the linenum belongs to the module .
Parameters
folds : list of : class : ` FoldScopeHelper `
linenum : int
The line number to get parents for . Typically this would be the
cursor position .
Returns
parents : list of : class : ` FoldScopeHelper `
A list of : class : ` FoldScopeHelper ` objects that describe the defintion
heirarcy for the given ` ` linenum ` ` . The 1st index will be the
top - level parent defined at the module level while the last index
will be the class or funtion that contains ` ` linenum ` ` .""" | # Note : this might be able to be sped up by finding some kind of
# abort - early condition .
parents = [ ]
for fold in folds :
start , end = fold . range
if linenum >= start and linenum <= end :
parents . append ( fold )
else :
continue
return parents |
def _properties_from_dict ( d , key_name = 'key' ) :
'''Transforms dictionary into pipeline object properties .
The output format conforms to boto ' s specification .
Example input :
' a ' : ' 1 ' ,
' ref ' : ' 2'
Example output :
' key ' : ' a ' ,
' stringValue ' : ' 1 ' ,
' key ' : ' b ' ,
' refValue ' : ' 2 ' ,''' | fields = [ ]
for key , value in six . iteritems ( d ) :
if isinstance ( value , dict ) :
fields . append ( { key_name : key , 'refValue' : value [ 'ref' ] , } )
else :
fields . append ( { key_name : key , 'stringValue' : value , } )
return fields |
def _getStore(self):
    """Get the Store used for FTS.

    If it does not exist, it is created and initialised.
    """
    storeDir = self.store.newDirectory(self.indexDirectory)
    if storeDir.exists():
        return Store(storeDir)
    # first use: create and initialise a fresh store
    freshStore = Store(storeDir)
    self._initStore(freshStore)
    return freshStore
def hist(self, xdata, disp=True, **kwargs):
    '''Graphs a histogram.

    xdata: List of values to bin. Can optionally include a header, see
        testGraph_barAndHist.py in https://github.com/Dfenestrator/GooPyCharts
        for an example.
    disp: for displaying plots immediately. Set to True by default. Set to
        False for other operations, then use show() to display the plot.
    **kwargs: Access to other Google Charts API options. The key is the
        option name, the value is the option's full JS code.
    '''
    # combine data into proper format
    data = [self.xlabel] + xdata
    # Include other options, supplied by **kwargs
    other = ''.join(option + ': ' + kwargs[option] + ',\n' for option in kwargs)
    # input arguments to the template are passed as a dictionary
    # (see template for where variables are inserted)
    argDict = {'data': str(data), 'title': self.title,
               'functionName': slugify(self.title), 'height': self.height,
               'width': self.width, 'logScaleFlag': 'false',
               'ylabel': self.ylabel, 'plotType': 'Histogram',
               'numFig': self.numFig, 'other': other}
    self.javascript = (graphPgTemplateStart + graphPgTemplate_hist +
                       graphPgTemplateEnd) % argDict
    if disp:
        self.dispFile()
def get_entry_point(key, value):
    """Check if a registered entry point is available for a given name and
    load it. Otherwise, return None.

    key (unicode): Entry point name.
    value (unicode): Name of entry point to load.
    RETURNS: The loaded entry point or None.
    """
    matching = (ep for ep in pkg_resources.iter_entry_points(key)
                if ep.name == value)
    found = next(matching, None)
    if found is not None:
        return found.load()
def cmd_ip2asn(ip):
    """Use Team Cymru ip2asn service to get information about a public IPv4/IPv6.

    Reference: https://www.team-cymru.com/IP-ASN-mapping.html

    $ habu.ip2asn 8.8.8.8
    {
        "asn": "15169",
        "net": "8.8.8.0/24",
        "cc": "US",
        "rir": "ARIN",
        "asname": "GOOGLE - Google LLC, US",
        "country": "United States"
    }
    """
    # validate before querying the remote service
    try:
        ipaddress.ip_address(ip)
    except ValueError:
        logging.error('Invalid IP address')
        sys.exit(1)
    result = ip2asn(ip)
    print(json.dumps(result, indent=4))
def has(self, querypart_name, value=None):
    """Returns ``True`` if ``querypart_name`` with ``value`` is set. For example
    you can check if you already used condition by ``sql.has('where')``.

    If you want to check for more information, for example if that condition
    also contains ID, you can do this by ``sql.has('where', 'id')``.
    """
    if super().has(querypart_name, value):
        return True
    if value:
        return False
    # with no value given, the name may also refer to a select option
    return super().has('select_options', querypart_name)
def validate(self, bug: Bug, verbose: bool = True) -> bool:
    """Checks that a given bug successfully builds, and that it produces an
    expected set of test suite outcomes.

    Parameters:
        bug: the bug to validate.
        verbose: toggles verbosity of output. If set to `True`, the
            outcomes of each test will be printed to the standard output.

    Returns:
        `True` if bug behaves as expected, else `False`.
    """
    # attempt to rebuild -- don't worry, Docker's layer caching prevents us
    # from actually having to rebuild everything from scratch :-)
    try:
        self.build(bug, force=True, quiet=True)
    except docker.errors.BuildError:
        # NOTE(review): this prints self.identifier -- confirm it should
        # not be the bug's identifier instead.
        print("failed to build bug: {}".format(self.identifier))
        return False

    # provision a container
    validated = True
    c = None
    try:
        c = self.__installation.containers.provision(bug)
        # ensure we can compile the bug
        # TODO: check compilation status!
        print_task_start('Compiling')
        self.__installation.containers.compile(c)
        print_task_end('Compiling', 'OK')

        # The two branches of the original (expected pass / expected fail)
        # were near-identical 20-line blocks; they are merged here: a test
        # is unexpected whenever its observed outcome disagrees with its
        # expected outcome. Tests without a boolean expectation are skipped,
        # exactly as before.
        for t in bug.tests:
            if not isinstance(t.expected_outcome, bool):
                continue
            task = 'Running test: {}'.format(t.name)
            print_task_start(task)
            outcome = self.__installation.containers.execute(c, t, verbose=verbose)
            if bool(outcome.passed) != t.expected_outcome:
                validated = False
                label = 'UNEXPECTED: PASS' if outcome.passed else 'UNEXPECTED: FAIL'
                print_task_end(task, label)
                response = textwrap.indent(outcome.response.output, ' ' * 4)
                print('\n' + response)
            else:
                print_task_end(task, 'OK')
    # ensure that the container is destroyed!
    finally:
        if c:
            del self.__installation.containers[c.uid]
    return validated
def do_edit(self, line):
    """edit FILE

    Copies the file locally, launches an editor to edit the file.
    When the editor exits, if the file was modified then its copied
    back.

    You can specify the editor used with the --editor command line
    option when you start rshell, or by using the VISUAL or EDITOR
    environment variable. if none of those are set, then vi will be used.
    """
    if len(line) == 0:
        print_err("Must provide a filename")
        return
    filename = resolve_path(line)
    dev, dev_filename = get_dev_and_path(filename)
    mode = auto(get_mode, filename)
    # refuse directories outright
    if mode_exists(mode) and mode_isdir(mode):
        print_err("Unable to edit directory '{}'".format(filename))
        return
    if dev is None:
        # File is local: edit in place.
        os.system("{} '{}'".format(EDITOR, filename))
    else:
        # File is remote: pull a copy into a temp dir, edit it there, and
        # push it back only if the editor actually changed it.
        with tempfile.TemporaryDirectory() as temp_dir:
            local_filename = os.path.join(temp_dir, os.path.basename(filename))
            if mode_exists(mode):
                print('Retrieving {} ...'.format(filename))
                cp(filename, local_filename)
            # compare stat before/after the editor session to detect edits
            old_stat = get_stat(local_filename)
            os.system("{} '{}'".format(EDITOR, local_filename))
            new_stat = get_stat(local_filename)
            if old_stat != new_stat:
                self.print('Updating {} ...'.format(filename))
                cp(local_filename, filename)
def DeregisterAnalyzer(cls, analyzer_class):
    """Deregisters a analyzer class.

    The analyzer classes are identified based on their lower case name.

    Args:
        analyzer_class (type): class object of the analyzer.

    Raises:
        KeyError: if analyzer class is not set for the corresponding name.
    """
    name = analyzer_class.NAME.lower()
    registered = cls._analyzer_classes
    if name in registered:
        del registered[name]
    else:
        raise KeyError('analyzer class not set for name: {0:s}'.format(
            analyzer_class.NAME))
def is_list_sorted(lst):
    """Check whether a given list is sorted in ascending order.

    >>> is_list_sorted([1, 2, 4, 6, 8, 10, 12, 14, 16, 17])
    True
    >>> is_list_sorted([1, 2, 4, 6, 8, 10, 12, 14, 20, 17])
    False
    >>> is_list_sorted([1, 2, 4, 6, 8, 10, 15, 14, 20])
    False
    """
    for prev, cur in zip(lst, lst[1:]):
        if prev > cur:
            return False
    return True
def en010(self, value=None):
    """Corresponds to IDD Field `en010`.

    Enthalpy corresponding to 1.0% annual cumulative frequency of occurrence
    (mean coincident dry-bulb temperature).

    Args:
        value (float): value for IDD Field `en010`. Unit: kJ/kg.
            If `value` is None it will not be checked against the
            specification and is assumed to be a missing value.

    Raises:
        ValueError: if `value` is not a valid value
    """
    if value is None:
        # missing value: stored as-is, no validation
        self._en010 = None
        return
    try:
        converted = float(value)
    except ValueError:
        raise ValueError('value {} need to be of type float '
                         'for field `en010`'.format(value))
    self._en010 = converted
def connected_components_subgraphs(self, copy=True):
    """Iterate over connected components of this :class:`BreakpointGraph`,
    yielding each wrapped in a new :class:`BreakpointGraph`.

    :param copy: if True, the component's graph data is deep-copied into the
        new object; otherwise the yielded object shares data (a view) with
        this graph.
    :type copy: ``Boolean``
    :return: generator of :class:`BreakpointGraph` objects, one per
        connected component.
    :rtype: ``generator``
    """
    for component in nx.connected_components(self.bg):
        subgraph = self.bg.subgraph(component)
        if copy:
            # BUG FIX: the result of .copy() was previously discarded, so a
            # shared view was always yielded regardless of the flag.
            subgraph = subgraph.copy()
        yield BreakpointGraph(subgraph)
def delete_lines(self, lines):
    """Delete all lines with given line numbers.

    Args:
        lines (list): List of integers corresponding to line numbers to delete
    """
    # Each deletion shifts later indices down by one, so the offset
    # correction (lineno - removed) is only valid when numbers are handled
    # in ascending order. Sorting makes the method correct for input given
    # in any order; sorted input behaves exactly as before.
    for removed, lineno in enumerate(sorted(lines)):
        del self[lineno - removed]
def validate_input(self, input_string, validators):
    """Validate the given input string against the specified list of validators.

    :param input_string: the input string to verify.
    :param validators: the list of validators.
    :raises InvalidValidator: if the list of validators does not provide a
        valid InputValidator class.
    :return: a boolean representing the validation result. True if the input
        string is valid; False otherwise.
    """
    # normalise: a single validator or None becomes a list
    if isinstance(validators, BaseValidator):
        validators = [validators]
    elif validators is None:
        validators = []
    if not isinstance(validators, list):
        raise InvalidValidator("Validator {} is not a valid validator".format(validators))
    results = []
    for validator in validators:
        if not isinstance(validator, BaseValidator):
            raise InvalidValidator("Validator {} is not a valid validator".format(validator))
        results.append(validator.validate(input_string=input_string))
    # vacuously True for an empty validator list
    return all(results)
def two_qubit_randomized_benchmarking(
        sampler: sim.Sampler,
        first_qubit: devices.GridQubit,
        second_qubit: devices.GridQubit,
        *,
        num_clifford_range: Sequence[int] = range(5, 50, 5),
        num_circuits: int = 20,
        repetitions: int = 1000) -> RandomizedBenchMarkResult:
    """Clifford-based randomized benchmarking (RB) of two qubits.

    A total of num_circuits random circuits are generated, each of which
    contains a fixed number of two-qubit Clifford gates plus one additional
    Clifford that inverts the whole sequence and a measurement in the
    z-basis. Each circuit is repeated a number of times and the average
    |00> state population is determined from the measurement outcomes of all
    of the circuits.

    The above process is done for different circuit lengths specified by the
    integers in num_clifford_range. For example, an integer 10 means the
    random circuits will contain 10 Clifford gates each plus one inverting
    Clifford. The user may use the result to extract an average gate
    fidelity, by analyzing the change in the average |00> state population
    at different circuit lengths. For actual experiments, one should choose
    num_clifford_range such that a clear exponential decay is observed in
    the results.

    The two-qubit Cliffords here are decomposed into CZ gates plus
    single-qubit x and y rotations. See Barends et al., Nature 508, 500 for
    details.

    Args:
        sampler: The quantum engine or simulator to run the circuits.
        first_qubit: The first qubit under test.
        second_qubit: The second qubit under test.
        num_clifford_range: The different numbers of Cliffords in the RB study.
        num_circuits: The number of random circuits generated for each
            number of Cliffords.
        repetitions: The number of repetitions of each circuit.

    Returns:
        A RandomizedBenchMarkResult object that stores and plots the result.
    """
    cliffords = _single_qubit_cliffords()
    # precompute matrices for the two-qubit Clifford group so each random
    # sequence's inverting element can be found
    cfd_matrices = _two_qubit_clifford_matrices(first_qubit, second_qubit, cliffords)
    gnd_probs = []
    for num_cfds in num_clifford_range:
        gnd_probs_l = []
        for _ in range(num_circuits):
            # num_cfds Cliffords plus the inverting Clifford, then measure
            circuit = _random_two_q_clifford(first_qubit, second_qubit, num_cfds,
                                             cfd_matrices, cliffords)
            circuit.append(ops.measure(first_qubit, second_qubit, key='z'))
            results = sampler.run(circuit, repetitions=repetitions)
            # |00> population: both measured bits are 0
            gnds = [(not r[0] and not r[1]) for r in results.measurements['z']]
            gnd_probs_l.append(np.mean(gnds))
        # average over the random circuits at this sequence length
        gnd_probs.append(float(np.mean(gnd_probs_l)))
    return RandomizedBenchMarkResult(num_clifford_range, gnd_probs)
def get_stdlib_modules():
    """Returns a list containing the names of all the modules available in
    the standard library.

    Based on the function get_root_modules from the IPython project.
    Present in IPython.core.completerlib in v0.13.1
    Copyright (C) 2010-2011 The IPython Development Team.
    Distributed under the terms of the BSD License.
    """
    modules = set(sys.builtin_module_names)
    # sys.path[0] is the script directory; skip it, and skip any
    # site-packages entries (third-party, not stdlib)
    for path in sys.path[1:]:
        if 'site-packages' not in path:
            modules.update(module_list(path))
    modules.discard('__init__')
    return list(modules)
def redo(self, channel, image):
    """Called from the reference viewer shell when a new image has
    been added to a channel."""
    chname = channel.name
    # Only update our GUI if the activity is in the focused channel
    if self.active != chname:
        return True
    imname = image.get('name', 'NONAME')
    self.set_info("A new image '%s' has been added to channel %s" % (imname, chname))
    return True
def create_name_and_type(self, name: str, descriptor: str) -> NameAndType:
    """Creates a new :class:`ConstantNameAndType`, adding it to the pool and
    returning it.

    :param name: The name of the class.
    :param descriptor: The descriptor for `name`.
    """
    # tag 12 identifies a CONSTANT_NameAndType entry; both strings are
    # interned in the pool first and referenced by index
    name_index = self.create_utf8(name).index
    descriptor_index = self.create_utf8(descriptor).index
    self.append((12, name_index, descriptor_index))
    return self.get(self.raw_count - 1)
def delete_attachment(request, link_field=None, uri=None):
    """Delete existing file and link."""
    if link_field is None:
        link_field = "record_uri"
    if uri is None:
        uri = record_uri(request)
    filters = [Filter(link_field, uri, core_utils.COMPARISON.EQ)]
    storage = request.registry.storage
    # Remove the stored files from the attachment backend.
    file_links, _ = storage.get_all("", FILE_LINKS, filters=filters)
    for file_link in file_links:
        request.attachment.delete(file_link['location'])
    # Remove the link records themselves.
    storage.delete_all("", FILE_LINKS, filters=filters, with_deleted=False)
def connect(self, dests=[], name=None, id='', props={}):
    '''Connect this port to other ports.

    After the connection has been made, a delayed reparse of the
    connections for this and the destination port will be triggered.

    @param dests A list of the destination Port objects. Must be provided.
    @param name The name of the connection. If None, a suitable default
        will be created based on the names of the two ports.
    @param id The ID of this connection. If None, one will be generated by
        the RTC implementation.
    @param props Properties of the connection. Required values depend on
        the type of the two ports being connected.
    @raises IncompatibleDataPortConnectionPropsError, FailedToConnectError
    '''
    # NOTE(review): mutable default arguments (dests=[], props={}); they are
    # not mutated here (props is rebound, not modified), but confirm.
    with self._mutex:
        if self.porttype == 'DataInPort' or self.porttype == 'DataOutPort':
            # For data ports, every requested property value must be one the
            # local port and each destination port advertises (or the port
            # advertises 'any' for that property).
            for prop in props:
                if prop in self.properties:
                    if props[prop] not in [x.strip() for x in self.properties[prop].split(',')] and \
                            'any' not in self.properties[prop].lower():
                        # Invalid property selected
                        raise exceptions.IncompatibleDataPortConnectionPropsError
                for d in dests:
                    if prop in d.properties:
                        if props[prop] not in [x.strip() for x in d.properties[prop].split(',')] and \
                                'any' not in d.properties[prop].lower():
                            # Invalid property selected
                            raise exceptions.IncompatibleDataPortConnectionPropsError
        if not name:
            # default name: this port's name joined with the destinations'
            name = self.name + '_'.join([d.name for d in dests])
        props = utils.dict_to_nvlist(props)
        profile = RTC.ConnectorProfile(name, id, [self._obj] + [d._obj for d in dests], props)
        return_code, profile = self._obj.connect(profile)
        if return_code != RTC.RTC_OK:
            raise exceptions.FailedToConnectError(return_code)
        # refresh cached connection info on both ends of the new connection
        self.reparse_connections()
        for d in dests:
            d.reparse_connections()
def _decorate_fun(self, fun):
    """Wrap *fun* so that calling it emits a DeprecationWarning."""
    parts = ["Function %s is deprecated" % fun.__name__]
    if self.extra:
        parts.append(self.extra)
    message = "; ".join(parts)

    def wrapped(*args, **kwargs):
        # Warn on every invocation, then delegate to the original function.
        warnings.warn(message, category=DeprecationWarning)
        return fun(*args, **kwargs)

    # Make the wrapper masquerade as the function it replaces.
    wrapped.__name__ = fun.__name__
    wrapped.__dict__ = fun.__dict__
    wrapped.__doc__ = self._update_doc(fun.__doc__)
    return wrapped
def rate_sp(self):
    """Sets the rate_sp at which the servo travels from 0 to 100.0% (half of the full
    range of the servo). Units are in milliseconds. Example: Setting the rate_sp
    to 1000 means that it will take a 180 degree servo 2 second to move from 0
    to 180 degrees. Note: Some servo controllers may not support this in which
    case reading and writing will fail with `-EOPNOTSUPP`. In continuous rotation
    servos, this value will affect the rate_sp at which the speed ramps up or down.
    """
    # get_attr_int returns the (possibly refreshed) attribute handle along
    # with the value just read; cache the handle for the next access.
    handle, result = self.get_attr_int(self._rate_sp, 'rate_sp')
    self._rate_sp = handle
    return result
def LoadConfig(config_obj, config_file=None, config_fd=None,
               secondary_configs=None, contexts=None, reset=False,
               parser=ConfigFileParser):
    """Initialize a ConfigManager with the specified options.

    Args:
      config_obj: The ConfigManager object to use and update. If None, one will
        be created.
      config_file: Filename to read the config from.
      config_fd: A file-like object to read config data from.
      secondary_configs: A list of secondary config URLs to load.
      contexts: Add these contexts to the config object.
      reset: Completely wipe previous config before doing the load.
      parser: Specify which parser to use.

    Returns:
      The resulting config object. The one passed in, unless None was specified.
    """
    # A fresh config object is needed both on first use and on reset.
    if config_obj is None or reset:
        config_obj = _CONFIG.MakeNewConfig()

    # Prefer an explicit filename; otherwise fall back to a file-like object.
    if config_file is not None:
        config_obj.Initialize(filename=config_file, must_exist=True, parser=parser)
    elif config_fd is not None:
        config_obj.Initialize(fd=config_fd, parser=parser)

    # Pull in any secondary configuration files.
    for secondary in (secondary_configs or []):
        config_obj.LoadSecondaryConfig(secondary)

    # Activate the requested contexts.
    for context in (contexts or []):
        config_obj.AddContext(context)

    return config_obj
def get_connected_service_details(self, project_id, name):
    """GetConnectedServiceDetails.

    [Preview API]
    :param str project_id:
    :param str name:
    :rtype: :class:`<WebApiConnectedServiceDetails> <azure.devops.v5_0.core.models.WebApiConnectedServiceDetails>`
    """
    # Build the route values only for the arguments that were supplied.
    route_values = {}
    for route_key, arg_name, arg_value in (('projectId', 'project_id', project_id),
                                           ('name', 'name', name)):
        if arg_value is not None:
            route_values[route_key] = self._serialize.url(arg_name, arg_value, 'str')
    response = self._send(http_method='GET',
                          location_id='b4f70219-e18b-42c5-abe3-98b07d35525e',
                          version='5.0-preview.1',
                          route_values=route_values)
    return self._deserialize('WebApiConnectedServiceDetails', response)
def tag(s, tokenize=True, encoding="utf-8", **kwargs):
    """Returns a list of (token, tag)-tuples from the given string."""
    # Parse the string, then flatten the sentences into (token, tag) pairs.
    parsed = parse(s, tokenize, True, False, False, False, encoding, **kwargs)
    return [(word[0], word[1])
            for sentence in parsed.split()
            for word in sentence]
def _dumps(self, obj):
    """Serialize *obj* with :prop:serializer when :prop:serialized is True;
    otherwise hand the object back untouched."""
    return self.serializer.dumps(obj) if self.serialized else obj
def _set_value(value):
    '''A function to detect if user is trying to pass a dictionary or list. parse it and return a
    dictionary list or a string.

    Recognized encodings:
      * ``j{...}j``            -- an embedded JSON document
      * ``a|b|c``              -- a list of values (each parsed recursively)
      * ``k1:v1,k2:v2``        -- a dictionary
      * ``a,b,c``              -- a plain list
    Escaped separators (``\\|``, ``\\:``, ``\\,``) suppress parsing and are
    stripped from the returned string.
    '''
    # don't continue if already an acceptable data-type
    if isinstance(value, (bool, dict, list)):
        return value
    # check if json
    if value.startswith('j{') and value.endswith('}j'):
        value = value.replace('j{', '{')
        value = value.replace('}j', '}')
        try:
            return salt.utils.json.loads(value)
        except Exception:
            raise salt.exceptions.CommandExecutionError
    # detect list of dictionaries
    if '|' in value and r'\|' not in value:
        return [_set_value(item) for item in value.split('|')]
    # parse out dictionary if detected
    if ':' in value and r'\:' not in value:
        options = {}
        # split out pairs
        for key_pair in value.split(','):
            # Fix: split only on the FIRST ':' so that values which
            # themselves contain colons (e.g. "host:127.0.0.1:8080") keep
            # their full text. The old double-split silently dropped
            # everything after the second colon.
            k, _, v = key_pair.partition(':')
            options[k] = v
        return options
    # try making a list
    elif ',' in value and r'\,' not in value:
        return value.split(',')
    # just return a string
    else:
        # remove escape chars if added
        for escaped, plain in ((r'\|', '|'), (r'\:', ':'), (r'\,', ',')):
            if escaped in value:
                value = value.replace(escaped, plain)
        return value
def dropEvent(self, event):
    """Listens for query's being dragged and dropped onto this tree.

    :param      event | <QDropEvent>
    """
    mime = event.mimeData()
    # Only accept drops that carry both a table name and a query, and whose
    # table matches the table shown by this tree.
    if mime.hasFormat('application/x-orb-table') and \
            mime.hasFormat('application/x-orb-query'):
        if nativestring(mime.data('application/x-orb-table')) == self.tableTypeName():
            xml = nativestring(mime.data('application/x-orb-query'))
            self.setQuery(Q.fromXmlString(xml))
            return
    super(XOrbTreeWidget, self).dropEvent(event)
def start(widget, processEvents=True, style=None, movie=None):
    """Starts a loader widget on the inputed widget.

    :param      widget | <QWidget>

    :return     <XLoaderWidget>

    .. note:: the ``processEvents`` argument is not used by this
              implementation -- TODO confirm whether callers rely on it.
    """
    if style is None:
        style = os.environ.get('PROJEXUI_LOADER_STYLE', 'gray')
    # there is a bug with the way the loader is handled in a splitter,
    # so bypass it
    parent = widget.parent()
    while isinstance(parent, QSplitter):
        parent = parent.parent()
    # retrieve the loader widget (one is cached per target widget)
    loader = getattr(widget, '_private_xloader_widget', None)
    if not loader:
        loader = XLoaderWidget(parent, style)
        # make sure that if the widget is destroyed, the loader closes
        widget.destroyed.connect(loader.deleteLater)
        setattr(widget, '_private_xloader_widget', loader)
        setattr(widget, '_private_xloader_count', 0)
        loader.move(widget.pos())
        if widget.isVisible():
            loader.show()
        if movie:
            loader.setMovie(movie)
        # track the widget so the loader follows resize/move events
        widget.installEventFilter(loader)
    else:
        # a loader already exists for this widget: bump the nesting count
        count = getattr(widget, '_private_xloader_count', 0)
        setattr(widget, '_private_xloader_count', count + 1)
    loader.resize(widget.size())
    return loader
def assemble_oligos(dna_list, reference=None):
    '''Given a list of DNA sequences, assemble into a single construct.

    :param dna_list: List of DNA sequences - they must be single-stranded.
    :type dna_list: coral.DNA list
    :param reference: Expected sequence - once assembly completed, this will
    be used to reorient the DNA (assembly could potentially occur from either
    side of a linear DNA construct if oligos are in a random order). If this
    fails, an AssemblyError is raised.
    :type reference: coral.DNA
    :raises: AssemblyError if it can't assemble for any reason.
    :returns: A single assembled DNA sequence
    :rtype: coral.DNA

    '''
    # FIXME: this protocol currently only supports 5' ends on the assembly
    # Find all matches for every oligo. If more than 2 per side, error.
    # Self-oligo is included in case the 3' end is self-complementary.
    # 1) Find all unique 3' binders (and non-binders).
    match_3 = [bind_unique(seq, dna_list, right=True) for i, seq in
               enumerate(dna_list)]
    # 2) Find all unique 5' binders (and non-binders).
    match_5 = [bind_unique(seq, dna_list, right=False) for i, seq in
               enumerate(dna_list)]
    # Assemble into 2-tuple. Fix: materialize as a list -- on Python 3,
    # zip() returns a one-shot iterator, which would be exhausted by the
    # first loop below and does not support the zipped[last_index]
    # indexing used during assembly.
    zipped = list(zip(match_5, match_3))
    # 3) If none found, error out with 'oligo n has no binders'
    for i, oligo_match in enumerate(zipped):
        if not any(oligo_match):
            error = 'Oligo {} has no binding partners.'.format(i + 1)
            raise AssemblyError(error)
    # 4) There should be exactly 2 oligos that bind at 3' end but
    # not 5'.
    ends = []
    for i, (five, three) in enumerate(zipped):
        if five is None and three is not None:
            ends.append(i)
    # 5) If more than 2, error with 'too many ends'.
    if len(ends) > 2:
        raise AssemblyError('Too many (>2) end oligos found.')
    # 6) If fewer than 2, error with 'not enough ends'.
    if len(ends) < 2:
        raise AssemblyError('Not enough (<2) end oligos found.')
    # NOTE: If 1-4 are satisfied, unique linear assembly has been found (proof?)
    # 8) Start with first end and build iteratively
    last_index = ends[0]
    assembly = dna_list[last_index]
    flip = True
    # This would be slightly less complicated if the sequences were tied to
    # their match info in a tuple
    # Append next region n-1 times
    for i in range(len(dna_list) - 1):
        if flip:
            # Next oligo needs to be flipped before concatenation
            # Grab 3' match from last oligo's info
            current_index, matchlen = zipped[last_index][1]
            # Get new oligo sequence, make double-stranded for concatenation
            next_oligo = dna_list[current_index].to_ds()
            # Reverse complement for concatenation
            next_oligo = next_oligo.reverse_complement()
            # Don't reverse complement the next one
            flip = False
        else:
            # Grab 5' match from last oligo's info
            current_index, matchlen = zipped[last_index][0]
            # Get new oligo sequence, make double-stranded for concatenation
            next_oligo = dna_list[current_index].to_ds()
            # Reverse complement the next one
            flip = True
        # Trim overlap from new sequence
        next_oligo = next_oligo[(matchlen - 1):]
        # Concatenate and update last oligo's information
        assembly += next_oligo
        last_index = current_index
    if reference:
        if assembly == reference or assembly == reference.reverse_complement():
            return assembly
        else:
            raise AssemblyError('Assembly did not match reference')
    else:
        return assembly
def least_common_multiple(num1, num2):
    """Calculate the least common multiple (LCM) of two positive numbers.

    Examples:
        least_common_multiple(4, 6) -> 12
        least_common_multiple(15, 17) -> 255
        least_common_multiple(2, 6) -> 6

    :param num1: An integer
    :param num2: An integer
    :return: LCM of num1 and num2
    """
    from math import gcd
    # lcm(a, b) == a*b // gcd(a, b). This runs in O(log(min(a, b)))
    # instead of counting upward one candidate at a time, which took
    # O(lcm) iterations in the previous implementation.
    return num1 * num2 // gcd(num1, num2)
def write(self, path):
    '''Write assembly oligos and (if applicable) primers to csv.

    :param path: path to csv file, including .csv extension.
    :type path: str

    '''
    # NOTE(review): the file is opened in binary mode ('wb'), which is the
    # csv-module convention on Python 2; on Python 3 csv.writer expects a
    # text-mode file -- confirm the intended interpreter version.
    with open(path, 'wb') as oligo_file:
        oligo_writer = csv.writer(oligo_file, delimiter=',',
                                  quoting=csv.QUOTE_MINIMAL)
        # Header row.
        oligo_writer.writerow(['name', 'oligo', 'notes'])
        for i, oligo in enumerate(self.oligos):
            name = 'oligo {}'.format(i + 1)
            oligo_len = len(oligo)
            if i != len(self.oligos) - 1:
                # Interior oligos overlap the next oligo; report that Tm.
                oligo_tm = self.overlap_tms[i]
                notes = 'oligo length: {}, '.format(oligo_len) + \
                        'overlap Tm: {:.2f}'.format(oligo_tm)
            else:
                # The final oligo has no following overlap.
                notes = 'oligo length: {}'.format(oligo_len)
            oligo_writer.writerow([name, oligo, notes])
        if self.primers:
            # Amplification primers are appended after the assembly oligos.
            for i, (primer, melt) in enumerate(self.primers):
                oligo_writer.writerow(['primer {}'.format(i + 1), primer,
                                       'Tm: {:.2f}'.format(melt)])
def session_login(self, user=None, passwd=None):
    """Performs a session login by posting the auth information
    to the _session endpoint.

    :param str user: Username used to connect to server.
    :param str passwd: Password, forwarded as the authentication token
        used to connect to server.
    """
    # Delegates to change_credentials, which performs the actual request.
    self.change_credentials(user=user, auth_token=passwd)
def copy(self, target, timeout=500):
    """Copy or download this file to a new local file"""
    has_encoding = bool(self.metadata) and 'encoding' in self.metadata
    if has_encoding:
        # Known text encoding: stream the lines out in text mode.
        with io.open(target, 'w', encoding=self.metadata['encoding']) as out:
            for chunk in self:
                out.write(chunk)
    else:
        # Unknown encoding: write raw bytes, UTF-8-encoding any text lines.
        with io.open(target, 'wb') as out:
            for chunk in self:
                if sys.version < '3' and isinstance(chunk, unicode):  # pylint: disable=undefined-variable
                    out.write(chunk.encode('utf-8'))
                elif sys.version >= '3' and isinstance(chunk, str):
                    out.write(chunk.encode('utf-8'))
                else:
                    out.write(chunk)
def by_type(self, chamber, type, congress=CURRENT_CONGRESS):
    "Return votes by type: missed, party, lone no, perfect"
    # NOTE: `type` shadows the builtin, but it is part of the public
    # signature, so it is kept for backward compatibility.
    check_chamber(chamber)
    # e.g. "115/senate/votes/missed.json"
    path = "{congress}/{chamber}/votes/{type}.json".format(congress=congress,
                                                           chamber=chamber,
                                                           type=type)
    return self.fetch(path)
def _set_defaults(self, v, load=False):
    """Setter method for defaults, mapped from YANG variable /show/defaults (container)

    If this variable is read-only (config: false) in the
    source YANG file, then _set_defaults is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_defaults() directly.
    """
    # Unwrap a user-typed value back to its base representation first.
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        # Re-wrap the value in the YANG container class; this raises if the
        # value is not compatible with the container schema.
        t = YANGDynClass(v, base=defaults.defaults, is_container='container', presence=False, yang_name="defaults", rest_name="defaults", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Display default configuration'}}, namespace='urn:brocade.com:mgmt:brocade-common-def', defining_module='brocade-common-def', yang_type='container', is_config=True)
    except (TypeError, ValueError):
        raise ValueError({'error-string': """defaults must be of a type compatible with container""", 'defined-type': "container", 'generated-type': """YANGDynClass(base=defaults.defaults, is_container='container', presence=False, yang_name="defaults", rest_name="defaults", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Display default configuration'}}, namespace='urn:brocade.com:mgmt:brocade-common-def', defining_module='brocade-common-def', yang_type='container', is_config=True)""", })
    # Store the validated container and notify listeners, if any.
    self.__defaults = t
    if hasattr(self, '_set'):
        self._set()
def __undo_filter_sub(self, scanline):
    """Undo sub filter."""
    # Each byte from offset `fu` onward gets the byte one pixel to the
    # left added back in, modulo 256. The scanline is mutated in place.
    left = 0
    for pos in range(self.fu, len(scanline)):
        scanline[pos] = (scanline[pos] + scanline[left]) & 0xff
        left += 1
def all_logging_disabled(highest_level=logging.CRITICAL):
    """Disable all logging temporarily.

    A context manager that will prevent any logging messages triggered during the body from being processed.

    Args:
        highest_level: the maximum logging level that is being blocked
    """
    # Remember the current global disable level so it can be restored.
    saved_level = logging.root.manager.disable
    logging.disable(highest_level)
    try:
        yield
    finally:
        # Always restore the previous level, even if the body raised.
        logging.disable(saved_level)
def importSignedCertificate(self, alias, certFile):
    """This operation imports a certificate authority (CA) signed SSL
    certificate into the key store."""
    # POST the certificate file to the alias-specific endpoint.
    endpoint = self._url + "/sslCertificates/{}/importSignedCertificate".format(alias)
    return self._post(url=endpoint,
                      files={"file": certFile},
                      param_dict={"f": "json"},
                      proxy_port=self._proxy_port,
                      proxy_url=self._proxy_url)
def pause(self):
    """Pause the animation."""
    # Record the pause timestamp/frame only on the transition from
    # "running" to "paused"; nested pauses just increment the counter.
    if not self._pause_level:
        self._paused_time = self._clock() + self._offset
        self._paused_frame = self.current_frame
    self._pause_level = self._pause_level + 1
def set_project(self, project):
    """Set the project selection to the given project

    :param project: the project to select
    :type project: :class:`djadapter.models.Project`
    :returns: None
    :rtype: None
    :raises: ValueError
    """
    model = self.prjbrws.model
    # Scan the browser's top-level items for the matching project.
    target_index = None
    for row, item in enumerate(model.root.childItems):
        if item.internal_data() == project:
            target_index = model.index(row, 0)
            break
    if target_index is None:
        raise ValueError("Could not select the given taskfile. No project %s found." % project.name)
    self.prjbrws.set_index(0, target_index)
def _set_exception(self):
    """Called by a Job object to tell that an exception occured
    during the processing of the function. The object will become
    ready but not successful. The collector's notify_ready()
    method will be called, but NOT the callback method"""
    assert not self.ready()
    # Store the (type, value, traceback) triple as the result payload.
    self._data = sys.exc_info()
    self._success = False
    self._event.set()
    collector = self._collector
    if collector is not None:
        collector.notify_ready(self)
def unselect_all(self):
    """Clearing the selected_item also clears the focused_item."""
    previously_selected = self._get_selected_items()
    # Clear the selection without emitting per-item selection events.
    with self._suppress_selection_events():
        self._selection.clear()
    # Redraw the items that lost their selection, then announce the change.
    self.queue_draw_item(*previously_selected)
    self.emit('selection-changed', self._get_selected_items())
def get_log_entries(self):
    """Gets all log entries.

    In plenary mode, the returned list contains all known entries or
    an error results. Otherwise, the returned list may contain only
    those entries that are accessible through this session.

    return: (osid.logging.LogEntryList) - a list of log entries
    raise:  OperationFailed - unable to complete request
    raise:  PermissionDenied - authorization failure
    *compliance: mandatory -- This method must be implemented.*
    """
    # Implemented from template for
    # osid.resource.ResourceLookupSession.get_resources
    # NOTE: This implementation currently ignores plenary view
    collection = JSONClientValidated('logging',
                                     collection='LogEntry',
                                     runtime=self._runtime)
    # Newest entries first.
    cursor = collection.find(self._view_filter()).sort('_id', DESCENDING)
    return objects.LogEntryList(cursor, runtime=self._runtime, proxy=self._proxy)
def get_topic_for_path(channel, chan_path_tuple):
    """Given channel (dict) that contains a hierary of TopicNode dicts, we use the
    walk the path given in `chan_path_tuple` to find the corresponding TopicNode."""
    assert chan_path_tuple[0] == channel['dirname'], 'Wrong channeldir'
    # Walk down the tree one path segment at a time, skipping the channel
    # name itself (the first tuple element).
    node = channel
    for dirname in chan_path_tuple[1:]:
        matches = [child for child in node['children']
                   if 'dirname' in child and child['dirname'] == dirname]
        node = matches[0]
    return node
def _update_atomtypes(unatomtyped_topology, res_name, prototype):
    """Update atomtypes in residues in a topology using a prototype topology.

    Atomtypes are updated when residues in each topology have matching names.

    Parameters
    ----------
    unatomtyped_topology : openmm.app.Topology
        Topology lacking atomtypes defined by `find_atomtypes`.
    res_name : str
        Name of the residues whose atoms should receive prototype atomtypes.
    prototype : openmm.app.Topology
        Prototype topology with atomtypes defined by `find_atomtypes`.
    """
    # Collect the prototype atom ids once; they are reused for every
    # matching residue.
    prototype_ids = [atom.id for atom in prototype.atoms()]
    for residue in unatomtyped_topology.residues():
        if residue.name != res_name:
            continue
        # Copy the prototype ids onto this residue's atoms, pairwise.
        for atom, proto_id in zip(residue.atoms(), prototype_ids):
            atom.id = proto_id
def crawl_legend(self, ax, legend):
    """Recursively look through objects in legend children"""
    legendElements = list(utils.iter_all_children(legend._legend_box,
                                                  skipContainers=True))
    legendElements.append(legend.legendPatch)
    for child in legendElements:
        # force a large zorder so it appears on top
        child.set_zorder(1E6 + child.get_zorder())
        try:
            # What kind of object...
            if isinstance(child, matplotlib.patches.Patch):
                self.draw_patch(ax, child, force_trans=ax.transAxes)
            elif isinstance(child, matplotlib.text.Text):
                if not (child is legend.get_children()[-1] and
                        child.get_text() == 'None'):
                    self.draw_text(ax, child, force_trans=ax.transAxes)
            elif isinstance(child, matplotlib.lines.Line2D):
                self.draw_line(ax, child, force_trans=ax.transAxes)
            elif isinstance(child, matplotlib.collections.Collection):
                self.draw_collection(ax, child, force_pathtrans=ax.transAxes)
            else:
                # Fix: the warning text previously misspelled "implemented"
                # as "impemented".
                warnings.warn("Legend element %s not implemented" % child)
        except NotImplementedError:
            warnings.warn("Legend element %s not implemented" % child)
def delete_entities(namespace, workspace, json_body):
    """Delete entities in a workspace.

    Note: This action is not reversible. Be careful!

    Args:
        namespace (str): project to which workspace belongs
        workspace (str): Workspace name
        json_body:
            "entityType": "string",
            "entityName": "string"

    Swagger:
        https://api.firecloud.org/#!/Entities/deleteEntities
    """
    # POST the list of entities to the workspace's delete endpoint.
    endpoint = "workspaces/{}/{}/entities/delete".format(namespace, workspace)
    return __post(endpoint, json=json_body)
def ungrab_server(self, onerror=None):
    """Release the server if it was previously grabbed by this client."""
    # Fire-and-forget X protocol request; errors go to `onerror`, if given.
    request.UngrabServer(display=self.display, onerror=onerror)
def region(self, start=0, end=None):
    '''Returns a region of ``Sequence.sequence``, in FASTA format.

    If called without kwargs, the entire sequence will be returned.

    Args:
        start (int): Start position of the region to be returned. Default
            is 0.
        end (int): End position of the region to be returned. Negative values
            will function as they do when slicing strings.

    Returns:
        str: A region of ``Sequence.sequence``, in FASTA format
    '''
    # Slicing with end=None keeps everything from start onward, which is
    # exactly the documented default behavior.
    subsequence = self.sequence[start:end]
    return '>{}\n{}'.format(self.id, subsequence)
def close(self):
    """Shut down the socket connection, client and controller"""
    self._sock = None
    self._controller = None
    # Hand the allocated port back to the pool, if one was ever picked.
    port = getattr(self, "_port", None)
    if port:
        portpicker.return_port(port)
        self._port = None
def passwordLogin(self, username):
    """Generate a new challenge for the given username."""
    # A fresh 16-byte random challenge is issued per login attempt.
    challenge = secureRandom(16)
    self.challenge = challenge
    self.username = username
    return {'challenge': challenge}
def display_name(self):
    """Find the most appropriate display name for a user: look for a "display_name", then
    a "real_name", and finally fall back to the always-present "name"."""
    raw = self._raw
    for key in self._NAME_KEYS:
        # Prefer a top-level value, then the same key inside "profile".
        if raw.get(key):
            return raw[key]
        if "profile" in raw and raw["profile"].get(key):
            return raw["profile"][key]
    return raw["name"]
def build_model_from_txt(self, fname):
    """Construct the model and perform regressions based on data in a txt file.

    Parameters
    ----------
    fname : str
        The name of the file to load.
    """
    # Parse the two-column text file, then delegate the fit to the x/y path.
    x_values, y_values = read_column_data_from_txt(fname)
    self.build_model_from_xy(x_values, y_values)
def as_dict(self):
    """Return the dependencies as a dictionary.

    Returns:
        dict: dictionary of dependencies.
    """
    return {
        'name': str(self),
        'modules': [module.as_dict() for module in self.modules],
        'packages': [package.as_dict() for package in self.packages],
    }
def sql(self):
    """If you access this attribute, we will build an SQLite database
    out of the FASTA file and you will be able access everything in an
    indexed fashion, and use the blaze library via sql.frame"""
    from fasta.indexed import DatabaseFASTA, fasta_to_sql
    database = DatabaseFASTA(self.prefix_path + ".db")
    # Build the database only once; later accesses reuse the file on disk.
    if not database.exists:
        fasta_to_sql(self.path, database.path)
    return database
def update(self, data, overwrite=None, overwrite_sections=True, overwrite_options=True):
    """Updates the currently stored configuration with new *data*, given as a dictionary. When
    *overwrite_sections* is *False*, sections in *data* that are already present in the current
    config are skipped. When *overwrite_options* is *False*, existing options are not
    overwritten. When *overwrite* is not *None*, both *overwrite_sections* and
    *overwrite_options* are set to its value."""
    # A single *overwrite* flag trumps the individual settings.
    if overwrite is not None:
        overwrite_sections = overwrite
        overwrite_options = overwrite
    for section, options in data.items():
        if not self.has_section(section):
            self.add_section(section)
        elif not overwrite_sections:
            # Existing section with overwriting disabled: skip entirely.
            continue
        for option, value in options.items():
            if overwrite_options or not self.has_option(section, option):
                self.set(section, option, str(value))
def get_field_names(self):
    """Return the field names to get values for"""
    col_model = self.request.get("colModel", None)
    if not col_model:
        # No column model requested: the UID is always needed.
        return ["UID", ]
    names = []
    col_model = json.loads(_u(col_model))
    if isinstance(col_model, (list, tuple)):
        # NOTE(review): relies on Python 2 semantics -- `map` must return a
        # list here for the `in` check and `append` below to work. Confirm
        # before running under Python 3.
        names = map(lambda c: c.get("columnName", "").strip(), col_model)
    # UID is used by reference widget to know the object that the user
    # selected from the popup list
    if "UID" not in names:
        names.append("UID")
    # Drop any empty column names.
    return filter(None, names)
def _calculate(self, field):
    '''If the offset is unknown, return 0'''
    # Offset of the anchor field, or 0 when no base field is configured.
    anchor = self.base_field.offset if self.base_field is not None else 0
    target = self._field.offset
    if target is None or anchor is None:
        # Either offset is still unresolved.
        return 0
    return target - anchor
def run_operation(jboss_config, operation, fail_on_error=True, retries=1):
    '''Execute an operation against jboss instance through the CLI interface.

    jboss_config
        Configuration dictionary with properties specified above.
    operation
        An operation to execute against jboss instance
    fail_on_error (default=True)
        Is true, raise CommandExecutionError exception if execution fails.
        If false, 'success' property of the returned dictionary is set to False
    retries:
        Number of retries in case of "JBAS012144: Could not connect to remote" error.

    CLI Example:

    .. code-block:: bash

        salt '*' jboss7_cli.run_operation '{"cli_path": "integration.modules.sysmod.SysModuleTest.test_valid_docs", "controller": "10.11.12.13:9999", "cli_user": "jbossadm", "cli_password": "jbossadm"}' my_operation
    '''
    cli_command_result = __call_cli(jboss_config, operation, retries)

    if cli_command_result['retcode'] == 0:
        if _is_cli_output(cli_command_result['stdout']):
            cli_result = _parse(cli_command_result['stdout'])
            cli_result['success'] = cli_result['outcome'] == 'success'
        else:
            raise CommandExecutionError('Operation has returned unparseable output: {0}'.format(cli_command_result['stdout']))
    else:
        if _is_cli_output(cli_command_result['stdout']):
            cli_result = _parse(cli_command_result['stdout'])
            cli_result['success'] = False
            # Fix: guard against failure descriptions that do not start with
            # a "JBASxxxx:" code -- previously `match.group(1)` raised
            # AttributeError when the regex did not match (or the key was
            # missing entirely).
            match = re.search(r'^(JBAS\d+):', cli_result.get('failure-description', ''))
            cli_result['err_code'] = match.group(1) if match else None
            cli_result['stdout'] = cli_command_result['stdout']
        else:
            if fail_on_error:
                raise CommandExecutionError('''Command execution failed, return code={retcode}, stdout='{stdout}', stderr='{stderr}' '''.format(**cli_command_result))
            else:
                cli_result = {
                    'success': False,
                    'stdout': cli_command_result['stdout'],
                    'stderr': cli_command_result['stderr'],
                    'retcode': cli_command_result['retcode'],
                }
    return cli_result
def round_to_specified_time(self, a_datetime, hour, minute, second, mode="lower"):
    """Round the given datetime to specified hour, minute and second.

    :param a_datetime: the datetime to align.
    :param hour: target hour of day.
    :param minute: target minute.
    :param second: target second.
    :param mode: if 'lower', floor to the target time; if 'upper',
        ceiling to it.
    """
    mode = mode.lower()
    # Candidate: the same calendar day at the requested time-of-day.
    anchored = datetime(a_datetime.year, a_datetime.month, a_datetime.day,
                        hour, minute, second)
    if mode == "lower":
        # Floor: same day if not in the future, otherwise the previous day.
        if anchored <= a_datetime:
            return anchored
        return timewrapper.add_days(anchored, -1)
    if mode == "upper":
        # Ceiling: same day if not in the past, otherwise the next day.
        if anchored >= a_datetime:
            return anchored
        return timewrapper.add_days(anchored, 1)
    raise ValueError("'mode' has to be lower or upper!")
def _find_feature_type(self, feature_name, eopatch):
    """Iterates over allowed feature types of given EOPatch and tries to find a feature type for which there
    exists a feature with given name

    :return: A feature type or `None` if such feature type does not exist
    :rtype: FeatureType or None
    """
    # The first allowed dict-like feature type containing the name wins.
    return next(
        (ftype for ftype in self.allowed_feature_types
         if ftype.has_dict() and feature_name in eopatch[ftype]),
        None,
    )
def hugoniot_p(rho, rho0, c0, s):
    """calculate pressure along a Hugoniot

    :param rho: density in g/cm^3
    :param rho0: density at 1 bar in g/cm^3
    :param c0: velocity at 1 bar in km/s
    :param s: slope of the velocity change
    :return: pressure in GPa
    """
    # Compression strain: eta = 1 - V/V0 = 1 - rho0/rho.
    strain = 1. - (rho0 / rho)
    # Rankine-Hugoniot pressure: P = rho0 * c0^2 * eta / (1 - s*eta)^2.
    denominator = np.power((1. - s * strain), 2.)
    return rho0 * c0 * c0 * strain / denominator
def handle(self, *controller_args, **controller_kwargs):
    """handles the request and returns the response

    This should set any response information directly onto self.response

    this method has the same signature as the request handling methods
    (eg, GET, POST) so subclasses can override this method and add decorators

    :param *controller_args: tuple, the path arguments that will be passed to
        the request handling method (eg, GET, POST)
    :param **controller_kwargs: dict, the query and body params merged together
    """
    req = self.request
    res = self.response
    res.set_header('Content-Type', "{};charset={}".format(
        self.content_type,
        self.encoding
    ))

    # Honor the client's requested encoding when one was supplied.
    encoding = req.accept_encoding
    res.encoding = encoding if encoding else self.encoding

    res_method_name = ""
    controller_methods = self.find_methods()
    # controller_args, controller_kwargs = self.find_method_params()
    # Try each candidate method in turn until one handles the request;
    # version/route mismatches fall through to the next candidate.
    for controller_method_name, controller_method in controller_methods:
        try:
            logger.debug("Attempting to handle request with {}.{}.{}".format(
                req.controller_info['module_name'],
                req.controller_info['class_name'],
                controller_method_name
            ))
            res.body = controller_method(*controller_args, **controller_kwargs)
            res_method_name = controller_method_name
            break

        except VersionError as e:
            logger.debug("Request {}.{}.{} failed version check [{} not in {}]".format(
                req.controller_info['module_name'],
                req.controller_info['class_name'],
                controller_method_name,
                e.request_version,
                e.versions
            ))

        except RouteError:
            logger.debug("Request {}.{}.{} failed routing check".format(
                req.controller_info['module_name'],
                req.controller_info['class_name'],
                controller_method_name
            ))

    if not res_method_name:
        # https://www.w3.org/Protocols/rfc2616/rfc2616-sec5.html#sec5.1
        # An origin server SHOULD return the status code 405 (Method Not Allowed)
        # if the method is known by the origin server but not allowed for the
        # requested resource
        raise CallError(405, "Could not find a method to satisfy {}".format(req.path))
def filter(self, u):
    """Return the identities of *u* that this matcher can work with.

    :param u: unique identity which stores the identities to filter
    :returns: a list of identities valid to work with this matcher
    :raises ValueError: when the unique identity is not an instance
        of UniqueIdentity class
    """
    if not isinstance(u, UniqueIdentity):
        raise ValueError("<u> is not an instance of UniqueIdentity")
    valid = []
    for identity in u.identities:
        # Skip identities from sources this matcher does not handle.
        if self.sources and identity.source.lower() not in self.sources:
            continue
        # Skip blacklisted identities.
        if self._check_blacklist(identity):
            continue
        if self.strict:
            # Strict mode: only keep fields matching the configured patterns.
            email = identity.email.lower() if self._check_pattern(self.email_pattern, identity.email) else None
            name = identity.name.lower() if self._check_pattern(self.name_pattern, identity.name) else None
        else:
            email = identity.email.lower() if identity.email else None
            name = identity.name.lower() if identity.name else None
        if email or name:
            valid.append(EmailNameIdentity(identity.id, identity.uuid, email, name))
    return valid
def _dequeue_update(self, change):
    """Apply the shape update only once all queued changes have completed."""
    self._update_count -= 1
    # Wait until the last pending change before updating.
    if self._update_count == 0:
        self.update_shape(change)
def ignored_double_corner(intersection, tangent_s, tangent_t, edge_nodes1, edge_nodes2):
    """Check if an intersection is an "ignored" double corner.

    .. note::
        This is a helper used only by :func:`ignored_corner`, which in turn is
        only used by :func:`classify_intersection`.

    Handles the case where both ``s`` and ``t`` are ``0``. Decides by
    checking whether either edge through the ``t`` corner passes through
    the interior of the other surface; interiority is established via the
    signs of a few cross products.

    Args:
        intersection (.Intersection): An intersection to "diagnose".
        tangent_s (numpy.ndarray): The tangent vector (``2 x 1`` array) to
            the first curve at the intersection.
        tangent_t (numpy.ndarray): The tangent vector (``2 x 1`` array) to
            the second curve at the intersection.
        edge_nodes1 (Tuple[numpy.ndarray, numpy.ndarray, numpy.ndarray]): The
            nodes of the three edges of the first surface being intersected.
        edge_nodes2 (Tuple[numpy.ndarray, numpy.ndarray, numpy.ndarray]): The
            nodes of the three edges of the second surface being intersected.

    Returns:
        bool: Indicates if the corner is to be ignored.
    """
    flat_s = tangent_s.ravel(order="F")
    flat_t = tangent_t.ravel(order="F")
    # Tangent of the *other* edge of the ``s`` surface ending at the corner.
    incoming_edge_s = edge_nodes1[(intersection.index_first - 1) % 3]
    alt_flat_s = _curve_helpers.evaluate_hodograph(1.0, incoming_edge_s).ravel(order="F")
    # A positive cross product indicates ``tangent_t`` is interior to the
    # corresponding ``s`` tangent. If it is interior to both, the surfaces
    # do more than just "kiss" at the corner, so it must not be ignored.
    cross_tt = _helpers.cross_product(flat_s, flat_t)
    if cross_tt >= 0.0 and _helpers.cross_product(alt_flat_s, flat_t) >= 0.0:
        return False

    # ``tangent_t`` is not interior; examine the other ``t`` edge that ends
    # at the corner, flipping its "in" tangent so that it points "out".
    incoming_edge_t = edge_nodes2[(intersection.index_second - 1) % 3]
    alt_flat_t = -_curve_helpers.evaluate_hodograph(1.0, incoming_edge_t).ravel(order="F")
    cross_alt = _helpers.cross_product(flat_s, alt_flat_t)
    if cross_alt >= 0.0 and _helpers.cross_product(alt_flat_s, alt_flat_t) >= 0.0:
        return False

    # Neither ``t`` tangent is interior to the ``s`` surface. Either the
    # surfaces have no interior intersection, or the ``s`` surface is bounded
    # by both ``t`` edges at the corner. The latter holds exactly when
    # ``tangent_s`` is interior to both ``t`` tangents: ``cross_tt`` holds
    # (tangent_s) x (tangent_t), so its negative tells if ``tangent_s`` is
    # interior; ``cross_alt`` holds (tangent_s) x (alt_tangent_t) with the
    # sign of ``alt_tangent_t`` already reversed, so the sign flip and the
    # reversed cross-product arguments cancel out.
    return cross_tt > 0.0 or cross_alt < 0.0
def humanize_hours(total_hours, frmt='{hours:02d}:{minutes:02d}:{seconds:02d}', negative_frmt=None):
    """Given time in hours, return a string representing the time."""
    # Convert to whole seconds and delegate to the seconds formatter.
    total_seconds = int(float(total_hours) * 3600)
    return humanize_seconds(total_seconds, frmt, negative_frmt)
def path(self, *paths, **kwargs):
    """Build a new Path from ``self.__root__`` plus the given sub paths.

    :param paths: list of sub paths
    :param kwargs: e.g. ``required=False``
    :rtype: Path
    """
    cls = self.__class__
    return cls(self.__root__, *paths, **kwargs)
def nz(value, none_value, strict=True):
    '''Return ``none_value`` when ``value`` is "null", otherwise ``value``.

    This function is named after an old VBA function. It returns a default
    value if the passed in value is None. If ``strict`` is False it will
    treat other empty values (as judged by ``is_not_null``, e.g. an empty
    string) as None as well.

    example:
        nz(None, "hello")       --> "hello"
        nz("x", "hello")        --> "x"
        nz("", "hello")         --> ""
        nz("", "hello", False)  --> "hello"
    '''
    # NOTE: the previous version carried debug-print scaffolding that was
    # unreachable (its flag was False on every path) and read a global
    # ``DEBUG``; that dead code has been removed.
    if strict:
        return none_value if value is None else value
    # Non-strict: treat empty-ish values as null too.
    return value if is_not_null(value) else none_value
def _plot_gender_group(ax, data, options, gender, status, fmt, style, label_fmt, file_suffix):
    """Plot one gender/status subgroup and return its (artist, label) pair."""
    mask = np.logical_and(data["gender"] == gender, data["status"] == status)
    artist, = ax.plot(data["chr23"][mask], data["chr24"][mask], fmt, **style)
    if options.summarized_intensities is None:
        # Save the subgroup's data so that it is faster to rerun the analysis.
        print_data_to_file(data[mask], "{}.{}.txt".format(options.out, file_suffix))
    return artist, label_fmt.format(sum(mask))


def plot_gender(data, options):
    """Plots the gender.

    :param data: the data to plot.
    :param options: the options.
    :type data: numpy.recarray
    :type options: argparse.Namespace

    Plots the summarized intensities of the markers on the Y chromosomes in
    function of the markers on the X chromosomes, with problematic samples
    with different colors.

    Also uses :py:func:`print_data_to_file` to save the data, so that it is
    faster to rerun the analysis.
    """
    if data is None:
        # There is a problem...
        msg = ("no data: specify either '--bfile' and '--intensities', or "
               "'--summarized-intensities'")
        raise ProgramError(msg)
    import matplotlib as mpl
    if options.format != "X11" and mpl.get_backend() != "agg":
        mpl.use("Agg")
    import matplotlib.pyplot as plt
    if options.format != "X11":
        plt.ioff()
    # The figure and axes
    fig = plt.figure()
    fig.subplots_adjust(top=0.84)
    ax = fig.add_subplot(111)
    # Changing the spines
    ax.xaxis.set_ticks_position("bottom")
    ax.yaxis.set_ticks_position("left")
    ax.spines["top"].set_visible(False)
    ax.spines["right"].set_visible(False)
    # Setting the axis labels
    ax.set_xlabel(options.xlabel)
    ax.set_ylabel(options.ylabel)
    # One spec per subgroup: (gender, status, marker fmt, marker style,
    # legend label template, output-file suffix).
    groups = [
        ("Male", "OK", "o", dict(ms=5, mec="#0099CC", mfc="#0099CC"),
         "OK Males (n={})", "ok_males"),
        ("Female", "OK", "o", dict(ms=5, mec="#CC0000", mfc="#CC0000"),
         "OK Females (n={})", "ok_females"),
        ("Unknown", "OK", "o", dict(ms=5, mec="#555555", mfc="#555555"),
         "OK Unknowns (n={})", "ok_unknowns"),
        ("Male", "Problem", "^", dict(ms=6, mec="#000000", mfc="#669900"),
         "Problematic Males (n={})", "problematic_males"),
        ("Female", "Problem", "v", dict(ms=6, mec="#000000", mfc="#9933CC"),
         "Problematic Females (n={})", "problematic_females"),
        ("Unknown", "Problem", ">", dict(ms=6, mec="#000000", mfc="#555555"),
         "Problematic Unknown (n={})", "problematic_unknowns"),
    ]
    # For the legend
    plot_object = []
    labels = []
    for gender, status, fmt, style, label_fmt, suffix in groups:
        artist, label = _plot_gender_group(
            ax, data, options, gender, status, fmt, style, label_fmt, suffix,
        )
        plot_object.append(artist)
        labels.append(label)
    # The legend
    prop = mpl.font_manager.FontProperties(size=10)
    ax.legend(plot_object, labels, loc=8, numpoints=1, fancybox=True,
              prop=prop, ncol=2, bbox_to_anchor=(0., 1.02, 1., .102),
              borderaxespad=0.)
    # Setting the limits with a small margin
    xlim = ax.get_xlim()
    ax.set_xlim((xlim[0] - 0.01, xlim[1] + 0.01))
    ylim = ax.get_ylim()
    ax.set_ylim((ylim[0] - 0.01, ylim[1] + 0.01))
    if options.format == "X11":
        plt.show()
    else:
        file_name = "{}.{}".format(options.out, options.format)
        try:
            plt.savefig(file_name)
        except IOError:
            msg = "{}: can't write file".format(file_name)
            raise ProgramError(msg)
def splay_health(health_target):
    """Set Health Check path, port, and protocol.

    Args:
        health_target (str): The health target. ie ``HTTP:80``

    Returns:
        HealthCheck: A **collections.namedtuple** class with *path*, *port*,
        *proto*, and *target* attributes.
    """
    HealthCheck = collections.namedtuple('HealthCheck', ['path', 'port', 'proto', 'target'])
    proto, remainder = health_target.split(':')
    port, *path_segments = remainder.split('/')
    if proto == 'TCP':
        # TCP health checks never carry a path.
        path = ''
    elif path_segments:
        path = '/{0}'.format('/'.join(path_segments))
    else:
        # Default path when none was supplied.
        path = '/healthcheck'
    target = '{0}:{1}{2}'.format(proto, port, path)
    health = HealthCheck(path, port, proto, target)
    LOG.info(health)
    return health
def flow_tuple(data):
    """Build the 5-tuple (src, dst, sport, dport, proto) describing a flow."""
    packet = data['packet']
    transport = data.get('transport')
    src = net_utils.inet_to_str(packet.get('src')) if packet.get('src') else None
    dst = net_utils.inet_to_str(packet.get('dst')) if packet.get('dst') else None
    sport = transport.get('sport') if transport else None
    dport = transport.get('dport') if transport else None
    # Without a transport layer, fall back to the packet-level type.
    proto = transport.get('type') if transport else packet['type']
    return (src, dst, sport, dport, proto)
def folderitem(self, obj, item, index):
    """Service triggered each time an item is iterated in folderitems.

    The use of this service prevents the extra-loops in child objects.

    :obj: the instance of the class to be foldered
    :item: dict containing the properties of the object to be used by
        the template
    :index: current index of the item
    """
    due_date = obj.getDueDate
    item["getDateReceived"] = self.ulocalized_time(obj.getDateReceived)
    item["getDueDate"] = self.ulocalized_time(due_date)
    if due_date and due_date < DateTime():
        item["after"]["DueDate"] = get_image("late.png", title=t(_("Late Analysis")))
    # Add Priority column
    priority_sort_key = obj.getPrioritySortkey
    if not priority_sort_key:
        # Default priority is Medium = 3.
        # The format of PrioritySortKey is <priority>.<created>
        priority_sort_key = "3.%s" % obj.created.ISO8601()
    priority = priority_sort_key.split(".")[0]
    priority_text = t(PRIORITIES.getValue(priority))
    # BUGFIX: the template previously ended with "<div>", opening a second,
    # never-closed div; it must close the one it opened.
    html = "<div title='{}' class='priority-ico priority-{}'></div>"
    item["replace"]["Priority"] = html.format(priority_text, priority)
    return item
def _get_authorization_headers(self) -> dict:
    """Constructs and returns the Authorization header for the client app.

    Args:
        None

    Returns:
        header dict for communicating with the authorization endpoints
    """
    credentials = '{}:{}'.format(self.client_id, self.client_secret)
    # base64.encodestring() was deprecated and removed in Python 3.9.
    # b64encode() yields the same encoding without embedded newlines, so
    # the old replace('\n', '') cleanup is no longer needed.
    auth = base64.b64encode(credentials.encode('latin-1')).decode('latin-1')
    return {'Authorization': 'Basic {}'.format(auth)}
def get_json_event_start(event_buffer):
    """Find where, in a buffer of JSON events, the event time changes.

    Scans ``event_buffer`` for consecutive JSON events and returns
    information about the first event whose ``_time`` value differs from
    the preceding event's time.

    :param event_buffer: string of concatenated JSON events
    :returns: tuple ``(split_pos, event_end, last_time)`` where
        ``split_pos`` is two characters before the differing event's start,
        ``event_end`` is one past that event's terminator, and
        ``last_time`` is the time shared by the preceding events;
        ``(-1, -1, "")`` when no such boundary exists.
    """
    event_start_pattern = '{"_cd":"'
    time_key_pattern = '"_time":"'
    time_end_pattern = '"'
    # The second terminator handles an old json output format bug.
    event_end_patterns = ('"},\n', '"}[]')

    def find_event_end(start):
        # Index one past the event terminator, or -1 when none exists.
        # BUGFIX: the old code computed ``find(...) + len(pattern)``, which
        # can never be negative, so its not-found branches were dead and
        # truncated buffers produced garbage offsets.
        for pattern in event_end_patterns:
            pos = event_buffer.find(pattern, start)
            if pos >= 0:
                return pos + len(pattern)
        return -1

    def extract_time(start):
        # The "_time" value of the event beginning at ``start``.
        time_start = event_buffer.find(time_key_pattern, start) + len(time_key_pattern)
        time_end = event_buffer.find(time_end_pattern, time_start + 1)
        return event_buffer[time_start:time_end]

    event_start = event_buffer.find(event_start_pattern)
    if event_start < 0:
        return (-1, -1, "")
    event_end = find_event_end(event_start)
    if event_end < 0:
        return (-1, -1, "")
    last_time = extract_time(event_start)
    # Walk the remaining events until one carries a different time.
    while True:
        # BUGFIX: searching from ``event_end`` (not ``event_end + 1``) so an
        # event that begins exactly at the previous terminator is not skipped.
        event_start = event_buffer.find(event_start_pattern, event_end)
        if event_start < 0:
            return (-1, -1, "")
        event_end = find_event_end(event_start)
        if event_end < 0:
            return (-1, -1, "")
        this_time = extract_time(event_start)
        if this_time != last_time:
            return (event_start - 2, event_end, last_time)
def subtract_timedelta(self, delta):
    """Remove timedelta duration from the instance.

    :param delta: The timedelta instance
    :type delta: datetime.timedelta
    :rtype: Time
    :raises TypeError: if *delta* carries a (non-zero) days component
    """
    if delta.days != 0:
        raise TypeError("Cannot subtract timedelta with days to Time.")
    return self.subtract(
        seconds=delta.seconds,
        microseconds=delta.microseconds,
    )
def get_pattern_mat(oracle, pattern):
    """Build a binary matrix with one row per pattern extracted from a vmo.

    :param oracle: input vmo object
    :param pattern: pattern extracted from oracle, as
        ``(occurrence_states, length)`` pairs
    :return: a numpy matrix that could be used to visualize the pattern
        extracted
    """
    n_cols = oracle.n_states - 1
    pattern_mat = np.zeros((len(pattern), n_cols))
    for row, (states, length) in enumerate(pattern):
        for end_state in states:
            # Mark the span covered by this occurrence of the pattern.
            pattern_mat[row, end_state - length:end_state - 1] = 1
    return pattern_mat
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.