signature
stringlengths 29
44.1k
| implementation
stringlengths 0
85.2k
|
|---|---|
def get_all_destinations(self, server_id):
    """Return all listener destination instances in a WBEM server.

    Contacts the WBEM server and enumerates the instances of the CIM
    class named by ``DESTINATION_CLASSNAME`` in its Interop namespace.

    Parameters:

      server_id (:term:`string`):
        The server ID of the WBEM server, returned by
        :meth:`~pywbem.WBEMSubscriptionManager.add_server`.

    Returns:

      :class:`py:list` of :class:`~pywbem.CIMInstance`: The listener
      destination instances.

    Raises:

        Exceptions raised by :class:`~pywbem.WBEMConnection`.
    """
    # _get_server() also validates that server_id is known.
    server = self._get_server(server_id)
    return server.conn.EnumerateInstances(
        DESTINATION_CLASSNAME, namespace=server.interop_ns)
|
def getModulePath(project_path, module_name, verbose):
    '''Search for `module_name` (with `project_path` appended to sys.path)
    and return the filepath of its code object, or None if no filepath
    could be determined.

    Fixes over the original: the two bare ``except:`` clauses are narrowed
    to ``except Exception``, and the loader-lookup failure path now returns
    None instead of falling through with ``package`` unbound (which used to
    surface as a NameError swallowed by a later bare except).
    '''
    if not module_name:
        return None
    sys.path.append(project_path)
    try:
        package = pkgutil.get_loader(module_name)
    except ImportError:
        if verbose:
            print("Parent module for " + module_name + " not found.")
        return None
    except Exception:
        if verbose:
            print(module_name + " not loaded for bizarre reasons")
        return None
    if not package:
        if verbose:
            print("Module " + module_name + " not found.")
        return None
    try:
        code = package.get_code(module_name)
        if code:
            return code.co_filename
        if package.find_spec(module_name).has_location is False:
            # built-in module such as itertools
            return None
        # perhaps filename is in package.find_spec(module_name).origin?
        # a good reference is https://www.python.org/dev/peps/pep-0302/
        return None
    except ImportError:
        if verbose:
            print("Code object unavailable for " + module_name)
        return None
    except AttributeError:
        if verbose:
            print(module_name + " is an ExtensionFileLoader object")
        return None
    except Exception:
        if verbose:
            print(module_name + " not loaded for bizarre reasons")
        return None
|
def _get_corenlp_version ( ) :
"Return the corenlp version pointed at by CORENLP _ HOME , or None"
|
corenlp_home = os . environ . get ( "CORENLP_HOME" )
if corenlp_home :
for fn in os . listdir ( corenlp_home ) :
m = re . match ( "stanford-corenlp-([\d.]+)-models.jar" , fn )
if m :
return m . group ( 1 )
|
def _all_resources(self):
    """Return the complete collection of resources as a list of
    dictionaries.

    Query-string arguments drive filtering: a value starting with '%' is a
    SQL LIKE pattern, 'sort' orders the result ('-' prefix = descending),
    'limit' caps the row count, and any other key is an exact-match filter
    on a model column. Unknown keys raise BadRequestException.

    :rtype: :class:`sandman2.model.Model`
    """
    queryset = self.__model__.query
    # 'page' and 'export' are consumed elsewhere; everything else is a filter spec.
    args = {k: v for (k, v) in request.args.items() if k not in ('page', 'export')}
    limit = None
    if args:
        filters = []
        order = []
        for key, value in args.items():
            if value.startswith('%'):
                # Leading '%' marks a LIKE pattern; '/' is the escape character.
                filters.append(getattr(self.__model__, key).like(str(value), escape='/'))
            elif key == 'sort':
                # Leading '-' on the sort column requests descending order.
                direction = desc if value.startswith('-') else asc
                order.append(direction(getattr(self.__model__, value.lstrip('-'))))
            elif key == 'limit':
                limit = int(value)
            elif hasattr(self.__model__, key):
                filters.append(getattr(self.__model__, key) == value)
            else:
                raise BadRequestException('Invalid field [{}]'.format(key))
        queryset = queryset.filter(*filters).order_by(*order)
    if 'page' in request.args:
        # When paginating, 'limit' doubles as the per-page size.
        resources = queryset.paginate(page=int(request.args['page']), per_page=limit).items
    else:
        queryset = queryset.limit(limit)
        resources = queryset.all()
    return [r.to_dict() for r in resources]
|
def is_cross_origin(request):
    """Compare the HOST and ORIGIN headers; return True if they differ
    after stripping the protocol prefix from ORIGIN.

    example HTTP_HOST:   '127.0.0.1:5000'
    example HTTP_ORIGIN: 'http://127.0.0.1:5000'
    """
    origin = request.environ.get("HTTP_ORIGIN")
    host = request.environ.get("HTTP_HOST")
    if origin is None:
        # origin is sometimes omitted by the browser when origin and host are equal
        return False
    # Strip only the *leading* protocol prefix; the original str.replace()
    # would also mangle any later "http://" occurrence inside the value.
    for prefix in ("http://", "https://"):
        if origin.startswith(prefix):
            origin = origin[len(prefix):]
            break
    return host != origin
|
def reverseCommit(self):
    """Undo the commit by removing the inserted text again.

    NOTE(review): the original docstring said "Re-insert the previously
    deleted line", but the code below *removes* selected text; presumably
    this reverses an insertion command -- confirm against the class.
    """
    # Nothing to undo if no mark was set.
    if self.markerPos is None:
        return
    # Remove the specified string from the same position in every line
    # in between the mark and the cursor (inclusive).
    col = min((self.markerPos[1], self.cursorPos[1]))
    for line in range(self.markerPos[0], self.cursorPos[0] + 1):
        self.qteWidget.setSelection(line, col, line, col + len(self.text))
        self.baseClass.removeSelectedText()
    # Restore the cursor to its position at commit time.
    self.qteWidget.setCursorPosition(*self.cursorPos)
|
def get_ids_from_folder(path, part_name):
    """Return the ids of all XML files in `path`, excluding ids that are
    blacklisted in BAD_FILES for `part_name`."""
    candidate_ids = (
        os.path.splitext(os.path.basename(xml_file))[0]
        for xml_file in glob.glob(os.path.join(path, '*.xml'))
    )
    return {idx for idx in candidate_ids if idx not in BAD_FILES[part_name]}
|
def _compute_mean(map1, map2):
    """Build a new HpxMap whose data is the element-wise mean of two maps."""
    mean_data = (map1.data + map2.data) / 2.
    return HpxMap(mean_data, map1.hpx)
|
def lines_intersect(pt1_p, pt2_p, pt1_q, pt2_q):
    '''Return true if two line segments intersect.

    pt1_p, pt2_p - endpoints of first line segment
    pt1_q, pt2_q - endpoints of second line segment
    '''
    # The idea here is to do the cross-product of the vector from
    # point 1 to point 2 of one segment against the cross products from
    # both points of the other segment. If any of the cross products are zero,
    # the point is colinear with the line. If the cross products differ in
    # sign, then one point is on one side of the line and the other is on
    # the other. If that happens for both, then the lines must cross.
    for pt1_a, pt2_a, pt1_b, pt2_b in ((pt1_p, pt2_p, pt1_q, pt2_q),
                                       (pt1_q, pt2_q, pt1_p, pt2_p)):
        v_a = pt2_a - pt1_a
        # NOTE: the offset vector is taken from pt2_a rather than pt1_a;
        # this is equivalent since cross(v, x - v) == cross(v, x) - cross(v, v)
        # and cross(v, v) == 0.
        cross_a_1b = np.cross(v_a, pt1_b - pt2_a)
        if cross_a_1b == 0 and colinear_intersection_test(pt1_a, pt2_a, pt1_b):
            return True
        cross_a_2b = np.cross(v_a, pt2_b - pt2_a)
        if cross_a_2b == 0 and colinear_intersection_test(pt1_a, pt2_a, pt2_b):
            return True
        # Same sign for both cross products: both endpoints of segment b lie
        # strictly on one side of line a, so no intersection is possible.
        if (cross_a_1b < 0) == (cross_a_2b < 0):
            return False
    return True
|
def _qname ( self , name ) :
"""Convert name to an XML QName
e . g . pdf : Producer - > { http : / / ns . adobe . com / pdf / 1.3 / } Producer"""
|
if isinstance ( name , QName ) :
return name
if not isinstance ( name , str ) :
raise TypeError ( "{} must be str" . format ( name ) )
if name == '' :
return name
if name . startswith ( '{' ) :
return name
prefix , tag = name . split ( ':' , maxsplit = 1 )
uri = self . NS [ prefix ]
return QName ( uri , tag )
|
def remove_user(self, user_name, role):
    """Call CF's remove-user-with-org endpoint for the given role."""
    target = self._get_role_uri(role=role)
    payload = {'username': user_name}
    return self.api.delete(path=target, data=payload)
|
def file_w_create_directories(filepath):
    """Recursively create some directories if needed so that the directory
    where @filepath must be written exists, then open it in "w" mode and
    return the file object.
    """
    dirname = os.path.dirname(filepath)
    if dirname and dirname != os.path.curdir:
        # exist_ok avoids the check-then-create race of the original
        # isdir()/makedirs() sequence.
        os.makedirs(dirname, exist_ok=True)
    return open(filepath, 'w')
|
def do_get(self, line):
    """get <peer>
    eg. get sw1"""
    def show_peer(peer, args):
        print(peer.get())
    self._request(line, show_peer)
|
def search_normalize(self, results):
    """Append host id to search results to be able to initialize found
    :class:`Interface` successfully."""
    for interface in results:
        # The host id is not part of the search payload, so inject it
        # into every result dict before delegating to the parent.
        interface[u'host_id'] = self.host.id
    # pylint:disable=no-member
    return super(Interface, self).search_normalize(results)
|
def get_report(self, value):
    """Return the provided field value formatted for use in a report filter.

    Multiselect fields cast each element of the (possibly None) list;
    single-value fields cast the value directly.
    """
    if not self.multiselect:
        return self.cast_to_report(value)
    return [self.cast_to_report(item) for item in (value or [])]
|
def union(self, other):
    """OR together version ranges.

    Calculates the union of this range with one or more other ranges.

    Args:
        other: VersionRange object (or list of) to OR with.

    Returns:
        New VersionRange object representing the union.
    """
    if not hasattr(other, "__iter__"):
        other = [other]
    bounds = self.bounds[:]
    # Renamed from 'range', which shadowed the builtin.
    for other_range in other:
        bounds += other_range.bounds
    bounds = self._union(bounds)
    result = VersionRange(None)
    result.bounds = bounds
    return result
|
def installed(name, nodataset=False, brand_opts=None):
    '''Ensure zone is installed.

    name : string
        name of the zone
    nodataset : boolean
        do not create a ZFS file system
    brand_opts : boolean
        brand specific options to pass
    '''
    ret = {'name': name, 'changes': {}, 'result': None, 'comment': ''}
    # Consider both installed and merely configured zones.
    zones = __salt__['zoneadm.list'](installed=True, configured=True)
    if name in zones:
        if zones[name]['state'] == 'configured':
            if __opts__['test']:
                # Dry-run mode: pretend the install succeeded.
                res_install = {'status': True}
            else:
                res_install = __salt__['zoneadm.install'](name, nodataset, brand_opts)
            ret['result'] = res_install['status']
            if ret['result']:
                ret['changes'][name] = 'installed'
                ret['comment'] = 'The zone {0} was installed.'.format(name)
            else:
                # Collect failure message(s) into a single newline-joined comment.
                ret['comment'] = []
                ret['comment'].append('Failed to install zone {0}!'.format(name))
                if 'message' in res_install:
                    ret['comment'].append(res_install['message'])
                ret['comment'] = "\n".join(ret['comment'])
        else:
            # Zone state is beyond 'configured', so nothing to do.
            ret['result'] = True
            ret['comment'] = 'zone {0} already installed.'.format(name)
    else:
        # A zone must be configured before it can be installed.
        ret['result'] = False
        ret['comment'] = 'zone {0} is not configured!'.format(name)
    return ret
|
def similarities(self, rank):
    """Return the similarity scores of all models with the given rank."""
    self._check_rank(rank)
    scores = []
    for model_result in self.results[rank]:
        scores.append(model_result.similarity)
    return scores
|
def update(cls, domain, source, dest_add, dest_del):
    """Update a domain mail forward's destination list.

    Adds then removes the requested destinations; only calls the API when
    the resulting list actually differs from the current one. Returns the
    API result, or None when nothing changed.
    """
    if not (dest_add or dest_del):
        return None
    current_destinations = cls.get_destinations(domain, source)
    fwds = list(current_destinations)
    for dest in (dest_add or []):
        if dest not in fwds:
            fwds.append(dest)
    for dest in (dest_del or []):
        if dest in fwds:
            fwds.remove(dest)
    # Skip the API call when the add/del operations cancelled out.
    if fwds == current_destinations:
        return None
    cls.echo('Updating mail forward %s@%s' % (source, domain))
    options = {'destinations': fwds}
    return cls.call('domain.forward.update', domain, source, options)
|
def to_dict(self):
    """Get a dictionary representation of this item, formatted for Elasticsearch."""
    out = {}
    # The Elasticsearch mapping determines which attributes are serialized.
    fields = self.__class__.search_objects.mapping.properties.properties
    for key in fields:  # TODO: What if we've mapped the property to a different name? Will we allow that?
        attribute = getattr(self, key)
        field = fields[key]
        # A field's own serializer takes the highest priority.
        if hasattr(field, "to_es"):
            out[key] = field.to_es(attribute)
        # First we check if this is a manager, in which case we have many related objects
        elif isinstance(attribute, models.Manager):
            if issubclass(attribute.model, Indexable):  # TODO: We want this to have some awareness of the relevant field.
                out[key] = [obj.to_dict() for obj in attribute.all()]
            else:
                # Non-indexable relations are stored as a flat list of PKs.
                out[key] = list(attribute.values_list("pk", flat=True))
        elif callable(attribute):
            out[key] = attribute()
        elif isinstance(attribute, Indexable):
            out[key] = attribute.to_dict()
        else:
            out[key] = attribute
        # Drop nulls; Elasticsearch has no use for them.
        if out[key] is None:
            del out[key]
    return out
|
def get_predicate_indices(tags: List[str]) -> List[int]:
    """Return the word indices of a predicate in BIO tags."""
    indices = []
    for position, tag in enumerate(tags):
        if 'V' in tag:
            indices.append(position)
    return indices
|
def register_halfmaxes(self, telescope, band, lower, upper):
    """Register precomputed half-max points for a telescope/band pair.

    Returns self, so registrations can be chained. Raises
    AlreadyDefinedError if the pair was registered before.
    """
    if (telescope, band) in self._halfmaxes:
        # NOTE(review): telescope/band are passed to the exception as extra
        # args without %-formatting; presumably AlreadyDefinedError does
        # lazy formatting -- confirm against its definition.
        raise AlreadyDefinedError('half-max points for %s/%s already '
                                  'defined', telescope, band)
    # Record that this telescope/band pair now carries data.
    self._note(telescope, band)
    self._halfmaxes[telescope, band] = (lower, upper)
    return self
|
def _handle_hidden_tables ( self , tbl_list , attr_name ) :
"""Return list of tables , potentially removing hidden elements
Parameters
tbl _ list : list of node - like
Type of list elements will vary depending upon parser used
attr _ name : str
Name of the accessor for retrieving HTML attributes
Returns
list of node - like
Return type matches ` tbl _ list `"""
|
if not self . displayed_only :
return tbl_list
return [ x for x in tbl_list if "display:none" not in getattr ( x , attr_name ) . get ( 'style' , '' ) . replace ( " " , "" ) ]
|
def copy(self, source_table, dest_table,
         create_disposition=CreateDisposition.CREATE_IF_NEEDED,
         write_disposition=WriteDisposition.WRITE_TRUNCATE):
    """Copies (or appends) a table to another table.

    :param source_table:
    :type source_table: BQTable
    :param dest_table:
    :type dest_table: BQTable
    :param create_disposition: whether to create the table if needed
    :type create_disposition: CreateDisposition
    :param write_disposition: whether to append/truncate/fail if the table exists
    :type write_disposition: WriteDisposition
    """
    source_ref = {
        "projectId": source_table.project_id,
        "datasetId": source_table.dataset_id,
        "tableId": source_table.table_id,
    }
    dest_ref = {
        "projectId": dest_table.project_id,
        "datasetId": dest_table.dataset_id,
        "tableId": dest_table.table_id,
    }
    job = {
        "configuration": {
            "copy": {
                "sourceTable": source_ref,
                "destinationTable": dest_ref,
                "createDisposition": create_disposition,
                "writeDisposition": write_disposition,
            }
        }
    }
    self.run_job(dest_table.project_id, job, dataset=dest_table.dataset)
|
def dump_results(self, success):
    """Dump simulation results to ``dat`` and ``lst`` files.

    Returns
    -------
    None
    """
    system = self.system
    # Start a timer so the log line can report how long the dump took.
    t, _ = elapsed()
    # Only dump when the simulation succeeded and output is enabled.
    if success and (not system.files.no_output):
        # system.varout.dump()
        system.varout.dump_np_vars()
        _, s = elapsed(t)
        logger.info('Simulation data dumped in {:s}.'.format(s))
|
def add_permute(self, name, dim, input_name, output_name):
    """Add a permute layer. Assumes that the input has dimensions in the
    order [Seq, C, H, W].

    Parameters
    ----------
    name: str
        The name of this layer.
    dim: tuple
        The order in which to permute the input dimensions = [seq, C, H, W].
        Must have length 4 and be a permutation of ``[0, 1, 2, 3]``.

        examples:
        If ``dim`` is set to ``[0, 3, 1, 2]``, the output has shape
        ``[W, C, H]`` and the same sequence length as the input.
        If ``dim`` is set to ``[3, 1, 2, 0]`` and the input is a sequence
        of data with length ``Seq`` and shape ``[C, 1, 1]``, the output is
        a unit sequence of data with shape ``[C, 1, Seq]``.
        If ``dim`` is set to ``[0, 3, 2, 1]``, the output is a reverse of
        the input: ``[C, H, W] -> [W, H, C]``.
        If ``dim`` is not set, or is set to ``[0, 1, 2, 3]``, the output is
        the same as the input.
    input_name: str
        The input blob name of this layer.
    output_name: str
        The output blob name of this layer.

    See Also
    --------
    add_flatten, add_reshape
    """
    # Validate *before* mutating the spec: the original appended the layer
    # first and only then raised, leaving a half-built layer in nn_spec.
    if len(dim) != 4:
        raise ValueError("Length of the 'dim' parameter must be equal to 4")
    spec = self.spec
    nn_spec = self.nn_spec
    # Add a new layer
    spec_layer = nn_spec.layers.add()
    spec_layer.name = name
    spec_layer.input.append(input_name)
    spec_layer.output.append(output_name)
    spec_layer_params = spec_layer.permute
    spec_layer_params.axis.extend(list(dim))
|
def split_url(url):
    """Split the given URL ``base#anchor`` into ``(base, anchor)``,
    or ``(base, None)`` if no anchor is present.

    In case there are two or more ``#`` characters, only the first two
    tokens are kept: ``a#b#c => (a, b)``.

    :param string url: the url
    :rtype: tuple
    """
    if url is None:
        return (None, None)
    base, sep, rest = url.partition("#")
    if not sep:
        return (base, None)
    # Keep only the token up to the next '#', if any.
    return (base, rest.split("#", 1)[0])
|
def with_result_cache(func):
    """Decorator specifically for is_active. If self.result_cache is set to a {}
    the is_active results will be cached for each set of params.
    """
    from functools import wraps  # local import keeps the block self-contained

    @wraps(func)  # fix: preserve the wrapped function's name/docstring
    def inner(self, *args, **kwargs):
        dic = self.result_cache
        cache_key = None
        if dic is not None:
            cache_key = (args, tuple(kwargs.items()))
            try:
                result = dic.get(cache_key)
            except TypeError as e:  # not hashable
                log.debug('Switchboard result cache not active for this "%s" check due to: %s within args: %s', args[0], e, repr(cache_key)[:200])
                cache_key = None
            else:
                if result is not None:
                    return result
        result = func(self, *args, **kwargs)
        if cache_key is not None:
            dic[cache_key] = result
        return result
    return inner
|
def _OpenFilesForRead(self, metadata_value_pairs, token):
    """Open all result files at once and return them keyed by URN."""
    aff4_paths = [
        result.AFF4Path(metadata.client_urn)
        for metadata, result in metadata_value_pairs
    ]
    fds = aff4.FACTORY.MultiOpen(aff4_paths, mode="r", token=token)
    # Dict comprehension instead of dict() over a throwaway list of pairs.
    return {fd.urn: fd for fd in fds}
|
def __stopOpenThread(self):
    """Stop the OpenThread stack and bring the thread interface down.

    Returns:
        True: successful to stop OpenThread stack and thread interface down
        False: fail to stop OpenThread stack
    """
    # NOTE: Python 2 syntax (print statement, 'except Exception, e').
    print 'call stopOpenThread'
    try:
        if self.__sendCommand('thread stop')[0] == 'Done':
            # Stack stopped; now take the interface down.
            return self.__sendCommand('ifconfig down')[0] == 'Done'
        else:
            return False
    except Exception, e:
        # NOTE(review): this path implicitly returns None, not False --
        # callers comparing the result to False would misbehave; confirm intent.
        ModuleHelper.WriteIntoDebugLogger("stopOpenThread() Error: " + str(e))
|
def fix_360_to_0(self):
    '''Snap a nearly-zero-length arc back to exactly zero length.

    Sometimes we have to create an arc using from_angle and to_angle computed
    numerically. If from_angle == to_angle conceptually, a tiny discrepancy
    may make from_angle > to_angle, and instead of a 0-length arc we end up
    with a 360-degree arc. When we know for sure that a 360-degree arc is not
    what we want, this helper checks whether from_angle and to_angle are
    within `tol` of each other, and if so makes them exactly equal.

    Example (reconstructed from a garbled doctest -- the method itself
    mutates in place and returns None):

        a = Arc((0, 0), 1, 0, -tol / 2, True)
        # repr: Arc([0.000, 0.000], 1.000, 0.000, -0.000, True, degrees=360.000)
        a.fix_360_to_0()
        # repr: Arc([0.000, 0.000], 1.000, -0.000, -0.000, True, degrees=0.000)
    '''
    # `tol` is a module-level tolerance constant.
    if abs(self.from_angle - self.to_angle) < tol:
        self.from_angle = self.to_angle
|
def _parse_caption(self, node, state):
    """Parse a Caption of the node.

    :param node: The lxml node to parse
    :param state: The global state necessary to place the node in context
        of the document as a whole.
    """
    if node.tag not in ["caption", "figcaption"]:  # captions used in Tables
        return state
    # Add a Caption
    parent = state["parent"][node]
    stable_id = (f"{state['document'].name}" f"::" f"{'caption'}" f":" f"{state['caption']['idx']}")
    # Set name for the Caption if the node carries one.
    name = node.attrib["name"] if "name" in node.attrib else None
    # A caption is attached to either its parent Table or parent Figure.
    if isinstance(parent, Table):
        state["context"][node] = Caption(document=state["document"], table=parent, figure=None, stable_id=stable_id, name=name, position=state["caption"]["idx"],)
    elif isinstance(parent, Figure):
        state["context"][node] = Caption(document=state["document"], table=None, figure=parent, stable_id=stable_id, name=name, position=state["caption"]["idx"],)
    else:
        raise NotImplementedError("Caption must be a child of Table or Figure.")
    # Advance the running caption index for the document.
    state["caption"]["idx"] += 1
    return state
|
def event_doctree_resolved(app, doctree, _):
    """Called by Sphinx after phase 3 (resolving).

    * Replace Imgur text nodes with data from the Sphinx cache.
    * Call finalizer for ImgurImageNode nodes.

    :param sphinx.application.Sphinx app: Sphinx application object.
    :param docutils.nodes.document doctree: Tree of docutils nodes.
    :param _: Not used.
    """
    album_cache = app.builder.env.imgur_album_cache
    image_cache = app.builder.env.imgur_image_cache
    for node in doctree.traverse(ImgurTextNode):
        # Albums and single images live in separate caches.
        cache = album_cache if node.album else image_cache
        if node.name == 'imgur-description':
            text = cache[node.imgur_id].description
        else:
            text = cache[node.imgur_id].title
        node.replace_self([docutils.nodes.Text(text)])
    for node in doctree.traverse(ImgurImageNode):
        if node.album and not album_cache[node.imgur_id].cover_id:
            # No cover image cached: warn and drop the node from the output.
            app.warn('Album cover Imgur ID for {} not available in local cache.'.format(node.imgur_id))
            node.replace_self([docutils.nodes.Text('')])
        else:
            node.finalize(album_cache, image_cache, lambda m: app.builder.env.warn_node(m, node))
|
def insert(self, parent, position, row=None):
    """insert(parent, position, row=None)

    Create a new row at `position`. When `parent` (a :obj:`Gtk.TreeIter`
    or :obj:`None`) is given, the row becomes its child; otherwise it is
    created at the toplevel. A `position` of -1, or one larger than the
    number of rows at that level, appends at the end of the list.

    If `row` is a list of values the new row is filled from it; otherwise
    the row is empty and can be populated later with
    :obj:`Gtk.TreeStore.set`\\() or :obj:`Gtk.TreeStore.set_value`\\().

    :returns: a :obj:`Gtk.TreeIter` pointing to the new row
    :rtype: :obj:`Gtk.TreeIter`
    """
    return self._do_insert(parent, position, row)
|
def next_time_open(location):
    """Return the next possible opening hours object, or (False, None)
    if location is currently open or there is no such object.

    I.e. when is the company open for the next time?
    """
    if not is_open(location):
        now = get_now()
        # NOTE(review): now_time is never used below -- confirm it is dead code.
        now_time = datetime.time(now.hour, now.minute, now.second)
        found_opening_hours = False
        # Scan today plus the next seven days.
        for i in range(8):
            l_weekday = (now.isoweekday() + i) % 7
            ohs = OpeningHours.objects.filter(company=location, weekday=l_weekday).order_by('weekday', 'from_hour')
            if ohs.count():
                for oh in ohs:
                    future_now = now + datetime.timedelta(days=i)
                    # same day issue
                    tmp_now = datetime.datetime(future_now.year, future_now.month, future_now.day, oh.from_hour.hour, oh.from_hour.minute, oh.from_hour.second)
                    if tmp_now < now:
                        # Candidate start already passed today; clamp to now.
                        tmp_now = now
                    # be sure to set the bound correctly...
                    if is_open(location, now=tmp_now):
                        found_opening_hours = oh
                        break
                if found_opening_hours is not False:
                    return found_opening_hours, tmp_now
    return False, None
|
def type_complexity(type_):
    """Computes an indicator for the complexity of `type_`.

    If the return value is 0, the supplied type is not parameterizable.
    Otherwise, set bits in the return value denote the following features:
    - bit 0: The type could be parameterized but is not.
    - bit 1: The type represents an iterable container with 1 constrained type parameter.
    - bit 2: The type represents a mapping with a constrained value type (2 parameters).
    - bit 3: The type represents an n-tuple (n parameters).
    Since these features are mutually exclusive, only a `Union` can have
    more than one bit set.
    """
    # NOTE: relies on pre-3.7 typing internals (TypingMeta, __union_params__,
    # __tuple_params__); it will not work against modern typing releases.
    if (not typing or not isinstance(type_, (typing.TypingMeta, GenericWrapperMeta)) or type_ is AnyType):
        return 0
    if issubclass(type_, typing.Union):
        # A union's complexity is the OR of its members' feature bits.
        return reduce(operator.or_, map(type_complexity, type_.__union_params__))
    if issubclass(type_, typing.Tuple):
        if type_.__tuple_params__ is None:
            return 1   # bare Tuple: parameterizable but not parameterized
        elif type_.__tuple_use_ellipsis__:
            return 2   # Tuple[X, ...]: behaves like a 1-parameter container
        else:
            return 8   # fixed-length n-tuple
    if isinstance(type_, GenericWrapperMeta):
        type_count = 0
        for p in reversed(type_.parameters):
            if type_count > 0:
                type_count += 1
            if p is AnyType:
                continue
            # Unconstrained, unbounded TypeVars add no complexity.
            if not isinstance(p, typing.TypeVar) or p.__constraints__ or p.__bound__:
                type_count += 1
        return 1 << min(type_count, 2)
    return 0
|
def get_exclusions(path):
    """Generate exclusion patterns from a ``.dockerignore`` file located in
    the given path. Returns ``None`` if the file does not exist.

    :param path: Path to look up the ``.dockerignore`` in.
    :type path: unicode | str
    :return: List of patterns, that can be passed into :func:`get_filter_func`.
    :rtype: list[(__RegEx, bool)]
    """
    dockerignore_file = os.path.join(path, '.dockerignore')
    if not (os.path.isdir(path) and os.path.isfile(dockerignore_file)):
        return None
    with open(dockerignore_file, 'rb') as dif:
        return list(preprocess_matches(dif.readlines()))
|
def allinstances(cls):
    """Return all instances that inherit from JB_Gui.

    :returns: all instances that inherit from JB_Gui
    :rtype: list
    :raises: None
    """
    # Prune instances whose underlying C++ object was already deleted
    # (shiboken.isValid), rebuilding the WeakSet stored on JB_Gui itself.
    JB_Gui._allinstances = weakref.WeakSet([i for i in cls._allinstances if shiboken.isValid(i)])
    # NOTE(review): the pruned set is written to JB_Gui but read back via
    # `cls`; these differ only if a subclass shadows _allinstances -- confirm.
    return list(cls._allinstances)
|
def speed_heading(msg):
    """Get speed and ground track (or heading) from the velocity message
    (handles both airborne and surface messages).

    Args:
        msg (string): 28 bytes hexadecimal message string

    Returns:
        (int, float): speed (kt), ground track or heading (degree)
    """
    decoded = velocity(msg)
    # velocity() also yields rate of climb/descent and a source tag,
    # which are not needed here.
    return decoded[0], decoded[1]
|
def __gen_struct_anno_files(self, top_level_layer):
    """Create a PAULA struct annotation file for the given layer.

    A struct annotation file contains node (struct) attributes (of
    non-token nodes). It is e.g. used to annotate the type of a syntactic
    category (NP, VP etc.).

    See also: __gen_hierarchy_file()
    """
    paula_id = '{0}.{1}.{2}_{3}_struct'.format(top_level_layer, self.corpus_name, self.name, top_level_layer)
    E, tree = gen_paula_etree(paula_id)
    # The annotations reference the hierarchy file generated for this layer.
    base_paula_id = self.paulamap['hierarchy'][top_level_layer]
    mflist = E('multiFeatList', {XMLBASE: base_paula_id + '.xml'})
    for node_id in select_nodes_by_layer(self.dg, top_level_layer):
        # Only non-token (struct) nodes are annotated here.
        if not istoken(self.dg, node_id):
            mfeat = E('multiFeat', {XLINKHREF: '#{0}'.format(node_id)})
            node_dict = self.dg.node[node_id]
            for attr in node_dict:
                if attr not in IGNORED_NODE_ATTRIBS:
                    mfeat.append(E('feat', {'name': attr, 'value': node_dict[attr]}))
            if self.human_readable:  # adds node label as a <!-- comment -->
                mfeat.append(Comment(node_dict.get('label')))
            mflist.append(mfeat)
    tree.append(mflist)
    # Register the generated tree and its DTD for later serialization.
    self.files[paula_id] = tree
    self.file2dtd[paula_id] = PaulaDTDs.multifeat
    return paula_id
|
def stop(self):
    """Tell the SocketIO thread to terminate."""
    if not self._thread:
        return
    _LOGGER.info("Stopping SocketIO thread...")
    # pylint: disable=W0212
    self._running = False
    if self._exit_event:
        self._exit_event.set()
    self._thread.join()
|
def format_status_json(self):
    """Convert a Status object to json format."""
    payload = {'code': self.code, 'message': self.message}
    # 'details' is optional and omitted when absent.
    if self.details is not None:
        payload['details'] = self.details
    return payload
|
def _deprecated_kwargs ( kwargs , arg_newarg ) :
"""arg _ newarg is a list of tuples , where each tuple has a pair of strings .
( ' old _ arg ' , ' new _ arg ' )
A DeprecationWarning is raised for the arguments that need to be
replaced ."""
|
warn_for = [ ]
for ( arg , new_kw ) in arg_newarg :
if arg in kwargs . keys ( ) :
val = kwargs . pop ( arg )
kwargs [ new_kw ] = val
warn_for . append ( ( arg , new_kw ) )
if len ( warn_for ) > 0 :
if len ( warn_for ) == 1 :
warnings . warn ( "Argument '{}' is deprecated. Use {} instead" . format ( warn_for [ 0 ] [ 0 ] , warn_for [ 0 ] [ 1 ] ) , DeprecationWarning , stacklevel = 4 )
else :
args = ", " . join ( [ x [ 0 ] for x in warn_for ] )
repl = ", " . join ( [ x [ 1 ] for x in warn_for ] )
warnings . warn ( "Arguments '{}' are deprecated. Use '{}' instead respectively" . format ( args , repl ) , DeprecationWarning , stacklevel = 4 )
return kwargs
|
def _parse_distro_release_content(line):
    """Parse a line from a distro release file.

    Parameters:
    * line: Line from the distro release file. Must be a unicode string
      or a UTF-8 encoded byte string.

    Returns:
    A dictionary containing all information items.
    """
    if isinstance(line, bytes):
        line = line.decode('utf-8')
    # The pattern is matched against the *reversed* line (note [::-1]),
    # which makes the trailing "(codename)" and version easier to anchor;
    # each captured group must therefore be reversed back below.
    matches = _DISTRO_RELEASE_CONTENT_REVERSED_PATTERN.match(line.strip()[::-1])
    distro_info = {}
    if matches:  # regexp ensures non-None
        distro_info['name'] = matches.group(3)[::-1]
        if matches.group(2):
            distro_info['version_id'] = matches.group(2)[::-1]
        if matches.group(1):
            distro_info['codename'] = matches.group(1)[::-1]
    elif line:
        # Fallback: treat the whole line as the distro name.
        distro_info['name'] = line.strip()
    return distro_info
|
def add(self, event, message=None):
    """[Decorator] Add handler method.

    :param event: Specify a kind of Event which you want to handle
    :type event: T <= :py:class:`linebot.models.events.Event` class
    :param message: (optional) If event is MessageEvent,
        specify kind of Messages which you want to handle
    :type message: T <= :py:class:`linebot.models.messages.Message` class
    :rtype: func
    :return: decorator
    """
    def decorator(func):
        # A list/tuple of message types registers the handler once per type.
        if isinstance(message, (list, tuple)):
            for it in message:
                self.__add_handler(func, event, message=it)
        else:
            self.__add_handler(func, event, message=message)
        # Return the original function so the decorator is transparent.
        return func
    return decorator
|
def size(self, time):
    """Gets the size of the object at a given time.

    Args:
        time: Time value being queried.

    Returns:
        size of the object in pixels (0 outside the object's lifetime)
    """
    if not (self.start_time <= time <= self.end_time):
        return 0
    return self.masks[time - self.start_time].sum()
|
def append_value_continuation(self, linenum, indent, continuation):
    """Append a continuation line to the field value currently being parsed.

    :param linenum: The line number of the frame.
    :type linenum: int
    :param indent: The indentation level of the frame.
    :type indent: int
    :param continuation: Text to append to the current field value.
    :type continuation: str
    """
    frame = self.current_frame()
    assert isinstance(frame, FieldFrame) or isinstance(frame, ValueContinuationFrame)
    if isinstance(frame, FieldFrame):
        # First continuation: must be indented deeper than the field itself.
        assert frame.indent < indent and frame.container.contains(ROOT_PATH, frame.field_name)
    if isinstance(frame, ValueContinuationFrame):
        # Subsequent continuations keep the same indent; the old continuation
        # frame is replaced by a fresh one pushed below.
        assert frame.indent == indent and frame.container.contains(ROOT_PATH, frame.field_name)
        self.pop_frame()
    field_value = frame.field_value + '\n' + continuation
    frame.container.put_field(ROOT_PATH, frame.field_name, field_value)
    frame = ValueContinuationFrame(linenum, indent, frame.path, frame.container, frame.field_name, field_value)
    self.push_frame(frame)
|
def transform(self, data):
    """Render a mapping as newline-separated ``key=value`` lines.

    :param data: mapping to serialize
    :type data: dict
    :return: "key=value" pairs joined by newlines, sorted by key
    :rtype: str
    """
    return "\n".join("%s=%s" % (key, data[key]) for key in sorted(data))
|
def get_objs_from_record(self, record, key):
    """Returns a mapping of UID -> object for the UIDs stored in ``record``
    under ``key``."""
    return {
        uid: self.get_object_by_uid(uid)
        for uid in self.get_uids_from_record(record, key)
    }
|
def _find_contpix_given_cuts ( f_cut , sig_cut , wl , fluxes , ivars ) :
"""Find and return continuum pixels given the flux and sigma cut
Parameters
f _ cut : float
the upper limit imposed on the quantity ( fbar - 1)
sig _ cut : float
the upper limit imposed on the quantity ( f _ sig )
wl : numpy ndarray of length npixels
rest - frame wavelength vector
fluxes : numpy ndarray of shape ( nstars , npixels )
pixel intensities
ivars : numpy ndarray of shape nstars , npixels
inverse variances , parallel to fluxes
Returns
contmask : boolean mask of length npixels
True indicates that the pixel is continuum"""
|
f_bar = np . median ( fluxes , axis = 0 )
sigma_f = np . var ( fluxes , axis = 0 )
bad = np . logical_and ( f_bar == 0 , sigma_f == 0 )
cont1 = np . abs ( f_bar - 1 ) <= f_cut
cont2 = sigma_f <= sig_cut
contmask = np . logical_and ( cont1 , cont2 )
contmask [ bad ] = False
return contmask
|
def access_specifier(self):
    """Retrieves the access specifier (if any) of the entity pointed at by
    the cursor, caching the raw libclang id on first use."""
    try:
        raw_id = self._access_specifier
    except AttributeError:
        raw_id = self._access_specifier = conf.lib.clang_getCXXAccessSpecifier(self)
    return AccessSpecifier.from_id(raw_id)
|
def build_increments(self):
    """Experimental method to calculate parameter increments for use in the
    finite difference perturbation calculations.

    Fills the ``derinc`` and ``increment`` columns of ``parameter_data``
    from the per-group settings in ``parameter_groups``.

    Note
    ----
    user beware!
    """
    self.enforce_bounds()
    self.add_transform_columns()
    # map parameter-group name -> index labels of its parameters
    par_groups = self.parameter_data.groupby("pargp").groups
    # map increment type -> the group names using that type
    inctype = self.parameter_groups.groupby("inctyp").groups
    for itype, inc_groups in inctype.items():
        pnames = []
        for group in inc_groups:
            pnames.extend(par_groups[group])
            # propagate the group's derinc onto each of its parameters
            derinc = self.parameter_groups.loc[group, "derinc"]
            self.parameter_data.loc[par_groups[group], "derinc"] = derinc
        if itype == "absolute":
            self.parameter_data.loc[pnames, "increment"] = self.parameter_data.loc[pnames, "derinc"]
        elif itype == "relative":
            # increment scales with the current parameter value
            self.parameter_data.loc[pnames, "increment"] = self.parameter_data.loc[pnames, "derinc"] * self.parameter_data.loc[pnames, "parval1"]
        elif itype == "rel_to_max":
            # increment scales with the max value across the whole set
            mx = self.parameter_data.loc[pnames, "parval1"].max()
            self.parameter_data.loc[pnames, "increment"] = self.parameter_data.loc[pnames, "derinc"] * mx
        else:
            raise Exception('Pst.get_derivative_increments(): ' + 'unrecognized increment type:{0}'.format(itype))
    # account for fixed pars
    isfixed = self.parameter_data.partrans == "fixed"
    self.parameter_data.loc[isfixed, "increment"] = self.parameter_data.loc[isfixed, "parval1"]
|
async def create(self, **kwargs):
    '''Corresponds to a POST request without a resource identifier:
    deserializes the request body, inserts the document into the database
    and returns its serialized form. Any failure is logged and re-raised
    as BadRequest.'''
    try:
        document = self._meta.object_class()
        # merge route/url kwargs into the request payload
        self.data.update(kwargs)
        await document.deserialize(self.data)
        await document.insert(db=self.db)
        return await document.serialize()
    except Exception as ex:
        logger.exception(ex)
        raise BadRequest(ex)
|
def request(func=None, timeout=600):
    """Use to request an api call from a specific endpoint.

    Works both as ``@request`` and as ``@request(timeout=...)``; the
    decorated function must return a params dict carrying 'self',
    'entity' and 'app_name' entries.
    """
    if func is None:
        # invoked as @request(timeout=...): return the configured decorator
        return partial(request, timeout=timeout)

    @wraps(func)
    def wrapper(self, *args, **kwargs):
        payload = func(self, *args, **kwargs)
        # routing information travels inside the returned params dict
        self = payload.pop('self', None)
        entity = payload.pop('entity', None)
        app_name = payload.pop('app_name', None)
        payload['request_id'] = unique_hex()
        return self._send_request(app_name, endpoint=func.__name__, entity=entity, params=payload, timeout=timeout)

    wrapper.is_request = True
    return wrapper
|
def get_language_settings(language_code, site_id=None):
    """Return the language settings for the current site.

    :param language_code: language code to look up
    :param site_id: optional site id; the current site is used when omitted
    :return: the PARLER_LANGUAGES entry for the language/site pair
    """
    # This method mainly exists for ease-of-use.
    # The body is part of the settings, to allow third party packages
    # to have their own variation of the settings with this method
    # functionality included.
    from parler import appsettings
    return appsettings.PARLER_LANGUAGES.get_language(language_code, site_id)
|
def _finalize_step ( self ) :
"""Finalize simulation step after all agents have acted for the current
step ."""
|
t = time . time ( )
if self . _callback is not None :
self . _callback ( self . age )
t2 = time . time ( )
self . _step_processing_time += t2 - t
self . _log ( logging . INFO , "Step {} run in: {:.3f}s ({:.3f}s of " "actual processing time used)" . format ( self . age , self . _step_processing_time , t2 - self . _step_start_time ) )
self . _processing_time += self . _step_processing_time
|
def filter_devices(ads, func):
    """Finds the AndroidDevice instances from a list that match certain
    conditions.

    Args:
        ads: A list of AndroidDevice instances.
        func: A function that takes an AndroidDevice object and returns
            True if the device satisfies the filter condition.

    Returns:
        A list of AndroidDevice instances that satisfy the filter
        condition.
    """
    return [ad for ad in ads if func(ad)]
|
def oauth_session(request, state=None, token=None):
    """Constructs the OAuth2 session object for the Discord handshake.

    :param request: current HTTP request; used to build the callback URI
        when ``DISCORD_REDIRECT_URI`` is not configured
    :param state: optional OAuth2 state to resume an authorization flow
    :param token: optional existing token dict for an authorized session
    :return: a configured ``OAuth2Session``
    """
    if settings.DISCORD_REDIRECT_URI is not None:
        redirect_uri = settings.DISCORD_REDIRECT_URI
    else:
        redirect_uri = request.build_absolute_uri(reverse('discord_bind_callback'))
    # BUGFIX: Discord's user-info scope is named 'identify' (not
    # 'identity'); requesting an unknown scope fails authorization.
    scope = (['email', 'guilds.join'] if settings.DISCORD_EMAIL_SCOPE
             else ['identify', 'guilds.join'])
    return OAuth2Session(settings.DISCORD_CLIENT_ID,
                         redirect_uri=redirect_uri,
                         scope=scope,
                         token=token,
                         state=state)
|
def parse_condition(self, schema, name, v):
    """Parse ``name='value'`` into a column condition.

    :param schema: schema object providing ``get_column()``
    :param name: column name
    :param v: column value; strings may carry an operator prefix
        ('>=', '>', '<=', '<', '=', '!=', 'like', 'between', 'in'), a
        '%' wildcard (treated as LIKE), or are compared for equality;
        lists/tuples become IN conditions
    :return: the condition object, or None when the column is unknown

    .. warning:: operator operands are passed through ``eval`` — never
        feed this function untrusted input.
    """
    S = schema
    col = S.get_column(name)
    condition = None
    if col is not None:  # can create condition
        if isinstance(v, (str, unicode)):
            # two-character operators must be tested before their
            # one-character prefixes ('>=' before '>', etc.)
            if v.startswith('>='):
                condition = (col >= eval(v[2:].strip()))
            elif v.startswith('>'):
                condition = (col > eval(v[1:].strip()))
            elif v.startswith('<='):
                condition = (col <= eval(v[2:].strip()))
            elif v.startswith('<'):
                condition = (col < eval(v[1:].strip()))
            elif v.startswith('='):
                condition = (col == eval(v[1:].strip()))
            elif v.startswith('!='):
                condition = (col != eval(v[2:].strip()))
            elif v.startswith('like'):
                condition = col.like(v[4:].strip())
            elif v.startswith('between'):
                _v = eval(v[7:].strip())
                if not isinstance(_v, (tuple, list)):
                    raise ValueError("Between operation should be a list, but {!r} found".format(v))
                condition = (col.between(*_v))
            elif v.startswith('in'):
                condition = (col.in_(eval(v[2:].strip())))
            else:
                if '%' in v:  # like
                    condition = col.like(v)
                else:
                    condition = (col == v)
        elif isinstance(v, (tuple, list)):
            condition = (col.in_(v))
        else:
            condition = (col == v)
    return condition
|
def parse_args_and_run ( ) :
""": return : The parsed arguments"""
|
parser = argparse . ArgumentParser ( )
subparsers = parser . add_subparsers ( help = 'Docker-tag-naming sub-commands' , dest = 'subparser_name' )
# Forge
parser_forge = subparsers . add_parser ( 'forge' , help = 'Create a new version tag' )
parser_forge . add_argument ( '--version' , type = int , default = 1 , help = 'Version number' )
parser_forge . add_argument ( '--commit-id' , required = True , help = 'Git commit id' )
parser_forge . add_argument ( '--branch' , required = True , help = 'The branch name (ie. master)' )
# Latest
parser_latest = subparsers . add_parser ( 'latest' , help = 'Query the latest tag in the' ' registry' )
parser_latest . add_argument ( 'image' , help = 'The image to query (ie. username/image)' )
parser_latest . add_argument ( 'branch' , help = 'The branch name (ie. master)' )
# Bump
parser_bump = subparsers . add_parser ( 'bump' , help = 'Query the latest tag in the' ' registry and return a +1' )
parser_bump . add_argument ( 'image' , help = 'The image to bump (ie. username/image)' )
parser_bump . add_argument ( 'branch' , help = 'The branch name (ie. master)' )
parser_bump . add_argument ( '--commit-id' , required = True , help = 'Git commit id for the newly created tag' )
# Refresh
parser_latest = subparsers . add_parser ( 'refresh' , help = 'Loop until the latest tag in' ' the registry changes' )
parser_latest . add_argument ( 'image' , help = 'The image to query (ie. username/image)' )
parser_latest . add_argument ( 'branch' , help = 'The branch name (ie. master)' )
args = parser . parse_args ( )
{ 'bump' : run_bump , 'latest' : run_latest , 'forge' : run_forge , 'refresh' : run_refresh } . get ( args . subparser_name ) ( args )
|
async def create_source_event_stream(
    schema: GraphQLSchema,
    document: DocumentNode,
    root_value: Any = None,
    context_value: Any = None,
    variable_values: Dict[str, Any] = None,
    operation_name: str = None,
    field_resolver: GraphQLFieldResolver = None,
) -> Union[AsyncIterable[Any], ExecutionResult]:
    """Create source event stream.

    Implements the "CreateSourceEventStream" algorithm described in the
    GraphQL specification, resolving the subscription source event stream.

    Returns a coroutine that yields an AsyncIterable.

    If the client provided invalid arguments, the source stream could not
    be created, or the resolver did not return an AsyncIterable, this
    function will throw an error, which should be caught and handled by
    the caller.

    A Source Event Stream represents a sequence of events, each of which
    triggers a GraphQL execution for that event.

    This may be useful when hosting the stateful subscription service in a
    different process or machine than the stateless GraphQL execution
    engine, or otherwise separating these two steps. For more on this, see
    the "Supporting Subscriptions at Scale" information in the GraphQL
    spec.
    """
    # If arguments are missing or incorrectly typed, this is an internal developer
    # mistake which should throw an early error.
    assert_valid_execution_arguments(schema, document, variable_values)
    # If a valid context cannot be created due to incorrect arguments, this will throw
    # an error.
    context = ExecutionContext.build(
        schema,
        document,
        root_value,
        context_value,
        variable_values,
        operation_name,
        field_resolver,
    )
    # Return early errors if execution context failed.
    if isinstance(context, list):
        return ExecutionResult(data=None, errors=context)
    type_ = get_operation_root_type(schema, context.operation)
    fields = context.collect_fields(type_, context.operation.selection_set, {}, set())
    # A subscription operation has exactly one root field; only the first
    # collected response name defines the event stream.
    response_names = list(fields)
    response_name = response_names[0]
    field_nodes = fields[response_name]
    field_node = field_nodes[0]
    field_name = field_node.name.value
    field_def = get_field_def(schema, type_, field_name)
    if not field_def:
        raise GraphQLError(f"The subscription field '{field_name}' is not defined.", field_nodes)
    # Call the `subscribe()` resolver or the default resolver to produce an
    # AsyncIterable yielding raw payloads.
    resolve_fn = field_def.subscribe or context.field_resolver
    resolve_fn = cast(GraphQLFieldResolver, resolve_fn)
    # help mypy
    path = add_path(None, response_name)
    info = context.build_resolve_info(field_def, field_nodes, type_, path)
    # `resolve_field_value_or_error` implements the "ResolveFieldEventStream" algorithm
    # from GraphQL specification. It differs from `resolve_field_value` due to
    # providing a different `resolve_fn`.
    result = context.resolve_field_value_or_error(field_def, field_nodes, resolve_fn, root_value, info)
    # the resolver may be sync or async
    event_stream = await cast(Awaitable, result) if isawaitable(result) else result
    # If `event_stream` is an Error, rethrow a located error.
    if isinstance(event_stream, Exception):
        raise located_error(event_stream, field_nodes, response_path_as_list(path))
    # Assert field returned an event stream, otherwise yield an error.
    if isinstance(event_stream, AsyncIterable):
        return cast(AsyncIterable, event_stream)
    raise TypeError(f"Subscription field must return AsyncIterable. Received: {event_stream!r}")
|
def parse_get_params(request):
    """Parse all url GET params that contain dots in a representation of
    serializer field names, e.g. ``d.docs.limit`` becomes
    ``d_docs_limit``. That makes an actual API client compatible with
    django-rest-framework serializers.

    :param request: request whose GET params are rewritten
    :return: QueryDict with parsed get params.
    """
    original = request.GET.copy()
    parsed = request.GET.copy()
    for key in original.iterkeys():
        if "." in key:
            # replace every dot, then drop the old key
            parsed[key.replace(".", "_")] = original.get(key)
            del parsed[key]
    return parsed
|
def end_of_day(val):
    """Return a new datetime.datetime object with values that represent
    the end of a day (23:59:59.999999).

    :param val: Date whose day is used.
    :type val: datetime.datetime | datetime.date
    :rtype: datetime.datetime
    """
    # type() (not isinstance) is deliberate: datetime is a subclass of
    # date and must not be truncated here.
    if type(val) == date:
        val = datetime.fromordinal(val.toordinal())
    # one full day minus one microsecond after the day's start
    return start_of_day(val) + timedelta(days=1, microseconds=-1)
|
def process(self, args=argv, input_file=stdin, output_file=stdout):
    """Processes search results as specified by command arguments.

    :param args: Sequence of command arguments
    :param input_file: Pipeline input file
    :param output_file: Pipeline output file
    """
    self.logger.debug(u'%s arguments: %s', type(self).__name__, args)
    self._configuration = None
    self._output_file = output_file
    try:
        if len(args) >= 2 and args[1] == '__GETINFO__':
            # Splunk is asking for the command's configuration settings
            ConfigurationSettings, operation, args, reader = self._prepare(args, input_file=None)
            self.parser.parse(args, self)
            self._configuration = ConfigurationSettings(self)
            writer = splunk_csv.DictWriter(output_file, self, self.configuration.keys(), mv_delimiter=',')
            writer.writerow(self.configuration.items())
        elif len(args) >= 2 and args[1] == '__EXECUTE__':
            # Splunk is asking the command to process search results
            self.input_header.read(input_file)
            ConfigurationSettings, operation, args, reader = self._prepare(args, input_file)
            self.parser.parse(args, self)
            self._configuration = ConfigurationSettings(self)
            if self.show_configuration:
                self.messages.append('info_message', '%s command configuration settings: %s' % (self.name, self._configuration))
            writer = splunk_csv.DictWriter(output_file, self)
            self._execute(operation, reader, writer)
        else:
            # neither protocol marker present: the command was configured
            # statically, which this library does not support
            file_name = path.basename(args[0])
            message = (u'Command {0} appears to be statically configured and static ' u'configuration is unsupported by splunklib.searchcommands. ' u'Please ensure that default/commands.conf contains this ' u'stanza:\n' u'[{0}]\n' u'filename = {1}\n' u'supports_getinfo = true\n' u'supports_rawargs = true\n' u'outputheader = true'.format(type(self).name, file_name))
            raise NotImplementedError(message)
    except SystemExit:
        raise
    except:
        import traceback
        import sys
        error_type, error_message, error_traceback = sys.exc_info()
        self.logger.error(traceback.format_exc(error_traceback))
        # walk to the innermost frame to report where the error originated
        origin = error_traceback
        while origin.tb_next is not None:
            origin = origin.tb_next
        filename = origin.tb_frame.f_code.co_filename
        lineno = origin.tb_lineno
        self.write_error('%s at "%s", line %d : %s', error_type.__name__, filename, lineno, error_message)
        exit(1)
    return
|
def create_fuzzy_pattern(pattern):
    """Convert a string into a fuzzy regular expression pattern.

    Inserts ``.*`` between each (escaped) character of the input and
    compiles the result case insensitively, so the expression matches any
    string containing the input's characters in order.

    :param pattern: The input pattern (a string).
    :returns: A compiled regular expression object.
    """
    escaped_chars = (re.escape(char) for char in pattern)
    return re.compile(".*".join(escaped_chars), re.IGNORECASE)
|
def set_forbidden_alien(self, alien):
    """Set the forbidden alien values and refresh the filter.

    :param alien: a list with forbidden alien values
    :type alien: list
    :returns: None
    :rtype: None
    :raises: None
    """
    # nothing changed: skip the (potentially expensive) re-filter
    if self._forbidden_alien == alien:
        return
    self._forbidden_alien = alien
    self.invalidateFilter()
|
def submit_mail(self, send_from, send_to, subject, body, unique_id=None):
    """Add a mail to the outgoing queue.

    :param send_from: sender address
    :param send_to: recipient address
    :param subject: mail subject
    :param body: mail body; may contain HTML
    :param unique_id: unique identifier of the mail. Usually something
        like an md5 hash plus a human-readable prefix works best. A mail
        with a duplicate unique_id will not be added (ON CONFLICT DO
        NOTHING).
    """
    self.__metadb.update("""
        INSERT INTO meta.mail("template", "from", "to", "subject", "body", "attachments", "unique_id")
        VALUES ('meta', :send_from, :send_to, :subject, :body, null, :unique_id)
        ON CONFLICT (unique_id) DO NOTHING
    """, {"send_from": send_from, "send_to": send_to, "subject": subject, "body": body, "unique_id": unique_id})
|
def store_data(name, data=None, delete=False, newname=None):
    """Create a "Tplot Variable" based on the inputs and store it in
    memory. Tplot Variables store all of the information needed to
    generate a plot.

    Parameters:
        name : str
            Name of the tplot variable that will be created
        data : dict
            A python dictionary object.
            'x' should be a 1-dimensional array that represents the data's
            x axis. Typically this data is time, represented in seconds
            since epoch (January 1st 1970).
            'y' should be the data values. This can be 2 dimensions if
            multiple lines or a spectrogram are desired.
            'v' is optional, and is only used for spectrogram plots. This
            will be a list of bins to be used. If this is provided, then
            'y' should have dimensions of x by z.
            'x' and 'y' can be any data format that can be read in by the
            pandas module. Python lists, numpy arrays, or any pandas data
            type will all work.
        delete : bool, optional
            Deletes the tplot variable matching the "name" parameter
        newname : str
            Renames TVar to new name

    .. note::
        If you want to combine multiple tplot variables into one, simply
        supply the list of tplot variables to the "data" parameter. This
        will cause the data to overlay when plotted.

    Returns:
        None

    Examples:
        >>> # Store a single line
        >>> import pytplot
        >>> x_data = [1,2,3,4,5]
        >>> y_data = [1,2,3,4,5]
        >>> pytplot.store_data("Variable1", data={'x': x_data, 'y': y_data})

        >>> # Store two lines
        >>> x_data = [1,2,3,4,5]
        >>> y_data = [[1,5], [2,4], [3,3], [4,2], [5,1]]
        >>> pytplot.store_data("Variable2", data={'x': x_data, 'y': y_data})

        >>> # Store a spectrogram
        >>> x_data = [1,2,3]
        >>> y_data = [[1,2,3], [4,5,6], [7,8,9]]
        >>> v_data = [1,2,3]
        >>> pytplot.store_data("Variable3", data={'x': x_data, 'y': y_data, 'v': v_data})

        >>> # Combine two different line plots
        >>> pytplot.store_data("Variable1and2", data=['Variable1', 'Variable2'])

        >>> # Rename TVar
        >>> pytplot.store_data('a', data={'x': [0,4,8,12,16], 'y': [1,2,3,4,5]})
        >>> pytplot.store_data('a', newname='f')
    """
    global tplot_num
    create_time = datetime.datetime.now()
    # delete/rename are alternate modes that short-circuit storage
    if delete is True:
        del_data(name)
        return
    if data is None and newname is None:
        print('Please provide data.')
        return
    if newname is not None:
        pytplot.tplot_rename(name, newname)
        return
    if isinstance(data, list):
        # combining existing tplot variables: store the list of names
        base_data = get_base_tplot_vars(data)
        # Use first tplot var as the time range
        trange = [np.nanmin(data_quants[base_data[0]].data.index), np.nanmax(data_quants[base_data[0]].data.index)]
        df = base_data
        spec_bins = None
    else:
        df = format_ydata(data['y'])
        times = data['x']
        # If given a list of datetime objects, convert times to seconds since epoch.
        if any(isinstance(t, datetime.datetime) for t in times):
            for tt, time in enumerate(times):
                times[tt] = (time - datetime.datetime(1970, 1, 1, tzinfo=datetime.timezone.utc)).total_seconds()
        # If given a list of datetime string, convert times to seconds since epoch
        elif any(isinstance(t, str) for t in times):
            for tt, time in enumerate(times):
                times[tt] = pytplot.tplot_utilities.str_to_int(time)
        if len(times) != len(df.index):
            print("The lengths of x and y do not match!")
            return
        elif isinstance(times, pd.Series):
            df = df.set_index(data['x'])
        else:
            df['Index'] = times
            df = df.set_index('Index', drop=True)
        trange = [np.nanmin(times), np.nanmax(times)]
        if 'v' in data or 'v2' in data:
            # Generally the data is 1D, but occasionally
            # the bins will vary in time.
            if 'v' in data:
                spec_bins = data['v']
            else:
                spec_bins = data['v2']
            if type(spec_bins) is not pd.DataFrame:
                spec_bins = pd.DataFrame(spec_bins)
            if len(spec_bins.columns) != 1:
                # time-varying bins must align row-for-row with the data
                if len(spec_bins) == len(df.index):
                    spec_bins = spec_bins.set_index(df.index)
                else:
                    print("Length of v and x do not match. Cannot create tplot variable.")
                    return
            else:
                spec_bins = spec_bins.transpose()
        else:
            spec_bins = None
    # axis labels depend on whether this is a line plot or a spectrogram
    xaxis_opt = dict(axis_label='Time')
    yaxis_opt = dict(axis_label=name) if (spec_bins is None) else dict(axis_label='')
    zaxis_opt = dict(axis_label='') if (spec_bins is None) else dict(axis_label=name)
    line_opt = {}
    dtype = ''
    time_bar = []
    # Dictionary to keep track of extra details needed for plotting
    # that aren't actual attributes in Bokeh
    extras = dict(panel_size=1)
    links = {}
    temp = TVar(name, tplot_num, df, spec_bins, xaxis_opt, yaxis_opt, zaxis_opt, line_opt, trange, dtype, create_time, time_bar, extras, links)
    data_quants[name] = temp
    data_quants[name].yaxis_opt['y_range'] = get_y_range(df, spec_bins)
    return
|
def POST(self, **kwargs):
    '''Send one or more Salt commands in the request body.

    .. http:post:: /

        :reqheader X-Auth-Token: |req_token|
        :reqheader Accept: |req_accept|
        :reqheader Content-Type: |req_ct|
        :resheader Content-Type: |res_ct|
        :status 200: |200|
        :status 400: |400|
        :status 401: |401|
        :status 406: |406|

    :term:`lowstate` data describing Salt commands must be sent in the
    request body.

    **Example request:**

    .. code-block:: bash

        curl -sSik https://localhost:8000 \\
            -b ~/cookies.txt \\
            -H "Accept: application/x-yaml" \\
            -H "Content-type: application/json" \\
            -d '[{"client": "local", "tgt": "*", "fun": "test.ping"}]'

    .. code-block:: text

        POST / HTTP/1.1
        Host: localhost:8000
        Accept: application/x-yaml
        X-Auth-Token: d40d1e1e
        Content-Type: application/json

        [{"client": "local", "tgt": "*", "fun": "test.ping"}]

    **Example response:**

    .. code-block:: text

        HTTP/1.1 200 OK
        Content-Length: 200
        Allow: GET, HEAD, POST
        Content-Type: application/x-yaml

        return:
        - ms-0: true
          ms-1: true
          ms-2: true
          ms-3: true
          ms-4: true
    '''
    # exec_lowstate yields results lazily; materialize the generator so
    # the full response is rendered before the handler returns.
    return {'return': list(self.exec_lowstate(token=cherrypy.session.get('token')))}
|
def inverse_jacobian(self, maps):
    """Returns the Jacobian for transforming mass1 and mass2 to
    mchirp and eta.

    :param maps: mapping providing the mass1 and mass2 samples
    :return: -mchirp / eta**(6/5)
    """
    mass1 = maps[parameters.mass1]
    mass2 = maps[parameters.mass2]
    mchirp = conversions.mchirp_from_mass1_mass2(mass1, mass2)
    eta = conversions.eta_from_mass1_mass2(mass1, mass2)
    # |J| = mchirp / eta^(6/5); the sign reflects the orientation flip
    return -1. * mchirp / eta ** (6. / 5)
|
def read_worker(args, q_in, q_out):
    """Function that will be spawned to fetch the image from the input
    queue and put it back to the output queue.

    Parameters
    ----------
    args : object
    q_in : queue
    q_out : queue
    """
    # a None item on the input queue is the shutdown sentinel
    for job in iter(q_in.get, None):
        index, item = job
        image_encode(args, index, item, q_out)
|
def solve_equilibrium_point(self, analyzer1, analyzer2, delu_dict={}, delu_default=0, units="nanometers"):
    """Gives the radial size of two particles where equilibrium is reached
    between both particles.

    NOTE: the solution here is not the same as the solution visualized in
    the plot because solving for r requires that both the total surface
    area and volume of the particles are functions of r.

    Args:
        analyzer1 (SurfaceEnergyPlotter): Analyzer associated with the
            first polymorph
        analyzer2 (SurfaceEnergyPlotter): Analyzer associated with the
            second polymorph
        delu_dict (Dict): Dictionary of the chemical potentials to be set
            as constant. Note the key should be a sympy Symbol object of
            the format: Symbol("delu_el") where el is the name of the
            element.
        delu_default (float): Default value for all unset chemical
            potentials
        units (str): Can be nanometers or Angstrom

    Returns:
        Particle radius in nm (or Angstrom)
    """
    # Wulff shapes of both polymorphs at the given chemical potentials
    wulff_a = analyzer1.wulff_from_chempot(delu_dict=delu_dict, delu_default=delu_default, symprec=self.symprec)
    wulff_b = analyzer2.wulff_from_chempot(delu_dict=delu_dict, delu_default=delu_default, symprec=self.symprec)
    # equilibrium radius from the surface-energy and bulk-energy contrasts
    delta_gamma = wulff_a.weighted_surface_energy - wulff_b.weighted_surface_energy
    delta_E = self.bulk_gform(analyzer1.ucell_entry) - self.bulk_gform(analyzer2.ucell_entry)
    radius = (-3 * delta_gamma) / (delta_E)
    return radius / 10 if units == "nanometers" else radius
|
def onSelect_specimen(self, event):
    """Update figures and text when a new specimen is selected.

    :param event: wx selection event from the specimens combo box (only
        triggers the handler; its payload is unused)
    """
    # drop any measurement selection belonging to the previous specimen
    self.selected_meas = []
    self.select_specimen(str(self.specimens_box.GetValue()))
    # keep the interpretation editor (if open) in sync with the new fit
    if self.ie_open:
        self.ie.change_selected(self.current_fit)
    self.update_selection()
|
def ansi_format_iter(self, x_start=0, y_start=0, width=None, height=None, frame=0, columns=1, downsample=1):
    """Return the ANSI escape sequence to render the image.

    x_start
        Offset from the left of the image data to render from. Defaults to 0.
    y_start
        Offset from the top of the image data to render from. Defaults to 0.
    width
        Width of the image data to render. Defaults to the image width.
    height
        Height of the image data to render. Defaults to the image height.
    frame
        Single frame number, or a list of frame numbers to render in
        sequence. Defaults to frame 0.
    columns
        Number of frames to render per line (useful for printing
        tilemaps!). Defaults to 1.
    downsample
        Shrink larger images by printing every nth pixel only. Defaults to 1.
    """
    image = self.get_image()
    frames = []
    # still images have no n_frames attribute; treat them as one frame
    frame_count = 1 if not hasattr(image, 'n_frames') else image.n_frames
    if isinstance(frame, int):
        assert frame in range(0, frame_count)
        frames = [frame]
    else:
        # silently drop out-of-range frame numbers from the sequence
        frames = [f for f in frame if f in range(0, frame_count)]
    if not width:
        width = image.size[0] - x_start
    if not height:
        height = image.size[1] - y_start
    if image.mode == 'P':
        palette = from_palette_bytes(image.getpalette())

    def data_fetch(x, y, fr):
        # out-of-range frames and coordinates render as transparency
        if fr not in range(0, frame_count):
            return Transparent()
        if not ((0 <= x < image.size[0]) and (0 <= y < image.size[1])):
            return Transparent()
        image.seek(fr)
        return palette[image.getpixel((x, y))]

    for x in ansi.format_image_iter(data_fetch, x_start, y_start, width, height, frames, columns, downsample):
        yield x
    return
|
def mkdir_p(path_to_dir):
    """Make directory(ies).

    This function behaves like ``mkdir -p``: intermediate directories are
    created and an already-existing target directory is not an error.

    Args:
        path_to_dir (:obj:`str`): Path to the directory to make.

    Raises:
        OSError: If creation fails for any reason other than the target
            already existing as a directory.
    """
    try:
        os.makedirs(path_to_dir)
    except OSError as e:  # Python >2.5
        if e.errno == EEXIST and os.path.isdir(path_to_dir):
            logger.debug("Directory %s already exists. Skipping." % path_to_dir)
        else:
            # bare raise keeps the original traceback intact
            # (the previous `raise e` re-raised through this frame)
            raise
|
def maximum_variation(arr):
    '''Return the peak-to-peak range of ``arr`` along its first axis,
    i.e. ``np.max(arr, axis=0) - np.min(arr, axis=0)``.

    If `arr` is a 1D array, a scalar is returned.
    If `arr` is a 2D array (N x M), an array of length M is returned.
    '''
    # np.ptp computes exactly max - min along the given axis
    return np.ptp(arr, axis=0)
|
def max_height(self):
    """:return: The max height of the rendered text (across all images if
        an animated renderer).
    """
    # render lazily on first access
    if len(self._plain_images) <= 0:
        self._convert_images()
    if self._max_height == 0:
        # cache the tallest image's line count
        self._max_height = max((len(image) for image in self._plain_images), default=0)
    return self._max_height
|
def _extract_t_indices ( self , X , X2 = None , dL_dK = None ) :
"""Extract times and output indices from the input matrix X . Times are ordered according to their index for convenience of computation , this ordering is stored in self . _ order and self . order2 . These orderings are then mapped back to the original ordering ( in X ) using self . _ rorder and self . _ rorder2."""
|
# TODO : some fast checking here to see if this needs recomputing ?
self . _t = X [ : , 0 ]
if not X . shape [ 1 ] == 2 :
raise ValueError ( 'Input matrix for ode1 covariance should have two columns, one containing times, the other output indices' )
self . _index = np . asarray ( X [ : , 1 ] , dtype = np . int )
# Sort indices so that outputs are in blocks for computational
# convenience .
self . _order = self . _index . argsort ( )
self . _index = self . _index [ self . _order ]
self . _t = self . _t [ self . _order ]
self . _rorder = self . _order . argsort ( )
# rorder is for reversing the order
if X2 is None :
self . _t2 = None
self . _index2 = None
self . _order2 = self . _order
self . _rorder2 = self . _rorder
else :
if not X2 . shape [ 1 ] == 2 :
raise ValueError ( 'Input matrix for ode1 covariance should have two columns, one containing times, the other output indices' )
self . _t2 = X2 [ : , 0 ]
self . _index2 = np . asarray ( X2 [ : , 1 ] , dtype = np . int )
self . _order2 = self . _index2 . argsort ( )
self . _index2 = self . _index2 [ self . _order2 ]
self . _t2 = self . _t2 [ self . _order2 ]
self . _rorder2 = self . _order2 . argsort ( )
# rorder2 is for reversing order
if dL_dK is not None :
self . _dL_dK = dL_dK [ self . _order , : ]
self . _dL_dK = self . _dL_dK [ : , self . _order2 ]
|
def set(self, key, value, filepath):
    """Set configuration parameter.

    Writes 'value' on 'key' to the configuration file given in
    'filepath'. Configuration parameter in 'key' must follow the schema
    <section>.<option>.

    :param key: key to set
    :param value: value to set
    :param filepath: configuration file
    :raises RuntimeError: when no file is given, the key is invalid, or
        the value cannot be written
    :return: CMD_SUCCESS on success
    """
    if not filepath:
        raise RuntimeError("Configuration file not given")
    if not self.__check_config_key(key):
        raise RuntimeError("%s parameter does not exists or cannot be set" % key)
    # SafeConfigParser was a deprecated alias removed in Python 3.12;
    # ConfigParser provides the same ("safe") interpolation behaviour.
    config = configparser.ConfigParser()
    if os.path.isfile(filepath):
        config.read(filepath)
    section, option = key.split('.')
    if section not in config.sections():
        config.add_section(section)
    try:
        config.set(section, option, value)
    except TypeError as e:
        # configparser only accepts string values
        raise RuntimeError(str(e))
    try:
        with open(filepath, 'w') as f:
            config.write(f)
    except IOError as e:
        raise RuntimeError(str(e))
    return CMD_SUCCESS
|
def estimate_B(xray_table, vhe_table, photon_energy_density=0.261 * u.eV / u.cm ** 3):
    """Estimate the magnetic field from the X-ray to gamma-ray luminosity ratio.

    The synchrotron to inverse-Compton luminosity ratio equals the ratio of
    magnetic to seed-photon energy densities::

        L_xray / L_gamma = u_B / u_ph = B**2 / (8 * pi * u_ph)

    This assumes the observed flux ratio traces the bolometric synchrotron/IC
    ratio and that IC proceeds in the Thomson regime, which is safe when the
    SED peaks fall inside the observed X-ray and gamma-ray bands.  Even when
    the assumption is imperfect, the result is a good starting point for
    simultaneous X-ray/gamma-ray spectral fits.

    Parameters
    ----------
    xray_table : :class:`~astropy.table.Table`
        Data table containing the X-ray spectrum.
    vhe_table : :class:`~astropy.table.Table`
        Data table containing the HE/VHE gamma-ray spectrum.
    photon_energy_density : :class:`~astropy.units.Quantity` float, optional
        Energy density of the seed photon field for IC emission.  Defaults to
        0.261 eV/cm3, the energy density of the CMB.

    Returns
    -------
    B : :class:`~astropy.units.Quantity` float
        Estimate of the magnetic flux density at the emitter, in microgauss.
    """
    xray_data = validate_data_table(xray_table, sed=False)
    gamma_data = validate_data_table(vhe_table, sed=False)
    # Integrate E * F(E) over each band in log-log space to get luminosities
    # (up to a common distance factor, which cancels in the ratio).
    lum_x = trapz_loglog(xray_data["flux"] * xray_data["energy"], xray_data["energy"])
    lum_g = trapz_loglog(gamma_data["flux"] * gamma_data["energy"], gamma_data["energy"])
    u_ph = (photon_energy_density.to("erg/cm3")).value
    lum_ratio = (lum_x / lum_g).decompose().value
    return (np.sqrt(lum_ratio * 8 * np.pi * u_ph) * u.G).to("uG")
|
def spawn_program(self, name, arguments=None, timeout=30, exclusive=False):
    """Spawns a program in the working directory.

    This method allows the interaction with the running program,
    based on the returned RunningProgram object.

    Args:
        name (str):      The name of the program to be executed.
        arguments (tuple): Command-line arguments for the program.
                           Defaults to no arguments.
        timeout (int):   The timeout for execution.
        exclusive (bool): Prevent parallel validation runs on the
                          test machines, e.g. when doing performance
                          measurements for submitted code.

    Returns:
        RunningProgram: An object representing the running program.
    """
    logger.debug("Spawning program for interaction ...")
    # A mutable default argument ([]) would be shared across all calls;
    # use None as sentinel and create a fresh list per call instead.
    if arguments is None:
        arguments = []
    if exclusive:
        kill_longrunning(self.config)
    return RunningProgram(self, name, arguments, timeout)
|
def find_l50(contig_lengths_dict, genome_length_dict):
    """Calculate the L50 for each strain.

    L50 is the number of contigs required to reach the N50, i.e. the
    smallest count of (largest-first) contigs whose combined length covers
    at least half of the total genome length.

    :param contig_lengths_dict: dictionary of strain name: reverse-sorted
        list of all contig lengths
    :param genome_length_dict: dictionary of strain name: total genome length
    :return: dictionary of strain name: L50
    """
    l50_dict = {}
    for strain, lengths in contig_lengths_dict.items():
        half_genome = genome_length_dict[strain] * 0.5
        running_total = 0
        # enumerate from 1: the count of contigs consumed so far IS the L50
        # once the running total crosses the halfway mark.
        for contig_count, length in enumerate(lengths, start=1):
            running_total += length
            if running_total >= half_genome:
                l50_dict[strain] = contig_count
                break
    return l50_dict
|
def draw(self):
    """Render the button in its current visual state.

    Should be called every time through the main loop.  Invisible buttons
    draw nothing; disabled buttons always use the disabled surface;
    otherwise the surface is chosen from mouse-press and hover state.
    """
    if not self.visible:
        return
    if not self.isEnabled:
        self.window.blit(self.surfaceDisabled, self.loc)
        return
    if self.mouseIsDown:
        # Show the pressed look only while the press started on the button
        # and the cursor is still over it.
        pressed = self.mouseOverButton and self.lastMouseDownOverButton
        surface = self.surfaceDown if pressed else self.surfaceUp
    else:
        surface = self.surfaceOver if self.mouseOverButton else self.surfaceUp
    self.window.blit(surface, self.loc)
|
def __process_username_password(self):
    """Resolve username/password from the credential store, if one is in use.

    When ``clear_store`` was requested, stored credentials are removed
    before (username) and after (password) resolution so freshly entered
    values can be re-saved.
    """
    if self.use_username_password_store is None:
        return
    clearing = self.args.clear_store
    if clearing:
        # Drop the stored username so it is prompted for / re-read below.
        with load_config(sections=AUTH_SECTIONS) as config:
            config.remove_option(AUTH_SECTION, 'username')
    if not self.args.username:
        self.args.username = get_username(use_store=self.use_username_password_store)
    if clearing:
        remove_password(AUTH_SECTION, username=self.args.username)
    if not self.args.password:
        self.args.password = get_password(AUTH_SECTION, username=self.args.username)
    if self.use_username_password_store:
        save_password(AUTH_SECTION, self.args.password, self.args.username)
|
def register_view(self, view):
    """Called when the View was registered.

    Wires the tree-view columns to the storage-model attributes, makes the
    cells editable when permitted, and installs the edit/focus-out handlers.
    """
    super(ScopedVariableListController, self).register_view(view)
    # Cells are editable only when the state is neither a library state nor
    # contained in one.  Compute this once instead of repeating the check
    # for every column (the state cannot change during registration).
    editable = (not isinstance(self.model.state, LibraryState)
                and self.model.state.get_next_upper_library_root_state() is None)
    view['name_col'].add_attribute(view['name_text'], 'text', self.NAME_STORAGE_ID)
    if editable:
        view['name_text'].set_property("editable", True)
    view['data_type_col'].add_attribute(view['data_type_text'], 'text', self.DATA_TYPE_NAME_STORAGE_ID)
    if editable:
        view['data_type_text'].set_property("editable", True)
    if isinstance(view, ScopedVariablesListView):
        view['default_value_col'].add_attribute(view['default_value_text'], 'text', self.DEFAULT_VALUE_STORAGE_ID)
        if editable:
            view['default_value_text'].set_property("editable", True)
        self._apply_value_on_edited_and_focus_out(view['default_value_text'],
                                                  self.apply_new_scoped_variable_default_value)
    self._apply_value_on_edited_and_focus_out(view['name_text'], self.apply_new_scoped_variable_name)
    self._apply_value_on_edited_and_focus_out(view['data_type_text'], self.apply_new_scoped_variable_type)
    if isinstance(self.model, ContainerStateModel):
        self.reload_scoped_variables_list_store()
|
def _setup_simplejson(self, responder):
    """Install a Python 2.4-compatible simplejson override.

    We support serving simplejson for Python 2.4 targets on Ansible 2.3, at
    least so the package's own CI Docker scripts can run without external
    help, however newer versions of simplejson no longer support Python
    2.4.  Therefore override any installed/loaded version with a
    2.4-compatible version we ship in the compat/ directory.
    """
    responder.whitelist_prefix('simplejson')
    # issue #536: must be at end of sys.path, in case existing newer
    # version is already loaded.
    compat_path = os.path.join(os.path.dirname(__file__), 'compat')
    sys.path.append(compat_path)
    for fullname, is_pkg, suffix in (
        (u'simplejson', True, '__init__.py'),
        (u'simplejson.decoder', False, 'decoder.py'),
        (u'simplejson.encoder', False, 'encoder.py'),
        (u'simplejson.scanner', False, 'scanner.py'),
    ):
        path = os.path.join(compat_path, 'simplejson', suffix)
        # 'with' guarantees the file handle is closed even if read() raises,
        # replacing the manual try/finally close.
        with open(path, 'rb') as fp:
            source = fp.read()
        responder.add_source_override(fullname=fullname, path=path, source=source, is_pkg=is_pkg)
|
def set_limit(self, param):
    """Models "Limit Command" functionality of device.

    Sets the target temperature to be reached.

    :param param: Target temperature in C, multiplied by 10, as a string.
        Can be negative.
    :return: Empty string on success; None (implicitly) for out-of-range
        values.
    """
    # TODO: Is not having leading zeroes / 4 digits an error?
    target = int(param)
    if not (-2000 <= target <= 6000):
        # Out-of-range requests are silently ignored.
        return
    self.device.temperature_limit = target / 10.0
    return ""
|
def exclude_states(omega, gamma, r, Lij, states, excluded_states):
    """Exclude states from matrices.

    Removes the rows and columns corresponding to every state listed in
    ``excluded_states`` from ``omega``, ``gamma``, ``Lij`` and each of the
    three components of ``r``, and filters ``states`` accordingly.

    Returns the tuple
    ``(omega_new, gamma_new, r_new, Lij_new, states_new)``.
    """
    n_states = len(omega)
    # Indices that survive the filtering, in their original order.
    keep = [i for i in range(n_states) if states[i] not in excluded_states]
    omega_new = [[omega[i][j] for j in keep] for i in keep]
    gamma_new = [[gamma[i][j] for j in keep] for i in keep]
    Lij_new = [[Lij[i][j] for j in keep] for i in keep]
    # r has one n_states x n_states matrix per Cartesian component.
    r_new = [[[r[p][i][j] for j in keep] for i in keep] for p in range(3)]
    states_new = [states[i] for i in keep]
    return omega_new, gamma_new, r_new, Lij_new, states_new
|
def generate_docs(self):
    """Generate the output docstring and mark generation as done.

    Runs every section builder in order (description, parameters, return,
    raises, other, raw) and finally sets ``self.generated_docs``.
    """
    # numpydoc output may carry over the original first line verbatim.
    if self.dst.style['out'] == 'numpydoc' and self.dst.numpydoc.first_line is not None:
        self.first_line = self.dst.numpydoc.first_line
    for build_section in (self._set_desc, self._set_params, self._set_return,
                          self._set_raises, self._set_other, self._set_raw):
        build_section()
    self.generated_docs = True
|
def filter_sequences(self, seq_type):
    """Return a DictList of only the specified type from ``self.sequences``.

    Args:
        seq_type (SeqProp): Object type to keep.

    Returns:
        DictList: A filtered DictList containing only objects of the
        specified type.
    """
    matching = (seq for seq in self.sequences if isinstance(seq, seq_type))
    return DictList(matching)
|
def computeCovarianceOfSums(self, d_ij, K, a):
    """Standard deviations of weighted sums of free-energy differences.

    Computes ``sqrt(var(\\sum_k a_k (f_{i,k} - f_{j,k})))`` for every pair
    of states (i, j) within a chunk of ``K`` states, where chunk ``k``
    occupies rows/columns ``[k*K, (k+1)*K)`` of ``d_ij``.

    The covariance between two differences is recovered from variances
    alone using ``2 cov(x, y) = var(x) + var(y) - var(x - y)``, which for
    ``2 cov(a - b, c - d)`` expands to
    ``-var(a-c) + var(a-d) + var(b-c) - var(b-d)``.

    Parameters
    ----------
    d_ij : matrix of standard deviations of the quantities f_i - f_j
    K : the number of states in each 'chunk', has to be constant
    a : sequence of weights, one per chunk

    Returns
    -------
    KxK matrix of standard deviations for the sums or differences
    ``\\sum a_i df_i``.
    """
    # todo: vectorize this.
    variances = np.square(d_ij)
    sums = np.zeros([K, K], float)
    num_chunks = len(a)
    for row in range(K):
        for col in range(K):
            for k in range(num_chunks):
                rk = row + k * K
                ck = col + k * K
                # Diagonal (single-difference) variance term.
                sums[row, col] += a[k] ** 2 * variances[rk, ck]
                for l in range(num_chunks):
                    rl = row + l * K
                    cl = col + l * K
                    # Cross-covariance term between chunk k and chunk l.
                    sums[row, col] += a[k] * a[l] * (-variances[rk, rl] + variances[rk, cl] + variances[ck, rl] - variances[ck, cl])
    return np.sqrt(sums)
|
def _parse_hwtype(self):
    """Convert the numerical hardware id into a chip name.

    Looks up ``self.hw_type`` in the known-hardware table and stores the
    result (or an "Unknown Chip" placeholder) on ``self.chip_name``.
    """
    fallback = "Unknown Chip (type=%d)" % self.hw_type
    self.chip_name = KNOWN_HARDWARE_TYPES.get(self.hw_type, fallback)
|
def work_get(self, wallet, account):
    """Retrieves work for **account** in **wallet**

    .. enable_control required
    .. version 8.0 required

    :param wallet: Wallet to get account work for
    :type wallet: str
    :param account: Account to get work for
    :type account: str
    :raises: :py:exc:`nano.rpc.RPCException`

    >>> rpc.work_get(
    ...     wallet="000D1BAEC8EC208142C99059B393051BAC8380F9B5A2E6B2489A277D81789F3F",
    ...     account="xrb_11111hifc8npp"
    ... )
    "432e5cf728c90f4f"
    """
    payload = {
        "wallet": self._process_value(wallet, 'wallet'),
        "account": self._process_value(account, 'account'),
    }
    return self.call('work_get', payload)['work']
|
def add_bond(self, key1, key2, bond):
    """Set a bond between the two atom keys. An existing bond is overwritten.

    :param key1: key of the first endpoint
    :param key2: key of the second endpoint
    :param bond: bond object/value stored as the edge's ``bond`` attribute
    """
    # add_edge replaces the 'bond' attribute if the edge already exists
    # (NOTE(review): presumably self.graph is a networkx Graph — confirm).
    self.graph.add_edge(key1, key2, bond=bond)
|
def run_validators(self, value):
    """Validate ``value`` using the document field's ``validate()``.

    :param value: value to validate
    :raises ValidationError: if the underlying Mongo field validation fails
    """
    try:
        self.model_field.validate(value)
    except MongoValidationError as e:
        # Re-raise as the serializer-level ValidationError so callers only
        # need to handle a single exception type.
        raise ValidationError(e.message)
    # Run any additional validators configured on this serializer field.
    super(DocumentField, self).run_validators(value)
|
def java_timestamp(timestamp=True):
    """.. versionadded:: 0.2.0

    Returns a timestamp in the format produced by |date_tostring|_, e.g.::

        Mon Sep 02 14:00:54 EDT 2016

    If ``timestamp`` is `True` (the default), the current date & time is
    returned.

    If ``timestamp`` is `None` or `False`, an empty string is returned.

    If ``timestamp`` is a number, it is converted from seconds since the
    epoch to local time.

    If ``timestamp`` is a `datetime.datetime` object, its value is used
    directly, with naïve objects assumed to be in the local timezone.

    The timestamp is always constructed using the C locale.

    :param timestamp: the date & time to display
    :type timestamp: `None`, `bool`, number, or `datetime.datetime`
    :rtype: text string

    .. |date_tostring| replace:: Java 8's ``Date.toString()``
    .. _date_tostring: https://docs.oracle.com/javase/8/docs/api/java/util/Date.html#toString--
    """
    if timestamp is None or timestamp is False:
        return ''
    # Aware datetimes carry their own timezone; use it directly.
    if isinstance(timestamp, datetime) and timestamp.tzinfo is not None:
        timebits = timestamp.timetuple()
        # Assumes `timestamp.tzinfo.tzname()` is meaningful/useful
        tzname = timestamp.tzname()
    else:
        if timestamp is True:
            # `localtime(None)` below yields the current time.
            timestamp = None
        elif isinstance(timestamp, datetime):
            try:
                # Use `datetime.timestamp()` if it's available, as it (unlike
                # `datetime.timetuple()`) takes `fold` into account for naïve
                # datetimes
                timestamp = timestamp.timestamp()
            except AttributeError:
                # Pre-Python 3.3
                # Mapping `timetuple` through `mktime` and `localtime` is
                # necessary for determining whether DST is in effect (which,
                # in turn, is necessary for determining which timezone name
                # to use).  The only downside to using standard functions
                # instead of `python-dateutil` is that `mktime`, apparently,
                # handles times duplicated by DST non-deterministically (cf.
                # <https://git.io/vixsE>), but there's no right way to deal
                # with those anyway, so...
                timestamp = time.mktime(timestamp.timetuple())
        elif not isinstance(timestamp, numbers.Number):
            raise TypeError('Timestamp must be number or datetime.datetime')
        timebits = time.localtime(timestamp)
        try:
            tzname = timebits.tm_zone
        except AttributeError:
            # This assumes that `time.tzname` is meaningful/useful.
            tzname = time.tzname[timebits.tm_isdst > 0]
    # Sanity-check before indexing the month/weekday name tables.
    assert 1 <= timebits.tm_mon <= 12, 'invalid month'
    assert 0 <= timebits.tm_wday <= 6, 'invalid day of week'
    return '{wday} {mon} {t.tm_mday:02d}' ' {t.tm_hour:02d}:{t.tm_min:02d}:{t.tm_sec:02d}' ' {tz} {t.tm_year:04d}'.format(t=timebits, tz=tzname, mon=MONTHS[timebits.tm_mon - 1], wday=DAYS_OF_WEEK[timebits.tm_wday])
|
def init_manual(cls, pawn_value, knight_value, bishop_value, rook_value, queen_value, king_value):
    """Alternate constructor with explicitly supplied piece values.

    :type: pawn_value: int
    :type: knight_value: int
    :type: bishop_value: int
    :type: rook_value: int
    :type: queen_value: int
    :type: king_value: int
    :return: new instance with all six piece-value attributes set
    """
    instance = cls()
    attr_names = ('PAWN_VALUE', 'KNIGHT_VALUE', 'BISHOP_VALUE',
                  'ROOK_VALUE', 'QUEEN_VALUE', 'KING_VALUE')
    supplied = (pawn_value, knight_value, bishop_value,
                rook_value, queen_value, king_value)
    for attr, value in zip(attr_names, supplied):
        setattr(instance, attr, value)
    return instance
|
def print_rows(self, num_rows=10, num_columns=40, max_column_width=30, max_row_width=80, output_file=None):
    """Print the first M rows and N columns of the SFrame in human readable
    format.

    Parameters
    ----------
    num_rows : int, optional
        Number of rows to print.
    num_columns : int, optional
        Number of columns to print.
    max_column_width : int, optional
        Maximum width of a column. Columns use fewer characters if possible.
    max_row_width : int, optional
        Maximum width of a printed row. Columns beyond this width wrap to a
        new line. `max_row_width` is automatically reset to be the
        larger of itself and `max_column_width`.
    output_file : file, optional
        The stream or file that receives the output. By default the output
        goes to sys.stdout, but it can also be redirected to a file or a
        string (using an object of type StringIO).

    See Also
    --------
    head, tail
    """
    out = sys.stdout if output_file is None else output_file
    # A row must be able to hold at least one full-width column.
    if max_row_width < max_column_width + 1:
        max_row_width = max_column_width + 1
    rendered = self._imagecols_to_stringcols(num_rows)
    tables = rendered.__get_pretty_tables__(wrap_text=False,
                                            max_rows_to_display=num_rows,
                                            max_columns=num_columns,
                                            max_column_width=max_column_width,
                                            max_row_width=max_row_width)
    body = '\n'.join(str(tb) for tb in tables)
    footer = "[%d rows x %d columns]\n" % self.shape
    print(body + "\n" + footer, file=out)
|
def _guess_concat(data):
    """Guess a concatenation function matching the type of ``data``.

    Returns ``''.join`` for text, ``concat_bytes`` for bytes, and ``list``
    for anything else.
    """
    concat_by_type = {
        type(u''): u''.join,
        type(b''): concat_bytes,
    }
    return concat_by_type.get(type(data), list)
|
def set_main_and_cell_language(metadata, cells, ext):
    """Set the main language for the given collection of cells, and
    prepend language magics to cells written in another language.
    """
    main_language = (metadata.get('kernelspec', {}).get('language')
                     or metadata.get('jupytext', {}).get('main_language')
                     or _SCRIPT_EXTENSIONS.get(ext, {}).get('language'))
    if main_language is None:
        # Fall back to the most frequent cell language, biased toward python.
        counts = {'python': 0.5}
        for cell in cells:
            if 'language' in cell['metadata']:
                cell_lang = cell['metadata']['language']
                counts[cell_lang] = counts.get(cell_lang, 0.0) + 1
        main_language = max(counts, key=counts.get)
        # Record the main language only when no kernel pins it already.
        if 'language' not in metadata.get('kernelspec', {}):
            metadata.setdefault('jupytext', {})['main_language'] = main_language
    # Strip per-cell 'language' metadata; add a magic for non-main languages.
    for cell in cells:
        if 'language' not in cell['metadata']:
            continue
        cell_lang = cell['metadata'].pop('language')
        if cell_lang == main_language or cell_lang not in _JUPYTER_LANGUAGES:
            continue
        if 'magic_args' in cell['metadata']:
            prefix = u'%%{} {}\n'.format(cell_lang, cell['metadata'].pop('magic_args'))
        else:
            prefix = u'%%{}\n'.format(cell_lang)
        cell['source'] = prefix + cell['source']
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.