signature stringlengths 29 44.1k | implementation stringlengths 0 85.2k |
|---|---|
def tablespace_exists(name, user=None, host=None, port=None, maintenance_db=None, password=None, runas=None):
    '''
    Checks if a tablespace exists on the Postgres server.

    CLI Example:

    .. code-block:: bash

        salt '*' postgres.tablespace_exists 'dbname'

    .. versionadded:: 2015.8.0
    '''
    # Fetch the full tablespace listing with identical connection options
    # and simply test membership by name.
    existing = tablespace_list(
        user=user,
        host=host,
        port=port,
        maintenance_db=maintenance_db,
        password=password,
        runas=runas,
    )
    return name in existing
def ExpandRelativePath(method_config, params, relative_path=None):
    """Determine the relative path for request.

    Substitutes each path parameter of `method_config` into the URI
    template, URL-quoting the values. Raises InvalidUserInputError when a
    template placeholder is missing from the path or a required parameter
    is absent or None.
    """
    # Explicit override wins, then the method's own template, then empty.
    path = relative_path or method_config.relative_path or ''
    for param in method_config.path_params:
        param_template = '{%s}' % param
        # For more details about "reserved word expansion", see:
        #   http://tools.ietf.org/html/rfc6570#section-3.2.2
        # A `{+param}` template means reserved URI characters in the value
        # must NOT be percent-encoded; normalize it to `{param}` and widen
        # the set of safe characters passed to quote() instead.
        reserved_chars = ''
        reserved_template = '{+%s}' % param
        if reserved_template in path:
            reserved_chars = _RESERVED_URI_CHARS
            path = path.replace(reserved_template, param_template)
        if param_template not in path:
            raise exceptions.InvalidUserInputError(
                'Missing path parameter %s' % param)
        try:
            # TODO(craigcitro): Do we want to support some sophisticated
            # mapping here?
            value = params[param]
        except KeyError:
            raise exceptions.InvalidUserInputError(
                'Request missing required parameter %s' % param)
        if value is None:
            raise exceptions.InvalidUserInputError(
                'Request missing required parameter %s' % param)
        try:
            # Non-string values (ints, etc.) are stringified before quoting.
            if not isinstance(value, six.string_types):
                value = str(value)
            path = path.replace(
                param_template,
                urllib_parse.quote(value.encode('utf_8'), reserved_chars))
        except TypeError as e:
            raise exceptions.InvalidUserInputError(
                'Error setting required parameter %s to value %s: %s'
                % (param, value, e))
    return path
def seek(self, offset, whence=SEEK_SET):
    """Seek pointer in lob data buffer to requested position.

    Might trigger further loading of data from the database if the pointer
    is beyond currently read data.
    """
    # (Ab)use BytesIO.seek() to compute the absolute target position for
    # any `whence` mode; this moves the pointer but adds no data.
    self.data.seek(offset, whence)
    target = self.data.tell()
    shortfall = target - self._current_lob_length
    if shortfall > 0:
        # The target lies beyond the data fetched so far, so more must be
        # loaded first. A seek is usually followed by a read, so fetch a
        # few extra items (EXTRA_NUM_ITEMS_TO_READ_AFTER_SEEK) to avoid an
        # immediate second round trip.
        self.data.seek(0, SEEK_END)
        self.read(shortfall + self.EXTRA_NUM_ITEMS_TO_READ_AFTER_SEEK)
        # Put the pointer back at the position the caller asked for.
        self.data.seek(target)
    return target
def fire(obj, name, *args, **kwargs):
    """Arrange for `func(*args, **kwargs)` to be invoked for every function
    registered for the named signal on `obj`."""
    # Registrations live in the instance dict under '_signals'; an object
    # with no registrations (or none for this name) is a silent no-op.
    registry = vars(obj).get('_signals', {})
    handlers = registry.get(name, ())
    for handler in handlers:
        handler(*args, **kwargs)
def concat_align(fastas):
    """concatenate alignments"""
    # Collect every sequence keyed by file and record ID, remembering each
    # file's alignment length so absent IDs can be gap-padded later.
    fa2len = {}
    seqs = {}
    IDs = []
    for fasta in fastas:
        seqs[fasta] = {}
        for seq in parse_fasta(fasta):
            ID = seq[0].split('>')[1].split()[0]
            IDs.append(ID)
            seqs[fasta][ID] = seq[1]
            fa2len[fasta] = len(seq[1])
    # Stitch the per-file sequences together in file order; IDs missing
    # from a file contribute a run of '-' gaps of that file's length.
    IDs = set(IDs)
    concat = {}
    for fasta in fastas:
        for ID in IDs:
            pieces = concat.setdefault(ID, [])
            if ID in seqs[fasta]:
                pieces.append(seqs[fasta][ID])
            else:
                pieces.append('-' * fa2len[fasta])
    return concat
def greenlet_timeouts(self):
    """This greenlet kills jobs in other greenlets if they timeout.

    Runs forever as a watchdog: once a second it scans every greenlet in
    the worker pool and kills any job whose configured timeout has elapsed
    since its start time.
    """
    while True:
        now = datetime.datetime.utcnow()
        # Snapshot the pool so membership changes during iteration are safe.
        for greenlet in list(self.gevent_pool):
            job = get_current_job(id(greenlet))
            # Only jobs that have both a timeout and a recorded start time
            # can expire; others are skipped.
            if job and job.timeout and job.datestarted:
                expires = job.datestarted + datetime.timedelta(seconds=job.timeout)
                if now > expires:
                    # Non-blocking kill: don't stall the watchdog loop.
                    job.kill(block=False, reason="timeout")
        time.sleep(1)
def _Members ( self , group ) :
"""Unify members of a group and accounts with the group as primary gid .""" | group . members = set ( group . members ) . union ( self . gids . get ( group . gid , [ ] ) )
return group |
def _export_project_file(project, path, z, include_images, keep_compute_id, allow_all_nodes, temporary_dir):
    """Take a project file (.gns3) and patch it for the export

    We rename the .gns3 project to project.gns3 to avoid the task to the
    client to guess the file name

    :param path: Path of the .gns3
    """
    # Image file that we need to include in the exported archive
    images = []
    with open(path) as f:
        topology = json.load(f)
    if "topology" in topology:
        if "nodes" in topology["topology"]:
            for node in topology["topology"]["nodes"]:
                compute_id = node.get('compute_id', 'local')
                # Linked VirtualBox clones reference host-local disks and
                # cannot travel with the archive.
                if node["node_type"] == "virtualbox" and node.get("properties", {}).get("linked_clone"):
                    raise aiohttp.web.HTTPConflict(text="Topology with a linked {} clone could not be exported. Use qemu instead.".format(node["node_type"]))
                if not allow_all_nodes and node["node_type"] in ["virtualbox", "vmware", "cloud"]:
                    raise aiohttp.web.HTTPConflict(text="Topology with a {} could not be exported".format(node["node_type"]))
                if not keep_compute_id:
                    node["compute_id"] = "local"
                    # To make project portable all node by default run on local
                if "properties" in node and node["node_type"] != "docker":
                    for prop, value in node["properties"].items():
                        # IOU stores its image under "path"; other node types
                        # use properties ending in "image".
                        if node["node_type"] == "iou":
                            if not prop == "path":
                                continue
                        elif not prop.endswith("image"):
                            continue
                        if value is None or value.strip() == '':
                            continue
                        if not keep_compute_id:
                            # If we keep the original compute we can keep the image path
                            node["properties"][prop] = os.path.basename(value)
                        if include_images is True:
                            images.append({'compute_id': compute_id, 'image': value, 'image_type': node['node_type']})
    if not keep_compute_id:
        topology["topology"]["computes"] = []
        # Strip compute information because could contain secret info like password
    # Local images are copied straight into the zip; remote ones must be
    # downloaded first (async, hence the yield from).
    local_images = set([i['image'] for i in images if i['compute_id'] == 'local'])
    for image in local_images:
        _export_local_images(project, image, z)
    remote_images = set([(i['compute_id'], i['image_type'], i['image']) for i in images if i['compute_id'] != 'local'])
    for compute_id, image_type, image in remote_images:
        yield from _export_remote_images(project, compute_id, image_type, image, z, temporary_dir)
    # Write the patched topology into the archive under the fixed name.
    z.writestr("project.gns3", json.dumps(topology).encode())
    return images
def handle_details(self, username):
    """Print user details"""
    # Resolve the user first; a missing account is a hard command error.
    try:
        user = User.objects.get(username=username)
    except User.DoesNotExist:
        raise CommandError("Unable to find user '%s'" % username)
    write = self.stdout.write
    write("username : %s" % username)
    write("is_active : %s" % user.is_active)
    write("is_staff : %s" % user.is_staff)
    write("is_superuser: %s" % user.is_superuser)
    group_names = [g.name for g in user.groups.all().order_by("name")]
    write("groups : [%s]" % ", ".join(group_names))
def connect(components, connections, force_SLH=False, expand_simplify=True):
    """Connect a list of components according to a list of connections.

    Args:
        components (list): List of Circuit instances
        connections (list): List of pairs ``((c1, port1), (c2, port2))``
            where ``c1`` and ``c2`` are elements of `components` (or the
            index of the element in `components`), and ``port1`` and
            ``port2`` are the indices (or port names) of the ports of the
            two components that should be connected
        force_SLH (bool): If True, convert the result to an SLH object
        expand_simplify (bool): If the result is an SLH object, expand and
            simplify the circuit after each feedback connection is added

    Raises:
        ValueError: if `connections` includes any invalid entries

    Note:
        The list of `components` may contain duplicate entries, but in this
        case you must use a positional index in `connections` to refer to
        any duplicate component. Alternatively, use unique components by
        defining different labels.
    """
    # Concatenate all components into one block circuit; channel k of
    # component i then sits at global index offsets[i] + k.
    combined = Concatenation.create(*components)
    cdims = [c.cdim for c in components]
    offsets = _cumsum([0] + cdims[:-1])
    imap = []
    omap = []
    # Count duplicates so that ambiguous (non-index) component references
    # can be rejected below.
    counts = defaultdict(int)
    for component in components:
        counts[component] += 1
    for (ic, ((c1, op), (c2, ip))) in enumerate(connections):
        # check c1; convert to index int
        if not isinstance(c1, int):
            if counts[c1] > 1:
                raise ValueError(
                    "Component %s appears more than once in list of "
                    "components %r. You must reference it by index in the "
                    "connection %r" % (c1, components, connections[ic]))
            try:
                c1 = components.index(c1)
            except ValueError:
                raise ValueError(
                    "The component %s in connection %r is not in the list of "
                    "components %r" % (c1, connections[ic], components))
        else:
            if c1 < 0 or c1 >= len(components):
                raise ValueError(
                    "Invalid index %d in connection %r"
                    % (c1, connections[ic]))
        # check c2; convert to index int
        if not isinstance(c2, int):
            if counts[c2] > 1:
                raise ValueError(
                    "Component %s appears more than once in list of "
                    "components %r. You must reference it by index in the "
                    "connection %r" % (c2, components, connections[ic]))
            try:
                c2 = components.index(c2)
            except ValueError:
                raise ValueError(
                    "The component %s in connection %r is not in the list of "
                    "components %r" % (c2, connections[ic], components))
        else:
            if c2 < 0 or c2 >= len(components):
                raise ValueError(
                    "Invalid index %d in connection %r"
                    % (c2, connections[ic]))
        # check op; convert to index int (port names resolve via PORTSOUT)
        if not (isinstance(op, int)):
            try:
                op = components[c1].PORTSOUT.index(op)
            except AttributeError:
                raise ValueError(
                    "The component %s does not define PORTSOUT labels. "
                    "You cannot use the string %r to refer to a port"
                    % (components[c1], op))
            except ValueError:
                raise ValueError(
                    "The connection %r refers to an invalid output "
                    "channel %s for component %r"
                    % (connections[ic], op, components[c1]))
        else:
            if op < 0 or op >= components[c1].cdim:
                raise ValueError(
                    "Invalid output channel %d <0 or >=%d (cdim of %r) in %r"
                    % (op, components[c1].cdim, components[c1],
                       connections[ic]))
        # check ip; convert to index int (port names resolve via PORTSIN)
        if not (isinstance(ip, int)):
            try:
                ip = components[c2].PORTSIN.index(ip)
            except AttributeError:
                raise ValueError(
                    "The component %s does not define PORTSIN labels. "
                    "You cannot use the string %r to refer to a port"
                    % (components[c2], ip))
            except ValueError:
                raise ValueError(
                    "The connection %r refers to an invalid input channel "
                    "%s for component %r"
                    % (connections[ic], ip, components[c2]))
        else:
            if ip < 0 or ip >= components[c2].cdim:
                raise ValueError(
                    "Invalid input channel %d <0 or >=%d (cdim of %r) in %r"
                    % (ip, components[c2].cdim, components[c2],
                       connections[ic]))
        # Translate component-local ports into global channel indices.
        op_idx = offsets[c1] + op
        ip_idx = offsets[c2] + ip
        imap.append(ip_idx)
        omap.append(op_idx)
    n = combined.cdim
    nfb = len(connections)
    # Permute so every connected output/input pair occupies the trailing
    # nfb channels, where feedback() closes the loops one at a time.
    imapping = map_channels(
        {k: im for (k, im) in zip(range(n - nfb, n), imap)}, n)
    omapping = map_channels(
        {om: k for (k, om) in zip(range(n - nfb, n), omap)}, n)
    combined = omapping << combined << imapping
    if force_SLH:
        combined = combined.toSLH()
    for k in range(nfb):
        combined = combined.feedback()
        # Simplify after each feedback to keep the SLH expressions small.
        if isinstance(combined, SLH) and expand_simplify:
            combined = combined.expand().simplify_scalar()
    return combined
def get_node_type(dgtree):
    """Returns the type of the root node of a DGParentedTree."""
    if is_leaf(dgtree):
        return TreeNodeTypes.leaf_node
    root_label = dgtree.label()
    # An empty root label is only legal for the completely empty tree.
    if root_label == '':
        assert dgtree == DGParentedTree('', []), \
            "The tree has no root label, but isn't empty: {}".format(dgtree)
        return TreeNodeTypes.empty_tree
    if root_label in NUCLEARITY_LABELS:
        return TreeNodeTypes.nuclearity_node
    # Anything else must be a relation node on a known tree type.
    assert isinstance(dgtree, (RSTTree, DGParentedTree)), type(dgtree)
    return TreeNodeTypes.relation_node
def add(self, *tasks):
    """Interfaces the GraphNode `add` method"""
    # Unwrap each task to its underlying graph node before delegating.
    self.node.add(*(task.node for task in tasks))
    return self
def load_and_process_igor_model(self, marginals_file_name):
    """Set attributes by reading a generative model from IGoR marginal file.

    Sets attributes PVJ, PdelV_given_V, PdelJ_given_J, PinsVJ, and Rvj.

    Parameters
    ----------
    marginals_file_name : str
        File name for a IGoR model marginals file.
    """
    raw_marginals = read_igor_marginals_txt(marginals_file_name)[0]
    self.PinsVJ = raw_marginals['vj_ins']
    # Deletion profiles are stored transposed relative to IGoR's layout.
    self.PdelV_given_V = raw_marginals['v_3_del'].T
    self.PdelJ_given_J = raw_marginals['j_5_del'].T
    self.PVJ = np.multiply(raw_marginals['j_choice'].T, raw_marginals['v_choice']).T
    # Normalize the dinucleotide transition matrix column-wise.
    Rvj_raw = raw_marginals['vj_dinucl'].reshape((4, 4)).T
    self.Rvj = np.multiply(Rvj_raw, 1 / np.sum(Rvj_raw, axis=0))
def get_retry_after(self, response):
    """Get the value of Retry-After in seconds."""
    # A missing header means "no retry hint"; otherwise delegate parsing
    # (seconds vs HTTP-date) to parse_retry_after.
    header_value = response.getheader("Retry-After")
    return None if header_value is None else self.parse_retry_after(header_value)
def compile_dictionary(self, lang, wordlists, encoding, output):
    """Compile user dictionary."""
    # Track the current wordlist so the error handler can report which
    # file was being processed when something failed.
    wordlist = ''
    try:
        out_dir = os.path.dirname(output)
        if not os.path.exists(out_dir):
            os.makedirs(out_dir)
        if os.path.exists(output):
            os.remove(output)
        self.log("Compiling Dictionary...", 1)
        # Merge every word list into one de-duplicated set, normalizing
        # CRLF line endings.
        unique_words = set()
        for wordlist in wordlists:
            with open(wordlist, 'rb') as src:
                unique_words.update(
                    word.replace(b'\r', b'')
                    for word in src.read().split(b'\n')
                )
        # Write the sorted, newline-terminated word list.
        with open(output, 'wb') as dest:
            dest.write(b'\n'.join(sorted(unique_words)) + b'\n')
    except Exception:
        self.log('Problem compiling dictionary.', 0)
        self.log("Current wordlist '%s'" % wordlist)
        raise
def register_on_machine_state_changed(self, callback):
    """Set the callback function to consume on machine state changed events.

    Callback receives a IMachineStateChangedEvent object.

    Returns the callback_id
    """
    state_changed = library.VBoxEventType.on_machine_state_changed
    return self.event_source.register_callback(callback, state_changed)
def get_custom_fields(self):
    """Return a list of custom fields for this model"""
    # Custom fields are associated with a model via its ContentType.
    content_type = ContentType.objects.get_for_model(self)
    return CustomField.objects.filter(content_type=content_type)
def tarbell_update(command, args):
    """Update the current tarbell project.

    Stashes any local changes, pulls the latest blueprint from git, then
    restores the stashed changes.
    """
    with ensure_settings(command, args) as settings, ensure_project(command, args) as site:
        puts("Updating to latest blueprint\n")
        # All git operations run inside the project's base directory.
        git = sh.git.bake(_cwd=site.base.base_dir)
        # stash then pull
        puts(colored.yellow("Stashing local changes"))
        puts(git.stash())
        puts(colored.yellow("Pull latest changes"))
        puts(git.pull())
        # need to pop any local changes back to get back on the original branch
        # this may behave oddly if you have old changes stashed
        if git.stash.list():
            puts(git.stash.pop())
def new_pos(self, html_div):
    """factory method pattern"""
    # Build the position via the class-provided factory, wire up its
    # movement bindings, and track it before handing it back.
    position = self.Position(self, html_div)
    position.bind_mov()
    self.positions.append(position)
    return position
def duplicate_node(self, source_node_id, destination_node_id):
    """Duplicate a node

    :param node_id: Node identifier
    :returns: New node instance
    """
    source_node = self.get_node(source_node_id)
    destination_node = self.get_node(destination_node_id)
    # Not a Dynamips router: defer to the generic duplication logic.
    if not hasattr(source_node, "startup_config_path"):
        return (yield from super().duplicate_node(source_node_id, destination_node_id))
    # Copy the IOS startup/private configs to the duplicate; a missing or
    # unreadable file is treated as "no config" rather than an error.
    try:
        with open(source_node.startup_config_path) as f:
            startup_config = f.read()
    except OSError:
        startup_config = None
    try:
        with open(source_node.private_config_path) as f:
            private_config = f.read()
    except OSError:
        private_config = None
    yield from self.set_vm_configs(destination_node, {"startup_config_content": startup_config, "private_config_content": private_config})
    # Force refresh of the name in configuration files: rename to the
    # source name and back so the config files pick up the new name.
    new_name = destination_node.name
    yield from destination_node.set_name(source_node.name)
    yield from destination_node.set_name(new_name)
    return destination_node
def command(cmd):
    """Execute command and raise an exception upon an error.

    >>> 'README' in command('ls')
    True
    >>> command('nonexistingcommand')  # doctest: +ELLIPSIS
    Traceback (most recent call last):
    ...
    SdistCreationError
    """
    status, out = commands.getstatusoutput(cmd)
    # BUG FIX: the original used ``status is not 0``, which tests object
    # identity rather than equality. That relies on CPython's small-int
    # caching (implementation-defined) and triggers a SyntaxWarning on
    # modern CPython; use a value comparison instead.
    if status != 0:
        logger.error("Something went wrong:")
        logger.error(out)
        raise SdistCreationError()
    return out
def is_all_field_none(self):
    """:rtype: bool"""
    # True only when every tracked field is still unset (None); the
    # generator short-circuits on the first non-None field, matching the
    # original early-return cascade.
    checked_fields = (
        self._id_,
        self._created,
        self._updated,
        self._status,
        self._sub_status,
        self._type_,
        self._counterparty_alias,
        self._amount_reward,
    )
    return all(field is None for field in checked_fields)
def forge_fdf(pdf_form_url=None, fdf_data_strings=[], fdf_data_names=[], fields_hidden=[], fields_readonly=[], checkbox_checked_name=b"Yes"):
    """Generates fdf string from fields specified.

    * pdf_form_url (default: None): just the url for the form.
    * fdf_data_strings (default: []): array of (string, value) tuples for
      the form fields (or dicts). Value is passed as a UTF-16 encoded
      string, unless True/False, in which case it is assumed to be a
      checkbox (and passes names, '/Yes' (by default) or '/Off').
    * fdf_data_names (default: []): array of (string, value) tuples for
      the form fields (or dicts). Value is passed to FDF as a name,
      '/value'.
    * fields_hidden (default: []): list of field names that should be set
      hidden.
    * fields_readonly (default: []): list of field names that should be
      set readonly.
    * checkbox_checked_name (default: b"Yes"): by default a checked
      checkbox gets passed the value "/Yes". You may find that the
      default does not work with your PDF, in which case you might want
      to try "On".

    The result is a string suitable for writing to a .fdf file.

    NOTE(review): the mutable default arguments are kept for interface
    compatibility; they are never mutated here.
    """
    fdf = [b'%FDF-1.2\x0a%\xe2\xe3\xcf\xd3\x0d\x0a']
    fdf.append(b'1 0 obj\x0a<</FDF')
    fdf.append(b'<</Fields[')
    fdf.append(b''.join(handle_data_strings(fdf_data_strings, fields_hidden, fields_readonly, checkbox_checked_name)))
    fdf.append(b''.join(handle_data_names(fdf_data_names, fields_hidden, fields_readonly)))
    if pdf_form_url:
        # BUG FIX: bytes.join takes a single iterable argument; the
        # original passed three positional arguments, raising TypeError
        # whenever a pdf_form_url was supplied.
        fdf.append(b''.join([b'/F (', smart_encode_str(pdf_form_url), b')\x0a']))
    fdf.append(b']\x0a')
    fdf.append(b'>>\x0a')
    fdf.append(b'>>\x0aendobj\x0a')
    fdf.append(b'trailer\x0a\x0a<<\x0a/Root 1 0 R\x0a>>\x0a')
    fdf.append(b'%%EOF\x0a\x0a')
    return b''.join(fdf)
def insert(self, stim, position):
    """Inserts a new stimulus into the list at the given position

    :param stim: stimulus to insert into protocol
    :type stim: :class:`StimulusModel<sparkle.stim.stimulus_model.StimulusModel>`
    :param position: index (row) of location to insert to
    :type position: int
    """
    # -1 means "append": translate it to the index one past the last row.
    target = self.rowCount() if position == -1 else position
    # Prime the stimulus with this protocol's reference voltage and
    # calibration data before it joins the test list.
    stim.setReferenceVoltage(self.caldb, self.calv)
    stim.setCalibration(self.calibrationVector, self.calibrationFrequencies, self.calibrationFrange)
    self._tests.insert(target, stim)
def uid(self):
    """Return the user id that the process will run as

    :rtype: int
    """
    # Lazily resolve and cache the uid: a configured daemon user wins,
    # otherwise fall back to the current process's uid.
    if not self._uid:
        configured_user = self.config.daemon.user
        if configured_user:
            self._uid = pwd.getpwnam(configured_user).pw_uid
        else:
            self._uid = os.getuid()
    return self._uid
def zero_pad(matrix, to_length):
    """Zero pads along the 0th dimension to make sure the utterance array
    x is of length to_length.

    :param matrix: array whose first dimension is at most `to_length`
    :param to_length: desired length of the 0th dimension
    :returns: a new float array of shape (to_length,) + matrix.shape[1:]
        with `matrix` in the leading rows and zeros after
    :raises ValueError: if matrix.shape[0] > to_length
    """
    # BUG FIX: the original had ``assert matrix.shape[0] <= to_length``
    # before the explicit check, so the intended ValueError branch was
    # unreachable (an AssertionError fired first, and under ``-O`` the
    # check vanished entirely). Keep only the explicit ValueError path.
    if matrix.shape[0] > to_length:
        logger.error("zero_pad cannot be performed on matrix with shape {}"
                     " to length {}".format(matrix.shape[0], to_length))
        raise ValueError(
            "zero_pad cannot be performed on matrix with shape {} to length {}".format(
                matrix.shape[0], to_length))
    result = np.zeros((to_length,) + matrix.shape[1:])
    result[:matrix.shape[0]] = matrix
    return result
def mad(data):
    r"""Median absolute deviation

    This method calculates the median absolute deviation of the input data.

    Parameters
    ----------
    data : np.ndarray
        Input data array

    Returns
    -------
    float MAD value

    Examples
    --------
    >>> from modopt.math.stats import mad
    >>> a = np.arange(9).reshape(3, 3)
    >>> mad(a)
    2.0

    Notes
    -----
    The MAD is calculated as follows:

    .. math::
        \mathrm{MAD} = \mathrm{median}\left(|X_i - \mathrm{median}(X)|\right)
    """
    # Deviation of every sample from the central (median) value.
    center = np.median(data)
    deviations = np.abs(data - center)
    return np.median(deviations)
def _set_as_cached ( self , item , cacher ) :
"""Set the _ cacher attribute on the calling object with a weakref to
cacher .""" | self . _cacher = ( item , weakref . ref ( cacher ) ) |
def prepare_cew_for_windows():
    """Copy files needed to compile the ``cew`` Python C extension on Windows.

    A glorious day, when Microsoft will offer a decent support
    for Python and shared libraries,
    all this mess will be unnecessary and it should be removed.
    May that day come soon.

    Return ``True`` if successful, ``False`` otherwise.

    :rtype: bool
    """
    try:
        # copy espeak_sapi.dll to C:\Windows\System32\espeak.dll
        espeak_dll_win_path = "C:\\Windows\\System32\\espeak.dll"
        espeak_dll_dst_path = "aeneas\\cew\\espeak.dll"
        # Candidate install locations for eSpeak, checked in order.
        espeak_dll_src_paths = ["C:\\aeneas\\eSpeak\\espeak_sapi.dll", "C:\\sync\\eSpeak\\espeak_sapi.dll", "C:\\Program Files\\eSpeak\\espeak_sapi.dll", "C:\\Program Files (x86)\\eSpeak\\espeak_sapi.dll", ]
        if os.path.exists(espeak_dll_dst_path):
            print("[INFO] Found eSpeak DLL in %s" % espeak_dll_dst_path)
        else:
            found = False
            copied = False
            for src_path in espeak_dll_src_paths:
                if os.path.exists(src_path):
                    found = True
                    print("[INFO] Copying eSpeak DLL from %s into %s" % (src_path, espeak_dll_dst_path))
                    try:
                        shutil.copyfile(src_path, espeak_dll_dst_path)
                        copied = True
                        print("[INFO] Copied eSpeak DLL")
                    except:
                        # Deliberate best-effort: a failed copy is reported
                        # via the `copied` flag below, not raised.
                        pass
                    break
            # Missing or uncopyable DLL is only a warning: the extension
            # can still be set up manually by the user.
            if not found:
                print("[WARN] Unable to find the eSpeak DLL, probably because you installed eSpeak in a non-standard location.")
                print("[WARN] If you want to run aeneas with the C extension cew,")
                print("[WARN] please copy espeak_sapi.dll from your eSpeak directory to %s" % espeak_dll_win_path)
                # print("[WARN] and run the aeneas setup again.")
                # return False
            elif not copied:
                print("[WARN] Unable to copy the eSpeak DLL, probably because you are not running with admin privileges.")
                print("[WARN] If you want to run aeneas with the C extension cew,")
                print("[WARN] please copy espeak_sapi.dll from your eSpeak directory to %s" % espeak_dll_win_path)
                # print("[WARN] and run the aeneas setup again.")
                # return False
        # NOTE: espeak.lib is needed only while compiling the C extension, not when using it
        #       so, we copy it in the current working directory from the included thirdparty\ directory
        # NOTE: PREV: copy thirdparty\espeak.lib to $PYTHON\libs\espeak.lib
        # NOTE: PREV: espeak_lib_dst_path = os.path.join(sys.prefix, "libs", "espeak.lib")
        espeak_lib_src_path = os.path.join(os.path.dirname(__file__), "thirdparty", "espeak.lib")
        espeak_lib_dst_path = os.path.join(os.path.dirname(__file__), "espeak.lib")
        if os.path.exists(espeak_lib_dst_path):
            print("[INFO] Found eSpeak LIB in %s" % espeak_lib_dst_path)
        else:
            try:
                print("[INFO] Copying eSpeak LIB into %s" % espeak_lib_dst_path)
                shutil.copyfile(espeak_lib_src_path, espeak_lib_dst_path)
                print("[INFO] Copied eSpeak LIB")
            except:
                # Unlike the DLL, the LIB is required to compile, so a
                # failed copy aborts with False.
                print("[WARN] Unable to copy the eSpeak LIB, probably because you are not running with admin privileges.")
                print("[WARN] If you want to compile the C extension cew,")
                print("[WARN] please copy espeak.lib from the thirdparty directory into %s" % espeak_lib_dst_path)
                print("[WARN] and run the aeneas setup again.")
                return False
        # if here, we have completed the setup, return True
        return True
    except Exception as e:
        print("[WARN] Unexpected exception while preparing cew: %s" % e)
        return False
def reset(self):
    """Deactivate all cells."""
    # Each activity set becomes a fresh zero-length uint32 index array.
    for attr in ("activeCells", "activeDeltaSegments", "activeFeatureLocationSegments"):
        setattr(self, attr, np.empty(0, dtype="uint32"))
def convert_destination_to_id(destination_node, destination_port, nodes):
    """Convert a destination to device and port ID

    :param str destination_node: Destination node name
    :param str destination_port: Destination port name
    :param list nodes: list of nodes from :py:meth:`generate_nodes`
    :return: dict containing device ID, device name and port ID
    :rtype: dict
    """
    dev_id = None
    dev_name = None
    prt_id = None
    if destination_node == 'NIO':
        # NIO destinations live on Cloud nodes and match their port names
        # case-insensitively. All nodes are scanned; a later matching
        # cloud overwrites an earlier one (mirrors the historic behavior).
        for node in nodes:
            if node['type'] != 'Cloud':
                continue
            for port in node['ports']:
                if destination_port.lower() == port['name'].lower():
                    dev_id = node['id']
                    dev_name = node['properties']['name']
                    prt_id = port['id']
                    break
    else:
        # Regular nodes match by exact name; stop at the first node hit
        # and the first port hit within it.
        for node in nodes:
            if destination_node == node['properties']['name']:
                dev_id = node['id']
                dev_name = destination_node
                for port in node['ports']:
                    if destination_port == port['name']:
                        prt_id = port['id']
                        break
                break
    return {'id': dev_id, 'name': dev_name, 'pid': prt_id}
def _phi2deriv ( self , R , phi = 0. , t = 0. ) :
"""NAME :
_ phi2deriv
PURPOSE :
evaluate the second azimuthal derivative
INPUT :
phi
OUTPUT :
d2phi / dphi2
HISTORY :
2016-06-02 - Written - Bovy ( UofT )""" | return self . _Pot . phi2deriv ( R , 0. , phi = phi , t = t , use_physical = False ) |
def get_distribution_list(self, dl_description):
    """
    :param dl_description: a DistributionList specifying either:
        - id: the account_id
        - name: the name of the list
    :returns: the DistributionList
    """
    # Turn the partial description into a server-side selector, query the
    # API, and materialize the response as a DistributionList object.
    selector = dl_description.to_selector()
    response = self.request_single('GetDistributionList', {'dl': selector})
    return zobjects.DistributionList.from_dict(response)
def image(self):
    """Returns an image array of current render window"""
    # If the render window has gone away, fall back to the last capture.
    if not hasattr(self, 'ren_win') and hasattr(self, 'last_image'):
        return self.last_image
    win_filter = vtk.vtkWindowToImageFilter()
    win_filter.SetInput(self.ren_win)
    win_filter.ReadFrontBufferOff()
    # Capture alpha only when a transparent background was requested.
    if self.image_transparent_background:
        win_filter.SetInputBufferTypeToRGBA()
    else:
        win_filter.SetInputBufferTypeToRGB()
    return self._run_image_filter(win_filter)
def get_optimized_symbol(executor):
    """Take an executor's underlying symbol graph and return its generated
    optimized version.

    Parameters
    ----------
    executor :
        An executor for which you want to see an optimized symbol. Getting
        an optimized symbol is useful to compare and verify the work
        TensorRT has done against a legacy behaviour.

    Returns
    -------
    symbol : nnvm::Symbol
        The nnvm symbol optimized.
    """
    handle = SymbolHandle()
    try:
        # Ask the engine for the TRT-optimized graph and wrap the raw
        # handle in a Symbol.
        check_call(_LIB.MXExecutorGetOptimizedSymbol(executor.handle, ctypes.byref(handle)))
        return sym.Symbol(handle=handle)
    except MXNetError:
        logging.error('Error while trying to fetch TRT optimized symbol for graph. Please ensure '
                      'build was compiled with MXNET_USE_TENSORRT enabled.')
        raise
def check_value(self, value_hash, value, salt=''):
    '''Checks the specified hash value against the hash of the provided
    salt and value.

    An example usage of :class:`check_value` would be::

        val_hash = hashing.hash_value('mysecretdata', salt='abcd')
        if hashing.check_value(val_hash, 'mysecretdata', salt='abcd'):
            # do something special

    :param value_hash: The hash value to check against
    :param value: The value we want hashed to compare
    :param salt: The salt to use when generating the hash of ``value``. Default is ''.
    :return: True if equal, False otherwise
    :rtype: bool
    '''
    # Re-hash the candidate with the same salt and compare.
    computed = self.hash_value(value, salt=salt)
    return computed == value_hash
def read_backend(self, client=None):
    '''The read :class:`stdnet.BackendDataServer` for this instance.
    It can be ``None``.'''
    # Without a session there is no backend to resolve.
    session = self.session
    if not session:
        return None
    return session.model(self).read_backend
def deviator_stress(self):
    """returns the deviatoric component of the stress

    :raises ValueError: if the stress tensor is not symmetric
    """
    if not self.is_symmetric:
        # BUG FIX: the original did ``raise warnings.warn(...)``;
        # warnings.warn() returns None, so that line raised
        # ``TypeError: exceptions must derive from BaseException`` instead
        # of the intended error. Raise a real exception with the message.
        raise ValueError("The stress tensor is not symmetric, "
                         "so deviator stress will not be either")
    # Deviator = full stress minus the hydrostatic (mean) part.
    return self - self.mean_stress * np.eye(3)
async def build_pool_restart_request(submitter_did: str, action: str, datetime: str) -> str:
    """Builds a POOL_RESTART request

    :param submitter_did: Id of Identity that sender transaction
    :param action: Action that pool has to do after received transaction.
        Can be "start" or "cancel"
    :param datetime: Time when pool must be restarted.
    """
    logger = logging.getLogger(__name__)
    # BUG FIX: the original format string contained three %r placeholders
    # but passed no arguments, so the log line printed literal "%r".
    logger.debug("build_pool_restart_request: >>> submitter_did: %r, action: %r, datetime: %r",
                 submitter_did, action, datetime)
    if not hasattr(build_pool_restart_request, "cb"):
        logger.debug("build_pool_restart_request: Creating callback")
        build_pool_restart_request.cb = create_cb(CFUNCTYPE(None, c_int32, c_int32, c_char_p))
    c_submitter_did = c_char_p(submitter_did.encode('utf-8'))
    c_action = c_char_p(action.encode('utf-8'))
    # datetime may legitimately be empty/None (e.g. for a "cancel" action).
    c_datetime = c_char_p(datetime.encode('utf-8')) if datetime else None
    request_json = await do_call('indy_build_pool_restart_request',
                                 c_submitter_did, c_action, c_datetime,
                                 build_pool_restart_request.cb)
    res = request_json.decode()
    # BUG FIX: the exit log previously named the wrong function
    # ("build_pool_upgrade_request"), a copy-paste slip.
    logger.debug("build_pool_restart_request: <<< res: %r", res)
    return res
def shared_edges(faces_a, faces_b):
    """Find the edges which are present in both sets of faces.

    Parameters
    ----------
    faces_a : (n, 3) int, first set of faces
    faces_b : (m, 3) int, second set of faces

    Returns
    -------
    shared : (p, 2) int, edges common to both sets
    """
    # Canonicalize each edge by sorting its two vertex indices so that
    # (a, b) and (b, a) compare equal across the two sets.
    edges_a, edges_b = (np.sort(faces_to_edges(f), axis=1)
                        for f in (faces_a, faces_b))
    return grouping.boolean_rows(edges_a, edges_b, operation=np.intersect1d)
def inv(self):
    """Return the inverse of this Matrix.

    Diagonal matrices are inverted element-wise; dense matrices fall back
    to ``numpy.linalg.inv``.

    Returns
    -------
    Matrix : the inverse of self

    Raises
    ------
    Exception : if element-wise inversion of a diagonal matrix produces
        non-finite values (i.e. a zero appeared on the diagonal).
    """
    if self.isdiagonal:
        inv = 1.0 / self.__x
        if np.any(~np.isfinite(inv)):
            # Report which rows produced inf/nan so the user can locate the
            # offending (zero) diagonal entries.
            # BUG FIX: removed leftover debug `np.savetxt("testboo.dat", idx)`
            # that silently wrote a file into the current working directory.
            idx = np.isfinite(inv)
            invalid = [self.row_names[i] for i in range(idx.shape[0]) if not idx[i]]
            raise Exception("Matrix.inv has produced invalid floating points " +
                            " for the following elements:" + ','.join(invalid))
        return type(self)(x=inv, isdiagonal=True, row_names=self.row_names,
                          col_names=self.col_names, autoalign=self.autoalign)
    else:
        return type(self)(x=la.inv(self.__x), row_names=self.row_names,
                          col_names=self.col_names, autoalign=self.autoalign)
def setAll(self, pairs):
    """Set multiple parameters at once.

    :param pairs: iterable of (key, value) tuples, each applied via ``set``
    :return: self, to allow call chaining
    """
    for key, value in pairs:
        self.set(key, value)
    return self
def listing(source: list, ordered: bool = False, expand_full: bool = False):
    """Render *source* as an ordered or unordered list in the report.

    Each element is converted to a string representation for display.

    :param source: the iterable to display as a list.
    :param ordered: when True render a numbered list; otherwise (the
        default) an unordered bulleted list is created.
    :param expand_full: when True the list expands to fill the screen
        horizontally; when False (default) it is constrained to the
        centered text column, keeping it aligned with the text flow.
    """
    report = _get_report()
    rendered = render.listing(source=source, ordered=ordered, expand_full=expand_full)
    report.append_body(rendered)
    report.stdout_interceptor.write_source('[ADDED] Listing\n')
def pack_small_tensors(tower_grads, max_bytes=0):
    """Concatenate gradients together more intelligently.

    Does binpacking: runs of consecutive "small" gradients (scanned from the
    end of the list) are grouped until each group's total size exceeds
    ``max_bytes``; each group is then packed into a single tensor per device
    via ``pack_range``.

    Args:
        tower_grads: List (one per device) of lists of (gradient, variable)
            tuples. All inner lists are expected to have the same
            length and ordering.
        max_bytes: Int giving max number of bytes in a tensor that
            may be considered small.

    Returns:
        (new_tower_grads, packing) where ``packing`` maps "<dev>:<pack idx>"
        keys to the information needed to unpack later, or
        (tower_grads, None) when nothing was packed.
    """
    assert max_bytes >= 0
    # Device 0 is used as the reference for sizes; all devices are assumed
    # to hold the same variables in the same order.
    orig_grads = [g for g, _ in tower_grads[0]]
    # Check to make sure sizes are accurate; not entirely important
    assert all(g.dtype == tf.float32 for g in orig_grads)
    # 4 bytes per float32 element.
    sizes = [4 * g.shape.num_elements() for g in orig_grads]
    print_stats(sizes)
    small_ranges = []
    large_indices = []
    new_sizes = []

    def end_interval(indices, small_ranges, large_indices):
        # A run of a single tensor is not worth packing; treat it as large.
        if len(indices) > 1:
            small_ranges.insert(0, [indices[0], indices[-1]])
        else:
            large_indices.insert(0, indices[0])

    cur_range = []
    cur_size = 0
    # Scan from the end so ranges built with insert(0, ...) come out in
    # ascending index order.
    for i, s in reversed(list(enumerate(sizes))):
        if cur_size > max_bytes:
            end_interval(cur_range, small_ranges, large_indices)
            new_sizes.insert(0, cur_size)
            cur_range = []
            cur_size = 0
        cur_range.insert(0, i)
        cur_size += s
    end_interval(cur_range, small_ranges, large_indices)
    new_sizes.insert(0, cur_size)
    print_stats(new_sizes)
    num_gv = len(orig_grads)
    packing = {}
    if len(small_ranges):
        new_tower_grads = []
        for dev_idx, gv_list in enumerate(tower_grads):
            assert len(gv_list) == num_gv, ("Possible cause: "
                                            "Networks constructed on different workers "
                                            "don't have the same number of variables. "
                                            "If you use tf.GraphKeys or tf.global_variables() "
                                            "with multiple graphs per worker during network "
                                            "construction, you need to use "
                                            "appropriate scopes, see "
                                            "https://github.com/ray-project/ray/issues/3136")
            new_gv_list = []
            for r in small_ranges:
                # The key records device and position so unpacking can later
                # restore the original gradients.
                key = "%d:%d" % (dev_idx, len(new_gv_list))
                new_gv_list.append((pack_range(key, packing, gv_list, r),
                                    "packing_var_placeholder"))
            for i in large_indices:
                new_gv_list.append(gv_list[i])
            new_tower_grads.append(new_gv_list)
        return new_tower_grads, packing
    else:
        return tower_grads, None
def hardware_flexport_flexport_type_instance ( self , ** kwargs ) :
"""Auto Generated Code""" | config = ET . Element ( "config" )
hardware = ET . SubElement ( config , "hardware" , xmlns = "urn:brocade.com:mgmt:brocade-hardware" )
flexport = ET . SubElement ( hardware , "flexport" )
id_key = ET . SubElement ( flexport , "id" )
id_key . text = kwargs . pop ( 'id' )
flexport_type = ET . SubElement ( flexport , "flexport_type" )
instance = ET . SubElement ( flexport_type , "instance" )
instance . text = kwargs . pop ( 'instance' )
callback = kwargs . pop ( 'callback' , self . _callback )
return callback ( config ) |
def buildSources(self, sourceTime=None):
    """Return a dictionary of date/time tuples based on the keys found in
    ``self.re_sources``.

    The current time (or ``sourceTime`` when given) supplies default
    values; any field defined by a ``re_sources`` entry overrides the
    corresponding default in the generated tuple.
    """
    if sourceTime is None:
        sourceTime = time.localtime()
    (yr, mth, dy, hr, mn, sec, wd, yd, isdst) = sourceTime
    defaults = {'yr': yr, 'mth': mth, 'dy': dy,
                'hr': hr, 'mn': mn, 'sec': sec}
    sources = {}
    for item, source in self.re_sources.items():
        # Overlay the source's fields on top of the defaults.
        merged = {k: (source[k] if k in source else defaults[k]) for k in defaults}
        sources[item] = (merged['yr'], merged['mth'], merged['dy'],
                         merged['hr'], merged['mn'], merged['sec'],
                         wd, yd, isdst)
    return sources
def paintEvent(self, event):
    """Overloads the paint event to paint additional hint information if no
    text is set on the editor.

    :param event: <QPaintEvent>
    """
    super(XLineEdit, self).paintEvent(event)
    # paint the hint text if not text is set
    if self.text() and not (self.icon() and not self.icon().isNull()):
        return
    # paint the hint text
    with XPainter(self) as painter:
        painter.setPen(self.hintColor())
        icon = self.icon()
        left, top, right, bottom = self.getTextMargins()
        # Available drawing area, shrunk by the text margins.
        w = self.width()
        h = self.height() - 2
        w -= (right + left)
        h -= (bottom + top)
        if icon and not icon.isNull():
            # Draw the icon vertically centered, then reserve its width.
            size = icon.actualSize(self.iconSize())
            x = self.cornerRadius() + 2
            y = (self.height() - size.height()) / 2.0
            painter.drawPixmap(x, y, icon.pixmap(size.width(), size.height()))
            w -= size.width() - 2
        else:
            # NOTE(review): the dump's indentation is ambiguous here; this
            # placement (button width only subtracted in the no-icon branch)
            # matches the upstream XLineEdit source -- confirm.
            x = 6 + left
            w -= self._buttonWidth
        y = 2 + top
        # create the elided hint
        if not self.text() and self.hint():
            rect = self.cursorRect()
            metrics = QFontMetrics(self.font())
            hint = metrics.elidedText(self.hint(), Qt.ElideRight, w)
            align = self.alignment()
            if align & Qt.AlignHCenter:
                x = 0
            else:
                x = rect.center().x()
            painter.drawText(x, y, w, h, align, hint)
def table_cells_2_spans(table, spans):
    """Convert the table into a single list of spans, for consistency.

    Combines the table data with the span data into one uniform type:
    every normal cell becomes a span of just 1 column and 1 row, so
    downstream code can treat all cells the same way.

    Parameters
    ----------
    table : list of lists of str
    spans : list of lists of int

    Returns
    -------
    table : list of lists of lists of int, sorted list of spans
    """
    # Cells already covered by an explicit span are skipped; every other
    # cell becomes its own single-cell span.
    cell_spans = [[[row, col]]
                  for row in range(len(table))
                  for col in range(len(table[row]))
                  if not get_span(spans, row, col)]
    cell_spans.extend(spans)
    return sorted(cell_spans)
def add_organization(db, organization):
    """Add an organization to the registry.

    Checks first whether the organization is already on the registry; when
    it is not found, the new organization is added.

    :param db: database manager
    :param organization: name of the organization
    :raises InvalidValueError: when organization is None or an empty string
    :raises AlreadyExistsError: when the organization already exists in
        the registry
    """
    with db.connect() as session:
        try:
            add_organization_db(session, organization)
        except ValueError as exc:
            # Normalize low-level validation failures to the public API error.
            raise InvalidValueError(exc)
def data_as_matrix(self, keys=None, return_basis=False, basis=None, alias=None,
                   start=None, stop=None, step=None, window_length=None,
                   window_step=1,
                   ):
    """Provide a feature matrix, given a list of data items.

    I think this will probably fail if there are striplogs in the data
    dictionary for this well.

    TODO:
        Deal with striplogs and other data, if present.

    Args:
        keys (list): List of the logs to export from the data dictionary.
        return_basis (bool): Whether or not to return the basis that was
            used.
        basis (ndarray): The basis to use. Default is to survey all curves
            to find a common basis.
        alias (dict): A mapping of alias names to lists of mnemonics.
        start (float): Optionally override the start of whatever basis
            you find or (more likely) is surveyed.
        stop (float): Optionally override the stop of whatever basis
            you find or (more likely) is surveyed.
        step (float): Override the step in the basis from survey_basis.
        window_length (int): The number of samples to return around each
            sample. This will provide one or more shifted versions of the
            features.
        window_step (int): How much to step the offset versions.

    Returns:
        ndarray. or ndarray, ndarray if return_basis=True
    """
    if keys is None:
        # Default: every Curve-typed item in the data dictionary.
        keys = [k for k, v in self.data.items() if isinstance(v, Curve)]
    else:  # Only look at the alias list if keys were passed.
        if alias is not None:
            _keys = []
            for k in keys:
                if k in alias:
                    added = False
                    # Use the first alias that actually exists in the data;
                    # fall back to the original key when none do.
                    for a in alias[k]:
                        if a in self.data:
                            _keys.append(a)
                            added = True
                            break
                    if not added:
                        _keys.append(k)
                else:
                    _keys.append(k)
            keys = _keys
    if basis is None:
        basis = self.survey_basis(keys=keys, step=step)
    # Get the data, or None if a curve is missing.
    data = [self.data.get(k) for k in keys]
    # Now cast to the correct basis, and replace any missing curves with
    # an empty Curve. The sklearn imputer will deal with it. We will change
    # the elements in place.
    for i, d in enumerate(data):
        if d is not None:
            data[i] = d.to_basis(basis=basis)
            # Allow user to override the start and stop from the survey.
            if (start is not None) or (stop is not None):
                data[i] = data[i].to_basis(start=start, stop=stop, step=step)
                # Subsequent (and missing) curves use this narrowed basis.
                basis = data[i].basis
        else:  # Empty_like gives unpredictable results
            data[i] = Curve(np.full(basis.shape, np.nan), basis=basis)
    if window_length is not None:
        # Build rolling-window (shifted) versions of every curve.
        d_new = []
        for d in data:
            r = d._rolling_window(window_length, func1d=utils.null,
                                  step=window_step, return_rolled=False,
                                  )
            d_new.append(r.T)
        data = d_new
    if return_basis:
        return np.vstack(data).T, basis
    else:
        return np.vstack(data).T
async def wait_done(self) -> int:
    """Wait for the subprocess run to complete.

    Returns:
        The exit code of the subprocess.

    Raises:
        SublemonLifetimeError: if the subprocess exited abnormally with a
            ``None`` exit code.
    """
    await self._done_running_evt.wait()
    exit_code = self._exit_code
    if exit_code is None:
        raise SublemonLifetimeError(
            'Subprocess exited abnormally with `None` exit code')
    return exit_code
def reset_weights(self):
    """Properly initialize the weights of every model component."""
    # Reset each stage of the model pipeline in order.
    for module in (self.input_block, self.backbone,
                   self.action_head, self.value_head):
        module.reset_weights()
def parse_config_file(job, config_file, max_cores=None):
    """Parse the config file and spawn a ProTECT job for every input sample.

    :param str config_file: Path to the input config file
    :param int max_cores: The maximum cores to use for any single
        high-compute job.
    """
    sample_set, univ_options, tool_inputs = _parse_config_file(
        job, config_file, max_cores)
    # Launch one follow-on ProTECT job per patient in the sample set.
    for patient_id, patient_data in sample_set.items():
        job.addFollowOnJobFn(launch_protect, patient_data, univ_options,
                             tool_inputs)
    return None
def insert(self, instance):
    """Insert a unit of work into MongoDB.

    :param instance: the :class:`UnitOfWork` to persist
    :return: the ``_id`` of the inserted document
    :raises DuplicateKeyError: if such a record already exists
    """
    # Validate with an explicit raise rather than `assert`, which is
    # silently stripped when Python runs with -O.
    if not isinstance(instance, UnitOfWork):
        raise TypeError('insert expects a UnitOfWork instance, got %s'
                        % type(instance).__name__)
    collection = self.ds.connection(COLLECTION_UNIT_OF_WORK)
    try:
        return collection.insert_one(instance.document).inserted_id
    except MongoDuplicateKeyError as e:
        exc = DuplicateKeyError(instance.process_name, instance.start_timeperiod,
                                instance.start_id, instance.end_id, e)
        raise exc
def search(self, args):
    """Execute a search.

    flickr: (credsfile), search, (arg1)=(val1), (arg2)=(val2)...

    :param args: iterable of "key=value" strings forwarded as keyword
        arguments to the Flickr photo search API.
    """
    kwargs = {}
    for arg in args:
        # Split on the first '=' only, so values that themselves contain
        # '=' survive intact; the original split on every '=' and crashed
        # on such values with "too many values to unpack".
        key, value = arg.split('=', 1)
        kwargs[key] = value
    return self._paged_api_call(self.flickr.photos_search, kwargs)
def power_off(self, context, ports):
    """Power off the remote vm.

    :param models.QualiDriverModels.ResourceRemoteCommandContext context:
        the context the command runs on
    :param list[string] ports: the ports of the connection between the
        remote resource and the local resource, NOT IN USE!!!
    """
    command = self.vm_power_management_command.power_off
    return self._power_command(context, ports, command)
def _cache_get_for_dn(self, dn: str) -> Dict[str, bytes]:
    """Retrieve the cached LDAP state for *dn*.

    Object state is cached; when an update is required it is simulated on
    this cache so that rollback information stays correct. This function
    (re)loads the object's state from the LDAP server.
    """
    # No cached item: fetch the object with all user ('*') and
    # operational ('+') attributes from LDAP.
    self._do_with_retry(
        lambda obj: obj.search(dn, '(objectclass=*)', ldap3.BASE,
                               attributes=['*', '+']))
    results = self._obj.response
    if not results:
        raise NoSuchObject("No results finding current value")
    if len(results) > 1:
        raise RuntimeError("Too many results finding current value")
    first, = results
    return first['raw_attributes']
def _sanity_check_block_pairwise_constraints(ir_blocks):
    """Assert that every adjacent pair of IR blocks obeys the invariants."""
    for first_block, second_block in pairwise(ir_blocks):
        first_is_mark = isinstance(first_block, MarkLocation)
        # Always Filter before MarkLocation, never after.
        if first_is_mark and isinstance(second_block, Filter):
            raise AssertionError(
                u'Found Filter after MarkLocation block: {}'.format(ir_blocks))
        # There's no point in marking the same location twice in a row.
        if first_is_mark and isinstance(second_block, MarkLocation):
            raise AssertionError(
                u'Found consecutive MarkLocation blocks: {}'.format(ir_blocks))
        # Traverse blocks with optional=True must be immediately followed by
        # a MarkLocation, CoerceType or Filter block.
        if isinstance(first_block, Traverse) and first_block.optional:
            if not isinstance(second_block, (MarkLocation, CoerceType, Filter)):
                raise AssertionError(
                    u'Expected MarkLocation, CoerceType or Filter after Traverse '
                    u'with optional=True. Found: {}'.format(ir_blocks))
        # Backtrack blocks with optional=True must be immediately followed by
        # a MarkLocation block.
        if isinstance(first_block, Backtrack) and first_block.optional:
            if not isinstance(second_block, MarkLocation):
                raise AssertionError(
                    u'Expected MarkLocation after Backtrack with optional=True, '
                    u'but none was found: {}'.format(ir_blocks))
        # Recurse blocks must be immediately preceded by a MarkLocation or
        # Backtrack block.
        if isinstance(second_block, Recurse):
            if not isinstance(first_block, (MarkLocation, Backtrack)):
                raise AssertionError(
                    u'Expected MarkLocation or Backtrack before Recurse, but none '
                    u'was found: {}'.format(ir_blocks))
def maximum_position_of_labels(image, labels, indices):
    """Return the (i, j) coordinates of the maximum value within each object.

    image   - measure the maximum within this image
    labels  - use the objects within this labels matrix
    indices - label #s to measure

    The result is a 2 x n array: column x holds the i (row 0) and j
    (row 1) coordinates of the maximum for the x'th requested label.
    """
    if len(indices) == 0:
        # No labels requested: empty 2 x 0 coordinate array.
        return np.zeros((2, 0), int)
    positions = np.array(scind.maximum_position(image, labels, indices), int)
    if positions.ndim == 1:
        # A single scalar position comes back 1-D; reshape to a 2 x 1 column.
        positions.shape = (2, 1)
        return positions
    return positions.transpose()
def index(request):
    """Listing page for event ``Occurrence``s.

    Deprecated: emits a DeprecationWarning and will be removed in a future
    version; copy this view into your project if you still need it.

    :param request: Django request object.
    :return: TemplateResponse rendering the visible occurrences.
    """
    warnings.warn(
        "icekit_events.views.index is deprecated and will disappear in a "
        "future version. If you need this code, copy it into your project.",
        DeprecationWarning)
    context = {'occurrences': models.Occurrence.objects.visible()}
    return TemplateResponse(request, 'icekit_events/index.html', context)
def File(self, name, directory=None, create=1):
    """Create a `SCons.Node.FS.File` node for *name*."""
    factory = self.env.fs.File
    return self._create_node(name, factory, directory, create)
def get_metadata(audio_filepaths):
    """Return a tuple of album, artist, has_embedded_album_art from a list of audio files.

    Files are scanned in order; the first file from which both artist and
    album can be read wins (for performance). Tags are looked up under the
    OGG, MP3 (ID3) and MP4 key variants; unreadable files are skipped.
    """
    artist, album, has_embedded_album_art = None, None, None
    for audio_filepath in audio_filepaths:
        try:
            mf = mutagen.File(audio_filepath)
        except Exception:
            # Unreadable or corrupt file: skip it.
            continue
        if mf is None:
            # Not a recognized audio format.
            continue
        # artist
        for key in ("albumartist", "artist",  # ogg
                    "TPE1", "TPE2",  # mp3
                    "aART", "\xa9ART"):  # mp4
            try:
                val = mf.get(key, None)
            except ValueError:
                val = None
            if val is not None:
                # Tag values are lists; keep the last entry of the first
                # matching key.
                artist = val[-1]
                break
        # album
        for key in ("_album", "album",  # ogg
                    "TALB",  # mp3
                    "\xa9alb"):  # mp4
            try:
                val = mf.get(key, None)
            except ValueError:
                val = None
            if val is not None:
                album = val[-1]
                break
        if artist and album:  # album art
            # Detect embedded cover art using the container-specific key.
            if isinstance(mf, mutagen.ogg.OggFileType):
                has_embedded_album_art = "metadata_block_picture" in mf
            elif isinstance(mf, mutagen.mp3.MP3):
                has_embedded_album_art = any(map(operator.methodcaller("startswith", "APIC:"), mf.keys()))
            elif isinstance(mf, mutagen.mp4.MP4):
                has_embedded_album_art = "covr" in mf
            # stop at the first file that succeeds (for performance)
            break
    return artist, album, has_embedded_album_art
def addVariable(self, variable, domain):
    """Add a variable to the problem.

    Example:
    >>> problem = Problem()
    >>> problem.addVariable("a", [1, 2])
    >>> problem.getSolution() in ({'a': 1}, {'a': 2})
    True

    @param variable: Object representing a problem variable
    @type  variable: hashable object
    @param domain: Set of items defining the possible values that
           the given variable may assume
    @type  domain: list, tuple, or instance of C{Domain}
    """
    if variable in self._variables:
        raise ValueError("Tried to insert duplicated variable %s" % repr(variable))
    if isinstance(domain, Domain):
        # Deep-copy so later constraint pruning cannot mutate the caller's
        # Domain object.
        domain = copy.deepcopy(domain)
    elif hasattr(domain, "__getitem__"):
        # Any indexable sequence is wrapped into a Domain.
        domain = Domain(domain)
    else:
        raise TypeError("Domains must be instances of subclasses of the Domain class")
    if not domain:
        raise ValueError("Domain is empty")
    self._variables[variable] = domain
def from_content(cls, content):
    """Parses the content of the World Overview section from Tibia.com into an object of this class.

    Parameters
    ----------
    content: :class:`str`
        The HTML content of the World Overview page in Tibia.com

    Returns
    -------
    :class:`WorldOverview`
        An instance of this class containing all the information.

    Raises
    ------
    InvalidContent
        If the provided content is not the HTML content of the worlds
        section in Tibia.com
    """
    parsed_content = parse_tibiacom_content(content, html_class="TableContentAndRightShadow")
    world_overview = WorldOverview()
    try:
        # First row holds the players-online record, second the column
        # titles; the remaining rows are one per game world.
        record_row, titles_row, *rows = parsed_content.find_all("tr")
        m = record_regexp.search(record_row.text)
        if not m:
            raise InvalidContent("content does not belong to the World Overview section in Tibia.com")
        world_overview.record_count = int(m.group("count"))
        world_overview.record_date = parse_tibia_datetime(m.group("date"))
        world_rows = rows
        world_overview._parse_worlds(world_rows)
        return world_overview
    except (AttributeError, KeyError, ValueError):
        # Any structural surprise above means this is not the expected page.
        raise InvalidContent("content does not belong to the World Overview section in Tibia.com")
def add(self, attribute):
    """Add an attribute to this attribute exchange request.

    @param attribute: The attribute that is being requested
    @type attribute: C{L{AttrInfo}}
    @returns: None
    @raise KeyError: when the requested attribute is already present in
        this fetch request.
    """
    type_uri = attribute.type_uri
    if type_uri in self.requested_attributes:
        raise KeyError('The attribute %r has already been requested' % (type_uri,))
    self.requested_attributes[type_uri] = attribute
def render(self, ctx=None):
    '''Render the current value into a :class:`bitstring.Bits` object

    :rtype: :class:`bitstring.Bits`
    :return: the rendered field
    '''
    self._initialize()
    if ctx is None:
        ctx = RenderContext()
    # if we are called from within render, return a dummy object...
    # (the context tracks the render stack, so membership means recursion)
    if self in ctx:
        self._current_rendered = self._in_render_value()
    else:
        ctx.push(self)
        if self.dependency_type == Calculated.VALUE_BASED:
            # Value-based dependencies need the referenced field rendered
            # first so this field can be computed from it.
            self._rendered_field = self._field.render(ctx)
        self._render()
        ctx.pop()
    return self._current_rendered
def process_form(self, instance, field, form, empty_marker=None, emptyReturnsMarker=False):
    """Return a UID so that ReferenceField understands."""
    fieldName = field.getName()
    # Prefer the dedicated "<field>_uid" form key; fall back to the bare
    # field name; otherwise there is no value at all.
    if fieldName + "_uid" in form:
        uid = form.get(fieldName + "_uid", '')
    elif fieldName in form:
        uid = form.get(fieldName, '')
    else:
        uid = None
    # Multi-valued fields submitted as a comma separated string are split
    # into a list of UIDs.
    if uid is not None and field.multiValued and (isinstance(uid, str) or isinstance(uid, unicode)):
        uid = uid.split(",")
    return uid, {}
def css(self, path):
    """Link or embed a CSS file.

    When ``embed_content`` is enabled the file's contents are inlined in a
    ``<style>`` tag; otherwise a ``<link>`` tag referencing *path* is used.
    The resulting tag is appended to ``self.head``.
    """
    if self.settings.embed_content:
        # Use a context manager so the file handle is closed promptly;
        # the original left the handle to the garbage collector.
        with codecs.open(path, 'r', encoding='utf8') as css_file:
            content = css_file.read()
        tag = Style(content, type="text/css")
    else:
        tag = Link(href=path, rel="stylesheet", type_="text/css")
    self.head.append(tag)
def get_skos(self, id=None, uri=None, match=None):
    """Return saved SKOS concept(s) by id, URI or partial match.

    The arguments are interpreted flexibly: a string passed as *id* is
    treated as a URI, and if it is not an http(s) URI it becomes a match
    pattern instead.

    :param id: concept id, or (when a string) a URI / match pattern
    :param uri: concept URI, compared case-insensitively
    :param match: substring matched against qnames (when it contains a
        colon) or URIs; this mode returns a list of hits
    :return: a concept, a list of concepts (match mode), or None
    """
    if not id and not uri and not match:
        return None
    # Guess what was passed: a string "id" is really a URI or a pattern.
    # (isinstance replaces the original's `type(x) == type("string")`.)
    if isinstance(id, str):
        uri = id
        id = None
        if not is_http(uri):
            match = uri
            uri = None
    if match:
        if not isinstance(match, str):
            return []
        needle = match.lower()
        if ":" in match:  # qname-style pattern
            return [x for x in self.all_skos_concepts if needle in x.qname.lower()]
        return [x for x in self.all_skos_concepts if needle in x.uri.lower()]
    for x in self.all_skos_concepts:
        if id and x.id == id:
            return x
        if uri and x.uri.lower() == uri.lower():
            return x
    return None
def get_app_env():
    """Return (app, env) derived from the 'app' environment variable.

    The app and env may be passed in the command line as 'app=$app:$env'.

    :return: tuple (app, env); app is None when the variable is not set
    """
    app, env = None, get_env()
    if "app" in os.environ:
        raw = os.environ["app"]
        app = raw.lower()
        if ":" in app:
            # Split on the first ':' only. BUG FIX: the original used
            # maxsplit=2, which raises "too many values to unpack" for
            # values containing two or more colons.
            # NOTE(review): the original reads the raw (non-lowercased)
            # value in this branch, so app/env keep their case here --
            # preserved as-is, confirm this asymmetry is intended.
            app, env = raw.split(":", 1)
            set_env(env)
    return app, env
def merge_datetime(date, time='', date_format='%d/%m/%Y', time_format='%H:%M'):
    """Create a ``datetime`` object from date and time strings.

    When *time* is empty the result falls on midnight of *date*.
    """
    moment = datetime.strptime(date, date_format)
    if not time:
        return moment
    clock = datetime.strptime(time, time_format).time()
    return datetime.combine(moment.date(), clock)
def convert_feature_layers_to_dict(feature_layers):
    """Convert a list of 'feature_layer' objects to a dict keyed by the
    layer name, mapping each name to that layer's features."""
    return {layer['name']: layer['features'] for layer in feature_layers}
def get_mean_and_stddevs(self, sites, rup, dists, imt, stddev_types):
    """See :meth:`superclass method
    <.base.GroundShakingIntensityModel.get_mean_and_stddevs>`
    for spec of input and result values.
    """
    R = (dists.rrup)  # rupture distance
    M = rup.mag  # moment magnitude
    # get constants: site-class terms derived from vs30
    Ssr = self.get_Ssr_term(sites.vs30)
    Shr = self.get_Shr_term(sites.vs30)
    rake = rup.rake
    F = self.get_fault_term(rake)  # style-of-faulting term
    # compute mean (natural-log ground motion)
    # NOTE(review): the hard-coded coefficients belong to this specific
    # GMPE; verify against the published attenuation relationship before
    # altering any of them.
    mean = -3.512 + (0.904 * M) - (1.328 * np.log(np.sqrt(R ** 2 + (0.149 * np.exp(0.647 * M)) ** 2))) + (1.125 - 0.112 * np.log(R) - 0.0957 * M) * F + (0.440 - 0.171 * np.log(R)) * Ssr + (0.405 - 0.222 * np.log(R)) * Shr
    stddevs = self.get_stddevs(mean, stddev_types)
    return mean, stddevs
def create_base_logger(config=None, parallel=None):
    """Setup base logging configuration, also handling remote logging.

    Correctly sets up for local, multiprocessing and distributed runs.
    Creates subscribers for non-local runs that will be referenced from
    local logging.

    Retrieves IP address using tips from http://stackoverflow.com/a/1267524/252589
    """
    if parallel is None:
        parallel = {}
    parallel_type = parallel.get("type", "local")
    cores = parallel.get("cores", 1)
    if parallel_type == "ipython":
        from bcbio.log import logbook_zmqpush
        # Find a non-loopback IP that remote workers can push log records to.
        fqdn_ip = socket.gethostbyname(socket.getfqdn())
        ips = [fqdn_ip] if (fqdn_ip and not fqdn_ip.startswith("127.")) else []
        if not ips:
            ips = [ip for ip in socket.gethostbyname_ex(socket.gethostname())[2] if not ip.startswith("127.")]
        if not ips:
            # Last resort: "connect" a UDP socket to a public DNS server and
            # read back the local address the OS chose for the route.
            ips += [(s.connect(('8.8.8.8', 53)), s.getsockname()[0], s.close())[1] for s in [socket.socket(socket.AF_INET, socket.SOCK_DGRAM)]]
        if not ips:
            sys.stderr.write("Cannot resolve a local IP address that isn't 127.x.x.x " "Your machines might not have a local IP address " "assigned or are not able to resolve it.\n")
            sys.exit(1)
        uri = "tcp://%s" % ips[0]
        subscriber = logbook_zmqpush.ZeroMQPullSubscriber()
        mport = subscriber.socket.bind_to_random_port(uri)
        wport_uri = "%s:%s" % (uri, mport)
        # Workers read this queue address from the parallel config.
        parallel["log_queue"] = wport_uri
        subscriber.dispatch_in_background(_create_log_handler(config, True))
    elif cores > 1:
        # NOTE(review): `mpq` is not defined anywhere in this function;
        # presumably a module-level multiprocessing queue -- confirm it
        # exists before relying on this branch.
        subscriber = IOSafeMultiProcessingSubscriber(mpq)
        subscriber.dispatch_in_background(_create_log_handler(config))
    else:  # Do not need to setup anything for local logging
        pass
    return parallel
def git_check():
    """Check that all changes, besides versioning files, are committed.

    :return:
    """
    filter_cmd = 'egrep -v "^(pynb/version.py)|(version.py)$" | tr "\\n" " "'
    # Unstaged changes (working tree vs index).
    unstaged = local('git diff --name-only | ' + filter_cmd, capture=True).strip()
    if unstaged:
        fatal('Stage for commit and commit all changes first: {}'.format(unstaged))
    # Staged but uncommitted changes (index vs HEAD).
    staged = local('git diff --cached --name-only | ' + filter_cmd, capture=True).strip()
    if staged:
        fatal('Commit all changes first: {}'.format(staged))
def get_options(argv=None):
    """Convert options into commands.

    Parses Spyder's command-line options from *argv* (or sys.argv when
    None).

    return commands, message
    """
    parser = argparse.ArgumentParser(usage="spyder [options] files")
    parser.add_argument('--new-instance', action='store_true', default=False, help="Run a new instance of Spyder, even if the single " "instance mode has been turned on (default)")
    parser.add_argument('--defaults', dest="reset_to_defaults", action='store_true', default=False, help="Reset configuration settings to defaults")
    parser.add_argument('--reset', dest="reset_config_files", action='store_true', default=False, help="Remove all configuration files!")
    parser.add_argument('--optimize', action='store_true', default=False, help="Optimize Spyder bytecode (this may require " "administrative privileges)")
    parser.add_argument('-w', '--workdir', dest="working_directory", default=None, help="Default working directory")
    parser.add_argument('--hide-console', action='store_true', default=False, help="Hide parent console window (Windows)")
    parser.add_argument('--show-console', action='store_true', default=False, help="(Deprecated) Does nothing, now the default behavior " "is to show the console")
    parser.add_argument('--multithread', dest="multithreaded", action='store_true', default=False, help="Internal console is executed in another thread " "(separate from main application thread)")
    parser.add_argument('--profile', action='store_true', default=False, help="Profile mode (internal test, " "not related with Python profiling)")
    parser.add_argument('--window-title', type=str, default=None, help="String to show in the main window title")
    parser.add_argument('-p', '--project', default=None, type=str, dest="project", help="Path that contains an Spyder project")
    parser.add_argument('--opengl', default=None, dest="opengl_implementation", choices=['software', 'desktop', 'gles'], help=("OpenGL implementation to pass to Qt"))
    parser.add_argument('--debug-info', default=None, dest="debug_info", choices=['minimal', 'verbose'], help=("Level of internal debugging info to give. " "'minimal' only logs a small amount of " "confirmation messages and 'verbose' logs a " "lot of detailed information." ))
    parser.add_argument('--debug-output', default='terminal', dest="debug_output", choices=['terminal', 'file'], help=("Print internal debugging info either to the " "terminal or to a file called spyder-debug.log " "in your current working directory. Default is " "'terminal'." ))
    # Positional arguments: the files to open.
    parser.add_argument('files', nargs='*')
    options = parser.parse_args(argv)
    args = options.files
    return options, args
def assert_compile_finished(app_folder):
    """Verify that builder.sh's compile step set its completion flag.

    Once builder.sh has invoked the compile script it sets a flag file on
    return; a missing flag indicates the LXC container crashed during the
    build, and an error is raised. The flag is cleaned up after the check,
    so call this function only once. See issue #141.

    :raises AssertionError: when the postbuild flag file is absent.
    """
    flag_path = os.path.join(app_folder, '.postbuild.flag')
    if not os.path.isfile(flag_path):
        raise AssertionError(
            'No postbuild flag set, LXC container may have crashed while '
            'building. Check compile logs for build.')
    # Remove the flag; failure to remove it does not matter.
    try:
        os.remove(flag_path)
    except OSError:
        pass
def is_valid(self, name=None, debug=False):
    """Check whether the current xml tag path should be processed.

    Walks ``self.current_tree`` down through ``self.action_tree``; a path
    is valid only if every element (or the ``ALL_TAGS`` wildcard) is found
    at each level.

    :param name: optional label printed alongside the verdict when *debug*
    :param debug: when True, print the name and verdict
    :return: True if the current path maps to a valid action subtree
    """
    valid_tags = self.action_tree
    invalid = False
    for item in self.current_tree:
        try:
            if item in valid_tags or self.ALL_TAGS in valid_tags:
                # Prefer the exact tag; fall back to the wildcard entry.
                valid_tags = valid_tags[item if item in valid_tags else self.ALL_TAGS]
            else:
                valid_tags = None
                invalid = True
                break
        except (KeyError, TypeError):
            # The subtree is either missing the key or not a dictionary.
            invalid = True
            break
    if debug:
        # BUG FIX: was a Python-2-only `print` statement (a SyntaxError
        # under Python 3); output is unchanged.
        print(name, not invalid and valid_tags is not None)
    return not invalid and valid_tags is not None
def domestic_mobile_phone_number(value):
    """Confirm that *value* is a valid UK mobile phone number.

    @param {str} value
    @returns {None} when the number is a valid UK mobile
    @raises ValidationError otherwise (including unparseable input)
    """
    valid = False
    try:
        parsed = phonenumbers.parse(value, 'GB')
    except NumberParseException:
        # Unparseable input falls through to the validation error below.
        pass
    else:
        valid = (carrier._is_mobile(number_type(parsed))
                 and phonenumbers.is_valid_number(parsed))
    if valid:
        return None
    raise ValidationError(MESSAGE_INVALID_PHONE_NUMBER)
def launchRequest(request):
    """Launch a given request against the search API.

    :param request: The request object (must expose ``send()``).
    :return: a tuple ``(person, records)`` where ``person`` is a dict of
        person results (empty if none) and ``records`` is a list of dicts
        with the record references (empty if none).
    """
    person = {}
    records = []
    try:
        response = request.send()
        # Best-effort extraction: either attribute may be missing from the
        # response, in which case we keep the empty defaults.
        try:
            person = response.person.to_dict()
        except Exception:
            # BUG FIX: was a bare `except:` (would also swallow
            # KeyboardInterrupt/SystemExit); best-effort intent preserved.
            pass
        try:
            records = [r.to_dict() for r in response.records]
        except Exception:
            pass
    except SearchAPIError as e:
        # BUG FIX: was a Python-2 `print` statement (SyntaxError on py3).
        print(e.http_status_code, e)
    return person, records
def create_parser():
    """Assemble the complete DQL language parser.

    Builds every statement grammar, combines them into the base grammar,
    and adds the EXPLAIN/ANALYZE wrappers plus `--` line comments.
    """
    grammars = [
        create_select(),
        create_scan(),
        create_delete(),
        create_update(),
        create_insert(),
        create_create(),
        create_drop(),
        create_alter(),
        create_dump(),
        create_load(),
    ]
    (select, scan, delete, update, insert,
     create, drop, alter, dump, load) = grammars
    base = (select | scan | delete | update | insert
            | create | drop | alter | dump | load)
    # EXPLAIN wraps every mutating/reading statement except dump/load.
    explain = upkey("explain").setResultsName("action") + Group(
        select | scan | delete | update | insert | create | drop | alter)
    # ANALYZE only applies to data statements.
    analyze = upkey("analyze").setResultsName("action") + Group(
        select | scan | delete | update | insert)
    dql = explain | analyze | base
    # `--` starts a comment that runs to end of line.
    dql.ignore("--" + restOfLine)
    return dql
def train(self, traindata: np.ndarray) -> None:
    """Fit the underlying classifier on *traindata*.

    Columns 1-4 of each row are used as features, column 5 as the label.
    """
    features = traindata[:, 1:5]
    labels = traindata[:, 5]
    self.clf.fit(features, labels)
async def _query_chunked_post ( self , path , method = "POST" , * , params = None , data = None , headers = None , timeout = None ) :
"""A shorthand for uploading data by chunks""" | if headers is None :
headers = { }
if headers and "content-type" not in headers :
headers [ "content-type" ] = "application/octet-stream"
response = await self . _query ( path , method , params = params , data = data , headers = headers , timeout = timeout , chunked = True , )
return response |
def __make_security_role_api_request(server_context, api, role, email=None, user_id=None, container_path=None):
    """Execute a request against the LabKey Security Controller group
    membership APIs.

    :param server_context: a LabKey server context; see utils.create_server_context
    :param api: action to execute
    :param role: role dict (from get_roles) the action applies to
    :param email: email of the target user (either this or user_id required)
    :param user_id: id of the target user (either this or email required)
    :param container_path: additional container context path
    :return: request json object
    :raises ValueError: when neither email nor user_id is supplied
    """
    if email is None and user_id is None:
        raise ValueError("Must supply either/both [email] or [user_id]")
    url = server_context.build_url(security_controller, api, container_path)
    payload = {
        'roleClassName': role['uniqueName'],
        'principalId': user_id,
        'email': email,
    }
    return server_context.make_request(url, payload)
def extract_irc_colours(msg):
    """Split the IRC colour prefix off the start of *msg*.

    Returns a tuple of (colour code rendered in our bracketed format,
    remainder of the message).
    """
    foreground, rest = _extract_irc_colour_code(msg)
    if not foreground:
        # No colour prefix present at all.
        return '[]', rest
    if not rest.startswith(','):
        # Foreground only -- no second code follows.
        return '[{}]'.format(_ctos(foreground)), rest
    # Strip the comma and try for a background colour.
    background, tail = _extract_irc_colour_code(rest[1:])
    if not background:
        # The comma did not introduce a colour; give it back to the caller.
        return '[{}]'.format(_ctos(foreground)), ',' + tail
    return '[{},{}]'.format(_ctos(foreground), _ctos(background)), tail
def export_collada(mesh, **kwargs):
    """Export a mesh or a list of meshes as a COLLADA .dae file.

    Parameters
    ----------
    mesh : Trimesh object or list of Trimesh objects
        The mesh(es) to export.

    Returns
    -------
    export : str
        String of COLLADA format output.
    """
    meshes = mesh
    if not isinstance(mesh, (list, tuple, set, np.ndarray)):
        meshes = [mesh]

    c = collada.Collada()
    nodes = []
    for i, m in enumerate(meshes):
        # Load uv, colors, materials
        uv = None
        colors = None
        mat = _unparse_material(None)
        if m.visual.defined:
            if m.visual.kind == 'texture':
                mat = _unparse_material(m.visual.material)
                uv = m.visual.uv
            elif m.visual.kind == 'vertex':
                colors = (m.visual.vertex_colors / 255.0)[:, :3]
        c.effects.append(mat.effect)
        c.materials.append(mat)

        # Create geometry object
        vertices = collada.source.FloatSource(
            'verts-array', m.vertices.flatten(), ('X', 'Y', 'Z'))
        normals = collada.source.FloatSource(
            'normals-array', m.vertex_normals.flatten(), ('X', 'Y', 'Z'))
        input_list = collada.source.InputList()
        input_list.addInput(0, 'VERTEX', '#verts-array')
        input_list.addInput(1, 'NORMAL', '#normals-array')
        arrays = [vertices, normals]
        if uv is not None:
            texcoords = collada.source.FloatSource(
                'texcoords-array', uv.flatten(), ('U', 'V'))
            input_list.addInput(2, 'TEXCOORD', '#texcoords-array')
            arrays.append(texcoords)
        if colors is not None:
            # BUG FIX: was `if uv:` -- `uv` is None or a numpy array here,
            # and truthiness of a multi-element array raises ValueError.
            # Identity test is the safe equivalent.
            idx = 2
            if uv is not None:
                idx = 3
            colors_source = collada.source.FloatSource(
                'colors-array', colors.flatten(), ('R', 'G', 'B'))
            input_list.addInput(idx, 'COLOR', '#colors-array')
            arrays.append(colors_source)
        geom = collada.geometry.Geometry(
            c, uuid.uuid4().hex, uuid.uuid4().hex, arrays)
        # One index per source array for every face vertex.
        indices = np.repeat(m.faces.flatten(), len(arrays))
        matref = 'material{}'.format(i)
        triset = geom.createTriangleSet(indices, input_list, matref)
        geom.primitives.append(triset)
        c.geometries.append(geom)
        matnode = collada.scene.MaterialNode(matref, mat, inputs=[])
        geomnode = collada.scene.GeometryNode(geom, [matnode])
        node = collada.scene.Node('node{}'.format(i), children=[geomnode])
        nodes.append(node)
    scene = collada.scene.Scene('scene', nodes)
    c.scenes.append(scene)
    c.scene = scene

    b = io.BytesIO()
    c.write(b)
    b.seek(0)
    return b.read()
def _check_length_equal ( param_1 , param_2 , name_param_1 , name_param_2 ) :
"""Raises an error when the length of given two arguments is not equal""" | if len ( param_1 ) != len ( param_2 ) :
raise ValueError ( "Length of {} must be same as Length of {}" . format ( name_param_1 , name_param_2 ) ) |
def get_site_orbital_dos(self, site, orbital):
    """Return the Dos for a particular orbital of a particular site.

    Args:
        site: Site in Structure associated with CompleteDos.
        orbital: Orbital in the site.

    Returns:
        Dos containing densities for orbital of site.
    """
    densities = self.pdos[site][orbital]
    return Dos(self.efermi, self.energies, densities)
def update_or_create(cls, name, external_endpoint=None, vpn_site=None, trust_all_cas=True, with_status=False):
    """Update or create an ExternalGateway.

    The ``external_endpoint`` and ``vpn_site`` parameters are expected to be
    a list of dicts with key/value pairs to satisfy the respective elements'
    create constructor. VPN sites will represent the final state of the VPN
    site list. ExternalEndpoints that are pre-existing will not be deleted
    if not provided in the ``external_endpoint`` parameter, however existing
    elements will be updated as specified.

    :param str name: name of external gateway
    :param list(dict) external_endpoint: list of dict items with key/value
        to satisfy ExternalEndpoint.create constructor
    :param list(dict) vpn_site: list of dict items with key/value to satisfy
        VPNSite.create constructor
    :param bool trust_all_cas: passed through to ExternalGateway.create when
        the gateway does not exist yet
    :param bool with_status: if True, return a 3-tuple of
        (ExternalGateway, modified, created) where modified and created are
        booleans for the operations performed
    :raises ValueError: missing required argument/s for constructor argument
    :rtype: ExternalGateway
    """
    # Validate input dicts up front so we fail before touching the server.
    if external_endpoint:
        for endpoint in external_endpoint:
            if 'name' not in endpoint:
                raise ValueError('External endpoints are configured '
                                 'but missing the name parameter.')
    if vpn_site:
        for site in vpn_site:
            if 'name' not in site:
                raise ValueError('VPN sites are configured but missing '
                                 'the name parameter.')
            # Make sure VPN sites are resolvable before continuing
            sites = [element_resolver(element, do_raise=True)
                     for element in site.get('site_element', [])]
            site.update(site_element=sites)
    updated = False
    created = False
    # Fetch-or-create the gateway itself.
    try:
        extgw = ExternalGateway.get(name)
    except ElementNotFound:
        extgw = ExternalGateway.create(name, trust_all_cas)
        created = True
    # Upsert each endpoint; any change or creation marks the gateway updated.
    if external_endpoint:
        for endpoint in external_endpoint:
            _, modified, was_created = ExternalEndpoint.update_or_create(
                extgw, with_status=True, **endpoint)
            if was_created or modified:
                updated = True
    # Upsert each VPN site with its (already resolved) site elements.
    if vpn_site:
        for site in vpn_site:
            _, modified, was_created = VPNSite.update_or_create(
                extgw, name=site['name'],
                site_element=site.get('site_element'), with_status=True)
            if was_created or modified:
                updated = True
    if with_status:
        return extgw, updated, created
    return extgw
def getNodePosition(cls, start, height=None) -> int:
    """Calculate a merkle-tree node position from start and height.

    :param start: the sequence number of the first leaf under this tree
    :param height: height of this node in the merkle tree
    :return: the node's position
    """
    top_power = highest_bit_set(start) - 1
    if not height:
        height = top_power
    if count_bits_set(start) == 1:
        # start is an exact power of two: position follows directly from
        # the height adjustment.
        return start - 1 + (height - top_power)
    # Otherwise split into the largest complete subtree and the remainder,
    # and sum their positions recursively.
    left = pow(2, top_power)
    return cls.getNodePosition(left, top_power) + cls.getNodePosition(start - left, height)
def get_sanitized_bot_name(dict: Dict[str, int], name: str) -> str:
    """Cut off at 31 characters and handle duplicates.

    :param dict: Holds the list of names for duplicates (note: the
        parameter shadows the builtin; kept for caller compatibility)
    :param name: The name that is being sanitized
    :return: A sanitized version of the name
    """
    if name not in dict:
        # Make sure name does not exceed 31 characters.
        new_name = name[:31]
        dict[name] = 1
    else:
        count = dict[name]
        # Truncate at 27 because we can have up to '(10)' appended.
        new_name = name[:27] + "(" + str(count + 1) + ")"
        # BUG FIX (was a TODO + assert): a request sequence such as
        # ["foo(2)", "foo", "foo"] used to trip the assert.  Keep bumping
        # the counter until the generated name is actually unused.
        while new_name in dict:
            count += 1
            new_name = name[:27] + "(" + str(count + 1) + ")"
        dict[name] = count + 1
    return new_name
def search(self, criterion, table, columns='', fetch=False, radius=1 / 60., use_converters=False, sql_search=False):
    """General search method for tables.

    For (ra, dec) input in decimal degrees, i.e. (12.3456, -65.4321),
    returns all sources within 1 arcminute, or the specified radius.
    For string input, i.e. 'vb10', returns all sources with
    case-insensitive partial text matches in columns with 'TEXT' data
    type. For integer input, i.e. 123, returns all exact matches of
    columns with INTEGER data type.

    Parameters
    ----------
    criterion : (str, int, sequence, tuple)
        The text, integer, coordinate tuple, or sequence thereof to
        search the table with.
    table : str
        The name of the table to search.
    columns : sequence
        Specific column names to search, otherwise searches all columns.
    fetch : bool
        Return the results of the query as an Astropy table.
    radius : float
        Radius in degrees in which to search for objects if using
        (ra, dec). Default: 1/60 degree.
    use_converters : bool
        Apply converters to columns with custom data types.
    sql_search : bool
        Perform the search by coordinates in a box defined within the SQL
        commands, rather than with true angular separations. Faster, but
        not a true radial search.
    """
    # Get list of columns to search and format properly
    t = self.query("PRAGMA table_info({})".format(table), unpack=True, fmt='table')
    all_columns = t['name'].tolist()
    types = t['type'].tolist()
    columns = columns or all_columns
    # Normalize to an array; a lone string becomes a one-element list.
    columns = np.asarray([columns] if isinstance(columns, str) else columns)
    # Separate good and bad columns and corresponding types
    badcols = columns[~np.in1d(columns, all_columns)]
    columns = columns[np.in1d(columns, all_columns)]
    # Reorder the surviving columns to match the table's column order.
    columns = np.array([c for c in all_columns if c in columns])
    types = np.array([t for c, t in zip(all_columns, types) if c in columns])[np.in1d(columns, all_columns)]
    for col in badcols:
        print("'{}' is not a column in the {} table.".format(col, table.upper()))
    # Coordinate search
    # NOTE(review): py2/py3 compatibility shim -- `unicode` only exists
    # under Python 2.
    if sys.version_info[0] == 2:
        str_check = (str, unicode)
    else:
        str_check = str
    results = ''
    if isinstance(criterion, (tuple, list, np.ndarray)):
        try:
            if sql_search:
                # Box search done entirely in SQL (fast, not a true radius).
                q = "SELECT * FROM {} WHERE ra BETWEEN ".format(table) + str(criterion[0] - radius) + " AND " + str(criterion[0] + radius) + " AND dec BETWEEN " + str(criterion[1] - radius) + " AND " + str(criterion[1] + radius)
                results = self.query(q, fmt='table')
            else:
                # True angular-separation search via pandas.
                t = self.query('SELECT id,ra,dec FROM sources', fmt='table')
                df = t.to_pandas()
                df[['ra', 'dec']] = df[['ra', 'dec']].apply(pd.to_numeric)
                # convert everything to floats
                mask = df['ra'].isnull()
                df = df[~mask]
                df['theta'] = df.apply(ang_sep, axis=1, args=(criterion[0], criterion[1]))
                good = df['theta'] <= radius
                if sum(good) > 0:
                    # Fetch full rows for the matching source ids; some
                    # tables key on `source_id`, others on `id`.
                    params = ", ".join(['{}'.format(s) for s in df[good]['id'].tolist()])
                    try:
                        results = self.query('SELECT * FROM {} WHERE source_id IN ({})'.format(table, params), fmt='table')
                    except:
                        results = self.query('SELECT * FROM {} WHERE id IN ({})'.format(table, params), fmt='table')
        except:
            print("Could not search {} table by coordinates {}. Try again.".format(table.upper(), criterion))
    # Text string search of columns with 'TEXT' data type
    elif isinstance(criterion, str_check) and any(columns) and 'TEXT' in types:
        try:
            # Space-insensitive partial match across every TEXT column.
            q = "SELECT * FROM {} WHERE {}".format(table, ' OR '.join([r"REPLACE(" + c + r",' ','') like '%" + criterion.replace(' ', '') + r"%'" for c, t in zip(columns, types[np.in1d(columns, all_columns)]) if t == 'TEXT']))
            results = self.query(q, fmt='table', use_converters=use_converters)
        except:
            print("Could not search {} table by string {}. Try again.".format(table.upper(), criterion))
    # Integer search of columns with 'INTEGER' data type
    elif isinstance(criterion, int):
        try:
            q = "SELECT * FROM {} WHERE {}".format(table, ' OR '.join(['{}={}'.format(c, criterion) for c, t in zip(columns, types[np.in1d(columns, all_columns)]) if t == 'INTEGER']))
            results = self.query(q, fmt='table', use_converters=use_converters)
        except:
            print("Could not search {} table by id {}. Try again.".format(table.upper(), criterion))
    # Problem!
    else:
        print("Could not search {} table by '{}'. Try again.".format(table.upper(), criterion))
    # Print or return the results
    if fetch:
        # Fall back to an empty masked table with the right schema.
        return results or at.Table(names=columns, dtype=[type_dict[t] for t in types], masked=True)
    else:
        if results:
            pprint(results, title=table.upper())
        else:
            print("No results found for {} in the {} table.".format(criterion, table.upper()))
def start_recording(self, output_file):
    """Begin recording to the given output video file.

    Parameters
    ----------
    output_file : :obj:`str`
        filename to write video to

    Raises
    ------
    Exception
        if the recorder has not been started, or a recording is already
        in progress
    """
    if not self._started:
        raise Exception(
            "Must start the video recorder first by calling .start()!")
    if self._recording:
        raise Exception(
            "Cannot record a video while one is already recording!")
    command = ('start', output_file)
    self._recording = True
    self._cmd_q.put(command)
def on_modified(self, event):
    """Handle a file-modified event.

    Recompiles the file the first time it is seen; repeated events for the
    same path are deduplicated via ``self.saw``.
    """
    changed = event.src_path
    if changed in self.saw:
        return
    self.saw.add(changed)
    self.recompile(changed)
def _create_pax_generic_header(cls, pax_headers, type, encoding):
    """Return a POSIX.1-2008 extended or global header sequence
    that contains a list of keyword, value pairs. The values
    must be strings.

    :param pax_headers: mapping of pax keyword -> string value
    :param type: tar member type byte for the generated header
    :param encoding: encoding used to restore binary values
    :return: header block(s) followed by the padded record payload
    """
    # Check if one of the fields contains surrogate characters and thereby
    # forces hdrcharset=BINARY, see _proc_pax() for more information.
    binary = False
    for keyword, value in pax_headers.items():
        try:
            value.encode("utf8", "strict")
        except UnicodeEncodeError:
            binary = True
            break
    records = b""
    if binary:
        # Put the hdrcharset field at the beginning of the header.
        records += b"21 hdrcharset=BINARY\n"
    for keyword, value in pax_headers.items():
        keyword = keyword.encode("utf8")
        if binary:
            # Try to restore the original byte representation of `value'.
            # Needless to say, that the encoding must match the string.
            value = value.encode(encoding, "surrogateescape")
        else:
            value = value.encode("utf8")
        # Each pax record is "<length> <keyword>=<value>\n" where <length>
        # counts the entire record, including the length digits themselves.
        # The 3 accounts for the space, '=' and trailing newline.
        l = len(keyword) + len(value) + 3
        n = p = 0
        # Fixed-point iteration: adding the digits of the length can change
        # the number of digits, so iterate until the total stabilizes.
        while True:
            n = l + len(str(p))
            if n == p:
                break
            p = n
        records += bytes(str(p), "ascii") + b" " + keyword + b"=" + value + b"\n"
    # We use a hardcoded "././@PaxHeader" name like star does
    # instead of the one that POSIX recommends.
    info = {}
    info["name"] = "././@PaxHeader"
    info["type"] = type
    info["size"] = len(records)
    info["magic"] = POSIX_MAGIC
    # Create pax header + record blocks.
    return cls._create_header(info, USTAR_FORMAT, "ascii", "replace") + cls._create_payload(records)
def same_types(self, index1, index2):
    """Return True if both symbol table elements are of the same (real) type.

    :param index1: index of the first symbol table entry
    :param index2: index of the second symbol table entry
    :return: True when both entries share a type other than NO_TYPE;
        False otherwise (including when the lookup fails and
        ``self.error()`` has been invoked)
    """
    # BUG FIX: `same` used to be assigned only inside the try block, so a
    # failed lookup raised UnboundLocalError at the return after error().
    same = False
    try:
        same = self.table[index1].type == self.table[index2].type != SharedData.TYPES.NO_TYPE
    except Exception:
        self.error()
    return same
def get_and_update_package_metadata():
    """Update the package metadata for this package if we are building the package.

    :return: metadata - Dictionary of metadata information
    """
    global setup_arguments
    global METADATA_FILENAME
    if not os.path.exists('.git') and os.path.exists(METADATA_FILENAME):
        # Not a git checkout (e.g. building from an sdist): reuse the
        # previously frozen metadata file.
        with open(METADATA_FILENAME) as fh:
            metadata = json.load(fh)
    else:
        # Building from a git checkout: regenerate the metadata from git
        # state and the first README found, then freeze it to disk.
        git = Git(version=setup_arguments['version'])
        metadata = {
            'version': git.version,
            'long_description': 'Tool to pip install missing imports and more',
            'git_hash': git.hash,
            'git_origin': git.origin,
            'git_branch': git.branch,
        }
        for readme_file in ['README.rst', 'README.md', 'README.txt']:
            if os.path.exists(readme_file):
                with open(readme_file) as file_handle:
                    metadata['long_description'] = file_handle.read()
                break
        with open(METADATA_FILENAME, 'w') as fh:
            json.dump(metadata, fh)
    return metadata
def get_value(self, context, obj, field_name):
    """Return the translated value of *field_name* on *obj*.

    If `FALLBACK` evaluates to `True` and the field has no translation for
    the current language, each language in `settings.LANGUAGES` is tried
    in turn; failing that, the untranslated attribute value is used, and
    finally `EMPTY_VALUE`.
    """
    try:
        current = get_language()
        translated = self.get_translated_value(obj, field_name, current)
        if translated:
            return translated
        if self.FALLBACK:
            for code, _label in settings.LANGUAGES:
                if code == current:
                    # Already tried the active language above.
                    continue
                fallback = self.get_translated_value(obj, field_name, code)
                if fallback:
                    return fallback
        untranslated = getattr(obj, field_name)
        return untranslated if self._is_truthy(untranslated) else self.EMPTY_VALUE
    except Exception:
        # In debug mode surface the failure; otherwise degrade gracefully.
        if settings.TEMPLATE_DEBUG:
            raise
        return self.EMPTY_VALUE
def prepare_injection_directions(self):
    """Provide genotypic directions for TPA and selective mirroring,
    with no specific length normalization, to be used in the
    coming iteration.

    Details:
        This method is called in the end of `tell`. The result is
        assigned to ``self.pop_injection_directions`` and used in
        `ask_geno`.

    TODO: should be rather appended?
    """
    # self.pop_injection_directions is supposed to be empty here
    if hasattr(self, 'pop_injection_directions') and self.pop_injection_directions:
        # BUG FIX: the ValueError was previously constructed but never
        # raised (a no-op statement), silently ignoring the broken
        # calling order.
        raise ValueError("Looks like a bug in calling order/logics")
    ary = []
    if (isinstance(self.adapt_sigma, CMAAdaptSigmaTPA)
            or self.opts['mean_shift_line_samples']):
        ary.append(self.mean - self.mean_old)
        ary.append(self.mean_old - self.mean)  # another copy!
        if ary[-1][0] == 0.0:
            _print_warning('zero mean shift encountered which ',
                           'prepare_injection_directions',
                           'CMAEvolutionStrategy', self.countiter)
    if self.opts['pc_line_samples']:
        # caveat: before, two samples were used
        ary.append(self.pc.copy())
    if self.sp.lam_mirr and self.opts['CMA_mirrormethod'] == 2:
        if self.pop_sorted is None:
            _print_warning('pop_sorted attribute not found, mirrors obmitted',
                           'prepare_injection_directions',
                           iteration=self.countiter)
        else:
            ary += self.get_selective_mirrors()
    self.pop_injection_directions = ary
    return ary
def acquire(self, resources, prop_name):
    """Starting with self, walk up the parents until prop_name is found.

    Returns the first (truthy) matching property value, or None.
    """
    # Check the instance's own props first.
    own = getattr(self.props, prop_name, None)
    if own:
        return own
    # Walk the parents; can't recurse via acquire() because the lookup has
    # to keep going through each parent's `acquireds` mapping.
    for parent in self.parents(resources):
        acquireds = parent.props.acquireds
        if not acquireds:
            continue
        # Try the per-resource-type section first, then the "all" section.
        for section in (self.rtype, 'all'):
            section_map = acquireds.get(section)
            if section_map:
                found = section_map.get(prop_name)
                if found:
                    return found
    return None
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.