signature stringlengths 29 44.1k | implementation stringlengths 0 85.2k |
|---|---|
def wncomd(left, right, window):
    """Determine the complement of a double precision window with
    respect to a specified interval.

    https://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/wncomd_c.html

    :param left: left endpoint of the complement interval.
    :type left: float
    :param right: right endpoint of the complement interval.
    :type right: float
    :param window: Input window.
    :type window: spiceypy.utils.support_types.SpiceCell
    :return: Complement of window with respect to left and right.
    :rtype: spiceypy.utils.support_types.SpiceCell
    """
    # Only double-precision SpiceCells (dtype == 1) are valid inputs here.
    assert isinstance(window, stypes.SpiceCell)
    assert window.dtype == 1
    left = ctypes.c_double(left)
    right = ctypes.c_double(right)
    # Result cell sized like the input; the CSPICE call fills it in place.
    result = stypes.SpiceCell.double(window.size)
    libspice.wncomd_c(left, right, ctypes.byref(window), result)
    return result
def range_window(preceding=None, following=None, group_by=None, order_by=None):
    """Build a RANGE-based window clause for use with window functions.

    A RANGE window aggregates rows according to differences in the value
    of the order-by expression; all window frames/ranges are inclusive.

    Parameters
    ----------
    preceding : int, tuple, or None, default None
        None for unbounded, 0 to include the current row, tuple for an
        off-center window.
    following : int, tuple, or None, default None
        None for unbounded, 0 to include the current row, tuple for an
        off-center window.
    group_by : expressions, default None
        Either specify here or with TableExpr.group_by.
    order_by : expressions, default None
        For analytic functions requiring an ordering, specify here, or
        let Ibis determine the default ordering (for functions like rank).

    Returns
    -------
    Window
    """
    return Window(
        how='range',
        preceding=preceding,
        following=following,
        group_by=group_by,
        order_by=order_by,
    )
def _git_diff(self):
    """Run `git diff` and return a dict mapping changed file paths to
    lists of changed line numbers.

    Guarantees that each line number within a file is unique (no repeats)
    and in ascending order.

    Returns a cached result if called multiple times.

    Raises a GitDiffError if `git diff` has an error.
    """
    # If we do not have a cached result, execute `git diff`
    if self._diff_dict is None:
        result_dict = dict()
        for diff_str in self._get_included_diff_results():
            # Parse the output of the diff string
            diff_dict = self._parse_diff_str(diff_str)
            for src_path in diff_dict.keys():
                if self._is_path_excluded(src_path):
                    continue
                # If no _supported_extensions provided, or extension present: process
                root, extension = os.path.splitext(src_path)
                extension = extension[1:].lower()
                # 'not self._supported_extensions' tests for both None and empty list []
                if not self._supported_extensions or extension in self._supported_extensions:
                    added_lines, deleted_lines = diff_dict[src_path]
                    # Remove any lines from the dict that have been deleted
                    # Include any lines that have been added
                    result_dict[src_path] = [
                        line for line in result_dict.get(src_path, [])
                        if not line in deleted_lines
                    ] + added_lines
        # Eliminate repeats and order line numbers
        for (src_path, lines) in result_dict.items():
            result_dict[src_path] = self._unique_ordered_lines(lines)
        # Store the resulting dict
        self._diff_dict = result_dict
    # Return the diff cache
    return self._diff_dict
def correct(self, temp, we_t):
    """Compute weC from weT.

    Returns None when the temperature is out of range or no compensation
    factor is available for it.
    """
    if not PIDTempComp.in_range(temp):
        return None
    factor = self.cf_t(temp)
    if factor is None:
        return None
    return we_t * factor
def remove_custom_binding(self, key_name, needs_prefix=False):
    """Remove custom key binding for a key.

    :param key_name: Pymux key name, for instance "C-A".
    :param needs_prefix: Whether the binding is only active after the
        prefix key.
    """
    k = (needs_prefix, key_name)
    if k in self.custom_bindings:
        # NOTE(review): the binding is looked up in `custom_bindings` but its
        # handler is removed from `custom_key_bindings` -- presumably two
        # related registries; confirm both attributes exist on this class.
        self.custom_key_bindings.remove(self.custom_bindings[k].handler)
        del self.custom_bindings[k]
def delete_action(action_id):
    """Delete an action.

    Returns a 403 response if the action's project is not owned by the
    current user; otherwise deletes the action and returns an empty JSON
    body.
    """
    action = get_data_or_404('action', action_id)
    project = get_data_or_404('project', action['project_id'])
    if project['owner_id'] != get_current_user_id():
        return jsonify(message='forbidden'), 403
    # FIX: was delete_instance('sender', ...) -- this endpoint deletes an
    # action, so the 'action' collection is the intended target.
    # TODO(review): confirm against the project's data-layer collection names.
    delete_instance('action', action['id'])
    return jsonify({})
def get_imported_repo(self, import_path):
    """Look for a go-import meta tag for the provided import_path.

    Returns an ImportedRepo instance with the information in the meta tag,
    or None if no go-import meta tag is found (or the host is unreachable).
    """
    try:
        session = requests.session()
        # TODO: Support https with (optional) fallback to http, as Go does.
        # See https://github.com/pantsbuild/pants/issues/3503.
        session.mount(
            "http://",
            requests.adapters.HTTPAdapter(max_retries=self.get_options().retries))
        page_data = session.get('http://{import_path}?go-get=1'.format(import_path=import_path))
    except requests.ConnectionError:
        return None
    if not page_data:
        return None
    # Return the first match, rather than doing some kind of longest prefix search.
    # Hopefully no one returns multiple valid go-import meta tags.
    for (root, vcs, url) in self.find_meta_tags(page_data.text):
        if root and vcs and url:
            # Check to make sure returned root is an exact match to the provided
            # import path. If it is not then run a recursive check on the
            # returned root and return the values provided by that call.
            if root == import_path:
                return ImportedRepo(root, vcs, url)
            elif import_path.startswith(root):
                return self.get_imported_repo(root)
    return None
def stop(self, timeout_s=None):
    """Stop the interval.

    If a timeout is provided and stop returns False then the thread is
    effectively abandoned in whatever state it was in (presumably
    dead-locked).

    Args:
      timeout_s: The time in seconds to wait on the thread to finish.
        By default it's forever.

    Returns:
      False if a timeout was provided and we timed out, True otherwise.
    """
    self.stopped.set()
    if self.thread:
        self.thread.join(timeout_s)
        # FIX: Thread.isAlive() was removed in Python 3.9; is_alive() is the
        # supported spelling (available since Python 2.6).
        return not self.thread.is_alive()
    else:
        return True
async def async_open(self) -> None:
    """Open a connection to the LifeSOS ethernet interface.

    Uses this object itself as the protocol factory (``lambda: self``), so
    the event loop attaches the new transport to this instance.
    """
    await self._loop.create_connection(lambda: self, self._host, self._port)
def get_eargs():
    """Collect build settings from environment variables.

    Currently only ZMQ_PREFIX is recognized; when set, it is returned
    under the 'zmq_prefix' key.
    """
    settings = {}
    prefix = os.environ.get("ZMQ_PREFIX")
    if prefix is not None:
        debug("Found environ var ZMQ_PREFIX=%s" % prefix)
        settings['zmq_prefix'] = prefix
    return settings
def _remove_redundancy ( self , log ) :
"""Removes duplicate data from ' data ' inside log dict and brings it
out .
> > > lc = LogCollector ( ' file = / path / to / log _ file . log : formatter = logagg . formatters . basescript ' , 30)
> > > log = { ' id ' : 46846876 , ' type ' : ' log ' ,
. . . ' data ' : { ' a ' : 1 , ' b ' : 2 , ' type ' : ' metric ' } }
> > > lc . _ remove _ redundancy ( log )
{ ' data ' : { ' a ' : 1 , ' b ' : 2 } , ' type ' : ' metric ' , ' id ' : 46846876}""" | for key in log :
if key in log and key in log [ 'data' ] :
log [ key ] = log [ 'data' ] . pop ( key )
return log |
def merge_subreturn(original_return, sub_return, subkey=None):
    '''Fold a subresource state return into an existing state return, in place.

    Returns:
        dict: The updated state return (same object as ``original_return``).

    The existing state return does not need to have all the required fields,
    as this is meant to be called from the internals of a state function,
    but any existing data will be kept and respected.

    It is important after using this function to check the return value to
    see if it is False, in which case the main state should return. Prefer
    to check ``_ret['result']`` instead of ``ret['result']``, as the latter
    field may not yet be populated.

    Code Example:

    .. code-block:: python

        def state_func(name, config, alarm=None):
            ret = {'name': name, 'comment': '', 'changes': {}}
            if alarm:
                _ret = __states__['subresource.managed'](alarm)
                __utils__['state.merge_subreturn'](ret, _ret)
                if _ret['result'] is False:
                    return ret
    '''
    if not subkey:
        subkey = sub_return['name']

    # Only an explicit failure propagates; True and None are left alone.
    if sub_return['result'] is False:
        original_return['result'] = sub_return['result']

    sub_comment = sub_return['comment']
    if not isinstance(sub_comment, list):
        sub_comment = [sub_comment]
    original_return.setdefault('comment', [])
    if isinstance(original_return['comment'], list):
        original_return['comment'].extend(sub_comment)
    else:
        # Existing comment is a plain string: append with newline separators.
        if original_return['comment']:  # skip for empty original comments
            original_return['comment'] += '\n'
        original_return['comment'] += '\n'.join(sub_comment)

    if sub_return['changes']:  # changes always exists
        original_return.setdefault('changes', {})
        original_return['changes'][subkey] = sub_return['changes']

    return original_return
def add_version_tracking(self, info_id, version, date, command_line=''):
    """Record a header line describing which software was run and when.

    Arguments:
        info_id (str): The id of the info line.
        version (str): The version of the software used.
        date (str): Date when the software was run.
        command_line (str): The command line that was used for the run.
    """
    line = ('##Software=<ID={0},Version={1},Date="{2}",'
            'CommandLineOptions="{3}">').format(info_id, version, date, command_line)
    self.other_dict[info_id] = line
def wait(self, delay):
    """Wait at the current location for the specified number of iterations.

    :param delay: The time to wait (in animation frames).
    """
    remaining = delay
    while remaining > 0:
        # Repeat the current position once per animation frame.
        self._add_step((self._rec_x, self._rec_y))
        remaining -= 1
def load_filter_plugins(entrypoint_group: str) -> Iterable[Filter]:
    """Load all blacklist plugins that are registered with pkg_resources.

    Results are cached per entrypoint group in the module-level
    ``loaded_filter_plugins`` dict.

    Parameters
    ----------
    entrypoint_group : str
        The entrypoint group name to load plugins from.

    Returns
    -------
    List of Blacklist:
        A list of objects derived from the Blacklist class.
    """
    global loaded_filter_plugins
    enabled_plugins: List[str] = []
    config = BandersnatchConfig().config
    try:
        config_blacklist_plugins = config["blacklist"]["plugins"]
        split_plugins = config_blacklist_plugins.split("\n")
        # "all" short-circuits: every discovered plugin is enabled.
        if "all" in split_plugins:
            enabled_plugins = ["all"]
        else:
            for plugin in split_plugins:
                # Skip blank lines in the multi-line config value.
                if not plugin:
                    continue
                enabled_plugins.append(plugin)
    except KeyError:
        # No [blacklist]/plugins section configured -> nothing enabled.
        pass
    # If the plugins for the entrypoint_group have been loaded, return them
    cached_plugins = loaded_filter_plugins.get(entrypoint_group)
    if cached_plugins:
        return cached_plugins
    plugins = set()
    for entry_point in pkg_resources.iter_entry_points(group=entrypoint_group):
        plugin_class = entry_point.load()
        plugin_instance = plugin_class()
        if "all" in enabled_plugins or plugin_instance.name in enabled_plugins:
            plugins.add(plugin_instance)
    loaded_filter_plugins[entrypoint_group] = list(plugins)
    return plugins
def request(self, method, suffix, data):
    """Issue a request against the Weibo API and decode the JSON response.

    :param method: str, http method ["GET", "POST", "PUT"]
    :param suffix: the url suffix, appended to self.site_url
    :param data: request payload forwarded to the session
    :return: the decoded JSON response
    :raises WeiboOauth2Error: if the JSON body carries an "error_code"
    :raises WeiboRequestError: on any non-200 HTTP status
    """
    url = self.site_url + suffix
    response = self.session.request(method, url, data=data)
    if response.status_code == 200:
        json_obj = response.json()
        # A dict containing a truthy "error_code" signals an API-level error
        # even though the HTTP status was 200.
        if isinstance(json_obj, dict) and json_obj.get("error_code"):
            raise WeiboOauth2Error(
                json_obj.get("error_code"),
                json_obj.get("error"),
                json_obj.get('error_description')
            )
        else:
            return json_obj
    else:
        raise WeiboRequestError(
            "Weibo API request error: status code: {code} url:{url} ->"
            " method:{method}: data={data}".format(
                code=response.status_code, url=response.url,
                method=method, data=data
            )
        )
def to_foreign(self, obj, name, value):  # pylint:disable=unused-argument
    """Transform to a MongoDB-safe value.

    Reduces a qualified reference ("module:attr") to a registered plugin
    short name when possible, then validates plain names against the
    entry-point namespace.

    NOTE(review): relies on the Python 2 ``unicode`` builtin -- not
    Python 3 compatible as written.
    """
    namespace = self.namespace
    try:
        explicit = self.explicit
    except AttributeError:
        # Default: explicit ("module:attr") references are only allowed
        # when no namespace is configured.
        explicit = not namespace
    if not isinstance(value, (str, unicode)):
        value = canon(value)
    if namespace and ':' in value:
        # Try to reduce to a known plugin short name.
        for point in iter_entry_points(namespace):  # TODO: Isolate.
            qualname = point.module_name
            if point.attrs:
                qualname += ':' + '.'.join(point.attrs)
            if qualname == value:
                value = point.name
                break
    if ':' in value:
        # Still a qualified reference: only permitted in explicit mode.
        if not explicit:
            raise ValueError("Explicit object references not allowed.")
        return value
    if namespace and value not in (i.name for i in iter_entry_points(namespace)):
        raise ValueError('Unknown plugin "' + value + '" for namespace "' + namespace + '".')
    return value
def _compile_files(self):
    """Compile python plugin files so they can be processed by the loader.

    Compiles the plugins if they have been updated or haven't yet been
    compiled. Compiles the whole directory and returns as soon as one
    missing or stale compiled file is found.
    """
    for f in glob.glob(os.path.join(self.dir_path, '*.py')):
        # Check for compiled Python files that aren't in the __pycache__.
        # NOTE(review): this expects "<name>.pyc" next to the source, which
        # is the Python 2 layout -- confirm this is intended on Python 3.
        if not os.path.isfile(os.path.join(self.dir_path, f + 'c')):
            compileall.compile_dir(self.dir_path, quiet=True)
            logging.debug('Compiled plugins as a new plugin has been added.')
            return
        # Recompile if there are newer plugins.
        elif os.path.getmtime(os.path.join(self.dir_path, f)) > os.path.getmtime(os.path.join(self.dir_path, f + 'c')):
            compileall.compile_dir(self.dir_path, quiet=True)
            logging.debug('Compiled plugins as a plugin has been changed.')
            return
def get(context, request, resource=None, uid=None):
    """GET route: fetch a single record by UID, or a batch by resource type.

    :raises APIError: 404 when the resource maps to no known portal type.
    """
    # We have a UID, return the record
    if uid and not resource:
        return api.get_record(uid)
    # we have a UID as resource, return the record
    if api.is_uid(resource):
        return api.get_record(resource)
    portal_type = api.resource_to_portal_type(resource)
    if portal_type is None:
        raise APIError(404, "Not Found")
    return api.get_batched(portal_type=portal_type, uid=uid, endpoint="senaite.jsonapi.v1.get")
def floor_nearest(x, dx=1):
    """Floor ``x`` down to the nearest multiple of ``dx``, rounded to the
    significant digits of ``dx`` to suppress float noise."""
    digits = get_sig_digits(dx)
    floored = math.floor(float(x) / dx) * dx
    return round(floored, digits)
def _unquote_or_none(s: Optional[str]) -> Optional[bytes]:  # noqa: F811
    """None-safe wrapper around url_unescape to handle unmatched optional
    groups correctly.

    Note that args are passed as bytes so the handler can decide what
    encoding to use.
    """
    if s is None:
        return None
    return url_unescape(s, encoding=None, plus=False)
def result(self, result):
    """Sets the result of this ResponseStatus.

    :param result: The result of this ResponseStatus.  # noqa: E501
    :type: str
    """
    if result is None:
        raise ValueError("Invalid value for `result`, must not be `None`")  # noqa: E501
    allowed_values = ["OK", "ERROR"]  # noqa: E501
    if result not in allowed_values:
        raise ValueError(
            "Invalid value for `result` ({0}), must be one of {1}"  # noqa: E501
            .format(result, allowed_values))
    self._result = result
def render(self):
    '''Render a matplotlib figure from the analyzer result.

    Return the figure; use fig.show() to display if needed.
    '''
    fig, ax = plt.subplots()
    # Delegate the actual drawing to the underlying data object.
    self.data_object._render_plot(ax)
    return fig
def default(self, obj):
    """Serialize obj into JSON."""
    # pylint: disable=method-hidden, protected-access, arguments-differ
    if isinstance(obj, Sensor):
        # Flatten a Sensor into a plain dict of its attributes.
        return {
            'sensor_id': obj.sensor_id,
            'children': obj.children,
            'type': obj.type,
            'sketch_name': obj.sketch_name,
            'sketch_version': obj.sketch_version,
            'battery_level': obj.battery_level,
            'protocol_version': obj.protocol_version,
            'heartbeat': obj.heartbeat,
        }
    if isinstance(obj, ChildSensor):
        return {
            'id': obj.id,
            'type': obj.type,
            'description': obj.description,
            'values': obj.values,
        }
    # Fall back to the base encoder, which raises TypeError for unknown types.
    return json.JSONEncoder.default(self, obj)
def createEditor(self, parent, column, operator, value):
    """Create a new editor for the given parent and operator.

    :param parent   | <QWidget>
           column   | <str>
           operator | <str>
           value    | <variant>
    """
    if type(value) == datetime.timedelta:
        # timedeltas get a dedicated delta editor instead of the default
        # date/time widgets.
        editor = XTimeDeltaEdit(parent)
        editor.setAttribute(Qt.WA_DeleteOnClose)
        editor.setDelta(value)
        return editor
    else:
        editor = super(DateTimePlugin, self).createEditor(parent, column, operator, value)
        # Enable the calendar popup on date/datetime editors for usability.
        if isinstance(editor, XDateTimeEdit) or isinstance(editor, XDateEdit):
            editor.setCalendarPopup(True)
        return editor
def profile_execution(self, status):
    """Record the execution result of the selected profile in the report."""
    self.selected_profile.data['execution_success'] = status
    if status:
        self.report['results']['executions']['pass'] += 1
    else:
        self.report['results']['executions']['fail'] += 1
        # Track each failing profile only once.
        # NOTE(review): the flattened source makes the nesting ambiguous;
        # this assumes failed profiles are recorded only on failure --
        # confirm against the original indentation.
        if self.selected_profile.name not in self.report['results']['failed_profiles']:
            self.report['results']['failed_profiles'].append(self.selected_profile.name)
def add_offsets(self, offset_ns=None):
    """Add the onset and offset to each token in the document graph, i.e.
    the character position where each token starts and ends.

    :param offset_ns: namespace used for the attribute keys; defaults to
        ``self.ns``.
    """
    if offset_ns is None:
        offset_ns = self.ns
    onset = 0
    offset = 0
    for token_id, token_str in self.get_tokens():
        offset = onset + len(token_str)
        self.node[token_id]['{0}:{1}'.format(offset_ns, 'onset')] = onset
        self.node[token_id]['{0}:{1}'.format(offset_ns, 'offset')] = offset
        # +1 accounts for a single separating character between tokens.
        onset = offset + 1
def split_ext(path, basename=True):
    """Split ``path`` into a ``(root, extension)`` pair.

    :param path: the path to split.
    :param basename: when True (default), reduce to the basename first so
        the returned root carries no directory components.
    """
    target = os.path.basename(path) if basename else path
    return os.path.splitext(target)
def vm_netstats(vm_=None):
    '''Return combined network counters used by the vms on this hyper in a
    list of dicts:

    .. code-block:: python

        'your-vm': {
            'io_read_kbs': 0,
            'io_total_read_kbs': 0,
            'io_total_write_kbs': 0,
            'io_write_kbs': 0
        }

    If you pass a VM name in as an argument then it will return info
    for just the named VM, otherwise it will return all VMs.

    CLI Example:

    .. code-block:: bash

        salt '*' virt.vm_netstats
    '''
    with _get_xapi_session() as xapi:

        def _info(vm_):
            # Collect per-VIF metrics for one VM, keyed by device name.
            ret = {}
            vm_rec = _get_record_by_label(xapi, 'VM', vm_)
            if vm_rec is False:
                return False
            for vif in vm_rec['VIFs']:
                vif_rec = _get_record(xapi, 'VIF', vif)
                ret[vif_rec['device']] = _get_metrics_record(xapi, 'VIF', vif_rec)
                # Drop the timestamp field; only counters are reported.
                del ret[vif_rec['device']]['last_updated']
            return ret

        info = {}
        if vm_:
            info[vm_] = _info(vm_)
        else:
            for vm_ in list_domains():
                info[vm_] = _info(vm_)
        return info
def _translate_space(self, space):
    """Translate a list of descriptor dicts into the internal variable list.

    Side effects: populates ``self.space``, ``self.dimensionality`` and
    ``self.has_types``.

    :param space: list of dicts; each needs a 'domain' and may carry
        'name' (default ``var_<i>``) and 'type' (default 'continuous').
    :raises InvalidConfigError: if a variable lacks a domain, or if bandit
        variables are mixed with non-bandit ones.
    """
    self.space = []
    self.dimensionality = 0
    # FIX: the original wrote `self.has_types = d = {...}`, aliasing `d` to
    # the dict and then immediately shadowing `d` as the loop variable --
    # a confusing dead alias with no effect; removed.
    self.has_types = {t: False for t in self.supported_types}
    for i, raw_descriptor in enumerate(space):
        descriptor = deepcopy(raw_descriptor)
        descriptor['name'] = descriptor.get('name', 'var_' + str(i))
        descriptor['type'] = descriptor.get('type', 'continuous')
        if 'domain' not in descriptor:
            raise InvalidConfigError('Domain attribute is missing for variable ' + descriptor['name'])
        variable = create_variable(descriptor)
        self.space.append(variable)
        self.dimensionality += variable.dimensionality
        self.has_types[variable.type] = True
    # Check if there are any bandit and non-bandit variables together in the space
    if any(v.is_bandit() for v in self.space) and any(not v.is_bandit() for v in self.space):
        raise InvalidConfigError('Invalid mixed domain configuration. Bandit variables cannot be mixed with other types.')
def new_result(self, job, update_model=True):
    """Register a finished run.

    Every time a run has finished, this function should be called to
    register it with the result logger. If overwritten, make sure to call
    this method from the base class to ensure proper logging.

    Parameters
    ----------
    job : instance of hpbandster.distributed.dispatcher.Job
        Contains all necessary information about the job.
    update_model : bool
        Determines whether a model inside the config_generator should be
        updated.
    """
    # Idiom fix: `not job.exception is None` -> `job.exception is not None`.
    if job.exception is not None:
        self.logger.warning("job {} failed with exception\n{}".format(job.id, job.exception))
def dict_to_numpy_dict(obj_dict):
    """Convert a dictionary of lists into a dictionary of numpy arrays.

    ``None`` values pass through unchanged.
    """
    converted = {}
    for key, value in obj_dict.items():
        converted[key] = None if value is None else np.asarray(value)
    return converted
def update_room_from_obj(settings, vc_room, room_obj):
    """Update a VCRoom DB object using a SOAP room object returned by the API.

    NOTE(review): relies on the Python 2 ``unicode`` builtin.
    """
    vc_room.name = room_obj.name
    if room_obj.ownerName != vc_room.data['owner_identity']:
        # Owner changed on the Vidyo side; fall back to the system user
        # when the identity cannot be resolved to a local user.
        owner = get_user_from_identifier(settings, room_obj.ownerName) or User.get_system_user()
        vc_room.vidyo_extension.owned_by_user = owner
    vc_room.data.update({
        'description': room_obj.description,
        'vidyo_id': unicode(room_obj.roomID),
        'url': room_obj.RoomMode.roomURL,
        'owner_identity': room_obj.ownerName,
        # Empty string means "no PIN set" on either PIN field.
        'room_pin': room_obj.RoomMode.roomPIN if room_obj.RoomMode.hasPIN else "",
        'moderation_pin': room_obj.RoomMode.moderatorPIN if room_obj.RoomMode.hasModeratorPIN else "",
    })
    vc_room.vidyo_extension.extension = int(room_obj.extension)
def update_dtype(self, resvar=None):
    """Update the dtype attribute of the function.

    This is required because Fortran functions can have their types
    declared either as a modifier on the function *or* as a member inside
    the function.

    :arg resvar: the name of the variable declared using the result(var)
        construct after the function signature.
    """
    if self.dtype is None:
        # Search the members of this function for one that has the same name
        # as the function. If it gets found, overwrite the dtype, kind and
        # modifiers attributes so the rest of the code works.
        for m in self.members:
            if m == self.name.lower() or m == resvar:
                member = self.members[m]
                self.dtype = member.dtype
                self.modifiers = member.modifiers
                self.kind = member.kind
                self.default = member.default
                self.dimension = member.dimension
                # The member described the return value, not a local
                # variable, so it is consumed here.
                del self.members[m]
                break
def nfa_dot_importer(input_file: str) -> dict:
    """Import a NFA from a DOT file.

    Of .dot files the following attributes are recognized:

    - ``nodeX shape=doublecircle`` -> accepting node;
    - ``nodeX root=true`` -> initial node;
    - ``edgeX label="a"`` -> action in alphabet;
    - ``fakeX style=invisible`` -> dummy invisible nodes pointing to the
      initial state (skipped);
    - ``fakeX -> S [style=bold]`` -> dummy transitions drawing arrows to
      initial states (skipped).

    All invisible nodes are skipped.

    Forbidden names:

    - 'fake' used for graphical purposes to draw the arrow of the
      initial state;
    - 'sink' used as additional state when completing a NFA.

    Forbidden characters: spaces.

    :param str input_file: Path to input DOT file;
    :return: *(dict)* representing a NFA.
    """
    # pyDot Object
    g = pydot.graph_from_dot_file(input_file)[0]
    states = set()
    initial_states = set()
    accepting_states = set()
    # Characters stripped from node/edge names and labels before use.
    replacements = {'"': '', "'": '', '(': '', ')': '', ' ': ''}
    for node in g.get_nodes():
        attributes = node.get_attributes()
        # Skip graphviz bookkeeping entries and the 'fake' arrow helper.
        if node.get_name() == 'fake' or node.get_name() == 'None' or node.get_name() == 'graph' or node.get_name() == 'node':
            continue
        if 'style' in attributes and attributes['style'] == 'invisible':
            continue
        node_reference = __replace_all(replacements, node.get_name()).split(',')
        # Comma-separated names become tuple states (e.g. product automata).
        if len(node_reference) > 1:
            node_reference = tuple(node_reference)
        else:
            node_reference = node_reference[0]
        states.add(node_reference)
        for attribute in attributes:
            if attribute == 'root':
                initial_states.add(node_reference)
            if attribute == 'shape' and attributes['shape'] == 'doublecircle':
                accepting_states.add(node_reference)
    alphabet = set()
    transitions = {}
    for edge in g.get_edges():
        source = __replace_all(replacements, edge.get_source()).split(',')
        if len(source) > 1:
            source = tuple(source)
        else:
            source = source[0]
        destination = __replace_all(replacements, edge.get_destination()).split(',')
        if len(destination) > 1:
            destination = tuple(destination)
        else:
            destination = destination[0]
        # Edges touching skipped (fake/invisible) nodes are ignored.
        if source not in states or destination not in states:
            continue
        label = __replace_all(replacements, edge.get_label())
        alphabet.add(label)
        # NFA transitions map (state, label) to a *set* of destinations.
        transitions.setdefault((source, label), set()).add(destination)
    nfa = {
        'alphabet': alphabet,
        'states': states,
        'initial_states': initial_states,
        'accepting_states': accepting_states,
        'transitions': transitions
    }
    return nfa
def generate_git_version_info():
    """Query the git repository information to generate a version module."""
    info = GitInfo()
    git_path = call(('which', 'git'))
    # get build info
    info.builder = get_build_name()
    info.build_date = get_build_date()
    # parse git ID
    info.hash, info.date, info.author, info.committer = (get_last_commit(git_path))
    # determine branch
    info.branch = get_git_branch(git_path)
    # determine tag
    info.tag = get_git_tag(info.hash, git_path)
    # determine version: tagged builds strip the leading 'v' ("v1.2.3");
    # untagged builds use a short hash and are never releases.
    if info.tag:
        info.version = info.tag.strip('v')
        # A release version contains no letters (e.g. "1.2rc1" is not one).
        info.release = not re.search('[a-z]', info.version.lower())
    else:
        info.version = info.hash[:6]
        info.release = False
    # Determine *last* stable release
    info.last_release = determine_latest_release_version()
    # refresh index so the status check reflects the working tree accurately
    call((git_path, 'update-index', '-q', '--refresh'))
    # check working copy for changes
    info.status = get_git_status(git_path)
    return info
def set_pixel(self, x, y, color):
    """Set pixel (x, y). Color may be: value, tuple, list etc.

    If the image is set to contain more color-channels than len(color),
    the remaining channels will be filled automatically.

    Example (channels=4, i.e. RGBA output):

        color = 17       -> color = [17, 17, 17, 255]
        color = (17, 99) -> color = [17, 99, 0, 255]

    Passing in shorthand color-tuples for larger images on a regular basis
    might result in a very noticeable performance penalty.
    """
    try:
        # these checks are for convenience, not for safety
        if len(color) < self.channels:
            # color is a tuple (length >= 1): pad it out to self.channels.
            # Grey values replicate; missing alpha defaults to 255 (opaque).
            if len(color) == 1:
                if self.channels == 2:
                    color = [color[0], 255]
                elif self.channels == 3:
                    color = [color[0], color[0], color[0]]
                elif self.channels == 4:
                    color = [color[0], color[0], color[0], 255]
            elif len(color) == 2:
                if self.channels == 3:
                    color = [color[0], color[1], 0]
                elif self.channels == 4:
                    color = [color[0], color[1], 0, 255]
            elif len(color) == 3:
                if self.channels == 4:
                    color = [color[0], color[1], color[2], 255]
    except TypeError:
        # color is not an iterable: replicate the scalar across channels.
        if self.channels > 1:
            if self.channels == 2:
                color = [color, 255]
            elif self.channels == 3:
                color = [color, color, color]
            else:
                # only values 1..4 are allowed
                color = [color, color, color, 255]
    # Note the (row, column) = (y, x) indexing into the backing array.
    self.array[y, x] = color
def pyvolvePartitions(model, divselection=None):
    """Get list of `pyvolve` partitions for `model`.

    Args:
        `model` (`phydmslib.models.Models` object)
            The model used for the simulations. Currently only certain
            `Models` are supported (e.g., `YNGKP`, `ExpCM`).
        `divselection` (`None` or 2-tuple `(divomega, divsites)`)
            Set this option if you want to simulate a subset of sites as
            under diversifying selection (e.g., an `omega` different than
            that used by `model`). In this case, `divomega` is the omega
            for this subset of sites, and `divsites` is a list of the
            sites in 1, 2, ... numbering.

    Returns:
        `partitions` (`list` of `pyvolve.Partition` objects)
            Can be fed into `pyvolve.Evolver` to simulate evolution.
    """
    codons = pyvolve.genetics.Genetics().codons
    codon_dict = pyvolve.genetics.Genetics().codon_dict
    pyrims = pyvolve.genetics.Genetics().pyrims
    purines = pyvolve.genetics.Genetics().purines
    if divselection:
        (divomega, divsites) = divselection
    else:
        divsites = []
    # Diversifying-selection sites use 1-based numbering.
    assert all([1 <= r <= model.nsites for r in divsites])
    partitions = []
    for r in range(model.nsites):
        # Build the per-site codon substitution rate matrix.
        matrix = scipy.zeros((len(codons), len(codons)), dtype='float')
        for (xi, x) in enumerate(codons):
            for (yi, y) in enumerate(codons):
                ntdiffs = [(x[j], y[j]) for j in range(3) if x[j] != y[j]]
                if len(ntdiffs) == 1:
                    # Only single-nucleotide changes get a nonzero rate.
                    (xnt, ynt) = ntdiffs[0]
                    qxy = 1.0
                    # Transition (purine<->purine or pyrimidine<->pyrimidine)
                    # vs transversion: transitions are scaled by kappa.
                    if (xnt in purines) == (ynt in purines):
                        qxy *= model.kappa
                    (xaa, yaa) = (codon_dict[x], codon_dict[y])
                    fxy = 1.0
                    if xaa != yaa:
                        # Nonsynonymous change: apply the applicable omega.
                        if type(model) == phydmslib.models.ExpCM_empirical_phi_divpressure:
                            fxy *= model.omega * (1 + model.omega2 * model.deltar[r])
                        elif r + 1 in divsites:
                            fxy *= divomega
                        else:
                            fxy *= model.omega
                    if type(model) in [phydmslib.models.ExpCM,
                                       phydmslib.models.ExpCM_empirical_phi,
                                       phydmslib.models.ExpCM_empirical_phi_divpressure]:
                        qxy *= model.phi[NT_TO_INDEX[ynt]]
                        pix = model.pi[r][AA_TO_INDEX[xaa]] ** model.beta
                        piy = model.pi[r][AA_TO_INDEX[yaa]] ** model.beta
                        # Halpern-Bruno style fixation probability factor.
                        if abs(pix - piy) > ALMOST_ZERO:
                            fxy *= math.log(piy / pix) / (1.0 - pix / piy)
                    elif type(model) == phydmslib.models.YNGKP_M0:
                        for p in range(3):
                            qxy *= model.phi[p][NT_TO_INDEX[y[p]]]
                    else:
                        raise ValueError("Can't handle model type {0}".format(type(model)))
                    matrix[xi][yi] = model.mu * qxy * fxy
            # Diagonal makes each row sum to zero, as a rate matrix requires.
            matrix[xi][xi] = -matrix[xi].sum()
        # create model in way that captures annoying print statements in pyvolve
        old_stdout = sys.stdout
        sys.stdout = open(os.devnull, 'w')
        try:
            m = pyvolve.Model("custom", {"matrix": matrix})
        finally:
            sys.stdout.close()
            sys.stdout = old_stdout
        partitions.append(pyvolve.Partition(models=m, size=1))
    return partitions
def export(self, nidm_version, export_dir):
    """Create prov graph attributes for this Contrast Map entity."""
    # Contrast Map entity
    atts = ((PROV['type'], NIDM_CONTRAST_MAP), (NIDM_CONTRAST_NAME, self.name))
    if not self.isderfrommap:
        atts = atts + ((NIDM_IN_COORDINATE_SPACE, self.coord_space.id),)
    if self.label is not None:
        atts = atts + ((PROV['label'], self.label),)
    if self.name is not None:
        # NOTE(review): NIDM_CONTRAST_NAME is already included unconditionally
        # above, so a non-None name appears twice in `atts` -- confirm this
        # duplication is intended.
        atts = atts + ((NIDM_CONTRAST_NAME, self.name),)
    # Parameter estimate entity
    self.add_attributes(atts)
def on_click(self, event):
    """Handle a click event: volume up, volume down, or toggle mute."""
    pressed = event["button"]
    if pressed == self.button_up:
        # raise volume by the configured step
        self.backend.volume_up(self.volume_delta)
    elif pressed == self.button_down:
        # lower volume by the configured step
        self.backend.volume_down(self.volume_delta)
    elif pressed == self.button_mute:
        # flip the mute state
        self.backend.toggle_mute()
def get_credentials(username: str = None, **kwargs) -> dict:
    """Calculate credentials for Axes to use internally from the given
    username and kwargs.

    Axes sets the username value under the key defined by
    ``settings.AXES_USERNAME_FORM_FIELD`` and layers the given kwargs on
    top of that.
    """
    return {settings.AXES_USERNAME_FORM_FIELD: username, **kwargs}
def register(self):
    """Register this resource for later retrieval via lookup(), possibly in a child process."""
    # The environment is inherited by child processes, so storing the pickled
    # resource there makes it discoverable after a fork/exec.
    env_key = self.resourceEnvNamePrefix + self.pathHash
    os.environ[env_key] = self.pickle()
def as_dict(self):
    """Return a dictionary mapping time slide IDs to offset dictionaries."""
    mapping = {}
    for row in self:
        # Lazily create one offsetvector per time slide ID.
        if row.time_slide_id not in mapping:
            mapping[row.time_slide_id] = offsetvector.offsetvector()
        offsets = mapping[row.time_slide_id]
        # An instrument may appear at most once per time slide.
        if row.instrument in offsets:
            raise KeyError("'%s': duplicate instrument '%s'" % (row.time_slide_id, row.instrument))
        offsets[row.instrument] = row.offset
    return mapping
def to_forward_slashes(data):
    """Converts backward slashes to forward slashes.

    Usage::

        >>> to_forward_slashes("To\\Forward\\Slashes")
        u'To/Forward/Slashes'

    :param data: Data to convert.
    :type data: unicode
    :return: Converted path.
    :rtype: unicode
    """
    converted = data.replace("\\", "/")
    # The original logs after the replacement, i.e. the converted value.
    LOGGER.debug("> Data: '{0}' to forward slashes.".format(converted))
    return converted
def _simplify(cls, operands: List[Expression]) -> bool:
    """Flatten/sort the operands of associative/commutative operations.

    Returns:
        True iff *one_identity* is True and the operation contains a single
        argument that is not a sequence wildcard.
    """
    if cls.associative:
        # Splice nested applications of the same operation into the argument
        # list, mutating *operands* in place.
        flattened = []  # type: List[Expression]
        for arg in operands:
            if isinstance(arg, cls):
                flattened.extend(arg.operands)  # type: ignore
            else:
                flattened.append(arg)
        operands[:] = flattened
    if cls.one_identity and len(operands) == 1:
        sole = operands[0]
        # A lone non-wildcard (or fixed single-match wildcard) collapses.
        if not isinstance(sole, Wildcard) or (sole.min_count == 1 and sole.fixed_size):
            return True
    if cls.commutative:
        operands.sort()
    return False
def mmi_to_raster(self, force_flag=False, algorithm=USE_ASCII):
    """Convert the grid.xml's mmi column to a raster using gdal_grid.

    A geotiff file will be created.

    Unfortunately no python bindings exist for doing this so we are
    going to do it using a shell call.

    .. see also:: http://www.gdal.org/gdal_grid.html

    Example of the gdal_grid call we generate::

        gdal_grid -zfield "mmi" -a invdist:power=2.0:smoothing=1.0
            -txe 126.29 130.29 -tye 0.802 4.798 -outsize 400 400
            -of GTiff -ot Float16 -l mmi mmi.vrt mmi.tif

    .. note:: It is assumed that gdal_grid is in your path.

    :param force_flag: Whether to force the regeneration of the output
        file. Defaults to False.
    :type force_flag: bool

    :param algorithm: Which re-sampling algorithm to use.
        valid options are 'nearest' (for nearest neighbour), 'invdist'
        (for inverse distance), 'average' (for moving average). Defaults
        to 'nearest' if not specified. Note that passing re-sampling alg
        parameters is currently not supported. If None is passed it will
        be replaced with 'use_ascii'.
        'use_ascii' algorithm will convert the mmi grid to ascii file
        then convert it to raster using gdal_translate.
    :type algorithm: str

    :returns: Path to the resulting tif file.
    :rtype: str

    .. note:: For interest you can also make quite beautiful smoothed
        raster using this:
        gdal_grid -zfield "mmi" -a_srs EPSG:4326
        -a invdist:power=2.0:smoothing=1.0 -txe 122.45 126.45
        -tye -2.21 1.79 -outsize 400 400 -of GTiff
        -ot Float16 -l mmi mmi.vrt mmi-trippy.tif
    """
    LOGGER.debug('mmi_to_raster requested.')
    if algorithm is None:
        algorithm = USE_ASCII
    # Output name embeds the algorithm when this instance is configured to.
    if self.algorithm_name:
        tif_path = os.path.join(self.output_dir, '%s-%s.tif' % (self.output_basename, algorithm))
    else:
        tif_path = os.path.join(self.output_dir, '%s.tif' % self.output_basename)
    # short circuit if the tif is already created.
    if os.path.exists(tif_path) and force_flag is not True:
        return tif_path
    if algorithm == USE_ASCII:
        # Convert to ascii
        ascii_path = self.mmi_to_ascii(True)
        # Creating command to convert to tif
        command = (('%(gdal_translate)s -a_srs EPSG:4326 ' '"%(ascii)s" "%(tif)s"') % {'gdal_translate': which('gdal_translate')[0], 'ascii': ascii_path, 'tif': tif_path})
        LOGGER.info('Created this gdal command:\n%s' % command)
        # Now run GDAL warp scottie...
        self._run_command(command)
    else:
        # Ensure the vrt mmi file exists (it will generate csv too if
        # needed)
        vrt_path = self.mmi_to_vrt(force_flag)
        # now generate the tif using default nearest neighbour
        # interpolation options. This gives us the same output as the
        # mmi.grd generated by the earthquake server.
        if INVDIST in algorithm:
            algorithm = 'invdist:power=2.0:smoothing=1.0'
        command = (('%(gdal_grid)s -a %(alg)s -zfield "mmi" -txe %(xMin)s ' '%(xMax)s -tye %(yMin)s %(yMax)s -outsize %(dimX)i ' '%(dimY)i -of GTiff -ot Float16 -a_srs EPSG:4326 -l mmi ' '"%(vrt)s" "%(tif)s"') % {'gdal_grid': which('gdal_grid')[0], 'alg': algorithm, 'xMin': self.x_minimum, 'xMax': self.x_maximum, 'yMin': self.y_minimum, 'yMax': self.y_maximum, 'dimX': self.columns, 'dimY': self.rows, 'vrt': vrt_path, 'tif': tif_path})
        LOGGER.info('Created this gdal command:\n%s' % command)
        # Now run GDAL warp scottie...
        self._run_command(command)
    # We will use keywords file name with simple algorithm name since
    # it will raise an error in windows related to having double
    # colon in path
    if INVDIST in algorithm:
        algorithm = 'invdist'
    # copy the keywords file from fixtures for this layer
    self.create_keyword_file(algorithm)
    # Lastly copy over the standard qml (QGIS Style file) for the mmi.tif
    if self.algorithm_name:
        qml_path = os.path.join(self.output_dir, '%s-%s.qml' % (self.output_basename, algorithm))
    else:
        qml_path = os.path.join(self.output_dir, '%s.qml' % self.output_basename)
    qml_source_path = resources_path('converter_data', 'mmi.qml')
    shutil.copyfile(qml_source_path, qml_path)
    return tif_path
def change_crypto_domain_config(self, crypto_domain_index, access_mode):
    """Change the access mode for a crypto domain that is currently included
    in the crypto configuration of this partition.

    The access mode will be changed for the specified crypto domain on all
    crypto adapters currently included in the crypto configuration of this
    partition.

    For the general principle for maintaining crypto configurations of
    partitions, see :meth:`~zhmcclient.Partition.increase_crypto_config`.

    Authorization requirements:

    * Object-access permission to this Partition.
    * Task permission to the "Partition Details" task.

    Parameters:

      crypto_domain_index (:term:`integer`):
        Domain index of the crypto domain to be changed. For values, see
        :meth:`~zhmcclient.Partition.increase_crypto_config`.

      access_mode (:term:`string`):
        The new access mode for the crypto domain. For values, see
        :meth:`~zhmcclient.Partition.increase_crypto_config`.

    Raises:

      :exc:`~zhmcclient.HTTPError`
      :exc:`~zhmcclient.ParseError`
      :exc:`~zhmcclient.AuthError`
      :exc:`~zhmcclient.ConnectionError`
    """
    payload = {
        'domain-index': crypto_domain_index,
        'access-mode': access_mode,
    }
    operation_uri = self.uri + '/operations/change-crypto-domain-configuration'
    self.manager.session.post(operation_uri, payload)
def zinb_ll(data, P, R, Z):
    """Returns the zero-inflated negative binomial log-likelihood of the data.

    NOTE(review): the zero-inflation parameters ``Z`` are never applied —
    the per-cluster loop body below is a no-op (``pass``), so this currently
    returns the plain negative binomial log-likelihood from ``nb_ll``
    unchanged.  Confirm whether the zero-inflation adjustment is
    intentionally unimplemented.
    """
    lls = nb_ll(data, P, R)
    clusters = P.shape[1]
    # Placeholder loop: presumably intended to apply the per-cluster
    # zero-inflation correction using Z — currently does nothing.
    for c in range(clusters):
        pass
    return lls
def bake(self):
    """Bake an `ansible-lint` command so it's ready to execute and returns
    None.

    :return: None
    """
    options = self.options
    # Merge the default and user-supplied exclude paths into --exclude args.
    excludes = options.pop('default_exclude') + options.pop('exclude')
    exclude_args = ['--exclude={}'.format(path) for path in excludes]
    # Each rule id in 'x' becomes a separate ('-x', <id>) pair, flattened
    # into a single tuple for sh.
    x_args = tuple(('-x', rule) for rule in options.pop('x'))
    self._ansible_lint_command = sh.ansible_lint.bake(
        options,
        exclude_args,
        sum(x_args, ()),
        self._playbook,
        _env=self.env,
        _out=LOG.out,
        _err=LOG.error)
def delete_channel_cb(self, gshell, chinfo):
    """Called when a channel is deleted from the main interface.

    Parameter is chinfo (a bunch).
    """
    channel = chinfo.name
    # Ignore channels we never tracked.
    if channel not in self.name_dict:
        return
    del self.name_dict[channel]
    self.logger.debug('{0} removed from ChangeHistory'.format(channel))
    # Without a GUI there is nothing further to refresh.
    if not self.gui_up:
        return False
    self.clear_selected_history()
    self.recreate_toc()
def visualize(ctx, meta_model_file, model_file, ignore_case, output_format):
    """Generate .dot file(s) from meta-model and optionally model.

    Emits PlantUML output for the meta-model when requested; models are
    only supported as Graphviz .dot.
    """
    debug = ctx.obj['debug']
    meta_model, model = check_model(meta_model_file, model_file, debug, ignore_case)
    if output_format == 'plantuml':
        pu_file = "{}.pu".format(meta_model_file)
        click.echo("Generating '{}' file for meta-model.".format(pu_file))
        click.echo("To convert to png run 'plantuml {}'".format(pu_file))
        click.echo("To convert to svg run 'plantuml -tsvg {}'".format(pu_file))
        metamodel_export(meta_model, pu_file, PlantUmlRenderer())
    else:
        dot_file = "{}.dot".format(meta_model_file)
        click.echo("Generating '{}' file for meta-model.".format(dot_file))
        click.echo("To convert to png run 'dot -Tpng -O {}'".format(dot_file))
        metamodel_export(meta_model, dot_file)
    if model_file:
        if output_format == 'plantuml':
            raise Exception("plantuml is not supported for model files, yet.")
        dot_file = "{}.dot".format(model_file)
        # BUGFIX: these messages previously reported the source model file
        # instead of the generated .dot file (the meta-model branch above
        # correctly reports dot_file, and 'dot -Tpng' must run on the .dot).
        click.echo("Generating '{}' file for model.".format(dot_file))
        click.echo("To convert to png run 'dot -Tpng -O {}'".format(dot_file))
        model_export(model, dot_file)
def map_keys(pvs, keys):
    """Add human readable key names to dictionary while leaving any existing key names.

    :param pvs: iterable of dicts whose keys may appear in ``keys``.
    :param keys: mapping of raw key -> human readable key name.
    :return: list of new dicts; every human readable name is present
        (defaulting to None) and all raw keys are kept as-is.
    """
    mapped = []
    for pv in pvs:
        # Pre-seed every readable name with None so callers can rely on the
        # key existing even when the raw key is absent from pv.
        # (Idiomatic dict comprehension; the original built dict() from a
        # generator and ignored the unused raw-key loop variable.)
        row = {name: None for name in keys.values()}
        for raw_key, value in pv.items():
            if raw_key in keys:
                row[keys[raw_key]] = value
            row[raw_key] = value
        mapped.append(row)
    return mapped
def _tffunc(*argtypes):
    '''Helper that transforms TF-graph generating function into a regular one.
    See `_resize` function below.'''
    # One placeholder per declared argument type, created eagerly so the
    # wrapped graph is built a single time at decoration.
    placeholders = [tf.placeholder(t) for t in argtypes]

    def wrap(func):
        graph_out = func(*placeholders)

        def wrapper(*args, **kw):
            feed = dict(zip(placeholders, args))
            return graph_out.eval(feed, session=kw.get('session'))
        return wrapper
    return wrap
def assign_messagetypes(self, messages, clusters):
    """Assign message types based on the clusters. Following rules:
    1) Messages from different clusters will get different message types
    2) Messages from same clusters will get same message type
    3) The new message type will copy over the existing labels
    4) No new message type will be set for messages, that already have a custom message type assigned

    For messages with clustername "default" no new message type will be created

    :param messages: Messages, that messagetype needs to be clustered
    :param clusters: clusters for the messages
    :type messages: list[Message]
    :type clusters: dict[str, set[int]]
    :return:
    """
    default_type = self.messagetypes[0]
    for clustername, member_indices in clusters.items():
        if clustername == "default":
            # Do not force the default message type
            continue
        for index in member_indices:
            msg = messages[index]
            if msg.message_type != default_type:
                # Respect custom types already assigned by the user.
                continue
            # Reuse an existing type with this cluster's name, or create one
            # that copies the labels of the message's current type.
            matching = [mtype for mtype in self.messagetypes if mtype.name == clustername]
            if matching:
                msg_type = matching[0]
            else:
                msg_type = MessageType(name=clustername, iterable=msg.message_type)
                msg_type.assigned_by_logic_analyzer = True
                self.messagetypes.append(msg_type)
            msg.message_type = msg_type
def update_fw_local_cache(self, net, direc, start):
    """Update the fw dict with Net ID and service IP."""
    fw_dict = self.get_fw_dict()
    # Key prefix depends on traffic direction ('in' vs anything else).
    prefix = 'in' if direc == 'in' else 'out'
    fw_dict['%s_network_id' % prefix] = net
    fw_dict['%s_service_ip' % prefix] = start
    self.update_fw_dict(fw_dict)
def __load_settings(self):
    """Load settings from .json file into ``self.data``.

    Prints a message (rather than raising) when the file is missing.
    """
    # file_path = path.relpath(settings_file_path)
    # file_path = path.abspath(settings_file_path)
    file_path = self.file_path
    try:
        # BUGFIX: use a context manager so the file handle is closed even
        # when json parsing fails (the original json.load(open(...)) left
        # the handle open).
        with open(file_path) as settings_file:
            self.data = json.load(settings_file)
    except FileNotFoundError:
        print("Could not load", file_path)
def run_main():
    """run_main

    Search Splunk.

    Command line entry point: parses arguments (falling back to SPLUNK_*
    module defaults), validates credentials, builds a Splunk search request
    (optionally from a JSON data file) and prints the matching log records.
    Exits with status 1 on invalid credentials or an unparseable address.
    """
    parser = argparse.ArgumentParser(description=('Search Splunk'))
    parser.add_argument('-u', help='username', required=False, dest='user')
    parser.add_argument('-p', help='user password', required=False, dest='password')
    parser.add_argument('-f', help='splunk-ready request in a json file', required=False, dest='datafile')
    parser.add_argument('-i', help='index to search', required=False, dest='index_name')
    parser.add_argument('-a', help='host address: <fqdn:port>', required=False, dest='address')
    parser.add_argument('-e', help='(Optional) earliest_time minutes back', required=False, dest='earliest_time_minutes')
    parser.add_argument('-l', help='(Optional) latest_time minutes back', required=False, dest='latest_time_minutes')
    parser.add_argument('-q', '--queryargs', nargs='*', help=('query string for searching splunk: ' 'search index="antinex" AND levelname="ERROR"'), required=False, dest='query_args')
    parser.add_argument('-j', help='(Optional) view as json dictionary logs', required=False, dest='json_view', action='store_true')
    parser.add_argument('-t', help=('(Optional) pre-existing Splunk token ' 'which can be set using export ' 'SPLUNK_TOKEN=<token> - if provided ' 'the user (-u) and password (-p) ' 'arguments are not required'), required=False, dest='token')
    parser.add_argument('-m', help='(Optional) verbose message when getting logs', required=False, dest='message_details', action='store_true')
    parser.add_argument('-v', help='(Optional) verify certs - disabled by default', required=False, dest='verify', action='store_true')
    parser.add_argument('-b', help='verbose', required=False, dest='verbose', action='store_true')
    args = parser.parse_args()
    # Defaults come from the module-level SPLUNK_* configuration values.
    user = SPLUNK_USER
    password = SPLUNK_PASSWORD
    token = SPLUNK_TOKEN
    address = SPLUNK_API_ADDRESS
    index_name = SPLUNK_INDEX
    verbose = SPLUNK_VERBOSE
    show_message_details = bool(str(ev('MESSAGE_DETAILS', '0')).lower() == '1')
    earliest_time_minutes = None
    latest_time_minutes = None
    verify = False
    code_view = True
    json_view = False
    datafile = None
    # Command-line arguments override the environment defaults.
    if args.user:
        user = args.user
    if args.password:
        password = args.password
    if args.address:
        address = args.address
    if args.datafile:
        datafile = args.datafile
    if args.index_name:
        index_name = args.index_name
    if args.verify:
        verify = args.verify
    if args.earliest_time_minutes:
        earliest_time_minutes = int(args.earliest_time_minutes)
    if args.latest_time_minutes:
        latest_time_minutes = int(args.latest_time_minutes)
    if args.verbose:
        verbose = True
    if args.message_details:
        show_message_details = args.message_details
    if args.token:
        token = args.token
    # JSON view and code view are mutually exclusive presentation modes.
    if args.json_view:
        json_view = True
        code_view = False
    default_search_query = 'index="{}" | head 10 | reverse'.format(index_name)
    search_query = ev('SPLUNK_QUERY', default_search_query)
    if args.query_args:
        search_query = ' '.join(args.query_args)
    # Validate credentials; a token makes user/password optional.
    valid = True
    if not user or user == 'user-not-set':
        log.critical('missing user')
        valid = False
    if not password or password == 'password-not-set':
        log.critical('missing password')
        valid = False
    if not index_name:
        log.critical('missing splunk index')
        valid = False
    if token:
        # if the token is present,
        # then the user and the password are not required
        if not valid and index_name:
            valid = True
    if not valid:
        log.critical('Please run with the following arguments:\n')
        log.error('-u <username> -p <password> ' '-i <index> -t <token if user and password not set> ' '-a <host address as: fqdn:port>')
        log.critical('\n' 'Or you can export the following ' 'environment variables and retry the command: ' '\n')
        log.error('export SPLUNK_ADDRESS="splunkenterprise:8088"\n' 'export SPLUNK_API_ADDRESS="splunkenterprise:8089"\n' 'export SPLUNK_PASSWORD="123321"\n' 'export SPLUNK_USER="trex"\n' 'export SPLUNK_INDEX="antinex"\n' 'export SPLUNK_TOKEN="<Optional pre-existing Splunk token>"\n')
        sys.exit(1)
    if verbose:
        log.info(('creating client user={} address={}').format(user, address))
    # Parse "<fqdn>:<port>" into host and integer port, with a specific
    # error message (last_msg) for whichever step fails.
    last_msg = ''
    host = ''
    port = -1
    try:
        last_msg = ('Invalid address={}').format(address)
        address_split = address.split(':')
        last_msg = ('Failed finding host in address={} ' '- please use: -a <fqdn:port>').format(address)
        host = address_split[0]
        last_msg = ('Failed finding integer port in address={} ' '- please use: -a <fqdn:port>').format(address)
        port = int(address_split[1])
    except Exception as e:
        log.error(('Failed to parse -a {} for the ' 'splunk host address: {} which threw an ' 'ex={}').format(address, last_msg, e))
        sys.exit(1)
    # end of try ex
    if verbose:
        log.info(('connecting {}@{}:{}').format(user, host, port))
    # An optional JSON data file supplies the whole request body.
    req_body = None
    if datafile:
        if verbose:
            log.info(('loading request in datafile={}').format(datafile))
        with open(datafile, 'r') as f:
            req_body = json.loads(f.read())
    # Convert "minutes back" arguments into Splunk timestamp strings.
    earliest_time = None
    latest_time = None
    now = datetime.datetime.now()
    if earliest_time_minutes:
        min_15_ago = now - datetime.timedelta(minutes=earliest_time_minutes)
        earliest_time = min_15_ago.strftime('%Y-%m-%dT%H:%M:%S.000-00:00')
    if latest_time_minutes:
        latest_time = (now - datetime.timedelta(minutes=latest_time_minutes)).strftime('%Y-%m-%dT%H:%M:%S.000-00:00')
    # Step 2: Create a search job
    if not search_query.startswith('search'):
        search_query = 'search {}'.format(search_query)
    search_data = req_body
    if not search_data:
        search_data = {'search': search_query}
    if earliest_time:
        search_data['earliest_time'] = earliest_time
    if latest_time:
        search_data['latest_time'] = latest_time
    res = sp.search(user=user, password=password, address=address, token=token, query_dict=search_data, verify=verify)
    if res['status'] == SUCCESS:
        result_list = []
        try:
            result_list = res['record'].get('results', result_list)
            if len(result_list) == 0:
                log.info(('No matches for search={} ' 'response={}').format(ppj(search_data), ppj(res['record'])))
        except Exception as e:
            result_list = []
            log.error(('Failed to find results for the query={} ' 'with ex={}').format(ppj(search_data), e))
        for ridx, log_record in enumerate(result_list):
            log_raw = log_record.get('_raw', None)
            if log_raw:
                show_search_results(log_rec=log_raw, code_view=code_view, json_view=json_view, show_message_details=show_message_details)
            else:
                show_non_search_results(log_rec=log_record, code_view=code_view, json_view=json_view, show_message_details=show_message_details)
            # end of handling log record presentation as a view
        # end for all log records
    else:
        log.error(('Failed searching splunk with status={} and ' 'error: {}').format(res['status'], res['err']))
    # end of if job_id
    if verbose:
        log.info('done')
def print_str(self, value, justify_right=True):
    """Print a 4 character long string of values to the display.

    Characters in the string should be any ASCII value 32 to 127
    (printable ASCII).
    """
    # Right-justified text starts at the column that leaves the string flush
    # with the display's right edge; otherwise start at column 0.
    offset = (4 - len(value)) if justify_right else 0
    # Emit each character at its computed digit position.
    for index, char in enumerate(value):
        self.set_digit(index + offset, char)
def cls_sets(cls, wanted_cls, registered=True):
    """Return a list of all `wanted_cls` attributes in this
    class, where `wanted_cls` is the desired attribute type.
    """
    found = []
    for attr_name in dir(cls):
        # Skip private/dunder attributes.
        if attr_name.startswith('_'):
            continue
        value = getattr(cls, attr_name, None)
        if not isinstance(value, wanted_cls):
            continue
        # When registered is False, drop values already marked registered.
        if not registered and getattr(value, '_registered', False):
            continue
        found.append(value)
    return found
def lm_freqs_taus(**kwargs):
    """Take input_params and return dictionaries with frequencies and damping
    times of each overtone of a specific lm mode, checking that all of them
    are given.
    """
    freqs = {}
    taus = {}
    for lmn in kwargs['lmns']:
        # lmn encodes l, m and the number of overtones as single digits.
        l, m, nmodes = int(lmn[0]), int(lmn[1]), int(lmn[2])
        for n in range(nmodes):
            mode = '%d%d%d' % (l, m, n)
            if 'f_' + mode not in kwargs:
                raise ValueError('f_%s is required' % mode)
            if 'tau_' + mode not in kwargs:
                raise ValueError('tau_%s is required' % mode)
            freqs[mode] = kwargs['f_' + mode]
            taus[mode] = kwargs['tau_' + mode]
    return freqs, taus
def _add_observation(self, x_to_add, y_to_add):
    """Add observation to window, updating means/variance efficiently.

    Delegates the incremental mean and variance updates to the dedicated
    helpers, then grows the window size by one.  The size is incremented
    last, so the helpers see the pre-insertion window_size.
    """
    self._add_observation_to_means(x_to_add, y_to_add)
    self._add_observation_to_variances(x_to_add, y_to_add)
    self.window_size += 1
def store_password_in_keyring(credential_id, username, password=None):
    '''Interactively prompts user for a password and stores it in system keyring'''
    try:
        # pylint: disable=import-error
        import keyring
        import keyring.errors
        # pylint: enable=import-error
        if password is None:
            # Prompt interactively; EOF (e.g. piped stdin) leaves it unset.
            try:
                password = getpass.getpass(
                    'Please enter password for {0}: '.format(credential_id))
            except EOFError:
                password = None
        if not password:
            # WE should raise something else here to be able to use this
            # as/from an API
            raise RuntimeError('Invalid password provided.')
        try:
            _save_password_in_keyring(credential_id, username, password)
        except keyring.errors.PasswordSetError as exc:
            log.debug('Problem saving password in the keyring: %s', exc)
    except ImportError:
        log.error('Tried to store password in keyring, but no keyring module is installed')
        return False
def parse_rawprofile_blocks(text):
    """Split the file into blocks along delimters and and put delimeters back in
    the list
    """
    # The total time reported in the raw output is from pystone not kernprof
    # The pystone total time is actually the average time spent in the function
    delim = 'Total time: '
    delim2 = 'Pystone time: '
    # delim = ' File : '
    profile_block_list = ut.regex_split('^' + delim, text)
    # Re-attach a delimiter to every block except the leading preamble.
    profile_block_list[1:] = [delim2 + block for block in profile_block_list[1:]]
    return profile_block_list
def stop():
    '''Stop KodeDrive daemon.'''
    message, had_error = cli_syncthing_adapter.sys(exit=True)
    # Echo to stderr when the adapter reported an error.
    click.echo("%s" % message, err=had_error)
def hscan(self, name, cursor='0', match=None, count=10):
    """Emulate hscan."""
    def value_function():
        # List of (field, value) tuples, sorted for a consistent order.
        return sorted(self.hgetall(name).items(), key=lambda item: item[0])

    scanned = self._common_scan(
        value_function, cursor=cursor, match=match, count=count,
        key=lambda v: v[0])  # noqa
    # From list of tuples back to dict.
    scanned[1] = dict(scanned[1])
    return scanned
def fit(self, struct1, struct2):
    """Fit two structures.

    Args:
        struct1 (Structure): 1st structure
        struct2 (Structure): 2nd structure

    Returns:
        True or False.

        NOTE(review): when composition hashes differ (and subset matching
        is off) the method actually returns None rather than False —
        callers doing ``is False`` checks should be aware.
    """
    struct1, struct2 = self._process_species([struct1, struct2])
    # Cheap pre-filter: different composition hashes can never match unless
    # subset matching is enabled.
    if not self._subset and self._comparator.get_hash(struct1.composition) != self._comparator.get_hash(struct2.composition):
        return None
    struct1, struct2, fu, s1_supercell = self._preprocess(struct1, struct2)
    # break_on_match=True: stop at the first match within tolerance.
    match = self._match(struct1, struct2, fu, s1_supercell, break_on_match=True)
    if match is None:
        return False
    else:
        # match[0] is compared against stol — presumably the normalized
        # displacement metric; confirm against _match's contract.
        return match[0] <= self.stol
def add_compliance_header(self):
    """Add IIIF Compliance level header to response."""
    compliance_uri = self.manipulator.compliance_uri
    # No compliance URI means no Link header at all.
    if compliance_uri is None:
        return
    self.headers['Link'] = '<' + compliance_uri + '>;rel="profile"'
def user_loc_value_to_instance_string(axis_tag, user_loc):
    """Return the Glyphs UI string (from the instance dropdown) that is
    closest to the provided user location.

    >>> user_loc_value_to_instance_string('wght', 430)
    'Normal'
    >>> user_loc_value_to_instance_string('wdth', 150)
    'Extra Expanded'
    """
    if axis_tag == "wght":
        codes = WEIGHT_CODES
    elif axis_tag == "wdth":
        codes = WIDTH_CODES
    else:
        raise NotImplementedError
    class_ = user_loc_value_to_class(axis_tag, user_loc)
    # Candidates sorted by code name so ties on distance resolve
    # deterministically; pick the one whose class is nearest.
    candidates = sorted((code, cls) for code, cls in codes.items() if code is not None)
    best = min(candidates, key=lambda item: abs(item[1] - class_))
    return best[0]
def _attr_data_(self, slots):
    """Does not work as expected, makes an empty object with a new __slots__
    definition.

    NOTE(review): instantiates a freshly created type named
    '<ClassName>Data' whose __slots__ is the de-duplicated *slots*; the
    instance starts empty, so no data carries over from self.  Left as-is
    pending clarification of intent (docstring itself flags it as broken).
    """
    self.__attr_data = type(''.join([type(self).__name__, 'Data']), (), {'__module__': type(self).__module__, '__slots__': tuple(set(slots))})()
def demultiplex_samples(data):
    """demultiplex a fastqtransformed FASTQ file into separate sample barcode files

    Returns a nested list of per-sample data dicts.  Paired-end input is
    rejected.  When the reads carry no "SAMPLE_" tag, or output already
    exists, demultiplexing is skipped.
    """
    work_dir = os.path.join(dd.get_work_dir(data), "umis")
    sample_dir = os.path.join(work_dir, dd.get_sample_name(data))
    demulti_dir = os.path.join(sample_dir, "demultiplexed")
    files = data["files"]
    # Two input files means paired-end, which is not supported here.
    if len(files) == 2:
        logger.error("Sample demultiplexing doesn't handle paired-end reads, but " "we can add it. Open an issue here https://github.com/bcbio/bcbio-nextgen/issues if you need this and we'll add it.")
        sys.exit(1)
    else:
        fq1 = files[0]
    # check if samples need to be demultiplexed
    # Peek at the first read: fastqtransformed reads embed a SAMPLE_ tag.
    with open_fastq(fq1) as in_handle:
        read = next(in_handle)
        if "SAMPLE_" not in read:
            return [[data]]
    bcfile = get_sample_barcodes(dd.get_sample_barcodes(data), sample_dir)
    # Short-circuit if demultiplexed output already exists.
    demultiplexed = glob.glob(os.path.join(demulti_dir, "*.fq*"))
    if demultiplexed:
        return [split_demultiplexed_sampledata(data, demultiplexed)]
    umis = _umis_cmd(data)
    cmd = ("{umis} demultiplex_samples --nedit 1 --barcodes {bcfile} " "--out_dir {tx_dir} {fq1}")
    msg = "Demultiplexing {fq1}."
    # file_transaction gives a temp dir that is atomically moved on success.
    with file_transaction(data, demulti_dir) as tx_dir:
        do.run(cmd.format(**locals()), msg.format(**locals()))
    demultiplexed = glob.glob(os.path.join(demulti_dir, "*.fq*"))
    return [split_demultiplexed_sampledata(data, demultiplexed)]
def __get_action_graph_pairs_from_query(self, query):
    """Splits the query into command/argument pairs, for example [("MATCH", "{}(_a))"), ("RETURN", "_a")]

    :param query: The string with the list of commands
    :return: the command/argument pairs (as a zip iterator)
    """
    import re
    query = convert_special_characters_to_spaces(query)
    # Fragments between recognised action keywords.
    graph_list = re.split('|'.join(self.action_list), query)
    # Position of each fragment inside the query.
    # NOTE(review): str.find returns the FIRST occurrence, so duplicate
    # fragments may map to the wrong position — behavior preserved.
    query_list_positions = [query.find(graph) for graph in graph_list]
    # BUGFIX: removed two redundant no-op self-assignments of
    # query_list_positions that were dead code in the original.
    action_list = [
        query[query_list_positions[i] + len(graph_list[i]):query_list_positions[i + 1]].strip()
        for i in range(len(graph_list) - 1)
    ]
    graph_list = graph_list[1:]
    return zip(action_list, graph_list)
def save(self, *args, **kwargs):
    """**uid**: :code:`{race.uid}_election:{election_day}-{party}`

    Derives the uid from the race, the election day and (when present) the
    slugified party AP code, then delegates to the normal model save.
    """
    if self.party:
        # Party elections carry the slugified AP code as a suffix.
        self.uid = "{}_election:{}-{}".format(self.race.uid, self.election_day.date, slugify(self.party.ap_code),)
    else:
        self.uid = "{}_election:{}".format(self.race.uid, self.election_day.date)
    super(Election, self).save(*args, **kwargs)
def nsx_controller_connection_addr_address ( self , ** kwargs ) :
"""Auto Generated Code""" | config = ET . Element ( "config" )
nsx_controller = ET . SubElement ( config , "nsx-controller" , xmlns = "urn:brocade.com:mgmt:brocade-tunnels" )
name_key = ET . SubElement ( nsx_controller , "name" )
name_key . text = kwargs . pop ( 'name' )
connection_addr = ET . SubElement ( nsx_controller , "connection-addr" )
address = ET . SubElement ( connection_addr , "address" )
address . text = kwargs . pop ( 'address' )
callback = kwargs . pop ( 'callback' , self . _callback )
return callback ( config ) |
def remove_template_from_network(network_id, template_id, remove_attrs, **kwargs):
    """Remove all resource types in a network relating to the specified
    template.

    remove_attrs
        Flag ('Y') to indicate whether the attributes associated with the
        template types should be removed from the resources in the network.
        These will only be removed if they are not shared with another
        template on the network.

    Raises HydraError if the network or the template does not exist.
    """
    try:
        network = db.DBSession.query(Network).filter(Network.id == network_id).one()
    except NoResultFound:
        raise HydraError("Network %s not found" % network_id)
    try:
        template = db.DBSession.query(Template).filter(Template.id == template_id).one()
    except NoResultFound:
        raise HydraError("Template %s not found" % template_id)
    # IDs used to scope the ResourceType deletion query below.
    type_ids = [tmpltype.id for tmpltype in template.templatetypes]
    node_ids = [n.id for n in network.nodes]
    link_ids = [l.id for l in network.links]
    group_ids = [g.id for g in network.resourcegroups]
    if remove_attrs == 'Y':
        # find the attributes to remove — gathered from the network itself
        # and from every node, link and group in it.
        resource_attrs_to_remove = _get_resources_to_remove(network, template)
        for n in network.nodes:
            resource_attrs_to_remove.extend(_get_resources_to_remove(n, template))
        for l in network.links:
            resource_attrs_to_remove.extend(_get_resources_to_remove(l, template))
        for g in network.resourcegroups:
            resource_attrs_to_remove.extend(_get_resources_to_remove(g, template))
        for ra in resource_attrs_to_remove:
            db.DBSession.delete(ra)
    # Delete every ResourceType on this network (or its nodes/links/groups)
    # whose type belongs to the template being removed.
    resource_types = db.DBSession.query(ResourceType).filter(and_(or_(ResourceType.network_id == network_id, ResourceType.node_id.in_(node_ids), ResourceType.link_id.in_(link_ids), ResourceType.group_id.in_(group_ids), ), ResourceType.type_id.in_(type_ids))).all()
    for resource_type in resource_types:
        db.DBSession.delete(resource_type)
    db.DBSession.flush()
def _connect(self):
    """Connect to Memcached.

    Raises:
        Exception: if the configured unix socket file does not exist, or
            the Telnet connection attempt fails.
    """
    if self._socketFile is not None:
        if not os.path.exists(self._socketFile):
            raise Exception("Socket file (%s) for Memcached Instance not found." % self._socketFile)
    try:
        if self._timeout is not None:
            # BUGFIX: the original passed an undefined name `timeout`
            # (NameError at runtime); the instance attribute was intended.
            self._conn = util.Telnet(self._host, self._port, self._socketFile, self._timeout)
        else:
            self._conn = util.Telnet(self._host, self._port, self._socketFile)
    except Exception as e:
        # Narrowed from a bare except; chain the original failure so the
        # root cause is preserved in the traceback.
        raise Exception("Connection to %s failed." % self._instanceName) from e
def __create(self):
    """Construct the email"""
    # Serialize everything the sender needs; text fields go through
    # self.encode, file/address lists are passed through as-is.
    payload = {
        'config_path': self.encode(self.config_path),
        'subject': self.encode(self.__subject),
        'text': self.encode(self.__text),
        'html': self.encode(self.__html),
        'files': self.__files,
        'send_as_one': self.send_as_one,
        'addresses': self.__addresses,
        'ccs': self.__ccs,
    }
    self.__data = json.dumps(payload)
def _parse ( self , command ) :
"""Execute a single GLIR command tuple.

``command`` is ``(cmd, id_, *args)``: ``cmd`` selects the action, ``id_``
identifies the target object (or, for 'FUNC', the GL function name) and
``args`` carries the action's parameters.  Commands addressed to deleted
or known-invalid objects are silently dropped; only a command on a truly
unknown object id raises.
""" | cmd , id_ , args = command [ 0 ] , command [ 1 ] , command [ 2 : ]
if cmd == 'CURRENT' : # This context is made current
self . env . clear ( )
self . _gl_initialize ( )
self . env [ 'fbo' ] = args [ 0 ]
gl . glBindFramebuffer ( gl . GL_FRAMEBUFFER , args [ 0 ] )
elif cmd == 'FUNC' : # GL function call
args = [ as_enum ( a ) for a in args ]
try :
getattr ( gl , id_ ) ( * args )
except AttributeError :
logger . warning ( 'Invalid gl command: %r' % id_ )
elif cmd == 'CREATE' : # Creating an object
if args [ 0 ] is not None :
klass = self . _classmap [ args [ 0 ] ]
self . _objects [ id_ ] = klass ( self , id_ )
else :
# No class given: remember the id so later commands on it can be
# ignored instead of raising.
self . _invalid_objects . add ( id_ )
elif cmd == 'DELETE' : # Deleting an object
ob = self . _objects . get ( id_ , None )
if ob is not None :
# Leave a tombstone so queued commands for this id are dropped.
self . _objects [ id_ ] = JUST_DELETED
ob . delete ( )
else : # Doing somthing to an object
ob = self . _objects . get ( id_ , None )
if ob == JUST_DELETED :
return
if ob is None :
if id_ not in self . _invalid_objects :
raise RuntimeError ( 'Cannot %s object %i because it ' 'does not exist' % ( cmd , id_ ) )
return
# Triage over command . Order of commands is set so most
# common ones occur first .
if cmd == 'DRAW' : # Program
ob . draw ( * args )
elif cmd == 'TEXTURE' : # Program
ob . set_texture ( * args )
elif cmd == 'UNIFORM' : # Program
ob . set_uniform ( * args )
elif cmd == 'ATTRIBUTE' : # Program
ob . set_attribute ( * args )
elif cmd == 'DATA' : # VertexBuffer , IndexBuffer , Texture
ob . set_data ( * args )
elif cmd == 'SIZE' : # VertexBuffer , IndexBuffer ,
ob . set_size ( * args )
# Texture [ 1D , 2D , 3D ] , RenderBuffer
elif cmd == 'ATTACH' : # FrameBuffer
ob . attach ( * args )
elif cmd == 'FRAMEBUFFER' : # FrameBuffer
ob . set_framebuffer ( * args )
elif cmd == 'SHADERS' : # Program
ob . set_shaders ( * args )
elif cmd == 'WRAPPING' : # Texture1D , Texture2D , Texture3D
ob . set_wrapping ( * args )
elif cmd == 'INTERPOLATION' : # Texture1D , Texture2D , Texture3D
ob . set_interpolation ( * args )
else :
logger . warning ( 'Invalid GLIR command %r' % cmd )
def gen_str(src, dst):
    """Build a REIL STR instruction moving *src* into *dst*."""
    # STR takes no second source operand, hence the empty placeholder.
    unused = ReilEmptyOperand()
    return ReilBuilder.build(ReilMnemonic.STR, src, unused, dst)
def overlapping(start1, end1, start2, end2):
    """Return True when interval (start1, end1) overlaps (start2, end2).

    Touching endpoints do not count as overlap.  Endpoint order within
    each pair is not assumed.

    >>> overlapping(0, 5, 6, 7)
    False
    >>> overlapping(1, 2, 0, 4)
    True
    >>> overlapping(5, 6, 0, 5)
    False
    """
    lo1, hi1 = min(start1, end1), max(start1, end1)
    lo2, hi2 = min(start2, end2), max(start2, end2)
    # Disjoint when one interval lies entirely at-or-below the other.
    return not (hi1 <= lo2 or lo1 >= hi2)
def datetime_parser(s):
    """Parse timestamp *s* in local time.

    The arrow parser is tried first; if it raises, the parsedatetime
    parser is used as a fallback.

    :param s: timestamp string
    :return: an arrow timestamp tagged with the local timezone
    :raises ValueError: if neither parser can interpret *s*
    """
    ts = None
    try:
        ts = arrow.get(s)
        # Convert UTC to local: arrow.get returns UTC unless the input
        # carries a timezone; bonfire assumes all time is machine-local.
        if ts.tzinfo == arrow.get().tzinfo:
            ts = ts.replace(tzinfo='local')
    except Exception:
        # Fall back to the fuzzy parsedatetime parser.
        c = pdt.Calendar()
        result, what = c.parse(s)
        if what in (1, 2, 3):
            ts = arrow.get(datetime.datetime(*result[:6]))
            ts = ts.replace(tzinfo='local')
    # BUG FIX: the original returned early from the except branch, so a
    # failed fallback parse returned None and the ValueError below was
    # unreachable.  Fall through so the check applies to both paths.
    if ts is None:
        raise ValueError("Cannot parse timestamp '" + s + "'")
    return ts
def execute(self):
    """Perform the actions of ``molecule init role``.

    :return: None
    """
    role_name = self._command_args['role_name']
    role_directory = os.getcwd()
    LOG.info('Initializing new role {}...'.format(role_name))
    if os.path.isdir(role_name):
        msg = ('The directory {} exists. '
               'Cannot create new role.').format(role_name)
        util.sysexit_with_message(msg)
    # An explicit template overrides the built-in 'role' scaffold.
    if 'template' in self._command_args:
        template_directory = self._command_args['template']
    else:
        template_directory = 'role'
    self._process_templates(template_directory, self._command_args,
                            role_directory)
    scenario_base_directory = os.path.join(role_directory, role_name)
    scenario_templates = (
        'scenario/driver/{driver_name}'.format(**self._command_args),
        'scenario/verifier/{verifier_name}'.format(**self._command_args),
    )
    for scenario_template in scenario_templates:
        self._process_templates(scenario_template, self._command_args,
                                scenario_base_directory)
    self._process_templates('molecule', self._command_args, role_directory)
    role_directory = os.path.join(role_directory, role_name)
    LOG.success('Initialized role in {} successfully.'.format(role_directory))
def start_worker_thread ( self , sleep_interval = 1.0 ) :
"""Start the helper worker thread that publishes queued messages to Splunk.

:param sleep_interval: seconds the timer waits before invoking
    ``self.perform_work`` (defaults to 1.0)
""" | # Start a worker thread responsible for sending logs
# NOTE(review): the guard reads ``self.sleep_interval`` (instance config)
# while the Timer is armed with the ``sleep_interval`` argument -- confirm
# this mismatch is intentional.
if self . sleep_interval > 0 :
self . debug_log ( 'starting worker thread' )
self . timer = Timer ( sleep_interval , self . perform_work )
# daemon=True so the timer thread never keeps the process alive
self . timer . daemon = True
# Auto - kill thread if main process exits
self . timer . start ( )
def maybe_convert_platform_interval(values):
    """Try platform conversion, special-casing IntervalArray.

    Wrapper around maybe_convert_platform that alters the default return
    dtype in certain cases to be compatible with IntervalArray.  For
    example, empty lists return with integer dtype instead of object
    dtype, which is prohibited for IntervalArray.

    Parameters
    ----------
    values : array-like

    Returns
    -------
    array
    """
    if isinstance(values, (list, tuple)) and not values:
        # GH 19016: empty lists/tuples would get object dtype by default,
        # which IntervalArray rejects, so hand back int64 instead.
        return np.array([], dtype=np.int64)
    if is_categorical_dtype(values):
        values = np.asarray(values)
    return maybe_convert_platform(values)
def code_binary(item):
    """Return a binary 'code' suitable for hashing."""
    encoded = code(item)
    # Already bytes on some paths; only text needs encoding.
    if not isinstance(encoded, six.string_types):
        return encoded
    return encoded.encode('utf-8')
def clean_text_by_sentences(text, language="english", additional_stopwords=None):
    """Tokenize *text* into sentences, applying filters and lemmatizing them.

    Returns a SyntacticUnit list.
    """
    # The cleaner must be configured before splitting/filtering.
    init_textcleanner(language, additional_stopwords)
    sentences = split_sentences(text)
    return merge_syntactic_units(sentences, filter_words(sentences))
def get(self, thread=None, plugin=None):
    """Get one or more threads.

    :param thread: Name of the thread
    :type thread: str
    :param plugin: Plugin object, under which the thread was registered
    :type plugin: GwBasePattern
    :return: dict of threads, a single thread, or None when not found
    """
    if plugin is None:
        # No plugin filter: whole registry, one entry, or None.
        if thread is None:
            return self.threads
        if thread in self.threads.keys():
            return self.threads[thread]
        return None
    if thread is None:
        # Every thread registered under the given plugin.
        return {name: entry for name, entry in self.threads.items()
                if entry.plugin == plugin}
    # Named thread, but only when it belongs to the given plugin.
    if thread in self.threads.keys() and self.threads[thread].plugin == plugin:
        return self.threads[thread]
    return None
def replace(self, old, new):
    """Replace instruction *old* with *new*, rewriting every usage."""
    if old.type != new.type:
        raise TypeError("new instruction has a different type")
    # Swap in place at the first matching position.
    slot = self.instructions.index(old)
    self.instructions[slot] = new
    # Update references to the old instruction across all basic blocks.
    for block in self.parent.basic_blocks:
        for instruction in block.instructions:
            instruction.replace_usage(old, new)
def _remove_unit_rule(g, rule):
    """Remove *rule* from *g* without changing the language produced by *g*."""
    kept = [r for r in g.rules if r != rule]
    # Splice the unit rule into every production its RHS head can expand to.
    expansions = [build_unit_skiprule(rule, ref)
                  for ref in g.rules if ref.lhs == rule.rhs[0]]
    return Grammar(kept + expansions)
def validate_field(self, field_name: str) -> bool:
    """Complain if a field is not in the schema.

    Args:
        field_name: name of the field to check.

    Returns: True if the field is present.

    Raises:
        UndefinedFieldError: if the field is absent from the schema.
    """
    # JSON-LD builtins are always considered present.
    if field_name in {"@id", "@type"}:
        return True
    present = self.schema.has_field(field_name)
    if not present:
        # todo : how to comply with our error handling policies ?
        raise UndefinedFieldError(
            "'{}' should be present in the knowledge graph schema.".format(field_name))
    return present
def getValuesForProperty(self, aPropURIRef):
    """Generic way to extract some prop value, e.g.

    In [11]: c.getValuesForProperty(rdflib.RDF.type)
    Out[11]:
    [rdflib.term.URIRef(u'http://www.w3.org/2002/07/owl#Class'),
     rdflib.term.URIRef(u'http://www.w3.org/2000/01/rdf-schema#Class')]
    """
    # Exact-type check preserved from the original: anything that is not
    # literally a URIRef (including URIRef subclasses) gets re-wrapped.
    if type(aPropURIRef) == rdflib.URIRef:
        prop = aPropURIRef
    else:
        prop = rdflib.URIRef(aPropURIRef)
    return list(self.rdflib_graph.objects(None, prop))
def lstm_unroll(num_lstm_layer, seq_len, num_hidden, num_label, loss_type=None):
    """Create an unrolled LSTM symbol.

    Builds an inference symbol when *loss_type* is not given, and a
    training symbol otherwise.  *loss_type* must be 'ctc' or 'warpctc'.

    Parameters
    ----------
    num_lstm_layer : int
    seq_len : int
    num_hidden : int
    num_label : int
    loss_type : str
        'ctc' or 'warpctc'

    Returns
    -------
    mxnet.symbol.symbol.Symbol
    """
    # Base network shared between training and inference.
    pred = _lstm_unroll_base(num_lstm_layer, seq_len, num_hidden)
    if not loss_type:
        # Inference mode: plain softmax head.
        return mx.sym.softmax(data=pred, name='softmax')
    # Training mode: attach the requested CTC loss.
    return _add_ctc_loss(pred, seq_len, num_label, loss_type)
def K ( self , X , X2 = None ) : # model : - a d ^ 2y / dx ^ 2 + b dy / dt + c * y = U
# kernel Kyy rbf spatiol temporal
# vyt Y temporal variance vyx Y spatiol variance lyt Y temporal lengthscale lyx Y spatiol lengthscale
# kernel Kuu doper ( doper ( Kyy ) )
# a b c lyt lyx vyx * vyt
"""Compute the covariance matrix between X and X2.

The last column of X (and X2) is an output index consumed by
``index_to_slices``; the remaining columns are the (t, x) coordinates.
Each (i, j) output-block combination fills a different sub-block of K
for the latent-force model noted above.
NOTE(review): the branches assume exactly two output indices (0 and 1)
-- confirm against the multi-output framework this kernel plugs into.
""" | X , slices = X [ : , : - 1 ] , index_to_slices ( X [ : , - 1 ] )
if X2 is None :
# Symmetric case: compare X against itself.
X2 , slices2 = X , slices
K = np . zeros ( ( X . shape [ 0 ] , X . shape [ 0 ] ) )
else :
X2 , slices2 = X2 [ : , : - 1 ] , index_to_slices ( X2 [ : , - 1 ] )
K = np . zeros ( ( X . shape [ 0 ] , X2 . shape [ 0 ] ) )
# Pairwise squared time/space distances and the signed time difference.
tdist = ( X [ : , 0 ] [ : , None ] - X2 [ : , 0 ] [ None , : ] ) ** 2
xdist = ( X [ : , 1 ] [ : , None ] - X2 [ : , 1 ] [ None , : ] ) ** 2
ttdist = ( X [ : , 0 ] [ : , None ] - X2 [ : , 0 ] [ None , : ] )
# rdist = [ tdist , xdist ]
# dist = np . abs ( X - X2 . T )
vyt = self . variance_Yt
vyx = self . variance_Yx
# Reciprocal parametrisation 1/(2*lengthscale) used by the lambdas below.
lyt = 1 / ( 2 * self . lengthscale_Yt )
lyx = 1 / ( 2 * self . lengthscale_Yx )
a = self . a
# # - a is used in the model , negtive diffusion
b = self . b
c = self . c
# Base separable factor and its derivative multipliers.
kyy = lambda tdist , xdist : np . exp ( - lyt * ( tdist ) - lyx * ( xdist ) )
k1 = lambda tdist : ( 2 * lyt - 4 * lyt ** 2 * ( tdist ) )
k2 = lambda xdist : ( 4 * lyx ** 2 * ( xdist ) - 2 * lyx )
k3 = lambda xdist : ( 3 * 4 * lyx ** 2 - 6 * 8 * xdist * lyx ** 3 + 16 * xdist ** 2 * lyx ** 4 )
k4 = lambda ttdist : 2 * lyt * ( ttdist )
# Fill each output-block of the covariance matrix.
for i , s1 in enumerate ( slices ) :
for j , s2 in enumerate ( slices2 ) :
for ss1 in s1 :
for ss2 in s2 :
if i == 0 and j == 0 :
K [ ss1 , ss2 ] = vyt * vyx * kyy ( tdist [ ss1 , ss2 ] , xdist [ ss1 , ss2 ] )
elif i == 0 and j == 1 :
K [ ss1 , ss2 ] = ( - a * k2 ( xdist [ ss1 , ss2 ] ) + b * k4 ( ttdist [ ss1 , ss2 ] ) + c ) * vyt * vyx * kyy ( tdist [ ss1 , ss2 ] , xdist [ ss1 , ss2 ] )
# K [ ss1 , ss2 ] = np . where ( rdist [ ss1 , ss2 ] > 0 , kuyp ( np . abs ( rdist [ ss1 , ss2 ] ) ) , kuyn ( np . abs ( rdist [ ss1 , ss2 ] ) ) )
# K [ ss1 , ss2 ] = np . where ( rdist [ ss1 , ss2 ] > 0 , kuyp ( rdist [ ss1 , ss2 ] ) , kuyn ( rdist [ ss1 , ss2 ] ) )
elif i == 1 and j == 1 :
K [ ss1 , ss2 ] = ( b ** 2 * k1 ( tdist [ ss1 , ss2 ] ) - 2 * a * c * k2 ( xdist [ ss1 , ss2 ] ) + a ** 2 * k3 ( xdist [ ss1 , ss2 ] ) + c ** 2 ) * vyt * vyx * kyy ( tdist [ ss1 , ss2 ] , xdist [ ss1 , ss2 ] )
else :
K [ ss1 , ss2 ] = ( - a * k2 ( xdist [ ss1 , ss2 ] ) - b * k4 ( ttdist [ ss1 , ss2 ] ) + c ) * vyt * vyx * kyy ( tdist [ ss1 , ss2 ] , xdist [ ss1 , ss2 ] )
# K [ ss1 , ss2 ] = np . where ( rdist [ ss1 , ss2 ] > 0 , kyup ( np . abs ( rdist [ ss1 , ss2 ] ) ) , kyun ( np . abs ( rdist [ ss1 , ss2 ] ) ) )
# K [ ss1 , ss2 ] = np . where ( rdist [ ss1 , ss2 ] > 0 , kyup ( rdist [ ss1 , ss2 ] ) , kyun ( rdist [ ss1 , ss2 ] ) )
# stop
return K
def add_worksheet(self, title, rows, cols):
    """Adds a new worksheet to a spreadsheet.

    :param title: A title of a new worksheet.
    :type title: str
    :param rows: Number of rows.
    :type rows: int
    :param cols: Number of columns.
    :type cols: int
    :returns: a newly created :class:`worksheets <gsperad.models.Worksheet>`.
    """
    request = {
        'addSheet': {
            'properties': {
                'title': title,
                'sheetType': 'GRID',
                'gridProperties': {'rowCount': rows, 'columnCount': cols},
            }
        }
    }
    response = self.batch_update({'requests': [request]})
    # The API echoes back the final sheet properties (including its id).
    new_props = response['replies'][0]['addSheet']['properties']
    return Worksheet(self, new_props)
def createDocument_ ( self , initDict = None ) :
"create and returns a completely empty document or one populated with initDict" | if initDict is None :
initV = { }
else :
initV = initDict
return self . documentClass ( self , initV ) |
def begin ( self , close = True , expire_on_commit = False , session = None , commit = False , ** options ) :
"""Provide a transactional scope around a series of operations.

By default, ``expire_on_commit`` is set to False so that instances
can be used outside the session.

:param close: close the session on exit (forced False when an
    existing session is passed in)
:param expire_on_commit: forwarded to the session factory
:param session: reuse this existing session instead of creating one
:param commit: commit on successful exit (forced True for sessions
    created here)

NOTE(review): this is a generator intended for use as a context
manager; presumably a ``@contextmanager`` decorator is applied where
it is defined -- confirm.
""" | if not session :
commit = True
session = self . session ( expire_on_commit = expire_on_commit , ** options )
else :
# Caller owns the session: never close it here.
close = False
try :
yield session
if commit :
session . commit ( )
except Exception :
# Any error rolls the transaction back and is re-raised.
session . rollback ( )
raise
finally :
if close :
session . close ( )
def reset_namespace ( self , warning = False , message = False ) :
"""Reset the namespace by removing all names defined by the user.

:param warning: show a confirmation dialog (with a "don't show
    again" checkbox) before resetting
:param message: print a "Removing all variables..." banner in the
    console before the reset
""" | reset_str = _ ( "Remove all variables" )
warn_str = _ ( "All user-defined variables will be removed. " "Are you sure you want to proceed?" )
kernel_env = self . kernel_manager . _kernel_spec . env
if warning :
box = MessageCheckBox ( icon = QMessageBox . Warning , parent = self )
box . setWindowTitle ( reset_str )
box . set_checkbox_text ( _ ( "Don't show again." ) )
box . setStandardButtons ( QMessageBox . Yes | QMessageBox . No )
box . setDefaultButton ( QMessageBox . Yes )
box . set_checked ( False )
box . set_check_visible ( True )
box . setText ( warn_str )
answer = box . exec_ ( )
# Update checkbox based on user interaction
CONF . set ( 'ipython_console' , 'show_reset_namespace_warning' , not box . is_checked ( ) )
self . ipyclient . reset_warning = not box . is_checked ( )
if answer != QMessageBox . Yes :
return
try :
if self . _reading :
# At a debugger prompt: reset through the debugger magic instead.
self . dbg_exec_magic ( 'reset' , '-f' )
else :
if message :
self . reset ( )
self . _append_html ( _ ( "<br><br>Removing all variables..." "\n<hr>" ) , before_prompt = False )
self . silent_execute ( "%reset -f" )
# Re-seed the namespace per the kernel's startup environment flags.
if kernel_env . get ( 'SPY_AUTOLOAD_PYLAB_O' ) == 'True' :
self . silent_execute ( "from pylab import *" )
if kernel_env . get ( 'SPY_SYMPY_O' ) == 'True' :
sympy_init = """
from __future__ import division
from sympy import *
x, y, z, t = symbols('x y z t')
k, m, n = symbols('k m n', integer=True)
f, g, h = symbols('f g h', cls=Function)
init_printing()"""
self . silent_execute ( dedent ( sympy_init ) )
if kernel_env . get ( 'SPY_RUN_CYTHON' ) == 'True' :
self . silent_execute ( "%reload_ext Cython" )
self . refresh_namespacebrowser ( )
if not self . external_kernel :
self . silent_execute ( 'get_ipython().kernel.close_all_mpl_figures()' )
except AttributeError :
pass
def ffill(self, dim, limit=None):
    '''Fill NaN values by propogating values forward

    *Requires bottleneck.*

    Parameters
    ----------
    dim : str
        Dimension along which values are propagated when filling.
    limit : int, default None
        Maximum number of consecutive NaN values to forward fill; a gap
        with more consecutive NaNs is only partially filled.  Must be
        greater than 0, or None for no limit.

    Returns
    -------
    Dataset
    '''
    # Imported lazily to avoid a circular import at module load time.
    from .missing import ffill, _apply_over_vars_with_dim
    return _apply_over_vars_with_dim(ffill, self, dim=dim, limit=limit)
def compute_dprime(n_Hit=None, n_Miss=None, n_FA=None, n_CR=None):
    """Compute signal detection theory (SDT) indices.

    Computes d', beta, aprime, b''d and c from hit/miss/false-alarm/
    correct-rejection counts.

    Parameters
    ----------
    n_Hit : int
        Number of hits.
    n_Miss : int
        Number of misses.
    n_FA : int
        Number of false alarms.
    n_CR : int
        Number of correct rejections.

    Returns
    -------
    parameters : dict
        Keys: ``dprime`` (sensitivity index), ``beta`` (response bias),
        ``aprime`` (non-parametric sensitivity), ``bppd`` (non-parametric
        response bias), ``c`` (response bias).

    Example
    -------
    >>> import neurokit as nk
    >>> nk.compute_dprime(n_Hit=7, n_Miss=4, n_FA=6, n_CR=6)
    """
    # Raw ratios
    hit_rate = n_Hit / (n_Hit + n_Miss)
    fa_rate = n_FA / (n_FA + n_CR)
    # Adjusted ratios keep the z-scores finite when a rate is 0 or 1.
    hit_rate_adjusted = (n_Hit + 0.5) / ((n_Hit + 0.5) + n_Miss + 1)
    fa_rate_adjusted = (n_FA + 0.5) / ((n_FA + 0.5) + n_CR + 1)
    zhr = scipy.stats.norm.ppf(hit_rate_adjusted)
    zfar = scipy.stats.norm.ppf(fa_rate_adjusted)
    # dprime
    dprime = zhr - zfar
    # beta
    beta = np.exp(-zhr * zhr / 2 + zfar * zfar / 2)
    # aprime
    # BUG FIX: the original evaluated both branch formulas eagerly, which
    # raised ZeroDivisionError whenever hit_rate or fa_rate was 0 or 1,
    # even though the selected branch was well-defined.  Each formula's
    # denominator is guaranteed non-zero only inside its own branch, so
    # compute lazily.
    if fa_rate > hit_rate:
        aprime = 1 / 2 - ((fa_rate - hit_rate) * (1 + fa_rate - hit_rate)
                          / (4 * fa_rate * (1 - hit_rate)))
    elif fa_rate < hit_rate:
        aprime = 1 / 2 + ((hit_rate - fa_rate) * (1 + hit_rate - fa_rate)
                          / (4 * hit_rate * (1 - fa_rate)))
    else:
        aprime = 0.5
    # bppd -- NOTE(review): still divides by zero for perfect performance
    # (hit_rate == 1 and fa_rate == 0); preserved from the original.
    bppd = (((1 - hit_rate) * (1 - fa_rate) - hit_rate * fa_rate)
            / ((1 - hit_rate) * (1 - fa_rate) + hit_rate * fa_rate))
    # c
    c = -(zhr + zfar) / 2
    parameters = dict(dprime=dprime, beta=beta, aprime=aprime, bppd=bppd, c=c)
    return parameters
def on_rabbitmq_close ( self , reply_code , reply_text ) :
"""Called when the connection to RabbitMQ has been closed.

:param int reply_code: The code for the disconnect
:param str reply_text: The disconnect reason
""" | global rabbitmq_connection
LOGGER . warning ( 'RabbitMQ has disconnected (%s): %s' , reply_code , reply_text )
# Drop the module-level connection and channel, then reconnect.
rabbitmq_connection = None
self . _set_rabbitmq_channel ( None )
self . _connect_to_rabbitmq ( )
def write_source_model ( dest , sources_or_groups , name = None , investigation_time = None ) :
"""Writes a source model to XML.

:param dest:
    Destination path
:param sources_or_groups:
    Source model in different formats: a ``nrml.SourceModel``, a list of
    ``SourceGroup`` objects, or a plain list of sources (grouped here by
    tectonic region type)
:param name:
    Name of the source model (if missing, extracted from the filename)
:param investigation_time:
    Optional value written as a ``sourceModel`` attribute
:returns:
    the destination path -- NOTE(review): the SourceModel branch returns
    None instead; confirm whether callers rely on this asymmetry
""" | if isinstance ( sources_or_groups , nrml . SourceModel ) :
with open ( dest , 'wb' ) as f :
nrml . write ( [ obj_to_node ( sources_or_groups ) ] , f , '%s' )
return
if isinstance ( sources_or_groups [ 0 ] , sourceconverter . SourceGroup ) :
groups = sources_or_groups
else : # passed a list of sources
srcs_by_trt = groupby ( sources_or_groups , operator . attrgetter ( 'tectonic_region_type' ) )
groups = [ sourceconverter . SourceGroup ( trt , srcs_by_trt [ trt ] ) for trt in srcs_by_trt ]
name = name or os . path . splitext ( os . path . basename ( dest ) ) [ 0 ]
nodes = list ( map ( obj_to_node , sorted ( groups ) ) )
attrs = { "name" : name }
if investigation_time is not None :
attrs [ 'investigation_time' ] = investigation_time
source_model = Node ( "sourceModel" , attrs , nodes = nodes )
with open ( dest , 'wb' ) as f :
nrml . write ( [ source_model ] , f , '%s' )
return dest
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.