signature
stringlengths 29
44.1k
| implementation
stringlengths 0
85.2k
|
|---|---|
def extant_file(path):
    """argparse ``type=`` helper: return *path* unchanged if it exists.

    Raises ``argparse.ArgumentTypeError`` otherwise, so argparse reports
    a clean usage error instead of a traceback.
    """
    if os.path.exists(path):
        return path
    raise argparse.ArgumentTypeError("{} does not exist".format(path))
|
def hmget(self, *args):
    """Fetch several InstanceHashField values with one Redis HMGET call.

    :param args: names of the hash fields to retrieve.
    :raises ValueError: if any requested field is not an InstanceHashField.
    """
    # Every requested field must be a declared InstanceHashField. The
    # previous ``not any(...)`` check only rejected calls where *no*
    # field was valid, letting mixed valid/invalid requests through
    # despite the "Only InstanceHashField can be used here" contract.
    if args and not all(arg in self._instancehash_fields for arg in args):
        raise ValueError("Only InstanceHashField can be used here.")
    return self._call_command('hmget', args)
|
def flushing(self):
    """Flush the current session, translating common integrity failures
    into the project's model-level exceptions."""
    try:
        yield
        self.session.flush()
    except (FlushError, IntegrityError) as error:
        message = str(error)
        # Classify by substring matching -- there ought to be a cleaner
        # way to detect these conditions, but the backend only gives us
        # the error text. First match wins; each branch raises.
        if "duplicate" in message or "already exists" in message:
            raise DuplicateModelError(error)
        if "conflicts with" in message and "identity key" in message:
            raise DuplicateModelError(error)
        if "still referenced" in message:
            raise ReferencedModelError(error)
        if "is not present" in message:
            raise MissingDependencyError(error)
        raise ModelIntegrityError(error)
|
def insert_data_frame(col, df, int_col=None, binary_col=None, minimal_size=5):
    """Insert a ``pandas.DataFrame`` into a MongoDB collection.

    :param col: :class:`pymongo.collection.Collection` instance.
    :param df: :class:`pandas.DataFrame` instance.
    :param int_col: list of integer-type columns.
    :param binary_col: list of binary-type columns.
    :param minimal_size: forwarded to ``smart_insert`` -- presumably a
        minimum batch-size hint; TODO confirm against smart_insert's API.
    """
    # Convert the frame to a list of documents (coercing the named
    # int/binary columns), then bulk-insert.
    data = transform.to_dict_list_generic_type(df, int_col=int_col, binary_col=binary_col)
    smart_insert(col, data, minimal_size)
|
def log_rmtree_error(func, arg, exc_info):
    """``onerror`` handler for (sh)util.rmtree() that logs a warning
    instead of aborting the tree removal."""
    # exc_info is a (type, value, traceback) triple; report the value.
    exc_value = exc_info[1]
    logging.warning("Failure during '%s(%s)': %s", func.__name__, arg, exc_value)
|
def _find_all_step_methods ( self ) :
"""Finds all _ step < n > methods where n is an integer in this class ."""
|
steps = ( [ method for method in dir ( self ) if callable ( getattr ( self , method ) ) and re . match ( r'_step\d+\d+.*' , method ) ] )
steps = sorted ( steps )
for step in steps :
self . _steps . append ( getattr ( self , step ) )
|
def get_user(self, identified_with, identifier, req, resp, resource, uri_kwargs):
    """Look up the user object for *identifier* in the key-value store.

    Args:
        identified_with (object): authentication middleware used to
            identify the user.
        identifier: middleware-specific user identifier (string or tuple
            for the built-in authentication middleware classes).

    Returns:
        dict: user object stored in Redis if it exists, otherwise ``None``.
    """
    storage_key = self._get_storage_key(identified_with, identifier)
    raw = self.kv_store.get(storage_key)
    if raw is None:
        return None
    return self.serialization.loads(raw.decode())
|
def connect(*, dsn, autocommit=False, ansi=False, timeout=0, loop=None, executor=None, echo=False, after_created=None, **kwargs):
    """Accept an ODBC connection string and return a new Connection object.

    The connection string can be passed as the string `str`, as a list of
    keywords, or a combination of the two. Any keywords except autocommit,
    ansi, and timeout are simply added to the connection string.

    :param autocommit bool: False or zero (the default) leaves autocommit
        off; if True or non-zero, the connection is put into ODBC
        autocommit mode and statements are committed automatically.
    :param ansi bool: By default, pyodbc first attempts to connect using
        the Unicode version of SQLDriverConnectW. If the driver returns
        IM001 indicating it does not support the Unicode version, the ANSI
        version is tried.
    :param timeout int: An integer login timeout in seconds, used to set
        the SQL_ATTR_LOGIN_TIMEOUT attribute of the connection. The
        default is 0, which means the database's default timeout, if any,
        is used.
    :param after_created callable: customize configuration after the
        connection is established. Must be an async unary function, or
        leave it as None.
    """
    # Delegate to the module-level coroutine; _ContextManager lets callers
    # either ``await`` the result or use it with ``async with``.
    return _ContextManager(_connect(dsn=dsn, autocommit=autocommit, ansi=ansi, timeout=timeout, loop=loop, executor=executor, echo=echo, after_created=after_created, **kwargs))
|
def cylinder(target, throat_diameter='throat.diameter'):
    r"""Compute throat cross-sectional areas assuming cylindrical throats.

    Parameters
    ----------
    target : OpenPNM Object
        The object this model is associated with; determines the length of
        the calculated array and provides access to the diameter values.
    throat_diameter : string
        Dictionary key of the throat diameter values.
    """
    # Circle area from diameter: (pi / 4) * d^2
    d = target[throat_diameter]
    return _pi / 4 * d ** 2
|
def validateExtractOptions(options):
    '''Check the validity of the option combinations for barcode extraction.

    Returns a ``(extract_cell, extract_umi)`` tuple of booleans; invalid
    combinations are reported through ``U.error`` (which aborts).
    '''
    if not options.pattern and not options.pattern2:
        if not options.read2_in:
            U.error("Must supply --bc-pattern for single-end")
        else:
            U.error("Must supply --bc-pattern and/or --bc-pattern2 "
                    "if paired-end ")
    if options.pattern2:
        if not options.read2_in:
            U.error("must specify a paired fastq ``--read2-in``")
    # Paired-end with only one pattern: reuse it for read 2.
    if not options.pattern2:
        options.pattern2 = options.pattern
    extract_cell = False
    extract_umi = False
    # If the pattern is a regex we can compile the regex(es) prior to
    # ExtractFilterAndUpdate instantiation
    if options.extract_method == "regex":
        if options.pattern:
            try:
                options.pattern = regex.compile(options.pattern)
            except regex.error:
                U.error("--bc-pattern '%s' is not a "
                        "valid regex" % options.pattern)
        if options.pattern2:
            try:
                options.pattern2 = regex.compile(options.pattern2)
            # BUGFIX: was ``except regex.Error`` -- the regex module
            # exposes ``error`` (as used above), so an invalid pattern2
            # raised AttributeError instead of a usage error.
            except regex.error:
                U.error("--bc-pattern2 '%s' is not a "
                        "valid regex" % options.pattern2)
    # check whether the regex contains umi group(s) and cell group(s)
    if options.extract_method == "regex":
        if options.pattern:
            for group in options.pattern.groupindex:
                if group.startswith("cell_"):
                    extract_cell = True
                elif group.startswith("umi_"):
                    extract_umi = True
        if options.pattern2:
            for group in options.pattern2.groupindex:
                if group.startswith("cell_"):
                    extract_cell = True
                elif group.startswith("umi_"):
                    extract_umi = True
    # check whether the pattern string contains umi/cell bases
    elif options.extract_method == "string":
        if options.pattern:
            if "C" in options.pattern:
                extract_cell = True
            if "N" in options.pattern:
                extract_umi = True
        if options.pattern2:
            if "C" in options.pattern2:
                extract_cell = True
            if "N" in options.pattern2:
                extract_umi = True
    if not extract_umi:
        if options.extract_method == "string":
            U.error("barcode pattern(s) do not include any umi bases "
                    "(marked with 'Ns') %s, %s" % (
                        options.pattern, options.pattern2))
        elif options.extract_method == "regex":
            # BUGFIX: the ``%`` operator was missing here, so the string
            # literal was *called* with the tuple, raising TypeError
            # instead of printing the intended usage error.
            U.error("barcode regex(es) do not include any umi groups "
                    "(starting with 'umi_') %s, %s" % (
                        options.pattern, options.pattern2))
    return (extract_cell, extract_umi)
|
def str_match(arr, pat, case=True, flags=0, na=np.nan):
    """Determine whether each string matches a regular expression
    (anchored at the start, via ``re.match``).

    Parameters
    ----------
    pat : str
        Character sequence or regular expression.
    case : bool, default True
        If True, case sensitive.
    flags : int, default 0 (no flags)
        re module flags, e.g. re.IGNORECASE.
    na : default NaN
        Fill value for missing values.

    Returns
    -------
    Series/array of boolean values

    See Also
    --------
    contains : Analogous, but less strict, relying on re.search instead
        of re.match.
    extract : Extract matched groups.
    """
    if not case:
        flags |= re.IGNORECASE
    compiled = re.compile(pat, flags=flags)

    def matcher(value):
        return bool(compiled.match(value))

    return _na_map(matcher, arr, na, dtype=bool)
|
def get_all_parsers(self, strict_type_matching: bool = False) -> List[Parser]:
    """Return every registered parser, in order of relevance.

    :param strict_type_matching: forwarded to the matching query.
    :return: exact matches followed by generic matches.
    """
    match_groups = self.find_all_matching_parsers(strict=strict_type_matching)[0]
    # With a joker on both type and extension only exact and generic
    # matches can exist -- the "approximate" bucket (index 1) must be
    # empty.
    if match_groups[1]:
        raise Exception('Internal error - this matching[1] list is supposed to be empty for such a query')
    return match_groups[0] + match_groups[2]
|
def GenerateNetworkedConfigFile(load_hook, normal_class_load_hook, normal_class_dump_hook, **kwargs) -> NetworkedConfigObject:
    """Build a factory that creates NetworkedConfigObjects with the given
    hooks pre-bound.

    NOTE(review): despite the return annotation, this returns the factory
    *function*, not a NetworkedConfigObject -- confirm with callers before
    changing the annotation.
    """
    def NetworkedConfigObjectGenerator(url, safe_load: bool = True):
        return NetworkedConfigObject(
            url=url,
            load_hook=load_hook,
            safe_load=safe_load,
            normal_class_load_hook=normal_class_load_hook,
            normal_class_dump_hook=normal_class_dump_hook,
        )
    return NetworkedConfigObjectGenerator
|
def delete_task(self, task_name):
    """Delete the named Task from this Job.

    :raises DagobahError: if the job graph is immutable in its current
        state, or if no task with that name exists.
    """
    logger.debug('Deleting task {0}'.format(task_name))
    if not self.state.allow_change_graph:
        raise DagobahError("job's graph is immutable in its current state: %s" % self.state.status)
    if task_name not in self.tasks:
        raise DagobahError('task %s does not exist' % task_name)
    # Remove from the task map and the dependency graph, then persist.
    del self.tasks[task_name]
    self.delete_node(task_name)
    self.commit()
|
def _stripe_object_to_record(cls, data, current_ids=None, pending_relations=None):
    """This takes an object, as it is formatted in Stripe's current API
    for our object type. In return, it provides a dict. The dict can be
    used to create a record or to update a record.

    This function takes care of mapping from one field name to another,
    converting from cents to dollars, converting timestamps, and
    eliminating unused fields (so that an objects.create() call would
    not fail).

    :param data: the object, as sent by Stripe. Parsed from JSON, into a dict
    :type data: dict
    :param current_ids: stripe ids of objects that are currently being processed
    :type current_ids: set
    :param pending_relations: list of tuples of relations to be attached post-save
    :type pending_relations: list
    :return: All the members from the input, translated, mutated, etc
    :rtype: dict
    """
    # Give subclasses a chance to massage the raw payload first.
    manipulated_data = cls._manipulate_stripe_object_hook(data)
    if "object" not in data:
        raise ValueError("Stripe data has no `object` value. Aborting. %r" % (data))
    if not cls.is_valid_object(data):
        raise ValueError("Trying to fit a %r into %r. Aborting." % (data["object"], cls.__name__))
    result = {}
    if current_ids is None:
        current_ids = set()
    # Iterate over all the fields that we know are related to Stripe, let each field work its own magic
    ignore_fields = ["date_purged", "subscriber"]  # XXX: Customer hack
    for field in cls._meta.fields:
        # Skip our own bookkeeping fields and the explicit ignore list.
        if field.name.startswith("djstripe_") or field.name in ignore_fields:
            continue
        if isinstance(field, models.ForeignKey):
            # Foreign keys resolve (or defer, via pending_relations) the
            # related object; ``skip`` means it cannot be set yet.
            field_data, skip = cls._stripe_object_field_to_foreign_key(field=field, manipulated_data=manipulated_data, current_ids=current_ids, pending_relations=pending_relations, )
            if skip:
                continue
        else:
            if hasattr(field, "stripe_to_db"):
                # Custom fields convert their own values (e.g. cents to
                # dollars, timestamps).
                field_data = field.stripe_to_db(manipulated_data)
            else:
                field_data = manipulated_data.get(field.name)
            # Char/Text fields are conventionally non-null: store "".
            if isinstance(field, (models.CharField, models.TextField)) and field_data is None:
                field_data = ""
        result[field.name] = field_data
    return result
|
def _set_lim_and_transforms(self):
    """Set up the key transforms for the axes.

    Most transforms are inherited from LambertAxes; only the y-axis
    (latitude tick) transforms are rebuilt here to account for this
    projection's ``horizon`` width.
    """
    # Most of the transforms are set up correctly by LambertAxes
    LambertAxes._set_lim_and_transforms(self)
    # Transform for latitude ticks. These are typically unused, but just
    # in case we need them...
    yaxis_stretch = Affine2D().scale(4 * self.horizon, 1.0)
    yaxis_stretch = yaxis_stretch.translate(-self.horizon, 0.0)
    # These are identical to LambertAxes._set_lim_and_transforms, but we
    # need to update things to reflect the new "yaxis_stretch"
    yaxis_space = Affine2D().scale(1.0, 1.1)
    self._yaxis_transform = yaxis_stretch + self.transData
    yaxis_text_base = yaxis_stretch + self.transProjection + (yaxis_space + self.transAffine + self.transAxes)
    # Tick labels offset 8 points to either side of the axis.
    self._yaxis_text1_transform = yaxis_text_base + Affine2D().translate(-8.0, 0.0)
    self._yaxis_text2_transform = yaxis_text_base + Affine2D().translate(8.0, 0.0)
|
def get_devices(self):
    """Return a list of Device entries reported by the router.

    Deprecated, use get_actors instead.
    """
    response = self.session.get(
        self.base_url + '/net/home_auto_query.lua',
        params={'sid': self.sid, 'command': 'AllOutletStates', 'xhr': 0, },
        timeout=15,
    )
    response.raise_for_status()
    data = response.json()
    # Outlets are reported as numbered keys DeviceID_1 .. DeviceID_<count>.
    return [
        Device(
            int(data["DeviceID_{0}".format(i)]),
            int(data["DeviceConnectState_{0}".format(i)]),
            int(data["DeviceSwitchState_{0}".format(i)]),
        )
        for i in range(1, int(data["Outlet_count"]) + 1)
    ]
|
def get_extver(self):
    """Return the version for this extension.

    Falls back to the HDU version when ``extver`` is 0 -- used when the
    same name is given to multiple extensions.
    """
    extver = self._info['extver']
    return self._info['hduver'] if extver == 0 else extver
|
def inc(self, key, key_length=0):
    """Increment the value stored under *key* by one.

    Params:
        <str> key
        <int> key_length: length of *key*; derived from the key itself
            when < 1.
    Return:
        <int> the new key value
    """
    if key_length < 1:
        key_length = len(key)
    # NOTE(review): add_method is deliberately called with an explicit
    # extra ``self`` -- presumably a C-level/unbound callable; confirm
    # before changing.
    new_value = self.add_method(self, key, key_length, 1)
    if self.k:
        self._update(key, new_value)
    return new_value
|
def on(self, message, namespace=None):
    """Decorator to register a SocketIO event handler.

    This decorator must be applied to SocketIO event handlers. Example::

        @socketio.on('my event', namespace='/chat')
        def handle_my_custom_event(json):
            print('received json: ' + str(json))

    :param message: The name of the event. Normally a user-defined string,
        but ``'message'`` (string payload), ``'json'`` (JSON payload),
        ``'connect'`` and ``'disconnect'`` are predefined.
    :param namespace: The namespace on which the handler is to be
        registered. Defaults to the global namespace.
    """
    ns = namespace or '/'

    def decorator(handler):
        # Adapter that routes the raw socket event through the
        # framework's dispatch machinery.
        def _handler(sid, *args):
            return self._handle_event(handler, message, ns, sid, *args)
        if self.server:
            self.server.on(message, _handler, namespace=ns)
        else:
            # Server not created yet: queue the registration.
            self.handlers.append((message, _handler, ns))
        return handler
    return decorator
|
def run_example(example_name, environ):
    """Run an example module from zipline.examples.

    :param example_name: key into EXAMPLE_MODULES.
    :param environ: environment mapping forwarded to run_algorithm.
    :return: the result of zipline's run_algorithm for the example.
    """
    mod = EXAMPLE_MODULES[example_name]
    # Examples expect a "YAHOO" calendar; alias it to NYSE (force=True
    # overwrites any previous registration).
    register_calendar("YAHOO", get_calendar("NYSE"), force=True)
    return run_algorithm(initialize=getattr(mod, 'initialize', None), handle_data=getattr(mod, 'handle_data', None), before_trading_start=getattr(mod, 'before_trading_start', None), analyze=getattr(mod, 'analyze', None), bundle='test', environ=environ, # Provide a default capital base, but allow the test to override.
                         **merge({'capital_base': 1e7}, mod._test_args()))
|
def current_arg_text(self) -> str:
    """Plain-text part of the current argument, with all CQ codes
    stripped. Cached after the first computation."""
    cached = self._current_arg_text
    if cached is not None:
        return cached
    text = Message(self.current_arg).extract_plain_text()
    self._current_arg_text = text
    return text
|
def script(script, interpreter='', suffix='', args='', **kwargs):
    '''Execute specified script using specified interpreter. This action
    accepts common action arguments such as input, active, workdir,
    docker_image and args. In particular, content of one or more files
    specified by option input would be prepended before the specified
    script.

    Note: the ``script`` parameter intentionally shadows the function
    name, following the SoS action API.
    '''
    return SoS_ExecuteScript(script, interpreter, suffix, args).run(**kwargs)
|
def hashsummary(self):
    """Return a model summary: SHA-256 checksums of each child module's
    parameter tensors, in traversal order."""
    digests = []
    for child in list(self.children()):
        for param in child.parameters():
            raw = param.detach().cpu().numpy().tobytes()
            digests.append(hashlib.sha256(raw).hexdigest())
    return digests
|
def get_injuries(self, season, week):
    """Return injury data for the given season and week."""
    return self._method_call("Injuries/{season}/{week}", "stats", season=season, week=week)
|
def shrink(self):
    """Evict roughly 5% of entries, dropping the least-frequently-used
    ones first."""
    evict_count = int(0.05 * len(self))
    if not evict_count:
        return
    # Rank entries by their stored use count (value[0]).
    ranked = sorted(super(LFUCache, self).items(), key=lambda kv: kv[1][0])
    for key, _ in ranked[:evict_count]:
        del self[key]
|
def snapshot_created(name, ami_name, instance_name, wait_until_available=True, wait_timeout_seconds=300, **kwargs):
    '''Create a snapshot (AMI) from the given instance.

    .. versionadded:: 2016.3.0

    name
        State name.
    ami_name
        Name to give the created AMI.
    instance_name
        Instance to image.
    wait_until_available
        Poll until the AMI reaches the 'available' state.
    wait_timeout_seconds
        Maximum seconds to poll before failing.
    '''
    ret = {'name': name, 'result': True, 'comment': '', 'changes': {}}
    if not __salt__['boto_ec2.create_image'](ami_name=ami_name, instance_name=instance_name, **kwargs):
        ret['comment'] = 'Failed to create new AMI {ami_name}'.format(ami_name=ami_name)
        ret['result'] = False
        return ret
    ret['comment'] = 'Created new AMI {ami_name}'.format(ami_name=ami_name)
    ret['changes']['new'] = {ami_name: ami_name}
    if not wait_until_available:
        return ret
    # Poll every 5 seconds until the AMI is available or the timeout hits.
    starttime = time()
    while True:
        images = __salt__['boto_ec2.find_images'](ami_name=ami_name, return_objs=True, **kwargs)
        if images and images[0].state == 'available':
            break
        if time() - starttime > wait_timeout_seconds:
            if images:
                ret['comment'] = 'AMI still in state {state} after timeout'.format(state=images[0].state)
            else:
                ret['comment'] = 'AMI with name {ami_name} not found after timeout.'.format(ami_name=ami_name)
            ret['result'] = False
            return ret
        sleep(5)
    return ret
|
async def stop_slaves(self, timeout=1):
    """Stop all the slaves by sending a stop-message to their managers.

    :param int timeout:
        Timeout for connecting to each manager. If a connection can not
        be made before the timeout expires, the resulting error for that
        particular manager is logged, but the stopping of other managers
        is not halted.
    """
    for addr in self.addrs:
        try:
            r_manager = await self.env.connect(addr, timeout=timeout)
            await r_manager.stop()
        # BUGFIX: was a bare ``except:``, which also swallowed
        # KeyboardInterrupt and asyncio.CancelledError, making this
        # coroutine impossible to cancel cleanly.
        except Exception:
            self._log(logging.WARNING, "Could not stop {}".format(addr))
|
def set(self, dict_name, key, value, priority=None):
    '''Set a single value for a single key. This requires a session lock.

    :param str dict_name: name of the dictionary to update
    :param str key: key to update
    :param str value: value to assign to `key`
    :param int priority: priority score for the value (if any)
    '''
    priorities = None if priority is None else {key: priority}
    self.update(dict_name, {key: value}, priorities=priorities)
|
def run(self):
    """Minify every registered json file, then print a size report and
    exit."""
    for rel_path in sorted(self.paths):
        self.minify(join(self.data_dir, rel_path))
    before = self.size_of(self.before_total)
    after = self.size_of(self.after_total)
    saved = self.size_of(self.before_total - self.after_total)
    # Green ANSI highlighting for the three sizes.
    report = ('\nTotal: \033[92m{}\033[0m -> \033[92m{}\033[0m. '
              'Compressed: \033[92m{}\033[0m\n')
    print(report.format(before, after, saved))
|
def parse_http_list(s):
    """Parse comma-separated lists as described by RFC 2068 Section 2.

    Elements may be quoted-strings, and a quoted-string may contain a
    comma; a non-quoted string may have quotes in the middle. Neither
    commas nor quotes count when escaped with a backslash. Only
    double-quotes count, not single-quotes. Returns the stripped
    elements.
    """
    parts = []
    current = []
    in_quotes = False
    escaped = False
    for ch in s:
        if escaped:
            # Previous char was a backslash inside quotes: take this one
            # literally (the backslash itself is dropped).
            current.append(ch)
            escaped = False
        elif in_quotes:
            if ch == '\\':
                escaped = True
            else:
                if ch == '"':
                    in_quotes = False
                current.append(ch)
        elif ch == ',':
            parts.append(''.join(current))
            current = []
        else:
            if ch == '"':
                in_quotes = True
            current.append(ch)
    # Flush the trailing element, if any.
    if current:
        parts.append(''.join(current))
    return [p.strip() for p in parts]
|
def add_file_task(self, dir_name=None):
    '''Add an upload task: pops up a file-chooser dialog.

    :param dir_name: remote directory to upload into, forwarded to
        ``upload_files`` -- presumably the current folder when ``None``;
        TODO confirm against upload_files.
    '''
    file_dialog = Gtk.FileChooserDialog(_('Choose Files..'), self.app.window, Gtk.FileChooserAction.OPEN, (Gtk.STOCK_CANCEL, Gtk.ResponseType.CANCEL, Gtk.STOCK_OK, Gtk.ResponseType.OK))
    file_dialog.set_modal(True)
    file_dialog.set_select_multiple(True)
    file_dialog.set_default_response(Gtk.ResponseType.OK)
    response = file_dialog.run()
    # Cancelled or closed: destroy the dialog and do nothing.
    if response != Gtk.ResponseType.OK:
        file_dialog.destroy()
        return
    source_paths = file_dialog.get_filenames()
    file_dialog.destroy()
    if source_paths:
        self.upload_files(source_paths, dir_name)
|
def reset(self):
    """Reset the tough connection.

    If a reset is not possible, tries to reopen the connection.
    It will not complain if the connection is already closed.
    """
    try:
        # Happy path: reset the underlying connection and restore the
        # session defaults.
        self._con.reset()
        self._transaction = False
        self._setsession()
        self._usage = 0
    except Exception:
        try:
            # Reset failed -- try a fresh connection instead.
            self.reopen()
        except Exception:
            try:
                # Reopen failed too; roll back as a last resort and
                # deliberately swallow any error (the connection may
                # already be closed).
                self.rollback()
            except Exception:
                pass
|
def union_update(self, *others):
    r"""Update the multiset in place, keeping the maximum multiplicity of
    each element across self and all *others*.

    >>> ms = Multiset('aab')
    >>> ms.union_update('bc')
    >>> sorted(ms)
    ['a', 'a', 'b', 'c']

    You can also use the ``|=`` operator for the same effect. However,
    the operator version will only accept a set as other operator, not
    any iterable, to avoid errors.

    >>> ms = Multiset('aab')
    >>> ms |= Multiset('bccd')
    >>> sorted(ms)
    ['a', 'a', 'b', 'c', 'c', 'd']

    For a variant which does not modify the multiset but returns a new
    one instead, see :meth:`union`.

    Args:
        others: The other sets to union this multiset with. Can also be
            any :class:`~typing.Iterable`\[~T] or
            :class:`~typing.Mapping`\[~T, :class:`int`], which are then
            converted to :class:`Multiset`\[~T].
    """
    elements = self._elements
    total = self._total
    for mapping in map(self._as_mapping, others):
        for element, count in mapping.items():
            current = elements.get(element, 0)
            if count > current:
                elements[element] = count
                total += count - current
    self._total = total
|
def saveVizGithub(contents, ontouri):
    """Publish the generated documentation as an anonymous GitHub gist.

    DEPRECATED on 2016-11-16: was working but had a dependency on
    package 'uritemplate.py' which caused problems at installation time.

    :param contents: rendered HTML documentation for the ontology.
    :param ontouri: URI of the ontology graph, embedded in the README.
    :return: whatever save_anonymous_gist returns -- presumably the
        gist URL(s); TODO confirm.
    """
    title = "Ontospy: ontology export"
    readme = """This ontology documentation was automatically generated with Ontospy (https://github.com/lambdamusic/Ontospy).
	The graph URI is: %s""" % str(ontouri)
    # Gist payload: the rendered page plus README and MIT license.
    files = {'index.html': {'content': contents}, 'README.txt': {'content': readme}, 'LICENSE.txt': {'content': """The MIT License (MIT)
Copyright (c) 2016 Ontospy project [http://lambdamusic.github.io/Ontospy/]
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE."""}}
    urls = save_anonymous_gist(title, files)
    return urls
|
def _to_ascii ( s ) :
"""Converts given string to ascii ignoring non ascii .
Args :
s ( text or binary ) :
Returns :
str :"""
|
# TODO : Always use unicode within ambry .
from six import text_type , binary_type
if isinstance ( s , text_type ) :
ascii_ = s . encode ( 'ascii' , 'ignore' )
elif isinstance ( s , binary_type ) :
ascii_ = s . decode ( 'utf-8' ) . encode ( 'ascii' , 'ignore' )
else :
raise Exception ( 'Unknown text type - {}' . format ( type ( s ) ) )
return ascii_
|
def generate_random_long(signed=True):
    """Return a cryptographically random 8-byte (64-bit) integer.

    :param signed: when True, interpret the bytes as a signed
        (two's-complement) value.
    """
    raw = os.urandom(8)
    return int.from_bytes(raw, byteorder='little', signed=signed)
|
def send(self, message):
    """Record the message's events and return a fake 'ok' response.

    :returns: A response message with ``ok = True``
    """
    self.events.extend(message.events)
    reply = riemann_client.riemann_pb2.Msg()
    reply.ok = True
    return reply
|
def mavlink_packet(self, m):
    '''Track the vehicle position from mavlink GLOBAL_POSITION_INT
    packets.'''
    if m.get_type() != 'GLOBAL_POSITION_INT':
        return
    # Coordinates this close to zero indicate no valid fix yet
    # (lat/lon are presumably 1e7-scaled integers -- TODO confirm).
    if abs(m.lat) < 1000 and abs(m.lon) < 1000:
        return
    self.vehicle_pos = VehiclePos(m)
|
def from_config(cls, shelf, obj, **kwargs):
    """Construct a Recipe from a plain Python dictionary.

    Most of the directives only support named ingredients, specified as
    strings, and looked up on the shelf. But filters can be specified as
    objects. Additionally, each RecipeExtension can extract and handle
    data from the configuration.
    """
    def subdict(d, keys):
        # Return a copy of d restricted to the given keys.
        new = {}
        for k in keys:
            if k in d:
                new[k] = d[k]
        return new
    core_kwargs = subdict(obj, recipe_schema['schema'].keys())
    core_kwargs = normalize_schema(recipe_schema, core_kwargs)
    # Filters given as dicts are parsed into condition objects; anything
    # else is passed through untouched (already a filter object).
    core_kwargs['filters'] = [parse_condition(filter, shelf.Meta.select_from) if isinstance(filter, dict) else filter for filter in obj.get('filters', [])]
    core_kwargs.update(kwargs)
    recipe = cls(shelf=shelf, **core_kwargs)
    # Now let extensions handle their own stuff.
    # NOTE(review): ``recipe`` is rebound to each extension's return
    # value -- extensions are presumably expected to return the (possibly
    # replaced) recipe; confirm that none returns None.
    for ext in recipe.recipe_extensions:
        additional_schema = getattr(ext, 'recipe_schema', None)
        if additional_schema is not None:
            ext_data = subdict(obj, additional_schema.keys())
            ext_data = normalize_dict(additional_schema, ext_data)
            recipe = ext.from_config(ext_data)
    return recipe
|
def append(self, cell):
    """Append *cell* to this row.

    :param cell: must be a Cell instance.
    """
    assert isinstance(cell, Cell)
    super(Row, self).append(cell)
|
def moderate(self, comment, content_object, request):
    """Determine whether a given comment on a given object should be
    allowed to show up immediately, or should be marked non-public and
    await approval.

    Return ``True`` if the comment should be moderated (marked
    non-public), ``False`` otherwise.
    """
    # Auto-moderation requires both a date field and a day threshold.
    if not self.auto_moderate_field or self.moderate_after is None:
        return False
    threshold_date = getattr(content_object, self.auto_moderate_field)
    if threshold_date is None:
        return False
    age = self._get_delta(timezone.now(), threshold_date)
    return age.days >= self.moderate_after
|
def mock_import(do_not_mock=None, **mock_kwargs):
    """Mocks import statements by ignoring ImportErrors and replacing the
    missing module with a Mock.

    :param str|unicode|list[str|unicode] do_not_mock: names of modules
        that should exist, and an ImportError could be raised for.
    :param mock_kwargs: kwargs for MagicMock object.
    :return: patch object
    """
    do_not_mock = _to_list(do_not_mock)
    def try_import(module_name, *args, **kwargs):
        # Replacement for builtins.__import__: real import first, Mock on
        # failure.
        try:
            return _builtins_import(module_name, *args, **kwargs)
        except:  # intentionally catch all exceptions
            if any((_match(module_name, prefix) for prefix in do_not_mock)):  # This is a module we need to import,
                # so we raise the exception instead of mocking it
                raise
            # Mock external module so we can peacefully create our client
            return mock.MagicMock(**mock_kwargs)
    return mock.patch('six.moves.builtins.__import__', try_import)
|
def _action(action='get', search=None, one=True, force=False):
    '''Multi action helper for start, stop, get, ...

    :param action: one of 'start', 'stop', 'reboot', 'get', 'is_running'.
    :param search: either a 'key=value' vmadm search expression, or a
        plain uuid/hostname/alias to match; None matches all vms.
    :param one: when True, error out if more than one vm matches.
    :param force: accepted for interface compatibility; not used here.
    :return: per-vm results dict, a bool for 'is_running', or an
        {'Error': ...} dict on failure.
    '''
    vms = {}
    matched_vms = []
    client = salt.client.get_local_client(__opts__['conf_file'])
    ## lookup vms
    try:
        vmadm_args = {}
        vmadm_args['order'] = 'uuid,alias,hostname,state'
        # A 'key=value' search is handed to vmadm directly.
        if '=' in search:
            vmadm_args['search'] = search
        # Query every SmartOS compute node for its vm list.
        for cn in client.cmd_iter('G@virtual:physical and G@os:smartos', 'vmadm.list', kwarg=vmadm_args, tgt_type='compound'):
            if not cn:
                continue
            node = next(six.iterkeys(cn))
            # Skip malformed per-node replies.
            if not isinstance(cn[node], dict) or 'ret' not in cn[node] or not isinstance(cn[node]['ret'], dict):
                continue
            for vm in cn[node]['ret']:
                vmcfg = cn[node]['ret'][vm]
                vmcfg['node'] = node
                vms[vm] = vmcfg
    except SaltClientError as client_error:
        # NOTE(review): client errors are deliberately swallowed here;
        # the empty-vms check below turns this into a user-facing error.
        pass
    ## check if we have vms
    if not vms:
        return {'Error': 'No vms found.'}
    ## simple search
    if '=' not in search:
        loop_pass = 0
        while loop_pass < 3:  ## each pass will try a different field
            if loop_pass == 0:
                field = 'uuid'
            elif loop_pass == 1:
                field = 'hostname'
            else:
                field = 'alias'
            ## loop vms and try to match
            for vm in vms:
                if field == 'uuid' and vm == search:
                    matched_vms.append(vm)
                    break
                    # exit for on uuid match (max = 1)
                elif field in vms[vm] and vms[vm][field] == search:
                    matched_vms.append(vm)
            ## exit on match(es) or try again
            if matched_vms:
                break
            else:
                loop_pass += 1
    else:
        # vmadm already applied the 'key=value' search: take everything.
        for vm in vms:
            matched_vms.append(vm)
    ## check if we have vms
    if not matched_vms:
        return {'Error': 'No vms matched.'}
    ## multiple allowed?
    if one and len(matched_vms) > 1:
        return {'Error': 'Matched {0} vms, only one allowed!'.format(len(matched_vms)), 'Matches': matched_vms}
    ## perform action
    ret = {}
    if action in ['start', 'stop', 'reboot', 'get']:
        for vm in matched_vms:
            vmadm_args = {'key': 'uuid', 'vm': vm}
            try:
                # Run the action on the node that hosts this vm.
                for vmadm_res in client.cmd_iter(vms[vm]['node'], 'vmadm.{0}'.format(action), kwarg=vmadm_args):
                    if not vmadm_res:
                        continue
                    if vms[vm]['node'] in vmadm_res:
                        ret[vm] = vmadm_res[vms[vm]['node']]['ret']
            except SaltClientError as client_error:
                ret[vm] = False
    elif action in ['is_running']:
        # True only when every matched vm is in the 'running' state.
        ret = True
        for vm in matched_vms:
            if vms[vm]['state'] != 'running':
                ret = False
                break
    return ret
|
def decode_buffer(buffer: dict) -> np.ndarray:
    """Translate a DataBuffer into a numpy array.

    :param buffer: Dictionary with 'data' byte array, 'dtype', and
        'shape' fields
    :return: NumPy array of decoded data
    """
    flat = np.frombuffer(buffer['data'], dtype=buffer['dtype'])
    return np.reshape(flat, buffer['shape'])
|
def validate(pfeed, *, as_df=True, include_warnings=True):
    """Check whether the given pfeed satisfies the ProtoFeed spec.

    Runs every table-checking function (:func:`check_frequencies`,
    :func:`check_meta`, etc.) and collects their
    ``[problem type, message, table, rows]`` items.

    Parameters
    ----------
    pfeed : ProtoFeed
    as_df : boolean
        If ``True``, return the report as a DataFrame with columns
        ``'type'`` ('error' = spec violation, 'warning' = non-fatal issue),
        ``'message'``, ``'table'`` and ``'rows'``; otherwise return a list.
    include_warnings : boolean
        If ``True``, include both errors and warnings; otherwise errors only.

    Returns
    -------
    list or DataFrame
        Possibly empty report of problems. Returns early if the pfeed is
        missing required tables or required columns.
    """
    checker_names = [
        'check_frequencies',
        'check_meta',
        'check_service_windows',
        'check_shapes',
        'check_stops',
    ]
    problems = []
    for name in checker_names:
        # Checkers live at module level; look each one up by name.
        checker = globals()[name]
        problems += checker(pfeed, include_warnings=include_warnings)
    return gt.format_problems(problems, as_df=as_df)
|
def ColorWithLightness(self, lightness):
    '''Return a copy of this color with its HSL lightness replaced.

    Parameters:
      :lightness:
        The lightness of the new color [0...1].

    Returns:
      A grapefruit.Color instance.

    >>> Color.NewFromHsl(30, 1, 0.5).ColorWithLightness(0.25)
    (0.5, 0.25, 0.0, 1.0)
    >>> Color.NewFromHsl(30, 1, 0.5).ColorWithLightness(0.25).hsl
    (30, 1, 0.25)
    '''
    hue, saturation, _ = self.__hsl
    # Rebuild from HSL, preserving alpha and the reference white point.
    return Color((hue, saturation, lightness), 'hsl', self.__a, self.__wref)
|
def email(self):
    """Shortcut property for finding the e-mail address or bot URL."""
    raw = self._raw
    if "profile" in raw:
        address = raw["profile"].get("email")
    elif "bot_url" in raw:
        address = raw["bot_url"]
    else:
        address = None
    if not address:
        # Falsy covers both a missing key and an empty string.
        logging.debug("No email found for %s", raw.get("name"))
    return address
|
def baldwinsoc_winners(self, profile):
    """Returns an integer list that represents all possible winners of a profile under baldwin rule.

    Baldwin's rule repeatedly eliminates a candidate with the lowest
    pairwise (Borda-style) score; because ties at the minimum may be broken
    in several ways, the search explores every elimination branch via a
    depth-first traversal and collects every candidate that can survive.

    :ivar Profile profile: A Profile object that represents an election profile.
    """
    ordering = profile.getOrderVectors()
    m = profile.numCands
    prefcounts = profile.getPreferenceCounts()
    # Candidate labels may be 0-based or 1-based depending on the profile.
    if min(ordering[0]) == 0:
        startstate = set(range(m))
    else:
        startstate = set(range(1, m + 1))
    # Weighted majority graph over the full candidate set (not normalized).
    wmg = self.getWmg2(prefcounts, ordering, startstate, normalize=False)
    known_winners = set()
    # ----- Some statistics -----
    hashtable2 = set()  # child states already generated, keyed by sorted tuple
    # push the node of start state into the priority queue
    root = Node(value=startstate)
    stackNode = []
    stackNode.append(root)
    while stackNode:
        # ----- pop the current node -----
        node = stackNode.pop()
        state = node.value.copy()
        # goal state 1: if the state set contains only 1 candidate, then stop
        if len(state) == 1 and list(state)[0] not in known_winners:
            known_winners.add(list(state)[0])
            continue
        # goal state 2 (pruning): if the state set is subset of the known_winners set, then stop
        if state <= known_winners:
            continue
        # ----- Compute plurality score for the current remaining candidates -----
        plural_score = dict()
        for cand in state:
            plural_score[cand] = 0
        # Each candidate's score is the sum of its wmg entries against every
        # other remaining candidate.
        for cand1, cand2 in itertools.permutations(state, 2):
            plural_score[cand1] += wmg[cand1][cand2]
        # if current state satisfies one of the 3 goal state, continue to the next loop
        # After using heuristics, generate children and push them into priority queue
        # frontier = [val for val in known_winners if val in state] + list(set(state) - set(known_winners))
        minscore = min(plural_score.values())
        # Branch on every candidate tied for the minimum score.
        for to_be_deleted in state:
            if plural_score[to_be_deleted] == minscore:
                child_state = state.copy()
                child_state.remove(to_be_deleted)
                tpc = tuple(sorted(child_state))
                if tpc in hashtable2:
                    # Already queued/expanded elsewhere; skip the duplicate.
                    continue
                else:
                    hashtable2.add(tpc)
                    child_node = Node(value=child_state)
                    stackNode.append(child_node)
    return sorted(known_winners)
|
def find_first_tag(tags, entity_type, after_index=-1):
    """Search ``tags`` for the first entity of ``entity_type`` past an index.

    Args:
        tags (list): list of tag dicts whose entities are compared against
            ``entity_type``
        entity_type (str): the entity type to look for in the tags
        after_index (int): the tag's start token must be greater than this

    Returns:
        (tag, v, confidence):
            tag (str): the tag that matched
            v (str): the word that matched
            confidence (float): accuracy measure, 1 = full confidence, 0 = none.
        ``(None, None, None)`` when nothing matches.
    """
    wanted = entity_type.lower()
    for tag in tags:
        # The start-token constraint is per-tag, so check it once up front.
        if tag.get('start_token', 0) <= after_index:
            continue
        for entity in tag.get('entities'):
            for word, etype in entity.get('data'):
                if etype.lower() == wanted:
                    return tag, word, entity.get('confidence')
    return None, None, None
|
def count(self, level=None):
    """Return number of non-NA/null observations in the Series.

    Parameters
    ----------
    level : int or level name, default None
        If the axis is a MultiIndex (hierarchical), count along a
        particular level, collapsing into a smaller Series.

    Returns
    -------
    int or Series (if level specified)
        Number of non-null values in the Series.

    Examples
    --------
    >>> s = pd.Series([0.0, 1.0, np.nan])
    >>> s.count()
    2
    """
    if level is None:
        # Fast path: count non-missing values over the whole Series.
        return notna(com.values_from_object(self)).sum()
    if isinstance(level, str):
        # Resolve a level name to its positional index in the MultiIndex.
        level = self.index._get_level_number(level)
    lev = self.index.levels[level]
    level_codes = np.array(self.index.codes[level], subok=False, copy=True)
    # Codes of -1 mark missing labels at this level; route them into an
    # extra NA bucket appended at the end of the level values.
    mask = level_codes == -1
    if mask.any():
        level_codes[mask] = cnt = len(lev)
        lev = lev.insert(cnt, lev._na_value)
    # Keep only codes belonging to non-null values, then tally per label.
    obs = level_codes[notna(self.values)]
    out = np.bincount(obs, minlength=len(lev) or None)
    return self._constructor(out, index=lev, dtype='int64').__finalize__(self)
|
def split_input(cls, mapper_spec):
    """Returns a list of shard_count input_spec_shards for input_spec.

    NOTE(review): Python 2 code (``basestring``/``xrange``) targeting the
    App Engine MapReduce/Blobstore APIs.

    Args:
      mapper_spec: The mapper specification to split from. Must contain
        'blob_keys' parameter with one or more blob keys.

    Returns:
      A list of BlobstoreInputReaders corresponding to the specified shards.
    """
    params = _get_params(mapper_spec)
    blob_keys = params[cls.BLOB_KEYS_PARAM]
    if isinstance(blob_keys, basestring):
        # This is a mechanism to allow multiple blob keys (which do not contain
        # commas) in a single string. It may go away.
        blob_keys = blob_keys.split(",")
    # Look up the byte size of every blob so shards can be sized evenly.
    blob_sizes = {}
    for blob_key in blob_keys:
        blob_info = blobstore.BlobInfo.get(blobstore.BlobKey(blob_key))
        blob_sizes[blob_key] = blob_info.size
    shard_count = min(cls._MAX_SHARD_COUNT, mapper_spec.shard_count)
    shards_per_blob = shard_count // len(blob_keys)
    if shards_per_blob == 0:
        # Always produce at least one shard per blob.
        shards_per_blob = 1
    chunks = []
    for blob_key, blob_size in blob_sizes.items():
        blob_chunk_size = blob_size // shards_per_blob
        for i in xrange(shards_per_blob - 1):
            chunks.append(BlobstoreLineInputReader.from_json({cls.BLOB_KEY_PARAM: blob_key, cls.INITIAL_POSITION_PARAM: blob_chunk_size * i, cls.END_POSITION_PARAM: blob_chunk_size * (i + 1)}))
        # The final shard absorbs any remainder bytes up to the true blob size.
        chunks.append(BlobstoreLineInputReader.from_json({cls.BLOB_KEY_PARAM: blob_key, cls.INITIAL_POSITION_PARAM: blob_chunk_size * (shards_per_blob - 1), cls.END_POSITION_PARAM: blob_size}))
    return chunks
|
def unique_id(self):
    """Generate a tuple that uniquely identifies a `Monomer` in an `Assembly`.

    Notes
    -----
    The unique_id distinguishes each monomer within a polymer; if every
    polymer in an assembly has a distinct id, it uniquely identifies each
    monomer within the assembly.
    The hetero-flag follows the Biopython convention: a single whitespace
    for a non-hetero residue, 'H_' plus the residue name (e.g. 'H_GLC' for
    glucose) for a hetero residue, or 'W' for a water molecule. See
    http://biopython.org/wiki/The_Biopython_Structural_Bioinformatics_FAQ

    Returns
    -------
    unique_id : tuple
        unique_id[0] is the polymer_id; unique_id[1] is a triple of the
        hetero-flag, the monomer id (residue number) and the insertion code.
    """
    if not self.is_hetero:
        flag = ' '
    elif self.mol_code == 'HOH':
        flag = 'W'
    else:
        flag = 'H_{0}'.format(self.mol_code)
    return self.ampal_parent.id, (flag, self.id, self.insertion_code)
|
def setLocalityGroups(self, login, tableName, groups):
    """Set the locality groups of a table (auto-generated Thrift client stub).

    Performs a synchronous RPC: sends the request, then blocks on the
    reply (which may raise server-declared exceptions).

    Parameters:
     - login
     - tableName
     - groups
    """
    self.send_setLocalityGroups(login, tableName, groups)
    self.recv_setLocalityGroups()
|
def param_sweep(model, sequences, param_grid, n_jobs=1, verbose=0):
    """Fit a series of models over a range of parameters.

    Parameters
    ----------
    model : msmbuilder.BaseEstimator
        An *instance* of an estimator to be used to fit data.
    sequences : list of array-like
        List of sequences, or a single sequence. Each sequence should be a
        1D iterable of state labels (integers, strings, or other orderable
        objects).
    param_grid : dict or sklearn.grid_search.ParameterGrid
        Parameter grid specifying the models to fit; see
        sklearn.grid_search.ParameterGrid for details.
    n_jobs : int, optional
        Number of jobs to run in parallel using joblib.Parallel.

    Returns
    -------
    models : list
        List of models fit to the data according to param_grid.
    """
    if isinstance(param_grid, dict):
        grid = ParameterGrid(param_grid)
    elif isinstance(param_grid, ParameterGrid):
        grid = param_grid
    else:
        raise ValueError("param_grid must be a dict or ParamaterGrid instance")
    # One (fresh clone, sequences) pair per grid point.
    tasks = ((clone(model).set_params(**point), sequences) for point in grid)
    return Parallel(n_jobs=n_jobs, verbose=verbose)(
        delayed(_param_sweep_helper)(task) for task in tasks)
|
def _get_param_values ( self , name ) :
"""Return the parameter by name as stored on the protocol
agent payload . This loads the data from the local cache
versus having to query the SMC for each parameter .
: param str name : name of param
: rtype : dict"""
|
for param in self . data . get ( 'paParameters' , [ ] ) :
for _pa_parameter , values in param . items ( ) :
if values . get ( 'name' ) == name :
return values
|
def output(self, value):
    """SPL output port assignment expression.

    Arguments:
        value(str): SPL expression used for an output assignment. This can be a string, a constant, or an :py:class:`Expression`.

    Returns:
        Expression: Output assignment expression that is valid in the context of this operator.
    """
    # Delegate to the base implementation, targeting this source's single
    # output stream.
    return super(Source, self).output(self.stream, value)
|
def set_basic_params(self, count=None, thunder_lock=None, lock_engine=None):
    """Configure basic lock options for the section.

    :param int count: Create the specified number of shared locks.

    :param bool thunder_lock: Serialize accept() usage (if possible).
        Could improve performance on Linux with robust pthread mutexes.
        http://uwsgi.readthedocs.io/en/latest/articles/SerializingAccept.html

    :param str|unicode lock_engine: Set the lock engine.
        Example:
            - ipcsem
    """
    # Order matters only for readability; each option maps to one uWSGI key.
    options = (
        ('thunder-lock', thunder_lock, {'cast': bool}),
        ('lock-engine', lock_engine, {}),
        ('locks', count, {}),
    )
    for key, val, extra in options:
        self._set(key, val, **extra)
    return self._section
|
def apply_config(self, config):
    """Apply a config dictionary: set the hosts and base_path attributes.

    If the kazoo client connection is established, its hosts list is
    updated to the newly configured value.
    """
    previous_base = self.base_path
    self.hosts = config["hosts"]
    self.base_path = config["path"]
    if not self.connected.is_set():
        # Not connected yet; the new values take effect on connect.
        return
    logger.debug("Setting ZK hosts to %s", self.hosts)
    self.client.set_hosts(",".join(self.hosts))
    if previous_base and previous_base != self.base_path:
        # Watches were registered under the old base path; a restart is the
        # only way to re-register them.
        logger.critical("ZNode base path changed!" + " Lighthouse will need to be restarted" + " to watch the right znodes")
|
def _random_id ( self , size = 16 , chars = string . ascii_uppercase + string . digits ) :
"""Generates a random id based on ` size ` and ` chars ` variable .
By default it will generate a 16 character long string based on
ascii uppercase letters and digits ."""
|
return '' . join ( random . choice ( chars ) for _ in range ( size ) )
|
def package(self):
    """Find a package name from a build task's parameters.

    :returns: name of the package this build task is building, or None for
        task methods that carry no package information.
    :raises: ValueError if we could not parse this task's request params.
    """
    method = self.method
    if method == 'buildNotification':
        return self.params[1]['name']
    if method in ('createImage', 'image', 'livecd'):
        return self.params[0]
    if method == 'indirectionimage':
        return self.params[0]['name']
    # params[0] is the source URL for these tasks:
    if method not in ('build', 'buildArch', 'buildContainer', 'buildMaven', 'buildSRPMFromSCM', 'maven'):
        return None
    # (I wish there was a better way to do this.)
    source = self.params[0]
    parsed = urlparse(source)
    # build tasks can load an SRPM from a "cli-build" tmpdir:
    if source.endswith('.src.rpm'):
        srpm_name = os.path.basename(source)
        # NVR split; version and release are discarded here.
        name, _version, _release = srpm_name.rsplit('-', 2)
        return name
    # or an allowed SCM:
    if parsed.scheme:
        pkg = os.path.basename(parsed.path)
        if pkg.endswith('.git'):
            pkg = pkg[:-len('.git')]
        if method == 'buildContainer':
            pkg += '-container'
        return pkg
    raise ValueError('could not parse source "%s"' % source)
|
def change(properties, feature, value=None):
    """Returns a modified version of properties with all values of the
    given feature replaced by the given value.
    If 'value' is None the feature will be removed.
    """
    assert is_iterable_typed(properties, basestring)
    assert isinstance(feature, basestring)
    assert isinstance(value, (basestring, type(None)))
    result = []
    # Normalize the bare feature name to its grist form ("<feature>").
    feature = add_grist(feature)
    for p in properties:
        if get_grist(p) == feature:
            # Matching property: replace its value, or drop it entirely
            # when no replacement value was given (falsy value == removal).
            if value:
                result.append(replace_grist(value, feature))
        else:
            # Property of a different feature: keep unchanged.
            result.append(p)
    return result
|
def calc_naturalremotedischarge_v1(self):
    """Try to estimate the natural discharge of a cross section far downstream
    based on the last few simulation steps.

    Required control parameter:
      |NmbLogEntries|

    Required log sequences:
      |LoggedTotalRemoteDischarge|
      |LoggedOutflow|

    Calculated flux sequence:
      |NaturalRemoteDischarge|

    Basic equation:
      :math:`RemoteDemand =
      max(\\frac{\\Sigma(LoggedTotalRemoteDischarge - LoggedOutflow)}
      {NmbLogEntries}), 0)`

    Examples:

        Usually, the mean total remote flow should be larger than the mean
        dam outflow; the estimate is then simply the difference of the means:

        >>> from hydpy.models.dam import *
        >>> parameterstep()
        >>> nmblogentries(3)
        >>> logs.loggedtotalremotedischarge(2.5, 2.0, 1.5)
        >>> logs.loggedoutflow(2.0, 1.0, 0.0)
        >>> model.calc_naturalremotedischarge_v1()
        >>> fluxes.naturalremotedischarge
        naturalremotedischarge(1.0)

        Due to wave travel times the difference might be negative; the
        estimate is clipped to zero in such cases:

        >>> logs.loggedoutflow(4.0, 3.0, 5.0)
        >>> model.calc_naturalremotedischarge_v1()
        >>> fluxes.naturalremotedischarge
        naturalremotedischarge(0.0)
    """
    con = self.parameters.control.fastaccess
    flu = self.sequences.fluxes.fastaccess
    log = self.sequences.logs.fastaccess
    # Sum the logged differences between remote discharge and dam outflow.
    total = 0.
    for idx in range(con.nmblogentries):
        total += log.loggedtotalremotedischarge[idx] - log.loggedoutflow[idx]
    # Average over the log entries, clipped at zero.
    if total > 0.:
        flu.naturalremotedischarge = total / con.nmblogentries
    else:
        flu.naturalremotedischarge = 0.
|
def _hash_of_file ( path , algorithm ) :
"""Return the hash digest of a file ."""
|
with open ( path , 'rb' ) as archive :
hash = hashlib . new ( algorithm )
for chunk in read_chunks ( archive ) :
hash . update ( chunk )
return hash . hexdigest ( )
|
def fields(self):
    '''Return a tuple of ordered fields for this :class:`ColumnTS`.'''
    members = self.client.smembers(self.id + ':fields')
    enc = self.client.encoding
    # Redis returns raw bytes; decode before sorting so ordering is textual.
    return tuple(sorted(member.decode(enc) for member in members))
|
def flush_all(self, time=0):
    """Send a command to every server to flush/delete all keys.

    :param time: Time to wait until flush in seconds.
    :type time: int
    :return: True if at least one server succeeded, False otherwise
    :rtype: bool
    """
    outcomes = [server.flush_all(time) for server in self.servers]
    # Success of any single server counts as overall success.
    return any(outcomes)
|
def add_filter(self, filter_values):
    """Improve the original one to deal with OR cases."""
    field = self._params['field']
    clauses = []
    for value in filter_values:
        if OR_SEPARATOR in value:
            # Value embeds the OR operator: build a `should` query across
            # the separated sub-values.
            sub_terms = [Q('term', **{field: part}) for part in value.split(OR_SEPARATOR)]
            clauses.append(Q('bool', should=sub_terms))
        else:
            clauses.append(Q('term', **{field: value}))
    # Multiple clauses are ANDed together; a single clause stands alone.
    if len(clauses) > 1:
        return Q('bool', must=clauses)
    return clauses[0]
|
def auto_doc(tool, nco_self):
    """Generate the __doc__ string of the decorated function by calling the nco help command.

    :param tool: name of the nco tool whose ``--help`` output becomes the docstring
    :param nco_self: object whose ``call`` method runs the tool
    :return: decorator that sets ``__doc__`` and returns the function unchanged
    """
    def decorator(func):
        # Help text is fetched at decoration time, not at call time.
        help_text = nco_self.call([tool, "--help"]).get("stdout")
        func.__doc__ = help_text
        return func
    return decorator
|
def add_to_category(self, category, name, action):
    """Adds given action to given category.

    :param category: Category to store the action.
    :type category: unicode
    :param name: Action name.
    :type name: unicode
    :param action: Action object.
    :type action: QAction
    :return: Method success.
    :rtype: bool
    """
    # Keep the textual category name: ``category`` is rebound to the
    # vivified dict below, which the old debug log accidentally printed
    # (and with the name/category arguments swapped).
    category_name = category
    category = self.get_category(category, vivify=True)
    if not isinstance(category, dict):
        return False
    category[name] = action
    LOGGER.debug("> Added '{0}' action to '{1}' category!".format(name, category_name))
    return True
|
def symlink(source, target, isfile=True):
    """Creates a symlink at target *file* pointing to source.

    :arg isfile: when True, if symlinking is disabled in the global config, the file
      is copied instead with fortpy.utility.copyfile; otherwise fortpy.utility.copy
      is used and the target is considered a directory.
    """
    from fortpy.code import config
    from os import path
    if config.symlink:
        from os import symlink, remove
        # Replace an existing file or stale link at the target location.
        if path.isfile(target) or path.islink(target):
            remove(target)
        elif path.isdir(target):
            # Refuse to delete a real directory; warn and bail out.
            msg.warn("Cannot auto-delete directory '{}' for symlinking.".format(target))
            return
        symlink(source, target)
    else:
        # Symlinking disabled in config: fall back to copying.
        msg.info("  COPY: {}".format(source))
        if isfile:
            copyfile(source, target)
        else:
            copy(source, target)
|
def send_message(self, message):
    """Send a message to Storm via stdout."""
    if isinstance(message, dict):
        self.serializer.send_message(message)
        return
    # Non-dict payloads are a programming error: log and drop them.
    logger = self.logger if self.logger else log
    logger.error("%s.%d attempted to send a non dict message to Storm: " "%r", self.component_name, self.pid, message, )
|
def formation_energy ( self , chemical_potentials = None , fermi_level = 0 ) :
"""Computes the formation energy for a defect taking into account a given chemical potential and fermi _ level"""
|
chemical_potentials = chemical_potentials if chemical_potentials else { }
chempot_correction = sum ( [ chem_pot * ( self . bulk_structure . composition [ el ] - self . defect . defect_composition [ el ] ) for el , chem_pot in chemical_potentials . items ( ) ] )
formation_energy = self . energy + chempot_correction
if "vbm" in self . parameters :
formation_energy += self . charge * ( self . parameters [ "vbm" ] + fermi_level )
else :
formation_energy += self . charge * fermi_level
return formation_energy
|
def single(self):
    """Return the associated node.

    :return: node
    :raises CardinalityViolation: when zero or more than one node is related
    """
    matches = super(One, self).all()
    if not matches:
        raise CardinalityViolation(self, 'none')
    if len(matches) > 1:
        # More than one related node breaks the declared one-to-one contract.
        raise CardinalityViolation(self, len(matches))
    return matches[0]
|
def add_sender_info(self, sender_txhash, nulldata_vin_outpoint, sender_out_data):
    """Record sender information in our block info.

    Fills the pre-allocated ``senders`` slot for the input of the nulldata
    transaction funded by ``sender_txhash`` and debits the spent value into
    that transaction's running ``fee`` tally.

    @sender_txhash: txid of the sender
    @nulldata_vin_outpoint: the 'vout' index from the nulldata tx input that this transaction funded
    @sender_out_data: decoded output of the sender tx ('value' and 'script')

    Returns True.
    """
    assert sender_txhash in self.sender_info.keys(), "Missing sender info for %s" % sender_txhash
    assert nulldata_vin_outpoint in self.sender_info[sender_txhash], "Missing outpoint %s for sender %s" % (nulldata_vin_outpoint, sender_txhash)
    # Location of the consuming transaction, recorded in an earlier pass.
    block_hash = self.sender_info[sender_txhash][nulldata_vin_outpoint]['block_hash']
    relindex = self.sender_info[sender_txhash][nulldata_vin_outpoint]['relindex']
    relinput_index = self.sender_info[sender_txhash][nulldata_vin_outpoint]['relinput']
    value_in_satoshis = sender_out_data['value']
    script_pubkey = sender_out_data['script']
    # Decode the scriptPubKey to classify it and extract its addresses.
    script_info = bits.btc_tx_output_parse_script(script_pubkey)
    script_type = script_info['type']
    addresses = script_info.get('addresses', [])
    sender_info = {"value": value_in_satoshis, "script_pubkey": script_pubkey, "script_type": script_type, "addresses": addresses, "nulldata_vin_outpoint": nulldata_vin_outpoint, "txid": sender_txhash, }
    # debit this tx's total value
    self.block_info[block_hash]['txns'][relindex]['fee'] += value_in_satoshis
    # remember this sender, but put it in the right place.
    # senders[i] must correspond to tx['vin'][i]
    self.block_info[block_hash]['txns'][relindex]['senders'][relinput_index] = sender_info
    self.block_info[block_hash]['num_senders'] += 1
    return True
|
def shared(self) -> typing.Union[None, SharedCache]:
    """The shared display object associated with this project."""
    project = self._project
    if project:
        return project.shared
    # No project attached yet.
    return None
|
def compare_and_set(self, oldval, newval):
    '''Atomically set the atom's value to `newval` if and only if `oldval`
    is the current value. Returns `True` on success, otherwise `False`.

    :param oldval: The old expected value.
    :param newval: The new value which will be set if and only if `oldval`
                   equals the current value.
    '''
    swapped = self._state.compare_and_set(oldval, newval)
    if swapped:
        # Watches fire only when the swap actually happened.
        self.notify_watches(oldval, newval)
    return swapped
|
def _bake_script(script):
    """Takes a script element and bakes it in only if it contains a remote resource"""
    if "src" not in script.attrs:
        # Inline script: nothing to bake.
        return
    src = script["src"]
    if re.match("https?://", src):
        payload = _load_url(src).read()
    else:
        payload = _load_file(src).read()
    # Replace the element's children with the fetched script body.
    script.clear()
    if USING_PYTHON2:
        script.string = "\n" + payload + "\n"
    else:
        script.string = "\n" + str(payload) + "\n"
    del script["src"]
    del script["type"]
|
def set_shutter_level(self, level=0.0):
    """Set the shutter level.

    Args:
        level(float): the new level of the shutter. 0.0 = open, 1.0 = closed

    Returns:
        the result of the _restCall
    """
    payload = json.dumps({"channelIndex": 1, "deviceId": self.id, "shutterLevel": level})
    return self._restCall("device/control/setShutterLevel", body=payload)
|
def set_event_tags(self, id, **kwargs):  # noqa: E501
    """Set all tags associated with a specific event  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.set_event_tags(id, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str id: (required)
    :param list[str] body:
    :return: ResponseContainer
             If the method is called asynchronously,
             returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    # Both the sync and async paths delegate identically; with async_req
    # the delegate returns the request thread instead of the response data.
    return self.set_event_tags_with_http_info(id, **kwargs)  # noqa: E501
|
def get_ry0_distance(self, mesh):
    """For each point determine the corresponding Ry0 distance using the GC2
    configuration.

    See :meth:`superclass method
    <.base.BaseSurface.get_ry0_distance>`
    for spec of input and result values.
    """
    # Re-use the cached GC2 configuration only when it was computed for this
    # very mesh; recompute when there is no cache or the mesh differs.
    # (The previous condition used ``==``, which recomputed for an identical
    # mesh while re-using stale GC2 values for a different one, contradicting
    # the caching intent documented here.)
    if not self.tmp_mesh or self.tmp_mesh != mesh:
        self.gc2t, self.gc2u = self.get_generalised_coordinates(mesh.lons, mesh.lats)
        # Remember which mesh the cached GC2 coordinates belong to.
        self.tmp_mesh = deepcopy(mesh)
    # Default value ry0 (for sites alongside the fault length) is 0.0
    ry0 = numpy.zeros_like(self.gc2u, dtype=float)
    # Sites with negative gc2u (before the initial point of the fault):
    # distance is the absolute value of gc2u.
    neg_gc2u = self.gc2u < 0.0
    ry0[neg_gc2u] = numpy.fabs(self.gc2u[neg_gc2u])
    # Sites off the far end of the fault have values shifted by the
    # GC2 length of the fault.
    pos_gc2u = self.gc2u >= self.gc_length
    ry0[pos_gc2u] = self.gc2u[pos_gc2u] - self.gc_length
    return ry0
|
def bearer_auth():
    """Prompts the user for authorization using bearer authentication.
    tags:
      - Auth
    parameters:
      - in: header
        name: Authorization
        schema:
          type: string
    produces:
      - application/json
    responses:
      200:
        description: Sucessful authentication.
      401:
        description: Unsuccessful authentication.
    """
    # NOTE: the docstring above is consumed by the API-spec generator, so
    # its YAML content is kept verbatim.
    header = request.headers.get("Authorization")
    if header and header.startswith("Bearer "):
        token = header[len("Bearer "):]
        return jsonify(authenticated=True, token=token)
    # Missing or malformed credentials: issue a 401 challenge.
    response = app.make_response("")
    response.headers["WWW-Authenticate"] = "Bearer"
    response.status_code = 401
    return response
|
def create_similar_image(self, content, width, height):
    """Create a new image surface that is as compatible as possible
    for uploading to and the use in conjunction with this surface.
    However, this surface can still be used like any normal image surface.
    Initially the surface contents are all 0
    (transparent if contents have transparency, black otherwise.)
    Use :meth:`create_similar` if you don't need an image surface.

    :param content: value forwarded to ``cairo_surface_create_similar_image``.
        NOTE(review): the docstring previously documented a ``format``
        parameter (the underlying cairo call takes a :ref:`FORMAT`), but the
        signature names it ``content`` — confirm which is intended.
    :param width: width of the new surface, (in device-space units)
    :param height: height of the new surface (in device-space units)
    :type width: int
    :type height: int
    :returns: A new :class:`ImageSurface` instance.
    """
    return Surface._from_pointer(cairo.cairo_surface_create_similar_image(self._pointer, content, width, height), incref=False)
|
def run_process(self, process):
    """Runs a single action.

    Optionally stashes unstaged changes so the hook only sees the staged
    state, runs the ``process`` callable over the tracked files, records any
    files the process modified (re-staging them when configured to), restores
    the stash, and returns the result with a colourised status message.

    :param process: callable invoked as ``process(files=..., cwd=..., fix=...)``
    :returns: ``(result, message)`` tuple
    """
    message = u'#{bright}'
    message += u'{} '.format(str(process)[:68]).ljust(69, '.')
    stashed = False
    if self.unstaged_changes and not self.include_unstaged_changes:
        # Park unstaged edits; keep_index preserves the staged snapshot.
        out, err, code = self.git.stash(keep_index=True, quiet=True)
        stashed = code == 0
    try:
        result = process(files=self.files, cwd=self.cwd, fix=self.fix)
        # Check for modified files
        out, err, code = self.git.status(porcelain=True, untracked_files='no')
        for line in out.splitlines():
            file_status = Status(line)
            # Make sure the file is one of the files that was processed
            if file_status.path in self.files and file_status.is_modified:
                # mtime comparison guards against re-counting files the hook
                # merely touched without changing since the last run.
                mtime = os.path.getmtime(file_status.path) if os.path.exists(file_status.path) else 0
                if mtime > self.file_mtimes.get(file_status.path, 0):
                    self.file_mtimes[file_status.path] = mtime
                    result.add_modified_file(file_status.path)
                    if self.stage_modified_files:
                        self.git.add(file_status.path)
    except:  # noqa: E722
        raise
    finally:
        if stashed:
            # Discard the hook's working-tree changes and restore the stash.
            self.git.reset(hard=True, quiet=True)
            self.git.stash.pop(index=True, quiet=True)
    if result.is_success:
        message += u' #{green}[SUCCESS]'
    elif result.is_failure:
        message += u' #{red}[FAILURE]'
    elif result.is_skip:
        message += u' #{cyan}[SKIPPED]'
    elif result.is_error:
        message += u' #{red}[ERROR!!]'
    return result, message
|
def rescorer(self, rescorer):
    """Returns a new QuerySet with a set rescorer."""
    # Work on a clone so the original queryset remains untouched.
    duplicate = self._clone()
    duplicate._rescorer = rescorer
    return duplicate
|
def execute_state_machine_from_path(self, state_machine=None, path=None, start_state_path=None, wait_for_execution_finished=True):
    """A helper function to start an arbitrary state machine at a given path.

    :param state_machine: an already-loaded state machine; when falsy it is
        loaded from ``path``
    :param path: The path where the state machine resides
    :param start_state_path: The path to the state from which the execution will start
    :param wait_for_execution_finished: when True, block until execution ends
        and then stop the execution engine
    :return: a reference to the created state machine
    """
    # Local imports keep rafcon initialization out of module import time.
    import rafcon.core.singleton
    from rafcon.core.storage import storage
    rafcon.core.singleton.library_manager.initialize()
    if not state_machine:
        state_machine = storage.load_state_machine_from_path(path)
    rafcon.core.singleton.state_machine_manager.add_state_machine(state_machine)
    rafcon.core.singleton.state_machine_execution_engine.start(state_machine.state_machine_id, start_state_path=start_state_path)
    if wait_for_execution_finished:
        self.join()
        self.stop()
    return state_machine
|
def cond_int(self, conkey):
    """Return the trailing number from cond, if any, as an int.

    If there is no trailing number, return the string ``conkey`` unchanged.
    Used to sort conditions numerically (so e.g. 10 sorts after 9). The
    name could be improved since it may return a string.
    """
    match = re.match(self.numrx, conkey)
    # No numeric suffix: fall back to the raw key.
    return int(match.group(1)) if match else conkey
|
def matrixplot(adata, var_names, groupby=None, use_raw=None, log=False, num_categories=7,
               figsize=None, dendrogram=False, gene_symbols=None, var_group_positions=None,
               var_group_labels=None, var_group_rotation=None, layer=None, standard_scale=None,
               swap_axes=False, show=None, save=None, **kwds):
    """Creates a heatmap of the mean expression values per cluster of each var_names.

    If groupby is not given, the matrixplot assumes that all data belongs to a single
    category.

    Parameters
    ----------
    {common_plot_args}
    standard_scale : {{'var', 'group'}}, optional (default: None)
        Whether or not to standardize that dimension between 0 and 1, meaning for each
        variable or group, subtract the minimum and divide each by its maximum.
    {show_save_ax}
    **kwds : keyword arguments
        Are passed to `matplotlib.pyplot.pcolor`.

    Returns
    -------
    List of :class:`~matplotlib.axes.Axes`

    Examples
    --------
    >>> adata = sc.datasets.pbmc68k_reduced()
    >>> sc.pl.matrixplot(adata, ['C1QA', 'PSAP', 'CD79A', 'CD79B', 'CST3', 'LYZ'],
    ...                  groupby='bulk_labels', dendrogram=True)
    """
    # Default to using .raw when it is present, matching the other plotting helpers.
    if use_raw is None and adata.raw is not None:
        use_raw = True
    # Accept a single gene name as a convenience.
    if isinstance(var_names, str):
        var_names = [var_names]
    categories, obs_tidy = _prepare_dataframe(adata, var_names, groupby, use_raw, log,
                                              num_categories, gene_symbols=gene_symbols,
                                              layer=layer)
    if groupby is None or len(categories) <= 1:
        # dendrogram can only be computed between groupby categories
        dendrogram = False

    # One row per groupby category: mean expression of each var_name in that group.
    mean_obs = obs_tidy.groupby(level=0).mean()

    # Optional 0-1 rescaling, either per group (row-wise) or per variable (column-wise).
    if standard_scale == 'group':
        mean_obs = mean_obs.sub(mean_obs.min(1), axis=0)
        mean_obs = mean_obs.div(mean_obs.max(1), axis=0).fillna(0)
    elif standard_scale == 'var':
        mean_obs -= mean_obs.min(0)
        mean_obs = (mean_obs / mean_obs.max(0)).fillna(0)
    elif standard_scale is None:
        pass
    else:
        logg.warn('Unknown type for standard_scale, ignored')

    if dendrogram:
        # Reorder rows/columns to follow the hierarchical clustering of categories.
        dendro_data = _reorder_categories_after_dendrogram(adata, groupby, dendrogram,
                                                           var_names=var_names,
                                                           var_group_labels=var_group_labels,
                                                           var_group_positions=var_group_positions)
        var_group_labels = dendro_data['var_group_labels']
        var_group_positions = dendro_data['var_group_positions']

        # reorder matrix
        if dendro_data['var_names_idx_ordered'] is not None:
            # reorder columns (usually genes) if needed. This only happens when
            # var_group_positions and var_group_labels is set
            mean_obs = mean_obs.iloc[:, dendro_data['var_names_idx_ordered']]

        # reorder rows (categories) to match the dendrogram order
        mean_obs = mean_obs.iloc[dendro_data['categories_idx_ordered'], :]

    colorbar_width = 0.2

    if not swap_axes:
        # Default layout: categories on the y axis, var_names on the x axis.
        dendro_width = 0.8 if dendrogram else 0
        if figsize is None:
            height = len(categories) * 0.2 + 1
            # + 1 for labels
            heatmap_width = len(var_names) * 0.32
            width = heatmap_width + dendro_width + colorbar_width
            # the dendro and colorbar widths account for the extra panels
        else:
            width, height = figsize
            heatmap_width = width - (dendro_width + colorbar_width)

        if var_group_positions is not None and len(var_group_positions) > 0:
            # add some space in case 'brackets' want to be plotted on top of the image
            height_ratios = [0.5, 10]
            height += 0.5
        else:
            height_ratios = [0, 10.5]

        # define a layout of 2 rows x 3 columns
        # first row is for 'brackets' (if no brackets needed, the height of this row is zero)
        # second row is for main content. This second row
        # is divided into three axes:
        #   first ax is for the main matrix figure
        #   second ax is for the dendrogram
        #   third ax is for the color bar legend
        fig = pl.figure(figsize=(width, height))
        axs = gridspec.GridSpec(nrows=2, ncols=3, wspace=0.02, hspace=0.04,
                                width_ratios=[heatmap_width, dendro_width, colorbar_width],
                                height_ratios=height_ratios)

        matrix_ax = fig.add_subplot(axs[1, 0])
        # Ticks at cell centers: pcolor draws each cell between consecutive integers.
        y_ticks = np.arange(mean_obs.shape[0]) + 0.5
        matrix_ax.set_yticks(y_ticks)
        matrix_ax.set_yticklabels([mean_obs.index[idx] for idx in range(mean_obs.shape[0])])

        if dendrogram:
            dendro_ax = fig.add_subplot(axs[1, 1], sharey=matrix_ax)
            _plot_dendrogram(dendro_ax, adata, groupby, dendrogram_key=dendrogram, ticks=y_ticks)

        pc = matrix_ax.pcolor(mean_obs, edgecolor='gray', **kwds)

        # invert y axis to show categories ordered from top to bottom
        matrix_ax.set_ylim(mean_obs.shape[0], 0)

        x_ticks = np.arange(mean_obs.shape[1]) + 0.5
        matrix_ax.set_xticks(x_ticks)
        matrix_ax.set_xticklabels([mean_obs.columns[idx] for idx in range(mean_obs.shape[1])],
                                  rotation=90)
        matrix_ax.tick_params(axis='both', labelsize='small')
        matrix_ax.grid(False)
        # NOTE(review): this xlim is immediately overridden two statements below;
        # the later set_xlim(0, mean_obs.shape[1]) is the effective one.
        matrix_ax.set_xlim(-0.5, len(var_names) + 0.5)
        matrix_ax.set_ylabel(groupby)
        matrix_ax.set_xlim(0, mean_obs.shape[1])

        # plot group legends on top of matrix_ax (if given)
        if var_group_positions is not None and len(var_group_positions) > 0:
            gene_groups_ax = fig.add_subplot(axs[0, 0], sharex=matrix_ax)
            _plot_gene_groups_brackets(gene_groups_ax, group_positions=var_group_positions,
                                       group_labels=var_group_labels,
                                       rotation=var_group_rotation,
                                       left_adjustment=0.2, right_adjustment=0.8)

        # plot colorbar
        _plot_colorbar(pc, fig, axs[1, 2])
    else:
        # Swapped layout: var_names on the y axis, categories on the x axis.
        dendro_height = 0.5 if dendrogram else 0
        if var_group_positions is not None and len(var_group_positions) > 0:
            # add some space in case 'color blocks' want to be plotted on the right of the image
            vargroups_width = 0.4
        else:
            vargroups_width = 0

        if figsize is None:
            heatmap_height = len(var_names) * 0.2
            height = dendro_height + heatmap_height + 1
            # + 1 for labels
            heatmap_width = len(categories) * 0.3
            width = heatmap_width + vargroups_width + colorbar_width
        else:
            width, height = figsize
            heatmap_width = width - (vargroups_width + colorbar_width)
            heatmap_height = height - dendro_height

        # define a layout of 2 rows x 3 columns
        # first row is for 'dendrogram' (if no dendrogram is plotted, the height of this row is zero)
        # second row is for main content. This row
        # is divided into three axes:
        #   first ax is for the main matrix figure
        #   second ax is for the groupby categories (eg. brackets)
        #   third ax is for the color bar legend
        fig = pl.figure(figsize=(width, height))
        axs = gridspec.GridSpec(nrows=2, ncols=3, wspace=0.05, hspace=0.005,
                                width_ratios=[heatmap_width, vargroups_width, colorbar_width],
                                height_ratios=[dendro_height, heatmap_height])

        # Transpose so that var_names become rows.
        mean_obs = mean_obs.T
        matrix_ax = fig.add_subplot(axs[1, 0])
        pc = matrix_ax.pcolor(mean_obs, edgecolor='gray', **kwds)
        y_ticks = np.arange(mean_obs.shape[0]) + 0.5
        matrix_ax.set_yticks(y_ticks)
        matrix_ax.set_yticklabels([mean_obs.index[idx] for idx in range(mean_obs.shape[0])])

        x_ticks = np.arange(mean_obs.shape[1]) + 0.5
        matrix_ax.set_xticks(x_ticks)
        matrix_ax.set_xticklabels([mean_obs.columns[idx] for idx in range(mean_obs.shape[1])],
                                  rotation=90)
        matrix_ax.tick_params(axis='both', labelsize='small')
        matrix_ax.grid(False)
        matrix_ax.set_xlim(0, len(categories))
        matrix_ax.set_xlabel(groupby)
        # invert y axis to show var_names ordered from top to bottom
        matrix_ax.set_ylim(mean_obs.shape[0], 0)

        if dendrogram:
            dendro_ax = fig.add_subplot(axs[0, 0], sharex=matrix_ax)
            _plot_dendrogram(dendro_ax, adata, groupby, dendrogram_key=dendrogram,
                             ticks=x_ticks, orientation='top')

        # plot group legends on top of matrix_ax (if given)
        if var_group_positions is not None and len(var_group_positions) > 0:
            gene_groups_ax = fig.add_subplot(axs[1, 1], sharey=matrix_ax)
            _plot_gene_groups_brackets(gene_groups_ax, group_positions=var_group_positions,
                                       group_labels=var_group_labels,
                                       rotation=var_group_rotation,
                                       left_adjustment=0.2, right_adjustment=0.8,
                                       orientation='right')

        # plot colorbar
        _plot_colorbar(pc, fig, axs[1, 2])

    utils.savefig_or_show('matrixplot', show=show, save=save)
    return axs
|
def _flush(self):
    """Purges the buffer and commits all pending values into the estimator."""
    # Sort in place so _replace_batch consumes the pending values in order.
    self._buffer.sort()
    # Merge the sorted batch into the estimator's internal state.
    self._replace_batch()
    # Start a fresh buffer only after the batch has been committed.
    self._buffer = []
    # NOTE(review): _compress presumably trims the estimator's internal
    # state after the merge — confirm against its definition.
    self._compress()
|
def parse_filters(self, vt_filter):
    """Parse a string containing one or more filters and return a list of filters.

    Arguments:
        vt_filter (string): String containing filters separated with
            semicolon.

    Return:
        List with filters. Each filter is a list with 3 elements
        e.g. [arg, operator, value]

    Raises:
        OSPDError: if a filter is malformed, uses an element not in
            ``self.allowed_filter`` or an operator not in
            ``self.filter_operator``.
    """
    filter_list = vt_filter.split(';')
    filters = list()
    for single_filter in filter_list:
        # Split once at the first non-word character (the operator).
        # Fix: use a raw string — the previous '(\W)' relied on Python
        # passing the unknown '\W' escape through, which is a
        # DeprecationWarning and a SyntaxError in future versions.
        filter_aux = re.split(r'(\W)', single_filter, 1)
        if len(filter_aux) < 3:
            raise OSPDError("Invalid number of argument in the filter", "get_vts")
        _element, _oper, _val = filter_aux
        if _element not in self.allowed_filter:
            raise OSPDError("Invalid filter element", "get_vts")
        if _oper not in self.filter_operator:
            raise OSPDError("Invalid filter operator", "get_vts")
        filters.append(filter_aux)
    return filters
|
def render_compressed_output(self, package, package_name, package_type):
    """Render HTML that references the package's compressed output file.

    Dispatches to ``self.render_<package_type>`` (e.g. ``render_css``)
    with the package and its output filename. Subclasses can override
    this method to provide custom behavior for rendering the output file.
    """
    renderer = getattr(self, 'render_{0}'.format(package_type))
    return renderer(package, package.output_filename)
|
def DataCopyWithOverlay(self, dcmfilelist, out_dir, overlays):
    """Copy DICOM slices to ``out_dir`` with binary overlays encoded in.

    :param dcmfilelist: list of sorted .dcm file paths
    :param out_dir: output directory
    :param overlays: dictionary of binary overlays, e.g.
        {1: np.array([...]), 3: ...}
    """
    for slice_idx, src_path in enumerate(dcmfilelist):
        logger.info(src_path)
        ds = dicom.read_file(src_path)
        # Slices are indexed from the end of the 3D overlay volume, so the
        # first file receives the last overlay plane.
        for overlay_id in overlays.keys():
            ds = self.encode_overlay_slice(ds, overlays[overlay_id][-1 - slice_idx, :, :],
                                           overlay_id)
        # Write the modified slice under its original basename.
        _head, basename = os.path.split(os.path.normpath(src_path))
        ds.save_as(os.path.join(out_dir, basename))
|
def execute(self, input_data):
    '''yara worker execute method'''
    raw_bytes = input_data['sample']['raw_bytes']
    matches = self.rules.match_data(raw_bytes)
    # The matches data arrives as {'filename': [match, ...]} where each
    # match carries 'meta', 'rule' and a 'strings' list of hits like
    # {'flags': ..., 'identifier': '$', 'data': 'FindWindow', 'offset': ...}.
    # Flatten it into {filename_description_or_rule: [unique string data]}.
    flat_data = collections.defaultdict(list)
    for filename, match_list in matches.iteritems():
        for single_match in match_list:
            if 'description' in single_match['meta']:
                new_tag = filename + '_' + single_match['meta']['description']
            else:
                new_tag = filename + '_' + single_match['rule']
            flat_data[new_tag].extend(hit['data'] for hit in single_match['strings'])
            # Remove duplicates
            flat_data[new_tag] = list(set(flat_data[new_tag]))
    return {'matches': flat_data}
|
def put(self, id, name, description, command_to_run, environment_variables,
        required_arguments, required_arguments_default_values, logs_path,
        results_path, container_image, container_type, extra_data_to_put=None):
    """Updates a task type on the saltant server.

    Args:
        id (int): The ID of the task type.
        name (str): The name of the task type.
        description (str): The description of the task type.
        command_to_run (str): The command to run to execute the task.
        environment_variables (list): The environment variables
            required on the host to execute the task.
        required_arguments (list): The argument names for the task type.
        required_arguments_default_values (dict): Default values for
            the tasks required arguments.
        logs_path (str): The path of the logs directory inside the
            container.
        results_path (str): The path of the results directory inside
            the container.
        container_image (str): The container name and tag. For
            example, ubuntu:14.04 for Docker; and docker://ubuntu:14:04
            or shub://vsoch/hello-world for Singularity.
        container_type (str): The type of the container.
        extra_data_to_put (dict, optional): Extra key-value pairs to
            add to the request data. This is useful for subclasses
            which require extra parameters.
    """
    # Fold the container-specific fields into the extra request data
    # before delegating to the generic task-type PUT.
    if extra_data_to_put is None:
        extra_data_to_put = {}
    extra_data_to_put.update(
        logs_path=logs_path,
        results_path=results_path,
        container_image=container_image,
        container_type=container_type,
    )
    # Call the parent put with the merged payload.
    return super(ContainerTaskTypeManager, self).put(
        id=id,
        name=name,
        description=description,
        command_to_run=command_to_run,
        environment_variables=environment_variables,
        required_arguments=required_arguments,
        required_arguments_default_values=required_arguments_default_values,
        extra_data_to_put=extra_data_to_put,
    )
|
def setup(self, level=None, log_file=None, json=None):
    '''Load everything up. Note that any arg here will override both
    default and custom settings.

    @param level: the log level
    @param log_file: boolean t/f whether to log to a file, else stdout
    @param json: boolean t/f whether to write the logs in json
    '''
    self.settings = self.wrapper.load(self.settings_name)

    # Fix: compare with None instead of truthiness so that explicitly
    # passing a falsy override (e.g. json=False or log_file=False) still
    # overrides the settings file, as the docstring promises. The old
    # truthiness checks silently ignored falsy arguments.
    my_level = level if level is not None else self.settings['LOG_LEVEL']
    # negate because logger wants True for std out
    my_output = not log_file if log_file is not None else self.settings['LOG_STDOUT']
    my_json = json if json is not None else self.settings['LOG_JSON']

    self.logger = LogFactory.get_instance(json=my_json, stdout=my_output, level=my_level,
                                          name=self.settings['LOGGER_NAME'],
                                          dir=self.settings['LOG_DIR'],
                                          file=self.settings['LOG_FILE'],
                                          bytes=self.settings['LOG_MAX_BYTES'],
                                          backups=self.settings['LOG_BACKUPS'])

    self.redis_conn = redis.StrictRedis(host=self.settings['REDIS_HOST'],
                                        port=self.settings['REDIS_PORT'],
                                        db=self.settings['REDIS_DB'])
    try:
        self.redis_conn.info()
        self.logger.debug("Successfully connected to Redis")
    except ConnectionError:
        self.logger.error("Failed to connect to Redis")
        # essential to functionality
        sys.exit(1)

    self._load_plugins()
    self._setup_stats()
|
def list_zones(self, max_results=None, page_token=None):
    """List zones for the project associated with this client.

    See
    https://cloud.google.com/dns/api/v1/managedZones/list

    :type max_results: int
    :param max_results: maximum number of zones to return. If not
        passed, defaults to a value set by the API.

    :type page_token: str
    :param page_token: Optional. If present, return the next batch of
        zones, using the value, which must correspond to the
        ``nextPageToken`` value returned in the previous response.
        Deprecated: use the ``pages`` property of the returned iterator
        instead of manually passing the token.

    :rtype: :class:`~google.api_core.page_iterator.Iterator`
    :returns: Iterator of :class:`~google.cloud.dns.zone.ManagedZone`
        belonging to this project.
    """
    zones_path = "/projects/%s/managedZones" % (self.project,)
    return page_iterator.HTTPIterator(
        client=self,
        api_request=self._connection.api_request,
        path=zones_path,
        item_to_value=_item_to_zone,
        items_key="managedZones",
        page_token=page_token,
        max_results=max_results,
    )
|
def convert_time(time):
    """Convert a 12-hour time string (e.g. '7:30 p.m.') to 24-hour 'HH:MM'.

    Strings without an am/pm part (e.g. 'TBD') are returned unchanged.
    """
    split_time = time.split()
    try:
        # Get rid of period in a.m./p.m.
        am_pm = split_time[1].replace('.', '')
        time_str = '{0} {1}'.format(split_time[0], am_pm)
    except IndexError:
        # No am/pm component: nothing to convert.
        return time
    try:
        time_obj = datetime.strptime(time_str, '%I:%M %p')
    except ValueError:
        # Hour-only form such as '7 p.m.'.
        time_obj = datetime.strptime(time_str, '%I %p')
    # Bug fix: the old format string '%H:%M %p' appended a redundant AM/PM
    # marker to a 24-hour time (e.g. '19:30 PM'); 24-hour output is HH:MM.
    return time_obj.strftime('%H:%M')
|
def url_unquote_plus(s, charset='utf-8', errors='replace'):
    """URL decode a single string with the given decoding and decode
    a "+" to whitespace.

    Per default encoding errors are ignored. If you want a different behavior
    you can set `errors` to ``'replace'`` or ``'strict'``. In strict mode a
    `HTTPUnicodeError` is raised.

    :param s: the string to unquote.
    :param charset: the charset to be used.
    :param errors: the error handling for the charset decoding.
    """
    # Work on bytes: encode unicode input first (Python 2 semantics).
    raw = s.encode(charset) if isinstance(s, unicode) else s
    return _decode_unicode(_unquote_plus(raw), charset, errors)
|
def _parse_attributes ( self , attributes ) :
"""Ensure compliance with the spec ' s attributes section
Specifically , the attributes object of the single resource
object . This contains the key / values to be mapped to the
model .
: param attributes :
dict JSON API attributes object"""
|
link = 'jsonapi.org/format/#document-resource-object-attributes'
if not isinstance ( attributes , dict ) :
self . fail ( 'The JSON API resource object attributes key MUST ' 'be a hash.' , link )
elif 'id' in attributes or 'type' in attributes :
self . fail ( 'A field name of `id` or `type` is not allowed in ' 'the attributes object. They should be top-level ' 'keys.' , link )
|
def declare_backward_dependency(self, out_grad, in_data, out_data):
    """Declare dependencies of this operator for backward pass.

    Parameters
    ----------
    out_grad : list of int
        ids of out_grad blobs.
    in_data : list of int
        ids of in_data blobs.
    out_data : list of int
        ids of out_data blobs.

    Returns
    -------
    deps : list of int
        ids of the needed blobs.
    """
    # Output gradients are only needed when the operator consumes the
    # gradient coming from the layer above.
    needed = list(out_grad) if self.need_top_grad() else []
    needed += in_data
    needed += out_data
    return needed
|
def _set_gradebook_view(self, session):
    """Sets the underlying gradebook view to match current view."""
    if self._gradebook_view == COMPARATIVE:
        setter_name = 'use_comparative_gradebook_view'
    else:
        setter_name = 'use_plenary_gradebook_view'
    try:
        # Not every session type supports view switching; silently skip
        # those that do not, exactly as before.
        getattr(session, setter_name)()
    except AttributeError:
        pass
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.