signature stringlengths 29 44.1k | implementation stringlengths 0 85.2k |
|---|---|
def isignored(self, relpath, directory=False):
    """Return True if ``relpath`` matches a pants ignore pattern.

    :param relpath: path relative to the build root.
    :param directory: if True, match ``relpath`` as a directory (a
        trailing slash is appended so directory-only patterns apply).
    :return: bool
    """
    relpath = self._relpath_no_dot(relpath)
    if directory:
        # Gitignore-style "foo/" patterns only match paths that end
        # with a slash, so mark directories explicitly.
        relpath = self._append_trailing_slash(relpath)
    return self.ignore.match_file(relpath)
def linear_least_squares(a, b, residuals=False):
    """Return the least-squares solution to a linear matrix equation.

    Solves ``a x = b`` via the normal equations ``(a^T a) x = a^T b``,
    computing the vector ``x`` that minimizes the Euclidean 2-norm
    ``||b - a x||``.  This requires ``a`` to have full column rank
    (``a^T a`` must be invertible); for rank-deficient problems use
    ``np.linalg.lstsq`` instead.

    Parameters
    ----------
    a : (M, N) array_like
        "Coefficient" matrix.
    b : (M,) array_like
        Ordinate or "dependent variable" values.
    residuals : bool
        If True, also return the residual of the solution.

    Returns
    -------
    x : (N,) ndarray
        Least-squares solution.
    residual : float, optional
        Euclidean 2-norm ``||b - a x||`` (only when ``residuals`` is
        True).  NOTE: this is the norm itself, not its square — the
        original docstring claimed a squared norm, which was wrong.
    """
    a = np.asarray(a, dtype=float)
    b = np.asarray(b, dtype=float)
    # Same normal-equations computation the original did through BLAS
    # dgemm, without the fragile scipy.linalg.blas import path (which
    # already had to be patched once from scipy.linalg.fblas).
    x = np.linalg.solve(a.T.dot(a), a.T.dot(b)).flatten()
    if residuals:
        return x, np.linalg.norm(np.dot(a, x) - b)
    return x
def is_excluded(root, excludes):
    """Check whether the directory ``root`` is in the exclude list.

    Note: callers are expected to give excludes trailing slashes so a
    common prefix (e.g. an exclude "foo" vs. a directory "foobar")
    cannot match accidentally; only exact equality is tested here.
    """
    return os.path.normpath(root) in excludes
def c_M_z(self, M, z):
    """Concentration-mass relation c(M, z) = A (M/M_pivot)^B (1+z)^C.

    Fitting function of
    http://moriond.in2p3.fr/J08/proceedings/duffy.pdf for the mass and
    redshift dependence of the concentration parameter.

    :param M: halo mass in M_sun/h
    :type M: float or numpy array
    :param z: redshift
    :type z: float > 0
    :return: concentration parameter as float
    """
    # Best-fit parameter values from Duffy et al. (2008).
    amplitude = 5.22
    mass_slope = -0.072
    redshift_slope = -0.42
    pivot_mass = 2. * 10 ** 12  # M_sun/h
    return amplitude * (M / pivot_mass) ** mass_slope * (1 + z) ** redshift_slope
def is_inside(directory, fname):
    """True if fname is inside directory.

    The parameters should typically be passed to osutils.normpath first,
    so that . and .. and repeated slashes are eliminated, and the
    separators are canonical for the platform.

    The empty string as a dir name is taken as top-of-tree and matches
    everything.
    """
    # XXX: Most callers of this can actually do something smarter by
    # looking at the inventory.
    if directory in (fname, b''):
        return True
    prefix = directory if directory.endswith(b'/') else directory + b'/'
    return fname.startswith(prefix)
def normalize(self, inplace=True):
    """Normalize the pdf of the distribution so that it integrates
    to 1 over all the variables.

    Parameters
    ----------
    inplace : boolean
        If inplace=True it will modify the distribution itself, else it
        returns a new distribution.

    Returns
    -------
    CustomDistribution or None:
        if inplace=True (default) returns None
        if inplace=False returns a new CustomDistribution instance

    Examples
    --------
    >>> from pgmpy.factors.distributions import CustomDistribution
    >>> from scipy.stats import multivariate_normal
    >>> normal_pdf_x2 = lambda x1, x2: 2 * multivariate_normal.pdf(
    ...     x=[x1, x2], mean=[0, 0], cov=[[1, 0], [0, 1]])
    >>> normal_dist_x2 = CustomDistribution(variables=['x1', 'x2'],
    ...                                     distribution=normal_pdf_x2)
    >>> normal_dist_x2.assignment(1, 1)
    0.117099663049
    >>> normal_dist = normal_dist_x2.normalize(inplace=False)
    >>> normal_dist.assignment(1, 1)
    0.0585498315243
    """
    phi = self if inplace else self.copy()
    # Bind the current pdf locally so the normalized lambda below keeps
    # dividing the *original* density even after _pdf is reassigned.
    pdf = self.pdf
    # Total probability mass: integrate over (-inf, inf) in every variable.
    pdf_mod = integrate.nquad(pdf, [[-np.inf, np.inf] for var in self.variables])[0]
    phi._pdf = lambda *args: pdf(*args) / pdf_mod
    if not inplace:
        return phi
def save_license(license_code):
    """Grab license text and save it to a LICENSE/LICENSE.txt file."""
    license_text = _get_license_description(license_code)
    # Windows conventionally expects a file extension.
    filename = "LICENSE.txt" if sys.platform == "win32" else "LICENSE"
    with open(os.path.join(os.getcwd(), filename), "w") as out:
        out.write(license_text)
def remove_consecutive_duplicates(elements):
    """Eliminate consecutive duplicates from a list.

    Args:
        elements: A list of integers or characters.

    Returns:
        A list in which runs of equal consecutive elements are collapsed
        to a single occurrence (non-adjacent repeats are kept).

    Examples:
        >>> remove_consecutive_duplicates([0, 0, 1, 2, 3, 4, 4, 5, 6, 6, 6, 7, 8, 9, 4, 4])
        [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 4]
        >>> remove_consecutive_duplicates([10, 10, 15, 19, 18, 18, 17, 26, 26, 17, 18, 10])
        [10, 15, 19, 18, 17, 26, 17, 18, 10]
        >>> remove_consecutive_duplicates(['a', 'a', 'b', 'c', 'd', 'd'])
        ['a', 'b', 'c', 'd']
    """
    collapsed = []
    for element in elements:
        # Keep an element only when it differs from the one just kept.
        if not collapsed or collapsed[-1] != element:
            collapsed.append(element)
    return collapsed
def to_chunks(stream_or_generator):
    """Yield byte chunks from a file-like object or a generator.

    :param file|__generator[bytes] stream_or_generator: readable stream or
        generator.
    :rtype: __generator[bytes]
    :raise: TypeError
    """
    if isinstance(stream_or_generator, types.GeneratorType):
        yield from stream_or_generator
    elif hasattr(stream_or_generator, 'read'):
        # Drain the stream in fixed-size chunks until EOF.
        while True:
            data = stream_or_generator.read(CHUNK_SIZE)
            if not data:
                # no more data
                return
            yield data
    else:
        raise TypeError('Input must be either readable or generator.')
def update(self, path, verbose=False):
    """Toggle watching of ``path``.

    If the path isn't being watched, start watching it; if it is, stop
    watching it.

    :param path: filesystem path to toggle.
    :param verbose: forwarded to ``add`` when the path becomes watched.
    """
    if path in self._by_path:
        self.remove(path)
    else:
        self.add(path, verbose)
def _get_entity_ids(field_name, attrs):
    """Find the IDs for a one to many relationship.

    The server may return JSON data in the following forms for a
    :class:`nailgun.entity_fields.OneToManyField`::

        'user': [{'id': 1, ...}, {'id': 42, ...}]
        'users': [{'id': 1, ...}, {'id': 42, ...}]
        'user_ids': [1, 42]

    Search ``attrs`` for a one to many ``field_name`` and return its IDs.

    :param field_name: A string. The name of a field.
    :param attrs: A dict. A JSON payload as returned from a server.
    :returns: An iterable of entity IDs.
    :raises MissingValueError: if none of the candidate keys is present.
    """
    ids_key = field_name + '_ids'
    plural_key = pluralize(field_name)
    # Bare list of IDs takes precedence.
    if ids_key in attrs:
        return attrs[ids_key]
    # Otherwise look for a list of entity dicts under the singular or
    # plural field name and pull out each 'id'.
    for key in (field_name, plural_key):
        if key in attrs:
            return [entity['id'] for entity in attrs[key]]
    raise MissingValueError(
        'Cannot find a value for the "{0}" field. Searched for keys named '
        '{1}, but available keys are {2}.'.format(
            field_name, (ids_key, field_name, plural_key), attrs.keys()))
def _json_safe_float(number):
    """Coerce ``number`` to a float that JSON can serialize safely.

    JSON serialization for infinity can be problematic, see
    https://docs.python.org/2/library/json.html#basic-usage

    Returns None when ``number`` is None, infinity, negative infinity or
    NaN.  If ``number`` cannot be converted to float, this raises.
    """
    if number is None:
        return None
    if not isinstance(number, float):
        # Raises if number is not float compatible.
        return float(number)
    # isfinite is False exactly for inf, -inf and NaN.
    return number if np.isfinite(number) else None
def register_object(self, obj, name, tango_class_name=None, member_filter=None):
    """Register a python object as a Tango device on this server.

    :param obj: the object to expose.
    :param name: either a short alias (no slashes) or a full Tango device
        name of the form "domain/family/member" (exactly two slashes).
    :param tango_class_name: optional Tango class name; defaults to the
        object's own class name.
    :param member_filter:
        callable(obj, tango_class_name, member_name, member) -> bool
    :return: the TangoObjectAdapter wrapping ``obj``.
    :raises ValueError: if ``name`` contains a slash count other than 0 or 2.
    """
    slash_count = name.count("/")
    if slash_count == 0:
        # Short name: treat it as an alias under this server instance.
        alias = name
        full_name = "{0}/{1}".format(self.server_instance, name)
    elif slash_count == 2:
        # Already a full "domain/family/member" device name.
        alias = None
        full_name = name
    else:
        raise ValueError("Invalid name")
    class_name = tango_class_name or obj.__class__.__name__
    tango_class = self.get_tango_class(class_name)
    if tango_class is None:
        # First object of this class: create and register the Tango class.
        tango_class = create_tango_class(self, obj, class_name, member_filter=member_filter)
        self.register_tango_class(tango_class)
    tango_object = self.TangoObjectAdapter(self, obj, full_name, alias, tango_class_name=class_name)
    self.__objects[full_name.lower()] = tango_object
    if self._phase > Server.Phase1:
        # Server is already running: create the device immediately.
        import tango
        util = tango.Util.instance()
        util.create_device(class_name, name)
    return tango_object
def dec2bin(s):
    """Convert a non-negative decimal integer (or its string form) to a
    binary digit string.

    Equivalent to ``bin(int(s))`` without the ``0b`` prefix.

    :param s: an int, or anything parseable by ``int()``.
    :return: the binary representation as a str, e.g. ``dec2bin(10)``
        returns ``'1010'``.
    """
    num = s if isinstance(s, int) else int(s)
    digits = []
    while num:
        num, rem = divmod(num, 2)
        # rem is always 0 or 1, so str(rem) replaces the module-level
        # digit table `base[rem]` the original depended on.
        digits.append(str(rem))
    # Bug fix: the original returned '' for input 0; return '0' instead.
    return ''.join(reversed(digits)) or '0'
def send_cons3rt_agent_logs(self):
    """Email each file found in the CONS3RT agent log directory.

    Sends one email per regular file in ``self.cons3rt_agent_log_dir``;
    sub-directory entries are skipped.  No-op (with a warning) when no
    log directory is configured.

    :return: None
    :raises AssetMailerError: when sending a log file fails.
    """
    log = logging.getLogger(self.cls_logger + '.send_cons3rt_agent_logs')
    if self.cons3rt_agent_log_dir is None:
        log.warn('There is not CONS3RT agent log directory on this system')
        return
    log.debug('Searching for log files in directory: {d}'.format(d=self.cons3rt_agent_log_dir))
    for item in os.listdir(self.cons3rt_agent_log_dir):
        item_path = os.path.join(self.cons3rt_agent_log_dir, item)
        if os.path.isfile(item_path):
            log.info('Sending email with cons3rt agent log file: {f}'.format(f=item_path))
            try:
                self.send_text_file(text_file=item_path)
            except (TypeError, OSError, AssetMailerError):
                _, ex, trace = sys.exc_info()
                msg = '{n}: There was a problem sending CONS3RT agent log file: {f}\n{e}'.format(n=ex.__class__.__name__, f=item_path, e=str(ex))
                # NOTE: Python 2 three-expression raise, re-raising with
                # the original traceback.  This file is Python 2 only here.
                raise AssetMailerError, msg, trace
            else:
                log.info('Successfully sent email with file: {f}'.format(f=item_path))
def rectangle_geo_array(rectangle, map_canvas):
    """Obtain the rectangle in EPSG:4326.

    :param rectangle: A rectangle instance.
    :type rectangle: QgsRectangle
    :param map_canvas: A map canvas instance.
    :type map_canvas: QgsMapCanvas
    :returns: A list in the form [xmin, ymin, xmax, ymax] where all
        coordinates provided are in Geographic / EPSG:4326.
    :rtype: list
    .. note:: Delegates to extent_to_array()
    """
    # Target CRS: geographic WGS84 (EPSG:4326).
    geographic_crs = QgsCoordinateReferenceSystem()
    geographic_crs.createFromSrid(4326)
    canvas_crs = map_canvas.mapSettings().destinationCrs()
    return extent_to_array(rectangle, canvas_crs, geographic_crs)
def add_or_filter(self, *values):
    """Add a filter using "OR" logic.

    This filter is useful when matching on one or more criteria.  For
    example, searching for IP 1.1.1.1 and service TCP/443, or IP
    1.1.1.10 and TCP/80 — either pair would produce a positive match.

    .. seealso:: :class:`smc_monitoring.models.filters.OrFilter` for examples.

    :param values: optional constructor args for
        :class:`smc_monitoring.models.filters.OrFilter`. Typically this is
        a list of InFilter expressions.
    :type: list(QueryFilter)
    :rtype: OrFilter
    """
    or_filter = OrFilter(*values)
    self.update_filter(or_filter)
    return or_filter
def reverse_search_history(event):
    """Search backward starting at the current line and moving `up` through
    the history as necessary.  This is an incremental search.
    """
    # Point the search state backward, then focus the search buffer so
    # subsequent keystrokes refine the incremental search.
    event.cli.current_search_state.direction = IncrementalSearchDirection.BACKWARD
    event.cli.push_focus(SEARCH_BUFFER)
def is_simple(tx: Transaction) -> bool:
    """Check whether a transaction is a basic one.

    A simple transaction is a tx which has only one issuer and two
    outputs maximum.  The unlocks must be done with simple "SIG"
    functions, and the outputs must be simple SIG conditions.

    :param tx: the transaction to check
    :return: True if a simple transaction
    """
    if len(tx.issuers) != 1:
        return False
    for unlock in tx.unlocks:
        # Each unlock must be a single, plain SIG parameter.
        if len(unlock.parameters) != 1:
            return False
        if type(unlock.parameters[0]) is not SIGParameter:
            return False
    for out in tx.outputs:
        # A compound condition (a "right" operand present) is not simple...
        if getattr(out.condition, 'right', None):
            return False
        # ...and the left operand must be a plain SIG condition.
        if type(out.condition.left) is not output.SIG:
            return False
    return True
def list_role_for_all_namespaces(self, **kwargs):  # noqa: E501
    """list_role_for_all_namespaces  # noqa: E501

    List or watch objects of kind Role across all namespaces.  This
    method makes a synchronous HTTP request by default.  To make an
    asynchronous HTTP request, please pass async_req=True:

    >>> thread = api.list_role_for_all_namespaces(async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str _continue: Continue token from a previous paginated list call.
    :param str field_selector: Restrict the returned objects by their fields.
    :param bool include_uninitialized: Include partially initialized resources.
    :param str label_selector: Restrict the returned objects by their labels.
    :param int limit: Maximum number of responses to return for a list call.
    :param str pretty: If 'true', then the output is pretty printed.
    :param str resource_version: Show changes after this resource version.
    :param int timeout_seconds: Timeout for the list/watch call.
    :param bool watch: Stream add/update/remove notifications instead of listing.
    :return: V1RoleList
        If the method is called asynchronously, returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    if kwargs.get('async_req'):
        return self.list_role_for_all_namespaces_with_http_info(**kwargs)  # noqa: E501
    (data) = self.list_role_for_all_namespaces_with_http_info(**kwargs)  # noqa: E501
    return data
def send_signals(self):
    """Shout for the world to hear whether a txn was successful."""
    # self.flag marks an invalid IPN; dispatch the matching signal.
    signal = invalid_ipn_received if self.flag else valid_ipn_received
    signal.send(sender=self)
def long_banner(self):
    """Build the banner for IPython widgets, including the pylab/sympy
    startup messages when those options are enabled.

    :return: the complete banner string.
    """
    # Default banner
    try:
        from IPython.core.usage import quick_guide
    except Exception:
        # quick_guide was removed in newer IPython releases.
        quick_guide = ''
    banner_parts = [
        'Python %s\n' % self.interpreter_versions['python_version'],
        'Type "copyright", "credits" or "license" for more information.\n\n',
        'IPython %s -- An enhanced Interactive Python.\n' % self.interpreter_versions['ipython_version'],
        quick_guide,
    ]
    banner = ''.join(banner_parts)
    # Pylab additions
    pylab_o = self.additional_options['pylab']
    autoload_pylab_o = self.additional_options['autoload_pylab']
    mpl_installed = programs.is_module_installed('matplotlib')
    if mpl_installed and (pylab_o and autoload_pylab_o):
        pylab_message = ("\nPopulating the interactive namespace from "
                         "numpy and matplotlib\n")
        banner = banner + pylab_message
    # Sympy additions
    sympy_o = self.additional_options['sympy']
    if sympy_o:
        lines = """
These commands were executed:
>>> from __future__ import division
>>> from sympy import *
>>> x, y, z, t = symbols('x y z t')
>>> k, m, n = symbols('k m n', integer=True)
>>> f, g, h = symbols('f g h', cls=Function)
"""
        banner = banner + lines
    if (pylab_o and sympy_o):
        lines = """
Warning: pylab (numpy and matplotlib) and symbolic math (sympy) are both
enabled at the same time. Some pylab functions are going to be overrided by
the sympy module (e.g. plot)
"""
        banner = banner + lines
    return banner
def _delete_file(fileName, n=10):
    """Cleanly delete ``fileName``, retrying up to ``n`` times.

    :param fileName: path of the file to remove.
    :param n: maximum number of removal attempts.
    :return: True when the file was removed, False when every attempt
        failed with OSError.
    """
    for _attempt in range(n):
        try:
            _os.remove(fileName)
        except OSError:
            # Brief pause before retrying (file may be transiently locked).
            _time.sleep(0.2)
        else:
            return True
    return False
def get_output_margin(self, status=None):
    """Get the output margin (number of rows for the prompt, footer and
    timing message).

    :param status: optional status message; its height (plus one extra
        row) is added to the margin when present.
    :return: int number of rows reserved below the output.
    """
    # Reserved space + prompt height (line count + 1 for the final line).
    margin = self.get_reserved_space() + self.get_prompt(self.prompt).count('\n') + 1
    if special.is_timing_enabled():
        # One extra row for the timing message.
        margin += 1
    if status:
        margin += 1 + status.count('\n')
    return margin
def cursor(belstr: str, ast: AST, cursor_loc: int, result: Mapping[str, Any] = None) -> Mapping[str, Any]:
    """Find BEL function or argument at cursor location.

    Args:
        belstr: BEL String used to create the completion_text
        ast (Mapping[str, Any]): AST (dict) of BEL String
        cursor_loc (int): given cursor location from input field;
            cursor_loc starts at 0 — think of it like a block cursor
            covering each char
        result: used to recursively return the result

    Returns:
        result dict describing what sits under the cursor: its "type"
        (Function/Relation/NSArg/StrArg), the span of text to replace
        and the completion_text typed so far.
    """
    log.debug(f"SubAST: {json.dumps(ast, indent=4)}")
    # Recurse down through subject, object, nested to functions
    log.debug(f"Cursor keys {ast.keys()}, BELStr: {belstr}")
    # Empty input: offer a top-level function completion at position 0.
    if len(belstr) == 0:
        return {"type": "Function", "replace_span": (0, 0), "completion_text": ""}
    if "relation" in ast and in_span(cursor_loc, ast["relation"]["span"]):
        log.debug("In relation")
        completion_text = belstr[ast["relation"]["span"][0] : cursor_loc + 1]
        return {
            "type": "Relation",
            "replace_span": ast["relation"]["span"],
            "completion_text": completion_text,
        }
    # Handle subject, object and nested keys in tree
    elif "span" not in ast and isinstance(ast, dict):
        for key in ast:
            if key in ["subject", "object", "nested"]:
                log.debug(f"Recursing Keys {key}")
                result = cursor(belstr, ast[key], cursor_loc, result=result)
                if result:
                    return result
    # Matches Functions, NSArgs and StrArgs/StrArgNSArg
    if "span" in ast and in_span(cursor_loc, ast["span"]):
        log.debug("Inside subject/object subAST")
        if "function" in ast:
            name_span = ast["function"]["name_span"]
            if in_span(cursor_loc, name_span):
                return {
                    "type": "Function",
                    "replace_span": name_span,
                    "completion_text": belstr[name_span[0] : cursor_loc + 1],
                }
            for idx, arg in enumerate(ast["args"]):
                # Cursor sitting on an unclosed paren (parens_span end of
                # -1 means the closing paren is missing): offer an empty
                # argument completion.
                if (cursor_loc == ast["function"]["parens_span"][0] and ast["function"]["parens_span"][1] == -1):
                    return {
                        "type": "StrArg",  # default type if unknown
                        "arg_idx": idx,
                        "replace_span": arg["span"],  # replace entire strarg
                        "parent_function": ast["function"]["name"],
                        "completion_text": "",
                    }
                elif in_span(cursor_loc, arg["span"]):
                    log.debug(f'In argument span {arg["span"]} Cursor_loc: {cursor_loc}')
                    if arg["type"] == "Function":
                        if in_span(cursor_loc, arg["function"]["name_span"]):
                            log.debug("Found replace_span in args: Function type")
                            return {
                                "type": "Function",
                                "replace_span": arg["function"]["name_span"],  # replace function name only
                                "arg_idx": idx,
                                "args": copy.deepcopy(ast["args"]),
                                "parent_function": ast["function"]["name"],
                                "completion_text": belstr[arg["function"]["name_span"][0] : cursor_loc + 1],
                            }
                        else:
                            log.debug(f'Recursing Function {arg["span"]}')
                            result = cursor(belstr, arg, cursor_loc, result=result)
                    elif arg["type"] == "NSArg":
                        result = {
                            "type": "NSArg",
                            "replace_span": arg["span"],  # replace entire nsarg
                            "arg_idx": idx,
                            "args": copy.deepcopy(ast["args"]),
                            "parent_function": ast["function"]["name"],
                        }
                        # Filter on namespace and query on ns_val chars up to cursor_loc
                        if in_span(cursor_loc, arg["nsarg"]["ns_val_span"]):
                            # provide namespace for nsarg filtering
                            result["namespace"] = arg["nsarg"]["ns"]
                            # Query on nsarg chars up to cursor_loc
                            result["completion_text"] = belstr[arg["nsarg"]["ns_val_span"][0] : cursor_loc + 1]
                        else:
                            result["completion_text"] = belstr[arg["nsarg"]["ns_span"][0] : cursor_loc + 1]
                        log.debug(f"Found replace_span in args: NSArg {result}")
                        return result
                    elif (arg["type"] == "StrArg"):  # in case this is a default namespace StrArg
                        if (arg["span"][0] == arg["span"][1]):  # handle case like p() cursor = 2
                            completion_text = arg["arg"]
                        else:
                            completion_text = belstr[arg["span"][0] : cursor_loc + 1]
                        return {
                            "type": "StrArg",
                            "arg_idx": idx,
                            "replace_span": arg["span"],  # replace entire strarg
                            "parent_function": ast["function"]["name"],
                            "completion_text": completion_text.lstrip(),
                        }
    return result
def _parse_table(self):
    """Parse a wikicode table by starting with the first line."""
    reset = self._head
    # Skip past the "{|" table-open token.
    self._head += 2
    try:
        self._push(contexts.TABLE_OPEN)
        padding = self._handle_table_style("\n")
    except BadRoute:
        # Not actually a table: rewind and emit the "{" literally.
        self._head = reset
        self._emit_text("{")
        return
    style = self._pop()
    self._head += 1
    restore_point = self._stack_ident
    try:
        table = self._parse(contexts.TABLE_OPEN)
    except BadRoute:
        # Unwind any stacks pushed while parsing the failed table body,
        # memoizing the bad route so it isn't re-attempted.
        while self._stack_ident != restore_point:
            self._memoize_bad_route()
            self._pop()
        self._head = reset
        self._emit_text("{")
        return
    self._emit_table_tag("{|", "table", style, padding, None, table, "|}")
    # Offset displacement done by _parse():
    self._head -= 1
def write_function_dockercall(self, job):
    '''Writes a string containing the apiDockerCall() that will run the job.

    :param job_task_reference: The name of the job calling docker.
    :param docker_image: The corresponding name of the docker image.
                         e.g. "ubuntu:latest"
    :return: A string containing the apiDockerCall() that will run the job.
    '''
    # Values substituted into the heredoc template below.
    docker_dict = {
        "docker_image": self.tasks_dictionary[job]['runtime']['docker'],
        "job_task_reference": job,
        "docker_user": str(self.docker_user)}
    # NOTE(review): the "asldijoiu..." token looks like a placeholder that
    # is rewritten later in the pipeline — confirm before touching it.
    docker_template = heredoc_wdl('''
        stdout = apiDockerCall(self,
                               image={docker_image},
                               working_dir=tempDir,
                               parameters=[os.path.join(tempDir, "{job_task_reference}_script.sh")],
                               entrypoint="/bin/bash",
                               user={docker_user},
                               stderr=True,
                               volumes={{tempDir: {{"bind": tempDir}}}})
        writetype = 'wb' if isinstance(stdout, bytes) else 'w'
        with open(os.path.join(asldijoiu23r8u34q89fho934t8u34fcurrentworkingdir, '{job_task_reference}.log'), writetype) as f:
            f.write(stdout)
        ''', docker_dict, indent='    ')[1:]
    return docker_template
def t_BIN ( self , t ) :
r'( % [ 01 ] + ) | ( [ 01 ] + [ bB ] )' | # A Binary integer
# Note 00B is a 0 binary , but
# 00Bh is a 12 in hex . So this pattern must come
# after HEXA
if t . value [ 0 ] == '%' :
t . value = t . value [ 1 : ]
# Remove initial %
else :
t . value = t . value [ : - 1 ]
# Remove last ' b '
t . value = int ( t . value , 2 )
# Convert to decimal
t . type = 'INTEGER'
return t |
def get_child_at(self, x, y, bAllowTransparency=True):
    """Get the child window located at the given coordinates.

    NOTE(review): despite the original wording ("an exception is
    raised"), the code below returns C{None} when no child window is
    found or the underlying Win32 call fails.

    @see: L{get_children}

    @type x: int
    @param x: Horizontal coordinate.
    @type y: int
    @param y: Vertical coordinate.
    @type bAllowTransparency: bool
    @param bAllowTransparency: If C{True} transparent areas in windows are
        ignored, returning the window behind them. If C{False} transparent
        areas are treated just like any other area.
    @rtype: L{Window}
    @return: Child window at the requested position, or C{None} if there
        is no window at those coordinates.
    """
    try:
        if bAllowTransparency:
            # RealChildWindowFromPoint looks through transparent areas.
            hWnd = win32.RealChildWindowFromPoint(self.get_handle(), (x, y))
        else:
            hWnd = win32.ChildWindowFromPoint(self.get_handle(), (x, y))
        if hWnd:
            return self.__get_window(hWnd)
    except WindowsError:
        # The Win32 call failed (e.g. window destroyed): treat as "no child".
        pass
    return None
def _prep_acl_for_compare(ACL):
    '''Prepares the ACL returned from the AWS API for comparison with a given one.'''
    # Deep copy so the caller's ACL dict is never mutated.
    prepared = copy.deepcopy(ACL)
    prepared['Owner'] = _normalize_user(prepared['Owner'])
    for grant in prepared.get('Grants', ()):
        grant['Grantee'] = _normalize_user(grant.get('Grantee'))
    return prepared
def interact(self, msg='SHUTIT PAUSE POINT', shutit_pexpect_child=None, print_input=True, level=1, resize=True, color='32', default_msg=None, wait=-1):
    """Same as pause_point, but sets up the terminal ready for unmediated
    interaction.

    All arguments are forwarded to ``pause_point`` with ``interact=True``.
    """
    # Hand control to the draw loop before dropping into the pause point.
    shutit_global.shutit_global_object.yield_to_draw()
    self.pause_point(msg=msg, shutit_pexpect_child=shutit_pexpect_child, print_input=print_input, level=level, resize=resize, color=color, default_msg=default_msg, interact=True, wait=wait)
def read_ipv6_frag(self, length, extension):
    """Read Fragment Header for IPv6.

    Structure of IPv6-Frag header [RFC 8200]:
        | Next Header | Reserved | Fragment Offset | Res | M |
        |                 Identification                     |

        Octets  Bits  Name         Description
        0       0     frag.next    Next Header
        1       8     -            Reserved
        2       16    frag.offset  Fragment Offset
        3       29    -            Reserved
        3       31    frag.mf      More Flag
        4       32    frag.id      Identification

    :param length: remaining payload length (None = rest of the buffer).
    :param extension: when True, parse this header in isolation and stop
        protocol chaining; otherwise decode the next layer as well.
    :return: dict describing the fragment header (and nested layers).
    """
    if length is None:
        length = len(self)
    _next = self._read_protos(1)
    _temp = self._read_fileng(1)  # reserved byte, discarded
    _offm = self._read_binary(2)  # 13-bit offset, 2 reserved bits, M flag
    _ipid = self._read_unpack(4)
    ipv6_frag = dict(
        next=_next,
        length=8,  # fixed 8-octet header
        offset=int(_offm[:13], base=2),
        mf=True if int(_offm[15], base=2) else False,
        id=_ipid,
    )
    length -= ipv6_frag['length']
    ipv6_frag['packet'] = self._read_packet(header=8, payload=length)
    if extension:
        # Bare extension-header parse: stop the protocol chain here.
        self._protos = None
        return ipv6_frag
    return self._decode_next_layer(ipv6_frag, _next, length)
def retrieve(self, id):
    """Retrieve a single source.

    Returns a single source available to the user by the provided id.
    If a source with the supplied unique identifier does not exist it
    returns an error.

    :calls: ``get /deal_sources/{id}``
    :param int id: Unique identifier of a DealSource.
    :return: Dictionary that support attriubte-style access and represent DealSource resource.
    :rtype: dict
    """
    # Status code and headers from the HTTP response are not needed here.
    status_code, headers, deal_source = self.http_client.get(
        "/deal_sources/{id}".format(id=id))
    return deal_source
def _reformat_low(self, low):
    '''Format the low data for RunnerClient()'s master_call() function.

    This also normalizes the following low data formats to a single, common
    low data structure.

    Old-style low: ``{'fun': 'jobs.lookup_jid', 'jid': '1234'}``
    New-style: ``{'fun': 'jobs.lookup_jid', 'kwarg': {'jid': '1234'}}``
    CLI-style: ``{'fun': 'jobs.lookup_jid', 'arg': ['jid="1234"']}``

    :param low: dict of low data; mutated (keys are popped) during
        normalization.
    :return: dict with 'fun', a nested 'kwarg' structure, and any
        authentication credentials found in ``low``.
    '''
    fun = low.pop('fun')
    verify_fun(self.functions, fun)
    # Pull authentication fields out of low so they are not treated as
    # function arguments below.
    eauth_creds = dict([(i, low.pop(i)) for i in ['username', 'password', 'eauth', 'token', 'client', 'user', 'key', ] if i in low])
    # Run name=value args through parse_input. We don't need to run kwargs
    # through because there is no way to send name=value strings in the low
    # dict other than by including an `arg` array.
    _arg, _kwarg = salt.utils.args.parse_input(low.pop('arg', []), condition=False)
    _kwarg.update(low.pop('kwarg', {}))
    # If anything hasn't been pop()'ed out of low by this point it must be
    # an old-style kwarg.
    _kwarg.update(low)
    # Finally, mung our kwargs to a format suitable for the byzantine
    # load_args_and_kwargs so that we can introspect the function being
    # called and fish for invalid kwargs.
    munged = []
    munged.extend(_arg)
    munged.append(dict(__kwarg__=True, **_kwarg))
    arg, kwarg = salt.minion.load_args_and_kwargs(self.functions[fun], munged, ignore_invalid=True)
    return dict(fun=fun, kwarg={'kwarg': kwarg, 'arg': arg}, **eauth_creds)
def device_statistics(fritz, args):
    """Command that prints the device statistics."""
    print(fritz.get_device_statistics(args.ain))
def element_css_attribute_should_be(self, locator, prop, expected):
    """Verify the element identified by `locator` has the expected value
    for the targeted css property `prop`.

    | *Argument* | *Description*              | *Example*          |
    | locator    | Selenium 2 element locator | id=my_id           |
    | prop       | targeted css attribute     | background-color   |
    | expected   | expected value             | rgba(0, 128, 0, 1) |
    """
    message = "Verifying element '%s' has css attribute '%s' with a value of '%s'" % (
        locator, prop, expected)
    self._info(message)
    self._check_element_css_value(locator, prop, expected)
def defvalkey(js, key, default=None, take_none=True):
    """Return ``js[key]`` if present, otherwise ``default``.

    A stored value of None is returned only when `take_none` is True.

    :param js: mapping to read from (may be None)
    :param key: key to look up
    :param default: value returned when the key is missing
    :param take_none: whether a stored None counts as a value
    :return: the stored value or `default`
    """
    if js is None or key not in js:
        return default
    stored = js[key]
    if stored is None and not take_none:
        return default
    return stored
def rsi(arg, n):
    """Compute the Relative Strength Index over a window of ``n`` periods.

    :param arg: Series or DataFrame; for a DataFrame the RSI is computed
        independently for each column.
    :param n: lookback window length (coerced to int)
    :return: pandas object with the same index (and columns) as ``arg``
    """
    if isinstance(arg, pd.DataFrame):
        # pd.DataFrame.from_items was removed in pandas 1.0; a plain dict
        # preserves insertion (column) order on Python 3.7+.
        return pd.DataFrame({name: rsi(arg[name], n) for name in arg.columns})
    else:
        assert isinstance(arg, pd.Series)
        n = int(n)
        converted = arg.dropna()
        change = converted.diff()
        gain = change.apply(lambda c: c if c > 0 else 0)
        avgGain = wilderma(gain, n)
        loss = change.apply(lambda c: abs(c) if c < 0 else 0)
        avgLoss = wilderma(loss, n)
        result = avgGain / avgLoss
        # A zero average loss divides to inf; RSI is defined as 100 there.
        result[result == np.inf] = 100.
        result = 100. - (100. / (1. + result))
        return pd.Series(result, index=converted.index).reindex(arg.index)
def get_loss_func(self, C=1.0, k=1):
    """Get the loss function of the VAE.

    The loss value is equal to the ELBO (Evidence Lower Bound)
    multiplied by -1.

    Args:
        C (int): Usually 1.0. Scales the KL term of the ELBO, which
            works as regularization.
        k (int): Number of Monte Carlo samples used per encoded vector.
    """
    def lf(x):
        mu, ln_var = self.encode(x)
        batchsize = len(mu.data)
        # Reconstruction loss, averaged over the k Monte Carlo samples.
        rec_loss = 0
        for _ in six.moves.range(k):
            z = F.gaussian(mu, ln_var)
            rec_loss += F.bernoulli_nll(x, self.decode(z, sigmoid=False)) / (k * batchsize)
        self.rec_loss = rec_loss
        # Negative ELBO = reconstruction term + C-weighted KL divergence.
        self.loss = self.rec_loss + C * gaussian_kl_divergence(mu, ln_var) / batchsize
        return self.loss
    return lf
def get_mime_representation(self):
    """Return a copy of the MIME part that constitutes this attachment."""
    mime_part = deepcopy(self.part)
    mime_part.set_param('maxlinelen', '78', header='Content-Disposition')
    return mime_part
def from_vizier_table(cls, table_id, nside=256):
    """Create a `~mocpy.moc.MOC` object from a VizieR table.

    **Info**: This method is already implemented in `astroquery.cds
    <https://astroquery.readthedocs.io/en/latest/cds/cds.html>`__. You can
    ask to get a `mocpy.moc.MOC` object from a vizier catalog ID.

    Parameters
    ----------
    table_id : str
        table index
    nside : int, optional
        256 by default

    Returns
    -------
    result : `~mocpy.moc.MOC`
        The resulting MOC.
    """
    valid_nsides = (8, 16, 32, 64, 128, 256, 512)
    if nside not in valid_nsides:
        raise ValueError('Bad value for nside. Must be in {0}'.format(valid_nsides))
    return cls.from_ivorn('ivo://CDS/' + table_id, nside)
def _is_dir ( self , f ) :
'''Check if the given in - dap file is a directory''' | return self . _tar . getmember ( f ) . type == tarfile . DIRTYPE |
def get_related(self, instance, number):
    """Implement a high-level cache layer over ``get_related``.

    Results are memoized per ``(instance.pk, number)`` in ``self.cache``.
    """
    cache = self.cache
    cache_key = '%s:%s' % (instance.pk, number)
    if cache_key not in cache:
        cache[cache_key] = super(CachedModelVectorBuilder, self).get_related(
            instance, number)
    # Re-assign unconditionally, as the cache property setter may persist
    # the mapping — NOTE(review): confirm against the property definition.
    self.cache = cache
    return cache[cache_key]
def documents(self, key, value):
    """Populate the ``documents`` key.

    Also populates the ``figures`` key through side effects.
    """
    def _is_hidden(value):
        return 'HIDDEN' in [val.upper() for val in value] or None

    def _is_figure(value):
        figures_extensions = ['.png']
        return value.get('f') in figures_extensions

    def _is_fulltext(value):
        return value.get('d', '').lower() == 'fulltext' or None

    def _get_index_and_caption(value):
        match = re.compile(r'(^\d{5})?\s*(.*)').match(value)
        if match:
            return match.group(1), match.group(2)

    def _get_key(value):
        fname = value.get('n', 'document')
        extension = value.get('f', '')
        if fname.endswith(extension):
            return fname
        return fname + extension

    def _get_source(value):
        source = value.get('t', '')
        if source in ('INSPIRE-PUBLIC', 'Main'):
            source = None
        elif source.lower() == 'arxiv':
            return 'arxiv'
        return source

    figures = self.get('figures', [])
    is_context = value.get('f', '').endswith('context')
    if is_context:
        return

    if _is_figure(value):
        index, caption = _get_index_and_caption(value.get('d', ''))
        figures.append({
            'key': _get_key(value),
            'caption': caption,
            'url': afs_url(value.get('a')),
            'order': index,
            'source': 'arxiv',  # XXX: we don't have any other figures on legacy
        })
        self['figures'] = figures
    else:
        return {
            'description': value.get('d') if not _is_fulltext(value) else None,
            'key': _get_key(value),
            'fulltext': _is_fulltext(value),
            'hidden': _is_hidden(force_list(value.get('o'))),
            'url': afs_url(value.get('a')),
            'source': _get_source(value),
        }
def join_cluster(host, user='rabbit', ram_node=None, runas=None):
    '''Join a rabbit cluster.

    CLI Example:

    .. code-block:: bash

        salt '*' rabbitmq.join_cluster rabbit.example.com rabbit
    '''
    cmd = [RABBITMQCTL, 'join_cluster']
    if ram_node:
        cmd.append('--ram')
    cmd.append('{0}@{1}'.format(user, host))
    if runas is None and not salt.utils.platform.is_windows():
        runas = salt.utils.user.get_user()
    # The app must be stopped before joining the cluster and restarted after.
    stop_app(runas)
    res = __salt__['cmd.run_all'](
        cmd, reset_system_locale=False, runas=runas, python_shell=False)
    start_app(runas)
    return _format_response(res, 'Join')
def deep_force_unicode(value):
    """Recursively call force_text on `value` (containers are rebuilt)."""
    if isinstance(value, (list, tuple, set)):
        return type(value)(map(deep_force_unicode, value))
    if isinstance(value, dict):
        return type(value)(map(deep_force_unicode, value.items()))
    if isinstance(value, Promise):
        return force_text(value)
    return value
def _query(self, method, path, data=None, page=False, retry=0):
    """Fetch an object from the Graph API and parse the output.

    Returns a tuple whose first item is the object yielded by the Graph
    API and whose second is the URL for the next page of results, or
    ``None`` if results have been exhausted.

    :param method: A string describing the HTTP method.
    :param path: A string describing the object in the Graph API.
    :param data: A dictionary of HTTP GET parameters (for GET requests)
        or POST data (for POST requests).
    :param page: A boolean describing whether to return an iterator that
        iterates over each page of results.
    :param retry: An integer describing how many times the request may be
        retried.
    """
    if data:
        # Undo the placeholder encoding used for key characters that are
        # not valid in Python identifiers (brackets and colons).
        data = dict((k.replace('_sqbro_', '['), v) for k, v in data.items())
        data = dict((k.replace('_sqbrc_', ']'), v) for k, v in data.items())
        data = dict((k.replace('__', ':'), v) for k, v in data.items())
    data = data or {}

    def load(method, url, data):
        # Serialize container values as JSON strings.
        for key in data:
            value = data[key]
            if isinstance(value, (list, dict, set)):
                data[key] = json.dumps(value)
        try:
            if method in ['GET', 'DELETE']:
                response = self.session.request(
                    method, url, params=data, allow_redirects=True,
                    verify=self.verify_ssl_certificate, timeout=self.timeout)
            if method in ['POST', 'PUT']:
                # File-like values are sent as multipart uploads.
                files = {}
                for key in data:
                    if hasattr(data[key], 'read'):
                        files[key] = data[key]
                for key in files:
                    data.pop(key)
                response = self.session.request(
                    method, url, data=data, files=files,
                    verify=self.verify_ssl_certificate, timeout=self.timeout)
            if 500 <= response.status_code < 600:
                # Facebook 5XX errors usually come with helpful messages
                # as a JSON object describing the problem with the request.
                # If this is the case, an error will be raised and we just
                # need to re-raise it. This is most likely to happen with
                # the Ads API. The next call raises on a JSON-like error
                # object in the response.
                self._parse(response.content)
                # If Facebook provides no JSON-formatted error but just a
                # plain-text, useless one, report a generic internal error.
                raise FacebookError('Internal Facebook error occurred', response.status_code)
        except requests.RequestException as exception:
            raise HTTPError(exception)
        result = self._parse(response.content)
        if isinstance(result, dict):
            result['headers'] = response.headers
        try:
            next_url = result['paging']['next']
        except (KeyError, TypeError):
            next_url = None
        return result, next_url

    def load_with_retry(method, url, data):
        remaining_retries = retry
        while True:
            try:
                return load(method, url, data)
            except FacepyError as e:
                log.warn("Exception on %s: %s, retries remaining: %s",
                         url, e, remaining_retries)
                if remaining_retries > 0:
                    remaining_retries -= 1
                else:
                    raise

    def paginate(method, url, data):
        while url:
            result, url = load_with_retry(method, url, data)
            # Reset pagination parameters.
            for key in ['offset', 'until', 'since']:
                if key in data:
                    del data[key]
            yield result

    # Convert option lists to comma-separated values.
    for key in data:
        if isinstance(data[key], (list, set, tuple)) and \
                all(isinstance(item, six.string_types) for item in data[key]):
            data[key] = ','.join(data[key])

    # Support absolute paths too.
    if not path.startswith('/'):
        if six.PY2:
            path = '/' + six.text_type(path.decode('utf-8'))
        else:
            path = '/' + path

    url = self._get_url(path)
    if self.oauth_token:
        data['access_token'] = self.oauth_token
    if self.appsecret and self.oauth_token:
        data['appsecret_proof'] = self._generate_appsecret_proof()

    if page:
        return paginate(method, url, data)
    else:
        return load_with_retry(method, url, data)[0]
def jsonxs(data, expr, action=ACTION_GET, value=None, default=None):
    """Get, set or delete values in a JSON structure.

    `expr` is a JSONpath-like expression pointing to the desired value.
    `action` determines the action to perform; see the module-level
    `ACTION_*` constants. `value` should be given if action is
    `ACTION_SET`. If `default` is set and `expr` isn't found, return
    `default` instead. This will override all exceptions.
    """
    tokens = tokenize(expr)

    # Walk through the tokens to reach the correct path in the structure.
    try:
        prev_path = None
        cur_path = data
        for token in tokens:
            prev_path = cur_path
            if token not in cur_path and action in [ACTION_SET, ACTION_MKDICT, ACTION_MKLIST]:
                # When setting values or creating dicts/lists, the key may
                # legitimately be missing from the data structure.
                continue
            cur_path = cur_path[token]
    except Exception:
        if default is not None:
            return default
        raise

    # Perform the action the user requested.
    if action == ACTION_GET:
        return cur_path
    elif action == ACTION_DEL:
        del prev_path[token]
    elif action == ACTION_SET:
        prev_path[token] = value
    elif action == ACTION_APPEND:
        prev_path[token].append(value)
    elif action == ACTION_INSERT:
        prev_path.insert(token, value)
    elif action == ACTION_MKDICT:
        prev_path[token] = {}
    elif action == ACTION_MKLIST:
        prev_path[token] = []
    else:
        raise ValueError("Invalid action: {}".format(action))
def inspect_secret(self, id):
    """Retrieve secret metadata.

    Args:
        id (string): Full ID of the secret to inspect.

    Returns (dict): A dictionary of metadata.

    Raises:
        :py:class:`docker.errors.NotFound`
            if no secret with that ID exists
    """
    endpoint = self._url('/secrets/{0}', id)
    return self._result(self._get(endpoint), True)
def insert_order(self, order):
    """Insert an order into the live order queue.

    :param order: QA_Order instance (or None)
    :return: the order on success, None when `order` is None
    """
    if order is None:
        print('QAERROR Wrong for get None type while insert order to Queue')
        return None
    # Orders are kept alive in the queue, keyed by their order_id.
    self.order_list[order.order_id] = order
    return order
def make_file_path(project_dir, project_name, root, name):
    """Generate the target path for a file inside the generated project tree."""
    target_dir = make_dir_path(project_dir, root, project_name)
    return path.join(target_dir, name)
def run(self):
    '''Run loop of the fetcher.'''
    logger.info("fetcher starting...")

    def queue_loop():
        # Drain the inqueue while there is room in the outqueue and the
        # HTTP client has free slots.
        if not self.outqueue or not self.inqueue:
            return
        while not self._quit:
            try:
                if self.outqueue.full():
                    break
                if self.http_client.free_size() <= 0:
                    break
                task = self.inqueue.get_nowait()
                # FIXME: decode_unicode_obj should be used after data is
                # selected from the database; it's used here for performance.
                task = utils.decode_unicode_obj(task)
                self.fetch(task)
            except queue.Empty:
                break
            except KeyboardInterrupt:
                break
            except Exception as e:
                logger.exception(e)
                break

    tornado.ioloop.PeriodicCallback(queue_loop, 100, io_loop=self.ioloop).start()
    tornado.ioloop.PeriodicCallback(self.clear_robot_txt_cache, 10000, io_loop=self.ioloop).start()
    self._running = True
    try:
        self.ioloop.start()
    except KeyboardInterrupt:
        pass
    logger.info("fetcher exiting...")
def merge_range_pairs(prs):
    '''Take a list of pairs specifying ranges and return a sorted list of
    merged, sorted ranges.'''
    merged_pairs = []
    sorted_pairs = sorted(sorted(p) for p in prs)
    i = 0
    while i < len(sorted_pairs):
        next_i = i + 1
        current = list(sorted_pairs[i])
        for j in range(i + 1, len(sorted_pairs)):
            # Merge when the candidate starts inside, or adjacent to, the
            # current merged range.
            if current[0] <= sorted_pairs[j][0] - 1 <= current[1]:
                current[0] = min(current[0], sorted_pairs[j][0])
                current[1] = max(current[1], sorted_pairs[j][1])
                next_i = j + 1
        if current not in merged_pairs:
            merged_pairs.append(current)
        i = next_i
    return merged_pairs
def read(file, frames=-1, start=0, stop=None, dtype='float64', always_2d=False,
         fill_value=None, out=None, samplerate=None, channels=None,
         format=None, subtype=None, endian=None, closefd=True):
    """Provide audio data from a sound file as a NumPy array.

    By default the whole file is read from the beginning, but the position
    to start reading can be specified with `start` and the number of frames
    with `frames`; alternatively a range can be given with `start` and
    `stop`. If there is less data left in the file than requested, the rest
    of the frames are filled with `fill_value`; if no `fill_value` is
    specified, a smaller array is returned.

    Parameters
    ----------
    file : str or int or file-like object
        The file to read from. See :class:`SoundFile` for details.
    frames : int, optional
        The number of frames to read. If negative, the whole rest of the
        file is read. Not allowed if `stop` is given.
    start : int, optional
        Where to start reading. A negative value counts from the end.
    stop : int, optional
        The index after the last frame to be read. A negative value counts
        from the end. Not allowed if `frames` is given.
    dtype : {'float64', 'float32', 'int32', 'int16'}, optional
        Data type of the returned array, by default ``'float64'``.
        Floating point audio data is typically in the range -1.0..1.0;
        integer data in -2**15..2**15-1 for ``'int16'`` and
        -2**31..2**31-1 for ``'int32'``.

        .. note:: Reading int values from a float file will *not* scale
            the data to [-1.0, 1.0).

    Returns
    -------
    audiodata : numpy.ndarray or type(out)
        A two-dimensional (frames x channels) NumPy array, or a
        one-dimensional array for single-channel files (use
        ``always_2d=True`` to force two dimensions). If `out` was
        specified it is returned; when fewer valid frames exist than `out`
        holds and no `fill_value` is given, a view containing only the
        valid frames is returned.
    samplerate : int
        The sample rate of the audio file.

    Other Parameters
    ----------------
    always_2d : bool, optional
        Always return a two-dimensional array, even for mono files.
    fill_value : float, optional
        Value used to pad the output when more frames are requested than
        are available in the file.
    out : numpy.ndarray or subclass, optional
        Write the data into this array instead of creating a new one; the
        `dtype` and `always_2d` arguments are then silently ignored. If
        `frames` is not given, it is obtained from the length of `out`.
    samplerate, channels, format, subtype, endian, closefd
        See :class:`SoundFile`.

    Examples
    --------
    >>> import soundfile as sf
    >>> data, samplerate = sf.read('stereo_file.wav')  # doctest: +SKIP
    """
    with SoundFile(file, 'r', samplerate, channels,
                   subtype, endian, format, closefd) as f:
        frames = f._prepare_read(start, stop, frames)
        data = f.read(frames, dtype, always_2d, fill_value, out)
        return data, f.samplerate
def rename(self, new_name):
    """Rename the container.

    On success, returns the new Container object.
    On failure, returns False.
    """
    renamed = _lxc.Container.rename(self, new_name)
    return Container(new_name) if renamed else False
def add_leaf(self, value, do_hash=False):
    """Add a leaf to the tree.

    :param value: hash value (as a Buffer) or hex string
    :param do_hash: whether to hash `value` before adding
    """
    # Any modification invalidates the previously computed tree state.
    self.tree['is_ready'] = False
    self._add_leaf(value, do_hash)
def as_artist(self, origin=(0, 0), **kwargs):
    """Matplotlib patch object for this region (`matplotlib.patches.Ellipse`).

    Parameters
    ----------
    origin : array_like, optional
        The ``(x, y)`` pixel position of the origin of the displayed
        image. Default is (0, 0).
    kwargs : `dict`
        All keywords that a `~matplotlib.patches.Ellipse` object accepts.

    Returns
    -------
    patch : `~matplotlib.patches.Ellipse`
        Matplotlib ellipse patch.
    """
    from matplotlib.patches import Ellipse

    center = (self.center.x - origin[0], self.center.y - origin[1])
    # Matplotlib expects "rotation in degrees (anti-clockwise)".
    rotation = self.angle.to('deg').value
    patch_kwargs = self.mpl_properties_default('patch')
    patch_kwargs.update(kwargs)
    return Ellipse(xy=center, width=self.width, height=self.height,
                   angle=rotation, **patch_kwargs)
def get_pca_ks_stats(self, maxrange=5):
    """Get a dictionary of PC#: {(phenotype1, phenotype2): K-S test result}.

    For each of the first ``maxrange`` principal components, runs a
    two-sample Kolmogorov-Smirnov test between every pair of phenotypes
    found in ``self.principal_observations_df``.

    :param maxrange: maximum number of principal components to test
    :return: dict mapping 1-based PC number to a dict of phenotype pairs
        to their ``ks_2samp`` results
    """
    pc_to_phenotype_pairs = {}
    num_components = self.principal_observations_df.shape[1]
    maxrange = min(maxrange, num_components)
    phenotypes = self.principal_observations_df.phenotype.unique().tolist()
    for i in range(maxrange):
        phenotype_pair_to_ks = {}
        for p1, p2 in combinations(phenotypes, 2):
            # .as_matrix() was removed in pandas 1.0; .to_numpy() is the
            # supported replacement.
            p1_pc = self.principal_observations_df[
                self.principal_observations_df.phenotype == p1].iloc[:, i].to_numpy()
            p2_pc = self.principal_observations_df[
                self.principal_observations_df.phenotype == p2].iloc[:, i].to_numpy()
            phenotype_pair_to_ks[(p1, p2)] = ks_2samp(p1_pc, p2_pc)
        pc_to_phenotype_pairs[i + 1] = phenotype_pair_to_ks
    return pc_to_phenotype_pairs
def findpk2(self, r1, s1, r2, s2, flag1, flag2):
    """Find the public key Y from two different signatures on the same message.

    sigs: (r1, s1) and (r2, s2)
    returns (R1*s1 - R2*s2) / (r1 - r2)
    """
    R1 = self.ec.decompress(r1, flag1)
    R2 = self.ec.decompress(r2, flag2)
    r_delta = self.GFn.value(r1 - r2)
    return (R1 * s1 - R2 * s2) * (1 / r_delta)
def start(self):
    """Start a thread that consumes the messages and invokes the callback."""
    consumer = threading.Thread(target=self._consume)
    consumer.start()
def terrain_request_encode(self, lat, lon, grid_spacing, mask):
    '''Request for terrain data and terrain status.

    lat          : Latitude of SW corner of first grid (degrees *10^7) (int32_t)
    lon          : Longitude of SW corner of first grid (in degrees *10^7) (int32_t)
    grid_spacing : Grid spacing in meters (uint16_t)
    mask         : Bitmask of requested 4x4 grids (row major 8x7 array of grids, 56 bits) (uint64_t)
    '''
    message = MAVLink_terrain_request_message(lat, lon, grid_spacing, mask)
    return message
def filter(n: Node, query: str) -> CompatNodeIterator:
    """This function has the same signature as the pre-v3 filter(),
    returning a compatibility CompatNodeIterator."""
    ctx = uast()
    inner = NodeIterator(ctx.filter(query, n.internal_node), ctx)
    return CompatNodeIterator(inner)
def status(logfile, time_format):
    "show current status"
    try:
        last = read(logfile, time_format)[-1]
        if last[1][1]:
            # Last entry is closed: nothing in progress, show the summary.
            return summary(logfile, time_format)
        else:
            # Parenthesized print works identically as a statement on
            # Python 2 and as a call on Python 3.
            print("working on %s" % colored(last[0], attrs=['bold']))
            print(" since %s" % colored(server.date_to_txt(last[1][0], time_format), 'green'))
            print(" to now, %s" % colored(server.date_to_txt(now(), time_format), 'green'))
            print(" => %s elapsed" % colored(time_elapsed(last[1][0]), 'red'))
    except IndexError:
        # Empty log: fall back to the command help.
        return cmdapp.help()
def params(self):
    """Parameters used in the url of the API call and for authentication.

    :return: parameters used in the url.
    :rtype: dict
    """
    return {
        "access_token": self.access_token,
        "account_id": self.account_id,
    }
def encompass(self):
    """Called when parallelize is False.

    Walks the tree rooted at each directory (top-down or bottom-up per
    ``self.topdown``), yielding every file it finds to ``add_path``.
    """
    self._printer('Standard Walk')
    count = Counter(length=3)
    for directory in self.directory:
        for root, directories, files in os.walk(directory, topdown=self.topdown):
            # Strip the walked directory prefix so `root` is relative.
            root = root[len(str(directory)) + 1:]
            self._printer(str(count.up) + ": Explored path - " + str(root), stream=True)
            for filename in files:
                # Join the two parts to form the full relative filepath.
                self.add_path(directory, os.path.join(root, filename))
def pid_tuning_send(self, axis, desired, achieved, FF, P, I, D, force_mavlink1=False):
    '''PID tuning information.

    axis     : axis (uint8_t)
    desired  : desired rate (degrees/s) (float)
    achieved : achieved rate (degrees/s) (float)
    FF       : FF component (float)
    P        : P component (float)
    I        : I component (float)
    D        : D component (float)
    '''
    encoded = self.pid_tuning_encode(axis, desired, achieved, FF, P, I, D)
    return self.send(encoded, force_mavlink1=force_mavlink1)
def _jseq(self, cols, converter=None):
    """Return a JVM Seq of Columns from a list of Column or names."""
    sc = self.sql_ctx._sc
    return _to_seq(sc, cols, converter)
def plot_dom_parameters(data, detector, filename, label, title, vmin=0.0,
                        vmax=10.0, cmap='RdYlGn_r', under='deepskyblue',
                        over='deeppink', underfactor=1.0, overfactor=1.0,
                        missing='lightgray', hide_limits=False):
    """Create a plot in the classical monitoring.km3net.de style.

    Parameters
    ----------
    data: dict((du, floor) -> value)
    detector: km3pipe.hardware.Detector() instance
    filename: filename or filepath
    label: str
    title: str
    underfactor: a scale factor for the points used for underflow values
    overfactor: a scale factor for the points used for overflow values
    hide_limits: do not show under/overflows in the plot
    """
    x, y, _ = zip(*detector.doms.values())
    fig, ax = plt.subplots(figsize=(10, 6))
    cmap = plt.get_cmap(cmap)
    cmap.set_over(over, 1.0)
    cmap.set_under(under, 1.0)

    m_size = 100
    scatter_args = {'edgecolors': 'None', 'vmin': vmin, 'vmax': vmax}
    # Background layer: every DOM drawn in the "missing" color first.
    sc_inactive = ax.scatter(x, y, c=missing, label='missing',
                             s=m_size * 0.9, **scatter_args)

    xa, ya = map(np.array, zip(*data.keys()))
    zs = np.array(list(data.values()))
    in_range_idx = np.logical_and(zs >= vmin, zs <= vmax)
    sc = ax.scatter(xa[in_range_idx], ya[in_range_idx], c=zs[in_range_idx],
                    cmap=cmap, s=m_size, **scatter_args)

    if not hide_limits:
        under_idx = zs < vmin
        ax.scatter(xa[under_idx], ya[under_idx], c=under,
                   label='< {0}'.format(vmin), s=m_size * underfactor, **scatter_args)
        over_idx = zs > vmax
        ax.scatter(xa[over_idx], ya[over_idx], c=over,
                   label='> {0}'.format(vmax), s=m_size * overfactor, **scatter_args)

    cb = plt.colorbar(sc)
    cb.set_label(label)

    ax.set_title("{0}\n{1} UTC".format(title, datetime.utcnow().strftime("%c")))
    ax.set_xlabel("DU")
    ax.set_ylabel("DOM")
    ax.set_ylim(-2)
    ax.set_yticks(range(1, 18 + 1))
    major_locator = pylab.MaxNLocator(integer=True)
    sc_inactive.axes.xaxis.set_major_locator(major_locator)
    ax.legend(bbox_to_anchor=(0., -.16, 1., .102), loc=1, ncol=2,
              mode="expand", borderaxespad=0.)

    fig.tight_layout()
    plt.savefig(filename, dpi=120, bbox_inches="tight")
    plt.close('all')
def resolve(self, requirement_set):
    # type: (RequirementSet) -> None
    """Resolve what operations need to be done.

    As a side-effect of this method, the packages (and their dependencies)
    are downloaded, unpacked and prepared for installation. This
    preparation is done by ``pip.operations.prepare``.

    Once PyPI has static dependency metadata available, it would be
    possible to move the preparation to become a step separated from
    dependency resolution.
    """
    # Make the wheelhouse.
    if self.preparer.wheel_download_dir:
        ensure_dir(self.preparer.wheel_download_dir)

    # If any top-level requirement has a hash specified, enter
    # hash-checking mode, which requires hashes from all.
    root_reqs = (
        requirement_set.unnamed_requirements +
        list(requirement_set.requirements.values())
    )
    self.require_hashes = (
        requirement_set.require_hashes or
        any(req.has_hash_options for req in root_reqs)
    )

    # Display where the finder is looking for packages.
    locations = self.finder.get_formatted_locations()
    if locations:
        logger.info(locations)

    # Actually prepare the files, and collect any exceptions. Most hash
    # exceptions cannot be checked ahead of time, because
    # req.populate_link() needs to be called before we can make decisions
    # based on link type.
    discovered_reqs = []  # type: List[InstallRequirement]
    hash_errors = HashErrors()
    for req in chain(root_reqs, discovered_reqs):
        try:
            discovered_reqs.extend(self._resolve_one(requirement_set, req))
        except HashError as exc:
            exc.req = req
            hash_errors.append(exc)

    if hash_errors:
        raise hash_errors
def expect_element(__funcname=_qualified_name, **named):
    """Preprocessing decorator that verifies inputs are elements of some
    expected collection.

    Examples
    --------
    >>> @expect_element(x=('a', 'b'))
    ... def foo(x):
    ...     return x.upper()
    ...
    >>> foo('c')  # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
    Traceback (most recent call last):
       ...
    ValueError: ...foo() expected a value in ('a', 'b') for argument 'x',
    but got 'c' instead.

    Notes
    -----
    A special argument, __funcname, can be provided as a string to override
    the function name shown in error messages. This is most often used on
    __init__ or __new__ methods to make errors refer to the class name
    instead of the function name.

    This uses the `in` operator (__contains__) to make the containment
    check. This allows us to use any custom container as long as the
    object supports the container protocol.
    """
    def _expect_element(collection):
        if isinstance(collection, (set, frozenset)):
            # Special-case the error message for (frozen)sets to make it
            # less verbose.
            collection_for_error_message = tuple(sorted(collection))
        else:
            collection_for_error_message = collection
        template = (
            "%(funcname)s() expected a value in {collection} "
            "for argument '%(argname)s', but got %(actual)s instead."
        ).format(collection=collection_for_error_message)
        return make_check(
            ValueError,
            template,
            complement(op.contains(collection)),
            repr,
            funcname=__funcname,
        )
    return preprocess(**valmap(_expect_element, named))
def git_ls_files(filename):
    """Return a boolean value for whether the given file is tracked by git."""
    with chdir(get_root()):
        # https://stackoverflow.com/a/2406813
        result = run_command('git ls-files --error-unmatch {}'.format(filename), capture=True)
        return result.code == 0
def get_all_suppliers(self, params=None):
    """Get all suppliers.

    Iterates over all pages until every element is fetched, so if the
    rate limit is exceeded an exception is thrown and you get nothing.

    :param params: search params
    :return: list
    """
    return self._iterate_through_pages(
        get_function=self.get_suppliers_per_page,
        resource=SUPPLIERS,
        **{'params': params or {}}
    )
def thread_started(self):
    """Poll until a stop or ready event is reported, doing small work via
    :meth:`.WPollingThreadTask._polling_iteration` on each cycle.

    :return: None
    """
    while True:
        if self.check_events() is not False:
            break
        self._polling_iteration()
        self.stop_event().wait(self.polling_timeout())
def post_install_package():
    """Run any functions post install of a matching package.

    Hook functions are in the form post_install_[package name] and are
    defined in a deploy.py file. Will be executed post install_packages
    and upload_etc.
    """
    def _hook_name(package):
        # post_install_<package>, with dots and dashes normalized.
        return ''.join(['post_install_', package.replace('.', '_').replace('-', '_')])

    funcs_run = []

    # Project-level deploy module first.
    try:
        funcs = vars(import_module('.'.join([env.project_package_name, 'deploy'])))
        for package in env.installed_packages[env.host]:
            hook = funcs.get(_hook_name(package))
            if hook:
                hook()
                funcs_run.append(hook)
    except ImportError:
        pass

    # Then per app.
    for app in env.INSTALLED_APPS:
        if app == 'woven':
            continue
        try:
            funcs = vars(import_module('.'.join([app, 'deploy'])))
            for package in env.installed_packages[env.host]:
                hook = funcs.get(_hook_name(package))
                if hook and hook not in funcs_run:
                    hook()
                    funcs_run.append(hook)
        except ImportError:
            pass

    # Run woven last.
    import woven.deploy
    funcs = vars(woven.deploy)
    for package in env.installed_packages[env.host]:
        hook = funcs.get(_hook_name(package))
        if hook and hook not in funcs_run:
            hook()
def add_raw_code(self, string_or_list):
    """Add raw Gmsh code: a single string or a list of strings."""
    if _is_string(string_or_list):
        self._GMSH_CODE.append(string_or_list)
    else:
        assert isinstance(string_or_list, list)
        self._GMSH_CODE.extend(string_or_list)
    return
def all(self):
    """Return the results represented by this Query as a list.

    .. versionchanged:: 0.10.0
        Returns an iterator that lazily loads records instead of fetching
        thousands of records at once.
    """
    # An unset offset (None/0/'') is treated as starting at record 0.
    effective_offset = self._offset if self._offset else 0
    return self.rpc_model.search_read_all(
        self.domain,
        self._order_by,
        self.fields,
        context=self.context,
        offset=effective_offset,
        limit=self._limit,
    )
def SLICE(array, n, position=None):
    """Return a subset of an array (MongoDB ``$slice`` aggregation operator).

    See https://docs.mongodb.com/manual/reference/operator/aggregation/slice/
    for more details.

    :param array: Any valid expression as long as it resolves to an array.
    :param n: Any valid expression as long as it resolves to an integer.
    :param position: Optional. Any valid expression as long as it resolves to an integer.
    :return: Aggregation operator
    """
    # With a position the operator takes [array, position, n]; without it,
    # just [array, n].
    args = [array, n] if position is None else [array, position, n]
    return {'$slice': args}
def deci2sexa(deci, pre=3, trunc=False, lower=None, upper=None, b=False, upper_trim=False):
    """Return the sexagesimal representation of a decimal number.

    Parameters
    ----------
    deci : float
        Decimal number to convert. If `lower` and `upper` are given the
        number is first normalized to that range (see `normalize`).
    pre : int
        The seconds part is rounded to this many decimal places (may be
        negative). Default 3.
    trunc : bool
        If True the seconds part is truncated at `pre` decimal places
        instead of rounded. Default False.
    lower, upper : int
        Optional bounds of the normalization range.
    b : bool
        Normalization mode, forwarded to `normalize`.
    upper_trim : bool
        If normalization bounds are given and the leading sexagesimal part
        equals `upper`, replace it with int(lower) — e.g. turn
        "24 00 00.000" into "00 00 00.000". Default False.

    Returns
    -------
    s : 4 element tuple; (int, int, int, float)
        (sign, hours/degrees, minutes, seconds). Sign is 1 or -1 and
        applies to the whole angle; the three parts are always positive.

    Examples
    --------
    >>> deci2sexa(-11.2345678)
    (-1, 11, 14, 4.444)
    >>> x = 23 + 59/60.0 + 59.99999/3600.0
    >>> deci2sexa(x, pre=3, lower=0, upper=24, upper_trim=True)
    (1, 0, 0, 0.0)
    """
    if lower is not None and upper is not None:
        deci = normalize(deci, lower=lower, upper=upper, b=b)

    sign = -1 if deci < 0 else 1
    deci = abs(deci)

    hd, frac = divmod(deci, 1)
    mm, frac = divmod(frac * 60.0, 1)
    seconds = frac * 60.0

    # Work in units scaled by 10**pre so the 60-second carry test below is
    # an exact integer comparison.
    scale = 10 ** pre
    if trunc:
        ss = int(divmod(seconds * scale, 1)[0])
    else:
        ss = int(round(seconds * scale, 0))

    # Carry: seconds rolling over to a full minute, minutes to a full hour.
    if ss == 60 * scale:
        mm += 1
        ss = 0
    if mm == 60:
        hd += 1
        mm = 0

    hd = int(hd)
    mm = int(mm)

    if lower is not None and upper is not None and upper_trim:
        # For example 24h0m0s => 0h0m0s.
        if hd == upper:
            hd = int(lower)

    # A value that is exactly zero has no meaningful sign.
    if hd == 0 and mm == 0 and ss == 0:
        sign = 1

    ss /= float(scale)
    # hd and mm are plain ints; ss carries the fractional seconds.
    return (sign, hd, mm, ss)
def describe_addresses(self, xml_bytes):
    """Parse the XML returned by the C{DescribeAddresses} function.

    @param xml_bytes: XML bytes with a C{DescribeAddressesResponse} root
        element.
    @return: a C{list} of L{tuple} of (publicIp, instanceId).
    """
    root = XML(xml_bytes)
    # Each child of addressesSet describes one elastic IP association.
    return [
        (item.findtext("publicIp"), item.findtext("instanceId"))
        for item in root.find("addressesSet")
    ]
def sys_get_current_resolution() -> Tuple[int, int]:
    """Return the current resolution as (width, height).

    Returns:
        Tuple[int, int]: The current resolution.
    """
    # libtcod writes the result through these C int out-parameters.
    width = ffi.new("int *")
    height = ffi.new("int *")
    lib.TCOD_sys_get_current_resolution(width, height)
    return width[0], height[0]
def get_smart_contract(self, hex_contract_address: str, is_full: bool = False) -> dict:
    """Get the information of a smart contract from its hexadecimal hash value.

    :param hex_contract_address: str, a hexadecimal hash value.
    :param is_full: return the whole JSON-RPC response instead of only its 'result' entry.
    :return: the information of smart contract in dictionary form.
    """
    # Validate type first, then length (40 hex characters).
    if not isinstance(hex_contract_address, str):
        raise SDKException(ErrorCode.param_err('a hexadecimal contract address is required.'))
    if len(hex_contract_address) != 40:
        raise SDKException(ErrorCode.param_err('the length of the contract address should be 40 bytes.'))
    payload = self.generate_json_rpc_payload(RpcMethod.GET_SMART_CONTRACT, [hex_contract_address, 1])
    response = self.__post(self.__url, payload)
    return response if is_full else response['result']
def getTempDirectory(rootDir=None):
    """Return a temporary directory that must be manually deleted.

    rootDir will be created if it does not exist.

    :param rootDir: directory under which to create the temp directory, or
        None to use the system default location (``tempfile.mkdtemp``).
    :return: absolute path of the newly created directory.
    """
    if rootDir is None:
        return tempfile.mkdtemp()
    if not os.path.exists(rootDir):
        try:
            os.makedirs(rootDir)
        except OSError:
            # Maybe it got created between the test and the makedirs call?
            pass
    # Keep trying names until we find one that doesn't exist. If one does
    # exist, don't nest inside it, because someone else may be using it
    # for something.
    while True:
        tmpDir = os.path.join(rootDir, "tmp_" + getRandomAlphaNumericString())
        if not os.path.exists(tmpDir):
            break
    os.mkdir(tmpDir)
    # Ensure everyone has access to the file. NOTE: the original used the
    # Python 2 octal literal 0777, which is a SyntaxError on Python 3;
    # 0o777 is the portable spelling of the same mode.
    os.chmod(tmpDir, 0o777)
    return tmpDir
def _thumbnail_div(target_dir, src_dir, fname, snippet, is_backref=False, check=True):
    """Generate RST that places one thumbnail in a gallery."""
    thumb_path = os.path.join(target_dir, 'images', 'thumb',
                              'sphx_glr_%s_thumb.png' % fname[:-3])
    thumb, _ = _find_image_ext(thumb_path)
    if check and not os.path.isfile(thumb):
        # This means we have done something wrong in creating our thumbnail!
        raise RuntimeError('Could not find internal sphinx-gallery thumbnail '
                           'file:\n%s' % (thumb,))
    # Inside rst files forward slash defines paths
    thumb = os.path.relpath(thumb, src_dir).replace(os.sep, "/")
    full_dir = os.path.relpath(target_dir, src_dir)
    ref_name = os.path.join(full_dir, fname).replace(os.path.sep, '_')
    template = BACKREF_THUMBNAIL_TEMPLATE if is_backref else THUMBNAIL_TEMPLATE
    return template.format(snippet=escape(snippet), thumbnail=thumb, ref_name=ref_name)
def updateReplicationMetadata(self, pid, replicaMetadata, serialVersion, vendorSpecific=None):
    """See Also: updateReplicationMetadataResponse()

    Args:
        pid: identifier of the object whose replication metadata is updated.
        replicaMetadata: replacement replica metadata.
        serialVersion: serial version of the system metadata being updated.
        vendorSpecific: optional vendor-specific extensions.

    Returns:
        Result of parsing the boolean response.
    """
    return self._read_boolean_response(
        self.updateReplicationMetadataResponse(pid, replicaMetadata, serialVersion, vendorSpecific)
    )
def get_one(self, section, key):
    """Retrieve the first value for a section/key.

    Raises:
        KeyError: If no line matches the given section/key.
    """
    # Return the first yielded value; an empty iterable means no match.
    for value in self.get(section, key):
        return value
    raise KeyError("Key %s not found in %s" % (key, section))
def lookup(self, key, name):
    """Look for values in registry in Microsoft software registry.

    Parameters
    ----------
    key : str
        Registry key path where look.
    name : str
        Value name to find.

    Return
    ------
    str: value
        Returns None implicitly if the value is found in no hive.
    """
    KEY_READ = winreg.KEY_READ
    openkey = winreg.OpenKey
    ms = self.microsoft
    # Try each registry hive in turn; the first hive containing the value wins.
    for hkey in self.HKEYS:
        try:
            bkey = openkey(hkey, ms(key), 0, KEY_READ)
        except (OSError, IOError):
            # On non-x86 hosts, retry under the 32-bit registry view
            # (ms(key, True)) before giving up on this hive.
            if not self.pi.current_is_x86():
                try:
                    bkey = openkey(hkey, ms(key, True), 0, KEY_READ)
                except (OSError, IOError):
                    continue
            else:
                continue
        try:
            return winreg.QueryValueEx(bkey, name)[0]
        except (OSError, IOError):
            # Key opened but the value is absent: fall through to next hive.
            pass
def max(self):
    """Return the maximum element or (element-based computation)."""
    # Dictionaries have no meaningful maximum; mirror the library's
    # convention of returning a wrapped empty list in that case.
    if self._clean.isDict():
        return self._wrap([])
    return self._wrap(max(self.obj))
def cas(self, key, value, *, flags=None, index):
    """Set *key* to *value* with check-and-set semantics.

    Parameters:
        key (str): Key to set
        value (Payload): Value to set, encoded according to *flags*
        index (ObjectIndex): Index ID
        flags (int): Flags to set with value

    The key will only be set if its current modify index matches the
    supplied index.
    """
    encoded = encode_value(value, flags, base64=True).decode("utf-8")
    operation = {
        "Verb": "cas",
        "Key": key,
        "Value": encoded,
        "Flags": flags,
        "Index": extract_attr(index, keys=["ModifyIndex", "Index"]),
    }
    self.append(operation)
    return self
def _error_msg_iface ( iface , option , expected ) :
'''Build an appropriate error message from a given option and
a list of expected values .''' | msg = 'Invalid option -- Interface: {0}, Option: {1}, Expected: [{2}]'
return msg . format ( iface , option , '|' . join ( str ( e ) for e in expected ) ) |
def install_theme(theme=None, monofont=None, monosize=11, nbfont=None,
                  nbfontsize=13, tcfont=None, tcfontsize=13, dffontsize=93,
                  outfontsize=85, mathfontsize=100, margins='auto',
                  cellwidth='980', lineheight=170, cursorwidth=2,
                  cursorcolor='default', altprompt=False, altmd=False,
                  altout=False, hideprompt=False, vimext=False, toolbar=False,
                  nbname=False, kernellogo=False, dfonts=False):
    """Install theme to jupyter_customcss with specified font, fontsize,
    md layout, and toolbar pref.
    """
    # get working directory (restored at the end; presumably the stylefx
    # helpers may chdir while compiling less -- TODO confirm)
    wkdir = os.path.abspath('./')
    # Reset to defaults and make sure the custom-css directory tree exists.
    stylefx.reset_default(False)
    stylefx.check_directories()
    # Banner comment placed at the top of the generated .less source.
    doc = '\nConcatenated font imports, .less styles, & custom variables\n'
    s = '*' * 65
    style_less = '\n'.join(['/*', s, s, doc, s, s, '*/'])
    style_less += '\n\n\n'
    style_less += '/* Import Notebook, Markdown, & Code Fonts */\n'
    # initialize style_less & style_css
    style_less = stylefx.set_font_properties(
        style_less=style_less, monofont=monofont, monosize=monosize,
        nbfont=nbfont, nbfontsize=nbfontsize, tcfont=tcfont,
        tcfontsize=tcfontsize, dffontsize=dffontsize,
        outfontsize=outfontsize, mathfontsize=mathfontsize, dfonts=dfonts)
    if theme is not None:
        # define some vars for cell layout
        cursorcolor = stylefx.get_colors(theme=theme, c=cursorcolor)
        style_less = stylefx.style_layout(
            style_less, theme=theme, cellwidth=cellwidth, margins=margins,
            lineheight=lineheight, altprompt=altprompt, altmd=altmd,
            altout=altout, hideprompt=hideprompt, cursorwidth=cursorwidth,
            cursorcolor=cursorcolor, vimext=vimext, toolbar=toolbar,
            nbname=nbname, kernellogo=kernellogo)
    # compile tempfile.less to css code and append to style_css
    style_css = stylefx.less_to_css(style_less)
    # append mathjax css & script to style_css
    style_css = stylefx.set_mathjax_style(style_css, mathfontsize)
    # install style_css to .jupyter/custom/custom.css
    stylefx.write_final_css(style_css)
    # change back to original working directory
    os.chdir(wkdir)
def get_or_add_tx_rich(self):
    """Return the `c:tx[c:rich]` subtree, newly created if not present."""
    tx_elm = self.get_or_add_tx()
    # Drop any string reference so rich text becomes the content of <c:tx>.
    tx_elm._remove_strRef()
    tx_elm.get_or_add_rich()
    return tx_elm
def get_magic_info(self, child_type, parent_type=None, attr='er',
                   filename=None, sort_by_file_type=False):
    """Read er_*.txt or pmag_*.txt file.

    If no filename is provided, use er_* or pmag_* file in WD.
    If sort_by_file_type, use file header to determine child, parent types,
    instead of passing those in as arguments.
    Once file is open, parse information into dictionaries for each item.
    If the item does not yet exist, add it to the builder data object.
    Then add info to the item object as object.er_data or object.pmag_data.
    """
    parent = ''
    grandparent_type = None
    magic_name = 'er_' + child_type + '_name'
    expected_item_type = child_type
    # Resolve the file to read: either the conventional name in WD, or the
    # explicit filename (whose prefix then overrides `attr`).
    if not filename:
        short_filename = attr + '_' + child_type + 's.txt'
        magic_file = os.path.join(self.WD, short_filename)
    else:
        short_filename = os.path.split(filename)[1]
        magic_file = filename
        attr = short_filename.split('_')[0]
    print('-I- Attempting to read {}'.format(magic_file))
    if not os.path.isfile(magic_file):
        print('-W- Could not find {}'.format(magic_file))
        return False
    # get the data from the appropriate .txt file
    data_dict, header, file_type = self.read_magic_file(magic_file, magic_name, sort_by_file_type=sort_by_file_type)
    if not data_dict:
        print('-W- Could not read in file: {}.\n Make sure it is a MagIC-format file'.format(magic_file))
        return False
    item_type = file_type.split('_')[1][:-1]
    # if a file was named wrong, use the type of data that is actually in that file
    if item_type != expected_item_type:
        print('-W- Expected data of type: {} but instead got: {}'.format(expected_item_type, item_type))
        print('-W- Using type: {}'.format(item_type))
        if item_type == 'age':
            self.get_age_info(filename)
            return 'age'
        # Re-derive child/parent/grandparent types from the actual file type.
        child_type = item_type
        magic_name = 'er_' + child_type + '_name'
        ind = self.ancestry.index(child_type)
        parent_type = self.ancestry[ind + 1]
        if item_type != 'location':
            grandparent_type = self.ancestry[ind + 2]
        else:
            grandparent_type = ''
    if not grandparent_type:
        ind = self.ancestry.index(child_type)
        try:
            grandparent_type = self.ancestry[ind + 2]
        except IndexError:
            # child_type is at the top of the ancestry: no grandparent level.
            grandparent_type = None
    child_list, child_class, child_constructor = self.data_lists[child_type]
    if parent_type:
        parent_list, parent_class, parent_constructor = self.data_lists[parent_type]
    else:
        parent_list, parent_name = None, None
    for child_name in data_dict:
        # if there is a possible parent, try to find parent object in the data model
        if parent_type:
            parent_name = data_dict[child_name].get('er_' + parent_type + '_name', '')
            parent = self.find_by_name(parent_name, parent_list)
            if parent:
                remove_dict_headers(parent.er_data)
                remove_dict_headers(parent.pmag_data)
        # if there should be a parent
        # (meaning there is a name for it and the child object should have a parent)
        # but none exists in the data model, go ahead and create that parent object.
        if parent_name and parent_type and not parent:
            # try to get grandparent
            grandparent = None
            grandparent_name = None
            if grandparent_type:
                grandparent_list, grandparent_class, grandparent_constructor = self.data_lists[grandparent_type]
                grandparent_name = data_dict[child_name]['er_' + grandparent_type + '_name']
                grandparent = self.find_by_name(grandparent_name, grandparent_list)
                if grandparent_name and not grandparent:
                    grandparent = grandparent_constructor(grandparent_name, None)
            parent = parent_constructor(parent_name, grandparent_name)
        # otherwise there is no parent and none can be created, so use an empty string
        elif not parent:
            parent_name = None
            parent = ''
        child = self.find_by_name(child_name, child_list)
        # if the child object does not exist yet in the data model
        if not child:
            child = child_constructor(child_name, parent_name)
        else:
            # bind parent to child and child to parent
            if parent:
                child.set_parent(parent)
            if parent and (child not in parent.children):
                parent.add_child(child)
        # add in the appropriate data dictionary to the child object
        if attr == 'er':
            self.update_methods[child_type](child_name, child_name, parent_name, new_er_data=data_dict[child_name])
        else:
            self.update_methods[child_type](child_name, child_name, parent_name, new_pmag_data=data_dict[child_name])
        # old way
        # child.__setattr__(attr + '_data', data_dict[child_name])
        remove_dict_headers(child.er_data)
        remove_dict_headers(child.pmag_data)
    return child_type
def get_object(table, id=None, condition=None, cache=False, fields=None,
               use_local=False, engine_name=None, session=None):
    """Get obj in Local.object_caches first and also use get(cache=True) function if
    not found in object_caches.
    """
    from uliweb import functions, settings

    model = get_model(table, engine_name)
    # if id is an object of Model, so get the real id value
    if isinstance(id, Model):
        return id
    if cache:
        if use_local:
            # Consult the session-local cache before hitting the shared cache.
            s = get_session(session)
            key = get_object_id(s.engine_name, model.tablename, id)
            value = s.get_local_cache(key)
            if value:
                return value
        obj = model.get(id, condition=condition, fields=fields, cache=True)
        if use_local:
            # Store the freshly fetched object back in the session-local
            # cache (presumably get_local_cache(key, obj) writes through --
            # TODO confirm against the session implementation).
            value = s.get_local_cache(key, obj)
    else:
        obj = model.get(id, condition=condition, fields=fields)
    return obj
def enabled(name):
    '''Ensure an Apache conf is enabled.

    name
        Name of the Apache conf
    '''
    ret = {'name': name, 'result': True, 'comment': '', 'changes': {}}
    # Nothing to do when the conf is already enabled.
    if __salt__['apache.check_conf_enabled'](name):
        ret['comment'] = '{0} already enabled.'.format(name)
        return ret
    # In test mode, only report what would change.
    if __opts__['test']:
        ret['comment'] = 'Apache conf {0} is set to be enabled.'.format(name)
        ret['changes']['old'] = None
        ret['changes']['new'] = name
        ret['result'] = None
        return ret
    status = __salt__['apache.a2enconf'](name)['Status']
    if isinstance(status, six.string_types) and 'enabled' in status:
        ret['result'] = True
        ret['changes']['old'] = None
        ret['changes']['new'] = name
    else:
        ret['result'] = False
        ret['comment'] = 'Failed to enable {0} Apache conf'.format(name)
        if isinstance(status, six.string_types):
            ret['comment'] = ret['comment'] + ' ({0})'.format(status)
    return ret
def _dpi ( self , density ) :
"""Return dots per inch corresponding to * density * value .""" | if self . _density_units == 1 :
dpi = density
elif self . _density_units == 2 :
dpi = int ( round ( density * 2.54 ) )
else :
dpi = 72
return dpi |
def seek(self, pos):
    """Move to new input file position. If position is negative or out of file, raise Exception."""
    # pos == file_size is allowed (seek to end-of-file).
    if 0 <= pos <= self.file_size:
        self.file.seek(pos)
    else:
        raise Exception("Unable to seek - position out of file!")
def get_all(self, start=0, count=-1, filter='', fields='', query='', sort='', view=''):
    """Get a list of Deployment Servers, optionally sorted and filtered,
    constrained by the start and count parameters.

    Args:
        start: First item to return, 0-based. Default 0 (first available item).
        count: Number of resources to return; -1 requests all items. The
            actual number returned may differ if start + count exceeds the
            total number of items.
        filter (list or str): General filter/query string narrowing the
            returned list. Default: no filter.
        fields: Which fields should be returned in the result set.
        query: General query string narrowing the returned resources.
            Default: no query.
        sort: Sort order of the returned data set; defaults to create time,
            oldest entry first.
        view: Name of a predefined view selecting a subset of attributes.
            Default view is expand (all attributes/elements).

    Returns:
        list: Os Deployment Servers
    """
    return self._client.get_all(
        start,
        count,
        filter=filter,
        sort=sort,
        query=query,
        fields=fields,
        view=view,
    )
def store(self, key, value):
    """Add new record to cache.

    key: entry key
    value: data of entry
    """
    # Delegate to the backing client; entries expire after self.timeout.
    self.client.set(key, value, time=self.timeout)
def add_dynamics_2(self, component, runnable, regime, dynamics):
    """Adds dynamics to a runnable component based on the dynamics
    specifications in the component model.

    This method builds dynamics dependent on child components.

    @param component: Component model containing dynamics specifications.
    @type component: lems.model.component.FatComponent

    @param runnable: Runnable component to which dynamics is to be added.
    @type runnable: lems.sim.runnable.Runnable

    @param regime: The dynamics regime to be used to generate
    dynamics code in the runnable component.
    @type regime: lems.model.dynamics.Regime

    @param dynamics: Shared dynamics specifications.
    @type dynamics: lems.model.dynamics.Regime

    @raise SimBuildError: Raised when a time derivative expression refers
    to an undefined variable.

    @raise SimBuildError: Raised when there are invalid time
    specifications for the <Run> statement.

    @raise SimBuildError: Raised when the component reference for <Run>
    cannot be resolved.
    """
    # Methods for a named regime get a suffixed name; the main (unnamed)
    # regime contributes unsuffixed methods.
    if isinstance(regime, Dynamics) or regime.name == '':
        suffix = ''
    else:
        suffix = '_regime_' + regime.name
    # Process kinetic schemes
    ks_code = []
    for ks in regime.kinetic_schemes:
        # NOTE: this unconditional raise makes the remainder of the loop
        # body unreachable; kinetic schemes are deliberately disabled.
        raise NotImplementedError("KineticScheme element is not stable in PyLEMS yet, see https://github.com/LEMS/pylems/issues/15")
        try:
            ### nodes = {node.id: node for node in runnable.__dict__[ks.nodes]}
            nodes = {}
            for node in runnable.__dict__[ks.nodes]:
                nodes[node.id] = node
            edges = runnable.__dict__[ks.edges]
            # Generate forward/reverse rate update statements per edge.
            for edge in edges:
                from_ = edge.__dict__[ks.edge_source]
                to = edge.__dict__[ks.edge_target]
                ks_code += [('self.{0}.{2} += dt * (-self.{3}.{4} * self.{0}.{2}_shadow'
                             ' + self.{3}.{5} * self.{1}.{2}_shadow)').format(from_, to, ks.state_variable, edge.id, ks.forward_rate, ks.reverse_rate)]
                ks_code += [('self.{1}.{2} += dt * (self.{3}.{4} * self.{0}.{2}_shadow'
                             ' - self.{3}.{5} * self.{1}.{2}_shadow)').format(from_, to, ks.state_variable, edge.id, ks.forward_rate, ks.reverse_rate)]
            # Initialise state uniformly and normalise so the states sum to 1.
            ks_code += ['sum = 0']
            for node in nodes:
                nodes[node].__dict__[ks.state_variable] = 1.0 / len(nodes)
                nodes[node].__dict__[ks.state_variable + '_shadow'] = 1.0 / len(nodes)
                ks_code += ['sum += self.{0}.{1}'.format(node, ks.state_variable)]
            for node in nodes:
                ks_code += ['self.{0}.{1} /= sum'.format(node, ks.state_variable)]
            # Refresh shadow copies after normalisation.
            for node in nodes:
                ks_code += [('self.{0}.{1}_shadow = '
                             'self.{0}.{1}').format(node, ks.state_variable)]
        except Exception as e:
            raise SimBuildError(("Unable to construct kinetic scheme '{0}' "
                                 "for component '{1}' - {2}").format(ks.name, component.id, str(e)))
    runnable.add_method('update_kinetic_scheme' + suffix, ['self', 'dt'], ks_code)
def run(self):
    """Main function: generate ctp_data_type.py from the CTP C++ header."""
    # try:
    self.fenum.write('\n')
    # Translate ThostFtdcUserApiDataType.h line by line into Python
    # type definitions.
    self.fcpp = open(os.path.join(os.path.abspath(self.ctp_dir), 'ThostFtdcUserApiDataType.h'), 'r')
    for idx, line in enumerate(self.fcpp):
        l = self.process_line(idx, line)
        self.f_data_type.write(l)
    self.fcpp.close()
    self.f_data_type.close()
    self.fenum.close()
    print('ctp_data_type.py生成过程完成')  # "ctp_data_type.py generation complete"
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.