signature stringlengths 29 44.1k | implementation stringlengths 0 85.2k |
|---|---|
def insert(self, i, v):
    """Insert an idfobject (bunch) at position *i* of list1, and its raw
    object at the same position of list2."""
    self.list1.insert(i, v)
    self.list2.insert(i, v.obj)
    if isinstance(v, EpBunch):
        # Bunches keep a back-reference to their owning IDF.
        v.theidf = self.theidf
def get_units_from_category(self, category) -> typing.Iterator['BaseUnit']:
    """Yield every unit of this coalition that belongs to *category*.

    Args:
        category: unit category

    Returns:
        generator over all units of the given category in this coalition
    """
    Mission.validator_group_category.validate(category, 'group category')
    yield from (unit for unit in self.units if unit.group_category == category)
def sqlite3_find_tool():
    """Find the sqlite3 binary on PATH.

    Returns the path to the first executable regular file named ``sqlite3``
    found in PATH (falling back to ``/usr/local/bin:/usr/bin:/bin`` when
    PATH is unset), or ``None`` when no usable binary exists.
    """
    path = os.environ.get("PATH", None)
    if path is None:
        # Conventional fallback search path when PATH is unset.
        path = "/usr/local/bin:/usr/bin:/bin"
    for pathdir in path.split(":"):
        if len(pathdir) == 0:
            continue
        candidate = os.path.join(pathdir, 'sqlite3')
        # Candidate must exist as a regular, executable file.
        if os.path.isfile(candidate) and os.access(candidate, os.X_OK):
            return candidate
    # BUG FIX: the original left the last (nonexistent) candidate in
    # sqlite3_path when the loop exhausted, so its `is None` failure check
    # never fired and an invalid path was returned to the caller.
    log.error("Could not find sqlite3 binary")
    return None
def send(self, request, stem=None):
    """Prepare and send a request.

    Arguments:
        request: a Request object that is not yet prepared
        stem: optional path fragment appended to the request's URL

    Returns:
        The response to the request.
    """
    if stem is not None:
        request.url = request.url + "/" + stem.lstrip("/")
    prepared = self.session.prepare_request(request)
    send_kwargs = self.session.merge_environment_settings(
        url=prepared.url, proxies={}, stream=None, verify=None, cert=None)
    return self.session.send(prepared, **send_kwargs)
def route(self):
    """Select the L2 address by routing on the (first) destination."""
    destination = self.dst
    if isinstance(destination, Gen):
        # Destination generators: route on their first element.
        destination = next(iter(destination))
    return conf.route6.route(destination)
def setOverlayFlag(self, ulOverlayHandle, eOverlayFlag, bEnabled):
    """Specify a flag setting for a given overlay via the native function table."""
    return self.function_table.setOverlayFlag(ulOverlayHandle, eOverlayFlag, bEnabled)
def run_async(coro: Coroutine) -> None:
    """Simple async runner that cleans up DB connections on exit.

    This is meant for simple scripts.

    Usage::

        from tortoise import Tortoise, run_async

        async def do_stuff():
            await Tortoise.init(
                db_url='sqlite://db.sqlite3',
                models={'models': ['app.models']}
            )

        run_async(do_stuff())
    """
    loop = asyncio.get_event_loop()
    try:
        loop.run_until_complete(coro)
    finally:
        # Always release DB connections, even when the coroutine raises.
        loop.run_until_complete(Tortoise.close_connections())
def expect_column_values_to_be_in_type_list(self, column, type_list, mostly=None, result_format=None, include_config=False, catch_exceptions=None, meta=None):
    """Expect each column entry to match a list of specified data types.

    expect_column_values_to_be_in_type_list is a
    :func:`column_map_expectation <great_expectations.data_asset.dataset.Dataset.column_map_expectation>`.

    Args:
        column (str): The column name.
        type_list (list of str): A list of strings representing the data type
            that each column should have as entries.  For example,
            "double integer" refers to an integer with double precision.

    Keyword Args:
        mostly (None or a float between 0 and 1): Return `"success": True` if
            at least mostly percent of values match the expectation.  For more
            detail, see :ref:`mostly`.

    Other Parameters:
        result_format (str or None): Which output mode to use:
            `BOOLEAN_ONLY`, `BASIC`, `COMPLETE`, or `SUMMARY`.
            For more detail, see :ref:`result_format <result_format>`.
        include_config (boolean): If True, then include the expectation config
            as part of the result object.  For more detail, see
            :ref:`include_config`.
        catch_exceptions (boolean or None): If True, then catch exceptions and
            include them as part of the result object.  For more detail, see
            :ref:`catch_exceptions`.
        meta (dict or None): A JSON-serializable dictionary (nesting allowed)
            that will be included in the output without modification.  For
            more detail, see :ref:`meta`.

    Returns:
        A JSON-serializable expectation result object.  Exact fields vary
        depending on the values passed to :ref:`result_format <result_format>`
        and :ref:`include_config`, :ref:`catch_exceptions`, and :ref:`meta`.

    Warning:
        expect_column_values_to_be_in_type_list is slated for major changes in
        future versions of great_expectations.  As of v0.3, great_expectations
        is exclusively based on pandas, which handles typing in its own
        peculiar way.  Future versions will allow for Datasets in SQL, spark,
        etc., which is expected to bring breaking changes to typing.

    See also:
        expect_column_values_to_be_of_type
    """
    # Abstract: concrete Dataset subclasses must implement this expectation.
    raise NotImplementedError
def get_id(self):
    """Return the unique identifier of this container, inspecting lazily.

    :return: str
    """
    # FIXME: provide a better error message when key is not defined
    if self._id is None:
        self._id = self.inspect(refresh=False)["Id"]
    return self._id
def emit_java_headers(target, source, env):
    """Create and return lists of Java stub header files that will
    be created from a set of class files."""
    class_suffix = env.get('JAVACLASSSUFFIX', '.class')
    classdir = env.get('JAVACLASSDIR')
    if not classdir:
        # Derive the class dir from the first source node, if there is one.
        try:
            s = source[0]
        except IndexError:
            classdir = '.'
        else:
            try:
                classdir = s.attributes.java_classdir
            except AttributeError:
                classdir = '.'
    classdir = env.Dir(classdir).rdir()
    if str(classdir) == '.':
        c_ = None
    else:
        c_ = str(classdir) + os.sep
    slist = []
    for src in source:
        try:
            classname = src.attributes.java_classname
        except AttributeError:
            classname = str(src)
        # Strip the class-dir prefix and the .class suffix to recover the
        # plain (dotted) class name.
        if c_ and classname[:len(c_)] == c_:
            classname = classname[len(c_):]
        if class_suffix and classname[-len(class_suffix):] == class_suffix:
            classname = classname[:-len(class_suffix)]
        classname = SCons.Tool.javac.classname(classname)
        s = src.rfile()
        s.attributes.java_classname = classname
        slist.append(s)
    s = source[0].rfile()
    if not hasattr(s.attributes, 'java_classdir'):
        s.attributes.java_classdir = classdir
    if target[0].__class__ is SCons.Node.FS.File:
        tlist = target
    else:
        # Target is a directory: force the node into a Dir and synthesize
        # one header file per class (dots become underscores).
        if not isinstance(target[0], SCons.Node.FS.Dir):
            target[0].__class__ = SCons.Node.FS.Dir
            target[0]._morph()
        tlist = []
        for s in source:
            fname = s.attributes.java_classname.replace('.', '_') + '.h'
            t = target[0].File(fname)
            t.attributes.java_lookupdir = target[0]
            tlist.append(t)
    return tlist, source
def set_window_refresh_callback(window, cbfun):
    """Sets the refresh callback for the specified window.

    Wrapper for:
        GLFWwindowrefreshfun glfwSetWindowRefreshCallback(GLFWwindow* window, GLFWwindowrefreshfun cbfun);

    Returns the previously registered Python callback, if any.
    """
    # Key the repository on the window's raw address so per-window callbacks
    # can be looked up again later.
    window_addr = ctypes.cast(ctypes.pointer(window), ctypes.POINTER(ctypes.c_long)).contents.value
    if window_addr in _window_refresh_callback_repository:
        previous_callback = _window_refresh_callback_repository[window_addr]
    else:
        previous_callback = None
    if cbfun is None:
        cbfun = 0  # NULL-equivalent clears the callback on the GLFW side
    c_cbfun = _GLFWwindowrefreshfun(cbfun)
    # Store both the Python callable and its ctypes wrapper: the wrapper
    # must be kept alive while GLFW holds a pointer to it, or it would be
    # garbage collected and crash on the next refresh event.
    _window_refresh_callback_repository[window_addr] = (cbfun, c_cbfun)
    cbfun = c_cbfun
    _glfw.glfwSetWindowRefreshCallback(window, cbfun)
    if previous_callback is not None and previous_callback[0] != 0:
        return previous_callback[0]
def catch_boto_400(self, message, heading=None, document=None, **info):
    """Turn a BotoServerError 400 into a BadAmazon.

    Context-manager body: any ClientError whose HTTP status starts with "4"
    raised inside the ``with`` block is re-raised as BadAmazon; other
    ClientErrors propagate unchanged.  ``heading``/``document`` are printed
    for diagnostics before re-raising.
    """
    try:
        yield
    except ClientError as error:
        if str(error.response["ResponseMetadata"]["HTTPStatusCode"]).startswith("4"):
            if heading or document:
                print("=" * 80)
                if heading:
                    print(heading)
                print(document)
                print("=" * 80)
            error_message = error.response["Error"]["Message"]
            raise BadAmazon(message, error_message=error_message, error_code=error.response["ResponseMetadata"]["HTTPStatusCode"], **info)
        else:
            # Not a 4xx: let the original ClientError propagate untouched.
            raise
def add_arguments(self, parser):
    """Register this command's CLI arguments on *parser*."""
    parser.add_argument(self._source_param, **self._source_kwargs)
    base_help = ('Supply the base currency as code or a settings variable name. '
                 'The default is taken from settings CURRENCIES_BASE or SHOP_DEFAULT_CURRENCY, '
                 'or the db, otherwise USD')
    parser.add_argument('--base', '-b', action='store', help=base_help)
def delete(gandi, background, force, resource):
    """Delete a virtual machine.

    Resource can be a Hostname or an ID; several may be supplied.  Running
    VMs are stopped before deletion.  Returns the delete operations (only
    reached when running in the background, matching the original flow).
    """
    output_keys = ['id', 'type', 'step']
    # De-duplicate and order the requested resources.
    resource = sorted(tuple(set(resource)))
    possible_resources = gandi.iaas.resource_list()
    for item in resource:
        # Refuse the whole batch as soon as one resource is unknown.
        if item not in possible_resources:
            gandi.echo('Sorry virtual machine %s does not exist' % item)
            gandi.echo('Please use one of the following: %s' % possible_resources)
            return
    if not force:
        instance_info = "'%s'" % ', '.join(resource)
        proceed = click.confirm("Are you sure to delete Virtual Machine %s?" % instance_info)
        if not proceed:
            return
    iaas_list = gandi.iaas.list()
    stop_opers = []
    for item in resource:
        # Find the VM by hostname in the cached list; fall back to an info
        # call (e.g. when the item is an ID rather than a hostname).
        vm = next((vm for (index, vm) in enumerate(iaas_list) if vm['hostname'] == item), gandi.iaas.info(item))
        if vm['state'] == 'running':
            if background:
                # A running VM must be stopped synchronously first, so the
                # background option is forcibly disabled.
                gandi.echo('Virtual machine not stopped, background option ' 'disabled')
                background = False
            oper = gandi.iaas.stop(item, background)
            if not background:
                stop_opers.append(oper)
    opers = gandi.iaas.delete(resource, background)
    if background:
        for oper in stop_opers + opers:
            output_generic(gandi, oper, output_keys)
    return opers
def local_thickness(im, sizes=25, mode='hybrid'):
    r"""For each voxel, calculate the radius of the largest sphere that both
    engulfs the voxel and fits entirely within the foreground.

    This is not the same as a simple distance transform, which finds the
    largest sphere that could be *centered* on each voxel.

    Parameters
    ----------
    im : array_like
        A binary image with the phase of interest set to True.
    sizes : array_like or scalar
        The sizes to invade.  If a list of values is provided they are used
        directly.  If a scalar is provided then that number of points
        spanning the min and max of the distance transform are used.
    mode : string
        Controls which method is used to compute the result.  Options are
        'hybrid' (default), 'dt', and 'mio'; they produce the same result
        and differ mainly in speed, which is system and installation
        specific.  See ``porosimetry`` for the full description of each.

    Returns
    -------
    image : ND-array
        A copy of ``im`` with the pore size values in each voxel.

    See Also
    --------
    porosimetry

    Notes
    -----
    The term *foreground* is used since this function can be applied to both
    pore space or the solid, whichever is set to ``True``.  This function is
    identical to ``porosimetry`` with ``access_limited`` set to ``False``.
    """
    # Delegate to porosimetry without access limitations.
    im_new = porosimetry(im=im, sizes=sizes, access_limited=False, mode=mode)
    return im_new
def _get_input_steps ( self ) :
"""Search and return all steps that have no parents . These are the steps that are get the input data .""" | input_steps = [ ]
for step in self . steps_sorted :
parent_steps = self . _parent_steps ( step )
if len ( parent_steps ) == 0 :
input_steps . append ( step )
return input_steps |
async def _default_expect_handler(request: Request) -> None:
    """Default handler for the ``Expect`` header.

    Sends "100 Continue" to the client for HTTP/1.1 requests; raises
    HTTPExpectationFailed when the header value is not "100-continue".
    """
    expect = request.headers.get(hdrs.EXPECT)
    # NOTE(review): assumes the Expect header is present when this handler
    # runs (otherwise `expect` is None and .lower() would raise) — confirm
    # with the dispatching caller.
    if request.version == HttpVersion11:
        if expect.lower() == "100-continue":
            await request.writer.write(b"HTTP/1.1 100 Continue\r\n\r\n")
        else:
            raise HTTPExpectationFailed(text="Unknown Expect: %s" % expect)
def setSr(self, fs):
    """Propagate the input-operation sample rate to the trace and stim plots."""
    for plot in (self.tracePlot, self.stimPlot):
        plot.setSr(fs)
def drop_prefix(strings):
    """Strip the longest common prefix from a collection of strings.

    Extensions (everything after the first '.') are removed first.  A single
    input is reduced to its basename.  If stripping the prefix would make
    two entries collide, the original strings are returned unchanged.
    """
    bases = [name.split(".", 2)[0] for name in strings]
    if len(bases) == 1:
        return [os.path.basename(bases[0])]
    common_len = len(os.path.commonprefix(bases))
    trimmed = [base[common_len:] for base in bases]
    # A collision means the prefix carried distinguishing information;
    # fall back to the untouched originals in that case.
    if len(set(trimmed)) != len(strings):
        return strings
    return trimmed
def has_foreign_key(self, name):
    """Return whether this table has a foreign key constraint with the given name.

    :param name: The constraint name
    :type name: str
    :rtype: bool
    """
    return self._normalize_identifier(name) in self._fk_constraints
def copy(self, request, **kwargs):  # pylint: disable=unused-argument
    '''Copy instance with deps.'''
    instance = self.copy_instance(self.get_object())
    serializer = self.get_serializer(instance, data=request.data, partial=True)
    # NOTE(review): is_valid() is called without raise_exception, so invalid
    # payload fields are silently dropped before save() — confirm intended.
    serializer.is_valid()
    serializer.save()
    # NOTE(review): `.resp` looks like a project-specific attribute of this
    # Response class — verify against its definition.
    return Response(serializer.data, status.HTTP_201_CREATED).resp
def format_signed(feature,        # type: Dict[str, Any]
                  formatter=None,  # type: Callable[..., str]
                  **kwargs):
    # type: (...) -> str
    """Format an unhashed feature with its sign.

    >>> format_signed({'name': 'foo', 'sign': 1})
    'foo'
    >>> format_signed({'name': 'foo', 'sign': -1})
    '(-)foo'
    >>> format_signed({'name': 'foo', 'sign': -1}, lambda x: '"{}"'.format(x))
    '(-)"foo"'
    """
    sign_prefix = '(-)' if feature['sign'] <= 0 else ''
    name = feature['name']  # type: str
    if formatter is not None:
        name = formatter(name, **kwargs)
    return '{}{}'.format(sign_prefix, name)
def list_to_workbook(data, title='report', header=None, widths=None):
    """Create an openpyxl Workbook from list data.

    *data* may be a plain list (one sheet) or a dict mapping sheet names to
    per-sheet lists.
    """
    workbook = Workbook()
    # Sanitize the title and keep it within Excel's sheet-name limits.
    # NOTE(review): the sanitized title is never applied to a sheet here —
    # presumably build_sheet handles naming; confirm.
    title = re.sub(r'\W+', '', title)[:30]
    if isinstance(data, dict):
        for index, (sheet_name, sheet_data) in enumerate(data.items()):
            if index > 0:
                workbook.create_sheet()
            build_sheet(sheet_data, workbook.worksheets[index], sheet_name=sheet_name, header=header)
    else:
        build_sheet(data, workbook.worksheets[0], header=header, widths=widths)
    return workbook
def guess_saves(zone, data):
    """Return types with guessed DST saves.

    The DST save amount for each type is inferred from the UTC-offset shifts
    observed at standard<->DST transitions; missing or ambiguous cases are
    reported on stdout and defaulted.
    """
    saves = {}    # type index -> set of candidate save deltas
    details = {}  # (type index, delta) -> (time0, time1) transition evidence
    for (time0, type0), (time1, type1) in pairs(data.times):
        is_dst0 = bool(data.types[type0][1])
        is_dst1 = bool(data.types[type1][1])
        if (is_dst0, is_dst1) == (False, True):
            # Standard -> DST: the offset increase is the candidate save.
            shift = data.types[type1][0] - data.types[type0][0]
            if shift:
                saves.setdefault(type1, set()).add(shift)
                details[type1, shift] = (time0, time1)
        elif (is_dst0, is_dst1) == (True, False):
            # DST -> standard: the offset decrease gives the save.
            shift = data.types[type0][0] - data.types[type1][0]
            if shift:
                saves.setdefault(type0, set()).add(shift)
                details[type0, shift] = (time0, time1)
    types = data.types[:]
    for i, (offset, save, abbr) in enumerate(data.types):
        if save:
            guesses = saves.get(i, set())
            if not guesses:
                print("No save value guesses for type %d (%r) in zone %s." % (i, types[i][-1], zone))
                guess = timedelta(hours=1)  # conventional one-hour default
            elif len(guesses) == 1:
                guess = guesses.pop()
            else:
                print("Multiple save value guesses for type %d in zone %s." % (i, zone))
                for g in guesses:
                    d = details[i, g]
                    print(" ", g, *d)
                # Prefer the smallest non-zero candidate.
                guess = min(g for g in guesses if g)
            types[i] = (offset, guess, abbr)
    return types
def _set_tvf_domain(self, v, load=False):
    """Setter method for tvf_domain, mapped from YANG variable
    /interface/port_channel/tvf_domain (container).

    If this variable is read-only (config: false) in the source YANG file,
    then _set_tvf_domain is considered as a private method.  Backends
    looking to populate this variable should do so via calling
    thisObj._set_tvf_domain() directly.
    """
    # Unwrap values carrying a union type marker before validation.
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        t = YANGDynClass(
            v,
            base=tvf_domain.tvf_domain,
            is_container='container',
            presence=False,
            yang_name="tvf-domain",
            rest_name="tvf-domain",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            extensions={u'tailf-common': {u'info': u'TVF Domain configuration', u'cli-suppress-no': None}},
            namespace='urn:brocade.com:mgmt:brocade-interface',
            defining_module='brocade-interface',
            yang_type='container',
            is_config=True)
    except (TypeError, ValueError):
        # Re-raise with the generated-type description so callers can see
        # what a compatible value would look like.
        raise ValueError({'error-string': """tvf_domain must be of a type compatible with container""", 'defined-type': "container", 'generated-type': """YANGDynClass(base=tvf_domain.tvf_domain, is_container='container', presence=False, yang_name="tvf-domain", rest_name="tvf-domain", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'TVF Domain configuration', u'cli-suppress-no': None}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='container', is_config=True)""", })
    self.__tvf_domain = t
    if hasattr(self, '_set'):
        self._set()
def location(self, value):
    """(Deprecated) Set `Bucket.location`.

    This can only be set at bucket **creation** time.

    See https://cloud.google.com/storage/docs/json_api/v1/buckets and
    https://cloud.google.com/storage/docs/bucket-locations

    .. warning::
        Assignment to 'Bucket.location' is deprecated, as it is only
        valid before the bucket is created.  Instead, pass the location
        to `Bucket.create`.
    """
    warnings.warn(_LOCATION_SETTER_MESSAGE, DeprecationWarning, stacklevel=2)
    self._location = value
def copy(self):
    """Return a shallow copy of the sorted set (elements shared, container new)."""
    duplicate_elements = set(self._set)
    return self._fromset(duplicate_elements, key=self._key)
def set_axis_labels(self, x_var=None, y_var=None):
    """Set axis labels on the left column and bottom row of the grid."""
    if x_var is not None:
        if x_var in self.data.coords:
            # A coordinate name: remember it and derive the label from attrs.
            self._x_var = x_var
            self.set_xlabels(label_from_attrs(self.data[x_var]))
        else:
            # x_var is a plain string label
            self.set_xlabels(x_var)
    if y_var is not None:
        if y_var in self.data.coords:
            self._y_var = y_var
            self.set_ylabels(label_from_attrs(self.data[y_var]))
        else:
            self.set_ylabels(y_var)
    return self
def close(self):
    """Close the poll instance and reset all bookkeeping; idempotent."""
    if self._poll is None:
        return  # already closed
    self._poll.close()
    self._poll = None
    self._readers = 0
    self._writers = 0
    self._events = 0
    clear_callbacks(self)
def _integrate_odeint(self, *args, **kwargs):
    """Do not use directly (use ``integrate(..., integrator='odeint')``).

    Uses `Boost.Numeric.Odeint <http://www.odeint.com>`_
    (via `pyodeint <https://pypi.python.org/pypi/pyodeint>`_) to integrate
    the ODE system.
    """
    import pyodeint
    # Python interface to boost's odeint integrators.
    # The default method (rosenbrock4) is implicit, so a Jacobian is needed
    # whenever the chosen method appears in pyodeint.requires_jac.
    kwargs['with_jacobian'] = kwargs.get('method', 'rosenbrock4') in pyodeint.requires_jac
    return self._integrate(pyodeint.integrate_adaptive, pyodeint.integrate_predefined, *args, **kwargs)
def _infer_stmts(stmts, context, frame=None):
    """Return an iterator on statements inferred by each statement in *stmts*."""
    # NOTE(review): `inferred` is reused both as the per-item loop variable
    # and as the "anything yielded" flag — intentional upstream, but easy to
    # misread.
    inferred = False
    if context is not None:
        name = context.lookupname
        # Clone so that lookupname mutations below don't leak to the caller.
        context = context.clone()
    else:
        name = None
        context = contextmod.InferenceContext()
    for stmt in stmts:
        if stmt is util.Uninferable:
            yield stmt
            inferred = True
            continue
        context.lookupname = stmt._infer_name(frame, name)
        try:
            for inferred in stmt.infer(context=context):
                yield inferred
                inferred = True
        except exceptions.NameInferenceError:
            # Name could not be resolved for this statement: skip it.
            continue
        except exceptions.InferenceError:
            yield util.Uninferable
            inferred = True
    if not inferred:
        raise exceptions.InferenceError("Inference failed for all members of {stmts!r}.", stmts=stmts, frame=frame, context=context, )
def enter_frame(self, frame):
    """Enter *frame*, recording every identifier that resolves as undeclared."""
    CodeGenerator.enter_frame(self, frame)
    for _name, (action, target) in iteritems(frame.symbols.loads):
        if action == 'resolve':
            self.undeclared_identifiers.add(target)
def set(self, name, value, default=False, imported=False):
    """Set the :class:`Setting` at ``name`` with a new ``value``.

    If ``default`` is ``True``, the :attr:`Setting.default` is also set.
    """
    # Plain instance attributes shadow managed settings entirely.
    if name in self.__dict__:
        self.__dict__[name] = value
        return
    if name not in self.settings and self.prefix:
        prefix_name = '%s_%s' % (self.prefix, name)
        if prefix_name in self.settings:
            # A prefixed variant exists: don't do anything.
            return
    if name in self.settings:
        self.settings[name].set(value, default=default, imported=imported)
    elif not imported:
        # Unknown setting assigned explicitly: stash it as a plain param.
        self.params[name] = value
def run(self, loopinfo=None, batch_size=1):
    """Run the consumer: one pass over all topics, or forever when *loopinfo* is given."""
    logger.info("{}.Starting...".format(self.__class__.__name__))
    if not loopinfo:
        for topic in self.topics:
            self.call_kafka(topic, batch_size)
        return
    while True:
        for topic in self.topics:
            self.call_kafka(topic, batch_size)
        # Pause between full passes, as configured by the caller.
        time.sleep(loopinfo.sleep)
def save_file(result, filename, encoding='utf8', headers=None, convertors=None, visitor=None, writer=None, **kwargs):
    """Save a query result to a csv/dict/xlsx file (chosen by extension).

    visitor can be used to convert values; all values should be converted to
    strings.  The visitor function should be defined as::

        def visitor(keys, values, encoding):
            # return new values[]

    convertors is used to convert a single column value, for example::

        convertors = {'field1': convert_func1, 'fields2': convert_func2}

        def convert_func1(value, data):
            # value is the value of field1
            # data is the record

    If visitor and convertors are both provided, only visitor is used.
    headers is used to map columns to provided titles.
    """
    import os
    from uliweb.utils.common import simple_value
    convertors = convertors or {}
    headers = headers or []
    # Pick the writer implementation from the filename extension.
    writer_class = Writer
    if isinstance(filename, (str, unicode)):
        ext = os.path.splitext(filename)[1]
        if ext == '.csv':
            writer_class = Writer
        elif ext == '.dict':
            writer_class = DictWriter
        elif ext == '.xlsx':
            writer_class = XlsxWriter

    def convert(k, v, data):
        # Apply the per-column convertor, if one is registered for k.
        f = convertors.get(k)
        if f:
            v = f(v, data)
        return v

    if isinstance(result, (str, unicode)):
        # A raw SQL string: wrap it as an executable text clause.
        result = text(safe_unicode(result))
    if isinstance(result, (Select, TextClause)):
        result = do_(result)
    # Build the header row, honouring any caller-provided definitions.
    _header = []
    for k in result.keys():
        flag = False
        for x in headers:
            if x['name'] == k:
                _header.append(x)
                flag = True
                break
        if not flag:
            _header.append({'name': k, 'title': k})

    def _data():
        # Lazily yield each row, converted either by the visitor or by the
        # per-column convertors.
        for row in result:
            if visitor and callable(visitor):
                _row = visitor(result.keys(), row.values(), encoding)
            else:
                _row = [convert(k, v, row) for k, v in zip(result.keys(), row.values())]
            yield _row

    writer = writer_class(filename, header=_header, data=_data(), **kwargs)
    writer.save()
def ap_adc_send(self, adc1, adc2, adc3, adc4, adc5, adc6, force_mavlink1=False):
    '''raw ADC output

    adc1 .. adc6 : ADC outputs 1-6 (uint16_t)
    '''
    encoded = self.ap_adc_encode(adc1, adc2, adc3, adc4, adc5, adc6)
    return self.send(encoded, force_mavlink1=force_mavlink1)
def disable_branching_model(self, project, repository):
    """Disable the branching model for a repository.

    :param project: project key
    :param repository: repository slug
    :return: response of the DELETE call
    """
    url = ('rest/branch-utils/1.0/projects/{project}/repos/{repository}'
           '/branchmodel/configuration').format(project=project, repository=repository)
    return self.delete(url)
def incoming_references(self, client=None, query=None):
    """Fetch all entries referencing this entry.

    API Reference:
    https://www.contentful.com/developers/docs/references/content-delivery-api/#/reference/search-parameters/links-to-asset

    :param client: Client instance.
    :param query: (optional) Dict with API options; not mutated.
    :return: List of :class:`Entry <contentful.entry.Entry>` objects, or
        ``False`` when no client is given.
    :rtype: List of contentful.entry.Entry

    Usage:
        >>> entries = entry.incoming_references(client)
        [<Entry[cat] id='happycat'>]
    """
    if client is None:
        return False
    # BUG FIX: the original used a mutable default argument (query={}) and
    # mutated it with update(), leaking 'links_to_entry' across calls and
    # into caller-supplied dicts.  Work on a copy instead.
    params = dict(query or {})
    params['links_to_entry'] = self.id
    return client.entries(params)
def cysparse_type_to_real_sum_cysparse_type(cysparse_type):
    """Return the best **real** type for a **real** sum of a given type.

    For instance::

        INT32_t -> FLOAT64_t

    Args:
        cysparse_type: name of the element type.

    Raises:
        TypeError: when the type name is not recognized.
    """
    # Every recognized type maps to one of the two real accumulator types.
    real_sum_types = {
        'INT32_t': 'FLOAT64_t', 'UINT32_t': 'FLOAT64_t',
        'INT64_t': 'FLOAT64_t', 'UINT64_t': 'FLOAT64_t',
        'FLOAT32_t': 'FLOAT64_t', 'FLOAT64_t': 'FLOAT64_t',
        'FLOAT128_t': 'FLOAT128_t',
        'COMPLEX64_t': 'FLOAT64_t', 'COMPLEX128_t': 'FLOAT64_t',
        'COMPLEX256_t': 'FLOAT128_t',
    }
    try:
        return real_sum_types[cysparse_type]
    except KeyError:
        raise TypeError("Not a recognized type")
def SynchronizedRequest(client, request, global_endpoint_manager, connection_policy, requests_session, method, path, request_data, query_params, headers):
    """Performs one synchronized http request according to the parameters.

    :param object client: Document client instance
    :param dict request:
    :param _GlobalEndpointManager global_endpoint_manager:
    :param documents.ConnectionPolicy connection_policy:
    :param requests.Session requests_session: Session object in requests module
    :param str method:
    :param str path:
    :param (str, unicode, file-like stream object, dict, list or None) request_data:
    :param dict query_params:
    :param dict headers:
    :return: tuple of (result, headers)
    :rtype: tuple of (dict, dict)
    """
    request_body = None
    if request_data:
        request_body = _RequestBodyFromData(request_data)
        if not request_body:
            raise errors.UnexpectedDataType('parameter data must be a JSON object, string or' + ' readable stream.')
    request_options = {}
    request_options['path'] = path
    request_options['method'] = method
    if query_params:
        request_options['path'] += '?' + urlencode(query_params)
    request_options['headers'] = headers
    # Set Content-Length explicitly for string bodies and for empty bodies.
    if request_body and (type(request_body) is str or type(request_body) is six.text_type):
        request_options['headers'][http_constants.HttpHeaders.ContentLength] = (len(request_body))
    elif request_body is None:
        request_options['headers'][http_constants.HttpHeaders.ContentLength] = 0
    # Pass _Request function with its parameters to retry_utility's Execute
    # method that wraps the call with retries.
    return retry_utility._Execute(client, global_endpoint_manager, _Request, request, connection_policy, requests_session, path, request_options, request_body)
def final_objects(self):
    """Return the workflow's database objects whose status is COMPLETED."""
    results = []
    for obj in self.database_objects:
        if obj.status in [obj.known_statuses.COMPLETED]:
            results.append(obj)
    return results
def _init_go_sources ( self , go_sources_arg , go2obj_arg ) :
"""Return GO sources which are present in GODag .""" | gos_user = set ( go_sources_arg )
if 'children' in self . kws and self . kws [ 'children' ] :
gos_user |= get_leaf_children ( gos_user , go2obj_arg )
gos_godag = set ( go2obj_arg )
gos_source = gos_user . intersection ( gos_godag )
gos_missing = gos_user . difference ( gos_godag )
if not gos_missing :
return gos_source
sys . stdout . write ( "{N} GO IDs NOT FOUND IN GO DAG: {GOs}\n" . format ( N = len ( gos_missing ) , GOs = " " . join ( [ str ( e ) for e in gos_missing ] ) ) )
return gos_source |
def BindScope(self, scope_id, values):
    """Associate *values* with the scope *scope_id*.

    May be called multiple times to accumulate bindings.

    Args:
        scope_id: A scope id to bind the values to.
        values: A mapping from scope variable ids to values to bind in scope.

    Raises:
        KeyError: If the scope, or any scope variable, is not declared in
            the pattern.
    """
    if scope_id not in self._scopes:
        raise KeyError(scope_id)
    bound = set(values)  # iterating a mapping yields its keys
    declared = self._scopes[scope_id]
    if bound != declared:
        # Report the symmetric difference: missing plus unexpected ids.
        raise KeyError(bound ^ declared)
    self._scope_bindings[scope_id].append(values)
def plot(self):
    """After model execution, plot the dataset, curve-fit, BMD, and BMDL.

    Example
    -------
    >>> import os
    >>> fn = os.path.expanduser('~/Desktop/image.png')
    >>> fig = model.plot()
    >>> fig.savefig(fn)
    >>> fig.clear()

    Returns the matplotlib figure produced by the dataset plot, with the
    model overlay added.
    """
    fig = self.dataset.plot()
    ax = fig.gca()
    ax.set_title("{}\n{}, {}".format(self.dataset._get_dataset_name(), self.name, self.get_bmr_text()))
    if self.has_successfully_executed:
        # Overlay the fitted curve and BMR lines on the dataset plot.
        self._set_x_range(ax)
        ax.plot(self._xs, self.get_ys(self._xs), label=self.name, **plotting.LINE_FORMAT)
        self._add_bmr_lines(ax)
    else:
        # Execution failed: annotate the figure instead of drawing a fit.
        self._add_plot_failure(ax)
    ax.legend(**settings.LEGEND_OPTS)
    return fig
def favorite_remove(self, post_id):
    """Remove a post from favorites (Requires login).

    Parameters:
        post_id (int): The id of the post to un-favorite.
    """
    endpoint = 'favorites/{0}.json'.format(post_id)
    return self._get(endpoint, method='DELETE', auth=True)
def describe_jobflow(self, jobflow_id):
    """Describes a single Elastic MapReduce job flow.

    :type jobflow_id: str
    :param jobflow_id: The job flow id of interest
    """
    # Delegate to the bulk call and unwrap the single result, if any.
    matches = self.describe_jobflows(jobflow_ids=[jobflow_id])
    return matches[0] if matches else None
def track_sequence(self, name, checkpoints, description='', max_rows=None):
    """Create a Sequence object in the Tracker."""
    if name in self._tables:
        raise TableConflictError(name)
    # Fall back to the tracker-wide row cap when no explicit limit is given.
    row_limit = (AnonymousUsageTracker.MAX_ROWS_PER_TABLE
                 if max_rows is None else max_rows)
    self.register_table(name, self.uuid, 'Sequence', description)
    self._tables[name] = Sequence(name, self, checkpoints, max_rows=row_limit)
def set(self, value=True):
    """Set the boolean parameter."""
    # pylint: disable=undefined-variable
    # Accept the canonical truthy values directly, plus a small set of
    # textual spellings (case-insensitive). On Python 2, `unicode` text is
    # also accepted; the version check short-circuits on Python 3.
    is_text = isinstance(value, str) or (
        sys.version < '3' and isinstance(value, unicode))
    truthy = value in (True, 1) or (
        is_text and value.lower() in ("1", "yes", "true", "enabled"))
    return super(BooleanParameter, self).set(truthy)
def aiidalab_display(obj, downloadable=True, **kwargs):
    """Display AiiDA data types in Jupyter notebooks.

    :param downloadable: If True, add link/button to download content of displayed AiiDA object.

    Defers to IPython.display.display for any objects it does not recognize.
    """
    from aiidalab_widgets_base import aiida_visualizers
    try:
        # Only the mapping lookup may legitimately raise KeyError; keeping
        # the visualizer *call* outside the try ensures a KeyError raised
        # inside a visualizer is not silently swallowed as "unknown type".
        visualizer_cls = getattr(aiida_visualizers,
                                 AIIDA_VISUALIZER_MAPPING[obj.type])
    except KeyError:
        # No registered visualizer for this AiiDA type: plain display.
        display(obj, **kwargs)
    else:
        display(visualizer_cls(obj, downloadable=downloadable), **kwargs)
def dir_list(load):
    '''Return a list of all directories on the master'''
    if 'env' in load:
        # "env" is not supported; use "saltenv".
        load.pop('env')
    result = []
    if 'saltenv' not in load:
        return result
    saltenv = load['saltenv']
    metadata = _init()
    if not metadata or saltenv not in metadata:
        return result
    # Grab all the dirs from the buckets cache file.
    for bucket in _find_dirs(metadata[saltenv]):
        for dirs in six.itervalues(bucket):
            # Trim env and trailing slash.
            trimmed = _trim_env_off_path(dirs, saltenv, trim_slash=True)
            # Remove empty string left by the base env dir in single
            # bucket mode.
            result.extend(entry for entry in trimmed if entry)
    return result
def srun_nodes(self):
    """Get list of nodes where to execute the command."""
    spec = self.execution.get('srun_nodes', 0)
    if isinstance(spec, six.string_types):
        # A string names a tag whose nodes should all be used.
        tag, count = spec, 0
    elif isinstance(spec, SEQUENCES):
        # An explicit node list is returned verbatim.
        return spec
    else:
        assert isinstance(spec, int)
        tag, count = self.tag, spec
    nodes = self._srun_nodes(tag, count)
    if 'srun_nodes' in self.execution:
        # Record the resolved node list back into the execution spec.
        self.execution['srun_nodes'] = nodes
        self.execution['srun_nodes_count'] = len(nodes)
    return nodes
def ensure_us_time_resolution(val):
    """Convert val out of numpy time, for use in to_dict.

    Needed because of numpy bug GH#7619
    """
    kind = val.dtype
    if np.issubdtype(kind, np.datetime64):
        return val.astype('datetime64[us]')
    if np.issubdtype(kind, np.timedelta64):
        return val.astype('timedelta64[us]')
    return val
def adapts(**schemas):
    """Create a decorator for validating and adapting function parameters.

    Example::

        @adapts(a="number", body={"+field_ids": [V.AdaptTo(int)], "is_ok": bool})
        def f(a, body):
            print(a, body.field_ids, body.is_ok)

    :param schemas: The schema for adapting a given parameter.
    """
    validate = parse(schemas).validate

    @decorator
    def adapting(func, *args, **kwargs):
        adapted = validate(inspect.getcallargs(func, *args, **kwargs), adapt=True)
        # ``inspect.getargspec`` was removed in Python 3.11; prefer
        # ``getfullargspec`` when available. Its ``varkw`` attribute is the
        # Python 3 name for the legacy ``keywords`` attribute.
        if hasattr(inspect, 'getfullargspec'):
            argspec = inspect.getfullargspec(func)
            varkw = argspec.varkw
        else:
            argspec = inspect.getargspec(func)
            varkw = argspec.keywords
        if argspec.varargs is varkw is None:
            # Optimization for the common no varargs, no keywords case.
            return func(**adapted)
        adapted_varargs = adapted.pop(argspec.varargs, ())
        adapted_keywords = adapted.pop(varkw, {})
        if not adapted_varargs:
            # Keywords only: merge them into the named arguments.
            if adapted_keywords:
                adapted.update(adapted_keywords)
            return func(**adapted)
        adapted_posargs = [adapted[arg] for arg in argspec.args]
        adapted_posargs.extend(adapted_varargs)
        return func(*adapted_posargs, **adapted_keywords)
    return adapting
def nested_genobject(self, metadata, attr, datastore):
    """Allow for the printing of nested GenObjects.

    Recursively converts GenObject instances (which are not JSON
    serializable) into plain nested dictionaries.

    :param metadata: Nested dictionary containing the metadata. Will be further populated by this method
    :param attr: Current attribute being evaluated. Must be a GenObject e.g. sample.general
    :param datastore: The dictionary of the current attribute. Will be converted to nested dictionaries
    :return: Updated nested metadata dictionary with all GenObjects safely converted to dictionaries
    """
    # Iterate through all the key: value pairs of the current
    # datastore[attr] datastore, sorted for deterministic output
    # e.g. reverse_reads <accessoryFunctions.accessoryFunctions.GenObject object at 0x7fe153b725f8>
    for key, value in sorted(datastore[attr].datastore.items()):
        # If the type(value) is a GenObject, then JSON serialization will not work
        if 'GenObject' in str(type(value)):
            # Initialise the nested attribute: key nested dictionary within the metadata dictionary
            # e.g. attr: 100_100, key: reverse_reads
            metadata[attr][key] = dict()
            # Iterate through the nested keys and nested values within the value datastore
            # e.g. nested_key: length, nested_value: 100
            for nested_key, nested_datastore in sorted(value.datastore.items()):
                # Create an additional dictionary layer within the metadata dictionary
                metadata[attr][key][nested_key] = dict()
                # If the type(nested_datastore) is a GenObject, recursively run this method to update the
                # metadata dictionary, supply the newly created nested dictionary: metadata[attr][key] as
                # the input metadata dictionary, the nested key as the input attribute, and the datastore of
                # value as the input datastore
                # e.g. key: 100_100,
                # datastore: <accessoryFunctions.accessoryFunctions.GenObject object at 0x7fc526001e80>
                if 'GenObject' in str(type(nested_datastore)):
                    metadata[attr][key].update(
                        self.nested_genobject(metadata[attr][key], nested_key, value.datastore))
                # If the nested datastore is not a GenObject, populate the nested metadata dictionary with
                # the attribute, key, nested key, and nested datastore
                # e.g. attr: 100_100, key: reverse_reads, nested_key: length, nested_datastore: 100
                else:
                    metadata[attr][key][nested_key] = nested_datastore
        # Non-GenObjects can (usually) be added to the metadata dictionary without issues
        else:
            try:
                # Skip keys explicitly excluded from the dump.
                if key not in self.unwanted_keys:
                    metadata[attr][key] = value
            # NOTE(review): the AttributeError presumably comes from
            # self.unwanted_keys being absent on some instances — confirm.
            except AttributeError:
                print('dumperror', attr)
    # Return the metadata
    return metadata
def get_stream(self, bucket, label, as_stream=True):
    '''Get a bitstream for the given bucket:label combination.

    :param bucket: the bucket to use.
    :return: bitstream as a file-like object
    '''
    if self.mode == "w":
        raise OFSException("Cannot read from archive in 'w' mode")
    if not self.exists(bucket, label):
        raise OFSFileNotFound
    member_name = self._zf(bucket, label)
    # Either hand back a file-like object or the raw bytes.
    if as_stream:
        return self.z.open(member_name)
    return self.z.read(member_name)
def plotFuncs(functions, bottom, top, N=1000, legend_kwds=None):
    '''Plots 1D function(s) over a given range.

    Parameters
    ----------
    functions : [function] or function
        A single function, or a list of functions, to be plotted.
    bottom : float
        The lower limit of the domain to be plotted.
    top : float
        The upper limit of the domain to be plotted.
    N : int
        Number of points in the domain to evaluate.
    legend_kwds: None, or dictionary
        If not None, the keyword dictionary to pass to plt.legend

    Returns
    -------
    none
    '''
    # Accept either a single callable or a list of callables
    # (isinstance also admits list subclasses, unlike type() == list).
    if isinstance(functions, list):
        function_list = functions
    else:
        function_list = [functions]
    # The evaluation grid is identical for every function, so build it
    # once instead of once per function (it was loop-invariant).
    x = np.linspace(bottom, top, N, endpoint=True)
    for function in function_list:
        plt.plot(x, function(x))
    plt.xlim([bottom, top])
    if legend_kwds is not None:
        plt.legend(**legend_kwds)
    plt.show()
def check_deps(deps):
    """check whether specific requirements are available."""
    requirements = deps if isinstance(deps, list) else [deps]
    availability = list(Environment.has_apps(requirements))
    if all(availability):
        return
    # Report the first unavailable dependency.
    for name, available in dict(zip(requirements, availability)).items():
        if not available:
            raise SystemError(
                "The required application/dependency '{0}'"
                " isn't available.".format(name))
def iter_params_for_processing(invocation_order, declaration_order):
    """Given a sequence of parameters in the order as should be considered
    for processing and an iterable of parameters that exist, this returns
    a list in the correct order as they should be processed.
    """
    def processing_key(param):
        # Eager parameters sort first. Within each group, parameters that
        # were actually invoked keep their invocation position; everything
        # else sorts after them (position infinity), preserving declaration
        # order thanks to sort stability.
        if param in invocation_order:
            position = invocation_order.index(param)
        else:
            position = float('inf')
        return (not param.is_eager, position)

    return sorted(declaration_order, key=processing_key)
def post(self, request):
    """Creates a video transcript instance with the given information.

    Rejects the request (HTTP 400) when required parameters are missing,
    when the file format or provider is unsupported, or when a transcript
    already exists for the video/language pair.

    Arguments:
        request: A WSGI request.
    """
    # All of these POST parameters are mandatory.
    attrs = ('video_id', 'name', 'language_code', 'provider', 'file_format')
    missing = [attr for attr in attrs if attr not in request.data]
    if missing:
        LOGGER.warn('[VAL] Required transcript params are missing. %s', ' and '.join(missing))
        return Response(status=status.HTTP_400_BAD_REQUEST, data=dict(message=u'{missing} must be specified.'.format(missing=' and '.join(missing))))
    video_id = request.data['video_id']
    language_code = request.data['language_code']
    transcript_name = request.data['name']
    provider = request.data['provider']
    file_format = request.data['file_format']
    # Validate the requested file format against the supported set.
    supported_formats = sorted(dict(TranscriptFormat.CHOICES).keys())
    if file_format not in supported_formats:
        message = (u'"{format}" transcript file type is not supported. Supported formats are "{supported_formats}"').format(format=file_format, supported_formats=supported_formats)
        return Response(status=status.HTTP_400_BAD_REQUEST, data={'message': message})
    # Validate the transcription provider against the supported set.
    supported_providers = sorted(dict(TranscriptProviderType.CHOICES).keys())
    if provider not in supported_providers:
        message = (u'"{provider}" provider is not supported. Supported transcription providers are "{supported_providers}"').format(provider=provider, supported_providers=supported_providers)
        return Response(status=status.HTTP_400_BAD_REQUEST, data={'message': message})
    # This endpoint is creation-only: an existing transcript for the same
    # video/language pair must not be overwritten.
    transcript = VideoTranscript.get_or_none(video_id, language_code)
    if transcript is None:
        create_or_update_video_transcript(video_id, language_code, metadata={'provider': provider, 'file_name': transcript_name, 'file_format': file_format})
        response = Response(status=status.HTTP_200_OK)
    else:
        message = (u'Can not override existing transcript for video "{video_id}" and language code "{language}".').format(video_id=video_id, language=language_code)
        response = Response(status=status.HTTP_400_BAD_REQUEST, data={'message': message})
    return response
def _make_repr_table_from_sframe(X):
    """Serializes an SFrame to a list of strings, that, when printed, creates a well-formatted table."""
    assert isinstance(X, _SFrame)
    column_names = X.column_names()
    n_cols = len(column_names)
    out_data = [[None] * n_cols for _ in range(X.num_rows())]
    # Start each column's width at its header length.
    column_sizes = [len(name) for name in column_names]
    for col_idx, name in enumerate(column_names):
        for row_idx, entry in enumerate(X[name]):
            out_data[row_idx][col_idx] = str(entry)
            column_sizes[col_idx] = max(column_sizes[col_idx], len(entry))
    # Now, go through and pad everything.
    header = [name.ljust(width, ' ')
              for name, width in zip(column_names, column_sizes)]
    divider = ["-" * width for width in column_sizes]
    body = [[cell.ljust(width, ' ')
             for cell, width in zip(row, column_sizes)]
            for row in out_data]
    return [' '.join(row) for row in [header, divider] + body]
def addBlock(self, branch=None, btype=None, mtype=None, attributes=None):
    '''Add block definition to list of blocks in material

    Order for list entry: branch level (0=root), block type (material, solid, fluid, etc.), matid (integer if root, False otherwise),
    material name (string if root, False otherwise), material type, dictionary of attributes or False if none'''
    # The root block (branch 0) always carries the material's own attributes.
    entry = {
        'branch': branch,
        'btype': btype,
        'mtype': mtype,
        'attributes': self.attributes if branch == 0 else attributes,
    }
    self.blocks.append(entry)
async def random_connection(self):
    """Connect to random agent from current :attr:`connections`.

    :returns: :class:`aiomas.Proxy` object for the connected agent.
    """
    candidates = list(self._connections.keys())
    return await self.env.connect(choice(candidates))
def epochs_joint(ts, variability=None, threshold=0.0, minlength=1.0,
                 proportion=0.75, plot=True):
    """Identify epochs within a multivariate time series where at least a
    certain proportion of channels are "stationary", based on a previously
    computed variability measure.

    (Note: This requires an IPython cluster to be started first,
     e.g. on a workstation type 'ipcluster start')

    Args:
      ts  Timeseries of m variables, shape (n, m).
      variability  (optional) Timeseries of shape (n, m), giving a scalar
        measure of the variability of timeseries `ts` near each
        point in time. (if None, we will use variability_fp())
      threshold  The maximum variability permitted in stationary epochs.
      minlength  Shortest acceptable epoch length (in seconds)
      proportion  Require at least this fraction of channels to be "stationary"
      plot  bool  Whether to display the output

    Returns: (variability, joint_epochs)
      joint_epochs: list of tuples
      A list of tuples (start, end) that give the starting and ending indices
      of time epochs that are stationary for at least `proportion` of channels.
      (epochs are inclusive of start point but not the end point)
    """
    variability, allchannels_epochs = ts.epochs_distributed(
        variability, threshold, minlength, plot=False)
    # BUGFIX: was `ts.ndim is 1` — identity comparison with an int literal
    # is implementation-dependent (SyntaxWarning on CPython 3.8+).
    if ts.ndim == 1:
        ts = ts[:, np.newaxis]
        allchannels_epochs = [allchannels_epochs]
        variability = variability[:, np.newaxis]
    channels = ts.shape[1]
    dt = (1.0 * ts.tspan[-1] - ts.tspan[0]) / (len(ts) - 1)
    # Build a time-sorted list of boundary events: +1 when a channel enters
    # a stationary epoch, -1 when it leaves one. (Renamed from `all`, which
    # shadowed the builtin.)
    starts = [(e[0], 1) for channel in allchannels_epochs for e in channel]
    ends = [(e[1], -1) for channel in allchannels_epochs for e in channel]
    events = sorted(starts + ends)
    joint_epochs = []
    in_joint_epoch = False
    joint_start = 0.0
    inside_count = 0
    for bound in events:
        inside_count += bound[1]
        # Enter a joint epoch when enough channels are simultaneously
        # stationary; leave it (and record it, if long enough) otherwise.
        if not in_joint_epoch and 1.0 * inside_count / channels >= proportion:
            in_joint_epoch = True
            joint_start = bound[0]
        if in_joint_epoch and 1.0 * inside_count / channels < proportion:
            in_joint_epoch = False
            joint_end = bound[0]
            if (joint_end - joint_start) * dt >= minlength:
                joint_epochs.append((joint_start, joint_end))
    if plot:
        joint_epochs_repeated = [joint_epochs] * channels
        _plot_variability(ts, variability, threshold, joint_epochs_repeated)
    return (variability, joint_epochs)
def _negotiate_value ( response ) :
"""Extracts the gssapi authentication token from the appropriate header""" | if hasattr ( _negotiate_value , 'regex' ) :
regex = _negotiate_value . regex
else : # There ' s no need to re - compile this EVERY time it is called . Compile
# it once and you won ' t have the performance hit of the compilation .
regex = re . compile ( '(?:.*,)*\s*Negotiate\s*([^,]*),?' , re . I )
_negotiate_value . regex = regex
authreq = response . headers . get ( 'www-authenticate' , None )
if authreq :
match_obj = regex . search ( authreq )
if match_obj :
return match_obj . group ( 1 )
return None |
async def cli(ctx, endpoint, debug, websocket, post):
    """Songpal CLI.

    Shared entry point: configures logging, resolves the transport
    protocol, connects to the device and stores it on the click context
    for subcommands. The 'discover' subcommand skips the connection.
    """
    lvl = logging.INFO
    if debug:
        lvl = logging.DEBUG
        click.echo("Setting debug level to %s" % debug)
    logging.basicConfig(level=lvl)
    # 'discover' needs no endpoint; pass only the debug flag along.
    if ctx.invoked_subcommand == "discover":
        ctx.obj = {"debug": debug}
        return
    if endpoint is None:
        err("Endpoint is required except when with 'discover'!")
        return
    protocol = None
    # --post and --websocket are mutually exclusive protocol overrides.
    if post and websocket:
        err("You can force either --post or --websocket")
        return
    elif websocket:
        protocol = ProtocolType.WebSocket
    elif post:
        protocol = ProtocolType.XHRPost
    logging.debug("Using endpoint %s", endpoint)
    x = Device(endpoint, force_protocol=protocol, debug=debug)
    try:
        await x.get_supported_methods()
    except (requests.exceptions.ConnectionError, SongpalException) as ex:
        err("Unable to get supported methods: %s" % ex)
        sys.exit(-1)
    ctx.obj = x
def generate_configurations(*, guided=False, fresh_start=False, save=False):
    """If a config file is found in the standard locations, it will be loaded
    and the config data would be returned. If not found, then generate the
    data on the fly, and return it."""
    if fresh_start:
        # Discard any existing config files before (re)loading.
        purge_configs()
    loaded_status, loaded_data = get_config()
    if loaded_status == CONFIG_VALID:
        return loaded_data
    if save:
        # Persist a new config file, then re-read it.
        make_config_file(guided=guided)
        _, config_data = get_config()
    else:
        config_data = make_config_data(guided=guided)
    return config_data
def quantile(q, variable, weight_variable=None, filter_variable=None):
    """Return quantile of a variable with weight provided by a specific weight variable, potentially filtered."""
    def formula(entity, period):
        value = entity(variable, period)
        # BUGFIX: the weights computed from weight_variable were
        # unconditionally overwritten by an array of ones, so the weight
        # variable was silently ignored. Uniform weights are now only the
        # fallback when no weight variable is given.
        if weight_variable is not None:
            weight = entity(weight_variable, period)
        else:
            weight = entity.filled_array(1)
        if filter_variable is not None:
            # Zero out the weights of filtered-out observations.
            filter_value = entity(filter_variable, period)
            weight = filter_value * weight
        labels = arange(1, q + 1)
        quantile_values, _ = weightedcalcs_quantiles(
            value, labels, weight, return_quantiles=True,
        )
        if filter_variable is not None:
            # Filtered-out observations get the sentinel quantile -1.
            quantile_values = where(weight > 0, quantile_values, -1)
        return quantile_values
    return formula
def get_archiver_index(config, archiver):
    """Get the contents of the archiver index file.

    :param config: The configuration
    :type config: :class:`wily.config.WilyConfig`

    :param archiver: The name of the archiver type (e.g. 'git')
    :type archiver: ``str``

    :return: The index data
    :rtype: ``dict``
    """
    index_path = pathlib.Path(config.cache_path) / archiver / "index.json"
    with index_path.open("r") as index_f:
        return json.load(index_f)
def define_selector(by, value, el_class):
    """
    :param by:
    :param value:
    :param el_class:
    :rtype: tuple[type, str | tuple[str, str]]
    :return:
    """
    element = el_class
    locator = by
    if isinstance(value, six.string_types):
        # A string value turns the selector into a (by, value) locator pair.
        locator = (by, value)
    elif value is not None:
        # A non-string value is itself the element class.
        element = value
    if element is None:
        element = elements.PageElement
    return element, locator
def _scroll(clicks, x=None, y=None):
    """Send the mouse vertical scroll event to Windows by calling the
    mouse_event() win32 function.

    Args:
      clicks (int): The amount of scrolling to do. A positive value is the mouse
        wheel moving forward (scrolling up), a negative value is backwards (down).
      x (int): The x position of the mouse event.
      y (int): The y position of the mouse event.

    Returns:
      None
    """
    startx, starty = _position()
    width, height = _size()
    # Default to the current cursor position; clamp explicit coordinates
    # to the screen bounds.
    if x is None:
        x = startx
    else:
        x = min(max(x, 0), width - 1)
    if y is None:
        y = starty
    else:
        y = min(max(y, 0), height - 1)
    try:
        _sendMouseEvent(MOUSEEVENTF_WHEEL, x, y, dwData=clicks)
    except (PermissionError, OSError):
        # TODO: We need to figure out how to prevent these errors,
        # see https://github.com/asweigart/pyautogui/issues/60
        pass
def write_trailer(self, sector, key_a=(0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF), auth_bits=(0xFF, 0x07, 0x80), user_data=0x69, key_b=(0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF)):
    """Writes sector trailer of specified sector. Tag and auth must be set - does auth.

    If value is None, value of byte is kept.
    Returns error state.
    """
    # The trailer is always block 3 of a sector.
    trailer_addr = self.block_addr(sector, 3)
    # Trailer layout: key A (6) | access bits (3) | user byte (1) | key B (6).
    payload = key_a[:6] + auth_bits[:3] + (user_data,) + key_b[:6]
    return self.rewrite(trailer_addr, payload)
def generate_epochs(delorean, direction, unit, count):
    """Generates epochs from a shifted Delorean instance.

    Args:
        delorean: Delorean datetime instance to shift from.
        direction: String to shift time forwards or backwards.
            Valid values: 'last', 'next'.
        unit: String of time period unit for count argument.
            Valid values: 'hour', 'day', 'week', 'month', 'year'.
        count: Int of units to shift in direction.

    Returns:
        Generator of count int epochs in UTC from a shifted Delorean
    """
    for step in range(count):
        shifted = delorean._shift_date(direction, unit, step)
        yield int(shifted.epoch)
def random_deinterleave(text, separator_symbol="X"):
    """Create a fill-in-the-blanks training example from text.

    Split on spaces, then cut into segments at random points. Alternate
    segments are assigned to the two output strings. separator_symbol
    separates segments within each of the outputs.

    example:
      text = "The quick brown fox jumps over the lazy dog."
      returns: ("X quick brown X the lazy X", "The X fox jumps over X dog.")

    The two outputs can also be reversed to yield an instance of the same
    problem.

    Args:
      text: a string
      separator_symbol: a string
    Returns:
      a pair of strings
    """
    words = text.strip().split(" ")
    total = len(words)
    if total <= 1:
        return text, ""
    # Mark random cut positions; index 0 is always a cut.
    is_cut = [False] * total
    is_cut[0] = True
    num_cuts = int(math.exp(random.uniform(0, math.log(total))))
    for _ in range(num_cuts):
        is_cut[random.randint(1, total - 1)] = True
    # Alternate segments between the two output streams, starting from a
    # randomly chosen one.
    streams = ([], [])
    current = random.randint(0, 1)
    for idx, word in enumerate(words):
        if is_cut[idx]:
            streams[current].append(separator_symbol)
            current = 1 - current
        streams[current].append(word)
    return " ".join(streams[0]), " ".join(streams[1])
def recentProgress(self):
    """Returns an array of the most recent [[StreamingQueryProgress]] updates for this query.

    The number of progress updates retained for each stream is configured by Spark session
    configuration `spark.sql.streaming.numRecentProgressUpdates`.
    """
    # Each JVM-side progress object is serialized to JSON and parsed back
    # into a plain Python dict.
    updates = self._jsq.recentProgress()
    return [json.loads(update.json()) for update in updates]
def mkdir_interactive(dirpath):
    """Create a directory if required.

    This will query the user for a confirmation.

    Args:
        dirpath: The path to create.
    """
    from benchbuild.utils.cmd import mkdir
    if os.path.exists(dirpath):
        return
    prompt = ("The directory {dirname} does not exist yet. "
              "Should I create it?".format(dirname=dirpath))
    if ui.ask(prompt, default_answer=True, default_answer_str="yes"):
        mkdir("-p", dirpath)
        print("Created directory {0}.".format(dirpath))
def create_from(cls, src_path):
    """Returns Torrent object created from a file or a directory.

    :param str src_path:
    :rtype: Torrent
    """
    is_dir = isdir(src_path)
    target_files, size_data = cls._get_target_files_info(src_path)
    # Piece-size bounds (bytes).
    SIZE_MIN = 32768  # 32 KiB
    SIZE_DEFAULT = 262144  # 256 KiB
    SIZE_MAX = 1048576  # 1 MiB
    CHUNKS_MIN = 1000
    # todo use those limits as advised
    CHUNKS_MAX = 2200
    # Pick the piece size: minimum for tiny payloads, default otherwise.
    size_piece = SIZE_MIN
    if size_data > SIZE_MIN:
        size_piece = SIZE_DEFAULT
    if size_piece > SIZE_MAX:
        size_piece = SIZE_MAX
    def read(filepath):
        # Yield chunks sized to top up pieces_buffer to exactly one piece;
        # relies on the enclosing pieces_buffer binding at iteration time.
        with open(filepath, 'rb') as f:
            while True:
                chunk = f.read(size_piece - len(pieces_buffer))
                chunk_size = len(chunk)
                if chunk_size == 0:
                    break
                yield chunk
    pieces = bytearray()
    pieces_buffer = bytearray()
    # Hash the concatenated file data piece by piece; pieces may span
    # file boundaries, so the buffer carries over between files.
    for fpath, _, _ in target_files:
        for chunk in read(fpath):
            pieces_buffer += chunk
            if len(pieces_buffer) == size_piece:
                pieces += sha1(pieces_buffer).digest()[:20]
                pieces_buffer = bytearray()
    # Hash the final partial piece, if any.
    if len(pieces_buffer):
        pieces += sha1(pieces_buffer).digest()[:20]
        pieces_buffer = bytearray()
    info = {
        'name': basename(src_path),
        'pieces': bytes(pieces),
        'piece length': size_piece,
    }
    # Multi-file torrents list per-file entries; single-file torrents
    # store the length directly.
    if is_dir:
        files = []
        for _, length, path in target_files:
            files.append({'length': length, 'path': path})
        info['files'] = files
    else:
        info['length'] = target_files[0][1]
    torrent = cls({'info': info})
    torrent.created_by = get_app_version()
    torrent.creation_date = datetime.utcnow()
    return torrent
def reloading_meta_metaclass_factory(BASE_TYPE=type):
    """hack for pyqt

    Builds a metaclass derived from ``BASE_TYPE`` that attaches a ``rrr``
    class-reload helper attribute to every class it creates.
    """
    class ReloadingMetaclass2(BASE_TYPE):
        def __init__(metaself, name, bases, dct):
            super(ReloadingMetaclass2, metaself).__init__(name, bases, dct)
            # print('Making rrr for %r' % (name,))
            # Expose the module-level ``reload_class`` helper on the
            # newly created class.
            metaself.rrr = reload_class
    return ReloadingMetaclass2
def delete_additional_charge(self, recurring_billing_id):
    """Remove an extra charge from an invoice.

    Args:
        recurring_billing_id: Identifier of the additional charge.

    Returns:
    """
    endpoint = 'recurringBillItems/{}'.format(recurring_billing_id)
    return self.client._delete(self.url + endpoint, headers=self.get_headers())
def set_dims(self, dims, shape=None):
    """Return a new variable with given set of dimensions.
    This method might be used to attach new dimension(s) to variable.

    When possible, this operation does not copy this variable's data.

    Parameters
    ----------
    dims : str or sequence of str or dict
        Dimensions to include on the new variable. If a dict, values are
        used to provide the sizes of new dimensions; otherwise, new
        dimensions are inserted with length 1.

    Returns
    -------
    Variable
    """
    if isinstance(dims, str):
        dims = [dims]
    # A dict maps new dimension names to their sizes.
    if shape is None and utils.is_dict_like(dims):
        shape = dims.values()
    # Every existing dimension must appear in the requested set.
    missing_dims = set(self.dims) - set(dims)
    if missing_dims:
        raise ValueError('new dimensions %r must be a superset of '
                         'existing dimensions %r' % (dims, self.dims))
    self_dims = set(self.dims)
    # New dimensions are prepended (in their order in `dims`); existing
    # dimensions keep their current order.
    expanded_dims = tuple(
        d for d in dims if d not in self_dims) + self.dims
    if self.dims == expanded_dims:
        # don't use broadcast_to unless necessary so the result remains
        # writeable if possible
        expanded_data = self.data
    elif shape is not None:
        dims_map = dict(zip(dims, shape))
        tmp_shape = tuple(dims_map[d] for d in expanded_dims)
        expanded_data = duck_array_ops.broadcast_to(self.data, tmp_shape)
    else:
        # Insert leading length-1 axes for each new dimension (a view,
        # not a copy).
        expanded_data = self.data[
            (None,) * (len(expanded_dims) - self.ndim)]
    expanded_var = Variable(expanded_dims, expanded_data, self._attrs,
                            self._encoding, fastpath=True)
    # Finally reorder axes to match the caller-requested dimension order.
    return expanded_var.transpose(*dims)
def get_one_series(self, id, **data):
    """GET /series/:id/

    Returns a repeating event series parent object for the specified repeating event series.

    .. _post-series-by-id:
    """
    path = "/series/{0}/".format(id)
    return self.get(path, data=data)
def fit(self, sents, **kwargs):
    """Builds a vocabulary object based on the tokens in the input.

    Args:
        sents: A list of lists of tokens (representing sentences)

    Vocab kwargs include:
        max_size
        min_freq
        specials
        unk_init
    """
    # Flatten the sentences into one token stream and count frequencies.
    token_counts = Counter(itertools.chain.from_iterable(sents))
    self.vocab = self.build_vocab(token_counts, **kwargs)
def iter_tar(arch_f, gz=False, stream=False):
    """Iter over tar archive, yielding (path, object-like) tuples.

    Args:
      arch_f: File object of the archive to iterate.
      gz: If True, open a gzip'ed archive.
      stream: If True, open the archive in stream mode which allows for faster
        processing and less temporary disk consumption, but random access to the
        file is not allowed.

    Yields:
      (filepath, extracted_fobj) for each file in the archive.
    """
    # '|' selects tarfile's non-seekable stream mode, ':' random access.
    read_type = 'r' + ('|' if stream else ':')
    if gz:
        read_type += 'gz'
    with _open_or_pass(arch_f) as fobj:
        tar = tarfile.open(mode=read_type, fileobj=fobj)
        for member in tar:
            extract_file = tar.extractfile(member)
            if extract_file:  # File with data (not directory):
                path = _normpath(member.path)
                if not path:
                    continue
                # BUGFIX: yield a tuple as documented, not a list.
                yield path, extract_file
def items(self):
    """Get list of download items."""
    # When a matcher is configured, only yield items it accepts;
    # otherwise pass everything through.
    for item in self._fetch_items():
        if self.matcher and not self.matcher.match(item):
            continue
        yield item
def pop_path_info(environ, charset='utf-8', errors='replace'):
    """Removes and returns the next segment of `PATH_INFO`, pushing it onto
    `SCRIPT_NAME`. Returns `None` if there is nothing left on `PATH_INFO`.

    If the `charset` is set to `None` a bytestring is returned.

    If there are empty segments (``'/foo//bar``) these are ignored but
    properly pushed to the `SCRIPT_NAME`:

    >>> env = {'SCRIPT_NAME': '/foo', 'PATH_INFO': '/a/b'}
    >>> pop_path_info(env)
    >>> env['SCRIPT_NAME']
    '/foo/a'
    >>> pop_path_info(env)
    >>> env['SCRIPT_NAME']
    '/foo/a/b'

    .. versionadded:: 0.5

    .. versionchanged:: 0.9
       The path is now decoded and a charset and encoding
       parameter can be provided.

    :param environ: the WSGI environment that is modified.
    """
    path = environ.get('PATH_INFO')
    if not path:
        return None
    script_name = environ.get('SCRIPT_NAME', '')
    # shift multiple leading slashes over to SCRIPT_NAME so that empty
    # segments are preserved there rather than lost
    old_path = path
    path = path.lstrip('/')
    if path != old_path:
        script_name += '/' * (len(old_path) - len(path))
    if '/' not in path:
        # Last segment: PATH_INFO is exhausted after this pop.
        environ['PATH_INFO'] = ''
        environ['SCRIPT_NAME'] = script_name + path
        rv = wsgi_get_bytes(path)
    else:
        segment, path = path.split('/', 1)
        environ['PATH_INFO'] = '/' + path
        environ['SCRIPT_NAME'] = script_name + segment
        rv = wsgi_get_bytes(segment)
    # Decode unless charset is None, in which case raw bytes are returned.
    return to_unicode(rv, charset, errors, allow_none_charset=True)
def create_from_label_monetary_account(cls, label_monetary_account):
    """:type label_monetary_account: LabelMonetaryAccount"""
    # Bypass __init__ and populate the instance directly.
    instance = cls.__new__(cls)
    instance.label_monetary_account = label_monetary_account
    pointer = Pointer()
    pointer._name = label_monetary_account.display_name
    pointer._type_ = cls._POINTER_TYPE_IBAN
    pointer._value = label_monetary_account.iban
    instance.pointer = pointer
    return instance
def __update_html(self, html):
    """Updates the View with given html content.

    :param html: Html content.
    :type html: unicode
    """
    if platform.system() in ("Windows", "Microsoft"):
        # Convert Windows-style paths to forward slashes before embedding.
        html = re.sub(r"((?:[a-zA-Z]\:|\\\\[\w\.]+\\[\w.$]+)\\(?:[\w]+\\)*\w([\w.])+)",
                      lambda x: foundations.strings.to_forward_slashes(x.group(1)),
                      html)
    # Escape double quotes and strip newlines so the content can be placed
    # inside a javascript string literal.
    replacements = OrderedDict([('"', '\\"'), ("\n", "")])
    html = foundations.strings.replace(html, replacements)
    self.__evaluate_javascript("$(\"#report\").html(\"{0}\");".format(html))
def register_type(self, oids, name, casting):
    """Callback to register data types when reconnect."""
    assert type(oids) is tuple
    assert isinstance(name, basestring)
    assert hasattr(casting, '__call__')
    # Remember the registration so it can be replayed after a reconnect.
    self._register_types.append((oids, name, casting))
    new_type = psycopg2.extensions.new_type(oids, name, casting)
    psycopg2.extensions.register_type(new_type)
def blockingCallFromGreenlet(*args, **kwargs):
    """Call function in reactor greenlet and block current greenlet waiting for the result.

    Usage: ``blockingCallFromGreenlet(reactor, func, *func_args, **func_kwargs)``
    """
    reactor = args[0]
    assert reactor.greenlet != getcurrent(), "can't invoke this in the reactor greenlet"
    func = args[1]
    result = AsyncResult()

    def task():
        try:
            result.set(func(*args[2:], **kwargs))
        # BUGFIX: `except Exception, ex:` is Python-2-only syntax and a
        # SyntaxError on Python 3; the `as` form works on 2.6+ and 3.x.
        except Exception as ex:
            # Deliver any failure to the waiting greenlet.
            result.set_exception(ex)

    reactor.callFromGreenlet(task)
    value = result.get()
    if isinstance(value, defer.Deferred):
        return waitForDeferred(value)
    return value
def get_version(form='short'):
    """Returns the version string.

    Takes single argument ``form``, which should be one of the following
    strings:

    * ``short`` Returns major + minor branch version string with the format of
      B.b.t.
    * ``normal`` Returns human readable version string with the format of
      B.b.t type type_num.
    * ``verbose`` Returns a verbose version string with the format of
      B.b.t type type_num @git_sha.
    * ``all`` Returns a dict of all versions.

    An unrecognized ``form`` returns None (preserved legacy behavior).
    """
    versions = {}
    branch = "%s.%s" % (VERSION[0], VERSION[1])
    tertiary = VERSION[2]
    type_ = VERSION[3]
    type_num = VERSION[4]
    versions["branch"] = branch
    v = versions["branch"]
    if tertiary:
        versions["tertiary"] = "." + str(tertiary)
        v += versions["tertiary"]
    versions['short'] = v
    # Fixed: compare strings with ==, not "is" -- identity of equal string
    # literals is a CPython interning accident, not a language guarantee.
    if form == "short":
        return v
    v += " " + type_ + " " + str(type_num)
    versions["normal"] = v
    if form == "normal":
        return v
    v += " @" + git_sha()
    versions["verbose"] = v
    if form == "verbose":
        return v
    if form == "all":
        return versions
    return None
def addDtdEntity(self, name, type, ExternalID, SystemID, content):
    """Register a new entity for this document DTD external subset."""
    raw = libxml2mod.xmlAddDtdEntity(self._o, name, type, ExternalID,
                                     SystemID, content)
    if raw is None:
        raise treeError('xmlAddDtdEntity() failed')
    # Wrap the raw C-level node in the Python-side entity object.
    return xmlEntity(_obj=raw)
def api_call(self, service, method, data, options):
    """Perform a META adptools API call, retrying on transient failures.

    :param service: adptools service name, appended to the URL path.
    :param method: service method name; also the key read from the
        response's ``data`` payload.
    :param data: dict of call arguments; a stray ``self`` entry is stripped.
    :param options: optional dict merged into ``data`` before sending.
    :returns: the ``data[method]`` part of the decoded JSON response.
    :raises DbQueryError: when the server returns an ``error`` payload.
    :raises UnexpectedError: when a 200 response has neither data nor error.
    :raises ServerError: when all 20 attempts are exhausted.
    """
    if 'self' in data:  # may be absent when called directly from code
        # rather than through wrapper layers such as DbQueryService
        data.pop("self")
    if options:
        data.update(options)
    _headers = dict(self.__default_headers)
    if self.auth_user_id:
        _headers['X-META-AuthUserID'] = str(self.auth_user_id)
    # timeout is (connect, read) in seconds -- long read for heavy queries.
    request = {"url": self.meta_url + "/api/v1/adptools/" + service + "/" + method, "data": json.dumps(data), "headers": _headers, "timeout": (60, 1800)}
    # Up to 20 attempts; connection-level problems sleep 15s then retry.
    for _try_idx in range(20):
        try:
            resp = requests.post(**request)
            if resp.status_code == 200:
                decoded_resp = json.loads(resp.text)
                if 'data' in decoded_resp:
                    return decoded_resp['data'][method]
                if 'error' in decoded_resp:
                    if 'details' in decoded_resp['error']:
                        eprint(decoded_resp['error']['details'])
                    raise DbQueryError(decoded_resp['error'])
                raise UnexpectedError()
            else:
                # Non-200: delegate to the shared error-code handler.
                process_meta_api_error_code(resp.status_code, request, resp.text)
        except (requests.exceptions.ConnectionError, ConnectionError, TimeoutError) as e:
            self.log.warning('META API Connection Error. Sleep...', {"e": e})
            time.sleep(15)
        except Exception as e:
            # The service signals partial/total unavailability with a
            # human-readable Russian message; treat that as transient too.
            if 'Служба частично или полностью недоступна' in str(e):
                self.log.warning('META API Connection Error. Sleep...', {"e": e})
                time.sleep(15)
            else:
                raise e
    raise ServerError(request)
def _symlink_to_workdir(data, key):
    """For CWL support, symlink files into a working directory if in read-only imports."""
    src = tz.get_in(key, data)
    # Guard clause: nothing to do when there is no file or it already
    # lives under the working directory.
    if not src or src.startswith(dd.get_work_dir(data)):
        return data
    caller = genotype.get_variantcaller(data, require_bam=False) or "precalled"
    dst = os.path.join(dd.get_work_dir(data), caller, os.path.basename(src))
    utils.safe_makedir(os.path.dirname(dst))
    utils.symlink_plus(src, dst)
    return tz.update_in(data, key, lambda _: dst)
def stop_tcp_server(self):
    """Stops the TCP server.

    :return: Method success.
    :rtype: bool
    """
    # Warn and bail out early when there is nothing to stop.
    if not self.__tcp_server.online:
        self.__engine.notifications_manager.warnify(
            "{0} | TCP Server is not online!".format(self.__class__.__name__))
        return False
    if self.__tcp_server.stop():
        self.__engine.notifications_manager.notify(
            "{0} | TCP Server has stopped!".format(self.__class__.__name__))
        return True
    # Server was online but stop() reported failure.
    return False
def set_attr(self, **kwargs):
    """Set attributes to the Booster.

    Parameters
    ----------
    **kwargs
        The attributes to set.
        Setting a value to None deletes an attribute.

    Returns
    -------
    self : Booster
        Booster with set attributes.
    """
    for name, val in kwargs.items():
        if val is None:
            # None means "delete"; a missing key is silently ignored.
            self.__attr.pop(name, None)
            continue
        if not isinstance(val, string_type):
            raise ValueError("Only string values are accepted")
        self.__attr[name] = val
    return self
def translate_response_to_dict(rawdata, dirt):
    """Translate SMT response to a python dictionary.

    SMT response example (one "marker value" pair per line)::

        keyword1: value1
        keyword2: value2
        keywordn: valuen

    Will return a python dictionary::

        {keyword1: value1, keyword2: value2, keywordn: valuen}

    :param rawdata: raw multi-line SMT response text.
    :param dirt: mapping of output key -> marker substring to find in a line.
        The first marker found in a line wins (iteration order of ``dirt``).
    :raises exception.SDKInternalError: when no marker matched any line.
    """
    data = {}
    for line in rawdata.split("\n"):
        for key in dirt:
            marker = dirt[key]
            # Idiomatic membership test (was ls.__contains__(...)).
            if marker in line:
                # Value is everything after the marker, whitespace-stripped.
                data[key] = line[line.find(marker) + len(marker):].strip()
                break
    if not data:
        msg = ("Invalid smt response data. Error: No value matched with "
               "keywords. Raw Data: %(raw)s; Keywords: %(kws)s"
               % {'raw': rawdata, 'kws': str(dirt)})
        raise exception.SDKInternalError(msg=msg)
    return data
def _gen_figure(nxplot=1, nyplot=1, figargs=None, projection=None,
                sharex='none', joinx=False, sharey='none', joiny=False,
                x=None, nxlabel=None, xlabels=None, nxdecimal=None, xmin=None, xmax=None,
                y=None, nylabel=None, ylabels=None, nydecimal=None, ymin=None, ymax=None,
                z=None, nzlabel=None, zlabels=None, nzdecimal=None, zmin=None, zmax=None,
                r=None, nrlabel=None, rlabels=None, nrdecimal=None, rmin=None, rmax=None,
                t=None, ntlabel=None, tlabels=None, fontsize=20):
    """Returns a figure object with as much customization as provided.

    Builds an nxplot-by-nyplot grid of axes (optionally with a '3d' or
    'polar' projection), then applies per-dimension limits, tick counts,
    tick labels and decimal formatting for whichever of x/y/z/r/t apply
    to the chosen projection.

    NOTE(review): helpers ``_gen_projected``/``_gen_shared`` are defined
    elsewhere in this file; each is assumed to return (figure, iterable of
    axes) -- confirm against their definitions.
    """
    figargs = {} if figargs is None else figargs
    if projection is not None:
        fig, axs = _gen_projected(nxplot, nyplot, projection, figargs)
    else:
        fig, axs = _gen_shared(nxplot, nyplot, sharex, sharey, figargs)
    # joinx/joiny collapse the gaps between subplots so they share edges.
    adj = {}
    if joinx:
        adj.update({'hspace': 0})
    if joiny:
        adj.update({'wspace': 0})
    fig.subplots_adjust(**adj)
    # Which data dimensions are meaningful depends on the projection.
    data = {}
    if projection is None:
        data = {'x': x, 'y': y}
    elif projection == '3d':
        data = {'x': x, 'y': y, 'z': z}
    elif projection == 'polar':
        data = {'r': r, 't': t}
    methods = {}
    for ax in axs:
        # Bundle the per-axis setter methods with the matching user options
        # so the loop below can treat every dimension uniformly.
        if 'x' in data:
            methods['x'] = (ax.set_xlim, ax.set_xticks, ax.set_xticklabels, nxlabel, xlabels, nxdecimal, xmin, xmax)
        if 'y' in data:
            methods['y'] = (ax.set_ylim, ax.set_yticks, ax.set_yticklabels, nylabel, ylabels, nydecimal, ymin, ymax)
        if 'z' in data:
            methods['z'] = (ax.set_zlim, ax.set_zticks, ax.set_zticklabels, nzlabel, zlabels, nzdecimal, zmin, zmax)
        if 'r' in data:
            methods['r'] = (ax.set_rlim, ax.set_rticks, ax.set_rgrids, nrlabel, rlabels, nrdecimal, rmin, rmax)
        if 't' in data:
            methods['t'] = (ax.set_thetagrids, ntlabel, tlabels)
        for dim, arr in data.items():
            if dim == 't':
                # Theta is special: evenly spaced angular grid lines.
                grids, nlabel, labls = methods[dim]
                if ntlabel is not None:
                    theta = np.arange(0, 2 * np.pi, 2 * np.pi / ntlabel)
                    if labls is not None:
                        grids(np.degrees(theta), labls, fontsize=fontsize)
                    else:
                        grids(np.degrees(theta), fontsize=fontsize)
            else:
                lim, ticks, labels, nlabel, labls, decs, mins, maxs = methods[dim]
                if arr is not None:
                    # Explicit min/max win; otherwise use the data's range.
                    amin = mins if mins is not None else arr.min()
                    amax = maxs if maxs is not None else arr.max()
                    lim((amin, amax))
                elif mins is not None and maxs is not None:
                    # NOTE(review): this branch reads amin/amax, which are
                    # never assigned when arr is None -- it looks like a
                    # line such as "amin, amax = mins, maxs" was lost from
                    # this source; confirm against the upstream project.
                    if nlabel is not None:
                        ticks(np.linspace(amin, amax, nlabel))
                    if decs is not None:
                        sub = "{{:.{}f}}".format(decs).format
                        labels([sub(i) for i in np.linspace(amin, amax, nlabel)])
                if labls is not None:
                    labels(labls)
            ax.tick_params(axis=dim, labelsize=fontsize)
    return fig
def find_eq_stress(strains, stresses, tol=1e-10):
    """Finds stress corresponding to zero strain state in stress-strain list.

    Args:
        strains (Nx3x3 array-like): array corresponding to strains
        stresses (Nx3x3 array-like): array corresponding to stresses
        tol (float): tolerance to find zero strain state
    """
    stresses_arr = np.array(stresses)
    strains_arr = np.array(strains)
    # Pick out every stress whose entire 3x3 strain tensor is ~zero.
    zero_strain = np.all(abs(strains_arr) < tol, axis=(1, 2))
    matches = stresses_arr[zero_strain]
    if matches.size == 0:
        warnings.warn("No eq state found, returning zero voigt stress")
        return Stress(np.zeros((3, 3)))
    if len(matches) > 1 and not (abs(matches - matches[0]) < 1e-8).all():
        raise ValueError("Multiple stresses found for equilibrium strain"
                         " state, please specify equilibrium stress or "
                         " remove extraneous stresses.")
    return matches[0]
def add_uid(fastq, cores):
    '''Adds UID:[samplebc cellbc umi] to readname for umi-tools deduplication.

    Expects formatted fastq files with correct sample and cell barcodes.
    Reads are processed in parallel chunks and written to stdout.
    '''
    p = multiprocessing.Pool(cores)
    try:
        # 10k reads per chunk, `cores` chunks dispatched per map call.
        chunks = tz.partition_all(10000, read_fastq(fastq))
        bigchunks = tz.partition_all(cores, chunks)
        for bigchunk in bigchunks:
            # append_uids is called directly; the previous no-op
            # functools.partial wrapper added nothing.
            for chunk in p.map(append_uids, list(bigchunk)):
                for read in chunk:
                    sys.stdout.write(read)
    finally:
        # Fixed: the worker pool was never closed, leaking processes.
        p.close()
        p.join()
def matrix(m, n, lst, m_text: list = None, n_text: list = None):
    """Render an m-by-n grid as text, marking each (row, col) in ``lst``.

    m: row
    n: column
    lst: items

    >>> print(matrix(2, 3, [(1, 1), (2, 3)]))
    """
    parts = []
    if n_text:
        # Optional column-header line.
        parts.append(" {}\n".format(" ".join(n_text)))
    for row in range(1, m + 1):
        if m_text:
            # Optional row label, truncated/padded to 4 chars.
            parts.append("{:<4.4} ".format(m_text[row - 1]))
        parts.append("|")
        for col in range(1, n + 1):
            parts.append("x|" if (row, col) in lst else " |")
        parts.append("\n")
    return "".join(parts)
def user(self):
    """Creates a User object when requested."""
    # Lazily build and cache the MatrixUser on first access.
    if not hasattr(self, "_user"):
        self._user = MatrixUser(self.mxid, self.Api(identity=self.mxid))
    return self._user
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.