signature
stringlengths 29
44.1k
| implementation
stringlengths 0
85.2k
|
|---|---|
def tojson(self) -> str:
    """Serialize this Event into a JSON string.

    Returns
    -------
    str
        JSON-serialized representation of the Event.
    """
    payload = {
        'event_id': str(self.id),
        'event_type': self.type,
        'schema_name': self.schema_name,
        'table_name': self.table_name,
        'row_id': self.row_id,
    }
    return json.dumps(payload)
|
def call_func(self, req, *args, **kwargs):
    """Add json_error_formatter to any webob HTTPExceptions."""
    try:
        return super(SdkWsgify, self).call_func(req, *args, **kwargs)
    except webob.exc.HTTPException as exc:
        # Log the original exception, then decorate it with a JSON body
        # before re-raising so clients receive a structured error.
        LOG.debug(('encounter %(error)s error') % {'error': exc})
        exc.json_formatter = json_error_formatter
        fault = {
            'overallRC': 400,
            'rc': 400,
            'rs': exc.status_int,
            'modID': SDKWSGI_MODID,
            'output': '',
            'errmsg': six.text_type(exc),
        }
        exc.text = six.text_type(json.dumps(fault))
        raise exc
|
def get_content_version(cls, abspath):
    """Return a version string for the resource at the given path.

    The version is an MD5 digest over the file's formatted modification
    time followed by its contents, so a change to either produces a new
    version.  This class method may be overridden by subclasses.

    .. versionadded:: 3.1
    """
    content = cls.get_content(abspath)
    digest = hashlib.md5()
    # Fold in the modification timestamp first so the version changes on
    # touch even when the bytes are identical.
    stamp = format(cls.get_content_modified_time(abspath), "%Y-%m-%d %H:%M:%S")
    digest.update(stamp.encode())
    if isinstance(content, bytes):
        digest.update(content)
    else:
        # get_content may return the data as an iterable of chunks.
        for piece in content:
            digest.update(piece)
    return digest.hexdigest()
|
def _detect(self):
    """Detect un-indexed ERC20 event parameters in all contracts."""
    results = []
    for contract in self.contracts:
        unindexed = self.detect_erc20_unindexed_event_params(contract)
        if not unindexed:
            continue
        info = "{} ({}) does not mark important ERC20 parameters as 'indexed':\n".format(
            contract.name, contract.source_mapping_str)
        for event, parameter in unindexed:
            info += "\t-{} ({}) does not index parameter '{}'\n".format(
                event.name, event.source_mapping_str, parameter.name)
        # Attach only the events to the result (the parameters/vars have
        # no source mapping of their own).
        result = self.generate_json_result(info)
        self.add_functions_to_json([event for event, _ in unindexed], result)
        results.append(result)
    return results
|
def render(self, **kwargs):
    """Render the HTML representation of the element.

    Adds a placeholder ``<div>`` to the parent's HTML, a positioning
    ``<style>`` block to the figure header, and then embeds the chart
    using the Vega-Lite major version parsed from the chart data.
    """
    # Major version is derived from the chart data's schema.
    vegalite_major_version = self._get_vegalite_major_versions(self.data)
    # Placeholder element the chart will be rendered into.
    self._parent.html.add_child(Element(Template("""
            <div id="{{this.get_name()}}"></div>
            """).render(this=self, kwargs=kwargs)), name=self.get_name())
    figure = self.get_root()
    assert isinstance(figure, Figure), ('You cannot render this Element '
                                        'if it is not in a Figure.')
    # NOTE(review): the CSS rule below appears to be missing its closing
    # brace before </style> -- confirm against the rendered output.
    figure.header.add_child(Element(Template("""
            <style> #{{this.get_name()}} {
                position : {{this.position}};
                width : {{this.width[0]}}{{this.width[1]}};
                height: {{this.height[0]}}{{this.height[1]}};
                left: {{this.left[0]}}{{this.left[1]}};
                top: {{this.top[0]}}{{this.top[1]}};
            </style>
            """).render(this=self, **kwargs)), name=self.get_name())
    # Dispatch on the schema's major version.
    if vegalite_major_version == '1':
        self._embed_vegalite_v1(figure)
    elif vegalite_major_version == '2':
        self._embed_vegalite_v2(figure)
    elif vegalite_major_version == '3':
        self._embed_vegalite_v3(figure)
    else:
        # Version 2 is assumed as the default, if no version is given
        # in the schema.
        self._embed_vegalite_v2(figure)
|
def update_contents(self, contents, mime_type):
    """Update the stored contents and refresh hash, size and (when the
    existing contents changed) the modification time."""
    import hashlib
    import time

    new_size = len(contents)
    self.mime_type = mime_type
    # Plain text is stored UTF-8 encoded; anything else is stored as-is.
    self.contents = contents.encode('utf-8') if mime_type == 'text/plain' else contents
    previous_hash = self.hash
    self.hash = hashlib.md5(self.contents).hexdigest()
    # Bump the modification time only when non-empty existing contents
    # actually changed (self.size refers to the previous size here).
    if self.size and previous_hash != self.hash:
        self.modified = int(time.time())
    self.size = new_size
|
def whoami(self) -> dict:
    """Return basic information about the authenticated character.

    If this Preston instance is not authenticated, no request is made
    and an empty dict is returned.

    Returns:
        character info if authenticated, otherwise an empty dict
    """
    if not self.access_token:
        return {}
    self._try_refresh_access_token()
    response = self.session.get(self.WHOAMI_URL)
    return response.json()
|
def sends(self, tag=None, fromdate=None, todate=None):
    """Get a total count of the emails you've sent out."""
    query = dict(tag=tag, fromdate=fromdate, todate=todate)
    return self.call("GET", "/stats/outbound/sends", **query)
|
def reduce(self, reduce_fn):
    """Apply a rolling sum (Reduce) operator to the stream.

    Args:
        reduce_fn (callable): The reduction function applied to the
            stream's records.

    Returns:
        The stream resulting from registering the Reduce operator.
    """
    # NOTE(review): the operator name is hard-wired to "Sum" and the
    # original docstring described a ``sum_attribute_index`` parameter
    # that does not exist -- confirm the intended contract.
    op = Operator(_generate_uuid(), OpType.Reduce, "Sum", reduce_fn,
                  num_instances=self.env.config.parallelism)
    return self.__register(op)
|
def copy(self, src_url, dst_url):
    """Copy an S3 object to another S3 location."""
    src_bucket, src_key = _parse_url(src_url)
    dst_bucket, dst_key = _parse_url(dst_url)
    # Default to the source bucket when the destination URL omits one.
    target_bucket = dst_bucket or src_bucket
    return self.call(
        "CopyObject",
        copy_source='/'.join((src_bucket, src_key)),
        bucket=target_bucket,
        key=dst_key,
    )
|
def splitFASTA(params):
    """Read the FASTA file named params['fastaFile'] and write its
    sequences into files named 0.fasta, 1.fasta, etc., with
    params['seqsPerJob'] sequences per file.

    Returns:
        (fileCount, seqCount): the number of output files written and the
        total number of sequences read.
    """
    # NOTE: this only checks the final character; any name ending in 'a'
    # (e.g. '.fa') passes, matching the original behavior.
    assert params['fastaFile'][-1] == 'a', ('You must specify a file in '
                                            'fasta-format that ends in '
                                            '.fasta')
    fileCount = count = seqCount = 0
    outfp = None
    with open(params['fastaFile']) as infp:
        for seq in SeqIO.parse(infp, 'fasta'):
            seqCount += 1
            if count == params['seqsPerJob']:
                # Current chunk is full; close it and start a new file.
                outfp.close()
                count = 0
            if count == 0:
                outfp = open('%d.fasta' % fileCount, 'w')
                fileCount += 1
            count += 1
            outfp.write('>%s\n%s\n' % (seq.description, str(seq.seq)))
    # Bug fix: with an empty input file no output file was ever opened,
    # and the unconditional outfp.close() raised AttributeError on None.
    if outfp is not None:
        outfp.close()
    return fileCount, seqCount
|
def get_default_config(self):
    """Return the default collector settings."""
    defaults = super(ExampleCollector, self).get_default_config()
    defaults.update({'path': 'example'})
    return defaults
|
def arrow_respond(slider, event):
    """Event handler for arrow-key events in plot windows.

    Pass the slider object to update as a masked argument using a
    lambda function::

        lambda evt: arrow_respond(my_slider, evt)

    Parameters
    ----------
    slider : Slider instance associated with this handler.
    event : Event to be handled.
    """
    key = event.key
    if key == 'right':
        slider.set_val(min(slider.val + 1, slider.valmax))
    elif key == 'left':
        slider.set_val(max(slider.val - 1, slider.valmin))
|
def translatePoints(points, movex, movey):
    """Return a generator producing all of the (x, y) tuples in `points`
    moved over by `movex` and `movey`.

    >>> points = [(0, 0), (5, 10), (25, 25)]
    >>> list(translatePoints(points, 1, -3))
    [(1, -3), (6, 7), (26, 22)]
    """
    # Note: There is no translatePoint() function because that's trivial.
    _checkForIntOrFloat(movex)
    _checkForIntOrFloat(movey)
    try:
        for x, y in points:
            _checkForIntOrFloat(x)
            _checkForIntOrFloat(y)
            yield x + movex, y + movey
    except Exception:
        # Bug fix: a bare ``except:`` also swallowed SystemExit and
        # KeyboardInterrupt; catch only ordinary exceptions.
        raise PyBresenhamException('`points` argument must be an iterable of (x, y) points.')
|
def to_array(self):
    """Serialize this Contact to a dictionary.

    :return: dictionary representation of this object.
    :rtype: dict
    """
    data = super(Contact, self).to_array()
    # Required fields (py2: type unicode, py3: type str).
    data['phone_number'] = u(self.phone_number)
    data['first_name'] = u(self.first_name)
    # Optional fields are emitted only when set.
    if self.last_name is not None:
        data['last_name'] = u(self.last_name)      # py2: unicode, py3: str
    if self.user_id is not None:
        data['user_id'] = int(self.user_id)        # type int
    if self.vcard is not None:
        data['vcard'] = u(self.vcard)              # py2: unicode, py3: str
    return data
|
def convertColors(element):
    """Recursively convert all color properties into #RRGGBB format if shorter.

    Returns the number of bytes saved across this element and its children.
    """
    numBytes = 0
    if element.nodeType != Node.ELEMENT_NODE:
        return 0
    # Set up the list of color attributes for each element type.
    attrsToConvert = []
    if element.nodeName in ['rect', 'circle', 'ellipse', 'polygon',
                            'line', 'polyline', 'path', 'g', 'a']:
        attrsToConvert = ['fill', 'stroke']
    elif element.nodeName in ['stop']:
        attrsToConvert = ['stop-color']
    elif element.nodeName in ['solidColor']:
        attrsToConvert = ['solid-color']
    # Now convert all the color formats.
    styles = _getStyle(element)
    for attr in attrsToConvert:
        oldColorValue = element.getAttribute(attr)
        if oldColorValue != '':
            newColorValue = convertColor(oldColorValue)
            oldBytes = len(oldColorValue)
            newBytes = len(newColorValue)
            if oldBytes > newBytes:
                element.setAttribute(attr, newColorValue)
                numBytes += (oldBytes - len(element.getAttribute(attr)))
        # Colors might also hide in styles.
        if attr in styles:
            oldColorValue = styles[attr]
            newColorValue = convertColor(oldColorValue)
            oldBytes = len(oldColorValue)
            newBytes = len(newColorValue)
            if oldBytes > newBytes:
                styles[attr] = newColorValue
                # Bug fix: the savings here come from the shortened style
                # value, not from the (possibly empty) attribute of the
                # same name that the original line measured.
                numBytes += (oldBytes - newBytes)
    _setStyle(element, styles)
    # Recurse into our child elements.
    for child in element.childNodes:
        numBytes += convertColors(child)
    return numBytes
|
def list(self):
    """Get a list of the names of the functions stored in this database."""
    cursor = self._db.system.js.find(projection=["_id"])
    return [doc["_id"] for doc in cursor]
|
def getPlainText(self, iv, key, ciphertext):
    """Decrypt ``ciphertext`` with AES using ``key`` and ``iv``.

    :type iv: bytearray
    :type key: bytearray
    :type ciphertext: bytearray
    :raises InvalidMessageException: wraps any decryption/decoding error.
    """
    try:
        cipher = AESCipher(key, iv)
        decrypted = cipher.decrypt(ciphertext)
        # On Python 3 the decrypted bytes are decoded to str; the decode
        # stays inside the try so decode errors are wrapped too.
        if sys.version_info >= (3, 0):
            decrypted = decrypted.decode()
        return decrypted
    except Exception as e:
        raise InvalidMessageException(e)
|
def to_hemi_str(s):
    '''to_hemi_str(s) yields 'lh', 'rh', or 'lr' depending on the input s.

    Match rules:
      * None or Ellipsis                  -> 'lr'
      * non-string input                  -> ValueError
      * otherwise s is lower-cased, then:
        - 'lh', 'rh', 'lr'                -> s
        - 'left', 'l', 'sh'               -> 'lh'
        - 'right', 'r', 'dh'              -> 'rh'
        - 'both', 'all', 'xh'             -> 'lr'
        - anything else                   -> ValueError
    '''
    if s is None or s is Ellipsis:
        return 'lr'
    if not pimms.is_str(s):
        raise ValueError('to_hemi_str(%s): not a string or ... or None' % s)
    s = s.lower()
    aliases = {'lh': 'lh', 'rh': 'rh', 'lr': 'lr',
               'left': 'lh', 'l': 'lh', 'sh': 'lh',
               'right': 'rh', 'r': 'rh', 'dh': 'rh',
               'both': 'lr', 'all': 'lr', 'xh': 'lr'}
    result = aliases.get(s)
    if result is None:
        raise ValueError('Could not understand to_hemi_str argument: %s' % s)
    return result
|
def compute_feed_stats(
    feed: "Feed", trip_stats: DataFrame, dates: List[str]
) -> DataFrame:
    """Compute some feed stats for the given dates and trip stats.

    Parameters
    ----------
    feed : Feed
    trip_stats : DataFrame
        Trip stats to consider, in the format output by
        :func:`.trips.compute_trip_stats`
    dates : string or list
        A YYYYMMDD date string or list thereof indicating the date(s)
        for which to compute stats

    Returns
    -------
    DataFrame
        The columns are

        - ``'date'``
        - ``'num_stops'``: number of stops active on the date
        - ``'num_routes'``: number of routes active on the date
        - ``'num_trips'``: number of trips that start on the date
        - ``'num_trip_starts'``: number of trips with nonnull start times
          on the date
        - ``'num_trip_ends'``: number of trips with nonnull start and end
          times on the date, ignoring trips that end after 23:59:59
        - ``'peak_num_trips'``: maximum number of simultaneous trips in
          service on the date
        - ``'peak_start_time'`` / ``'peak_end_time'``: bounds of the first
          longest period during which the peak number of trips occurs
        - ``'service_distance'`` / ``'service_duration'``: sums over the
          active routes on the date
        - ``'service_speed'``: service_distance / service_duration

        Dates with no trip activity will have null stats.  Dates outside
        the Feed's date range are excluded; if all dates lie outside the
        range, an empty DataFrame is returned.

    Notes
    -----
    - The stats for date d contain stats for trips that start on date d
      only, ignoring trips that start on d-1 and end on d.
    - Assume the feed attributes used in :func:`.trips.get_trips`,
      :func:`.routes.get_routes` and :func:`.stops.get_stops` are not
      ``None``.
    """
    dates = feed.restrict_dates(dates)
    if not dates:
        return pd.DataFrame()

    ts = trip_stats.copy()
    activity = feed.compute_trip_activity(dates)
    stop_times = feed.stop_times.copy()

    # Convert timestrings to seconds for quicker calculations.
    ts[["start_time", "end_time"]] = ts[["start_time", "end_time"]].applymap(
        hp.timestr_to_seconds
    )

    # Collect stats for each date, memoizing stats by trip ID sequence to
    # avoid unnecessary recomputations: dates with an identical set of
    # active trips share one stats dictionary.
    # Store in a dictionary of the form
    #   trip ID sequence -> [stats dictionary, list of dates stats apply to]
    stats_and_dates_by_ids = {}
    cols = [
        "num_stops",
        "num_routes",
        "num_trips",
        "num_trip_starts",
        "num_trip_ends",
        "peak_num_trips",
        "peak_start_time",
        "peak_end_time",
        "service_distance",
        "service_duration",
        "service_speed",
    ]
    null_stats = {c: np.nan for c in cols}
    for date in dates:
        stats = {}
        ids = tuple(activity.loc[activity[date] > 0, "trip_id"])
        if ids in stats_and_dates_by_ids:
            # Already computed for this trip set: append the date.
            stats_and_dates_by_ids[ids][1].append(date)
        elif not ids:
            # No trips active on this date: null stats.
            stats_and_dates_by_ids[ids] = [null_stats, [date]]
        else:
            # Compute stats for this trip set.
            f = ts[ts["trip_id"].isin(ids)].copy()
            stats["num_stops"] = stop_times.loc[
                stop_times["trip_id"].isin(ids), "stop_id"
            ].nunique()
            stats["num_routes"] = f["route_id"].nunique()
            stats["num_trips"] = f.shape[0]
            stats["num_trip_starts"] = f["start_time"].count()
            # Ignore trips that end after 23:59:59 (24h in seconds).
            stats["num_trip_ends"] = f.loc[
                f["end_time"] < 24 * 3600, "end_time"
            ].count()
            stats["service_distance"] = f["distance"].sum()
            stats["service_duration"] = f["duration"].sum()
            stats["service_speed"] = (
                stats["service_distance"] / stats["service_duration"]
            )

            # Compute peak stats, which is the slowest part.
            active_trips = hp.get_active_trips_df(f[["start_time", "end_time"]])
            times, counts = active_trips.index.values, active_trips.values
            start, end = hp.get_peak_indices(times, counts)
            stats["peak_num_trips"] = counts[start]
            stats["peak_start_time"] = times[start]
            stats["peak_end_time"] = times[end]

            # Record stats.
            stats_and_dates_by_ids[ids] = [stats, [date]]

    # Assemble stats into a DataFrame, one row per (stats, date) pair.
    rows = []
    for stats, dates_ in stats_and_dates_by_ids.values():
        for date in dates_:
            s = copy.copy(stats)
            s["date"] = date
            rows.append(s)

    f = pd.DataFrame(rows).sort_values("date")

    # Convert seconds back to timestrings.
    times = ["peak_start_time", "peak_end_time"]
    f[times] = f[times].applymap(lambda t: hp.timestr_to_seconds(t, inverse=True))

    return f
|
def set_mode(self, *modes, **kwargs):
    """Set (enable) a given list of modes.

    :param list modes: modes to set, where each mode is a constant
                       from :mod:`pyte.modes`.
    :param bool private: when true, the given codes are treated as
                         private (DEC) modes.
    """
    # Private mode codes are shifted, to be distinguished from non-private
    # ones.
    if kwargs.get("private"):
        modes = [mode << 5 for mode in modes]
        if mo.DECSCNM in modes:
            # Toggling reverse video redraws every line.
            self.dirty.update(range(self.lines))
    self.mode.update(modes)
    # When DECCOLM mode is set, the screen is erased and the cursor moves
    # to the home position.
    if mo.DECCOLM in modes:
        self.saved_columns = self.columns
        self.resize(columns=132)
        self.erase_in_display(2)
        self.cursor_position()
    # According to VT520 manual, DECOM should also home the cursor.
    if mo.DECOM in modes:
        self.cursor_position()
    # Mark all displayed characters as reverse.
    if mo.DECSCNM in modes:
        for line in self.buffer.values():
            line.default = self.default_char
            for x in line:
                line[x] = line[x]._replace(reverse=True)
        self.select_graphic_rendition(7)  # +reverse.
    # Make the cursor visible.
    if mo.DECTCEM in modes:
        self.cursor.hidden = False
|
def default_sort_key(item, order=None):
    """Return a key that can be used for sorting.

    The key has the structure::

        (class_key, (len(args), args), exponent.sort_key(), coefficient)

    This key is supplied by the sort_key routine of Basic objects when
    ``item`` is a Basic object or an object (other than a string) that
    sympifies to a Basic object.  Otherwise, this function produces the
    key itself.

    The ``order`` argument is passed along to the sort_key routine and is
    used to determine how the terms *within* an expression are ordered.
    ``order`` options are: 'lex', 'grlex', 'grevlex', and reversed values
    of the same (e.g. 'rev-lex').  The default value None translates to
    'lex'.

    Examples
    ========

    >>> from sympy import S, I, default_sort_key
    >>> from sympy.abc import x

    >>> x.sort_key() == default_sort_key(x)
    True
    >>> default_sort_key(S.One)
    ((1, 0, 'Number'), (0, ()), (), 1)
    >>> a = [2, I, -I]
    >>> sorted(a, key=default_sort_key)
    [2, -I, I]

    While sort_key is a method only defined for SymPy objects,
    default_sort_key accepts anything as an argument, so it is more robust
    as a sorting key (plain ints, strings, dicts, sets and other iterables
    are all handled).

    Notes
    =====

    The key is useful for getting items into a canonical order that is the
    same across platforms.  It is not directly useful for sorting lists of
    expressions in display order; see ``as_ordered_terms`` for that.
    """
    from sympy.core import S, Basic
    from sympy.core.sympify import sympify, SympifyError
    from sympy.core.compatibility import iterable

    if isinstance(item, Basic):
        # Basic objects supply their own key.
        return item.sort_key(order=order)

    if iterable(item, exclude=string_types):
        if isinstance(item, dict):
            args = item.items()
            unordered = True
        elif isinstance(item, set):
            args = item
            unordered = True
        else:
            # e.g. tuple, list
            args = list(item)
            unordered = False
        # Key each element recursively.
        args = [default_sort_key(arg, order=order) for arg in args]
        if unordered:
            # e.g. dict, set: impose a canonical order on the elements.
            args = sorted(args)
        cls_index, args = 10, (len(args), tuple(args))
    else:
        if not isinstance(item, string_types):
            try:
                item = sympify(item)
            except SympifyError:
                # e.g. lambda x: x
                pass
            else:
                if isinstance(item, Basic):
                    # e.g. int -> Integer
                    return default_sort_key(item)
                # e.g. UndefinedFunction
        # e.g. str
        cls_index, args = 0, (1, (str(item),))
    return (cls_index, 0, item.__class__.__name__), args, S.One.sort_key(), S.One
|
def group_num(self):
    """Current group number.

    :getter: Returns current group number
    :setter: Sets current group number
    :type: int
    """
    state = XkbStateRec()
    # Query the core keyboard's state into our struct, then report the
    # active group index.
    XkbGetState(self._display, XkbUseCoreKbd, byref(state))
    return state.group
|
def _algebraic_rules_circuit():
    """Set the default algebraic rules for the operations defined in this
    module (SeriesProduct, Concatenation, Feedback)."""
    # Wildcard patterns constrained by head class, used in the rules below.
    A_CPermutation = wc("A", head=CPermutation)
    B_CPermutation = wc("B", head=CPermutation)
    C_CPermutation = wc("C", head=CPermutation)
    D_CPermutation = wc("D", head=CPermutation)

    A_Concatenation = wc("A", head=Concatenation)
    B_Concatenation = wc("B", head=Concatenation)

    A_SeriesProduct = wc("A", head=SeriesProduct)

    A_Circuit = wc("A", head=Circuit)
    B_Circuit = wc("B", head=Circuit)
    C_Circuit = wc("C", head=Circuit)

    # Double-underscore wildcards match one-or-more arguments.
    A__Circuit = wc("A__", head=Circuit)
    B__Circuit = wc("B__", head=Circuit)
    C__Circuit = wc("C__", head=Circuit)

    A_SLH = wc("A", head=SLH)
    B_SLH = wc("B", head=SLH)

    j_int = wc("j", head=int)
    k_int = wc("k", head=int)

    # Rules for simplifying the series product of two circuits.
    SeriesProduct._binary_rules.update(check_rules_dict([
        ('R001', (pattern_head(A_CPermutation, B_CPermutation),
                  lambda A, B: A.series_with_permutation(B))),
        ('R002', (pattern_head(A_SLH, B_SLH),
                  lambda A, B: A.series_with_slh(B))),
        ('R003', (pattern_head(A_Circuit, B_Circuit),
                  lambda A, B: _tensor_decompose_series(A, B))),
        ('R004', (pattern_head(A_CPermutation, B_Circuit),
                  lambda A, B: _factor_permutation_for_blocks(A, B))),
        # A circuit composed with its series inverse is the identity.
        ('R005', (pattern_head(A_Circuit, pattern(SeriesInverse, A_Circuit)),
                  lambda A: cid(A.cdim))),
        ('R006', (pattern_head(pattern(SeriesInverse, A_Circuit), A_Circuit),
                  lambda A: cid(A.cdim))),
    ]))

    # Rules for simplifying the concatenation of two circuits.
    Concatenation._binary_rules.update(check_rules_dict([
        ('R001', (pattern_head(A_SLH, B_SLH),
                  lambda A, B: A.concatenate_slh(B))),
        ('R002', (pattern_head(A_CPermutation, B_CPermutation),
                  lambda A, B: CPermutation.create(
                      concatenate_permutations(A.permutation, B.permutation)))),
        ('R003', (pattern_head(A_CPermutation, CIdentity),
                  lambda A: CPermutation.create(
                      concatenate_permutations(A.permutation, (0,))))),
        ('R004', (pattern_head(CIdentity, B_CPermutation),
                  lambda B: CPermutation.create(
                      concatenate_permutations((0,), B.permutation)))),
        # Pull trailing permutations out of concatenated series products.
        ('R005', (pattern_head(
                      pattern(SeriesProduct, A__Circuit, B_CPermutation),
                      pattern(SeriesProduct, C__Circuit, D_CPermutation)),
                  lambda A, B, C, D: (
                      (SeriesProduct.create(*A) + SeriesProduct.create(*C))
                      << (B + D)))),
        ('R006', (pattern_head(
                      pattern(SeriesProduct, A__Circuit, B_CPermutation),
                      C_Circuit),
                  lambda A, B, C: (
                      (SeriesProduct.create(*A) + C) << (B + cid(C.cdim))))),
        ('R007', (pattern_head(
                      A_Circuit,
                      pattern(SeriesProduct, B__Circuit, C_CPermutation)),
                  lambda A, B, C: (
                      (A + SeriesProduct.create(*B)) << (cid(A.cdim) + C)))),
    ]))

    # Rules for simplifying feedback loops.
    Feedback._rules.update(check_rules_dict([
        ('R001', (pattern_head(A_SeriesProduct, out_port=j_int, in_port=k_int),
                  lambda A, j, k: _series_feedback(A, out_port=j, in_port=k))),
        ('R002', (pattern_head(
                      pattern(SeriesProduct, A_CPermutation, B__Circuit),
                      out_port=j_int, in_port=k_int),
                  lambda A, B, j, k: _pull_out_perm_lhs(A, B, j, k))),
        ('R003', (pattern_head(
                      pattern(SeriesProduct, A_Concatenation, B__Circuit),
                      out_port=j_int, in_port=k_int),
                  lambda A, B, j, k: _pull_out_unaffected_blocks_lhs(A, B, j, k))),
        ('R004', (pattern_head(
                      pattern(SeriesProduct, A__Circuit, B_CPermutation),
                      out_port=j_int, in_port=k_int),
                  lambda A, B, j, k: _pull_out_perm_rhs(A, B, j, k))),
        ('R005', (pattern_head(
                      pattern(SeriesProduct, A__Circuit, B_Concatenation),
                      out_port=j_int, in_port=k_int),
                  lambda A, B, j, k: _pull_out_unaffected_blocks_rhs(A, B, j, k))),
    ]))
|
def _find_global ( self , module , func ) :
"""Return the class implementing ` module _ name . class _ name ` or raise
` StreamError ` if the module is not whitelisted ."""
|
if module == __name__ :
if func == '_unpickle_call_error' or func == 'CallError' :
return _unpickle_call_error
elif func == '_unpickle_sender' :
return self . _unpickle_sender
elif func == '_unpickle_context' :
return self . _unpickle_context
elif func == 'Blob' :
return Blob
elif func == 'Secret' :
return Secret
elif func == 'Kwargs' :
return Kwargs
elif module == '_codecs' and func == 'encode' :
return self . _unpickle_bytes
elif module == '__builtin__' and func == 'bytes' :
return BytesType
raise StreamError ( 'cannot unpickle %r/%r' , module , func )
|
def enabled_actions_for_env(env):
    """Return the set of action-name strings to perform when processing the
    given environment.

    Each decision combines the user's tri-state configuration value
    (Yes / No / IfNeeded) with whether some other enabled action requires it.
    """
    def enabled(config_value, required):
        # Tri-state resolution: explicit Yes/No wins; IfNeeded follows
        # whether another action actually requires this one.
        if config_value is Config.TriBool.No:
            return False
        if config_value is Config.TriBool.Yes:
            return True
        assert config_value is Config.TriBool.IfNeeded
        return bool(required)

    # Some old Python versions do not support HTTPS downloads and therefore
    # can not download installation packages from PyPI. To run setuptools or
    # pip based installations on such Python versions, all the required
    # installation packages need to be downloaded locally first using a
    # compatible Python version (e.g. Python 2.4.4 for Python 2.4.3) and then
    # installed locally.
    download_supported = not ((2, 4, 3) <= env.sys_version_info < (2, 4, 4))
    local_install = config.installation_cache_folder() is not None
    actions = set()
    pip_required = False
    run_pip_based_installations = enabled(config.install_environments, True)
    if run_pip_based_installations:
        actions.add("run pip based installations")
        pip_required = True
    if download_supported and enabled(config.download_installations,
                                      local_install and run_pip_based_installations):
        actions.add("download pip based installations")
        pip_required = True
    setuptools_required = False
    # NOTE(review): this re-reads config.install_environments to decide the
    # pip installation itself -- confirm a separate config value was not
    # intended here.
    run_pip_installation = enabled(config.install_environments, pip_required)
    if run_pip_installation:
        actions.add("run pip installation")
        setuptools_required = True
    if download_supported and enabled(config.download_installations,
                                      local_install and run_pip_installation):
        actions.add("download pip installation")
        setuptools_required = True
    if enabled(config.setup_setuptools, setuptools_required):
        actions.add("setup setuptools")
    return actions
|
def remove_all_cts_records_by(self, crypto_idfp):
    """Remove all CTS records belonging to the specified player.

    :param crypto_idfp: the player's crypto id fingerprint whose records
        should be removed.
    :return: None
    """
    # Bug fix: use a raw string so '\d' is a regex digit class rather than
    # an invalid string escape (DeprecationWarning on Python 3.6+, an
    # error in future versions).
    regex = re.compile(r'(.+)/cts100record/crypto_idfp(\d+)')
    to_remove = []
    for key, value in self.filter(regex, is_regex=True):
        if value == crypto_idfp:
            match = regex.match(key)
            to_remove.append((match.group(1), int(match.group(2))))
    # Collect first, then remove, so we do not mutate while iterating.
    for item in to_remove:
        self.remove_cts_record(*item)
|
def _build_request(self, type, commands):
    '''Build NX-API JSON request.'''
    headers = {'content-type': 'application/json'}
    request = {}
    if self.nxargs['connect_over_uds']:
        # Local unix-domain-socket transport authenticates via a cookie.
        user = self.nxargs['cookie']
        headers['cookie'] = 'nxapi_auth=' + user + ':local'
        request['url'] = self.NXAPI_UDS_URI_PATH
    else:
        request['url'] = '{transport}://{host}:{port}{uri}'.format(
            transport=self.nxargs['transport'],
            host=self.nxargs['host'],
            port=self.nxargs['port'],
            uri=self.NXAPI_REMOTE_URI_PATH,
        )
    # NX-API accepts multiple commands joined with ' ; '.
    if isinstance(commands, (list, set, tuple)):
        commands = ' ; '.join(commands)
    payload = {
        'ins_api': {
            'version': self.NXAPI_VERSION,
            'type': type,
            'chunk': '0',
            'sid': '1',
            'input': commands,
            'output_format': 'json',
        }
    }
    request['headers'] = headers
    request['payload'] = json.dumps(payload)
    request['opts'] = {'http_request_timeout': self.nxargs['timeout']}
    log.info('request: %s', request)
    return request
|
def mean(name, add, match):
    '''Accept a numeric value from the matched events and store a running
    average of the values in the given register. If the specified value is
    not numeric it will be skipped.

    USAGE:

    .. code-block:: yaml

        foo:
          reg.mean:
            - add: data_field
            - match: my/custom/event
    '''
    ret = {'name': name, 'changes': {}, 'comment': '', 'result': True}
    # Initialize the register entry on first use.
    if name not in __reg__:
        __reg__[name] = {}
        __reg__[name]['val'] = 0
        __reg__[name]['total'] = 0
        __reg__[name]['count'] = 0
    for event in __events__:
        try:
            event_data = event['data']['data']
        except KeyError:
            event_data = event['data']
        if salt.utils.stringutils.expr_match(event['tag'], match):
            if add in event_data:
                try:
                    # Bug fix: convert the named field's value, not the whole
                    # event-data mapping (int(dict) raises TypeError, which
                    # the original ValueError-only handler never caught).
                    comp = int(event_data[add])
                except (TypeError, ValueError):
                    # Non-numeric values are skipped per the docstring.
                    continue
                __reg__[name]['total'] += comp
                __reg__[name]['count'] += 1
                __reg__[name]['val'] = __reg__[name]['total'] / __reg__[name]['count']
    return ret
|
def get_supported_file_loaders_2(force=False):
    """Return a list of file-based module loaders.

    Each item is a tuple (loader, suffixes).
    """
    if force or (2, 7) <= sys.version_info < (3, 4):
        # Legacy path built on the deprecated ``imp`` module.
        # valid until which py3 version?
        import imp
        loaders = []
        for suffix, mode, type in imp.get_suffixes():
            loader = SourceFileLoader2 if type == imp.PY_SOURCE else ImpFileLoader2
            loaders.append((loader, [suffix]))
        return loaders
    elif sys.version_info >= (3, 4):
        from importlib.machinery import (
            SOURCE_SUFFIXES, SourceFileLoader,
            BYTECODE_SUFFIXES, SourcelessFileLoader,
            EXTENSION_SUFFIXES, ExtensionFileLoader,
        )
        # This triple is already defined in importlib._bootstrap_external
        # but is not exposed.
        return [
            (ExtensionFileLoader, EXTENSION_SUFFIXES),
            (SourceFileLoader, SOURCE_SUFFIXES),
            (SourcelessFileLoader, BYTECODE_SUFFIXES),
        ]
|
def listBlockChildren(self, block_name=""):
    """List the children of a block.

    :param block_name: fully qualified block name; must be provided and
        must not contain SQL/DBS wildcard characters ('%' or '*').
    """
    # Reject empty names and names containing wildcards.  The original
    # pattern "['%','*']" was a character class that also (accidentally)
    # matched quote and comma characters.
    if (not block_name) or re.search(r"[%*]", block_name):
        dbsExceptionHandler("dbsException-invalid-input",
                            "DBSBlock/listBlockChildren. Block_name must be provided.")
    conn = self.dbi.connection()
    try:
        results = self.blockchildlist.execute(conn, block_name)
        return results
    finally:
        # Always release the DB connection, even if execute() raises.
        if conn:
            conn.close()
|
def probes_used_extract_scores(full_scores, same_probes):
    """Extracts a matrix of scores for a model, given a probes_used row
    vector of boolean.

    :param full_scores: 2D array of scores (models x probes).
    :param same_probes: 1D boolean array selecting probe columns.
    :return: 2D float64 array containing only the selected columns.
    :raises ValueError: if the number of score columns does not match the
        length of the selection vector.
    """
    import numpy as np
    if full_scores.shape[1] != same_probes.shape[0]:
        # The original raised a bare string, which is itself a TypeError
        # in Python 3; raise a proper exception instead.
        raise ValueError("Size mismatch")
    # Boolean-mask column selection replaces the original double loop.
    mask = np.asarray(same_probes, dtype=bool)
    return np.asarray(full_scores, dtype='float64')[:, mask]
|
def combined_properties(self, suffix):
    """Collect the values of every setting whose key ends with *suffix*
    and flatten them into a single list."""
    matching_values = (value for key, value in self.settings.items()
                       if key.endswith(suffix))
    return list(itertools.chain.from_iterable(matching_values))
|
def pyxwriter(self):
    """Update the pyx file."""
    model = self.Model()
    # Prefer model-specific Parameters/Sequences classes when the element
    # defines them; otherwise fall back to the generic tool classes.
    if hasattr(self, 'Parameters'):
        parameters = self.Parameters(vars(self))
    else:
        parameters = parametertools.Parameters(vars(self))
    model.parameters = parameters
    if hasattr(self, 'Sequences'):
        sequences = self.Sequences(model=model, **vars(self))
    else:
        sequences = sequencetools.Sequences(model=model, **vars(self))
    model.sequences = sequences
    return PyxWriter(self, model, self.pyxfilepath)
|
def migrate(self, id_or_uri, timeout=-1):
    """Initiates a migration of an enclosure specified by the ID or URI of
    a migration report.

    Args:
        id_or_uri: ID or URI of the migration report.
        timeout: Timeout in seconds. Waits for task completion by default.
            The timeout does not abort the task in OneView; it just stops
            waiting for its completion.

    Returns:
        dict: a migration report.
    """
    # Special payload telling the VC Migration Manager to migrate the
    # VC domain.
    migration_information = {
        'migrationState': 'Migrated',
        'type': 'migratable-vc-domains',
        'category': 'migratable-vc-domains',
    }
    # build_uri must be called manually: update() does not do it, and the
    # URI is not to be included in the body when requesting a migration.
    complete_uri = self._client.build_uri(id_or_uri)
    return self._client.update(migration_information, uri=complete_uri,
                               timeout=timeout)
|
def mi(x, y, bins_x=None, bins_y=None, bins_xy=None,
       method='nearest-neighbors', units='bits'):
    '''compute and return the mutual information between x and y

    inputs:
        x, y: numpy arrays of shape samples x dimension
        method: 'nearest-neighbors', 'gaussian', or 'bin'
        units: 'bits' or 'nats'
    output:
        mi: float

    Notes:
        if you are trying to mix several symbols together as in
        mi(x, (y0, y1, ...)), try
        info[p] = _info.mi(x, info.combine_symbols(y0, y1, ...))
    '''
    # dict.values() returns a view object that has to be converted to a
    # list before being converted to an array. zip objects (Python 3) are
    # single-pass iterators and must be materialized; under Python 2 zip
    # is a function, so isinstance() raises TypeError -- catch only that
    # instead of the original bare except.
    try:
        if isinstance(x, zip):
            x = list(x)
        if isinstance(y, zip):
            y = list(y)
    except TypeError:
        pass
    # x, y might have no .shape attribute (e.g. plain lists); only catch
    # that case rather than swallowing every exception.
    try:
        # handling for 1d np arrays
        if len(x.shape) == 1:
            x = np.expand_dims(x, 1)
        if len(y.shape) == 1:
            y = np.expand_dims(y, 1)
    except AttributeError:
        pass
    # MI(X; Y) = H(X) + H(Y) - H(X, Y)
    HX = entropy(data=x, bins=bins_x, method=method, units=units)
    HY = entropy(data=y, bins=bins_y, method=method, units=units)
    HXY = entropy(data=np.concatenate([x, y], axis=1), bins=bins_xy,
                  method=method, units=units)
    return HX + HY - HXY
|
def exact_match(pred, target):
    """Compute "Exact match" metric, also called "Subset accuracy",
    indicating the number of samples that have all their labels classified
    correctly. See https://en.wikipedia.org/wiki/Multi-label_classification

    :param pred: Predicted labels
    :param target: Correct labels
    :return: tensor containing a 1 at each prediction where all labels in
        the prediction match all labels in the target
    """
    # Sort both label sets so the comparison is order-independent.
    sorted_target = target.sort(dim=1)[0]
    sorted_pred = pred.sort(dim=1)[0]
    per_label_match = torch.eq(sorted_target, sorted_pred)
    # The product over labels is 1 only when every label matched.
    return per_label_match.prod(dim=1)
|
def GetMessages(self, formatter_mediator, event):
    """Determines the formatted message strings for an event object.

    Args:
        formatter_mediator (FormatterMediator): mediates the interactions
            between formatters and other components, such as storage and
            Windows EventLog resources.
        event (EventObject): event.

    Returns:
        tuple(str, str): formatted message string and short message string.

    Raises:
        WrongFormatter: if the event object cannot be formatted by the
            formatter.
    """
    regvalue = getattr(event, 'regvalue', {})
    # Shorthand for the long module-level lookup table.
    service_enums = human_readable_service_enums.SERVICE_ENUMS
    # Replace raw service registry values with their human-readable names
    # where a mapping is known; otherwise keep the raw value.
    for value_name in regvalue.keys():
        enum_for_value = service_enums.get(value_name)
        if enum_for_value is not None:
            raw_value = regvalue[value_name]
            regvalue[value_name] = enum_for_value.get(raw_value, raw_value)
    return super(WinRegistryServiceFormatter, self).GetMessages(
        formatter_mediator, event)
|
def trigger(self_, *param_names):
    """Trigger watchers for the given set of parameter names. Watchers
    will be triggered whether or not the parameter values have
    actually changed.

    Temporarily clears the queued events/watchers and sets the _TRIGGER
    flag so that set_param() fires the watchers unconditionally; the
    original queues and flag are restored afterwards, even when a watcher
    callback raises (the original left _TRIGGER set and dropped the queues
    in that case).
    """
    events = self_.self_or_cls.param._events
    watchers = self_.self_or_cls.param._watchers
    self_.self_or_cls.param._events = []
    self_.self_or_cls.param._watchers = []
    param_values = dict(self_.get_param_values())
    params = {name: param_values[name] for name in param_names}
    try:
        self_.self_or_cls.param._TRIGGER = True
        self_.set_param(**params)
    finally:
        # Restore state even if a watcher raised during set_param().
        self_.self_or_cls.param._TRIGGER = False
        self_.self_or_cls.param._events = events
        self_.self_or_cls.param._watchers = watchers
|
def _try_inline_read(self) -> None:
    """Attempt to complete the current read operation from buffered data.

    If the read can be completed without blocking, schedules the
    read callback on the next IOLoop iteration; otherwise starts
    listening for reads on the socket.
    """
    # See if we've already got the data from a previous read: buffered
    # bytes may already satisfy the current read condition.
    pos = self._find_read_pos()
    if pos is not None:
        self._read_from_buffer(pos)
        return
    # Reading from a closed stream is an error; fail before touching
    # the socket.
    self._check_closed()
    # Pull whatever is currently available on the socket into the buffer
    # and re-check whether the read can now be satisfied.
    pos = self._read_to_buffer_loop()
    if pos is not None:
        self._read_from_buffer(pos)
        return
    # We couldn't satisfy the read inline, so make sure we're
    # listening for new data unless the stream is closed.
    if not self.closed():
        self._add_io_state(ioloop.IOLoop.READ)
|
def do_put(self, line):
    """put [:tablename] [!fieldname:expectedvalue] {json-body} [{json-body}, {json-body} ...]

    Write one item (or, eventually, a batch of items) to the given
    DynamoDB table. NOTE: uses Python 2 print statements; this module is
    Python 2 only.
    """
    table, line = self.get_table_params(line)
    expected, line = self.get_expected(line)
    if expected:
        # Conditional puts are not supported yet.
        print "expected: not yet implemented"
        return
    if line.startswith('(') or line.startswith('['):
        # Batch-write path: the body is a JSON list of items.
        print "batch: not yet implemented"
        return
        # NOTE(review): everything below in this branch is unreachable
        # after the return above -- presumably work-in-progress batch
        # support kept for later.
        list = self.get_list(line)
        wlist = self.conn.new_batch_write_list()
        wlist.add_batch(table, [table.new_item(None, None, item) for item in list])
        response = self.conn.batch_write_item(wlist)
        consumed = response['Responses'][table.table_name]['ConsumedCapacityUnits']
        if 'UnprocessedItems' in response and response['UnprocessedItems']:
            print ""
            print "unprocessed: ", response['UnprocessedItems']
            print ""
    else:
        # Single-item put: parse the JSON body and store it.
        item = json.loads(line)
        table.put_item(item)
        consumed = None
    # Report consumed capacity only when tracking is enabled.
    if self.consumed and consumed:
        print "consumed units:", consumed
|
def is_metric_cls(cls):
    """Return whether *cls* is a concrete metric class.

    A metric class is defined as follows:
      - it inherits DiffParserBase,
      - it is not DiffParserBase itself,
      - it does not set __metric__ = False.
    """
    if cls is DiffParserBase:
        return False
    metric_flag = cls.__dict__.get('__metric__', True)
    if not metric_flag:
        # Preserve the original short-circuit semantics: the falsy flag
        # value itself is returned.
        return metric_flag
    return issubclass(cls, DiffParserBase)
|
def show_bgp_peer(self, peer_id, **_params):
    """Fetches information of a certain BGP peer."""
    peer_uri = self.bgp_peer_path % peer_id
    return self.get(peer_uri, params=_params)
|
def _validate_publish_parameters(body, exchange, immediate, mandatory,
                                 properties, routing_key):
    """Validate Publish Parameters.

    :param bytes|str|unicode body: Message payload
    :param str routing_key: Message routing key
    :param str exchange: The exchange to publish the message to
    :param dict properties: Message properties
    :param bool mandatory: Requires the message is published
    :param bool immediate: Request immediate delivery
    :raises AMQPInvalidArgument: Invalid Parameters
    :return:
    """
    # Each guard raises immediately, so separate ifs behave identically
    # to the original if/elif chain.
    if not compatibility.is_string(body):
        raise AMQPInvalidArgument('body should be a string')
    if not compatibility.is_string(routing_key):
        raise AMQPInvalidArgument('routing_key should be a string')
    if not compatibility.is_string(exchange):
        raise AMQPInvalidArgument('exchange should be a string')
    if properties is not None and not isinstance(properties, dict):
        raise AMQPInvalidArgument('properties should be a dict or None')
    if not isinstance(mandatory, bool):
        raise AMQPInvalidArgument('mandatory should be a boolean')
    if not isinstance(immediate, bool):
        raise AMQPInvalidArgument('immediate should be a boolean')
|
def apply_pairwise(self, function, symmetric=True, diagonal=False,
                   block=None, **kwargs):
    """Helper function for pairwise apply.

    Args:
        function: function to apply; first two positional arguments are
            steps (index labels).
        symmetric: whether function is symmetric in the two steps.
        diagonal: whether to apply on the diagonal.
        block: apply only when the given columns match.
        kwargs: keyword arguments to pass to the function.

    Returns:
        DataFrame with index and columns equal to this frame's index.
    """
    steps = self.index
    r = pd.DataFrame(index=steps, columns=steps)
    for i, s1 in enumerate(steps):
        # range() is immutable in Python 3; materialize it as a list so
        # the remove() below works.
        j = list(range(i + 1 if symmetric else len(steps)))
        if not diagonal:
            j.remove(i)
        other = set(steps[j])
        if block is not None:
            df = self.reset_index()
            # Self-merge on the blocking columns; the duplicated 'index'
            # column gets suffixed to index_x / index_y.
            df = df.merge(df, on=block)
            other &= set(df[df.index_x == s1].index_y)
        for s2 in other:
            # .ix was removed from pandas; use label-based .loc instead.
            r.loc[s1, s2] = function(s1, s2, **kwargs)
    return r
|
def copy_from_model(cls, model_name, reference, **kwargs):
    """Set-up a user-defined grid using specifications of a reference
    grid model.

    Parameters
    ----------
    model_name : string
        name of the user-defined grid model.
    reference : string or :class:`CTMGrid` instance
        Name of the reference model (see :func:`get_supported_models`),
        or a :class:`CTMGrid` object from which grid set-up is copied.
    **kwargs
        Any set-up parameter which will override the settings of the
        reference model (see :class:`CTMGrid` parameters).

    Returns
    -------
    A :class:`CTMGrid` object.
    """
    if isinstance(reference, cls):
        # Copy settings from an existing grid instance.
        settings = dict(reference.__dict__)
        settings.pop('model')
    else:
        # Look up the named reference model's settings.
        settings = _get_model_info(reference)
        settings.pop('model_name')
    # Caller overrides take precedence over the reference settings.
    settings.update(kwargs)
    settings['reference'] = reference
    return cls(model_name, **settings)
|
def wait_for_subworkflows(self, workflow_results):
    '''Wait for results from subworkflows.

    Generator: yields ``self.socket`` once per pending subworkflow so the
    caller can wait on it, then receives the result from the socket.
    '''
    # Flatten the per-result lists of pending workflow IDs into one list.
    wf_ids = sum([x['pending_workflows'] for x in workflow_results], [])
    for wf_id in wf_ids:
        # here we did not check if workflow ids match
        yield self.socket
        res = self.socket.recv_pyobj()
        if res is None:
            # NOTE(review): a None reply appears to signal shutdown; the
            # whole process exits here -- confirm this is intended.
            sys.exit(0)
        elif isinstance(res, Exception):
            # Propagate failures raised inside the subworkflow.
            raise res
|
def get_customjs(self, references, plot_id=None):
    """Creates a CustomJS callback that will send the requested
    attributes back to python."""
    if plot_id is None:
        plot_id = self.plot.id or 'PLACEHOLDER_PLOT_ID'
    # JS snippet that posts the collected data back over the comm channel.
    callback_js = self.js_callback.format(
        comm_id=self.comm.id, timeout=self.timeout,
        debounce=self.debounce, plot_id=plot_id)
    attributes = self.attributes_js(self.attributes)
    # Early-return guard assembled from the skip conditions, if any.
    conditions = ["%s" % cond for cond in self.skip]
    if conditions:
        conditional = 'if (%s) { return };\n' % (' || '.join(conditions))
    else:
        conditional = ''
    data = "var data = {};\n"
    code = conditional + data + attributes + self.code + callback_js
    return CustomJS(args=references, code=code)
|
def _find_ble_controllers ( self ) :
"""Get a list of the available and powered BLE controllers"""
|
controllers = self . bable . list_controllers ( )
return [ ctrl for ctrl in controllers if ctrl . powered and ctrl . low_energy ]
|
def _get_stmt_matching_groups(stmts):
    """Use the matches_key method to get sets of matching statements.

    Returns an itertools.groupby iterator of (key, statements) where the
    statements in each group differ only in their evidence.
    """
    def match_func(x):
        return x.matches_key()
    logger.debug('%d statements before removing object duplicates.'
                 % len(stmts))
    # Remove exact duplicates using a set() call, then make copies:
    st = list(set(stmts))
    # Log the deduplicated count; the original logged len(stmts) again
    # here, so both messages always showed the same number.
    logger.debug('%d statements after removing object duplicates.'
                 % len(st))
    # Group statements according to whether they are matches (differing
    # only in their evidence). groupby requires the input sorted by the
    # same key, so sort in place by matches_key().
    st.sort(key=match_func)
    return itertools.groupby(st, key=match_func)
|
def create_isobaric_quant_lookup(quantdb, specfn_consensus_els, channelmap):
    """Creates an sqlite lookup table of scannrs with quant data.

    spectra - an iterable of tupled (filename, spectra)
    consensus_els - a iterable with consensusElements
    """
    # store quantchannels in lookup and generate a db_id vs channel map
    channels_store = ((name,) for name, c_id in
                      sorted(channelmap.items(), key=lambda x: x[1]))
    quantdb.store_channelmap(channels_store)
    channelmap_dbid = {channelmap[ch_name]: ch_id
                       for ch_id, ch_name in quantdb.get_channelmap()}
    quants = []
    mzmlmap = quantdb.get_mzmlfile_map()
    for specfn, consensus_el in specfn_consensus_els:
        rt = openmsreader.get_consxml_rt(consensus_el)
        # Convert seconds to minutes with high precision.
        rt = round(float(Decimal(rt) / 60), 12)
        qdata = get_quant_data(consensus_el)
        spectra_id = quantdb.get_spectra_id(mzmlmap[specfn], retention_time=rt)
        for channel_no in sorted(qdata.keys()):
            quants.append((spectra_id, channelmap_dbid[channel_no],
                           qdata[channel_no]))
        # Flush in chunks. Use >= since several rows are appended per
        # element, so the length can jump past the threshold; the original
        # '==' check could then never fire. Reset the buffer after storing
        # -- the original kept the stored rows and wrote them again in the
        # final flush, duplicating data.
        if len(quants) >= DB_STORE_CHUNK:
            quantdb.store_isobaric_quants(quants)
            quants = []
    if quants:
        quantdb.store_isobaric_quants(quants)
    quantdb.index_isobaric_quants()
|
def beautify(string, *args, **kwargs):
    """Convenient interface to the ecstasy package.

    Arguments:
        string (str): The string to beautify with ecstasy.
        args (list): The positional arguments.
        kwargs (dict): The keyword ('always') arguments.
    """
    return Parser(args, kwargs).beautify(string)
|
def getAnalysesNum(self):
    """Returns an array with the number of analyses for the current AR in
    different statuses, like follows:
        [verified, total, not_submitted, to_be_verified]
    """
    verified = total = not_submitted = to_be_verified = 0
    for analysis in self.getAnalyses():
        state = analysis.review_state
        # Inactive analyses are excluded from every count.
        if state in ('retracted', 'rejected', 'cancelled'):
            continue
        if state == 'to_be_verified':
            to_be_verified += 1
        elif state in ('published', 'verified'):
            verified += 1
        else:
            not_submitted += 1
        total += 1
    return [verified, total, not_submitted, to_be_verified]
|
def read_pmid_sentences(pmid_sentences, **drum_args):
    """Read sentences from a PMID-keyed dictionary and return all Statements.

    Parameters
    ----------
    pmid_sentences : dict[str, list[str]]
        A dictionary where each key is a PMID pointing to a list of
        sentences to be read.
    **drum_args
        Keyword arguments passed directly to the DrumReader. Typical
        things to specify are `host` and `port`. If `run_drum` is specified
        as True, this process will internally run the DRUM reading system
        as a subprocess. Otherwise, DRUM is expected to be running
        independently.

    Returns
    -------
    all_statements : dict[str, list[indra.statement.Statement]]
        INDRA Statements resulting from the reading, keyed by PMID.
    """
    def _set_pmid(statements, pmid):
        # Stamp the PMID onto every evidence of every extracted Statement.
        for stmt in statements:
            for evidence in stmt.evidence:
                evidence.pmid = pmid
    # See if we need to start DRUM as a subprocess
    run_drum = drum_args.get('run_drum', False)
    drum_process = None
    all_statements = {}
    # Iterate over all the keys and sentences to read
    for pmid, sentences in pmid_sentences.items():
        logger.info('================================')
        logger.info('Processing %d sentences for %s' % (len(sentences), pmid))
        ts = time.time()
        # Make a DrumReader instance
        drum_args['name'] = 'DrumReader%s' % pmid
        dr = DrumReader(**drum_args)
        # NOTE(review): fixed sleep presumably gives the reader time to
        # connect -- confirm whether a readiness check is available.
        time.sleep(3)
        # If there is no DRUM process set yet, we get the one that was
        # just started by the DrumReader
        if run_drum and drum_process is None:
            drum_args.pop('run_drum', None)
            drum_process = dr.drum_system
            # By setting this, we ensure that the reference to the
            # process is passed in to all future DrumReaders
            drum_args['drum_system'] = drum_process
        # Now read each sentence for this key
        for sentence in sentences:
            dr.read_text(sentence)
        # Start receiving results and exit when done
        try:
            dr.start()
        except SystemExit:
            pass
        statements = []
        # Process all the extractions into INDRA Statements
        for extraction in dr.extractions:
            # Sometimes we get nothing back
            if not extraction:
                continue
            tp = process_xml(extraction)
            statements += tp.statements
        # Set the PMIDs for the evidences of the Statements
        _set_pmid(statements, pmid)
        te = time.time()
        logger.info('Reading took %d seconds and produced %d Statements.'
                    % (te - ts, len(statements)))
        all_statements[pmid] = statements
    # If we were running a DRUM process, we should kill it
    if drum_process and dr.drum_system:
        dr._kill_drum()
    return all_statements
|
def _update_limits_from_api(self):
    """Query Lambda's DescribeLimits API action, and update limits
    with the quotas returned. Updates ``self.limits``."""
    logger.debug("Updating limits for Lambda from the AWS API")
    if len(self.limits) == 2:
        # Only the two non-API limits exist; nothing to refresh.
        return
    self.connect()
    lims = self.conn.get_account_settings()['AccountLimit']
    mib = 1048576  # bytes per MiB
    api_limits = {
        'Total Code Size (MiB)': lims['TotalCodeSize'] / mib,
        'Code Size Unzipped (MiB) per Function': lims['CodeSizeUnzipped'] / mib,
        'Unreserved Concurrent Executions': lims['UnreservedConcurrentExecutions'],
        'Concurrent Executions': lims['ConcurrentExecutions'],
        'Code Size Zipped (MiB) per Function': lims['CodeSizeZipped'] / mib,
    }
    for limit_name, value in api_limits.items():
        self.limits[limit_name]._set_api_limit(value)
|
def advance_recurring_todo(p_todo, p_offset=None, p_strict=False):
    """Given a Todo item, return a new instance of a Todo item with the
    dates shifted according to the recurrence rule.

    Strict means that the real due date is taken as the offset, not today
    or a future date, to determine the new dates.

    When the todo item has no due date, the date passed by the caller is
    used (defaulting to today).

    When no recurrence tag is present, an exception is raised.
    """
    todo = Todo(p_todo.source())
    pattern = todo.tag_value('rec')
    if not pattern:
        raise NoRecurrenceException()
    if pattern.startswith('+'):
        # A leading '+' forces strict mode; strip it off the pattern.
        p_strict = True
        pattern = pattern[1:]
    if p_strict:
        offset = p_todo.due_date() or p_offset or date.today()
    else:
        offset = p_offset or date.today()
    length = todo.length()
    new_due = relative_date_to_date(pattern, offset)
    if not new_due:
        raise NoRecurrenceException()
    # pylint: disable=E1103
    todo.set_tag(config().tag_due(), new_due.isoformat())
    if todo.start_date():
        # Keep the same distance between start and due dates.
        new_start = new_due - timedelta(length)
        todo.set_tag(config().tag_start(), new_start.isoformat())
    todo.set_creation_date(date.today())
    return todo
|
def sortable_sortkey_title(instance):
    """Returns a sortable title composed of the numeric sort key followed
    by the lowercase sortable title."""
    title = sortable_title(instance)
    if safe_callable(title):
        title = title()
    sort_key = instance.getSortKey()
    # Items without an explicit sort key sink to the end.
    sort_key = 999999 if sort_key is None else sort_key
    return "{:010.3f}{}".format(sort_key, title)
|
def constant(X, n, mu, hyper_deriv=None):
    """Function implementing a constant mean suitable for use with
    :py:class:`MeanFunction`.

    Returns ``mu`` for the zeroth derivative (all ``n == 0``); ones when
    the derivative with respect to the hyperparameter ``mu`` is requested;
    and zeros for any non-zero derivative order (the derivative of a
    constant vanishes).
    """
    # NumPy replaces the removed scipy.ones/scipy.zeros aliases; the numpy
    # namespace aliases were dropped from SciPy.
    if (n == 0).all():
        if hyper_deriv is not None:
            # d(mu)/d(mu) = 1 at every evaluation point.
            return np.ones(X.shape[0])
        else:
            return mu * np.ones(X.shape[0])
    else:
        return np.zeros(X.shape[0])
|
def _fields(self, resource):
    """Get projection fields for given resource.

    Returns a comma-separated string of the datasource's projection keys
    followed by the standard LAST_UPDATED and DATE_CREATED meta fields.
    """
    datasource = self.get_datasource(resource)
    keys = list(datasource[2].keys())
    # Join everything in a single pass; the original concatenated two
    # joined strings without a separating comma, fusing the last
    # projection key with the LAST_UPDATED field name.
    return ','.join(keys + [config.LAST_UPDATED, config.DATE_CREATED])
|
def from_pymatgen_molecule(cls, molecule):
    """Create an instance of the own class from a pymatgen molecule.

    Args:
        molecule (:class:`pymatgen.core.structure.Molecule`):

    Returns:
        Cartesian:
    """
    atoms = [element.value for element in molecule.species]
    instance = cls(atoms=atoms, coords=molecule.cart_coords)
    return instance._to_numeric()
|
def sun_events(latitude, longitude, date, timezone=0, zenith=None):
    """Convenience function for calculating sunrise and sunset.

    Civil twilight starts/ends when the Sun's centre is 6 degrees below
    the horizon; nautical twilight at 12 degrees; astronomical twilight
    at 18 degrees.

    Args:
        latitude (float): Location's latitude
        longitude (float): Location's longitude
        date (datetime.date): Calculate rise or set for given date
        timezone (int): Offset from UTC in minutes
        zenith (str): Calculate rise/set events, or twilight times

    Returns:
        tuple of datetime.time: The times for the given events in the
        specified timezone
    """
    sunrise = sun_rise_set(latitude, longitude, date, 'rise', timezone, zenith)
    sunset = sun_rise_set(latitude, longitude, date, 'set', timezone, zenith)
    return sunrise, sunset
|
def render_payment_form(self):
    """Display the DirectPayment form for entering payment information."""
    form = self.payment_form_cls()
    self.context[self.form_context_name] = form
    return TemplateResponse(self.request, self.payment_template, self.context)
|
def getInspectorActionById(self, identifier):
    """Return the inspector action whose data() equals *identifier*.

    Raises KeyError if no action in the group carries that identifier.
    """
    match = next((action for action in self.inspectorActionGroup.actions()
                  if action.data() == identifier), None)
    if match is None:
        raise KeyError("No action found with ID: {!r}".format(identifier))
    return match
|
def fetch_routing_info(self, address):
    """Fetch raw routing info from a given router address.

    :param address: router address
    :return: list of routing records or
             None if no connection could be established
    :raise ServiceUnavailable: if the server does not support routing or
           if routing support is broken
    """
    metadata = {}
    records = []

    def fail(md):
        # on_failure callback: translate server failure metadata into
        # protocol errors.
        if md.get("code") == "Neo.ClientError.Procedure.ProcedureNotFound":
            raise RoutingProtocolError("Server {!r} does not support routing".format(address))
        else:
            raise RoutingProtocolError("Routing support broken on server {!r}".format(address))
    try:
        with self.acquire_direct(address) as cx:
            # Agent string looks like "Neo4j/3.4.0"; take the version part.
            _, _, server_version = (cx.server.agent or "").partition("/")
            # TODO 2.0: remove old routing procedure
            if server_version and Version.parse(server_version) >= Version((3, 2)):
                # Neo4j 3.2+ supports the parameterized getRoutingTable call.
                log_debug("[#%04X] C: <ROUTING> query=%r", cx.local_port, self.routing_context or {})
                cx.run("CALL dbms.cluster.routing.getRoutingTable({context})", {"context": self.routing_context}, on_success=metadata.update, on_failure=fail)
            else:
                # Older servers only expose the parameterless getServers call.
                # NOTE(review): this log call mixes a '{}' placeholder with
                # %-style lazy args, so the port is never interpolated --
                # confirm the intended format.
                log_debug("[#%04X] C: <ROUTING> query={}", cx.local_port)
                cx.run("CALL dbms.cluster.routing.getServers", {}, on_success=metadata.update, on_failure=fail)
            cx.pull_all(on_success=metadata.update, on_records=records.extend)
            cx.sync()
            # Zip each raw record with the returned field names into dicts.
            routing_info = [dict(zip(metadata.get("fields", ()), values)) for values in records]
            log_debug("[#%04X] S: <ROUTING> info=%r", cx.local_port, routing_info)
        return routing_info
    except RoutingProtocolError as error:
        # Escalate protocol problems as service unavailability.
        raise ServiceUnavailable(*error.args)
    except ServiceUnavailable:
        # Connection failed: mark this router inactive and report no info.
        self.deactivate(address)
        return None
|
async def load_credentials(self, credentials):
    """Load existing credentials.

    Expects a string of the form '<identifier>:<hex-encoded seed>'.
    """
    parts = credentials.split(':')
    self.identifier = parts[0]
    # The second field is the hex-encoded SRP seed.
    self.srp.initialize(binascii.unhexlify(parts[1]))
    _LOGGER.debug('Loaded AirPlay credentials: %s', credentials)
|
def request(self):
    """Send the request to the API.

    This method will send the request to the API. It will try to handle
    all the types of responses and provide the relevant data when
    possible. Some basic error detection and handling is implemented, but
    not all failure cases will get caught.

    Return:
        (dictionary): Response/Results data.
    """
    # self._request.authorization_method(self._authorization_method)
    api_path = self.tcex.default_args.tc_api_path
    self._request.url = '{}/v2/{}'.format(api_path, self._request_uri)
    self._apply_filters()
    self.tcex.log.debug(u'Resource URL: ({})'.format(self._request.url))
    response = self._request.send(stream=self._stream)
    data, status = self._request_process(response)
    # NOTE: request state (body/headers/uri) is deliberately not reset
    # here; see the original implementation's open question.
    return {
        'data': data,
        'response': response,
        'status': status,
    }
|
def _unlock(self, closing=False):
    """Remove lock file from the target root folder.

    :param closing: when True we are shutting down, so it is acceptable to
        chdir back to the root folder in order to remove the lock file.
    """
    # write("_unlock", closing)
    try:
        if self.cur_dir != self.root_dir:
            if closing:
                write("Changing to ftp root folder to remove lock file: {}".format(self.root_dir))
                self.cwd(self.root_dir)
            else:
                # Refuse to delete when not in the root folder (we would
                # otherwise delete the wrong path).
                write_error("Could not remove lock file, because CWD != ftp root: {}".format(self.cur_dir))
                return
        if self.lock_data is False:
            # The lock file was never written, so there is nothing to
            # delete.
            if self.get_option("verbose", 3) >= 4:
                write("Skip remove lock file (was not written).")
        else:
            # direct delete, without updating metadata or checking for
            # target access:
            try:
                self.ftp.delete(DirMetadata.LOCK_FILE_NAME)
                # self.remove_file(DirMetadata.LOCK_FILE_NAME)
            except Exception as e:
                # I have seen '226 Closing data connection' responses here,
                # probably when a previous command threw another error.
                # However here, 2xx response should be Ok(?):
                # A 226 reply code is sent by the server before closing the
                # data connection after successfully processing the
                # previous client command.
                if e.args[0][:3] == "226":
                    write_error("Ignoring 226 response for ftp.delete() lockfile")
                else:
                    raise
        self.lock_data = None
    except Exception as e:
        write_error("Could not remove lock file: {}".format(e))
        raise
|
def processV3(self, sessionRecord, message):
    """Process a V3 PreKeyWhisperMessage, setting up the session state.

    :param sessionRecord: record to populate with the new session state
    :param message: incoming pre-key message
    :type message: PreKeyWhisperMessage
    :return: the id of the consumed one-time pre-key (so the caller can
        remove it), or None
    """
    if sessionRecord.hasSessionState(message.getMessageVersion(), message.getBaseKey().serialize()):
        # Duplicate pre-key message for a session we already set up; let
        # the bundled message be processed by the existing session.
        logger.warn("We've already setup a session for this V3 message, letting bundled message fall through...")
        return None
    ourSignedPreKey = self.signedPreKeyStore.loadSignedPreKey(message.getSignedPreKeyId()).getKeyPair()
    # Build Bob-side ratchet parameters from our identity/signed pre-key
    # and the sender's (Alice's) keys.
    parameters = BobAxolotlParameters.newBuilder()
    parameters.setTheirBaseKey(message.getBaseKey()).setTheirIdentityKey(message.getIdentityKey()).setOurIdentityKey(self.identityKeyStore.getIdentityKeyPair()).setOurSignedPreKey(ourSignedPreKey).setOurRatchetKey(ourSignedPreKey)
    if message.getPreKeyId() is not None:
        parameters.setOurOneTimePreKey(self.preKeyStore.loadPreKey(message.getPreKeyId()).getKeyPair())
    else:
        parameters.setOurOneTimePreKey(None)
    if not sessionRecord.isFresh():
        # Keep the previous session retrievable for out-of-order messages.
        sessionRecord.archiveCurrentState()
    RatchetingSession.initializeSessionAsBob(sessionRecord.getSessionState(), message.getMessageVersion(), parameters.create())
    sessionRecord.getSessionState().setLocalRegistrationId(self.identityKeyStore.getLocalRegistrationId())
    sessionRecord.getSessionState().setRemoteRegistrationId(message.getRegistrationId())
    sessionRecord.getSessionState().setAliceBaseKey(message.getBaseKey().serialize())
    # Medium.MAX_VALUE marks "no real one-time pre-key was used", so only
    # report ids below it for deletion.
    if message.getPreKeyId() is not None and message.getPreKeyId() != Medium.MAX_VALUE:
        return message.getPreKeyId()
    else:
        return None
|
def learn(self, grad_arr, fix_opt_flag=False):
    '''Update this Discriminator by ascending its stochastic gradient.

    Args:
        grad_arr: `np.ndarray` of gradients.
        fix_opt_flag: If `False`, optimization in this model will be done;
            otherwise the parameters are left untouched.

    Returns:
        `np.ndarray` of delta or gradients.
    '''
    # Only the first half of the channels is fed to the deconvolution
    # model.
    half_channel = grad_arr.shape[1] // 2
    grad_arr = self.__deconvolution_model.learn(
        grad_arr[:, :half_channel],
        fix_opt_flag=fix_opt_flag
    )
    delta_arr = self.__cnn.back_propagation(grad_arr)
    if fix_opt_flag is False:
        self.__cnn.optimize(self.__learning_rate, 1)
    return delta_arr
|
def from_pubkey_line(cls, line):
    """Generate Key instance from a string. Raise ValueError if string is
    malformed.
    """
    options, key_without_options = cls._extract_options(line)
    if key_without_options == '':
        raise ValueError("Empty key")
    # the key (with options stripped out) should consist of the fields
    # "type", "data", and optionally "comment", separated by a space.
    # The comment field may contain additional spaces.
    fields = key_without_options.strip().split(None, 2)  # maxsplit=2
    if len(fields) == 3:
        type_str, data64, comment = fields
    elif len(fields) == 2:
        type_str, data64 = fields
        comment = None
    else:  # len(fields) <= 1
        raise ValueError("Key has insufficient number of fields")
    try:
        data = b64decode(data64)
    except (binascii.Error, TypeError):
        raise ValueError("Key contains invalid data")
    # The wire format embeds the key type as the first length-prefixed
    # field of the decoded blob.
    key_type = next(iter_prefixed(data))
    if key_type == b'ssh-rsa':
        key_class = RSAKey
    elif key_type == b'ssh-dss':
        key_class = DSAKey
    elif key_type.startswith(b'ecdsa-'):
        key_class = ECDSAKey
    else:
        raise ValueError('Unknown key type {}'.format(key_type))
    # Reuse the already-decoded bytes; the original base64-decoded the
    # same payload a second time here.
    return key_class(data, comment, options=options)
|
def run(self, args):
    """Give the user identified by email or username the ``auth_role``
    permissions on the remote project named on the command line.

    :param args: Namespace arguments parsed from the command line
    """
    # Exactly one of args.email / args.username is expected to be set;
    # the lookup helper accepts whichever is non-None.
    project = self.fetch_project(args, must_exist=True, include_children=False)
    user = self.remote_store.lookup_or_register_user_by_email_or_username(
        args.email, args.username)
    # auth_role is the type of permission (e.g. project_admin).
    self.remote_store.set_user_project_permission(project, user, args.auth_role)
    print(u'Gave user {} {} permissions for project {}.'.format(
        user.full_name, args.auth_role, project.name))
|
def permit_gitrepo(config_fpath, writeback=False):
    """Convert ``https://github.com/`` remotes in a .git/config file to the
    ``git@github.com:`` SSH form, making the appropriate changes to colons
    and slashes.

    :param config_fpath: path to the git config file to process
    :param writeback: when True, rewrite the file in place; when False
        (default), only print the converted contents.
    """
    # Define search/replace patterns
    username_regex = utool.named_field('username', utool.REGEX_VARNAME)
    username_repl = utool.backref_field('username')
    regexpat = r'https://github.com/' + username_regex + '/'
    replpat = r'git@github.com:' + username_repl + '/'
    # Read and replace
    lines = utool.read_from(config_fpath, aslines=True)
    newlines = utool.regex_replace_lines(lines, regexpat, replpat)
    # BUG FIX: the original tested an undefined global ``WRITEBACK``
    # instead of the ``writeback`` parameter, raising NameError at runtime.
    if not writeback:
        print(''.join(newlines))
    else:
        utool.write_to(config_fpath, newlines, aslines=True)
|
def total_tax(self):
    """Return the sum of all Tax objects for this receipt (0 when none)."""
    aggregated = Tax.objects.filter(receipt=self).aggregate(total=Sum('amount'))
    # The aggregate is None when no Tax rows exist; fall back to 0.
    return aggregated['total'] or 0
|
def instance():
    """Return a PyVabamorf instance for the current process.

    Reuses the previously initialized instance when one exists; builds a
    fresh one on first use or after the process has been forked (the
    cached instance is keyed by PID).
    """
    current_pid = os.getpid()
    if getattr(Vabamorf, 'pid', None) != current_pid:
        Vabamorf.pid = current_pid
        Vabamorf.morf = Vabamorf()
    return Vabamorf.morf
|
def update_model(self, tfi):
    """Update the asset/shot browser model for the given taskfile info.

    When the taskfile is the first version of its descriptor, a new
    descriptor row is inserted into the appropriate browser tree.

    :param tfi: taskfile info
    :type tfi: :class:`TaskFileInfo`
    :returns: None
    :rtype: None
    :raises: None
    """
    # Pick the browser matching the department type of the task.
    if tfi.task.department.assetflag:
        browser = self.assetbrws
    else:
        browser = self.shotbrws
    if tfi.version == 1:  # add descriptor
        # presumably level 2 is the task level, so the new descriptor row
        # goes under the currently selected task -- TODO confirm
        parent = browser.selected_indexes(2)[0]
        ddata = treemodel.ListItemData([tfi.descriptor])
        ditem = treemodel.TreeItem(ddata)
        browser.model.insertRow(0, ditem, parent)
        # Select the freshly inserted descriptor at level 3.
        self.set_level(browser, 3, [tfi.descriptor])
|
def _load_from(self, line):
    '''Parse the "From" section line of the recipe and store the base image.

    :param line: raw recipe line, e.g. ``From: ubuntu:16.04``
    '''
    # Remove any comments
    line = line.split('#', 1)[0]
    # Strip a case-insensitive "From:" token; note the regex removes the
    # token wherever it occurs in the line, not only at the start.
    line = re.sub('(F|f)(R|r)(O|o)(M|m):', '', line).strip()
    bot.info('FROM %s' % line)
    self.config['from'] = line
|
def add_environment(self, environ):
    """Merge the given mapping into the stored environment.

    Existing entries are overridden by the given ones; anything that is
    not a dict is silently ignored.

    :param environ: New environment variables
    """
    if not isinstance(environ, dict):
        return
    self._environment.update(environ)
|
def match_completion(self, start, end, match):
    """Set this query to match completion percentages in the inclusive range.

    arg:    start (decimal): start of range
    arg:    end (decimal): end of range
    arg:    match (boolean): ``true`` for a positive match,
            ``false`` for a negative match
    raise:  InvalidArgument - ``end`` is less than ``start``
    *compliance: mandatory -- This method must be implemented.*
    """
    try:
        lower = float(start)
    except ValueError:
        raise errors.InvalidArgument('Invalid start value')
    try:
        upper = float(end)
    except ValueError:
        raise errors.InvalidArgument('Invalid end value')
    if not match:
        raise errors.InvalidArgument('match = False not currently supported')
    if upper < lower:
        raise errors.InvalidArgument('end value must be >= start value when match = True')
    self._query_terms['completion'] = {'$gte': lower, '$lte': upper}
|
def profile_get(name, remote_addr=None, cert=None, key=None, verify_cert=True, _raw=False):
    '''Gets a profile from the LXD

    name :
        The name of the profile to get.

    remote_addr :
        An URL to a remote Server, you also have to give cert and key if
        you provide remote_addr and its a TCP Address!

        Examples:
            https://myserver.lan:8443
            /var/lib/mysocket.sock

    cert :
        PEM Formatted SSL Certificate.

        Examples:
            ~/.config/lxc/client.crt

    key :
        PEM Formatted SSL Key.

        Examples:
            ~/.config/lxc/client.key

    verify_cert : True
        Wherever to verify the cert, this is by default True
        but in the most cases you want to set it off as LXD
        normaly uses self-signed certificates.

    _raw :
        Return the pylxd object, this is internal and by states in use.

    CLI Examples:

    .. code-block:: bash

        $ salt '*' lxd.profile_get autostart
    '''
    client = pylxd_client_get(remote_addr, cert, key, verify_cert)
    try:
        profile = client.profiles.get(name)
    except pylxd.exceptions.LXDAPIException:
        # Map the pylxd error onto salt's standard invocation error.
        raise SaltInvocationError('Profile \'{0}\' not found'.format(name))
    return profile if _raw else _pylxd_model_to_dict(profile)
|
def customize_base_cfg(cfgname, cfgopt_strs, base_cfg, cfgtype, alias_keys=None, valid_keys=None, offset=0, strict=True):
    """Customize a base config dict with a mini-language option string.

    Args:
        cfgname (str): config name
        cfgopt_strs (str): mini-language defining key variations
        base_cfg (dict): specifies the default cfg to customize
        cfgtype (?):
        alias_keys (None): (default = None)
        valid_keys (None): if base_cfg is not specied, this defines the valid
            keys (default = None)
        offset (int): (default = 0)
        strict (bool): (default = True)

    Returns:
        list: cfg_combo - list of config dicts defining customized configs
            based on cfgopt_strs. customized configs always are given an
            _cfgindex, _cfgstr, and _cfgname key.

    CommandLine:
        python -m utool.util_gridsearch --test-customize_base_cfg:0

    Ignore:
        >>> cfgname = 'default'
        >>> cfgopt_strs = 'dsize=1000,per_name=[1,2]'

    Example:
        >>> # ENABLE_DOCTEST
        >>> from utool.util_gridsearch import *  # NOQA
        >>> import utool as ut
        >>> cfgname = 'name'
        >>> cfgopt_strs = 'b=[1,2]'
        >>> base_cfg = {}
        >>> alias_keys = None
        >>> cfgtype = None
        >>> offset = 0
        >>> valid_keys = None
        >>> strict = False
        >>> cfg_combo = customize_base_cfg(cfgname, cfgopt_strs, base_cfg, cfgtype,
        >>>                                alias_keys, valid_keys, offset, strict)
        >>> result = ('cfg_combo = %s' % (ut.repr2(cfg_combo, nl=1),))
        >>> print(result)
        cfg_combo = [
            {'_cfgindex': 0, '_cfgname': 'name', '_cfgstr': 'name:b=[1,2]', '_cfgtype': None, 'b': 1},
            {'_cfgindex': 1, '_cfgname': 'name', '_cfgstr': 'name:b=[1,2]', '_cfgtype': None, 'b': 2},
    """
    import utool as ut
    # Work on a copy so the caller's base_cfg is never mutated.
    cfg = base_cfg.copy()
    # Parse config options without expansion
    cfg_options = noexpand_parse_cfgstrs(cfgopt_strs, alias_keys)
    # Ensure that nothing bad is being updated
    if strict:
        parsed_keys = cfg_options.keys()
        if valid_keys is not None:
            ut.assert_all_in(parsed_keys, valid_keys, 'keys specified not in valid set')
        else:
            ut.assert_all_in(parsed_keys, cfg.keys(), 'keys specified not in default options')
    # Finalize configuration dict
    cfg.update(cfg_options)
    cfg['_cfgtype'] = cfgtype
    cfg['_cfgname'] = cfgname
    # Perform expansion: each list-valued key produces one cfg per value.
    cfg_combo = ut.all_dict_combinations(cfg)
    # if len(cfg_combo) > 1:
    # Tag every expanded config with its (offset-based) index.
    for combox, cfg_ in enumerate(cfg_combo, start=offset):
        cfg_['_cfgindex'] = combox
    # Record the originating cfg string (name[:options]) on each config.
    for cfg_ in cfg_combo:
        if len(cfgopt_strs) > 0:
            cfg_['_cfgstr'] = cfg_['_cfgname'] + NAMEVARSEP + cfgopt_strs
        else:
            cfg_['_cfgstr'] = cfg_['_cfgname']
    return cfg_combo
|
def write_channel_list_file(channels, fobj):
    """Write a `~gwpy.detector.ChannelList` to a INI-format channel list file"""
    # Accept a path: open it for writing and recurse with the file object.
    if not isinstance(fobj, FILE_LIKE):
        with open(fobj, "w") as fobj:
            return write_channel_list_file(channels, fobj)
    out = configparser.ConfigParser(dict_type=OrderedDict)
    for channel in channels:
        section = channel.group
        if not out.has_section(section):
            out.add_section(section)
        for param, value in channel.params.items():
            out.set(section, param, value)
        # Entry format: "<name> [<rate>] <safe-flag> <fidelity-flag>"
        if channel.sample_rate is None:
            entry = str(channel)
        else:
            entry = '%s %s' % (str(channel),
                               str(channel.sample_rate.to('Hz').value))
        entry += ' %s' % channel.params.get('safe', 'safe')
        entry += ' %s' % channel.params.get('fidelity', 'clean')
        try:
            existing = out.get(section, 'channels')
        except configparser.NoOptionError:
            out.set(section, 'channels', '\n%s' % entry)
        else:
            out.set(section, 'channels', existing + '\n%s' % entry)
    out.write(fobj)
|
def add_options(cls, parser):
    """Register putty's command line and config file options."""
    parser.add_option(
        '--putty-select', metavar='errors', default='',
        help='putty select list',
    )
    parser.add_option(
        '--putty-ignore', metavar='errors', default='',
        help='putty ignore list',
    )
    parser.add_option(
        '--putty-no-auto-ignore', action='store_false',
        dest='putty_auto_ignore', default=False,
        help=(' (default) do not auto ignore lines matching '
              '# flake8: disable=<code>,<code>'),
    )
    parser.add_option(
        '--putty-auto-ignore', action='store_true',
        dest='putty_auto_ignore', default=False,
        help=('auto ignore lines matching '
              '# flake8: disable=<code>,<code>'),
    )
    # These options may also be set from the configuration file.
    for option_name in ('putty-select', 'putty-ignore', 'putty-auto-ignore'):
        parser.config_options.append(option_name)
|
def command_children(self, command):
    """Run a command on the direct children of the currently selected
    container.

    :rtype: List of CommandReply????
    """
    if not len(self.nodes):
        return
    # Build one batched command string targeting every child by con_id.
    payload = ' '.join(
        '[con_id="{}"] {};'.format(child.id, command) for child in self.nodes
    )
    self._conn.command(payload)
|
def int_flux_threshold(self, skydir, fn, ts_thresh, min_counts):
    """Compute the integral flux threshold for a point source at
    position ``skydir`` with spectral parameterization ``fn``.

    The total threshold is computed on a fine 32-bin log-energy grid
    spanning the analysis energy range; per-bin quantities are then
    recomputed on the native ``self.ebins`` grid and attached under the
    ``'bins'`` key of the returned dict.
    """
    # Fine log-spaced energy grid for the integral sensitivity calculation.
    ebins = 10 ** np.linspace(np.log10(self.ebins[0]), np.log10(self.ebins[-1]), 33)
    # Geometric-mean reference energy of the full range.
    ectr = np.sqrt(ebins[0] * ebins[-1])
    sig, bkg, bkg_fit = self.compute_counts(skydir, fn, ebins)
    # Normalization that reaches ts_thresh / min_counts when summed over
    # axes 1-3 -- presumably energy/spatial axes; TODO confirm axis meaning.
    norms = irfs.compute_norm(sig, bkg, ts_thresh, min_counts,
                              sum_axes=[1, 2, 3], bkg_fit=bkg_fit,
                              rebin_axes=[4, 10, 1])
    npred = np.squeeze(np.apply_over_axes(np.sum, norms * sig, [1, 2, 3]))
    npred = np.array(npred, ndmin=1)
    # Scale the spectral model by the fitted normalization.
    flux = np.squeeze(norms) * fn.flux(ebins[0], ebins[-1])
    eflux = np.squeeze(norms) * fn.eflux(ebins[0], ebins[-1])
    dnde = np.squeeze(norms) * fn.dnde(ectr)
    e2dnde = ectr ** 2 * dnde
    o = dict(e_min=self.ebins[0], e_max=self.ebins[-1], e_ref=ectr,
             npred=npred, flux=flux, eflux=eflux, dnde=dnde, e2dnde=e2dnde)
    # Recompute counts on the native binning for the per-bin quantities.
    sig, bkg, bkg_fit = self.compute_counts(skydir, fn)
    npred = np.squeeze(np.apply_over_axes(np.sum, norms * sig, [2, 3]))
    flux = np.squeeze(np.squeeze(norms, axis=(1, 2, 3))[:, None] * fn.flux(self.ebins[:-1], self.ebins[1:]))
    eflux = np.squeeze(np.squeeze(norms, axis=(1, 2, 3))[:, None] * fn.eflux(self.ebins[:-1], self.ebins[1:]))
    dnde = np.squeeze(np.squeeze(norms, axis=(1, 2, 3))[:, None] * fn.dnde(self.ectr))
    e2dnde = ectr ** 2 * dnde
    o['bins'] = dict(npred=npred, flux=flux, eflux=eflux, dnde=dnde,
                     e2dnde=e2dnde, e_min=self.ebins[:-1],
                     e_max=self.ebins[1:], e_ref=self.ectr)
    return o
|
def ensure_datetime(obj):
    """Return the object if it is a datetime-like object

    Parameters
    ----------
    obj : Object to be tested.

    Returns
    -------
    The original object if it is a datetime-like object

    Raises
    ------
    TypeError if `obj` is not datetime-like
    """
    valid_types = (str, datetime.datetime, cftime.datetime, np.datetime64)
    if not isinstance(obj, valid_types):
        raise TypeError("datetime-like object required. "
                        "Type given: {}".format(type(obj)))
    return obj
|
def _moving_average ( data , wind_size = 3 ) :
"""Brief
Application of a moving average filter for signal smoothing .
Description
In certain situations it will be interesting to simplify a signal , particularly in cases where
some events with a random nature take place ( the random nature of EMG activation periods is
a good example ) .
One possible simplification procedure consists in smoothing the signal in order to obtain
only an " envelope " . With this methodology the analysis is mainly centered on seeing patterns
in data and excluding noise or rapid events [ 1 ] .
The simplification can be achieved by segmenting the time series in multiple windows and
from each window an average value of all the samples that it contains will be determined
( dividing the sum of all sample values by the window size ) .
A quick and efficient implementation ( chosen in biosignalsnotebooks package ) of the moving window
methodology is through a cumulative sum array .
[1 ] https : / / en . wikipedia . org / wiki / Smoothing
Parameters
data : list
List of signal samples .
wind _ size : int
Number of samples inside the moving average window ( a bigger value implies a smoother
output signal ) .
Returns
out : numpy array
Array that contains the samples of the smoothed signal ."""
|
wind_size = int ( wind_size )
ret = numpy . cumsum ( data , dtype = float )
ret [ wind_size : ] = ret [ wind_size : ] - ret [ : - wind_size ]
return numpy . concatenate ( ( numpy . zeros ( wind_size - 1 ) , ret [ wind_size - 1 : ] / wind_size ) )
|
def index_exists(self):
    """Check to see if index exists."""
    url = '/v2/exchange/db/{}/{}/_search'.format(self.domain, self.data_type)
    # DB-Method header tunnels the intended GET through a POST request.
    headers = {'Content-Type': 'application/json', 'DB-Method': 'GET'}
    response = self.tcex.session.post(url, headers=headers)
    if response.ok:
        return True
    self.tcex.log.warning('The provided index was not found ({}).'.format(response.text))
    return False
|
def copies(mapping, s2bins, rna, min_rna=800, mismatches=0):
    """Estimate rRNA gene copy number per bin from a SAM mapping.

    1. determine bin coverage
    2. determine rRNA gene coverage
    3. compare

    Results are printed as a tab-separated table to stdout.
    """
    cov = {}
    # cov[scaffold] = [bases, length]
    s2bins, bins2s = parse_s2bins(s2bins)
    rna_cov = parse_rna(rna, s2bins, min_rna)
    # Drop bins/scaffolds that have no qualifying rRNA gene.
    s2bins, bins2s = filter_missing_rna(s2bins, bins2s, rna_cov)
    # count bases mapped to scaffolds and rRNA gene regions
    for line in mapping:
        line = line.strip().split()
        # get scaffold lengths from @SQ header lines (SN:<name> LN:<len>)
        if line[0].startswith('@'):
            if line[0].startswith('@SQ') is False:
                continue
            s = line[1].split(':')[1]
            l = int(line[2].split(':')[1])
            # check if scaffold is binned
            if s not in s2bins:
                continue
            if s not in cov:
                cov[s] = [0, l]
        # check mismatch threshold
        mm = count_mismatches(line)
        if mm is False or mm > mismatches:
            continue
        # check that scaffold is in bin
        s, bases = line[2], len(line[9])
        if s not in cov:
            continue
        cov[s][0] += bases
        rna_cov = rna_bases(rna_cov, s, bases, line)
    print('# mismatches threshold: %s' % (mismatches))
    header = ['#rRNA scaffold', 'rRNA genes >=%sbp on scaffold' % (min_rna), 'rRNA coverage', 'bin', 'bin info', 'bin coverage', 'rRNAs >=%sbp in bin' % (min_rna), 'rRNA coverage/bin coverage', 'estimated number of copies']
    print('\t'.join(header))
    for bin, scaffolds in list(bins2s.items()):
        # Total number of rRNA genes detected across the whole bin.
        rna_count = sum([len(rna_cov[s][2]) for s in scaffolds if s in rna_cov])
        for s in scaffolds:
            if s not in rna_cov:
                continue
            out = []
            counts = rna_cov[s]
            bin_cov = calc_bin_cov(bins2s[bin], cov)
            num_genes = len(counts[2])
            # Mean per-base coverage over the rRNA gene region(s).
            rna_coverage = float(float(counts[0]) / float(counts[1]))
            if bin_cov == 0:
                rna_div_bin = 0
            else:
                rna_div_bin = float(rna_coverage / bin_cov)
            # NOTE(review): max() here mixes an int (rna_count), a list
            # (counts) and a float (rna_div_bin); on Python 3 comparing a
            # list to a number raises TypeError. Likely a Python 2
            # leftover -- confirm the intended operand (counts[0]?).
            est = int(max([rna_count, counts, rna_div_bin]))
            out = [s, num_genes, rna_coverage, bin, bin_cov, rna_count, rna_div_bin, est]
            print('\t'.join([str(i) for i in out]))
|
def _mdens_deriv ( self , m ) :
"""Derivative of the density as a function of m"""
|
return - self . _mdens ( m ) * ( self . a * self . alpha + self . beta * m ) / m / ( self . a + m )
|
def get_user_mentions(tweet):
    """Get the @-mentions in the Tweet as dictionaries.

    Note that in the case of a quote-tweet, this does not return the users
    mentioned in the quoted status; call get_user_mentions on the quoted
    status for those. The list also does not include the author of the
    original (quoted) Tweet.

    Args:
        tweet (Tweet or dict): A Tweet object or dictionary

    Returns:
        list (list of dicts): 1 item per @-mention. The fields are not
        enforced by the parser; they are simply the fields as they appear
        in a Tweet data payload (e.g. ``indices``, ``id_str``,
        ``screen_name``, ``name``, ``id``).
    """
    entities = get_entities(tweet)
    if not entities:
        return []
    return entities.get("user_mentions") or []
|
def read_prologs(filename):
    """Search *filename* for SST prologues.

    Returns a dict keyed by prologue name (taken from the "Name" field);
    each value is another dict keyed by the lowercased SST section labels.

        prologs = read_prologs(filename)

    Common SST labels are: name, purpose, lanugage, invocation,
    arguments, description, authors, notes etc.

    Raises ValueError when a prologue opens before the previous one is
    closed, or when a line inside a prologue cannot be parsed.
    """
    results = {}
    prolog = {}
    # Section headings look like "* Purpose:" (capitalised, colon-terminated).
    heading_re = re.compile(r"^\* ([A-Z].*):$")
    heading = ""
    content = ""
    counter = 0
    # FIX: open via a context manager so the file handle is always closed
    # (the original `for line in open(filename)` leaked the handle).
    with open(filename) as fh:
        for line in fh:
            line = line.strip()
            # Start of a completely new prolog so reset everything
            if line.startswith("*+"):
                if counter != 0:
                    raise ValueError("Started prologue without closing previous prologue")
                prolog = {}
                heading = ""
                content = ""
                counter = counter + 1
                continue
            # End of a prolog. Must store the current dict
            if line.startswith("*-"):
                counter = 0
                if len(heading):  # Flush current heading
                    prolog[heading] = content
                    content = ""
                name = prolog['name'].strip()
                results[name] = prolog
                prolog = None
                continue
            # If we are not in a prologue then nothing further is needed
            if counter == 0:
                continue
            counter = counter + 1
            # Completely blank lines are ignored
            if len(line) == 0:
                continue
            # Look for a new section heading
            match_head = heading_re.search(line)
            if match_head is not None:
                if len(heading):  # Flush previous heading
                    prolog[heading] = content
                heading = match_head.group(1).lower()
                content = ""
                continue
            # Continuation line: text starts at column 7 ("*     text")
            if line.startswith("* "):
                content = content + line[6:] + "\n"
                continue
            elif line == "*":
                content = content + "\n"
                continue
            # Anything else inside a prologue is a parse error (counter > 0
            # is guaranteed here, so the original's guard was redundant).
            raise ValueError("Error parsing SST prologue line " + str(counter) + ":'" + line + "'")
    return results
|
def upload_path(instance, filename):
    '''Sanitize the user-provided file name, add timestamp for uniqness.'''
    # Replace spaces, NFKD-normalize, and lowercase the supplied name.
    safe_name = unicodedata.normalize('NFKD', filename.replace(" ", "_")).lower()
    # Files are grouped under a directory named for today's date.
    date_dir = str(timezone.now().date().isoformat())
    return os.path.join(date_dir, safe_name)
|
def resource_response(resource, depth=0):
    """Return a response for the *resource* of the appropriate content type.

    :param resource: resource to be returned in request
    :type resource: :class:`sandman.model.Model`
    :rtype: :class:`flask.Response`
    """
    if _get_acceptable_response_type() != JSON:
        return _single_resource_html_response(resource)
    # An 'expand' query argument requests one level of nested resources.
    depth = 1 if 'expand' in request.args else 0
    return _single_resource_json_response(resource, depth)
|
def address(self, num):
    """Search for company addresses by company number.

    Args:
        num (str): Company number to search on.
    """
    endpoint = "company/{}/registered-office-address".format(num)
    response = self.session.get(self._BASE_URI + endpoint)
    # Raises on non-success status codes before returning.
    self.handle_http_error(response)
    return response
|
def populate_event_que(self, que_obj):
    """Hand the event queue object to every managed router driver.

    This is for sending router events to event handler.
    """
    for entry in self.obj_dict.values():
        entry.get('drvr_obj').populate_event_que(que_obj)
|
def start(name, quiet=False, path=None):
    '''Start the named container.

    path
        path to the container parent
        default: /var/lib/lxc (system default)

        .. versionadded:: 2015.8.0

    .. code-block:: bash

        salt-run lxc.start name
    '''
    data = _do_names(name, 'start', path=path)
    # Only report progress when there is something to show and the caller
    # did not ask for quiet operation.
    if not quiet and data:
        payload = {'data': data, 'outputter': 'lxc_start'}
        __jid_event__.fire_event(payload, 'progress')
    return data
|
def logical_chassis_fwdl_sanity_input_host(self, **kwargs):
    """Build the NETCONF XML payload for logical-chassis fwdl-sanity input/host.

    Auto generated code, cleaned up: the original built a throwaway
    ``ET.Element("config")`` that was immediately discarded, and its local
    ``input`` shadowed the builtin.

    :param host: (required, via kwargs) host value placed under ``input``.
    :param callback: (optional, via kwargs) callable applied to the built
        element; defaults to ``self._callback``.
    :returns: whatever the callback returns for the built element.
    """
    config = ET.Element("logical_chassis_fwdl_sanity")
    input_el = ET.SubElement(config, "input")
    host_el = ET.SubElement(input_el, "host")
    host_el.text = kwargs.pop('host')
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
|
def scaffold(args):
    """%prog scaffold scaffold.fasta synteny.blast synteny.sizes synteny.bed
    physicalmap.blast physicalmap.sizes physicalmap.bed

    As evaluation of scaffolding, visualize external line of evidences:
    * Plot synteny to an external genome
    * Plot alignments to physical map
    * Plot alignments to genetic map (TODO)

    Each trio defines one panel to be plotted. blastfile defines the matchings
    between the evidences vs scaffolds. Then the evidence sizes, and evidence
    bed to plot dot plots.

    This script will plot a dot in the dot plot in the corresponding location
    the plots are one contig/scaffold per plot.
    """
    from jcvi.utils.iter import grouper
    p = OptionParser(scaffold.__doc__)
    p.add_option("--cutoff", type="int", default=1000000,
                 help="Plot scaffolds with size larger than [default: %default]")
    p.add_option("--highlights",
                 help="A set of regions in BED format to highlight [default: %default]")
    opts, args, iopts = p.set_image_options(args, figsize="14x8", dpi=150)
    # Arguments are the scaffold sizes file plus one or more (blast, sizes,
    # bed) trios, so the count must be 3k+1 with k >= 1.
    if len(args) < 4 or len(args) % 3 != 1:
        sys.exit(not p.print_help())
    highlights = opts.highlights
    scafsizes = Sizes(args[0])
    # Group the remaining arguments into evidence trios.
    trios = list(grouper(args[1:], 3))
    trios = [(a, Sizes(b), Bed(c)) for a, b, c in trios]
    if highlights:
        hlbed = Bed(highlights)
    for scaffoldID, scafsize in scafsizes.iter_sizes():
        # Skip scaffolds below the size cutoff.
        if scafsize < opts.cutoff:
            continue
        logging.debug("Loading {0} (size={1})".format(scaffoldID, thousands(scafsize)))
        # Write a one-line temporary sizes file for this scaffold only;
        # Sizes(...) then cleans it up via close(clean=True).
        tmpname = scaffoldID + ".sizes"
        tmp = open(tmpname, "w")
        tmp.write("{0}\t{1}".format(scaffoldID, scafsize))
        tmp.close()
        tmpsizes = Sizes(tmpname)
        tmpsizes.close(clean=True)
        if highlights:
            subhighlights = list(hlbed.sub_bed(scaffoldID))
        imagename = ".".join((scaffoldID, opts.format))
        plot_one_scaffold(scaffoldID, tmpsizes, None, trios, imagename, iopts,
                          highlights=subhighlights)
|
def _read_para_transaction_pacing ( self , code , cbit , clen , * , desc , length , version ) :
"""Read HIP TRANSACTION _ PACING parameter .
Structure of HIP TRANSACTION _ PACING parameter [ RFC 5770 ] :
0 1 2 3
0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
| Type | Length |
| Min Ta |
Octets Bits Name Description
0 0 transaction _ pacing . type Parameter Type
1 15 transaction _ pacing . critical Critical Bit
2 16 transaction _ pacing . length Length of Contents
4 32 transaction _ pacing . min _ ta Min Ta"""
|
if clen != 4 :
raise ProtocolError ( f'HIPv{version}: [Parano {code}] invalid format' )
_data = self . _read_unpack ( 4 )
transaction_pacing = dict ( type = desc , critical = cbit , length = clen , min_ta = _data , )
return transaction_pacing
|
def debugfile(filename, args=None, wdir=None, post_mortem=False):
    """Run ``filename`` under the pdb debugger via ``runfile``.

    args: command line arguments (string)
    wdir: working directory
    post_mortem: boolean, included for compatiblity with runfile
        (not used by this function's visible body)
    """
    debugger = pdb.Pdb()
    filename = debugger.canonic(filename)
    # Internal pdb state: skip stopping until execution reaches this file.
    debugger._wait_for_mainpyfile = 1
    debugger.mainpyfile = filename
    debugger._user_requested_quit = 0
    # Normalize Windows path separators so the command string is valid.
    if os.name == 'nt':
        filename = filename.replace('\\', '/')
    # `runfile` must be resolvable in the debugger's execution namespace
    # -- presumably provided by the hosting IDE; confirm before reuse.
    debugger.run("runfile(%r, args=%r, wdir=%r)" % (filename, args, wdir))
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.