signature
stringlengths 29
44.1k
| implementation
stringlengths 0
85.2k
|
|---|---|
def main(sleep_length=0.1):
    """Log to stdout using python logging in a while loop.

    Emits one numbered message every *sleep_length* seconds until the user
    interrupts with Ctrl-C.
    """
    log = logging.getLogger('sip.examples.log_spammer')
    log.info('Starting to spam log messages every %fs', sleep_length)
    message_count = 0
    try:
        while True:
            log.info('Hello %06i (log_spammer: %s, sip logging: %s)',
                     message_count, _version.__version__, __version__)
            time.sleep(sleep_length)
            message_count += 1
    except KeyboardInterrupt:
        # Ctrl-C is the normal way to stop the spammer; exit quietly.
        log.info('Exiting...')
|
def median(array):
    """Return the median value of a list of numbers.

    :param array: sequence of numeric values (not modified).
    :return: the middle value for an odd-length input, the mean of the two
        middle values (as a float) for an even-length input, and 0 for an
        empty input (historical behaviour, kept for backward compatibility).
    """
    n = len(array)
    if n < 1:
        # Historical contract: empty input yields 0 rather than raising.
        return 0
    # The previous special case for n == 1 was redundant: the sorted-midpoint
    # path below returns array[0] for a single element anyway.
    sorted_vals = sorted(array)
    midpoint = n // 2  # integer division (was the non-idiomatic int(n / 2))
    if n % 2 == 1:
        return sorted_vals[midpoint]
    return (sorted_vals[midpoint - 1] + sorted_vals[midpoint]) / 2.0
|
def _urlopen_as_json(self, url, headers=None):
    """Shortcut for returning the contents of *url* parsed as JSON.

    :param url: URL to fetch.
    :param headers: optional dict of HTTP headers.
    :return: the deserialized JSON payload.
    """
    # BUG FIX: Request() iterates headers.items() during construction, so the
    # previous code crashed with AttributeError whenever headers was left as
    # the default None.  Substitute an empty mapping in that case.
    req = Request(url, headers=headers or {})
    return json.loads(urlopen(req).read())
|
def _call_validators ( self ) :
"""Actually run all the validations .
Returns :
list ( str ) : Error messages from the validators ."""
|
msg = [ ]
msg . extend ( self . _validate_keyfile ( ) )
msg . extend ( self . _validate_dns_zone ( ) )
msg . extend ( self . _validate_retries ( ) )
msg . extend ( self . _validate_project ( ) )
return msg
|
def validate(self, session):
    """Validate that the current user can be saved.

    :param session: SQLAlchemy session
    :type session: :class:`sqlalchemy.Session`
    :return: ``True``
    :rtype: bool
    :raise: :class:`pyshop.helpers.sqla.ModelError` if user is not valid
    """
    problems = []
    if self.login:
        # Reject a login that already belongs to a different user row.
        existing = User.by_login(session, self.login)
        if existing and existing.id != self.id:
            problems.append(u'duplicate login %s' % self.login)
    else:
        problems.append(u'login is required')
    if not self.password:
        problems.append(u'password is required')
    if not self.email:
        problems.append(u'email is required')
    elif not re_email.match(self.email):
        problems.append(u'%s is not a valid email' % self.email)
    if problems:
        raise ModelError(problems)
    return True
|
def discrepancy_plot(data, name='discrepancy', report_p=True, format='png', suffix='-gof', path='./', fontmap=None, verbose=1):
    '''Generate goodness-of-fit deviate scatter plot.

    :Arguments:
        data: list
            List (or list of lists for vector-valued variables) of discrepancy
            values, output from the `pymc.diagnostics.discrepancy` function.
        name: string
            The name of the plot.
        report_p: bool
            Flag for annotating the p-value to the plot.
        format (optional): string
            Graphic output format (defaults to png).
        suffix (optional): string
            Filename suffix (defaults to "-gof").
        path (optional): string
            Specifies location for saving plots (defaults to local directory).
        fontmap (optional): dict
            Font map for plot.
        verbose (optional): int
            Print a progress message when > 0.
    '''
    if verbose > 0:
        print_('Plotting', name + suffix)
    # Default font sizes keyed by nesting level.
    if fontmap is None:
        fontmap = {1: 10, 2: 8, 3: 6, 4: 5, 5: 4}
    # Generate new scatter plot of observed vs. simulated deviates.
    figure()
    try:
        x, y = transpose(data)
    except ValueError:
        # data was already an (observed, simulated) pair of sequences.
        x, y = data
    scatter(x, y)
    # Plot the x=y reference line, padded 10% beyond the data range.
    lo = nmin(ravel(data))
    hi = nmax(ravel(data))
    datarange = hi - lo
    lo -= 0.1 * datarange
    hi += 0.1 * datarange
    pyplot((lo, hi), (lo, hi))
    # Plot options
    xlabel('Observed deviates', fontsize='x-small')
    ylabel('Simulated deviates', fontsize='x-small')
    if report_p:
        # Annotate the Bayesian p-value: the fraction of simulated deviates
        # that exceed their observed counterparts.
        count = sum(s > o for o, s in zip(x, y))
        text(lo + 0.1 * datarange, hi - 0.1 * datarange, 'p=%.3f' % (count / len(x)), horizontalalignment='center', fontsize=10)
    # Save to file, creating the output directory if needed.
    if not os.path.exists(path):
        os.mkdir(path)
    if not path.endswith('/'):
        path += '/'
    savefig("%s%s%s.%s" % (path, name, suffix, format))
|
def run_details(self, run):
    """Retrieve sequencing run details as a dictionary.

    :param run: run identifier sent to the nglims API.
    :return: the ``details`` entry of the JSON response.
    :raises ValueError: if the server response contains an ``error`` key.
    """
    run_data = dict(run=run)
    # BUG FIX: urlencode() returns str, but urllib.request requires the POST
    # body to be bytes on Python 3; encode it explicitly.
    body = urllib.parse.urlencode(run_data).encode("utf-8")
    req = urllib.request.Request("%s/nglims/api_run_details" % self._base_url, body)
    response = urllib.request.urlopen(req)
    info = json.loads(response.read())
    if "error" in info:
        raise ValueError("Problem retrieving info: %s" % info["error"])
    return info["details"]
|
def get_all_monomials(variables, extramonomials, substitutions, degree, removesubstitutions=True):
    """Return the monomials of a certain degree.

    Optionally appends *extramonomials*, drops monomials that are themselves
    substitution keys, canonicalizes each monomial, and de-duplicates.
    """
    monomials = get_monomials(variables, degree)
    if extramonomials is not None:
        monomials.extend(extramonomials)
    if removesubstitutions and substitutions is not None:
        # Drop monomials that would be rewritten away entirely.
        monomials = [candidate for candidate in monomials
                     if candidate not in substitutions]
    canonical = []
    for candidate in monomials:
        canonical.append(remove_scalar_factor(apply_substitutions(candidate, substitutions)))
    return unique(canonical)
|
def spawn_isolated_child(self):
    """Fork or launch a new child off the target context.

    :returns:
        mitogen.core.Context of the new child.
    """
    # Always fork so the child does not share interpreter state.
    chain = self.get_chain(use_fork=True)
    return chain.call(ansible_mitogen.target.spawn_isolated_child)
|
def from_pty(cls, stdout, true_color=False, ansi_colors_only=None, term=None):
    """Create an Output class from a pseudo terminal.

    (This will take the dimensions by reading the pseudo terminal
    attributes.)
    """
    assert stdout.isatty()

    def read_size():
        rows, columns = _get_size(stdout.fileno())
        # If the terminal (incorrectly) reports its size as 0, fall back to
        # a conventional 80x24.
        # See https://github.com/ipython/ipython/issues/10071
        return Size(rows=(rows or 24), columns=(columns or 80))

    return cls(stdout, read_size,
               true_color=true_color,
               ansi_colors_only=ansi_colors_only,
               term=term)
|
def do_sort(value, case_sensitive=False):
    """Sort an iterable.  If the iterable is made of strings the second
    parameter can be used to control the case sensitiveness of the
    comparison which is disabled by default.

    .. sourcecode:: jinja

        {% for item in iterable|sort %}
            ...
        {% endfor %}
    """
    if not case_sensitive:
        def sort_func(item):
            # Lower-case strings so 'B' and 'b' compare together; leave
            # non-string items untouched.
            # (was isinstance(item, basestring) -- py2-only name; the file
            # already relies on Python-3-only syntax elsewhere)
            if isinstance(item, str):
                item = item.lower()
            return item
    else:
        sort_func = None
    # BUG FIX: the previous code returned sorted(seq, ...) where `seq` was an
    # undefined name (NameError); the parameter is `value`.
    return sorted(value, key=sort_func)
|
def p_return_statement_1(self, p):
    """return_statement : RETURN SEMI
                        | RETURN AUTOSEMI
    """
    # NOTE: PLY consumes this docstring as the grammar production, so the
    # text above is behaviour, not documentation.
    # Build a bare `return;` node (no return expression) and record its
    # source position from the parser state.
    p[0] = self.asttypes.Return()
    p[0].setpos(p)
|
def get_tuids(self, files, revision, commit=True, chunk=50, repo=None):
    '''Wrapper for `_get_tuids` to limit the number of annotation calls to hg
    and separate the calls from DB transactions. Also used to simplify `_get_tuids`.

    :param files: single file path or list of file paths to annotate
    :param revision: changeset identifier (only the first 12 chars are used)
    :param commit: forwarded to `_get_tuids`
    :param chunk: number of files annotated (and threads started) per batch
    :param repo: repository branch; defaults to the configured hg branch
    :return: list of (file, tuids) pairs
    '''
    results = []
    revision = revision[:12]
    # For a single file, there is no need
    # to put it in an array when given.
    if not isinstance(files, list):
        files = [files]
    if repo is None:
        repo = self.config.hg.branch
    # Process the files `chunk` at a time to bound concurrent hg calls.
    for _, new_files in jx.groupby(files, size=chunk):
        for count, file in enumerate(new_files):
            new_files[count] = file.lstrip('/')
        annotations_to_get = []
        for file in new_files:
            with self.conn.transaction() as t:
                already_ann = self._get_annotation(revision, file, transaction=t)
                if already_ann:
                    # Cached annotation found; reuse it.
                    results.append((file, self.destringify_tuids(already_ann)))
                elif already_ann == '':
                    # Empty-string marker: file is known to have no annotation.
                    results.append((file, []))
                else:
                    annotations_to_get.append(file)
        if not annotations_to_get:
            # No new annotations to get, so get next set.
            continue
        # Get all the annotations in parallel and
        # store in annotated_files (one slot per thread, filled by index).
        annotated_files = [None] * len(annotations_to_get)
        threads = [Thread.run(str(thread_count), self._get_hg_annotate, revision, annotations_to_get[thread_count], annotated_files, thread_count, repo) for thread_count, _ in enumerate(annotations_to_get)]
        for t in threads:
            t.join()
        # Help for memory, because `chunk` (or a lot of)
        # threads are started at once.
        del threads
        with self.conn.transaction() as transaction:
            results.extend(self._get_tuids(transaction, annotations_to_get, revision, annotated_files, commit=commit, repo=repo))
        # Help for memory
        gc.collect()
    return results
|
def all_qubits(self) -> FrozenSet[ops.Qid]:
    """Returns the qubits acted upon by Operations in this circuit."""
    qubits = set()
    for moment in self._moments:
        qubits.update(moment.qubits)
    return frozenset(qubits)
|
def sign_key(self, keyid, default_key=None, passphrase=None):
    """sign (an imported) public key - keyid, with default secret key

    >>> import gnupg
    >>> gpg = gnupg.GPG(homedir="doctests")
    >>> key_input = gpg.gen_key_input()
    >>> key = gpg.gen_key(key_input)
    >>> gpg.sign_key(key['fingerprint'])
    >>> gpg.list_sigs(key['fingerprint'])

    :param str keyid: key shortID, longID, fingerprint or email_address
    :param str default_key: key to sign with (passed as ``--default-key``)
    :param str passphrase: passphrase used when creating the key, leave None otherwise
    :returns: The result giving status of the key signing...
              success can be verified by gpg.list_sigs(keyid)
    """
    args = []
    input_command = ""
    if passphrase:
        # Feed the passphrase to gpg through stdin (fd 0).
        passphrase_arg = "--passphrase-fd 0"
        input_command = "%s\n" % passphrase
        args.append(passphrase_arg)
    if default_key:
        args.append(str("--default-key %s" % default_key))
    # --command-fd 0 lets us answer gpg's interactive prompts via stdin.
    args.extend(["--command-fd 0", "--sign-key %s" % keyid])
    p = self._open_subprocess(args)
    result = self._result_map['signing'](self)
    # Answer the confirmation prompt with "y", preceded by the passphrase
    # when one was supplied.
    confirm_command = "%sy\n" % input_command
    p.stdin.write(b(confirm_command))
    self._collect_output(p, result, stdin=p.stdin)
    return result
|
def expect_column_pair_values_to_be_in_set(self, column_A, column_B, value_pairs_set, ignore_row_if="both_values_are_missing", result_format=None, include_config=False, catch_exceptions=None, meta=None):
    """Expect paired values from columns A and B to belong to a set of valid pairs.

    Args:
        column_A (str): The first column name
        column_B (str): The second column name
        value_pairs_set (list of tuples): All the valid pairs to be matched

    Keyword Args:
        ignore_row_if (str): "both_values_are_missing", "either_value_is_missing", "never"

    Other Parameters:
        result_format (str or None): Which output mode to use: `BOOLEAN_ONLY`, `BASIC`, `COMPLETE`, or `SUMMARY`.
            For more detail, see :ref:`result_format <result_format>`.
        include_config (boolean): If True, then include the expectation config as part of the result object. For more detail, see :ref:`include_config`.
        catch_exceptions (boolean or None): If True, then catch exceptions and include them as part of the result object. For more detail, see :ref:`catch_exceptions`.
        meta (dict or None): A JSON-serializable dictionary (nesting allowed) that will be included in the output without modification. For more detail, see :ref:`meta`.

    Returns:
        A JSON-serializable expectation result object.
        Exact fields vary depending on the values passed to :ref:`result_format <result_format>` and
        :ref:`include_config`, :ref:`catch_exceptions`, and :ref:`meta`.
    """
    # Abstract expectation: concrete backend subclasses provide the
    # implementation; the base class only declares the interface.
    raise NotImplementedError
|
def parse_manifest(self, fp):
    """Open manifest JSON file and build icon map.

    Args:
        fp (string or fileobject): Either manifest filepath to open or
            manifest File object.

    Returns:
        dict: Webfont icon map. Contains:

        * ``class_name``: Built icon classname with the prefix configured
          in the manifest (from parameters in Icomoon interface);
        * ``int``: Icon integer code like ``59649``;
        * ``hex``: Icon hexadecimal code like ``0xe901``;
        * ``unicode``: Icon unicode like ``U+E901``;
        * ``utf8``: Icon UTF8 code like ``\\e901``;
    """
    # Accept a filesystem path as well as an already-open file object.
    if isinstance(fp, string_types):
        fp = io.open(fp, 'r', encoding='utf-8')
    with fp as json_file:
        webfont_manifest = json.load(json_file)
    # The font set prefix determines the css classname.
    css_prefix = webfont_manifest.get('preferences').get('fontPref').get('prefix')
    icons_map = OrderedDict()
    for icon_entry in sorted(webfont_manifest.get('icons'), key=self.get_icon_key):
        properties = icon_entry.get('properties')
        name = properties.get('name')
        code = properties.get('code')
        hexa_code = hex(code)
        # Digits after the '0x' marker, used for both unicode and utf8 forms.
        hex_digits = ''.join(hexa_code.split('x')[1:])
        icons_map[name] = {
            'class_name': css_prefix + name,
            'int': code,
            'hex': hexa_code,
            'unicode': 'U+' + hex_digits.upper(),
            'utf8': '\\' + hex_digits.lower(),
        }
    return icons_map
|
def get(self, rate_type, role, session, fields=[], **kwargs):
    '''taobao.traderates.get - search trade rating information.

    Only ratings from within the last 180 days can be retrieved.

    :param rate_type: rating type to search for
    :param role: role of the rated party
    :param session: API session token
    :param fields: response fields to request; defaults to every TradeRate
        field (the shared default list is only read, never mutated)
    :param kwargs: optional filters: result, page_no, page_size, start_date,
        end_date, tid
    :return: the accumulated trade rates
    '''
    request = TOPRequest('taobao.traderates.get')
    request['rate_type'] = rate_type
    request['role'] = role
    if not fields:
        # No explicit selection: ask for all fields a TradeRate exposes.
        tradeRate = TradeRate()
        fields = tradeRate.fields
    request['fields'] = fields
    # NOTE(review): this skips kwargs that are both None and *not* in the
    # known-key list; the intent was presumably `k not in (...) or v is None`
    # (skip unknown keys and empty values) -- TODO confirm against the API
    # client before changing.  (`iteritems` / `v == None` are py2-era style.)
    for k, v in kwargs.iteritems():
        if k not in ('result', 'page_no', 'page_size', 'start_date', 'end_date', 'tid') and v == None:
            continue
        request[k] = v
    self.create(self.execute(request, session))
    return self.trade_rates
|
def stop(self, timeout=None):
    """Stop the thread.

    :param timeout: accepted for interface compatibility with Thread.join
        style callers; not used here.
    """
    logger.debug("ports plugin - Close thread for scan list {}".format(self._stats))
    # Signal the worker loop to exit; the thread checks this event itself.
    self._stopper.set()
|
def _process_file_continue_request(self, request: BaseRequest):
    '''Modify the request to resume downloading file.'''
    # Nothing on disk means nothing to resume from.
    if not os.path.exists(self._filename):
        _logger.debug('No file to continue.')
        return
    resume_offset = os.path.getsize(self._filename)
    request.set_continue(resume_offset)
    self._file_continue_requested = True
    _logger.debug('Continue file from {0}.', resume_offset)
|
def _yyyymmdd_to_year_fraction ( date ) :
"""Convert YYYMMDD . DD date string or float to YYYY . YYY"""
|
date = str ( date )
if '.' in date :
date , residual = str ( date ) . split ( '.' )
residual = float ( '0.' + residual )
else :
residual = 0.0
date = _datetime . datetime . strptime ( date , '%Y%m%d' )
date += _datetime . timedelta ( days = residual )
year = date . year
year_start = _datetime . datetime ( year = year , month = 1 , day = 1 )
next_year_start = _datetime . datetime ( year = year + 1 , month = 1 , day = 1 )
year_duration = next_year_start - year_start
year_elapsed = date - year_start
fraction = year_elapsed / year_duration
return year + fraction
|
def language(s):
    """Returns a (language, confidence)-tuple for the given string."""
    s = decode_utf8(s)
    # Tokenize into unique punctuation-stripped words; splitting after
    # apostrophes separates clitics like "l'" from the following word.
    words = set(w.strip(PUNCTUATION) for w in s.replace("'", "' ").split())
    total = float(len(words) or 1)
    scores = {}
    for code in LANGUAGES:
        lexicon = _module(code).__dict__["lexicon"]
        # Confidence = share of words found in this language's lexicon.
        scores[code] = sum(1 for w in words if w in lexicon) / total
    # Highest score wins; ties are broken in favour of English.
    return max(scores.items(), key=lambda kv: (kv[1], int(kv[0] == "en")))
|
def _check(peers):
    '''Checks whether the input is a valid list of peers and transforms domain names into IP Addresses'''
    # Input must be a list of strings.
    if not isinstance(peers, list):
        return False
    for peer in peers:
        if not isinstance(peer, six.string_types):
            return False
    if not HAS_NETADDR:
        # if does not have this lib installed, will simply try to load what
        # user specified; if the addresses are not correctly specified, will
        # throw error when loading the actual config
        return True
    ip_only_peers = []
    for peer in peers:
        try:
            # append the str value of the parsed address
            ip_only_peers.append(six.text_type(IPAddress(peer)))
        except AddrFormatError:
            # if not a valid IP Address
            # will try to see if it is a nameserver and resolve it
            if not HAS_DNSRESOLVER:
                continue
            # without the dns resolver cannot populate the list of NTP
            # entities based on their nameserver, so we'll move on
            dns_reply = []
            try:
                # try to see if it is a valid NS
                dns_reply = dns.resolver.query(peer)
            except dns.resolver.NoAnswer:
                # not a valid DNS entry either
                return False
            for dns_ip in dns_reply:
                ip_only_peers.append(six.text_type(dns_ip))
    # NOTE(review): this rebinds only the *local* name `peers`; the caller's
    # list is untouched, so the "transforms domain names" part of the
    # docstring never takes effect outside this function.  TODO confirm
    # whether `peers[:] = ip_only_peers` was intended.
    peers = ip_only_peers
    return True
|
def _is_collect_cx_state_runnable ( self , proc_location ) :
"""Determine if collect _ connection _ state is set and can effectively run .
If self . _ collect _ cx _ state is True and a custom proc _ location is provided , the system cannot
run ` ss ` or ` netstat ` over a custom proc _ location
: param proc _ location : str
: return : bool"""
|
if self . _collect_cx_state is False :
return False
if proc_location != "/proc" :
self . warning ( "Cannot collect connection state: currently with a custom /proc path: %s" % proc_location )
return False
return True
|
def parseFullScan(self, i, modifications=True):
    """parses scan info for giving a Spectrum Obj for plotting.
    takes significantly longer since it has to unzip/parse xml

    :param i: database row tuple; i[0] is the compressed scan blob,
        i[1] the peptide sequence, i[2] the PeptideID.
    :param modifications: when True, attach amino-acid modifications
        looked up from the database.
    :return: a populated PeptideObject, or None if the scan info could
        not be decompressed.
    """
    scanObj = PeptideObject()
    peptide = str(i[1])  # peptide sequence
    pid = i[2]           # PeptideID used as the lookup key below
    if modifications:
        # pid is an internal integer id, not user input, so the %-built SQL
        # is not an injection path here.
        sql = 'select aam.ModificationName,pam.Position,aam.DeltaMass from peptidesaminoacidmodifications pam left join aminoacidmodifications aam on (aam.AminoAcidModificationID=pam.AminoAcidModificationID) where pam.PeptideID=%s' % pid
        for row in self.conn.execute(sql):
            # row = (name, position, delta mass); attach to the residue at
            # the reported position.
            scanObj.addModification(peptide[row[1]], str(row[1]), str(row[2]), row[0])
    scanObj.peptide = peptide
    if self.decompressScanInfo(scanObj, i[0]):
        return scanObj
    # Decompression failed: no spectrum available for this scan.
    return None
|
def filter_data(self, min_len, max_len):
    """Preserves only samples which satisfy the following inequality:
        min_len <= src sample sequence length <= max_len AND
        min_len <= tgt sample sequence length <= max_len

    :param min_len: minimum sequence length
    :param max_len: maximum sequence length
    """
    # BUG FIX: the log message previously hard-coded "max len: 68,146"
    # instead of interpolating the max_len argument.
    logging.info(f'Filtering data, min len: {min_len}, max len: {max_len}')
    initial_len = len(self.src)
    filtered_src = []
    filtered_tgt = []
    # Keep a pair only when BOTH sides fall inside the length window.
    for src, tgt in zip(self.src, self.tgt):
        if min_len <= len(src) <= max_len and min_len <= len(tgt) <= max_len:
            filtered_src.append(src)
            filtered_tgt.append(tgt)
    self.src = filtered_src
    self.tgt = filtered_tgt
    filtered_len = len(self.src)
    logging.info(f'Pairs before: {initial_len}, after: {filtered_len}')
|
def get_tab_title(key, frame, overlay):
    """Computes a title for bokeh tabs from the key in the overlay, the
    element and the containing (Nd)Overlay."""
    if not isinstance(overlay, Overlay):
        # NdOverlay: describe the key along each key dimension.
        return ' | '.join(d.pprint_value_string(k)
                          for d, k in zip(overlay.kdims, key))
    if frame is None:
        parts = key
    else:
        parts = []
        if frame.label:
            parts.append(frame.label)
        # NOTE(review): both branches of this condition append the same
        # value; the default-group check is preserved verbatim (it may have
        # side effects) but looks redundant -- TODO confirm intent.
        if frame.group != frame.params('group').default:
            parts.append(frame.group)
        else:
            parts.append(frame.group)
    return ' '.join(parts)
|
def _v ( self , token , previous = None , next = None ) :
"""Returns a training vector for the given ( word , tag ) - tuple and its context ."""
|
def f ( v , s1 , s2 ) :
if s2 :
v [ s1 + " " + s2 ] = 1
p , n = previous , next
p = ( "" , "" ) if not p else ( p [ 0 ] or "" , p [ 1 ] or "" )
n = ( "" , "" ) if not n else ( n [ 0 ] or "" , n [ 1 ] or "" )
v = { }
f ( v , "b" , "b" )
# Bias .
f ( v , "h" , token [ 0 ] )
# Capitalization .
f ( v , "w" , token [ - 6 : ] if token not in self . known or token in self . unknown else "" )
f ( v , "x" , token [ - 3 : ] )
# Word suffix .
f ( v , "-x" , p [ 0 ] [ - 3 : ] )
# Word suffix left .
f ( v , "+x" , n [ 0 ] [ - 3 : ] )
# Word suffix right .
f ( v , "-t" , p [ 1 ] )
# Tag left .
f ( v , "-+" , p [ 1 ] + n [ 1 ] )
# Tag left + right .
f ( v , "+t" , n [ 1 ] )
# Tag right .
return v
|
def InitLocCheck(self):
    """make an interactive grid in which users can edit locations"""
    # if there is a location without a name, name it 'unknown'
    self.contribution.rename_item('locations', 'nan', 'unknown')
    # propagate lat/lon values from sites table
    self.contribution.get_min_max_lat_lon()
    # propagate lithologies & geologic classes from sites table
    self.contribution.propagate_cols_up(['lithologies', 'geologic_classes'], 'locations', 'sites')
    res = self.contribution.propagate_min_max_up()
    if cb.not_null(res):
        # min/max ages propagated successfully, so the unit column is
        # meaningful to copy up as well
        self.contribution.propagate_cols_up(['age_unit'], 'locations', 'sites')
    # set up frame
    self.panel = wx.Panel(self, style=wx.SIMPLE_BORDER)
    self.grid_frame = grid_frame3.GridFrame(self.contribution, self.WD, 'locations', 'locations', self.panel, main_frame=self.main_frame)
    # redefine default 'save & exit grid' button to go to next dialog instead
    self.grid_frame.exitButton.SetLabel('Save and continue')
    grid = self.grid_frame.grid
    self.grid_frame.Bind(wx.EVT_BUTTON, lambda event: self.onContinue(event, grid, self.InitAgeCheck), self.grid_frame.exitButton)
    # add back button (returns to the site-checking step)
    self.backButton = wx.Button(self.grid_frame.panel, id=-1, label='Back', name='back_btn')
    self.Bind(wx.EVT_BUTTON, lambda event: self.onbackButton(event, self.InitSiteCheck), self.backButton)
    self.grid_frame.main_btn_vbox.Add(self.backButton, flag=wx.ALL, border=5)
    # re-do fit
    self.grid_frame.do_fit(None, min_size=self.min_size)
    # center
    self.grid_frame.Centre()
    return
|
def set_default_names(data):
    """Sets index names to 'index' for regular, or 'level_x' for Multi"""
    if com._all_not_none(*data.index.names):
        index_names = data.index.names
        # Every level is already named; just warn about names that would
        # collide with the defaults we generate below.
        if len(index_names) == 1 and data.index.name == 'index':
            warnings.warn("Index name of 'index' is not round-trippable")
        elif len(index_names) > 1 and any(x.startswith('level_') for x in index_names):
            warnings.warn("Index names beginning with 'level_' are not "
                          "round-trippable")
        return data
    # Work on a copy so the caller's frame keeps its original index names.
    data = data.copy()
    if data.index.nlevels > 1:
        data.index.names = [
            'level_{}'.format(position) if name is None else name
            for position, name in enumerate(data.index.names)
        ]
    else:
        data.index.name = data.index.name or 'index'
    return data
|
def resize_state_meta(state_m, factor, gaphas_editor=True):
    """Resize state meta data recursively, which includes LibraryStateModel
    meta data and its internal state_copy.

    :param state_m: state model whose meta data (position/size) is scaled
    :param factor: scale factor applied via mult_two_vectors
    :param gaphas_editor: whether meta data is read/written in gaphas format
    """
    # Scale the state's own relative position and size.
    old_rel_pos = state_m.get_meta_data_editor(for_gaphas=gaphas_editor)['rel_pos']
    state_m.set_meta_data_editor('rel_pos', mult_two_vectors(factor, old_rel_pos), from_gaphas=gaphas_editor)
    old_size = state_m.get_meta_data_editor(for_gaphas=gaphas_editor)['size']
    state_m.set_meta_data_editor('size', mult_two_vectors(factor, old_size), from_gaphas=gaphas_editor)
    if gaphas_editor:
        # In the gaphas editor the name element carries its own position/size.
        old_rel_pos = state_m.get_meta_data_editor(for_gaphas=gaphas_editor)['name']['rel_pos']
        state_m.set_meta_data_editor('name.rel_pos', mult_two_vectors(factor, old_rel_pos), from_gaphas=gaphas_editor)
        old_size = state_m.get_meta_data_editor(for_gaphas=gaphas_editor)['name']['size']
        state_m.set_meta_data_editor('name.size', mult_two_vectors(factor, old_size), from_gaphas=gaphas_editor)
    if isinstance(state_m, LibraryStateModel):
        if gaphas_editor and state_m.state_copy_initialized:
            # Ports are either rescaled like everything else or given their
            # initial library scaling, depending on whether they were scaled
            # before.
            if state_m.meta_data_was_scaled:
                resize_state_port_meta(state_m, factor, gaphas_editor)
            else:
                scale_library_ports_meta_data(state_m, gaphas_editor)
        if state_m.state_copy_initialized:
            # Recurse into the library's internal state copy.
            resize_state_meta(state_m.state_copy, factor, gaphas_editor)
    else:
        resize_state_port_meta(state_m, factor, gaphas_editor)
        if isinstance(state_m, ContainerStateModel):
            # Containers also scale their connections and all child states.
            _resize_connection_models_list(state_m.transitions[:] + state_m.data_flows[:], factor, gaphas_editor)
            for child_state_m in state_m.states.values():
                resize_state_meta(child_state_m, factor, gaphas_editor)
|
def call_in_executor(self, func: Callable, *args, executor: Union[Executor, str] = None, **kwargs) -> Awaitable:
    """Call the given callable in an executor.

    :param func: the callable to call
    :param args: positional arguments to call the callable with
    :param executor: either an :class:`~concurrent.futures.Executor` instance, the resource
        name of one or ``None`` to use the event loop's default executor
    :param kwargs: keyword arguments to call the callable with
    :return: an awaitable that resolves to the return value of the call
    """
    assert check_argument_types()
    # A string names an Executor registered as a resource; look it up.
    resolved_executor = executor
    if isinstance(resolved_executor, str):
        resolved_executor = self.require_resource(Executor, resolved_executor)
    return asyncio_extras.call_in_executor(func, *args, executor=resolved_executor, **kwargs)
|
def Reset(self):
    'Reset Axis and set default parameters for H-bridge'
    # Each group below writes a register-address byte followed by that
    # register's value byte(s).  The register names in the comments look like
    # an L6470/dSPIN-style map -- TODO(review): confirm against the actual
    # motor-driver datasheet.
    spi.SPI_write_byte(self.CS, 0xC0)
    # reset (ResetDevice command)
    # spi.SPI_write_byte(self.CS, 0x14)  # Stall Threshold setup
    # spi.SPI_write_byte(self.CS, 0xFF)
    # spi.SPI_write_byte(self.CS, 0x13)  # Over Current Threshold setup
    # spi.SPI_write_byte(self.CS, 0xFF)
    spi.SPI_write_byte(self.CS, 0x15)
    # Full Step speed
    spi.SPI_write_byte(self.CS, 0xFF)
    spi.SPI_write_byte(self.CS, 0xFF)
    spi.SPI_write_byte(self.CS, 0x05)
    # ACC (acceleration)
    spi.SPI_write_byte(self.CS, 0x00)
    spi.SPI_write_byte(self.CS, 0x20)
    spi.SPI_write_byte(self.CS, 0x06)
    # DEC (deceleration)
    spi.SPI_write_byte(self.CS, 0x00)
    spi.SPI_write_byte(self.CS, 0x20)
    spi.SPI_write_byte(self.CS, 0x0A)
    # KVAL_RUN (running voltage amplitude)
    spi.SPI_write_byte(self.CS, 0xd0)
    spi.SPI_write_byte(self.CS, 0x0B)
    # KVAL_ACC
    spi.SPI_write_byte(self.CS, 0xd0)
    spi.SPI_write_byte(self.CS, 0x0C)
    # KVAL_DEC
    spi.SPI_write_byte(self.CS, 0xd0)
    spi.SPI_write_byte(self.CS, 0x16)
    # STEPPER (step mode)
    spi.SPI_write_byte(self.CS, 0b00000000)
    spi.SPI_write_byte(self.CS, 0x18)
    # CONFIG
    spi.SPI_write_byte(self.CS, 0b00111000)
    spi.SPI_write_byte(self.CS, 0b00000000)
|
def has_matching_etag(remote_storage, source_storage, path, prefixed_path):
    """Compare etag of path in source storage with remote.

    Returns True when the remote etag equals the local file hash.
    """
    # Left operand (remote etag) is evaluated first, matching the original
    # call order.
    return get_etag(remote_storage, path, prefixed_path) == get_file_hash(source_storage, path)
|
def to_definition(self):
    """Converts the name instance to a pyqode.core.share.Definition"""
    # Map each node type to its display icon.
    icon_by_type = {
        Name.Type.Root: icons.ICON_MIMETYPE,
        Name.Type.Division: icons.ICON_DIVISION,
        Name.Type.Section: icons.ICON_SECTION,
        Name.Type.Variable: icons.ICON_VAR,
        Name.Type.Paragraph: icons.ICON_FUNC,
    }
    definition = Definition(self.name, self.line, self.column,
                            icon_by_type[self.node_type], self.description)
    # Convert the whole subtree recursively.
    for child in self.children:
        definition.add_child(child.to_definition())
    return definition
|
def group(args):
    """%prog group tabfile > tabfile.grouped

    Given a tab-delimited file, either group all elements within the file or
    group the elements in the value column(s) based on the key (groupby) column

    For example, convert this | into this
    a 2 3 4                   | a,2,3,4,5,6
    a 5 6                     | b,7,8
    b 7 8                     | c,9,10,11
    c 9                       |
    c 10 11                   |

    If grouping by a particular column,
    convert this | into this:
    a 2 3 4      | a 2,5 3,6 4
    a 5 6        | b 7 8
    b 7 8        | c 9,10 11
    c 9          |
    c 10 11      |

    By default, it uniqifies all the grouped elements
    """
    # NOTE: OptionParser consumes this docstring as the CLI usage text.
    from jcvi.utils.cbook import AutoVivification
    from jcvi.utils.grouper import Grouper
    p = OptionParser(group.__doc__)
    p.set_sep()
    p.add_option("--groupby", default=None, type='int', help="Default column to groupby [default: %default]")
    p.add_option("--groupsep", default=',', help="Separator to join the grouped elements [default: `%default`]")
    p.add_option("--nouniq", default=False, action="store_true", help="Do not uniqify the grouped elements [default: %default]")
    opts, args = p.parse_args(args)
    if len(args) != 1:
        sys.exit(not p.print_help())
    tabfile, = args
    sep = opts.sep
    groupby = opts.groupby
    groupsep = opts.groupsep
    cols = []
    # AutoVivification gives a nested dict keyed by (row key, column) when
    # grouping by a column; Grouper is a union-find used to merge whole rows.
    grouper = AutoVivification() if groupby is not None else Grouper()
    fp = must_open(tabfile)
    for row in fp:
        row = row.rstrip()
        atoms = row.split(sep)
        if groupby is not None:
            # Grow the known column list to the widest row seen so far.
            if len(cols) < len(atoms):
                cols = [x for x in xrange(len(atoms))]
            if groupby not in cols:
                logging.error("groupby col index `{0}` is out of range".format(groupby))
                sys.exit()
            key = atoms[groupby]
            for col in cols:
                if col == groupby:
                    continue
                # First value for this (key, col): pick list vs set depending
                # on whether duplicates should be kept.
                if not grouper[key][col]:
                    grouper[key][col] = [] if opts.nouniq else set()
                if col < len(atoms):
                    if groupsep in atoms[col]:
                        # The cell already contains joined elements; merge
                        # them individually.
                        for atom in atoms[col].split(groupsep):
                            if opts.nouniq:
                                grouper[key][col].append(atom)
                            else:
                                grouper[key][col].add(atom)
                    else:
                        if opts.nouniq:
                            grouper[key][col].append(atoms[col])
                        else:
                            grouper[key][col].add(atoms[col])
        else:
            # Whole-row mode: all atoms in a row belong to one group.
            grouper.join(*atoms)
    for key in grouper:
        if groupby is not None:
            line = []
            for col in cols:
                if col == groupby:
                    line.append(key)
                elif col in grouper[key].keys():
                    line.append(groupsep.join(grouper[key][col]))
                else:
                    # Column never seen for this key.
                    line.append("na")
            print(sep.join(line))
        else:
            # `key` is the group (an iterable of atoms) in whole-row mode.
            print(groupsep.join(key))
|
def _descope_flag(self, flag, default_scope):
    """If the flag is prefixed by its scope, in the old style, extract the scope.
    Otherwise assume it belongs to default_scope.

    returns a pair (scope, flag).
    """
    for scope_prefix, scope_info in self._known_scoping_prefixes:
        for flag_prefix in ('--', '--no-'):
            prefix = flag_prefix + scope_prefix
            if not flag.startswith(prefix):
                continue
            scope = scope_info.scope
            if scope_info.category == ScopeInfo.SUBSYSTEM and default_scope != GLOBAL_SCOPE:
                # We allow goal.task --subsystem-foo to refer to the
                # task-level subsystem instance, i.e., as if qualified by
                # --subsystem-goal-task-foo.
                # Note that this means that we can't set a task option on
                # the cmd-line if its name happens to start with a subsystem
                # scope.
                # TODO: Either fix this or at least detect such options and warn.
                task_subsystem_scope = '{}.{}'.format(scope_info.scope, default_scope)
                if task_subsystem_scope in self._known_scopes:
                    # Such a task subsystem actually exists.
                    scope = task_subsystem_scope
            return scope, flag_prefix + flag[len(prefix):]
    return default_scope, flag
|
def _slice_data(self, source_area, slices, dataset):
    """Slice the data to reduce it."""
    slice_x, slice_y = slices
    reduced = dataset.isel(x=slice_x, y=slice_y)
    # Sanity-check that the reduced data matches the target area's shape.
    assert ('x', source_area.x_size) in reduced.sizes.items()
    assert ('y', source_area.y_size) in reduced.sizes.items()
    reduced.attrs['area'] = source_area
    return reduced
|
def _populate_common_request ( self , request ) :
    '''Fill in the fields shared by all outgoing Requests.'''
    url_record = self . _item_session . url_record
    # The Referer may have already been set (e.g. by the --referer option);
    # only derive it from the parent URL when it is absent.
    if url_record . parent_url and not request . fields . get ( 'Referer' ) :
        self . _add_referrer ( request , url_record )
    login = self . _fetch_rule . http_login
    if login :
        request . username , request . password = login
|
def get_next_invoke_id ( self , addr ) :
    """Called by clients to get an unused invoke ID.

    Scans all 256 possible invoke IDs starting from ``self.nextInvokeID``
    and returns the first one not already used by a client transaction for
    ``addr``, advancing ``self.nextInvokeID`` past it.

    :raises RuntimeError: if all 256 invoke IDs are in use for ``addr``.
    """
    if _debug :
        StateMachineAccessPoint . _debug ( "get_next_invoke_id" )
    initialID = self . nextInvokeID
    # Bug fix: the original wrap-around check fired before the 256th
    # candidate (initialID + 255) was ever tested, so a single free ID
    # could be missed.  Enumerate all 256 candidates explicitly instead.
    for offset in range ( 256 ) :
        invokeID = ( initialID + offset ) % 256
        in_use = any ( ( invokeID == tr . invokeID ) and ( addr == tr . pdu_address ) for tr in self . clientTransactions )
        if not in_use :
            # Start the next search just past the ID we are handing out.
            self . nextInvokeID = ( invokeID + 1 ) % 256
            return invokeID
    raise RuntimeError ( "no available invoke ID" )
|
def _get_targets ( self , target , include_global = True ) :
"""Internal iterator to split up a complete target into the possible parts
it may match .
For example : :
> > > list ( aliases . _ get _ targets ( ' my _ app . MyModel . somefield ' ) )
[ ' ' , ' my _ app ' , ' my _ app . MyModel ' , ' my _ app . MyModel . somefield ' ]"""
|
target = self . _coerce_target ( target )
if include_global :
yield ''
if not target :
return
target_bits = target . split ( '.' )
for i in range ( len ( target_bits ) ) :
yield '.' . join ( target_bits [ : i + 1 ] )
|
def _set_html2text ( self , settings ) :
    """Configure the internal html2text converter.

    Warning: does not check options/values.

    :param settings: Settings for the object
        (see: https://github.com/Alir3z4/html2text/blob/master/docs/usage.md)
    :type settings: dict
    :rtype: None
    """
    self . _text_maker = html2text . HTML2Text ( )
    for key , value in settings . items ( ) :
        # Reject option names html2text does not know about.
        if not hasattr ( self . _text_maker , key ) :
            raise WEBParameterException ( "Setting html2text failed - unknown parameter {}" . format ( key ) )
        setattr ( self . _text_maker , key , value )
|
def is_installed_extension ( name , user = None , host = None , port = None , maintenance_db = None , password = None , runas = None ) :
    '''Test if a specific extension is installed

    CLI Example:

    .. code-block:: bash

        salt '*' postgres.is_installed_extension
    '''
    # An extension is installed iff get_installed_extension returns data.
    return bool ( get_installed_extension ( name , user = user , host = host , port = port , maintenance_db = maintenance_db , password = password , runas = runas ) )
|
def find_smallest_of_three ( num1 , num2 , num3 ) :
    """Return the smallest of three input numbers.

    Examples:
        >>> find_smallest_of_three(10, 20, 0)
        0
        >>> find_smallest_of_three(19, 15, 18)
        15
        >>> find_smallest_of_three(-10, -20, -30)
        -30

    Args:
        num1, num2, num3: Three numbers to compare.

    Returns:
        The smallest of the three numbers.
    """
    # Bug fix (docs): the first doctest was missing its expected output "0"
    # and would fail when run under doctest.
    return min ( num1 , num2 , num3 )
|
def scroll_constrain ( self ) :
    '''Clamp the scroll region so it stays within the screen region.'''
    # Row 1 is the first usable row; never let the region start above it.
    self . scroll_row_start = max ( self . scroll_row_start , 1 )
    # The region may not extend past the last screen row.
    self . scroll_row_end = min ( self . scroll_row_end , self . rows )
|
def isoformat ( self , sep = 'T' ) :
    """Format as "0000-%m-%d<sep>%H:%M:%S[.%f]", with the year fixed to 0000.

    :param sep:
        A single character separator placed between the date and time
        portions

    :return:
        The formatted datetime as a unicode string in Python 3 and a byte
        string in Python 2
    """
    fmt = '0000-%m-%d' + sep + '%H:%M:%S'
    if self . microsecond != 0 :
        # Only include fractional seconds when they carry information.
        fmt += '.%f'
    return self . strftime ( fmt )
|
def _check_device ( self , requested_device , map_device ) :
"""Compare the requested device with the map device and
return the map device if it differs from the requested device
along with a warning ."""
|
type_1 = torch . device ( requested_device )
type_2 = torch . device ( map_device )
if type_1 != type_2 :
warnings . warn ( 'Setting self.device = {} since the requested device ({}) ' 'is not available.' . format ( map_device , requested_device ) , DeviceWarning )
return map_device
# return requested _ device instead of map _ device even though we
# checked for * type * equality as we might have ' cuda : 0 ' vs . ' cuda : 1 ' .
return requested_device
|
def load_object ( name ) :
    """Load an object from a module given its dotted path.

    :param name: dotted path such as ``"module.submodule.object"``
    :returns: the named attribute of the imported module
    :raises Exception: if ``name`` contains no dot
    """
    if "." not in name :
        raise Exception ( 'load object need module.object' )
    module_name , object_name = name . rsplit ( '.' , 1 )
    # importlib.import_module works identically on Python 2 and 3,
    # replacing the previous six-dependent __import__ dance.
    import importlib
    module = importlib . import_module ( module_name )
    return getattr ( module , object_name )
|
def RSA ( im : array , radius : int , volume_fraction : int = 1 , mode : str = 'extended' ) :
    r"""Generates a sphere or disk packing using Random Sequential Addition

    This ensures that spheres do not overlap but does not guarantee they
    are tightly packed.

    Parameters
    ----------
    im : ND-array
        The image into which the spheres should be inserted.  By accepting an
        image rather than a shape, it allows users to insert spheres into an
        already existing image.  To begin the process, start with an array of
        zero such as ``im = np.zeros([200, 200], dtype=bool)``.
    radius : int
        The radius of the disk or sphere to insert.
    volume_fraction : scalar
        The fraction of the image that should be filled with spheres.  The
        spheres are added as 1's, so each sphere addition increases the
        ``volume_fraction`` until the specified limit is reached.
    mode : string
        Controls how the edges of the image are handled.  Options are:

        'extended' - Spheres are allowed to extend beyond the edge of the image
        'contained' - Spheres are all completely within the image
        'periodic' - The portion of a sphere that extends beyond the image is
        inserted into the opposite edge of the image (Not Implemented Yet!)

    Returns
    -------
    image : ND-array
        A copy of ``im`` with spheres of specified radius *added* to the
        background.

    Notes
    -----
    Each sphere is filled with 1's, but the center is marked with a 2.  This
    allows easy boolean masking to extract only the centers, which can be
    converted to coordinates using ``scipy.where`` and used for other purposes.
    To obtain only the spheres, use ``im = im == 1``.

    This function adds spheres to the background of the received ``im``, which
    allows iteratively adding spheres of different radii to the unfilled space.

    References
    ----------
    [1] Random Heterogeneous Materials, S. Torquato (2001)
    """
    # Note: The 2D vs 3D splitting of this just me being lazy... I can't be
    # bothered to figure it out programmatically right now
    # TODO: Ideally the spheres should be added periodically
    print ( 78 * '―' )
    print ( 'RSA: Adding spheres of size ' + str ( radius ) )
    d2 = len ( im . shape ) == 2
    # Exclusion radius: a new center may not fall within 2*radius of an
    # existing center, which guarantees non-overlapping spheres.
    mrad = 2 * radius
    if d2 :
        im_strel = ps_disk ( radius )
        mask_strel = ps_disk ( mrad )
    else :
        im_strel = ps_ball ( radius )
        mask_strel = ps_ball ( mrad )
    if sp . any ( im > 0 ) :
        # Dilate existing objects by im_strel to remove pixels near them
        # from consideration for sphere placement
        mask = ps . tools . fftmorphology ( im > 0 , im_strel > 0 , mode = 'dilate' )
        mask = mask . astype ( int )
    else :
        mask = sp . zeros_like ( im )
    if mode == 'contained' :
        # Also forbid centers within one radius of the image border.
        mask = _remove_edge ( mask , radius )
    elif mode == 'extended' :
        pass
    elif mode == 'periodic' :
        raise Exception ( 'Periodic edges are not implemented yet' )
    else :
        raise Exception ( 'Unrecognized mode: ' + mode )
    # Current volume fraction of foreground (assumes mostly 0/1 values;
    # the 2-valued centers inflate this slightly -- TODO confirm intended).
    vf = im . sum ( ) / im . size
    free_spots = sp . argwhere ( mask == 0 )
    i = 0
    # Keep inserting spheres at random free centers until the target volume
    # fraction is reached or no valid center remains.
    while vf <= volume_fraction and len ( free_spots ) > 0 :
        choice = sp . random . randint ( 0 , len ( free_spots ) , size = 1 )
        if d2 :
            [ x , y ] = free_spots [ choice ] . flatten ( )
            im = _fit_strel_to_im_2d ( im , im_strel , radius , x , y )
            mask = _fit_strel_to_im_2d ( mask , mask_strel , mrad , x , y )
            # Mark the sphere center with a 2 for later extraction.
            im [ x , y ] = 2
        else :
            [ x , y , z ] = free_spots [ choice ] . flatten ( )
            im = _fit_strel_to_im_3d ( im , im_strel , radius , x , y , z )
            mask = _fit_strel_to_im_3d ( mask , mask_strel , mrad , x , y , z )
            im [ x , y , z ] = 2
        free_spots = sp . argwhere ( mask == 0 )
        vf = im . sum ( ) / im . size
        i += 1
    if vf > volume_fraction :
        print ( 'Volume Fraction' , volume_fraction , 'reached' )
    if len ( free_spots ) == 0 :
        print ( 'No more free spots' , 'Volume Fraction' , vf )
    return im
|
def market_if_touched ( self , accountID , ** kwargs ) :
    """Shortcut to create a MarketIfTouched Order in an Account.

    Args:
        accountID: The ID of the Account
        kwargs: The arguments to create a MarketIfTouchedOrderRequest

    Returns:
        v20.response.Response containing the results from submitting
        the request
    """
    order_request = MarketIfTouchedOrderRequest ( ** kwargs )
    return self . create ( accountID , order = order_request )
|
def _is_germline ( rec ) :
    """Handle somatic INFO classifications from MuTect, MuTect2, VarDict, VarScan and Octopus."""
    # Explicit somatic annotations mean the call is not germline.
    if _has_somatic_flag ( rec ) or _is_mutect2_somatic ( rec ) :
        return False
    # VarScan-style SS=1 marks a germline call.
    ss_flag = rec . INFO . get ( "SS" )
    if ss_flag is not None and str ( ss_flag ) == "1" :
        return True
    # Octopus, assessed for potentially being Germline and not flagged SOMATIC
    # https://github.com/luntergroup/octopus/wiki/Calling-models:-Cancer#qual-vs-pp
    pp = rec . INFO . get ( "PP" )
    if pp and float ( pp ) / float ( rec . QUAL ) >= 0.5 :
        return True
    # VarDict reports a textual STATUS classification.
    status_flag = rec . INFO . get ( "STATUS" )
    if status_flag is not None and str ( status_flag ) . lower ( ) in ( "germline" , "likelyloh" , "strongloh" , "afdiff" , "deletion" ) :
        return True
    return False
|
def density_contourf ( self , * args , ** kwargs ) :
    """Estimates point density of the given linear orientation measurements
    (Interpreted as poles, lines, rakes, or "raw" longitudes and latitudes
    based on the `measurement` keyword argument.) and plots filled contours
    of the resulting density distribution.

    Parameters
    ----------
    *args : A variable number of sequences of measurements.
        By default, this will be expected to be ``strike`` & ``dip``, both
        array-like sequences representing poles to planes.  (Rake
        measurements require three parameters, thus the variable number of
        arguments.) The ``measurement`` kwarg controls how these arguments
        are interpreted.
    measurement : string, optional
        Controls how the input arguments are interpreted. Defaults to
        ``"poles"``.  May be one of the following:
        ``"poles"`` : strikes, dips
            Arguments are assumed to be sequences of strikes and dips
            of planes. Poles to these planes are used for contouring.
        ``"lines"`` : plunges, bearings
            Arguments are assumed to be sequences of plunges and
            bearings of linear features.
        ``"rakes"`` : strikes, dips, rakes
            Arguments are assumed to be sequences of strikes, dips, and
            rakes along the plane.
        ``"radians"`` : lon, lat
            Arguments are assumed to be "raw" longitudes and latitudes
            in the stereonet's underlying coordinate system.
    method : string, optional
        The method of density estimation to use. Defaults to
        ``"exponential_kamb"``. May be one of the following:
        ``"exponential_kamb"`` : Kamb with exponential smoothing
            A modified Kamb method using exponential smoothing [1]_. Units
            are in numbers of standard deviations by which the density
            estimate differs from uniform.
        ``"linear_kamb"`` : Kamb with linear smoothing
            A modified Kamb method using linear smoothing [1]_. Units are
            in numbers of standard deviations by which the density estimate
            differs from uniform.
        ``"kamb"`` : Kamb with no smoothing
            Kamb's method [2]_ with no smoothing. Units are in numbers of
            standard deviations by which the density estimate differs from
            uniform.
        ``"schmidt"`` : 1% counts
            The traditional "Schmidt" (a.k.a. 1%) method. Counts points
            within a counting circle comprising 1% of the total area of the
            hemisphere. Does not take into account sample size. Units are
            in points per 1% area.
    sigma : int or float, optional
        The number of standard deviations defining the expected number of
        standard deviations by which a random sample from a uniform
        distribution of points would be expected to vary from being evenly
        distributed across the hemisphere. This controls the size of the
        counting circle, and therefore the degree of smoothing. Higher
        sigmas will lead to more smoothing of the resulting density
        distribution. This parameter only applies to Kamb-based methods.
        Defaults to 3.
    gridsize : int or 2-item tuple of ints, optional
        The size of the grid that the density is estimated on. If a single
        int is given, it is interpreted as an NxN grid. If a tuple of ints
        is given it is interpreted as (nrows, ncols). Defaults to 100.
    weights : array-like, optional
        The relative weight to be applied to each input measurement. The
        array will be normalized to sum to 1, so absolute value of the
        weights do not affect the result. Defaults to None.
    **kwargs
        Additional keyword arguments are passed on to matplotlib's
        `contourf` function.

    Returns
    -------
    A matplotlib `QuadContourSet`.

    See Also
    --------
    mplstereonet.density_grid
    mplstereonet.StereonetAxes.density_contour
    matplotlib.pyplot.contourf
    matplotlib.pyplot.clabel

    Examples
    --------
    Plot filled density contours of poles to the specified planes using
    a modified Kamb method with exponential smoothing [1]_.

    >>> strikes, dips = [120, 315, 86], [22, 85, 31]
    >>> ax.density_contourf(strikes, dips)

    Plot filled density contours of a set of linear orientation
    measurements.

    >>> plunges, bearings = [-10, 20, -30], [120, 315, 86]
    >>> ax.density_contourf(plunges, bearings, measurement='lines')

    Plot filled density contours of a set of rake measurements.

    >>> strikes, dips, rakes = [120, 315, 86], [22, 85, 31], [-5, 20, 9]
    >>> ax.density_contourf(strikes, dips, rakes, measurement='rakes')

    Plot filled density contours of a set of "raw" longitudes and
    latitudes.

    >>> lon, lat = np.radians([-40, 30, -85]), np.radians([21, -59, 45])
    >>> ax.density_contourf(lon, lat, measurement='radians')

    Plot filled density contours of poles to planes using a Kamb method
    [2]_ with the density estimated on a 10x10 grid (in long-lat space)

    >>> strikes, dips = [120, 315, 86], [22, 85, 31]
    >>> ax.density_contourf(strikes, dips, method='kamb', gridsize=10)

    Plot filled density contours of poles to planes with contours at
    [1,2,3] standard deviations.

    >>> strikes, dips = [120, 315, 86], [22, 85, 31]
    >>> ax.density_contourf(strikes, dips, levels=[1,2,3])

    References
    ----------
    .. [1] Vollmer, 1995. C Program for Automatic Contouring of Spherical
       Orientation Data Using a Modified Kamb Method. Computers &
       Geosciences, Vol. 21, No. 1, pp. 31--49.

    .. [2] Kamb, 1959. Ice Petrofabric Observations from Blue Glacier,
       Washington, in Relation to Theory and Experiment. Journal of
       Geophysical Research, Vol. 64, No. 11, pp. 1891--1909.
    """
    # Convert the measurements into a gridded density estimate, then let
    # matplotlib's contourf render the filled contours with any remaining
    # keyword arguments.
    lon , lat , totals , kwargs = self . _contour_helper ( args , kwargs )
    return self . contourf ( lon , lat , totals , ** kwargs )
|
def JoinPath ( self , path_segments ) :
    """Joins the path segments into a path.

    Args:
        path_segments (list[str]): path segments.

    Returns:
        str: joined path segments prefixed with the path separator.
    """
    separator = self . PATH_SEPARATOR
    # Split each segment on the separator so that embedded separators and
    # runs of consecutive separators collapse into single boundaries.
    flattened = [ ]
    for segment in path_segments :
        flattened . extend ( segment . split ( separator ) )
    # Drop the empty pieces produced by leading/trailing/double separators.
    parts = [ part for part in flattened if part ]
    return '{0:s}{1:s}' . format ( separator , separator . join ( parts ) )
|
def interpolate ( x , y , z , interp_type = 'linear' , hres = 50000 , minimum_neighbors = 3 , gamma = 0.25 , kappa_star = 5.052 , search_radius = None , rbf_func = 'linear' , rbf_smooth = 0 , boundary_coords = None ) :
    """Deprecated wrapper: delegate to `interpolate_to_grid` unchanged."""
    options = dict ( interp_type = interp_type , hres = hres , minimum_neighbors = minimum_neighbors , gamma = gamma , kappa_star = kappa_star , search_radius = search_radius , rbf_func = rbf_func , rbf_smooth = rbf_smooth , boundary_coords = boundary_coords )
    return interpolate_to_grid ( x , y , z , ** options )
|
def get_time_evolution ( self ) :
    """Get the function to append the time evolution of this term.

    Returns:
        function(circuit: Circuit, t: float):
            Add gates for time evolution to `circuit` with time `t`
    """
    term = self . simplify ( )
    coeff = term . coeff
    if coeff . imag :
        # Time evolution exp(-iHt) requires a real (Hermitian) coefficient.
        raise ValueError ( "Not a real coefficient." )
    ops = term . ops
    def append_to_circuit ( circuit , t ) :
        # Identity term: nothing to evolve.
        if not ops :
            return
        # Basis change: rotate X/Y Pauli operators into the Z basis.
        for op in ops :
            n = op . n
            if op . op == "X" :
                circuit . h [ n ]
            elif op . op == "Y" :
                circuit . rx ( - half_pi ) [ n ]
        # CNOT ladder accumulates the joint parity onto the last qubit.
        for i in range ( 1 , len ( ops ) ) :
            circuit . cx [ ops [ i - 1 ] . n , ops [ i ] . n ]
        # The evolution itself: a Z rotation by -2*coeff*t on the parity qubit.
        circuit . rz ( - 2 * coeff * t ) [ ops [ - 1 ] . n ]
        # Undo the CNOT ladder (reverse order) ...
        for i in range ( len ( ops ) - 1 , 0 , - 1 ) :
            circuit . cx [ ops [ i - 1 ] . n , ops [ i ] . n ]
        # ... and undo the basis change.
        for op in ops :
            n = op . n
            if op . op == "X" :
                circuit . h [ n ]
            elif op . op == "Y" :
                circuit . rx ( half_pi ) [ n ]
    return append_to_circuit
|
def from_tuple ( cls , components : tuple ) -> 'ComponentRef' :
    """Build a nested ComponentRef chain from a tuple of component names.

    :param components: tuple of component names, outermost first
    :return: ComponentRef whose ``child`` chain mirrors the tuple
    """
    root = ComponentRef ( name = components [ 0 ] , child = [ ] )
    tail = root
    for name in components [ 1 : ] :
        node = ComponentRef ( name = name , child = [ ] )
        tail . child . append ( node )
        tail = node
    return root
|
def get_environmental_configuration ( self ) :
    """Get the enclosure's environmental configuration settings.

    These describe the supported feature set, calibrated minimum & maximum
    power, location & dimensions, etc. of the enclosure resource.

    Returns:
        Settings that describe the environmental configuration.
    """
    return self . _helper . do_get ( '{}/environmentalConfiguration' . format ( self . data [ 'uri' ] ) )
|
def filter ( self , id ) :
    """Fetch information about the filter with the specified `id`.

    Returns a `filter dict`_.
    """
    id = self . __unpack_id ( id )
    return self . __api_request ( 'GET' , '/api/v1/filters/{0}' . format ( str ( id ) ) )
|
def call ( command , working_directory = config . BASE_DIR ) :
    """Executes shell command in a given working_directory.

    Command is a list of strings to execute as a command line.

    Returns a tuple of two byte strings: (stdout, stderr)
    """
    LOG . info ( command )
    # Bug fix: the original always passed shell=True even for list commands;
    # on POSIX that runs only command[0] through the shell and silently drops
    # the remaining arguments.  Use the shell only for a plain string command.
    use_shell = isinstance ( command , str )
    proc = sp . Popen ( command , stdout = sp . PIPE , stderr = sp . PIPE , cwd = working_directory , shell = use_shell )
    out , err = proc . communicate ( )
    return ( out , err )
|
def save_itemgetter ( self , obj ) :
    """itemgetter serializer (needed for namedtuple support).

    Recovers the keys an ``operator.itemgetter`` was built with by probing
    it with an object whose ``__getitem__`` simply echoes each key back.
    """
    class _KeyEcho :
        def __getitem__ ( self , key ) :
            return key
    keys = obj ( _KeyEcho ( ) )
    if not isinstance ( keys , tuple ) :
        # A single-key itemgetter returns a bare value; normalize to a tuple.
        keys = ( keys , )
    return self . save_reduce ( operator . itemgetter , keys )
|
def set_logfile ( path , instance ) :
    """Set the module-global logfile path for the given instance."""
    global logfile
    logfile = '{}/hfos.{}.log' . format ( os . path . normpath ( path ) , instance )
|
def _pip_list ( self , stdout , stderr , prefix = None ) :
    """Callback for `pip_list`: keep only packages installed via pip.

    Filters out of ``stdout`` every package whose name also appears among
    the conda-linked packages for ``prefix``.
    """
    # Names of all conda-linked packages in this prefix.
    linked_names = [ self . split_canonical_name ( item ) [ 0 ] for item in self . linked ( prefix ) ]
    # FIXME: NEED A MORE ROBUST WAY!
    pip_only = [ pkg for pkg in stdout if self . split_canonical_name ( pkg ) [ 0 ] not in linked_names ]
    return pip_only
|
def _axes_style ( style = None , rc = None ) :
    """Return a parameter dict for the aesthetic style of the plots.

    NOTE: This is an internal method from Seaborn that is simply used to
    create a default aesthetic in yellowbrick. If you'd like to use these
    styles then import Seaborn!

    This affects things like the color of the axes, whether a grid is
    enabled by default, and other aesthetic elements.

    This function returns an object that can be used in a ``with`` statement
    to temporarily change the style parameters.

    Parameters
    ----------
    style : dict, reset, or None
        A dictionary of parameters or the name of a preconfigured set.
    rc : dict, optional
        Parameter mappings to override the values in the preset seaborn
        style dictionaries. This only updates parameters that are
        considered part of the style definition.
    """
    if isinstance ( style , dict ) :
        style_dict = style
    else :
        # Define colors here
        dark_gray = ".15"
        light_gray = ".8"
        # Common parameters
        style_dict = { "figure.facecolor" : "white" , "text.color" : dark_gray , "axes.labelcolor" : dark_gray , "legend.frameon" : False , "legend.numpoints" : 1 , "legend.scatterpoints" : 1 , "xtick.direction" : "out" , "ytick.direction" : "out" , "xtick.color" : dark_gray , "ytick.color" : dark_gray , "axes.axisbelow" : True , "image.cmap" : "Greys" , "font.family" : [ "sans-serif" ] , "font.sans-serif" : [ "Arial" , "Liberation Sans" , "Bitstream Vera Sans" , "sans-serif" ] , "grid.linestyle" : "-" , "axes.grid" : True , "lines.solid_capstyle" : "round" , "axes.facecolor" : "white" , "axes.edgecolor" : light_gray , "axes.linewidth" : 1.25 , "grid.color" : light_gray , "xtick.major.size" : 0 , "ytick.major.size" : 0 , "xtick.minor.size" : 0 , "ytick.minor.size" : 0 , }
    # Apply overrides, keeping only keys that belong to the style definition.
    if rc is not None :
        style_dict . update ( { k : v for k , v in rc . items ( ) if k in _style_keys } )
    # Wrap in an _AxesStyle object so this can be used in a with statement.
    return _AxesStyle ( style_dict )
|
def Matches ( self , file_entry ) :
    """Compares the file entry against the extension filter.

    Args:
        file_entry (dfvfs.FileEntry): file entry to compare.

    Returns:
        bool: True if the file entry matches the filter, False if not or
            None if the filter does not apply.
    """
    location = getattr ( file_entry . path_spec , 'location' , None )
    if not location :
        # Without a location the filter cannot be applied at all.
        return None
    _ , separator , extension = location . rpartition ( '.' )
    if not separator :
        # No '.' anywhere in the location, so there is no extension.
        return False
    return extension . lower ( ) in self . _extensions
|
def _get_client ( ) :
    """Create a new client for the HNV REST API."""
    hnv_conf = CONFIG . HNV
    return utils . get_client ( url = hnv_conf . url , username = hnv_conf . username , password = hnv_conf . password , allow_insecure = hnv_conf . https_allow_insecure , ca_bundle = hnv_conf . https_ca_bundle )
|
async def addFeedNodes ( self , name , items ) :
    '''Call a feed function and yield what it yields (typically Node()s).

    Args:
        name (str): The name of the feed record type.
        items (list): A list of records of the given feed type.

    Returns:
        (object): The return value from the feed function. Typically Node() generator.
    '''
    feedfunc = self . core . getFeedFunc ( name )
    if feedfunc is None :
        # Unknown feed type.
        raise s_exc . NoSuchName ( name = name )
    logger . info ( f'adding feed nodes ({name}): {len(items)}' )
    async for node in feedfunc ( self , items ) :
        yield node
|
def is_refreshable_url ( self , request ) :
    """Takes a request and returns whether it triggers a refresh examination.

    :arg HttpRequest request:
    :returns: boolean
    """
    # Do not attempt to refresh the session if the OIDC backend is not used.
    backend_path = request . session . get ( BACKEND_SESSION_KEY )
    if backend_path :
        oidc_in_use = issubclass ( import_string ( backend_path ) , OIDCAuthenticationBackend )
    else :
        oidc_in_use = True
    return ( request . method == 'GET' and is_authenticated ( request . user ) and oidc_in_use and request . path not in self . exempt_urls )
|
def add_maildir ( self , maildir_path ) :
    """Load up a maildir and compute hash for each mail found.

    Walks every message in the maildir, hashes it, and records the mail's
    path under that hash in ``self.mails`` so duplicates can be detected.
    Mails rejected for insufficient headers or a missing Message-ID are
    counted but skipped.  Updates the ``mail_found`` / ``mail_rejected`` /
    ``mail_kept`` counters in ``self.stats``.
    """
    maildir_path = self . canonical_path ( maildir_path )
    logger . info ( "Opening maildir at {} ..." . format ( maildir_path ) )
    # Maildir parser requires a string, not a unicode, as path.
    maildir = Maildir ( str ( maildir_path ) , factory = None , create = False )
    # Group folders by hash.
    logger . info ( "{} mails found." . format ( len ( maildir ) ) )
    if self . conf . progress :
        bar = ProgressBar ( widgets = [ Percentage ( ) , Bar ( ) ] , max_value = len ( maildir ) , redirect_stderr = True , redirect_stdout = True )
    else :
        # No progress display requested: use an identity pass-through instead.
        def bar ( x ) :
            return x
    # NOTE(review): iterkeys() is the Python 2 mailbox API -- confirm this
    # module targets Python 2 (Python 3's mailbox only provides keys()).
    for mail_id in bar ( maildir . iterkeys ( ) ) :
        self . stats [ 'mail_found' ] += 1
        # Resolve the on-disk path of this message within the maildir.
        mail_path = self . canonical_path ( os . path . join ( maildir . _path , maildir . _lookup ( mail_id ) ) )
        mail = Mail ( mail_path , self . conf )
        try :
            mail_hash = mail . hash_key
        except ( InsufficientHeadersError , MissingMessageID ) as expt :
            logger . warning ( "Rejecting {}: {}" . format ( mail_path , expt . args [ 0 ] ) )
            self . stats [ 'mail_rejected' ] += 1
        else :
            logger . debug ( "Hash is {} for mail {!r}." . format ( mail_hash , mail_id ) )
            # Use a set to deduplicate entries pointing to the same file.
            self . mails . setdefault ( mail_hash , set ( ) ) . add ( mail_path )
            self . stats [ 'mail_kept' ] += 1
|
def write_to_file ( self , path , filename , footer = True ) :
    """Class method responsible for generating a file containing the notebook object data.

    Parameters
    ----------
    path : str
        OpenSignalsTools Root folder path (where the notebook will be stored).
    filename : str
        Defines the name of the notebook file.
    footer : bool
        Flag that defines when the footer needs to be included in the Notebook.
    """
    # ===== Storage of Filename =====
    self . filename = filename
    # ===== Inclusion of Footer in the Notebook =====
    if footer is True :
        _generate_footer ( self . notebook , self . notebook_type )
    # ===== Code segment for application of the OpenSignalsTools CSS style =====
    # Hidden markdown/code cells inject the CSS style and autoplay JS;
    # the "hide_*" tags control their visibility in the rendered notebook.
    self . notebook [ "cells" ] . append ( nb . v4 . new_markdown_cell ( AUX_CODE_MESSAGE , ** { "metadata" : { "tags" : [ "hide_mark" ] } } ) )
    self . notebook [ "cells" ] . append ( nb . v4 . new_code_cell ( CSS_STYLE_CODE , ** { "metadata" : { "tags" : [ "hide_both" ] } } ) )
    self . notebook [ "cells" ] . append ( nb . v4 . new_code_cell ( JS_CODE_AUTO_PLAY , ** { "metadata" : { "tags" : [ "hide_both" ] } } ) )
    # NOTE(review): backslash separators make this path Windows-only --
    # confirm that is intended (os.path.join would be portable).
    full_path = path + "\\Categories\\" + self . notebook_type + "\\" + filename + ".ipynb"
    nb . write ( self . notebook , full_path )
    # ===== Run Notebook Code Instructions =====
    # Execute the notebook in place (with no cell timeout) and mark it trusted.
    os . system ( "jupyter nbconvert --execute --inplace --ExecutePreprocessor.timeout=-1 " + full_path )
    os . system ( "jupyter trust " + full_path )
|
def _init_lazy_fields ( self ) :
"""Member data that gets loaded or constructed on demand"""
|
self . gtf_path = None
self . _protein_sequences = None
self . _transcript_sequences = None
self . _db = None
self . protein_fasta_paths = None
self . transcript_fasta_paths = None
# only memoizing the Gene , Transcript , and Exon objects
self . _genes = { }
self . _transcripts = { }
self . _exons = { }
|
def _partially_evaluate ( self , addr , simplify = False ) :
    """Return part of the lazy array.

    Evaluates only the elements selected by ``addr``, dispatching on the
    type of ``self.base_value``, then applies any queued operations via
    ``_apply_operations``.
    """
    if self . is_homogeneous :
        if simplify :
            # A homogeneous selection can be represented by its single value.
            base_val = self . base_value
        else :
            base_val = self . _homogeneous_array ( addr ) * self . base_value
    elif isinstance ( self . base_value , ( int , long , numpy . integer , float , bool ) ) :
        # NOTE(review): `long` exists only on Python 2 -- this module appears
        # to target Python 2 (see also the .next() call below).
        base_val = self . _homogeneous_array ( addr ) * self . base_value
    elif isinstance ( self . base_value , numpy . ndarray ) :
        base_val = self . base_value [ addr ]
    elif have_scipy and sparse . issparse ( self . base_value ) : # For sparse matrices larr[2, :]
        base_val = self . base_value [ addr ]
    elif callable ( self . base_value ) :
        # The callable is applied to the index arrays of the selection.
        indices = self . _array_indices ( addr )
        base_val = self . base_value ( * indices )
        if isinstance ( base_val , numpy . ndarray ) and base_val . shape == ( 1 , ) :
            # Unwrap single-element results to a scalar.
            base_val = base_val [ 0 ]
    elif hasattr ( self . base_value , "lazily_evaluate" ) :
        # Nested lazy arrays evaluate themselves for the selection.
        base_val = self . base_value . lazily_evaluate ( addr , shape = self . _shape )
    elif isinstance ( self . base_value , VectorizedIterable ) :
        # Draw exactly as many values as the selection covers.
        partial_shape = self . _partial_shape ( addr )
        if partial_shape :
            n = reduce ( operator . mul , partial_shape )
        else :
            n = 1
        base_val = self . base_value . next ( n )
        # note that the array contents will depend on the order of access to elements
        if n == 1 :
            base_val = base_val [ 0 ]
        elif partial_shape and base_val . shape != partial_shape :
            base_val = base_val . reshape ( partial_shape )
    elif isinstance ( self . base_value , collections . Iterator ) :
        raise NotImplementedError ( "coming soon..." )
    else :
        raise ValueError ( "invalid base value for array (%s)" % self . base_value )
    return self . _apply_operations ( base_val , addr , simplify = simplify )
|
def pages ( self ) :
    """The aggregate pages of all the parser objects."""
    collected = [ ]
    for har_dict in self . har_data :
        parser = HarParser ( har_data = har_dict )
        if self . page_id :
            # Only keep pages matching the configured page_id.
            collected . extend ( page for page in parser . pages if page . page_id == self . page_id )
        else :
            collected . extend ( parser . pages )
    return collected
|
def format_norm ( kwargs , current = None ) :
    """Format a `~matplotlib.colors.Normalize` from a set of kwargs.

    Returns
    -------
    norm, kwargs
        the formatted `Normalize` instance, and the remaining keywords
    """
    norm = kwargs . pop ( 'norm' , current ) or 'linear'
    vmin = kwargs . pop ( 'vmin' , None )
    vmax = kwargs . pop ( 'vmax' , None )
    # 'clim' overrides vmin/vmax; an explicit None means "no limits".
    clim = kwargs . pop ( 'clim' , ( vmin , vmax ) ) or ( None , None )
    clip = kwargs . pop ( 'clip' , None )
    if isinstance ( norm , colors . Normalize ) :
        pass
    elif norm == 'linear' :
        norm = colors . Normalize ( )
    elif norm == 'log' :
        norm = colors . LogNorm ( )
    else :
        raise ValueError ( "unrecognised value for norm {!r}" . format ( norm ) )
    lo , hi = clim
    if lo is not None :
        norm . vmin = lo
    if hi is not None :
        norm . vmax = hi
    if clip is not None :
        norm . clip = clip
    return norm , kwargs
|
def open ( self ) :
    """Open the window."""
    # Resizable, non-vsynced window sized from the instance configuration.
    self . _window = Window ( caption = self . caption , height = self . height , width = self . width , vsync = False , resizable = True )
|
def square_root ( n , epsilon = 0.001 ) :
    """Return the square root of n via Newton's method.

    :param n: non-negative number to take the square root of
    :param epsilon: maximum absolute error of ``guess * guess - n``
    :returns: an approximation of sqrt(n)
    :raises ValueError: if n is negative (the original looped forever here,
        since guess*guess - n can never get within epsilon of zero)
    """
    if n < 0 :
        raise ValueError ( "square_root() requires a non-negative number" )
    guess = n / 2
    while abs ( guess * guess - n ) > epsilon :
        # Newton iteration: average the guess with n/guess.
        guess = ( guess + ( n / guess ) ) / 2
    return guess
|
def __get_valid_form_data_elements ( self , soup ) :
    """Get all form input elements that can carry data.

    Note:
        An element is valid when the value can be updated client-side
        and the element has a name attribute.

    Args:
        soup (obj): The BeautifulSoup form.

    Returns:
        list(obj): Soup elements.
    """
    candidates = soup . find_all ( [ "input" , "button" , "textarea" , "select" ] )
    return [ element for element in candidates if element . has_attr ( "name" ) ]
|
def refresh_console ( self , console : tcod . console . Console ) -> None :
    """Update an Image created with :any:`tcod.image_from_console`.

    The console used with this function should have the same width and
    height as the Console given to :any:`tcod.image_from_console`.
    The font width and height must also be the same as when
    :any:`tcod.image_from_console` was called.

    Args:
        console (Console): A Console with a pixel width and height
            matching this Image.
    """
    # Delegate to the C library: re-renders the console's current contents
    # into this image's underlying buffer in place.
    lib . TCOD_image_refresh_console ( self . image_c , _console ( console ) )
|
def _post_login_page(self):
    """Login to Janrain.

    Posts the sign-in form; returns True on success, raises
    PyFidoError when the request cannot be sent.
    """
    # Sign-in form payload expected by the Janrain endpoint.
    payload = {
        "form": "signInForm",
        "client_id": JANRAIN_CLIENT_ID,
        "redirect_uri": "https://www.fido.ca/pages/#/",
        "response_type": "token",
        "locale": "en-US",
        "userID": self.username,
        "currentPassword": self.password,
    }
    try:
        # Response body is not needed; only a transport failure matters.
        yield from self._session.post(LOGIN_URL,
                                      headers=self._headers,
                                      data=payload,
                                      timeout=self._timeout)
    except OSError:
        raise PyFidoError("Can not sign in")
    return True
|
def place_bid(self, owner_id, bid):
    """Submits a bid for this tick for current player. This is not a guarantee that it will be accepted!

    If other players submit a higher bid this same tick, the bid won't be counted. Try again next tick if it's not
    too high!

    :param int owner_id: id of owner who is submitting the bid
    :param int bid: bid amount
    :raise InvalidActionError: if the action is not allowed according to the rules. See the error message
        for details.
    """
    # Guard: bidding is only legal while the auction is in the BID phase.
    if self.state != AuctionState.BID:
        raise InvalidActionError("Bid was attempted, but it is not currently time to submit bids.")
    # Guard: a new bid must strictly exceed the standing bid.
    if bid <= self.bid:
        raise InvalidActionError(
            "Bid amount {} must be greater than current bid of {}".format(bid, self.bid))
    # Guard: the owner must have the budget and a free roster slot.
    if not self.owners[owner_id].can_buy(self.nominee, bid):
        raise InvalidActionError(
            "The owner with index {} cannot afford a bid of {} for player {} "
            "or cannot actually buy this player (due to not having any free slots)".format(
                owner_id, bid, self.nominee.name))
    # Success: record the bid for resolution at the end of the tick.
    self.tickbids[owner_id] = bid
|
def fit_transform(self, X, y, step_size=0.1, init_weights=None, warm_start=False):
    """Fit optimizer to X, then transforms X. See `fit` and `transform` for further explanation."""
    # Fit first, then transform the same data with the fitted weights.
    self.fit(X=X, y=y, step_size=step_size,
             init_weights=init_weights, warm_start=warm_start)
    transformed = self.transform(X=X)
    return transformed
|
def _ParseEntryObjectOffsets(self, file_object, file_offset):
    """Parses entry array objects for the offset of the entry objects.

    Walks the chain of entry array objects starting at file_offset,
    accumulating every entry object offset, until an array with a
    zero next-array offset terminates the chain.

    Args:
        file_object (dfvfs.FileIO): a file-like object.
        file_offset (int): offset of the first entry array object relative to
            the start of the file-like object.

    Returns:
        list[int]: offsets of the entry objects.
    """
    offsets = []
    next_offset = file_offset
    while True:
        entry_array = self._ParseEntryArrayObject(file_object, next_offset)
        offsets.extend(entry_array.entry_object_offsets)
        next_offset = entry_array.next_entry_array_offset
        if next_offset == 0:
            return offsets
|
def camelcase_to_underscores(argument):
    '''Converts a camelcase param like theNewAttribute to the equivalent
    python underscore variable like the_new_attribute'''
    if not argument:
        return argument
    pieces = []
    previous_was_title = True
    last_index = len(argument) - 1
    for position, character in enumerate(argument):
        is_title = character.istitle()
        # Characters past the end count as capitals, matching the
        # "no boundary at the final capital run" behaviour.
        following_is_title = (argument[position + 1].istitle()
                              if position < last_index else True)
        # A word boundary is a capital that either starts a capital run
        # (previous char lowercase) or ends one (next char lowercase).
        at_boundary = is_title and (not following_is_title or not previous_was_title)
        if position and at_boundary:
            pieces.append("_")
        previous_was_title = is_title
        if not character.isspace():
            # Whitespace is dropped; everything else is lowercased.
            pieces.append(character.lower())
    return "".join(pieces)
|
def compile_bundle_entry(self, spec, entry):
    """Handler for each entry for the bundle method of the compile
    process.  This copies the source file or directory into the
    build directory.

    Returns a (modpath-map, target-map, exported-names) triple keyed
    by the entry's module name.
    """
    modname, source, target, modpath = entry
    exported = []
    if isfile(source):
        # Single file: export the module and copy it to its target path,
        # creating intermediate directories as needed.
        exported.append(modname)
        destination = join(spec[BUILD_DIR], target)
        parent = dirname(destination)
        if not exists(parent):
            makedirs(parent)
        shutil.copy(source, destination)
    elif isdir(source):
        # Directory: copy the whole tree under the module name.
        shutil.copytree(source, join(spec[BUILD_DIR], modname))
    return {modname: modpath}, {modname: target}, exported
|
def _parse_metadata(self, message):
    '''Parse incoming messages to build metadata dict

    Lots of 'if' statements. It sucks, I know.

    Args:
        message (dict): JSON dump of message sent from Slack

    Returns:
        Legobot.Metadata
    '''
    # Try to handle all the fields of events we care about.
    metadata = Metadata(source=self.actor_urn).__dict__
    metadata['thread_ts'] = message.get('thread_ts')
    if 'presence' in message:
        metadata['presence'] = message['presence']
    # Text: prefer the event's own 'text'; otherwise fall back to the
    # text of 'previous_message' (present on edited/link events).
    if 'text' in message:
        metadata['text'] = message['text']
    elif 'previous_message' in message:
        # Try to handle slack links
        if 'text' in message['previous_message']:
            metadata['text'] = message['previous_message']['text']
        else:
            metadata['text'] = None
    else:
        metadata['text'] = None
    # Source user, in priority order: plain user event, bot event
    # (bot id resolved to a user id), then nested 'message' payloads.
    if 'user' in message:
        metadata['source_user'] = message['user']
    elif 'bot_id' in message:
        metadata['source_user'] = self.get_userid_from_botid(message['bot_id'])
    elif 'message' in message and 'user' in message['message']:
        metadata['source_user'] = message['message']['user']
    else:
        metadata['source_user'] = None
    metadata['user_id'] = metadata['source_user']
    metadata['display_name'] = self.get_username(metadata['source_user'])
    if 'channel' in message:
        metadata['source_channel'] = message['channel']
        # Slack starts DM channel IDs with "D"
        if message['channel'].startswith('D'):
            metadata['is_private_message'] = True
        else:
            metadata['is_private_message'] = False
    metadata['source_connector'] = 'slack'
    return metadata
|
def summarize(text: str, n: int, engine: str = "frequency", tokenizer: str = "newmm") -> List[str]:
    """Thai text summarization

    :param str text: text to be summarized
    :param int n: number of sentences to be included in the summary
    :param str engine: text summarization engine
    :param str tokenizer: word tokenizer
    :return List[str] summary: list of selected sentences
    """
    if engine == "frequency":
        return FrequencySummarizer().summarize(text, n, tokenizer)
    # Unknown engine: fall back to the first n sentences.
    return sent_tokenize(text)[:n]
|
def gotoNext(self):
    """Goes to the next date based on the current mode and date."""
    scene = self.scene()
    today = scene.currentDate()
    mode = scene.currentMode()
    # Step size depends on the calendar mode; unknown modes are a no-op.
    if mode == scene.Mode.Day:
        scene.setCurrentDate(today.addDays(1))
    elif mode == scene.Mode.Week:
        scene.setCurrentDate(today.addDays(7))
    elif mode == scene.Mode.Month:
        scene.setCurrentDate(today.addMonths(1))
|
def set_temperature(self, temperature, rate, delay=1):
    """Performs a temperature scan to the target temperature.

    Blocks until the target temperature is reached, polling at the given
    delay.  This is a convenience wrapper around :meth:`scan_temperature`
    with a no-op measurement callable (the original docstring documented
    a ``measure`` parameter this method does not take).

    :param temperature: The target temperature in kelvin.
    :param rate: The sweep rate in kelvin per minute.
    :param delay: The time delay between each stability check in seconds.
    """
    self.scan_temperature(lambda: None, temperature, rate, delay)
|
def geometry(self, geometry):
    """Sets the geometry value from a common.Geometry or arcpy geometry."""
    # Already one of our own geometry wrappers: store it directly.
    if isinstance(geometry, AbstractGeometry):
        self._geomObject = geometry
        self._geomType = geometry.type
        return
    if not arcpyFound:
        raise AttributeError("geometry must be a common.Geometry or arcpy.Geometry type.")
    # Extract the spatial reference as either a factory code (wkid)
    # or, failing that, a WKT string.
    wkid = None
    wkt = None
    spatial_ref = getattr(geometry, 'spatialReference', None)
    if spatial_ref is not None:
        if getattr(spatial_ref, 'factoryCode', None) is not None:
            wkid = spatial_ref.factoryCode
        else:
            wkt = spatial_ref.exportToString()
    # Wrap the arcpy geometry in the matching common geometry type.
    dispatch = (
        (arcpy.Polygon, Polygon, "esriGeometryPolygon"),
        (arcpy.Point, Point, "esriGeometryPoint"),
        (arcpy.Polyline, Polyline, "esriGeometryPolyline"),
        (arcpy.Multipoint, MultiPoint, "esriGeometryMultipoint"),
    )
    for arc_type, wrapper, type_name in dispatch:
        if isinstance(geometry, arc_type):
            self._geomObject = wrapper(geometry, wkid=wkid, wkt=wkt)
            self._geomType = type_name
            return
    raise AttributeError("geometry must be a common.Geometry or arcpy.Geometry type.")
|
def get_inputs(node, kwargs):
    """Helper function to get inputs.

    Resolves each of the node's input references through the index
    lookup table and returns the referenced nodes' names.

    Returns:
        tuple: (node name, list of input node names, node attrs dict)
    """
    proc_nodes = kwargs["proc_nodes"]
    index_lookup = kwargs["index_lookup"]
    # Each input is a (node-index, output-index) pair; only the node
    # index is needed to look up the producing node's name.
    input_nodes = [proc_nodes[index_lookup[ip[0]]].name for ip in node["inputs"]]
    return node["name"], input_nodes, node.get("attrs", {})
|
def transition_run(self, pipeline_key, blocking_slot_keys=None,
                   fanned_out_pipelines=None, pipelines_to_run=None):
    """Marks an asynchronous or generator pipeline as running.

    Does nothing if the pipeline is no longer in a runnable state.

    Args:
        pipeline_key: The db.Key of the _PipelineRecord to update.
        blocking_slot_keys: List of db.Key instances that this pipeline's
            finalization barrier should wait on in addition to the existing
            one. This is used to update the barrier to include all child
            outputs. When None, the barrier will not be updated.
        fanned_out_pipelines: List of db.Key instances of _PipelineRecords
            that were fanned out by this generator pipeline. This is distinct
            from the 'pipelines_to_run' list because not all of the pipelines
            listed here will be immediately ready to execute. When None, then
            this generator yielded no children.
        pipelines_to_run: List of db.Key instances of _PipelineRecords that
            should be kicked off (fan-out) transactionally as part of this
            transition. When None, no child pipelines will run. All db.Keys
            in this list must also be present in the fanned_out_pipelines
            list.

    Raises:
        UnexpectedPipelineError if blocking_slot_keys was not empty and the
        _BarrierRecord has gone missing.
    """
    def txn():
        # The whole transition runs inside one datastore transaction so the
        # status flip, fan-out task enqueue, and barrier update are atomic.
        pipeline_record = db.get(pipeline_key)
        if pipeline_record is None:
            logging.warning('Pipeline ID "%s" cannot be marked as run. '
                            'Does not exist.', pipeline_key.name())
            raise db.Rollback()
        # Only WAITING pipelines may transition to RUN; anything else means
        # a retry or stale task, so abort without side effects.
        if pipeline_record.status != _PipelineRecord.WAITING:
            logging.warning('Pipeline ID "%s" in bad state to be marked as run: %s',
                            pipeline_key.name(), pipeline_record.status)
            raise db.Rollback()
        pipeline_record.status = _PipelineRecord.RUN
        if fanned_out_pipelines:
            # NOTE: We must model the pipeline relationship in a top-down manner,
            # meaning each pipeline must point forward to the pipelines that it
            # fanned out to. The reason is race conditions. If evaluate()
            # dies early, it may create many unused _PipelineRecord and _SlotRecord
            # instances that never progress. The only way we know which of these
            # are valid is by traversing the graph from the root, where the
            # fanned_out property refers to those pipelines that were run using a
            # transactional task.
            child_pipeline_list = list(fanned_out_pipelines)
            pipeline_record.fanned_out = child_pipeline_list
            if pipelines_to_run:
                # The fan-out task carries indexes into fanned_out rather than
                # raw keys, so it stays small and consistent with the record.
                child_indexes = [child_pipeline_list.index(p) for p in pipelines_to_run]
                child_indexes.sort()
                task = taskqueue.Task(
                    url=self.fanout_handler_path,
                    params=dict(parent_key=str(pipeline_key),
                                child_indexes=child_indexes))
                task.add(queue_name=self.queue_name, transactional=True)
        pipeline_record.put()
        if blocking_slot_keys:
            # NOTE: Always update a generator pipeline's finalization barrier to
            # include all of the outputs of any pipelines that it runs, to ensure
            # that finalized calls will not happen until all child pipelines have
            # completed. This must happen transactionally with the enqueue of
            # the fan-out kickoff task above to ensure the child output slots and
            # the barrier blocking slots are the same.
            barrier_key = db.Key.from_path(
                _BarrierRecord.kind(), _BarrierRecord.FINALIZE,
                parent=pipeline_key)
            finalize_barrier = db.get(barrier_key)
            if finalize_barrier is None:
                raise UnexpectedPipelineError(
                    'Pipeline ID "%s" cannot update finalize barrier. '
                    'Does not exist.' % pipeline_key.name())
            else:
                # Union with the existing slots so repeated runs never drop
                # previously-registered blockers.
                finalize_barrier.blocking_slots = list(
                    blocking_slot_keys.union(set(finalize_barrier.blocking_slots)))
                finalize_barrier.put()
    db.run_in_transaction(txn)
|
def target_sdp_state(self, state):
    """Update the target state of SDP.

    Args:
        state (str): Target state; one of 'on', 'off', 'standby' or
            'disable'. Other values leave the device state untouched but
            are still forwarded to the SDP state object.
    """
    LOG.info('Setting SDP target state to %s', state)
    if self._sdp_state.current_state == state:
        # Bug fix: the original logged "ignored" but then fell through and
        # applied the state anyway; honour the message and return early.
        LOG.info('Target state ignored, SDP is already "%s"!', state)
        return
    # Map the SDP state name to the corresponding Tango device state.
    device_states = {
        'on': DevState.ON,
        'off': DevState.OFF,
        'standby': DevState.STANDBY,
        'disable': DevState.DISABLE,
    }
    if state in device_states:
        self.set_state(device_states[state])
    self._sdp_state.update_target_state(state)
|
def _transform_snapshot(
        raw_snapshot: str,
        storage: SQLiteStorage,
        cache: BlockHashCache,
) -> str:
    """Upgrades a single snapshot by adding the blockhash to it and to any
    pending transactions."""
    snapshot = json.loads(raw_snapshot)
    # Resolve the snapshot's block number to its hash via the cache.
    block_number = int(snapshot['block_number'])
    snapshot['block_hash'] = cache.get(block_number)
    pending_transactions = snapshot['pending_transactions']
    new_pending_transactions = []
    for transaction_data in pending_transactions:
        # Only ContractSend* events are valid here; anything else indicates
        # corrupt data or an unexpected source version.
        if 'raiden.transfer.events.ContractSend' not in transaction_data['_type']:
            raise InvalidDBData(
                "Error during v18 -> v19 upgrade. Chain state's pending transactions "
                "should only contain ContractSend transactions",
            )
        # For each pending transaction find the corresponding DB event record.
        event_record = storage.get_latest_event_by_data_field(
            filters=transaction_data,
        )
        if not event_record.data:
            raise InvalidDBData(
                'Error during v18 -> v19 upgrade. Could not find a database event '
                'table entry for a pending transaction.',
            )
        # Copy the block hash recorded when the event was originally emitted.
        event_record_data = json.loads(event_record.data)
        transaction_data['triggered_by_block_hash'] = event_record_data['triggered_by_block_hash']
        new_pending_transactions.append(transaction_data)
    snapshot['pending_transactions'] = new_pending_transactions
    return json.dumps(snapshot)
|
def _exclusion_indices_for_range(self, start_idx, end_idx):
    """Returns

    List of tuples of (start, stop) which represent the ranges of minutes
    which should be excluded when a market minute window is requested,
    or None when no exclusions overlap the requested range.
    """
    tree = self._minute_exclusion_tree
    if not tree.overlaps(start_idx, end_idx):
        return None
    # Collect the payload of every overlapping interval, sorted.
    return sorted(interval.data for interval in tree[start_idx:end_idx])
|
def find(self, soup):
    '''Yield tags matching the tag criterion from a soup.

    There is no need to override this if you are satisfied with finding
    tags that match match_criterion.

    Args:
        soup: A BeautifulSoup to search through.

    Yields:
        BeautifulSoup Tags that match the criterion.
    '''
    matches = (tag for tag in soup.recursiveChildGenerator()
               if self.match_criterion(tag))
    yield from matches
|
def pretty_render(data, format='text', indent=0):
    """Render a dict based on a format.

    Supported formats: 'json', 'html', 'xml'; anything else falls back
    to indented plain text.
    """
    if format == 'json':
        return render_json(data)
    if format == 'html':
        return render_html(data)
    if format == 'xml':
        return render_xml(data)
    return dict_to_plaintext(data, indent=indent)
|
def make_zoho_blueprint(
    client_id=None,
    client_secret=None,
    scope=None,
    redirect_url=None,
    offline=False,
    redirect_to=None,
    login_url=None,
    session_class=None,
    storage=None,
    reprompt_consent=False,
):
    """Make a blueprint for authenticating with Zoho using OAuth 2.

    This requires a client ID and client secret from Zoho. You should
    either pass them to this constructor, or make sure that your Flask
    application config defines them, using the variables
    :envvar:`ZOHO_OAUTH_CLIENT_ID` and :envvar:`ZOHO_OAUTH_CLIENT_SECRET`.

    IMPORTANT: Configuring the base_url is not supported in this config.

    Args:
        client_id (str): The client ID for your application on Zoho.
        client_secret (str): The client secret for your application on Zoho.
        scope (list, optional): list of scopes (str) for the OAuth token;
            defaults to ``["ZohoCRM.users.all"]``.
        redirect_url (str): the URL to redirect to after the authentication
            dance is complete.
        offline (bool): Whether to request `offline access` for the OAuth
            token. Defaults to False.
        redirect_to (str): if ``redirect_url`` is not defined, the name of
            the view to redirect to after the authentication dance is
            complete. The actual URL will be determined by
            :func:`flask.url_for`.
        login_url (str, optional): the URL path for the ``login`` view.
            Defaults to ``/zoho``.
        session_class (class, optional): The class to use for creating a
            Requests session. Defaults to
            :class:`~flask_dance.consumer.requests.OAuth2Session`.
        storage: A token storage class, or an instance of a token storage
            class, to use for this blueprint. Defaults to
            :class:`~flask_dance.consumer.storage.session.SessionStorage`.
        reprompt_consent (bool): If True, force Zoho to re-prompt the user
            for their consent, even if the user has already given it.
            Defaults to False.

    :rtype: :class:`~flask_dance.consumer.OAuth2ConsumerBlueprint`
    :returns: A :ref:`blueprint <flask:blueprints>` to attach to your Flask app.
    """
    if not scope:
        scope = ["ZohoCRM.users.all"]
    auth_params = {"access_type": "offline" if offline else "online"}
    if reprompt_consent:
        # Force the consent screen even for previously-authorized users.
        auth_params["prompt"] = "consent"
    blueprint = OAuth2ConsumerBlueprint(
        "zoho",
        __name__,
        client_id=client_id,
        client_secret=client_secret,
        client=ZohoWebClient(client_id, token_type=ZOHO_TOKEN_HEADER),
        scope=scope,
        base_url="https://www.zohoapis.com/",
        token_url="https://accounts.zoho.com/oauth/v2/token",
        authorization_url="https://accounts.zoho.com/oauth/v2/auth",
        authorization_url_params=auth_params,
        redirect_url=redirect_url,
        redirect_to=redirect_to,
        login_url=login_url,
        session_class=session_class,
        storage=storage,
    )
    blueprint.from_config["client_id"] = "ZOHO_OAUTH_CLIENT_ID"
    blueprint.from_config["client_secret"] = "ZOHO_OAUTH_CLIENT_SECRET"

    @blueprint.before_app_request
    def set_applocal_session():
        # Expose this blueprint's OAuth session on the app context.
        ctx = stack.top
        ctx.zoho_oauth = blueprint.session

    return blueprint
|
def setup(console=False, port=None, menu=True):
    """Setup integration

    Registers Pyblish for Maya plug-ins and appends an item to the File-menu

    Arguments:
        console (bool): Display console with GUI
        port (int, optional): Port from which to start looking for an
            available port to connect with Pyblish QML, default
            provided by Pyblish Integration.
        menu (bool, optional): Whether to append an item to the File-menu.
            Defaults to True.
    """
    # NOTE(review): `console` and `port` are accepted but not used in this
    # body -- presumably consumed elsewhere; confirm before removing.
    # NOTE(review): `self` here is presumably the module itself (the
    # `self = sys.modules[__name__]` idiom) used to hold module-level
    # state -- confirm it is defined at module scope.
    if self._has_been_setup:
        # Re-running setup: tear down the previous registration first so
        # plug-ins and menu items are not registered twice.
        teardown()
    register_plugins()
    register_host()
    if menu:
        add_to_filemenu()
        self._has_menu = True
    self._has_been_setup = True
    print("pyblish: Loaded successfully.")
|
def update(self, resource, timeout=-1):
    """Updates a Logical Switch.

    Args:
        resource (dict): Object to update.
        timeout:
            Timeout in seconds. Wait for task completion by default. The
            timeout does not abort the operation in OneView, just stop
            waiting for its completion.

    Returns:
        dict: Updated resource.
    """
    self.__set_default_values(resource)
    # The REST endpoint is derived from the embedded logical switch URI.
    switch_uri = self._client.build_uri(resource['logicalSwitch']['uri'])
    return self._client.update(resource, uri=switch_uri, timeout=timeout)
|
def pretty_json(obj):
    """Print JSON with indentation and colours.

    :param obj: the object to print - can be a dict or a string
    :raises ClientException: if a string argument is not valid JSON
    """
    if isinstance(obj, string_types):
        # Strings must parse as JSON before they can be re-serialized.
        try:
            parsed = json.loads(obj)
        except ValueError:
            raise ClientException("`obj` is not a json string")
        obj = parsed
    formatted = json.dumps(obj, sort_keys=True, indent=2)
    print(highlight(formatted, JsonLexer(), TerminalFormatter()))
|
def is_insecure_platform():
    """Checks if the current system is missing an SSLContext object.

    Returns True only on Python 2 builds older than 2.7.9 that also lack
    the pyOpenSSL injection dependencies (pyOpenSSL, ndg-httpsclient,
    pyasn1) used by requests/urllib3 to provide SSLContext.

    Bug fix: the original swallowed ImportError and then returned True
    unconditionally, so it reported the platform as insecure even when
    the pyOpenSSL stack was installed.
    """
    v = sys.version_info
    if v.major == 3:
        return False
    # >= 2.7.9 includes the new SSL updates (PEP 466 backport)
    if v.major == 2 and v.minor == 7 and v.micro >= 9:
        return False
    # Older Python 2: secure only if the pyOpenSSL injection stack exists.
    try:
        import OpenSSL  # noqa
        import ndg  # noqa
        import pyasn1  # noqa
    except ImportError:
        return True
    return False
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.