| signature | implementation |
|---|---|
def disconnect(self):
    """Disconnect an IOM session so it can be reconnected later (e.g. when
    switching networks).

    Returns the server log accumulated up to the DISCONNECT marker, or a
    message explaining that reconnect support is disabled.
    """
    if not self.sascfg.reconnect:
        return "Disconnecting and then reconnecting to this workspaceserver has been disabled. Did not disconnect"
    # Ask the server-side component to terminate the IOM connection.
    pgm = b'\n' + b'tom says EOL=DISCONNECT \n'
    self.stdin[0].send(pgm)
    while True:
        try:
            log = self.stderr[0].recv(4096).decode(errors='replace')
        except BlockingIOError:
            # No data available yet; keep polling.
            # BUG FIX: the original assigned b'' (bytes) here while the
            # success path produces str, making the str methods below
            # type-inconsistent.
            log = ''
        if len(log) > 0:
            if log.count("DISCONNECT") >= 1:
                break
    # NOTE(review): rstrip("DISCONNECT") strips a *character set*, not the
    # suffix; behavior kept as-is since callers may rely on it.
    return log.rstrip("DISCONNECT")
def _variants_fields ( fields , exclude_fields , info_ids ) :
"""Utility function to determine which fields to extract when loading
variants .""" | if fields is None : # no fields specified by user
# by default extract all standard and INFO fields
fields = config . STANDARD_VARIANT_FIELDS + info_ids
else : # fields have been specified
for f in fields : # check for non - standard fields not declared in INFO header
if f not in config . STANDARD_VARIANT_FIELDS and f not in info_ids : # support extracting INFO even if not declared in header ,
# but warn . . .
print ( 'WARNING: no INFO definition found for field %s' % f , file = sys . stderr )
# process any exclusions
if exclude_fields is not None :
fields = [ f for f in fields if f not in exclude_fields ]
return tuple ( f for f in fields ) |
def kogge_stone(a, b, cin=0):
    """Creates a Kogge-Stone adder given two inputs.

    :param WireVector a, b: The two WireVectors to add up (bitwidths don't
        need to match)
    :param cin: An optional carry-in WireVector or value
    :return: a WireVector representing the output of the adder

    The Kogge-Stone adder is a fast tree-based adder with O(log(n))
    propagation delay, useful for performance critical designs. However,
    it has O(n log(n)) area usage, and large fan out.
    """
    a, b = libutils.match_bitwidth(a, b)
    prop_orig = a ^ b                 # per-bit propagate signals
    prop_bits = [i for i in prop_orig]
    gen_bits = [i for i in a & b]     # per-bit generate signals
    prop_dist = 1
    # Creation of the carry calculation: the classic prefix tree combines
    # generate/propagate pairs over spans that double each round.
    while prop_dist < len(a):
        for i in reversed(range(prop_dist, len(a))):
            prop_old = prop_bits[i]
            gen_bits[i] = gen_bits[i] | (prop_old & gen_bits[i - prop_dist])
            if i >= prop_dist * 2:  # to prevent creating unnecessary nets and wires
                prop_bits[i] = prop_old & prop_bits[i - prop_dist]
        prop_dist *= 2
    # Assembling the result of the addition:
    # preparing the cin (and conveniently shifting the gen bits)
    gen_bits.insert(0, pyrtl.as_wires(cin))
    return pyrtl.concat_list(gen_bits) ^ prop_orig
def transpose(self, *args, **kwargs):
    """Transposes this DataManager.

    Returns:
        Transposed new DataManager.
    """
    transposed_data = self.data.transpose(*args, **kwargs)
    # Swap index and columns to mirror the transposed block data.
    result = self.__constructor__(transposed_data, self.columns, self.index)
    # Flip the flag: this manager may itself already be a transpose.
    result._is_transposed = self._is_transposed ^ 1
    return result
def insert_or_append(parent, node, next_sibling):
    """Insert ``node`` before ``next_sibling``; when ``next_sibling`` is
    falsy (e.g. None), append ``node`` as the last child instead."""
    if not next_sibling:
        parent.appendChild(node)
    else:
        parent.insertBefore(node, next_sibling)
def _get_default_letters(model_admin=None):
    """Return the set of letters defined by the DEFAULT_ALPHABET setting.

    DEFAULT_ALPHABET can be a callable, string, tuple, or list; the result
    is always a set. If a ModelAdmin class with a DEFAULT_ALPHABET
    attribute is passed, that attribute takes precedence over the setting.
    """
    from django.conf import settings
    import string
    default_ltrs = string.digits + string.ascii_uppercase
    default_letters = getattr(settings, 'DEFAULT_ALPHABET', default_ltrs)
    if model_admin and hasattr(model_admin, 'DEFAULT_ALPHABET'):
        default_letters = model_admin.DEFAULT_ALPHABET
    if callable(default_letters):
        return set(default_letters())
    elif isinstance(default_letters, str):
        # BUG FIX: the original had a second, unreachable
        # ``isinstance(default_letters, str)`` branch (a Python 2
        # str/unicode leftover calling ``.decode('utf8')``); removed.
        return set(default_letters)
    elif isinstance(default_letters, (tuple, list)):
        return set(default_letters)
    # NOTE(review): any other type implicitly returns None, as before.
def add_callback(self, callback, msg_type=None):
    """Add a per-message-type or global callback.

    Parameters
    ----------
    callback : fn
        Callback function.
    msg_type : int | iterable
        Message type(s) to register the callback against. The default
        ``None`` registers a global callback; an iterable registers the
        callback for every contained message type.
    """
    keys = self._to_iter(msg_type)
    if keys is None:
        # Global registration (msg_type is None here).
        self._callbacks[msg_type].add(callback)
    else:
        for key in keys:
            self._callbacks[key].add(callback)
def grant_member(context, request):
    """Grant member roles in the group.

    Expects ``request.json['mapping']`` to be a list of entries shaped like
    ``{'user': {'userid': ..., 'username': ...}, 'roles': [...]}``. Each
    user is resolved by userid (preferred) or username and granted every
    listed role.

    :raises UnprocessableError: if a user cannot be resolved.
    """
    mapping = request.json['mapping']
    for entry in mapping:
        user = entry['user']
        roles = entry['roles']
        username = user.get('username', None)
        userid = user.get('userid', None)
        if userid:
            u = context.get_user_by_userid(userid)
        elif username:
            u = context.get_user_by_username(username)
        else:
            u = None
        if u is None:
            # BUG FIX: grammar of the error message ("does not exists").
            raise UnprocessableError('User %s does not exist' % (userid or username))
        for rolename in roles:
            context.grant_member_role(u.userid, rolename)
    return {'status': 'success'}
def _includes_base_class ( self , iter_classes , base_class ) :
"""Returns whether any class in iter _ class is a subclass of the given base _ class .""" | return any ( issubclass ( auth_class , base_class ) for auth_class in iter_classes , ) |
def main(argString=None):
    """The main function.

    The purpose of this module is to plot Eigenvectors provided by the
    Eigensoft software.

    Here are the steps of this module:

    1. Reads the Eigenvector (:py:func:`read_eigenvalues`).
    2. Plots the Scree Plot (:py:func:`create_scree_plot`).

    :param argString: optional list of CLI arguments (defaults to
        sys.argv when None is passed through to the parser).
    """
    # Getting and checking the options
    args = parse_args(argString)
    check_args(args)
    # Reads the eigenvalues
    eigenvalues = read_eigenvalues(args.evec)
    # Creates the plot
    create_scree_plot(eigenvalues, args.out, args)
def get_state(self, site):
    """Read the client status file and return the stored timestamp.

    Looks up the given site (translated via ``config_site_to_name``) in
    the ``incremental`` section of ``self.status_file``.

    :returns: the timestamp as a float, or None when the section or option
        is missing.
    """
    parser = ConfigParser()
    status_section = 'incremental'
    parser.read(self.status_file)
    timestamp = None
    try:
        timestamp = float(parser.get(status_section, self.config_site_to_name(site)))
    except (NoSectionError, NoOptionError):
        # Missing section/option means no recorded state yet.
        # (Originally two identical except clauses with unused ``e``.)
        pass
    return timestamp
def format_commands(self, ctx, formatter):
    """Extra format method for multi-commands that writes the list of
    subcommands after the options."""
    visible = []
    for name in self.list_commands(ctx):
        command = self.get_command(ctx, name)
        # The tool may list a command it cannot resolve; skip it, along
        # with anything explicitly hidden.
        if command is None or command.hidden:
            continue
        visible.append((name, command))
    if not visible:
        return
    # allow for 3 times the default spacing
    limit = formatter.width - 6 - max(len(name) for name, _ in visible)
    rows = [(name, command.get_short_help_str(limit)) for name, command in visible]
    if rows:
        with formatter.section('Commands'):
            formatter.write_dl(rows)
def _get_included_diff_results ( self ) :
"""Return a list of stages to be included in the diff results .""" | included = [ self . _git_diff_tool . diff_committed ( self . _compare_branch ) ]
if not self . _ignore_staged :
included . append ( self . _git_diff_tool . diff_staged ( ) )
if not self . _ignore_unstaged :
included . append ( self . _git_diff_tool . diff_unstaged ( ) )
return included |
def _after_request(self, response):
    """Set a new ID token cookie if the ID token has changed."""
    # If either the global or the ID-token-specific secure setting is
    # False we fall back to insecure cookies. OIDC_ID_TOKEN_COOKIE_SECURE
    # is deliberately not defined in init_app, so it is not easy to find.
    cfg = current_app.config
    cookie_secure = cfg['OIDC_COOKIE_SECURE'] and cfg.get('OIDC_ID_TOKEN_COOKIE_SECURE', True)
    if getattr(g, 'oidc_id_token_dirty', False):
        if g.oidc_id_token:
            # Token changed: persist a freshly signed token.
            response.set_cookie(cfg['OIDC_ID_TOKEN_COOKIE_NAME'],
                                self.cookie_serializer.dumps(g.oidc_id_token),
                                secure=cookie_secure, httponly=True,
                                max_age=cfg['OIDC_ID_TOKEN_COOKIE_TTL'])
        else:
            # Token cleared: this was a log out, expire the cookie.
            response.set_cookie(cfg['OIDC_ID_TOKEN_COOKIE_NAME'], '',
                                path=cfg['OIDC_ID_TOKEN_COOKIE_PATH'],
                                secure=cookie_secure, httponly=True, expires=0)
    return response
def Chisholm_voidage(x, rhol, rhog):
    r'''Calculates void fraction in two-phase flow according to the model
    of [1]_, as given in [2]_ and [3]_.

    .. math::
        \alpha = \left[1 + \left(\frac{1-x}{x}\right)\left(\frac{\rho_g}
        {\rho_l}\right)\sqrt{1 - x\left(1 - \frac{\rho_l}{\rho_g}\right)}
        \right]^{-1}

    Parameters
    ----------
    x : float
        Quality at the specific tube interval []
    rhol : float
        Density of the liquid [kg/m^3]
    rhog : float
        Density of the gas [kg/m^3]

    Returns
    -------
    alpha : float
        Void fraction (area of gas / total area of channel), [-]

    Notes
    -----
    [1]_ has not been reviewed. However, both [2]_ and [3]_ present it the
    same way.

    Examples
    --------
    >>> Chisholm_voidage(.4, 800, 2.5)
    0.949525900374774

    References
    ----------
    .. [1] Chisholm, D. "Pressure Gradients due to Friction during the
       Flow of Evaporating Two-Phase Mixtures in Smooth Tubes and
       Channels." International Journal of Heat and Mass Transfer 16,
       no. 2 (February 1, 1973): 347-58. doi:10.1016/0017-9310(73)90063-X.
    .. [2] Xu, Yu, and Xiande Fang. "Correlations of Void Fraction for
       Two-Phase Refrigerant Flow in Pipes." Applied Thermal Engineering
       64, no. 1-2 (March 2014): 242-51.
       doi:10.1016/j.applthermaleng.2013.12.032.
    .. [3] Dalkilic, A. S., S. Laohalertdecha, and S. Wongwises. "Effect
       of Void Fraction Models on the Two-Phase Friction Factor of R134a
       during Condensation in Vertical Downward Flow in a Smooth Tube."
       International Communications in Heat and Mass Transfer 35, no. 8
       (October 2008): 921-27. doi:10.1016/j.icheatmasstransfer.2008.04.001.
    '''
    # Slip-related square-root term of the correlation.
    slip_term = (1.0 - x*(1.0 - rhol/rhog))**0.5
    ratio = (1.0 - x)/x*(rhog/rhol)
    return 1.0/(1.0 + ratio*slip_term)
def init(deb1, deb2=False):
    """Initialize the module-level DEBUG and DEBUGALL flags.

    Lets other modules set DEBUG and DEBUGALL so that their calls to
    dprint or dprintx generate output.

    Args:
        deb1 (bool): value to assign to DEBUG.
        deb2 (bool): optional value to assign to DEBUGALL, defaults
            to False.
    """
    # pylint: disable=global-statement
    global DEBUG, DEBUGALL
    DEBUG = deb1
    DEBUGALL = deb2
def __check_hash(self, message):
    """Return True/False depending on whether the message's hash is valid.

    message: dict with W_HASH, W_MESSAGE and W_SEQ entries. The hash is
    recomputed from the message body, the instance's token and the
    sequence number, then compared with the transmitted hash.
    """
    return message[W_HASH] == self.__make_hash(message[W_MESSAGE], self.__token, message[W_SEQ])
def start_dev_session(self):
    """Start a client that attempts to connect to the dev server running
    on the host ``app.dev``."""
    try:
        from .dev import DevServerSession
        session = DevServerSession.initialize(host=self.dev)
        session.start()
        #: Save a reference
        self._dev_session = session
    except Exception:
        # BUG FIX: was a bare ``except:`` which also swallowed
        # SystemExit/KeyboardInterrupt; report the failure but let real
        # exit signals propagate.
        self.show_error(traceback.format_exc())
def lifetime_report(args, parser):
    """Generates a lifetime report.

    Builds the catalogue, tokenizer and results from the parsed CLI
    arguments, creates the output directory if needed, and writes the
    report files there.

    :param args: parsed CLI arguments (expects ``results``, ``output``,
        ``label`` plus catalogue/tokenizer options).
    :param parser: argument parser (unused here; kept for the CLI
        sub-command interface).
    """
    catalogue = utils.get_catalogue(args)
    tokenizer = utils.get_tokenizer(args)
    results = tacl.Results(args.results, tokenizer)
    output_dir = os.path.abspath(args.output)
    os.makedirs(output_dir, exist_ok=True)
    report = tacl.LifetimeReport()
    report.generate(output_dir, catalogue, results, args.label)
def _insertLayer ( self , layer , name , ** kwargs ) :
"""This is the environment implementation of : meth : ` BaseFont . insertLayer ` .
This must return an instance of a : class : ` BaseLayer ` subclass .
* * layer * * will be a layer object with the attributes necessary
for copying as defined in : meth : ` BaseLayer . copy ` An environment
must not insert * * layer * * directly . Instead the data from * * layer * *
should be copied to a new layer . * * name * * will be a : ref : ` type - string `
representing a glyph layer . It will have been normalized with
: func : ` normalizers . normalizeLayerName ` . * * name * * will have been
tested to make sure that no layer with the same name exists in the font .
Subclasses may override this method .""" | if name != layer . name and layer . name in self . layerOrder :
layer = layer . copy ( )
layer . name = name
dest = self . newLayer ( name )
dest . copyData ( layer )
return dest |
def edit(directory=None, revision='current'):
    """Edit current revision.

    Opens the given migration revision in an editor via Alembic's
    ``edit`` command, which requires Alembic >= 0.8.0.

    :param directory: migration script directory (None uses the app's
        configured directory).
    :param revision: revision identifier to edit (default 'current').
    :raises RuntimeError: if the installed Alembic is older than 0.8.0.
    """
    if alembic_version >= (0, 8, 0):
        config = current_app.extensions['migrate'].migrate.get_config(directory)
        command.edit(config, revision)
    else:
        raise RuntimeError('Alembic 0.8.0 or greater is required')
def maps_get_rules_input_rbridge_id(self, **kwargs):
    """Auto Generated Code"""
    # NOTE(review): the initial "config" element is immediately discarded;
    # the "maps_get_rules" element is what reaches the callback (behavior
    # kept from the generated original).
    config = ET.Element("config")
    maps_get_rules = ET.Element("maps_get_rules")
    config = maps_get_rules
    input_el = ET.SubElement(maps_get_rules, "input")
    rbridge = ET.SubElement(input_el, "rbridge-id")
    rbridge.text = kwargs.pop('rbridge_id')
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
def crack(ciphertext, *fitness_functions, ntrials=30, nswaps=3000):
    """Break ``ciphertext`` using hill climbing.

    Note:
        Currently ntrials and nswaps default to magic numbers. Generally
        the trend is: the longer the text, the lower the number of trials
        you need to run, because the hill climbing will lead to the best
        answer faster. Because randomness is involved, there is the
        possibility of the correct decryption not being found. In this
        circumstance you just need to run the code again.

    Example:
        >>> decryptions = crack("XUOOB", fitness.english.quadgrams)
        >>> print(decryptions[0])
        HELLO

    Args:
        ciphertext (str): The text to decrypt
        *fitness_functions (variable length argument list): Functions to score decryption with

    Keyword Args:
        ntrials (int): The number of times to run the hill climbing algorithm
        nswaps (int): The number of rounds to find a local maximum

    Returns:
        Sorted list of decryptions

    Raises:
        ValueError: If nswaps or ntrials are not positive integers
        ValueError: If no fitness_functions are given
    """
    if ntrials <= 0 or nswaps <= 0:
        raise ValueError("ntrials and nswaps must be positive integers")

    # Find a local maximum by swapping two letters and scoring the decryption
    def next_node_inner_climb(node):
        # Swap 2 characters in the key
        a, b = random.sample(range(len(node)), 2)
        node[a], node[b] = node[b], node[a]
        plaintext = decrypt(node, ciphertext)
        node_score = score(plaintext, *fitness_functions)
        return node, node_score, Decryption(plaintext, ''.join(node), node_score)

    # Outer climb reruns hill climb ntrials number of times, each time
    # from a different random start location
    def next_node_outer_climb(node):
        random.shuffle(node)
        key, best_score, outputs = hill_climb(nswaps, node[:], next_node_inner_climb)
        # outputs[-1] is the best Decryption of this trial
        return key, best_score, outputs[-1]

    # The last item in this list is the item with the highest score
    _, _, decryptions = hill_climb(ntrials, list(string.ascii_uppercase), next_node_outer_climb)
    return sorted(decryptions, reverse=True)
def savepoint(self):
    """Copy the last displayed image into the savepoint list."""
    last = self._last_image
    if not last:
        # Nothing displayed since the previous savepoint.
        return
    self._savepoints.append(last)
    self._last_image = None
def date(fmt=None, timestamp=None):
    """Date handling in the style of PHP's date() (original docstring:
    "Manejo de fechas (simil PHP)").

    Supports 'U' (current Unix timestamp), 'c' (ISO format of the given
    timestamp) and 'Ymd' (current date as YYYYMMDD); any other format
    returns None.
    """
    if fmt == 'U':
        # Current time as an integer Unix timestamp.
        now = datetime.datetime.now()
        return int(time.mktime(now.timetuple()))
    if fmt == 'c':
        # ISO-8601 representation of the supplied timestamp.
        return datetime.datetime.fromtimestamp(timestamp).isoformat()
    if fmt == 'Ymd':
        return datetime.datetime.now().strftime("%Y%m%d")
def callgraph(G, stmt_list):
    """Build the callgraph of the functions in ``stmt_list``, ignoring
    built-in functions.

    Adds one node per function definition and an edge from each function
    to every identifier it calls directly.

    :param G: graph object supporting ``add_node``/``add_edge``.
    :param stmt_list: parsed statements; non-function statements are
        skipped.
    """
    func_list = []
    for stmt in stmt_list:
        try:
            G.add_node(stmt.head.ident.name)
            func_list.append(stmt)
        except AttributeError:
            # BUG FIX: was a bare ``except:``. Only "not a function
            # definition" (missing head/ident attributes) should be
            # silently skipped.
            pass
    for func in func_list:
        assert isinstance(func, node.function)
        func_name = func.head.ident.name
        # resolve.resolve(func)
        for s in node.postorder(func):
            if (s.__class__ is node.funcall and s.func_expr.__class__ is node.ident):
                # if s.func_expr.name in G.nodes():
                G.add_edge(func_name, s.func_expr.name)
def _prnt_min_max_val(var, text, verb):
    r"""Print variable; if it has more than three elements only min/max
    (and size) are shown, unless verb > 3."""
    if var.size > 3:
        print(text, _strvar(var.min()), "-", _strvar(var.max()), ":",
              _strvar(var.size), " [min-max; #]")
        if verb > 3:
            # Full dump on high verbosity.
            print("    : ", _strvar(var))
    else:
        print(text, _strvar(np.atleast_1d(var)))
def read_acceptance_fraction(self, walkers=None):
    """Reads the acceptance fraction.

    Parameters
    ----------
    walkers : (list of) int, optional
        The walker index (or a list of indices) to retrieve. If None,
        samples from all walkers will be obtained.

    Returns
    -------
    array
        Array of acceptance fractions with shape (requested walkers,).
    """
    dataset = self.sampler_group + '/acceptance_fraction'
    mask = numpy.zeros(self.nwalkers, dtype=bool)
    if walkers is None:
        # Select every walker.
        mask[:] = True
    else:
        mask[walkers] = True
    return self[dataset][mask]
def load_raw(cls, model_fn, schema, *args, **kwargs):
    """Loads a trained classifier from the raw Weka model format.

    Must specify the model schema and classifier name, since these aren't
    currently deduced from the model format.

    :param model_fn: path of the raw Weka model file.
    :param schema: schema object; a schema-only copy is stored on the
        returned classifier.
    :returns: a new classifier instance with the raw model bytes attached.
    """
    c = cls(*args, **kwargs)
    c.schema = schema.copy(schema_only=True)
    # BUG FIX: the original leaked the file handle
    # (``open(model_fn, 'rb').read()``); use a context manager.
    with open(model_fn, 'rb') as fh:
        c._model_data = fh.read()
    return c
def _mm_pairwise ( n_items , data , params ) :
"""Inner loop of MM algorithm for pairwise data .""" | weights = exp_transform ( params )
wins = np . zeros ( n_items , dtype = float )
denoms = np . zeros ( n_items , dtype = float )
for winner , loser in data :
wins [ winner ] += 1.0
val = 1.0 / ( weights [ winner ] + weights [ loser ] )
denoms [ winner ] += val
denoms [ loser ] += val
return wins , denoms |
def _convert_to_unicode ( string ) :
"""This method should work with both Python 2 and 3 with the caveat
that they need to be compiled with wide unicode character support .
If there isn ' t wide unicode character support it ' ll blow up with a
warning .""" | codepoints = [ ]
for character in string . split ( '-' ) :
if character in BLACKLIST_UNICODE :
next
codepoints . append ( '\U{0:0>8}' . format ( character ) . decode ( 'unicode-escape' ) )
return codepoints |
def build_coordinate_families(self, paired_aligns):
    '''Given a stream of paired aligns, return a list of pairs that share
    same coordinates (coordinate family). Flushes families in progress
    when any of:
    a) incoming right start > family end
    b) incoming chrom != current chrom
    c) incoming align stream is exhausted
    '''
    # Tracking state for the family currently being collected.
    rightmost_start = None
    current_chrom = None

    def _new_coordinate(pair):
        # True when this pair starts a new coordinate family.
        return pair.right.reference_start != rightmost_start

    def _new_chrom(pair):
        # True when this pair lies on a different chromosome.
        return current_chrom != pair.right.reference_name

    for pair in paired_aligns:
        if rightmost_start is None:
            # First pair (or first after a chromosome flush): start tracking.
            rightmost_start = pair.right.reference_start
            current_chrom = pair.right.reference_name
        if _new_chrom(pair):
            # Chromosome changed: drop in-progress coords and flush all
            # remaining families. NOTE: tracking restarts from the NEXT
            # pair's coordinates on the following iteration.
            self._right_coords_in_progress[current_chrom].clear()
            rightmost_start = None
            current_chrom = None
            for family in self._remaining_families():
                yield family
        elif _new_coordinate(pair):
            # Coordinate advanced: emit families completed before it.
            right = pair.right
            for family in self._completed_families(right.reference_name, right.reference_start):
                yield family
        self._add(pair)
    # Stream exhausted: flush everything still in progress.
    for family in self._remaining_families():
        yield family
def assert_valid_rule_class(clazz):
    """Assert that a given rule class is valid by checking a number of its
    properties:
    - Rules must extend from LineRule or CommitRule
    - Rule classes must have id and name string attributes. The
      options_spec is optional, but if set, it must be a list of gitlint
      Options.
    - Rule classes must have a validate method. In case of a CommitRule,
      validate must take a single commit parameter. In case of LineRule,
      validate must take line and commit as first and second parameters.
    - LineRule classes must have a target class attribute that is set to
      either CommitMessageTitle or CommitMessageBody.
    - User rule ids cannot start with R, T, B or M as these rule ids are
      reserved for gitlint itself.
    """
    def _attr_missing(attr_name):
        # An attribute counts as missing when absent, None or empty.
        value = getattr(clazz, attr_name, None)
        return value is None or not value

    # Rules must extend from LineRule or CommitRule.
    if not issubclass(clazz, (rules.LineRule, rules.CommitRule)):
        message = u"User-defined rule class '{0}' must extend from {1}.{2} or {1}.{3}"
        raise UserRuleError(message.format(clazz.__name__, rules.CommitRule.__module__,
                                           rules.LineRule.__name__, rules.CommitRule.__name__))

    # Rules must have an id attribute.
    if _attr_missing('id'):
        raise UserRuleError(u"User-defined rule class '{0}' must have an 'id' attribute".format(clazz.__name__))

    # Rule ids cannot start with gitlint-reserved letters.
    if clazz.id[0].upper() in ['R', 'T', 'B', 'M']:
        message = u"The id '{1}' of '{0}' is invalid. Gitlint reserves ids starting with R,T,B,M"
        raise UserRuleError(message.format(clazz.__name__, clazz.id[0]))

    # Rules must have a name attribute.
    if _attr_missing('name'):
        raise UserRuleError(u"User-defined rule class '{0}' must have a 'name' attribute".format(clazz.__name__))

    # If set, options_spec must be a list containing only gitlint RuleOptions.
    bad_options_msg = u"The options_spec attribute of user-defined rule class '{0}' must be a list of {1}.{2}"
    if not isinstance(clazz.options_spec, list):
        raise UserRuleError(bad_options_msg.format(clazz.__name__, options.RuleOption.__module__,
                                                   options.RuleOption.__name__))
    for option in clazz.options_spec:
        if not isinstance(option, options.RuleOption):
            raise UserRuleError(bad_options_msg.format(clazz.__name__, options.RuleOption.__module__,
                                                       options.RuleOption.__name__))

    # Rules must have a validate method. isroutine() works on both python 2
    # and 3; see http://stackoverflow.com/a/17019998/381010
    if not hasattr(clazz, 'validate') or not inspect.isroutine(clazz.validate):
        raise UserRuleError(u"User-defined rule class '{0}' must have a 'validate' method".format(clazz.__name__))

    # LineRules must have a valid target: CommitMessageTitle or CommitMessageBody.
    if issubclass(clazz, rules.LineRule) and clazz.target not in [rules.CommitMessageTitle, rules.CommitMessageBody]:
        message = u"The target attribute of the user-defined LineRule class '{0}' must be either {1}.{2} or {1}.{3}"
        raise UserRuleError(message.format(clazz.__name__, rules.CommitMessageTitle.__module__,
                                           rules.CommitMessageTitle.__name__, rules.CommitMessageBody.__name__))
def kw_changelist_view(self, request: HttpRequest, extra_context=None, **kw):
    """Changelist view that tolerates extra key-value URL arguments.

    :param request: HttpRequest
    :param extra_context: extra context dict
    :param kw: key-value dict (accepted for URLconf flexibility, ignored)
    :return: see changelist_view()
    """
    # Extra kwargs are deliberately dropped; delegate to the regular view.
    return self.changelist_view(request, extra_context)
def delete(self, transport, robj, rw=None, r=None, w=None, dw=None, pr=None, pw=None, timeout=None):
    """delete(robj, rw=None, r=None, w=None, dw=None, pr=None, pw=None, timeout=None)

    Deletes an object from Riak.

    .. note:: This request is automatically retried :attr:`retries` times
       if it fails due to network error.

    :param robj: the object to delete
    :type robj: RiakObject
    :param rw: the read/write (delete) quorum
    :type rw: integer, string, None
    :param r: the read quorum
    :type r: integer, string, None
    :param pr: the primary read quorum
    :type pr: integer, string, None
    :param w: the write quorum
    :type w: integer, string, None
    :param dw: the durable write quorum
    :type dw: integer, string, None
    :param pw: the primary write quorum
    :type pw: integer, string, None
    :param timeout: a timeout value in milliseconds
    :type timeout: int
    """
    # Validate before delegating so a bad timeout fails fast.
    _validate_timeout(timeout)
    return transport.delete(robj, rw=rw, r=r, w=w, dw=dw,
                            pr=pr, pw=pw, timeout=timeout)
def run_shell(args: dict) -> int:
    """Run the shell sub command.

    If a project directory was specified, the command is run in batch
    mode instead of interactively.

    :param args: parsed CLI arguments.
    :return: exit code (0 on normal interactive exit).
    """
    if args.get('project_directory'):
        return run_batch(args)
    shell = CauldronShell()
    if in_project_directory():
        # Auto-open the project in the current directory on startup.
        shell.cmdqueue.append('open "{}"'.format(os.path.realpath(os.curdir)))
    shell.cmdloop()
    return 0
def get_pipeline_stage(self, pipeline_key, stage_key=None, sort_by=None):
    '''Get one (or all) stage objects of a pipeline with a single GET.

    Args:
        pipeline_key    key of the pipeline
        stage_key       key of the stage (default: None, i.e. ALL)
        sort_by         sort in desc order by 'creationTimestamp' or
                        'lastUpdatedTimestamp' (may or may not be
                        supported)
    Returns:
        (status code for the GET request, stage data). When a single
        stage is requested, the returned dict's values are unpacked into
        a list.
    '''
    if not pipeline_key:
        return requests.codes.bad_request, None
    parts = [self.api_uri, self.pipelines_suffix, pipeline_key, self.stages_suffix]
    if stage_key:
        parts.append(stage_key)
    uri = '/'.join(parts)
    if sort_by:
        if sort_by not in ('creationTimestamp', 'lastUpdatedTimestamp'):
            return requests.codes.bad_request, {'success': 'False', 'error': 'sortBy needs to be \'creationTimestamp\', or \'lastUpdatedTimestamp\''}
        uri += self.sort_by_postfix + sort_by
    code, data = self._req('get', uri)
    # The response format is ambiguous, so rely on what was requested:
    # a single stage comes back as a dict keyed by stage id.
    if stage_key:
        data = list(data.values())
    return code, data
def relax_AX(self):
    """Implement relaxation if option ``RelaxParam`` != 1.0.

    Updates ``self.AXnr`` (non-relaxed A*X) and ``self.AX`` (possibly
    relaxed version).
    """
    # We need to keep the non-relaxed version of AX since it is
    # required for computation of primal residual r.
    self.AXnr = self.cnst_A(self.X, self.Xf)
    if self.rlx == 1.0:
        # If RelaxParam option is 1.0 there is no relaxation
        self.AX = self.AXnr
    else:
        # Avoid calling cnst_c() more than once in case it is expensive
        # (e.g. due to allocation of a large block of memory)
        if not hasattr(self, '_cnst_c'):
            self._cnst_c = self.cnst_c()
        # Compute relaxed version of AX: a convex-style combination of the
        # non-relaxed AX and the constraint terms.
        alpha = self.rlx
        self.AX = alpha * self.AXnr - (1 - alpha) * (self.cnst_B(self.Y) - self._cnst_c)
def open(self, auto_commit=None, schema=None):
    '''Create an execution context for running queries.

    :param auto_commit: overrides the schema's auto_commit when not None.
    :param schema: schema to use; defaults to ``self.schema``.
    :returns: an ExecutionContext bound to ``self.path``, with setup
        scripts executed when the DB file is missing or empty.
    '''
    if schema is None:
        schema = self.schema
    ac = auto_commit if auto_commit is not None else schema.auto_commit
    exe = ExecutionContext(self.path, schema=schema, auto_commit=ac)
    # Set up the DB if it does not exist yet (or is an empty file).
    if not os.path.isfile(self.path) or os.path.getsize(self.path) == 0:
        getLogger().warning("DB does not exist at {}. Setup is required.".format(self.path))
        # BUG FIX: the original accessed schema.setup_scripts without the
        # None guard it applied to setup_files, raising AttributeError
        # when schema is None.
        if schema is not None:
            # run setup files
            if schema.setup_files:
                for file_path in schema.setup_files:
                    getLogger().debug("Executing script file: {}".format(file_path))
                    exe.cur.executescript(self.read_file(file_path))
            # run setup scripts
            if schema.setup_scripts:
                for script in schema.setup_scripts:
                    exe.cur.executescript(script)
    return exe
def next_page(self):
    """Move the screen page down through the history buffer."""
    if self.history.position < self.history.size and self.history.bottom:
        # Number of lines to scroll: a screen-ratio worth, capped by what
        # the bottom history actually holds.
        mid = min(len(self.history.bottom), int(math.ceil(self.lines * self.history.ratio)))
        # Lines scrolled off the top of the screen go into top history.
        self.history.top.extend(self.buffer[y] for y in range(mid))
        self.history = self.history._replace(position=self.history.position + mid)
        # Shift the visible buffer up by ``mid`` lines...
        for y in range(self.lines - mid):
            self.buffer[y] = self.buffer[y + mid]
        # ...and refill the bottom of the screen from bottom history.
        for y in range(self.lines - mid, self.lines):
            self.buffer[y] = self.history.bottom.popleft()
        # Everything on screen moved; mark all lines dirty for redraw.
        self.dirty = set(range(self.lines))
def brightness(im):
    '''Return the brightness of an image.

    Args:
        im (numpy): image (BGR channel order, as used by OpenCV)
    Returns:
        float, average of the HSV value (V) channel over all pixels
    '''
    im_hsv = cv2.cvtColor(im, cv2.COLOR_BGR2HSV)
    h, s, v = cv2.split(im_hsv)
    height, width = v.shape[:2]
    # PERF FIX: sum the V channel with one vectorized call instead of a
    # Python-level per-row loop. (Also fixes the "weight" typo for width.)
    return float(v.sum()) / (height * width)
def apply_annotation(self, bo, annotation):
    """Apply an annotation on the backend object.

    :param BackendObject bo: The backend object.
    :param Annotation annotation: The annotation to be applied.
    :return: A new BackendObject
    :rtype: BackendObject
    """
    # Currently we only support RegionAnnotation; anything else is a
    # no-op and the object is returned unchanged.
    if not isinstance(annotation, RegionAnnotation):
        return bo
    if not isinstance(bo, ValueSet):
        # Convert it to a ValueSet first.
        # Note that the original value is not kept at all. If you want to
        # convert a StridedInterval to a ValueSet, you gotta do the
        # conversion by calling AST.annotate() from outside.
        bo = ValueSet.empty(bo.bits)
    return bo.apply_annotation(annotation)
def subscribe_topic(self, topics=None, pattern=None):
    """Subscribe to a list of topics, or a topic regex pattern.

    - ``topics`` (list): list of topics for subscription (a bare string
      is also accepted and wrapped in a list).
    - ``pattern`` (str): pattern to match available topics. You must
      provide either topics or pattern, but not both.
    """
    # BUG FIX: default was a mutable ``[]``; use None and normalize.
    if topics is None:
        topics = []
    elif not isinstance(topics, list):
        topics = [topics]
    self.consumer.subscribe(topics, pattern=pattern)
def _evaluate_xyz(self, x, y, z):
    """Evaluation of the potential as a function of (x, y, z) in the
    aligned coordinate frame."""
    # 2*pi*b*c times the integral _potInt, evaluated with the instance's
    # shape parameters and Gauss-Legendre abscissae/weights (glx/glw).
    return 2. * numpy.pi * self._b * self._c * _potInt(x, y, z, self._psi, self._b2, self._c2, glx=self._glx, glw=self._glw)
def step_impl11(context, runs):
    """Execute multiple test runs and verify the run count is recorded.

    :param context: test context carrying the fuzz executor.
    :param runs: number of test runs to perform.
    """
    executor = context.fuzz_executor
    executor.run_test(runs)
    observed = executor.stats.cumulated_counts()
    assert observed == runs, "VERIFY: stats available."
def _compose_mro(cls, types):  # noqa
    """Calculates the method resolution order for a given class *cls*.

    Includes relevant abstract base classes (with their respective bases)
    from the *types* iterable. Uses a modified C3 linearization algorithm.
    """
    bases = set(cls.__mro__)

    # Remove entries which are already present in the __mro__ or unrelated.
    def is_related(_type):
        return (  #: off
            _type not in bases and hasattr(_type, '__mro__') and issubclass(cls, _type))
    #: on

    types = [n for n in types if is_related(n)]

    # Remove entries which are strict bases of other entries (they will end up
    # in the MRO anyway).
    def is_strict_base(_typ):
        for other in types:
            if _typ != other and _typ in other.__mro__:
                return True
        return False

    types = [n for n in types if not is_strict_base(n)]

    # Subclasses of the ABCs in *types* which are also implemented by
    # *cls* can be used to stabilize ABC ordering.
    type_set = set(types)
    mro = []
    for typ in types:
        found = []
        for sub in typ.__subclasses__():
            if sub not in bases and issubclass(cls, sub):
                found.append([s for s in sub.__mro__ if s in type_set])
        if not found:
            # No relevant subclasses: keep the ABC itself.
            mro.append(typ)
            continue
        # Favor subclasses with the biggest number of useful bases.
        found.sort(key=len, reverse=True)
        for sub in found:
            for subcls in sub:
                if subcls not in mro:
                    mro.append(subcls)
    return _c3_mro(cls, abcs=mro)
def calc_signal(self, ff, t=None, ani=None, fkwdargs=None, Brightness=True,
                res=0.005, DL=None, resMode='abs', method='sum', ind=None,
                out=object, plot=True, dataname=None, fs=None, dmargin=None,
                wintit=None, invert=True, units=None, draw=True, connect=True,
                plotmethod='imshow'):
    """Return the line-integrated emissivity

    Beware, by default, Brightness=True and it is only a line-integral!
    Indeed, to get the received power, you need an estimate of the Etendue
    (previously set using self.set_Etendues()) and use Brightness=False.

    Hence, if Brightness=True and if
    the emissivity is provided in W/m3 (resp. W/m3/sr),
    => the method returns W/m2 (resp. W/m2/sr)

    The line is sampled using :meth:`~tofu.geom.LOS.get_sample`,
    The integral can be computed using three different methods:
        - 'sum':   a numpy.sum() on the local values (x segments lengths)
        - 'simps': using :meth:`scipy.integrate.simps`
        - 'romb':  using :meth:`scipy.integrate.romb`

    Except ff, arguments common to :meth:`~tofu.geom.LOS.get_sample`

    Parameters
    ----------
    ff : callable
        The user-provided emissivity function.
    plotmethod : str
        Plot method forwarded to the data object's plot().
        NOTE: previously this name was referenced without being defined,
        raising NameError whenever plot=True; it is now a keyword argument.

    Returns
    -------
    sig : np.ndarray
        The computed signal, a 1d or 2d array depending on whether a time
        vector was provided.
    units : str
        Units of the result
    """
    msg = "Arg out must be in [object,np.ndarray]"
    assert out in [object, np.ndarray], msg
    assert type(Brightness) is bool, "Arg Brightness must be a bool !"
    if Brightness is False and self.Etendues is None:
        msg = "Etendue must be set if Brightness is False !"
        raise Exception(msg)
    if fkwdargs is None:
        # BUGFIX: avoid a shared mutable default argument
        fkwdargs = {}
    # Preformat ind
    ind = self._check_indch(ind)
    # Preformat DL
    kIn, kOut = self.kIn, self.kOut
    if DL is None:
        DL = np.array([kIn[ind], kOut[ind]])
    elif np.asarray(DL).size == 2:
        DL = np.tile(np.asarray(DL).ravel()[:, np.newaxis], len(ind))
    DL = np.ascontiguousarray(DL).astype(float)
    assert type(DL) is np.ndarray and DL.ndim == 2
    assert DL.shape == (2, len(ind)), "Arg DL has wrong shape !"
    # check limits: clip the sampling interval to the LOS entry/exit points
    ii = DL[0, :] < kIn[ind]
    DL[0, ii] = kIn[ind][ii]
    ii[:] = DL[0, :] >= kOut[ind]
    DL[0, ii] = kOut[ind][ii]
    ii[:] = DL[1, :] > kOut[ind]
    DL[1, ii] = kOut[ind][ii]
    ii[:] = DL[1, :] <= kIn[ind]
    DL[1, ii] = kIn[ind][ii]
    # Preformat Ds, us and Etendue
    Ds, us = self.D[:, ind], self.u[:, ind]
    if Brightness is False:
        E = self.Etendues
        if E.size == self.nRays:
            E = E[ind]
    # Preformat signal
    if len(ind) == 1:
        Ds, us = Ds.reshape((3, 1)), us.reshape((3, 1))
    if t is None or len(t) == 1:
        sig = np.full((Ds.shape[1],), np.nan)
    else:
        sig = np.full((len(t), Ds.shape[1]), np.nan)
    # Rays with NaN/inf limits or empty intervals are left as NaN
    indok = ~(np.any(np.isnan(DL), axis=0)
              | np.any(np.isinf(DL), axis=0)
              | ((DL[1, :] - DL[0, :]) <= 0.))
    if np.any(indok):
        Ds, us, DL = Ds[:, indok], us[:, indok], DL[:, indok]
        if indok.sum() == 1:
            Ds, us = Ds.reshape((3, 1)), us.reshape((3, 1))
            DL = DL.reshape((2, 1))
        Ds, us = np.ascontiguousarray(Ds), np.ascontiguousarray(us)
        DL = np.ascontiguousarray(DL)
        # Launch  # NB: find a way to exclude cases with DL[0,:] >= DL[1,:] !!
        # Exclude Rays not seeing the plasma
        s = _GG.LOS_calc_signal(ff, Ds, us, res, DL, dLMode=resMode,
                                method=method, t=t, Ani=ani,
                                fkwdargs=fkwdargs, Test=True)
        if t is None or len(t) == 1:
            sig[indok] = s
        else:
            sig[:, indok] = s
    # Format output
    if Brightness is False:
        if dataname is None:
            dataname = r"LOS-integral x Etendue"
        if t is None or len(t) == 1 or E.size == 1:
            sig = sig * E
        else:
            sig = sig * E[np.newaxis, :]
        if units is None:
            units = r"origin x $m^3.sr$"
    else:
        if dataname is None:
            dataname = r"LOS-integral"
        if units is None:
            units = r"origin x m"
    if plot or out in [object, 'object']:
        kwdargs = dict(data=sig, t=t, lCam=self, Name=self.Id.Name,
                       dlabels={'data': {'units': units, 'name': dataname}},
                       Exp=self.Id.Exp, Diag=self.Id.Diag)
        import tofu.data as tfd
        if self._is2D():
            osig = tfd.DataCam2D(**kwdargs)
        else:
            osig = tfd.DataCam1D(**kwdargs)
        if plot:
            kh = osig.plot(fs=fs, dmargin=dmargin, wintit=wintit,
                           plotmethod=plotmethod, invert=invert,
                           draw=draw, connect=connect)
    if out in [object, 'object']:
        return osig
    else:
        return sig, units
def getSequence(title, db='nucleotide'):
    """Get information about a sequence from Genbank.

    @param title: A C{str} sequence title from a BLAST hit. Of the form
        'gi|63148399|gb|DQ011818.1| Description...'.
    @param db: The C{str} name of the Entrez database to consult.

    NOTE: this uses the network! Also, there is a 3 requests/second limit
    imposed by NCBI on these requests so be careful or your IP will be banned.
    """
    titleId = title.split(' ', 1)[0]
    # Pull the GI number out of a 'gi|NNN|...' style id; otherwise assume the
    # id itself is the GI number (and make sure it's a string).
    fields = titleId.split('|')
    gi = fields[1] if len(fields) > 1 else str(titleId)
    try:
        client = Entrez.efetch(db=db, rettype='gb', retmode='text', id=gi)
    except URLError:
        # Network failure: signal "no record" rather than raising.
        return None
    record = SeqIO.read(client, 'gb')
    client.close()
    return record
def usage_plan_absent(name, plan_name, region=None, key=None, keyid=None, profile=None):
    '''Ensures usage plan identified by name is no longer present

    .. versionadded:: 2017.7.0

    name
        name of the state

    plan_name
        name of the plan to remove

    .. code-block:: yaml

        usage plan absent:
          boto_apigateway.usage_plan_absent:
            - plan_name: my_usage_plan
            - profile: my_profile
    '''
    # Standard salt state return structure.
    ret = {'name': name, 'result': True, 'comment': '', 'changes': {}}
    try:
        common_args = dict([('region', region), ('key', key), ('keyid', keyid), ('profile', profile)])
        # Look the plan up by name; bail out early if the lookup itself fails.
        existing = __salt__['boto_apigateway.describe_usage_plans'](name=plan_name, **common_args)
        if 'error' in existing:
            ret['result'] = False
            ret['comment'] = 'Failed to describe existing usage plans'
            return ret
        if not existing['plans']:
            # Nothing to do: the plan is already absent.
            ret['comment'] = 'Usage plan {0} does not exist already'.format(plan_name)
            return ret
        if __opts__['test']:
            # Dry-run mode: report what would happen without deleting.
            ret['comment'] = 'Usage plan {0} exists and would be deleted'.format(plan_name)
            ret['result'] = None
            return ret
        # NOTE(review): only the first matching plan is deleted — assumes plan
        # names are unique; confirm against the API's behavior.
        plan_id = existing['plans'][0]['id']
        result = __salt__['boto_apigateway.delete_usage_plan'](plan_id, **common_args)
        if 'error' in result:
            ret['result'] = False
            ret['comment'] = 'Failed to delete usage plan {0}, {1}'.format(plan_name, result)
            return ret
        ret['comment'] = 'Usage plan {0} has been deleted'.format(plan_name)
        ret['changes']['old'] = {'plan': existing['plans'][0]}
        ret['changes']['new'] = {'plan': None}
    except (ValueError, IOError) as e:
        ret['result'] = False
        ret['comment'] = '{0}'.format(e.args)
    return ret
def compare(self, compare_recipe, suffix='_compare'):
    """Adds a comparison recipe to a base recipe.

    :param compare_recipe: a Recipe instance to compare against this one.
    :param suffix: label appended to the comparison's columns.
    :return: the base recipe, for chaining.
    """
    assert isinstance(compare_recipe, Recipe)
    assert isinstance(suffix, basestring)
    self.compare_recipe += [compare_recipe]
    self.suffix += [suffix]
    # Mark the recipe as needing a rebuild.
    self.dirty = True
    return self.recipe
def path(self, which=None):
    """Extend ``nailgun.entity_mixins.Entity.path``.

    The format of the returned path depends on the value of ``which``:

    smart_class_parameters
        /api/puppetclasses/:puppetclass_id/smart_class_parameters

    Otherwise, call ``super``.
    """
    if which in ('smart_class_parameters', 'smart_variables'):
        base = super(PuppetClass, self).path(which='self')
        return '/'.join((base, which))
    return super(PuppetClass, self).path(which)
def add_status_query_managers(sender, **kwargs):
    """Add a Querymanager for each status item dynamically.

    Intended as a model signal handler: for every entry in the model's
    ``STATUS`` choices, attach a ``QueryManager`` filtered on that status
    value (so ``Model.<status>`` queries only rows with that status).
    """
    if not issubclass(sender, StatusModel):
        return
    if django.VERSION >= (1, 10):
        # First, get current manager name...
        default_manager = sender._meta.default_manager
    for value, display in getattr(sender, 'STATUS', ()):
        if _field_exists(sender, value):
            # A model field with the same name would be shadowed by the
            # generated manager — refuse loudly.
            raise ImproperlyConfigured("StatusModel: Model '%s' has a field named '%s' which " "conflicts with a status of the same name." % (sender.__name__, value))
        sender.add_to_class(value, QueryManager(status=value))
    if django.VERSION >= (1, 10):
        # ... then, put it back, as add_to_class is modifying the default manager!
        sender._meta.default_manager_name = default_manager.name
def write_command_line(self):
    """Writes command line to attributes.

    The command line is written to the file's ``attrs['cmd']``. If this
    attribute already exists in the file (this can happen when resuming
    from a checkpoint), ``attrs['cmd']`` will be a list storing the current
    command line and all previous command lines.
    """
    current = [" ".join(sys.argv)]
    try:
        history = self.attrs["cmd"]
    except KeyError:
        # No previous command line recorded.
        history = []
    else:
        if isinstance(history, str):
            # A single previous command line: normalize to a list.
            history = [history]
        elif isinstance(history, numpy.ndarray):
            history = history.tolist()
    self.attrs["cmd"] = current + history
async def list(source):
    """Generate a single list from an asynchronous sequence."""
    # Drain the whole stream into one list, then yield it as a single item.
    async with streamcontext(source) as streamer:
        collected = [item async for item in streamer]
    yield collected
def _get_minidom_tag_value(station, tag_name):
    """Get the text value of the first *tag_name* element (if it exists).

    :param station: a minidom node to search within.
    :param tag_name: name of the child element to look up.
    :return: the text of the element's first child node, or None when the
        element is missing or empty.
    """
    elements = station.getElementsByTagName(tag_name)
    if not elements:
        # BUGFIX: previously indexed [0] unconditionally, raising IndexError
        # for a missing tag despite the "(if it exists)" contract.
        return None
    tag = elements[0].firstChild
    if tag:
        return tag.nodeValue
    return None
def run(self):
    """Run task, namely:

    * purge existing index, if requested (`purge_existing_index`),
    * create the index, if missing,
    * apply mappings, if given,
    * set refresh interval to -1 (disable) for performance reasons,
    * bulk index in batches of size `chunk_size` (2000),
    * set refresh interval to 1s,
    * refresh Elasticsearch,
    * create entry in marker index.
    """
    if self.purge_existing_index:
        self.delete_index()
    self.create_index()
    es = self._init_connection()
    if self.mapping:
        es.indices.put_mapping(index=self.index, doc_type=self.doc_type, body=self.mapping)
    # Disable refresh during the bulk load to avoid per-batch refresh cost.
    es.indices.put_settings({"index": {"refresh_interval": "-1"}}, index=self.index)
    bulk(es, self._docs(), chunk_size=self.chunk_size, raise_on_error=self.raise_on_error)
    # Restore a normal refresh interval and make new documents searchable.
    es.indices.put_settings({"index": {"refresh_interval": "1s"}}, index=self.index)
    es.indices.refresh()
    # Record completion in the marker target.
    self.output().touch()
def cli(env, package_keyname, keyword, category):
    """List package items used for ordering.

    The item keyNames listed can be used with `slcli order place` to specify
    the items that are being ordered in the package.

    .. Note::
        Items with a numbered category, like disk0 or gpu0, can be included
        multiple times in an order to match how many of the item you want to order.

    ::

        # List all items in the VSI package
        slcli order item-list CLOUD_SERVER

        # List Ubuntu OSes from the os category of the Bare Metal package
        slcli order item-list BARE_METAL_SERVER --category os --keyword ubuntu
    """
    table = formatting.Table(COLUMNS)
    manager = ordering.OrderingManager(env.client)
    # Build the object filter from the optional keyword/category restrictions.
    item_filter = {}
    if keyword:
        item_filter['description'] = {'operation': '*= %s' % keyword}
    if category:
        item_filter['categories'] = {'categoryCode': {'operation': '_= %s' % category}}
    items = manager.list_items(package_keyname, filter={'items': item_filter})
    grouped = sort_items(items)
    for catname in sorted(grouped):
        for item in grouped[catname]:
            table.add_row([catname, item['keyName'], item['description'], get_price(item)])
    env.fout(table)
def freqpoly_plot(data):
    """Make a frequency-polygon plot of merged read lengths (absolute and
    relative frequencies)."""
    rel_data = OrderedDict()
    for sample, counts in data.items():
        total = sum(counts.values(), 0)
        rel_data[sample] = {length: n / total for length, n in counts.items()}
    fplotconfig = {
        'data_labels': [
            {'name': 'Absolute', 'ylab': 'Frequency', 'xlab': 'Merged Read Length'},
            {'name': 'Relative', 'ylab': 'Relative Frequency', 'xlab': 'Merged Read Length'},
        ],
        'id': 'flash_freqpoly_plot',
        'title': 'FLASh: Frequency of merged read lengths',
        'colors': dict(zip(data.keys(), MultiqcModule.get_colors(len(data)))),
    }
    return linegraph.plot([data, rel_data], fplotconfig)
def __store_query(self, query_items):
    """Make where clause

    :@param query_items
    :@type query_items: dict
    """
    idx = self._current_query_index
    # Lazily create the bucket for the current query index.
    if idx > len(self._queries) - 1:
        self._queries.append([])
    self._queries[idx].append(query_items)
def inspect(item, maxchar=80):
    """Inspect the attributes of an item.

    Prints one ``name: value`` line per attribute to stderr, truncating each
    value to *maxchar* characters.

    :param item: any object whose attributes should be listed.
    :param maxchar: maximum characters of each value to show; falsy disables
        truncation.
    """
    for name in dir(item):
        try:
            member = str(getattr(item, name))
            if maxchar and len(member) > maxchar:
                member = member[:maxchar] + "..."
        except Exception:
            # BUGFIX: was a bare ``except``, which also swallowed
            # KeyboardInterrupt/SystemExit; getattr()/str() may legitimately
            # raise, so catch Exception only.
            member = "[ERROR]"
        print("{}: {}".format(name, member), file=sys.stderr)
def checkSubstitute(self, typecode):
    '''If this is True, allow typecode to be substituted
    for "self" typecode.

    :param typecode: candidate element declaration to substitute.
    :return: True only when *typecode* declares a substitutionGroup that
        matches this element and resolves back to this exact typecode.
    '''
    if not isinstance(typecode, ElementDeclaration):
        return False
    try:
        nsuri, ncname = typecode.substitutionGroup
    except (AttributeError, TypeError):
        # No (unpackable) substitutionGroup declared on the candidate.
        return False
    if (nsuri, ncname) != (self.schema, self.literal):
        # allow slop with the empty namespace
        if not nsuri and not self.schema and ncname == self.literal:
            return True
        return False
    # The declared head must resolve to this very typecode instance.
    sub = GED(self.schema, self.literal)
    if sub is None or sub is not typecode:
        return False
    return True
def _addFilename(self, filename):
    """Add a new file name.

    @param filename: A C{str} file name.
    @raise ValueError: If a file with this name has already been added.
    @return: The C{int} id of the newly added file.
    """
    cursor = self._connection.cursor()
    try:
        cursor.execute('INSERT INTO files(name) VALUES (?)', (filename,))
    except sqlite3.IntegrityError as e:
        if 'UNIQUE constraint failed' in str(e):
            # Translate the low-level constraint violation into a clearer
            # application-level error.
            raise ValueError('Duplicate file name: %r' % filename)
        raise
    fileNumber = cursor.lastrowid
    self._connection.commit()
    return fileNumber
def PushEvent(self, event):
    """Pushes an event onto the heap.

    Args:
        event (EventObject): event.
    """
    # Heap entries sort by timestamp first, then description and the
    # serialized attribute string, keeping ordering deterministic.
    entry = (
        event.timestamp,
        event.timestamp_desc,
        event.GetAttributeValuesString(),
        event,
    )
    heapq.heappush(self._heap, entry)
def get_private_name(self, f):
    """Get private protected name of an attribute.

    :param str f: name of the private attribute to be accessed.
    :return: the name-mangled form ``_ClassName__attr``.
    """
    # Apply any swagger-specific rename before mangling.
    renamed = self.__swagger_rename__.get(f, f)
    return '_{0}__{1}'.format(self.__class__.__name__, renamed)
def pad_aes256(s):
    """Pads an input string to a given block size.

    :param s: string
    :returns: The padded string (unchanged when already block-aligned).
    """
    if len(s) % AES.block_size:
        return Padding.appendPadding(s, blocksize=AES.block_size)
    return s
def heartbeat(self):
    """Send a ping to the websocket periodically.

    This is needed so that Heroku won't close the connection
    from inactivity.
    """
    # Loop until the websocket closes; sleep first so pings are spaced
    # HEARTBEAT_DELAY seconds apart, and send in a separate greenlet so a
    # slow send never delays the next heartbeat.
    while not self.ws.closed:
        gevent.sleep(HEARTBEAT_DELAY)
        gevent.spawn(self.send, "ping")
def up_capture(returns, factor_returns, **kwargs):
    """Compute the capture ratio for periods when the benchmark return is positive

    Parameters
    ----------
    returns : pd.Series or np.ndarray
        Returns of the strategy, noncumulative.
        - See full explanation in :func:`~empyrical.stats.cum_returns`.
    factor_returns : pd.Series or np.ndarray
        Noncumulative returns of the factor to which beta is
        computed. Usually a benchmark such as the market.
        - This is in the same style as returns.
    period : str, optional
        Defines the periodicity of the 'returns' data for purposes of
        annualizing. Value ignored if `annualization` parameter is specified.
        Defaults are::
            'monthly': 12
            'weekly': 52
            'daily': 252

    Returns
    -------
    up_capture : float

    Note
    ----
    See http://www.investopedia.com/terms/u/up-market-capture-ratio.asp for
    more information.
    """
    # Delegate to the generic up-market helper with capture() as the statistic.
    return up(returns, factor_returns, function=capture, **kwargs)
def cancel_subnet(self, subnet_id):
    """Cancels the specified subnet.

    :param int subnet_id: The ID of the subnet to be cancelled.
    """
    subnet = self.get_subnet(subnet_id, mask='id, billingItem.id')
    # A subnet without a billing item cannot be cancelled through billing.
    if "billingItem" not in subnet:
        raise exceptions.SoftLayerError("subnet %s can not be cancelled"
                                        " " % subnet_id)
    return self.client['Billing_Item'].cancelService(id=subnet['billingItem']['id'])
def get_account_policy(region=None, key=None, keyid=None, profile=None):
    '''Get account policy for the AWS account.

    .. versionadded:: 2015.8.0

    CLI Example:

    .. code-block:: bash

        salt myminion boto_iam.get_account_policy
    '''
    conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
    try:
        info = conn.get_account_password_policy()
        # Unwrap boto's nested response structure down to the policy itself.
        return info.get_account_password_policy_response.get_account_password_policy_result.password_policy
    except boto.exception.BotoServerError as e:
        log.debug(e)
        # NOTE(review): message says 'update' but this is a read operation —
        # looks copy-pasted; confirm before changing the logged text.
        msg = 'Failed to update the password policy.'
        log.error(msg)
        return False
def quantize_without_scipy(self, image):
    """This function can be used if no scipy is available.
    It's 7 times slower though.

    :param image: a PIL image to quantize against the instance's palette.
    :return: the quantized PIL image.
    """
    w, h = image.size
    px = np.asarray(image).copy()
    # Memoize the palette conversion per distinct RGB triple — images usually
    # contain far fewer unique colors than pixels.
    memo = {}
    for j in range(w):
        for i in range(h):
            key = (px[i, j, 0], px[i, j, 1], px[i, j, 2])
            try:
                val = memo[key]
            except KeyError:
                val = self.convert(*key)
                memo[key] = val
            px[i, j, 0], px[i, j, 1], px[i, j, 2] = val
    return Image.fromarray(px).convert("RGB").quantize(palette=self.palette_image())
def deepvalues(mapping):
    """Iterates over nested mapping, depth-first, in sorted order by key."""
    for obj in vals_sorted_by_key(mapping):
        # Anything exposing .items is treated as a nested mapping and
        # recursed into; everything else is yielded as a leaf value.
        if hasattr(obj, 'items'):
            for subobj in deepvalues(obj):
                yield subobj
        else:
            yield obj
def get_mean_and_stddevs(self, sites, rup, dists, imt, stddev_types):
    """See documentation for method `GroundShakingIntensityModel` in
    :class:~`openquake.hazardlib.gsim.base.GSIM`"""
    # This is just used for testing purposes
    if len(stddev_types) == 0:
        stddev_types = [StdDev.TOTAL]
    mean, stds = self._get_mean_and_stddevs(sites, rup, dists, imt, stddev_types)
    # One constant sigma per site, fixed by the IMT (get_sigma).
    stddevs = [np.ones(len(dists.repi)) * get_sigma(imt)]
    delta = self._get_delta(stds, dists)
    # NOTE(review): the mean is shifted by the raw stddev term plus delta —
    # assumed intentional for this test GMPE; confirm against its reference.
    mean = mean + stds + delta
    mean = np.squeeze(mean)
    return mean, stddevs
def stream_json_file(local_file):
    """Stream a JSON file (in JSON-per-line format)

    Args:
        local_file (file-like object): an open file-handle that contains a
            JSON string on each line. Lines may be ``bytes`` (binary-mode
            file) or ``str`` (text-mode file).

    Yields:
        (dict) JSON objects; malformed lines are logged and skipped.
    """
    for i, line in enumerate(local_file):
        try:
            # Generalized: previously only bytes lines were supported
            # (unconditional .decode()); text-mode files now work too.
            if isinstance(line, bytes):
                line = line.decode('utf-8')
            data = json.loads(line)
            yield data
        except ValueError as e:
            # Covers both JSON parse errors and UnicodeDecodeError
            # (a ValueError subclass).
            logging.warning("Skipping line %d due to error: %s", i, e)
            continue
def fit(self, X, y, sample_weight=None, init_score=None, eval_set=None,
        eval_names=None, eval_sample_weight=None, eval_class_weight=None,
        eval_init_score=None, eval_metric=None, early_stopping_rounds=None,
        verbose=True, feature_name='auto', categorical_feature='auto',
        callbacks=None):
    """Docstring is inherited from the LGBMModel.

    Classifier-specific preprocessing: validates targets, label-encodes the
    classes, picks a binary vs. multiclass objective/metric, and re-encodes
    any validation targets before delegating to ``LGBMModel.fit``.
    """
    # Validate targets and encode class labels to contiguous integers.
    _LGBMAssertAllFinite(y)
    _LGBMCheckClassificationTargets(y)
    self._le = _LGBMLabelEncoder().fit(y)
    _y = self._le.transform(y)
    self._classes = self._le.classes_
    self._n_classes = len(self._classes)
    if self._n_classes > 2:
        # Switch to using a multiclass objective in the underlying LGBM instance
        ova_aliases = ("multiclassova", "multiclass_ova", "ova", "ovr")
        if self._objective not in ova_aliases and not callable(self._objective):
            self._objective = "multiclass"
        # Map binary metric aliases to their multiclass counterparts.
        if eval_metric in ('logloss', 'binary_logloss'):
            eval_metric = "multi_logloss"
        elif eval_metric in ('error', 'binary_error'):
            eval_metric = "multi_error"
    else:
        # Binary problem: map multiclass metric aliases back to binary ones.
        if eval_metric in ('logloss', 'multi_logloss'):
            eval_metric = 'binary_logloss'
        elif eval_metric in ('error', 'multi_error'):
            eval_metric = 'binary_error'
    if eval_set is not None:
        if isinstance(eval_set, tuple):
            eval_set = [eval_set]
        # Re-encode validation targets with the same label encoder; reuse the
        # already-encoded training targets when the same arrays were passed.
        for i, (valid_x, valid_y) in enumerate(eval_set):
            if valid_x is X and valid_y is y:
                eval_set[i] = (valid_x, _y)
            else:
                eval_set[i] = (valid_x, self._le.transform(valid_y))
    super(LGBMClassifier, self).fit(X, _y, sample_weight=sample_weight,
                                    init_score=init_score, eval_set=eval_set,
                                    eval_names=eval_names,
                                    eval_sample_weight=eval_sample_weight,
                                    eval_class_weight=eval_class_weight,
                                    eval_init_score=eval_init_score,
                                    eval_metric=eval_metric,
                                    early_stopping_rounds=early_stopping_rounds,
                                    verbose=verbose, feature_name=feature_name,
                                    categorical_feature=categorical_feature,
                                    callbacks=callbacks)
    return self
def is_ens_domain(authority: str) -> bool:
    """Return False if authority is not a valid ENS domain."""
    # Valid ENS authorities end with the tld '.eth' and contain either 2 or 3
    # dot-separated labels, i.e. zeppelinos.eth or packages.zeppelinos.eth
    has_eth_tld = authority.endswith(".eth")
    label_count = len(authority.split("."))
    return has_eth_tld and label_count in (2, 3)
def set_linkage_method(self, method):
    """Sets the method to determine the distance between two clusters.

    :param method: The method to use. It can be one of ``'single'``,
        ``'complete'``, ``'average'`` or ``'uclus'``, or a callable. The
        callable should take two collections as parameters and return a
        distance value between both collections.
    :raises ValueError: if *method* is neither a known name nor callable.
    """
    if method == 'single':
        self.linkage = single
    elif method == 'complete':
        self.linkage = complete
    elif method == 'average':
        self.linkage = average
    elif method == 'uclus':
        self.linkage = uclus
    elif callable(method):
        # callable() is the idiomatic form of hasattr(method, '__call__')
        self.linkage = method
    else:
        # BUGFIX: the message previously read "average of uclus"
        raise ValueError('distance method must be one of single, '
                         'complete, average or uclus')
async def start(self) -> None:
    """Process incoming request.

    It reads request line, request headers and request payload, then
    calls handle_request() method. Subclass has to override
    handle_request(). start() handles various exceptions in request
    or response handling. Connection is being closed always unless
    keep_alive(True) specified.
    """
    loop = self._loop
    handler = self._task_handler
    assert handler is not None
    manager = self._manager
    assert manager is not None
    keepalive_timeout = self._keepalive_timeout
    resp = None
    assert self._request_factory is not None
    assert self._request_handler is not None
    # Main serve loop: one iteration per pipelined request.
    while not self._force_close:
        if not self._messages:
            try:
                # wait for next request
                self._waiter = loop.create_future()
                await self._waiter
            except asyncio.CancelledError:
                break
            finally:
                self._waiter = None
        message, payload = self._messages.popleft()
        if self.access_log:
            now = loop.time()
        manager.requests_count += 1
        writer = StreamWriter(self, loop)
        request = self._request_factory(message, payload, self, writer, handler)
        try:
            try:
                # a new task is used for copy context vars (#3406)
                task = self._loop.create_task(self._request_handler(request))
                resp = await task
            except HTTPException as exc:
                # HTTP-level errors become regular responses.
                resp = Response(status=exc.status, reason=exc.reason, text=exc.text, headers=exc.headers)
            except asyncio.CancelledError:
                self.log_debug('Ignored premature client disconnection')
                break
            except asyncio.TimeoutError as exc:
                self.log_debug('Request handler timed out.', exc_info=exc)
                resp = self.handle_error(request, 504)
            except Exception as exc:
                resp = self.handle_error(request, 500, exc)
            # Distinguish "handler returned nothing" from "returned a
            # non-response object" via the .prepare attribute.
            try:
                prepare_meth = resp.prepare
            except AttributeError:
                if resp is None:
                    raise RuntimeError("Missing return " "statement on request handler")
                else:
                    raise RuntimeError("Web-handler should return " "a response instance, " "got {!r}".format(resp))
            await prepare_meth(request)
            await resp.write_eof()
            # notify server about keep-alive
            self._keepalive = bool(resp.keep_alive)
            # log access
            if self.access_log:
                self.log_access(request, resp, loop.time() - now)
            # check payload
            if not payload.is_eof():
                lingering_time = self._lingering_time
                if not self._force_close and lingering_time:
                    # Drain the unread request body for a bounded time so the
                    # client can finish sending before we close.
                    self.log_debug('Start lingering close timer for %s sec.', lingering_time)
                    now = loop.time()
                    end_t = now + lingering_time
                    with suppress(asyncio.TimeoutError, asyncio.CancelledError):
                        while not payload.is_eof() and now < end_t:
                            with CeilTimeout(end_t - now, loop=loop):
                                # read and ignore
                                await payload.readany()
                            now = loop.time()
                # if payload still uncompleted
                if not payload.is_eof() and not self._force_close:
                    self.log_debug('Uncompleted request.')
                    self.close()
                payload.set_exception(PayloadAccessError())
        except asyncio.CancelledError:
            self.log_debug('Ignored premature client disconnection ')
            break
        except RuntimeError as exc:
            if self.debug:
                self.log_exception('Unhandled runtime exception', exc_info=exc)
            self.force_close()
        except Exception as exc:
            self.log_exception('Unhandled exception', exc_info=exc)
            self.force_close()
        finally:
            if self.transport is None and resp is not None:
                self.log_debug('Ignored premature client disconnection.')
            elif not self._force_close:
                if self._keepalive and not self._close:
                    # start keep-alive timer
                    if keepalive_timeout is not None:
                        now = self._loop.time()
                        self._keepalive_time = now
                        if self._keepalive_handle is None:
                            self._keepalive_handle = loop.call_at(now + keepalive_timeout, self._process_keepalive)
                else:
                    break
    # remove handler, close transport if no handlers left
    if not self._force_close:
        self._task_handler = None
        if self.transport is not None and self._error_handler is None:
            self.transport.close()
def to_e164(name, origin=public_enum_domain, want_plus_prefix=True):
    """Convert an ENUM domain name into an E.164 number.

    @param name: the ENUM domain name.
    @type name: dns.name.Name object.
    @param origin: A domain containing the ENUM domain name. The
        name is relativized to this domain before being converted to text.
    @type: dns.name.Name object or None
    @param want_plus_prefix: if True, add a '+' to the beginning of the
        returned number.
    @rtype: str
    """
    if origin is not None:
        name = name.relativize(origin)
    # ENUM encodes digits as single-character labels, least significant first.
    digits = [label for label in name.labels if label.isdigit() and len(label) == 1]
    if len(digits) != len(name.labels):
        raise dns.exception.SyntaxError('non-digit labels in ENUM domain name')
    number = ''.join(reversed(digits))
    return '+' + number if want_plus_prefix else number
def create_project(self, name, description):
    """Create a new project with the specified name and description.

    :param name: str: name of the project to create
    :param description: str: description of the project to create
    :return: Project
    """
    response = self.data_service.create_project(name, description)
    return self._create_item_response(response, Project)
def toggle_concatenate(self):
    """Enable and disable concatenation options."""
    # Concatenation choices only apply when not locked to staging epochs.
    if not (self.chunk['epoch'].isChecked() and self.lock_to_staging.get_value()):
        for i, j in zip([self.idx_chan, self.idx_cycle, self.idx_stage, self.idx_evt_type],
                        [self.cat['chan'], self.cat['cycle'], self.cat['stage'], self.cat['evt_type']]):
            # A concatenation checkbox is only meaningful when more than one
            # item of that kind is selected; otherwise disable and clear it.
            if len(i.selectedItems()) > 1:
                j.setEnabled(True)
            else:
                j.setEnabled(False)
                j.setChecked(False)
    if not self.chunk['event'].isChecked():
        self.cat['evt_type'].setEnabled(False)
    if not self.cat['discontinuous'].get_value():
        self.cat['chan'].setEnabled(False)
        self.cat['chan'].setChecked(False)
    # Refresh the segment count shown in the dialog.
    self.update_nseg()
def remove_volume(self, name, force=False):
    """Remove a volume. Similar to the ``docker volume rm`` command.

    Args:
        name (str): The volume's name
        force (bool): Force removal of volumes that were already removed
            out of band by the volume driver plugin.

    Raises:
        :py:class:`docker.errors.APIError`
            If volume failed to remove.
    """
    params = {}
    if force:
        # 'force' is only understood by API >= 1.25.
        if utils.version_lt(self._version, '1.25'):
            raise errors.InvalidVersion('force removal was introduced in API 1.25')
        params = {'force': force}
    url = self._url('/volumes/{0}', name, params=params)
    self._raise_for_status(self._delete(url))
def _remove_vm(name, datacenter, service_instance, placement=None, power_off=None):
    '''Helper function to remove a virtual machine

    name
        Name of the virtual machine

    service_instance
        vCenter service instance for connection and configuration

    datacenter
        Datacenter of the virtual machine

    placement
        Placement information of the virtual machine

    power_off
        If truthy, power the VM off before removal.
    '''
    results = {}
    # Resolve the container to search in: the requested placement, or else
    # the whole datacenter.
    if placement:
        (resourcepool_object, placement_object) = salt.utils.vmware.get_placement(service_instance, datacenter, placement)
    else:
        placement_object = salt.utils.vmware.get_datacenter(service_instance, datacenter)
    if power_off:
        power_off_vm(name, datacenter, service_instance)
        results['powered_off'] = True
    # Find the VM managed-object reference by its name within the container.
    vm_ref = salt.utils.vmware.get_mor_by_property(service_instance, vim.VirtualMachine, name, property_name='name', container_ref=placement_object)
    if not vm_ref:
        raise salt.exceptions.VMwareObjectRetrievalError('The virtual machine object {0} in datacenter ' '{1} was not found'.format(name, datacenter))
    return results, vm_ref
async def getTriggerToken(self, *args, **kwargs):
    """Get a trigger token

    Retrieve a unique secret token for triggering the specified hook. This
    token can be deactivated with `resetTriggerToken`.

    This method gives output: ``v1/trigger-token-response.json#``

    This method is ``stable``
    """
    endpoint = self.funcinfo["getTriggerToken"]
    return await self._makeApiCall(endpoint, *args, **kwargs)
def source_analysis(source_path, group, encoding='automatic', fallback_encoding='cp1252', generated_regexes=pygount.common.regexes_from(DEFAULT_GENERATED_PATTERNS_TEXT), duplicate_pool=None):
    """Analysis for line counts in source code stored in ``source_path``.

    :param source_path: path to the source file to analyze
    :param group: name of a logical group the source code belongs to, e.g. a
        package.
    :param encoding: encoding according to :func:`encoding_for`
    :param fallback_encoding: fallback encoding according to
        :func:`encoding_for`
    :param generated_regexes: regular expressions marking a file as generated
        code when one of them matches an early line
    :param duplicate_pool: pool used to detect duplicate files, or ``None`` to
        disable duplicate detection
    :return: a :class:`SourceAnalysis`
    """
    assert encoding is not None
    assert generated_regexes is not None
    result = None
    lexer = None
    source_code = None
    # Cheap pre-checks that classify the file without reading or lexing it:
    # empty, binary, unknown language, or duplicate.
    source_size = os.path.getsize(source_path)
    if source_size == 0:
        _log.info('%s: is empty', source_path)
        result = pseudo_source_analysis(source_path, group, SourceState.empty)
    elif is_binary_file(source_path):
        _log.info('%s: is binary', source_path)
        result = pseudo_source_analysis(source_path, group, SourceState.binary)
    elif not has_lexer(source_path):
        _log.info('%s: unknown language', source_path)
        result = pseudo_source_analysis(source_path, group, SourceState.unknown)
    elif duplicate_pool is not None:
        duplicate_path = duplicate_pool.duplicate_path(source_path)
        if duplicate_path is not None:
            _log.info('%s: is a duplicate of %s', source_path, duplicate_path)
            result = pseudo_source_analysis(source_path, group, SourceState.duplicate, duplicate_path)
    if result is None:
        # Resolve 'automatic'/'chardet' to a concrete encoding before reading.
        if encoding in ('automatic', 'chardet'):
            encoding = encoding_for(source_path, encoding, fallback_encoding)
        try:
            with open(source_path, 'r', encoding=encoding) as source_file:
                source_code = source_file.read()
        except (LookupError, OSError, UnicodeError) as error:
            # LookupError: unknown encoding name; UnicodeError: decode failure.
            _log.warning('cannot read %s using encoding %s: %s', source_path, encoding, error)
            result = pseudo_source_analysis(source_path, group, SourceState.error, error)
        if result is None:
            lexer = guess_lexer(source_path, source_code)
            assert lexer is not None
    if (result is None) and (len(generated_regexes) != 0):
        number_line_and_regex = matching_number_line_and_regex(pygount.common.lines(source_code), generated_regexes)
        if number_line_and_regex is not None:
            number, _, regex = number_line_and_regex
            message = 'line {0} matches {1}'.format(number, regex)
            _log.info('%s: is generated code because %s', source_path, message)
            result = pseudo_source_analysis(source_path, group, SourceState.generated, message)
    if result is None:
        assert lexer is not None
        assert source_code is not None
        language = lexer.name
        # XML-ish files may be refined to a more specific dialect name.
        if ('xml' in language.lower()) or (language == 'Genshi'):
            dialect = pygount.xmldialect.xml_dialect(source_path, source_code)
            if dialect is not None:
                language = dialect
        _log.info('%s: analyze as %s using encoding %s', source_path, language, encoding)
        # Per-line counters: c=code, d=documentation, e=empty, s=string.
        mark_to_count_map = {'c': 0, 'd': 0, 'e': 0, 's': 0}
        for line_parts in _line_parts(lexer, source_code):
            mark_to_increment = 'e'
            # Last match wins, so when a line mixes kinds the effective
            # precedence is code > string > documentation.
            for mark_to_check in ('d', 's', 'c'):
                if mark_to_check in line_parts:
                    mark_to_increment = mark_to_check
            mark_to_count_map[mark_to_increment] += 1
        result = SourceAnalysis(path=source_path, language=language, group=group, code=mark_to_count_map['c'], documentation=mark_to_count_map['d'], empty=mark_to_count_map['e'], string=mark_to_count_map['s'], state=SourceState.analyzed.name, state_info=None,)
    assert result is not None
    return result
def __render_videoframe(self):
    """Retrieves a new videoframe from the stream.

    Sets the frame as the __current_video_frame and passes it on to
    __videorenderfunc() if it is set."""
    frame = self.clip.get_frame(self.clock.time)
    # Hand the new frame to the render callback first, when one is registered.
    renderer = self.__videorenderfunc
    if callable(renderer):
        renderer(frame)
    # Remember the most recently rendered frame.
    self.__current_videoframe = frame
def get_url_for_id(client_site_url, apikey, resource_id):
    """Return the URL for the given resource ID.

    Contacts the client site's API to get the URL for the ID and returns it.

    :raises CouldNotGetURLError: if getting the URL fails for any reason
    """
    # TODO: Handle invalid responses from the client site.
    endpoint = client_site_url + u"deadoralive/get_url_for_resource_id"
    response = requests.get(
        endpoint,
        headers=dict(Authorization=apikey),
        params={"resource_id": resource_id},
    )
    if not response.ok:
        raise CouldNotGetURLError(
            u"Couldn't get URL for resource {id}: {code} {reason}".format(
                id=resource_id, code=response.status_code, reason=response.reason))
    return response.json()
def ud_grade_ipix(ipix, nside_in, nside_out, nest=False):
    """Upgrade or degrade resolution of a pixel list.

    Parameters:
    ipix : array-like
        the input pixel(s)
    nside_in : int
        the nside of the input pixel(s)
    nside_out : int
        the desired nside of the output pixel(s)
    nest : bool
        if True, pixels are in NESTED ordering, otherwise RING

    Returns:
    pix_out : array-like
        the upgraded or degraded pixel array
    """
    # Same resolution: nothing to do.
    if nside_in == nside_out:
        return ipix
    # Delegate to the dedicated upgrade/degrade helpers.
    if nside_in < nside_out:
        return u_grade_ipix(ipix, nside_in, nside_out, nest)
    return d_grade_ipix(ipix, nside_in, nside_out, nest)
def _getNumberOfRequiredVerificationsVocabulary(self):
    """Returns a DisplayList with the available options for the
    multi-verification list: 'system default', '1', '2', '3', '4'

    :returns: DisplayList with the available options for the
              multi-verification list
    """
    default_count = self.bika_setup.getNumberOfRequiredVerifications()
    # -1 is the sentinel for "use the system default"; show its current value.
    default_label = "%s (%s)" % (_("System default"), str(default_count))
    options = [(-1, default_label)] + [(n, str(n)) for n in (1, 2, 3, 4)]
    return IntDisplayList(list(options))
def _inner_dataset_template(cls, dataset):
    """Returns a Dataset template used as a wrapper around the data
    contained within the multi-interface dataset."""
    from . import Dataset
    # Value dimensions are dropped whenever the dataset carries a
    # (non-None) 'level' attribute.
    if getattr(dataset, 'level', None) is None:
        vdims = dataset.vdims
    else:
        vdims = []
    return Dataset(dataset.data[0], datatype=cls.subtypes,
                   kdims=dataset.kdims, vdims=vdims)
def select_edges_by(docgraph, layer=None, edge_type=None, data=False):
    """get all edges with the given edge type and layer.

    Parameters
    ----------
    docgraph : DiscourseDocumentGraph
        document graph from which the nodes will be extracted
    layer : str
        name of the layer
    edge_type : str
        Type of the edges to be extracted (Edge types are defined in the
        Enum ``EdgeTypes``).
    data : bool
        If True, results will include edge attributes.

    Returns
    -------
    edges : generator of str
        a container/list of edges (represented as (source node ID, target
        node ID) tuples). If data is True, edges are represented as
        (source node ID, target node ID, edge attribute dict) tuples.
    """
    # Build the list of eval-style filter conditions; edge-type filter
    # (when present) is evaluated before the layer filter.
    conditions = []
    if edge_type is not None:
        conditions.append("edge_attribs['edge_type'] == '{}'".format(edge_type))
    if layer is not None:
        conditions.append("'{}' in edge_attribs['layers']".format(layer))
    if conditions:
        return select_edges(docgraph, conditions=conditions, data=data)
    # Neither layer nor edge type requested: return all edges unfiltered.
    return docgraph.edges_iter(data=data)
def version():
    """Extracts version from the ``__init__.py`` file at the module's root.

    Inspired by: https://packaging.python.org/single_source_version/
    """
    global _version
    # Memoized: parse the package file only once per process.
    if _version:
        return _version
    contents = read_file(MODULE_NAME, '__init__.py')
    match = re.search(r'^__version__\s*=\s*[\'"]([^\'"]*)[\'"]', contents, re.M)
    if not match:
        raise RuntimeError("Unable to find version string in __init__.py .")
    _version = match.group(1)
    # noqa
    return _version
def reflected_binary_operator(op):
    """Factory function for making binary operator methods on a Factor.

    Returns a function, "reflected_binary_operator" suitable for implementing
    functions like __radd__.
    """
    # Reflected comparisons are handled elsewhere; this factory only supports
    # arithmetic-style binary operators.
    assert not is_comparison(op)

    @with_name(method_name_for_op(op, commute=True))
    @coerce_numbers_to_my_dtype
    def reflected_binary_operator(self, other):
        # Reflected call: `self` is the right-hand operand, so the rendered
        # expression places `other` on the left of `op`.
        if isinstance(self, NumericalExpression):
            self_expr, other_expr, new_inputs = self.build_binary_op(op, other)
            return NumExprFactor("({left}) {op} ({right})".format(left=other_expr, right=self_expr, op=op,), new_inputs, dtype=binop_return_dtype(op, other.dtype, self.dtype))
        # Only have to handle the numeric case because in all other valid cases
        # the corresponding left-binding method will be called.
        elif isinstance(other, Number):
            # NOTE(review): `other.dtype` on a Number relies on
            # `coerce_numbers_to_my_dtype` having already wrapped `other` in a
            # dtype-carrying value — confirm against that decorator.
            return NumExprFactor("{constant} {op} x_0".format(op=op, constant=other), binds=(self,), dtype=binop_return_dtype(op, other.dtype, self.dtype),)
        raise BadBinaryOperator(op, other, self)
    return reflected_binary_operator
def concatenate(fname1, fname2, dfilter1=None, dfilter2=None, has_header1=True, has_header2=True, frow1=0, frow2=0, ofname=None, ocols=None,):
    r"""Concatenate two comma-separated values file.

    Data rows from the second file are appended at the end of the data rows
    from the first file

    :param fname1: Name of the first comma-separated values file, the file
                   whose data appears first in the output file
    :type  fname1: FileNameExists_

    :param fname2: Name of the second comma-separated values file, the file
                   whose data appears last in the output file
    :type  fname2: FileNameExists_

    :param dfilter1: Row and/or column filter for the first file. If None no
                     data filtering is done on the file
    :type  dfilter1: :ref:`CsvDataFilter` or None

    :param dfilter2: Row and/or column filter for the second file. If None no
                     data filtering is done on the file
    :type  dfilter2: :ref:`CsvDataFilter` or None

    :param has_header1: Flag that indicates whether the first comma-separated
                        values file has column headers in its first line (True)
                        or not (False)
    :type  has_header1: boolean

    :param has_header2: Flag that indicates whether the second comma-separated
                        values file has column headers in its first line (True)
                        or not (False)
    :type  has_header2: boolean

    :param frow1: First comma-separated values file first data row (starting
                  from 1). If 0 the row where data starts is auto-detected as
                  the first row that has a number (integer or float) in at
                  least one of its columns
    :type  frow1: NonNegativeInteger_

    :param frow2: Second comma-separated values file first data row (starting
                  from 1). If 0 the row where data starts is auto-detected as
                  the first row that has a number (integer or float) in at
                  least one of its columns
    :type  frow2: NonNegativeInteger_

    :param ofname: Name of the output comma-separated values file, the file
                   that will contain the data from the first and second files.
                   If None the first file is replaced "in place"
    :type  ofname: FileName_ or None

    :param ocols: Column names of the output comma-separated values file.
                  If None the column names in the first file are used if
                  **has_header1** is True or the column names in the second
                  file are used if **has_header1** is False and
                  **has_header2** is True, otherwise no header is used
    :type  ocols: list or None

    .. [[[cog cog.out(exobj.get_sphinx_autodoc(raised=True)) ]]]
    .. Auto-generated exceptions documentation for
    .. pcsv.concatenate.concatenate

    :raises:
     * OSError (File *[fname]* could not be found)

     * RuntimeError (Argument \`dfilter1\` is not valid)

     * RuntimeError (Argument \`dfilter2\` is not valid)

     * RuntimeError (Argument \`fname1\` is not valid)

     * RuntimeError (Argument \`fname2\` is not valid)

     * RuntimeError (Argument \`frow1\` is not valid)

     * RuntimeError (Argument \`frow2\` is not valid)

     * RuntimeError (Argument \`ocols\` is not valid)

     * RuntimeError (Argument \`ofname\` is not valid)

     * RuntimeError (Column headers are not unique in file *[fname]*)

     * RuntimeError (File *[fname]* has no valid data)

     * RuntimeError (File *[fname]* is empty)

     * RuntimeError (Files have different number of columns)

     * RuntimeError (Invalid column specification)

     * RuntimeError (Number of columns in data files and output columns are
       different)

     * ValueError (Column *[column_identifier]* not found)

    .. [[[end]]]
    """
    # pylint: disable=R0913,R0914
    # Registered exception conditions: raised when their boolean argument
    # evaluates True at the call site below.
    iro = pexdoc.exh.addex(RuntimeError, "Files have different number of columns")
    iom = pexdoc.exh.addex(RuntimeError, "Number of columns in data files and output columns are different")
    # Read and validate file 1
    obj1 = CsvFile(fname=fname1, dfilter=dfilter1, has_header=has_header1, frow=frow1)
    # Read and validate file 2
    obj2 = CsvFile(fname=fname2, dfilter=dfilter2, has_header=has_header2, frow=frow2)
    # Assign output data structure; None means replace file 1 "in place"
    ofname = fname1 if ofname is None else ofname
    # Create new header: prefer explicit ocols, then file 1's header, then
    # file 2's header, otherwise no header row at all.
    if (ocols is None) and has_header1:
        ocols = [obj1.header()] if obj1.cfilter is None else [obj1.cfilter]
    elif (ocols is None) and has_header2:
        ocols = [obj2.header()] if obj2.cfilter is None else [obj2.cfilter]
    elif ocols is None:
        ocols = []
    else:
        # User-supplied output columns must match the (filtered) column count
        iom((obj1.cfilter is not None) and (len(obj1.cfilter) != len(ocols)))
        ocols = [ocols]
    # Miscellaneous data validation: both files must expose the same number
    # of (filtered) columns.
    iro(_C(obj1.cfilter, obj2.cfilter) and (len(obj1.cfilter) != len(obj2.cfilter)))
    # Write final output: optional header row, then file 1 rows, then file 2 rows
    data = ocols + obj1.data(filtered=True) + obj2.data(filtered=True)
    write(fname=ofname, data=data, append=False)
def generateSequence(self, text, preprocess=False):
    """Return a list of lists representing the text sequence in network data
    format. Does not preprocess the text.

    Note: the ``preprocess`` argument is currently accepted but ignored
    (see the TODO below).
    """
    # TODO: enable text preprocessing; abstract out the logic in split() into a common method.
    tokens = TextPreprocess().tokenize(text)
    # Category -1: this sequence carries no class label.
    cat = [-1]
    self.sequenceCount += 1
    uniqueID = "q"
    data = self._formatSequence(tokens, cat, self.sequenceCount - 1, uniqueID)
    return data
def iter_sections(self, recursive=False, path=None, key='path'):
    """See :meth:`.iter_all` for standard iterator argument descriptions.

    Returns:
        iterator: iterator over ``(key, section)`` pairs of all sections
        in this section (and sub-sections if ``recursive=True``).
    """
    for item in self.iter_all(recursive=recursive, path=path, key=key):
        # With key=None iter_all yields bare entries; otherwise (key, entry)
        # pairs — unwrap accordingly before testing for section-ness.
        entry = item if key is None else item[1]
        if entry.is_section:
            yield item
def _show_or_dump(self, dump=False, indent=3, lvl="", label_lvl="", first_call=True):
    """Reproduced from packet.py

    Render this layer (and its payload, recursively) as a human-readable
    field listing.

    :param dump: if True, use a plain ANSI theme and return the string
        instead of printing it
    :param indent: number of spaces per nesting level
    :param lvl: current indentation prefix for field lines
    :param label_lvl: prefix applied to the layer banner and field lines
    :param first_call: True only for the outermost invocation; inner
        (payload) calls always return the string
    :returns: the rendered string, or None when printed directly
    """
    ct = AnsiColorTheme() if dump else conf.color_theme
    s = "%s%s %s %s \n" % (label_lvl, ct.punct("###["), ct.layer_name(self.name), ct.punct("]###"))
    # Original code duplicated this body verbatim for fields_desc[-1] after
    # looping over fields_desc[:-1] (and crashed on an empty fields_desc);
    # a single loop over all fields is equivalent and handles the empty case.
    for f in self.fields_desc:
        fvalue = self.getfieldval(f.name)
        begn = "%s %-10s%s " % (label_lvl + lvl, ct.field_name(f.name), ct.punct("="),)
        reprval = f.i2repr(self, fvalue)
        if isinstance(reprval, str):
            # Continuation lines of multi-line values align under the value.
            reprval = reprval.replace("\n", "\n" + " " * (len(label_lvl) + len(lvl) + len(f.name) + 4))
        s += "%s%s\n" % (begn, ct.field_value(reprval))
    if self.payload:
        s += self.payload._show_or_dump(dump=dump, indent=indent, lvl=lvl + (" " * indent * self.show_indent),  # noqa: E501
                                        label_lvl=label_lvl, first_call=False)
    # noqa: E501
    # Outermost non-dump call prints; every other call returns the string.
    if first_call and not dump:
        print(s)
    else:
        return s
def get_apod(cls, date=None, hd=False):
    """Returns Astronomy Picture of the Day

    Args:
        date: date instance (default = today)
        hd: bool if high resolution should be included

    Returns:
        json
    """
    # Build the endpoint wrapper and query it with the requested filters.
    return cls('planetary/apod').get_resource(date=date, hd=hd)
def markdown(doc, title=True, template='short_documentation.md'):
    """Markdown, specifically for the Notes field in a CKAN dataset"""
    from jinja2 import Environment, PackageLoader, select_autoescape
    env = Environment(
        loader=PackageLoader('metapack', 'support/templates')
        # autoescape=select_autoescape(['html', 'xml'])
    )
    # Render the requested template against the document's display context.
    return env.get_template(template).render(**display_context(doc))
def _un_meta_name ( self , name ) :
"""Reverse of _ meta _ name""" | if name . startswith ( 'HTTP_' ) :
name = name [ 5 : ]
return name . replace ( '_' , '-' ) . title ( ) |
def _GetStat(self):
    """Retrieves information about the file entry.

    Returns:
        VFSStat: a stat object.
    """
    result = vfs_stat.VFSStat()
    # Size is only known when a compressed stream is attached.
    stream = self._compressed_stream
    if stream:
        result.size = stream.get_size()
    result.type = self.entry_type
    return result
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.