signature
stringlengths 29
44.1k
| implementation
stringlengths 0
85.2k
|
|---|---|
def calc_qib2_v1(self):
    """Calculate the second interflow component released from the soil.

    Basic equation:
        :math:`QIB2 = (DMax - DMin) \\cdot
        (\\frac{BoWa - WZ}{NFk - WZ})^{\\frac{3}{2}}`

    Reads the control parameters |NHRU|, |Lnk|, |NFk|, |DMin| and |DMax|,
    the derived parameter |WZ| and the state sequence |BoWa|; writes the
    flux sequence |QIB2|.

    No interflow is generated for water and sealed areas (|VERS|,
    |WASSER|, |FLUSS|, |SEE|), for soils whose water content does not
    exceed |WZ|, or when |NFk| <= |WZ| (which guards against a zero
    division in the basic equation).
    """
    con = self.parameters.control.fastaccess
    der = self.parameters.derived.fastaccess
    flu = self.sequences.fluxes.fastaccess
    sta = self.sequences.states.fastaccess
    for k in range(con.nhru):
        if con.lnk[k] in (VERS, WASSER, FLUSS, SEE):
            # water surfaces and sealed areas release no interflow
            flu.qib2[k] = 0.
            continue
        surplus = sta.bowa[k] - der.wz[k]
        capacity = con.nfk[k] - der.wz[k]
        if (surplus <= 0.) or (capacity <= 0.):
            # soil water below the release threshold, or NFk <= WZ
            # (which would otherwise divide by zero or a negative value)
            flu.qib2[k] = 0.
        else:
            flu.qib2[k] = (con.dmax[k] - con.dmin[k]) * (surplus / capacity) ** 1.5
|
def create_stack(self, args):
    """Create a service stack.

    Creates a new service stack with the given name and the services
    under it.

    Args:
        - args: stack description; see http://kirk-docs.qiniu.com/apidocs/

    Returns:
        A tuple ``(<result>, <ResponseInfo>)``:
        - result: an empty dict ``{}`` on success, or
          ``{"error": "<errMsg string>"}`` on failure
        - ResponseInfo: response information of the request
    """
    url = '{0}/v3/stacks'.format(self.host)
    return self.__post(url, args)
|
def _eq ( field , value , document ) :
"""Returns True if the value of a document field is equal to a given value"""
|
try :
return document . get ( field , None ) == value
except TypeError : # pragma : no cover Python < 3.0
return False
|
def team_accessLogs(self, **kwargs) -> SlackResponse:
    """Fetch the access logs for the current team.

    Extra keyword arguments are forwarded as query parameters of the
    ``team.accessLogs`` Web API method.
    """
    # This endpoint requires a user (xoxp) token, so validate it first.
    self._validate_xoxp_token()
    return self.api_call(
        "team.accessLogs",
        http_verb="GET",
        params=kwargs,
    )
|
def model_to_json(self, object, cleanup=True):
    """Serialize a model instance into a JSON-compatible structure.

    :param object: the model instance to marshal (parameter name kept
        for backward compatibility although it shadows the builtin)
    :param cleanup: when True, run ``cleanup_model`` on the result
    :raises ValidationError: if the swagger spec has no definition for
        the instance's model class
    :return: the marshalled structure
    """
    model_name = type(object).__name__
    definitions = self.swagger_dict['definitions']
    if model_name not in definitions:
        raise ValidationError("Swagger spec has no definition for model %s" % model_name)
    log.debug("Marshalling %s into json" % model_name)
    marshalled = marshal_model(self.spec, definitions[model_name], object)
    if cleanup:
        self.cleanup_model(marshalled)
    return marshalled
|
def _set_route_profiletype(self, v, load=False):
    """Setter method for route_profiletype, mapped from YANG variable
    /hardware/profile/route/predefined/route_profiletype (route-profile-subtype).

    If this variable is read-only (config: false) in the source YANG file,
    then _set_route_profiletype is considered as a private method. Backends
    looking to populate this variable should do so via calling
    thisObj._set_route_profiletype() directly.
    """
    # Unwrap values that carry their own YANG union-type constructor.
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        # Validate/coerce against the restricted enumeration defined by the
        # YANG model: 'default' -> 0, 'route-enhance' -> 1.
        t = YANGDynClass(v, base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'default': {'value': 0}, u'route-enhance': {'value': 1}},), is_leaf=True, yang_name="route_profiletype", rest_name="route_profiletype", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None, u'cli-suppress-no': None}}, namespace='urn:brocade.com:mgmt:brocade-hardware', defining_module='brocade-hardware', yang_type='route-profile-subtype', is_config=True)
    except (TypeError, ValueError):
        # Re-raise with the generated, user-facing error payload expected by
        # the framework (runtime string kept verbatim).
        raise ValueError({'error-string': """route_profiletype must be of a type compatible with route-profile-subtype""", 'defined-type': "brocade-hardware:route-profile-subtype", 'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'default': {'value': 0}, u'route-enhance': {'value': 1}},), is_leaf=True, yang_name="route_profiletype", rest_name="route_profiletype", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None, u'cli-suppress-no': None}}, namespace='urn:brocade.com:mgmt:brocade-hardware', defining_module='brocade-hardware', yang_type='route-profile-subtype', is_config=True)""", })
    self.__route_profiletype = t
    # Notify the parent object, when present, that configuration changed.
    if hasattr(self, '_set'):
        self._set()
|
def reverse_list(head):
    """Reverse a singly linked list iteratively.

    :type head: ListNode
    :rtype: ListNode -- the new head (the previous tail)
    """
    if not head or not head.next:
        # empty list or single node: nothing to reverse
        return head
    reversed_head = None
    node = head
    while node:
        successor = node.next
        node.next = reversed_head
        reversed_head = node
        node = successor
    return reversed_head
|
def intra_mean(self, values):
    """Calculate the mean of a quantity within each stratum.

    Parameters
    ----------
    values : array-like, shape=(n_items, n_class)
        values of the quantity for each item in the pool

    Returns
    -------
    numpy.ndarray, shape=(n_strata, n_class)
        mean value of the quantity within each stratum
    """
    # TODO Check that quantity is valid
    if values.ndim > 1:
        stratum_means = [np.mean(values[alloc, :], axis=0)
                         for alloc in self.allocations_]
    else:
        stratum_means = [np.mean(values[alloc]) for alloc in self.allocations_]
    return np.array(stratum_means)
|
def add_row(self, *cells, color=None, escape=None, mapper=None, strict=True):
    """Add a row of cells to the table.

    Args
    ----
    cells: iterable, such as a `list` or `tuple`
        There's two ways to use this method. The first method is to pass
        the content of each cell as a separate argument. The second method
        is to pass a single argument that is an iterable that contains each
        contents.
    color: str
        The name of the color used to highlight the row
    escape: bool or None
        Whether to escape cell contents; defaults to the table's own
        ``escape`` setting when ``None``.
    mapper: callable or `list`
        A function or a list of functions that should be called on all
        entries of the list after converting them to a string,
        for instance bold
    strict: bool
        Check for correct count of cells in row or not.
    """
    # NOTE(review): ``cells`` is always a tuple here, so ``_is_iterable(cells)``
    # presumably always holds and this condition reduces to len(cells) == 1 —
    # confirm against _is_iterable's definition.
    if len(cells) == 1 and _is_iterable(cells):
        cells = cells[0]
    if escape is None:
        escape = self.escape
    # Propagate packages used in cells
    for c in cells:
        if isinstance(c, LatexObject):
            for p in c.packages:
                self.packages.add(p)
    # Count cell contents; a MultiColumn spans several table columns.
    cell_count = 0
    for c in cells:
        if isinstance(c, MultiColumn):
            cell_count += c.size
        else:
            cell_count += 1
    if strict and cell_count != self.width:
        msg = "Number of cells added to table ({}) " "did not match table width ({})".format(cell_count, self.width)
        raise TableRowSizeError(msg)
    if color is not None:
        if not self.color:
            # Row coloring needs xcolor's "table" option; add it once.
            self.packages.append(Package("xcolor", options='table'))
            self.color = True
        color_command = Command(command="rowcolor", arguments=color)
        self.append(color_command)
    # Emit the cells joined by '&' and terminated by LaTeX's row break.
    self.append(dumps_list(cells, escape=escape, token='&', mapper=mapper) + NoEscape(r'\\'))
|
def finalize(self, **kwargs):
    """Finalize executes any subclass-specific axes finalization steps.

    The user calls poof and poof calls finalize.
    """
    # One x-tick position per known class
    indices = np.arange(len(self.classes_))
    # Set the title
    self.set_title("Class Prediction Error for {}".format(self.name))
    # Set the x ticks with the class names
    self.ax.set_xticks(indices)
    self.ax.set_xticklabels(self.classes_)
    # Set the axes labels
    self.ax.set_xlabel("actual class")
    self.ax.set_ylabel("number of predicted class")
    # Compute the ceiling for the y limit: tallest stacked bar plus 10% headroom
    cmax = max([sum(predictions) for predictions in self.predictions_])
    self.ax.set_ylim(0, cmax + cmax * 0.1)
    # Put the legend outside of the graph, and shrink the plot area so
    # the legend does not overlap it
    plt.legend(bbox_to_anchor=(1.04, 0.5), loc="center left")
    plt.tight_layout(rect=[0, 0, 0.85, 1])
|
def wait_for_stateful_block_init(context, mri, timeout=DEFAULT_TIMEOUT):
    """Wait until a Block backed by a StatefulController has initialized.

    Args:
        context (Context): The context to use to make the child block
        mri (str): The mri of the child block
        timeout (float): The maximum time to wait
    """
    # Block until the child's state value becomes READY.  FAULT and
    # DISABLED are passed as bad_values, so reaching either presumably
    # aborts the wait instead of blocking until the timeout.
    context.when_matches([mri, "state", "value"], StatefulStates.READY, bad_values=[StatefulStates.FAULT, StatefulStates.DISABLED], timeout=timeout)
|
def seqnum(self, value):
    """Set SeqNum for Annotation.

    :param value: SeqNum value
    :type value: int
    :raises AttributeError: if ``value`` is not a non-negative integer
    """
    # The previous check, ``re.match(r'\d+', str(value))``, accepted
    # partial matches (e.g. 3.7 passed because the leading "3" matched)
    # and raised TypeError instead of AttributeError for numeric strings
    # on Python 3 ("5" < 0 is an invalid comparison).  An explicit
    # integer check enforces the documented contract directly.
    # bool is excluded because it is a subclass of int, and the old
    # code rejected True/False as well.
    if not isinstance(value, int) or isinstance(value, bool) or value < 0:
        raise AttributeError("Invalid SeqNum value supplied")
    self._seqnum = value
|
def parse_authorization_header(value):
    """Parse an HTTP ``Authorization`` header value.

    Returns ``None`` on failure.  On success returns a dict with ``type``
    set to ``'basic'`` or ``'digest'`` plus the scheme's parameters
    (``username``/``password`` for basic; the key/value pairs for digest).

    <http://nullege.com/codes/search/werkzeug.http.parse_authorization_header>
    <http://stackoverflow.com/questions/1349367/parse-an-http-request-authorization-header-with-python>
    <http://bugs.python.org/file34041/0001-Add-an-authorization-header-to-the-initial-request.patch>
    """
    try:
        auth_type, auth_info = value.split(' ', 1)
        auth_type = auth_type.lower()
    except ValueError:
        # no space in the header value: malformed
        return
    if auth_type == 'basic':
        try:
            # b64decode gives bytes in python3
            decoded = base64.b64decode(auth_info).decode('utf-8')
            username, password = decoded.split(':', 1)
        except (ValueError, TypeError):  # py3, py2
            return
        return {'type': 'basic', 'username': username, 'password': password}
    if auth_type == 'digest':
        try:
            auth_map = parse_keqv_list(parse_http_list(auth_info))
        except ValueError:
            return
        logging.debug(auth_map)
        required = ('username', 'realm', 'nonce', 'uri', 'response')
        if any(key not in auth_map for key in required):
            return
        if 'qop' in auth_map and not ('nc' in auth_map and 'cnonce' in auth_map):
            return
        auth_map['type'] = 'digest'
        return auth_map
    # unknown auth type
    return
|
def get_parent(self, log_info):
    """Get the parent container path for the log sink.

    :param log_info: object with ``scope_type`` and ``scope_id``; only
        consulted when the configured scope is ``'log'`` (the default)
    :return: parent resource path, e.g. ``projects/<id>``
    :raises ValueError: on an invalid scope or scope combination
    """
    scope = self.data.get('scope', 'log')
    if scope == 'log':
        if log_info.scope_type != 'projects':
            raise ValueError("Invalid log subscriber scope")
        parent = "%s/%s" % (log_info.scope_type, log_info.scope_id)
    elif scope == 'project':
        # Only fall back to the session's default project when no explicit
        # scope_id is configured.  The previous code always invoked
        # get_default_project() to build the .get() default, even when its
        # result was discarded.
        scope_id = self.data.get('scope_id')
        if scope_id is None:
            scope_id = self.session.get_default_project()
        parent = 'projects/{}'.format(scope_id)
    elif scope == 'organization':
        parent = 'organizations/{}'.format(self.data['scope_id'])
    elif scope == 'folder':
        parent = 'folders/{}'.format(self.data['scope_id'])
    elif scope == 'billing':
        parent = 'billingAccounts/{}'.format(self.data['scope_id'])
    else:
        raise ValueError('invalid log subscriber scope %s' % (self.data))
    return parent
|
def hasmethod(obj, meth):
    """Check whether *obj* has a callable method named *meth*.

    Returns True or False.
    """
    # getattr with a None default folds the hasattr check into one lookup;
    # callable(None) is False, so a missing attribute yields False.
    return callable(getattr(obj, meth, None))
|
def _ondim ( self , dimension , valuestring ) :
"""Converts valuestring to int and assigns result to self . dim
If there is an error ( such as an empty valuestring ) or if
the value is < 1 , the value 1 is assigned to self . dim
Parameters
dimension : int
\t Dimension that is to be updated . Must be in [ 1:4]
valuestring : string
\t A string that can be converted to an int"""
|
try :
self . dimensions [ dimension ] = int ( valuestring )
except ValueError :
self . dimensions [ dimension ] = 1
self . textctrls [ dimension ] . SetValue ( str ( 1 ) )
if self . dimensions [ dimension ] < 1 :
self . dimensions [ dimension ] = 1
self . textctrls [ dimension ] . SetValue ( str ( 1 ) )
|
def createMSBWTFromSeqs(seqArray, mergedDir, numProcs, areUniform, logger):
    '''This function takes a series of sequences and creates the BWT using the technique from Cox and Bauer.

    @param seqArray - a list of '$'-terminated sequences to be in the MSBWT
    @param mergedDir - the destination directory for the generated BWT files
    @param numProcs - the number of processes it's allowed to use
    @param areUniform - True when every sequence has the same length
    @param logger - logger forwarded to the downstream generator
    '''
    # wipe the auxiliary data stored here
    MSBWTGen.clearAuxiliaryData(mergedDir)
    # TODO: do we want a special case for N=1? there was one in early code, but we could just assume users aren't dumb
    seqFN = mergedDir + '/seqs.npy'
    offsetFN = mergedDir + '/offsets.npy'
    # sort the sequences
    sortedSeqs = sorted(seqArray)
    if areUniform:
        uniformLength = len(seqArray[0])
    else:
        uniformLength = 0
    # join into one massive string
    joined = ''.join(sortedSeqs)
    # convert the sequences into uint8s and then save it.
    # np.fromstring on text is deprecated (removed in recent NumPy);
    # encode with latin-1 (one byte per code point, preserving the old
    # byte values) and use frombuffer.  The .copy() keeps the array
    # writable, as fromstring's returned copy was.
    seqData = np.frombuffer(joined.encode('latin-1'), dtype='<u1').copy()
    MSBWTGen.writeSeqsToFiles(seqData, seqFN, offsetFN, uniformLength)
    MSBWTGen.createFromSeqs(seqFN, offsetFN, mergedDir + '/msbwt.npy', numProcs, areUniform, logger)
|
def is_valid(self, qstr=None):
    """Return True if the given string (or the current text when *qstr*
    is None) names an existing directory."""
    if qstr is None:
        qstr = self.currentText()
    directory = to_text_string(qstr)
    return osp.isdir(directory)
|
def _make_canonical_headers ( headers , headers_to_sign ) :
"""Return canonicalized headers .
@ param headers : The request headers .
@ type headers : L { dict }
@ param headers _ to _ sign : A sequence of header names that should be
signed .
@ type headers _ to _ sign : A sequence of L { bytes }
@ return : The canonicalized headers .
@ rtype : L { bytes }"""
|
pairs = [ ]
for name in headers_to_sign :
if name not in headers :
continue
values = headers [ name ]
if not isinstance ( values , ( list , tuple ) ) :
values = [ values ]
comma_values = b',' . join ( ' ' . join ( line . strip ( ) . split ( ) ) for value in values for line in value . splitlines ( ) )
pairs . append ( ( name . lower ( ) , comma_values ) )
sorted_pairs = sorted ( b'%s:%s' % ( name , value ) for name , value in sorted ( pairs ) )
return b'\n' . join ( sorted_pairs ) + b'\n'
|
def get_permalink_ids_iter(self):
    '''Yield the permalink ids found in this content's metadata.

    To be bound to the class last thing.
    '''
    key = self.settings['PERMALINK_ID_METADATA_KEY']
    raw_ids = self.metadata.get(key, '')
    for candidate in raw_ids.split(','):
        if candidate:
            yield candidate.strip()
|
def hash_array(vals, encoding='utf8', hash_key=None, categorize=True):
    """Given a 1d array, return an array of deterministic integers.

    .. versionadded:: 0.19.2

    Parameters
    ----------
    vals : ndarray, Categorical
    encoding : string, default 'utf8'
        encoding for data & key when strings
    hash_key : string key to encode, default to _default_hash_key
    categorize : bool, default True
        Whether to first categorize object arrays before hashing. This is more
        efficient when the array contains duplicate values.

        .. versionadded:: 0.20.0

    Returns
    -------
    1d uint64 numpy array of hash values, same length as the vals
    """
    if not hasattr(vals, 'dtype'):
        raise TypeError("must pass a ndarray-like")
    dtype = vals.dtype
    if hash_key is None:
        hash_key = _default_hash_key

    # For categoricals, we hash the categories, then remap the codes to the
    # hash values. (This check is above the complex check so that we don't ask
    # numpy if categorical is a subdtype of complex, as it will choke).
    if is_categorical_dtype(dtype):
        return _hash_categorical(vals, encoding, hash_key)
    elif is_extension_array_dtype(dtype):
        vals, _ = vals._values_for_factorize()
        dtype = vals.dtype

    # we'll be working with everything as 64-bit values, so handle this
    # 128-bit value early
    if np.issubdtype(dtype, np.complex128):
        return hash_array(vals.real) + 23 * hash_array(vals.imag)

    # First, turn whatever array this is into unsigned 64-bit ints, if we can
    # manage it.
    elif dtype == np.bool_:
        # BUG FIX: this check was ``isinstance(dtype, np.bool)``, which is
        # always False (a dtype instance is never a bool) so boolean arrays
        # fell through to the slow object path, and ``np.bool`` is removed in
        # NumPy >= 1.24, raising AttributeError here for every non-complex
        # array.  Comparing the dtype itself is the intended check.
        vals = vals.astype('u8')
    elif issubclass(dtype.type, (np.datetime64, np.timedelta64)):
        vals = vals.view('i8').astype('u8', copy=False)
    elif issubclass(dtype.type, np.number) and dtype.itemsize <= 8:
        vals = vals.view('u{}'.format(vals.dtype.itemsize)).astype('u8')
    else:
        # With repeated values, its MUCH faster to categorize object dtypes,
        # then hash and rename categories. We allow skipping the categorization
        # when the values are known/likely to be unique.
        if categorize:
            from pandas import factorize, Categorical, Index
            codes, categories = factorize(vals, sort=False)
            cat = Categorical(codes, Index(categories), ordered=False, fastpath=True)
            return _hash_categorical(cat, encoding, hash_key)
        try:
            vals = hashing.hash_object_array(vals, hash_key, encoding)
        except TypeError:
            # we have mixed types
            vals = hashing.hash_object_array(vals.astype(str).astype(object), hash_key, encoding)

    # Then, redistribute these 64-bit ints within the space of 64-bit ints
    # (splitmix64-style finalizer).
    vals ^= vals >> 30
    vals *= np.uint64(0xbf58476d1ce4e5b9)
    vals ^= vals >> 27
    vals *= np.uint64(0x94d049bb133111eb)
    vals ^= vals >> 31
    return vals
|
def newChannelOpened_channel_(self, notif, newChannel):
    """Handle a client connecting to the server channel.

    (This method is called for both RFCOMM and L2CAP channels.)
    """
    if newChannel is None or not newChannel.isIncoming():
        return
    # not sure if delegate really needs to be set
    newChannel.setDelegate_(self)
    callback_obj = self.__cb_obj
    if hasattr(callback_obj, '_handle_channelopened'):
        callback_obj._handle_channelopened(newChannel)
|
def get_operator(self, operator):
    """Map a DjangoQL comparison operator to an ORM lookup suffix.

    :param operator: string, DjangoQL comparison operator
    :return: (suffix, invert) - a tuple with 2 values:
        suffix - suffix to be used in ORM query, for example '__gt' for '>'
        invert - boolean, True if this comparison needs to be inverted
    """
    direct = {
        '=': '',
        '>': '__gt',
        '>=': '__gte',
        '<': '__lt',
        '<=': '__lte',
        '~': '__icontains',
        'in': '__in',
    }
    if operator in direct:
        return direct[operator], False
    inverted = {'!=': '', '!~': '__icontains', 'not in': '__in'}
    return inverted[operator], True
|
def jacobian(sess, x, grads, target, X, nb_features, nb_classes, feed=None):
    """TensorFlow implementation of the forward derivative / Jacobian.

    :param sess: TF session used to evaluate the gradient tensors
    :param x: the input placeholder
    :param grads: the list of TF gradients returned by jacobian_graph()
    :param target: the target misclassification class
    :param X: numpy array with sample input
    :param nb_features: the number of features in the input
    :param nb_classes: the number of output classes (one gradient each)
    :param feed: optional extra feed_dict entries merged into the run
    :return: (jacobian row for the target class, summed gradients of all
        other classes) -- both flattened into vectors
    """
    warnings.warn("This function is dead code and will be removed on or after 2019-07-18")
    # Prepare feeding dictionary for all gradient computations
    feed_dict = {x: X}
    if feed is not None:
        feed_dict.update(feed)
    # Initialize a numpy array to hold the Jacobian component values
    jacobian_val = np.zeros((nb_classes, nb_features), dtype=np_dtype)
    # Compute the gradients for all classes
    for class_ind, grad in enumerate(grads):
        run_grad = sess.run(grad, feed_dict)
        jacobian_val[class_ind] = np.reshape(run_grad, (1, nb_features))
    # Sum over all classes different from the target class to prepare for
    # saliency map computation in the next step of the attack
    other_classes = utils.other_classes(nb_classes, target)
    grad_others = np.sum(jacobian_val[other_classes, :], axis=0)
    return jacobian_val[target], grad_others
|
def set_difficulty_value(self, difficulty):
    """Set the difficulty decimal value on this record (stub)."""
    if isinstance(difficulty, float):
        self.add_decimal_value(difficulty, 'difficulty')
    else:
        raise InvalidArgument('difficulty value must be a decimal')
|
def touch(self, connection=None):
    """Mark this update as complete.

    IMPORTANT, If the marker table doesn't exist,
    the connection transaction will be aborted and the connection reset.
    Then the marker table will be created.
    """
    self.create_marker_table()
    if connection is None:
        connection = self.connect()
        # if connection created here, we commit it here
        connection.autocommit = True
    # Upsert: insert the (update_id, target_table) row, or refresh the
    # update_id on a duplicate key.
    connection.cursor().execute(
        """INSERT INTO {marker_table} (update_id, target_table)
           VALUES (%s, %s)
           ON DUPLICATE KEY UPDATE
           update_id = VALUES(update_id)
        """.format(marker_table=self.marker_table),
        (self.update_id, self.table)
    )
    # make sure update is properly marked
    assert self.exists(connection)
|
def is_carrier_specific_for_region(numobj, region_dialing_from):
    """Given a valid short number, determine whether it is carrier-specific
    when dialed from the given region (however, nothing is implied about its
    validity).

    Carrier-specific numbers may connect to a different end-point, or not
    connect at all, depending on the user's carrier.  If it is important that
    the number is valid, then its validity must first be checked using
    isValidShortNumber or isValidShortNumberForRegion.  Returns False if the
    number doesn't match the region provided.

    Arguments:
    numobj -- the valid short number to check
    region_dialing_from -- the region from which the number is dialed

    Returns whether the short number is carrier-specific, assuming the input
    was a valid short number.
    """
    if not _region_dialing_from_matches_number(numobj, region_dialing_from):
        return False
    national_number = national_significant_number(numobj)
    metadata = PhoneMetadata.short_metadata_for_region(region_dialing_from)
    if metadata is None:
        # no short-number metadata for this region
        return False
    return _matches_possible_number_and_national_number(national_number, metadata.carrier_specific)
|
def alter_add_column(self, table, column_name, field, **kwargs):
    """Fix fieldname for ForeignKeys."""
    # Remember the field's original name: the parent implementation may
    # rewrite it while building the operation.
    name = field.name
    op = super(SchemaMigrator, self).alter_add_column(table, column_name, field, **kwargs)
    if isinstance(field, pw.ForeignKeyField):
        # Restore the original name so later migration steps see the
        # un-mangled field.  NOTE(review): presumably the superclass only
        # mutates field.name for foreign keys — confirm.
        field.name = name
    return op
|
def replace(self, child, *nodes):
    r"""Replace *child* with the provided node(s), in place.

    :param TexNode child: Child node to replace
    :param TexNode nodes: List of nodes to substitute in at the child's
        former position
    """
    # ``remove`` returns the index the child occupied; insert the
    # replacement nodes at that same position.
    index = self.expr.remove(child.expr)
    self.expr.insert(index, *nodes)
|
def _get_target_from_package_name ( self , target , package_name , file_path ) :
"""Get a dependent target given the package name and relative file path .
This will only traverse direct dependencies of the passed target . It is not necessary
to traverse further than that because transitive dependencies will be resolved under the
direct dependencies and every direct dependencies is symlinked to the target .
Returns ` None ` if the target does not exist .
: param NodePackage target : A subclass of NodePackage
: param string package _ name : A package . json name that is required to be the same as the target name
: param string file _ path : Relative filepath from target to the package in the format ' file : < address _ path > '"""
|
address_path = self . parse_file_path ( file_path )
if not address_path :
return None
dep_spec_path = os . path . normpath ( os . path . join ( target . address . spec_path , address_path ) )
for dep in target . dependencies :
if dep . package_name == package_name and dep . address . spec_path == dep_spec_path :
return dep
return None
|
def merge_segmentations(segs1, segs2, strokes=None):
    """Combine two lists of scored segmentations.

    Parameters
    ----------
    segs1 : a list of tuples
        Each tuple is a segmentation with its score
    segs2 : a list of tuples
        Each tuple is a segmentation with its score
    strokes : list of stroke names for segs2

    Returns
    -------
    list of tuples :
        Segmentations with their score, combined from segs1 and segs2
    """
    def _remap(segmentation, stroke_names):
        # Map every stroke index of a segs2 segmentation onto its name.
        return [[stroke_names[stroke] for stroke in symbol]
                for symbol in segmentation]

    if strokes is None:
        # default: identity mapping over the strokes of the first segmentation
        strokes = list(range(len(segs2[0][0])))
    top_finder = partitions.TopFinder(500)
    for seg1, seg2 in itertools.product(segs1, segs2):
        top_finder.push(seg1[0] + _remap(seg2[0], strokes), seg1[1] * seg2[1])
    return list(top_finder)
|
def create_ipv4(self, id_network_ipv4):
    """Create VLAN in layer 2 using script 'navlan'.

    :param id_network_ipv4: NetworkIPv4 ID.

    :return: Following dictionary:
        {'sucesso': {'codigo': <codigo>,
                     'descricao': {'stdout': <stdout>, 'stderr': <stderr>}}}

    :raise NetworkIPv4NaoExisteError: NetworkIPv4 not found.
    :raise EquipamentoNaoExisteError: Equipment in list not found.
    :raise VlanError: VLAN is active.
    :raise InvalidParameterError: VLAN identifier is none or invalid.
    :raise InvalidParameterError: Equipment list is none or empty.
    :raise DataBaseError: Networkapi failed to access the database.
    :raise XMLError: Networkapi failed to generate the XML response.
    :raise ScriptError: Failed to run the script.
    """
    url = 'vlan/v4/create/'
    vlan_map = dict()
    vlan_map['id_network_ip'] = id_network_ipv4
    # POST the request and decode the XML response into a dictionary.
    code, xml = self.submit({'vlan': vlan_map}, 'POST', url)
    return self.response(code, xml)
|
async def postback_me(msg: BaseMessage, platform: Platform) -> Response:
    """Provide the front-end with details about the user.

    The default payload (friendly name, locale, platform name) can be
    completed/overridden using the ``api_postback_me`` middleware hook.
    """
    async def get_basic_info(_msg: BaseMessage, _platform: Platform):
        # Default implementation used when no middleware overrides the hook.
        user = _msg.get_user()
        return {
            'friendly_name': await user.get_friendly_name(),
            'locale': await user.get_locale(),
            'platform': _platform.NAME,
        }
    func = MiddlewareManager.instance().get('api_postback_me', get_basic_info)
    return json_response(await func(msg, platform))
|
def check_managed_changes(name, source, source_hash, source_hash_name, user, group, mode, attrs, template, context, defaults, saltenv, contents=None, skip_verify=False, keep_mode=False, seuser=None, serole=None, setype=None, serange=None, **kwargs):
    '''Return a dictionary of what changes need to be made for a file.

    .. versionchanged:: Neon
        selinux attributes added

    CLI Example:

    .. code-block:: bash

        salt '*' file.check_managed_changes /etc/httpd/conf.d/httpd.conf salt://http/httpd.conf '{hash_type: 'md5', 'hsum': <md5sum>}' root, root, '755' jinja True None None base
    '''
    # If the source is a list then find which file exists
    source, source_hash = source_list(source,  # pylint: disable=W0633
                                      source_hash, saltenv)
    sfn = ''
    source_sum = None
    if contents is None:
        # Gather the source file from the server
        sfn, source_sum, comments = get_managed(name, template, source, source_hash, source_hash_name, user, group, mode, attrs, saltenv, context, defaults, skip_verify, **kwargs)
        # Ensure that user-provided hash string is lowercase
        if source_sum and ('hsum' in source_sum):
            source_sum['hsum'] = source_sum['hsum'].lower()
        if comments:
            # get_managed reported a failure; clean up and abort
            __clean_tmp(sfn)
            raise CommandExecutionError(comments)
    if sfn and source and keep_mode:
        # Preserve the source file's mode for local/salt fileserver sources
        if _urlparse(source).scheme in ('salt', 'file') or source.startswith('/'):
            try:
                mode = __salt__['cp.stat_file'](source, saltenv=saltenv, octal=True)
            except Exception as exc:
                log.warning('Unable to stat %s: %s', sfn, exc)
    changes = check_file_meta(name, sfn, source, source_sum, user, group, mode, attrs, saltenv, contents, seuser=seuser, serole=serole, setype=setype, serange=serange)
    # Remove the cached temporary file before returning
    __clean_tmp(sfn)
    return changes
|
def system_find_projects(input_params=None, always_retry=True, **kwargs):
    """Invokes the /system/findProjects API method.

    :param input_params: query parameters for the API call; defaults to an
        empty query.  The default is ``None`` (materialized to a fresh dict
        per call) to avoid the shared-mutable-default pitfall of the
        previous ``{}`` default.
    :param always_retry: whether the request may always be safely retried

    For more info, see: https://wiki.dnanexus.com/API-Specification-v1.0.0/Search#API-method%3A-%2Fsystem%2FfindProjects
    """
    if input_params is None:
        input_params = {}
    return DXHTTPRequest('/system/findProjects', input_params, always_retry=always_retry, **kwargs)
|
def _get_cpu_info_internal():
    '''Returns the CPU info by using the best sources of information for your OS.

    Returns {} if nothing is found.
    '''
    # Get the CPU arch and bits
    arch, bits = _parse_arch(DataSource.arch_string_raw)
    friendly_maxsize = {2 ** 31 - 1: '32 bit', 2 ** 63 - 1: '64 bit'}.get(sys.maxsize) or 'unknown bits'
    friendly_version = "{0}.{1}.{2}.{3}.{4}".format(*sys.version_info)
    PYTHON_VERSION = "{0} ({1})".format(friendly_version, friendly_maxsize)
    info = {
        'python_version': PYTHON_VERSION,
        'cpuinfo_version': CPUINFO_VERSION,
        'cpuinfo_version_string': CPUINFO_VERSION_STRING,
        'arch': arch,
        'bits': bits,
        'count': DataSource.cpu_count,
        'arch_string_raw': DataSource.arch_string_raw,
    }
    # Probe every known information source in the same order as before.
    # NOTE(review): _copy_new_fields presumably merges without clobbering
    # existing entries, so this loop preserves the old call sequence exactly.
    probes = (
        _get_cpu_info_from_wmic,                    # Windows wmic
        _get_cpu_info_from_registry,                # Windows registry
        _get_cpu_info_from_proc_cpuinfo,            # /proc/cpuinfo
        _get_cpu_info_from_cpufreq_info,            # cpufreq-info
        _get_cpu_info_from_lscpu,                   # lscpu
        _get_cpu_info_from_sysctl,                  # sysctl
        _get_cpu_info_from_kstat,                   # kstat
        _get_cpu_info_from_dmesg,                   # dmesg
        _get_cpu_info_from_cat_var_run_dmesg_boot,  # /var/run/dmesg.boot
        _get_cpu_info_from_ibm_pa_features,         # lsprop ibm,pa-features
        _get_cpu_info_from_sysinfo,                 # sysinfo
        _get_cpu_info_from_cpuid,                   # CPU cpuid register
        _get_cpu_info_from_platform_uname,          # platform.uname
    )
    for probe in probes:
        _copy_new_fields(info, probe())
    return info
|
def OnWidgetToolbarToggle(self, event):
    """Toggle the widget toolbar pane on or off.

    Makes the toolbar gripper visible, looks up the pane in the AUI
    manager, flips its visibility and lets the event propagate.
    """
    self.main_window.widget_toolbar.SetGripperVisible(True)
    pane = self.main_window._mgr.GetPane("widget_toolbar")
    self._toggle_pane(pane)
    event.Skip()
|
def has_all_nonzero_neurite_radii(neuron, threshold=0.0):
    '''Check for neurite points with a radius at or below threshold.

    Arguments:
        neuron(Neuron): The neuron object to test
        threshold: value above which a radius is considered to be non-zero

    Returns:
        CheckResult with result including list of (section ID, point ID)
        pairs of zero-radius points
    '''
    flagged = []
    recorded = set()
    for section in _nf.iter_sections(neuron):
        for point_id, point in enumerate(section.points):
            ident = (section.id, point_id)
            # record each offending (section, point) pair at most once
            if point[COLS.R] <= threshold and ident not in recorded:
                recorded.add(ident)
                flagged.append(ident)
    return CheckResult(len(flagged) == 0, flagged)
|
def get_incomplete_assessment_sections(self, assessment_taken_id):
    """Gets the incomplete assessment sections of this assessment.

    arg:    assessment_taken_id (osid.id.Id): ``Id`` of the
            ``AssessmentTaken``
    return: (osid.assessment.AssessmentSectionList) - the list of
            incomplete assessment sections
    raise:  IllegalState - ``has_assessment_begun()`` is ``false``
    raise:  NotFound - ``assessment_taken_id`` is not found
    raise:  NullArgument - ``assessment_taken_id`` is ``null``
    raise:  OperationFailed - unable to complete request
    raise:  PermissionDenied - authorization failure occurred
    *compliance: mandatory -- This method must be implemented.*
    """
    incomplete = [section
                  for section in self.get_assessment_sections(assessment_taken_id)
                  if not section.is_complete()]
    return objects.AssessmentSectionList(incomplete, runtime=self._runtime, proxy=self._proxy)
|
def connect_output(self, node):
    """Register ``node`` as a downstream consumer of our output.

    The connected node will automatically be triggered whenever this
    node updates its output.

    Args:
        node (SGNode): The node that should receive our output

    Raises:
        TooManyOutputsError: when the output fan-out limit is reached.
    """
    if self.max_outputs == len(self.outputs):
        raise TooManyOutputsError("Attempted to connect too many nodes to the output of a node", max_outputs=self.max_outputs, stream=self.stream)
    self.outputs.append(node)
|
def _on_mode_change(self, mode):
    """Handle a mode change broadcast from the Abode SocketIO server."""
    # The event payload may wrap the mode in a tuple/list; take the first item.
    if isinstance(mode, (tuple, list)):
        mode = mode[0]
    if mode is None:
        _LOGGER.warning("Mode change event with no mode.")
        return
    if not mode or mode.lower() not in CONST.ALL_MODES:
        _LOGGER.warning("Mode change event with unknown mode: %s", mode)
        return
    _LOGGER.debug("Alarm mode change event to: %s", mode)
    # We're just going to convert it to an Alarm device
    alarm = self._abode.get_alarm(refresh=True)
    # At the time of development, refreshing after mode change notification
    # didn't seem to get the latest update immediately. As such, we will
    # force the mode status now to match the notification.
    # pylint: disable=W0212
    alarm._json_state['mode']['area_1'] = mode
    for cb in self._device_callbacks.get(alarm.device_id, ()):
        _execute_callback(cb, alarm)
|
def _grab_version(self):
    """Ask the user for a non-development version and record it.

    Reads the current version from the VCS (aborting the program if none
    is found), suggests a cleaned-up release version, and stores both
    the original and the chosen version in ``self.data``.
    """
    current = self.vcs.version
    logger.debug("Extracted version: %s", current)
    if current is None:
        logger.critical('No version found.')
        sys.exit(1)
    suggestion = utils.cleanup_version(current)
    # An empty answer means "accept the suggestion".
    chosen = utils.ask_version("Enter version", default=suggestion) or suggestion
    self.data['original_version'] = current
    self.data['new_version'] = chosen
|
def _validate_assets(self, assets):
    """Validate that asset identifiers are contained in the daily bars.

    Parameters
    ----------
    assets : array-like [int]
        The asset identifiers to validate.

    Raises
    ------
    NoDataForSid
        If one or more of the provided asset identifiers are not
        contained in the daily bars.
    """
    unknown = np.setdiff1d(assets, self.sids)
    if len(unknown):
        raise NoDataForSid('Assets not contained in daily pricing file: {}'.format(unknown))
|
def project_sequence(s, permutation=None):
    """Project a sequence of points with `project_point` into xs, ys lists
    for plotting with Matplotlib.

    Parameters
    ----------
    s, Sequence-like
        The sequence of points (3-tuples) to be projected.

    Returns
    -------
    xs, ys : The sequence of projected points in coordinates as two lists
    """
    projected = [project_point(point, permutation=permutation) for point in s]
    xs, ys = unzip(projected)
    return xs, ys
|
def isLearned(self, mode=None):
    """Return 1 if this parameter is learned, 0 otherwise.

    Hidden ("h") parameters are never learned; automatic ("a")
    parameters inherit their behavior from *mode* (defaulting to 'ql',
    i.e. the cl mode); all other parameters are learned.
    """
    own_mode = self.mode
    if "l" in own_mode:
        return 1
    if "h" in own_mode:
        return 0
    if "a" in own_mode:
        # fall back to the cl-wide mode when none was supplied
        effective = 'ql' if mode is None else mode
        if "h" in effective and "l" not in effective:
            return 0
    return 1
|
def set_node_config(self, jid, config, node=None):
    """Update the configuration of a node.

    :param jid: Address of the PubSub service.
    :type jid: :class:`aioxmpp.JID`
    :param config: Configuration form
    :type config: :class:`aioxmpp.forms.Data`
    :param node: Name of the PubSub node to query.
    :type node: :class:`str`
    :raises aioxmpp.errors.XMPPError: as returned by the service

    .. seealso::
        :class:`aioxmpp.pubsub.NodeConfigForm`
    """
    # Build the owner-configure request carrying the form data.
    request = pubsub_xso.OwnerRequest(pubsub_xso.OwnerConfigure(node=node))
    request.payload.data = config
    iq = aioxmpp.stanza.IQ(to=jid, type_=aioxmpp.structs.IQType.SET)
    iq.payload = request
    yield from self.client.send(iq)
|
def process_text(text, save_xml_name='trips_output.xml', save_xml_pretty=True, offline=False, service_endpoint='drum'):
    """Return a TripsProcessor by processing text.

    Parameters
    ----------
    text : str
        The text to be processed.
    save_xml_name : Optional[str]
        The name of the file to save the returned TRIPS extraction knowledge
        base XML. Default: trips_output.xml
    save_xml_pretty : Optional[bool]
        If True, the saved XML is pretty-printed. Some third-party tools
        require non-pretty-printed XMLs which can be obtained by setting this
        to False. Default: True
    offline : Optional[bool]
        If True, offline reading is used with a local instance of DRUM, if
        available. Default: False
    service_endpoint : Optional[str]
        Selects the TRIPS/DRUM web service endpoint to use. Is a choice between
        "drum" (default) and "drum-dev", a nightly build.

    Returns
    -------
    tp : TripsProcessor
        A TripsProcessor containing the extracted INDRA Statements
        in tp.statements. Returns None if offline reading was requested
        but is not available or fails to start.
    """
    if not offline:
        # Online mode: query the TRIPS web service and pull the EKB XML
        # out of the returned HTML page.
        html = client.send_query(text, service_endpoint)
        xml = client.get_xml(html)
    else:
        # offline_reading is a module-level flag indicating whether the
        # local DRUM/pykqml machinery could be imported.
        if offline_reading:
            try:
                dr = DrumReader()
                if dr is None:
                    raise Exception('DrumReader could not be instantiated.')
            except BaseException as e:
                # BaseException: instantiation failures here can surface as
                # non-Exception errors from the KQML layer.
                logger.error(e)
                logger.error('Make sure drum/bin/trips-drum is running in' ' a separate process')
                return None
            try:
                dr.read_text(text)
                dr.start()
            except SystemExit:
                # The reader terminates its loop via SystemExit; this is the
                # normal end of a successful read.
                pass
            # The single submitted text yields a single extraction.
            xml = dr.extractions[0]
        else:
            # offline_err holds the import error recorded at module load time.
            logger.error('Offline reading with TRIPS/DRUM not available.')
            logger.error('Error message was: %s' % offline_err)
            msg = """
    To install DRUM locally, follow instructions at
    https://github.com/wdebeaum/drum.
    Next, install the pykqml package either from pip or from
    https://github.com/bgyori/pykqml.
    Once installed, run drum/bin/trips-drum in a separate process.
    """
            logger.error(msg)
            return None
    # Optionally persist the extraction knowledge base XML before processing.
    if save_xml_name:
        client.save_xml(xml, save_xml_name, save_xml_pretty)
    return process_xml(xml)
|
def get_all_functional_groups(self, elements=None, func_groups=None, catch_basic=True):
    """Identify all functional groups (or all within a certain subset) in the
    molecule, combining the methods described above.

    :param elements: List of elements that will qualify a carbon as special
        (if only certain functional groups are of interest).
        Default None.
    :param func_groups: List of strs representing the functional groups of
        interest. Default to None, meaning that all of the functional groups
        defined in this function will be sought.
    :param catch_basic: bool. If True, use get_basic_functional_groups and
        other methods
    :return: list of sets of ints, representing groups of connected atoms
    """
    # Marked atoms are heteroatoms plus any "special" carbons.
    marked = self.get_heteroatoms(elements=elements)
    marked = marked.union(self.get_special_carbon(elements=elements))
    groups = self.link_marked_atoms(marked)
    if catch_basic:
        groups += self.get_basic_functional_groups(func_groups=func_groups)
    return groups
|
def generate(self):
    '''Generate noise samples.

    Builds a batch of multi-channel sine waves, adds Gaussian noise
    (plus an optional extra noise sampler) and applies the configured
    normalization.

    Returns:
        `np.ndarray` of samples, shaped (batch, sequence, dimension).
    '''
    batch = []
    for _ in range(self.__batch_size):
        # One sine wave per dimension, each as a (1, seq_len) row.
        channels = [
            np.expand_dims(
                self.__generate_sin(
                    amp=self.__amp,
                    sampling_freq=self.__sampling_freq,
                    freq=self.__freq,
                    sec=self.__sec,
                    seq_len=self.__seq_len
                ),
                axis=0
            )
            for _ in range(self.__dim)
        ]
        batch.append(np.expand_dims(np.concatenate(channels, axis=0), axis=0))
    observed_arr = np.concatenate(batch, axis=0)
    # (batch, dim, seq) -> (batch, seq, dim)
    observed_arr = observed_arr.transpose((0, 2, 1))
    observed_arr = observed_arr + np.random.normal(loc=self.__mu, scale=self.__sigma, size=observed_arr.shape)
    if self.noise_sampler is not None:
        self.noise_sampler.output_shape = observed_arr.shape
        observed_arr += self.noise_sampler.generate()
    if self.__norm_mode == "z_score":
        if observed_arr.std() != 0:
            observed_arr = (observed_arr - observed_arr.mean()) / observed_arr.std()
    elif self.__norm_mode == "min_max":
        if (observed_arr.max() - observed_arr.min()) != 0:
            observed_arr = (observed_arr - observed_arr.min()) / (observed_arr.max() - observed_arr.min())
    elif self.__norm_mode == "tanh":
        observed_arr = np.tanh(observed_arr)
    return observed_arr
|
def process_alias_import_namespace(namespace):
    """Validate input arguments when the user invokes 'az alias import'.

    Args:
        namespace: argparse namespace object.
    """
    source = namespace.alias_source
    if is_url(source):
        # Remote file: download first, then validate the content against
        # the original URL.
        downloaded = retrieve_file_from_url(source)
        _validate_alias_file_content(downloaded, url=source)
    else:
        # Local file: normalize to an absolute path before validating.
        namespace.alias_source = os.path.abspath(source)
        _validate_alias_file_path(namespace.alias_source)
        _validate_alias_file_content(namespace.alias_source)
|
def get_header(fn, file_format, header_bytes=20000, verbose=False, *args, **kwargs):
    """Apply rules for detecting the boundary of the header.

    :param str fn: file name
    :param str file_format: either ``AmiraMesh`` or ``HyperSurface``
    :param int header_bytes: number of bytes in which to search for the header [default: 20000]
    :param bool verbose: if True, print the search pattern to stderr
    :return str data: the header as per the ``file_format``
    :raises ValueError: if the header cannot be located within the first
        ``header_bytes`` bytes
    """
    assert header_bytes > 0
    assert file_format in ['AmiraMesh', 'HyperSurface']
    with open(fn, 'rb') as f:
        rough_header = f.read(header_bytes)
    # The file is read in binary mode, so the patterns must be bytes too
    # (a str pattern against bytes raises TypeError on Python 3).
    if file_format == "AmiraMesh":
        pattern = rb'(?P<data>.*)\n@1'
    else:
        pattern = rb'(?P<data>.*)\nVertices [0-9]*\n'
    if verbose:
        # Python 3 print function (was the py2-only ``print >> sys.stderr``).
        print("Using pattern: {}".format(pattern), file=sys.stderr)
    m = re.search(pattern, rough_header, flags=re.S)
    if m is None:
        # Was an AttributeError on ``m.group``; raise something meaningful.
        raise ValueError("Unable to find the header within the first {} bytes".format(header_bytes))
    # select the data; decode to text per the documented str return type
    data = m.group('data')
    return data.decode('utf-8')
|
def extract_keywords_from_sentences(self, sentences):
    """Extract keywords from the list of sentences provided.

    :param sentences: Text to extract keywords from, provided as a list
        of strings, where each string is a sentence.
    """
    # Build candidate phrases first, then derive all ranking structures
    # from them in order.
    phrases = self._generate_phrases(sentences)
    self._build_frequency_dist(phrases)
    self._build_word_co_occurance_graph(phrases)
    self._build_ranklist(phrases)
|
def is_all_field_none(self):
    """Return True when every payload field of this object is unset.

    :rtype: bool
    """
    # All payload attributes that may carry a value; checked lazily so we
    # stop at the first one that is set.
    field_names = (
        '_BunqMeTab',
        '_BunqMeTabResultResponse',
        '_BunqMeFundraiserResult',
        '_Card',
        '_CardDebit',
        '_DraftPayment',
        '_FeatureAnnouncement',
        '_IdealMerchantTransaction',
        '_Invoice',
        '_ScheduledPayment',
        '_ScheduledPaymentBatch',
        '_ScheduledInstance',
        '_MasterCardAction',
        '_BankSwitchServiceNetherlandsIncomingPayment',
        '_Payment',
        '_PaymentBatch',
        '_RequestInquiryBatch',
        '_RequestInquiry',
        '_RequestResponse',
        '_RewardRecipient',
        '_RewardSender',
        '_ShareInviteBankInquiryBatch',
        '_ShareInviteBankInquiry',
        '_ShareInviteBankResponse',
        '_SofortMerchantTransaction',
        '_TabResultInquiry',
        '_TabResultResponse',
        '_TransferwiseTransfer',
    )
    return all(getattr(self, name) is None for name in field_names)
|
def string_to_bytes(string):
    """Return the amount of bytes encoded in a human-readable size string.

    Suffixes b, k, m, g, t, p, e, z, y (case-insensitive) are interpreted
    as powers of 1024; no suffix means plain bytes.

    :param string: integer with suffix (b, k, m, g, t, p, e, z, y)
    :return: amount of bytes as an int

    >>> string_to_bytes('1024')
    1024
    >>> string_to_bytes('1024k')
    1048576
    >>> string_to_bytes('4 G')
    4294967296
    >>> string_to_bytes('4.5g')
    4831838208
    >>> try:
    ...     string_to_bytes('1x')
    ... except RuntimeError as re:
    ...     assert 'unknown suffix' in str(re)
    """
    if string == '0':
        return 0
    import re
    # Raw string: ``\d`` in a plain literal is an invalid escape sequence
    # (SyntaxWarning since Python 3.12).
    match = re.match(r'(\d+\.?\d?)\s?([bBkKmMgGtTpPeEzZyY])?(\D?)', string)
    if not match:
        raise RuntimeError('"{}" does not match "[integer] [suffix]"'.format(string))
    if match.group(3):
        raise RuntimeError('unknown suffix: "{}"'.format(match.group(3)))
    value = float(match.group(1))
    if match.group(2) is None:
        return int(value)
    suffix = match.group(2).upper()
    # 'B' maps to exponent 0; previously it was missing from this table and
    # the documented 'b' suffix crashed with an unrelated ValueError.
    extensions = ['B', 'K', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y']
    value *= 1024 ** extensions.index(suffix)
    return int(value)
|
def delete_publisher(self, publisher_name):
    """DeletePublisher.

    [Preview API] Delete the publisher identified by *publisher_name*.

    :param str publisher_name:
    """
    route_values = {}
    if publisher_name is not None:
        route_values = {'publisherName': self._serialize.url('publisher_name', publisher_name, 'str')}
    self._send(http_method='DELETE',
               location_id='4ddec66a-e4f6-4f5d-999e-9e77710d7ff4',
               version='5.1-preview.1',
               route_values=route_values)
|
def _setup_grid(self, cutoff, unit_cell, grid):
    """Choose a proper grid for the binning process.

    :param cutoff: the neighbor cutoff distance the grid must accommodate
    :param unit_cell: periodic cell (or None for non-periodic systems)
    :param grid: None (auto-select), a float spacing, or a UnitCell
    :return: (grid_cell, integer_cell) where integer_cell is None for
        non-periodic systems
    :raises TypeError: if grid is not None, a float or a UnitCell
    :raises ValueError: if the unit cell vectors are not an integer
        combination of the grid cell vectors
    """
    if grid is None:
        # automatically choose a decent grid
        if unit_cell is None:
            grid = cutoff / 2.9
        else:
            # The following would be faster, but it is not reliable
            # enough yet.
            # grid = unit_cell.get_optimal_subcell(cutoff/2.0)
            divisions = np.ceil(unit_cell.spacings / cutoff)
            divisions[divisions < 1] = 1
            grid = unit_cell / divisions
    if isinstance(grid, float):
        grid_cell = UnitCell(np.array([[grid, 0, 0], [0, grid, 0], [0, 0, grid]]))
    elif isinstance(grid, UnitCell):
        grid_cell = grid
    else:
        raise TypeError("Grid must be None, a float or a UnitCell instance.")
    if unit_cell is not None:
        # The columns of integer_matrix are the unit cell vectors in
        # fractional coordinates of the grid cell.
        integer_matrix = grid_cell.to_fractional(unit_cell.matrix.transpose()).transpose()
        # BUGFIX: use the *argument* unit_cell's active axes here (was
        # self.unit_cell.active, inconsistent with the rest of this method,
        # which operates only on the unit_cell parameter).
        if abs((integer_matrix - np.round(integer_matrix)) * unit_cell.active).max() > 1e-6:
            raise ValueError("The unit cell vectors are not an integer linear combination of grid cell vectors.")
        integer_matrix = integer_matrix.round()
        integer_cell = UnitCell(integer_matrix, unit_cell.active)
    else:
        integer_cell = None
    return grid_cell, integer_cell
|
def get_position(self, topic_partition=None):
    """Return offset of the next record that will be fetched.

    - ``topic_partition`` (TopicPartition): Partition to check
    """
    # Guard clause: reject anything that is not a TopicPartition.
    if not isinstance(topic_partition, TopicPartition):
        raise TypeError("topic_partition must be of type TopicPartition, create it with Create TopicPartition keyword.")
    return self.consumer.position(topic_partition)
|
def get_bucket_inventory(client, bucket, inventory_id):
    """Check a bucket for a named inventory, and return the destination."""
    configs = client.list_bucket_inventory_configurations(Bucket=bucket).get('InventoryConfigurationList', [])
    by_id = {cfg['Id']: cfg for cfg in configs}
    # inventory_id may be a glob pattern; match it against the known ids.
    matches = fnmatch.filter(by_id, inventory_id)
    if not matches:
        return None
    inventory = by_id[matches.pop()]
    destination = inventory['Destination']['S3BucketDestination']
    return {
        # the Bucket field is an ARN; keep only the trailing bucket name
        'bucket': destination['Bucket'].rsplit(':')[-1],
        'prefix': "%s/%s/%s" % (destination['Prefix'], bucket, inventory['Id']),
    }
|
def compute_v(self, memory_antecedent):
    """Compute value Tensor v.

    Args:
        memory_antecedent: a Tensor with dimensions
            {memory_input_dim} + other_dims

    Returns:
        a Tensor with dimensions
            memory_heads_dims + {value_dim} + other_dims
    """
    if self.shared_kv:
        raise ValueError("compute_v cannot be called with shared_kv")
    v = mtf.einsum([memory_antecedent, self.wv], reduced_dims=[self.memory_input_dim])
    if self.combine_dims:
        # split the combined trailing dimension back into the value dims
        v = mtf.replace_dimensions(v, v.shape.dims[-1], self.v_dims)
    return v
|
def _assemble_regulate_amount(self, stmt):
    """Example: p(HGNC:ELK1) => p(HGNC:FOS)"""
    # IncreaseAmount statements map onto an activating causal edge.
    is_increase = isinstance(stmt, IncreaseAmount)
    edge = get_causal_edge(stmt, is_increase)
    self._add_nodes_edges(stmt.subj, stmt.obj, edge, stmt.evidence)
|
def find_mod_objs(modname, onlylocals=False):
    """Returns all the public attributes of a module referenced by name.

    .. note::
        The returned list does *not* include subpackages or modules of
        `modname`, nor does it include private attributes (those that
        begin with '_' or are not in `__all__`).

    Parameters
    ----------
    modname : str
        The name of the module to search.
    onlylocals : bool
        If True, only attributes that are either members of `modname` OR one of
        its modules or subpackages will be included.

    Returns
    -------
    localnames : list of str
        A list of the names of the attributes as they are named in the
        module `modname`.
    fqnames : list of str
        A list of the full qualified names of the attributes (e.g.,
        ``astropy.utils.misc.find_mod_objs``). For attributes that are
        simple variables, this is based on the local name, but for
        functions or classes it can be different if they are actually
        defined elsewhere and just referenced in `modname`.
    objs : list of objects
        A list of the actual attributes themselves (in the same order as
        the other arguments)
    """
    __import__(modname)
    mod = sys.modules[modname]
    # Public names: __all__ when present, otherwise non-underscore names.
    if hasattr(mod, '__all__'):
        names = list(mod.__all__)
    else:
        names = [k for k in dir(mod) if k[0] != '_']
    # Drop submodules; keep (name, object) pairs in order.
    items = [(k, mod.__dict__[k]) for k in names if not inspect.ismodule(mod.__dict__[k])]
    localnames = [k for k, _ in items]
    objs = [v for _, v in items]
    # Fully qualified names come from the object's own module when known.
    fqnames = []
    for obj, local in zip(objs, localnames):
        if hasattr(obj, '__module__') and hasattr(obj, '__name__'):
            fqnames.append(obj.__module__ + '.' + obj.__name__)
        else:
            fqnames.append(modname + '.' + local)
    if onlylocals:
        keep = [fqn.startswith(modname) for fqn in fqnames]
        localnames = [n for n, ok in zip(localnames, keep) if ok]
        fqnames = [n for n, ok in zip(fqnames, keep) if ok]
        objs = [o for o, ok in zip(objs, keep) if ok]
    return localnames, fqnames, objs
|
def time_restarts(data_path):
    """Touch a marker file and report how long ago the previous restart was.

    The marker's previous mtime is read, then the file is touched again;
    the difference between the two timestamps is the restart interval.
    """
    marker = os.path.join(data_path, 'last_restarted')
    if not os.path.isfile(marker):
        # first run: create the marker so both stat calls below succeed
        with open(marker, 'a'):
            os.utime(marker, None)
    previous = os.stat(marker).st_mtime
    with open(marker, 'a'):
        os.utime(marker, None)
    now = os.stat(marker).st_mtime
    elapsed = round(now - previous, 2)
    stamp = datetime.fromtimestamp(now).strftime('%H:%M:%S')
    message = 'LAST RESTART WAS {} SECONDS AGO at {}'.format(elapsed, stamp)
    print(style(fg='green', bg='red', text=message))
|
def out_of_bounds(self, index):
    """Check index for out of bounds.

    :param index: index as integer, tuple or slice
    :return: local index with the same structure as the input, or None
        when any component is out of bounds
    :raises TypeError: if a tuple element is neither int nor slice
    """
    if type(index) is int:
        return self.int_out_of_bounds(index)
    elif type(index) is slice:
        return self.slice_out_of_bounds(index)
    elif type(index) is tuple:
        local_index = []
        for dim, item in enumerate(index):
            if type(item) is slice:
                checked = self.slice_out_of_bounds(item, dim)
            elif type(item) is int:
                checked = self.int_out_of_bounds(item, dim)
            else:
                # Previously (see old FIXME) this fell through with an
                # unbound or stale value; fail loudly instead.
                raise TypeError("index element must be int or slice, got {!r}".format(type(item)))
            if checked is None:
                # one out-of-bounds component invalidates the whole index
                return None
            local_index.append(checked)
        return tuple(local_index)
|
def _get_timezone(self, root):
    """Find timezone information on the bottom of the page."""
    label = root.xpath('//div[@class="smallfont" and @align="center"]')[0].text
    offset_hours = int(self._tz_re.search(label).group(1))
    # NOTE(review): hours * 60 yields minutes; dateutil's tzoffset expects
    # seconds — confirm the intended unit of the second argument.
    return tzoffset(label, offset_hours * 60)
|
def find_by_id(self, organization_export, params=None, **options):
    """Returns details of a previously-requested Organization export.

    Parameters
    ----------
    organization_export : {Id} Globally unique identifier for the Organization export.
    [params] : {Object} Parameters for the request
    """
    # Avoid a shared mutable default argument; None stands in for {}.
    if params is None:
        params = {}
    path = "/organization_exports/%s" % (organization_export)
    return self.client.get(path, params, **options)
|
def __save_shadow_copy(self):
    """Save a pure-SQL copy of the stored routine source (if a shadow directory is set)."""
    if not self.shadow_directory:
        return
    destination = os.path.join(self.shadow_directory, self._routine_name) + '.sql'
    # Refuse to clobber the original routine source file.
    if os.path.realpath(destination) == os.path.realpath(self._source_filename):
        raise LoaderException("Shadow copy will override routine source '{}'".format(self._source_filename))
    # Remove any stale (read only) shadow file first.
    if os.path.exists(destination):
        os.remove(destination)
    # Write the shadow file.
    with open(destination, 'wt', encoding=self._routine_file_encoding) as shadow_file:
        shadow_file.write(self._routine_source_code)
    # Mirror the source file's mode, minus all write bits (read only).
    mode = os.stat(self._source_filename)[stat.ST_MODE]
    os.chmod(destination, mode & ~stat.S_IWUSR & ~stat.S_IWGRP & ~stat.S_IWOTH)
|
def disagg_prec(dailyData, method='equal', cascade_options=None, hourly_data_obs=None, zerodiv="uniform", shift=0):
    """The disaggregation function for precipitation.

    Parameters
    ----------
    dailyData : pd.Series
        daily data
    method : str
        method to disaggregate: 'equal', 'cascade' or 'masterstation'
    cascade_options : cascade object
        including statistical parameters for the cascade model
    hourly_data_obs : pd.Series
        observed hourly data of master station
    zerodiv : str
        method to deal with zero division by key "uniform" --> uniform
        distribution
    shift : int
        shifts the precipitation data by shift (int) steps (eg +7 for
        7:00 to 6:00)

    Raises
    ------
    ValueError
        if *method* is not one of the supported options.
    """
    if method == 'equal':
        return melodist.distribute_equally(dailyData.precip, divide=True)
    if method == 'masterstation':
        return precip_master_station(dailyData, hourly_data_obs, zerodiv)
    if method == 'cascade':
        # the cascade model needs its statistical parameters
        assert cascade_options is not None
        return disagg_prec_cascade(dailyData, cascade_options, shift=shift)
    raise ValueError('Invalid option')
|
def _is_valid_language(self):
    """Return True if the value of component in attribute "language" is valid,
    and otherwise False.

    :returns: True if value is valid, False otherwise
    :rtype: boolean
    """
    value = self._encoded_value.lower()
    language_pattern = re.compile(CPEComponentSimple._LANGTAG_PATTERN)
    return language_pattern.match(value) is not None
|
def roc(y_true, y_score, ax=None):
    """Plot ROC curve.

    Parameters
    ----------
    y_true : array-like, shape = [n_samples]
        Correct target values (ground truth).
    y_score : array-like, shape = [n_samples] or [n_samples, 2] for binary
        classification or [n_samples, n_classes] for multiclass
        Target scores (estimator predictions).
    ax : matplotlib Axes
        Axes object to draw the plot onto, otherwise uses current Axes

    Notes
    -----
    It is assumed that the y_score parameter columns are in order. For example,
    if ``y_true = [2, 2, 1, 0, 0, 1, 2]``, then the first column in y_score
    must contain the scores for class 0, second column for class 1 and so on.

    Returns
    -------
    ax: matplotlib Axes
        Axes containing the plot

    Examples
    --------
    .. plot:: ../../examples/roc.py
    """
    if y_true is None or y_score is None:
        raise ValueError("y_true and y_score are needed to plot ROC")
    if ax is None:
        ax = plt.gca()
    # A 1-D score vector implies binary classification.
    score_is_vector = is_column_vector(y_score) or is_row_vector(y_score)
    n_classes = 2 if score_is_vector else y_score.shape[1]
    if n_classes > 2:
        # One-vs-rest: binarize the labels and draw one curve per class.
        y_true_bin = label_binarize(y_true, classes=np.unique(y_true))
        _roc_multi(y_true_bin, y_score, ax=ax)
        for class_idx in range(n_classes):
            _roc(y_true_bin[:, class_idx], y_score[:, class_idx], ax=ax)
    elif score_is_vector:
        _roc(y_true, y_score, ax)
    else:
        # two-column binary scores: plot the positive-class column
        _roc(y_true, y_score[:, 1], ax)
    return ax
|
def up(tag, sql, revision):
    """Upgrade the database schema to *revision* via alembic."""
    alembic_command.upgrade(config=get_config(), sql=sql, tag=tag, revision=revision)
|
def if_unmodified_since(self) -> Optional[datetime.datetime]:
    """The value of the If-Unmodified-Since HTTP header, or None.

    This header is represented as a `datetime` object.
    """
    raw = self.headers.get(hdrs.IF_UNMODIFIED_SINCE)
    return self._http_date(raw)
|
def GET_AUTH(self, courseid, scoreboardid):  # pylint: disable=arguments-differ
    """GET request: render scoreboard *scoreboardid* of course *courseid*.

    Raises web.notfound when the scoreboard id is invalid or one of its
    tasks does not exist.
    """
    course = self.course_factory.get_course(courseid)
    scoreboards = course.get_descriptor().get('scoreboard', [])
    try:
        scoreboardid = int(scoreboardid)
        scoreboard_name = scoreboards[scoreboardid]["name"]
        scoreboard_content = scoreboards[scoreboardid]["content"]
        scoreboard_reverse = bool(scoreboards[scoreboardid].get('reverse', False))
    except (ValueError, IndexError, KeyError, TypeError):
        # narrowed from a bare except: invalid id, missing entry or a
        # malformed descriptor — never swallow SystemExit/KeyboardInterrupt
        raise web.notfound()
    # Convert scoreboard_content to an OrderedDict {taskid: weight}
    if isinstance(scoreboard_content, str):
        scoreboard_content = OrderedDict([(scoreboard_content, 1)])
    if isinstance(scoreboard_content, list):
        scoreboard_content = OrderedDict([(entry, 1) for entry in scoreboard_content])
    if not isinstance(scoreboard_content, OrderedDict):
        scoreboard_content = OrderedDict(iter(scoreboard_content.items()))
    # Get task names
    task_names = {}
    for taskid in scoreboard_content:
        try:
            task_names[taskid] = course.get_task(taskid).get_name(self.user_manager.session_language())
        except Exception:
            # narrowed from a bare except: any task lookup failure maps to 404
            raise web.notfound("Unknown task id " + taskid)
    # Get all successful submissions carrying a custom score
    results = self.database.submissions.find(
        {"courseid": courseid,
         "taskid": {"$in": list(scoreboard_content.keys())},
         "custom.score": {"$exists": True},
         "result": "success"},
        ["taskid", "username", "custom.score"])
    # Get best results per user (/group)
    result_per_user = {}
    users = set()
    for submission in results:
        # Be sure we have a list
        if not isinstance(submission["username"], list):
            submission["username"] = [submission["username"]]
        submission["username"] = tuple(submission["username"])
        if submission["username"] not in result_per_user:
            result_per_user[submission["username"]] = {}
        if submission["taskid"] not in result_per_user[submission["username"]]:
            result_per_user[submission["username"]][submission["taskid"]] = submission["custom"]["score"]
        else:
            # keep best score; "best" is the lowest when the task (or the
            # whole board) is reversed — reversals cancel each other out
            current_score = result_per_user[submission["username"]][submission["taskid"]]
            new_score = submission["custom"]["score"]
            task_reversed = scoreboard_reverse != (scoreboard_content[submission["taskid"]] < 0)
            if task_reversed and current_score > new_score:
                result_per_user[submission["username"]][submission["taskid"]] = new_score
            elif not task_reversed and current_score < new_score:
                result_per_user[submission["username"]][submission["taskid"]] = new_score
        for user in submission["username"]:
            users.add(user)
    # Get user names
    users_realname = {}
    for username, userinfo in self.user_manager.get_users_info(list(users)).items():
        users_realname[username] = userinfo[0] if userinfo else username
    # Compute overall result per user, and sort them
    overall_result_per_user = {}
    for key, val in result_per_user.items():
        total = 0
        solved = 0
        for taskid, coef in scoreboard_content.items():
            if taskid in val:
                total += val[taskid] * coef
                solved += 1
        overall_result_per_user[key] = {"total": total, "solved": solved}
    sorted_users = list(overall_result_per_user.keys())
    sorted_users = sorted(sorted_users, key=sort_func(overall_result_per_user, scoreboard_reverse))
    # Compute table
    table = []
    # Header: single-task boards only show one score column
    if len(scoreboard_content) == 1:
        header = ["", "Student(s)", "Score"]
        emphasized_columns = [2]
    else:
        header = ["", "Student(s)", "Solved", "Total score"] + [task_names[taskid] for taskid in list(scoreboard_content.keys())]
        emphasized_columns = [2, 3]
    # Lines
    old_score = ()
    rank = 0
    for user in sorted_users:
        # Increment rank if needed, and display it (ties share a rank)
        line = []
        if old_score != (overall_result_per_user[user]["solved"], overall_result_per_user[user]["total"]):
            rank += 1
            old_score = (overall_result_per_user[user]["solved"], overall_result_per_user[user]["total"])
            line.append(rank)
        else:
            line.append("")
        # Users
        line.append(",".join(sorted([users_realname[u] for u in user])))
        if len(scoreboard_content) == 1:
            line.append(overall_result_per_user[user]["total"])
        else:
            line.append(overall_result_per_user[user]["solved"])
            line.append(overall_result_per_user[user]["total"])
            for taskid in scoreboard_content:
                line.append(result_per_user[user].get(taskid, ""))
        table.append(line)
    renderer = self.template_helper.get_custom_renderer('frontend/plugins/scoreboard')
    return renderer.scoreboard(course, scoreboardid, scoreboard_name, header, table, emphasized_columns)
|
def GetHist(tag_name, start_time, end_time, period=5, mode="raw", desc_as_label=False, label=None, high_speed=False, utc=False):
    """Retrieves data from eDNA history for a given tag.

    :param tag_name: fully-qualified (site.service.tag) eDNA tag
    :param start_time: must be in format mm/dd/yy hh:mm:ss
    :param end_time: must be in format mm/dd/yy hh:mm:ss
    :param period: specify the number of seconds for the pull interval
    :param mode: "raw", "snap", "avg", "interp", "max", "min".
        Any unrecognized mode falls back to "raw".
        See eDNA documentation for more information.
    :param desc_as_label: use the tag description as the column name instead
        of the full tag
    :param label: supply a custom label to use as the DataFrame column name
    :param high_speed: if True, pull millisecond data
    :param utc: if True, use the integer time format instead of DateTime
    :return: a pandas DataFrame with timestamp, value, and status
    """
    # Check if the point even exists before starting a history request.
    if not DoesIDExist(tag_name):
        warnings.warn("WARNING- " + tag_name + " does not exist or " +
                      "connection was dropped. Try again if tag does exist.")
        return pd.DataFrame()
    # Define all required variables in the correct ctypes format.
    szPoint = c_char_p(tag_name.encode('utf-8'))
    tStart = c_long(StringToUTCTime(start_time))
    tEnd = c_long(StringToUTCTime(end_time))
    tPeriod = c_long(period)
    pulKey = c_ulong(0)
    # Initialize the data pull using the specified pulKey, which is an
    # identifier that tells eDNA which data pull is occurring.
    mode = mode.lower().strip()
    if not high_speed:
        # BUGFIX: these were independent "if" statements terminated by an
        # if/else on "snap", so every mode other than "snap" *also* issued a
        # raw pull that overwrote nRet and pulKey. An elif chain guarantees
        # exactly one history request is initiated.
        if mode == "avg":
            nRet = dna_dll.DnaGetHistAvgUTC(szPoint, tStart, tEnd, tPeriod, byref(pulKey))
        elif mode == "interp":
            nRet = dna_dll.DnaGetHistInterpUTC(szPoint, tStart, tEnd, tPeriod, byref(pulKey))
        elif mode == "min":
            nRet = dna_dll.DnaGetHistMinUTC(szPoint, tStart, tEnd, tPeriod, byref(pulKey))
        elif mode == "max":
            nRet = dna_dll.DnaGetHistMaxUTC(szPoint, tStart, tEnd, tPeriod, byref(pulKey))
        elif mode == "snap":
            nRet = dna_dll.DnaGetHistSnapUTC(szPoint, tStart, tEnd, tPeriod, byref(pulKey))
        else:
            # "raw" and any unrecognized mode fall through to a raw pull.
            nRet = dna_dll.DnaGetHistRawUTC(szPoint, tStart, tEnd, byref(pulKey))
        time_, val, stat = _GetNextHistSmallUTC(pulKey, nRet)
    else:
        nStartMillis = c_ushort(0)
        nEndMillis = c_ushort(0)
        nRet = dna_dll.DnaGetHSHistRawUTC(szPoint, tStart, nStartMillis, tEnd, nEndMillis, byref(pulKey))
        time_, val, stat = _GetNextHSHistUTC(pulKey, nRet)
    # The history request must be cancelled to free up network resources.
    dna_dll.DnaCancelHistRequest(pulKey)
    # To construct the pandas DataFrame, the tag name will be used as the
    # column name, and the index (which is in the strange eDNA format) must be
    # converted to an actual DateTime.
    d = {tag_name + ' Status': stat, tag_name: val}
    df = pd.DataFrame(data=d, index=time_)
    if not utc:
        # eDNA timestamps are epoch seconds (milliseconds for high-speed).
        if not high_speed:
            df.index = pd.to_datetime(df.index, unit="s")
        else:
            df.index = pd.to_datetime(df.index, unit="ms")
    if df.empty:
        warnings.warn('WARNING- No data retrieved for ' + tag_name + '. ' +
                      'Check eDNA connection, ensure that the start time is ' +
                      'not later than the end time, verify that the ' +
                      'DateTime formatting matches eDNA requirements, and ' +
                      'check that data exists in the query time period.')
    # Check if the user would rather use the description as the column name.
    if desc_as_label or label:
        if label:
            new_label = label
        else:
            new_label = _GetLabel(tag_name)
        df.rename(inplace=True, columns={tag_name: new_label, tag_name + " Status": new_label + " Status"})
    return df
|
def p_BIT(p):
    """asm : bitop expr COMMA A
    | bitop pexpr COMMA A
    | bitop expr COMMA reg8
    | bitop pexpr COMMA reg8
    | bitop expr COMMA reg8_hl
    | bitop pexpr COMMA reg8_hl"""
    # PLY parser action for the Z80 bit-manipulation family (BIT/SET/RES):
    # the docstring above IS the grammar rule, so its token names must keep
    # their exact spelling.
    # The bit-position operand (p[2]) must evaluate to a constant in [0..7].
    bit = p[2].eval()
    if bit < 0 or bit > 7:
        error(p.lineno(3), 'Invalid bit position %i. Must be in [0..7]' % bit)
        # Signal "no instruction produced" to the rest of the parse.
        p[0] = None
        return
    # Emit e.g. "BIT 3,A": mnemonic is p[1], register operand is p[4].
    p[0] = Asm(p.lineno(3), '%s %i,%s' % (p[1], bit, p[4]))
|
def add_variable(self, node):
    """Add a variable node to this node.

    :sig: (VariableNode) -> None
    :param node: Variable node to add.
    """
    # Duplicate names are silently ignored: only the first node wins.
    if node.name in self.variable_names:
        return
    self.variables.append(node)
    self.variable_names.add(node.name)
    node.parent = self
|
def count(self, signature=None):
    """Counts how many crash dumps have been stored in this database.
    Optionally filters the count by heuristic signature.

    @type signature: object
    @param signature: (Optional) Count only the crashes that match
        this signature. See L{Crash.signature} for more details.

    @rtype: int
    @return: Count of crash dumps stored in this database.
    """
    query = self._session.query(CrashDTO.id)
    if signature:
        # Signatures are stored pickled (protocol 0), so serialize the
        # requested signature the same way before filtering on it.
        query = query.filter_by(signature=pickle.dumps(signature, protocol=0))
    return query.count()
|
def open_config(self, type="shared"):
    """Opens the configuration of the currently connected device.

    Args:
        :type: The type of configuration you want to open. Any string can be
            provided, however the standard supported options are
            **exclusive**, **private**, and **shared**. The default mode is
            **shared**.

    Examples:
    .. code-block:: python

        # Open shared config
        from pyJunosManager import JunosDevice
        dev = JunosDevice(host="1.2.3.4", username="root", password="Juniper")
        dev.open()
        dev.open_config()
        dev.close_config()
        dev.close()

        # Open private config
        from pyJunosManager import JunosDevice
        dev = JunosDevice(host="1.2.3.4", username="root", password="Juniper")
        dev.open()
        dev.open_config("private")
        dev.close_config()
        dev.close()
    """
    # Attempt to open a configuration of the requested type via RPC.
    try:
        output = self.dev.rpc("<open-configuration><{0}/></open-configuration>".format(type))
    except Exception as err:
        # Best-effort: report the error if the configuration is not available.
        print(err)
|
def clipping_params(ts, capacity=100, rate_limit=float('inf'), method=None, max_attempts=100):
    """Start, end, and threshold that clips the value of a time series the
    most, given a limited "capacity" and "rate".

    Assumes that the signal can be linearly interpolated between points
    (trapezoidal integration).

    Arguments:
        ts (TimeSeries): Time series to attempt to clip to as low a max value
            as possible.
        capacity (float): Total "funds" or "energy" available for clipping
            (integrated area under the time series).
        rate_limit (float): NOTE(review): accepted but never used in the body
            below -- confirm whether it should constrain the optimization.
        method (str): scipy optimization algorithm name, one of:
            'L-BFGS-B': Byrd, 1995, "A Limited Memory Algorithm for Bound
                Constrained Optimization"
            'TNC': Truncated Newton in C, or Newton Conjugate-Gradient, each
                variable may be constrained with upper and lower bounds
            'COBYLA': Constrained Optimization by Linear Approximation.
                Fortran implementation.
            'SLSQP': Kraft, 1988, Sequential Least Squares Programming or
                Quadratic Programming, infinite bounds converted to large
                floats
        max_attempts (int): number of random restarts tried when `method` is
            missing or not one of the valid names.

    TODO:
        Bisection search for the optimal threshold.

    Returns:
        dict: the scipy optimization result dict augmented with the keys
        'costs', 'threshold', 'initial_guess', 'attempts', 'integral' and
        'method'.  (NOTE(review): an earlier docstring claimed a 2-tuple.)

    >>> t = ['2014-12-09T00:00', '2014-12-09T00:15', '2014-12-09T00:30',
    ...      '2014-12-09T00:45', '2014-12-09T01:00', '2014-12-09T01:15',
    ...      '2014-12-09T01:30', '2014-12-09T01:45']
    >>> import pandas as pd
    >>> ts = pd.Series([217, 234, 235, 231, 219, 219, 231, 232],
    ...                index=pd.to_datetime(t))
    >>> clipping_params(ts, capacity=60000)['threshold']  # doctest: +ELLIPSIS
    218.13...
    >>> clipping_params(ts, capacity=30000)['threshold']  # doctest: +ELLIPSIS
    224.15358...
    """
    VALID_METHODS = ['L-BFGS-B', 'TNC', 'SLSQP', 'COBYLA']
    # NOTE(review): mutates the caller's series -- the index is converted to
    # int64 (nanoseconds) in place so the cost function can integrate in time.
    ts.index = ts.index.astype(np.int64)
    # NOTE(review): `costs` is returned below but nothing ever appends to it.
    costs = []

    def cost_fun(x, *args):
        # Penalized objective: squared deviation of the clipped area from the
        # target capacity, two barrier terms pushing the threshold inside
        # [bounds[0], bounds[1]], and a mild exponential penalty on the area.
        thresh = x[0]
        ts, capacity, bounds = args
        integral = clipped_area(ts, thresh=thresh)
        terms = np.array([(10. * (integral - capacity) / capacity) ** 2, 2. / 0.1 ** ((bounds[0] - thresh) * capacity / bounds[0]), 2. / 0.1 ** ((thresh - bounds[1]) * capacity / bounds[1]), 1.2 ** (integral / capacity)])
        return sum(terms)

    # Threshold search range: the value range of the series itself.
    bounds = (ts.min(), ts.max())
    done, attempts = 0, 0
    # Initial guess: midpoint of the value range.
    thresh0 = bounds[0] + 0.5 * (bounds[1] - bounds[0])
    if not method or not method in VALID_METHODS:
        # No (valid) method requested: try each optimizer in turn, restarting
        # from a random threshold until one reports success or we give up.
        while attempts < max_attempts and not done:
            for optimizer_method in VALID_METHODS:
                optimum = minimize(fun=cost_fun, x0=[thresh0], bounds=[bounds], args=(ts, capacity, bounds), method=optimizer_method)
                if optimum.success:
                    done = True
                    break
            if done:
                break
            attempts += 1
            thresh0 = bounds[0] + random.random() * (bounds[1] - bounds[0])
    else:
        optimum = minimize(fun=cost_fun, x0=[thresh0], bounds=[bounds], args=(ts, capacity, bounds), method=method)
    thresh = optimum.x[0]
    integral = clipped_area(ts, thresh=thresh)
    # Merge the scipy result with our own bookkeeping fields.
    params = dict(optimum)
    params.update({'costs': costs, 'threshold': thresh, 'initial_guess': thresh0, 'attempts': attempts, 'integral': integral, 'method': method})
    return params
|
def _check_and_expand_exponential ( expr , variables , data ) :
"""Check if the current operation specifies exponential expansion . ^ ^ 6
specifies all powers up to the 6th , ^ 5-6 the 5th and 6th powers , ^ 6 the
6th only ."""
|
if re . search ( r'\^\^[0-9]+$' , expr ) :
order = re . compile ( r'\^\^([0-9]+)$' ) . findall ( expr )
order = range ( 1 , int ( * order ) + 1 )
variables , data = exponential_terms ( order , variables , data )
elif re . search ( r'\^[0-9]+[\-]?[0-9]*$' , expr ) :
order = re . compile ( r'\^([0-9]+[\-]?[0-9]*)' ) . findall ( expr )
order = _order_as_range ( * order )
variables , data = exponential_terms ( order , variables , data )
return variables , data
|
def build_dirs(files):
    '''Build necessary directories based on a list of file paths.

    `files` is a (possibly nested) list; each non-list entry is a mapping with
    a 'path' key holding path components as a list, the last component being
    the file name.  Directories are created relative to the current working
    directory.  NOTE: Python 2 code (print statement).
    '''
    for i in files:
        if type(i) is list:
            # Nested list of entries: recurse into it.
            build_dirs(i)
            continue
        else:
            # Only entries with at least one directory component need a dir.
            if len(i['path']) > 1:
                # Join everything except the final component (the file name).
                addpath = os.path.join(os.getcwd(), *i['path'][:-1])
                # `all_subdirs` is defined elsewhere in this module --
                # presumably returns existing subdirectories; TODO confirm.
                subdirs = all_subdirs(os.getcwd())
                if addpath and addpath not in subdirs:
                    os.makedirs(addpath)
                    print 'just made path', addpath
|
def match_alphabet(self, pattern):
    """Initialise the alphabet for the Bitap algorithm.

    Args:
        pattern: The text to encode.

    Returns:
        Hash of character locations.
    """
    # Each character maps to a bitmask with a 1 at every position where it
    # occurs in the pattern (most significant bit = first character).
    masks = {}
    length = len(pattern)
    for position, char in enumerate(pattern):
        masks[char] = masks.get(char, 0) | (1 << (length - position - 1))
    return masks
|
def evaluate_script(self):
    """Evaluates current **Script_Editor_tabWidget** Widget tab Model editor
    content into the interactive console.

    :return: Method success.
    :rtype: bool
    """
    editor = self.get_current_editor()
    if not editor:
        return False
    LOGGER.debug("> Evaluating 'Script Editor' content.")
    code = foundations.strings.to_string(editor.toPlainText().toUtf8())
    if not self.evaluate_code(code):
        # Preserve the original implicit behaviour: no explicit return value
        # when evaluation fails.
        return
    self.ui_refresh.emit()
    return True
|
def get_submissions(self, fullnames, *args, **kwargs):
    """Generate Submission objects for each item provided in `fullnames`.

    A submission fullname looks like `t3_<base36_id>`. Submissions are
    yielded in the same order they appear in `fullnames`.

    Up to 100 items are batched at a time -- this happens transparently.

    The additional parameters are passed directly into
    :meth:`.get_content`. Note: the `url` and `limit` parameters cannot be
    altered.
    """
    # Work on a copy so the caller's list is never mutated.
    remaining = fullnames[:]
    while remaining:
        # The by_id endpoint accepts at most 100 fullnames per request.
        batch, remaining = remaining[:100], remaining[100:]
        url = self.config['by_id'] + ','.join(batch)
        for item in self.get_content(url, limit=len(batch), *args, **kwargs):
            yield item
|
def get_layout():
    """Specify a hierarchy of our templates."""
    # Innermost level: MSM construction and analysis.
    msm = TemplateDir(
        'msm',
        [
            'msm/timescales.py',
            'msm/timescales-plot.py',
            'msm/microstate.py',
            'msm/microstate-plot.py',
            'msm/microstate-traj.py',
        ],
        [],
    )
    # Clustering feeds into the MSM step.
    cluster = TemplateDir(
        'cluster',
        [
            'cluster/cluster.py',
            'cluster/cluster-plot.py',
            'cluster/sample-clusters.py',
            'cluster/sample-clusters-plot.py',
        ],
        [msm],
    )
    # tICA -> cluster -> msm; this subtree is shared by several featurizations.
    tica_msm = TemplateDir(
        'tica',
        [
            'tica/tica.py',
            'tica/tica-plot.py',
            'tica/tica-sample-coordinate.py',
            'tica/tica-sample-coordinate-plot.py',
        ],
        [cluster],
    )
    rmsd = TemplateDir(
        'rmsd',
        ['rmsd/rmsd.py', 'rmsd/rmsd-plot.py'],
        [],
    )
    landmarks = TemplateDir(
        'landmarks',
        [
            'landmarks/find-landmarks.py',
            'landmarks/featurize.py',
            'landmarks/featurize-plot.py',
        ],
        [tica_msm],
    )
    dihedrals = TemplateDir(
        'dihedrals',
        ['dihedrals/featurize.py', 'dihedrals/featurize-plot.py'],
        [tica_msm],
    )
    analysis = TemplateDir(
        'analysis',
        ['analysis/gather-metadata.py', 'analysis/gather-metadata-plot.py'],
        [rmsd, landmarks, dihedrals],
    )
    return TemplateDir(
        '',
        ['0-test-install.py', '1-get-example-data.py', 'README.md'],
        [analysis],
    )
|
def getTotalCpuTimeAndMemoryUsage():
    """Gives the total cpu time and memory usage of itself and its children.

    Returns:
        tuple: (total CPU seconds -- user + system for self and children,
                total peak RSS as reported by getrusage for self and children).
    """
    me = resource.getrusage(resource.RUSAGE_SELF)
    childs = resource.getrusage(resource.RUSAGE_CHILDREN)
    totalCpuTime = me.ru_utime + me.ru_stime + childs.ru_utime + childs.ru_stime
    # BUGFIX: previously summed me.ru_maxrss twice, double-counting the parent
    # process and ignoring the children entirely.
    totalMemoryUsage = me.ru_maxrss + childs.ru_maxrss
    return totalCpuTime, totalMemoryUsage
|
def router_fabric_virtual_gateway_address_family_ipv4_accept_unicast_arp_request(self, **kwargs):
    """Auto Generated Code"""
    # Build the NETCONF payload one nesting level at a time:
    # config / router / fabric-virtual-gateway / address-family / ipv4 /
    #     accept-unicast-arp-request
    config = ET.Element("config")
    node = ET.SubElement(config, "router", xmlns="urn:brocade.com:mgmt:brocade-common-def")
    node = ET.SubElement(node, "fabric-virtual-gateway", xmlns="urn:brocade.com:mgmt:brocade-anycast-gateway")
    node = ET.SubElement(node, "address-family")
    node = ET.SubElement(node, "ipv4")
    ET.SubElement(node, "accept-unicast-arp-request")
    # Callers may override the configured callback per invocation.
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
|
def _find_x86_candidates(self, start_address, end_address):
    """Finds possible 'RET-ended' gadgets.

    Scans every byte offset in [start_address, end_address] for opcodes that
    can terminate a gadget (RET / JMP / CALL variants), disassembles and
    translates each hit to REIL, then grows gadget trees backwards from each
    terminator via self._build_from.  Returns a flat list of candidate
    gadgets.  NOTE: Python 2 code (uses xrange).
    """
    roots = []
    # find gadgets tail
    for addr in xrange(start_address, end_address + 1):
        # TODO: Make this 'speed improvement' architecture-agnostic
        op_codes = [0xc3,  # RET
                    0xc2,  # RET imm16
                    0xeb,  # JMP rel8
                    0xe8,  # CALL rel{16,32}
                    0xe9,  # JMP rel{16,32}
                    0xff,  # JMP/CALL r/m{16,32,64}
                    ]
        if self._mem[addr] not in op_codes:
            continue
        try:
            # x86 instructions are at most 15 bytes, so a 16-byte window
            # (clamped to the scan range) is always enough.
            asm_instr = self._disasm.disassemble(self._mem[addr:min(addr + 16, end_address + 1)], addr)
        except:
            # Disassembly failures are expected on arbitrary bytes; skip.
            asm_instr = None
        if not asm_instr:
            continue
        # restarts ir register numbering
        self._ir_trans.reset()
        try:
            ins_ir = self._ir_trans.translate(asm_instr)
        except:
            # Untranslatable instruction: not a usable gadget terminator.
            continue
        # build gadgets: keep only indirect jumps (JCC to a register), which
        # is how RET/JMP/CALL-through-register terminators appear in REIL.
        if ins_ir[-1] and (ins_ir[-1].mnemonic == ReilMnemonic.JCC and isinstance(ins_ir[-1].operands[2], ReilRegisterOperand)):
            # add for REX.W + FF /3 call instruction
            if ins_ir[-1].mnemonic == ReilMnemonic.JCC:
                # NOTE(review): this inner test is always true given the outer
                # condition; kept as-is to preserve behaviour.
                # try addr - 1 (a one-byte prefix may precede the opcode)
                try:
                    asm_instr_1 = self._disasm.disassemble(self._mem[addr - 1:min(addr + 15, end_address + 1)], addr)
                    self._ir_trans.reset()
                    ins_ir_1 = self._ir_trans.translate(asm_instr_1)
                    if ins_ir_1[-1].mnemonic == ReilMnemonic.JCC:
                        # Prefer the prefixed form when it still ends in JCC.
                        addr = addr - 1
                        asm_instr = asm_instr_1
                        ins_ir = ins_ir_1
                except:
                    pass
            asm_instr.ir_instrs = ins_ir
            root = GadgetTreeNode(asm_instr)
            roots.append(root)
            self._build_from(addr, root, start_address, self._instrs_depth)
    # filter roots with no children
    roots = [r for r in roots if len(r.get_children()) > 0]
    # build gadgets
    root_gadgets = [self._build_gadgets(r) for r in roots]
    # flatten root gadgets list
    candidates = [item for l in root_gadgets for item in l]
    return candidates
|
def new_transaction(vm: VM, from_: Address, to: Address, amount: int = 0, private_key: PrivateKey = None, gas_price: int = 10, gas: int = 100000, data: bytes = b'') -> BaseTransaction:
    """Create and return a transaction sending amount from <from_> to <to>.

    The transaction will be signed with the given private key.
    """
    # The nonce must match the sender's current account state.
    unsigned = vm.create_unsigned_transaction(
        nonce=vm.state.get_nonce(from_),
        gas_price=gas_price,
        gas=gas,
        to=to,
        value=amount,
        data=data,
    )
    return unsigned.as_signed_transaction(private_key)
|
def finalize(self):
    """Get the base64-encoded signature itself.

    Can only be called once.
    """
    # The underlying signer emits a DER-encoded DSS signature; split it into
    # its (r, s) integers and re-encode them in the base64 wire format.
    der_signature = self.signer.finalize()
    r, s = decode_dss_signature(der_signature)
    return encode_signature(r, s)
|
def template_from_filename(filename):
    """Returns the appropriate template name based on the given file name."""
    # Everything after the last extension separator; the whole name when the
    # separator is absent (matching the original split()[-1] behaviour).
    ext = filename.rpartition(os.path.extsep)[-1]
    if ext not in TEMPLATES_MAP:
        raise ValueError("No template for file extension {}".format(ext))
    return TEMPLATES_MAP[ext]
|
def login(config, api_key=""):
    """Store your Bugzilla API Key"""
    if not api_key:
        info_out(
            "If you don't have an API Key, go to:\n"
            "https://bugzilla.mozilla.org/userprefs.cgi?tab=apikey\n"
        )
        api_key = getpass.getpass("API Key: ")
    # Before we store it, let's test it against the whoami endpoint.
    url = urllib.parse.urljoin(config.bugzilla_url, "/rest/whoami")
    assert url.startswith("https://"), url
    response = requests.get(url, params={"api_key": api_key})
    if response.status_code != 200:
        error_out("Failed - {} ({})".format(response.status_code, response.json()))
        return
    if response.json().get("error"):
        error_out("Failed - {}".format(response.json()))
        return
    # The key works: persist it alongside the Bugzilla URL.
    update(
        config.configfile,
        {
            "BUGZILLA": {
                "bugzilla_url": config.bugzilla_url,
                "api_key": api_key,
                # "login": login,
            }
        },
    )
    success_out("Yay! It worked!")
|
def read_mac(self):
    """Read MAC from EFUSE region"""
    # Efuse words 2 and 1 hold the MAC, packed big-endian.
    raw = struct.pack(">II", self.read_efuse(2), self.read_efuse(1))
    # Drop the leading 2-byte CRC, keeping the six MAC octets.
    mac_bytes = raw[2:8]
    try:
        # Python 2: elements of a byte string are 1-char strings.
        return tuple(ord(octet) for octet in mac_bytes)
    except TypeError:
        # Python 3: bytes elements are already integers.
        return tuple(mac_bytes)
|
def _init ( self ) :
"""Finalize the initialization of the RlzsAssoc object by setting
the ( reduced ) weights of the realizations ."""
|
if self . num_samples :
assert len ( self . realizations ) == self . num_samples , ( len ( self . realizations ) , self . num_samples )
for rlz in self . realizations :
for k in rlz . weight . dic :
rlz . weight . dic [ k ] = 1. / self . num_samples
else :
tot_weight = sum ( rlz . weight for rlz in self . realizations )
if not tot_weight . is_one ( ) : # this may happen for rounding errors or because of the
# logic tree reduction ; we ensure the sum of the weights is 1
for rlz in self . realizations :
rlz . weight = rlz . weight / tot_weight
|
def cli(env, identifier, price=False, guests=False):
    """Get details for a virtual server."""
    manager = SoftLayer.DedicatedHostManager(env.client)
    table = formatting.KeyValueTable(['name', 'value'])
    table.align['name'] = 'r'
    table.align['value'] = 'l'
    host = utils.NestedDict(manager.get_host(identifier))
    # Simple one-to-one rows pulled straight from the host record.
    for row_label, key in (
        ('id', 'id'),
        ('name', 'name'),
        ('cpu count', 'cpuCount'),
        ('memory capacity', 'memoryCapacity'),
        ('disk capacity', 'diskCapacity'),
        ('create date', 'createDate'),
        ('modify date', 'modifyDate'),
    ):
        table.add_row([row_label, host[key]])
    table.add_row(['router id', host['backendRouter']['id']])
    table.add_row(['router hostname', host['backendRouter']['hostname']])
    owner = utils.lookup(host, 'billingItem', 'orderItem', 'order', 'userRecord', 'username')
    table.add_row(['owner', formatting.FormattedItem(owner or formatting.blank())])
    if price:
        # Recurring price = the host's own amount plus all child line items.
        total_price = utils.lookup(host, 'billingItem', 'nextInvoiceTotalRecurringAmount') or 0
        children = utils.lookup(host, 'billingItem', 'children') or []
        total_price += sum(child['nextInvoiceTotalRecurringAmount'] for child in children)
        table.add_row(['price_rate', total_price])
    table.add_row(['guest count', host['guestCount']])
    if guests:
        guest_table = formatting.Table(['id', 'hostname', 'domain', 'uuid'])
        for guest in host['guests']:
            guest_table.add_row([guest['id'], guest['hostname'], guest['domain'], guest['uuid']])
        table.add_row(['guests', guest_table])
    table.add_row(['datacenter', host['datacenter']['name']])
    env.fout(table)
|
def _init_lsr ( n_items , alpha , initial_params ) :
"""Initialize the LSR Markov chain and the weights ."""
|
if initial_params is None :
weights = np . ones ( n_items )
else :
weights = exp_transform ( initial_params )
chain = alpha * np . ones ( ( n_items , n_items ) , dtype = float )
return weights , chain
|
def _build_jss_object_list(self, response, obj_class):
    """Build a JSSObjectList of JSSListData objects from `response`."""
    objects = []
    for element in response:
        # Skip empty entries and the bookkeeping "size" element.
        if element is None or element.tag == "size":
            continue
        payload = {child.tag: child.text for child in element}
        objects.append(JSSListData(obj_class, payload, self))
    return JSSObjectList(self, obj_class, objects)
|
def download_cutout(self, reading, focus=None, needs_apcor=False):
    """Downloads a cutout of the FITS image for a given source reading.

    Args:
        reading: ossos.astrom.SourceReading
            The reading which will be the focus of the downloaded image.
        focus: tuple(int, int)
            The x, y coordinates that should be the focus of the downloaded
            image.  These coordinates should be in terms of the
            source_reading parameter's coordinate system.
            Default value is None, in which case the source reading's x, y
            position is used as the focus.
        needs_apcor: bool
            If True, the apcor file with data needed for photometry
            calculations is downloaded in addition to the image.
            Defaults to False.

    Returns:
        cutout: ossos.downloads.data.SourceCutout
    """
    logger.debug("Doing download_cutout with inputs: reading:{} focus:{} needs_apcor:{}".format(reading, focus, needs_apcor))
    assert isinstance(reading, SourceReading)
    # Cutout radius: scaled uncertainty ellipse, never below the configured
    # minimum (coerced to an angular Quantity when the config gives a bare
    # number).
    min_radius = config.read('CUTOUTS.SINGLETS.RADIUS')
    if not isinstance(min_radius, Quantity):
        min_radius = min_radius * units.arcsec
    radius = max(reading.uncertainty_ellipse.a, reading.uncertainty_ellipse.b) * 2.5 + min_radius
    logger.debug("got radius for cutout: {}".format(radius))
    image_uri = reading.get_image_uri()
    logger.debug("Getting cutout at {} for {}".format(reading.reference_sky_coord, image_uri))
    hdulist = storage._cutout_expnum(reading.obs, reading.reference_sky_coord, radius)
    # hdulist = storage.ra_dec_cutout(image_uri, reading.reference_sky_coord, radius)
    logger.debug("Getting the aperture correction.")
    source = SourceCutout(reading, hdulist, radius=radius)
    # Accessing these attributes triggers the (lazy) downloads.
    try:
        apcor = source.apcor
        zmag = source.zmag
        source.reading.get_observation_header()
    except Exception as ex:
        if needs_apcor:
            import sys
            import traceback
            sys.stderr.write("Failed to retrieve apcor but apcor required. Raising error, see logs for more details")
            # BUGFIX: previously passed traceback.print_exc() -- which prints
            # and returns None -- to stderr.write(), raising a TypeError that
            # masked the original failure.  format_exc() returns the string.
            sys.stderr.write(traceback.format_exc())
        # NOTE(review): despite the message above, the original code never
        # re-raises here; that behaviour is preserved.
        pass
    logger.debug("Sending back the source reading.")
    return source
|
def _get_index_name_by_column(table, column_name):
    """Find the index name for a given table and column.

    Returns None when no index targets the column.
    """
    protected = metadata.protect_name(column_name)
    # An index may target either the column itself or its collection values.
    targets = (protected, "values(%s)" % protected)
    for index_meta in table.indexes.values():
        if dict(index_meta.index_options).get('target') in targets:
            return index_meta.name
|
def build_items(self):
    """Main loop: drain the queue, batch each item's data into the request
    body, send it over the connection, then build the statistics item.

    Items whose ``data`` attribute is a list or tuple are flattened into the
    body; anything else is appended as a single entry.  On a send failure the
    queue is restored via ``_reverse_queue`` so no data is lost.
    """
    conn = self.connect(address=self.server_address, port=self.server_port)
    if conn:
        while not self.queue.empty():
            item = self.queue.get()
            self.pool.append(item)
            # BUGFIX: the original tested `type(item.data) is (tuple or list)`,
            # which evaluates to `type(item.data) is tuple`, so list payloads
            # were appended whole instead of extended element-by-element.
            if isinstance(item.data, (tuple, list)):
                self.body['data'].extend(item.data)
            else:
                self.body['data'].append(item.data)
            self.logger.debug(self.body['data'])
            try:
                log_message = ('Queue length is {0}'.format(len(self.body['data'])))
                self.logger.debug(log_message)
                if len(self.body['data']) != 0:
                    self.send(conn)
                    self.logger.debug(self.get_result())
            except Exception:
                # Narrowed from a bare except so KeyboardInterrupt/SystemExit
                # are not swallowed; failed payloads are pushed back.
                self._reverse_queue()
                log_message = ('An error occurred.' 'Maybe socket error, or get invalid value.')
                self.logger.debug(log_message)
            else:
                # Successful send: clear the batch for the next item.
                del self.body['data'][:]
    self.build_statistics_item()
|
def _uniquewords ( * args ) :
"""Dictionary of words to their indices . Helper function to ` encode . `"""
|
words = { }
n = 0
for word in itertools . chain ( * args ) :
if word not in words :
words [ word ] = n
n += 1
return words
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.