signature stringlengths 29 44.1k | implementation stringlengths 0 85.2k |
|---|---|
def announced_networks(self):
    """Show all announced networks for the BGP configuration.

    Example::

        for advertised in engine.bgp.advertisements:
            net, route_map = advertised

    :return: list of tuples ``(advertised_network, route_map)``; the route
        map element may be None when no route map is attached.
    """
    # Default to an empty list: previously a missing 'announced_ne_setting'
    # key returned None and iterating it raised TypeError.
    return [
        (Element.from_href(entry.get('announced_ne_ref')),
         Element.from_href(entry.get('announced_rm_ref')))
        for entry in self.data.get('announced_ne_setting', [])
    ]
def make_objs(names, out_dir=''):
    """Make object file names for cl.exe and link.exe.

    Each source name gets its extension replaced by ``.obj``; when
    ``out_dir`` is non-empty the names are prefixed with that directory.
    """
    obj_names = [replace_ext(name, '.obj') for name in names]
    if not out_dir:
        return obj_names
    return [os.path.join(out_dir, obj_name) for obj_name in obj_names]
async def release_forks(self, philosopher):
    """Release both forks after ``philosopher`` has just eaten.

    The forks are put down one by one by sending the ``putdown_fork``
    action to the monitor actor.
    """
    forks = self.forks
    # Clear local fork/waiting state before notifying the monitor.
    self.forks = []
    self.started_waiting = 0
    for fork in forks:
        philosopher.logger.debug('Putting down fork %s', fork)
        await philosopher.send('monitor', 'putdown_fork', fork)
    # NOTE(review): the final sleep is assumed to follow the loop (one pause
    # after releasing both forks) — confirm against original indentation.
    await sleep(self.cfg.waiting_period)
def append(self, page, content, **options):
    """Append *content* text to *page* via the DokuWiki XML-RPC API.

    Valid *options* are:
      * sum (str): change summary
      * minor (bool): whether this is a minor change

    :return: whatever the remote ``dokuwiki.appendPage`` call returns.
    """
    # Options are forwarded as a single struct argument, per the XML-RPC API.
    return self._dokuwiki.send('dokuwiki.appendPage', page, content, options)
def patch_namespaced_persistent_volume_claim_status(self, name, namespace, body, **kwargs):  # noqa: E501
    """Partially update status of the specified PersistentVolumeClaim.  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.patch_namespaced_persistent_volume_claim_status(name, namespace, body, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str name: name of the PersistentVolumeClaim (required)
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param UNKNOWN_BASE_TYPE body: (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :param str dry_run: When present, indicates that modifications should not
        be persisted. Valid values are: - All: all dry run stages will be processed
    :return: V1PersistentVolumeClaim
        If the method is called asynchronously, returns the request thread.
    """
    # Thin convenience wrapper: always unwrap the (data, status, headers)
    # tuple from the *_with_http_info variant.
    kwargs['_return_http_data_only'] = True
    if kwargs.get('async_req'):
        # Caller gets the request thread back.
        return self.patch_namespaced_persistent_volume_claim_status_with_http_info(name, namespace, body, **kwargs)  # noqa: E501
    else:
        (data) = self.patch_namespaced_persistent_volume_claim_status_with_http_info(name, namespace, body, **kwargs)  # noqa: E501
        return data
def evaluate(self, dataset, metric="auto", missing_value_action='auto',
             with_predictions=False, options=None, **kwargs):
    """Evaluate the model by making predictions of target values and
    comparing these to actual values.

    Parameters
    ----------
    dataset : SFrame
        Dataset in the same format used for training. The column names and
        types must be the same as those used in training.
    metric : str, list[str]
        Evaluation metric(s) to be computed.
    missing_value_action : str, optional
        Action to perform when missing values are encountered:
        - 'auto': choose a model-dependent missing value policy.
        - 'impute': fill missing values with the mean of the training data.
        - 'none': treat missing values as-is; model must handle them.
        - 'error': terminate with an error message.
    with_predictions : bool, optional
        Whether the per-row predictions are included in the result.
    options : dict, optional
        Additional options (accepted for API compatibility; not forwarded
        by this implementation).
    kwargs : dict
        Additional options (accepted for API compatibility; not forwarded).
    """
    # Fixed: `options={}` was a shared mutable default argument; use None
    # as the sentinel instead.
    if options is None:
        options = {}
    if missing_value_action == 'auto':
        missing_value_action = select_default_missing_value_policy(self, 'evaluate')
    _raise_error_if_not_sframe(dataset, "dataset")
    results = self.__proxy__.evaluate(
        dataset, missing_value_action, metric, with_predictions=with_predictions)
    return results
def polynet(num_classes=1000, pretrained='imagenet'):
    """PolyNet architecture from the paper
    'PolyNet: A Pursuit of Structural Diversity in Very Deep Networks'
    https://arxiv.org/abs/1611.05725

    :param num_classes: number of output classes; when loading pretrained
        weights it must match the pretrained setting.
    :param pretrained: key into ``pretrained_settings['polynet']`` (e.g.
        'imagenet'), or a falsy value for a randomly initialized model.
    """
    if pretrained:
        settings = pretrained_settings['polynet'][pretrained]
        # Pretrained weights fix the classifier size, so num_classes must match.
        assert num_classes == settings['num_classes'], \
            'num_classes should be {}, but is {}'.format(settings['num_classes'], num_classes)
        model = PolyNet(num_classes=num_classes)
        model.load_state_dict(model_zoo.load_url(settings['url']))
        # Attach preprocessing metadata expected by callers.
        model.input_space = settings['input_space']
        model.input_size = settings['input_size']
        model.input_range = settings['input_range']
        model.mean = settings['mean']
        model.std = settings['std']
    else:
        model = PolyNet(num_classes=num_classes)
    return model
def create_page_move(self, page_move_parameters, project, wiki_identifier, comment=None):
    """CreatePageMove.

    Creates a page move operation that updates the path and order of the
    page as provided in the parameters.

    :param :class:`<WikiPageMoveParameters>` page_move_parameters: Page move operation parameters.
    :param str project: Project ID or project name
    :param str wiki_identifier: Wiki Id or name.
    :param str comment: Comment that is to be associated with this page move.
    :rtype: :class:`<WikiPageMoveResponse> <azure.devops.v5_0.wiki.models.WikiPageMoveResponse>`
    """
    # Build URL route values; only include parts that were supplied.
    route_values = {}
    if project is not None:
        route_values['project'] = self._serialize.url('project', project, 'str')
    if wiki_identifier is not None:
        route_values['wikiIdentifier'] = self._serialize.url('wiki_identifier', wiki_identifier, 'str')
    query_parameters = {}
    if comment is not None:
        query_parameters['comment'] = self._serialize.query('comment', comment, 'str')
    content = self._serialize.body(page_move_parameters, 'WikiPageMoveParameters')
    response = self._send(http_method='POST',
                          location_id='e37bbe71-cbae-49e5-9a4e-949143b9d910',
                          version='5.0',
                          route_values=route_values,
                          query_parameters=query_parameters,
                          content=content)
    # Wrap the deserialized move together with the ETag response header.
    response_object = models.WikiPageMoveResponse()
    response_object.page_move = self._deserialize('WikiPageMove', response)
    response_object.eTag = response.headers.get('ETag')
    return response_object
def release(self, force=False):
    """Release the lock.

    To release a lock, we must already own it.

    Arguments:
        force (bool, optional): If true, ignore any existing lock owner.

    Raises:
        UnableToReleaseLockError: If the lock is claimed by another
            process (not raised if the force option is used).
    """
    # No lock on disk means nothing to release.
    if not self.islocked:
        return
    if not (self.owned_by_self or force):
        raise UnableToReleaseLockError(self)
    os.remove(self.path)
def do_organization_info(ava):
    """Build an ``md.Organization`` element from a configuration dict.

    Description of an organization in the configuration is a dictionary of
    keys and values, where the values might be tuples::

        "organization": {
            "name": ("AB Exempel", "se"),
            "display_name": ("AB Exempel", "se"),
            "url": "http://www.example.org"
        }

    :param ava: the organization configuration dict, or None.
    :return: an ``md.Organization`` instance, or None when ``ava`` is None.
    """
    if ava is None:
        return None
    org = md.Organization()
    for dkey, (ckey, klass) in ORG_ATTR_TRANSL.items():
        if ckey not in ava:
            continue
        value = ava[ckey]
        if isinstance(value, list):
            setattr(org, dkey, [_localized_name(n, klass) for n in value])
        else:
            # Strings and any other scalar (e.g. a (text, lang) tuple) are
            # wrapped as a single localized name. The previous code had two
            # identical branches for these cases; they are merged here.
            setattr(org, dkey, [_localized_name(value, klass)])
    return org
def split(self):
    """Split the leaf node along ``self.split_dim``.

    The split value is chosen according to ``self.split_mode`` and two child
    ``Tree`` nodes (``self.lower`` / ``self.greater``) are created covering
    the two halves of the region.
    """
    if self.split_mode == 'random':
        # Split randomly between min and max of node's points on split dimension.
        split_dim_data = self.get_data_x()[self.idxs, self.split_dim]  # data on split dim
        split_min = min(split_dim_data)
        split_max = max(split_dim_data)
        split_value = split_min + np.random.rand() * (split_max - split_min)
    elif self.split_mode == 'median':
        # Split on the median (which falls in the middle of two points for an
        # even max_points_per_region) of the node's points on the split dimension.
        split_dim_data = self.get_data_x()[self.idxs, self.split_dim]  # data on split dim
        split_value = np.median(split_dim_data)
    elif self.split_mode == 'middle':
        # Split on the middle of the region: might cause an empty leaf.
        split_dim_data = self.get_data_x()[self.idxs, self.split_dim]  # data on split dim
        split_value = (self.bounds_x[0, self.split_dim] + self.bounds_x[1, self.split_dim]) / 2
    elif self.split_mode == 'best_interest_diff':
        # See Baranes2012: Active Learning of Inverse Models with Intrinsically
        # Motivated Goal Exploration in Robots.
        # If strictly more than max_points_per_region points: choose among
        # max_points_per_region random split values the one maximizing
        # card(lower) * card(greater) * |progress difference|.
        # Otherwise: choose among splits at the middle of each pair of
        # consecutive points, by the same fitness.
        split_dim_data = self.get_data_x()[self.idxs, self.split_dim]  # data on split dim
        split_min = min(split_dim_data)
        split_max = max(split_dim_data)
        if len(self.idxs) > self.max_points_per_region:
            # Constant that might be tuned: number of random split values to choose between.
            m = self.max_points_per_region
            rand_splits = split_min + np.random.rand(m) * (split_max - split_min)
            splits_fitness = np.zeros(m)
            for i in range(m):
                lower_idx = list(np.array(self.idxs)[np.nonzero(split_dim_data <= rand_splits[i])[0]])
                greater_idx = list(np.array(self.idxs)[np.nonzero(split_dim_data > rand_splits[i])[0]])
                splits_fitness[i] = len(lower_idx) * len(greater_idx) * abs(self.progress_idxs(lower_idx) - self.progress_idxs(greater_idx))
            split_value = rand_splits[np.argmax(splits_fitness)]
        else:
            m = self.max_points_per_region - 1
            # Midpoints between consecutive sorted values.
            splits = (np.sort(split_dim_data)[0:-1] + np.sort(split_dim_data)[1:]) / 2
            splits_fitness = np.zeros(m)
            for i in range(m):
                lower_idx = list(np.array(self.idxs)[np.nonzero(split_dim_data <= splits[i])[0]])
                greater_idx = list(np.array(self.idxs)[np.nonzero(split_dim_data > splits[i])[0]])
                splits_fitness[i] = len(lower_idx) * len(greater_idx) * abs(self.progress_idxs(lower_idx) - self.progress_idxs(greater_idx))
            split_value = splits[np.argmax(splits_fitness)]
    else:
        raise NotImplementedError
    # Partition this node's point indices by the chosen split value.
    lower_idx = list(np.array(self.idxs)[np.nonzero(split_dim_data <= split_value)[0]])
    greater_idx = list(np.array(self.idxs)[np.nonzero(split_dim_data > split_value)[0]])
    self.leafnode = False
    self.split_value = split_value
    # Children cycle to the next dimension (round-robin over columns of data_x).
    split_dim = np.mod(self.split_dim + 1, np.shape(self.get_data_x())[1])
    # Each child keeps the parent's bounds except on the split dimension.
    l_bounds_x = np.array(self.bounds_x)
    l_bounds_x[1, self.split_dim] = split_value
    g_bounds_x = np.array(self.bounds_x)
    g_bounds_x[0, self.split_dim] = split_value
    self.lower = Tree(self.get_data_x, l_bounds_x, self.get_data_c, self.max_points_per_region, self.max_depth - 1, self.split_mode, self.progress_win_size, self.progress_measure, self.sampling_mode, idxs=lower_idx, split_dim=split_dim)
    self.greater = Tree(self.get_data_x, g_bounds_x, self.get_data_c, self.max_points_per_region, self.max_depth - 1, self.split_mode, self.progress_win_size, self.progress_measure, self.sampling_mode, idxs=greater_idx, split_dim=split_dim)
def get_form_layout(self, process_id, wit_ref_name):
    """GetFormLayout.

    [Preview API] Gets the form layout.

    :param str process_id: The ID of the process.
    :param str wit_ref_name: The reference name of the work item type.
    :rtype: :class:`<FormLayout> <azure.devops.v5_0.work_item_tracking_process.models.FormLayout>`
    """
    # Build URL route values; only include parts that were supplied.
    route_values = {}
    if process_id is not None:
        route_values['processId'] = self._serialize.url('process_id', process_id, 'str')
    if wit_ref_name is not None:
        route_values['witRefName'] = self._serialize.url('wit_ref_name', wit_ref_name, 'str')
    response = self._send(http_method='GET',
                          location_id='fa8646eb-43cd-4b71-9564-40106fd63e40',
                          version='5.0-preview.1',
                          route_values=route_values)
    return self._deserialize('FormLayout', response)
def function_dict(self):
    """Equivalent to ``self.model_dict``, but with all variables replaced by
    functions if applicable.

    Keys follow the evaluation order of ``self.vars_as_functions``, not the
    alphabetical order of ``self.model_dict``.
    """
    replacements = self.vars_as_functions
    return OrderedDict(
        (func, self.model_dict[var].xreplace(replacements))
        for var, func in replacements.items()
    )
def put(self, context):
    """Return a reference, making it eligible for recycling once its
    reference count reaches zero.
    """
    LOG.debug('%r.put(%r)', self, context)
    self._lock.acquire()
    try:
        count = self._refs_by_context.get(context, 0)
        if count == 0:
            # Likely already torn down; nothing to decrement.
            LOG.warning('%r.put(%r): refcount was 0. shutdown_all called?', self, context)
            return
        self._refs_by_context[context] = count - 1
    finally:
        self._lock.release()
def authenticate(self, request):
    """Authenticate a request carrying a bearer access token.

    Returns a ``(user, access_token)`` tuple if a correct access token has
    been supplied in the Authorization header. Otherwise returns ``None``.

    :raises exceptions.AuthenticationFailed: on a malformed header, an
        invalid token, or an inactive user.
    """
    auth = get_authorization_header(request).split()
    # Not a bearer-token request; let other authenticators handle it.
    if not auth or auth[0].lower() != b'bearer':
        return None
    if len(auth) == 1:
        msg = 'Invalid authorization header. No credentials provided.'
        raise exceptions.AuthenticationFailed(msg)
    elif len(auth) > 2:
        msg = 'Invalid authorization header. Access token should not contain spaces.'
        raise exceptions.AuthenticationFailed(msg)
    # Authenticate the user.
    # The AdfsAuthCodeBackend authentication backend will notice the
    # "access_token" parameter and skip the request for an access token
    # using the authorization code.
    # NOTE(review): this calls the module-level `authenticate` (presumably
    # django.contrib.auth.authenticate), not this method — confirm import.
    user = authenticate(access_token=auth[1])
    if user is None:
        raise exceptions.AuthenticationFailed('Invalid access token.')
    if not user.is_active:
        raise exceptions.AuthenticationFailed('User inactive or deleted.')
    return user, auth[1]
def _split_after_delimiter(self, item, indent_amt):
    """Split the line only after a delimiter.

    Scans the buffered line backwards for the last breakable space and
    inserts a line break there, indented by ``indent_amt``.
    """
    self._delete_whitespace()
    # Nothing to do when the item still fits on the current line.
    if self.fits_on_current_line(item.size):
        return
    last_space = None
    # NOTE: the loop variable deliberately reuses/shadows `item` — the
    # parameter is no longer needed beyond the size check above.
    for item in reversed(self._lines):
        if (last_space and (not isinstance(item, Atom) or not item.is_colon)):
            # Found a space followed by a breakable token; stop searching.
            break
        else:
            last_space = None
            if isinstance(item, self._Space):
                last_space = item
            if isinstance(item, (self._LineBreak, self._Indent)):
                # Already at a break/indent boundary; no split needed.
                return
    if not last_space:
        return
    self.add_line_break_at(self._lines.index(last_space), indent_amt)
def debug(func):
    """Decorator that prints the function name and call arguments before
    invoking the wrapped function (for debugging).
    """
    @wraps(func)
    def inner(*args, **kwargs):
        print("{} args: {} kwargs: {}".format(func.__name__, args, kwargs))
        result = func(*args, **kwargs)
        return result
    return inner
def formatted_ghost_file(self):
    """Returns a properly formatted ghost file name.

    Path separators and drive colons are replaced with dashes so the name
    is valid on both Linux and MS Windows.

    :returns: formatted ghost_file name (string)
    """
    base = os.path.basename(self._image)
    raw_name = "{}-{}.ghost".format(base, self._ram)
    # Sanitize special characters from 'drive:\filename' style paths.
    sanitized = raw_name.replace('\\', '-').replace('/', '-').replace(':', '-')
    return sanitized
def optional(self):
    """Flag indicating an optional property.

    :raises SchemaError: when the schema's "optional" entry is not a boolean.
    """
    flag = self._schema.get("optional", False)
    if flag is True or flag is False:
        return flag
    raise SchemaError("optional value {0!r} is not a boolean".format(flag))
def positional(max_pos_args):
    """A decorator to declare that only the first N arguments may be positional.

    Note that for methods, n includes 'self'. The check is only active when
    the module-level DEBUG flag is set; otherwise the function is returned
    unwrapped.
    """
    # NOTE(review): __ndb_debug__ = 'SKIP' appears to be a marker consumed by
    # an external debugging/tracing facility — confirm before removing.
    __ndb_debug__ = 'SKIP'

    def positional_decorator(wrapped):
        # Skip the runtime arity check entirely outside debug mode.
        if not DEBUG:
            return wrapped
        __ndb_debug__ = 'SKIP'

        @wrapping(wrapped)
        def positional_wrapper(*args, **kwds):
            __ndb_debug__ = 'SKIP'
            if len(args) > max_pos_args:
                plural_s = ''
                if max_pos_args != 1:
                    plural_s = 's'
                raise TypeError('%s() takes at most %d positional argument%s (%d given)' % (wrapped.__name__, max_pos_args, plural_s, len(args)))
            return wrapped(*args, **kwds)
        return positional_wrapper
    return positional_decorator
def validate_commit_range(repo_dir, old_commit, new_commit):
    """Check if a commit range is valid; flip it if needed.

    :param repo_dir: path to the git repository.
    :param old_commit: expected older end of the range.
    :param new_commit: expected newer end of the range.
    :return: True when ``old..new`` contains commits, the string 'flip' when
        only the reversed range ``new..old`` does.
    :raises exceptions.InvalidCommitRangeException: when neither direction
        contains any commits.
    """
    def _commits_between(first, second):
        # get_commits raises for unknown refs/ranges; treat that as "no commits".
        try:
            return get_commits(repo_dir, first, second)
        except Exception:
            return []

    # Are there any commits between the two commits that were provided?
    if _commits_between(old_commit, new_commit):
        return True
    # The user might have gotten their commits out of order. Flip the order
    # and try again before giving up.
    if _commits_between(new_commit, old_commit):
        return 'flip'
    # Okay, so there really are no commits between the two commits provided.
    # Fixed: the implicitly concatenated message previously lacked a space
    # after "{2}.".
    msg = ("The commit range {0}..{1} is invalid for {2}. "
           "You may need to use the --update option to fetch the "
           "latest updates to the git repositories stored on your "
           "local computer.".format(old_commit, new_commit, repo_dir))
    raise exceptions.InvalidCommitRangeException(msg)
def get_logs_multipart(w3, startBlock, stopBlock, address, topics, max_blocks):
    """Used to break up requests to ``eth_getLogs``.

    The getLog request is partitioned into multiple calls covering at most
    ``max_blocks`` blocks each; results are yielded per sub-range.
    """
    _block_ranges = block_ranges(startBlock, stopBlock, max_blocks)
    for from_block, to_block in _block_ranges:
        params = {'fromBlock': from_block, 'toBlock': to_block, 'address': address, 'topics': topics}
        # Drop None-valued filter fields before issuing the RPC call.
        yield w3.eth.getLogs(drop_items_with_none_value(params))
async def async_delete_device(self, device_id: int) -> None:
    """Delete an enrolled device.

    :param device_id: unique identifier for the device to be deleted
    :raises ValueError: when the device cannot be found on the unit.
    """
    # Look up the device first to obtain an accurate index, which is
    # needed to perform the delete command.
    device = self._devices[device_id]
    response = await self._protocol.async_execute(GetDeviceCommand(device.category, device.group_number, device.unit_number))
    if isinstance(response, DeviceInfoResponse):
        response = await self._protocol.async_execute(DeleteDeviceCommand(device.category, response.index))
    if isinstance(response, DeviceDeletedResponse):
        self._devices._delete(device)  # pylint: disable=protected-access
        if self._on_device_deleted:
            try:
                self._on_device_deleted(self, device)  # pylint: disable=protected-access
            except Exception:  # pylint: disable=broad-except
                # Callback errors must not break the delete flow; log them.
                _LOGGER.error("Unhandled exception in on_device_deleted callback", exc_info=True)
    if isinstance(response, DeviceNotFoundResponse):
        raise ValueError("Device to be deleted was not found")
async def _get_descriptions(self):
    """Read a column descriptor packet for each column in the result.

    Populates ``self.fields``, ``self.converters`` (one (encoding,
    converter) pair per column) and ``self.description``.
    """
    self.fields = []
    self.converters = []
    use_unicode = self.connection.use_unicode
    conn_encoding = self.connection.encoding
    description = []
    for i in range(self.field_count):
        field = await self.connection._read_packet(FieldDescriptorPacket)
        self.fields.append(field)
        description.append(field.description())
        field_type = field.type_code
        if use_unicode:
            if field_type == FIELD_TYPE.JSON:
                # When SELECT from JSON column: charset = binary
                # When SELECT CAST(... AS JSON): charset = connection encoding
                # This behavior is different from TEXT/BLOB.
                # We should decode result by connection encoding
                # regardless charsetnr.
                # See https://github.com/PyMySQL/PyMySQL/issues/488
                encoding = conn_encoding  # SELECT CAST(... AS JSON)
            elif field_type in TEXT_TYPES:
                if field.charsetnr == 63:  # binary
                    # TEXTs with charset=binary means BINARY types.
                    encoding = None
                else:
                    encoding = conn_encoding
            else:
                # Integers, Dates and Times, and other basic data
                # is encoded in ascii.
                encoding = 'ascii'
        else:
            encoding = None
        converter = self.connection.decoders.get(field_type)
        # `through` is the identity converter; treat it as "no conversion".
        if converter is through:
            converter = None
        self.converters.append((encoding, converter))
    eof_packet = await self.connection._read_packet()
    assert eof_packet.is_eof_packet(), 'Protocol error, expecting EOF'
    self.description = tuple(description)
def access_token_handler(self, **args):
    """Get access token based on cookie sent with this request.

    This handler deals with two cases:
    1) Non-browser client (indicated by no messageId set in request)
       where the response is a simple JSON response.
    2) Browser client (indicated by messageId set in request) where
       the request must be made from an iFrame and the response is
       sent as JSON wrapped in HTML containing a postMessage() script
       that conveys the access token to the viewer.
    """
    message_id = request.args.get('messageId', default=None)
    origin = request.args.get('origin', default='unknown_origin')
    self.logger.info("access_token_handler: origin = " + origin)
    account = request.cookies.get(self.account_cookie_name, default='')
    token = self.access_token(account)
    # Build JSON response.
    data_str = json.dumps(self.access_token_response(token, message_id))
    ct = "application/json"
    # If message_id is set then wrap in HTML with postMessage JavaScript
    # for a browser client.
    if (message_id is not None):
        data_str = """<html>
<body style="margin: 0px;">
<div>postMessage ACCESS TOKEN %s</div>
<script>
window.parent.postMessage(%s, '%s');
</script>
</body>
</html>
""" % (token, data_str, origin)
        ct = "text/html"
    # Send response along with cookie.
    response = make_response(data_str, 200, {'Content-Type': ct})
    if (token):
        self.logger.info("access_token_handler: setting access token = " + token)
        # Set the cookie for the image content.
        cookie = self.access_cookie(token)
        self.logger.info("access_token_handler: setting access cookie = " + cookie)
        response.set_cookie(self.access_cookie_name, cookie)
    else:
        self.logger.info("access_token_handler: auth failed, sending error")
    response.headers['Access-control-allow-origin'] = '*'
    return response
def _get_unknown_error_response(self, request, exc):
    """Generate an HttpResponse for unknown exceptions.

    In DEBUG mode the exception is re-raised so it surfaces during
    development; otherwise a 500 response is returned.

    todo: this should be more informative..
    """
    devil_logger = logging.getLogger('devil')
    devil_logger.error(
        'while doing %s on %s with [%s], devil caught: %s' % (
            request.method, request.path_info, str(request.GET), str(exc)),
        exc_info=True)
    if not settings.DEBUG:
        return HttpResponse(status=codes.INTERNAL_SERVER_ERROR[1])
    raise
def wkb(self):
    """Get the geometry as an (E)WKB.

    SRID is always embedded; Z/M dimensions follow this geometry's
    ``dimz``/``dimm`` flags.
    """
    return self._to_wkb(use_srid=True, dimz=self.dimz, dimm=self.dimm)
def rpole_to_pot_aligned(rpole, sma, q, F, d, component=1):
    """Transform a polar radius to a surface potential (aligned case).

    ``rpole`` is scaled by ``sma`` and passed to ``libphoebe.roche_Omega``
    along the polar (z) axis; the result is converted for the requested
    component.
    """
    q = q_for_component(q, component=component)
    # Polar radius vector in units of the semi-major axis, along z.
    rpole_ = np.array([0, 0, rpole / sma])
    logger.debug("libphoebe.roche_Omega(q={}, F={}, d={}, rpole={})".format(q, F, d, rpole_))
    pot = libphoebe.roche_Omega(q, F, d, rpole_)
    return pot_for_component(pot, component, reverse=True)
def count_tf(tokens_stream):
    """Count term frequencies for a single file.

    ``tokens_stream`` is an iterable of token lists; the result maps each
    token to its total occurrence count.
    """
    frequencies = defaultdict(int)
    for token_list in tokens_stream:
        for term in token_list:
            frequencies[term] += 1
    return frequencies
def do_visualize(model, model_path, nr_visualize=100, output_dir='output'):
    """Visualize some intermediate results (proposals, raw predictions) inside the pipeline.

    Writes one stacked visualization image per sample into ``output_dir``.
    """
    df = get_train_dataflow()
    # we don't visualize mask stuff
    df.reset_state()
    pred = OfflinePredictor(PredictConfig(
        model=model,
        session_init=get_model_loader(model_path),
        input_names=['image', 'gt_boxes', 'gt_labels'],
        output_names=[
            'generate_{}_proposals/boxes'.format('fpn' if cfg.MODE_FPN else 'rpn'),
            'generate_{}_proposals/scores'.format('fpn' if cfg.MODE_FPN else 'rpn'),
            'fastrcnn_all_scores',
            'output/boxes',
            'output/scores',
            'output/labels',
        ]))
    # Start from an empty output directory.
    if os.path.isdir(output_dir):
        shutil.rmtree(output_dir)
    utils.fs.mkdir_p(output_dir)
    with tqdm.tqdm(total=nr_visualize) as pbar:
        for idx, dp in itertools.islice(enumerate(df), nr_visualize):
            img, gt_boxes, gt_labels = dp['image'], dp['gt_boxes'], dp['gt_labels']
            rpn_boxes, rpn_scores, all_scores, final_boxes, final_scores, final_labels = pred(img, gt_boxes, gt_labels)
            # draw groundtruth boxes
            gt_viz = draw_annotation(img, gt_boxes, gt_labels)
            # draw best proposals for each groundtruth, to show recall
            proposal_viz, good_proposals_ind = draw_proposal_recall(img, rpn_boxes, rpn_scores, gt_boxes)
            # draw the scores for the above proposals
            score_viz = draw_predictions(img, rpn_boxes[good_proposals_ind], all_scores[good_proposals_ind])
            results = [DetectionResult(*args) for args in zip(final_boxes, final_scores, final_labels, [None] * len(final_labels))]
            final_viz = draw_final_outputs(img, results)
            # 2x2 grid: groundtruth / proposals / proposal scores / final outputs.
            viz = tpviz.stack_patches([gt_viz, proposal_viz, score_viz, final_viz], 2, 2)
            if os.environ.get('DISPLAY', None):
                # Show interactively only when a display is available.
                tpviz.interactive_imshow(viz)
            cv2.imwrite("{}/{:03d}.png".format(output_dir, idx), viz)
            pbar.update()
def fill_from_allwise(self, ident, catalog_ident='II/328/allwise'):
    """Fill in astrometric information from the AllWISE catalog using Astroquery.

    This uses the :mod:`astroquery` module to query the AllWISE source
    catalog through the Vizier web service, then fills in this instance
    with the relevant information.

    ident
      The AllWISE catalog identifier of the form ``"J112254.70+255021.9"``.
    catalog_ident
      The Vizier designation of the catalog to query. The default is
      "II/328/allwise", the current version of the AllWISE catalog.

    Raises :exc:`~pwkit.PKError` if something unexpected happens that
    doesn't itself result in an exception within :mod:`astroquery`.

    You should probably prefer :meth:`fill_from_simbad` for objects known
    to the CDS Simbad service, but not all objects in the AllWISE catalog
    are so known. If you use this function, you should acknowledge AllWISE
    and Vizier.

    Returns *self*.
    """
    from astroquery.vizier import Vizier
    import numpy.ma.core as ma_core
    # We should match exactly one table and one row within that table, but
    # for robustness we ignore additional results if they happen to
    # appear. Strangely, querying for an invalid identifier yields a table
    # with two rows that are filled with masked out data.
    table_list = Vizier.query_constraints(catalog=catalog_ident, AllWISE=ident)
    if not len(table_list):
        raise PKError('Vizier query returned no tables (catalog=%r AllWISE=%r)', catalog_ident, ident)
    table = table_list[0]
    if not len(table):
        raise PKError('Vizier query returned empty %s table (catalog=%r AllWISE=%r)', table.meta['name'], catalog_ident, ident)
    row = table[0]
    if isinstance(row['_RAJ2000'], ma_core.MaskedConstant):
        raise PKError('Vizier query returned flagged row in %s table; your AllWISE '
                      'identifier likely does not exist (it should be of the form '
                      '"J112254.70+255021.9"; catalog=%r AllWISE=%r)', table.meta['name'], catalog_ident, ident)
    # OK, we can actually do this.
    self.ra = row['RA_pm'] * D2R
    self.dec = row['DE_pm'] * D2R
    # Position uncertainty ellipse: major axis along whichever of RA/Dec
    # has the larger error.
    if row['e_RA_pm'] > row['e_DE_pm']:
        self.pos_u_maj = row['e_RA_pm'] * A2R
        self.pos_u_min = row['e_DE_pm'] * A2R
        self.pos_u_pa = halfpi
    else:
        self.pos_u_maj = row['e_DE_pm'] * A2R
        self.pos_u_min = row['e_RA_pm'] * A2R
        self.pos_u_pa = 0
    self.pos_epoch = 55400.  # hardcoded in the catalog
    # Proper motion and its uncertainty ellipse, same convention as above.
    self.promo_ra = row['pmRA']
    self.promo_dec = row['pmDE']
    if row['e_pmRA'] > row['e_pmDE']:
        self.promo_u_maj = row['e_pmRA'] * 1.
        self.promo_u_min = row['e_pmDE'] * 1.
        self.promo_u_pa = halfpi
    else:
        self.promo_u_maj = row['e_pmDE'] * 1.
        self.promo_u_min = row['e_pmRA'] * 1.
        self.promo_u_pa = 0.
    return self
def splitarg(args):
    """Split arguments separated by spaces or commas.

    Kept for backwards compatibility with the original ArcGet command line
    tool. Empty fragments are dropped; a falsy input is returned unchanged.
    """
    if not args:
        return args
    result = list()
    for token in args:
        if ',' in token:
            result.extend(part for part in token.split(',') if part)
        elif token:
            result.append(token)
    return result
def remove_asset(self, asset_id, composition_id):
    """Removes an ``Asset`` from a ``Composition``.

    arg:    asset_id (osid.id.Id): ``Id`` of the ``Asset``
    arg:    composition_id (osid.id.Id): ``Id`` of the ``Composition``
    raise:  NotFound - ``asset_id`` not found in ``composition_id``
    raise:  NullArgument - ``asset_id`` or ``composition_id`` is ``null``
    raise:  OperationFailed - unable to complete request
    raise:  PermissionDenied - authorization failure
    *compliance: mandatory -- This method must be implemented.*
    """
    # NOTE(review): with `and`, InvalidArgument is raised only when the id is
    # both not an ABCId AND has the wrong namespace; `or` may have been
    # intended — confirm against sibling argument-check helpers.
    if (not isinstance(composition_id, ABCId) and
            composition_id.get_identifier_namespace() != 'repository.Composition'):
        raise errors.InvalidArgument('the argument is not a valid OSID Id')
    composition_map, collection = self._get_composition_collection(composition_id)
    try:
        composition_map['assetIds'].remove(str(asset_id))
    except (KeyError, ValueError):
        # Missing 'assetIds' key or asset not present both map to NotFound.
        raise errors.NotFound()
    collection.save(composition_map)
def update(self):
    """Determine in how many segments the whole reach needs to be
    divided to approximate the desired lag time via integer rounding,
    and adjust the shape of sequence |QJoints| accordingly.

    Required control parameters:
      |Lag|

    Calculated derived parameters:
      |NmbSegments|

    Prepared state sequence:
      |QJoints|

    Examples:

        Define a lag time of 1.4 days and a simulation step size of 12
        hours:

        >>> from hydpy.models.hstream import *
        >>> parameterstep('1d')
        >>> simulationstep('12h')
        >>> lag(1.4)

        Then the actual lag value for the simulation step size is 2.8

        >>> lag
        lag(1.4)
        >>> lag.value
        2.8

        Through rounding the number of segments is determined:

        >>> derived.nmbsegments.update()
        >>> derived.nmbsegments
        nmbsegments(3)

        The number of joints is always the number of segments plus one:

        >>> states.qjoints.shape
    """
    pars = self.subpars.pars
    # `pars.control.lag` holds the step-size-adjusted value (2.8 in the
    # example above); integer rounding yields the segment count, which is
    # assigned by calling the parameter object itself.
    self(int(round(pars.control.lag)))
    # There is always exactly one joint more than there are segments.
    pars.model.sequences.states.qjoints.shape = self + 1
def remove_server_data(server_id):
    """Remove a server from the server data

    Args:
        server_id (int): The server to remove from the server data
    """
    logger.debug("Removing server from serverdata")
    # Load the persisted data, drop the server entry if present, and
    # write the (possibly unchanged) data back.
    data = datatools.get_data()
    servers = data["discord"]["servers"]
    if server_id in servers:
        del servers[server_id]
    datatools.write_data(data)
def tle(self, url, reload=False, filename=None):
    """Load and parse a satellite TLE file.

    Given a URL or a local path, this loads a file of three-line records
    in the common Celestrak file format, or two-line records like those
    from space-track.org. For a three-line element set, each first line
    gives the name of a satellite and the following two lines are the
    TLE orbital elements. A two-line element set comprises only these
    last two lines.

    See the :meth:`~skyfield.iokit.Loader.open()` documentation for the
    meaning of the ``reload`` and ``filename`` parameters.

    Returns a dictionary whose keys are satellite names and numbers, and
    whose values are :class:`~skyfield.sgp4lib.EarthSatellite` objects.
    If you want to build a list in which each satellite appears only
    once, simply run ``sats = set(d.values())`` on the returned
    dictionary.
    """
    catalog = {}
    with self.open(url, reload=reload, filename=filename) as f:
        for names, sat in parse_tle(f):
            # Index each satellite both by catalog number and by every
            # name the file provides for it.
            catalog[sat.model.satnum] = sat
            for name in names:
                catalog[name] = sat
    return catalog
def seg(reference_intervals, estimated_intervals):
    """Compute the MIREX 'MeanSeg' score.

    Examples
    --------
    >>> (ref_intervals,
    ...  ref_labels) = mir_eval.io.load_labeled_intervals('ref.lab')
    >>> (est_intervals,
    ...  est_labels) = mir_eval.io.load_labeled_intervals('est.lab')
    >>> score = mir_eval.chord.seg(ref_intervals, est_intervals)

    Parameters
    ----------
    reference_intervals : np.ndarray, shape=(n, 2), dtype=float
        Reference chord intervals to score against.
    estimated_intervals : np.ndarray, shape=(m, 2), dtype=float
        Estimated chord intervals to score against.

    Returns
    -------
    segmentation score : float
        Comparison score, in [0.0, 1.0], where 1.0 means perfect
        segmentation.
    """
    # MeanSeg is the worse (minimum) of under- and over-segmentation.
    under = underseg(reference_intervals, estimated_intervals)
    over = overseg(reference_intervals, estimated_intervals)
    return min(under, over)
def league_scores(self, total_data, time, show_datetime, use_12_hour_format):
    """Prints the data in a pretty format"""
    # NOTE(review): `time` is unused here; kept for interface
    # compatibility with callers.
    for match in total_data['matches']:
        parsed = self.parse_result(match)
        self.scores(parsed, add_new_line=not show_datetime)
        if show_datetime:
            local_time = Stdout.utc_to_local(match["utcDate"],
                                             use_12_hour_format,
                                             show_datetime)
            click.secho('   %s' % local_time, fg=self.colors.TIME)
        click.echo()
def main():
    """Main function for command line usage"""
    parser = argparse.ArgumentParser(
        usage="usage: %(prog)s [options] ",
        description="Merge a set of Fermi-LAT files.")
    parser.add_argument('-o', '--output', default=None, type=str,
                        help='Output file.')
    parser.add_argument('--clobber', default=False, action='store_true',
                        help='Overwrite output file.')
    parser.add_argument('files', nargs='+', default=None,
                        help='List of input files.')
    args = parser.parse_args()

    # The first input file determines the projection type for all files.
    proj, f, hdu = fits_utils.read_projection_from_fits(args.files[0])
    if isinstance(proj, WCS):
        merger = merge_utils.merge_wcs_counts_cubes
    elif isinstance(proj, HPX):
        merger = merge_utils.merge_hpx_counts_cubes
    else:
        raise TypeError("Could not read projection from file %s" % args.files[0])
    hdulist = merger(args.files)

    if args.output:
        hdulist.writeto(args.output, clobber=args.clobber,
                        output_verify='silentfix')
def add_catalogue(self, catalogue, overlay=False):
    '''
    Adds an earthquake catalogue to the map, plotting events in
    magnitude bins with size-scaled markers and a legend.

    :param catalogue:
        Earthquake catalogue as instance of
        :class:`openquake.hmtk.seismicity.catalogue.Catalogue`
    :param bool overlay:
        If False, the figure is shown immediately via ``plt.show()``.
    :returns:
        Figure with the spatial distribution of the events.
    '''
    # Magnitude bins and minimum marker size
    # min_mag = np.min(catalogue.data['magnitude'])
    # max_mag = np.max(catalogue.data['magnitude'])
    # Indices of symbology bins whose lower edge lies below the smallest
    # observed magnitude / whose upper edge lies above the largest.
    con_min = np.where(np.array([symb[0] for symb in DEFAULT_SYMBOLOGY]) <
                       np.min(catalogue.data['magnitude']))[0]
    con_max = np.where(np.array([symb[1] for symb in DEFAULT_SYMBOLOGY]) >
                       np.max(catalogue.data['magnitude']))[0]
    if len(con_min) == 1:
        min_loc = con_min[0]
    else:
        min_loc = con_min[-1]
    if len(con_max) == 1:
        max_loc = con_max[0]
    else:
        # NOTE(review): index [1] (rather than [0] or [-1]) is kept from
        # the original -- presumably intended to include one bin beyond
        # the first match; confirm against the DEFAULT_SYMBOLOGY layout.
        max_loc = con_max[1]
    # min_loc = np.where(np.array([symb[0] for symb in DEFAULT_SYMBOLOGY])
    #                    < np.min(catalogue.data['magnitude']))[0][-1]
    # max_loc = np.where(np.array([symb[1] for symb in DEFAULT_SYMBOLOGY])
    #                    > np.max(catalogue.data['magnitude']))[0][1]
    symbology = DEFAULT_SYMBOLOGY[min_loc:max_loc]
    for sym in symbology:
        # Create legend string for this magnitude bin (open-ended bins
        # use a one-sided label).
        if np.isinf(sym[0]):
            leg_str = 'M < %5.2f' % sym[1]
        elif np.isinf(sym[1]):
            leg_str = 'M >= %5.2f' % sym[0]
        else:
            leg_str = '%5.2f <= M < %5.2f' % (sym[0], sym[1])
        # Select the events falling inside this bin.
        idx = np.logical_and(catalogue.data['magnitude'] >= sym[0],
                             catalogue.data['magnitude'] < sym[1])
        # Marker size scales with the bin's clipped magnitude bounds.
        mag_size = 1.2 * np.min([sym[0] + 0.5, sym[1] - 0.5])
        x, y = self.m(catalogue.data['longitude'][idx],
                      catalogue.data['latitude'][idx])
        self.m.plot(x, y, sym[2], markersize=mag_size, label=leg_str)
    self.ax.legend(bbox_to_anchor=LEGEND_OFFSET)
    if self.title:
        self.ax.set_title(self.title, fontsize=16)
    if not overlay:
        plt.show()
def tablib_export_action(modeladmin, request, queryset, file_type="xls"):
    """Allow the user to download the current filtered list of items

    :param file_type:
        One of the formats supported by tablib (e.g. "xls", "csv",
        "html", etc.)
    """
    dataset = SimpleDataset(queryset, headers=None)
    # Name the download after the model's plural verbose name.
    filename = '{0}.{1}'.format(
        smart_str(modeladmin.model._meta.verbose_name_plural), file_type)
    response = HttpResponse(getattr(dataset, file_type),
                            content_type=get_content_type(file_type))
    response['Content-Disposition'] = 'attachment; filename={0}'.format(filename)
    return response
def view_pdf(name=None):
    """Render a pdf file based on the given page.

    .. note:: this is a bottle view

    Keyword Arguments:
        :name: (str) -- name of the rest file (without the .rst
            extension) MANDATORY

    Returns the generated PDF as a downloadable static file, the meta
    index when *name* is missing, or a 404 when no matching ``.rst``
    file exists.
    """
    if name is None:
        return view_meta_index()
    files = glob.glob("{0}.rst".format(name))
    if not files:
        return abort(404)
    dest_filename = name + '.pdf'
    # Use a context manager so the source file handle is always closed
    # (the original leaked it), and drop the pointless `except: raise`.
    with open(files[0], 'r') as file_handle:
        doctree = publish_doctree(file_handle.read())
    produce_pdf(doctree_content=doctree, filename=dest_filename)
    return static_file(dest_filename, root='', download=True)
def signalflow(self, token, endpoint=None, timeout=None, compress=None):
    """Obtain a SignalFlow API client."""
    from . import signalflow
    # Fall back to the client-level defaults for anything not supplied.
    if compress is None:
        compress = self._compress
    return signalflow.SignalFlowClient(
        token=token,
        endpoint=endpoint or self._stream_endpoint,
        timeout=timeout or self._timeout,
        compress=compress)
def _overlap_slices(self, shape):
    """Calculate the slices for the overlapping part of the bounding
    box and an array of the given shape.

    Parameters
    ----------
    shape : tuple of int
        The ``(ny, nx)`` shape of array where the slices are to be
        applied.

    Returns
    -------
    slices_large : tuple of slices
        A tuple of slice objects for each axis of the large array, such
        that ``large_array[slices_large]`` extracts the region of the
        large array that overlaps with the small array.
    slices_small : tuple of slices
        A tuple of slice objects for each axis of the small array, such
        that ``small_array[slices_small]`` extracts the region of the
        small array that is inside the large array.
    """
    if len(shape) != 2:
        raise ValueError('input shape must have 2 elements.')
    bbox = self.bbox
    xmin, xmax = bbox.ixmin, bbox.ixmax
    ymin, ymax = bbox.iymin, bbox.iymax
    # Bounding box lies entirely outside the array: no overlap at all.
    if xmin >= shape[1] or ymin >= shape[0] or xmax <= 0 or ymax <= 0:
        return None, None
    # Clip the box to the array extent for the large-array slices, and
    # express the same region relative to the box origin for the small
    # array.
    slices_large = (slice(max(ymin, 0), min(ymax, shape[0])),
                    slice(max(xmin, 0), min(xmax, shape[1])))
    slices_small = (slice(max(-ymin, 0), min(ymax - ymin, shape[0] - ymin)),
                    slice(max(-xmin, 0), min(xmax - xmin, shape[1] - xmin)))
    return slices_large, slices_small
def get_dataset_url(self, tournament=1):
    """Fetch url of the current dataset.

    Args:
        tournament (int, optional): ID of the tournament, defaults to 1

    Returns:
        str: url of the current dataset

    Example:
        >>> NumerAPI().get_dataset_url()
        https://numerai-datasets.s3.amazonaws.com/t1/104/numerai_datasets.zip?...
    """
    query = """
            query($tournament: Int!) {
                dataset(tournament: $tournament)
            }"""
    arguments = {'tournament': tournament}
    # The GraphQL endpoint returns the pre-signed download URL directly.
    return self.raw_query(query, arguments)['data']['dataset']
def withdraw(self, currency, amount, address, paymentId=None):
    """Immediately places a withdrawal for a given currency, with no
    email confirmation. In order to use this method, the withdrawal
    privilege must be enabled for your API key. Required POST parameters
    are "currency", "amount", and "address". For XMR withdrawals, you
    may optionally specify "paymentId".
    """
    params = dict(currency=currency, amount=amount,
                  address=address, paymentId=paymentId)
    return self._private('withdraw', **params)
def get_localontologies(pattern=""):
    "returns a list of file names in the ontologies folder (not the full path)"
    home = get_home_location()
    if not os.path.exists(home):
        # Bootstrap the local repository on first use.
        get_or_create_home_repo()
    matches = []
    for entry in os.listdir(home):
        if not os.path.isfile(os.path.join(home, entry)):
            continue
        # Skip hidden files and cached pickles.
        if entry.startswith(".") or entry.endswith(".pickle"):
            continue
        # An empty pattern matches everything.
        if pattern and pattern not in entry:
            continue
        matches.append(entry)
    return matches
def search_tag(self, tag, symbols=True, feeds=False):
    """Get a list of Symbols by searching a tag or partial tag.

    Parameters
    ----------
    tag : str
        The tag to search. Appending '%' will use SQL's "LIKE"
        functionality.
    symbols : bool, optional
        Search for Symbol's based on their tags.
    feeds : bool, optional
        Search for Symbol's based on their Feeds' tags.

    Returns
    -------
    List of Symbols or empty list
    """
    syms = []
    # Accept either a single tag string or an iterable of tags.
    # NOTE(review): `unicode` implies Python 2 compatibility.
    if isinstance(tag, (str, unicode)):
        tags = [tag]
    else:
        tags = tag
    if symbols:
        # Match Symbols directly through their own tags.
        crits = []
        for tag in tags:
            if "%" in tag:
                # '%' triggers SQL LIKE for partial matching.
                crit = SymbolTag.tag.like(tag)
            else:
                crit = SymbolTag.tag == tag
            crits.append(crit)
        qry = self.ses.query(SymbolTag)
        qry = qry.filter(or_(*crits))
        syms = qry.all()
        syms = [tagged.symbol for tagged in syms]
    if feeds:
        # Match Symbols indirectly through the tags of their Feeds.
        crits = []
        for tag in tags:
            if "%" in tag:
                crit = FeedTag.tag.like(tag)
            else:
                crit = FeedTag.tag == tag
            crits.append(crit)
        qry = self.ses.query(Symbol).select_from(FeedTag)
        qry = qry.join(FeedTag.feed).join(Feed.symbol)
        qry = qry.filter(or_(*crits))
        fds = qry.distinct()
        syms = syms + [sym for sym in fds]
        # De-duplicate when combining symbol- and feed-based matches.
        # NOTE(review): nesting reconstructed from a flattened dump --
        # confirm this return belongs inside the `if feeds:` branch.
        return list(set(syms))
    return syms
def _onShortcutPrint(self):
    """Ctrl+P handler.

    Show print dialog, then print the file if the user accepts.
    """
    dialog = QPrintDialog(self)
    accepted = dialog.exec_() == QDialog.Accepted
    if accepted:
        self.print_(dialog.printer())
def init_arg_names(obj):
    """Names of arguments to __init__ method of this object's class."""
    # Hacky introspection: pull the argument names off __init__ (or
    # __new__) and hope they match fields defined on the object.
    try:
        code = obj.__init__.__func__.__code__
    except AttributeError:
        try:
            code = obj.__new__.__func__.__code__
        except AttributeError:
            # namedtuples expose their fields directly, which serve as
            # the required initial args.
            if hasattr(obj, "_fields"):
                return obj._fields
            raise ValueError("Cannot determine args to %s.__init__" % (obj,))
    # co_varnames starts with the positional parameters; the first one
    # is `self`, which callers never pass.
    return code.co_varnames[1:code.co_argcount]
def fetch_plaintext_by_subject(self, email_name):
    """Get the plain text of an email, searching by subject.

    @Params
    email_name - the subject to search for
    @Returns
    Plaintext content of the matched email
    """
    if not email_name:
        raise EmailException("Subject cannot be null")
    matching_uids = self.__imap_search(SUBJECT=email_name)
    return self.fetch_plaintext(matching_uids)
def pssm_array2pwm_array(arr, background_probs=DEFAULT_BASE_BACKGROUND):
    """Convert pssm array to pwm array"""
    # PSSM holds log-odds vs the background; exponentiating and
    # multiplying the per-base background recovers the PWM.
    bg = background_probs2array(background_probs).reshape([1, 4, 1])
    return (np.exp(arr) * bg).astype(arr.dtype)
def _load_edflib(filename):
    """load a multi-channel Timeseries from an EDF (European Data
    Format) file or EDF+ file, using edflib.

    Args:
      filename: EDF+ file

    Returns:
      Timeseries
    """
    import edflib
    reader = edflib.EdfReader(filename, annotations_mode='all')
    if np.ptp(reader.get_samples_per_signal()) != 0:
        raise Error('channels have differing numbers of samples')
    if np.ptp(reader.get_signal_freqs()) != 0:
        raise Error('channels have differing sample rates')
    n_samples = reader.samples_in_file(0)
    n_channels = reader.signals_in_file
    channelnames = reader.get_signal_text_labels()
    dt = 1.0 / reader.samplefrequency(0)
    # EDF files hold <=16 bits of information per sample, so double
    # precision would waste memory; 32-bit floats suffice.
    ar = np.zeros((n_samples, n_channels), dtype=np.float32)
    # edflib requires an input buffer of float64s
    buf = np.zeros((n_samples,), dtype=np.float64)
    for ch in range(n_channels):
        reader.read_phys_signal(ch, 0, n_samples, buf)
        ar[:, ch] = buf
    # The +0.5*dt endpoint guards against float round-off dropping the
    # final sample from the arange.
    tspan = np.arange(0, (n_samples - 1 + 0.5) * dt, dt, dtype=np.float32)
    return Timeseries(ar, tspan, labels=[None, channelnames])
def trends_available(self):
    """Returns a list of regions for which Twitter tracks trends.

    :return: parsed JSON response from the Twitter trends/available
        endpoint
    :raises requests.exceptions.HTTPError: propagated from the HTTP call
    """
    url = 'https://api.twitter.com/1.1/trends/available.json'
    # The original wrapped this in `except HTTPError as e: raise e`,
    # a no-op that only mangled the traceback; let errors propagate.
    resp = self.get(url)
    return resp.json()
def post_resource(collection):
    """Return the appropriate *Response* based on adding a new resource
    to *collection*.

    :param string collection: a :class:`sandman.model.Model` endpoint
    :rtype: :class:`flask.Response`
    """
    model_cls = endpoint_class(collection)
    resource = model_cls()
    # Populate the new resource from the request payload, validate it,
    # then persist it.
    resource.from_dict(get_resource_data(request))
    _validate(model_cls, request.method, resource)
    _perform_database_action('add', resource)
    return resource_created_response(resource)
def class_wise_accuracy(self, scene_label):
    """Class-wise accuracy

    Returns
    -------
    dict
        results in a dictionary format
    """
    col = self.scene_label_list.index(scene_label)
    per_class = self.accuracies_per_class
    # 2-D accuracies are per-fold rows; average the label's column.
    if len(per_class.shape) == 2:
        return {'accuracy': float(numpy.mean(per_class[:, col]))}
    return {'accuracy': float(numpy.mean(per_class[col]))}
def get_api_resources(self, **kwargs):
    """get available resources

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.get_api_resources(async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :return: V1APIResourceList
             If the method is called asynchronously, returns the request
             thread.
    """
    kwargs['_return_http_data_only'] = True
    if kwargs.get('async_req'):
        # Asynchronous: hand back the request thread immediately.
        return self.get_api_resources_with_http_info(**kwargs)
    # Synchronous: the delegate already returns only the data because of
    # `_return_http_data_only`.
    data = self.get_api_resources_with_http_info(**kwargs)
    return data
def dataSetElementType(h5Dataset):
    """Returns a string describing the element type of the dataset"""
    dtype = h5Dataset.dtype
    # Compound (structured) dtypes have field names.
    if dtype.names:
        return '<structured>'
    meta = dtype.metadata
    if meta and 'vlen' in meta:
        vlen_type = meta['vlen']
        try:
            # vlen_type is a plain Python type
            return "<vlen {}>".format(vlen_type.__name__)
        except AttributeError:
            # vlen_type is a numpy dtype
            return "<vlen {}>".format(vlen_type.name)
    return str(dtype)
def generate_base_points(num_points, domain_size, density_map=None,
                         reflect=True):
    r"""Generates a set of base points for passing into the
    Tessellation-based Network classes. The points can be distributed in
    spherical, cylindrical, or rectilinear patterns, as well as 2D and
    3D (disks and squares).

    Parameters
    ----------
    num_points : scalar
        The number of base points that lie within the domain. Note that
        the actual number of points returned will be larger, with the
        extra points lying outside the domain.
    domain_size : list or array
        Controls the size and shape of the domain, as follows:

        **sphere** : If a single value is received, its treated as the
        radius [r] of a sphere centered on [0, 0, 0].

        **cylinder** : If a two-element list is received it's treated as
        the radius and height of a cylinder [r, z] positioned at
        [0, 0, 0] and extending in the positive z-direction. If the z
        dimension is 0, a disk of radius r is created.

        **rectangle** : If a three element list is received, it's
        treated as the outer corner of rectangle [x, y, z] whose
        opposite corner lies at [0, 0, 0]. If the z dimension is 0, a
        rectangle of size X-by-Y is created.
    density_map : array, optional
        An array that contains fractional values (0 < i < 1) indicating
        the likelihood that a point in that region should be kept. The
        size of this array can be anything, but the shape must match the
        ``domain_size``; that is for a 3D network the shape of the
        ``density_map`` can be [10, 10, 10] or [50, 50, 50], depending
        on how important the resolution of the density distribution is.
        For a 2D network the ``density_map`` should be [10, 10].

        When specifying a custom probability map is it recommended to
        also set values outside the given domain to zero. If not, then
        the correct shape will still be returned, but with too few
        points in it.
    reflect : boolean
        If True, the base points are generated as specified, then
        reflected about each face of the domain. This essentially tricks
        the tessellation functions into creating smooth flat faces at
        the boundaries once these excess pores are trimmed.

    Notes
    -----
    The reflection approach tends to create larger pores near the
    surfaces, so it might be necessary to use the ``density_map``
    argument to specify a slightly higher density of points near the
    surfaces.

    The ``Voronoi``, ``Delaunay``, ``Gabriel``, and
    ``DelaunayVoronoiDual`` classes can *technically* handle base points
    with spherical or cylindrical domains, but the reflection across
    round surfaces does not create perfect Voronoi cells so the surfaces
    will not be smooth.

    Examples
    --------
    The following generates a spherical array with higher values near
    the core. It uses a distance transform to create a sphere of radius
    10, then a second distance transform to create larger values in the
    center away from the sphere surface. These distance values could be
    further skewed by applying a power, with values higher than 1
    resulting in higher values in the core, and fractional values
    smoothing them out a bit.

    >>> import openpnm as op
    >>> import scipy as sp
    >>> import scipy.ndimage as spim
    >>> im = sp.ones([21, 21, 21], dtype=int)
    >>> im[10, 10, 10] = 0
    >>> im = spim.distance_transform_edt(im) <= 20  # Create sphere of 1's
    >>> prob = spim.distance_transform_edt(im)
    >>> prob = prob / sp.amax(prob)  # Normalize between 0 and 1
    >>> pts = op.topotools.generate_base_points(num_points=50,
    ...                                         domain_size=[1, 1, 1],
    ...                                         density_map=prob)
    >>> net = op.network.DelaunayVoronoiDual(points=pts, shape=[1, 1, 1])
    """
    def _try_points(num_points, prob):
        # Rejection sampling: keep a random point with probability given
        # by the (normalized) density map value at its location.
        prob = sp.atleast_3d(prob)
        prob = sp.array(prob) / sp.amax(prob)
        # Ensure prob is normalized
        base_pts = []
        N = 0
        while N < num_points:
            pt = sp.random.rand(3)
            # Generate a point
            # Test whether to keep it or not
            [indx, indy, indz] = sp.floor(pt * sp.shape(prob)).astype(int)
            if sp.random.rand(1) <= prob[indx][indy][indz]:
                base_pts.append(pt)
                N += 1
        base_pts = sp.array(base_pts)
        return base_pts
    if len(domain_size) == 1:  # Spherical
        domain_size = sp.array(domain_size)
        r = domain_size[0]
        if density_map is None:
            # Make an image of a sphere filled with ones and use _try_points
            density_map = sp.ones([41, 41, 41])
            density_map[20, 20, 20] = 0
            density_map = spim.distance_transform_edt(density_map) < 20
        base_pts = _try_points(num_points, density_map)
        # Convert to spherical coordinates
        [X, Y, Z] = sp.array(base_pts - [0.5, 0.5, 0.5]).T
        r = 2 * sp.sqrt(X ** 2 + Y ** 2 + Z ** 2) * domain_size[0]
        theta = 2 * sp.arctan(Y / X)
        phi = 2 * sp.arctan(sp.sqrt(X ** 2 + Y ** 2) / Z)
        # Trim points outside the domain (from improper prob images)
        inds = r <= domain_size[0]
        [r, theta, phi] = [r[inds], theta[inds], phi[inds]]
        # Reflect base points across perimeter
        if reflect:
            r, theta, phi = reflect_base_points(sp.vstack((r, theta, phi)),
                                                domain_size)
        # Convert to Cartesian coordinates
        X = r * sp.cos(theta) * sp.sin(phi)
        Y = r * sp.sin(theta) * sp.sin(phi)
        Z = r * sp.cos(phi)
        base_pts = sp.vstack([X, Y, Z]).T
    elif len(domain_size) == 2:  # Cylindrical or Disk
        domain_size = sp.array(domain_size)
        if density_map is None:
            density_map = sp.ones([41, 41, 41])
            density_map[20, 20, :] = 0
            if domain_size[1] == 0:  # Disk
                density_map = density_map[:, :, 0]
            density_map = spim.distance_transform_edt(density_map) < 20
        base_pts = _try_points(num_points, density_map)
        # Convert to cylindrical coordinates
        [X, Y, Z] = sp.array(base_pts - [0.5, 0.5, 0]).T
        # Center on z-axis
        r = 2 * sp.sqrt(X ** 2 + Y ** 2) * domain_size[0]
        theta = 2 * sp.arctan(Y / X)
        z = Z * domain_size[1]
        # Trim points outside the domain (from improper prob images)
        inds = r <= domain_size[0]
        [r, theta, z] = [r[inds], theta[inds], z[inds]]
        inds = ~((z > domain_size[1]) + (z < 0))
        [r, theta, z] = [r[inds], theta[inds], z[inds]]
        if reflect:
            r, theta, z = reflect_base_points(sp.vstack([r, theta, z]),
                                              domain_size)
        # Convert to Cartesian coordinates
        X = r * sp.cos(theta)
        Y = r * sp.sin(theta)
        Z = z
        base_pts = sp.vstack([X, Y, Z]).T
    elif len(domain_size) == 3:  # Cube or square
        if density_map is None:
            density_map = sp.ones([41, 41, 41])
            if domain_size[2] == 0:
                density_map = density_map[:, :, 0]
        base_pts = _try_points(num_points, density_map)
        base_pts = base_pts * domain_size
        if reflect:
            base_pts = reflect_base_points(base_pts, domain_size)
    return base_pts
def classify_segmented_recording(recording, result_format=None):
    """Use this function if you are sure you have a single symbol.

    Parameters
    ----------
    recording : string
        The recording in JSON format

    Returns
    -------
    list of dictionaries
        Each dictionary contains the keys 'symbol' and 'probability'.
        The list is sorted descending by probability.
    """
    global single_symbol_classifier
    # Lazily construct the classifier once and reuse it across calls.
    if single_symbol_classifier is None:
        single_symbol_classifier = SingleClassificer()
    return single_symbol_classifier.predict(recording, result_format)
def get_occurrence(exchange_id, instance_index, format=u"Default"):
    """Requests one or more calendar items from the store matching the
    master & index.

    exchange_id is the id for the master event in the Exchange store.

    format controls how much data you get back from Exchange. Full docs
    are here, but acceptible values are IdOnly, Default, and
    AllProperties.

    GetItem Doc:
    http://msdn.microsoft.com/en-us/library/aa564509(v=exchg.140).aspx
    OccurrenceItemId Doc:
    http://msdn.microsoft.com/en-us/library/office/aa580744(v=exchg.150).aspx

    <m:GetItem xmlns:m="http://schemas.microsoft.com/exchange/services/2006/messages"
               xmlns:t="http://schemas.microsoft.com/exchange/services/2006/types">
      <m:ItemShape>
        <t:BaseShape>{format}</t:BaseShape>
      </m:ItemShape>
      <m:ItemIds>
        {% for index in instance_index %}
        <t:OccurrenceItemId RecurringMasterId="{exchange_id}" InstanceIndex="{{ index }}"/>
        {% endfor %}
      </m:ItemIds>
    </m:GetItem>
    """
    request = M.GetItem(M.ItemShape(T.BaseShape(format)), M.ItemIds())
    # Locate the (initially empty) ItemIds container and append one
    # OccurrenceItemId element per requested instance.
    item_ids = request.xpath("//m:ItemIds", namespaces=NAMESPACES)[0]
    for occurrence_index in instance_index:
        item_ids.append(
            T.OccurrenceItemId(RecurringMasterId=exchange_id,
                               InstanceIndex=str(occurrence_index)))
    return request
def __gen_random_values(self):
    '''Generate random values based on supplied value ranges

    Returns:
        list: random values, one per tunable variable
    '''
    if self._value_ranges is None:
        self._logger.log('crit', 'Must set the type/range of possible values')
        raise RuntimeError("Must set the type/range of possible values")
    values = []
    for spec in self._value_ranges:
        vtype = spec[0]
        low, high = spec[1][0], spec[1][1]
        if vtype == 'int':
            values.append(randint(low, high))
        elif vtype == 'float':
            values.append(np.random.uniform(low, high))
        else:
            self._logger.log('crit',
                             'Value type must be either an `int` or a `float`')
            raise RuntimeError('Value type must be either an `int` or a `float`')
    return values
def _read_join_synack(self, bits, size, kind):
    """Read Join Connection option for Responding SYN/ACK.

    Positional arguments:
        * bits - str, 4-bit data
        * size - int, length of option
        * kind - int, 30 (Multipath TCP)

    Returns:
        * dict -- extracted Join Connection (MP_JOIN-SYN/ACK) option
          for Responding SYN/ACK

    Structure of MP_JOIN-SYN/ACK [RFC 6824]:

                         1                   2                   3
     0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
    |     Kind      |  Length = 16  |Subtype|      |B|   Address ID  |
    |                                                               |
    |               Sender's Truncated HMAC (64 bits)               |
    |                                                               |
    |                Sender's Random Number (32 bits)               |

    Octets  Bits  Name                           Description
      0       0   tcp.mp.kind                    Kind (30)
      1       8   tcp.mp.length                  Length (16)
      2      16   tcp.mp.subtype                 Subtype (1 | SYN/ACK)
      2      20   -                              Reserved (must be zero)
      2      23   tcp.mp.join.synack.backup      Backup Path (B)
      3      24   tcp.mp.join.synack.addrid      Address ID
      4      32   tcp.mp.join.synack.hmac        Sender's Truncated HMAC
     12      96   tcp.mp.join.synack.randnum     Sender's Random Number
    """
    # Field order matches the wire layout above.
    addr_id = self._read_unpack(1)
    trunc_hmac = self._read_fileng(8)
    rand_num = self._read_unpack(4)
    return dict(
        kind=kind,
        length=size + 1,
        subtype='MP_JOIN-SYN/ACK',
        join=dict(
            synack=dict(
                # bits[3] is the B (backup path) flag.
                backup=bool(int(bits[3])),
                addrid=addr_id,
                hmac=trunc_hmac,
                randnum=rand_num,
            ),
        ),
    )
def distance(p0, p1, deg=True, r=r_earth_mean):
    """Return the distance between two points on the surface of the
    Earth.

    Parameters
    ----------
    p0 : point-like (or array of point-like) [longitude, latitude] objects
    p1 : point-like (or array of point-like) [longitude, latitude] objects
    deg : bool, optional (default True)
        indicates if p0 and p1 are specified in degrees
    r : float, optional (default r_earth_mean)
        radius of the sphere

    Returns
    -------
    d : float

    Reference
    ---------
    http://www.movable-type.co.uk/scripts/latlong.html - Distance
    Note: Spherical earth model. By default uses radius of 6371.0 km.
    """
    single, (p0, p1) = _to_arrays((p0, 2), (p1, 2))
    if deg:
        p0 = np.radians(p0)
        p1 = np.radians(p1)
    lon0, lat0 = p0[:, 0], p0[:, 1]
    lon1, lat1 = p1[:, 0], p1[:, 1]
    # haversine(x) = sin^2(x/2); standard haversine great-circle formula
    hav_dlat = sin((lat1 - lat0) / 2.0) ** 2
    hav_dlon = sin((lon1 - lon0) / 2.0) ** 2
    hav_angle = hav_dlat + cos(lat0) * cos(lat1) * hav_dlon
    central_angle = 2.0 * arcsin(sqrt(hav_angle))
    d = r * central_angle
    return d[0] if single else d
def parse_samtools_idxstats(self):
    """Find Samtools idxstats logs, parse them, and build the report plots.

    Populates ``self.samtools_idxstats`` as ``{sample_name: {chrom: mapped}}``,
    writes the parsed data file, and adds the XY-counts bar graph and the
    per-contig mapped-reads line plot sections.

    :returns: number of idxstats logs found.
    """
    self.samtools_idxstats = dict()
    for f in self.find_log_files('samtools/idxstats'):
        parsed_data = parse_single_report(f['f'])
        if len(parsed_data) > 0:
            if f['s_name'] in self.samtools_idxstats:
                log.debug("Duplicate sample name found! Overwriting: {}".format(f['s_name']))
            self.add_data_source(f, section='idxstats')
            self.samtools_idxstats[f['s_name']] = parsed_data
    # Filter to strip out ignored sample names
    self.samtools_idxstats = self.ignore_samples(self.samtools_idxstats)
    if len(self.samtools_idxstats) > 0:
        # Write parsed report data to a file (restructure first)
        self.write_data_file(self.samtools_idxstats, 'multiqc_samtools_idxstats')
        # Prep the data for the plots
        keys = list()
        pdata = dict()
        pdata_norm = dict()
        xy_counts = dict()
        # Count the total mapped reads for every chromosome
        chrs_mapped = defaultdict(lambda: 0)
        sample_mapped = defaultdict(lambda: 0)
        total_mapped = 0
        # Cutoff, can be customised in config
        cutoff = float(getattr(config, 'samtools_idxstats_fraction_cutoff', 0.001))
        if cutoff != 0.001:
            log.info('Setting idxstats cutoff to: {}%'.format(cutoff * 100.0))
        for s_name in self.samtools_idxstats:
            for chrom in self.samtools_idxstats[s_name]:
                chrs_mapped[chrom] += self.samtools_idxstats[s_name][chrom]
                sample_mapped[s_name] += self.samtools_idxstats[s_name][chrom]
                total_mapped += self.samtools_idxstats[s_name][chrom]
        # Minimum reads a chromosome needs (across all samples) to be plotted.
        req_reads = float(total_mapped) * cutoff
        chr_always = getattr(config, 'samtools_idxstats_always', [])
        if len(chr_always) > 0:
            log.info('Trying to include these chromosomes in idxstats: {}'.format(', '.join(chr_always)))
        chr_ignore = getattr(config, 'samtools_idxstats_ignore', [])
        if len(chr_ignore) > 0:
            log.info('Excluding these chromosomes from idxstats: {}'.format(', '.join(chr_ignore)))
        xchr = getattr(config, 'samtools_idxstats_xchr', False)
        if xchr:
            log.info('Using "{}" as X chromosome name'.format(xchr))
        ychr = getattr(config, 'samtools_idxstats_ychr', False)
        if ychr:
            log.info('Using "{}" as Y chromosome name'.format(ychr))
        # Go through again and collect all of the keys that have enough counts
        # Also get the X/Y counts if we find them
        for s_name in self.samtools_idxstats:
            x_count = False
            y_count = False
            for chrom in self.samtools_idxstats[s_name]:
                if float(chrs_mapped[chrom]) > req_reads or chrom in chr_always:
                    if chrom not in chr_ignore and chrom not in keys:
                        keys.append(chrom)
                # Collect X and Y counts if we have them
                # (config name takes precedence; otherwise match x/chrx, y/chry)
                mapped = self.samtools_idxstats[s_name][chrom]
                if xchr is not False:
                    if str(xchr) == str(chrom):
                        x_count = mapped
                else:
                    if chrom.lower() == 'x' or chrom.lower() == 'chrx':
                        x_count = mapped
                if ychr is not False:
                    if str(ychr) == str(chrom):
                        y_count = mapped
                else:
                    if chrom.lower() == 'y' or chrom.lower() == 'chry':
                        y_count = mapped
            # Only save these counts if we have both x and y
            if x_count and y_count:
                xy_counts[s_name] = {'x': x_count, 'y': y_count}
        # Ok, one last time. We have the chromosomes that we want to plot,
        # now collect the counts
        for s_name in self.samtools_idxstats:
            pdata[s_name] = OrderedDict()
            pdata_norm[s_name] = OrderedDict()
            for k in keys:
                try:
                    pdata[s_name][k] = self.samtools_idxstats[s_name][k]
                    pdata_norm[s_name][k] = float(self.samtools_idxstats[s_name][k]) / sample_mapped[s_name]
                except (KeyError, ZeroDivisionError):
                    # Chromosome absent for this sample, or sample had no mapped reads.
                    pdata[s_name][k] = 0
                    pdata_norm[s_name][k] = 0
        # X/Y ratio plot
        if len(xy_counts) > 0:
            xy_keys = OrderedDict()
            xy_keys['x'] = {'name': xchr if xchr else 'Chromosome X'}
            xy_keys['y'] = {'name': ychr if ychr else 'Chromosome Y'}
            pconfig = {'id': 'samtools-idxstats-xy-plot', 'title': 'Samtools idxstats: chrXY mapped reads', 'ylab': 'Percent of X+Y Reads', 'cpswitch_counts_label': 'Number of Reads', 'cpswitch_percent_label': 'Percent of X+Y Reads', 'cpswitch_c_active': False}
            self.add_section(name='XY counts', anchor='samtools-idxstats-xy-counts', plot=bargraph.plot(xy_counts, xy_keys, pconfig))
        # Mapped reads per chr line plot
        pconfig = {'id': 'samtools-idxstats-mapped-reads-plot', 'title': 'Samtools idxstats: Mapped reads per contig', 'ylab': '# mapped reads', 'xlab': 'Chromosome Name', 'categories': True, 'tt_label': '<strong>{point.category}:</strong> {point.y:.2f}', 'data_labels': [{'name': 'Normalised Counts', 'ylab': 'Fraction of total count'}, {'name': 'Counts', 'ylab': '# mapped reads'}]}
        self.add_section(name='Mapped reads per contig', anchor='samtools-idxstats', description='The <code>samtools idxstats</code> tool counts the number of mapped reads per chromosome / contig. ' + 'Chromosomes with &lt; {}% of the total aligned reads are omitted from this plot.'.format(cutoff * 100), plot=linegraph.plot([pdata_norm, pdata], pconfig))
    # Return the number of logs that were found
    return len(self.samtools_idxstats)
def trace_integration(tracer=None):
    """Integrate with pymongo to trace it using event listener."""
    log.info('Integrated module: {}'.format(MODULE_NAME))
    listener = MongoCommandListener(tracer=tracer)
    monitoring.register(listener)
def _set_pb_meaning_from_entity ( entity , name , value , value_pb , is_list = False ) :
"""Add meaning information ( from an entity ) to a protobuf .
: type entity : : class : ` google . cloud . datastore . entity . Entity `
: param entity : The entity to be turned into a protobuf .
: type name : str
: param name : The name of the property .
: type value : object
: param value : The current value stored as property ` ` name ` ` .
: type value _ pb : : class : ` . entity _ pb2 . Value `
: param value _ pb : The protobuf value to add meaning / meanings to .
: type is _ list : bool
: param is _ list : ( Optional ) Boolean indicating if the ` ` value ` ` is
a list value .""" | if name not in entity . _meanings :
return
meaning , orig_value = entity . _meanings [ name ]
# Only add the meaning back to the protobuf if the value is
# unchanged from when it was originally read from the API .
if orig_value is not value :
return
# For lists , we set meaning on each sub - element .
if is_list :
if not isinstance ( meaning , list ) :
meaning = itertools . repeat ( meaning )
val_iter = six . moves . zip ( value_pb . array_value . values , meaning )
for sub_value_pb , sub_meaning in val_iter :
if sub_meaning is not None :
sub_value_pb . meaning = sub_meaning
else :
value_pb . meaning = meaning |
def temp_output_dir(prefix="tmp", suffix="", dir=None, make_parents=False, always_clean=False):
    """Context manager that creates a temporary directory and deletes it
    when the context exits.

    Usage::

        with temp_output_dir() as dirname:
            ...
    """
    # Delegate to the shared helper; the leading True selects directory mode.
    return _temp_output(
        True,
        prefix=prefix,
        suffix=suffix,
        dir=dir,
        make_parents=make_parents,
        always_clean=always_clean,
    )
def amen_mv(A, x, tol, y=None, z=None, nswp=20, kickrank=4, kickrank2=0, verb=True, init_qr=True, renorm='direct', fkick=False):
    '''Approximate the matrix-by-vector product via the AMEn iteration.

    [y, z] = amen_mv(A, x, tol, varargin)
    Attempts to approximate y = A*x with accuracy ``tol`` using the
    AMEn+ALS iteration. Matrix A has to be given in the TT-format, the
    vector x should be given in the TT-format also.

    Parameters (with defaults):
      o y - initial approximation to Ax [rand rank-2]
      o nswp - maximal number of sweeps [20]
      o verb - verbosity level, 0-silent, 1-sweep info, 2-block info [1]
      o kickrank - compression rank of the error, i.e. enrichment size [4]
      o kickrank2 - extra random-enrichment rank [0]
      o init_qr - perform QR of the input (save some time in ts, etc) [True]
      o renorm - orthogonalization and truncation method: 'direct' (svd, qr)
                 or 'gram' (apply svd to the gram matrix, faster for m >> n)
                 [direct] -- 'gram' is currently not implemented and falls
                 back to 'direct'.
      o fkick - perform solution enrichment during forward sweeps [False]
                (rather questionable yet; False makes error higher, but
                "better structured": it does not explode in e.g. subsequent
                matvecs)
      o z - initial approximation to the error Ax-y [rand rank-kickrank]

    Returns ``(y, z)`` where z is the error-approximation vector
    (``None`` when ``kickrank == 0``).

    For description of adaptive ALS please see
    Sergey V. Dolgov, Dmitry V. Savostyanov,
    Alternating minimal energy methods for linear systems in higher dimensions.
    Part I: SPD systems, http://arxiv.org/abs/1301.6068,
    Part II: Faster algorithm and application to nonsymmetric systems,
    http://arxiv.org/abs/1304.1222
    '''
    # NOTE(review): `is` compares identity, not equality; this works only via
    # CPython string interning. Left unchanged to preserve byte-level behavior.
    if renorm is 'gram':
        print("Not implemented yet. Renorm is switched to 'direct'")
        renorm = 'direct'
    # --- Unpack the input vector x (TT object or list of cores) ---
    if isinstance(x, _tt.vector):
        d = x.d
        m = x.n
        rx = x.r
        x = _tt.vector.to_list(x)
        vectype = 1
        # tt_tensor
    elif isinstance(x, list):
        d = len(x)
        m = _np.zeros(d)
        rx = _np.ones(d + 1, dtype=_np.int32)
        for i in xrange(d):
            [_, m[i], rx[i + 1]] = x[i].shape
        vectype = 0
        # cell
    else:
        raise Exception('x: use tt.tensor or list of cores as numpy.arrays')
    # --- Unpack the matrix A (TT matrix or sparse canonical cell format) ---
    if isinstance(A, _tt.matrix):
        n = A.n
        ra = A.tt.r
        A = _tt.matrix.to_list(A)
        # prepare A for fast ALS-mv
        for i in xrange(d):
            A[i] = _reshape(A[i], (ra[i] * n[i], m[i] * ra[i + 1]))
        atype = 1
        # tt_matrix
        # Alternative: A is a cell of cell: sparse canonical format
    elif isinstance(A, list):
        n = _np.zeros(d)
        for i in xrange(d):
            n[i] = A[i][0].shape[0]
        ra = len(A[0])
        atype = 0
        # cell
    else:
        raise Exception('A: use tt.matrix or list of cores as numpy.arrays')
    # --- Initial guess for y (random rank-2 unless supplied) ---
    if y is None:
        y = _tt.rand(n, d, 2)
        y = _tt.vector.to_list(y)
    else:
        if isinstance(y, _tt.vector):
            y = _tt.vector.to_list(y)
    ry = _np.ones(d + 1, dtype=_np.int32)
    for i in range(d):
        ry[i + 1] = y[i].shape[2]
    # --- Initial guess for the error vector z and its interface matrices ---
    if (kickrank + kickrank2 > 0):
        if z is None:
            z = _tt.rand(n, d, kickrank + kickrank2)
            rz = z.r
            z = _tt.vector.to_list(z)
        else:
            if isinstance(z, _tt.vector):
                z = _tt.vector.to_list(z)
            rz = _np.ones(d + 1, dtype=_np.int32)
            for i in range(d):
                rz[i + 1] = z[i].shape[2]
        # phizax[i]: left/right interface of z' * A * x at bond i
        phizax = [None] * (d + 1)
        # cell(d+1,1);
        if (atype == 1):
            phizax[0] = _np.ones((1, 1, 1))
            phizax[d] = _np.ones((1, 1, 1))
        else:
            phizax[0] = _np.ones((1, ra))
            # 33
            phizax[d] = _np.ones((1, ra))
        # phizy[i]: interface of z' * y at bond i
        phizy = [None] * (d + 1)
        phizy[0] = _np.ones((1))
        phizy[d] = _np.ones((1))
    # phiyax[i]: interface of y' * A * x at bond i
    phiyax = [None] * (d + 1)
    if (atype == 1):
        phiyax[0] = _np.ones((1, 1, 1))
        phiyax[d] = _np.ones((1, 1, 1))
    else:
        phiyax[0] = _np.ones((1, ra))
        phiyax[d] = _np.ones((1, ra))
    nrms = _np.ones(d)
    # Initial ort: left-orthogonalize y (and z), accumulating interfaces
    for i in range(d - 1):
        if init_qr:
            cr = _reshape(y[i], (ry[i] * n[i], ry[i + 1]))
            # NOTE(review): another interning-dependent `is` comparison.
            if (renorm is 'gram') and (ry[i] * n[i] > 5 * ry[i + 1]):
                [cr, s, R] = _svdgram(cr)
            else:
                [cr, R] = _np.linalg.qr(cr)
            nrmr = _np.linalg.norm(R)
            # ,'fro')
            if (nrmr > 0):
                R = R / nrmr
            # Push the triangular factor into the next core.
            cr2 = _reshape(y[i + 1], (ry[i + 1], n[i + 1] * ry[i + 2]))
            cr2 = _np.dot(R, cr2)
            ry[i + 1] = cr.shape[1]
            y[i] = _reshape(cr, (ry[i], n[i], ry[i + 1]))
            y[i + 1] = _reshape(cr2, (ry[i + 1], n[i + 1], ry[i + 2]))
        [phiyax[i + 1], nrms[i]] = _compute_next_Phi(phiyax[i], y[i], x[i], 'lr', A[i])
        if (kickrank + kickrank2 > 0):
            # Same left-orthogonalization for the error vector z.
            cr = _reshape(z[i], (rz[i] * n[i], rz[i + 1]))
            if (renorm == 'gram') and (rz[i] * n[i] > 5 * rz[i + 1]):
                [cr, s, R] = _svdgram(cr)
            else:
                [cr, R] = _np.linalg.qr(cr)
            nrmr = _np.linalg.norm(R)
            # ,'fro')
            if (nrmr > 0):
                R = R / nrmr
            cr2 = _reshape(z[i + 1], (rz[i + 1], n[i + 1] * rz[i + 2]))
            cr2 = _np.dot(R, cr2)
            rz[i + 1] = cr.shape[1]
            z[i] = _reshape(cr, (rz[i], n[i], rz[i + 1]))
            z[i + 1] = _reshape(cr2, (rz[i + 1], n[i + 1], rz[i + 2]))
            phizax[i + 1] = _compute_next_Phi(phizax[i], z[i], x[i], 'lr', A[i], nrms[i], return_norm=False)
            phizy[i + 1] = _compute_next_Phi(phizy[i], z[i], y[i], 'lr', return_norm=False)
    # --- Main ALS sweeps: start at the last core, sweeping backward first ---
    i = d - 1
    direct = -1
    swp = 1
    max_dx = 0
    while swp <= nswp:
        # Project the MatVec generating vector
        crx = _reshape(x[i], (rx[i] * m[i] * rx[i + 1], 1))
        cry = _bfun3(phiyax[i], A[i], phiyax[i + 1], crx)
        nrms[i] = _np.linalg.norm(cry)
        # ,'fro')
        # The main goal is to keep y[i] of norm 1
        if (nrms[i] > 0):
            cry = cry / nrms[i]
        else:
            nrms[i] = 1
        y[i] = _reshape(y[i], (ry[i] * n[i] * ry[i + 1], 1))
        dx = _np.linalg.norm(cry - y[i])
        max_dx = max(max_dx, dx)
        # Truncation and enrichment
        if ((direct > 0) and (i < d - 1)):  # ?? i < d
            # Forward sweep: truncate the updated core and enrich its rank.
            cry = _reshape(cry, (ry[i] * n[i], ry[i + 1]))
            if (renorm == 'gram'):
                [u, s, v] = _svdgram(cry, tol / d ** 0.5)
                v = v.T
                r = u.shape[1]
            else:
                [u, s, vt] = _np.linalg.svd(cry, full_matrices=False)
                # s = diag(s)
                r = _my_chop2(s, tol * _np.linalg.norm(s) / d ** 0.5)
                u = u[:, :r]
                # ????? s - matrix or vector
                v = _np.dot(_tconj(vt[:r, :]), _np.diag(s[:r]))
            # Prepare enrichment, if needed
            if (kickrank + kickrank2 > 0):
                cry = _np.dot(u, v.T)
                cry = _reshape(cry, (ry[i] * n[i], ry[i + 1]))
                # For updating z
                crz = _bfun3(phizax[i], A[i], phizax[i + 1], crx)
                crz = _reshape(crz, (rz[i] * n[i], rz[i + 1]))
                ys = _np.dot(cry, phizy[i + 1])
                yz = _reshape(ys, (ry[i], n[i] * rz[i + 1]))
                yz = _np.dot(phizy[i], yz)
                yz = _reshape(yz, (rz[i] * n[i], rz[i + 1]))
                crz = crz / nrms[i] - yz
                nrmz = _np.linalg.norm(crz)
                # ,'fro')
                if (kickrank2 > 0):
                    [crz, _, _] = _np.linalg.svd(crz, full_matrices=False)
                    crz = crz[:, : min(crz.shape[1], kickrank)]
                    crz = _np.hstack((crz, _np.random.randn(rz[i] * n[i], kickrank2)))
                # For adding into solution
                if fkick:
                    crs = _bfun3(phiyax[i], A[i], phizax[i + 1], crx)
                    crs = _reshape(crs, (ry[i] * n[i], rz[i + 1]))
                    crs = crs / nrms[i] - ys
                    u = _np.hstack((u, crs))
                    if (renorm == 'gram') and (ry[i] * n[i] > 5 * (ry[i + 1] + rz[i + 1])):
                        [u, s, R] = _svdgram(u)
                    else:
                        [u, R] = _np.linalg.qr(u)
                    v = _np.hstack((v, _np.zeros((ry[i + 1], rz[i + 1]))))
                    v = _np.dot(v, R.T)
                    r = u.shape[1]
            y[i] = _reshape(u, (ry[i], n[i], r))
            # Carry the non-orthogonal factor into the next core.
            cr2 = _reshape(y[i + 1], (ry[i + 1], n[i + 1] * ry[i + 2]))
            v = _reshape(v, (ry[i + 1], r))
            cr2 = _np.dot(v.T, cr2)
            y[i + 1] = _reshape(cr2, (r, n[i + 1], ry[i + 2]))
            ry[i + 1] = r
            [phiyax[i + 1], nrms[i]] = _compute_next_Phi(phiyax[i], y[i], x[i], 'lr', A[i])
            if (kickrank + kickrank2 > 0):
                if (renorm == 'gram') and (rz[i] * n[i] > 5 * rz[i + 1]):
                    [crz, s, R] = _svdgram(crz)
                else:
                    [crz, R] = _np.linalg.qr(crz)
                rz[i + 1] = crz.shape[1]
                z[i] = _reshape(crz, (rz[i], n[i], rz[i + 1]))
                # z[i+1] will be recomputed from scratch in the next step
                phizax[i + 1] = _compute_next_Phi(phizax[i], z[i], x[i], 'lr', A[i], nrms[i], return_norm=False)
                phizy[i + 1] = _compute_next_Phi(phizy[i], z[i], y[i], 'lr', return_norm=False)
        elif ((direct < 0) and (i > 0)):
            # Backward sweep: mirror of the forward case.
            cry = _reshape(cry, (ry[i], n[i] * ry[i + 1]))
            if (renorm == 'gram'):
                [v, s, u] = _svdgram(cry.T, tol / d ** 0.5)
                u = u.T
                r = v.shape[1]
            else:
                # [v, s, u] = _np.linalg.svd(cry.T, full_matrices=False)
                [u, s, vt] = _np.linalg.svd(cry, full_matrices=False)
                # s = diag(s);
                r = _my_chop2(s, tol * _np.linalg.norm(s) / d ** 0.5)
                v = _tconj(vt[:r, :])
                # v = vt[:r, :]
                # v = _np.dot(v[:, :r], _np.diag(s[:r]))
                u = _np.dot(u[:, :r], _np.diag(s[:r]))
            # Prepare enrichment, if needed
            if (kickrank + kickrank2 > 0):
                cry = _np.dot(u, v.T)
                cry = _reshape(cry, (ry[i], n[i] * ry[i + 1]))
                # For updating z
                crz = _bfun3(phizax[i], A[i], phizax[i + 1], crx)
                crz = _reshape(crz, (rz[i], n[i] * rz[i + 1]))
                ys = _np.dot(phizy[i], cry)
                yz = _reshape(ys, (rz[i] * n[i], ry[i + 1]))
                yz = _np.dot(yz, phizy[i + 1])
                yz = _reshape(yz, (rz[i], n[i] * rz[i + 1]))
                crz = crz / nrms[i] - yz
                nrmz = _np.linalg.norm(crz)
                # ,'fro')
                if (kickrank2 > 0):
                    [_, _, crz] = _np.linalg.svd(crz, full_matrices=False)
                    crz = crz[:, : min(crz.shape[1], kickrank)]
                    crz = _tconj(crz)
                    crz = _np.vstack((crz, _np.random.randn(kickrank2, n[i] * rz[i + 1])))
                # For adding into solution
                crs = _bfun3(phizax[i], A[i], phiyax[i + 1], crx)
                crs = _reshape(crs, (rz[i], n[i] * ry[i + 1]))
                crs = crs / nrms[i] - ys
                v = _np.hstack((v, crs.T))
                # v = v.T
                if (renorm == 'gram') and (n[i] * ry[i + 1] > 5 * (ry[i] + rz[i])):
                    [v, s, R] = _svdgram(v)
                else:
                    [v, R] = _np.linalg.qr(v)
                u = _np.hstack((u, _np.zeros((ry[i], rz[i]))))
                u = _np.dot(u, R.T)
                r = v.shape[1]
            # Carry the non-orthogonal factor into the previous core.
            cr2 = _reshape(y[i - 1], (ry[i - 1] * n[i - 1], ry[i]))
            cr2 = _np.dot(cr2, u)
            y[i - 1] = _reshape(cr2, (ry[i - 1], n[i - 1], r))
            y[i] = _reshape(v.T, (r, n[i], ry[i + 1]))
            ry[i] = r
            [phiyax[i], nrms[i]] = _compute_next_Phi(phiyax[i + 1], y[i], x[i], 'rl', A[i])
            if (kickrank + kickrank2 > 0):
                if (renorm == 'gram') and (n[i] * rz[i + 1] > 5 * rz[i]):
                    [crz, s, R] = _svdgram(crz.T)
                else:
                    [crz, R] = _np.linalg.qr(crz.T)
                rz[i] = crz.shape[1]
                z[i] = _reshape(crz.T, (rz[i], n[i], rz[i + 1]))
                # don't update z[i-1], it will be recomputed from scratch
                phizax[i] = _compute_next_Phi(phizax[i + 1], z[i], x[i], 'rl', A[i], nrms[i], return_norm=False)
                phizy[i] = _compute_next_Phi(phizy[i + 1], z[i], y[i], 'rl', return_norm=False)
        if (verb > 1):
            print('amen-mv: swp=[%d,%d], dx=%.3e, r=%d, |y|=%.3e, |z|=%.3e' % (swp, i, dx, r, _np.linalg.norm(cry), nrmz))
        # Stopping or reversing
        if ((direct > 0) and (i == d - 1)) or ((direct < 0) and (i == 0)):
            if (verb > 0):
                print('amen-mv: swp=%d{%d}, max_dx=%.3e, max_r=%d' % (swp, (1 - direct) // 2, max_dx, max(ry)))
            if ((max_dx < tol) or (swp == nswp)) and (direct > 0):
                break
            else:
                # We are at the terminal block
                y[i] = _reshape(cry, (ry[i], n[i], ry[i + 1]))
                if (direct > 0):
                    swp = swp + 1
                max_dx = 0
                direct = -direct
        else:
            i = i + direct
    # if (direct > 0)
    y[d - 1] = _reshape(cry, (ry[d - 1], n[d - 1], ry[d]))
    # else
    # y{1} = reshape(cry, ry(1), n(1), ry(2));
    # end;
    # Distribute norms equally...
    nrms = _np.exp(sum(_np.log(nrms)) / d)
    # ... and plug them into y
    for i in xrange(d):
        y[i] = _np.dot(y[i], nrms)
    if (vectype == 1):
        y = _tt.vector.from_list(y)
        if kickrank == 0:
            z = None
        else:
            z = _tt.vector.from_list(z)
    return y, z
def get_beamarea_deg2(self, ra, dec):
    """Calculate the area of the beam in square degrees.

    Parameters
    ----------
    ra, dec : float
        The sky position (degrees).

    Returns
    -------
    area : float
        The area of the beam in square degrees (0 when no beam is
        available at this position).
    """
    beam = self.get_beam(ra, dec)
    if beam is None:
        return 0
    # Ellipse area from the semi-axes of the beam.
    return np.pi * beam.a * beam.b
def set_parent(self, parent):
    """Set the parent of the treeitem.

    Detaches this item from its current parent (if any) and attaches it
    to the new one. A no-op when the parent is unchanged.

    :param parent: parent treeitem
    :type parent: :class:`TreeItem` | None
    :returns: None
    :rtype: None
    :raises: None
    """
    if parent == self._parent:
        return
    old_parent = self._parent
    if old_parent:
        old_parent.remove_child(self)
    self._parent = parent
    if parent:
        parent.add_child(self)
def coherence(freq, power, cross):
    """Calculate frequency-resolved coherence for given power- and cross-spectra.

    Parameters
    ----------
    freq : numpy.ndarray
        Frequencies, 1 dim array.
    power : numpy.ndarray
        Power spectra, 1st axis units, 2nd axis frequencies; shape (N, nfreq).
    cross : numpy.ndarray
        Cross spectra, 1st axis units, 2nd axis units, 3rd axis frequencies;
        shape (N, N, nfreq). May be complex.

    Returns
    -------
    freq : numpy.ndarray
        1 dim array of frequencies (returned unchanged).
    coh : numpy.ndarray
        ndim 3 array of coherences coh[i, j] = cross[i, j] / sqrt(P_i * P_j),
        1st axis units, 2nd axis units, 3rd axis frequencies. The dtype
        follows ``cross`` (complex cross-spectra are supported; the previous
        element-wise assignment into a float buffer raised a TypeError for
        complex input).
    """
    power = np.asarray(power)
    cross = np.asarray(cross)
    # Broadcast sqrt(P_i * P_j) to shape (N, N, nfreq) and normalize in one
    # vectorized step instead of an O(N^2) Python loop.
    norm = np.sqrt(power[:, None, :] * power[None, :, :])
    coh = cross / norm
    assert (len(freq) == len(coh[0, 0]))
    return freq, coh
def gates_close(gate0: Gate, gate1: Gate, tolerance: float = TOLERANCE) -> bool:
    """Return True if the two gates are almost identical.

    Closeness is measured with the gate angle (via the gates' vectors)."""
    vec0 = gate0.vec
    vec1 = gate1.vec
    return vectors_close(vec0, vec1, tolerance)
def rmtree_p(self):
    """Like :meth:`rmtree`, but does not raise an exception if the
    directory does not exist.

    Returns self to allow chaining.
    """
    try:
        self.rmtree()
    except OSError as e:
        # Swallow only "no such file or directory"; any other failure
        # (permissions, busy mount, ...) is re-raised.
        if e.errno != errno.ENOENT:
            raise
    return self
def _build_wells(self) -> List[Well]:
    """Create the single shared list of Well instances used by all
    accessor functions. Only called again when a new offset needs to
    be applied."""
    location = Location(self._calibrated_offset, self)
    wells = []
    for well_name in self._ordering:
        wells.append(
            Well(
                self._well_definition[well_name],
                location,
                "{} of {}".format(well_name, self._display_name),
                self.is_tiprack,
            )
        )
    return wells
async def reply_sticker(self, sticker: typing.Union[base.InputFile, base.String], disable_notification: typing.Union[base.Boolean, None] = None, reply_markup=None, reply=True) -> Message:
    """Reply to this message with a .webp sticker.

    Source: https://core.telegram.org/bots/api#sendsticker

    :param sticker: Sticker to send.
    :type sticker: :obj:`typing.Union[base.InputFile, base.String]`
    :param disable_notification: Sends the message silently. Users will receive a notification with no sound.
    :type disable_notification: :obj:`typing.Union[base.Boolean, None]`
    :param reply_markup: Additional interface options.
    :type reply_markup: :obj:`typing.Union[types.InlineKeyboardMarkup,
        types.ReplyKeyboardMarkup, types.ReplyKeyboardRemove, types.ForceReply, None]`
    :param reply: fill 'reply_to_message_id' with this message's id
    :return: On success, the sent Message is returned.
    :rtype: :obj:`types.Message`
    """
    reply_to = self.message_id if reply else None
    sent = await self.bot.send_sticker(
        chat_id=self.chat.id,
        sticker=sticker,
        disable_notification=disable_notification,
        reply_to_message_id=reply_to,
        reply_markup=reply_markup,
    )
    return sent
def positive_int(val):
    """Parse `val` into a positive integer.

    Floats are rejected outright (to avoid silent truncation); any other
    value is passed through ``int()``. Raises ValueError for floats and
    negative results.
    """
    if isinstance(val, float):
        raise ValueError('"{}" must not be a float'.format(val))
    parsed = int(val)
    if parsed < 0:
        raise ValueError('"{}" must be positive'.format(parsed))
    return parsed
def not_right(self, num):
    """WITH SLICES BEING FLAT, WE NEED A SIMPLE WAY TO SLICE FROM THE LEFT [:-num:]

    Returns everything except the last ``num`` elements. ``num <= 0``
    yields the empty list; ``num is None`` falls back to dropping one
    element (wrapped in a nested list, preserved as-is from the original
    behavior).
    """
    # Fixed `num == None` -> `num is None`: identity is the correct test
    # for the None singleton and avoids surprises from custom __eq__.
    if num is None:
        return FlatList([_get_list(self)[:-1:]])
    if num <= 0:
        return FlatList.EMPTY
    return FlatList(_get_list(self)[:-num:])
def info():
    """Generate information for a bug report.

    Based on the requests package help utility module."""
    try:
        platform_info = {"system": platform.system(), "release": platform.release()}
    except IOError:
        # platform calls can fail on exotic systems; degrade gracefully.
        platform_info = {"system": "Unknown", "release": "Unknown"}
    implementation = platform.python_implementation()
    if implementation == "CPython":
        implementation_version = platform.python_version()
    elif implementation == "PyPy":
        pypy = sys.pypy_version_info
        implementation_version = "%s.%s.%s" % (pypy.major, pypy.minor, pypy.micro)
        if pypy.releaselevel != "final":
            implementation_version = "".join([implementation_version, pypy.releaselevel])
    else:
        implementation_version = "Unknown"
    return {
        "platform": platform_info,
        "implementation": {
            "name": implementation,
            "version": implementation_version,
        },
        "cryptography": {"version": getattr(cryptography, "__version__", "")},
        "pyjwt": {"version": pyjwt_version},
    }
def qsat(self, temp, pres, parameter):
    """Calculate the vector of saturation humidities.

    :param temp: list of element layer temperatures (K)
    :param pres: list of pressures at the current timestep (Pa)
    :param parameter: object providing the physical constants
        (cl, cpv, rv, lvtt, tt, estt, r)
    :returns: list of saturation humidities, one per layer
    """
    # Constants of the Clausius-Clapeyron-style fit (same for all layers).
    gamw = (parameter.cl - parameter.cpv) / parameter.rv
    betaw = (parameter.lvtt / parameter.rv) + (gamw * parameter.tt)
    alpw = math.log(parameter.estt) + (betaw / parameter.tt) + (gamw * math.log(parameter.tt))
    work2 = parameter.r / parameter.rv
    qsat_lst = []
    for t, p in zip(temp, pres):
        # saturation vapor pressure
        foes = math.exp(alpw - betaw / t - gamw * math.log(t))
        work1 = foes / p
        # saturation humidity
        qsat_lst.append(work2 * work1 / (1. + (work2 - 1.) * work1))
    return qsat_lst
def get_repository_with_parent(self, repository_id, include_parent, project=None):
    """GetRepositoryWithParent.

    Retrieve a git repository.

    :param str repository_id: The name or ID of the repository.
    :param bool include_parent: True to include parent repository. Only available in authenticated calls.
    :param str project: Project ID or project name
    :rtype: :class:`<GitRepository> <azure.devops.v5_0.git.models.GitRepository>`
    """
    # URL route segments (skip any that were not supplied).
    route_values = {}
    for route_key, arg_value, arg_name in (
        ('project', project, 'project'),
        ('repositoryId', repository_id, 'repository_id'),
    ):
        if arg_value is not None:
            route_values[route_key] = self._serialize.url(arg_name, arg_value, 'str')
    query_parameters = {}
    if include_parent is not None:
        query_parameters['includeParent'] = self._serialize.query('include_parent', include_parent, 'bool')
    response = self._send(
        http_method='GET',
        location_id='225f7195-f9c7-4d14-ab28-a83f7ff77e1f',
        version='5.0',
        route_values=route_values,
        query_parameters=query_parameters,
    )
    return self._deserialize('GitRepository', response)
def add_widget(self, wclass):
    """Adds a new item of widget class ``wclass`` to the treeview.

    The widget is inserted under the currently selected item when the
    class is valid there, otherwise under that item's parent; if neither
    placement validates, nothing is inserted. Default property and grid
    layout values are taken from the properties registry, the item is
    drawn, then selected and scrolled into view.
    """
    tree = self.treeview
    # get the selected item:
    selected_item = ''
    tsel = tree.selection()
    if tsel:
        selected_item = tsel[0]
    # Need to remove filter if set
    self.filter_remove()
    root = selected_item
    # check if the widget can be added at selected point
    if not self._validate_add(root, wclass, False):
        # if not try to add at item parent level
        parent = tree.parent(root)
        if parent != root:
            if self._validate_add(parent, wclass):
                root = parent
            else:
                return
        else:
            return
    # root item should be set at this point
    # setup properties
    widget_id = self.get_unique_id(wclass)
    data = WidgetDescr(wclass, widget_id)
    # setup default values for properties
    for pname in builder.CLASS_MAP[wclass].builder.properties:
        pdescription = {}
        if pname in properties.WIDGET_PROPERTIES:
            pdescription = properties.WIDGET_PROPERTIES[pname]
        # class-specific overrides are merged over the generic description
        if wclass in pdescription:
            pdescription = dict(pdescription, **pdescription[wclass])
        default_value = str(pdescription.get('default', ''))
        data.set_property(pname, default_value)
        # default text for widgets with text prop:
        if pname in ('text', 'label'):
            data.set_property(pname, widget_id)
    # default grid properties
    # is_container = builder.CLASS_MAP[wclass].builder.container
    for prop_name in properties.GRID_PROPERTIES:
        pdescription = properties.LAYOUT_OPTIONS[prop_name]
        if wclass in pdescription:
            pdescription = dict(pdescription, **pdescription[wclass])
        default_value = str(pdescription.get('default', ''))
        data.set_layout_property(prop_name, default_value)
    # place the new widget on the next free grid row of its parent
    rownum = '0'
    if root:
        rownum = str(self.get_max_row(root) + 1)
    data.set_layout_property('row', rownum)
    data.set_layout_property('column', '0')
    item = self._insert_item(root, data)
    # Do redraw
    self.draw_widget(item)
    # Select and show the item created
    tree.after_idle(lambda: tree.selection_set(item))
    tree.after_idle(lambda: tree.focus(item))
    tree.after_idle(lambda: tree.see(item))
def is_entailed_by(self, other):
    """Given two beliefstates, returns True iff the calling instance
    implies the other beliefstate, meaning it contains at least the same
    structure (for all structures) and all values (for all defined values).
    Inverse of `entails`.

    Note: this only compares the items in the DictCell, not `pos`,
    `environment_variables` or `deferred_effects`.
    """
    for key, val in self:
        if key not in other:
            return False
        other_cell = other[key]
        if not hasattr(other_cell, 'implies'):
            raise Exception("Cell for %s is missing implies()" % key)
        if not other_cell.implies(val):
            return False
    return True
def dp2hx(number, lenout=_default_len_out):
    """Convert a double precision number to an equivalent character
    string using base 16 "scientific notation."

    http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/dp2hx_c.html

    :param number: D.p. number to be converted.
    :type number: float
    :param lenout: Available space for output string.
    :type lenout: int
    :return: Equivalent character string, left justified.
    :rtype: str
    """
    number_c = ctypes.c_double(number)
    lenout_c = ctypes.c_int(lenout)
    out_string = stypes.stringToCharP(lenout_c)
    out_length = ctypes.c_int()
    libspice.dp2hx_c(number_c, lenout_c, out_string, ctypes.byref(out_length))
    return stypes.toPythonString(out_string)
def main():
    """Keywords generator command line.

    Scans a directory of source files, counts term and document
    frequencies, keeps the most frequent terms, and writes a JSON file
    mapping each keyword to a stable SHA-1-derived integer.
    """
    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument('learn', help="learning source codes directory")
    parser.add_argument('keywords', help="output keywords file, JSON")
    parser.add_argument('-n', '--nbkeywords', type=int, default=10000, help="the number of keywords to keep")
    parser.add_argument('-d', '--debug', default=False, action='store_true', help="show debug messages")
    args = parser.parse_args()
    config_logging(args.debug)
    learn_path = Path(args.learn)
    keywords_path = Path(args.keywords)
    nb_keywords = args.nbkeywords
    # Map every known file extension to its language name.
    languages = config_dict('languages.json')
    exts = {ext: lang for lang, exts in languages.items() for ext in exts}
    term_count = Counter()      # term -> total number of occurrences
    document_count = Counter()  # term -> number of files containing it
    pos = 0
    # NOTE(review): "form" in the message below looks like a typo for "from".
    LOGGER.info("Reading files form %s", learn_path)
    for pos, path in enumerate(Path(learn_path).glob('**/*'), 1):
        if pos % STEP == 0:
            LOGGER.debug("Processed %d", pos)
            # Periodic collection to keep memory bounded over large trees.
            gc.collect()
        # Cleanup dirt: skip directories and files with unknown extensions.
        if not path.is_file() or not exts.get(path.suffix.lstrip('.')):
            continue
        counter = _extract(path)
        term_count.update(counter)
        document_count.update(counter.keys())
    nb_terms = sum(term_count.values())
    # NOTE(review): ``pos`` counts every globbed path, including directories
    # and skipped files, so ``pos - 1`` is not the number of parsed
    # documents -- confirm whether this is intended.
    nb_documents = pos - 1
    if not nb_documents:
        LOGGER.error("No source files found in %s", learn_path)
        raise RuntimeError('No source files in {}'.format(learn_path))
    LOGGER.info("%d unique terms found", len(term_count))
    terms = _most_frequent((term_count, nb_terms), (document_count, nb_documents), nb_keywords)
    # Stable integer identifier per keyword, derived from its SHA-1 digest.
    keywords = {token: int(hashlib.sha1(token.encode()).hexdigest(), 16) for token in terms}
    with keywords_path.open('w') as keywords_file:
        json.dump(keywords, keywords_file, indent=2, sort_keys=True)
    LOGGER.info("%d keywords written into %s", len(keywords), keywords_path)
    LOGGER.debug("Exit OK")
def createalphabet(alphabetinput=None):
    """Create a sample alphabet as a list of one-character strings.

    :param alphabetinput: optional; either a path to an alphabet file
        (delegated to ``_load_alphabet``) or a comma-separated list of
        numeric code-point ranges such as ``"65-91,97-123"``.  The upper
        bound of each range is exclusive, matching ``range``.
    :return: list of str -- by default the printable ASCII characters
        (code points 32-126).
    """
    if alphabetinput and os.path.isfile(alphabetinput):
        return _load_alphabet(alphabetinput)
    if alphabetinput:
        # Parse explicit "start-end" code-point ranges.
        alpha = []
        for alphaset in alphabetinput.split(','):
            bounds = alphaset.split('-')
            start, end = int(bounds[0]), int(bounds[1])
            # BUG FIX: ``unichr`` does not exist on Python 3; ``chr``
            # produces the same one-character string for these code points.
            alpha.extend(chr(code) for code in range(start, end))
        return alpha
    # Default alphabet: the printable ASCII characters.
    return [chr(code) for code in range(32, 127)]
def create_node(self, network, participant):
    """Build a new RogersAgent node for *participant* in *network*."""
    agent_factory = self.models.RogersAgent
    return agent_factory(network=network, participant=participant)
def on_step_end(self, step, logs=None):
    """Save model weights every ``self.interval`` steps during training.

    :param step: index of the step that just finished (unused; the
        callback keeps its own ``total_steps`` counter).
    :param logs: optional dict of metrics, interpolated into the
        checkpoint filename template.
    """
    # BUG FIX: the original used a mutable default argument (``logs={}``),
    # which is shared across all calls; use a None sentinel instead.
    logs = logs if logs is not None else {}
    self.total_steps += 1
    if self.total_steps % self.interval != 0:
        # Not at a checkpoint boundary yet -- nothing to do.
        return
    filepath = self.filepath.format(step=self.total_steps, **logs)
    if self.verbose > 0:
        print('Step {}: saving model to {}'.format(self.total_steps, filepath))
    self.model.save_weights(filepath, overwrite=True)
def put_record(self, data, partition_key=None):
    """Queue one record in the format the stream expects.

    Parameters
        data : str
            Data to send.
        partition_key : str
            Hash that determines which shard a given data record
            belongs to; a random one is generated when omitted.
    """
    payload = encode_data(data)
    # Falsy (missing/empty) partition keys get a random replacement.
    key = partition_key or uuid.uuid4().hex
    record = {'Data': payload, 'PartitionKey': key}
    # Flush asynchronously once a full batch is already queued.
    if self.queue.qsize() >= self.batch_size:
        logger.info("Queue Flush: batch size reached")
        self.pool.submit(self.flush_queue)
    logger.debug('Putting record "{}"'.format(record['Data'][:100]))
    self.queue.put(record)
def _extrac_qtl(peak, block, headers):
    """Compute the LOD-2 support interval of a QTL.

    Given a row containing the peak of the QTL and all the rows of the
    linkage group of the said QTL (split per trait), determine the QTL
    interval and find the start and stop marker of that interval.

    The interval is a LOD 2 interval.  The approach is conservative in
    the way it takes the first and last marker within the interval.

    :arg peak: mapping of trait name to the row of its peak marker.
    :arg block: mapping containing, per column, all the rows in the
        linkage group of this QTL, split per trait.
    :arg headers: the first row of the QTL matrix file, used to determine
        which block to look at for each trait processed.
    :return: list of ``QTL`` objects; empty when *peak* is empty.
    """
    qtls = []
    if not peak:
        return qtls
    # A marker stays in the interval while its LOD score is within
    # ``threshold`` units of the peak's LOD score (the last row element).
    threshold = 2
    for trait in peak:
        blockcnt = headers.index(trait)
        local_block = block[blockcnt]
        lod2_threshold = float(peak[trait][-1]) - float(threshold)
        # Walk backwards from the peak to find the interval start.
        cnt = local_block.index(peak[trait])
        start = local_block[cnt]
        while cnt >= 0:
            start = local_block[cnt]
            # Skip pseudo-markers (names like ``c1.loc12.3``); only real
            # markers may bound the interval.
            if re.match(r'c\d+\.loc[\d\.]+', local_block[cnt][0]):
                cnt = cnt - 1
                continue
            if float(local_block[cnt][-1]) < lod2_threshold:
                break
            cnt = cnt - 1
        # Walk forwards from the peak to find the interval end.
        end = []
        cnt = local_block.index(peak[trait])
        end = local_block[cnt]
        while cnt < len(local_block):
            end = local_block[cnt]
            if re.match(r'c\d+\.loc[\d\.]+', local_block[cnt][0]):
                cnt += 1
                continue
            if float(local_block[cnt][-1]) < lod2_threshold:
                break
            cnt = cnt + 1
        # Assemble the QTL record.  Columns 0 and 2 of each row appear to
        # hold marker name and position -- confirm against the matrix
        # file layout.
        qtl = QTL()
        qtl.trait = trait
        qtl.start_mk = start[0]
        qtl.start_position = start[2]
        qtl.peak_mk = peak[trait][0]
        qtl.peak_start_position = peak[trait][2]
        qtl.peak_stop_position = peak[trait][2]
        qtl.stop_mk = end[0]
        qtl.stop_position = end[2]
        qtls.append(qtl)
    return qtls
def extrapolate_error(self):
    """Estimate the numerical error to be expected when applying all
    methods available based on the results of the current and the
    last method.

    Note that this extrapolation strategy cannot be applied on the
    first method.  If the current method is the first one, `-999.9`
    is returned.

    >>> from hydpy.models.test_v1 import *
    >>> parameterstep()
    >>> model.numvars.error = 1e-2
    >>> model.numvars.last_error = 1e-1
    >>> model.numvars.idx_method = 10
    >>> model.extrapolate_error()
    >>> from hydpy import round_
    >>> round_(model.numvars.extrapolated_error)
    0.01
    >>> model.numvars.idx_method = 9
    >>> model.extrapolate_error()
    >>> round_(model.numvars.extrapolated_error)
    0.001
    """
    if self.numvars.idx_method > 2:
        # Log-linear extrapolation: project the error decay observed
        # between the last two methods over the methods still remaining.
        log_error = modelutils.log(self.numvars.error)
        decay = log_error - modelutils.log(self.numvars.last_error)
        remaining = self.numconsts.nmb_methods - self.numvars.idx_method
        self.numvars.extrapolated_error = modelutils.exp(log_error + decay * remaining)
    else:
        # Not enough previous results to extrapolate from.
        self.numvars.extrapolated_error = -999.9
def dump(self, f, name):
    """Write the attribute's array to the file-like object *f*.

    Emits a header line with *name*, the dtype kind and the shape,
    followed by the flattened values, four per line.
    """
    array = self.get()
    # Header line: name, dtype kind and comma-separated shape.
    shape_txt = ",".join(str(int(axis)) for axis in array.shape)
    print("% 40s kind=%s shape=(%s)" % (name, array.dtype.kind, shape_txt), file=f)
    # Values, four per line.
    written = 0
    for value in array.flat:
        written += 1
        print("% 20s" % value, end=' ', file=f)
        if written % 4 == 0:
            print(file=f)
    # Terminate a partially filled final line.
    if written % 4 != 0:
        print(file=f)
def _disable_access_key(self, force_disable_self=False):
    """Set the access key's status to Inactive, unless in validate mode.

    NOTE(review): ``force_disable_self`` is accepted but never used in
    this method -- presumably consumed elsewhere; confirm.
    """
    client = self.client
    # Validate (dry-run) mode: change nothing.
    if self.validate is True:
        return
    try:
        client.update_access_key(UserName=self._search_user_for_key(), AccessKeyId=self.access_key_id, Status='Inactive')
        logger.info("Access key {id} has been disabled.".format(id=self.access_key_id))
    except Exception as e:
        # Best-effort: failures are reported, not raised.
        logger.info("Access key {id} could not be disabled due to: {e}.".format(e=e, id=self.access_key_id))
def entity_delete(args):
    """Delete an entity in a workspace, prompting unless ``--yes``.

    :param args: parsed CLI namespace providing ``entity_type``,
        ``entity``, ``project``, ``workspace`` and ``yes``.
    """
    msg = "WARNING: this will delete {0} {1} in {2}/{3}".format(args.entity_type, args.entity, args.project, args.workspace)
    if not (args.yes or _confirm_prompt(msg)):
        return
    json_body = [{"entityType": args.entity_type, "entityName": args.entity}]
    r = fapi.delete_entities(args.project, args.workspace, json_body)
    fapi._check_response_code(r, 204)
    if fcconfig.verbosity:
        # BUG FIX: the namespace has no ``type`` attribute (the dest is
        # ``entity_type``, as used above), so the original line raised
        # AttributeError in verbose mode.
        print("Succesfully deleted " + args.entity_type + " " + args.entity)
def merge ( a , b , op = None , recurse_list = False , max_depth = None ) :
"""Immutable merge ` ` a ` ` structure with ` ` b ` ` using binary operator ` ` op ` `
on leaf nodes . All nodes at , or below , ` ` max _ depth ` ` are considered to be
leaf nodes .
Merged structure is returned , input data structures are not modified .
If ` ` recurse _ list = True ` ` , leaf lists of equal length will be merged on a
list - element level . Lists are considered to be leaf nodes by default
( ` ` recurse _ list = False ` ` ) , and they are merged with user - provided ` ` op ` ` .
Note the difference : :
merge ( [ 1 , 2 ] , [ 3 , 4 ] ) = = > [ 1 , 2 , 3 , 4]
merge ( [ 1 , 2 ] , [ 3 , 4 ] , recurse _ list = True ) = = > [ 4 , 6]""" | if op is None :
op = operator . add
if max_depth is not None :
if max_depth < 1 :
return op ( a , b )
else :
max_depth -= 1
if isinstance ( a , dict ) and isinstance ( b , dict ) :
result = { }
for key in set ( chain ( a . keys ( ) , b . keys ( ) ) ) :
if key in a and key in b :
result [ key ] = merge ( a [ key ] , b [ key ] , op = op , recurse_list = recurse_list , max_depth = max_depth )
elif key in a :
result [ key ] = deepcopy ( a [ key ] )
elif key in b :
result [ key ] = deepcopy ( b [ key ] )
return result
elif isinstance ( a , list ) and isinstance ( b , list ) :
if recurse_list and len ( a ) == len ( b ) : # merge subelements
result = [ ]
for idx in range ( len ( a ) ) :
result . append ( merge ( a [ idx ] , b [ idx ] , op = op , recurse_list = recurse_list , max_depth = max_depth ) )
return result
else : # merge lists
return op ( a , b )
# all other merge ops should be handled by ` ` op ` ` .
# default ` ` operator . add ` ` will handle addition of numeric types , but fail
# with TypeError for incompatible types ( eg . str + None , etc . )
return op ( a , b ) |
def _inputrc_enables_vi_mode():
    '''Emulate a small bit of readline behavior.

    Returns:
        (bool) True if the current user enabled vi mode
        ("set editing-mode vi") in .inputrc
    '''
    candidates = (os.path.expanduser('~/.inputrc'), '/etc/inputrc')
    for path in candidates:
        try:
            with open(path) as handle:
                if any(_setre.fullmatch(line) for line in handle):
                    return True
        except IOError:
            # Missing or unreadable file: try the next candidate.
            pass
    return False
def as_labeller(x, default=label_value, multi_line=True):
    """Coerce *x* to a labeller function.

    Parameters
    ----------
    x : function | dict
        Object to coerce.
    default : function | str
        Default labeller.  If it is a string, it should be the name of
        one of the labelling functions provided by plotnine.
    multi_line : bool
        Whether to place each variable on a separate line.
        NOTE(review): not referenced in this function body -- confirm
        whether downstream code consumes it.

    Returns
    -------
    out : function
        Labelling function.
    """
    if x is None:
        x = default
    # One of the labelling functions referenced by name (string key).
    with suppress(KeyError, TypeError):
        x = LABELLERS[x]
    # x is already a coerced labeller: return it unchanged.
    with suppress(AttributeError):
        if x.__name__ == '_labeller':
            return x

    def _labeller(label_info):
        # Work on a string-valued Series copy of the incoming labels.
        label_info = pd.Series(label_info).astype(str)
        if callable(x) and x.__name__ in LABELLERS:
            # labellers in this module
            return x(label_info)
        elif hasattr(x, '__contains__'):
            # dictionary lookup: replace only labels that have a mapping
            for var in label_info.index:
                if label_info[var] in x:
                    label_info[var] = x[label_info[var]]
            return label_info
        elif callable(x):
            # generic function applied to every label value
            for var in label_info.index:
                label_info[var] = x(label_info[var])
            return label_info
        else:
            msg = "Could not use '{0}' for labelling."
            raise PlotnineError(msg.format(x))
    return _labeller
def delete_api_integration_response(restApiId, resourcePath, httpMethod, statusCode, region=None, key=None, keyid=None, profile=None):
    '''Deletes an integration response for a given method in a given API

    CLI Example:

    .. code-block:: bash

        salt myminion boto_apigateway.delete_api_integration_response restApiId resourcePath httpMethod statusCode
    '''
    try:
        found = describe_api_resource(restApiId, resourcePath, region=region, key=key, keyid=keyid, profile=profile).get('resource')
        if not found:
            return {'deleted': False, 'error': 'no such resource'}
        conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
        conn.delete_integration_response(restApiId=restApiId, resourceId=found['id'], httpMethod=httpMethod, statusCode=statusCode)
        return {'deleted': True}
    except ClientError as e:
        return {'deleted': False, 'error': __utils__['boto3.get_error'](e)}
def get_response(self, path, **params):
    """Return the response string for *path* on this service,
    forwarding any extra keyword arguments."""
    full_url = "{0}{1}".format(self.base_url, path)
    return self._get_response(full_url, **params)
def reload():
    '''Remove all stopped VMs and reload configuration from the default configuration file.

    CLI Example:

    .. code-block:: bash

        salt '*' vmctl.reload
    '''
    cmd = 'vmctl reload'
    result = __salt__['cmd.run_all'](cmd, output_loglevel='trace', python_shell=False)
    if result['retcode'] != 0:
        # Surface vmctl's stderr; ``changes`` stays False on failure.
        raise CommandExecutionError('Problem encountered running vmctl', info={'errors': [result['stderr']], 'changes': False})
    return True
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.