signature
stringlengths 29
44.1k
| implementation
stringlengths 0
85.2k
|
|---|---|
def get(self, resource, operation_timeout=None, max_envelope_size=None, locale=None):
    """Prepare the SOAP Get invocation for *resource*.

    :param resource: a URL string or a ResourceLocator instance.
    :param operation_timeout: optional operation timeout forwarded to the headers.
    :param max_envelope_size: optional maximum envelope size forwarded to the headers.
    :param locale: optional locale forwarded to the headers.
    :return: the service's ``invoke`` callable, configured with the Get headers.
    """
    # Promote a plain URL string to a ResourceLocator.
    if isinstance(resource, str):
        resource = ResourceLocator(resource)
    soap_headers = self._build_headers(
        resource, Session.GetAction, operation_timeout, max_envelope_size, locale)
    self.service.invoke.set_options(tsoapheaders=soap_headers)
    return self.service.invoke
|
def build(python=PYTHON):
    """Build the bigfloat library for in-place testing."""
    clean()
    command = (
        "LIBRARY_PATH={library_path} CPATH={include_path} {python} "
        "setup.py build_ext --inplace"
    ).format(library_path=LIBRARY_PATH, include_path=INCLUDE_PATH, python=python)
    local(command)
|
def _to_dict ( self ) :
"""Return a json dictionary representing this model ."""
|
_dict = { }
if hasattr ( self , 'name' ) and self . name is not None :
_dict [ 'name' ] = self . name
if hasattr ( self , 'description' ) and self . description is not None :
_dict [ 'description' ] = self . description
if hasattr ( self , 'language' ) and self . language is not None :
_dict [ 'language' ] = self . language
if hasattr ( self , 'metadata' ) and self . metadata is not None :
_dict [ 'metadata' ] = self . metadata
if hasattr ( self , 'learning_opt_out' ) and self . learning_opt_out is not None :
_dict [ 'learning_opt_out' ] = self . learning_opt_out
if hasattr ( self , 'system_settings' ) and self . system_settings is not None :
_dict [ 'system_settings' ] = self . system_settings . _to_dict ( )
if hasattr ( self , 'workspace_id' ) and self . workspace_id is not None :
_dict [ 'workspace_id' ] = self . workspace_id
if hasattr ( self , 'status' ) and self . status is not None :
_dict [ 'status' ] = self . status
if hasattr ( self , 'created' ) and self . created is not None :
_dict [ 'created' ] = datetime_to_string ( self . created )
if hasattr ( self , 'updated' ) and self . updated is not None :
_dict [ 'updated' ] = datetime_to_string ( self . updated )
if hasattr ( self , 'intents' ) and self . intents is not None :
_dict [ 'intents' ] = [ x . _to_dict ( ) for x in self . intents ]
if hasattr ( self , 'entities' ) and self . entities is not None :
_dict [ 'entities' ] = [ x . _to_dict ( ) for x in self . entities ]
if hasattr ( self , 'dialog_nodes' ) and self . dialog_nodes is not None :
_dict [ 'dialog_nodes' ] = [ x . _to_dict ( ) for x in self . dialog_nodes ]
if hasattr ( self , 'counterexamples' ) and self . counterexamples is not None :
_dict [ 'counterexamples' ] = [ x . _to_dict ( ) for x in self . counterexamples ]
return _dict
|
def delete(cls, mp, part_number):
    """Delete the part with the given part number of a multipart upload.

    :param mp: multipart upload object providing ``upload_id``.
    :param part_number: number of the part to delete.
    :return: the number of rows deleted by the query.
    """
    # NOTE: the original docstring said "Get part number", but this
    # method issues a bulk DELETE on the matching row(s).
    return cls.query.filter_by(
        upload_id=mp.upload_id, part_number=part_number).delete()
|
def _entry_must_exist ( df , k1 , k2 ) :
"""Evaluate key - subkey existence .
Checks that the key - subkey combo exists in the
configuration options ."""
|
count = df [ ( df [ 'k1' ] == k1 ) & ( df [ 'k2' ] == k2 ) ] . shape [ 0 ]
if count == 0 :
raise NotRegisteredError ( "Option {0}.{1} not registered" . format ( k1 , k2 ) )
|
def is_up_url(url, allow_redirects=False, timeout=5):
    r"""Check URL to see if it is a valid web page, returning the redirected location if so.

    Returns:
        None if a connection-level error occurred
        False if url is invalid (not a dotted string) or the server returned an HTTP error code
        cleaned up URL (following redirects and possibly adding the "http://" schema) otherwise
    """
    # Reject non-strings and strings that cannot contain a domain (no dot).
    # FIX: `basestring` is Python 2 only and raises NameError on Python 3.
    if not isinstance(url, str) or '.' not in url:
        return False
    normalized_url = prepend_http(url)
    session = requests.Session()
    # FIX: mount the retry adapter on the URL actually requested; mounting the
    # raw input (which may lack a scheme) never matches the request prefix,
    # so max_retries=2 silently never applied.
    session.mount(normalized_url, HTTPAdapter(max_retries=2))
    try:
        resp = session.get(normalized_url, allow_redirects=allow_redirects, timeout=timeout)
    except Exception:
        # Connection errors, timeouts, malformed URLs: treat all as "unknown".
        # (Narrowed from a bare `except:` which also swallowed KeyboardInterrupt.)
        return None
    if resp.status_code in (301, 302, 307) or resp.headers.get('location', None):
        # Return the redirected URL.
        return resp.headers.get('location', None)
    elif 100 <= resp.status_code < 400:
        # Return the original URL that was requested/visited.
        return normalized_url
    else:
        return False
|
def do_where(virtualenv=False, bare=True):
    """Executes the where functionality."""
    if virtualenv:
        # Report the virtualenv location instead of the project home.
        venv_location = project.virtualenv_location
        if bare:
            click.echo(venv_location)
        else:
            click.echo("Virtualenv location: {0}".format(crayons.green(venv_location)), err=True)
        return
    if not project.pipfile_exists:
        click.echo(
            "No Pipfile present at project home. Consider running "
            "{0} first to automatically generate a Pipfile for you."
            "".format(crayons.green("`pipenv install`")),
            err=True,
        )
        return
    location = project.pipfile_location
    if bare:
        click.echo(project.project_directory)
    else:
        # Shorten the virtual display of the path to the virtualenv.
        location = shorten_path(location)
        click.echo(
            "Pipfile found at {0}.\n Considering this to be the project home."
            "".format(crayons.green(location)),
            err=True,
        )
|
def add_route(self, handler, uri, methods=frozenset({'GET'}), host=None, strict_slashes=False):
    """Create a blueprint route from a function.

    :param handler: function for handling uri requests. Accepts function,
        or class instance with a view_class method.
    :param uri: endpoint at which the route will be accessible.
    :param methods: list of acceptable HTTP methods.
    :return: function or class instance
    """
    # HTTPMethodView subclasses expose their verbs as lowercase attributes.
    if hasattr(handler, 'view_class'):
        methods = {
            verb
            for verb in ('GET', 'POST', 'PUT', 'HEAD', 'OPTIONS', 'PATCH', 'DELETE')
            if getattr(handler.view_class, verb.lower(), None)
        }
    # Composition views carry their own verb -> handler mapping.
    if isinstance(handler, self._composition_view_class):
        methods = handler.handlers.keys()
    self.route(uri=uri, methods=methods, host=host, strict_slashes=strict_slashes)(handler)
    return handler
|
def state(self):
    """Compute and return the device state.

    :returns: Device state.
    """
    if not self.available:
        # Device is disconnected.
        return STATE_UNKNOWN
    if not self.screen_on:
        # Device is off.
        return STATE_OFF
    if not self.awake:
        # Screen saver is on.
        return STATE_IDLE
    if self.launcher or self.settings:
        # The launcher (or settings) is active.
        return STATE_STANDBY
    if self.wake_lock:
        # A wake lock is held: the device is playing.
        return STATE_PLAYING
    # Otherwise, device is paused.
    return STATE_PAUSED
|
def check_permission(instance, field, permission):
    """Check a permission for a given instance or field; raise if denied.

    :param instance: The instance to check
    :param field: The field name to check or None for instance
    :param permission: The permission to check
    """
    permission_test = get_permission_test(instance, field, permission)
    if not permission_test(instance):
        raise PermissionDeniedError(permission, instance, instance, field)
|
def to_pb(self):
    """Converts the row filter to a protobuf.

    :rtype: :class:`.data_v2_pb2.RowFilter`
    :returns: The converted current object.
    """
    sub_filters = [row_filter.to_pb() for row_filter in self.filters]
    interleave = data_v2_pb2.RowFilter.Interleave(filters=sub_filters)
    return data_v2_pb2.RowFilter(interleave=interleave)
|
def checkGradient(self, h=1e-6, verbose=True):
    """Verify the analytic gradient of the GP against a numerical estimate.

    Central differences with step *h* are computed for every parameter
    vector; when *verbose*, the absolute deviation from the analytic
    gradient is printed per key. Parameters are restored afterwards.
    """
    grad_an = self.LMLgrad()
    grad_num = {}
    params0 = self.params.copy()
    for key in list(self.params.keys()):
        n = self.params[key].shape[0]
        grad_num[key] = SP.zeros_like(self.params[key])
        for i in range(n):
            # Perturb only coordinate i, symmetrically around params0.
            direction = SP.zeros(n)
            direction[i] = 1
            left = params0.copy()
            right = params0.copy()
            left[key] = params0[key] - h * direction
            right[key] = params0[key] + h * direction
            grad_num[key][i] = (self.LML(right) - self.LML(left)) / (2 * h)
        if verbose:
            print(('%s:' % key))
            print((abs(grad_an[key] - grad_num[key])))
            print('')
    # Restore the unperturbed parameters.
    self.setParams(params0)
|
def from_dict(data, ctx):
    """Instantiate a new Account from a dict (generally from loading a JSON
    response). The data used to instantiate the Account is a shallow copy
    of the dict passed in, with any complex child types instantiated
    appropriately.
    """
    data = data.copy()
    # All fields carrying decimal-number strings, converted uniformly.
    decimal_fields = (
        'balance', 'pl', 'resettablePL', 'financing', 'commission',
        'guaranteedExecutionFees', 'marginRate', 'unrealizedPL', 'NAV',
        'marginUsed', 'marginAvailable', 'positionValue',
        'marginCloseoutUnrealizedPL', 'marginCloseoutNAV',
        'marginCloseoutMarginUsed', 'marginCloseoutPercent',
        'marginCloseoutPositionValue', 'withdrawalLimit',
        'marginCallMarginUsed', 'marginCallPercent',
    )
    for field in decimal_fields:
        if data.get(field) is not None:
            data[field] = ctx.convert_decimal_number(data.get(field))
    # Child collections instantiated via their model's from_dict.
    if data.get('trades') is not None:
        data['trades'] = [ctx.trade.TradeSummary.from_dict(d, ctx) for d in data.get('trades')]
    if data.get('positions') is not None:
        data['positions'] = [ctx.position.Position.from_dict(d, ctx) for d in data.get('positions')]
    if data.get('orders') is not None:
        data['orders'] = [ctx.order.Order.from_dict(d, ctx) for d in data.get('orders')]
    return Account(**data)
|
async def delete(self, request, resource=None, **kwargs):
    """Delete a resource.

    Supports batch delete: when no single *resource* is given, the request
    body is parsed for a list of primary keys to delete.

    :raises RESTNotFound: when nothing matches the request.
    """
    if resource:
        resources = [resource]
    else:
        data = await self.parse(request)
        # FIX: `resources` was previously left unbound when the parsed body
        # was empty, causing an UnboundLocalError instead of a 404.
        resources = list(self.collection.where(self.meta.model_pk << data)) if data else []
    if not resources:
        raise RESTNotFound(reason='Resource not found')
    for resource in resources:
        resource.delete_instance()
|
def yang_modules(self):
    """Return advertised YANG module names with revisions.

    Repeated modules are reported once (first occurrence wins).
    """
    modules = {}
    for cap in self.capabilities:
        name = cap.parameters.get("module")
        if name is not None and name not in modules:
            modules[name] = cap.parameters.get("revision")
    return modules.items()
|
def count_indents(self, spacecount, tabs=0, offset=0):
    """Count indents on the current line.

    An indent is either a tab character (when *tabs* is truthy) or a run
    of *spacecount* consecutive spaces.
    """
    if not self.has_space(offset=offset):
        return 0
    indents = 0
    run = 0  # consecutive spaces seen since the last completed indent
    line_start = self.pos + offset - self.col
    for char in self.string[line_start:]:
        if char == ' ':
            run += 1
        elif tabs and char == '\t':
            indents += 1
            run = 0
        else:
            break
        # A full run of spacecount spaces counts as one indent.
        if run == spacecount:
            indents += 1
            run = 0
    return indents
|
def push_build(id, tag_prefix):
    """Push build to Brew."""
    request = swagger_client.BuildRecordPushRequestRest()
    request.tag_prefix = tag_prefix
    request.build_record_id = id
    response = utils.checked_api_call(pnc_api.build_push, 'push', body=request)
    if response:
        return utils.format_json_list(response)
|
def merge_batches(self, data):
    """Merge a list of data minibatches into one single instance.

    Parameters
        data : list
            List of minibatches to merge
    Returns
        (anonymous) : sparse matrix | pd.DataFrame | list
            Single complete list-like data merged from given batches
    """
    first = data[0]
    if isinstance(first, ssp.csr_matrix):
        return ssp.vstack(data)
    if isinstance(first, (pd.DataFrame, pd.Series)):
        return pd.concat(data)
    # Plain list-likes: flatten one level.
    return [item for batch in data for item in batch]
|
def com_google_fonts_check_metadata_os2_weightclass(ttFont, font_metadata):
    """Checking OS/2 usWeightClass matches weight specified at METADATA.pb."""
    # Weight value -> name mappings. Note the Google Fonts API historically
    # used 250/275 for Thin/ExtraLight, unlike the CSS scale.
    GF_API_WEIGHT_NAMES = {250: "Thin", 275: "ExtraLight", 300: "Light",
                           400: "Regular", 500: "Medium", 600: "SemiBold",
                           700: "Bold", 800: "ExtraBold", 900: "Black"}
    CSS_WEIGHT_NAMES = {100: "Thin", 200: "ExtraLight", 300: "Light",
                        400: "Regular", 500: "Medium", 600: "SemiBold",
                        700: "Bold", 800: "ExtraBold", 900: "Black"}
    us_weight_class = ttFont["OS/2"].usWeightClass
    gf_weight = GF_API_WEIGHT_NAMES.get(us_weight_class, "bad Google Fonts API weight value")
    css_weight = CSS_WEIGHT_NAMES.get(font_metadata.weight, "bad CSS weight value")
    if gf_weight == css_weight:
        yield PASS, ("OS/2 usWeightClass matches"
                     " weight specified at METADATA.pb")
    else:
        yield FAIL, ("OS/2 usWeightClass ({}:\"{}\") does not match"
                     " weight specified at METADATA.pb ({}:\"{}\")."
                     "").format(us_weight_class, gf_weight,
                                font_metadata.weight, css_weight)
|
def clear_sonos_playlist(self, sonos_playlist, update_id=0):
    """Clear all tracks from a Sonos playlist.

    Convenience wrapper around :py:meth:`reorder_sonos_playlist`.

    Args:
        sonos_playlist: a DidlPlaylistContainer, or the item_id (str) of
            the Sonos playlist.
        update_id (int): Optional update counter for the object. If left
            at the default of 0, it will be looked up.
    Returns:
        dict: See :py:meth:`reorder_sonos_playlist`
    Raises:
        ValueError: If sonos_playlist specified by string and is not found.
        SoCoUPnPException: See :py:meth:`reorder_sonos_playlist`
    """
    if not isinstance(sonos_playlist, DidlPlaylistContainer):
        # Resolve an item_id string into the playlist object.
        sonos_playlist = self.get_sonos_playlist_by_attr('item_id', sonos_playlist)
    count = self.music_library.browse(ml_item=sonos_playlist).total_matches
    tracks = ','.join(str(index) for index in range(count))
    if not tracks:
        # Empty playlist: nothing to reorder.
        return {'change': 0, 'update_id': update_id, 'length': count}
    return self.reorder_sonos_playlist(sonos_playlist, tracks=tracks,
                                       new_pos='', update_id=update_id)
|
def list_assets_ddo(self):
    """List all the DDOs registered in the aquarius instance.

    :return: List of DDO instance
    """
    response = self.requests_session.get(self.url)
    return json.loads(response.content)
|
def _get_fs(thin_pathname):
    """Returns the file system type (xfs, ext4) of a given device."""
    # -n suppresses the header; -o FSTYPE prints only the filesystem column.
    result = util.subp(['lsblk', '-o', 'FSTYPE', '-n', thin_pathname])
    return result.stdout.strip()
|
def show(ctx):
    """Show migrations list"""
    db = ctx.obj['db']
    for app_name, app in ctx.obj['config']['apps'].items():
        click.echo(click.style(app_name, fg='green', bold=True))
        for migration in app['migrations']:
            marker = click.style('(applied)', bold=True) if db.is_migration_applied(app_name, migration) else ''
            click.echo(' {0} {1}'.format(migration, marker))
|
def select_groups(adata, groups='all', key='louvain'):
    """Get subset of groups in adata.obs[key]."""
    strings_to_categoricals(adata)
    # Integer group labels are matched against the string category names.
    if isinstance(groups, list) and isinstance(groups[0], int):
        groups = [str(n) for n in groups]
    categories = adata.obs[key].cat.categories
    obs_values = adata.obs[key].values
    # One boolean mask per category, in category order.
    groups_masks = np.array([category == obs_values for category in categories])
    if groups == 'all':
        groups = categories.values
    else:
        groups_ids = [np.where(categories.values == name)[0][0] for name in groups]
        groups_masks = groups_masks[groups_ids]
        groups = categories[groups_ids].values
    return groups, groups_masks
|
def get_tty_password(confirm):
    """Read a password from a TTY.

    A user is assumed to be typing it on a keyboard, so when *confirm* is
    truthy the password is requested twice and must match.
    """
    LOG.debug("Reading password from TTY")
    new_password = getpass('Enter Password: ', stream=sys.stderr)
    if not new_password:
        raise aomi.exceptions.AomiCommand("Must specify a password")
    if confirm:
        confirm_password = getpass('Again, Please: ', stream=sys.stderr)
        if confirm_password != new_password:
            raise aomi.exceptions.AomiCommand("Passwords do not match")
    return new_password
|
def modified_recipes(branch='origin/master'):
    """Returns a set of modified recipes between the current branch and the one
    in param.
    """
    # using the contrib version on purpose rather than sh.git, since it comes
    # with a bunch of fixes, e.g. disabled TTY, see:
    # https://stackoverflow.com/a/20128598/185510
    git_diff = sh.contrib.git.diff('--name-only', branch)
    return {
        file_path.split('/')[2]
        for file_path in git_diff
        if 'pythonforandroid/recipes/' in file_path
    }
|
def usearch_qf(fasta_filepath, refseqs_fp=None, output_dir=None, percent_id=0.97,
               percent_id_err=0.97, minsize=4, abundance_skew=2.0, db_filepath=None,
               rev=False, label_prefix="", label_suffix="", retain_label_as_comment=False,
               count_start=0, perc_id_blast=0.97, save_intermediate_files=False,
               HALT_EXEC=False, global_alignment=True, sizein=True, sizeout=True,
               w=64, slots=16769023, maxrejects=64, minlen=64,
               de_novo_chimera_detection=True, derep_fullseq=False,
               reference_chimera_detection=True, cluster_size_filtering=True,
               remove_usearch_logs=False, usersort=True, suppress_new_clusters=False,
               chimeras_retention="union", verbose=False):
    """Main convenience wrapper for using usearch to filter/cluster seqs.

    The complete 'usearch_qf' process is a multistep pipeline: length sort,
    dereplication, abundance sort, error-correction clustering, optional
    de novo and/or reference chimera detection, optional cluster-size
    filtering, final (de novo or reference-based) OTU clustering, OTU
    enumeration and read assignment.

    Key parameters:
    fasta_filepath: input fasta of seqs to filter/cluster.
    refseqs_fp: reference fasta for ref-based OTU picking (de novo if None).
    output_dir: directory for the otu mapping file, logs, and intermediates.
    percent_id / percent_id_err: clustering / chimera-filter identity.
    minsize: minimum cluster size retained after chimera removal.
    abundance_skew: threshold for de novo chimera removal.
    db_filepath: reference fasta for ref-based chimera detection.
    rev: also search minus strand in ref-based chimera detection.
    label_prefix / label_suffix / retain_label_as_comment / count_start:
        control OTU enumeration labels.
    save_intermediate_files: keep the intermediate files created here.
    de_novo_chimera_detection / reference_chimera_detection /
    cluster_size_filtering: enable the corresponding pipeline steps.
    remove_usearch_logs: skip usearch --log for each call.
    suppress_new_clusters: with ref-based picking, disallow new clusters.
    chimeras_retention: 'union' or 'intersection' of the two chimera filters.

    Returns (clusters, failures) from the final read-to-OTU assignment.
    """
    # Save a list of intermediate filepaths in case they are to be removed.
    intermediate_files = []
    # Need absolute paths to avoid problems with app controller.
    if output_dir:
        output_dir = abspath(output_dir) + '/'
    fasta_filepath = abspath(fasta_filepath)
    try:
        if verbose:
            print("Sorting sequences by length...")
        # Sort seqs by length.
        app_result, output_filepath_len_sorted = usearch_fasta_sort_from_filepath(
            fasta_filepath,
            output_filepath=join(output_dir, 'len_sorted.fasta'),
            save_intermediate_files=save_intermediate_files,
            remove_usearch_logs=remove_usearch_logs,
            working_dir=output_dir, HALT_EXEC=HALT_EXEC)
        intermediate_files.append(output_filepath_len_sorted)
        if verbose:
            print("Dereplicating sequences...")
        # Dereplicate sequences.
        app_result, output_filepath_dereplicated = usearch_dereplicate_exact_subseqs(
            output_filepath_len_sorted,
            output_filepath=join(output_dir, 'dereplicated_seqs.fasta'),
            minlen=minlen, w=w, slots=slots, sizeout=sizeout,
            maxrejects=maxrejects,
            save_intermediate_files=save_intermediate_files,
            remove_usearch_logs=remove_usearch_logs,
            working_dir=output_dir, HALT_EXEC=HALT_EXEC)
        intermediate_files.append(output_filepath_dereplicated)
        if verbose:
            print("Sorting by abundance...")
        # Sort by abundance, initially no filter based on seqs/otu.
        app_result, output_fp = usearch_sort_by_abundance(
            output_filepath_dereplicated,
            output_filepath=join(output_dir, 'abundance_sorted.fasta'),
            usersort=True, sizein=sizein, sizeout=sizeout, minsize=0,
            remove_usearch_logs=remove_usearch_logs,
            working_dir=output_dir, HALT_EXEC=HALT_EXEC)
        intermediate_files.append(output_fp)
        if verbose:
            print("Clustering sequences for error correction...")
        # Create .uc file of clusters file, to identify original sequences later.
        output_uc_filepath = output_dir + 'err_corrected_clusters.uc'
        app_result, error_clustered_output_fp = usearch_cluster_error_correction(
            output_fp,
            output_filepath=join(output_dir, 'clustered_error_corrected.fasta'),
            output_uc_filepath=output_uc_filepath,
            usersort=True, percent_id_err=percent_id_err, sizein=sizein,
            sizeout=sizeout, w=w, slots=slots, maxrejects=maxrejects,
            remove_usearch_logs=remove_usearch_logs,
            save_intermediate_files=save_intermediate_files,
            working_dir=output_dir, HALT_EXEC=HALT_EXEC)
        intermediate_files.append(error_clustered_output_fp)
        intermediate_files.append(output_uc_filepath)
        # Series of conditional tests, using generic 'output_fp' name so the
        # conditional filtering, if any/all are selected, do not matter.
        if de_novo_chimera_detection:
            if verbose:
                print("Performing de novo chimera detection...")
            app_result, output_fp_de_novo_nonchimeras = usearch_chimera_filter_de_novo(
                error_clustered_output_fp,
                abundance_skew=abundance_skew,
                output_chimera_filepath=join(output_dir, 'de_novo_chimeras.fasta'),
                output_non_chimera_filepath=join(output_dir, 'de_novo_non_chimeras.fasta'),
                usersort=True,
                save_intermediate_files=save_intermediate_files,
                remove_usearch_logs=remove_usearch_logs,
                working_dir=output_dir, HALT_EXEC=HALT_EXEC)
            intermediate_files.append(output_fp_de_novo_nonchimeras)
            output_fp = output_fp_de_novo_nonchimeras
        if reference_chimera_detection:
            if verbose:
                print("Performing reference based chimera detection...")
            app_result, output_fp_ref_nonchimeras = usearch_chimera_filter_ref_based(
                error_clustered_output_fp,
                db_filepath=db_filepath,
                output_chimera_filepath=join(output_dir, 'reference_chimeras.fasta'),
                output_non_chimera_filepath=join(output_dir, 'reference_non_chimeras.fasta'),
                usersort=True,
                save_intermediate_files=save_intermediate_files,
                rev=rev,
                remove_usearch_logs=remove_usearch_logs,
                working_dir=output_dir, HALT_EXEC=HALT_EXEC)
            intermediate_files.append(output_fp_ref_nonchimeras)
            output_fp = output_fp_ref_nonchimeras
        # Get intersection or union if both ref and de novo chimera detection.
        if de_novo_chimera_detection and reference_chimera_detection:
            if verbose:
                print("Finding %s of non-chimeras..." % chimeras_retention)
            output_fp = get_retained_chimeras(
                output_fp_de_novo_nonchimeras, output_fp_ref_nonchimeras,
                output_combined_fp=join(output_dir, 'combined_non_chimeras.fasta'),
                chimeras_retention=chimeras_retention)
            intermediate_files.append(output_fp)
        if cluster_size_filtering:
            if verbose:
                print("Filtering by cluster size...")
            # If chimera detection was not performed, use the error-corrected
            # clusters as input to filtering by cluster size.
            # NOTE(review): this condition resets output_fp even when only ONE
            # of the two chimera filters ran, discarding that filter's result;
            # preserved as-is since the intent cannot be confirmed here.
            if not (reference_chimera_detection and de_novo_chimera_detection):
                output_fp = error_clustered_output_fp
            app_result, output_fp = usearch_sort_by_abundance(
                output_fp,
                output_filepath=join(output_dir,
                                     'abundance_sorted_minsize_' + str(minsize) + '.fasta'),
                minsize=minsize, sizein=sizein, sizeout=sizeout,
                remove_usearch_logs=remove_usearch_logs,
                working_dir=output_dir, HALT_EXEC=HALT_EXEC)
            intermediate_files.append(output_fp)
        # Cluster seqs: reference-based when refseqs_fp given, de novo otherwise.
        if refseqs_fp:
            if verbose:
                print("Clustering against reference sequences...")
            app_result, output_filepath = usearch_cluster_seqs_ref(
                output_fp,
                output_filepath=join(output_dir, 'ref_clustered_seqs.uc'),
                percent_id=percent_id, sizein=sizein, sizeout=sizeout,
                w=w, slots=slots, maxrejects=maxrejects,
                save_intermediate_files=save_intermediate_files,
                remove_usearch_logs=remove_usearch_logs,
                suppress_new_clusters=suppress_new_clusters,
                refseqs_fp=refseqs_fp, output_dir=output_dir,
                working_dir=output_dir, rev=rev, HALT_EXEC=HALT_EXEC)
        else:
            if verbose:
                print("De novo clustering sequences...")
            app_result, output_filepath = usearch_cluster_seqs(
                output_fp,
                output_filepath=join(output_dir, 'clustered_seqs.fasta'),
                percent_id=percent_id, sizein=sizein, sizeout=sizeout,
                w=w, slots=slots, maxrejects=maxrejects,
                save_intermediate_files=save_intermediate_files,
                remove_usearch_logs=remove_usearch_logs,
                working_dir=output_dir, HALT_EXEC=HALT_EXEC)
        intermediate_files.append(output_filepath)
        # Enumerate the OTUs in the clusters.
        if not suppress_new_clusters:
            if verbose:
                print("Enumerating OTUs...")
            output_filepath = enumerate_otus(
                output_filepath,
                output_filepath=join(output_dir, 'enumerated_otus.fasta'),
                label_prefix=label_prefix, label_suffix=label_suffix,
                count_start=count_start,
                retain_label_as_comment=retain_label_as_comment)
            intermediate_files.append(output_filepath)
        # Get original sequence label identities.
        if verbose:
            print("Assigning sequences to clusters...")
        # NOTE(review): the perc_id_blast *parameter* is unused here; the
        # call passes percent_id, matching the original behavior.
        app_result, clusters_file = assign_reads_to_otus(
            fasta_filepath,
            filtered_fasta=output_filepath,
            output_filepath=join(output_dir, 'assign_reads_to_otus.uc'),
            perc_id_blast=percent_id,
            global_alignment=global_alignment,
            remove_usearch_logs=remove_usearch_logs,
            working_dir=output_dir, HALT_EXEC=HALT_EXEC)
        intermediate_files.append(clusters_file)
    except ApplicationError:
        raise ApplicationError('Error running usearch. Possible causes are '
                               'unsupported version (current supported version is usearch ' +
                               'v5.2.236) is installed or improperly formatted input file was ' +
                               'provided')
    except ApplicationNotFoundError:
        # FIX: previously referenced the undefined name `files_to_remove`,
        # which raised a NameError instead of cleaning up the intermediates.
        remove_files(intermediate_files)
        raise ApplicationNotFoundError('usearch not found, is it properly ' +
                                       'installed?')
    # Get dict of clusters, list of failures.
    # Set OTU ID field to 9 for the case of closed reference OTU picking.
    if suppress_new_clusters:
        otu_id_field = 9
    else:
        otu_id_field = 1
    clusters, failures = clusters_from_blast_uc_file(
        open(clusters_file, "U"), otu_id_field)
    # Remove temp files unless user specifies output filepath.
    if not save_intermediate_files:
        remove_files(intermediate_files)
    return clusters, failures
|
def delete_secret_versions(self, path, versions, mount_point=DEFAULT_MOUNT_POINT):
    """Issue a soft delete of the specified versions of the secret.

    Marks the versions as deleted so reads stop returning them; the
    underlying data is kept and can be restored via the undelete path.

    Supported methods:
        POST: /{mount_point}/delete/{path}. Produces: 204 (empty body)

    :param path: Specifies the path of the secret to delete. This is specified as part of the URL.
    :type path: str | unicode
    :param versions: Version numbers to soft-delete; must be a non-empty list
        of ints. The versioned data is not removed, only hidden from reads.
    :type versions: list of int
    :param mount_point: The "path" the secret engine was mounted on.
    :type mount_point: str | unicode
    :return: The response of the request.
    :rtype: requests.Response
    """
    # Validate before issuing the request: anything but a non-empty list is rejected.
    if not isinstance(versions, list) or len(versions) == 0:
        error_msg = 'argument to "versions" must be a list containing one or more integers, "{versions}" provided.'.format(versions=versions)
        raise exceptions.ParamValidationError(error_msg)
    payload = {
        'versions': versions,
    }
    api_path = '/v1/{mount_point}/delete/{path}'.format(mount_point=mount_point, path=path)
    return self._adapter.post(url=api_path, json=payload)
|
def spawn(self, __groups, __coro_fun, *args, **kwargs):
    """Start a new coroutine and add it to the pool atomically.

    :param groups: The groups the coroutine belongs to.
    :type groups: :class:`set` of group keys
    :param coro_fun: Coroutine function to run
    :param args: Positional arguments to pass to `coro_fun`
    :param kwargs: Keyword arguments to pass to `coro_fun`
    :raise RuntimeError: if the limit on any of the groups or the total
        limit is exhausted
    :rtype: :class:`asyncio.Task`
    :return: The task in which the coroutine runs.

    .. note::
        The first two arguments can only be passed positionally, not as
        keywords; their double-underscore names prevent conflicts with
        keyword arguments meant for `coro_fun`.

    NOTE(review): the docstring promises group/total limit enforcement
    raising RuntimeError, but this body never consults any limits --
    confirm against the rest of the class.
    """
    # Every task implicitly belongs to the catch-all group keyed by ().
    member_groups = set(__groups) | {()}
    task = asyncio.ensure_future(__coro_fun(*args, **kwargs))
    return task
|
def update_host(self, url: URL) -> None:
    """Update destination host, port and connection type (ssl)."""
    # A URL without a host component cannot identify a destination.
    if not url.host:
        raise InvalidURL(url)
    # Pick up basic-auth credentials embedded in the URL, if present.
    user, pwd = url.user, url.password
    if user:
        self.auth = helpers.BasicAuth(user, pwd or '')
|
def chunks(l, n):
    '''chunk l in n sized bits'''
    # Successive slices of width n; the final chunk may be shorter.
    # http://stackoverflow.com/a/3226719
    result = []
    for start in range(0, len(l), n):
        result.append(l[start:start + n])
    return result
|
def main():
    """Parse command line options and either launch some configuration
    dialog or start an instance of _MainLoop as a daemon.
    """
    (options, _) = _parse_args()
    if options.change_password:
        c.keyring_set_password(c["username"])
        sys.exit(0)
    if options.select:
        courses = client.get_courses()
        c.selection_dialog(courses)
        c.save()
        sys.exit(0)
    if options.stop:
        # Send SIGINT to the running daemon identified by its pid file.
        os.system("kill -2 `cat ~/.studdp/studdp.pid`")
        sys.exit(0)
    task = _MainLoop(options.daemonize, options.update_courses)
    if options.daemonize:
        log.info("daemonizing...")
        with daemon.DaemonContext(working_directory=".", pidfile=PIDLockFile(PID_FILE)):
            # we have to create a new logger in the daemon context
            handler = logging.FileHandler(LOG_PATH)
            # BUG FIX: setFormatter() requires a logging.Formatter instance;
            # passing the bare format string left records unformatted.
            handler.setFormatter(logging.Formatter('%(asctime)s [%(levelname)s] %(name)s: %(message)s'))
            log.addHandler(handler)
            task()
    else:
        task()
|
def url_value_preprocessor(self, func: Callable, name: AppOrBlueprintKey = None) -> Callable:
    """Add a url value preprocessor.

    This is designed to be used as a decorator. An example usage,

    .. code-block:: python

        @app.url_value_preprocessor
        def value_preprocessor(endpoint, view_args):
            ...
    """
    # Register under the given key (None == application-wide) and return
    # the function unchanged so decorator usage is transparent.
    bucket = self.url_value_preprocessors[name]
    bucket.append(func)
    return func
|
def key(self, key_id):
    """GET /:login/keys/:key

    :param key_id: identifier for an individual key record for the account
    :type key_id: :py:class:`basestring`
    :returns: details of the key
    :rtype: :py:class:`dict`
    """
    # Only the JSON body matters; the second element of the pair returned
    # by request() is discarded.
    body, _ = self.request('GET', '/keys/' + str(key_id))
    return body
|
def urlencode(query, params):
    """Correctly convert the given query and parameters into a full query + query
    string, ensuring the order of the params.
    """
    # Values are percent-encoded; keys are appended verbatim, and the
    # original ordering of ``params`` is preserved.
    pairs = []
    for key, value in params:
        pairs.append(key + '=' + quote_plus(str(value)))
    return query + '?' + '&'.join(pairs)
|
def get_last_ticker(self, symbol):
    """Fetch the latest ticker for a symbol.

    :param symbol: market symbol to query
    :return: a decorator that, applied to a callback, produces a no-arg
        function performing the HTTP GET and feeding the response to the
        callback.
    """
    query = {'symbol': symbol}
    # NOTE(review): endpoint is '/market/trade' although the name says
    # "ticker" -- confirm against the exchange API.
    endpoint = u.MARKET_URL + '/market/trade'

    def decorator(callback):
        @wraps(callback)
        def fire():
            callback(http_get_request(endpoint, query))
        return fire

    return decorator
|
def clean_tempdir(context, scenario):
    """Clean up temporary test dirs for passed tests.

    Leave failed test dirs for manual inspection.
    """
    path = getattr(context, 'tempdir', None)
    if not path or scenario.status != 'passed':
        return
    shutil.rmtree(path)
    # Drop the attribute so a later scenario cannot reuse a stale path.
    del context.tempdir
|
def put_mapping(self, doc_type=None, mapping=None, indices=None, ignore_conflicts=None):
    """Register specific mapping definition for a specific type against one
    or more indices.

    (See :ref:`es-guide-reference-api-admin-indices-put-mapping`)
    """
    # Normalise the mapping into a plain dict.
    if mapping is None:
        mapping = {}
    elif not isinstance(mapping, dict) and hasattr(mapping, "as_dict"):
        mapping = mapping.as_dict()
    if doc_type:
        path = self.conn._make_path(indices, doc_type, "_mapping")
        # The API expects the request body keyed by the document type.
        if doc_type not in mapping:
            mapping = {doc_type: mapping}
    else:
        path = self.conn._make_path(indices, (), "_mapping")
    parameters = {}
    if ignore_conflicts is not None:
        parameters['ignore_conflicts'] = ignore_conflicts
    return self.conn._send_request('PUT', path, mapping, params=parameters)
|
def get_network_ipv4(self, id_network):
    """Get networkipv4.

    :param id_network: Identifier of the Network. Integer value and greater than zero.

    :return: Dictionary with the network data under the ``'network'`` key:
        id, network_type, ambiente_vip, vlan, oct1..oct4, block,
        mask_oct1..mask_oct4, active, broadcast.

    :raise NetworkIPv4NotFoundError: NetworkIPV4 not found.
    :raise InvalidValueError: Invalid ID for NetworkIpv4.
    :raise NetworkIPv4Error: Error in NetworkIpv4.
    :raise XMLError: Networkapi failed to generate the XML response.
    """
    # Validate the identifier before issuing the request.
    if not is_valid_int_param(id_network):
        raise InvalidParameterError(
            u'O id do rede ip4 foi informado incorretamente.')
    url = 'network/ipv4/id/%s/' % id_network
    code, xml = self.submit(None, 'GET', url)
    return self.response(code, xml)
|
def poll(self, timeout):
    """:param float timeout: Timeout in seconds."""
    # Poll the stream (sleeping between attempts) until data arrives or
    # the deadline passes.
    deadline = time.time() + float(timeout)
    while True:
        data = self.stream.read(self._max_read)
        if data:
            return data
        if time.time() > deadline:
            raise ExpectTimeout()
        time.sleep(self._poll_period)
|
def get(self):
    """Reads the remote file from Gist and save it locally"""
    # Nothing to fetch when no gist is configured.
    if not self.gist:
        return
    remote_content = self.github.read_gist_file(self.gist)
    self.local.save(remote_content)
|
def listen(self):
    """Set up a quick connection. Returns on disconnect.

    After calling `connect()`, this waits for messages from the server
    using `select`, and notifies the subscriber of any events.
    """
    import select
    while self.connected:
        readable, _, errored = select.select((self.ws.sock,), (), ())
        if readable:
            self.on_message()
        elif errored:
            # Report the socket error, then tear the connection down.
            self.subscriber.on_sock_error(errored)
            self.disconnect()
|
def _format_episode_numbers(episodenumbers):
    """Format episode number(s) into string, using configured values."""
    single_fmt = cfg.CONF.episode_single
    if len(episodenumbers) == 1:
        return single_fmt % episodenumbers[0]
    # Multiple episodes: format each one and join with the configured separator.
    return cfg.CONF.episode_separator.join(
        single_fmt % number for number in episodenumbers)
|
def _MergeIdenticalCaseInsensitive ( self , a , b ) :
"""Tries to merge two strings .
The string are required to be the same ignoring case . The second string is
always used as the merged value .
Args :
a : The first string .
b : The second string .
Returns :
The merged string . This is equal to the second string .
Raises :
MergeError : The strings were not the same ignoring case ."""
|
if a . lower ( ) != b . lower ( ) :
raise MergeError ( "values must be the same (case insensitive) " "('%s' vs '%s')" % ( transitfeed . EncodeUnicode ( a ) , transitfeed . EncodeUnicode ( b ) ) )
return b
|
def flush(self, queue_name):
    """Drop all the messages from a queue.

    Parameters:
      queue_name(str): The queue to flush.
    """
    # Purge the queue itself together with its dq_/xq_ companion queues.
    targets = (queue_name, dq_name(queue_name), xq_name(queue_name))
    for target in targets:
        self.channel.queue_purge(target)
|
def get_urlpatterns(self):
    """Returns the URL patterns managed by the considered factory/application."""
    # (prefix, factory) pairs, in mounting order.
    mounts = (
        (r'', self.forum_urlpatterns_factory),
        (r'', self.conversation_urlpatterns_factory),
        (_(r'^feeds/'), self.feeds_urlpatterns_factory),
        (_(r'^member/'), self.member_urlpatterns_factory),
        (_(r'^moderation/'), self.moderation_urlpatterns_factory),
        (_(r'^search/'), self.search_urlpatterns_factory),
        (_(r'^tracking/'), self.tracking_urlpatterns_factory),
    )
    return [url(prefix, include(factory.urlpatterns)) for prefix, factory in mounts]
|
def check_key(user, key, enc, comment, options, config='.ssh/authorized_keys', cache_keys=None, fingerprint_hash_type=None):
    '''Check to see if a key needs updating, returns "update", "add" or "exists"

    CLI Example:

    .. code-block:: bash

        salt '*' ssh.check_key <user> <key> <enc> <comment> <options>'''
    if cache_keys is None:
        cache_keys = []
    enc = _refine_enc(enc)
    current = auth_keys(user, config=config, fingerprint_hash_type=fingerprint_hash_type)
    nline = _format_auth_line(key, enc, comment, options)
    # Deliberately NOT pruning keys absent from cache_keys: the file may hold
    # non-salt-managed keys, and several states may each manage keys for the
    # same user, so removing "all other" keys would make the states fight.
    if key not in current:
        return 'add'
    cline = _format_auth_line(key, current[key]['enc'], current[key]['comment'], current[key]['options'])
    return 'update' if cline != nline else 'exists'
|
def unique(transactions):
    """Remove any duplicate entries."""
    # TODO: Handle comments
    seen = set()
    deduped = []
    for entry in transactions:
        if entry not in seen:
            seen.add(entry)
            deduped.append(entry)
    return deduped
|
def allowMethodWithConditions(self, verb, resource, conditions):
    """Adds an API Gateway method (Http verb + Resource path) to the list of
    allowed methods and includes a condition for the policy statement. More on
    AWS policy conditions here:
    http://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_elements.html#Condition
    """
    # Delegate to the generic helper with an explicit Allow effect.
    effect = "Allow"
    self._addMethod(effect, verb, resource, conditions)
|
def perturb_tmat(transmat, scale):
    '''Perturb each nonzero entry of an MSM transition matrix.

    Each element t_ij is treated as a Gaussian random variable with mean t_ij
    and standard deviation scale_ij (the standard error computed using
    "create_perturb_params"), yielding a sampled transition matrix that
    accounts for finite-sampling error (useful for bootstrapping, etc.).

    Parameters:
    transmat: np.ndarray:
        The transition matrix, whose elements serve as the means of the
        Gaussian random variables
    scale: np.ndarray:
        The matrix of standard errors. For transition probability t_ij, this
        is assumed to be the standard error of the mean of a binomial
        distribution with p = transition probability and number of
        observations equal to the summed counts in row i.
    '''
    # One scalar Gaussian draw per element (vectorize preserves the
    # element-wise draw order of the original implementation).
    sampled = np.vectorize(np.random.normal)(transmat, scale)
    # Probabilities cannot be negative.
    sampled[sampled < 0] = 0
    # Re-normalise each row so it sums to one.
    row_totals = np.sum(sampled, axis=1)
    return (sampled.transpose() / row_totals).transpose()
|
def handle_json_wrapper_GET(self, handler, parsed_params):
    """Call handler and output the return value in JSON.

    :param handler: callable invoked with parsed_params; its result is
        JSON-encoded via ResultEncoder.
    :param parsed_params: pre-parsed request parameters passed to handler.
    """
    # FIX: removed the unused local `schedule = self.server.schedule`.
    result = handler(parsed_params)
    content = ResultEncoder().encode(result)
    self.send_response(200)
    self.send_header('Content-Type', 'text/plain')
    self.send_header('Content-Length', str(len(content)))
    self.end_headers()
    # NOTE(review): content is a str; on Python 3 wfile.write() needs bytes
    # -- confirm which interpreter this server targets.
    self.wfile.write(content)
|
def factory(method, description="", request_example=None, request_ctor=None, responses=None, method_choices=HTTP_METHODS, ):
    """desc: Describes a single HTTP method of a URI
    args:
      - name: method
        type: str
        desc: The HTTP request method to use
      - name: description
        type: str
        desc: The description of what this call does
        required: false
        default: ""
      - name: request_example
        type: dict
        desc: An example JSON request body
        required: false
        default: null
      - name: request_ctor
        type: method
        desc: Docstring will be parsed into help for @request_example
        required: false
        default: null
      - name: responses
        type: list
        subtypes: [RouteMethodResponse]
        desc: >
            Each object describes a possible response and describes
            the condition(s) that may cause it
        ctor: pymarshal.api_docs.routes.RouteMethodResponse.__init__
      - name: method_choices
        type: list
        subtypes: ["str"]
        desc: The HTTP methods to allow for @method
        hide: true
        required: false
        default: [DELETE, GET, PATCH, POST, PUT]
    """
    # NOTE(review): the YAML-style docstring above appears to be machine-parsed
    # for API documentation -- verify any layout change against the generator.
    # The ctor's docstring is only parsed into request help when one is given.
    return RouteMethod(method, description, request_example, DocString.from_ctor(request_ctor) if request_ctor else None, responses, method_choices, )
|
def ReadChildFlowObjects(self, client_id, flow_id, cursor=None):
    """Reads flows that were started by a given flow from the database.

    :param client_id: client identifier, converted to its integer DB form.
    :param flow_id: parent flow identifier, converted to its integer DB form.
    :param cursor: database cursor used to run the query.
    :return: list of flow objects built from the matching rows.
    """
    # BUG FIX: a space is needed before FROM -- the concatenation previously
    # produced "...<last field>FROM flows...", i.e. invalid SQL (assuming
    # FLOW_DB_FIELDS has no trailing space -- confirm against its definition).
    query = ("SELECT " + self.FLOW_DB_FIELDS +
             " FROM flows WHERE client_id=%s AND parent_flow_id=%s")
    cursor.execute(query, [db_utils.ClientIDToInt(client_id),
                           db_utils.FlowIDToInt(flow_id)])
    return [self._FlowObjectFromRow(row) for row in cursor.fetchall()]
|
def PHASE(angle, qubit):
    """Produces the PHASE gate::

        PHASE(phi) = [[1, 0],
                      [0, exp(1j * phi)]]

    This is the same as the RZ gate.

    :param angle: The angle to rotate around the z-axis on the bloch sphere.
    :param qubit: The qubit apply the gate to.
    :returns: A Gate object.
    """
    target = unpack_qubit(qubit)
    return Gate(name="PHASE", params=[angle], qubits=[target])
|
def weighted_choice(weights, as_index_and_value_tuple=False):
    """Generate a non-uniform random choice based on a list of option tuples.

    Treats each outcome as a discreet unit with a chance to occur.

    Args:
        weights (list): a list of options where each option
            is a tuple of form ``(Any, float)`` corresponding to
            ``(outcome, strength)``. Outcome values may be of any type.
            Options with strength ``0`` or less will have no chance to be
            chosen.
        as_index_and_value_tuple (bool): Option to return an ``(index, value)``
            tuple instead of just a single ``value``. This is useful when
            multiple outcomes in ``weights`` are the same and you need to know
            exactly which one was picked.

    Returns:
        Any: If ``as_index_and_value_tuple is False``, any one of the items in
        the outcomes of ``weights``

        tuple (int, Any): If ``as_index_and_value_tuple is True``,
        a 2-tuple of form ``(int, Any)`` corresponding to ``(index, value)``
        of the item that was picked.

    Raises:
        ValueError: if ``weights`` is empty.
        ProbabilityUndefinedError: if no strength is greater than ``0``.

    Example:
        >>> choices = [('choice one', 10), ('choice two', 3)]
        >>> weighted_choice(choices)                    # doctest: +SKIP
        'choice one'
        >>> weighted_choice(choices,
        ...                 as_index_and_value_tuple=True)  # doctest: +SKIP
        (0, 'choice one')
    """
    if not weights:
        raise ValueError('List passed to weighted_choice() cannot be empty.')
    # Construct a line segment where every option with positive strength is
    # allotted a length equal to that strength, pick a uniformly random point
    # along it, and take the option whose segment contains the point.
    #
    # BUG FIX: non-positive strengths previously (a) contributed to the total,
    # so negative strengths skewed every later segment, and (b) could be
    # selected when the sample landed exactly on a segment boundary. They are
    # now excluded outright, matching the documented contract.
    prob_sum = sum(strength for _, strength in weights if strength > 0)
    if prob_sum <= 0:
        raise ProbabilityUndefinedError(
            'No item weights in weighted_choice() are greater than 0. '
            'Probability distribution is undefined.')
    sample = random.uniform(0, prob_sum)
    current_pos = 0
    for index, (outcome, strength) in enumerate(weights):
        if strength <= 0:
            continue
        current_pos += strength
        if sample <= current_pos:
            return (index, outcome) if as_index_and_value_tuple else outcome
    # Floating-point round-off can leave `sample` a hair past the last
    # segment; fall back to the final positive-strength option.
    for index in range(len(weights) - 1, -1, -1):
        if weights[index][1] > 0:
            outcome = weights[index][0]
            return (index, outcome) if as_index_and_value_tuple else outcome
    raise AssertionError('Something went wrong in weighted_choice(). '
                         'Please submit a bug report!')
|
def _rgba(r, g, b, a, **kwargs):
    """Converts an rgba (red, green, blue, alpha) quadruplet into a color."""
    # Coerce every channel to float before handing off to ColorValue.
    channels = tuple(float(channel) for channel in (r, g, b, a))
    return ColorValue(channels)
|
def copy_contents(self, fileinstance, progress_callback=None, chunk_size=None, **kwargs):
    """Copy this file instance into another file instance."""
    # Guards: source must be readable and this instance must still be empty.
    if not fileinstance.readable:
        raise ValueError('Source file instance is not readable.')
    if self.size != 0:
        raise ValueError('File instance has data.')
    destination = self.storage(**kwargs)
    source = fileinstance.storage(**kwargs)
    self.set_uri(*destination.copy(source, chunk_size=chunk_size, progress_callback=progress_callback))
|
def transform(self, X):
    """If already fit, embed new points X into the existing low-dim map.

    :param X: array of new points; feature count must match the training
        data (or its projection ``self.v``).
    :return: embedded coordinates, or an empty array when the model has not
        been fit / the feature count does not match.
    """
    # BUG FIX: the failure paths previously left `iclustup` as a plain list,
    # so the `.ndim` access below raised AttributeError; start with an empty
    # ndarray instead so errors yield an empty result.
    iclustup = np.zeros(0)
    dims = self.n_components
    if hasattr(self, 'isort1'):
        if X.shape[1] == self.v.shape[0]:
            # reduce dimensionality of X
            X = X @ self.v
            nclust = self.n_X
            AtS = self.A.T @ self.S
            vnorm = np.sum(self.S * (self.A @ AtS), axis=0)[np.newaxis, :]
            cv = X @ AtS
            # Correlation map, rectified and normalised per cluster.
            cmap = np.maximum(0., cv) ** 2 / vnorm
            iclustup, cmax = upsample(np.sqrt(cmap), dims, nclust, 10)
        else:
            print('ERROR: new points do not have as many features as original data')
    else:
        print('ERROR: need to fit model first before you can embed new points')
    if iclustup.ndim > 1:
        iclustup = iclustup.T
    else:
        iclustup = iclustup.flatten()
    return iclustup
|
def sync_to_db_from_config(cls, druid_config, user, cluster, refresh=True):
    """Merges the ds config from druid_config into one stored in the db.

    :param druid_config: datasource config dict reported by Druid; must
        contain 'name', 'dimensions' and 'metrics_spec'.
    :param user: recorded as owner/creator when a new datasource is created.
    :param cluster: cluster the datasource belongs to.
    :param refresh: when False and the datasource already exists, return
        without touching columns or metrics.
    """
    session = db.session
    # Look up an existing datasource row by its Druid name.
    datasource = (session.query(cls).filter_by(datasource_name=druid_config['name']).first())
    # Create a new datasource.
    if not datasource:
        datasource = cls(datasource_name=druid_config['name'], cluster=cluster, owners=[user], changed_by_fk=user.id, created_by_fk=user.id, )
        session.add(datasource)
    elif not refresh:
        return
    dimensions = druid_config['dimensions']
    # Existing column rows for the reported dimensions, keyed by name.
    col_objs = (session.query(DruidColumn).filter(DruidColumn.datasource_id == datasource.id).filter(DruidColumn.column_name.in_(dimensions)))
    col_objs = {col.column_name: col for col in col_objs}
    for dim in dimensions:
        col_obj = col_objs.get(dim, None)
        if not col_obj:
            # New dimension: default to a groupable, filterable STRING column.
            col_obj = DruidColumn(datasource_id=datasource.id, column_name=dim, groupby=True, filterable=True, # TODO: fetch type from Hive.
            type='STRING', datasource=datasource, )
            session.add(col_obj)
    # Import Druid metrics
    metric_objs = (session.query(DruidMetric).filter(DruidMetric.datasource_id == datasource.id).filter(DruidMetric.metric_name.in_(spec['name'] for spec in druid_config['metrics_spec'])))
    metric_objs = {metric.metric_name: metric for metric in metric_objs}
    for metric_spec in druid_config['metrics_spec']:
        metric_name = metric_spec['name']
        metric_type = metric_spec['type']
        metric_json = json.dumps(metric_spec)
        if metric_type == 'count':
            # Druid 'count' metrics are stored re-mapped to a longSum spec.
            metric_type = 'longSum'
            metric_json = json.dumps({'type': 'longSum', 'name': metric_name, 'fieldName': metric_name, })
        metric_obj = metric_objs.get(metric_name, None)
        if not metric_obj:
            metric_obj = DruidMetric(metric_name=metric_name, metric_type=metric_type, verbose_name='%s(%s)' % (metric_type, metric_name), datasource=datasource, json=metric_json, description=('Imported from the airolap config dir for %s' % druid_config['name']), )
            session.add(metric_obj)
    session.commit()
|
def update_keys(self):
    """Update the redis keys to listen for new jobs priorities."""
    waiting = self.queue_model.get_waiting_keys(self.queues)
    self.keys = waiting
    if not waiting:
        self.log('No queues yet', level='warning')
    # Remember when the key list was last refreshed.
    self.last_update_keys = datetime.utcnow()
|
def new_data(self, mem, addr, data):
    """Callback for when new memory data has been fetched.

    :param mem: memory object the data belongs to; ignored unless its id
        matches this object's id.
    :param addr: address the data was read from.
    :param data: raw bytes that were read.
    """
    done = False
    if mem.id == self.id:
        if addr == LocoMemory.MEM_LOCO_INFO:
            # First byte of the info block is the anchor count.
            self.nr_of_anchors = data[0]
            if self.nr_of_anchors == 0:
                done = True
            else:
                # Allocate one AnchorData per anchor, then start fetching page 0.
                self.anchor_data = [AnchorData() for _ in range(self.nr_of_anchors)]
                self._request_page(0)
        else:
            # Anchor pages: derive the page index from the address offset.
            page = int((addr - LocoMemory.MEM_LOCO_ANCHOR_BASE) / LocoMemory.MEM_LOCO_ANCHOR_PAGE_SIZE)
            self.anchor_data[page].set_from_mem_data(data)
            # Chain the next page request until every anchor has been read.
            next_page = page + 1
            if next_page < self.nr_of_anchors:
                self._request_page(next_page)
            else:
                done = True
    if done:
        # All data fetched: mark valid and fire the one-shot callback.
        self.valid = True
        if self._update_finished_cb:
            self._update_finished_cb(self)
            self._update_finished_cb = None
|
def set_start(self, time, pad=None):
    """Set the start time of the datafind query.

    @param time: GPS start time of query.
    @param pad: optional seconds subtracted from `time` for the query option.
    """
    start = int(time) - int(pad) if pad else int(time)
    self.add_var_opt('gps-start-time', start)
    # The recorded start keeps the original (unpadded) value.
    self.__start = time
    self.__set_output()
|
def add(self, left_column, right_column, indexes=None):
    """Math helper method that adds element-wise two columns. If indexes are
    not None then will only perform the math on that sub-set of the columns.

    :param left_column: first column name
    :param right_column: second column name
    :param indexes: list of index values or list of booleans. If a list of
        booleans then the list must be the same length as the DataFrame
    :return: list
    """
    lhs, rhs = self._get_lists(left_column, right_column, indexes)
    return [left + right for left, right in zip(lhs, rhs)]
|
def get_apps(exclude=(), append=(), current={'apps': INSTALLED_APPS}):
    """Returns INSTALLED_APPS without the apps listed in exclude and with the
    apps listed in append.

    The use of a mutable dict is intentional, in order to preserve the state
    of the INSTALLED_APPS tuple across multiple settings files.
    """
    kept = tuple(app for app in current['apps'] if app not in exclude)
    current['apps'] = kept + tuple(append)
    return current['apps']
|
def change_interface_id(self, newid):
    """Change the inline interface ID. The current format is
    nicid='1-2', where '1' is the top level interface ID (first),
    and '2' is the second interface in the pair. Consider the existing
    nicid in case this is a VLAN.

    :param str newid: string defining new pair, i.e. '3-4'
    :return: None
    """
    parts = newid.split('-')
    if len(parts) != 2:
        raise EngineCommandFailed('You must provide two parts when changing '
                                  'the interface ID on an inline interface, i.e. 1-2.')
    newleft, newright = parts
    current_first, current_second = self.nicid.split('-')
    if '.' in current_first and '.' in current_second:
        # VLAN sub-interfaces: keep each side's VLAN id, swap the base ids.
        first_vlan = current_first.split('.')[-1]
        second_vlan = current_second.split('.')[-1]
        self.update(nicid='{}.{}-{}.{}'.format(newleft, first_vlan, newright, second_vlan))
    else:
        # Top level interface or no VLANs
        self.update(nicid=newid)
|
def analyze_lib(lib_dir, cover_filename, *, ignore_existing=False):
    """Recursively analyze library, and return a dict of path -> (artist, album).

    :param lib_dir: root directory of the music library to walk.
    :param cover_filename: cover file name, passed through to analyze_dir.
    :param ignore_existing: when True, also process dirs with existing covers.
    """
    work = {}
    # Counters shown (in this insertion order) in the progress bar postfix.
    stats = collections.OrderedDict(((k, 0) for k in ("files", "albums", "missing covers", "errors")))
    with tqdm.tqdm(desc="Analyzing library", unit="dir", postfix=stats) as progress, tqdm_logging.redirect_logging(progress):
        for rootpath, rel_dirpaths, rel_filepaths in os.walk(lib_dir):
            # `stats` is presumably updated in place by analyze_dir -- the
            # postfix below re-reads it after each directory.
            metadata = analyze_dir(stats, rootpath, rel_filepaths, cover_filename, ignore_existing=ignore_existing)
            progress.set_postfix(stats, refresh=False)
            progress.update(1)
            # Keep only dirs whose metadata (all but the last element) is complete.
            if all(metadata[:-1]):
                work[rootpath] = metadata[:-1]
    return work
|
async def get_box_ids_json(self) -> str:
    """Return json object on lists of all unique box identifiers for
    credentials in wallet: schema identifiers, credential definition
    identifiers, and revocation registry identifiers.

    :return: json string with keys 'schema_id', 'cred_def_id' and
        'rev_reg_id', each mapping to a list of unique identifiers.
    """
    LOGGER.debug('HolderProver.get_box_ids_json >>>')
    schema_ids = set()
    cred_def_ids = set()
    rev_reg_ids = set()
    for cred in json.loads(await self.get_creds_display_coarse()):
        schema_ids.add(cred['schema_id'])
        cred_def_ids.add(cred['cred_def_id'])
        # Entries with a falsy rev_reg_id are skipped.
        if cred['rev_reg_id']:
            rev_reg_ids.add(cred['rev_reg_id'])
    rv = json.dumps({
        'schema_id': list(schema_ids),
        'cred_def_id': list(cred_def_ids),
        'rev_reg_id': list(rev_reg_ids)
    })
    LOGGER.debug('HolderProver.get_box_ids_json <<< %s', rv)
    return rv
|
def Deserialize(self, reader):
    """Deserialize full object.

    Args:
        reader (neo.IO.BinaryReader):
    """
    # Fixed-size header first: magic, command name, payload length.
    self.Magic = reader.ReadUInt32()
    self.Command = reader.ReadFixedString(12).decode('utf-8')
    self.Length = reader.ReadUInt32()
    if self.Length > self.PayloadMaxSizeInt:
        raise Exception("invalid format- payload too large")
    self.Checksum = reader.ReadUInt32()
    self.Payload = reader.ReadBytes(self.Length)
    # Verify payload integrity against the checksum from the header.
    if Message.GetChecksum(self.Payload) != self.Checksum:
        raise ChecksumException("checksum mismatch")
|
def locale(self):
    '''Do a lookup for the locale code that is set for this layout.

    NOTE: USB HID specifies only 35 different locales. If your layout does
    not fit, it should be set to Undefined/0

    @return: Tuple (<USB HID locale code>, <name>)
    '''
    name = self.json_data['hid_locale']
    if name is None:
        # Fall back to Undefined/0 when no locale was set.
        name = "Undefined"
    code = int(self.json_data['from_hid_locale'][name])
    return (code, name)
|
def set_circuit_breakers(mv_grid, mode='load', debug=False):
    """Calculates the optimal position of a circuit breaker on all routes of
    mv_grid, adds and connects them to graph.

    Args
        mv_grid: MVGridDing0
        mode: str, 'load' (use peak loads only) or 'loadgen' (use loads or
            generation, whichever dominates the ring)
        debug: bool, defaults to False
            If True, information is printed during process

    Notes
        According to planning principles of MV grids, a MV ring is run as two
        strings (half-rings) separated by a circuit breaker which is open at
        normal operation [#]_, [#]_.
        Assuming a ring (route which is connected to the root node at either
        sides), the optimal position of a circuit breaker is defined as the
        position (virtual cable) between two nodes where the conveyed current
        is minimal on the route. Instead of the peak current, the peak load is
        used here (assuming a constant voltage).
        The re-location of circuit breakers is necessary because the original
        position (calculated during routing) shifts during the connection of
        satellites and therefore it is no longer valid.

    References
        .. [#] X. Tao, "Automatisierte Grundsatzplanung von
               Mittelspannungsnetzen", Dissertation, 2006
        .. [#] FGH e.V.: "Technischer Bericht 302: Ein Werkzeug zur Optimierung
               der Störungsbeseitigung für Planung und Betrieb von
               Mittelspannungsnetzen", Tech. rep., 2008
    """
    # get power factor for loads and generators
    cos_phi_load = cfg_ding0.get('assumptions', 'cos_phi_load')
    cos_phi_feedin = cfg_ding0.get('assumptions', 'cos_phi_gen')
    # iterate over all rings and circuit breakers
    for ring, circ_breaker in zip(mv_grid.rings_nodes(include_root_node=False), mv_grid.circuit_breakers()):
        # Per-node apparent power contributions along this ring.
        nodes_peak_load = []
        nodes_peak_generation = []
        # iterate over all nodes of ring
        for node in ring:
            # node is LV station -> get peak load and peak generation
            if isinstance(node, LVStationDing0):
                nodes_peak_load.append(node.peak_load / cos_phi_load)
                nodes_peak_generation.append(node.peak_generation / cos_phi_feedin)
            # node is cable distributor -> get all connected nodes of subtree using graph_nodes_from_subtree()
            elif isinstance(node, CableDistributorDing0):
                nodes_subtree = mv_grid.graph_nodes_from_subtree(node)
                nodes_subtree_peak_load = 0
                nodes_subtree_peak_generation = 0
                # Aggregate the whole subtree hanging off this distributor.
                for node_subtree in nodes_subtree:
                    # node is LV station -> get peak load and peak generation
                    if isinstance(node_subtree, LVStationDing0):
                        nodes_subtree_peak_load += node_subtree.peak_load / cos_phi_load
                        nodes_subtree_peak_generation += node_subtree.peak_generation / cos_phi_feedin
                    # node is generator -> add its capacity to the generation sum
                    if isinstance(node_subtree, GeneratorDing0):
                        nodes_subtree_peak_generation += node_subtree.capacity / cos_phi_feedin
                nodes_peak_load.append(nodes_subtree_peak_load)
                nodes_peak_generation.append(nodes_subtree_peak_generation)
            else:
                raise ValueError('Ring node has got invalid type.')
        if mode == 'load':
            node_peak_data = nodes_peak_load
        elif mode == 'loadgen':
            # is ring dominated by load or generation?
            # (check if there's more load than generation in ring or vice versa)
            if sum(nodes_peak_load) > sum(nodes_peak_generation):
                node_peak_data = nodes_peak_load
            else:
                node_peak_data = nodes_peak_generation
        else:
            raise ValueError('parameter \'mode\' is invalid!')
        # calc optimal circuit breaker position
        # set init value
        diff_min = 10e6
        # check where difference of demand/generation in two half-rings is minimal
        # NOTE: the early `break` assumes the difference is unimodal along the
        # route -- the scan stops at the first local minimum.
        for ctr in range(len(node_peak_data)):
            # split route and calc demand difference
            route_data_part1 = sum(node_peak_data[0:ctr])
            route_data_part2 = sum(node_peak_data[ctr:len(node_peak_data)])
            diff = abs(route_data_part1 - route_data_part2)
            # equality has to be respected, otherwise comparison stops when demand/generation=0
            if diff <= diff_min:
                diff_min = diff
                position = ctr
            else:
                break
        # relocate circuit breaker
        node1 = ring[position - 1]
        node2 = ring[position]
        circ_breaker.branch = mv_grid._graph.adj[node1][node2]['branch']
        circ_breaker.branch_nodes = (node1, node2)
        circ_breaker.branch.circuit_breaker = circ_breaker
        circ_breaker.geo_data = calc_geo_centre_point(node1, node2)
        if debug:
            logger.debug('Ring: {}'.format(ring))
            logger.debug('Circuit breaker {0} was relocated to edge {1}-{2} ' '(position on route={3})'.format(circ_breaker, node1, node2, position))
            logger.debug('Peak load sum: {}'.format(sum(nodes_peak_load)))
            logger.debug('Peak loads: {}'.format(nodes_peak_load))
|
def approximate_surface(points, size_u, size_v, degree_u, degree_v, **kwargs):
    """Surface approximation using least squares method with fixed number of control points.

    This algorithm interpolates the corner control points and approximates the remaining
    control points. Please refer to Algorithm A9.7 of The NURBS Book (2nd Edition),
    pp. 422-423 for details.

    Keyword Arguments:
        * ``centripetal``: activates centripetal parametrization method. *Default: False*
        * ``ctrlpts_size_u``: number of control points on the u-direction. *Default: size_u - 1*
        * ``ctrlpts_size_v``: number of control points on the v-direction. *Default: size_v - 1*

    :param points: data points
    :type points: list, tuple
    :param size_u: number of data points on the u-direction, :math:`r`
    :type size_u: int
    :param size_v: number of data points on the v-direction, :math:`s`
    :type size_v: int
    :param degree_u: degree of the output surface for the u-direction
    :type degree_u: int
    :param degree_v: degree of the output surface for the v-direction
    :type degree_v: int
    :return: approximated B-Spline surface
    :rtype: BSpline.Surface
    """
    # Keyword arguments
    use_centripetal = kwargs.get('centripetal', False)
    # Number of data points (r + 1) must exceed the number of control points (n + 1);
    # the defaults use one fewer control point than data points per direction.
    num_cpts_u = kwargs.get('ctrlpts_size_u', size_u - 1)
    num_cpts_v = kwargs.get('ctrlpts_size_v', size_v - 1)
    # Dimension of a single data point (e.g. 3 for 3-D points)
    dim = len(points[0])
    # Parametrize the data points: uk for the u-direction, vl for the v-direction
    uk, vl = compute_params_surface(points, size_u, size_v, use_centripetal)
    # Compute knot vectors
    kv_u = compute_knot_vector2(degree_u, size_u, num_cpts_u, uk)
    kv_v = compute_knot_vector2(degree_v, size_v, num_cpts_v, vl)
    # Construct matrix Nu of basis function values at the interior u parameters
    matrix_nu = []
    for i in range(1, size_u - 1):
        m_temp = []
        for j in range(1, num_cpts_u - 1):
            m_temp.append(helpers.basis_function_one(degree_u, kv_u, j, uk[i]))
        matrix_nu.append(m_temp)
    # Compute Nu transpose
    matrix_ntu = linalg.matrix_transpose(matrix_nu)
    # Compute NTNu matrix (normal equations)
    matrix_ntnu = linalg.matrix_multiply(matrix_ntu, matrix_nu)
    # Compute LU-decomposition of NTNu matrix once; reused for every column fit below
    matrix_ntnul, matrix_ntnuu = linalg.lu_decomposition(matrix_ntnu)
    # Fit u-direction: approximate each column of data points separately
    ctrlpts_tmp = [[0.0 for _ in range(dim)] for _ in range(num_cpts_u * size_v)]
    for j in range(size_v):
        # The first and last rows are interpolated exactly (corner conditions)
        ctrlpts_tmp[j + (size_v * 0)] = list(points[j + (size_v * 0)])
        ctrlpts_tmp[j + (size_v * (num_cpts_u - 1))] = list(points[j + (size_v * (size_u - 1))])
        # Compute Rku - Eqn. 9.63
        pt0 = points[j + (size_v * 0)]  # Qzero
        ptm = points[j + (size_v * (size_u - 1))]  # Qm
        rku = []
        for i in range(1, size_u - 1):
            ptk = points[j + (size_v * i)]
            n0p = helpers.basis_function_one(degree_u, kv_u, 0, uk[i])
            nnp = helpers.basis_function_one(degree_u, kv_u, num_cpts_u - 1, uk[i])
            elem2 = [c * n0p for c in pt0]
            elem3 = [c * nnp for c in ptm]
            rku.append([a - b - c for a, b, c in zip(ptk, elem2, elem3)])
        # Compute Ru - Eqn. 9.67
        ru = [[0.0 for _ in range(dim)] for _ in range(num_cpts_u - 2)]
        for i in range(1, num_cpts_u - 1):
            ru_tmp = []
            for idx, pt in enumerate(rku):
                ru_tmp.append([p * helpers.basis_function_one(degree_u, kv_u, i, uk[idx + 1]) for p in pt])
            for d in range(dim):
                for idx in range(len(ru_tmp)):
                    ru[i - 1][d] += ru_tmp[idx][d]
        # Get intermediate control points: solve one coordinate at a time via
        # forward/backward substitution on the precomputed LU factors
        for d in range(dim):
            b = [pt[d] for pt in ru]
            y = linalg.forward_substitution(matrix_ntnul, b)
            x = linalg.backward_substitution(matrix_ntnuu, y)
            for i in range(1, num_cpts_u - 1):
                ctrlpts_tmp[j + (size_v * i)][d] = x[i - 1]
    # Construct matrix Nv of basis function values at the interior v parameters
    matrix_nv = []
    for i in range(1, size_v - 1):
        m_temp = []
        for j in range(1, num_cpts_v - 1):
            m_temp.append(helpers.basis_function_one(degree_v, kv_v, j, vl[i]))
        matrix_nv.append(m_temp)
    # Compute Nv transpose
    matrix_ntv = linalg.matrix_transpose(matrix_nv)
    # Compute NTNv matrix
    matrix_ntnv = linalg.matrix_multiply(matrix_ntv, matrix_nv)
    # Compute LU-decomposition of NTNv matrix
    matrix_ntnvl, matrix_ntnvu = linalg.lu_decomposition(matrix_ntnv)
    # Fit v-direction: approximate each row of the intermediate control points
    ctrlpts = [[0.0 for _ in range(dim)] for _ in range(num_cpts_u * num_cpts_v)]
    for i in range(num_cpts_u):
        # First and last columns are interpolated exactly
        ctrlpts[0 + (num_cpts_v * i)] = list(ctrlpts_tmp[0 + (size_v * i)])
        ctrlpts[num_cpts_v - 1 + (num_cpts_v * i)] = list(ctrlpts_tmp[size_v - 1 + (size_v * i)])
        # Compute Rkv - Eqs. 9.63
        pt0 = ctrlpts_tmp[0 + (size_v * i)]  # Qzero
        ptm = ctrlpts_tmp[size_v - 1 + (size_v * i)]  # Qm
        rkv = []
        for j in range(1, size_v - 1):
            ptk = ctrlpts_tmp[j + (size_v * i)]
            n0p = helpers.basis_function_one(degree_v, kv_v, 0, vl[j])
            nnp = helpers.basis_function_one(degree_v, kv_v, num_cpts_v - 1, vl[j])
            elem2 = [c * n0p for c in pt0]
            elem3 = [c * nnp for c in ptm]
            rkv.append([a - b - c for a, b, c in zip(ptk, elem2, elem3)])
        # Compute Rv - Eqn. 9.67
        rv = [[0.0 for _ in range(dim)] for _ in range(num_cpts_v - 2)]
        for j in range(1, num_cpts_v - 1):
            rv_tmp = []
            for idx, pt in enumerate(rkv):
                rv_tmp.append([p * helpers.basis_function_one(degree_v, kv_v, j, vl[idx + 1]) for p in pt])
            for d in range(dim):
                for idx in range(len(rv_tmp)):
                    rv[j - 1][d] += rv_tmp[idx][d]
        # Get intermediate control points (coordinate-by-coordinate LU solve)
        for d in range(dim):
            b = [pt[d] for pt in rv]
            y = linalg.forward_substitution(matrix_ntnvl, b)
            x = linalg.backward_substitution(matrix_ntnvu, y)
            for j in range(1, num_cpts_v - 1):
                ctrlpts[j + (num_cpts_v * i)][d] = x[j - 1]
    # Generate B-spline surface
    surf = BSpline.Surface()
    surf.degree_u = degree_u
    surf.degree_v = degree_v
    surf.ctrlpts_size_u = num_cpts_u
    surf.ctrlpts_size_v = num_cpts_v
    surf.ctrlpts = ctrlpts
    surf.knotvector_u = kv_u
    surf.knotvector_v = kv_v
    return surf
|
def is_parent_of_vault(self, id_, vault_id):
    """Tests if an ``Id`` is a direct parent of a vault.

    arg:    id (osid.id.Id): an ``Id``
    arg:    vault_id (osid.id.Id): the ``Id`` of a vault
    return: (boolean) - ``true`` if this ``id`` is a parent of
            ``vault_id,`` ``false`` otherwise
    raise:  NotFound - ``vault_id`` is not found
    raise:  NullArgument - ``id`` or ``vault_id`` is ``null``
    raise:  OperationFailed - unable to complete request
    raise:  PermissionDenied - authorization failure
    *compliance: mandatory -- This method must be implemented.*
    *implementation notes*: If ``id`` not found return ``false``.
    """
    # Implemented from template for
    # osid.resource.BinHierarchySession.is_parent_of_bin
    catalog_session = self._catalog_session
    if catalog_session is None:
        # No catalog session available: query the hierarchy service directly
        return self._hierarchy_session.is_parent(id_=vault_id, parent_id=id_)
    return catalog_session.is_parent_of_catalog(id_=id_, catalog_id=vault_id)
|
def create(hypervisor, identifier, configuration):
    """Creates a virtual network according to the given configuration.

    @param hypervisor: (libvirt.virConnect) connection to libvirt hypervisor.
    @param identifier: (str) UUID for the virtual network.
    @param configuration: (dict) network configuration.
    @return: (libvirt.virNetwork) virtual network.
    """
    if not {'configuration', 'dynamic_address'} & set(configuration.keys()):
        raise RuntimeError("Either configuration or dynamic_address must be specified")
    # Start from the built-in default XML; override with a user-supplied file if given
    xml_config = DEFAULT_NETWORK_XML
    if 'configuration' in configuration:
        with open(configuration['configuration']) as xml_file:
            xml_config = xml_file.read()
    attempts = count()
    dynamic = 'dynamic_address' in configuration
    while True:
        if dynamic:
            # A fresh address is generated on every attempt; collisions
            # surface as libvirtError and trigger a retry below
            address = generate_address(hypervisor, configuration['dynamic_address'])
            xml_string = network_xml(identifier, xml_config, address=address)
        else:
            xml_string = network_xml(identifier, xml_config)
        try:
            return hypervisor.networkCreateXML(xml_string)
        except libvirt.libvirtError as error:
            if next(attempts) > MAX_ATTEMPTS:
                raise RuntimeError("Exceeded failed attempts ({}) to get IP address.".format(MAX_ATTEMPTS), "Last error: {}".format(error))
|
def select_with_correspondence(self, selector, result_selector=KeyedElement):
    '''Pair each source element with its transformed value.

    Applies *selector* to every element of the input sequence and combines
    the original element and the transformed value through *result_selector*
    into a 2-part result (by default a KeyedElement, giving behaviour
    similar to the built-in enumerate()).

    The generated sequence is lazily evaluated.

    Note: This method uses deferred execution.

    Args:
        selector: A unary function mapping a value in the source sequence
            to the second argument of the result selector.
        result_selector: A binary callable receiving the original source
            element and the transformed value, returning the corresponding
            value of the generated sequence.

    Returns:
        A Queryable of result_selector(element, selector(element)) values.

    Raises:
        ValueError: If this Queryable has been closed.
        TypeError: If transform is not callable.
    '''
    if self.closed():
        raise ValueError("Attempt to call select_with_correspondence() on a "
                         "closed Queryable.")
    if not is_callable(selector):
        raise TypeError("select_with_correspondence() parameter selector={0} is "
                        "not callable".format(repr(selector)))
    if not is_callable(result_selector):
        raise TypeError("select_with_correspondence() parameter result_selector={0} is "
                        "not callable".format(repr(result_selector)))

    def generate_pairs():
        # Deferred: nothing is consumed until the result is iterated
        for elem in iter(self):
            yield result_selector(elem, selector(elem))

    return self._create(generate_pairs())
|
def _analyze_function_features(self, all_funcs_completed=False):
    """For each function in the function_manager, try to determine if it returns or not. A function does not return
    if it calls another function that is known to be not returning, and this function does not have other exits.

    We might as well analyze other features of functions in the future.

    :param bool all_funcs_completed: Ignore _completed_functions set and treat all functions as completed. This
                                     can be set to True after the entire CFG is built and _post_analysis() is
                                     called (at which point analysis on all functions must be completed).
    :return: dict with keys 'functions_return' and 'functions_do_not_return', each listing the Function
             objects whose `returning` flag was decided during this pass.
    """
    changes = {'functions_return': [], 'functions_do_not_return': []}
    if self._updated_nonreturning_functions is not None:
        # Incremental mode: only re-examine the functions flagged as updated
        all_func_addrs = self._updated_nonreturning_functions
        # Convert addresses to objects
        all_functions = [self.kb.functions.get_by_addr(f) for f in all_func_addrs
                         if self.kb.functions.contains_addr(f)]
    else:
        all_functions = list(self.kb.functions.values())
    analyzed_functions = set()
    # short-hand
    functions = self.kb.functions  # type: angr.knowledge.FunctionManager
    # Worklist loop: deciding one function may make its callers decidable too
    while all_functions:
        func = all_functions.pop(-1)  # type: angr.knowledge.Function
        analyzed_functions.add(func.addr)
        if func.returning is not None:
            # It has been determined before. Skip it
            continue
        returning = self._determine_function_returning(func, all_funcs_completed=all_funcs_completed)
        if returning:
            func.returning = True
            changes['functions_return'].append(func)
        elif returning is False:
            func.returning = False
            changes['functions_do_not_return'].append(func)
    # returning may also be None, meaning "still undetermined"
        if returning is not None:
            # Add all callers of this function to all_functions list
            # NOTE(review): callers already in analyzed_functions are skipped even if
            # their own status stayed undetermined — verify this cannot miss a late
            # decision that this callee's new status would enable.
            if func.addr in functions.callgraph:
                callers = functions.callgraph.predecessors(func.addr)
                for caller in callers:
                    if caller in analyzed_functions:
                        continue
                    if functions.contains_addr(caller):
                        all_functions.append(functions.get_by_addr(caller))
    return changes
|
def chain(self, block, count):
    """Returns a list of block hashes in the account chain starting at
    **block** up to **count**

    :param block: Block hash to start at
    :type block: str
    :param count: Number of blocks to return up to
    :type count: int
    :raises: :py:exc:`nano.rpc.RPCException`
    """
    payload = {
        "block": self._process_value(block, 'block'),
        "count": self._process_value(count, 'int'),
    }
    response = self.call('chain', payload)
    blocks = response.get('blocks')
    # The node may omit the key entirely when there are no blocks
    return blocks if blocks else []
|
def sanitize_string(string_or_unicode):
    """remove leading/trailing whitespace and always return unicode.

    Accepts ``str``, UTF-8 encoded ``bytes``, or any other value
    (e.g. ``None``), which is coerced to an empty string.

    :param string_or_unicode: value to sanitize
    :return: stripped text as ``str``
    """
    # The previous implementation used Python 2 idioms (`unicode`,
    # `str.decode`) which raise NameError/AttributeError on Python 3;
    # this file is Python 3-only (it uses f-strings elsewhere).
    if isinstance(string_or_unicode, str):
        return string_or_unicode.strip()
    if isinstance(string_or_unicode, bytes):
        return string_or_unicode.decode('utf-8').strip()
    # e.g. if input is None
    return ''
|
def init_pypsa_network(time_range_lim):
    """Instantiate PyPSA network

    Parameters
    ----------
    time_range_lim :
        Snapshots (time steps) to register on the network.

    Returns
    -------
    network : PyPSA network object
        Contains powerflow problem
    snapshots : iterable
        Contains snapshots to be analyzed by powerflow calculation
    """
    pypsa_network = Network()
    pypsa_network.set_snapshots(time_range_lim)
    return pypsa_network, pypsa_network.snapshots
|
def object_to_items(data_structure):
    """Converts a object to a items list respecting also slots.

    Use dict(object_to_items(obj)) to get a dictionary.

    Collects ``(name, value)`` pairs from the instance ``__dict__`` (when
    present) and from ``__slots__`` declared anywhere in the class hierarchy.

    :param data_structure: instance (or class) to inspect
    :return: list of ``(attribute_name, value)`` tuples
    """
    items = []
    # Instance-dict attributes (absent on fully __slots__-based objects)
    try:
        items = list(data_structure.__dict__.items())
    except AttributeError:
        pass
    # Walk the MRO so __slots__ declared on base classes are included.
    # BUG FIX: the original passed the *instance* to inspect.getmro(), which
    # always raised (and was silently swallowed), so base-class slots were
    # never collected.
    klass = data_structure if inspect.isclass(data_structure) else type(data_structure)
    slots = []
    try:
        for base in inspect.getmro(klass):
            # Read from base.__dict__ to avoid re-counting inherited slots;
            # a bare-string __slots__ declaration names a single slot.
            declared = base.__dict__.get('__slots__', ())
            if isinstance(declared, str):
                declared = (declared,)
            slots.extend(declared)
    except AttributeError:  # pragma: no cover
        pass
    # Resolve each slot on the object itself
    for slot_name in slots:
        try:
            items.append((slot_name, getattr(data_structure, slot_name)))
        except AttributeError:
            # slot declared but never assigned a value
            pass
    return items
|
def make_dict_unstructure_fn(cl, converter, **kwargs):
    # type: (Type[T], Converter) -> Callable[[T], Dict[str, Any]]
    """Generate a specialized dict unstructuring function for a class.

    Emits and compiles a function with one hard-coded line per ``attrs``
    attribute, converting an instance of *cl* into a plain dict.  Keyword
    arguments keyed by attribute name supply per-attribute overrides
    (e.g. ``omit_if_default``).  Attributes with a static type annotation
    are dispatched at generation time; untyped attributes fall back to
    runtime dispatch through ``converter.unstructure``.
    """
    cl_name = cl.__name__
    fn_name = "unstructure_" + cl_name
    globs = {"__c_u": converter.unstructure}
    lines = []       # the dict-literal portion of the generated body
    post_lines = []  # conditional assignments emitted after the literal
    attrs = cl.__attrs_attrs__
    lines.append("def {}(i):".format(fn_name))
    lines.append("    res = {")
    for a in attrs:
        attr_name = a.name
        override = kwargs.pop(attr_name, _neutral)
        d = a.default
        if a.type is None:
            # No type annotation, doing runtime dispatch.
            if d is not attr.NOTHING and override.omit_if_default:
                def_name = "__cattr_def_{}".format(attr_name)
                if isinstance(d, attr.Factory):
                    globs[def_name] = d.factory
                    if d.takes_self:
                        post_lines.append("    if i.{name} != {def_name}(i):".format(name=attr_name, def_name=def_name))
                    else:
                        post_lines.append("    if i.{name} != {def_name}():".format(name=attr_name, def_name=def_name))
                    post_lines.append("        res['{name}'] = i.{name}".format(name=attr_name))
                else:
                    globs[def_name] = d
                    post_lines.append("    if i.{name} != {def_name}:".format(name=attr_name, def_name=def_name))
                    post_lines.append("        res['{name}'] = __c_u(i.{name})".format(name=attr_name))
            else:
                # No default or no override.
                lines.append("        '{name}': __c_u(i.{name}),".format(name=attr_name))
        else:
            # Do the dispatch here and now.
            attr_type = a.type  # renamed from `type`, which shadowed the builtin
            conv_function = converter._unstructure_func.dispatch(attr_type)
            if d is not attr.NOTHING and override.omit_if_default:
                def_name = "__cattr_def_{}".format(attr_name)
                if isinstance(d, attr.Factory):
                    # The default is computed every time.
                    globs[def_name] = d.factory
                    if d.takes_self:
                        post_lines.append("    if i.{name} != {def_name}(i):".format(name=attr_name, def_name=def_name))
                    else:
                        post_lines.append("    if i.{name} != {def_name}():".format(name=attr_name, def_name=def_name))
                    if conv_function == converter._unstructure_identity:
                        # Special case this, avoid a function call.
                        post_lines.append("        res['{name}'] = i.{name}".format(name=attr_name))
                    else:
                        unstruct_fn_name = "__cattr_unstruct_{}".format(attr_name)
                        globs[unstruct_fn_name] = conv_function
                        # BUG FIX: the original emitted a trailing comma here
                        # ("res[...] = {fn}(i.{name}),"), making the generated
                        # statement assign a 1-tuple instead of the value.
                        post_lines.append("        res['{name}'] = {fn}(i.{name})".format(name=attr_name, fn=unstruct_fn_name))
                else:
                    # Default is not a factory, but a constant.
                    globs[def_name] = d
                    post_lines.append("    if i.{name} != {def_name}:".format(name=attr_name, def_name=def_name))
                    if conv_function == converter._unstructure_identity:
                        post_lines.append("        res['{name}'] = i.{name}".format(name=attr_name))
                    else:
                        unstruct_fn_name = "__cattr_unstruct_{}".format(attr_name)
                        globs[unstruct_fn_name] = conv_function
                        post_lines.append("        res['{name}'] = {fn}(i.{name})".format(name=attr_name, fn=unstruct_fn_name))
            else:
                # No omitting of defaults.
                if conv_function == converter._unstructure_identity:
                    # Special case this, avoid a function call.
                    lines.append("        '{name}': i.{name},".format(name=attr_name))
                else:
                    unstruct_fn_name = "__cattr_unstruct_{}".format(attr_name)
                    globs[unstruct_fn_name] = conv_function
                    lines.append("        '{name}': {fn}(i.{name}),".format(name=attr_name, fn=unstruct_fn_name))
    lines.append("    }")
    total_lines = lines + post_lines + ["    return res"]
    # Compile the generated source and fish the new function out of globs
    eval(compile("\n".join(total_lines), "", "exec"), globs)
    fn = globs[fn_name]
    return fn
|
def create_serialization_dir(params: Params, serialization_dir: str, recover: bool, force: bool) -> None:
    """This function creates the serialization directory if it doesn't exist.  If it already exists
    and is non-empty, then it verifies that we're recovering from a training with an identical configuration.

    Parameters
    ----------
    params: ``Params``
        A parameter object specifying an AllenNLP Experiment.
    serialization_dir: ``str``
        The directory in which to save results and logs.
    recover: ``bool``
        If ``True``, we will try to recover from an existing serialization directory, and crash if
        the directory doesn't exist, or doesn't match the configuration we're given.
    force: ``bool``
        If ``True``, we will overwrite the serialization directory if it already exists.
    """
    if recover and force:
        raise ConfigurationError("Illegal arguments: both force and recover are true.")
    if os.path.exists(serialization_dir) and force:
        shutil.rmtree(serialization_dir)

    if os.path.exists(serialization_dir) and os.listdir(serialization_dir):
        # Non-empty directory: only valid when explicitly recovering
        if not recover:
            raise ConfigurationError(f"Serialization directory ({serialization_dir}) already exists and is "
                                     f"not empty. Specify --recover to recover training from existing output.")
        logger.info(f"Recovering from prior training at {serialization_dir}.")
        recovered_config_file = os.path.join(serialization_dir, CONFIG_NAME)
        if not os.path.exists(recovered_config_file):
            raise ConfigurationError("The serialization directory already exists but doesn't "
                                     "contain a config.json. You probably gave the wrong directory.")
        loaded_params = Params.from_file(recovered_config_file)
        # Check whether any of the training configuration differs from the configuration we are
        # resuming.  If so, warn the user that training may fail.
        flat_params = params.as_flat_dict()
        flat_loaded = loaded_params.as_flat_dict()
        fail = False
        for key in flat_params.keys() - flat_loaded.keys():
            logger.error(f"Key '{key}' found in training configuration but not in the serialization "
                         f"directory we're recovering from.")
            fail = True
        for key in flat_loaded.keys() - flat_params.keys():
            logger.error(f"Key '{key}' found in the serialization directory we're recovering from "
                         f"but not in the training config.")
            fail = True
        for key in flat_params.keys():
            if flat_params.get(key, None) != flat_loaded.get(key, None):
                logger.error(f"Value for '{key}' in training configuration does not match that the value in "
                             f"the serialization directory we're recovering from: "
                             f"{flat_params[key]} != {flat_loaded[key]}")
                fail = True
        if fail:
            raise ConfigurationError("Training configuration does not match the configuration we're "
                                     "recovering from.")
        return

    # Directory is missing (or exists but is empty): there is nothing to recover
    if recover:
        raise ConfigurationError(f"--recover specified but serialization_dir ({serialization_dir}) "
                                 "does not exist. There is nothing to recover from.")
    os.makedirs(serialization_dir, exist_ok=True)
|
def format_python_stack(self):
    """Return a traceback of Python frames, from where the error occurred
    to where it was first caught and wrapped.
    """
    frames = traceback.format_tb(self.original_traceback)
    return "Traceback:\n" + "".join(frames)
|
def get_default_template():
    """Returns default getTemplate request specification.

    :return: dict with the default template fields for a getTemplate request
    """
    template = {}
    template["format"] = 1
    template["protocol"] = 1
    # shows whether the UO should be for production (live),
    # test (pre-production testing), or dev (development)
    template["environment"] = Environment.DEV
    template["maxtps"] = "one"  # maximum guaranteed TPS
    template["core"] = "empty"  # how many cards have UO loaded permanently
    # once loaded onto card, how long will the UO stay there without use
    # (this excludes the "core")
    template["persistence"] = "one_minute"
    # this defines a) priority when the server capacity is fully utilised and it also
    # defines how quickly new copies of UO are installed (pre-empting icreasing demand)
    template["priority"] = "default"
    # "complete" = only one UO can be loaded on a smartcard at one one time
    template["separation"] = "time"
    # "yes" will ensure the UO is replicated to provide high availability for any
    # possible service disruption
    template["bcr"] = TemplateFields.yes
    # if "yes", we expect the data starts with an IV to initialize decryption
    # of data - this is for communication security
    template["unlimited"] = TemplateFields.yes
    # if "yes", we expect the data starting with a diversification 16B for
    # communication keys
    template["clientiv"] = TemplateFields.yes
    template["clientdiv"] = TemplateFields.no
    template["resource"] = "global"
    # <1-32767>, a limit a seed card can provide to the EB service
    template["credit"] = 32677
    template[TemplateFields.generation] = {
        TemplateFields.commkey: Gen.CLIENT,
        TemplateFields.billingkey: Gen.LEGACY_RANDOM,
        TemplateFields.appkey: Gen.LEGACY_RANDOM,
    }
    return template
|
def askokcancel(title=None, message=None, **options):
    """Original doc: Ask if operation should proceed; return true if the answer is ok

    Mirrors the ``tkinter.messagebox.askokcancel`` signature.

    :param title: dialog title
    :param message: dialog body text
    :param options: accepted for tkinter API compatibility but not forwarded —
        presumably dropped on purpose because psidialogs does not support them;
        TODO confirm against the psidialogs API.
    :return: True if the user chose OK, False otherwise
    """
    return psidialogs.ask_ok_cancel(title=title, message=message)
|
def _normalize_hparams(hparams):
    """Normalize a dict keyed by `HParam`s and/or raw strings.

    Args:
      hparams: A `dict` whose keys are `HParam` objects and/or strings
        representing hyperparameter names, and whose values are
        hyperparameter values. No two keys may have the same name.

    Returns:
      A `dict` whose keys are hyperparameter names (as strings) and whose
      values are the corresponding hyperparameter values.

    Raises:
      ValueError: If two entries in `hparams` share the same
        hyperparameter name.
    """
    result = {}
    # `six.iteritems` is unnecessary: this file is Python 3-only
    # (it uses f-strings elsewhere), so plain dict iteration suffices.
    for k, v in hparams.items():
        if isinstance(k, HParam):
            k = k.name
        if k in result:
            raise ValueError("multiple values specified for hparam %r" % (k,))
        result[k] = v
    return result
|
def get_permissions(cls):
    """Generates permissions for all CrudView based class methods.

    Builds one permission string per registered class and per ``*_view``
    method defined on this class, in the form ``"<class_name>.<method_name>"``.

    Returns:
        List of permission strings.  (The previous docstring claimed
        Permission objects, but only strings are produced.)
    """
    perms = []
    # Only the registry keys are needed; the registered classes are unused.
    for kls_name in cls.registry:
        for method_name in cls.__dict__.keys():
            if method_name.endswith('_view'):
                perms.append("%s.%s" % (kls_name, method_name))
    return perms
|
def to_string(address, dns_format=False):
    """Convert address to string

    :param address: WIPV4Address to convert
    :param dns_format: whether to use arpa-format or not
    :return: dotted-quad string, or reversed ``...in-addr.arpa`` form
    """
    if isinstance(address, WIPV4Address) is False:
        raise TypeError('Invalid address type')
    octets = [str(int(octet)) for octet in address.__address]
    if dns_format is not False:
        # arpa format lists the octets in reverse order
        octets.reverse()
        return '.'.join(octets) + '.in-addr.arpa'
    return '.'.join(octets)
|
def bind_path(self, name, folder):
    """Adds a mask that maps to a given folder relative to `base_path`.

    :raises ValueError: if *name* is empty or not delimited by '/'.
    """
    well_formed = bool(name) and name[0] == '/' and name[-1] == '/'
    if not well_formed:
        raise ValueError("name must start and end with '/': {0}".format(name))
    # Insert at the front (presumably so newer bindings are matched
    # first — confirm with the lookup code)
    self._folder_masks.insert(0, (name, folder))
|
def filter_search(self, code=None, name=None, abilities=None, attributes=None, info=None):
    """Return a list of codes and names pertaining to cards that have the
    given information values stored.

    Can take a code integer, name string, abilities dict {phase: ability
    list/"*"}, attributes list, info dict {key, value list/"*"}.
    In the above argument examples "*" is a string that may be passed
    instead of a list as the dict value to match anything that stores that
    key.
    """
    where_clause = Where_filter_gen(
        ("code", code),
        ("name", name),
        ("abilities", abilities),
        ("attributes", attributes),
        ("info", info))
    query = "SELECT code, name FROM CARDS " + where_clause
    with sqlite3.connect(self.dbname) as carddb:
        return carddb.execute(query).fetchall()
|
def get_src_model(self, name, paramsonly=False, reoptimize=False, npts=None, **kwargs):
    """Compose a dictionary for a source with the current best-fit parameters.

    Parameters
    ----------
    name : str
    paramsonly : bool
        Skip computing TS and likelihood profile.
    reoptimize : bool
        Re-fit background parameters in likelihood scan.
    npts : int
        Number of points for likelihood scan.

    Returns
    -------
    src_dict : dict
    """
    self.logger.debug('Generating source dict for ' + name)
    optimizer = kwargs.get('optimizer', self.config['optimizer'])
    if npts is None:
        npts = self.config['gtlike']['llscan_npts']
    name = self.get_source_name(name)
    source = self.like[name].src
    spectrum = source.spectrum()
    normPar = self.like.normPar(name)
    src_dict = defaults.make_default_dict(defaults.source_flux_output)
    # Seed scan arrays with NaNs so any failure below leaves a recognizable value
    src_dict.update({'name': name,
                     'pivot_energy': 1000.,
                     'ts': np.nan,
                     'loglike': np.nan,
                     'npred': 0.0,
                     'npred_wt': 0.0,
                     'loglike_scan': np.nan * np.ones(npts),
                     'dloglike_scan': np.nan * np.ones(npts),
                     'eflux_scan': np.nan * np.ones(npts),
                     'flux_scan': np.nan * np.ones(npts),
                     'norm_scan': np.nan * np.ones(npts),
                     })
    src_dict.update(gtutils.gtlike_spectrum_to_vectors(spectrum))
    src_dict['spectral_pars'] = gtutils.get_function_pars_dict(spectrum)
    # Get Counts Spectrum
    src_dict['model_counts'] = self.model_counts_spectrum(name, summed=True)
    src_dict['model_counts_wt'] = self.model_counts_spectrum(name, summed=True, weighted=True)
    # Get NPred
    src_dict['npred'] = self.like.NpredValue(str(name))
    # EAC, we need this b/c older version of the ST don't have the right signature
    try:
        src_dict['npred_wt'] = self.like.NpredValue(str(name), True)
    except (TypeError, NotImplementedError):
        src_dict['npred_wt'] = src_dict['npred']
    # Get the Model Fluxes
    try:
        # NOTE(review): 'thesrc' is unused; presumably kept so a bad source
        # lookup fails inside this try block — confirm.
        thesrc = self.like[name]
        src_dict['flux'] = self.like.flux(name, self.energies[0], self.energies[-1])
        src_dict['flux100'] = self.like.flux(name, 100., 10 ** 5.5)
        src_dict['flux1000'] = self.like.flux(name, 1000., 10 ** 5.5)
        src_dict['flux10000'] = self.like.flux(name, 10000., 10 ** 5.5)
        src_dict['eflux'] = self.like.energyFlux(name, self.energies[0], self.energies[-1])
        src_dict['eflux100'] = self.like.energyFlux(name, 100., 10 ** 5.5)
        src_dict['eflux1000'] = self.like.energyFlux(name, 1000., 10 ** 5.5)
        src_dict['eflux10000'] = self.like.energyFlux(name, 10000., 10 ** 5.5)
        src_dict['dnde'] = self.like[name].spectrum()(pyLike.dArg(src_dict['pivot_energy']))
        src_dict['dnde100'] = self.like[name].spectrum()(pyLike.dArg(100.))
        src_dict['dnde1000'] = self.like[name].spectrum()(pyLike.dArg(1000.))
        src_dict['dnde10000'] = self.like[name].spectrum()(pyLike.dArg(10000.))
        if normPar.getValue() == 0:
            # Temporarily set the normalization to 1 so the spectral index
            # is well defined, then restore it
            normPar.setValue(1.0)
            dnde_index = -get_spectral_index(self.like[name], src_dict['pivot_energy'])
            dnde100_index = -get_spectral_index(self.like[name], 100.)
            dnde1000_index = -get_spectral_index(self.like[name], 1000.)
            dnde10000_index = -get_spectral_index(self.like[name], 10000.)
            normPar.setValue(0.0)
        else:
            dnde_index = -get_spectral_index(self.like[name], src_dict['pivot_energy'])
            dnde100_index = -get_spectral_index(self.like[name], 100.)
            dnde1000_index = -get_spectral_index(self.like[name], 1000.)
            dnde10000_index = -get_spectral_index(self.like[name], 10000.)
        src_dict['dnde_index'] = dnde_index
        src_dict['dnde100_index'] = dnde100_index
        src_dict['dnde1000_index'] = dnde1000_index
        src_dict['dnde10000_index'] = dnde10000_index
    except Exception:
        self.logger.error('Failed to update source parameters.', exc_info=True)
    # Only compute TS, errors, and ULs if the source was free in
    # the fit
    if not self.get_free_source_params(name) or paramsonly:
        return src_dict
    emax = 10 ** 5.5
    try:
        src_dict['flux_err'] = self.like.fluxError(name, self.energies[0], self.energies[-1])
        src_dict['flux100_err'] = self.like.fluxError(name, 100., emax)
        src_dict['flux1000_err'] = self.like.fluxError(name, 1000., emax)
        src_dict['flux10000_err'] = self.like.fluxError(name, 10000., emax)
        src_dict['eflux_err'] = self.like.energyFluxError(name, self.energies[0], self.energies[-1])
        src_dict['eflux100_err'] = self.like.energyFluxError(name, 100., emax)
        src_dict['eflux1000_err'] = self.like.energyFluxError(name, 1000., emax)
        src_dict['eflux10000_err'] = self.like.energyFluxError(name, 10000., emax)
    except Exception:
        # Flux errors are best-effort; failures are deliberately ignored
        pass
        # self.logger.error('Failed to update source parameters.',
        #                   exc_info=True)
    # Likelihood profile of the normalization parameter
    lnlp = self.profile_norm(name, savestate=True, reoptimize=reoptimize, npts=npts, optimizer=optimizer)
    src_dict['loglike_scan'] = lnlp['loglike']
    src_dict['dloglike_scan'] = lnlp['dloglike']
    src_dict['eflux_scan'] = lnlp['eflux']
    src_dict['flux_scan'] = lnlp['flux']
    src_dict['norm_scan'] = lnlp['xvals']
    src_dict['loglike'] = np.max(lnlp['loglike'])
    # Upper limits derived from the profile likelihood
    flux_ul_data = utils.get_parameter_limits(lnlp['flux'], lnlp['dloglike'])
    eflux_ul_data = utils.get_parameter_limits(lnlp['eflux'], lnlp['dloglike'])
    if normPar.getValue() == 0:
        # Normalization is zero: evaluate fluxes at norm=1 to obtain the
        # energy-range flux ratios, then restore the zero normalization
        normPar.setValue(1.0)
        flux = self.like.flux(name, self.energies[0], self.energies[-1])
        flux100 = self.like.flux(name, 100., emax)
        flux1000 = self.like.flux(name, 1000., emax)
        flux10000 = self.like.flux(name, 10000., emax)
        eflux = self.like.energyFlux(name, self.energies[0], self.energies[-1])
        eflux100 = self.like.energyFlux(name, 100., emax)
        eflux1000 = self.like.energyFlux(name, 1000., emax)
        eflux10000 = self.like.energyFlux(name, 10000., emax)
        flux100_ratio = flux100 / flux
        flux1000_ratio = flux1000 / flux
        flux10000_ratio = flux10000 / flux
        eflux100_ratio = eflux100 / eflux
        eflux1000_ratio = eflux1000 / eflux
        eflux10000_ratio = eflux10000 / eflux
        normPar.setValue(0.0)
    else:
        flux100_ratio = src_dict['flux100'] / src_dict['flux']
        flux1000_ratio = src_dict['flux1000'] / src_dict['flux']
        flux10000_ratio = src_dict['flux10000'] / src_dict['flux']
        eflux100_ratio = src_dict['eflux100'] / src_dict['eflux']
        eflux1000_ratio = src_dict['eflux1000'] / src_dict['eflux']
        eflux10000_ratio = src_dict['eflux10000'] / src_dict['eflux']
    # Scale the full-range upper limits to the other energy ranges
    src_dict['flux_ul95'] = flux_ul_data['ul']
    src_dict['flux100_ul95'] = flux_ul_data['ul'] * flux100_ratio
    src_dict['flux1000_ul95'] = flux_ul_data['ul'] * flux1000_ratio
    src_dict['flux10000_ul95'] = flux_ul_data['ul'] * flux10000_ratio
    src_dict['eflux_ul95'] = eflux_ul_data['ul']
    src_dict['eflux100_ul95'] = eflux_ul_data['ul'] * eflux100_ratio
    src_dict['eflux1000_ul95'] = eflux_ul_data['ul'] * eflux1000_ratio
    src_dict['eflux10000_ul95'] = eflux_ul_data['ul'] * eflux10000_ratio
    # Extract covariance matrix
    fd = None
    try:
        fd = FluxDensity.FluxDensity(self.like, name)
        src_dict['covar'] = fd.covar
    except RuntimeError:
        pass
        # if ex.message == 'Covariance matrix has not been
        # computed.':
    # Extract bowtie
    if fd and len(src_dict['covar']) and src_dict['covar'].ndim >= 1:
        loge = np.linspace(self.log_energies[0], self.log_energies[-1], 50)
        src_dict['model_flux'] = self.bowtie(name, fd=fd, loge=loge)
        src_dict['dnde100_err'] = fd.error(100.)
        src_dict['dnde1000_err'] = fd.error(1000.)
        src_dict['dnde10000_err'] = fd.error(10000.)
        src_dict['pivot_energy'] = src_dict['model_flux']['pivot_energy']
        e0 = src_dict['pivot_energy']
        src_dict['dnde'] = self.like[name].spectrum()(pyLike.dArg(e0))
        src_dict['dnde_err'] = fd.error(e0)
    if not reoptimize:
        src_dict['ts'] = self.like.Ts2(name, reoptimize=reoptimize)
    else:
        # With reoptimization the TS is taken from the first profile-scan point
        src_dict['ts'] = -2.0 * lnlp['dloglike'][0]
    return src_dict
|
def _save(self):
    """Persist the current state of this AssessmentTaken.

    Should be called every time the sections map changes.
    """
    JSONClientValidated(
        'assessment',
        collection='AssessmentTaken',
        runtime=self._runtime,
    ).save(self._my_map)
|
def _multiply ( X , coef ) :
"""Multiple X by coef element - wise , preserving sparsity ."""
|
if sp . issparse ( X ) :
return X . multiply ( sp . csr_matrix ( coef ) )
else :
return np . multiply ( X , coef )
|
def _create_body ( self , name , description = None , volume = None , force = False ) :
"""Used to create the dict required to create a new snapshot"""
|
body = { "snapshot" : { "display_name" : name , "display_description" : description , "volume_id" : volume . id , "force" : str ( force ) . lower ( ) , } }
return body
|
def zoom_in(self, increment=1):
    """Zooms in the editor (makes the font bigger).

    :param increment: zoom level increment. Default is 1.
    """
    self.zoom_level = self.zoom_level + increment
    # Force a full repaint so the new font size is applied everywhere.
    helper = TextHelper(self)
    helper.mark_whole_doc_dirty()
    self._reset_stylesheet()
|
def getUsersWithinEnterpriseGroup(self, groupName, searchFilter=None, maxCount=10):
    """This operation returns the users that are currently assigned to the
    enterprise group within the enterprise user/group store. You can
    use the filter parameter to narrow down the user search.

    Inputs:
       groupName - name of the group
       searchFilter - string used to narrow down the search
       maxCount - maximum number of users to return
    """
    params = {
        "f": "json",
        "groupName": groupName,
        "maxCount": maxCount,
    }
    # Only include the filter key when a filter was actually supplied.
    if searchFilter:
        params['filters'] = searchFilter
    return self._get(
        url=self._url + "/groups/getUsersWithinEnterpriseGroup",
        param_dict=params,
        proxy_url=self._proxy_url,
        proxy_port=self._proxy_port,
    )
|
def open_phbst(self):
    """Open PHBST file produced by Anaddb and returns :class:`PhbstFile` object.

    Returns None (logging a critical message) when the file is missing
    or cannot be read.
    """
    phbst_path = os.path.join(self.workdir, "run.abo_PHBST.nc")
    # BUG FIX: the original tested `if not phbst_path:`, which is always
    # false because os.path.join returns a non-empty string; the intent
    # is clearly to check whether the file exists on disk.
    if not os.path.exists(phbst_path):
        if self.status == self.S_OK:
            logger.critical("%s reached S_OK but didn't produce a PHBST file in %s" % (self, self.outdir))
        return None

    # Import lazily so the dependency is only loaded once we know the
    # file is actually there.
    from abipy.dfpt.phonons import PhbstFile
    try:
        return PhbstFile(phbst_path)
    except Exception as exc:
        # BUG FIX: message previously said "GSR file" (copy-paste slip);
        # this reads a PHBST file.
        logger.critical("Exception while reading PHBST file at %s:\n%s" % (phbst_path, str(exc)))
        return None
|
def bookmarks_index_changed(self):
    """Update the UI when the bookmarks combobox has changed."""
    index = self.bookmarks_list.currentIndex()
    # No valid selection: just disable the OK button.
    if index < 0:
        self.ok_button.setDisabled(True)
        return
    # Valid selection: reset the tool, then show the stored extent.
    self.tool.reset()
    extent = self.bookmarks_list.itemData(index)
    self.tool.set_rectangle(extent)
    self.canvas.setExtent(extent)
    self.ok_button.setEnabled(True)
|
def process_frames(self, data, sampling_rate, offset=0, last=False, utterance=None, corpus=None):
    """Execute the processing of this step and all dependent parent steps.

    Frames arrive in chunks: ``data`` is the chunk starting at frame
    ``offset`` and ``last`` flags the stream's final chunk.  Returns this
    step's computed result for the chunk once enough buffered context is
    available, otherwise returns None implicitly.
    """
    # First chunk of a stream: (re)compute the dependency order of the
    # steps and allocate fresh buffers for this pass.
    if offset == 0:
        self.steps_sorted = list(nx.algorithms.dag.topological_sort(self.graph))
        self._create_buffers()
        self._define_output_buffers()
    # Update buffers with input data (step=None means the raw input).
    self._update_buffers(None, data, offset, last)
    # Go through the ordered (by dependencies) steps so every parent is
    # computed before any of its children read the result.
    for step in self.steps_sorted:
        # get() yields None while the buffer lacks enough context to
        # emit a chunk; in that case the step is skipped this round.
        chunk = self.buffers[step].get()
        if chunk is not None:
            res = step.compute(chunk, sampling_rate, utterance=utterance, corpus=corpus)
            # If step is self, we know its the last step so return the data
            if step == self:
                return res
            # Otherwise update buffers of child steps
            else:
                self._update_buffers(step, res, chunk.offset + chunk.left_context, chunk.is_last)
|
def get_observation(self, observation_id):
    """Retrieve an existing :class:`meteorpi_model.Observation` by its ID.

    :param string observation_id:
        UUID of the observation
    :return:
        A :class:`meteorpi_model.Observation` instance, or None if not found
    """
    builder = search_observations_sql_builder(
        mp.ObservationSearch(observation_id=observation_id))
    sql = builder.get_select_sql(
        columns='l.publicId AS obstory_id, l.name AS obstory_name, '
                'o.obsTime, s.name AS obsType, o.publicId, o.uid',
        skip=0, limit=1, order='o.obsTime DESC')
    matches = list(self.generators.observation_generator(sql=sql, sql_args=builder.sql_args))
    return matches[0] if matches else None
|
def get(self, params, **options):
    """Dispatches a GET request to /events of the API to get a set of recent changes to a resource.

    :param params: request parameters forwarded to the API.
    :param options: per-call request options; merged over the default so
        callers may still override ``full_payload``.
    """
    # BUG FIX: the original rebound `options` to only the default dict,
    # silently discarding every caller-supplied keyword option.  Merge
    # the caller's options over the default instead.
    options = self.client._merge_options({'full_payload': True}, options)
    return self.client.get('/events', params, **options)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.