signature
stringlengths 29
44.1k
| implementation
stringlengths 0
85.2k
|
|---|---|
def retrieve_remote_profile(id: str) -> Optional[Profile]:
    """High-level profile retrieval.

    Identify the federation protocol from the given ID, then delegate to
    that protocol's utils module to fetch and parse the remote profile.
    """
    proto = identify_protocol_by_id(id)
    proto_utils = importlib.import_module(f"federation.utils.{proto.PROTOCOL_NAME}")
    return proto_utils.retrieve_and_parse_profile(id)
|
def run(self):
    """Starts a development server for the zengine application"""
    from zengine.wf_daemon import run_workers, Worker

    args = self.manager.args
    worker_count = int(args.workers or 1)
    if not args.daemonize:
        print("Starting worker(s)")
    # Multiple workers (or autoreload) go through the multi-process runner;
    # otherwise a single worker runs inline in this process.
    if worker_count > 1 or args.autoreload:
        run_workers(worker_count, args.paths.split(' '), args.daemonize)
    else:
        Worker().run()
|
def parse_cov(cov_table, scaffold2genome):
    """Calculate per-genome coverage from a scaffold coverage table.

    :param cov_table: path to a tab-separated table; the header row starts
        with '#' and lists per-sample file paths, data rows look like
        "<scaffold>: <length>\\t<cov sample1>\\t<cov sample2>..."
    :param scaffold2genome: dict mapping scaffold name -> genome name;
        scaffolds not present are skipped
    :return: pandas.DataFrame with columns 'genome', 'genome size (bp)',
        'sample' and 'coverage'
    """
    size = {}    # size[genome] = genome size (bp)
    mapped = {}  # mapped[genome][sample] = mapped bases
    samples = []  # guard against a table with no header row
    # parse coverage file; 'with' guarantees the handle is closed
    # (the original left the file object dangling)
    with open(cov_table) as handle:
        for line in handle:
            line = line.strip().split('\t')
            if line[0].startswith('#'):
                # header: strip directory and extension from sample paths
                samples = [i.rsplit('/', 1)[-1].split('.', 1)[0] for i in line[1:]]
                continue
            scaffold, length = line[0].split(': ')
            length = float(length)
            covs = [float(i) for i in line[1:]]
            # coverage * length = mapped bases for this scaffold
            bases = [c * length for c in covs]
            if scaffold not in scaffold2genome:
                continue
            genome = scaffold2genome[scaffold]
            if genome not in size:
                size[genome] = 0
                mapped[genome] = {sample: 0 for sample in samples}
            # keep track of genome size and of mapped bases per sample
            size[genome] += length
            for sample, count in zip(samples, bases):
                mapped[genome][sample] += count
    # calculate coverage from base counts and genome size
    coverage = {'genome': [], 'genome size (bp)': [], 'sample': [], 'coverage': []}
    for genome, length in size.items():
        for sample in samples:
            coverage['genome'].append(genome)
            coverage['genome size (bp)'].append(length)
            coverage['sample'].append(sample)
            coverage['coverage'].append(mapped[genome][sample] / length)
    return pd.DataFrame(coverage)
|
def cells_dn_meta(workbook, sheet, row, col, final_dict):
    """Traverse all cells in a column moving downward.

    Primarily created for the metadata sheet, but may be used elsewhere.
    Each cell title is converted to its JSON-LD name, and the row data is
    routed to the matching compiler (geo, pub, funding, or general).

    :param obj workbook: xlrd workbook object
    :param str sheet: sheet name
    :param int row: starting row index
    :param int col: column index to walk down
    :param dict final_dict: output dictionary receiving the compiled metadata
    :return: dict, the filled final_dict
    """
    logger_excel.info("enter cells_dn_meta")
    row_loop = 0
    # Titles that belong to the publication / geo / funding sub-structures
    pub_cases = ['id', 'year', 'author', 'journal', 'issue', 'volume', 'title', 'pages', 'reportNumber', 'abstract', 'alternateCitation']
    geo_cases = ['latMin', 'lonMin', 'lonMax', 'latMax', 'elevation', 'siteName', 'location']
    funding_cases = ["agency", "grant", "principalInvestigator", "country"]
    # Temp accumulators, merged into final_dict at the end
    pub_qty = 0
    geo_temp = {}
    general_temp = {}
    pub_temp = []
    funding_temp = []
    temp_sheet = workbook.sheet_by_name(sheet)
    # Loop until we hit the max rows in the sheet
    while row_loop < temp_sheet.nrows:
        try:
            # Get cell value
            cell = temp_sheet.cell_value(row, col)
            # If there is content in the cell...
            if cell not in EMPTY:
                # Convert title to correct format, and grab the cell data for that row
                title_formal = temp_sheet.cell_value(row, col)
                title_json = name_to_jsonld(title_formal)
                # If we don't have a title for it, then it's not information we want to grab
                if title_json:
                    # Geo
                    if title_json in geo_cases:
                        cell_data = cells_rt_meta(workbook, sheet, row, col)
                        geo_temp = compile_temp(geo_temp, title_json, cell_data)
                    # Pub
                    # Create a list of dicts. One for each pub column.
                    elif title_json in pub_cases:
                        # Authors seem to be the only consistent field we can
                        # rely on to determine the number of Pubs.
                        if title_json == 'author':
                            cell_data = cells_rt_meta(workbook, sheet, row, col)
                            pub_qty = len(cell_data)
                            for i in range(pub_qty):
                                author_lst = compile_authors(cell_data[i])
                                pub_temp.append({'author': author_lst, 'pubDataUrl': 'Manually Entered'})
                        else:
                            cell_data = cells_rt_meta_pub(workbook, sheet, row, col, pub_qty)
                            for pub in range(pub_qty):
                                if title_json == 'id':
                                    # DOIs are wrapped in an identifier structure
                                    pub_temp[pub]['identifier'] = [{"type": "doi", "id": cell_data[pub]}]
                                else:
                                    pub_temp[pub][title_json] = cell_data[pub]
                    # Funding
                    elif title_json in funding_cases:
                        # "agency" anchors the funding table; other funding
                        # titles are picked up by compile_fund itself.
                        if title_json == "agency":
                            funding_temp = compile_fund(workbook, sheet, row, col)
                    # All other cases do not need fancy structuring
                    else:
                        cell_data = cells_rt_meta(workbook, sheet, row, col)
                        general_temp = compile_temp(general_temp, title_json, cell_data)
        except IndexError as e:
            logger_excel.debug("cells_dn_datasheets: IndexError: sheet: {}, row: {}, col: {}, {}".format(sheet, row, col, e))
        row += 1
        row_loop += 1
    # Compile the more complicated items
    geo = compile_geo(geo_temp)
    logger_excel.info("compile metadata dictionary")
    # Insert into final dictionary
    final_dict['@context'] = "context.jsonld"
    final_dict['pub'] = pub_temp
    final_dict['funding'] = funding_temp
    final_dict['geo'] = geo
    # Add remaining general items
    for k, v in general_temp.items():
        final_dict[k] = v
    logger_excel.info("exit cells_dn_meta")
    return final_dict
|
def libvlc_media_player_get_state(p_mi):
    '''Get current movie state.
    @param p_mi: the Media Player.
    @return: the current state of the media player (playing, paused, ...) See libvlc_state_t.'''
    f = _Cfunctions.get('libvlc_media_player_get_state', None)
    if f is None:
        # Build the ctypes wrapper on first use; later calls hit the cache.
        f = _Cfunction('libvlc_media_player_get_state', ((1,),), None, State, MediaPlayer)
    return f(p_mi)
|
def set_json_item(key, value):
    """Manipulate JSON data on the fly.

    Reads the current JSON payload, sets ``key`` to ``value`` and writes
    the re-serialized result back into the request BODY.
    """
    payload = get_json()
    payload[key] = value
    get_request()["BODY"] = json.dumps(payload)
|
def obfuscatable_class(tokens, index, **kwargs):
    """Given a list of *tokens* and an *index* (representing the current position),
    returns the token string if it is a class name that can be safely
    obfuscated."""
    # For index 0, pretend the previous token was a newline (for simplicity).
    prev_tok_string = tokens[index - 1][1] if index > 0 else '\n'
    tok = tokens[index]
    if tok[0] != tokenize.NAME:
        return None
    if tok[1].startswith('__'):
        # Don't mess with specials
        return None
    if prev_tok_string == "class":
        return tok[1]
|
def extract_views_from_urlpatterns(self, urlpatterns, base='', namespace=None):
    """Return a list of views from a list of urlpatterns.

    Each object in the returned list is a three-tuple:
    (view_func, regex, name)

    :param urlpatterns: iterable of URL pattern / resolver objects
    :param base: URL prefix accumulated from parent resolvers
    :param namespace: namespace accumulated from parent resolvers
    """
    views = []
    for p in urlpatterns:
        if isinstance(p, (URLPattern, RegexURLPattern)):
            # Plain URL pattern: record its callback directly.
            try:
                if not p.name:
                    name = p.name
                elif namespace:
                    # Qualify the name with the accumulated namespace.
                    name = '{0}:{1}'.format(namespace, p.name)
                else:
                    name = p.name
                pattern = describe_pattern(p)
                views.append((p.callback, base + pattern, name))
            except ViewDoesNotExist:
                continue
        elif isinstance(p, (URLResolver, RegexURLResolver)):
            # Included URLconf: recurse into its patterns.
            try:
                patterns = p.url_patterns
            except ImportError:
                # Broken include -- skip it rather than crash.
                continue
            if namespace and p.namespace:
                _namespace = '{0}:{1}'.format(namespace, p.namespace)
            else:
                _namespace = (p.namespace or namespace)
            pattern = describe_pattern(p)
            if isinstance(p, LocaleRegexURLResolver):
                # i18n patterns: expand once per configured language.
                for language in self.LANGUAGES:
                    with translation.override(language[0]):
                        views.extend(self.extract_views_from_urlpatterns(patterns, base + pattern, namespace=_namespace))
            else:
                views.extend(self.extract_views_from_urlpatterns(patterns, base + pattern, namespace=_namespace))
        elif hasattr(p, '_get_callback'):
            # Older Django pattern API.
            try:
                views.append((p._get_callback(), base + describe_pattern(p), p.name))
            except ViewDoesNotExist:
                continue
        elif hasattr(p, 'url_patterns') or hasattr(p, '_get_url_patterns'):
            # Older Django resolver API.
            try:
                patterns = p.url_patterns
            except ImportError:
                continue
            views.extend(self.extract_views_from_urlpatterns(patterns, base + describe_pattern(p), namespace=namespace))
        else:
            raise TypeError("%s does not appear to be a urlpattern object" % p)
    return views
|
def check_redis():
    """Check the Redis connection.

    Pings the Redis cache and prints whether or not a connection could be
    established.
    """
    from pyoko.db.connection import cache
    from redis.exceptions import ConnectionError
    try:
        cache.ping()
        # Fill the colour placeholders via .format() like the failure branch;
        # the previous concatenation printed the literal "{0}"/"{1}" text.
        print(__(u"{0}Redis is working{1}").format(CheckList.OKGREEN, CheckList.ENDC))
    except ConnectionError as e:
        # str(e) instead of the Python-2-only e.message attribute.
        print(__(u"{0}Redis is not working{1} ").format(CheckList.FAIL, CheckList.ENDC), e)
|
def _prep_noise_interpolants(self):
    """Construct interpolated sensitivity curves.

    Builds ``self.noise_interpolants`` using scipy.interpolate.interp1d,
    optionally combining each curve with white-dwarf (wd) background noise.

    Raises:
        ValueError: ``len(noise_type_in) != len(sensitivity_curves)``
        ValueError: Issue with sensitivity curve type provided.
    """
    noise_lists = {}
    self.noise_interpolants = {}
    # normalize scalar inputs to lists
    if isinstance(self.sensitivity_curves, str):
        self.sensitivity_curves = [self.sensitivity_curves]
    if isinstance(self.noise_type_in, list):
        if len(self.noise_type_in) != len(self.sensitivity_curves):
            # NOTE: spaces added between the concatenated fragments; the
            # original message read "...if it isprovided as a list.If all..."
            raise ValueError('noise_type_in must have same shape as sensitivity_curves if it is '
                             'provided as a list. '
                             'If all curves are of the same type, provide a string.')
    else:
        assert isinstance(self.noise_type_in, str)
        self.noise_type_in = [self.noise_type_in for _ in self.sensitivity_curves]
    if isinstance(self.signal_type, str):
        self.signal_type = [self.signal_type]
    # read in all the noise curves
    for num, sc in enumerate(self.sensitivity_curves):
        if isinstance(sc, str):
            f, h_n = read_noise_curve(sc, noise_type_in=self.noise_type_in[num],
                                      noise_type_out='char_strain')
            # key: file stem for .txt files, the raw name otherwise
            if sc[-4:] == '.txt':
                key = sc.split('.')[0].split('/')[-1]
            else:
                key = sc
        elif isinstance(sc, list):
            # TODO: add to docs: if inputting a special noise curve, make
            # sure it is char_strain
            f, h_n = sc
            key = str(num)
        else:
            raise ValueError('Sensitivity curves must either be string '
                             'or list containing f_n and asd_n.')
        noise_lists[key] = [f, h_n]
    # add wd noise
    if str(self.add_wd_noise).lower() in ['true', 'both', 'yes']:
        if isinstance(self.wd_noise, str):
            f_n_wd, h_n_wd = read_noise_curve(self.wd_noise, noise_type_in=self.wd_noise_type_in,
                                              noise_type_out='char_strain')
        elif isinstance(self.wd_noise, list):
            # BUGFIX: was isinstance(self, wd_noise, list), which raised a
            # TypeError whenever the wd noise was supplied as a list.
            f_n_wd, h_n_wd = self.wd_noise
        else:
            # previously fell through and hit an undefined f_n_wd below
            raise ValueError('wd_noise must either be string '
                             'or list containing f_n and h_n.')
        trans_dict = {}
        for sc in noise_lists.keys():
            f_n, h_n = noise_lists[sc]
            if self.add_wd_noise.lower() == 'both':
                # 'both' keeps the original curve alongside the wd-combined one
                trans_dict[sc] = [f_n, h_n]
            f_n, h_n = combine_with_wd_noise(f_n, h_n, f_n_wd, h_n_wd)
            trans_dict[sc + '_wd'] = [f_n, h_n]
        noise_lists = trans_dict
    # interpolate each curve; out-of-band frequencies get a huge noise value
    # so they contribute ~zero SNR
    for sc in noise_lists:
        f_n, h_n = noise_lists[sc]
        self.noise_interpolants[sc] = (interpolate.interp1d(f_n, h_n, bounds_error=False, fill_value=1e30))
    return
|
def windowed_tajima_d(pos, ac, size=None, start=None, stop=None, step=None, windows=None, min_sites=3):
    """Calculate the value of Tajima's D in windows over a single
    chromosome/contig.

    Parameters
    ----------
    pos : array_like, int, shape (n_items,)
        Variant positions, using 1-based coordinates, in ascending order.
    ac : array_like, int, shape (n_variants, n_alleles)
        Allele counts array.
    size : int, optional
        The window size (number of bases).
    start : int, optional
        The position at which to start (1-based).
    stop : int, optional
        The position at which to stop (1-based).
    step : int, optional
        The distance between start positions of windows. If not given,
        defaults to the window size, i.e., non-overlapping windows.
    windows : array_like, int, shape (n_windows, 2), optional
        Manually specify the windows to use as a sequence of (window_start,
        window_stop) positions, using 1-based coordinates. Overrides the
        size/start/stop/step parameters.
    min_sites : int, optional
        Minimum number of segregating sites for which to calculate a value.
        If there are fewer, np.nan is returned. Defaults to 3.

    Returns
    -------
    D : ndarray, float, shape (n_windows,)
        Tajima's D.
    windows : ndarray, int, shape (n_windows, 2)
        The windows used, as an array of (window_start, window_stop)
        positions, using 1-based coordinates.
    counts : ndarray, int, shape (n_windows,)
        Number of variants in each window.

    Examples
    --------
    >>> import allel
    >>> g = allel.GenotypeArray([[[0, 0], [0, 0]],
    ...                          [[0, 0], [0, 1]],
    ...                          [[0, 0], [1, 1]],
    ...                          [[0, 1], [1, 1]],
    ...                          [[1, 1], [1, 1]],
    ...                          [[0, 0], [1, 2]],
    ...                          [[0, 1], [1, 2]],
    ...                          [[0, 1], [-1, -1]],
    ...                          [[-1, -1], [-1, -1]]])
    >>> ac = g.count_alleles()
    >>> pos = [2, 4, 7, 14, 15, 20, 22, 25, 27]
    >>> D, windows, counts = allel.windowed_tajima_d(pos, ac, size=20, step=10, start=1, stop=31)
    >>> D
    array([1.36521524, 4.22566622])
    >>> windows
    array([[ 1, 20],
           [11, 31]])
    >>> counts
    array([6, 6])
    """
    # check inputs
    if not isinstance(pos, SortedIndex):
        pos = SortedIndex(pos, copy=False)
    if not hasattr(ac, 'count_segregating'):
        ac = AlleleCountsArray(ac, copy=False)
    # assume number of chromosomes sampled is constant for all variants
    n = ac.sum(axis=1).max()
    # calculate constants (harmonic-number terms of the Tajima's D estimator)
    a1 = np.sum(1 / np.arange(1, n))
    a2 = np.sum(1 / (np.arange(1, n) ** 2))
    b1 = (n + 1) / (3 * (n - 1))
    b2 = 2 * (n ** 2 + n + 3) / (9 * n * (n - 1))
    c1 = b1 - (1 / a1)
    c2 = b2 - ((n + 2) / (a1 * n)) + (a2 / (a1 ** 2))
    e1 = c1 / a1
    e2 = c2 / (a1 ** 2 + a2)
    # locate segregating variants
    is_seg = ac.is_segregating()
    # calculate mean pairwise difference
    mpd = mean_pairwise_difference(ac, fill=0)
    # define statistic to compute for each window
    # noinspection PyPep8Naming
    def statistic(w_is_seg, w_mpd):
        # S: number of segregating sites in the window
        S = np.count_nonzero(w_is_seg)
        if S < min_sites:
            return np.nan
        # pi: sum of mean pairwise differences in the window
        pi = np.sum(w_mpd)
        d = pi - (S / a1)
        # standard deviation of d expected under neutrality
        d_stdev = np.sqrt((e1 * S) + (e2 * S * (S - 1)))
        wD = d / d_stdev
        return wD
    D, windows, counts = windowed_statistic(pos, values=(is_seg, mpd), statistic=statistic, size=size, start=start, stop=stop, step=step, windows=windows, fill=np.nan)
    return D, windows, counts
|
def goto_line(self):
    """Shows the *go to line dialog* and moves the cursor to the selected line."""
    helper = TextHelper(self)
    line, accepted = DlgGotoLine.get_line(
        self, helper.current_line_nbr(), helper.line_count())
    if accepted:
        return helper.goto_line(line, move=True)
    return None
|
def run(users, hosts, func, **kwargs):
    """Convenience function that creates an Exscript.Queue instance, adds
    the given accounts, and calls Queue.run() with the given
    hosts and function as an argument.

    If you also want to pass arguments to the given function, you may use
    util.decorator.bind() like this::

        def my_callback(job, host, conn, my_arg, **kwargs):
            print(my_arg, kwargs.get('foo'))

        run(account,
            host,
            bind(my_callback, 'hello', foo='world'),
            max_threads=10)

    :type  users: Account|list[Account]
    :param users: The account(s) to use for logging in.
    :type  hosts: Host|list[Host]
    :param hosts: A list of Host objects.
    :type  func: function
    :param func: The callback function.
    :type  kwargs: dict
    :param kwargs: Passed to the Exscript.Queue constructor.
    """
    # 'attempts' is consumed here (pop replaces the get/del pair);
    # everything else is forwarded to the Queue constructor.
    attempts = kwargs.pop("attempts", 1)
    queue = Queue(**kwargs)
    queue.add_account(users)
    queue.run(hosts, func, attempts)
    queue.destroy()
|
def get_bewit(resource):
    """Returns a bewit identifier for the resource as a string.

    :param resource:
        Resource to generate a bewit for
    :type resource: `mohawk.base.Resource`
    """
    if resource.method != 'GET':
        raise ValueError('bewits can only be generated for GET requests')
    if resource.nonce != '':
        raise ValueError('bewits must use an empty nonce')

    mac = calculate_mac('bewit', resource, None,)
    if isinstance(mac, six.binary_type):
        mac = mac.decode('ascii')

    ext = ''
    if resource.ext is not None:
        validate_header_attr(resource.ext, name='ext')
        ext = resource.ext

    # b64encode works only with bytes in python3, but all of our parameters are
    # in unicode, so we need to encode them. The cleanest way to do this that
    # works in both python 2 and 3 is to use string formatting to get a
    # unicode string, and then explicitly encode it to bytes.
    inner = u"{id}\\{exp}\\{mac}\\{ext}".format(
        id=resource.credentials['id'], exp=resource.timestamp, mac=mac, ext=ext,
    )
    # Decode the resulting base64 bytes back to a unicode string.
    return urlsafe_b64encode(inner.encode('ascii')).decode('ascii')
|
def is_subdomains_enabled(blockstack_opts):
    """Can we do subdomain operations?

    Subdomains require Atlas to be enabled and a subdomain DB path to be set.
    """
    if is_atlas_enabled(blockstack_opts):
        if 'subdomaindb_path' in blockstack_opts:
            return True
        log.debug("Subdomains are disabled: no 'subdomaindb_path' path set")
    else:
        log.debug("Subdomains are disabled")
    return False
|
def register_postloop_hook(self, func: Callable[[], None]) -> None:
    """Register a function to be called at the end of the command loop.

    :param func: zero-argument callable; validated before being appended to
        the post-loop hook list.
    """
    # Annotation fixed: postloop hooks take no arguments, so the type is
    # Callable[[], None] -- Callable[[None], None] declared one None-typed arg.
    self._validate_prepostloop_callable(func)
    self._postloop_hooks.append(func)
|
def render_html_attributes(**kwargs):
    """Returns a string representation of attributes for html entities.

    :param kwargs: attributes and values
    :return: a well-formed string representation of attributes
    """
    pairs = ['{}="{}"'.format(name, value) for name, value in kwargs.items()]
    # "css_class" is used as the kwarg name because "class" is a reserved word
    return " ".join(pairs).replace("css_class", "class")
|
def reward_bonus(self, assignment_id, amount, reason):
    """Print out bonus info for the assignment"""
    message = 'Award ${} for assignment {}, with reason "{}"'.format(
        amount, assignment_id, reason)
    logger.info(message)
|
def get_command(self):
    """Get a line of text that was received from the DE. The class's
    cmd_ready attribute will be true if lines are available."""
    if not self.command_list:
        return None
    cmd = self.command_list.pop(0)
    # That was the last pending line -- clear the ready flag.
    if not self.command_list:
        self.cmd_ready = False
    return cmd
|
async def execute(self, run_id: str = None, code: str = None, mode: str = 'query', opts: dict = None):
    '''Executes a code snippet directly in the compute session or sends a set of
    build/clean/execute commands to the compute session.

    For more details about using this API, please refer
    :doc:`the official API documentation <user-api/intro>`.

    :param run_id: A unique identifier for a particular run loop. In the
        first call, it may be ``None`` so that the server auto-assigns one.
        Subsequent calls must use the returned ``runId`` value to request
        continuation or to send user inputs.
    :param code: A code snippet as string. In the continuation requests, it
        must be an empty string. When sending user inputs, this is where the
        user input string is stored.
    :param mode: A constant string which is one of ``"query"``, ``"batch"``,
        ``"continue"``, and ``"user-input"``.
    :param opts: A dict for specifying additional options. Mainly used in the
        batch mode to specify build/clean/execution commands.
        See :ref:`the API object reference <batch-execution-query-object>`
        for details.
    :returns: :ref:`An execution result object <execution-result-object>`
    '''
    opts = opts if opts is not None else {}
    params = {}
    if self.owner_access_key:
        params['owner_access_key'] = self.owner_access_key
    if mode in {'query', 'continue', 'input'}:
        # Plain, continued, or user-input execution: code must at least be
        # an empty string (never None).
        assert code is not None, 'The code argument must be a valid string even when empty.'
        rqst = Request(self.session, 'POST', '/kernel/{}'.format(self.kernel_id), params=params)
        rqst.set_json({
            'mode': mode,
            'code': code,
            'runId': run_id,
        })
    elif mode == 'batch':
        # Batch mode carries the build/clean/exec command options.
        rqst = Request(self.session, 'POST', '/kernel/{}'.format(self.kernel_id), params=params)
        rqst.set_json({
            'mode': mode,
            'code': code,
            'runId': run_id,
            'options': {
                'clean': opts.get('clean', None),
                'build': opts.get('build', None),
                'buildLog': bool(opts.get('buildLog', False)),
                'exec': opts.get('exec', None),
            },
        })
    elif mode == 'complete':
        # Code-completion request at the given cursor position.
        rqst = Request(self.session, 'POST', '/kernel/{}/complete'.format(self.kernel_id), params=params)
        rqst.set_json({
            'code': code,
            'options': {
                'row': int(opts.get('row', 0)),
                'col': int(opts.get('col', 0)),
                'line': opts.get('line', ''),
                'post': opts.get('post', ''),
            },
        })
    else:
        raise BackendClientError('Invalid execution mode: {0}'.format(mode))
    async with rqst.fetch() as resp:
        return (await resp.json())['result']
|
def createCitation(self, multiCite=False):
    """Overwriting the general [citation creator](./ExtendedRecord.html#metaknowledge.ExtendedRecord.createCitation) to deal with scopus weirdness.

    Creates a citation string, using the same format as other WOS citations,
    for the [Record](./Record.html#metaknowledge.Record) by reading the
    relevant special tags (`'year'`, `'J9'`, `'volume'`, `'beginningPage'`,
    `'DOI'`) and using it to create a
    [Citation](./Citation.html#metaknowledge.citation.Citation) object.

    # Parameters

    _multiCite_ : `optional [bool]`

    > Default `False`, if `True` a tuple of Citations is returned with each
    > having a different one of the records authors as the author

    # Returns

    `Citation`

    > A [Citation](./Citation.html#metaknowledge.citation.Citation) object
    > containing a citation for the Record.
    """
    # Need to put the import here to avoid circular import issues
    from ..citation import Citation
    valsStr = ''
    if multiCite:
        # One citation per author: collect the comma-stripped author names.
        auths = []
        for auth in self.get("authorsShort", []):
            auths.append(auth.replace(',', ''))
    else:
        # Single citation: lead with the first author only.
        if self.get("authorsShort", False):
            valsStr += self['authorsShort'][0].replace(',', '') + ', '
    # Assemble the remaining citation fields in WOS-like order.
    if self.get("title", False):
        valsStr += self.get('title').replace('(', '').replace(')', '') + ' '
    if self.get("year", False):
        valsStr += "({}) ".format(self.get('year'))
    if self.get("journal", False):
        valsStr += self.get('journal') + ', '
    if self.get("volume", False):
        valsStr += str(self.get('volume')) + ', '
    if self.get("beginningPage", False):
        valsStr += 'PP. ' + str(self.get('beginningPage'))
    if multiCite and len(auths) > 0:
        ret = (tuple((Citation(a + valsStr, scopusMode=True) for a in auths)))
    elif multiCite:
        # multiCite requested but no authors found: single-citation tuple.
        ret = Citation(valsStr, scopusMode=True),
    else:
        ret = Citation(valsStr, scopusMode=True)
    if multiCite:
        # Patch up any citations the Citation parser flagged as bad,
        # falling back to the record's own tags.
        rL = []
        for c in ret:
            if c.bad:
                c.year = self.get('year', 0)
                c.name = self.get('title', '').upper()
                c.journal = self.get("journal", '').upper()
            rL.append(c)
        return tuple(rL)
    else:
        if ret.bad:
            ret.year = self.get('year', 0)
            ret.name = self.get('title', '').upper()
            ret.journal = self.get("journal", '').upper()
        return ret
|
def numpy():
    '''Lazily import the numpy module'''
    if LazyImport.numpy_module is not None:
        return LazyImport.numpy_module
    # Prefer PyPy's numpypy implementation, then fall back to regular numpy.
    for module_name in ('numpypy', 'numpy'):
        try:
            LazyImport.numpy_module = __import__(module_name)
            return LazyImport.numpy_module
        except ImportError:
            continue
    raise ImportError('The numpy module is required')
|
def join_gates(*gates: Gate) -> Gate:
    """Direct product of two gates. Qubit count is the sum of each gate's
    bit count."""
    combined = reduce(outer_product, (g.vec for g in gates))
    return Gate(combined.tensor, combined.qubits)
|
def remove_parameter(self, twig=None, **kwargs):
    """Remove a :class:`Parameter` from the ParameterSet.

    Note: removing Parameters from a ParameterSet will not remove
    them from any parent ParameterSets
    (including the :class:`phoebe.frontend.bundle.Bundle`)

    :parameter str twig: the twig to search for the parameter
    :parameter **kwargs: meta-tags to search
    :raises ValueError: if 0 or more than 1 results are found using the
        provided search criteria.
    """
    # get() raises ValueError unless exactly one parameter matches.
    self._remove_parameter(self.get(twig=twig, **kwargs))
|
def compose(self):
    """get CGR of reaction

    reagents will be presented as unchanged molecules
    :return: CGRContainer
    """
    def _union(containers):
        # Union of molecule/CGR containers; empty molecule when none given.
        if not containers:
            return MoleculeContainer()
        if not all(isinstance(x, (MoleculeContainer, CGRContainer)) for x in containers):
            raise TypeError('Queries not composable')
        return reduce(or_, containers)

    r = _union(self.__reagents + self.__reactants)
    p = _union(self.__products)
    return r ^ p
|
def handle_get_passphrase(self, conn, _):
    """Allow simple GPG symmetric encryption (using a passphrase)."""
    ui = self.client.device.ui
    first = ui.get_passphrase('Symmetric encryption:')
    second = ui.get_passphrase('Re-enter encryption:')
    if first != second:
        log.warning('Passphrase does not match!')
        return
    # Assuan 'D' data line carrying the serialized passphrase.
    payload = b'D ' + util.assuan_serialize(first.encode('ascii'))
    keyring.sendline(conn, payload, confidential=True)
|
def create_and_setup_cors(self, restapi, resource, uri, depth, config):
    """Set up the methods, integration responses and method responses for a given API Gateway resource.

    Creates an OPTIONS method backed by a MOCK integration that returns the
    CORS headers browsers expect for preflight requests.

    :param restapi: the troposphere RestApi resource
    :param resource: a troposphere apigateway Resource, or an already-resolved
        resource reference/id
    :param uri: integration URI assigned to the OPTIONS method
    :param depth: nesting depth, used to build a unique logical resource name
    :param config: CORS settings dict; True means "use all defaults"
    """
    if config is True:
        # Bare "cors: true" -> all defaults.
        config = {}
    method_name = "OPTIONS"
    method = troposphere.apigateway.Method(method_name + str(depth))
    method.RestApiId = troposphere.Ref(restapi)
    if type(resource) is troposphere.apigateway.Resource:
        method.ResourceId = troposphere.Ref(resource)
    else:
        # Already a reference/id (e.g. the API root resource id).
        method.ResourceId = resource
    method.HttpMethod = method_name.upper()
    method.AuthorizationType = "NONE"
    method_response = troposphere.apigateway.MethodResponse()
    method_response.ResponseModels = {"application/json": "Empty"}
    # Header values are single-quoted literals, as required for static
    # response parameters on MOCK integrations.
    response_headers = {
        "Access-Control-Allow-Headers": "'%s'" % ",".join(config.get("allowed_headers", ["Content-Type", "X-Amz-Date", "Authorization", "X-Api-Key", "X-Amz-Security-Token"])),
        "Access-Control-Allow-Methods": "'%s'" % ",".join(config.get("allowed_methods", ["DELETE", "GET", "HEAD", "OPTIONS", "PATCH", "POST", "PUT"])),
        "Access-Control-Allow-Origin": "'%s'" % config.get("allowed_origin", "*")
    }
    method_response.ResponseParameters = {"method.response.header.%s" % key: True for key in response_headers}
    method_response.StatusCode = "200"
    method.MethodResponses = [method_response]
    self.cf_template.add_resource(method)
    self.cf_api_resources.append(method.title)
    # MOCK integration: API Gateway answers directly with the CORS headers.
    integration = troposphere.apigateway.Integration()
    integration.Type = 'MOCK'
    integration.PassthroughBehavior = 'NEVER'
    integration.RequestTemplates = {"application/json": "{\"statusCode\": 200}"}
    integration_response = troposphere.apigateway.IntegrationResponse()
    integration_response.ResponseParameters = {"method.response.header.%s" % key: value for key, value in response_headers.items()}
    integration_response.ResponseTemplates = {"application/json": ""}
    integration_response.StatusCode = "200"
    integration.IntegrationResponses = [integration_response]
    integration.Uri = uri
    method.Integration = integration
|
def getPayloadStruct(self, attributes, objType):
    """Function getPayloadStruct

    Get the payload structure to do a creation or a modification.

    @param attributes: The data
    @param objType: SubItem type (e.g: hostgroup for hostgroup_class)
    @return RETURN: the payload
    """
    payload = {self.payloadObj: attributes}
    # The same data is repeated under the "<objType>_class" wrapper.
    payload[objType + "_class"] = {self.payloadObj: attributes}
    return payload
|
def plot_bargraph(self, rank="auto", normalize="auto", top_n="auto", threshold="auto", title=None, xlabel=None, ylabel=None, tooltip=None, return_chart=False, haxis=None, legend="auto", label=None):
    """Plot a bargraph of relative abundance of taxa for multiple samples.

    Parameters
    ----------
    rank : {'auto', 'kingdom', 'phylum', 'class', 'order', 'family', 'genus', 'species'}, optional
        Analysis will be restricted to abundances of taxa at the specified level.
    normalize : 'auto' or `bool`, optional
        Convert read counts to relative abundances such that each sample sums to 1.0.
        Setting 'auto' will choose automatically based on the data.
    return_chart : `bool`, optional
        When True, return an `altair.Chart` object instead of displaying the resulting
        plot in the current notebook.
    top_n : `int`, optional
        Display the top N most abundant taxa in the entire cohort of samples.
    threshold : `float`
        Display only taxa that are more abundant that this threshold in one or more samples.
    title : `string`, optional
        Text label at the top of the plot.
    xlabel : `string`, optional
        Text label along the horizontal axis.
    ylabel : `string`, optional
        Text label along the vertical axis.
    tooltip : `string` or `list`, optional
        A string or list containing strings representing metadata fields. When a point
        in the plot is hovered over, the value of the metadata associated with that
        sample will be displayed in a modal.
    haxis : `string`, optional
        The metadata field (or tuple containing multiple categorical fields) used to
        group samples together.
    legend : `string`, optional
        Title for color scale. Defaults to the field used to generate the plot, e.g.
        readcount_w_children or abundance.
    label : `string` or `callable`, optional
        A metadata field (or function) used to label each analysis. If passing a
        function, a dict containing the metadata for each analysis is passed as the
        first and only positional argument. The callable function must return a string.

    Examples
    --------
    Plot a bargraph of the top 10 most abundant genera

    >>> plot_bargraph(rank='genus', top_n=10)
    """
    if rank is None:
        raise OneCodexException("Please specify a rank or 'auto' to choose automatically")
    if not (threshold or top_n):
        raise OneCodexException("Please specify at least one of: threshold, top_n")
    # Resolve the 'auto' sentinels: if neither was supplied explicitly,
    # default to the top 10 taxa; otherwise keep only the caller's choice.
    if top_n == "auto" and threshold == "auto":
        top_n = 10
        threshold = None
    elif top_n == "auto" and threshold != "auto":
        top_n = None
    elif top_n != "auto" and threshold == "auto":
        threshold = None
    if legend == "auto":
        legend = self._field
    # Long-format table: one row per (classification_id, tax_id) pair.
    df = self.to_df(rank=rank, normalize=normalize, top_n=top_n, threshold=threshold, table_format="long")
    if tooltip:
        if not isinstance(tooltip, list):
            tooltip = [tooltip]
    else:
        tooltip = []
    if haxis:
        tooltip.append(haxis)
    tooltip.insert(0, "Label")
    # takes metadata columns and returns a dataframe with just those columns
    # renames columns in the case where columns are taxids
    magic_metadata, magic_fields = self._metadata_fetch(tooltip, label=label)
    # add sort order to long-format df
    if haxis:
        # Rank samples by the haxis metadata value, then map that rank onto
        # every long-format row so altair can sort bars consistently.
        sort_order = magic_metadata.sort_values(magic_fields[haxis]).index.tolist()
        for sort_num, sort_class_id in enumerate(sort_order):
            magic_metadata.loc[sort_class_id, "sort_order"] = sort_num
        df["sort_order"] = magic_metadata["sort_order"][df["classification_id"]].tolist()
        sort_order = alt.EncodingSortField(field="sort_order", op="mean")
    else:
        sort_order = None
    # transfer metadata from wide-format df (magic_metadata) to long-format df
    for f in tooltip:
        df[magic_fields[f]] = magic_metadata[magic_fields[f]][df["classification_id"]].tolist()
    # add taxa names (fall back to the raw tax_id when no name is known)
    df["tax_name"] = ["{} ({})".format(self.taxonomy["name"][t], t) if t in self.taxonomy["name"] else t for t in df["tax_id"]]
    # TODO: how to sort bars in bargraph
    # - abundance (mean across all samples)
    # - parent taxon (this will require that we make a few assumptions
    # about taxonomic ranks but as all taxonomic data will be coming from
    # OCX this should be okay)
    ylabel = self._field if ylabel is None else ylabel
    xlabel = "" if xlabel is None else xlabel
    # should ultimately be Label, tax_name, readcount_w_children, then custom fields
    tooltip_for_altair = [magic_fields[f] for f in tooltip]
    tooltip_for_altair.insert(1, "tax_name")
    tooltip_for_altair.insert(2, "{}:Q".format(self._field))
    # generate dataframes to plot, one per facet
    dfs_to_plot = []
    if haxis:
        # if using facets, first facet is just the vertical axis
        # (a single all-zero row so only the shared y-axis renders)
        blank_df = df.iloc[:1].copy()
        blank_df[self._field] = 0
        dfs_to_plot.append(blank_df)
        for md_val in magic_metadata[magic_fields[haxis]].unique():
            plot_df = df.where(df[magic_fields[haxis]] == md_val).dropna()
            # preserve booleans (df.where/dropna coerces bool columns)
            if magic_metadata[magic_fields[haxis]].dtype == "bool":
                plot_df[magic_fields[haxis]] = plot_df[magic_fields[haxis]].astype(bool)
            dfs_to_plot.append(plot_df)
    else:
        dfs_to_plot.append(df)
    charts = []
    for plot_num, plot_df in enumerate(dfs_to_plot):
        chart = (
            alt.Chart(plot_df)
            .mark_bar()
            .encode(
                x=alt.X("Label", axis=alt.Axis(title=xlabel), sort=sort_order),
                y=alt.Y(self._field, axis=alt.Axis(title=ylabel), scale=alt.Scale(domain=[0, 1], zero=True, nice=False)),
                color=alt.Color("tax_name", legend=alt.Legend(title=legend)),
                tooltip=tooltip_for_altair,
                href="url:N",
            )
        )
        if haxis:
            if plot_num == 0:
                # first plot (blank_df) has vert axis but no horiz axis
                chart.encoding.x.axis = None
            elif plot_num > 0:
                # strip vertical axis from subsequent facets
                chart.encoding.y.axis = None
                # facet's title set to value of metadata in this group
                chart.title = str(plot_df[magic_fields[haxis]].tolist()[0])
        charts.append(chart)
    # add all the facets together (horizontal concatenation)
    final_chart = charts[0]
    if len(charts) > 1:
        for chart in charts[1:]:
            final_chart |= chart
    # add title to chart
    # (cannot specify None or False for no title)
    final_chart = final_chart.properties(title=title) if title else final_chart
    return final_chart if return_chart else final_chart.display()
|
def inferheader(lines, comments=None, metadata=None, verbosity=DEFAULT_VERBOSITY):
    """Infer the number of header lines in a CSV or other tab-delimited file.

    This is essentially a small extension of the ``csv.Sniffer.has_header``
    algorithm provided in the Python csv module.  First, it checks whether a
    metametadata dictionary is present, specifying the line numbers of
    metadata lines in the header, and if so, sets the header lines to include
    at least those lines.  Then it looks to see if a comments character is
    present, and if so, includes leading comment lines as well.  If either of
    the above returns a non-zero number of header lines, the function returns
    that number; otherwise, it uses the csv.Sniffer module, checking each line
    in succession, and stopping at the first line where the sniffer module
    finds no evidence of a header, and returning that line number.

    **Parameters**

        **lines**: list of strings
            The list of lines representing lines in the file.

        **comments**: single-character string, optional
            Comments character specification.

        **metadata**: metadata dictionary, optional
            Used to determine a comments character and metametadata
            dictionary, if present.

    **Returns**

        Integer, representing the number of (inferred) header lines at the
        top of the file, or None if no header could be inferred.
    """
    # Fall back to the comments character recorded in the metadata, then '#'.
    # NOTE: `metadata` may be None, so every access below is guarded.
    if comments is None and metadata and 'comments' in metadata:
        comments = metadata['comments']
    if comments is None:
        comments = '#'
    # The metametadata dict maps names to line numbers (or lists of line
    # numbers); the header extends at least one line past the largest one.
    if metadata and 'metametadata' in metadata:
        mmd = metadata['metametadata']
        cc = 1 + max(v if isinstance(v, int) else max(v) for v in mmd.values())
    else:
        cc = 0
    if comments != '':
        # Extend the header through any leading comment lines.
        if cc < len(lines):
            for l in range(cc, len(lines)):
                if not lines[l].startswith(comments):
                    break
        else:
            l = cc
        if l > 0:
            return l
    else:
        # No comments character: fall back to csv.Sniffer, probing
        # progressively larger windows until sniffing succeeds, and stopping
        # at the first line that shows no evidence of a header.
        for j in range(min(1000, len(lines))):
            hasheader = 'unset'
            for k in [100, 200, 400, 800, 1600]:
                F = '\n'.join(lines[j:(j + k)])
                try:
                    hasheader = csv.Sniffer().has_header(F)
                except csv.Error:
                    # Sniffing failed on this window; try a larger one.
                    pass
                else:
                    break
            if not hasheader:
                return j
|
def send(self, data):
    """Send *data* over SCGI to the transport and return the response body.

    The round-trip time is recorded in ``self.latency`` even when the
    transport raises; parsed headers are stored in ``self.resp_headers``.
    """
    started = time.time()
    try:
        raw_response = "".join(self.transport.send(_encode_payload(data)))
    finally:
        self.latency = time.time() - started
    body, self.resp_headers = _parse_response(raw_response)
    return body
|
def do_format(value, *args, **kwargs):
    """Apply python string formatting on an object:

    .. sourcecode:: jinja

        {{ "%s - %s"|format("Hello?", "Foo!") }}
            -> Hello? - Foo!
    """
    # %-formatting accepts either a tuple (positional) or a mapping
    # (keyword), never both at once.
    if args and kwargs:
        raise FilterArgumentError('can\'t handle positional and keyword '
                                  'arguments at the same time')
    params = kwargs if kwargs else args
    return soft_unicode(value) % params
|
def get_activities_for_objectives(self, objective_ids=None):
    """Gets the activities for the given objectives.

    In plenary mode, the returned list contains all of the activities
    specified in the objective Id list, in the order of the list, including
    duplicates, or an error results if an Id in the supplied list is not
    found or inaccessible.  Otherwise, inaccessible Activities may be
    omitted from the list and the elements may be in any order, including a
    unique set.

    arg:    objective_ids (osid.id.IdList): list of objective Ids
    return: (osid.learning.ActivityList) - list of activities
    raise:  NotFound - an objective Id not found
    raise:  NullArgument - objective_ids is null
    raise:  OperationFailed - unable to complete request
    raise:  PermissionDenied - authorization failure
    compliance: mandatory - This method must be implemented.
    """
    if objective_ids is None:
        raise NullArgument()
    # Should also check if objective_id exists?
    collected = []
    for objective_id in objective_ids:
        found = None
        url_path = construct_url('activities', bank_id=self._catalog_idstr, obj_id=objective_id)
        try:
            found = json.loads(self._get_request(url_path))
        except (NotFound, OperationFailed):
            # In plenary view every objective must resolve; otherwise
            # inaccessible objectives are silently skipped.
            if self._activity_view == PLENARY:
                raise
        if found:
            collected.extend(found)
    return objects.ActivityList(collected)
|
def hardware_custom_profile_kap_custom_profile_name(self, **kwargs):
    """Auto Generated Code"""
    # Build the NETCONF config tree:
    # config/hardware/custom-profile/kap-custom-profile/name
    config = ET.Element("config")
    hardware = ET.SubElement(config, "hardware", xmlns="urn:brocade.com:mgmt:brocade-hardware")
    profile = ET.SubElement(hardware, "custom-profile")
    kap_profile = ET.SubElement(profile, "kap-custom-profile")
    name_node = ET.SubElement(kap_profile, "name")
    name_node.text = kwargs.pop('name')
    # Dispatch through the caller-supplied callback, defaulting to ours.
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
|
def effect_emd(d1, d2):
    """Compute the EMD between two effect repertoires.

    Because the nodes are independent, the EMD between effect repertoires is
    equal to the sum of the EMDs between the marginal distributions of each
    node, and the EMD between marginal distributions for a node is the
    absolute difference in the probabilities that the node is OFF.

    Args:
        d1 (np.ndarray): The first repertoire.
        d2 (np.ndarray): The second repertoire.

    Returns:
        float: The EMD between ``d1`` and ``d2``.
    """
    total = 0
    for node in range(d1.ndim):
        total += abs(marginal_zero(d1, node) - marginal_zero(d2, node))
    return total
|
def config_managed_object(p_dn, p_class_id, class_id, mo_config, mo_dn, handle=None, delete=True):
    """Configure the specified MO in UCS Manager.

    :param p_dn: parent MO DN
    :param p_class_id: parent MO class ID
    :param class_id: MO class ID
    :param mo_config: MO configuration
    :param mo_dn: MO DN value
    :param handle: optional UCS Manager handle object
    :returns: Managed Object
    :raises: UcsOperationError in case of failure.
    """
    # NOTE(review): `self` is not defined in this module-level function, so
    # calling it with handle=None raises NameError.  This looks like a
    # method that lost its `self` parameter -- confirm against callers.
    if handle is None:
        handle = self.handle
    # NOTE(review): p_dn, p_class_id, mo_dn and delete are currently unused.
    try:
        # modifyPresent=True makes this an idempotent create-or-update call.
        result = handle.AddManagedObject(None, classId=class_id, params=mo_config, modifyPresent=True, dumpXml=YesOrNo.FALSE)
        return result
    except UcsException as ex:
        print(_("Cisco client exception: %(msg)s"), {'msg': ex})
        raise exception.UcsOperationError('config_managed_object', error=ex)
|
def make_initial_state(project, stack_length):
    """:return: an initial state with a symbolic stack and good options for rop"""
    added = {
        options.AVOID_MULTIVALUED_READS,
        options.AVOID_MULTIVALUED_WRITES,
        options.NO_SYMBOLIC_JUMP_RESOLUTION,
        options.CGC_NO_SYMBOLIC_RECEIVE_LENGTH,
        options.NO_SYMBOLIC_SYSCALL_RESOLUTION,
        options.TRACK_ACTION_HISTORY,
    }
    state = project.factory.blank_state(
        add_options=added,
        remove_options=options.resilience | options.simplification,
    )
    state.options.discard(options.CGC_ZERO_FILL_UNCONSTRAINED_MEMORY)
    state.options.update({
        options.TRACK_REGISTER_ACTIONS,
        options.TRACK_MEMORY_ACTIONS,
        options.TRACK_JMP_ACTIONS,
        options.TRACK_CONSTRAINT_ACTIONS,
    })
    # Place a fully symbolic stack of `stack_length` machine words at sp.
    stack = state.solver.BVS("symbolic_stack", project.arch.bits * stack_length)
    state.memory.store(state.regs.sp, stack)
    if state.arch.bp_offset != state.arch.sp_offset:
        state.regs.bp = state.regs.sp + 20 * state.arch.bytes
    # only solve for half a second at most
    state.solver._solver.timeout = 500
    return state
|
def p_else_single(p):
    # NOTE: the docstring below is a PLY grammar rule, not documentation --
    # ply.yacc parses it to build the parser tables.  Do not edit it.
    '''else_single : empty
                   | ELSE statement'''
    # Build an AST node only for the "ELSE statement" alternative; for the
    # "empty" alternative len(p) == 2 and p[0] is left as None.
    if len(p) == 3:
        p[0] = ast.Else(p[2], lineno=p.lineno(1))
|
def register_callback(self, callback, remove=False):
    """(Un)Register a callback.

    Registering is idempotent: a callback already present is not added
    again, and removing an unknown callback is a no-op.

    Parameters
    ----------
    callback : method handle
        Method to be registered or unregistered.
    remove = False : bool
        Whether to unregister the callback.
    """
    already_registered = callback in self.callbacks
    if remove:
        if already_registered:
            self.callbacks.remove(callback)
    elif not already_registered:
        self.callbacks.append(callback)
|
def multi_bulk(self, args):
    '''Multi bulk encoding for list/tuple ``args``'''
    if args is None:
        return null_array
    return b''.join(self._pack(args))
|
def update(self, items):
    """Updates the dependencies with the given items.

    Note that this does not reset previously-evaluated and cached nodes.

    :param items: Iterable or dictionary in the format
        ``(dependent_item, dependencies)``.
    :type items: collections.Iterable
    """
    for dependent, dependencies in _iterate_dependencies(items):
        node = self._deps[dependent]
        merge_list(node.parent, dependencies)
|
def _parse_container(tokens, index, for_or_if=None):
    """Parse a high-level container, such as a list, tuple, etc.

    Returns a ``(container, index)`` pair where *index* is the position of
    the container's closing token, or ``(None, None)`` if the tokens run
    out before the container is closed.
    """
    # Store the opening bracket.
    items = [Atom(Token(*tokens[index]))]
    index += 1
    num_tokens = len(tokens)
    while index < num_tokens:
        tok = Token(*tokens[index])
        if tok.token_string in ',)]}':
            # First check if we're at the end of a list comprehension or
            # if-expression. Don't add the ending token as part of the list
            # comprehension or if-expression, because they aren't part of those
            # constructs.
            if for_or_if == 'for':
                # index - 1 so the caller re-reads this closing token itself.
                return (ListComprehension(items), index - 1)
            elif for_or_if == 'if':
                return (IfExpression(items), index - 1)
            # We've reached the end of a container.
            items.append(Atom(tok))
            # If not, then we are at the end of a container.
            if tok.token_string == ')':
                # The end of a tuple.
                return (Tuple(items), index)
            elif tok.token_string == ']':
                # The end of a list.
                return (List(items), index)
            elif tok.token_string == '}':
                # The end of a dictionary or set.
                return (DictOrSet(items), index)
            # A bare ',' falls through: it was appended above and parsing
            # continues with the next token.
        elif tok.token_string in '([{':
            # A sub-container is being defined.
            (container, index) = _parse_container(tokens, index)
            items.append(container)
        elif tok.token_string == 'for':
            (container, index) = _parse_container(tokens, index, 'for')
            items.append(container)
        elif tok.token_string == 'if':
            (container, index) = _parse_container(tokens, index, 'if')
            items.append(container)
        else:
            items.append(Atom(tok))
        index += 1
    # Ran out of tokens without closing the container.
    return (None, None)
|
def _set_fcoe_fip_keep_alive(self, v, load=False):
    """Setter method for fcoe_fip_keep_alive, mapped from YANG variable
    /fcoe/fcoe_fabric_map/fcoe_fip_keep_alive (container)

    If this variable is read-only (config: false) in the source YANG file,
    then _set_fcoe_fip_keep_alive is considered as a private method.
    Backends looking to populate this variable should do so via calling
    thisObj._set_fcoe_fip_keep_alive() directly.

    YANG Description: This provides the grouping of FCoE keep alive
    configuration elements.
    """
    # Auto-generated pyangbind setter: coerce `v` into the YANG container
    # type, rejecting incompatible values with a descriptive ValueError.
    if hasattr(v, "_utype"):
        # Unwrap values that carry their own union-type coercion helper.
        v = v._utype(v)
    try:
        t = YANGDynClass(v, base=fcoe_fip_keep_alive.fcoe_fip_keep_alive, is_container='container', presence=False, yang_name="fcoe-fip-keep-alive", rest_name="keep-alive", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Enable/Disable the keep-alive timeout', u'alt-name': u'keep-alive', u'cli-incomplete-no': None}}, namespace='urn:brocade.com:mgmt:brocade-fcoe', defining_module='brocade-fcoe', yang_type='container', is_config=True)
    except (TypeError, ValueError):
        raise ValueError({'error-string': """fcoe_fip_keep_alive must be of a type compatible with container""", 'defined-type': "container", 'generated-type': """YANGDynClass(base=fcoe_fip_keep_alive.fcoe_fip_keep_alive, is_container='container', presence=False, yang_name="fcoe-fip-keep-alive", rest_name="keep-alive", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Enable/Disable the keep-alive timeout', u'alt-name': u'keep-alive', u'cli-incomplete-no': None}}, namespace='urn:brocade.com:mgmt:brocade-fcoe', defining_module='brocade-fcoe', yang_type='container', is_config=True)""", })
    # Store the coerced value and trigger the change hook if present.
    self.__fcoe_fip_keep_alive = t
    if hasattr(self, '_set'):
        self._set()
|
def locate_key(self, k1, k2=None):
    """Get index location for the requested key.

    Parameters
    ----------
    k1 : object
        Level 1 key.
    k2 : object, optional
        Level 2 key.

    Returns
    -------
    loc : int or slice
        Location of requested key (will be slice if there are duplicate
        entries).

    Raises
    ------
    KeyError
        With ``(k1, k2)`` if the key pair is not present.

    Examples
    --------
    >>> idx.locate_key('chr1')
    slice(0, 2, None)
    >>> idx.locate_key('chr2', 5)
    slice(3, 5, None)
    """
    primary = self.l1.locate_key(k1)
    if k2 is None:
        return primary
    if isinstance(primary, slice):
        base = primary.start
        try:
            secondary = SortedIndex(self.l2[primary], copy=False).locate_key(k2)
        except KeyError:
            # reraise with more information
            raise KeyError(k1, k2)
        if isinstance(secondary, slice):
            # Shift the secondary slice back into full-index coordinates.
            return slice(base + secondary.start, base + secondary.stop)
        # assume singleton
        return base + secondary
    # singleton match in l1: verify the level-2 value matches
    if self.l2[primary] == k2:
        return primary
    raise KeyError(k1, k2)
|
def scale(text="", value=0, min=0, max=100, step=1, draw_value=True, title="", width=DEFAULT_WIDTH, height=DEFAULT_HEIGHT, timeout=None):
    """Select a number with a range widget.

    :param text: text inside window
    :type text: str
    :param value: current value
    :type value: int
    :param min: minimum value
    :type min: int
    :param max: maximum value
    :type max: int
    :param step: incrementation value
    :type step: int
    :param draw_value: hide/show cursor value
    :type draw_value: bool
    :param title: title of the window
    :type title: str
    :param width: window width
    :type width: int
    :param height: window height
    :type height: int
    :param timeout: close the window after n seconds
    :type timeout: int
    :return: The value selected by the user
    :rtype: float
    """
    # Run the dialog modally, then hand back whatever the user picked.
    widget = ZScale(text, value, min, max, step, draw_value, title, width, height, timeout)
    widget.run()
    return widget.response
|
def fit(self, X, y, sample_weight=None, eval_set=None, eval_metric=None, early_stopping_rounds=None, verbose=True):  # pylint: disable=attribute-defined-outside-init,arguments-differ
    """Fit gradient boosting classifier.

    Parameters
    ----------
    X : array_like
        Feature matrix
    y : array_like
        Labels
    sample_weight : array_like
        Weight for each instance
    eval_set : list, optional
        A list of (X, y) pairs to use as a validation set for early-stopping
    eval_metric : str, callable, optional
        If a str, should be a built-in evaluation metric to use. See
        doc/parameter.md. If callable, a custom evaluation metric. The call
        signature is func(y_predicted, y_true) where y_true will be a DMatrix
        object such that you may need to call the get_label method. It must
        return a str, value pair where the str is a name for the evaluation
        and value is the value of the evaluation function. This objective is
        always minimized.
    early_stopping_rounds : int, optional
        Activates early stopping. Validation error needs to decrease at
        least every <early_stopping_rounds> round(s) to continue training.
        Requires at least one item in evals. If there's more than one,
        will use the last. Returns the model from the last iteration
        (not the best one). If early stopping occurs, the model will
        have two additional fields: bst.best_score and bst.best_iteration.
    verbose : bool
        If `verbose` and an evaluation set is used, writes the evaluation
        metric measured on the validation set to stderr.
    """
    evals_result = {}
    self.classes_ = list(np.unique(y))
    self.n_classes_ = len(self.classes_)
    if self.n_classes_ > 2:
        # Switch to using a multiclass objective in the underlying XGB instance
        self.objective = "multi:softprob"
        xgb_options = self.get_xgb_params()
        xgb_options['num_class'] = self.n_classes_
    else:
        xgb_options = self.get_xgb_params()
    # A callable metric is passed to train() as `feval`; a string metric
    # goes into the booster parameters as "eval_metric" instead.
    feval = eval_metric if callable(eval_metric) else None
    if eval_metric is not None:
        if callable(eval_metric):
            eval_metric = None
        else:
            xgb_options.update({"eval_metric": eval_metric})
    if eval_set is not None:
        # TODO: use sample_weight if given?
        evals = list(DMatrix(x[0], label=x[1]) for x in eval_set)
        nevals = len(evals)
        eval_names = ["validation_{}".format(i) for i in range(nevals)]
        evals = list(zip(evals, eval_names))
    else:
        evals = ()
    # Encode labels to 0..n_classes-1 as required by xgboost.
    self._le = LabelEncoder().fit(y)
    training_labels = self._le.transform(y)
    if sample_weight is not None:
        train_dmatrix = DMatrix(X, label=training_labels, weight=sample_weight, missing=self.missing)
    else:
        train_dmatrix = DMatrix(X, label=training_labels, missing=self.missing)
    self._Booster = train(xgb_options, train_dmatrix, self.n_estimators, evals=evals, early_stopping_rounds=early_stopping_rounds, evals_result=evals_result, feval=feval, verbose_eval=verbose)
    if evals_result:
        # Re-key each per-dataset result dict by its metric name before
        # exposing it on the estimator.
        for val in evals_result.items():
            evals_result_key = list(val[1].keys())[0]
            evals_result[val[0]][evals_result_key] = val[1][evals_result_key]
        self.evals_result_ = evals_result
    if early_stopping_rounds is not None:
        # Surface early-stopping results from the underlying booster.
        self.best_score = self._Booster.best_score
        self.best_iteration = self._Booster.best_iteration
    return self
|
def hacking_assert_is_none(logical_line, noqa):
    """Use assertIs(Not)None to check for None in assertions.

    Okay: self.assertEqual('foo', 'bar')
    Okay: self.assertNotEqual('foo', {}.get('bar', None))
    Okay: self.assertIs('foo', 'bar')
    Okay: self.assertIsNot('foo', 'bar', None)
    Okay: foo(self.assertIsNot('foo', 'bar'))
    H203: self.assertEqual(None, 'foo')
    H203: self.assertNotEqual('foo', None)
    H203: self.assertIs(None, 'foo', 'bar')
    H203: self.assertIsNot('foo', None, 'bar')
    H203: foo(self.assertIsNot('foo', None, 'bar'))
    Okay: self.assertEqual(None, 'foo')  # noqa
    Okay: self.assertIs(None, 'foo')  # noqa
    Okay: self.assertIsNone('foo')
    """
    if noqa:
        return
    for func_name in ('assertEqual', 'assertIs', 'assertNotEqual', 'assertIsNot'):
        position = logical_line.find('.%s(' % func_name)
        if position < 0:
            continue
        # Walk the parsed line looking for a literal None argument.
        checker = NoneArgChecker(func_name)
        checker.visit(ast.parse(logical_line))
        if checker.none_found:
            yield position + 1, "H203: Use assertIs(Not)None to check for None"
|
def check_member_pool(self, member, pool_name):
    '''Check a pool member exists in a specific pool'''
    # Query the LTM pool once, then scan the returned member addresses.
    pool_members = self.bigIP.LocalLB.Pool.get_member(pool_names=[pool_name])[0]
    return any(member == entry.address for entry in pool_members)
|
def fetch(cls, id, service=None, endpoint=None, *args, **kwargs):
    """Customize fetch because it lives on a special endpoint."""
    if service is None and endpoint is None:
        raise InvalidArguments(service, endpoint)
    if endpoint is None:
        # Accept either an Entity (take its id) or a raw service id string.
        service_id = service['id'] if isinstance(service, Entity) else service
        endpoint = 'services/{0}/integrations'.format(service_id)
    # Delegate to the base Entity.fetch with the derived endpoint.
    base_fetch = getattr(Entity, 'fetch').__func__
    return base_fetch(cls, id, endpoint=endpoint, *args, **kwargs)
|
def add_providers(self, *providers: Type[BaseProvider]) -> None:
    """Add a lot of custom providers to Generic() object.

    :param providers: Custom providers.
    :return: None
    """
    for custom_provider in providers:
        self.add_provider(custom_provider)
|
def floating_ip_pool_list(self):
    '''List all floating IP pools

    .. versionadded:: 2016.3.0
    '''
    conn = self.compute_conn
    # Map each pool name to a small info dict (currently just the name).
    return {pool.name: {'name': pool.name} for pool in conn.floating_ip_pools.list()}
|
def note_create_update(self, post_id=None, coor_x=None, coor_y=None, width=None, height=None, is_active=None, body=None, note_id=None):
    """Function to create or update note (Requires login) (UNTESTED).

    Parameters:
        post_id (int): The post id number this note belongs to.
        coor_x (int): The X coordinate of the note.
        coor_y (int): The Y coordinate of the note.
        width (int): The width of the note.
        height (int): The height of the note.
        is_active (int): Whether or not the note is visible. Set to 1 for
            active, 0 for inactive.
        body (str): The note message.
        note_id (int): If you are updating a note, this is the note id
            number to update.
    """
    params = {
        'id': note_id,
        'note[post]': post_id,
        'note[x]': coor_x,
        'note[y]': coor_y,
        'note[width]': width,
        'note[height]': height,
        'note[body]': body,
        'note[is_active]': is_active,
    }
    return self._get('note/update', params, method='POST')
|
def _init_map(self):
    """Initialize the underlying form map for an edX drag-and-drop question."""
    # Chain initialization up the MRO first, then mix in the text and
    # files record maps explicitly (cooperative records not reached via
    # super()).
    super(EdXDragAndDropQuestionFormRecord, self)._init_map()
    QuestionTextFormRecord._init_map(self)
    QuestionFilesFormRecord._init_map(self)
    # Drag-and-drop questions start with empty question text.
    self.my_osid_object_form._my_map['text']['text'] = ''
|
def on_connection_open_error(self, connection, error):
    """Invoked if the connection to RabbitMQ can not be made.

    :type connection: pika.TornadoConnection
    :param Exception error: The exception indicating failure
    """
    LOGGER.critical('Could not connect to RabbitMQ (%s): %r', connection, error)
    # Mark the connection closed, then schedule a reconnect attempt.
    self.state = self.STATE_CLOSED
    self._reconnect()
|
def append(self, item, parent=None, select=False):
    """Add an item to the end of the list.

    :param item: the item to be added
    :param parent: the parent item to add this as a child of, or None for
        a top-level node
    :param select: whether the item should be selected after adding
    :raises ValueError: if *item* is already present
    """
    if item in self:
        raise ValueError("item %s already in list" % item)
    giter = self._iter_for(parent) if parent is not None else None
    row_iter = self.model.append(giter, (item,))
    # Remember the model iter so the item can be located/removed later.
    self._id_to_iter[id(item)] = row_iter
    if select:
        self.selected_item = item
|
def parse(self, items):
    """Parse `主题`, `时间`, `场馆`, `票价` in every item."""
    rows = []
    for item in items:
        theme = colored.green(item.find(class_='ico').a.text.strip())
        raw = item.find(class_='mt10').text.strip()
        fields = re.sub('\s+', ' ', raw).split(':')
        show_time = fields[1][:-3]
        place = fields[2][:-7]
        # display time below theme
        theme_with_time = '\n'.join([theme, colored.red(show_time)])
        price = item.find(class_='price-sort').text.strip()
        rows.append([theme_with_time, price, place])
    return rows
|
def start(self):
    """Starts the background thread.

    Additionally, this registers a handler for process exit to attempt
    to send any pending log entries before shutdown.
    """
    # Serialize start/stop so concurrent callers cannot spawn two workers.
    with self._operational_lock:
        if self.is_alive:
            # Worker already running; nothing to do.
            return
        self._thread = threading.Thread(target=self._thread_main, name=_WORKER_THREAD_NAME)
        # Daemonize so a stuck worker cannot block interpreter shutdown.
        self._thread.daemon = True
        self._thread.start()
        # Attempt to flush pending entries at interpreter exit.
        atexit.register(self._main_thread_terminated)
|
def stdio(filters=None, search_dirs=None, data_dir=True, sys_path=True, panfl_=False, input_stream=None, output_stream=None):
    """Reads JSON from stdin and second CLI argument: ``sys.argv[1]``.
    Dumps JSON doc to the stdout.

    :param filters: Union[List[str], None]
        if None then read from metadata
    :param search_dirs: Union[List[str], None]
        if None then read from metadata
    :param data_dir: bool
    :param sys_path: bool
    :param panfl_: bool
    :param input_stream: io.StringIO or None
        for debug purpose
    :param output_stream: io.StringIO or None
        for debug purpose
    :return: None
    """
    doc = load(input_stream)
    # meta = doc.metadata  # Local variable 'meta' value is not used
    verbose = doc.get_metadata('panflute-verbose', False)
    if search_dirs is None:
        # metadata 'panflute-path' can be a list, a string, or missing
        # `search_dirs` should be a list of str
        search_dirs = doc.get_metadata('panflute-path', [])
        if type(search_dirs) != list:
            search_dirs = [search_dirs]
        # The two pseudo-entries below act as flags rather than paths and
        # are stripped from the list after being applied.
        if '--data-dir' in search_dirs:
            data_dir = True
        if '--no-sys-path' in search_dirs:
            sys_path = False
        search_dirs = [dir_ for dir_ in search_dirs if dir_ not in ('--data-dir', '--no-sys-path')]
    if verbose:
        debug('panflute: data_dir={} sys_path={}'.format(data_dir, sys_path))
    # Expand ~ and environment variables in every search dir.
    search_dirs = [p.normpath(p.expanduser(p.expandvars(dir_))) for dir_ in search_dirs]
    if not panfl_:
        # default panflute behaviour:
        search_dirs.append('.')
        if data_dir:
            search_dirs.append(get_filter_dir())
        if sys_path:
            search_dirs += sys.path
    else:
        # panfl/pandoctools behaviour:
        if data_dir:
            search_dirs.append(get_filter_dir())
        if sys_path:
            search_dirs += reduced_sys_path
    # Display message (tests that everything is working ok)
    msg = doc.get_metadata('panflute-echo', False)
    if msg:
        debug(msg)
    if filters is None:
        # metadata 'panflute-filters' can be a list, a string, or missing
        # `filters` should be a list of str
        filters = doc.get_metadata('panflute-filters', [])
        if type(filters) != list:
            filters = [filters]
    if filters:
        if verbose:
            msg = "panflute: will run the following filters:"
            debug(msg, ' '.join(filters))
        doc = autorun_filters(filters, doc, search_dirs, verbose)
    elif verbose:
        debug("panflute: no filters were provided")
    dump(doc, output_stream)
|
async def put(self, path=''):
    """Publish a notebook on a given path.

    The payload directly matches the contents API for PUT.
    """
    self.log.info("Attempt publishing to %s", path)
    # Guard clauses: a real target path and a non-empty body are required.
    if path in ('', '/'):
        raise web.HTTPError(400, "Must provide a path for publishing")
    model = self.get_json_body()
    if not model:
        raise web.HTTPError(400, "Cannot publish an empty model")
    await self._publish(model, path.lstrip('/'))
|
def credentials_delegated ( self ) :
    """Checks if credentials are delegated ( server mode ) .

    :return: truthy value if credentials are delegated, otherwise falsy.
        Callers should rely on truthiness only, not a strict bool.
    """
    # The ISC_REQ_DELEGATE bit must be present in the negotiated SSPI flags.
    # NOTE(review): the second operand ``(self._gss_srv_ctxt_status or
    # self._gss_flags)`` is truthy whenever any flag is set at all, so the
    # result is effectively governed by the DELEGATE bit alone -- confirm
    # the intended semantics.
    return self . _gss_flags & sspicon . ISC_REQ_DELEGATE and ( self . _gss_srv_ctxt_status or self . _gss_flags )
|
def _set_cluster ( self , v , load = False ) :
    """Setter method for cluster, mapped from YANG variable /mgmt_cluster/cluster (container)

    If this variable is read-only (config: false) in the source YANG file,
    then _set_cluster is considered as a private method. Backends looking
    to populate this variable should do so via calling
    thisObj._set_cluster() directly.

    :param v: value to assign; coerced into a YANGDynClass container
    :param load: unused here; conventional flag in generated pyangbind code
    :raises ValueError: if ``v`` cannot be coerced into the container type
    """
    # Unwrap pyangbind typed values back to their base type before coercion.
    if hasattr ( v , "_utype" ) :
        v = v . _utype ( v )
    try :
        # Auto-generated pyangbind coercion: wraps the value with full YANG
        # metadata (paths, REST names, tailf extensions).
        t = YANGDynClass ( v , base = cluster . cluster , is_container = 'container' , presence = False , yang_name = "cluster" , rest_name = "cluster" , parent = self , path_helper = self . _path_helper , extmethods = self . _extmethods , register_paths = True , extensions = { u'tailf-common' : { u'info' : u'Cluster Management virtual IP Configuration' , u'cli-incomplete-no' : None } } , namespace = 'http://brocade.com/ns/brocade-cluster' , defining_module = 'brocade-cluster' , yang_type = 'container' , is_config = True )
    except ( TypeError , ValueError ) :
        # Re-raise with the generated type description so callers can see
        # exactly which container type was expected.
        raise ValueError ( { 'error-string' : """cluster must be of a type compatible with container""" , 'defined-type' : "container" , 'generated-type' : """YANGDynClass(base=cluster.cluster, is_container='container', presence=False, yang_name="cluster", rest_name="cluster", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Cluster Management virtual IP Configuration', u'cli-incomplete-no': None}}, namespace='http://brocade.com/ns/brocade-cluster', defining_module='brocade-cluster', yang_type='container', is_config=True)""" , } )
    self . __cluster = t
    # Notify the parent object of the change, if it supports notification.
    if hasattr ( self , '_set' ) :
        self . _set ( )
|
def list_str_summarized(list_, list_name, maxlen=5):
    """Render *list_* for display: the full repr when it is short,
    just its length once it exceeds *maxlen* elements.
    """
    size = len(list_)
    if size <= maxlen:
        return '%s=%r' % (list_name, list_)
    return 'len(%s)=%d' % (list_name, size)
|
def help_center_user_segment_create(self, data, **kwargs):
    """Create a Help Center user segment.

    See https://developer.zendesk.com/rest_api/docs/help_center/user_segments#create-user-segment
    """
    # POST the payload straight through to the user-segments collection.
    return self.call(
        "/api/v2/help_center/user_segments.json",
        method="POST",
        data=data,
        **kwargs
    )
|
def crypto_box_keypair ( ) :
    """Returns a randomly generated public and secret key.

    :rtype: (bytes(public_key), bytes(secret_key))
    :raises exc.RuntimeError: if the underlying library call fails
    """
    # Allocate C buffers sized for a curve25519 box key pair.
    pk = ffi . new ( "unsigned char[]" , crypto_box_PUBLICKEYBYTES )
    sk = ffi . new ( "unsigned char[]" , crypto_box_SECRETKEYBYTES )
    # The library fills both buffers; any nonzero return is unexpected.
    rc = lib . crypto_box_keypair ( pk , sk )
    ensure ( rc == 0 , 'Unexpected library error' , raising = exc . RuntimeError )
    # Slicing an ffi.buffer copies the C memory into immutable Python bytes.
    return ( ffi . buffer ( pk , crypto_box_PUBLICKEYBYTES ) [ : ] , ffi . buffer ( sk , crypto_box_SECRETKEYBYTES ) [ : ] , )
|
def _is_valid_netmask ( self , netmask ) :
"""Verify that the netmask is valid .
Args :
netmask : A string , either a prefix or dotted decimal
netmask .
Returns :
A boolean , True if the prefix represents a valid IPv4
netmask ."""
|
mask = netmask . split ( '.' )
if len ( mask ) == 4 :
try :
for x in mask :
if int ( x ) not in self . _valid_mask_octets :
return False
except ValueError : # Found something that isn ' t an integer or isn ' t valid
return False
for idx , y in enumerate ( mask ) :
if idx > 0 and y > mask [ idx - 1 ] :
return False
return True
try :
netmask = int ( netmask )
except ValueError :
return False
return 0 <= netmask <= self . _max_prefixlen
|
def _true_anomaly ( M , ecc , itermax = 8 ) :
r"""Calculation of true and eccentric anomaly in Kepler orbits .
` ` M ` ` is the phase of the star , ` ` ecc ` ` is the eccentricity
See p . 39 of Hilditch , ' An Introduction To Close Binary Stars ' :
Kepler ' s equation :
. . math : :
E - e \ sin E = \ frac { 2 \ pi } { P } ( t - T )
with : math : ` E ` the eccentric anomaly . The right hand size denotes the
observed phase : math : ` M ` . This function returns the true anomaly , which is
the position angle of the star in the orbit ( : math : ` \ theta ` in Hilditch '
book ) . The relationship between the eccentric and true anomaly is as
follows :
. . math : :
\ tan ( \ theta / 2 ) = \ sqrt { \ frac { 1 + e } { 1 - e } } \ tan ( E / 2)
@ parameter M : phase
@ type M : float
@ parameter ecc : eccentricity
@ type ecc : float
@ keyword itermax : maximum number of iterations
@ type itermax : integer
@ return : eccentric anomaly ( E ) , true anomaly ( theta )
@ rtype : float , float"""
|
# Initial value
Fn = M + ecc * sin ( M ) + ecc ** 2 / 2. * sin ( 2 * M )
# Iterative solving of the transcendent Kepler ' s equation
for i in range ( itermax ) :
F = Fn
Mn = F - ecc * sin ( F )
Fn = F + ( M - Mn ) / ( 1. - ecc * cos ( F ) )
keep = F != 0
# take care of zerodivision
if hasattr ( F , '__iter__' ) :
if np . all ( abs ( ( Fn - F ) [ keep ] / F [ keep ] ) < 0.00001 ) :
break
elif ( abs ( ( Fn - F ) / F ) < 0.00001 ) :
break
# relationship between true anomaly ( theta ) and eccentric anomaly ( Fn )
true_an = 2. * arctan ( sqrt ( ( 1. + ecc ) / ( 1. - ecc ) ) * tan ( Fn / 2. ) )
return Fn , true_an
|
def handle_emphasis(self, start, tag_style, parent_style):
    """handles various text emphases

    :param start: True when entering the tag, False when leaving it
    :param tag_style: CSS style dict of the current tag
    :param parent_style: CSS style dict of the enclosing tag
    """
    tag_emphasis = google_text_emphasis(tag_style)
    parent_emphasis = google_text_emphasis(parent_style)

    # handle Google's text emphasis
    strikethrough = 'line-through' in tag_emphasis and self.hide_strikethrough
    # Only open/close a marker when the emphasis actually changes relative
    # to the parent element (avoids nested duplicate markers).
    bold = 'bold' in tag_emphasis and not 'bold' in parent_emphasis
    italic = 'italic' in tag_emphasis and not 'italic' in parent_emphasis
    fixed = google_fixed_width_font(tag_style) and not google_fixed_width_font(parent_style) and not self.pre

    if start:
        # crossed-out text must be handled before other attributes
        # in order not to output qualifiers unnecessarily
        if bold or italic or fixed:
            self.emphasis += 1
        if strikethrough:
            # quiet > 0 suppresses output entirely (hidden text)
            self.quiet += 1
        if italic:
            self.o(self.emphasis_mark)
            self.drop_white_space += 1
        if bold:
            self.o(self.strong_mark)
            self.drop_white_space += 1
        if fixed:
            self.o('`')
            self.drop_white_space += 1
            self.code = True
    else:
        if bold or italic or fixed:
            # there must not be whitespace before closing emphasis mark
            self.emphasis -= 1
            self.space = 0
            self.outtext = self.outtext.rstrip()
        # Closing order mirrors the opening order; drop_white_space > 0
        # means nothing was emitted inside the span, so the opening marker
        # is removed instead of a closing one being written.
        if fixed:
            if self.drop_white_space:
                # empty emphasis, drop it
                self.drop_last(1)
                self.drop_white_space -= 1
            else:
                self.o('`')
            self.code = False
        if bold:
            if self.drop_white_space:
                # empty emphasis, drop it (strong mark is 2 chars)
                self.drop_last(2)
                self.drop_white_space -= 1
            else:
                self.o(self.strong_mark)
        if italic:
            if self.drop_white_space:
                # empty emphasis, drop it
                self.drop_last(1)
                self.drop_white_space -= 1
            else:
                self.o(self.emphasis_mark)
        # space is only allowed after *all* emphasis marks
        if (bold or italic) and not self.emphasis:
            self.o(" ")
        if strikethrough:
            self.quiet -= 1
|
def get_calc_id(db, datadir, job_id=None):
    """Return the latest calc_id by looking both at the datastore
    and the database.

    :param db: a :class:`openquake.server.dbapi.Db` instance
    :param datadir: the directory containing the datastores
    :param job_id: a job ID; if None, uses the latest job ID
    """
    calc_ids = datastore.get_calc_ids(datadir)
    latest_calc = calc_ids[-1] if calc_ids else 0
    if job_id is None:
        # Fall back on the sqlite autoincrement counter for the job table.
        try:
            job_id = db('SELECT seq FROM sqlite_sequence WHERE name="job"',
                        scalar=True)
        except NotFound:
            job_id = 0
    return max(latest_calc, job_id)
|
def load_user(self, uid):
    '''a method to retrieve the account details of a user in the bucket

    :param uid: string with id of user in bucket
    :return: dictionary with account fields for user
    '''
    method_title = '%s.load_user' % self.__class__.__name__

    # validate inputs against the model's field definitions
    for field_name, field_value in {'uid': uid}.items():
        if field_value:
            object_title = '%s(%s=%s)' % (method_title, field_name, str(field_value))
            self.fields.validate(field_value, '.%s' % field_name, object_title)

    # construct the user record url, send the request and unwrap the json
    url = self.bucket_url + '/_user/%s' % uid
    response = requests.get(url)
    return response.json()
|
def search(self, category, term='', index=0, count=100):
    """Search for an item in a category.

    Args:
        category (str): The search category to use. Standard Sonos search
            categories are 'artists', 'albums', 'tracks', 'playlists',
            'genres', 'stations', 'tags'. Not all are available for each
            music service. Call available_search_categories for a list for
            this service.
        term (str): The term to search for.
        index (int): The starting index. Default 0.
        count (int): The maximum number of items to return. Default 100.

    Returns:
        ~collections.OrderedDict: The search results, or `None`.

    See also:
        The Sonos `search API <http://musicpartners.sonos.com/node/86>`_
    """
    search_category = self._get_search_prefix_map().get(category, None)
    if search_category is None:
        raise MusicServiceException(
            "%s does not support the '%s' search category" % (
                self.service_name, category))
    # Build the SOAP argument list and hand the reply to the parser.
    payload = [
        ('id', search_category),
        ('term', term),
        ('index', index),
        ('count', count),
    ]
    response = self.soap_client.call('search', payload)
    return parse_response(self, response, category)
|
def build_dependencies(self):
    """Build the dependencies for this module.

    Parse the code with ast, find all the import statements, convert
    them into Dependency objects.
    """
    # Resolve import targets against the highest-level container known:
    # the DSM when attached, otherwise the root package.
    highest = self.dsm or self.root
    if self is highest:
        # This node is its own highest container; use an empty LeafNode so
        # get_target() below simply resolves nothing.
        highest = LeafNode()
    for _import in self.parse_code():
        target = highest.get_target(_import['target'])
        if target:
            # When the imported name is not the target node itself, record
            # the specific member being imported under 'what'.
            what = _import['target'].split('.')[-1]
            if what != target.name:
                _import['what'] = what
            _import['target'] = target
        # NOTE(review): appears to append for every parsed import, resolved
        # or not -- unresolved ones keep their string target. Confirm.
        self.dependencies.append(Dependency(source=self, **_import))
|
def _get_formatter(fmt):
    """
    Args:
        fmt (str | unicode): Format specification

    Returns:
        (logging.Formatter): Associated logging formatter
    """
    # Substitute the %(timezone)s token with the globally configured
    # timezone (padding around the token is handled by the helper).
    fmt = _replace_and_pad(fmt, "%(timezone)s", LogManager.spec.timezone)
    return logging.Formatter(fmt)
|
def resource_etree_element(self, resource, element_name='url'):
    """Return xml.etree.ElementTree.Element representing the resource.

    Builds an element (default <url>) of the sitemap form with enclosed
    properties based on the resource, including the ResourceSync
    extensions <rs:md> and <rs:ln>.
    """
    elem = Element(element_name)
    loc = Element('loc')
    loc.text = resource.uri
    elem.append(loc)
    # <lastmod>: W3C Datetime in UTC, only when a timestamp is known
    if resource.timestamp is not None:
        lastmod = Element('lastmod')
        lastmod.text = str(resource.lastmod)
        elem.append(lastmod)
    # Collect metadata attributes for an optional <rs:md> element
    md_attributes = {}
    for attribute in ('capability', 'change', 'hash', 'length', 'path',
                      'mime_type', 'md_at', 'md_completed', 'md_from',
                      'md_until'):
        value = getattr(resource, attribute, None)
        if value is not None:
            md_attributes[self._xml_att_name(attribute)] = str(value)
    if md_attributes:
        elem.append(Element('rs:md', md_attributes))
    # add any <rs:ln>
    if getattr(resource, 'ln', None) is not None:
        for link in resource.ln:
            self.add_element_with_atts_to_etree(elem, 'rs:ln', link)
    if self.pretty_xml:
        elem.tail = "\n"
    return elem
|
def increment_key(key):
    """given a key increment the value

    When a cool-off time is configured, the key's TTL is (re)set in the
    same pipeline so the counter expires on schedule.
    """
    pipeline = REDIS_SERVER.pipeline()
    pipeline.incr(key, 1)
    cooloff = config.COOLOFF_TIME
    if cooloff:
        pipeline.expire(key, cooloff)
    # execute() returns one result per queued command; INCR's result comes
    # first and is the new counter value.
    return pipeline.execute()[0]
|
def SETUP(self):
    """Set up stream transport.

    Assembles the RTSP SETUP request from the session's control URL and
    the pre-built header fragments stored on the instance.
    """
    parts = [
        "SETUP " + self.session.control_url + " RTSP/1.0\r\n",
        self.sequence,
        self.authentication,
        self.user_agent,
        self.transport,
        '\r\n',
    ]
    return ''.join(parts)
|
def _sensoryComputeInferenceMode(self, anchorInput):
    """Infer the location from sensory input. Activate any cells with enough active
    synapses to this sensory input. Deactivate all other cells.

    @param anchorInput (numpy array)
    A sensory input. This will often come from a feature-location pair layer.
    """
    if len(anchorInput) == 0:
        # Nothing to anchor on; leave the current state untouched.
        return

    # Overlap of every segment with the active input bits.
    overlaps = self.connections.computeActivity(anchorInput, self.connectedPermanence)
    # Segments whose overlap clears the activation threshold.
    activeSegments = np.where(overlaps >= self.activationThreshold)[0]
    # Cells owning at least one active segment are supported by this input.
    sensorySupportedCells = np.unique(self.connections.mapSegmentsToCells(activeSegments))

    # Restrict the phase bumps to the supported cells, then recompute which
    # cells are active from the updated bumps.
    self.bumpPhases = self.cellPhases[:, sensorySupportedCells]
    self._computeActiveCells()
    self.activeSegments = activeSegments
    self.sensoryAssociatedCells = sensorySupportedCells
|
def as_categorical_frame(self, index, columns, name=None):
    """Coerce self into a pandas DataFrame of Categoricals.

    :param index: row labels; its length must equal ``self.shape[0]``
    :param columns: column labels; its length must equal ``self.shape[1]``
    :param name: optional name for the intermediate Series
    :raises ValueError: if self is not 2D or the labels don't match shape
    """
    if len(self.shape) != 2:
        raise ValueError("Can't convert a non-2D LabelArray into a DataFrame.")
    expected_shape = (len(index), len(columns))
    if expected_shape != self.shape:
        raise ValueError("Can't construct a DataFrame with provided indices:\n\n" "LabelArray shape is {actual}, but index and columns imply " "that shape should be {expected}.".format(actual=self.shape, expected=expected_shape,))
    # Flatten to 1D, convert to a Categorical, then unstack the
    # (index, columns) MultiIndex back into a 2D frame.
    return pd.Series(index=pd.MultiIndex.from_product([index, columns]), data=self.ravel().as_categorical(), name=name,).unstack()
|
def purge(self):
    """Deletes all tasks in the queue."""
    try:
        # Fast path: the backend API supports purging directly.
        return self._api.purge()
    except AttributeError:
        # Slow path: repeatedly list and delete until the queue drains.
        while True:
            tasks = self.list()
            if not tasks:
                break
            for task in tasks:
                self.delete(task)
            self.wait()
        return self
|
def init_validator(required, cls, *additional_validators):
    """Create an attrs validator based on the cls provided and required setting.

    :param bool required: whether the field is required in a given model.
    :param cls: the expected class type of object value.
    :param additional_validators: extra validators chained before the
        instance_of check.
    :return: attrs validator chained correctly (e.g. optional(instance_of))
    """
    base = validators.instance_of(cls)
    if additional_validators:
        # Extra validators run first; the instance_of check comes last.
        chain = list(additional_validators) + [base]
        base = composite(*chain)
    return base if required else validators.optional(base)
|
def set_acceptance_filter_bypass(self, bus, bypass):
    """Control the status of CAN acceptance filter for a bus.

    Returns True if the command was successful.
    """
    return self._check_command_response_status({
        "command": "af_bypass",
        "bus": bus,
        "bypass": bypass,
    })
|
def copy_dir(self, path):
    """Recursively copy each directory in *path* into the archive.

    :param path: iterable of directory paths to copy
    :return: the *path* argument, unchanged
    """
    for directory in path:
        # BUG FIX: the original tested os.path.isdir(path) -- the whole
        # collection -- instead of the current item, so the branch could
        # never evaluate the individual directory as intended.
        if os.path.isdir(directory):
            full_path = os.path.join(self.archive_dir, directory.lstrip('/'))
            logger.debug("Copying %s to %s", directory, full_path)
            shutil.copytree(directory, full_path)
        else:
            logger.debug("Not a directory: %s", directory)
    return path
|
def get_field_value(self, instance, field_name):
    """Given an instance, and the name of an attribute, returns the value
    of that attribute on the instance.

    Default behavior will map the ``percent`` attribute to ``id``.
    """
    # XXX: can we come up w/ a better API?
    # ``percent`` is an alias for the ``id`` column.
    lookup_name = 'id' if field_name == 'percent' else field_name
    value = getattr(instance, lookup_name)
    # Accessors may be methods; call them to get the actual value.
    return value() if callable(value) else value
|
def setup_domain_socket(location):
    '''Setup Domain Socket

    Setup a connection to a Unix Domain Socket

    @param location: str The path to the Unix Domain Socket to connect to.
    @return <class 'socket._socketobject'>
    '''
    # NOTE(review): ``timeout`` is a module-level name defined elsewhere in
    # this file -- confirm it is always set before this is called.
    sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
    # Set the timeout before connecting so it also bounds connect().
    sock.settimeout(timeout)
    sock.connect(location)
    return sock
|
def get_all_roles(path_prefix=None, region=None, key=None, keyid=None,
                  profile=None):
    '''Get and return all IAM role details, starting at the optional path.

    .. versionadded:: 2016.3.0

    CLI Example:

        salt-call boto_iam.get_all_roles
    '''
    conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
    if not conn:
        return None
    # The API pages its results; keep following the marker until exhausted.
    response = conn.list_roles(path_prefix=path_prefix)
    result = response.list_roles_response.list_roles_result
    roles = result.roles
    marker = getattr(result, 'marker', None)
    while marker:
        response = conn.list_roles(path_prefix=path_prefix, marker=marker)
        result = response.list_roles_response.list_roles_result
        roles = roles + result.roles
        marker = getattr(result, 'marker', None)
    return roles
|
def unpack_auth_b64(self, docker_registry):
    """Decode and unpack base64 'auth' credentials from config file.

    :param docker_registry: str, registry reference in config file
    :return: namedtuple, UnpackedAuth (or None if no 'auth' is available)
    :raises ValueError: if the decoded value is not 'username:password'
    """
    UnpackedAuth = namedtuple('UnpackedAuth', ['raw_str', 'username', 'password'])
    auth_b64 = self.get_credentials(docker_registry).get('auth')
    if not auth_b64:
        return None
    decoded = b64decode(auth_b64).decode('utf-8')
    # Split on the first colon only: passwords may themselves contain ':'.
    username, separator, password = decoded.partition(':')
    if not separator:
        raise ValueError("Failed to parse 'auth' in '%s'" % self.json_secret_path)
    return UnpackedAuth(decoded, username, password)
|
def __install_perforce(self, config):
    """install perforce binary

    :param config: mapping with an optional 'version' key (default 'r13.2')
    :return: False on unsupported systems, otherwise the result of the
        platform-specific p4v installer
    """
    if not system.is_64_bit():
        self.logger.warn("Perforce formula is only designed for 64 bit systems! Not install executables...")
        return False
    # Pick the download set for the requested release and this platform.
    version = config.get('version', 'r13.2')
    key = 'osx' if system.is_osx() else 'linux'
    perforce_packages = package_dict[version][key]
    d = self.directory.install_directory(self.feature_name)
    if not os.path.exists(d):
        os.makedirs(d)
    self.logger.info("Downloading p4 executable...")
    # Download the command-line client and expose it on the PATH.
    with open(os.path.join(d, "p4"), 'wb+') as fh:
        fh.write(lib.cleaned_request('get', url_prefix + perforce_packages['p4']).content)
    self.directory.symlink_to_bin("p4", os.path.join(d, "p4"))
    self.p4_command = os.path.join(d, "p4")
    self.logger.info("Installing p4v...")
    # The GUI client install differs per platform.
    if system.is_osx():
        return self._install_p4v_osx(url_prefix + perforce_packages['p4v'])
    else:
        return self._install_p4v_linux(url_prefix + perforce_packages['p4v'])
|
def add_agent_cloud(self, agent_cloud):
    """AddAgentCloud.

    [Preview API]

    :param :class:`<TaskAgentCloud> <azure.devops.v5_0.task_agent.models.TaskAgentCloud>` agent_cloud:
    :rtype: :class:`<TaskAgentCloud> <azure.devops.v5_0.task_agent.models.TaskAgentCloud>`
    """
    # Serialize the model, POST it, then deserialize the service reply.
    body = self._serialize.body(agent_cloud, 'TaskAgentCloud')
    raw_response = self._send(
        http_method='POST',
        location_id='bfa72b3d-0fc6-43fb-932b-a7f6559f93b9',
        version='5.0-preview.1',
        content=body)
    return self._deserialize('TaskAgentCloud', raw_response)
|
def cmd_condition(args):
    '''control MAVExplorer conditions'''
    # With no arguments, just report the current condition.
    if not args:
        print("condition is: %s" % mestate.settings.condition)
        return
    condition = ' '.join(args)
    mestate.settings.condition = condition
    # An empty string or an explicit 'clear' resets the condition.
    if not condition or condition == 'clear':
        mestate.settings.condition = None
|
def create_from_other ( Class , other , values = None ) :
"""Create a new Matrix with attributes taken from ` other ` but with the
values taken from ` values ` if provided"""
|
m = Class ( )
m . alphabet = other . alphabet
m . sorted_alphabet = other . sorted_alphabet
m . char_to_index = other . char_to_index
if values is not None :
m . values = values
else :
m . values = other . values
return m
|
def _time_stretcher ( self , stretch_factor ) :
"""Real time time - scale without pitch modification .
: param int i : index of the beginning of the chunk to stretch
: param float stretch _ factor : audio scale factor ( if > 1 speed up the sound else slow it down )
. . warning : : This method needs to store the phase computed from the previous chunk . Thus , it can only be called chunk by chunk ."""
|
start = self . _i2
end = min ( self . _i2 + self . _N , len ( self . _sy ) - ( self . _N + self . _H ) )
if start >= end :
raise StopIteration
# The not so clean code below basically implements a phase vocoder
out = numpy . zeros ( self . _N , dtype = numpy . complex )
while self . _i2 < end :
if self . _i1 + self . _N + self . _H > len ( self . y ) :
raise StopIteration
a , b = self . _i1 , self . _i1 + self . _N
S1 = numpy . fft . fft ( self . _win * self . y [ a : b ] )
S2 = numpy . fft . fft ( self . _win * self . y [ a + self . _H : b + self . _H ] )
self . _phi += ( numpy . angle ( S2 ) - numpy . angle ( S1 ) )
self . _phi = self . _phi - 2.0 * numpy . pi * numpy . round ( self . _phi / ( 2.0 * numpy . pi ) )
out . real , out . imag = numpy . cos ( self . _phi ) , numpy . sin ( self . _phi )
self . _sy [ self . _i2 : self . _i2 + self . _N ] += self . _win * numpy . fft . ifft ( numpy . abs ( S2 ) * out ) . real
self . _i1 += int ( self . _H * self . stretch_factor )
self . _i2 += self . _H
chunk = self . _sy [ start : end ]
if stretch_factor == 1.0 :
chunk = self . y [ start : end ]
return chunk
|
def setFont(self, font):
    """Assigns the font to this widget and all of its children.

    :param font | <QtGui.QFont>
    """
    super(XTimeEdit, self).setFont(font)
    # update the fonts for the time combos, which are separate child
    # widgets and so must be updated explicitly
    self._hourCombo.setFont(font)
    self._minuteCombo.setFont(font)
    self._secondCombo.setFont(font)
    self._timeOfDayCombo.setFont(font)
|
def create(cls, datacenter, memory, cores, ip_version, bandwidth, login,
           password, hostname, image, run, background, sshkey, size, vlan,
           ip, script, script_args, ssh):
    """Create a new virtual machine.

    Builds the API parameter dictionaries from the CLI options, optionally
    provisions a private-VLAN IP, issues the create call, then (in
    interactive mode) follows progress and optionally opens an ssh session.
    """
    from gandi.cli.modules.network import Ip, Iface

    # Non-interactive sessions are forced into background mode.
    if not background and not cls.intty():
        background = True

    datacenter_id_ = int(Datacenter.usable_id(datacenter))

    if not hostname:
        hostname = randomstring('vm')
        # random hostnames start with 'vm'; drop that prefix for the disk
        disk_name = 'sys_%s' % hostname[2:]
    else:
        disk_name = 'sys_%s' % hostname.replace('.', '')

    # Only forward optional parameters that were actually supplied.
    vm_params = {
        'hostname': hostname,
        'datacenter_id': datacenter_id_,
        'memory': memory,
        'cores': cores,
    }
    if login:
        vm_params['login'] = login
    if run:
        vm_params['run'] = run
    if password:
        vm_params['password'] = password
    if ip_version:
        vm_params['ip_version'] = ip_version
    vm_params['bandwidth'] = bandwidth
    if script:
        with open(script) as fd:
            vm_params['script'] = fd.read()
    if script_args:
        vm_params['script_args'] = script_args
    vm_params.update(cls.convert_sshkey(sshkey))

    # XXX: name of disk is limited to 15 chars in ext2fs, ext3fs
    # but api allow 255, so we limit to 15 for now
    disk_params = {'datacenter_id': vm_params['datacenter_id'],
                   'name': disk_name[:15]}
    if size:
        if isinstance(size, tuple):
            prefix, size = size
        disk_params['size'] = size

    sys_disk_id_ = int(Image.usable_id(image, datacenter_id_))

    # Human-readable summary of the IP configuration for the echo below.
    ip_summary = []
    if ip_version == 4:
        ip_summary = ['v4', 'v6']
    elif ip_version == 6:
        ip_summary = ['v6']

    if vlan:
        ip_ = None
        ip_summary.append('private')
        if ip:
            # Reuse the requested private IP if it exists and can be
            # detached from its current interface.
            try:
                ip_ = Ip.info(ip)
            except Exception:
                pass
            else:
                if not Ip._check_and_detach(ip_, None):
                    return
        if ip_:
            iface_id = ip_['iface_id']
        else:
            ip_create = Ip.create(4, vm_params['datacenter_id'], bandwidth, None, vlan, ip)
            iface_id = ip_create['iface_id']

        # if there is a public ip, will attach this one later, else give
        # the iface to vm.create
        if not ip_version:
            vm_params['iface_id'] = iface_id

    result = cls.call('hosting.vm.create_from', vm_params, disk_params, sys_disk_id_)

    cls.echo('* Configuration used: %d cores, %dMb memory, ip %s, ' 'image %s, hostname: %s, datacenter: %s' % (cores, memory, '+'.join(ip_summary), image, hostname, datacenter))

    # background mode, bail out now (skip interactive part)
    if background and (not vlan or not ip_version):
        return result

    # interactive mode, run a progress bar
    cls.echo('Creating your Virtual Machine %s.' % hostname)
    cls.display_progress(result)
    cls.echo('Your Virtual Machine %s has been created.' % hostname)

    # Pull the new VM's id out of the operation list.
    vm_id = None
    for oper in result:
        if oper.get('vm_id'):
            vm_id = oper.get('vm_id')
            break

    # With both a VLAN and a public IP, the private iface is attached after
    # creation (see comment above).
    if vlan and ip_version:
        attach = Iface._attach(iface_id, vm_id)
        if background:
            return attach

    # Without an ssh key there is nothing to connect with; stop here.
    if 'ssh_key' not in vm_params and 'keys' not in vm_params:
        return

    if vm_id and ip_version:
        cls.wait_for_sshd(vm_id)
        if ssh:
            cls.ssh_keyscan(vm_id)
            cls.ssh(vm_id, 'root', None)
|
def _wait_for_request(self, uuid, connection_adapter=None):
    """Wait for RPC request to arrive.

    :param str uuid: Rpc Identifier.
    :param obj connection_adapter: Provide custom connection adapter.
    :return:
    """
    deadline = time.time() + self._timeout
    # Poll until a response for this uuid has been stored, surfacing any
    # connection errors that occur while waiting.
    while not self._response[uuid]:
        connection_adapter.check_for_errors()
        if time.time() > deadline:
            self._raise_rpc_timeout_error(uuid)
        time.sleep(IDLE_WAIT)
|
def _block_shape ( values , ndim = 1 , shape = None ) :
"""guarantee the shape of the values to be at least 1 d"""
|
if values . ndim < ndim :
if shape is None :
shape = values . shape
if not is_extension_array_dtype ( values ) : # TODO : https : / / github . com / pandas - dev / pandas / issues / 23023
# block . shape is incorrect for " 2D " ExtensionArrays
# We can ' t , and don ' t need to , reshape .
values = values . reshape ( tuple ( ( 1 , ) + shape ) )
return values
|
def unpack(self, token):
    """Unpack a received signed or signed and encrypted Json Web Token

    :param token: The Json Web Token
    :return: If decryption and signature verification work the payload
        will be returned as a Message instance if possible.
    :raises KeyError: if no token is supplied
    """
    if not token:
        raise KeyError

    _jwe_header = _jws_header = None

    # Check if it's an encrypted JWT
    darg = {}
    if self.allowed_enc_encs:
        darg['enc'] = self.allowed_enc_encs
    if self.allowed_enc_algs:
        darg['alg'] = self.allowed_enc_algs
    try:
        _decryptor = jwe_factory(token, **darg)
    except (KeyError, HeaderError):
        # Not an encrypted JWT (or headers not acceptable)
        _decryptor = None

    if _decryptor:
        # Yes, try to decode
        _info = self._decrypt(_decryptor, token)
        _jwe_header = _decryptor.jwt.headers
        # Try to find out if the information encrypted was a signed JWT
        try:
            _content_type = _decryptor.jwt.headers['cty']
        except KeyError:
            _content_type = ''
    else:
        # Plain tokens are assumed to be signed JWTs.
        _content_type = 'jwt'
        _info = token

    # If I have reason to believe the information I have is a signed JWT
    if _content_type.lower() == 'jwt':
        # Check that is a signed JWT
        if self.allowed_sign_algs:
            _verifier = jws_factory(_info, alg=self.allowed_sign_algs)
        else:
            _verifier = jws_factory(_info)

        if _verifier:
            _info = self._verify(_verifier, _info)
        else:
            # NOTE(review): bare Exception with no message -- consider a
            # specific error type for "not a valid signed JWT".
            raise Exception()
        _jws_header = _verifier.jwt.headers
    else:
        # So, not a signed JWT
        try:
            # A JSON document?
            _info = json.loads(_info)
        except JSONDecodeError:
            # Oh, no! Not JSON
            return _info
        except TypeError:
            # bytes payload: coerce to text before trying JSON again
            try:
                _info = as_unicode(_info)
                _info = json.loads(_info)
            except JSONDecodeError:
                # Oh, no! Not JSON
                return _info

    # If I know what message class the info should be mapped into
    if self.msg_cls:
        _msg_cls = self.msg_cls
    else:
        try:
            # try to find a issuer specific message class
            _msg_cls = self.iss2msg_cls[_info['iss']]
        except KeyError:
            _msg_cls = None

    if _msg_cls:
        # Verify the payload against the message profile, then attach the
        # JOSE headers so callers can inspect them.
        vp_args = {'skew': self.skew}
        if self.iss:
            vp_args['aud'] = self.iss
        _info = self.verify_profile(_msg_cls, _info, **vp_args)
        _info.jwe_header = _jwe_header
        _info.jws_header = _jws_header
        return _info
    else:
        return _info
|
def _op_method(self, data, extra_factor=1.0):
    """Operator

    This method returns the input data after the singular values have been
    thresholded

    Parameters
    ----------
    data : np.ndarray
        Input data array
    extra_factor : float
        Additional multiplication factor

    Returns
    -------
    np.ndarray
        SVD thresholded data
    """
    # Update threshold with extra factor.
    threshold = self.thresh * extra_factor
    # Threshold the singular values of the matricised cube; the 'ngole'
    # variant additionally uses the linear operator's coefficients.
    # NOTE(review): any other lowr_type leaves data_matrix undefined and
    # raises NameError below -- confirm only these two values occur.
    if self.lowr_type == 'standard':
        data_matrix = svd_thresh(cube2matrix(data), threshold, thresh_type=self.thresh_type)
    elif self.lowr_type == 'ngole':
        data_matrix = svd_thresh_coef(cube2matrix(data), self.operator, threshold, thresh_type=self.thresh_type)
    # Fold the thresholded matrix back into a cube of the original shape.
    new_data = matrix2cube(data_matrix, data.shape[1:])
    # Return updated data.
    return new_data
|
def get_bundles(self):
    # type: () -> List[Bundle]
    """Returns the list of all installed bundles

    :return: the list of all installed bundles, ordered by bundle ID
    """
    with self.__bundles_lock:
        # Snapshot under the lock so the registry cannot change mid-read.
        ordered_ids = sorted(self.__bundles.keys())
        return [self.__bundles[bundle_id] for bundle_id in ordered_ids]
|
def get_build_info(api_instance, build_id=None, keys=DEFAULT_BUILD_KEYS, wait=False):
    """print build info about a job"""
    if build_id:
        build = api_instance.get_build(build_id)
    else:
        build = api_instance.get_last_build()
    if wait:
        build.block_until_complete()
    parts = []
    if 'timestamp' in keys:
        parts.append(str(build.get_timestamp()) + '\n')
    if 'console' in keys:
        parts.append(build.get_console() + '\n')
    if 'scm' in keys:
        # https://github.com/salimfadhley/jenkinsapi/pull/250
        # try/except while this is still occuring
        try:
            parts.append(build.get_revision() + '\n')
        except IndexError:
            pass
    return ''.join(parts)
|
def build_row_dict(cls, row, dialect, deleted=False, user_id=None, use_dirty=True):
    """Builds a dictionary of archive data from row which is suitable for insert.

    NOTE: If `deleted` is False, version ID will be set to an AsIs SQL construct.

    :param row: instance of :class:`~SavageModelMixin`
    :param dialect: :py:class:`~sqlalchemy.engine.interfaces.Dialect`
    :param deleted: whether or not the row is deleted (defaults to False)
    :param user_id: ID of user that is performing the update on this row (defaults to None)
    :param use_dirty: whether to use the dirty fields from row or not (defaults to True)
    :return: a dictionary of archive table column names to values, suitable for insert
    :rtype: dict
    """
    if deleted:
        version = current_version_sql(as_is=True)
    else:
        version = row.version_id
    archive_row = {
        'data': row.to_archivable_dict(dialect, use_dirty=use_dirty),
        'deleted': deleted,
        'updated_at': datetime.now(),
        'version_id': version,
    }
    # Copy the versioned key columns straight from the row.
    for col_name in row.version_columns:
        archive_row[col_name] = utils.get_column_attribute(row, col_name, use_dirty=use_dirty)
    if user_id is not None:
        archive_row['user_id'] = user_id
    return archive_row
|
def simulateCatalog(config, roi=None, lon=None, lat=None):
    """Simulate a catalog object."""
    import ugali.simulation.simulator
    if roi is None:
        # Build the region of interest from the supplied coordinates.
        roi = createROI(config, lon, lat)
    simulator = ugali.simulation.simulator.Simulator(config, roi)
    return simulator.catalog()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.