signature
stringlengths 29
44.1k
| implementation
stringlengths 0
85.2k
|
|---|---|
def grant_assistance(self, column=None, value=None, **kwargs):
    """Many-to-many table connecting grants and assistance.

    Delegates the lookup to ``_resolve_call`` against the
    GIC_GRANT_ASST_PGM table.
    """
    table = 'GIC_GRANT_ASST_PGM'
    return self._resolve_call(table, column, value, **kwargs)
|
def parse_code(code, var_factory, **kwargs):
    """Parse a piece of text and substitute $var by either unique
    variable names or by the given kwargs mapping. Use $$ to escape $.
    Returns a CodeBlock and the resulting variable mapping.

    parse("$foo = $foo + $bar", bar="1")
    ("t1 = t1 + 1", {'foo': 't1', 'bar': '1'})
    """
    block = CodeBlock()
    # Unknown $names get a fresh value from var_factory on first use;
    # explicitly supplied kwargs take precedence.
    defdict = collections.defaultdict(var_factory)
    defdict.update(kwargs)
    indent = -1  # width of one indentation level, learned from the first indented line
    code = code.strip()
    for line in code.splitlines():
        length = len(line)
        line = line.lstrip()
        spaces = length - len(line)
        if spaces:
            if indent < 0:
                # First indented line defines what one level looks like.
                indent = spaces
                level = 1
            else:
                level = spaces // indent
        else:
            level = 0
        # if there is a single variable and the to-be-inserted object
        # is a code block, insert the block with the current indentation level
        if line.startswith("$") and line.count("$") == 1:
            name = line[1:]
            if name in kwargs and isinstance(kwargs[name], CodeBlock):
                kwargs[name].write_into(block, level)
                continue
        block.write_line(string.Template(line).substitute(defdict), level)
    return block, dict(defdict)
|
def create(self, request, *args, **kwargs):
    """Create the object, forcing ``user`` to the authenticated request user.

    Grabbing the user from request.user, and the rest of the method
    is the same as (old-style) ``ModelViewSet.create()``.

    :param request: a WSGI request object
    :param args: inline arguments (optional)
    :param kwargs: keyword arguments (optional)
    :return: `rest_framework.response.Response`
    :raise: 400 when the serializer does not validate
    """
    # Copy so the (possibly immutable) request data can be amended.
    data = get_request_data(request).copy()
    data["user"] = request.user.id
    serializer = self.get_serializer(data=data, files=request.FILES)
    if serializer.is_valid():
        # pre_save/post_save hooks bracket the actual insert (old DRF 2.x API).
        self.pre_save(serializer.object)
        self.object = serializer.save(force_insert=True)
        self.post_save(self.object, created=True)
        headers = self.get_success_headers(serializer.data)
        return Response(serializer.data, status=status.HTTP_201_CREATED, headers=headers)
    return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
|
def set_param(self, params, value=None):
    """Set parameters into the Booster.

    Parameters
    ----------
    params : dict / list / str
        list of key,value pairs, dict of key to value or simply str key
    value : optional
        value of the specified parameter, when params is str key
    """
    if isinstance(params, Mapping):
        params = params.items()
    elif isinstance(params, STRING_TYPES) and value is not None:
        # Single key given as a string with its value in the second argument.
        params = [(params, value)]
    # NOTE(review): a str key with value=None falls through unchanged and is
    # then iterated character-by-character, failing on tuple unpacking —
    # presumably callers never do that; confirm before relying on it.
    for key, val in params:
        _check_call(_LIB.XGBoosterSetParam(self.handle, c_str(key), c_str(str(val))))
|
def thing2place(self, name):
    """Unset a Thing's location, and thus turn it into a Place."""
    # Clear the location record for the thing in the engine's store.
    self.engine._set_thing_loc(self.name, name, None)
    if (self.name, name) in self.engine._node_objs:
        # Replace the cached Thing node object with a Place, repointing
        # every portal that referenced the old Thing to the new Place.
        thing = self.engine._node_objs[self.name, name]
        place = Place(self, name)
        for port in thing.portals():
            port.origin = place
        for port in thing.preportals():
            port.destination = place
        self.engine._node_objs[self.name, name] = place
|
def _get_non_defined ( self , data , class_types ) :
"""returns a list of URIs and blanknodes that are not defined within
the dataset . For example : schema : Person has an associated rdf : type
then it is considered defined .
args :
data : a list of triples
class _ types : list of subjects that are defined in the dataset"""
|
subj_set = set ( [ item [ self . smap ] for item in class_types ] )
non_def_set = set ( [ item [ self . smap ] for item in data ] )
return list ( non_def_set - subj_set )
|
def _pooling_output_shape(input_shape, pool_size=(2, 2), strides=None, padding='VALID'):
    """Helper: compute the output shape for the pooling layer."""
    # Expand the pooling window and strides to full NHWC rank.
    window = (1,) + pool_size + (1,)
    spatial = strides or (1,) * len(pool_size)
    full_strides = (1,) + spatial + (1,)
    pads = padtype_to_pads(input_shape, window, full_strides, padding)
    padded = onp.add(input_shape, onp.add(*zip(*pads)))
    out = onp.floor_divide(onp.subtract(padded, window), full_strides) + 1
    return tuple(out)
|
def compaction(self, request_compaction=False):
    """Retrieve a report on, or request compaction for this instance.

    :param bool request_compaction: A boolean indicating whether or not to
        request compaction.
    """
    url = self._service_url + 'compaction/'
    request_kwargs = self._instances._default_request_kwargs
    # POST requests compaction; GET merely fetches the current report.
    method = requests.post if request_compaction else requests.get
    response = method(url, **request_kwargs)
    return response.json()
|
def getFileFormat(self, name, args):
    """Fetch the content of a file via the configuration and interpolate
    the variables given as arguments.

    :param name: configuration key holding the template file path
    :param args: mapping of str.format() placeholders to values
    :return: the formatted file content, or False when the file is missing
    """
    # Look up the template file name (defaults to "--").
    template_pathname = self.get(name, "--")
    if not os.path.isfile(template_pathname):
        return False
    # Read the template and substitute the provided arguments.
    content = ""
    with open(template_pathname) as fp:  # Create a text/plain message
        content = fp.read().format(**args)
    # Success
    return content
|
def add_node(self, node):
    """Link to the agent from a parent based on the parent's fitness."""
    # Work out which generation the new agent belongs to (integer division
    # via truncation; num_agents includes the node being added).
    num_agents = len(self.nodes(type=Agent))
    curr_generation = int((num_agents - 1) / float(self.generation_size))
    node.generation = curr_generation
    if curr_generation == 0 and self.initial_source:
        # The first generation draws from the original source.
        parent = self._select_oldest_source()
    else:
        # Later generations select a fit parent from the previous generation.
        parent = self._select_fit_node_from_generation(node_type=type(node), generation=curr_generation - 1)
    if parent is not None:
        parent.connect(whom=node)
        parent.transmit(to_whom=node)
|
def _avro_schema(read_session):
    """Extract and parse Avro schema from a read session.

    Args:
        read_session (~google.cloud.bigquery_storage_v1beta1.types.ReadSession):
            The read session associated with this read rows stream. This
            contains the schema, which is required to parse the data blocks.

    Returns:
        Tuple[fastavro.schema, Tuple[str]]:
            A parsed Avro schema, using :func:`fastavro.schema.parse_schema`
            and the column names for a read session.
    """
    raw_schema = json.loads(read_session.avro_schema.schema)
    names = tuple(field["name"] for field in raw_schema["fields"])
    parsed_schema = fastavro.parse_schema(raw_schema)
    return parsed_schema, names
|
def lex(code, lexer):
    """Lex ``code`` with ``lexer`` and return an iterable of tokens."""
    try:
        return lexer.get_tokens(code)
    except TypeError as err:
        message = err.args[0]
        # A lexer *class* (rather than an instance) was passed in:
        # re-raise with a clearer message; anything else propagates as-is.
        if isinstance(message, str) and ('unbound method get_tokens' in message or 'missing 1 required positional argument' in message):
            raise TypeError('lex() argument must be a lexer instance, ' 'not a class')
        raise
|
def shift_right(self, times=1):
    """Find the Location shifted right by ``times`` files.

    :param times: number of files to shift right (default 1)
    :rtype: Location
    :raise IndexError: when the shifted location falls off the board
    """
    try:
        return Location(self._rank, self._file + times)
    except IndexError as e:
        # Chain the original exception so the underlying traceback
        # is preserved for debugging instead of being discarded.
        raise IndexError(e) from e
|
def lowercased_words(content, filter=True, predicate=None):
    """Returns an iterable of lowercased words from the provided text.

    `content`
        A text.
    `filter`
        Indicates if stop words and garbage like "xxxxx" should be removed
        from the word list.
    `predicate`
        An alternative word filter. If it is ``None`` "xxxx", "---",
        default stop words, and words which have no min. length of 3 are
        filtered (iff ``filter`` is set to ``True``).

    >>> list(lowercased_words('Hello and goodbye - - - - -'))
    ['hello', 'goodbye']
    >>> list(lowercased_words('Hello, and goodbye. How are you?'))
    ['hello', 'goodbye']
    """
    # Tokenization and filtering is delegated to words(); this generator
    # merely lowercases each surviving token lazily.
    return (token.lower() for token in words(content, filter, predicate))
|
def _timeout_expired(self):
    """A timeout was supplied during setup, and the time has run out."""
    self._did_timeout = True
    try:
        # Ask the tor child process to terminate gracefully first.
        self.transport.signalProcess('TERM')
    except error.ProcessExitedAlready:  # XXX why don't we just always do this?
        # Process already gone: just drop the transport connection.
        self.transport.loseConnection()
    # Notify any waiters that the launch failed with a timeout.
    fail = Failure(RuntimeError("timeout while launching Tor"))
    self._maybe_notify_connected(fail)
|
def leave_chat(chat_id, **kwargs):
    """Use this method for your bot to leave a group, supergroup or channel.

    :param chat_id: Unique identifier for the target chat or username of the
        target channel (in the format @channelusername)
    :param kwargs: Args that get passed down to :class:`TelegramBotRPCRequest`
    :type chat_id: int or str
    :returns: Returns true on success.
    :rtype: bool
    """
    # chat_id is the only required parameter for this API method.
    return TelegramBotRPCRequest(
        'leaveChat',
        params={'chat_id': chat_id},
        on_result=lambda result: result,
        **kwargs
    )
|
def _find_docstring_line ( self , start , end ) :
"""Find the row where a docstring starts in a function or class .
This will search for the first match of a triple quote token in
row sequence from the start of the class or function .
Args :
start : the row where the class / function starts .
end : the row where the class / function ends .
Returns :
int : the row number where the docstring is found ."""
|
for i in range ( start , end + 1 ) :
if i in self . _tokenized_triple_quotes :
return i
return None
|
def create_elb_dns(self, regionspecific=False):
    """Create dns entries in route53.

    Args:
        regionspecific (bool): The DNS entry should have region on it

    Returns:
        str: Auto-generated DNS name for the Elastic Load Balancer.
    """
    # Pick the region-qualified or plain generated DNS name.
    if regionspecific:
        dns_elb = self.generated.dns()['elb_region']
    else:
        dns_elb = self.generated.dns()['elb']
    dns_elb_aws = find_elb(name=self.app_name, env=self.env, region=self.region)
    zone_ids = get_dns_zone_ids(env=self.env, facing=self.elb_subnet)
    self.log.info('Updating Application URL: %s', dns_elb)
    dns_kwargs = {'dns_name': dns_elb, 'dns_name_aws': dns_elb_aws, 'dns_ttl': self.dns_ttl, }
    # The record is written into every matching hosted zone.
    for zone_id in zone_ids:
        self.log.debug('zone_id: %s', zone_id)
        update_dns_zone_record(self.env, zone_id, **dns_kwargs)
    return dns_elb
|
def _register_allocator ( self , plugin_name , plugin_instance ) :
"""Register an allocator .
: param plugin _ name : Allocator name
: param plugin _ instance : RunPluginBase
: return :"""
|
for allocator in plugin_instance . get_allocators ( ) . keys ( ) :
if allocator in self . _allocators :
raise PluginException ( "Allocator with name {} already exists! unable to add " "allocators from plugin {}" . format ( allocator , plugin_name ) )
self . _allocators [ allocator ] = plugin_instance . get_allocators ( ) . get ( allocator )
|
def _get_payload(self, key):
    """Retrieve an item and expiry time from the cache by key.

    :param key: The cache key
    :type key: str
    :rtype: dict
    """
    path = self._path(key)
    # If the file doesn't exist, we obviously can't return the cache so we will
    # just return null. Otherwise, we'll get the contents of the file and get
    # the expiration UNIX timestamps from the start of the file's contents.
    if not os.path.exists(path):
        return {'data': None, 'time': None}
    with open(path, 'rb') as fh:
        contents = fh.read()
    # The first 10 bytes of the file hold the expiry UNIX timestamp.
    expire = int(contents[:10])
    # If the current time is greater than expiration timestamps we will delete
    # the file and return null. This helps clean up the old files and keeps
    # this directory much cleaner for us as old files aren't hanging out.
    if round(time.time()) >= expire:
        self.forget(key)
        return {'data': None, 'time': None}
    # Everything after the timestamp prefix is the serialized payload.
    data = self.unserialize(contents[10:])
    # Next, we'll extract the number of minutes that are remaining for a cache
    # so that we can properly retain the time for things like the increment
    # operation that may be performed on the cache. We'll round this out.
    time_ = math.ceil((expire - round(time.time())) / 60.)
    return {'data': data, 'time': time_}
|
def set_focus_to_state_model(self, state_m, ratio_requested=0.8):
    """Focus a state view of respective state model

    :param rafcon.gui.model.state state_m: Respective state model of state view to be focused
    :param ratio_requested: Minimum ratio of the screen which is requested, so can be more
    :return:
    """
    state_machine_m = self.model
    state_v = self.canvas.get_view_for_model(state_m)
    if state_v is None:
        logger.warning('There is no view for state model {0}'.format(state_m))
        # BUG FIX: bail out here — without a view, move_item_into_viewport(None)
        # below would crash.
        return
    self.move_item_into_viewport(state_v)
    # check_relative size in view and call it again if the state is still very small
    # NOTE(review): the recursive call passes identical arguments; it only
    # terminates if move_item_into_viewport changes the zoom — confirm.
    state_v = self.canvas.get_view_for_model(state_machine_m.root_state)
    state_size = self.view.editor.get_matrix_i2v(state_v).transform_distance(state_v.width, state_v.height)
    viewport_size = self.view.editor.get_allocation().width, self.view.editor.get_allocation().height
    if state_size[0] < ratio_requested * viewport_size[0] and state_size[1] < ratio_requested * viewport_size[1]:
        self.set_focus_to_state_model(state_m, ratio_requested)
|
def plot_bargraph(self, data, cats=None, pconfig=None):
    """Deprecated function. Forwards to the new plotting location."""
    from multiqc.plots import bargraph
    # Default the plot config to an empty dict rather than passing None.
    config = {} if pconfig is None else pconfig
    return bargraph.plot(data, cats, config)
|
def parse_event_record(self, node):
    """Parses <EventRecord>

    @param node: Node containing the <EventRecord> element
    @type node: xml.etree.Element
    """
    # Identity comparison with None instead of '=='.
    if self.current_simulation is None:
        self.raise_error('<EventRecord> must be only be used inside a ' + 'simulation specification')
    if 'quantity' in node.lattrib:
        quantity = node.lattrib['quantity']
    else:
        self.raise_error('<EventRecord> must specify a quantity.')
    if 'eventport' in node.lattrib:
        eventPort = node.lattrib['eventport']
    else:
        self.raise_error('<EventRecord> must specify an eventPort.')
    # raise_error is assumed to raise, so both names are bound by here.
    self.current_simulation.add_event_record(EventRecord(quantity, eventPort))
|
def _get_doubles_target(module, class_name, path):
    """Validate and return the class to be doubled.

    :param module module: The module that contains the class that will be doubled.
    :param str class_name: The name of the class that will be doubled.
    :param str path: The full path to the class that will be doubled.
    :return: The class that will be doubled.
    :rtype: type
    :raise: ``VerifyingDoubleImportError`` if the target object doesn't exist or isn't a class.
    """
    try:
        target = getattr(module, class_name)
        # An ObjectDouble stands in for its wrapped target.
        if isinstance(target, ObjectDouble):
            return target._doubles_target
        if isclass(target):
            return target
        raise VerifyingDoubleImportError('Path does not point to a class: {}.'.format(path))
    except AttributeError:
        raise VerifyingDoubleImportError('No object at path: {}.'.format(path))
|
def mark_in_progress(self, rr_id: str, rr_size: int) -> None:
    """Prepare sentinel directory for revocation registry construction.

    :param rr_id: revocation registry identifier
    :param rr_size: size of revocation registry to build
    """
    try:
        # exist_ok=False makes the directory creation the atomic "claim":
        # only one builder can win the race.
        makedirs(join(self._dir_tails_sentinel, rr_id), exist_ok=False)
    except FileExistsError:
        LOGGER.warning('Rev reg %s construction already in progress', rr_id)
    else:
        # Record the requested size as a hidden marker file, e.g. ".100".
        open(join(self._dir_tails_sentinel, rr_id, '.{}'.format(rr_size)), 'w').close()
|
def edit(path):
    """Edit a post.

    If requested over GET, shows the edit UI.
    If requested over POST, saves the post and shows the edit UI.

    :param path: Path to post to edit.
    """
    context = {'path': path, 'site': site}
    post = find_post(path)
    if post is None:
        return error("No such post or page.", 404)
    current_auid = int(post.meta('author.uid') or current_user.uid)
    # Only privileged users may edit somebody else's post.
    if (not current_user.can_edit_all_posts and current_auid != current_user.uid):
        return error("Cannot edit posts of other users.", 401)
    if request.method == 'POST':
        # Collect submitted metadata from the form.
        meta = {}
        for k, v in request.form.items():
            meta[k] = v
        meta.pop('_wysihtml5_mode', '')
        try:
            # Attempt to resolve the (possibly new) author by uid.
            meta['author'] = get_user(meta['author.uid']).realname
            current_auid = int(meta['author.uid'])
            author_change_success = True
        except Exception:
            author_change_success = False
        if (not current_user.can_transfer_post_authorship or not author_change_success):
            # Fall back to the existing author when transfer is not allowed
            # or the requested author could not be resolved.
            meta['author'] = post.meta('author') or current_user.realname
            meta['author.uid'] = str(current_auid)
        twofile = post.is_two_file
        onefile = not twofile
        post.compiler.create_post(post.source_path, onefile=onefile, is_page=False, **meta)
        context['post_content'] = meta['content']
        if twofile:
            meta_path = os.path.splitext(path)[0] + '.meta'
            # We cannot save `content` as meta, otherwise things break badly
            meta.pop('content', '')
            with io.open(meta_path, 'w+', encoding='utf-8') as fh:
                fh.write(write_metadata(meta))
        scan_site()
        # Flag the site for rebuild (in Redis when available).
        if db is not None:
            db.set('site:needs_rebuild', '1')
        else:
            site.coil_needs_rebuild = '1'
        post = find_post(path)
        context['action'] = 'save'
    else:
        context['action'] = 'edit'
        with io.open(path, 'r', encoding='utf-8') as fh:
            context['post_content'] = fh.read()
        if not post.is_two_file:
            # Strip the metadata header block from one-file posts.
            context['post_content'] = context['post_content'].split('\n\n', 1)[1]
    context['post'] = post
    # Build the list of active users, from Redis or from the static config.
    users = []
    if db is not None:
        uids = db.hgetall('users').values()
        for u in uids:
            u = u.decode('utf-8')
            realname, active = db.hmget('user:{0}'.format(u), 'realname', 'active')
            if active in (u'1', b'1'):
                users.append((u, realname.decode('utf-8')))
    else:
        for u, d in app.config['COIL_USERS'].items():
            if d['active']:
                users.append((int(u), d['realname']))
    context['users'] = sorted(users)
    context['current_auid'] = current_auid
    context['title'] = 'Editing {0}'.format(post.title())
    context['is_html'] = post.compiler.name == 'html'
    return render('coil_post_edit.tmpl', context)
|
def build_ast(self):
    """Convert a top level parse tree node into an AST mod."""
    n = self.root_node
    if n.type == syms.file_input:
        # A module: collect every statement, flattening multi-statement
        # (semicolon-separated) lines. The last child is the ENDMARKER.
        stmts = []
        for i in range(len(n.children) - 1):
            stmt = n.children[i]
            if stmt.type == tokens.NEWLINE:
                continue
            sub_stmts_count = self.number_of_statements(stmt)
            if sub_stmts_count == 1:
                stmts.append(self.handle_stmt(stmt))
            else:
                # Small statements alternate with ';' separators,
                # so take every other child.
                stmt = stmt.children[0]
                for j in range(sub_stmts_count):
                    small_stmt = stmt.children[j * 2]
                    stmts.append(self.handle_stmt(small_stmt))
        return ast.Module(stmts)
    elif n.type == syms.eval_input:
        # A single expression, as for eval().
        body = self.handle_testlist(n.children[0])
        return ast.Expression(body)
    elif n.type == syms.single_input:
        # Interactive input, as for the REPL.
        first_child = n.children[0]
        if first_child.type == tokens.NEWLINE:  # An empty line.
            return ast.Interactive([])
        else:
            num_stmts = self.number_of_statements(first_child)
            if num_stmts == 1:
                stmts = [self.handle_stmt(first_child)]
            else:
                stmts = []
                for i in range(0, len(first_child.children), 2):
                    stmt = first_child.children[i]
                    if stmt.type == tokens.NEWLINE:
                        break
                    stmts.append(self.handle_stmt(stmt))
            return ast.Interactive(stmts)
    else:
        raise AssertionError("unknown root node")
|
def dispatch(self, event: Any) -> None:
    """Send an event to an `ev_*` method.

    `*` will be the event's type converted to lower-case.
    If `event.type` is an empty string or None then it will be ignored.
    """
    # Guard clause: ignore events with a falsy type.
    if not event.type:
        return
    handler = getattr(self, "ev_%s" % (event.type.lower(),))
    handler(event)
|
def normalize_fft_params(series, kwargs=None, func=None):
    """Normalize a set of FFT parameters for processing

    This method reads the ``fftlength`` and ``overlap`` keyword arguments
    (presumed to be values in seconds), works out sensible defaults,
    then updates ``kwargs`` in place to include ``nfft`` and ``noverlap``
    as values in sample counts.

    If a ``window`` is given, the ``noverlap`` parameter will be set to the
    recommended overlap for that window type, if ``overlap`` is not given.

    If a ``window`` is given as a `str`, it will be converted to a
    `numpy.ndarray` containing the correct window (of the correct length).

    Parameters
    ----------
    series : `gwpy.timeseries.TimeSeries`
        the data that will be processed using an FFT-based method
    kwargs : `dict`
        the dict of keyword arguments passed by the user
    func : `callable`, optional
        the FFT method that will be called

    Examples
    --------
    >>> from numpy.random import normal
    >>> from gwpy.timeseries import TimeSeries
    >>> normalize_fft_params(TimeSeries(normal(size=1024), sample_rate=256))
    {'nfft': 1024, 'noverlap': 0}
    """
    # parse keywords
    if kwargs is None:
        kwargs = dict()
    samp = series.sample_rate
    # fftlength defaults to the full duration of the series
    fftlength = kwargs.pop('fftlength', None) or series.duration
    overlap = kwargs.pop('overlap', None)
    window = kwargs.pop('window', None)
    # parse function library and name
    if func is None:
        method = library = None
    else:
        method = func.__name__
        library = _fft_library(func)
    # fftlength -> nfft
    nfft = seconds_to_samples(fftlength, samp)
    # overlap -> noverlap
    noverlap = _normalize_overlap(overlap, window, nfft, samp, method=method)
    # create window
    window = _normalize_window(window, nfft, library, series.dtype)
    if window is not None:  # allow FFT methods to use their own defaults
        kwargs['window'] = window
    # create FFT plan for LAL
    if library == 'lal' and kwargs.get('plan', None) is None:
        from ._lal import generate_fft_plan
        kwargs['plan'] = generate_fft_plan(nfft, dtype=series.dtype)
    kwargs.update({'nfft': nfft, 'noverlap': noverlap, })
    return kwargs
|
def encipher(self, string):
    """Encipher string using Foursquare cipher according to initialised key.

    Punctuation and whitespace are removed from the input. If the input
    plaintext is not an even number of characters, an 'X' will be appended.

    Example::

        ciphertext = Foursquare(key1='zgptfoihmuwdrcnykeqaxvsbl', key2='mfnbdcrhsaxyogvituewlqzkp').encipher(plaintext)

    :param string: The string to encipher.
    :returns: The enciphered string.
    """
    string = self.remove_punctuation(string)
    # Pad odd-length plaintext with a trailing 'X'.
    if len(string) % 2 == 1:
        string = string + 'X'
    pieces = []
    for idx in range(0, len(string.upper()), 2):
        first, second = self.encipher_pair(string[idx], string[idx + 1])
        pieces.append(first + second)
    return ''.join(pieces)
|
def warn_if_element_not_of_class(element, class_suffix, special_ui_components_prefix):
    """Log a warning if the element is not of the given type (indicating that it is not internationalized).

    Args:
        element: The xib's XML element.
        class_suffix: The type suffix the element's class should carry.
        special_ui_components_prefix: If provided, will not warn about class with this prefix (default is only 'JT')
    """
    valid_class_names = ["%s%s" % (DEFAULT_UI_COMPONENTS_PREFIX, class_suffix)]
    if special_ui_components_prefix is not None:
        valid_class_names.append("%s%s" % (special_ui_components_prefix, class_suffix))
    if (not element.hasAttribute('customClass')) or element.attributes['customClass'].value not in valid_class_names:
        # logging.warn is a deprecated alias; logging.warning is the
        # documented API.
        logging.warning("WARNING: %s is internationalized but isn't one of %s", extract_element_internationalized_comment(element), valid_class_names)
|
def query(self, transport, protocol, *data):
    """Generates and sends a query message unit.

    :param transport: An object implementing the `.Transport` interface.
        It is used by the protocol to send the message and receive the
        response.
    :param protocol: An object implementing the `.Protocol` interface.
    :param data: The program data.
    :raises AttributeError: if the command is not queryable.
    """
    if not self._query:
        raise AttributeError('Command is not queryable')
    # A command-specific protocol overrides the one supplied by the caller.
    if self.protocol:
        protocol = self.protocol
    if self._query.data_type:
        data = _dump(self._query.data_type, data)
    else:  # TODO We silently ignore possible data
        data = ()
    if isinstance(transport, SimulatedTransport):
        # Simulated transports answer locally instead of going on the wire.
        response = self.simulate_query(data)
    else:
        response = protocol.query(transport, self._query.header, *data)
    response = _load(self._query.response_type, response)
    # Return single value if parsed_data is 1-tuple.
    return response[0] if len(response) == 1 else response
|
def set_handler(self, handler):
    """Set the callback object used by the receiving thread after receiving
    data. Users should register their own callback object here in order to
    achieve event-driven processing.

    :param handler: the callback handler object
    :return: RET_OK when the handler matched a protocol slot, RET_ERROR otherwise
    """
    for protoc in self._handler_table:
        if isinstance(handler, self._handler_table[protoc]["type"]):
            self._handler_table[protoc]["obj"] = handler
            return RET_OK
    # No protocol slot accepts this handler type. (The original kept a
    # `set_flag` variable that was never assigned True — dead code removed.)
    return RET_ERROR
|
def untargz(input_targz_file, untar_to_dir):
    """This module accepts a tar.gz archive and untars it.

    RETURN VALUE: path to the untar-ed directory/file
    NOTE: this module expects the multiple files to be in a directory before
    being tar-ed.
    """
    assert tarfile.is_tarfile(input_targz_file), 'Not a tar file.'
    # Context manager guarantees the archive handle is closed even when
    # extraction raises (the original leaked it on error).
    with tarfile.open(input_targz_file) as tarball:
        return_value = os.path.join(untar_to_dir, tarball.getmembers()[0].name)
        tarball.extractall(path=untar_to_dir)
    return return_value
|
def _set_interface_reverse_metric(self, v, load=False):
    """Setter method for interface_reverse_metric, mapped from YANG variable /routing_system/interface/ve/intf_isis/interface_isis/interface_reverse_metric (container)

    If this variable is read-only (config: false) in the
    source YANG file, then _set_interface_reverse_metric is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_interface_reverse_metric() directly.
    """
    # Auto-generated pyangbind setter: coerce the incoming value into the
    # YANG container type, raising ValueError for incompatible input.
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        t = YANGDynClass(v, base=interface_reverse_metric.interface_reverse_metric, is_container='container', presence=True, yang_name="interface-reverse-metric", rest_name="reverse-metric", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-compact-syntax': None, u'info': u'Configure reverse metric at the interface level', u'alt-name': u'reverse-metric'}}, namespace='urn:brocade.com:mgmt:brocade-isis', defining_module='brocade-isis', yang_type='container', is_config=True)
    except (TypeError, ValueError):
        raise ValueError({'error-string': """interface_reverse_metric must be of a type compatible with container""", 'defined-type': "container", 'generated-type': """YANGDynClass(base=interface_reverse_metric.interface_reverse_metric, is_container='container', presence=True, yang_name="interface-reverse-metric", rest_name="reverse-metric", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-compact-syntax': None, u'info': u'Configure reverse metric at the interface level', u'alt-name': u'reverse-metric'}}, namespace='urn:brocade.com:mgmt:brocade-isis', defining_module='brocade-isis', yang_type='container', is_config=True)""", })
    self.__interface_reverse_metric = t
    # Re-run any registered change hook.
    if hasattr(self, '_set'):
        self._set()
|
def __fix_bases(base_classes, have_mt):
    """Check whether base_classes contains a Model class. If not, choose
    the best fitting class for model. Furthermore, it makes the list in a
    canonical ordering form in a way that it can be used as memoization key.
    """
    fixed = list(base_classes)
    contains_model = False
    for b in fixed:
        # BUG FIX: the original tested isinstance(fixed, Model) — i.e. the
        # list itself, which can never be a Model — so the loop always
        # failed to detect one. Test each base instead.
        # NOTE(review): bases are classes, so a subclass check seems intended;
        # confirm against gtkmvc3 upstream.
        if isinstance(b, type) and issubclass(b, Model):
            contains_model = True
            break

    # adds a model when user is lazy
    if not contains_model:
        if have_mt:
            from gtkmvc3.model_mt import ModelMT
            fixed.insert(0, ModelMT)
        else:
            fixed.insert(0, Model)

    class ModelFactoryWrap(object):
        __metaclass__ = get_noconflict_metaclass(tuple(fixed), (), ())

        def __init__(self, *args, **kwargs):
            pass

    fixed.append(ModelFactoryWrap)
    # NOTE(review): sorting a list of classes relies on Python 2 arbitrary
    # ordering; it raises TypeError on Python 3.
    fixed.sort()
    return tuple(fixed)
|
def list_rules(self, service=None):
    """List fault injection rules installed on instances of a given service
    (or all services when ``service`` is None).

    :return: a JSON dictionary mapping service -> instance -> rules
    """
    # BUG FIX: the original immediately shadowed the ``service`` parameter
    # with the loop variable and therefore always listed every service;
    # honor the argument when given.
    services = [service] if service is not None else self.app.get_services()
    rules = {}
    for svc in services:
        rules[svc] = {}
        for instance in self.app.get_service_instances(svc):
            rules[svc][instance] = {}
            resp = requests.get("http://{}/gremlin/v1/rules/list".format(instance))
            if resp.status_code != 200:
                # Parenthesized print works on both Python 2 and 3.
                print('Failed to fetch rules from %s - instance %s' % (svc, instance))
                continue
            rules[svc][instance] = resp.json()
    return rules
|
def parse_reference_line(ref_line, kbs, bad_titles_count=None, linker_callback=None):
    """Parse one reference line

    @input a string representing a single reference bullet
    @output parsed references (a list of elements objects)
    """
    # BUG FIX: the original used a mutable default (bad_titles_count={}),
    # which is shared between calls; default to None and create per call.
    if bad_titles_count is None:
        bad_titles_count = {}
    # Strip the 'marker' (e.g. [1]) from this reference line:
    line_marker, ref_line = remove_reference_line_marker(ref_line)
    # Find DOI sections in citation
    ref_line, identified_dois = identify_and_tag_DOI(ref_line)
    # Identify and replace URLs in the line:
    ref_line, identified_urls = identify_and_tag_URLs(ref_line)
    # Tag <cds.JOURNAL>, etc.
    tagged_line, bad_titles_count = tag_reference_line(ref_line, kbs, bad_titles_count)
    # Debug print tagging (authors, titles, volumes, etc.)
    LOGGER.debug("tags %r", tagged_line)
    # Using the recorded information, create a MARC XML representation
    # of the rebuilt line:
    # At the same time, get stats of citations found in the reference line
    # (titles, urls, etc):
    citation_elements, line_marker, counts = parse_tagged_reference_line(line_marker, tagged_line, identified_dois, identified_urls)
    # Transformations on elements
    split_volume_from_journal(citation_elements)
    format_volume(citation_elements)
    handle_special_journals(citation_elements, kbs)
    format_report_number(citation_elements)
    format_author_ed(citation_elements)
    look_for_books(citation_elements, kbs)
    format_hep(citation_elements)
    remove_b_for_nucl_phys(citation_elements)
    mangle_volume(citation_elements)
    arxiv_urls_to_report_numbers(citation_elements)
    look_for_hdl(citation_elements)
    look_for_hdl_urls(citation_elements)
    # Link references if desired
    if linker_callback:
        associate_recids(citation_elements, linker_callback)
    # Split the reference in multiple ones if needed
    splitted_citations = split_citations(citation_elements)
    # Look for implied ibids
    look_for_implied_ibids(splitted_citations)
    # Find year
    add_year_elements(splitted_citations)
    # Look for books in misc field
    look_for_undetected_books(splitted_citations, kbs)
    if linker_callback:
        # Link references with the newly added ibids/books information
        for citations in splitted_citations:
            associate_recids(citations, linker_callback)
    # FIXME: Needed?
    # Remove references with only misc text
    # splitted_citations = remove_invalid_references(splitted_citations)
    # Merge references with only misc text
    # splitted_citations = merge_invalid_references(splitted_citations)
    remove_duplicated_authors(splitted_citations)
    remove_duplicated_dois(splitted_citations)
    remove_duplicated_collaborations(splitted_citations)
    add_recid_elements(splitted_citations)
    # For debugging purposes
    print_citations(splitted_citations, line_marker)
    return splitted_citations, line_marker, counts, bad_titles_count
|
def extract_schemas_from_source ( source , filename = '<unknown>' ) :
    """Extract schemas from 'source'.

    The 'source' parameter must be a string, and should be valid python
    source.

    If 'source' is not valid python source, a SyntaxError will be raised.

    :param source: python source text to scan.
    :param filename: name reported by the parser in error messages.
    :returns: a list of ViewSchema objects.
    """
    # Track which acceptable services have been configured .
    acceptable_services = set ( )
    # Track which acceptable views have been configured :
    acceptable_views = { }
    schemas_found = [ ]
    ast_tree = ast . parse ( source , filename )
    simple_names = _get_simple_assignments ( ast_tree )
    # Only module-level assignments whose value is a call can create
    # services or views.
    assigns = [ n for n in ast_tree . body if isinstance ( n , ast . Assign ) ]
    call_assigns = [ n for n in assigns if isinstance ( n . value , ast . Call ) ]
    # We need to extract the AcceptableService - related views . We parse the
    # assignations twice : The first time to extract the AcceptableService
    # instances , the second to extract the views created on those services .
    for assign in call_assigns :
        # Attribute calls (obj.method(...)) cannot be a bare
        # AcceptableService(...) constructor call, so skip them here.
        if isinstance ( assign . value . func , ast . Attribute ) :
            continue
        if assign . value . func . id == 'AcceptableService' :
            for target in assign . targets :
                acceptable_services . add ( target . id )
    for assign in call_assigns : # only consider calls which are attribute accesses , AND
        # calls where the object being accessed is in acceptable _ services , AND
        # calls where the attribute being accessed is the ' api ' method .
        if isinstance ( assign . value . func , ast . Attribute ) and assign . value . func . value . id in acceptable_services and assign . value . func . attr == 'api' : # this is a view . We need to extract the url and methods specified .
            # they may be specified positionally or via a keyword .
            url = None
            name = None
            # methods has a default value :
            methods = [ 'GET' ]
            # This is a view - the URL is the first positional argument :
            args = assign . value . args
            if len ( args ) >= 1 :
                url = ast . literal_eval ( args [ 0 ] )
            if len ( args ) >= 2 :
                name = ast . literal_eval ( args [ 1 ] )
            # Keyword arguments override any positional values found above.
            kwargs = assign . value . keywords
            for kwarg in kwargs :
                if kwarg . arg == 'url' :
                    url = ast . literal_eval ( kwarg . value )
                if kwarg . arg == 'methods' :
                    methods = ast . literal_eval ( kwarg . value )
                if kwarg . arg == 'view_name' :
                    name = ast . literal_eval ( kwarg . value )
            # Only record views that have both a url and a name.
            if url and name :
                for target in assign . targets :
                    acceptable_views [ target . id ] = { 'url' : url , 'name' : name , 'methods' : methods , }
    # iterate over all functions , attempting to find the views .
    functions = [ n for n in ast_tree . body if isinstance ( n , ast . FunctionDef ) ]
    for function in functions :
        input_schema = None
        output_schema = None
        doc = ast . get_docstring ( function )
        api_options_list = [ ]
        for decorator in function . decorator_list :
            if not isinstance ( decorator , ast . Call ) :
                continue
            if isinstance ( decorator . func , ast . Attribute ) :
                # service.api(...)-style decorator: marks this function
                # as one of the previously collected views.
                decorator_name = decorator . func . value . id
                # extract version this view was introduced at , which can be
                # specified as an arg or a kwarg :
                version = None
                for kwarg in decorator . keywords :
                    if kwarg . arg == 'introduced_at' :
                        version = ast . literal_eval ( kwarg . value )
                        break
                # NOTE(review): a single positional arg overrides any
                # 'introduced_at' kwarg found above — confirm intended.
                if len ( decorator . args ) == 1 :
                    version = ast . literal_eval ( decorator . args [ 0 ] )
                if decorator_name in acceptable_views :
                    api_options = acceptable_views [ decorator_name ]
                    api_options [ 'version' ] = version
                    api_options_list . append ( api_options )
            else :
                # Plain-name decorators carry the validation schemas.
                decorator_name = decorator . func . id
                if decorator_name == 'validate_body' :
                    _SimpleNamesResolver ( simple_names ) . visit ( decorator . args [ 0 ] )
                    input_schema = ast . literal_eval ( decorator . args [ 0 ] )
                if decorator_name == 'validate_output' :
                    _SimpleNamesResolver ( simple_names ) . visit ( decorator . args [ 0 ] )
                    output_schema = ast . literal_eval ( decorator . args [ 0 ] )
        for api_options in api_options_list :
            schema = ViewSchema ( view_name = api_options [ 'name' ] , version = api_options [ 'version' ] , input_schema = input_schema , output_schema = output_schema , methods = api_options [ 'methods' ] , url = api_options [ 'url' ] , doc = doc , )
            schemas_found . append ( schema )
    return schemas_found
|
def _get_uptime ( self , app_stats ) :
"""Return Icinga ' s uptime"""
|
if "program_start" not in app_stats . keys ( ) :
return 0
if not app_stats [ "program_start" ] . isdigit ( ) :
return 0
uptime = int ( time . time ( ) ) - int ( app_stats [ "program_start" ] )
if uptime < 0 :
return 0
return uptime
|
def _get_relative_ext ( of , sf ) :
"""Retrieve relative extension given the original and secondary files ."""
|
def half_finished_trim ( orig , prefix ) :
return ( os . path . basename ( prefix ) . count ( "." ) > 0 and os . path . basename ( orig ) . count ( "." ) == os . path . basename ( prefix ) . count ( "." ) )
# Handle remote files
if of . find ( ":" ) > 0 :
of = os . path . basename ( of . split ( ":" ) [ - 1 ] )
if sf . find ( ":" ) > 0 :
sf = os . path . basename ( sf . split ( ":" ) [ - 1 ] )
prefix = os . path . commonprefix ( [ sf , of ] )
while prefix . endswith ( "." ) or ( half_finished_trim ( sf , prefix ) and half_finished_trim ( of , prefix ) ) :
prefix = prefix [ : - 1 ]
exts_to_remove = of . replace ( prefix , "" )
ext_to_add = sf . replace ( prefix , "" )
# Return extensions relative to original
if not exts_to_remove or exts_to_remove . startswith ( "." ) :
return str ( "^" * exts_to_remove . count ( "." ) + ext_to_add )
else :
raise ValueError ( "No cross platform way to reference complex extension: %s %s" % ( sf , of ) )
|
def sort_descendants(self, attr="name"):
    """Sort the branches of this tree by node content.

    Children of every internal node are ordered by the stringified,
    sorted collection of ``attr`` values found under each child, so
    trees containing the same node names always end up with the same
    branch order. Note that if duplicated names are present, extra
    criteria should be added to disambiguate them.
    """
    content_by_node = self.get_cached_content(store_attr=attr, container_type=list)
    for node in self.traverse():
        if node.is_leaf():
            continue
        node.children.sort(key=lambda child: str(sorted(content_by_node[child])))
|
def _config_chooser_dialog(self, title_text, description):
    """Dialog to select which config shall be exported.

    :param title_text: Title text
    :param description: Description
    :returns: the Gtk response code chosen by the user
    """
    dialog = Gtk.Dialog(
        title_text,
        self.view["preferences_window"],
        flags=0,
        buttons=(Gtk.STOCK_CANCEL, Gtk.ResponseType.REJECT,
                 Gtk.STOCK_OK, Gtk.ResponseType.ACCEPT),
    )
    # Descriptive label at the top of the dialog body.
    description_label = Gtk.Label(label=description)
    description_label.set_padding(xpad=10, ypad=10)
    dialog.vbox.pack_start(description_label, True, True, 0)
    description_label.show()
    # Checkbox for the GUI configuration.
    self._gui_checkbox = Gtk.CheckButton(label="GUI Config")
    dialog.vbox.pack_start(self._gui_checkbox, True, True, 0)
    self._gui_checkbox.show()
    # Checkbox for the core configuration.
    self._core_checkbox = Gtk.CheckButton(label="Core Config")
    self._core_checkbox.show()
    dialog.vbox.pack_start(self._core_checkbox, True, True, 0)
    # Run modally, tear down, and report the user's choice.
    response = dialog.run()
    dialog.destroy()
    return response
|
def mdl_nominal ( x , y , separate_max ) :
    """Function calculates minimum description length for discrete features. If feature is continuous it is firstly discretized.

    x: numpy array - numerical or discrete feature
    y: numpy array - labels
    separate_max: forwarded to ``nominal_splits`` -- presumably controls
        whether the maximal value gets its own split; confirm against
        ``nominal_splits``.

    Returns a ``(max_mdl, split)`` tuple where ``split`` is a two-element
    list of split groups, or ``None`` when the feature is constant.
    """
    x_vals = np . unique ( x )
    # unique values
    if len ( x_vals ) == 1 : # if there is just one unique value
        return None
    y_dist = Counter ( y )
    # label distribution
    # calculate distributions and splits in accordance with feature type
    dist , splits = nominal_splits ( x , y , x_vals , y_dist , separate_max )
    # MDL of the data before any split is applied.
    prior_mdl = calc_mdl ( dist , y_dist )
    max_mdl , max_i = 0 , 1
    for i in range ( 1 , len ( dist ) ) : # iter 0 : take first distribution
        # Left partition: distributions before the candidate cut point.
        dist0_x = [ el for el in dist [ : i ] ]
        # NOTE(review): np.sum over the list appears to merge the
        # Counter-like distributions via `+` (dist0_y has .values() below);
        # verify the element type returned by nominal_splits.
        dist0_y = np . sum ( dist0_x )
        post_mdl0 = calc_mdl ( dist0_x , dist0_y )
        # iter 0 : take the other distributions without first
        dist1_x = [ el for el in dist [ i : ] ]
        dist1_y = np . sum ( dist1_x )
        post_mdl1 = calc_mdl ( dist1_x , dist1_y )
        # Weight each side's MDL by its share of the samples.
        coef = np . true_divide ( [ sum ( dist0_y . values ( ) ) , sum ( dist1_y . values ( ) ) ] , len ( x ) )
        mdl_val = prior_mdl - np . dot ( coef , [ post_mdl0 , post_mdl1 ] )
        # calculate mdl
        if mdl_val > max_mdl :
            max_mdl , max_i = mdl_val , i
    # store splits of maximal mdl in accordance with feature type
    split = [ splits [ : max_i ] , splits [ max_i : ] ]
    return ( max_mdl , split )
|
def init_app ( self , app , session ) :
    """Will initialize the Flask app, supporting the app factory pattern.

    :param app: the Flask application to configure
    :param session: The SQLAlchemy session
    """
    # Fill in defaults for any config keys the application did not set.
    app . config . setdefault ( "APP_NAME" , "F.A.B." )
    app . config . setdefault ( "APP_THEME" , "" )
    app . config . setdefault ( "APP_ICON" , "" )
    app . config . setdefault ( "LANGUAGES" , { "en" : { "flag" : "gb" , "name" : "English" } } )
    app . config . setdefault ( "ADDON_MANAGERS" , [ ] )
    app . config . setdefault ( "FAB_API_MAX_PAGE_SIZE" , 20 )
    self . app = app
    if self . update_perms : # default is True , if False takes precedence from config
        self . update_perms = app . config . get ( 'FAB_UPDATE_PERMS' , True )
    # Allow the security manager class to be overridden via config;
    # fall back to the SQLA SecurityManager when none is configured.
    _security_manager_class_name = app . config . get ( 'FAB_SECURITY_MANAGER_CLASS' , None )
    if _security_manager_class_name is not None :
        self . security_manager_class = dynamic_class_import ( _security_manager_class_name )
    if self . security_manager_class is None :
        # Imported lazily to avoid a hard dependency at module import time.
        from flask_appbuilder . security . sqla . manager import SecurityManager
        self . security_manager_class = SecurityManager
    self . _addon_managers = app . config [ "ADDON_MANAGERS" ]
    self . session = session
    # Instantiate the core managers (security, i18n, OpenAPI).
    self . sm = self . security_manager_class ( self )
    self . bm = BabelManager ( self )
    self . openapi_manager = OpenApiManager ( self )
    self . _add_global_static ( )
    self . _add_global_filters ( )
    app . before_request ( self . sm . before_request )
    self . _add_admin_views ( )
    self . _add_addon_views ( )
    # NOTE(review): self.app was assigned above, so this branch appears to
    # always take _add_menu_permissions(); confirm when post_init() is
    # actually reachable.
    if self . app :
        self . _add_menu_permissions ( )
    else :
        self . post_init ( )
    self . _init_extension ( app )
|
def write_badge(self, file_path, overwrite=False):
    """Write the badge SVG to ``file_path``.

    A ``.svg`` suffix is appended when missing.

    :param file_path: destination path; must not be a directory.
    :param overwrite: when False (the default), refuse to replace an
        existing file.
    :raises Exception: if ``file_path`` ends with a path separator
        (a directory), or the target exists and ``overwrite`` is False.
    """
    # Validate path (part 1): a trailing separator means a directory.
    if file_path.endswith('/'):
        raise Exception('File location may not be a directory.')
    # Get absolute filepath and normalise the extension.
    path = os.path.abspath(file_path)
    if not path.lower().endswith('.svg'):
        path += '.svg'
    # Validate path (part 2): never clobber silently.
    if not overwrite and os.path.exists(path):
        raise Exception('File "{}" already exists.'.format(path))
    # Write as UTF-8 explicitly so the output does not depend on the
    # platform's default encoding (SVG is XML, UTF-8 by convention).
    with open(path, mode='w', encoding='utf-8') as file_handle:
        file_handle.write(self.badge_svg_text)
|
def _tidy_repr(self, max_vals=10, footer=True):
    """A short repr displaying only ``max_vals`` entries and an optional
    (but default) footer.
    """
    head_count = max_vals // 2
    tail_count = max_vals - head_count
    head = self[:head_count]._get_repr(length=False, footer=False)
    tail = self[-tail_count:]._get_repr(length=False, footer=False)
    # Drop head's closing char and tail's opening char before joining.
    result = '{head}, ..., {tail}'.format(head=head[:-1], tail=tail[1:])
    if footer:
        result = '{result}\n{footer}'.format(result=result, footer=self._repr_footer())
    return str(result)
|
def _cron_id(cron):
    '''SAFETYBELT, Only set if we really have an identifier'''
    # Fall back to the module-level sentinel when no identifier is set.
    cid = cron['identifier'] or SALT_CRON_NO_IDENTIFIER
    if cid:
        return _ensure_string(cid)
|
def port_bindings ( val , ** kwargs ) :
    '''
    On the CLI, these are passed as multiple instances of a given CLI option.
    In Salt, we accept these as a comma-delimited list but the API expects a
    Python dictionary mapping ports to their bindings. The format the API
    expects is complicated depending on whether or not the external port maps
    to a different internal port, or if the port binding is for UDP instead of
    TCP (the default). For reference, see the "Port bindings" section in the
    docker-py documentation at the following URL:
    http://docker-py.readthedocs.io/en/stable/api.html
    '''
    validate_ip_addrs = kwargs . get ( 'validate_ip_addrs' , True )
    # A dict input is assumed to already be in API format and passed through.
    if not isinstance ( val , dict ) :
        # Normalise the input to a list of text strings before parsing.
        if not isinstance ( val , list ) :
            try :
                val = helpers . split ( val )
            except AttributeError :
                val = helpers . split ( six . text_type ( val ) )
        for idx in range ( len ( val ) ) :
            if not isinstance ( val [ idx ] , six . string_types ) :
                val [ idx ] = six . text_type ( val [ idx ] )
        def _format_port ( port_num , proto ) :
            # Only UDP ports carry an explicit '/udp' suffix in the API format.
            return six . text_type ( port_num ) + '/udp' if proto . lower ( ) == 'udp' else port_num
        bindings = { }
        for binding in val :
            # A binding is 1-3 colon-separated parts:
            #   port[/proto]                       (pass-through)
            #   host_port:container_port[/proto]   (remap)
            #   host_ip:host_port:cport[/proto]    (bind to an address)
            bind_parts = helpers . split ( binding , ':' )
            num_bind_parts = len ( bind_parts )
            if num_bind_parts == 1 : # Single port or port range being passed through ( no
                # special mapping )
                container_port = six . text_type ( bind_parts [ 0 ] )
                if container_port == '' :
                    raise SaltInvocationError ( 'Empty port binding definition found' )
                container_port , _ , proto = container_port . partition ( '/' )
                try :
                    start , end = helpers . get_port_range ( container_port )
                except ValueError as exc : # Using _ _ str _ _ ( ) to avoid deprecation warning for using
                    # the message attribute of the ValueError .
                    raise SaltInvocationError ( exc . __str__ ( ) )
                bind_vals = [ ( _format_port ( port_num , proto ) , None ) for port_num in range ( start , end + 1 ) ]
            elif num_bind_parts == 2 :
                if bind_parts [ 0 ] == '' :
                    raise SaltInvocationError ( 'Empty host port in port binding definition ' '\'{0}\'' . format ( binding ) )
                if bind_parts [ 1 ] == '' :
                    raise SaltInvocationError ( 'Empty container port in port binding definition ' '\'{0}\'' . format ( binding ) )
                container_port , _ , proto = bind_parts [ 1 ] . partition ( '/' )
                try :
                    cport_start , cport_end = helpers . get_port_range ( container_port )
                    hport_start , hport_end = helpers . get_port_range ( bind_parts [ 0 ] )
                except ValueError as exc : # Using _ _ str _ _ ( ) to avoid deprecation warning for
                    # using the message attribute of the ValueError .
                    raise SaltInvocationError ( exc . __str__ ( ) )
                if ( hport_end - hport_start ) != ( cport_end - cport_start ) : # Port range is mismatched
                    raise SaltInvocationError ( 'Host port range ({0}) does not have the same ' 'number of ports as the container port range ' '({1})' . format ( bind_parts [ 0 ] , container_port ) )
                cport_list = list ( range ( cport_start , cport_end + 1 ) )
                hport_list = list ( range ( hport_start , hport_end + 1 ) )
                # Pair each container port with its host port, position by position.
                bind_vals = [ ( _format_port ( cport_list [ x ] , proto ) , hport_list [ x ] ) for x in range ( len ( cport_list ) ) ]
            elif num_bind_parts == 3 :
                host_ip , host_port = bind_parts [ 0 : 2 ]
                if validate_ip_addrs :
                    helpers . validate_ip ( host_ip )
                container_port , _ , proto = bind_parts [ 2 ] . partition ( '/' )
                try :
                    cport_start , cport_end = helpers . get_port_range ( container_port )
                except ValueError as exc : # Using _ _ str _ _ ( ) to avoid deprecation warning for
                    # using the message attribute of the ValueError .
                    raise SaltInvocationError ( exc . __str__ ( ) )
                cport_list = list ( range ( cport_start , cport_end + 1 ) )
                if host_port == '' :
                    # Bind to the address only; let Docker pick the host ports.
                    hport_list = [ None ] * len ( cport_list )
                else :
                    try :
                        hport_start , hport_end = helpers . get_port_range ( host_port )
                    except ValueError as exc : # Using _ _ str _ _ ( ) to avoid deprecation warning for
                        # using the message attribute of the ValueError .
                        raise SaltInvocationError ( exc . __str__ ( ) )
                    hport_list = list ( range ( hport_start , hport_end + 1 ) )
                    if ( hport_end - hport_start ) != ( cport_end - cport_start ) : # Port range is mismatched
                        raise SaltInvocationError ( 'Host port range ({0}) does not have the same ' 'number of ports as the container port range ' '({1})' . format ( host_port , container_port ) )
                bind_vals = [ ( _format_port ( val , proto ) , ( host_ip , ) if hport_list [ idx ] is None else ( host_ip , hport_list [ idx ] ) ) for idx , val in enumerate ( cport_list ) ]
            else :
                raise SaltInvocationError ( '\'{0}\' is an invalid port binding definition (at most ' '3 components are allowed, found {1})' . format ( binding , num_bind_parts ) )
            # Merge this binding's values into the accumulated dict; repeated
            # container ports become a list of bindings.
            for cport , bind_def in bind_vals :
                if cport not in bindings :
                    bindings [ cport ] = bind_def
                else :
                    if isinstance ( bindings [ cport ] , list ) : # Append to existing list of bindings for this
                        # container port .
                        bindings [ cport ] . append ( bind_def )
                    else :
                        bindings [ cport ] = [ bindings [ cport ] , bind_def ]
                    for idx in range ( len ( bindings [ cport ] ) ) :
                        if bindings [ cport ] [ idx ] is None : # Now that we are adding multiple
                            # bindings
                            try : # Convert 1234 / udp to 1234
                                bindings [ cport ] [ idx ] = int ( cport . split ( '/' ) [ 0 ] )
                            except AttributeError : # Port was tcp , the AttributeError
                                # signifies that the split failed
                                # because the port number was
                                # already defined as an integer .
                                # Just use the cport .
                                bindings [ cport ] [ idx ] = cport
        val = bindings
    return val
|
def _register_key(fingerprint, gpg):
    """Registers key in config"""
    wanted = str(fingerprint)
    for secret_key in gpg.list_keys(True):
        try:
            if wanted == secret_key['fingerprint']:
                # Persist the matching fingerprint in the global config.
                config["gpg_key_fingerprint"] = repr(secret_key['fingerprint'])
        except KeyError:
            # Entries without a 'fingerprint' field are skipped.
            pass
|
def com_adobe_fonts_check_name_postscript_vs_cff(ttFont):
    """CFF table FontName must match name table ID 6 (PostScript name)."""
    font_names = ttFont['CFF '].cff.fontNames
    # A well-formed CFF table carries exactly one font name.
    if len(font_names) != 1:
        yield ERROR, ("Unexpected number of font names in CFF table.")
        return
    expected = font_names[0]
    mismatched = False
    for record in ttFont['name'].names:
        if record.nameID != NameID.POSTSCRIPT_NAME:
            continue
        postscript_name = record.toUnicode()
        if postscript_name != expected:
            yield FAIL, ("Name table PostScript name '{}' "
                         "does not match CFF table FontName '{}'.".format(postscript_name, expected))
            mismatched = True
    if not mismatched:
        yield PASS, ("Name table PostScript name matches CFF table FontName.")
|
def dihed_iter(self, g_nums, ats_1, ats_2, ats_3, ats_4, invalid_error=False):
    """Iterator over selected dihedral angles.

    Angles are in degrees as with :meth:`dihed_single`.
    See `above <toc-generators_>`_ for more information on
    calling options.

    Parameters
    ----------
    g_nums
        |int| or iterable |int| or |None| --
        Indices of the desired geometry
    ats_1, ats_2, ats_3, ats_4
        |int| or iterable |int| or |None| --
        Indices of the first, second, third, and fourth atoms
    invalid_error
        |bool|, optional --
        If |False| (the default), |None| values are returned for
        results corresponding to invalid indices. If |True|,
        exceptions are raised per normal.

    Yields
    ------
    dihed
        |npfloat_| --
        Out-of-plane/dihedral angles in degrees for the indicated
        atom sets `ats_1`-`ats_2`-`ats_3`-`ats_4`, drawn from
        the respective `g_nums`.

    Raises
    ------
    ~exceptions.IndexError
        If an invalid (out-of-range) `g_num` or `at_#` is provided.
    ~exceptions.ValueError
        If all iterable objects are not the same length, or if any
        corresponding `ats_#` indices are equal.
    ~opan.error.XYZError
        (typecode :data:`~opan.error.XYZError.DIHED`) If either
        of the atom trios (1-2-3 or 2-3-4) is too close to
        linearity for any group of `ats_#`
    """
    # Suitability of the ats_n indices is checked inside the
    # self.dihed_single() calls, so no checking is needed here.
    from .utils import pack_tups

    # Report the raw inputs when debugging.
    if _DEBUG:  # pragma: no cover
        for label, value in (("g_nums", g_nums), ("ats_1", ats_1),
                             ("ats_2", ats_2), ("ats_3", ats_3),
                             ("ats_4", ats_4)):
            print("{0} = {1}".format(label, value))

    # Substitute None arguments, then expand everything into tuples.
    tups = pack_tups(*self._none_subst(g_nums, ats_1, ats_2, ats_3, ats_4))
    if _DEBUG:  # pragma: no cover
        print(tups)

    # Lazily evaluate one dihedral per packed tuple.
    for tup in tups:
        yield self._iter_return(tup, self.dihed_single, invalid_error)
|
def __RemoteExecuteHelper ( args ) :
    """Helper for multiprocessing.

    args is a (cmd, hostname, ssh_key) tuple; runs cmd on hostname over
    SSH as the 'ubuntu' user and returns (exit_code, output).
    NOTE(review): Python 2 code (print statements, StringIO module).
    """
    cmd , hostname , ssh_key = args
    # Random . atfork ( ) # needed to fix bug in old python 2.6 interpreters
    private_key = paramiko . RSAKey . from_private_key ( StringIO . StringIO ( ssh_key ) )
    client = paramiko . SSHClient ( )
    # Accept unknown host keys automatically (no known_hosts check).
    client . set_missing_host_key_policy ( paramiko . AutoAddPolicy ( ) )
    # Retry every 5s until the host is reachable and auth succeeds.
    while True :
        try :
            client . connect ( hostname , username = 'ubuntu' , pkey = private_key , allow_agent = False , look_for_keys = False )
            break
        except socket . error as e :
            print '.'
            time . sleep ( 5 )
        except paramiko . AuthenticationException as e :
            print e
            time . sleep ( 5 )
    channel = client . get_transport ( ) . open_session ( )
    channel . exec_command ( cmd )
    exit_code = channel . recv_exit_status ( )
    # NOTE(review): a single recv() may truncate output larger than ~1MB.
    output = channel . recv ( 1000000 )
    client . close ( )
    return exit_code , output
|
def listen_forever ( self , timeout_ms : int = 30000 , exception_handler : Callable [ [ Exception ] , None ] = None , bad_sync_timeout : int = 5 , ) :
    """Keep listening for events forever.

    Args:
        timeout_ms: How long to poll the Home Server for before retrying.
        exception_handler: Optional exception handler function which can
            be used to handle exceptions in the caller thread.
        bad_sync_timeout: Base time to wait after an error before retrying.
            Will be increased according to exponential backoff.
    """
    backoff = bad_sync_timeout
    self.should_listen = True
    while self.should_listen:
        try:
            # May be killed and raise an exception from _handle_thread.
            self._sync(timeout_ms)
            # Successful sync resets the backoff.
            backoff = bad_sync_timeout
        except MatrixRequestError as e:
            log.warning('A MatrixRequestError occured during sync.')
            # Client-side errors (< 500) are not retried.
            if e.code < 500:
                raise
            log.warning('Problem occured serverside. Waiting', wait_for=backoff, )
            gevent.sleep(backoff)
            backoff = min(backoff * 2, self.bad_sync_timeout_limit)
        except MatrixHttpLibError:
            log.exception('A MatrixHttpLibError occured during sync.')
            if self.should_listen:
                gevent.sleep(backoff)
                backoff = min(backoff * 2, self.bad_sync_timeout_limit)
        except Exception as e:
            log.exception('Exception thrown during sync')
            if exception_handler is None:
                raise
            exception_handler(e)
|
def _set_get_lldp_neighbor_detail ( self , v , load = False ) :
    """Setter method for get_lldp_neighbor_detail, mapped from YANG variable /brocade_lldp_ext_rpc/get_lldp_neighbor_detail (rpc)

    If this variable is read-only (config: false) in the
    source YANG file, then _set_get_lldp_neighbor_detail is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_get_lldp_neighbor_detail() directly.

    YANG Description: This is a function that serves to return the Neighbor
    details of all the interfaces of the managed entity.
    """
    # Unwrap values that carry their own YANG type converter.
    if hasattr ( v , "_utype" ) :
        v = v . _utype ( v )
    try :
        # Coerce the value into the generated YANG RPC type
        # (pyangbind-generated boilerplate; keep in sync with the model).
        t = YANGDynClass ( v , base = get_lldp_neighbor_detail . get_lldp_neighbor_detail , is_leaf = True , yang_name = "get-lldp-neighbor-detail" , rest_name = "get-lldp-neighbor-detail" , parent = self , path_helper = self . _path_helper , extmethods = self . _extmethods , register_paths = False , extensions = { u'tailf-common' : { u'hidden' : u'rpccmd' , u'actionpoint' : u'LldpShowAction' } } , namespace = 'urn:brocade.com:mgmt:brocade-lldp-ext' , defining_module = 'brocade-lldp-ext' , yang_type = 'rpc' , is_config = True )
    except ( TypeError , ValueError ) :
        raise ValueError ( { 'error-string' : """get_lldp_neighbor_detail must be of a type compatible with rpc""" , 'defined-type' : "rpc" , 'generated-type' : """YANGDynClass(base=get_lldp_neighbor_detail.get_lldp_neighbor_detail, is_leaf=True, yang_name="get-lldp-neighbor-detail", rest_name="get-lldp-neighbor-detail", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions={u'tailf-common': {u'hidden': u'rpccmd', u'actionpoint': u'LldpShowAction'}}, namespace='urn:brocade.com:mgmt:brocade-lldp-ext', defining_module='brocade-lldp-ext', yang_type='rpc', is_config=True)""" , } )
    self . __get_lldp_neighbor_detail = t
    # Notify the parent object of the change when supported.
    if hasattr ( self , '_set' ) :
        self . _set ( )
|
def get_widget(title):
    """Get the Qt widget of the IDA window with the given title."""
    tform = idaapi.find_tform(title)
    if tform:
        return form_to_widget(tform)
    # No open IDA window carries that title.
    raise exceptions.FormNotFound("No form titled {!r} found.".format(title))
|
def liftover ( args ) :
    """%prog liftover blastfile anchorfile [options]

    Typical use for this program is given a list of anchors (syntennic
    genes), choose from the blastfile the pairs that are close to the anchors.

    Anchorfile has the following format, each row defines a pair.

        geneA geneB
        geneC geneD

    Returns the path of the newly written ``.lifted.anchors`` file.
    """
    p = OptionParser ( liftover . __doc__ )
    p . set_stripnames ( )
    blast_file , anchor_file , dist , opts = add_options ( p , args )
    qbed , sbed , qorder , sorder , is_self = check_beds ( blast_file , p , opts )
    filtered_blast = read_blast ( blast_file , qorder , sorder , is_self = is_self , ostrip = opts . strip_names )
    # Index the filtered hits by (query idx, subject idx) so scores can be
    # recovered after liftover.
    # NOTE(review): both dicts below iterate filtered_blast — assumes
    # read_blast returns a reusable sequence, not a one-shot generator.
    blast_to_score = dict ( ( ( b . qi , b . si ) , int ( b . score ) ) for b in filtered_blast )
    accepted = dict ( ( ( b . query , b . subject ) , str ( int ( b . score ) ) ) for b in filtered_blast )
    ac = AnchorFile ( anchor_file )
    all_hits = group_hits ( filtered_blast )
    all_anchors , anchor_to_block = read_anchors ( ac , qorder , sorder )
    # select hits that are close to the anchor list
    lifted = 0
    for chr_pair in sorted ( all_anchors . keys ( ) ) :
        hits = np . array ( all_hits [ chr_pair ] )
        anchors = np . array ( all_anchors [ chr_pair ] )
        if not len ( hits ) :
            continue
        for point , nearest in synteny_liftover ( hits , anchors , dist ) :
            qi , si = point [ : 2 ]
            block_id = anchor_to_block [ nearest ]
            query , subject = qbed [ qi ] . accn , sbed [ si ] . accn
            score = blast_to_score [ ( qi , si ) ]
            # Lifted pairs are tagged with an "L" suffix on the score.
            ac . blocks [ block_id ] . append ( ( query , subject , str ( score ) + "L" ) )
            lifted += 1
    logging . debug ( "{0} new pairs found." . format ( lifted ) )
    newanchorfile = anchor_file . rsplit ( "." , 1 ) [ 0 ] + ".lifted.anchors"
    ac . print_to_file ( filename = newanchorfile , accepted = accepted )
    summary ( [ newanchorfile ] )
    return newanchorfile
|
def root(x, k, context=None):
    """Return the kth root of x.

    For k odd and x negative (including -Inf), return a negative number.
    For k even and x negative (including -Inf), return NaN.
    The kth root of -0 is defined to be -0, whatever the parity of k.

    This function is only implemented for nonnegative k.
    """
    if k < 0:
        raise ValueError("root function not implemented for negative k")
    operand = BigFloat._implicit_convert(x)
    return _apply_function_in_current_context(
        BigFloat,
        mpfr.mpfr_root,
        (operand, k),
        context,
    )
|
def sign(node=None):
    """Sign a specific node to grant it access.

    You can specify 'all' to sign all (unsigned) nodes.

    :param node: token/host_name of the node to sign, or 'all'.
    :raises Exception: when no node is specified.
    :returns: dict mapping token -> node for every node that was signed.
    """
    if not node:
        # BUGFIX: previous message was garbled ("Specify either 'all' your
        # specify token/host_name of node to sign.").
        raise Exception("Specify either 'all' or the token/host_name of the node to sign.")
    # 'all' maps to the search term used for unsigned nodes.
    if node == 'all':
        node = 'unsigned'
    nodes = list_nodes(search=node)
    result = {}
    for token, entry in nodes.items():
        entry['access'] = 'node'
        entry.save()
        result[token] = entry
    return result
|
def _process_strainmeans_file ( self , limit ) :
    """This will store the entire set of strain means in a hash.

    Not the most efficient representation,
    but easy access.
    We will loop through this later to then apply cutoffs
    and add associations

    :param limit: NOTE(review): unused in this method -- no row limit is
        applied; presumably kept for signature consistency with sibling
        _process_* methods.
    :return: None
    """
    LOG . info ( "Processing strain means ..." )
    line_counter = 0
    raw = '/' . join ( ( self . rawdir , self . files [ 'strainmeans' ] [ 'file' ] ) )
    with gzip . open ( raw , 'rb' ) as f :
        f = io . TextIOWrapper ( f )
        reader = csv . reader ( f )
        # First line is the header; verify it matches expectations.
        self . check_header ( self . files [ 'strainmeans' ] [ 'file' ] , f . readline ( ) )
        score_means_by_measure = { }
        strain_scores_by_measure = { }
        for row in reader :
            try : # ( measnum , varname , strain , strainid , sex , mean , nmice , sd , sem ,
                # cv , minval , maxval , logmean , logsd , zscore , logzscore )
                ( measnum , varname , strain , strainid , sex , mean , nmice , sd , sem , cv , minval , maxval , zscore ) = row
            except ValueError :
                # Rows with an unexpected number of columns are skipped.
                continue
            line_counter += 1
            strain_num = int ( strainid )
            assay_num = int ( measnum )
            # assuming the zscore is across all the items
            # in the same measure + var + strain + sex
            # note : it seems that there is only ever 1 varname per measnum .
            # note : some assays only tested one sex !
            # we split this here by sex
            if assay_num not in score_means_by_measure :
                score_means_by_measure [ assay_num ] = { }
            if sex not in score_means_by_measure [ assay_num ] :
                score_means_by_measure [ assay_num ] [ sex ] = list ( )
            score_means_by_measure [ assay_num ] [ sex ] . append ( float ( mean ) )
            if strain_num not in strain_scores_by_measure :
                strain_scores_by_measure [ strain_num ] = { }
            if sex not in strain_scores_by_measure [ strain_num ] :
                strain_scores_by_measure [ strain_num ] [ sex ] = { }
            strain_scores_by_measure [ strain_num ] [ sex ] [ assay_num ] = { 'mean' : float ( mean ) , 'zscore' : float ( zscore ) }
        # end loop over strainmeans
        self . score_means_by_measure = score_means_by_measure
        self . strain_scores_by_measure = strain_scores_by_measure
    return
|
def pretty_print(ast, indent_str='  '):
    """Simple pretty print function; returns a string rendering of an input
    AST of an ES5 Program.

    arguments

    ast
        The AST to pretty print
    indent_str
        The string used for indentations. Defaults to two spaces.
    """
    fragments = []
    for chunk in pretty_printer(indent_str)(ast):
        fragments.append(chunk.text)
    return ''.join(fragments)
|
def delete_repository(self, repository_id=None):
    """Deletes a ``Repository``.

    arg:    repository_id (osid.id.Id): the ``Id`` of the
            ``Repository`` to remove
    raise:  NotFound - ``repository_id`` not found
    raise:  NullArgument - ``repository_id`` is ``null``
    raise:  OperationFailed - unable to complete request
    raise:  PermissionDenied - authorization failure
    *compliance: mandatory -- This method must be implemented.*
    """
    # Implemented from awsosid template for -
    # osid.resource.BinAdminSession.delete_bin_template
    if self._can('delete'):
        return self._provider_session.delete_repository(repository_id)
    raise PermissionDenied()
|
def make_jvp_reversemode(fun, x):
    """Builds a function for evaluating the Jacobian-vector product at a
    point by applying reverse mode twice. Roughly 1.5x more FLOPs than
    forward-mode, plus memory requirements that scale with the number of
    primitives applied in the evaluation of f, as well as other overheads.
    See j-towns.github.io/2017/06/12/A-new-trick.html.
    """
    # First reverse pass gives the VJP closure and the primal output.
    vjp, y = _make_vjp(fun, x)
    # Differentiating the VJP itself (at a zero cotangent) yields the JVP.
    jvp, _ = _make_vjp(vjp, vspace(y).zeros())
    return jvp
|
def getFirstLevel(self):
    """Return a list of couples (x1, x2) of all the first level indexed
    regions.

    :returns: list of ``(x1, x2)`` tuples -- one per child when children
        exist, otherwise this node's own region -- or ``None`` when this
        node has no children and no region of its own.
    """
    # With children, the first level is the children's regions.
    if self.children:
        return [(child.x1, child.x2) for child in self.children]
    # Leaf node: fall back to this node's own region, if any.
    if self.x1 is not None:
        return [(self.x1, self.x2)]
    return None
|
def push ( self , id ) :
"Sends a message to the specified connection ( id )"
|
conn = session [ 'connections' ] . get ( id , None )
if conn :
msgs = simplejson . loads ( request . body )
for msg in msgs :
try :
cmd = msg . pop ( 0 ) . upper ( )
assert ' ' not in cmd , "Bad message"
if cmd in ( 'USER' , ) :
sufix = " :" + msg . pop ( )
else :
sufix = ''
assert not [ i for i in msg if ' ' in i ] , "Bad message"
print 'PUSH:' , ( cmd , msg , sufix )
if msg :
payload = "%s %s%s\r\n" % ( cmd , ' ' . join ( msg ) , sufix )
else :
payload = "%s%s\r\n" % ( cmd , sufix )
yield request . environ [ 'cogen.call' ] ( conn . sock . sendall ) ( payload . encode ( 'utf-8' ) )
if isinstance ( request . environ [ 'cogen.wsgi' ] . result , Exception ) :
yield simplejson . dumps ( ( '' , 'ERROR' , str ( e ) ) )
else :
yield simplejson . dumps ( ( '' , 'PUSH_OK' , '' ) )
except Exception , e :
yield simplejson . dumps ( ( '' , 'ERROR' , str ( e ) ) )
else :
yield simplejson . dumps ( ( '' , 'ERROR' , 'Invalid connection id.' ) )
|
def cell_sides_angstrom(self):
    """Array of sizes of a unit cell in Angstroms.

    The value is determined from the ``'cella'`` entry in `header`,
    divided element-wise by ``data_shape``.
    """
    cell_extent = self.header['cella']['value']
    return np.asarray(cell_extent, dtype=float) / self.data_shape
|
def format_docstring(owner_name, docstring, formatters):
    """Template ``formatters`` into ``docstring``.

    Parameters
    ----------
    owner_name : str
        The name of the function or class whose docstring is being
        templated.  Only used for error messages.
    docstring : str
        The docstring to template.
    formatters : dict [str -> str]
        Parameters for a str.format() call on ``docstring``.
        Multi-line values in ``formatters`` will have leading whitespace
        padded to match the leading whitespace of the substitution string.

    Raises
    ------
    ValueError
        If a parameter is missing from ``docstring``, or appears more
        than once.
    """
    # Build a dict of parameters for a vanilla format() call by searching
    # for each '{name}' placeholder (alone on its line, with optional
    # leading whitespace) and applying that whitespace to every line of
    # the desired substitution.
    format_params = {}
    for target, doc_for_target in iteritems(formatters):
        regex = re.compile(r'^(\s*)' + '({' + target + '})$', re.MULTILINE)
        matches = regex.findall(docstring)
        if not matches:
            raise ValueError(
                "Couldn't find template for parameter {!r} in docstring "
                "for {}."
                "\nParameter name must be alone on a line surrounded by "
                "braces.".format(target, owner_name),
            )
        elif len(matches) > 1:
            # BUG FIX: message previously read "Couldn't found multiple
            # templates ... {!r}in docstring" (bad grammar and a missing
            # space between the concatenated fragments).
            raise ValueError(
                "Found multiple templates for parameter {!r} "
                "in docstring for {}."
                "\nParameter should only appear once.".format(target, owner_name)
            )
        (leading_whitespace, _) = matches[0]
        format_params[target] = pad_lines_after_first(
            leading_whitespace, doc_for_target,
        )
    return docstring.format(**format_params)
|
def load_labels(path: Union[str, Path]) -> List[SingleConditionSpec]:
    """Load a labels file.

    Parameters
    ----------
    path
        Path of the labels file.

    Returns
    -------
    List[SingleConditionSpec]
        One SingleConditionSpec per condition stored in the labels file.
    """
    raw_specs = np.load(str(path))
    return [spec.view(SingleConditionSpec) for spec in raw_specs]
|
def modifyExtensions(self, extensionObjects=None):
    """Enable/disable service extension types based on the given objects.

    Parameters
    ----------
    extensionObjects : list of Extension, optional
        Replacement extension definitions.  No-op (returns None) when
        empty or when the first element is not an Extension.

    Returns
    -------
    The result of the service ``edit`` call, or None when nothing was done.
    """
    # BUG FIX: the original default was a mutable list ([]), which is
    # shared across calls; use None and normalize instead (backward
    # compatible — callers passing nothing see identical behavior).
    if extensionObjects is None:
        extensionObjects = []
    if len(extensionObjects) > 0 and isinstance(extensionObjects[0], Extension):
        self._extensions = extensionObjects
        res = self.edit(str(self))
        # Drop the cached JSON and re-initialize from the server state.
        self._json = None
        self.__init()
        return res
|
def get_pool_for_host(self, host_id):
    """Return the connection pool for the given host.

    This connection pool is used by the redis clients to make sure
    that it does not have to reconnect constantly.  If you want to use
    a custom redis client you can pass this in as connection pool
    manually.  Accepts either a host id or a ``HostInfo`` object.

    Raises:
        LookupError: when the host id is unknown.
        TypeError: for SSL over a unix domain socket, or SSL with a
            py-redis build lacking SSLConnection.
    """
    if isinstance(host_id, HostInfo):
        host_info = host_id
        host_id = host_info.host_id
    else:
        host_info = self.hosts.get(host_id)
        if host_info is None:
            raise LookupError('Host %r does not exist' % (host_id,))
    # Fast path: pool already exists; plain dict read needs no lock.
    rv = self._pools.get(host_id)
    if rv is not None:
        return rv
    with self._lock:
        # Double-checked locking: another thread may have created the
        # pool while we waited for the lock.
        rv = self._pools.get(host_id)
        if rv is None:
            opts = dict(self.pool_options or ())
            opts['db'] = host_info.db
            opts['password'] = host_info.password
            if host_info.unix_socket_path is not None:
                opts['path'] = host_info.unix_socket_path
                opts['connection_class'] = UnixDomainSocketConnection
                if host_info.ssl:
                    raise TypeError('SSL is not supported for unix '
                                    'domain sockets.')
            else:
                opts['host'] = host_info.host
                opts['port'] = host_info.port
                if host_info.ssl:
                    if SSLConnection is None:
                        raise TypeError('This version of py-redis does '
                                        'not support SSL connections.')
                    opts['connection_class'] = SSLConnection
                    # NOTE(review): .iteritems() is Python 2 only; under
                    # Python 3 this needs .items().
                    opts.update(('ssl_' + k, v) for k, v in
                                (host_info.ssl_options or {}).iteritems())
            rv = self.pool_cls(**opts)
            self._pools[host_id] = rv
    return rv
|
def get_zonefile_inventory(hostport, offset, count, timeout=30, my_hostport=None, proxy=None):
    """Get the atlas zonefile inventory from the given peer.

    offset/count are in bytes.

    Return {'status': True, 'inv': inventory} on success, where 'inv'
    has been base64-decoded into raw bytes.
    Return {'error': ...} (possibly with 'http_status') on error.
    """
    assert hostport or proxy, 'Need either hostport or proxy'
    # Expected RPC reply: an object carrying a base64-encoded 'inv' string.
    inv_schema = {'type': 'object', 'properties': {'inv': {'type': 'string', 'pattern': OP_BASE64_EMPTY_PATTERN}, }, 'required': ['inv']}
    schema = json_response_schema(inv_schema)
    if proxy is None:
        proxy = connect_hostport(hostport)
    zf_inv = None
    try:
        zf_inv = proxy.get_zonefile_inventory(offset, count)
        zf_inv = json_validate(schema, zf_inv)
        if json_is_error(zf_inv):
            return zf_inv
        # decode
        zf_inv['inv'] = base64.b64decode(str(zf_inv['inv']))
        # make sure it corresponds to this range
        assert len(zf_inv['inv']) <= count, 'Zonefile inventory in is too long (got {} bytes)'.format(len(zf_inv['inv']))
    except ValidationError as ve:
        if BLOCKSTACK_DEBUG:
            log.exception(ve)
        resp = {'error': 'Server response did not match expected schema. You are likely communicating with an out-of-date Blockstack node.', 'http_status': 502}
        return resp
    except AssertionError as ae:
        # Inventory was longer than the requested byte range.
        if BLOCKSTACK_DEBUG:
            log.exception(ae)
        resp = {'error': 'Server replied an invalid zone file inventory vector'}
        return resp
    except socket.timeout:
        log.error("Connection timed out")
        resp = {'error': 'Connection to remote host timed out.', 'http_status': 503}
        return resp
    except socket.error as se:
        log.error("Connection error {}".format(se.errno))
        resp = {'error': 'Connection to remote host failed.', 'http_status': 502}
        return resp
    except Exception as ee:
        # Catch-all boundary: report, don't propagate, so callers always
        # receive a dict.
        if BLOCKSTACK_DEBUG:
            log.exception(ee)
        log.error("Caught exception while connecting to Blockstack node: {}".format(ee))
        resp = {'error' : 'Failed to contact Blockstack node. Try again with `--debug`.', 'http_status': 500}
        return resp
    return zf_inv
|
def add_composition(self, composition):
    """Append *composition* to this suite and return the suite (fluent).

    Raise an UnexpectedObjectError when the supplied argument is not a
    Composition object.
    """
    # Duck-typed check: anything exposing a 'tracks' attribute is accepted.
    looks_like_composition = hasattr(composition, 'tracks')
    if not looks_like_composition:
        raise UnexpectedObjectError(
            "Object '%s' not expected. Expecting "
            "a mingus.containers.Composition object." % composition)
    self.compositions.append(composition)
    return self
|
def parse_record_static(self, raw):
    """Parse raw data (retrieved by a static request) into pandas objects.

    Returns tuple (data, metadata):
        data - pandas.DataFrame with the retrieved data.
        metadata - dict with the status and date of the given request
        (empty dict on a connection-level error).
    """
    # Parsing status
    status = self.status(raw)
    # Testing if no errors
    if status['StatusType'] != 'Connected':
        if self.raise_on_error:
            raise DatastreamException('%s (error %i): %s --> "%s"' % (status['StatusType'], status['StatusCode'], status['StatusMessage'], status['Request']))
        else:
            self._test_status_and_warn()
            return pd.DataFrame(), {}
    # Convert record to dict
    record = self.extract_data(raw)
    try:
        # EAFP: presence of 'INSTERROR' means a per-instrument failure.
        error = record['INSTERROR']
        if self.raise_on_error:
            raise DatastreamException('Error: %s --> "%s"' % (error, status['Request']))
        else:
            self.last_status['StatusMessage'] = error
            self.last_status['StatusType'] = 'INSTERROR'
            self._test_status_and_warn()
            return pd.DataFrame(), {'Status': error, 'Date': None}
    except KeyError:
        # No 'INSTERROR' key: the record is valid.
        metadata = {'Status': 'OK', 'Date': ''}
        # All fields that are available (keys with '_' are per-element
        # duplicates such as 'CCY_2' and are expanded below).
        fields = [x for x in record if '_' not in x]
        metadata['Date'] = record['DATE']
        fields.remove('DATE')
        # Number of elements
        num = len([x[0] for x in record if 'SYMBOL' in x])
        # field naming 'CCY', 'CCY_2', 'CCY_3', ...
        fld_name = lambda field, indx: field if indx == 0 else field + '_%i' % (indx + 1)
        # Construct pd.DataFrame
        res = pd.DataFrame({fld: [record[fld_name(fld, ind)] if fld_name(fld, ind) in record else '' for ind in range(num)] for fld in fields})
        return res, metadata
|
def supplement(self, coordsys='gal'):
    """Add supplemental coordinate columns.

    Appends 'ra'/'dec' and 'glon'/'glat' columns (when 'lon'/'lat' are
    present) and 'position_angle_gal'/'position_angle_cel' (when
    'position_angle' is present), converting from the native system.

    Parameters
    ----------
    coordsys : str
        Coordinate system of the native columns: 'gal' or 'cel'.

    Returns
    -------
    Samples
        A deep copy of self with the supplemental columns appended.
    """
    from ugali.utils.projector import gal2cel, gal2cel_angle
    from ugali.utils.projector import cel2gal, cel2gal_angle
    coordsys = coordsys.lower()
    kwargs = dict(usemask=False, asrecarray=True)
    out = copy.deepcopy(self)
    # Rows that are entirely zero are placeholders; their derived columns
    # are zeroed too.  BUG FIX: 'zeros' was previously computed only
    # inside the lon/lat branch, raising NameError when only
    # 'position_angle' was present.
    zeros = np.all(self.ndarray == 0, axis=1)
    if ('lon' in out.names) and ('lat' in out.names):
        if coordsys == 'gal':
            ra, dec = gal2cel(out.lon, out.lat)
            glon, glat = out.lon, out.lat
        else:
            ra, dec = out.lon, out.lat
            glon, glat = cel2gal(out.lon, out.lat)
        ra[zeros] = 0
        dec[zeros] = 0
        glon[zeros] = 0
        glat[zeros] = 0
        names = ['ra', 'dec', 'glon', 'glat']
        arrs = [ra, dec, glon, glat]
        out = mlab.rec_append_fields(out, names, arrs).view(Samples)
        # out = recfuncs.append_fields(out, names, arrs, **kwargs).view(Samples)
    if 'position_angle' in out.names:
        if coordsys == 'gal':
            pa_gal = out.position_angle
            pa_cel = gal2cel_angle(out.lon, out.lat, out.position_angle)
            pa_cel = pa_cel - 180. * (pa_cel > 180.)
        else:
            pa_gal = cel2gal_angle(out.lon, out.lat, out.position_angle)
            pa_cel = out.position_angle
            pa_gal = pa_gal - 180. * (pa_gal > 180.)
        pa_gal[zeros] = 0
        pa_cel[zeros] = 0
        names = ['position_angle_gal', 'position_angle_cel']
        arrs = [pa_gal, pa_cel]
        out = recfuncs.append_fields(out, names, arrs, **kwargs).view(Samples)
    return out
|
def list_requests(self, status=None, assignee=None, author=None):
    """Get all pull requests of a project.

    :param status: filters the status of the requests
    :param assignee: filters the assignee of the requests
    :param author: filters the author of the requests
    :return: list of pull-request records
    """
    url = "{}pull-requests".format(self.create_basic_url())
    # Only include filters that were actually supplied.
    filters = {'status': status, 'assignee': assignee, 'author': author}
    params = {key: value for key, value in filters.items() if value is not None}
    response = self._call_api(url, params=params)
    return response['requests']
|
def add_depth_channel(img_tensor, pad_mode):
    '''Overwrite channels 1 and 2 of ``img_tensor`` (N, C, H, W) in place.

    Channel 1 is set to the depth tensor for ``pad_mode``; channel 2 is
    set to channel 0 scaled element-wise by the depth tensor.  Returns
    None — the tensor is mutated in place.
    '''
    img_tensor[:, 1] = get_depth_tensor(pad_mode)
    img_tensor[:, 2] = img_tensor[:, 0] * get_depth_tensor(pad_mode)
|
def fit(self, X, y=None, **fit_params):
    """Perform preliminary computations to set up the figure.

    Derives ``self.features_`` from ``X`` when it is unset: the column
    labels when X is a DataFrame, numeric indices otherwise.

    This method must return self.
    """
    if self.features_ is None:
        if is_dataframe(X):
            # DataFrame: take its column labels.
            self.features_ = np.array(X.columns)
        else:
            # Plain array: label columns 0..ncols-1.
            _, ncols = X.shape
            self.features_ = np.arange(0, ncols)
    return self
|
def remove_existing_fpaths(fpath_list, verbose=VERBOSE, quiet=QUIET, strict=False, print_caller=PRINT_CALLER, lbl='files'):
    """Check existence before removing, then remove the existing paths."""
    import utool as ut
    if print_caller:
        print(util_dbg.get_caller_name(range(1, 4)) + ' called remove_existing_fpaths')
    # Drop None entries, then flag which of the remaining paths exist.
    valid_fpaths = ut.filter_Nones(fpath_list)
    exist_flags = list(map(exists, valid_fpaths))
    if verbose:
        n_total = len(fpath_list)
        n_valid = len(valid_fpaths)
        n_exist = sum(exist_flags)
        print('[util_path.remove_existing_fpaths] request delete of %d %s' % (n_total, lbl))
        if n_valid != n_total:
            print(('[util_path.remove_existing_fpaths] '
                   'trying to delete %d/%d non None %s ') % (n_valid, n_total, lbl))
        print(('[util_path.remove_existing_fpaths] '
               ' %d/%d exist and need to be deleted') % (n_exist, n_valid))
    existing_fpaths = ut.compress(valid_fpaths, exist_flags)
    return remove_fpaths(existing_fpaths, verbose=verbose, quiet=quiet,
                         strict=strict, print_caller=False, lbl=lbl)
|
def GetMemMappedMB(self):
    '''Retrieve the amount of memory (MB) mapped for the virtual machine.

    Memory that is ballooned, swapped, or has never been accessed is
    excluded.'''
    out = c_uint()
    status = vmGuestLib.VMGuestLib_GetMemMappedMB(self.handle.value, byref(out))
    if status != VMGUESTLIB_ERROR_SUCCESS:
        raise VMGuestLibException(status)
    return out.value
|
def make_op_return_outputs(data, inputs, change_address, fee=OP_RETURN_FEE, send_amount=0, format='bin'):
    """Build the outputs for an OP_RETURN transaction: the data-bearing
    main output followed by the change output back to change_address."""
    main_output = {
        "script_hex": make_op_return_script(data, format=format),
        "value": send_amount,
    }
    change_output = {
        "script_hex": make_pay_to_address_script(change_address),
        "value": calculate_change_amount(inputs, send_amount, fee),
    }
    return [main_output, change_output]
|
def replace_dataset(self, dataset_key, **kwargs):
    """Replace an existing dataset.

    *This method will completely overwrite an existing dataset.*

    :param dataset_key: Dataset identifier, in the form of owner/id
    :type dataset_key: str
    :param description: Dataset description
    :type description: str, optional
    :param summary: Dataset summary markdown
    :type summary: str, optional
    :param tags: Dataset tags
    :type tags: list, optional
    :param license: Dataset license
    :type license: {'Public Domain', 'PDDL', 'CC-0', 'CC-BY', 'ODC-BY',
        'CC-BY-SA', 'ODC-ODbL', 'CC BY-NC', 'CC BY-NC-SA', 'Other'}
    :param visibility: Dataset visibility
    :type visibility: {'OPEN', 'PRIVATE'}
    :param files: File names and source URLs to add or update
    :type files: dict, optional
    :raises RestApiException: If a server error occurs

    Examples
    --------
    >>> import datadotworld as dw
    >>> api_client = dw.api_client()
    >>> api_client.replace_dataset(
    ...     'username/test-dataset',
    ...     visibility='PRIVATE', license='Public Domain',
    ...     description='A better description')  # doctest: +SKIP
    """
    # Factory for the top-level PUT request body.
    def build_put_request():
        return _swagger.DatasetPutRequest(
            title=kwargs.get('title'),
            visibility=kwargs.get('visibility'))

    # Factory for each file entry of the request.
    def build_file_request(name, url, expand_archive, description, labels):
        return _swagger.FileCreateRequest(
            name=name,
            source=_swagger.FileSourceCreateRequest(
                url=url, expand_archive=expand_archive),
            description=description,
            labels=labels)

    request = self.__build_dataset_obj(build_put_request, build_file_request, kwargs)
    owner_id, dataset_id = parse_dataset_key(dataset_key)
    try:
        self._datasets_api.replace_dataset(owner_id, dataset_id, request)
    except _swagger.rest.ApiException as e:
        raise RestApiError(cause=e)
|
def getColData(self, attri, fname, numtype='cycNum'):
    """Return a column of data for the given column attribute.

    Parameters
    ----------
    attri : string
        The name of the attribute (column) we are looking for.
    fname : string
        The name of the file we are getting the data from, or the
        cycle number found in the filename.
    numtype : string, optional
        Determines whether fname is the name of a file or the cycle
        number.  'file' interprets it as a file; 'cycNum' (default)
        as a cycle number.

    Returns
    -------
    numpy.array
        The requested column.  Isotope names (column index 5) are
        normalized to 'El-A' form, e.g. 'H-1'.
    """
    fname = self.findFile(fname, numtype)
    # BUG FIX: the file was never closed; use a context manager.
    with open(fname, 'r') as f:
        # Skip the header block.
        for _ in range(self.index + 1):
            f.readline()
        rows = [line.strip().split() for line in f.readlines()]
    # Locate the requested column among the known data columns.
    index = 0
    while index < len(self.dcols):
        if attri == self.dcols[index]:
            break
        index += 1
    data = []
    for tokens in rows:
        if index == 5 and len(tokens) == 7:
            # Isotope split over two tokens: element + mass number.
            data.append(str(tokens[index].capitalize()) + '-' + str(tokens[index + 1]))
        elif index == 5 and len(tokens) != 7:
            tmp = str(tokens[index])
            if tmp[len(tmp) - 1].isdigit():
                # Fused token like 'he4': element (2 chars) + mass number.
                element = (tmp[0] + tmp[1]).capitalize()
                mass = tmp[2:]
                data.append(element + '-' + mass)
            elif tmp == 'PROT':
                data.append('H-1')
            elif tmp in ('NEUT', 'NEUTR', 'nn', 'N 1', 'N-1'):
                # BUG FIX: the original used "tmp == ('NEUT' or 'NEUTR' or
                # ...)" which only ever compared against 'NEUT'.
                data.append('N-1')
            else:
                data.append(tmp)
        elif index == 0:
            data.append(int(tokens[index]))
        else:
            data.append(float(tokens[index]))
    return array(data)
|
def hamming_emd(d1, d2):
    """Return the Earth Mover's Distance between two distributions (indexed
    by state, one dimension per node), using the Hamming distance between
    states as the transportation cost function.

    Singleton dimensions are squeezed out.
    """
    num_nodes = d1.squeeze().ndim
    flat_d1 = flatten(d1)
    flat_d2 = flatten(d2)
    return emd(flat_d1, flat_d2, _hamming_matrix(num_nodes))
|
def load_diagram_from_csv(filepath, bpmn_diagram):
    """Read a CSV file and map it into the inner representation of a BPMN
    diagram, populating the given BpmnDiagramGraph instance in place.

    :param filepath: string with the input CSV filepath,
    :param bpmn_diagram: an instance of BpmnDiagramGraph class.
    """
    flows = bpmn_diagram.sequence_flows
    process_elements = bpmn_diagram.process_elements
    diagram_attrs = bpmn_diagram.diagram_attributes
    plane_attrs = bpmn_diagram.plane_attributes
    # Parse the CSV into a dict, then populate each part of the model.
    process_dict = BpmnDiagramGraphCSVImport.import_csv_file_as_dict(filepath)
    BpmnDiagramGraphCSVImport.populate_diagram_elements_dict(diagram_attrs)
    BpmnDiagramGraphCSVImport.populate_process_elements_dict(process_elements, process_dict)
    BpmnDiagramGraphCSVImport.populate_plane_elements_dict(plane_attrs)
    BpmnDiagramGraphCSVImport.import_nodes(process_dict, bpmn_diagram, flows)
    BpmnDiagramGraphCSVImport.representation_adjustment(process_dict, bpmn_diagram, flows)
|
def price_dataframe(symbols=('sne',), start=datetime.datetime(2008, 1, 1), end=datetime.datetime(2009, 12, 31), price_type='actual_close', cleaner=util.clean_dataframe, ):
    """Retrieve the prices of a list of equities as a DataFrame (columns = symbols).

    Arguments:
        symbols (list of str): Ticker symbols like "GOOG", "AAPL", etc.
            e.g. ["AAPL", "slv", "GLD", "GOOG", "$SPX", "XOM", "msft"]
        start (datetime): The date at the start of the period being analyzed.
        end (datetime): The date at the end of the period being analyzed.
            Yahoo data stops at 2013/1/1.
    """
    # Normalize a single price_type string into a list.
    if isinstance(price_type, basestring):  # NOTE(review): Python 2 only
        price_type = [price_type]
    start = nlp.util.normalize_date(start or datetime.date(2008, 1, 1))
    end = nlp.util.normalize_date(end or datetime.date(2009, 12, 31))
    symbols = util.make_symbols(symbols)
    df = get_dataframes(symbols)
    # t = du.getNYSEdays(start, end, datetime.timedelta(hours=16))
    # df = clean_dataframes(dataobj.get_data(t, symbols, price_type))
    if not df or len(df) > 1:
        # NOTE(review): assumes get_dataframes returns a list/sequence —
        # truthiness of a pandas DataFrame itself raises ValueError; verify.
        return cleaner(df)
    else:
        return cleaner(df[0])
|
def dumps(asts):
    """Create a compressed string from a Trace.

    ``asts`` is a pandas-Series-like object exposing ``.values``,
    ``.index`` and ``.name``.  The payload layout before zlib compression
    is: 4-byte little-endian name length, 4-byte little-endian
    index-bytes length, utf-8 name, index as float64 bytes, value bytes.
    """
    # BUG FIX: ndarray.tostring() was deprecated and removed in NumPy 2.0;
    # tobytes() is the byte-identical replacement.
    d = asts.values.tobytes()
    t = asts.index.values.astype(float).tobytes()
    lt = struct.pack('<L', len(t))
    i = asts.name.encode('utf-8')
    li = struct.pack('<L', len(i))
    payload = li + lt + i + t + d
    try:  # python 2: wrap in a buffer
        return buffer(zlib.compress(payload))
    except NameError:  # python 3: 'buffer' is gone; return bytes
        return zlib.compress(payload)
|
def to_protobuf(self) -> SaveStateProto:
    """Serialize this save state into its protobuf message.

    :return: protobuf structure
    :rtype: ~unidown.plugin.protobuf.save_state_pb2.SaveStateProto
    """
    proto = SaveStateProto()
    proto.version = str(self.version)
    proto.last_update.CopyFrom(datetime_to_timestamp(self.last_update))
    proto.plugin_info.CopyFrom(self.plugin_info.to_protobuf())
    for key, item in self.link_item_dict.items():
        proto.data[key].CopyFrom(item.to_protobuf())
    return proto
|
def get(context, resource, **kwargs):
    """Fetch a specific resource by id from the DCI control server.

    Remaining keyword arguments are forwarded as query parameters.
    """
    resource_id = kwargs.pop('id')
    uri = '%s/%s/%s' % (context.dci_cs_api, resource, resource_id)
    return context.session.get(uri, timeout=HTTP_TIMEOUT, params=kwargs)
|
def reload(self):
    """Fetch this object from the server again and refresh ``attrs``
    with the new data."""
    fresh = self.collection.get(self.id)
    self.attrs = fresh.attrs
|
def _pick_lead_item(items):
    """Pick a single representative sample for batch calling to attach
    calls to.  For cancer samples, attach to the tumor.
    """
    align_bams = [dd.get_align_bam(x) for x in items]
    if not vcfutils.is_paired_analysis(align_bams, items):
        return items[0]
    for data in items:
        if vcfutils.get_paired_phenotype(data) == "tumor":
            return data
    raise ValueError("Did not find tumor sample in paired tumor/normal calling")
|
def prepare(data):
    """Restructure raw ref data into {'ref': ..., 'head': {'sha': ...}}."""
    commit = data.get("object")
    return {
        "ref": data.get("ref"),
        "head": {"sha": commit.get("sha")},
    }
|
def combine_reducers(reducers):
    """Composition tool for creating reducer trees.

    Args:
        reducers: dict mapping state keys to the reducer functions
            responsible for each key.

    Returns:
        A new, combined reducer function.
    """
    # Keep only the entries whose value is actually callable.
    final_reducers = {k: r for k, r in reducers.items() if hasattr(r, '__call__')}
    # Validate once up front; the failure is re-raised lazily on first call.
    sanity_error = None
    try:
        assert_reducer_sanity(final_reducers)
    except Exception as exc:
        sanity_error = exc

    def combination(state=None, action=None):
        if state is None:
            state = {}
        if sanity_error:
            raise sanity_error
        next_state = {}
        has_changed = False
        for key, reducer in final_reducers.items():
            prev_slice = state.get(key)
            next_slice = reducer(prev_slice, action)
            if next_slice is None:
                raise Exception(get_undefined_state_error_message(key, action))
            next_state[key] = next_slice
            has_changed = has_changed or next_slice != prev_slice
        # Preserve identity when nothing changed, so consumers can compare.
        return next_state if has_changed else state

    return combination
|
def get_slack_channels(self, token):
    '''Get all channel names from Slack, keyed by channel id.'''
    # These opts won't be honored until
    # https://github.com/saltstack/salt/pull/41187/files is merged
    ret = salt.utils.slack.query(
        function='rooms',
        api_key=token,
        opts={'exclude_archived': True, 'exclude_members': True})
    if 'message' not in ret:
        return {}
    return {item['id']: item['name'] for item in ret['message']}
|
def run(self):
    """Version of Thread.run that traps exceptions and pushes a
    (name, error, traceback) tuple onto the fifo instead of letting
    them propagate."""
    try:
        threading.Thread.run(self)
    except Exception:
        exc_type, exc_value, exc_tb = sys.exc_info()
        error = traceback.format_exception_only(exc_type, exc_value)[0][:-1]
        tback = (self.name + ' Traceback (most recent call last):\n'
                 + ''.join(traceback.format_tb(exc_tb)))
        self.fifo.put((self.name, error, tback))
|
def sipprverse_full(self):
    """Run a subset of the download methods - only the targets used in the
    sipprverse are required here.  A target set is refreshed when
    ``self.overwrite`` is set or its database folder is missing.
    """
    logging.info('Beginning sipprverse full database downloads')

    def needs(subdir):
        # Download when overwriting, or when the target folder is absent.
        return self.overwrite or not os.path.isdir(os.path.join(self.databasepath, subdir))

    if needs('genesippr'):
        self.sipprverse_targets(databasepath=self.databasepath)
    if needs('ConFindr'):
        self.confindr_targets()
    if needs('mash'):
        self.mash(databasepath=self.databasepath)
    if needs('MLST'):
        self.mlst(databasepath=self.databasepath)
    if needs('rMLST'):
        self.rmlst(databasepath=self.databasepath, credentials=self.credentials)
    if needs('resfinder'):
        self.cge_db_downloader(databasepath=self.databasepath, analysistype='resfinder', dbname='resfinder_db')
    if needs('virulence'):
        self.cge_db_downloader(databasepath=self.databasepath, analysistype='virulence', dbname='virulencefinder_db')
    if needs('serosippr'):
        self.cge_db_downloader(databasepath=self.databasepath, analysistype='serosippr', dbname='serotypefinder_db')
|
def __parse(self, stream, has_orgs):
    """Parse identities or organizations in mailmap format.

    Mailmap format is a plain-text document where each line maps an
    email address to its aliases, in any of the forms:

        Proper Name <commit@email.xx>
        <proper@email.xx> <commit@email.xx>
        Proper Name <proper@email.xx> <commit@email.xx>
        Proper Name <proper@email.xx> Commit Name <commit@email.xx>

    When the flag `has_orgs` is set, each line instead maps an
    organization to an identity:

        Organization Name <org@email.xx> Proper Name <proper@email.xx>

    :param stream: mailmap stream to parse
    :raise InvalidFormatError: raised when the format of the stream is
        not valid.
    """
    parse = self.__parse_organizations if has_orgs else self.__parse_identities
    parse(stream)
|
def _save_results(options, module, core_results, fit_results):
    """Save results of analysis as tables and figures.

    Parameters
    ----------
    options : dict
        Option names and values for analysis
    module : str
        Module that contained function used to generate core_results
    core_results : dataframe, array, value, list of tuples
        Results of main analysis
    fit_results : list or None
        Results of comparing emp analysis to models, None if not applicable
    """
    logging.info("Saving all results")
    # Use custom plot format
    mpl.rcParams.update(misc.rcparams.ggplot_rc)
    # Make run directory
    os.makedirs(options['run_dir'])
    # Write core results
    _write_core_tables(options, module, core_results)
    # Write additional results if analysis from emp
    if module == 'emp':
        _write_subset_index_file(options, core_results)
    # Write model/data comparison if models were given
    if fit_results:
        models = options['models'].replace(' ', '').split(';')
        for i, core_result in enumerate(core_results):
            _write_fitted_params(i, models, options, fit_results)
            _write_test_statistics(i, models, options, fit_results)
            _write_comparison_plot_table(i, models, options, core_results, fit_results)
|
def save(self, fname):
    """Save a pickled version of the embedding into `fname`.

    The pickled state is a (vocabulary-state, vectors) tuple.
    """
    state = (self.vocabulary.getstate(), self.vectors)
    with open(fname, 'wb') as out:
        pickle.dump(state, out, protocol=pickle.HIGHEST_PROTOCOL)
|
def redirect(self, url, status=None):
    """Redirect to the specified url; the optional status code
    defaults to 302."""
    self.status_code = status if status is not None else 302
    self.headers = Headers([('location', url)])
    self.message = ''
    self.end()
|
def as_namedtuple(self):
    """Export the color register as a namedtuple."""
    data = self.as_dict()
    fields = list(data.keys())
    values = list(data.values())
    return namedtuple('ColorRegister', fields)(*values)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.