signature
stringlengths 29
44.1k
| implementation
stringlengths 0
85.2k
|
|---|---|
def star(self, login, repo):
    """Star the repository ``login/repo`` for the authenticated user.

    :param str login: (required), owner of the repo
    :param str repo: (required), name of the repo
    :return: bool -- True if starring succeeded (HTTP 204)
    """
    # Both pieces of the repo path are required; bail out early otherwise.
    if not (login and repo):
        return False
    url = self._build_url('user', 'starred', login, repo)
    return self._boolean(self._put(url), 204, 404)
|
def fwdl_status_output_fwdl_state ( self , ** kwargs ) :
"""Auto Generated Code"""
|
config = ET . Element ( "config" )
fwdl_status = ET . Element ( "fwdl_status" )
config = fwdl_status
output = ET . SubElement ( fwdl_status , "output" )
fwdl_state = ET . SubElement ( output , "fwdl-state" )
fwdl_state . text = kwargs . pop ( 'fwdl_state' )
callback = kwargs . pop ( 'callback' , self . _callback )
return callback ( config )
|
def create_entry_file(self, filename, script_map, enapps):
    '''Creates a webpack entry file for the given script map.

    :param filename: destination path of the generated entry file.
    :param script_map: mapping of (app, template) -> iterable of script
        paths; empty map means nothing to do.
    :param enapps: passed through to the template render call
        (NOTE(review): the template below does not appear to reference it —
        presumably kept for template overrides; confirm).
    :return: True if the file was written, False if skipped because the
        existing content is identical (inline mode), None for an empty map.
    '''
    if len(script_map) == 0:
        return
    # create the entry file
    template = MakoTemplate('''
<%! import os %>
// dynamic imports are within functions so they don't happen until called
DMP_CONTEXT.loadBundle({
%for (app, template), script_paths in script_map.items():
    "${ app }/${ template }": () => [
%for path in script_paths:
        import(/* webpackMode: "eager" */ "./${ os.path.relpath(path, os.path.dirname(filename)) }"),
%endfor
    ],
%endfor
})
''')
    content = template.render(enapps=enapps, script_map=script_map, filename=filename, ).strip()
    # ensure the parent directories exist
    if not os.path.exists(os.path.dirname(filename)):
        os.makedirs(os.path.dirname(filename))
    # if the file exists, then consider the options
    file_exists = os.path.exists(filename)
    if file_exists and self.running_inline:
        # running inline means that we're in debug mode and webpack is likely
        # watching, so we don't want to recreate the entry file (and cause
        # webpack to constantly reload) unless we have changes
        with open(filename, 'r') as fin:
            if content == fin.read():
                return False
    if file_exists and not self.options.get('overwrite'):
        raise CommandError('Refusing to destroy existing file: {} (use --overwrite option or remove the file)'.format(filename))
    # if we get here, write the file
    self.message('Creating {}'.format(os.path.relpath(filename, settings.BASE_DIR)), level=3)
    with open(filename, 'w') as fout:
        fout.write(content)
    return True
|
def extract_labels(self, f, one_hot=False, num_classes=10):
    """Extract MNIST-format labels into a 1D uint8 numpy array.

    Args:
        f: A file object that can be passed into a gzip reader.
        one_hot: Does one hot encoding for the result.
        num_classes: Number of classes for the one hot encoding.

    Returns:
        labels: a 1D uint8 numpy array (or the one-hot expansion).

    Raises:
        ValueError: If the bytestream doesn't start with 2049.
    """
    print('Extracting', f.name)
    with gzip.GzipFile(fileobj=f) as bytestream:
        # The label file starts with the magic number 2049.
        magic = self._read32(bytestream)
        if magic != 2049:
            raise ValueError('Invalid magic number %d in MNIST label file: %s' % (magic, f.name))
        count = self._read32(bytestream)
        raw = bytestream.read(count)
        labels = np.frombuffer(raw, dtype=np.uint8)
        if one_hot:
            return self.dense_to_one_hot(labels, num_classes)
        return labels
|
def toggle_deriv(self, evt=None, value=None):
    "toggle derivative of data"
    # No explicit value supplied: flip the current derivative flag.
    if value is None:
        self.conf.data_deriv = not self.conf.data_deriv
    label = self.conf.data_expr or ''
    if self.conf.data_deriv:
        label = "deriv(%s)" % label
    self.write_message("plotting %s" % label, panel=0)
    self.conf.process_data()
|
def result(self):
    """Concatenate accumulated tensors.

    Returns a dict mapping each accumulant key to a tensor stacked from
    the list of tensors gathered so far.
    """
    stacked = {}
    for key, tensors in self.accumulants.items():
        stacked[key] = torch.stack(tensors)
    return stacked
|
def remove_duplicate_notes(self):
    """Remove duplicate and enharmonic notes from the container.

    Keeps the first occurrence of each note (membership uses ``==``, so
    enharmonic equivalents collapse too) and returns the deduplicated list.
    """
    unique = []
    for note in self.notes:
        # Linear membership test on purpose: notes may be unhashable and
        # equality is what defines "duplicate" here.
        if note not in unique:
            unique.append(note)
    self.notes = unique
    return unique
|
def cla_adder(a, b, cin=0, la_unit_len=4):
    """Carry Lookahead Adder.

    :param int la_unit_len: the length of input that every unit processes

    A Carry LookAhead Adder is an adder that is faster than a ripple carry
    adder, as it calculates the carry bits faster.  It is not as fast as a
    Kogge-Stone adder, but uses less area.
    """
    a, b = pyrtl.match_bitwidth(a, b)
    # Base case: the whole input fits into one lookahead unit.
    if len(a) <= la_unit_len:
        sum_bits, cout = _cla_adder_unit(a, b, cin)
        return pyrtl.concat(cout, sum_bits)
    # Recursive case: add the low la_unit_len bits, then feed the carry
    # into the adder for the remaining high bits.
    low_sum, low_cout = _cla_adder_unit(a[0:la_unit_len], b[0:la_unit_len], cin)
    high_bits = cla_adder(a[la_unit_len:], b[la_unit_len:], low_cout, la_unit_len)
    return pyrtl.concat(high_bits, low_sum)
|
def npci_contents(self, use_dict=None, as_class=dict):
    """Return the contents of the NPCI as a dict.

    :param use_dict: optional existing mapping to fill in; a fresh
        ``as_class()`` instance is created when omitted.
    :param as_class: mapping factory used when ``use_dict`` is None.
    :return: the mapping that was created/updated.
    """
    if _debug:
        NPCI._debug("npci_contents use_dict=%r as_class=%r", use_dict, as_class)
    # make/extend the dictionary of content
    if use_dict is None:
        if _debug:
            NPCI._debug(" - new use_dict")
        use_dict = as_class()
    # version and control are simple
    # (idiom fix: plain subscript assignment instead of explicit
    # __setitem__ calls — identical behavior for any mapping)
    use_dict['version'] = self.npduVersion
    use_dict['control'] = self.npduControl
    # dnet/dlen/dadr
    if self.npduDADR is not None:
        if self.npduDADR.addrType == Address.remoteStationAddr:
            use_dict['dnet'] = self.npduDADR.addrNet
            use_dict['dlen'] = self.npduDADR.addrLen
            use_dict['dadr'] = btox(self.npduDADR.addrAddr or '')
        elif self.npduDADR.addrType == Address.remoteBroadcastAddr:
            # remote broadcast: no station address
            use_dict['dnet'] = self.npduDADR.addrNet
            use_dict['dlen'] = 0
            use_dict['dadr'] = ''
        elif self.npduDADR.addrType == Address.globalBroadcastAddr:
            # global broadcast uses the reserved network number
            use_dict['dnet'] = 0xFFFF
            use_dict['dlen'] = 0
            use_dict['dadr'] = ''
    # snet/slen/sadr
    if self.npduSADR is not None:
        use_dict['snet'] = self.npduSADR.addrNet
        use_dict['slen'] = self.npduSADR.addrLen
        use_dict['sadr'] = btox(self.npduSADR.addrAddr or '')
    # hop count
    if self.npduHopCount is not None:
        use_dict['hop_count'] = self.npduHopCount
    # network layer message name decoded
    if self.npduNetMessage is not None:
        use_dict['net_message'] = self.npduNetMessage
    if self.npduVendorID is not None:
        use_dict['vendor_id'] = self.npduVendorID
    # return what we built/updated
    return use_dict
|
def run(self):
    """Run command.

    Regenerates the JNI thunk source/header from compiled .class files,
    skipping the build when the existing output is newer than every input.
    """
    self.announce('Building thunks', level=distutils.log.INFO)
    # run short circuit logic here
    srcDir = os.path.join("build", "lib")
    destBody = os.path.join("build", "src", "jp_thunk.cpp")
    destHeader = os.path.join("build", "src", "jp_thunk.h")
    if os.path.isfile(destBody):
        # NOTE(review): getctime is creation time on Windows but metadata
        # change time on Unix — presumably getmtime was intended; confirm.
        t1 = os.path.getctime(destBody)
        update = False
        for filename in _glob(srcDir, "*.class"):
            # Any .class newer than the generated body forces a rebuild.
            if t1 < os.path.getctime(filename):
                update = True
        if not update:
            self.announce('Skip build thunks', level=distutils.log.INFO)
            return
    # do the build
    createThunks(srcDir, destBody, destHeader, namespace="JPThunk")
|
def get_collection_name(cls):
    '''Gets the full name of the collection, as declared by the ModelOptions
    class like so: ``namespace.name``.

    If no namespace or name is provided, the class's lowercase name is used.
    '''
    fallback = cls.__name__.lower()
    if not hasattr(cls, '_meta'):
        return fallback
    namespace = getattr(cls._meta, 'namespace', None)
    declared = getattr(cls._meta, 'name', None)
    if namespace:
        return '{}.{}'.format(namespace, declared or fallback)
    return declared or fallback
|
def run(argv=None):
    """Main CLI entry point.

    Uses ``sys.argv`` (minus the program name) when no argument vector is
    supplied.
    """
    args = sys.argv[1:] if argv is None else argv
    return InfrascopeCLI().run(args)
|
def get_authorization_ids_by_vault(self, vault_id):
    """Gets the list of ``Authorization`` ``Ids`` associated with a ``Vault``.

    arg:    vault_id (osid.id.Id): ``Id`` of a ``Vault``
    return: (osid.id.IdList) - list of related authorization ``Ids``
    raise:  NotFound - ``vault_id`` is not found
    raise:  NullArgument - ``vault_id`` is ``null``
    raise:  OperationFailed - unable to complete request
    raise:  PermissionDenied - authorization failure
    *compliance: mandatory -- This method must be implemented.*
    """
    # Implemented from template for
    # osid.resource.ResourceBinSession.get_resource_ids_by_bin
    ids = [authz.get_id() for authz in self.get_authorizations_by_vault(vault_id)]
    return IdList(ids)
|
def batch_encode(self, iterator, *args, **kwargs):
    """Encode every object yielded by ``iterator``.

    Args:
        iterator (iterable): Batch of objects to encode.
        *args: Arguments passed to ``encode``.
        **kwargs: Keyword arguments passed to ``encode``.

    Returns:
        list: Batch of encoded objects.
    """
    return [self.encode(item, *args, **kwargs) for item in iterator]
|
def methodcall(self, methodname, localobject, Params=None, **params):
    """**Deprecated:** Low-level method used by the
    :meth:`~pywbem.WBEMConnection.InvokeMethod` method of this class.

    *Deprecated since pywbem 0.12.*

    Calling this function directly has been deprecated and will issue a
    :term:`DeprecationWarning`.  Users should call
    :meth:`~pywbem.WBEMConnection.InvokeMethod` instead of this method.

    This method will be removed in the next pywbem release after 0.12.
    """
    warnings.warn(
        "Calling methodcall() directly is deprecated; it will be removed "
        "in the next pywbem release after 0.12",
        DeprecationWarning,
        stacklevel=2)
    # Delegate to the internal implementation unchanged.
    return self._methodcall(methodname, localobject, Params, **params)
|
def _read_tags_for_revset(self, spec):
    """Return TaggedRevision for each tag/rev combination in the revset spec."""
    # 'defaults.log=' clears any user-configured defaults so the output
    # format of 'hg log' is predictable.
    cmd = ['log', '--style', 'default', '--config', 'defaults.log=', '-r', spec]
    res = self._invoke(*cmd)
    # Header lines look like "changeset:   12:abcdef012345" or "tag: tip".
    header_pattern = re.compile(r'(?P<header>\w+?):\s+(?P<value>.*)')
    match_res = map(header_pattern.match, res.splitlines())
    matched_lines = filter(None, match_res)
    matches = (match.groupdict() for match in matched_lines)
    for match in matches:
        if match['header'] == 'changeset':
            # Split "<local id>:<rev>"; 'rev' is remembered across loop
            # iterations for the tag lines that follow this changeset.
            id, sep, rev = match['value'].partition(':')
        if match['header'] == 'tag':
            # Relies on 'tag' lines appearing after their changeset line
            # in hg log output, so 'rev' is already bound here.
            tag = match['value']
            yield TaggedRevision(tag, rev)
|
def _add_meta(xs, sample=None, config=None):
    """Add top level information about the sample or flowcell to output.

    Sorts outputs into sample names (sample input) and project (config input).

    :param xs: list of dicts, each with a "path" key pointing at an
        existing file; mutated in place (mtime/sample/run keys added).
    :param sample: optional sample datadict used to tag outputs.
    :param config: optional flowcell config used to tag project outputs.
    :return: the list of updated dicts.
    :raises ValueError: if an entry's path is not an existing file path.
    """
    out = []
    for x in xs:
        if not isinstance(x["path"], six.string_types) or not os.path.exists(x["path"]):
            raise ValueError("Unexpected path for upload: %s" % x)
        x["mtime"] = shared.get_file_timestamp(x["path"])
        if sample:
            sample_name = dd.get_sample_name(sample)
            if "sample" not in x:
                x["sample"] = sample_name
            elif x["sample"] != sample_name:
                # Entry already tagged with a different sample: record this
                # sample under "run" instead of overwriting "sample".
                x["run"] = sample_name
        if config:
            # Project-level outputs are grouped by flowcell date_name,
            # falling back to today's date and a generic project name.
            fc_name = config.get("fc_name") or "project"
            fc_date = config.get("fc_date") or datetime.datetime.now().strftime("%Y-%m-%d")
            x["run"] = "%s_%s" % (fc_date, fc_name)
        out.append(x)
    return out
|
def _get_free_gpu(max_gpu_utilization=40, min_free_memory=0.5, num_gpu=1):
    """Get available GPUs according to utilization thresholds.

    Args:
        max_gpu_utilization: percent utilization threshold to consider a GPU "free"
        min_free_memory: fraction of free memory to consider a GPU "free"
        num_gpu: number of requested GPUs

    Returns:
        A tuple of (available_gpus, minimum_free_memory), where available_gpus
        is a comma-delimited string of GPU ids, and minimum_free_memory is the
        lowest fraction of free memory available on the available_gpus.
        When no GPU qualifies, available_gpus is "" (CPU-only mode).
    """
    def get_gpu_info():
        # One nvidia-smi snapshot: (utilization %, used-memory fraction, id)
        # per GPU, in the order nvidia-smi reports them.
        gpu_info = subprocess.check_output(["nvidia-smi", "--format=csv,noheader,nounits", "--query-gpu=index,memory.total,memory.free,memory.used,utilization.gpu"]).decode()
        gpu_info = gpu_info.split('\n')
        gpu_info_array = []
        # Check each gpu
        for line in gpu_info:
            if len(line) > 0:
                gpu_id, total_memory, free_memory, used_memory, gpu_util = line.split(',')
                gpu_memory_util = float(used_memory) / float(total_memory)
                gpu_info_array.append((float(gpu_util), gpu_memory_util, gpu_id))
        return (gpu_info_array)
    # Read the gpu information multiple times to smooth out short spikes.
    num_times_to_average = 5
    current_array = []
    for ind in range(num_times_to_average):
        current_array.append(get_gpu_info())
        time.sleep(1)
    # Get number of gpus
    num_gpus = len(current_array[0])
    # Average the gpu information: sum all snapshots, then divide.
    # NOTE(review): the id kept here is str(position), which assumes
    # nvidia-smi lists GPUs in index order — confirm.
    avg_array = [(0, 0, str(x)) for x in range(num_gpus)]
    for ind in range(num_times_to_average):
        for gpu_ind in range(num_gpus):
            avg_array[gpu_ind] = (avg_array[gpu_ind][0] + current_array[ind][gpu_ind][0], avg_array[gpu_ind][1] + current_array[ind][gpu_ind][1], avg_array[gpu_ind][2])
    for gpu_ind in range(num_gpus):
        avg_array[gpu_ind] = (float(avg_array[gpu_ind][0]) / num_times_to_average, float(avg_array[gpu_ind][1]) / num_times_to_average, avg_array[gpu_ind][2])
    # Tuple sort: least utilized first, ties broken by memory use.
    avg_array.sort()
    gpus_found = 0
    gpus_to_use = ""
    free_memory = 1.0
    # Return the least utilized GPUs if it's utilized less than
    # max_gpu_utilization and amount of free memory is at least
    # min_free_memory.  Otherwise, run in cpu only mode.
    for current_gpu in avg_array:
        if current_gpu[0] < max_gpu_utilization and (1 - current_gpu[1]) > min_free_memory:
            if gpus_found == 0:
                gpus_to_use = current_gpu[2]
                free_memory = 1 - current_gpu[1]
            else:
                gpus_to_use = gpus_to_use + "," + current_gpu[2]
                free_memory = min(free_memory, 1 - current_gpu[1])
            gpus_found = gpus_found + 1
            if gpus_found == num_gpu:
                break
    return gpus_to_use, free_memory
|
def _get_area_def(self):
    """Build the geostationary area definition of the band from metadata."""
    proj_params = self.mda['projection_parameters']
    h = proj_params['h']
    nlines = int(self.mda['number_of_lines'])
    ncols = int(self.mda['number_of_columns'])
    # Column/line scaling factors and offsets.
    cfac = np.int32(self.mda['cfac'])
    lfac = np.int32(self.mda['lfac'])
    coff = np.float32(self.mda['coff'])
    loff = self._get_line_offset()
    area_extent = self.get_area_extent((nlines, ncols), (loff, coff), (lfac, cfac), h)
    proj_dict = {
        'a': float(proj_params['a']),
        'b': float(proj_params['b']),
        'lon_0': float(proj_params['SSP_longitude']),
        'h': float(h),
        'proj': 'geos',
        'units': 'm',
    }
    return geometry.AreaDefinition(
        AREA_NAMES[self.area_id]['short'],
        AREA_NAMES[self.area_id]['long'],
        'geosmsg',
        proj_dict,
        ncols,
        nlines,
        area_extent,
    )
|
def peer_echo(includes=None, force=False):
    """Echo filtered attributes back onto the same relation for storage.

    This is a requirement to use the peerstorage module - it needs to be
    called from the peer relation's changed hook.

    If Juju leader support exists this will be a noop unless force is True.

    :param includes: optional list of substrings; only relation keys whose
        name contains one of them are echoed.  When None, every key except
        the address keys is echoed.
    :param force: echo even when leader-election support is available.
    """
    try:
        is_leader()
    except NotImplementedError:
        # Leader election not supported by this Juju; fall through and echo.
        pass
    else:
        if not force:
            # NOOP if leader-election is supported
            return
    # Use original non-leader calls
    relation_get = _relation_get
    relation_set = _relation_set
    rdata = relation_get()
    echo_data = {}
    if includes is None:
        # Echo everything except the per-unit address keys.
        echo_data = rdata.copy()
        for ex in ['private-address', 'public-address']:
            if ex in echo_data:
                echo_data.pop(ex)
    else:
        # Echo only attributes whose name contains one of the includes.
        for attribute, value in six.iteritems(rdata):
            for include in includes:
                if include in attribute:
                    echo_data[attribute] = value
    if len(echo_data) > 0:
        relation_set(relation_settings=echo_data)
|
def identify_triggers(cfg, sources, sinks, lattice, nosec_lines):
    """Identify sources, sinks and sanitisers in a CFG.

    Args:
        cfg (CFG): CFG to find sources, sinks and sanitisers in.
        sources (tuple): list of sources; a source is a (source, sanitiser) tuple.
        sinks (tuple): list of sinks; a sink is a (sink, sanitiser) tuple.
        lattice: lattice used when propagating secondary sources.
        nosec_lines (set): lines with # nosec whitelisting.

    Returns:
        Triggers tuple with sink and source nodes and a sanitiser node dict.
    """
    assignments = filter_cfg_nodes(cfg, AssignmentNode)
    source_nodes = find_triggers(assignments, sources, nosec_lines)
    # Framework-tainted nodes (e.g. URL parameters) count as sources too.
    source_nodes.extend(
        TriggerNode(Source('Framework function URL parameter'), cfg_node=node)
        for node in filter_cfg_nodes(cfg, TaintedNode)
    )
    find_secondary_sources(assignments, source_nodes, lattice)
    sink_nodes = find_triggers(cfg.nodes, sinks, nosec_lines)
    sanitisers = build_sanitiser_node_dict(cfg, sink_nodes)
    return Triggers(source_nodes, sink_nodes, sanitisers)
|
def logical_xor(f1, f2):  # function factory
    '''Logical xor from functions.

    Parameters
    ----------
    f1, f2 : function
        Function that takes array and returns true or false for each item
        in array.

    Returns
    -------
    Function.
    '''
    def combined(value):
        return np.logical_xor(f1(value), f2(value))
    # Give the composite a readable name derived from its parts.
    combined.__name__ = "(" + f1.__name__ + "_xor_" + f2.__name__ + ")"
    return combined
|
def process_pmc(pmc_id, offline=False, output_fname=default_output_fname):
    """Return a ReachProcessor by processing a paper with a given PMC id.

    Uses the PMC client to obtain the full text. If it's not available,
    None is returned.

    Parameters
    ----------
    pmc_id : str
        The ID of a PubmedCentral article. The string may start with PMC but
        passing just the ID also works.
        Examples: 3717945, PMC3717945
        https://www.ncbi.nlm.nih.gov/pmc/
    offline : Optional[bool]
        If set to True, the REACH system is run offline. Otherwise (by default)
        the web service is called. Default: False

    Returns
    -------
    rp : ReachProcessor
        A ReachProcessor containing the extracted INDRA Statements
        in rp.statements.
    """
    xml_str = pmc_client.get_xml(pmc_id)
    if xml_str is None:
        return None
    # Write the NXML to a local file for the NXML-based processor
    # (note: written into the current working directory).
    fname = pmc_id + '.nxml'
    with open(fname, 'wb') as fh:
        fh.write(xml_str.encode('utf-8'))
    # Look up the matching PMID to use as the citation.
    ids = id_lookup(pmc_id, 'pmcid')
    pmid = ids.get('pmid')
    rp = process_nxml_file(fname, citation=pmid, offline=offline, output_fname=output_fname)
    return rp
|
def downcase_hook(self, data: cmd2.plugin.PostparsingData) -> cmd2.plugin.PostparsingData:
    """A hook to make uppercase commands lowercase."""
    lowered = data.statement.command.lower()
    tail = '' if data.statement.args is None else data.statement.args
    # Re-parse so the statement object reflects the lowercased command.
    data.statement = self.statement_parser.parse("{} {}".format(lowered, tail))
    return data
|
def energies(self, samples_like, dtype=float):
    """The energies of the given samples.

    Args:
        samples_like (samples_like):
            A collection of raw samples. `samples_like` is an extension of
            NumPy's array_like structure. See :func:`.as_samples`.
        dtype (:class:`numpy.dtype`, optional):
            The data type of the returned energies. Defaults to float.

    Returns:
        :obj:`numpy.ndarray`: The energies.
    """
    # Fix: the default was `np.float`, an alias removed in NumPy 1.24
    # (deprecated since 1.20).  The alias was exactly the builtin `float`,
    # so this default is behavior-identical and works on all versions.
    samples, labels = as_samples(samples_like)
    # Map each variable label to its column index in the samples array.
    if labels:
        labeldict = {v: idx for idx, v in enumerate(labels)}
    else:
        labeldict = {}
    num_samples = samples.shape[0]
    energies = np.zeros(num_samples, dtype=dtype)
    for term, bias in self.items():
        if len(term) == 0:
            # the empty term is the constant offset
            energies += bias
        else:
            # product of the involved variables' columns, scaled by the bias
            energies += np.prod([samples[:, labeldict[v]] for v in term], axis=0) * bias
    return energies
|
def get_md_header(header_text_line: str, header_duplicate_counter: dict, keep_header_levels: int = 3, parser: str = 'github', no_links: bool = False) -> dict:
    r"""Build a data structure with the elements needed to create a TOC line.

    :parameter header_text_line: a single markdown line that needs to be
        transformed into a TOC line.
    :parameter header_duplicate_counter: a data structure that contains the
        number of occurrencies of each header anchor link.  This is used to
        avoid duplicate anchor links and it is meaningful only for certain
        values of parser.
    :parameter keep_header_levels: the maximum level of headers to be
        considered as such when building the table of contents.
        Defaults to ``3``.
    :parameter parser: decides rules on how to generate anchor links.
        Defaults to ``github``.
    :returns: None if the input line does not correspond to one of the
        designated cases, or a dict with the components needed to create a
        table of contents line otherwise.
    :rtype: dict
    :raises: a built-in exception.

    .. note::
        This works like a wrapper to other functions.
    """
    parsed = get_atx_heading(header_text_line, keep_header_levels, parser, no_links)
    if parsed is None:
        return None
    header_type, trimmed_text = parsed
    return {
        'type': header_type,
        'text_original': trimmed_text,
        'text_anchor_link': build_anchor_link(trimmed_text, header_duplicate_counter, parser),
    }
|
def _insertBPoint(self, index, type, anchor, bcpIn, bcpOut, **kwargs):
    """Subclasses may override this method."""
    # Insert a plain line segment at the anchor, then look the result up as
    # a bPoint and adjust its bcpIn/bcpOut — this avoids duplicating the
    # insertion logic.
    self._insertSegment(index=index, type="line", points=[anchor], smooth=False)
    bPoints = self.bPoints
    target_index = index + 1
    if target_index >= len(bPoints):
        # The segment was appended rather than inserted, so the new bPoint
        # is the last one.
        target_index = -1
    target = bPoints[target_index]
    target.bcpIn = bcpIn
    target.bcpOut = bcpOut
    target.type = type
|
def do_disable(self, arg):
    """disable bpnumber [bpnumber ...]

    Disables the breakpoints given as a space separated list of
    breakpoint numbers.  Disabling a breakpoint means it cannot
    cause the program to stop execution, but unlike clearing a
    breakpoint, it remains in the list of breakpoints and can be
    (re-)enabled.
    """
    for number in arg.split():
        try:
            bp = self.get_bpbynumber(number)
        except ValueError as err:
            # Bad breakpoint number: report and move on to the next one.
            self.error(err)
            continue
        bp.disable()
        self.done_breakpoint_state(bp, False)
|
def build():
    """Builds pages given template.jinja, style.css, and content.rst;
    produces index.html."""
    test_files()
    # Render the reStructuredText content to HTML parts.
    with open('content.rst') as f:
        content = publish_parts(f.read(), writer_name='html')
    title = content['title']
    # Collapse newlines so the body is a single line inside the template.
    body = content['html_body'].replace('\n', ' ')
    # NOTE(review): the file handle is unused — the template is actually
    # loaded via the FileSystemLoader below; the open presumably just
    # asserts the file exists.  Confirm before removing.
    with open('template.jinja', 'r') as f:
        loader = FileSystemLoader(getcwd())
        env = Environment(loader=loader)
        template = env.get_template('template.jinja')
        page = template.render(title=title, content=body)
    # Write the final page.
    with open('index.html', 'w') as f:
        f.write(page)
|
def authenticate(self, req, resp, resource):
    """Authenticate a request via HTTP Basic auth.

    Extracts the basic auth token from the request ``authorization``
    header, decodes it, verifies the username/password, and returns a
    ``user`` object on success; otherwise raises
    ``falcon.HTTPUnauthorized``.
    """
    username, password = self._extract_credentials(req)
    user = self.user_loader(username, password)
    if user:
        return user
    raise falcon.HTTPUnauthorized(description='Invalid Username/Password')
|
def list(self, name, iterator=False, **kwargs):
    """Returns a list of the files under the specified path.

    name must be in the form of ``s3://bucket/prefix``.

    Parameters
    ----------
    keys : optional
        if True then this will return the actual boto keys for files
        that are encountered
    objects : optional
        if True then this will return the actual boto objects for
        files or prefixes that are encountered
    delimiter : optional
        if set, passed through to the underlying listing
    iterator : optional
        if True return iterator rather than converting to list object
    """
    assert self._is_s3(name), "name must be in form s3://bucket/key"
    listing = self._list(bucket=self._bucket_name(name), prefix=self._key_name(name), **kwargs)
    if iterator:
        return iter(listing)
    return list(listing)
|
def extremum_icohpvalue(self, summed_spin_channels=True, spin=Spin.up):
    """Get ICOHP/ICOOP of the strongest bond.

    Args:
        summed_spin_channels: Boolean to indicate whether the
            ICOHPs/ICOOPs of both spin channels should be summed.
        spin: if summed_spin_channels is False, this spin indicates which
            spin channel should be returned.

    Returns:
        Lowest ICOHP / largest ICOOP value (i.e. ICOHP/ICOOP value of the
        strongest bond).
    """
    # For ICOHPs the strongest bond is the most negative value, so start
    # from +max and take minima; for ICOOPs it is the largest value, so
    # start from -max and take maxima.
    if not self._are_coops:
        extremum = sys.float_info.max
    else:
        extremum = -sys.float_info.max
    if not self._is_spin_polarized:
        # Only Spin.up data exists; warn if the caller asked for down.
        if spin == Spin.down:
            warnings.warn("This spin channel does not exist. I am switching to Spin.up")
        spin = Spin.up
    for value in self._icohplist.values():
        if not value.is_spin_polarized or not summed_spin_channels:
            # Use the single requested (or only available) spin channel.
            if not self._are_coops:
                if value.icohpvalue(spin) < extremum:
                    extremum = value.icohpvalue(spin)
                # print(extremum)
            else:
                if value.icohpvalue(spin) > extremum:
                    extremum = value.icohpvalue(spin)
                # print(extremum)
        else:
            # Spin-polarized data with both channels summed.
            if not self._are_coops:
                if value.summed_icohp < extremum:
                    extremum = value.summed_icohp
                # print(extremum)
            else:
                if value.summed_icohp > extremum:
                    extremum = value.summed_icohp
                # print(extremum)
    return extremum
|
async def get_all(self, url, params=None):
    """Aggregate data from all pages of an API query.

    Args:
        url (str): Google API endpoint URL.
        params (dict): (optional) URL query parameters; mutated in place
            with the ``pageToken`` while paging.

    Returns:
        list: Parsed JSON responses, one per page.
    """
    if not params:
        params = {}
    responses = []
    token = None
    while True:
        if token:
            params['pageToken'] = token
        page = await self.get_json(url, params=params)
        responses.append(page)
        token = page.get('nextPageToken')
        if not token:
            # No further pages advertised by the API.
            break
    return responses
|
def construct_source_candidate_set(addr, plen, laddr, loname):
    """Given all addresses assigned to a specific interface ('laddr' parameter),
    this function returns the "candidate set" associated with 'addr/plen'.

    Basically, the function filters all interface addresses to keep only
    those that have the same scope as the provided prefix.

    It is on this list of addresses that the source selection mechanism
    will then be performed to select the best source address associated
    with some specific destination that uses this prefix.
    """
    def cset_sort(x, y):
        # Comparator: global addresses sort before non-global ones; among
        # two globals, a native (non-6to4) address wins.
        x_global = 0
        if in6_isgladdr(x):
            x_global = 1
        y_global = 0
        if in6_isgladdr(y):
            y_global = 1
        res = y_global - x_global
        if res != 0 or y_global != 1:
            return res
        # two global addresses: if one is native, it wins.
        if not in6_isaddr6to4(x):
            return -1
        return -res
    # Keep only the laddr entries whose scope matches the prefix scope.
    # Each laddr entry is assumed to be an (address, scope, ifname) tuple.
    cset = []
    if in6_isgladdr(addr) or in6_isuladdr(addr):
        cset = [x for x in laddr if x[1] == IPV6_ADDR_GLOBAL]
    elif in6_islladdr(addr):
        cset = [x for x in laddr if x[1] == IPV6_ADDR_LINKLOCAL]
    elif in6_issladdr(addr):
        cset = [x for x in laddr if x[1] == IPV6_ADDR_SITELOCAL]
    elif in6_ismaddr(addr):
        # Multicast destination: match on the multicast scope.
        if in6_ismnladdr(addr):
            cset = [('::1', 16, loname)]
        elif in6_ismgladdr(addr):
            cset = [x for x in laddr if x[1] == IPV6_ADDR_GLOBAL]
        elif in6_ismlladdr(addr):
            cset = [x for x in laddr if x[1] == IPV6_ADDR_LINKLOCAL]
        elif in6_ismsladdr(addr):
            cset = [x for x in laddr if x[1] == IPV6_ADDR_SITELOCAL]
    elif addr == '::' and plen == 0:
        # Default route: any global address is a candidate.
        cset = [x for x in laddr if x[1] == IPV6_ADDR_GLOBAL]
    # Keep only the address component of each tuple.
    cset = [x[0] for x in cset]
    cset.sort(key=cmp_to_key(cset_sort))
    # Sort with global addresses first
    return cset
|
def keyPressEvent(self, event):
    """Reimplement Qt method to allow cyclic behavior."""
    key = event.key()
    if key == Qt.Key_Down:
        # Move selection one row forward.
        self.select_row(1)
    elif key == Qt.Key_Up:
        # Move selection one row back.
        self.select_row(-1)
|
def sendToReplica(self, msg, frm):
    """Send the message to the intended replica.

    :param msg: the message to send
    :param frm: the name of the node which sent this `msg`
    """
    # TODO: discard or stash messages here instead of doing
    # this in msgHas* methods!!!
    if not self.msgHasAcceptableInstId(msg, frm):
        return
    self.replicas.pass_message((msg, frm), msg.instId)
|
async def _deferred_init(self):
    """Register the web hook onto which Telegram should send its messages."""
    hook_url = urljoin(settings.BERNARD_BASE_URL, self.make_hook_path())
    await self.call('setWebhook', url=hook_url)
    logger.info('Setting Telegram webhook to "%s"', hook_url)
|
def _get_seal_key_ntlm1(negotiate_flags, exported_session_key):
    """3.4.5.3 SEALKEY

    Calculates the seal_key used to seal (encrypt) messages.  This is for
    authentication where NTLMSSP_NEGOTIATE_EXTENDED_SESSIONSECURITY has not
    been negotiated.  Will weaken the keys if NTLMSSP_NEGOTIATE_56 is not
    negotiated; it will default to the 40-bit key.

    :param negotiate_flags: The negotiate_flags structure sent by the server
    :param exported_session_key: A 128-bit session key used to derive
        signing and sealing keys
    :return seal_key: Key used to seal messages
    """
    if negotiate_flags & NegotiateFlags.NTLMSSP_NEGOTIATE_56:
        # 56-bit key: 7 key bytes plus the fixed trailing byte 0xA0.
        return exported_session_key[:7] + b"\xa0"
    # 40-bit key: 5 key bytes plus the fixed trailing bytes 0xE5 0x38 0xB0.
    return exported_session_key[:5] + b"\xe5\x38\xb0"
|
def _co_names_from_config ( self ) :
"""Parse the configuration for the names of the COs for which to
construct virtual IdPs .
: rtype : [ str ]
: return : list of CO names"""
|
co_names = [ co [ self . KEY_ENCODEABLE_NAME ] for co in self . config [ self . KEY_CO ] ]
return co_names
|
def items_for_tree_result(cl, result, form):
    """Generates the actual list of data.

    Yields one mark-safe HTML cell per field in ``cl.list_display`` for a
    single row of a tree-style Django admin changelist, plus a trailing
    hidden-pk cell when an editable form is supplied.  The django.VERSION
    checks keep compatibility with old (pre-1.4) admin APIs.
    """
    first = True
    pk = cl.lookup_opts.pk.attname
    for field_name in cl.list_display:
        row_class = ''
        try:
            f, attr, value = lookup_field(field_name, result, cl.model_admin)
        except (AttributeError, ObjectDoesNotExist):
            result_repr = get_empty_value_display(cl)
        else:
            if f is None:
                # Non-model field (callable / admin method).
                if django.VERSION[0] == 1 and django.VERSION[1] == 4:
                    if field_name == 'action_checkbox':
                        row_class = ' class="action-checkbox disclosure"'
                allow_tags = getattr(attr, 'allow_tags', False)
                boolean = getattr(attr, 'boolean', False)
                if boolean:
                    allow_tags = True
                    result_repr = _boolean_icon(value)
                else:
                    result_repr = smart_text(value)
                # Strip HTML tags in the resulting text, except if the
                # function has an "allow_tags" attribute set to True.
                if not allow_tags:
                    result_repr = escape(result_repr)
                else:
                    result_repr = mark_safe(result_repr)
            else:
                # Regular model field.
                if value is None:
                    result_repr = get_empty_value_display(cl)
                if hasattr(f, 'rel') and isinstance(f.rel, models.ManyToOneRel):
                    result_repr = escape(getattr(result, f.name))
                else:
                    result_repr = display_for_field(value, f, '')
                if isinstance(f, models.DateField) or isinstance(f, models.TimeField):
                    row_class = ' class="nowrap"'
        if first:
            # Pre-1.4 admin: fold the disclosure class into the first cell.
            if django.VERSION[0] == 1 and django.VERSION[1] < 4:
                try:
                    f, attr, checkbox_value = lookup_field('action_checkbox', result, cl.model_admin)
                    if row_class:
                        row_class = "%s%s" % (row_class[:-1], ' disclosure"')
                    else:
                        row_class = ' class="disclosure"'
                except (AttributeError, ObjectDoesNotExist):
                    pass
        if force_text(result_repr) == '':
            result_repr = mark_safe(' ')
        # If list_display_links not defined, add the link tag to the first field
        if (first and not cl.list_display_links) or field_name in cl.list_display_links:
            if django.VERSION[0] == 1 and django.VERSION[1] < 4:
                table_tag = 'td'
                # {True: 'th', False: 'td'}[first]
            else:
                table_tag = {True: 'th', False: 'td'}[first]
            url = cl.url_for_result(result)
            # Convert the pk to something that can be used in Javascript.
            # Problem cases are long ints (23L) and non-ASCII strings.
            if cl.to_field:
                attr = str(cl.to_field)
            else:
                attr = pk
            value = result.serializable_value(attr)
            # NOTE(review): this repr-based id is immediately overwritten by
            # escapejs(value) below — dead store; confirm before cleaning up.
            result_id = repr(force_text(value))[1:]
            first = False
            result_id = escapejs(value)
            yield mark_safe(format_html(smart_text('<{}{}><a href="{}"{}>{}</a></{}>'), table_tag, row_class, url, format_html(' onclick="opener.dismissRelatedLookupPopup(window, \'{}\'); return false;"', result_id) if cl.is_popup else '', result_repr, table_tag))
        else:
            # By default the fields come from ModelAdmin.list_editable, but if
            # we pull the fields out of the form instead of list_editable
            # custom admins can provide fields on a per request basis
            if form and field_name in form.fields:
                bf = form[field_name]
                result_repr = mark_safe(force_text(bf.errors) + force_text(bf))
            else:
                result_repr = conditional_escape(result_repr)
            yield mark_safe(smart_text('<td%s>%s</td>' % (row_class, result_repr)))
    if form and not form[cl.model._meta.pk.name].is_hidden:
        # Trailing hidden primary-key input for editable rows.
        yield mark_safe(smart_text('<td>%s</td>' % force_text(form[cl.model._meta.pk.name])))
|
def get_user_details ( self , response ) :
    """Extract user attributes from the original LTI POST data.

    Every field is optional in an LTI launch, so each lookup falls back to
    ``None`` when the key is absent.

    :param dict response: the LTI launch parameters.
    :return: dict with ``id``, ``username``, ``first_name``, ``last_name``,
        ``fullname`` and ``email`` keys (values may be ``None``).
    """
    details = {
        'id': response.get('user_id', None),
        # Prefer the custom username; fall back to the extension field
        # when the custom one is missing or empty.
        'username': (response.get('custom_username', None)
                     or response.get('ext_user_username', None)),
        'last_name': response.get('lis_person_name_family', None),
        'email': response.get('lis_person_contact_email_primary', None),
        'first_name': response.get('lis_person_name_given', None),
        'fullname': response.get('lis_person_name_full', None),
    }
    logger.debug("User details being used: " + str(details))
    return details
|
def generate_html_report ( self , include_turtle = False , exclude_warning = False , list_auxiliary_line = False ) -> str :
    """Render an HTML report of the ontology.

    Shows links to all classes and properties, a hierarchy of the classes,
    and a description of each class with the properties that apply to it.
    Example: http://www.cidoc-crm.org/sites/default/files/Documents/cidoc_crm_version_5.0.4.html

    :param include_turtle: include turtle related to each entity.
    :param exclude_warning: exclude warning messages in the HTML report.
    :param list_auxiliary_line: whether auxiliary lines are shown.
    :return: HTML as a raw string.
    """
    import os
    template = os.path.dirname(os.path.abspath(__file__)) + '/../ontologies/template.html'
    with open(template) as f:
        content = f.read()

    # Generate the fragments in the same order as before so any warnings
    # they emit land in the log stream before it is read below.
    class_list = self.__html_entities_hierarchy(self.classes)
    dataproperty_list = self.__html_entities_hierarchy(self.data_properties)
    objectproperty_list = self.__html_entities_hierarchy(self.object_properties)
    classes_html = self.__html_classes(include_turtle)
    dataproperties_html, objectproperties_html = self.__html_properties(include_turtle)
    logs = '' if exclude_warning else self.ontology.log_stream.getvalue()

    replacements = (
        ('{{{title}}}', 'Ontology Entities'),
        ('{{{class_list}}}', class_list),
        ('{{{dataproperty_list}}}', dataproperty_list),
        ('{{{objectproperty_list}}}', objectproperty_list),
        ('{{{classes}}}', classes_html),
        ('{{{dataproperties}}}', dataproperties_html),
        ('{{{objectproperties}}}', objectproperties_html),
        ('{{{logging-title}}}', '' if exclude_warning else '<h2>Logging</h2>'),
        ('{{{logging}}}', '<pre><code>{}</code></pre>'.format(logs)),
        ('{{{list_auxiliary_line}}}', self.__show_list_auxiliary_line(list_auxiliary_line)),
    )
    for placeholder, html in replacements:
        content = content.replace(placeholder, html)
    return content
|
def cot ( x , context = None ) :
    """Return the cotangent of ``x``, evaluated in ``context`` (or the
    current context when ``context`` is None)."""
    converted = BigFloat._implicit_convert(x)
    return _apply_function_in_current_context(
        BigFloat,
        mpfr.mpfr_cot,
        (converted,),
        context,
    )
|
def delete_lifecycle_configuration ( Bucket , region = None , key = None , keyid = None , profile = None ) :
    '''Delete the lifecycle configuration for the given bucket.

    Returns {deleted: true} if the lifecycle was deleted and
    {deleted: False} otherwise.

    CLI Example:

    .. code-block:: bash

        salt myminion boto_s3_bucket.delete_lifecycle_configuration my_bucket
    '''
    try:
        conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
        conn.delete_bucket_lifecycle(Bucket=Bucket)
    except ClientError as exc:
        return {'deleted': False, 'error': __utils__['boto3.get_error'](exc)}
    return {'deleted': True, 'name': Bucket}
|
def compute_triangle_plane_intersections ( mesh , tid , plane , dist_tol = 1e-8 ) :
    """Compute the intersection between a triangle and a plane.

    Returns a list of intersections in the form
    (INTERSECT_EDGE, <intersection point>, <edge>) for edge intersections,
    (INTERSECT_VERTEX, <intersection point>, <vertex index>) for vertices.

    This returns between 0 and 2 intersections:
    - 0 : the plane does not intersect the triangle
    - 1 : one of the triangle's vertices lies on the plane (so it just
      "touches" the plane without really intersecting)
    - 2 : the plane slices the triangle in two parts (either vertex-edge,
      vertex-vertex or edge-edge)

    :param mesh: mesh object exposing ``verts``, ``tris`` and
        ``edges_for_triangle``.
    :param tid: index of the triangle to test.
    :param plane: plane passed through to ``point_to_plane_dist``.
    :param dist_tol: absolute signed-distance tolerance below which a
        vertex is treated as lying exactly on the plane.
    """
    # Signed distance from each of the triangle's vertices to the plane.
    # TODO : Use a distance cache
    dists = { vid : point_to_plane_dist ( mesh . verts [ vid ] , plane ) for vid in mesh . tris [ tid ] }
    # TODO : Use an edge intersection cache ( we currently compute each edge
    # intersection twice : once for each tri )
    # Tracks which vertices were already registered, to avoid reporting the
    # same vertex intersection twice from its two incident edges.
    vert_intersect = { vid : False for vid in dists . keys ( ) }
    # Iterate through the edges , cutting the ones that intersect
    intersections = [ ]
    for e in mesh . edges_for_triangle ( tid ) :
        v1 = mesh . verts [ e [ 0 ] ]
        d1 = dists [ e [ 0 ] ]
        v2 = mesh . verts [ e [ 1 ] ]
        d2 = dists [ e [ 1 ] ]
        # First endpoint lies (within tolerance) on the plane.
        if np . fabs ( d1 ) < dist_tol : # Avoid creating the vertex intersection twice
            if not vert_intersect [ e [ 0 ] ] : # point on plane
                intersections . append ( ( INTERSECT_VERTEX , v1 , e [ 0 ] ) )
                vert_intersect [ e [ 0 ] ] = True
        # Second endpoint lies (within tolerance) on the plane.
        if np . fabs ( d2 ) < dist_tol :
            if not vert_intersect [ e [ 1 ] ] : # point on plane
                intersections . append ( ( INTERSECT_VERTEX , v2 , e [ 1 ] ) )
                vert_intersect [ e [ 1 ] ] = True
        # If vertices are on opposite sides of the plane , we have an edge
        # intersection
        if d1 * d2 < 0 : # Due to numerical accuracy , we could have both a vertex intersect
            # and an edge intersect on the same vertex , which is impossible
            if not vert_intersect [ e [ 0 ] ] and not vert_intersect [ e [ 1 ] ] : # intersection factor ( between 0 and 1)
                # here is a nice drawing :
                # https : / / ravehgonen . files . wordpress . com / 2013/02 / slide8 . png
                # keep in mind d1 , d2 are * signed * distances ( = > d1 - d2)
                s = d1 / ( d1 - d2 )
                vdir = v2 - v1
                ipos = v1 + vdir * s
                intersections . append ( ( INTERSECT_EDGE , ipos , e ) )
    return intersections
|
def cluster ( self , n , embed_dim = None , algo = mds . CLASSICAL , method = methods . KMEANS ) :
    """Cluster the embedded coordinates using multidimensional scaling.

    Parameters
    ----------
    n : int
        The number of clusters to return.
    embed_dim : int
        The dimensionality of the underlying coordinates; defaults to ``n``.
    algo : enum value (mds.CLASSICAL | mds.METRIC)
        The MDS embedding variant to use.
    method : enum value (methods.KMEANS | methods.GMM | methods.WARD)
        The clustering method to use.

    Returns
    -------
    Partition
        Partition object describing the data partition.
    """
    # A single cluster needs no embedding at all.
    if n == 1:
        return Partition([1] * len(self.get_dm(False)))
    if embed_dim is None:
        embed_dim = n

    # Embed the distance matrix with the requested MDS variant.
    if algo == mds.CLASSICAL:
        embedding_kind = 'cmds'
    elif algo == mds.METRIC:
        embedding_kind = 'mmds'
    else:
        raise OptionError(algo, list(mds.reverse.values()))
    self._coords = self.dm.embedding(embed_dim, embedding_kind)

    coords = self._coords.values
    if method == methods.KMEANS:
        return self.kmeans(n, coords)
    if method == methods.GMM:
        return self.gmm(n, coords)
    if method == methods.WARD:
        linkmat = fastcluster.linkage(coords, 'ward')
        return _hclust(linkmat, n)
    raise OptionError(method, list(methods.reverse.values()))
|
def standard_bytes_header ( self , title , addr , length ) :
    """Generates a standard header block of CODE type.

    :param title: name recorded in the header.
    :param addr: load address, stored as param1.
    :param length: data length in bytes.
    """
    # param2 = 32768 is the fixed value used for CODE-type headers.
    self.save_header(
        self.HEADER_TYPE_CODE,
        title,
        length,
        param1=addr,
        param2=32768,
    )
|
def set ( self , value ) :
    """Sets the value of the object (BER/DER object identifier encoding).

    :param value:
        A unicode string. May be a dotted integer string, or if _map is
        provided, one of the mapped values.

    :raises:
        ValueError - when an invalid value is passed
    """
    if not isinstance ( value , str_cls ) :
        raise TypeError ( unwrap ( '''
        %s value must be a unicode string, not %s
        ''' , type_name ( self ) , type_name ( value ) ) )
    self . _native = value
    # Translate a mapped (friendly) name back to its dotted form, when a
    # value map was configured for this class.
    if self . _map is not None :
        if value in self . _reverse_map :
            value = self . _reverse_map [ value ]
    self . contents = b''
    first = None
    for index , part in enumerate ( value . split ( '.' ) ) :
        part = int ( part )
        # The first two parts are merged into a single byte
        # (first * 40 + second, per the OID encoding rules).
        if index == 0 :
            first = part
            continue
        elif index == 1 :
            part = ( first * 40 ) + part
        # Base-128 encode the arc: 7 value bits per byte, continuation bit
        # (0x80) set on every byte except the last.
        encoded_part = chr_cls ( 0x7F & part )
        part = part >> 7
        while part > 0 :
            encoded_part = chr_cls ( 0x80 | ( 0x7F & part ) ) + encoded_part
            part = part >> 7
        self . contents += encoded_part
    # Invalidate any cached header/trailer so the DER bytes are rebuilt.
    self . _header = None
    if self . _trailer != b'' :
        self . _trailer = b''
|
def _git_command ( params , cwd ) :
    """Executes a git command, returning its combined output.

    :param params:
        A list of the parameters to pass to git

    :param cwd:
        The working directory to execute git in

    :raises OSError:
        When git exits with a non-zero status. The exception carries the
        captured output on its ``stdout`` attribute and the exit status on
        its ``returncode`` attribute.

    :return:
        A unicode string of the command's output (stderr is merged into
        stdout), with surrounding whitespace stripped
    """
    proc = subprocess.Popen(
        ['git'] + params,
        stdout=subprocess.PIPE,
        # git writes diagnostics to stderr; fold them into stdout so the
        # failure message is available on the exception below.
        stderr=subprocess.STDOUT,
        cwd=cwd,
    )
    # communicate() waits for the process, so returncode is set afterwards;
    # the second tuple element is always None because stderr is redirected.
    stdout, _ = proc.communicate()
    if proc.returncode != 0:
        e = OSError('git exit code was non-zero')
        e.stdout = stdout
        e.returncode = proc.returncode
        raise e
    return stdout.decode('utf-8').strip()
|
def build_namespace ( self ) :
    """Build out the directory skeleton and python namespace files:

    dbt/
        __init__.py
        adapters/
            ${adapter_name}/
                __init__.py
        include/
            ${adapter_name}/
                __init__.py
    """
    os.makedirs(self.adapters_path)
    os.makedirs(pj(self.include_path, 'macros'))
    # Every namespace-package level receives the same __init__.py stub.
    for parts in ((self.dbt_dir,),
                  (self.dbt_dir, 'adapters'),
                  (self.dbt_dir, 'include')):
        init_path = pj(*(parts + ('__init__.py',)))
        with open(init_path, 'w') as fp:
            fp.write(NAMESPACE_INIT_TEMPLATE)
|
def login_url ( self , org = None ) :
    """Returns the login url which will automatically log into the target
    Salesforce org. By default, the org_name passed to the library
    constructor is used but this can be overridden with the org option
    to log into a different org."""
    # No override: use the org configured at construction time; otherwise
    # resolve the named org through the keychain.
    target = self.org if org is None else self.keychain.get_org(org)
    return target.start_url
|
def delete_value ( self , key ) :
    """Remove ``key`` from the cache file when it is present.

    Arg:
        key: cache key

    Returns:
        dict with a boolean ``status`` and a descriptive ``msg``.
    """
    cache = self.read_file()
    if key not in cache:
        return {'status': False, 'msg': "key does not exist"}
    del cache[key]
    # Persist the cache without the deleted entry.
    self.update_file(cache)
    return {'status': True, 'msg': "success"}
|
def _filter_subinfo_list ( cls , subinfo_list , videoinfo , languages , exts ) :
    """Pick the best-matched subtitle info from ``subinfo_list``.

    Candidates are bucketed by how deep they match ``videoinfo`` along the
    ordered fields season, episode, resolution, source, video_encoding,
    audio_encoding. The deepest non-empty bucket wins; within it entries
    are ranked by (rate, download_count), highest first.

    NOTE(review): ``languages`` and ``exts`` are accepted but never used in
    this body — confirm whether filtering on them was intended.

    return a best matched subinfo, or None when nothing matches.
    """
    fields = ['season', 'episode', 'resolution', 'source', 'video_encoding', 'audio_encoding']
    buckets = {name: [] for name in fields}
    for subinfo in subinfo_list:
        parsed = cls._parse_videoname(subinfo.get('title'))
        matched_upto = None
        # Walk the fields in order; stop at the first mismatch.
        for name in fields:
            if videoinfo.get(name) != parsed.get(name):
                break
            matched_upto = name
        if matched_upto is not None:
            buckets[matched_upto].append(subinfo)
    # Prefer the most specific (deepest) bucket that has any candidates.
    for name in reversed(fields):
        candidates = buckets[name]
        if candidates:
            return max(candidates, key=lambda item: (item['rate'], item['download_count']))
    return None
|
def add_behavior_for_pclass ( self , cls ) :
    """Define an additional base class for the Python class created for a
    particular definition.

    :param type cls: The additional base class. Its name must exactly
        match the name of a definition with a version matching this
        object's version.

    :raises NoSuchDefinition: when no version knows a definition named
        after ``cls``.

    :return: ``None``
    """
    kind = cls.__name__
    # Try each known version in order; the first one that recognises the
    # definition wins.
    for version in sorted(self.versions):
        full = self.full_name(version, kind)
        try:
            self.spec.add_behavior_for_pclass(full, cls)
        except NoSuchDefinition:
            continue
        return None
    raise NoSuchDefinition(kind)
|
def OAuthClient ( domain , consumer_key , consumer_secret , token , token_secret , user_agent = None , request_encoder = default_request_encoder , response_decoder = default_response_decoder ) :
    """Creates a Freshbooks client for a freshbooks domain, using OAuth.
    Token management is assumed to have been handled out of band.

    The optional request_encoder and response_decoder parameters can be
    passed the logging_request_encoder and logging_response_decoder objects
    from this module, or custom encoders, to aid debugging or change the
    behaviour of refreshbooks' request-to-XML-to-response mapping.

    The optional user_agent keyword parameter can be used to specify the
    user agent string passed to FreshBooks. If unset, a default user agent
    string is used.
    """
    # Thin convenience wrapper: all work happens in _create_oauth_client.
    return _create_oauth_client(
        AuthorizingClient,
        domain,
        consumer_key,
        consumer_secret,
        token,
        token_secret,
        user_agent=user_agent,
        request_encoder=request_encoder,
        response_decoder=response_decoder,
    )
|
def collection_props ( event ) :
    """Get information for a pick event on an artist collection (e.g.
    LineCollection, PathCollection, PatchCollection, etc).

    Returns a dict with the picked point's mapped scalar value under both
    ``z`` and ``c`` (``None`` when the collection uses a single constant
    color/c/z value, so there is no per-point value to report).
    """
    index = event.ind[0]
    values = event.artist.get_array()
    has_per_point_value = values is not None and len(values) != 1
    picked = values[index] if has_per_point_value else None
    return dict(z=picked, c=picked)
|
def bug ( self , container : Container ) -> Bug :
    """Returns a description of the bug inside a given container."""
    # The container records the name of its bug; resolve it via the
    # installation's bug registry.
    return self.__installation.bugs[container.bug]
|
def release_subnet ( self , cidr , direc ) :
    """Routine to release a subnet from the DB.

    :param cidr: subnet CIDR to release.
    :param direc: ``'in'`` releases from the inbound pool; any other value
        releases from the outbound pool.
    """
    pool = self.service_in_ip if direc == 'in' else self.service_out_ip
    pool.release_subnet(cidr)
|
def _guess_available_methods ( self ) :
    """Guess the HTTP methods implemented by the subclass.

    A method is considered available when the subclass overrides the
    corresponding ``API_<METHOD>`` handler inherited from ``APIPage``.
    """
    candidates = ("GET", "POST", "PUT", "DELETE", "PATCH", "HEAD", "OPTIONS")
    return [
        verb for verb in candidates
        if getattr(type(self), "API_{}".format(verb)) != getattr(APIPage, "API_{}".format(verb))
    ]
|
def _update_repo ( self , juicer_repo , pulp_repo , env , repo_diff , query = '/repositories/' ) :
    """Push distributor and importer configuration changes for a repo to pulp.

    `from_file` - JSON file of repo definitions
    `noop` - Boolean, if true don't actually create/update repos, just show what would have happened

    https://pulp-dev-guide.readthedocs.org/en/pulp-2.3/integration/rest-api/repo/cud.html#update-a-distributor-associated-with-a-repository
    https://pulp-dev-guide.readthedocs.org/en/pulp-2.3/integration/rest-api/repo/cud.html#update-an-importer-associated-with-a-repository

    Distributor update:
        Method: PUT
        Path: /pulp/api/v2/repositories/<repo_id>/distributors/<distributor_id>/

    Importer update:
        Method: PUT
        Path: /pulp/api/v2/repositories/<repo_id>/importers/<importer_id>/

    :return: True on completion (HTTP errors other than OK/CONFLICT/NOT_FOUND
        are raised via ``raise_for_status``).
    """
    repo_id = "%s-%s" % (juicer_repo['name'], env)
    distributor_id = "yum_distributor"
    importer_id = "yum_importer"
    diff = repo_diff.diff()

    def _put_update(path, payload):
        # Shared response handling for both PUT calls: accept OK, log and
        # continue on CONFLICT / NOT_FOUND, raise on anything else.
        _r = self.connectors[env].put(path, payload)
        if _r.status_code == Constants.PULP_PUT_OK:
            juicer.utils.Log.log_notice("Update request accepted for %s", repo_id)
        elif _r.status_code in (Constants.PULP_PUT_CONFLICT, Constants.PULP_PUT_NOT_FOUND):
            juicer.utils.Log.log_debug(str(_r.content))
        else:
            _r.raise_for_status()

    # Distributor update (the original comments had the two labels swapped).
    _put_update(query + "%s/distributors/%s/" % (repo_id, distributor_id), diff['distributor'])
    # Importer update
    _put_update(query + "%s/importers/%s/" % (repo_id, importer_id), diff['importer'])
    return True
|
def get_buy ( self , buy_id , ** params ) :
    """https://developers.coinbase.com/api/v2#show-a-buy"""
    # Delegate to the shared API client, scoping the lookup to this account.
    client = self.api_client
    return client.get_buy(self.id, buy_id, **params)
|
def read ( self , size = None ) :
    """Reads a byte string from the file-like object at the current offset.

    The function will read a byte string of the specified size or
    all of the remaining data if no size was specified.

    Args:
        size (Optional[int]): number of bytes to read, where None is all
            remaining data.

    Returns:
        bytes: data read.

    Raises:
        IOError: if the read failed.
        OSError: if the read failed.
    """
    if not self . _is_open :
        raise IOError ( 'Not opened.' )
    if self . _current_offset < 0 :
        raise IOError ( 'Invalid current offset: {0:d} value less than zero.' . format ( self . _current_offset ) )
    # The uncompressed size is determined lazily on first read.
    if self . _uncompressed_stream_size is None :
        self . _uncompressed_stream_size = self . _GetUncompressedStreamSize ( )
    if self . _uncompressed_stream_size < 0 :
        raise IOError ( 'Invalid uncompressed stream size.' )
    # At or past end of stream: nothing left to read.
    if self . _current_offset >= self . _uncompressed_stream_size :
        return b''
    # A prior seek may require re-positioning the decompression state.
    if self . _realign_offset :
        self . _AlignUncompressedDataOffset ( self . _current_offset )
        self . _realign_offset = False
    if size is None :
        size = self . _uncompressed_stream_size
    # Clamp the request to the remaining data in the stream.
    if self . _current_offset + size > self . _uncompressed_stream_size :
        size = self . _uncompressed_stream_size - self . _current_offset
    uncompressed_data = b''
    if size == 0 :
        return uncompressed_data
    # Drain the current uncompressed buffer and refill it from the
    # compressed source until the request fits in the buffer.
    while size > self . _uncompressed_data_size :
        uncompressed_data = b'' . join ( [ uncompressed_data , self . _uncompressed_data [ self . _uncompressed_data_offset : ] ] )
        remaining_uncompressed_data_size = ( self . _uncompressed_data_size - self . _uncompressed_data_offset )
        self . _current_offset += remaining_uncompressed_data_size
        size -= remaining_uncompressed_data_size
        if self . _current_offset >= self . _uncompressed_stream_size :
            break
        read_count = self . _ReadCompressedData ( self . _COMPRESSED_DATA_BUFFER_SIZE )
        self . _uncompressed_data_offset = 0
        # No more compressed data available: stop with what we have.
        if read_count == 0 :
            break
    # Take the remainder of the request from the (refilled) buffer.
    if size > 0 :
        slice_start_offset = self . _uncompressed_data_offset
        slice_end_offset = slice_start_offset + size
        uncompressed_data = b'' . join ( [ uncompressed_data , self . _uncompressed_data [ slice_start_offset : slice_end_offset ] ] )
        self . _uncompressed_data_offset += size
    self . _current_offset += size
    return uncompressed_data
|
def _get_ilo_details ( self ) :
"""Gets iLO details
: raises : IloError , on an error from iLO .
: raises : IloConnectionError , if iLO is not up after reset .
: raises : IloCommandNotSupportedError , if the command is not supported
on the server ."""
|
manager_uri = '/rest/v1/Managers/1'
status , headers , manager = self . _rest_get ( manager_uri )
if status != 200 :
msg = self . _get_extended_error ( manager )
raise exception . IloError ( msg )
# verify expected type
mtype = self . _get_type ( manager )
if ( mtype not in [ 'Manager.0' , 'Manager.1' ] ) :
msg = "%s is not a valid Manager type " % mtype
raise exception . IloError ( msg )
return manager , manager_uri
|
def _end_comment_type_from_line ( line ) :
    """Return the "comment footer" (eg ' */').

    This footer goes after the content of a start of the line in a
    replacement that would be at the end of a header block; an empty
    string is returned when the line is not a footer-comment line.
    """
    end_pattern = re.compile(r"^{0}".format(_FTR_COMMENT))
    return " */" if end_pattern.match(line) else ""
|
def calc_stats ( motifs , fg_file , bg_file , genome = None , stats = None , ncpus = None ) :
    """Calculate motif enrichment metrics.

    Parameters
    ----------
    motifs : str, list or Motif instance
        A file with motifs in pwm format, a list of Motif instances or a
        single Motif instance.
    fg_file : str
        Filename of a FASTA, BED or region file with positive sequences.
    bg_file : str
        Filename of a FASTA, BED or region file with negative sequences.
    genome : str, optional
        Genome or index directory in case of BED/regions.
    stats : list, optional
        Names of metrics to calculate. See gimmemotifs.rocmetrics.__all__
        for available metrics.
    ncpus : int, optional
        Number of cores to use.

    Returns
    -------
    result : dict
        Dictionary with results where keys are motif ids and the values are
        dictionaries with metric name and value pairs.
    """
    result = {}
    # Merge metric dicts batch by batch; later batches may add further
    # metrics for motifs already seen.
    for batch in calc_stats_iterator(motifs, fg_file, bg_file, genome=genome, stats=stats, ncpus=ncpus):
        for motif_id, metrics in batch.items():
            result.setdefault(motif_id, {}).update(metrics)
    return result
|
def inline_bbl ( root_tex , bbl_tex ) :
    """Inline a compiled bibliography (.bbl) in place of a bibliography
    environment.

    Parameters
    ----------
    root_tex : unicode
        Text to process.
    bbl_tex : unicode
        Text of bibliography file.

    Returns
    -------
    txt : unicode
        Text with bibliography included.
    """
    # Use a callable replacement so the .bbl content is inserted verbatim.
    # Passing the text directly to re.sub would treat it as a replacement
    # template, where backslashes (ubiquitous in TeX) and \g<...> sequences
    # are interpreted as escapes; the previous double-backslash workaround
    # only protected against the former.
    return bib_pattern.sub(lambda match: bbl_tex, root_tex)
|
def addAggShkDstn ( self , AggShkDstn ) :
    '''Variation on AggShockConsumerType.addAggShkDstn that handles the Markov
    state.  AggShkDstn is a list of aggregate productivity shock distributions
    for each Markov state.

    :param AggShkDstn: list with one aggregate shock distribution per
        Markov state (indexed to match ``MrkvArray``).
    '''
    # If the income distribution already contains aggregate shocks (more
    # than 3 component arrays), restore the stashed shock-free version
    # before combining again; otherwise stash the current one so repeated
    # calls do not compound shocks.
    # NOTE(review): the length-3 threshold presumably corresponds to the
    # number of idiosyncratic shock components -- confirm against the
    # base class before relying on it.
    if len ( self . IncomeDstn [ 0 ] [ 0 ] ) > 3 :
        self . IncomeDstn = self . IncomeDstnWithoutAggShocks
    else :
        self . IncomeDstnWithoutAggShocks = self . IncomeDstn
    IncomeDstnOut = [ ]
    N = self . MrkvArray . shape [ 0 ]  # number of Markov states
    for t in range ( self . T_cycle ) :
        # Combine idiosyncratic and aggregate distributions state by state.
        IncomeDstnOut . append ( [ combineIndepDstns ( self . IncomeDstn [ t ] [ n ] , AggShkDstn [ n ] ) for n in range ( N ) ] )
    self . IncomeDstn = IncomeDstnOut
|
def setup ( app : Application , tracer : Tracer , * , skip_routes : Optional [ AbstractRoute ] = None , tracer_key : str = APP_AIOZIPKIN_KEY , request_key : str = REQUEST_AIOZIPKIN_KEY ) -> Application :
    """Sets required parameters in aiohttp applications for aiozipkin.

    Tracer added into application context and cleaned after application
    shutdown. You can provide custom tracer_key, if default name is not
    suitable.
    """
    app[tracer_key] = tracer
    middleware = middleware_maker(
        skip_routes=skip_routes,
        tracer_key=tracer_key,
        request_key=request_key,
    )
    app.middlewares.append(middleware)

    async def close_aiozipkin(app: Application) -> None:
        # Close zipkin transport connections when the app shuts down.
        await app[tracer_key].close()

    app.on_cleanup.append(close_aiozipkin)
    return app
|
def dir_names ( self , connId = 'default' ) :
    """Returns list of files (and/or directories) of current directory.

    Parameters:
    - connId (optional) - connection identifier. By default equals 'default'

    Returns an empty list when the NLST command fails (for example a
    server that answers with an error for an empty directory, or a
    dropped connection).
    """
    conn = self.__getConnection(connId)
    try:
        return conn.nlst()
    # Bug fix: the original bare ``except:`` also swallowed
    # KeyboardInterrupt/SystemExit; only real errors should map to [].
    except Exception:
        return []
|
def transaction_effects ( self , tx_hash , cursor = None , order = 'asc' , limit = 10 ) :
    """This endpoint represents all effects that occurred as a result of a
    given transaction.

    `GET /transactions/{hash}/effects{?cursor,limit,order}
    <https://www.stellar.org/developers/horizon/reference/endpoints/effects-for-transaction.html>`_

    :param str tx_hash: The hex-encoded transaction hash.
    :param int cursor: A paging token, specifying where to start returning records from.
    :param str order: The order in which to return rows, "asc" or "desc".
    :param int limit: Maximum number of records to return.

    :return: A single transaction's effects.
    :rtype: dict
    """
    return self.query(
        '/transactions/{tx_hash}/effects'.format(tx_hash=tx_hash),
        self.__query_params(cursor=cursor, order=order, limit=limit),
    )
|
def parse_lxml ( self , file , encoding = None , target_class = HTMLParserTarget , parser_type = 'html' ) :
    '''Return an iterator of elements found in the document.

    Args:
        file: A file object containing the document.
        encoding (str): The encoding of the document.
        target_class: A class to be used for target parsing.
        parser_type (str): The type of parser to use. Accepted values:
            ``html``, ``xhtml``, ``xml``.

    Returns:
        iterator: Each item is an element from
        :mod:`.document.htmlparse.element`
    '''
    if encoding:
        lxml_encoding = to_lxml_encoding(encoding) or 'latin1'
    else:
        lxml_encoding = encoding

    elements = []
    target = target_class(elements.append)

    if parser_type == 'html':
        parser = lxml.html.HTMLParser(encoding=lxml_encoding, target=target)
    elif parser_type == 'xhtml':
        parser = lxml.html.XHTMLParser(encoding=lxml_encoding, target=target, recover=True)
    else:
        parser = lxml.etree.XMLParser(encoding=lxml_encoding, target=target, recover=True)

    if parser_type == 'html':
        # XXX: Force libxml2 to do full read in case of early "</html>"
        # See https://github.com/chfoo/wpull/issues/104
        # See https://bugzilla.gnome.org/show_bug.cgi?id=727935
        # Bug fix: the original called '<html>'.encode(encoding), which
        # raises TypeError when no encoding was supplied.
        head = '<html>'.encode(encoding) if encoding else b'<html>'
        for dummy in range(3):
            parser.feed(head)

    while True:
        data = file.read(self.BUFFER_SIZE)
        if not data:
            break
        parser.feed(data)
        # Drain the elements produced so far so parsing stays incremental.
        for element in elements:
            yield element
        del elements[:]

    # Closing the parser may flush further elements into the list.
    parser.close()
    for element in elements:
        yield element
|
def drop ( self ) :
    """Release the message from lease management.

    This informs the policy to no longer hold on to the lease for this
    message. Pub/Sub will re-deliver the message if it is not acknowledged
    before the existing lease expires.

    .. warning::
        For most use cases, the only reason to drop a message from
        lease management is on :meth:`ack` or :meth:`nack`; these methods
        both call this one. You probably do not want to call this method
        directly.
    """
    request = requests.DropRequest(ack_id=self._ack_id, byte_size=self.size)
    self._request_queue.put(request)
|
def _index_key_for ( self , att , value = None ) :
"""Returns a key based on the attribute and its value .
The key is used for indexing ."""
|
if value is None :
value = getattr ( self , att )
if callable ( value ) :
value = value ( )
if value is None :
return None
if att not in self . lists :
return self . _get_index_key_for_non_list_attr ( att , value )
else :
return self . _tuple_for_index_key_attr_list ( att , value )
|
def match ( self , row ) :
    """Returns True if the field matches the regular expression of this simple condition, False otherwise.

    :param dict row: The row.

    :rtype: bool
    """
    return re.search(self._expression, row[self._field]) is not None
|
def getTypeDefinition ( self , attribute = None ) :
    """If attribute is None, "type" is assumed: return the corresponding
    representation of the global type definition (TypeDefinition),
    or the local definition if "type" is not found. To maintain backwards
    compat, if attribute is provided call base class method.
    """
    if attribute:
        return XMLSchemaComponent.getTypeDefinition(self, attribute)
    # No attribute: look up the global "type" definition, falling back to
    # the local content definition.
    return XMLSchemaComponent.getTypeDefinition(self, 'type') or self.content
|
def requested_packages ( self , include_implicit = False ) :
    """Get packages in the request.

    Args:
        include_implicit (bool): If True, implicit packages are appended
            to the result.

    Returns:
        List of `PackageRequest` objects.
    """
    packages = self._package_requests
    if include_implicit:
        packages = packages + self.implicit_packages
    return packages
|
def numericalize_tok ( tokens , max_vocab = 50000 , min_freq = 0 , unk_tok = "_unk_" , pad_tok = "_pad_" , bos_tok = "_bos_" , eos_tok = "_eos_" ) :
    """Takes in text tokens and returns int2tok and tok2int converters.

    Arguments:
        tokens(list): List of tokens. Can be a list of strings, or a list of lists of strings.
        max_vocab(int): Number of tokens to return in the vocab (sorted by frequency)
        min_freq(int): Minimum number of instances a token must be present in order to be preserved.
        unk_tok(str): Token to use when unknown tokens are encountered in the source text.
        pad_tok(str): Token to use when padding sequences.
        bos_tok(str): Token marking the beginning of a sequence.
        eos_tok(str): Token marking the end of a sequence.
    """
    if isinstance(tokens, str):
        raise ValueError("Expected to receive a list of tokens. Received a string instead")
    # Flatten a list of token lists into a single token stream.
    if isinstance(tokens[0], list):
        tokens = [tok for sent in tokens for tok in sent]
    counts = Counter(tokens)
    vocab = [tok for tok, c in counts.most_common(max_vocab) if c > min_freq]
    # Special tokens occupy the first four ids; the unknown id is 3.
    unk_id = 3
    int2tok = [bos_tok, pad_tok, eos_tok, unk_tok] + vocab
    tok2int = collections.defaultdict(lambda: unk_id, {tok: i for i, tok in enumerate(int2tok)})
    return int2tok, tok2int
|
def _calculate_degree_days ( temperature_equivalent , base_temperature , cooling = False ) :
"""Calculates degree days , starting with a series of temperature equivalent values
Parameters
temperature _ equivalent : Pandas Series
base _ temperature : float
cooling : bool
Set True if you want cooling degree days instead of heating degree days
Returns
Pandas Series called HDD _ base _ temperature for heating degree days or
CDD _ base _ temperature for cooling degree days ."""
|
if cooling :
ret = temperature_equivalent - base_temperature
else :
ret = base_temperature - temperature_equivalent
# degree days cannot be negative
ret [ ret < 0 ] = 0
prefix = 'CDD' if cooling else 'HDD'
ret . name = '{}_{}' . format ( prefix , base_temperature )
return ret
|
def _set_route_precedence ( self , v , load = False ) :
    """Setter method for route_precedence, mapped from YANG variable /routing_system/router/hide_pim_holder/pim/route_precedence (container)

    If this variable is read-only (config: false) in the
    source YANG file, then _set_route_precedence is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_route_precedence() directly.

    :param v: value to assign; coerced into the generated
        ``route_precedence`` container class.
    :param load: part of the generated setter signature; not used in
        this body.
    :raises ValueError: when ``v`` cannot be coerced to the container type.
    """
    # Unwrap values that carry their own YANG type constructor.
    if hasattr ( v , "_utype" ) :
        v = v . _utype ( v )
    # Coerce the value into the generated container type; a failure means
    # the supplied value is not compatible with the YANG model.
    try :
        t = YANGDynClass ( v , base = route_precedence . route_precedence , is_container = 'container' , presence = False , yang_name = "route-precedence" , rest_name = "route-precedence" , parent = self , path_helper = self . _path_helper , extmethods = self . _extmethods , register_paths = True , extensions = { u'tailf-common' : { u'cli-compact-syntax' : None , u'info' : u'Specify Route Selection criteria' , u'callpoint' : u'PimRoutePrecedenceCallpoint' , u'cli-incomplete-command' : None } } , namespace = 'urn:brocade.com:mgmt:brocade-pim' , defining_module = 'brocade-pim' , yang_type = 'container' , is_config = True )
    except ( TypeError , ValueError ) :
        raise ValueError ( { 'error-string' : """route_precedence must be of a type compatible with container""" , 'defined-type' : "container" , 'generated-type' : """YANGDynClass(base=route_precedence.route_precedence, is_container='container', presence=False, yang_name="route-precedence", rest_name="route-precedence", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-compact-syntax': None, u'info': u'Specify Route Selection criteria', u'callpoint': u'PimRoutePrecedenceCallpoint', u'cli-incomplete-command': None}}, namespace='urn:brocade.com:mgmt:brocade-pim', defining_module='brocade-pim', yang_type='container', is_config=True)""" , } )
    self . __route_precedence = t
    # Notify the parent object (when present) that state changed.
    if hasattr ( self , '_set' ) :
        self . _set ( )
|
def derivatives(self, x, y, coeffs, beta, center_x=0, center_y=0):
    """Return df/dx and df/dy of the shapelet function.

    :param x: x-coordinate(s)
    :param y: y-coordinate(s)
    :param coeffs: shapelet coefficients
    :param beta: shapelet scale parameter
    :param center_x: x-center of the shapelet basis
    :param center_y: y-center of the shapelet basis
    :return: tuple (f_x, f_y) of the two partial derivatives
    """
    basis = self._createShapelet(coeffs)
    # Work in polar coordinates relative to the shapelet center.
    radius, angle = param_util.cart2polar(x, y, center=np.array([center_x, center_y]))
    alpha_x_basis, alpha_y_basis = self._alphaShapelets(basis, beta)
    return (self._shapeletOutput(radius, angle, beta, alpha_x_basis),
            self._shapeletOutput(radius, angle, beta, alpha_y_basis))
|
def register_actionhandler(self, action_handler: type) -> None:
    """Register every public attribute of *action_handler* as an app."""
    # Skip private/dunder names; everything else becomes a registered app.
    public_names = [name for name in action_handler.__dict__
                    if not name.startswith('_')]
    for name in public_names:
        self.register_app(name, action_handler_adapter(action_handler, name))
|
def get_country_code(longname):
    """Return the code of the country contained in `longname`, or raise ValueError.

    >>> for country, code in country2code.items():
    ...     assert get_country_code('Exp_' + country) == code, (country, code)
    """
    # Case-insensitive search; the index of the matched group tells us
    # which country name was found.
    match = re.search(REGEX, longname, re.I)
    if match:
        return country2code[COUNTRIES[match.lastindex - 1]]
    raise ValueError('Could not find a valid country in %s' % longname)
|
def _process_connect_init(self):
    """Handles the initial part of the NATS protocol, moving from
    the (RE)CONNECTING to CONNECTED states when establishing
    a connection with the server.

    Runs as a Tornado coroutine: reads the server INFO line, optionally
    upgrades the stream to TLS, sends CONNECT+PING, and waits for PONG.

    :raises NatsError: if the server answers the initial PING with -ERR
    """
    # INFO {...}
    line = yield self.io.read_until(_CRLF_, max_bytes=None)
    _, args = line.split(INFO_OP + _SPC_, 1)
    self._server_info = tornado.escape.json_decode((args))
    if 'max_payload' in self._server_info:
        self._max_payload_size = self._server_info["max_payload"]
    # Check whether we need to upgrade to TLS first of all
    if 'tls_required' in self._server_info and self._server_info['tls_required']:
        # Detach and prepare for upgrading the TLS connection.
        self._loop.remove_handler(self._socket.fileno())
        tls_opts = {}
        if "tls" in self.options:
            # Allow customizing the TLS version though default
            # to one that the server supports at least.
            tls_opts = self.options["tls"]
        # Rewrap using a TLS connection, can't do handshake on connect
        # as the socket is non blocking.
        self._socket = ssl.wrap_socket(self._socket, do_handshake_on_connect=False, **tls_opts)
        # Use the TLS stream instead from now
        self.io = tornado.iostream.SSLIOStream(self._socket)
        self.io.set_close_callback(self._process_op_err)
        self.io._do_ssl_handshake()
    # Refresh state of the parser upon reconnect.
    if self.is_reconnecting:
        self._ps.reset()
    # CONNECT then send a PING expecting a PONG to make a
    # roundtrip to the server and assert that sent commands sent
    # this far have been processed already.
    cmd = self.connect_command()
    yield self.io.write(cmd)
    yield self.io.write(PING_PROTO)
    # FIXME: Add readline timeout for these.
    next_op = yield self.io.read_until(_CRLF_, max_bytes=MAX_CONTROL_LINE_SIZE)
    if self.options["verbose"] and OK_OP in next_op:
        # In verbose mode the server acknowledges CONNECT with +OK first;
        # read one more line to get the actual PING response.
        next_op = yield self.io.read_until(_CRLF_, max_bytes=MAX_CONTROL_LINE_SIZE)
    if ERR_OP in next_op:
        err_line = next_op.decode()
        _, err_msg = err_line.split(_SPC_, 1)
        # FIXME: Maybe handling could be more special here,
        # checking for ErrAuthorization for example.
        # yield from self._process_err(err_msg)
        raise NatsError("nats: " + err_msg.rstrip('\r\n'))
    if PONG_PROTO in next_op:
        # Handshake complete: start the reader, reset ping bookkeeping and
        # schedule the periodic PING timer (interval configured in seconds,
        # PeriodicCallback expects milliseconds).
        self._status = Client.CONNECTED
        self._loop.spawn_callback(self._read_loop)
        self._pongs = []
        self._pings_outstanding = 0
        self._ping_timer = tornado.ioloop.PeriodicCallback(self._ping_interval, self.options["ping_interval"] * 1000)
        self._ping_timer.start()
        # Queue and flusher for coalescing writes to the server.
        self._flush_queue = tornado.queues.Queue(maxsize=1024)
        self._loop.spawn_callback(self._flusher_loop)
|
def locateChild(self, ctx, segments):
    """Return a clone of this page that remembers its segments, so that URLs
    like /login/private/stuff will redirect the user to /private/stuff after
    login has completed.
    """
    request_args = IRequest(ctx).args
    clone = self.__class__(self.store, segments, request_args)
    # No remaining segments to resolve: this clone handles the rest.
    return clone, ()
|
def list_menu(self, options, title="Choose a value", message="Choose a value", default=None, **kwargs):
    """Show a single-selection list menu.

    Usage: C{dialog.list_menu(options, title="Choose a value", message="Choose a value", default=None, **kwargs)}

    @param options: list of options (strings) for the dialog
    @param title: window title for the dialog
    @param message: message displayed above the list
    @param default: default value to be selected
    @return: a tuple containing the exit code and user choice
    @rtype: C{DialogData(int, str)}
    """
    # zenity radiolists take pairs of (selected-flag, label) arguments;
    # the entry equal to `default` is pre-selected.
    choices = []
    for option in options:
        choices.extend(("TRUE" if option == default else "FALSE", option))
    zenity_args = ["--list", "--radiolist", "--text", message,
                   "--column", " ", "--column", "Options"]
    return self._run_zenity(title, zenity_args + choices, kwargs)
|
def flattened_iterator(l, types=(list, tuple)):
    """Yield every non-list/tuple element of an arbitrarily nested structure.

    For example [(5, 6, [8, 3]), 2, [2, 1, (3, 4)]] yields 5, 6, 8, 3, 2, 2, 1, 3, 4.
    Safe to call on something that is not a list/tuple - the original input
    is yielded as-is.
    """
    if isinstance(l, types):
        for item in l:
            # Recurse into nested containers, yielding their leaves.
            yield from flattened_iterator(item, types)
    else:
        yield l
|
def is_ready(self):
    """Is Socket Ready.

    :rtype: tuple
    """
    try:
        readable, _, _ = self.select.select([self.fileno], [], [], POLL_TIMEOUT)
    except self.select.error as why:
        # EINTR just means the call was interrupted; anything else is
        # recorded as a connection error. Either way the socket is not ready.
        if why.args[0] != EINTR:
            self._exceptions.append(AMQPConnectionError(why))
        return False
    return bool(readable)
|
def addkey(ctx, key):
    """Add a private key to the wallet.

    If no key is supplied, prompt interactively (hidden input) until the
    user enters nothing or "exit"; otherwise add every key in the given
    iterable. If exactly one public key ends up installed, its account is
    made the default account.
    """
    if not key:
        # Interactive mode: keep prompting for WIF keys until the user quits.
        while True:
            key = click.prompt("Private Key (wif) [Enter to quit]", hide_input=True, show_default=False, default="exit", )
            if not key or key == "exit":
                break
            try:
                ctx.bitshares.wallet.addPrivateKey(key)
            except Exception as e:
                # Best-effort: report the error and keep prompting.
                click.echo(str(e))
                continue
    else:
        # Batch mode: add each provided key, reporting failures individually.
        for k in key:
            try:
                ctx.bitshares.wallet.addPrivateKey(k)
            except Exception as e:
                click.echo(str(e))
    installedKeys = ctx.bitshares.wallet.getPublicKeys()
    if len(installedKeys) == 1:
        # With a single key installed, its account is an unambiguous choice
        # for the default account.
        name = ctx.bitshares.wallet.getAccountFromPublicKey(installedKeys[0])
        if name:  # only if a name to the key was found
            account = Account(name, bitshares_instance=ctx.bitshares)
            click.echo("=" * 30)
            click.echo("Setting new default user: %s" % account["name"])
            click.echo()
            click.echo("You can change these settings with:")
            click.echo("    uptick set default_account <account>")
            click.echo("=" * 30)
            config["default_account"] = account["name"]
|
def undelete_alert(self, id, **kwargs):  # noqa: E501
    """Undelete a specific alert  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.undelete_alert(id, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str id: (required)
    :return: ResponseContainerAlert
             If the method is called asynchronously,
             returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    # Both the sync and async paths delegate to the *_with_http_info
    # variant: with async_req set it returns the request thread,
    # otherwise it returns the response data directly.
    return self.undelete_alert_with_http_info(id, **kwargs)  # noqa: E501
|
def snap(args):
    """%prog snap species gffile fastafile

    Train SNAP model given gffile and fastafile. Whole procedure taken from:
    <http://gmod.org/wiki/MAKER_Tutorial_2012>
    """
    p = OptionParser(snap.__doc__)
    p.set_home("maker")
    opts, args = p.parse_args(args)
    if len(args) != 3:
        sys.exit(not p.print_help())
    species, gffile, fastafile = args
    mhome = opts.maker_home
    snapdir = "snap"
    mkdir(snapdir)
    # All intermediate files are produced inside ./snap; remember where we
    # started so we can return afterwards.
    cwd = os.getcwd()
    os.chdir(snapdir)
    # SNAP wants a single GFF3 with the sequence embedded after a ##FASTA
    # directive, so concatenate annotation + marker + sequence.
    newgffile = "training.gff3"
    logging.debug("Construct GFF file combined with sequence ...")
    sh("cat ../{0} > {1}".format(gffile, newgffile))
    sh('echo "##FASTA" >> {0}'.format(newgffile))
    sh("cat ../{0} >> {1}".format(fastafile, newgffile))
    # Standard SNAP training pipeline: GFF3 -> ZFF, categorize, export
    # unique genes, build the model, assemble the HMM.
    logging.debug("Make models ...")
    sh("{0}/src/bin/maker2zff training.gff3".format(mhome))
    sh("{0}/exe/snap/fathom -categorize 1000 genome.ann genome.dna".format(mhome))
    sh("{0}/exe/snap/fathom -export 1000 -plus uni.ann uni.dna".format(mhome))
    sh("{0}/exe/snap/forge export.ann export.dna".format(mhome))
    sh("{0}/exe/snap/hmm-assembler.pl {1} . > {1}.hmm".format(mhome, species))
    os.chdir(cwd)
    logging.debug("SNAP matrix written to `{0}/{1}.hmm`".format(snapdir, species))
|
def addToCommits(self, commit: Commit, sender: str):
    """
    Add the specified COMMIT to this replica's list of received
    commit requests.

    :param commit: the COMMIT to add to the list
    :param sender: the name of the node that sent the COMMIT
    """
    # BLS multi-sig: let the BLS component process the commit before it is
    # counted as a vote.
    self._bls_bft_replica.process_commit(commit, sender)
    self.commits.addVote(commit, sender)
    # This vote may have completed the quorum, so attempt ordering now.
    self.tryOrder(commit)
|
def return_xy(self, labels=None):
    """Returns the location in xy for some channels.

    Parameters
    ----------
    labels : list of str, optional
        the names of the channels.

    Returns
    -------
    numpy.ndarray
        a 2xn vector with the position of a channel.

    Notes
    -----
    Simplest implementation. We should at least use project onto a 2D plane.
    """
    full_positions = self.return_xyz(labels=labels)
    # Drop the first column as the simplest projection onto a 2D plane.
    return asarray(full_positions)[:, 1:]
|
def parse(self, fo, width, seed=None):
    """Convert Posmo output to motifs.

    Parameters
    ----------
    fo : file-like
        File object containing Posmo output.

    Returns
    -------
    motifs : list
        List of Motif instances.
    """
    def read_record():
        # Each Posmo record is exactly six lines: a header, a spacer and
        # four rows (one per nucleotide) of tab-separated values.
        return [fo.readline() for _ in range(6)]

    motifs = []
    record = read_record()
    while record[0]:
        rows = [[float(v) for v in line.strip().split("\t")] for line in record[2:]]
        # Transpose from 4 x L (per-nucleotide rows) to L x 4 (per-position rows).
        positions = [[rows[n][p] for n in range(4)] for p in range(len(rows[0]))]
        motif = Motif(positions)
        motif.trim(0.1)
        # The last whitespace-separated token of the header is the motif id.
        motif.id = record[0].strip().split(" ")[-1]
        motifs.append(motif)
        record = read_record()
    for i, motif in enumerate(motifs):
        if seed:
            motif.id = "%s_w%s.%s_%s" % (self.name, width, seed, i + 1)
        else:
            motif.id = "%s_w%s_%s" % (self.name, width, i + 1)
        motif.trim(0.25)
    return motifs
|
def GetRelativePath(self, path_spec):
    """Returns the relative path based on a resolved path specification.

    The relative path is the location of the upper most path specification.
    The location of the mount point is stripped off if relevant.

    Args:
      path_spec (PathSpec): path specification.

    Returns:
      str: corresponding relative path or None if the relative path could not
          be determined.

    Raises:
      PathSpecError: if the path specification is incorrect.
    """
    location = getattr(path_spec, 'location', None)
    if location is None:
        raise errors.PathSpecError('Path specification missing location.')
    is_system_level = path_spec_factory.Factory.IsSystemLevelTypeIndicator(
        self._file_system.type_indicator)
    if is_system_level:
        # System-level specs embed the mount point in their location string.
        if not location.startswith(self._mount_point.location):
            raise errors.PathSpecError(
                'Path specification does not contain mount point.')
    else:
        # Otherwise the mount point must appear as the spec's parent.
        if not hasattr(path_spec, 'parent'):
            raise errors.PathSpecError('Path specification missing parent.')
        if path_spec.parent != self._mount_point:
            raise errors.PathSpecError(
                'Path specification does not contain mount point.')
    segments = self._file_system.SplitPath(location)
    if is_system_level:
        # Strip the mount point's own segments so the result is relative.
        mount_segments = self._file_system.SplitPath(self._mount_point.location)
        segments = segments[len(mount_segments):]
    separator = self._file_system.PATH_SEPARATOR
    return '{0:s}{1:s}'.format(separator, separator.join(segments))
|
def update(self, resource, timeout=-1):
    """Updates the specified data center resource.

    Args:
        resource (dict): Object to update.
        timeout: Timeout in seconds. Wait for task completion by default.
            The timeout does not abort the operation in OneView; it just
            stops waiting for its completion.

    Returns:
        dict: Updated data center.
    """
    # Thin delegation to the underlying REST client.
    updated = self._client.update(resource, timeout=timeout)
    return updated
|
def convert_type(d, intype, outtype, convert_list=True, in_place=True):
    """convert all values of one type to another

    Parameters
    ----------
    d : dict
    intype : type_class
    outtype : type_class
    convert_list : bool
        whether to convert instances inside lists and tuples
    in_place : bool
        if True, applies conversions to original dict, else returns copy

    Examples
    --------
    >>> from pprint import pprint
    >>> d = {'a': '1', 'b': '2'}
    >>> pprint(convert_type(d, str, float))
    {'a': 1.0, 'b': 2.0}
    >>> d = {'a': ['1', '2']}
    >>> pprint(convert_type(d, str, float))
    {'a': [1.0, 2.0]}
    >>> d = {'a': [('1', '2'), [3, 4]]}
    >>> pprint(convert_type(d, str, float))
    {'a': [(1.0, 2.0), [3, 4]]}
    """
    target = d if in_place else copy.deepcopy(d)

    def _coerce(value):
        # Order matters: a value of `intype` is converted even if it also
        # happens to be a list/tuple.
        if isinstance(value, intype):
            try:
                return outtype(value)
            except Exception:
                # Conversion failed; keep the original value.
                return value
        if convert_list and isinstance(value, list):
            return _walk_sequence(value)
        if convert_list and isinstance(value, tuple):
            return tuple(_walk_sequence(value))
        return value

    def _walk_mapping(mapping):
        # Mutates the mapping in place, recursing into nested dict-likes.
        for key in mapping.keys():
            if is_dict_like(mapping[key]):
                _walk_mapping(mapping[key])
            else:
                mapping[key] = _coerce(mapping[key])

    def _walk_sequence(seq):
        # Returns a new list; dict-like items are converted in place.
        converted = []
        for item in seq:
            if is_dict_like(item):
                _walk_mapping(item)
                converted.append(item)
            else:
                converted.append(_coerce(item))
        return converted

    if is_dict_like(target):
        _walk_mapping(target)
    else:
        # NOTE(review): for non-dict input the coerced result is discarded
        # and `target` is returned unchanged, matching the original behavior.
        _coerce(target)
    return target
|
def add_model(self, propname, model, regen_mode='normal', **kwargs):
    r"""Adds a new model to the models dictionary (``object.models``)

    Parameters
    ----------
    propname : string
        The name of the property to be calculated by the model.
    model : function
        A reference (handle) to the function to be used.
    regen_mode : string
        Controls how/when the model is run (see Notes). Options are:

        * 'normal': run directly upon being assigned, and again every time
          ``regenerate_models`` is called.
        * 'constant': run directly upon being assigned, but not called
          again, so its data acts like a constant. If the data is deleted
          from the object it will be regenerated.
        * 'deferred': not run upon being assigned, but run the first time
          ``regenerate_models`` is called.
    """
    # A model argument naming propname would look itself up forever.
    if propname in kwargs.values():
        raise Exception(propname + ' can\'t be both dependency and propname')
    # Record the model function and its regeneration mode with the kwargs.
    kwargs['model'] = model
    kwargs['regen_mode'] = regen_mode
    # Inspect the model's signature and fold its default argument values
    # into kwargs without overriding anything the caller supplied.
    if model.__defaults__:
        spec = inspect.getfullargspec(model)
        defaults = list(spec.defaults)
        names = spec.args[-len(defaults):]
        for name, value in zip(names, defaults):
            kwargs.setdefault(name, value)
    # Store the model together with all of its keyword arguments.
    self.models[propname] = kwargs
    # 'deferred' models wait for regenerate_models(); everything else is
    # evaluated immediately.
    if regen_mode != 'deferred':
        self._regen(propname)
|
def truncate_name(name, rule=None):
    """shorten taxa names for tree display

    Options of rule. This only affects tree display.
    - headn (eg. head3 truncates first 3 chars)
    - oheadn (eg. ohead3 retains only the first 3 chars)
    - tailn (eg. tail3 truncates last 3 chars)
    - otailn (eg. otail3 retains only the last 3 chars)
    n = 1 ~ 99
    """
    import re
    if rule is None:
        return name
    # All four rule forms fit one pattern: optional 'o' (retain-only),
    # the end to operate on, and a 1-2 digit count.
    match = re.fullmatch(r"(o?)(head|tail)([0-9]{1,2})", rule)
    if match is None:
        print(truncate_name.__doc__, file=sys.stderr)
        raise ValueError('Wrong rule for truncation!')
    retain_only = match.group(1) == 'o'
    end = match.group(2)
    n = int(match.group(3))
    if end == 'head':
        return name[:n] if retain_only else name[n:]
    return name[-n:] if retain_only else name[:-n]
|
def verify_permitted_to_read(gs_path):
    """Check if the user has permissions to read from the given path.

    Issues a listing request against the GCS bucket and translates a 401
    response into a user-friendly error.

    Args:
      gs_path: the GCS path to check if user is permitted to read.
    Raises:
      Exception if user has no permissions to read.
    """
    # TODO(qimingj): Storage APIs need to be modified to allow absence of project
    # or credential on Items. When that happens we can move the function
    # to Items class.
    from . import _bucket
    bucket, prefix = _bucket.parse_name(gs_path)
    credentials = None
    if datalab.context.Context.is_signed_in():
        credentials = datalab.context._utils.get_credentials()
    # 'noAcl' projection keeps the probe request cheap; prefix narrows the
    # listing to the requested path when one was given.
    args = {'maxResults': Api._MAX_RESULTS, 'projection': 'noAcl'}
    if prefix is not None:
        args['prefix'] = prefix
    url = Api._ENDPOINT + (Api._OBJECT_PATH % (bucket, ''))
    try:
        datalab.utils.Http.request(url, args=args, credentials=credentials)
    except datalab.utils.RequestException as e:
        # 401 means the caller lacks read access; anything else propagates.
        if e.status == 401:
            raise Exception('Not permitted to read from specified path. ' 'Please sign in and make sure you have read access.')
        raise e
|
def patched_web3_eth_estimate_gas(self, transaction, block_identifier=None):
    """Temporary workaround until next web3.py release (5.X.X)

    Current master of web3.py has this implementation already:
    https://github.com/ethereum/web3.py/blob/2a67ea9f0ab40bb80af2b803dce742d6cad5943e/web3/eth.py#L311
    """
    # Fill in the sender from the default account when it is missing.
    if 'from' not in transaction and is_checksum_address(self.defaultAccount):
        transaction = assoc(transaction, 'from', self.defaultAccount)
    params = [transaction] if block_identifier is None else [transaction, block_identifier]
    try:
        return self.web3.manager.request_blocking('eth_estimateGas', params, )
    except ValueError as e:
        if not check_value_error_for_parity(e, ParityCallType.ESTIMATE_GAS):
            # The error does not denote an estimate-gas failure; re-raise.
            raise e
        # Parity reports estimation failure via ValueError; callers treat
        # None as "could not estimate".
        return None
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.