signature stringlengths 29 44.1k | implementation stringlengths 0 85.2k |
|---|---|
def get_network_instances(self, name=""):
    """get_network_instances implementation for NX-OS.

    Returns a dict keyed by VRF name describing each network instance
    (name, type, route distinguisher and member interfaces). If ``name``
    is given, only that VRF is returned (an empty dict when unknown).
    """
    # 'show vrf detail' returns all VRFs with detailed information:
    # a list of dicts with keys such as 'vrf_name' and 'rd'.
    command = "show vrf detail"
    vrf_table_raw = self._get_command_table(command, "TABLE_vrf", "ROW_vrf")
    # 'show vrf interface' returns all interfaces including their assigned
    # VRF: a list of dicts with keys 'if_name', 'vrf_name', 'vrf_id', 'soo'.
    command = "show vrf interface"
    intf_table_raw = self._get_command_table(command, "TABLE_if", "ROW_if")
    # Map each VRF name to the list of its member interfaces.
    vrf_intfs = defaultdict(list)
    for intf in intf_table_raw:
        vrf_intfs[intf["vrf_name"]].append(py23_compat.text_type(intf["if_name"]))
    vrfs = {}
    for vrf in vrf_table_raw:
        vrf_name = py23_compat.text_type(vrf.get("vrf_name"))
        vrfs[vrf_name] = {
            "name": vrf_name,
            # Differentiate between VRF type 'DEFAULT_INSTANCE' and 'L3VRF'.
            "type": "DEFAULT_INSTANCE" if vrf_name == "default" else "L3VRF",
            "state": {
                "route_distinguisher": py23_compat.text_type(vrf.get("rd"))
            },
            # Expected format: dict keyed by interface name with empty dicts
            # as values. BUGFIX: the previous dict.fromkeys(..., {}) shared a
            # single dict object between every interface key, so mutating one
            # entry mutated all of them; build a fresh dict per interface.
            "interfaces": {
                "interface": {intf: {} for intf in vrf_intfs[vrf_name]}
            },
        }
    # If the name of a specific VRF was passed as an argument,
    # only return results for that particular VRF.
    if name:
        if name in vrfs:
            return {py23_compat.text_type(name): vrfs[name]}
        return {}
    # Otherwise return results for all VRFs.
    return vrfs
def list_platform_sets(server_url):
    '''List all ASAM platform sets present on the Novell Fan-Out Driver.

    CLI Example:

    .. code-block:: bash

        salt-run asam.list_platform_sets prov1.domain.com
    '''
    config = _get_asam_configuration(server_url)
    if not config:
        return False
    auth = (config['username'], config['password'])
    try:
        html_content = _make_post_request(
            config['platformset_config_url'],
            {'manual': 'false', },
            auth,
            verify=False,
        )
    except Exception as exc:
        err_msg = "Failed to look up existing platform sets"
        log.error('%s:\n%s', err_msg, exc)
        return {server_url: err_msg}
    parser = _parse_html_content(html_content)
    platform_set_list = _get_platform_sets(parser.data)
    if not platform_set_list:
        return {server_url: "No existing platform sets found"}
    return {server_url: platform_set_list}
def get_decomposition_energy(self, entry, pH, V):
    """Find the decomposition energy of ``entry`` relative to the most
    stable entry at the given conditions.

    Args:
        entry (PourbaixEntry): PourbaixEntry corresponding to the
            compound to find the decomposition for
        pH (float): pH at which to find the decomposition
        V (float): voltage at which to find the decomposition

    Returns:
        Energy difference between ``entry`` and the stability hull.
    """
    # Find a representative multientry when working with a
    # multi-element system.
    if self._multielement and not isinstance(entry, MultiEntry):
        candidates = self._generate_multielement_entries(
            self._filtered_entries, forced_include=[entry])
        # Filter to only include materials where the entry is only solid.
        if entry.phase_type == "solid":
            candidates = [
                c for c in candidates
                if c.phase_type.count("Solid") == 1
            ]
        possible_energies = [
            c.normalized_energy_at_conditions(pH, V) for c in candidates
        ]
    else:
        possible_energies = [entry.normalized_energy_at_conditions(pH, V)]
    min_energy = np.min(possible_energies, axis=0)
    # Take the difference against the hull energy at the same conditions.
    return min_energy - self.get_hull_energy(pH, V)
def remove(self, cls, originalMemberNameList, memberName, classNamingConvention):
    """Remove synthesized accessors for ``memberName`` from ``cls``.

    Accessors that were part of the class's original member list are
    left untouched.

    :type cls: type
    :type originalMemberNameList: list(str)
    :type memberName: str
    :type classNamingConvention: INamingConvention|None
    """
    for accessorName in self._accessorDict(memberName, classNamingConvention):
        # Only delete accessors we synthesized ourselves and that exist.
        if accessorName in originalMemberNameList:
            continue
        if hasattr(cls, accessorName):
            delattr(cls, accessorName)
def _set_mpl_backend(self, backend, pylab=False):
    """Set a backend for Matplotlib.

    backend: A parameter that can be passed to %matplotlib
    (e.g. 'inline' or 'tk').
    """
    import traceback
    from IPython.core.getipython import get_ipython

    generic_error = (
        "\n" + "=" * 73 + "\n"
        "NOTE: The following error appeared when setting "
        "your Matplotlib backend!!\n" + "=" * 73 + "\n\n"
        "{0}"
    )
    magic = 'pylab' if pylab else 'matplotlib'
    error = None
    try:
        get_ipython().run_line_magic(magic, backend)
    except RuntimeError as err:
        # This catches errors generated by ipykernel when trying to set
        # a backend. See issue 5541.
        if "GUI eventloops" in str(err):
            import matplotlib
            previous_backend = matplotlib.get_backend()
            # Only inform about an error if the user selected backend
            # and the one set by Matplotlib are different. Else this
            # message is very confusing.
            if backend not in previous_backend.lower():
                error = (
                    "\n"
                    "NOTE: Spyder *can't* set your selected Matplotlib "
                    "backend because there is a previous backend already "
                    "in use.\n\n"
                    "Your backend will be {0}".format(previous_backend)
                )
            del matplotlib
        else:
            # This covers other RuntimeError's.
            error = generic_error.format(traceback.format_exc())
    except Exception:
        error = generic_error.format(traceback.format_exc())
    self._mpl_backend_error = error
def cancelMktData(self, contract: Contract):
    """Unsubscribe from realtime streaming tick data.

    Args:
        contract: The exact contract object that was used to
            subscribe with.
    """
    ticker = self.ticker(contract)
    reqId = self.wrapper.endTicker(ticker, 'mktData')
    if not reqId:
        # No active subscription found for this contract.
        self._logger.error(
            'cancelMktData: ' f'No reqId found for contract {contract}')
        return
    self.client.cancelMktData(reqId)
def parse_impl(self):
    """Parse the HTML content as a stream.

    This is far less memory intensive than loading the entire HTML file
    into memory, like BeautifulSoup does.
    """
    # Cast to str to ensure not unicode under Python 2, as the parser
    # doesn't like that.
    parser = XMLParser(encoding=str('UTF-8'))
    element_iter = ET.iterparse(
        self.handle, events=("start", "end"), parser=parser)
    for pos, element in element_iter:
        tag, class_attr = _tag_and_class_attr(element)
        if tag == "h1" and pos == "end":
            # The first complete <h1> provides the user's name.
            if not self.user:
                self.user = element.text.strip()
        elif tag == "div" and "thread" in class_attr and pos == "start":
            participants = self.parse_participants(element)
            thread = self.parse_thread(participants, element_iter, True)
            self.save_thread(thread)
def handleRestartRequest(self, req: Request) -> None:
    """Handle a transaction of type POOL_RESTART.

    Can schedule or cancel a restart to a newer version at a
    specified time.

    :param req: the request carrying the POOL_RESTART transaction
    """
    txn = req.operation
    if txn[TXN_TYPE] != POOL_RESTART:
        return
    action = txn[ACTION]
    if action == START:
        # Treat "0", "" or a missing/None datetime as "no scheduled time".
        when = None
        if DATETIME in txn.keys() and txn[DATETIME] not in ["0", "", None]:
            when = dateutil.parser.parse(txn[DATETIME])
        fail_timeout = txn.get(TIMEOUT, self.defaultActionTimeout)
        self.requestRestart(when, fail_timeout)
        return
    if action == CANCEL:
        if self.scheduledAction:
            self._cancelScheduledRestart()
            logger.info("Node '{}' cancels restart".format(self.nodeName))
        return
    logger.error("Got {} transaction with unsupported action {}".format(
        POOL_RESTART, action))
def _add_genotype_calls(self, variant_obj, variant_line, case_obj):
    """Add the genotype calls for the variant.

    Args:
        variant_obj (puzzle.models.Variant)
        variant_line (str): a raw tab-separated variant line
        case_obj (puzzle.models.Case)
    """
    columns = variant_line.split('\t')
    # Only the columns after the 8 fixed fields carry per-sample genotype
    # calls; without them there are no individuals to add.
    if len(columns) <= 8:
        return
    gt_format = columns[8].split(':')
    for individual in case_obj.individuals:
        sample_id = individual.ind_id
        gt_call = columns[9 + individual.ind_index].split(':')
        genotype = Genotype(**dict(zip(gt_format, gt_call)))
        variant_obj.add_individual(puzzle_genotype(
            sample_id=sample_id,
            genotype=genotype.genotype,
            case_id=case_obj.name,
            phenotype=individual.phenotype,
            ref_depth=genotype.ref_depth,
            alt_depth=genotype.alt_depth,
            genotype_quality=genotype.genotype_quality,
            depth=genotype.depth_of_coverage,
            supporting_evidence=genotype.supporting_evidence,
            pe_support=genotype.pe_support,
            sr_support=genotype.sr_support,
        ))
def split_expr(expr, op):
    """Return a list containing the top-level AND or OR operands in the
    expression ``expr``, in the same (left-to-right) order as they appear
    in the expression.

    This can be handy e.g. for splitting (weak) reverse dependencies
    from 'select' and 'imply' into individual selects/implies.

    op:
      Either AND to get AND operands, or OR to get OR operands.
      (Having this as an operand might be more future-safe than having
      two hardcoded functions.)

    Pseudo-code examples:

      split_expr(A, OR)                  -> [A]
      split_expr(A && B, OR)             -> [A && B]
      split_expr(A || B, OR)             -> [A, B]
      split_expr(A || B, AND)            -> [A || B]
      split_expr(A || B || (C && D), OR) -> [A, B, C && D]
        # Second || is not at the top level
      split_expr(A || (B && (C || D)), OR) -> [A, B && (C || D)]
        # Parentheses don't matter as long as we stay at the top level
        # (don't encounter any non-'op' nodes)
      split_expr((A || B) || C, OR)      -> [A, B, C]
      split_expr(A || (B || C), OR)      -> [A, B, C]
    """
    res = []
    # Iterative depth-first walk; the right child is pushed first so
    # that operands are emitted in left-to-right order.
    stack = [expr]
    while stack:
        node = stack.pop()
        if node.__class__ is tuple and node[0] is op:
            stack.append(node[2])
            stack.append(node[1])
        else:
            res.append(node)
    return res
def id_to_name(id):
    """Convert a PDG ID to a printable string."""
    # Fall back to the repr of the raw ID when it has no known name.
    return pdgid_names.get(id) or repr(id)
def __item_descriptor(self, config):
    """Build an item descriptor for a service configuration.

    Args:
        config: A dictionary containing the service configuration to
            describe.

    Returns:
        A dictionary that describes the service configuration.
    """
    descriptor = {
        'kind': 'discovery#directoryItem',
        'icons': {
            'x16': 'https://www.gstatic.com/images/branding/product/1x/'
                   'googleg_16dp.png',
            'x32': 'https://www.gstatic.com/images/branding/product/1x/'
                   'googleg_32dp.png',
        },
        'preferred': True,
    }
    description = config.get('description')
    root_url = config.get('root')
    name = config.get('name')
    version = config.get('api_version')
    relative_path = '/apis/{0}/{1}/rest'.format(name, version)
    if description:
        descriptor['description'] = description
    descriptor['name'] = name
    descriptor['version'] = version
    descriptor['discoveryLink'] = '.{0}'.format(relative_path)
    # Rebuild the request URL against the port of the configured root URL.
    root_url_port = urlparse.urlparse(root_url).port
    original_path = self.__request.reconstruct_full_url(
        port_override=root_url_port)
    descriptor['discoveryRestUrl'] = '{0}/{1}/{2}/rest'.format(
        original_path, name, version)
    if name and version:
        descriptor['id'] = '{0}:{1}'.format(name, version)
    return descriptor
def create(cls, name, address, proxy_port=8080, username=None,
           password=None, secondary=None, comment=None):
    """Create a new HTTP Proxy service.

    A proxy must define at least one primary address but can optionally
    also define a list of secondary addresses.

    :param str name: Name of the proxy element
    :param str address: Primary address for proxy
    :param int proxy_port: proxy port (default: 8080)
    :param str username: optional username for authentication (default: None)
    :param str password: password for username if defined (default: None)
    :param str comment: optional comment
    :param list secondary: secondary list of proxy server addresses
    :raises CreateElementFailed: Failed to create the proxy element
    :rtype: HttpProxy
    """
    # Unset optional fields are sent as empty strings / lists, not None.
    json = {
        'name': name,
        'address': address,
        'comment': comment,
        'http_proxy_port': proxy_port,
        'http_proxy_username': username or '',
        'http_proxy_password': password or '',
        'secondary': secondary or [],
    }
    return ElementCreator(cls, json)
def unquote(text):
    """Replace all percent-encoded entities in text.

    Decoding is repeated until a fixed point is reached, so doubly
    encoded input is fully unquoted.
    """
    while '%' in text:
        decoded = url_unquote(text)
        if decoded == text:
            # Nothing changed: the remaining '%' signs decode no further.
            break
        text = decoded
    return text
def parse_finalize(self):
    """Raise errors for incomplete buffered data that could not be parsed
    because the end of the input data has been reached.

    Raises
        ~ipfsapi.exceptions.DecodingError

    Returns
        tuple : Always empty
    """
    try:
        try:
            # Force the bytes decoder to flag any trailing partial bytes.
            self._decoder1.decode(b'', True)
        except UnicodeDecodeError as error:
            raise exceptions.DecodingError('json', error)
        # Late-raise errors that looked like they could have been fixed
        # if the caller had provided more data.
        if self._buffer:
            raise exceptions.DecodingError('json', self._lasterror)
    finally:
        # Reset parser state regardless of the outcome.
        self._buffer = []
        self._lasterror = None
        self._decoder1.reset()
    return ()
def handle_startendtag(self, tag, attrs):
    """Handle empty (self-closing) tags, e.g. <br />."""
    if tag.lower() in self.allowed_tag_whitelist:
        # Whitelisted tag: emit it, keeping only whitelisted attributes.
        self.result += '<' + tag
        for attr, value in attrs:
            if attr.lower() in self.allowed_attribute_whitelist:
                self.result += ' %s="%s"' % (
                    attr, self.handle_attribute_value(value))
        self.result += ' />'
    elif self.render_unallowed_tags:
        # Disallowed tag: render it escaped, with all its attributes.
        self.result += '<' + cgi.escape(tag)
        for attr, value in attrs:
            self.result += ' %s="%s"' % (attr, cgi.escape(value, True))
        self.result += ' />'
def page(self, language=values.unset, model_build=values.unset,
         status=values.unset, page_token=values.unset,
         page_number=values.unset, page_size=values.unset):
    """Retrieve a single page of QueryInstance records from the API.

    The request is executed immediately.

    :param unicode language: The ISO language-country string that specifies the language used by the Query resources to read
    :param unicode model_build: The SID or unique name of the Model Build to be queried
    :param unicode status: The status of the resources to read
    :param str page_token: PageToken provided by the API
    :param int page_number: Page Number, this value is simply for client state
    :param int page_size: Number of records to return, defaults to 50

    :returns: Page of QueryInstance
    :rtype: twilio.rest.autopilot.v1.assistant.query.QueryPage
    """
    params = values.of({
        'Language': language,
        'ModelBuild': model_build,
        'Status': status,
        'PageToken': page_token,
        'Page': page_number,
        'PageSize': page_size,
    })
    response = self._version.page('GET', self._uri, params=params, )
    return QueryPage(self._version, response, self._solution)
def start_element(self, tag, attrs):
    """Search for meta robots.txt "nofollow" and "noindex" flags."""
    if tag == 'meta' and attrs.get('name') == 'robots':
        directives = attrs.get_true('content', u'').lower().split(u',')
        self.follow = u'nofollow' not in directives
        self.index = u'noindex' not in directives
        # The robots meta tag tells us all we need; stop parsing.
        raise StopParse("found <meta name=robots> tag")
    elif tag == 'body':
        # Reaching <body> means the <head> section is behind us.
        raise StopParse("found <body> tag")
def etree_to_dict(t, trim=True, **kw):
    u"""Convert an lxml.etree object to a Python dict.

    >>> etree_to_dict(etree.Element('root'))
    {'root': None}

    :param etree.Element t: lxml tree to convert
    :returns d: a dict representing the lxml tree ``t``
    :rtype: dict
    """
    d = {t.tag: {} if t.attrib else None}
    children = list(t)
    convert_child = partial(etree_to_dict, trim=trim, **kw)
    if children:
        collected = defaultdict(list)
        d = {t.tag: {}}
        for child_dict in map(convert_child, children):
            for k, v in child_dict.iteritems():
                # Do not add Comment instances as keys.
                if k is not etree.Comment:
                    collected[k].append(v)
        # Single-element lists collapse to their only element.
        d[t.tag] = {k: v[0] if len(v) == 1 else v
                    for k, v in collected.iteritems()}
    if t.attrib:
        # Attributes are stored with an '@' prefix.
        d[t.tag].update(('@' + k, v) for k, v in t.attrib.iteritems())
    if trim and t.text:
        t.text = t.text.strip()
    if t.text:
        if t.tag is etree.Comment and not kw.get('without_comments'):
            # Adds a comments node.
            d['#comments'] = t.text
        elif children or t.attrib:
            d[t.tag]['#text'] = t.text
        else:
            d[t.tag] = t.text
    return d
async def get_sound_settings(self, target="") -> List[Setting]:
    """Get the current sound settings.

    :param str target: settings target, defaults to all.
    """
    result = await self.services["audio"]["getSoundSettings"]({"target": target})
    return [Setting.make(**item) for item in result]
def write_index(data, group, append):
    """Write the data index to the given group.

    :param h5features.Data data: The data that is being indexed.
    :param h5py.Group group: The group where to write the index.
    :param bool append: If True, append the created index to the
        existing one in the `group`. Delete any existing data in index
        if False.
    """
    # Build the index from the data, continuing from the last stored
    # index value when items already exist in the group.
    nitems = group['items'].shape[0] if 'items' in group else 0
    last_index = group['index'][-1] if nitems > 0 else -1
    index = last_index + cumindex(data._entries['features'])
    if append:
        nidx = group['index'].shape[0]
        # NOTE(review): disabled handling for appending to the end of an
        # existing item, kept for reference:
        # if data._entries['items']._continue_last_item(group):
        #     nidx -= 1
        group['index'].resize((nidx + index.shape[0],))
        group['index'][nidx:] = index
    else:
        group['index'].resize((index.shape[0],))
        group['index'][...] = index
def accept(self, value, silent=False):
    '''Accept a value from the form.

    Calls the :meth:`to_python` method, checks the `required`
    condition, applies filters and validators, and catches
    ValidationError.

    :param value: a value to be accepted
    :param silent=False: write errors to `form.errors` or not
    '''
    try:
        value = self.to_python(value)
        for validate in self.validators:
            value = validate(self, value)
        if self.required and self._is_empty(value):
            raise ValidationError(self.error_required)
    except ValidationError as exc:
        if not silent:
            exc.fill_errors(self.field)
        # NOTE: by default the value for a field is in python_data, but
        # this is not true for FieldList where data is dynamic, so we
        # fall back to the existing value for an absent one.
        value = self._existing_value
    return value
def remove_object(self, instance, bucket_name, object_name):
    """Remove an object from a bucket.

    :param str instance: A Yamcs instance name.
    :param str bucket_name: The name of the bucket.
    :param str object_name: The object to remove.
    """
    self._client.delete_proto(
        '/buckets/{}/{}/{}'.format(instance, bucket_name, object_name))
def load_data_batch(self, data_batch):
    """Load data and labels into arrays."""
    if self.sym_gen is not None:
        key = data_batch.bucket_key
        if key not in self.execgrp_bucket:
            # Lazily create an executor group for a new bucket.
            symbol = self.sym_gen(key)
            self.execgrp_bucket[key] = DataParallelExecutorGroup(
                symbol, self.arg_names, self.param_names, self.ctx,
                self.slices, data_batch, shared_group=self.execgrp)
        self.curr_execgrp = self.execgrp_bucket[key]
    else:
        self.curr_execgrp = self.execgrp
    self.curr_execgrp.load_data_batch(data_batch)
def _get_parts_list(to_go, so_far=None, ticker=None):
    """Iterate over ``to_go``, building the list of parts.

    To provide items for the beginning, use ``so_far``.

    BUGFIX: the previous default ``so_far=[[]]`` was a mutable default
    argument shared between calls, so parts leaked across independent
    invocations; a fresh ``[[]]`` is now created per call.
    """
    if so_far is None:
        so_far = [[]]
    try:
        part = to_go.pop(0)
    except IndexError:
        # Nothing left to consume.
        return so_far, ticker
    # Lists of input groups
    if isinstance(part, list) and any(isinstance(e, list) for e in part):
        while len(part) > 0:
            so_far, ticker = _get_parts_list(part, so_far, ticker)
            ticker.tick()
    # Input Group
    elif isinstance(part, list) and any(isinstance(e, Input) for e in part):
        while len(part) > 0:
            so_far, ticker = _get_parts_list(part, so_far, ticker)
    # Magic Inputs
    elif isinstance(part, Input) and part.is_magic:
        inputs = part.eval()
        while len(inputs) > 0:
            so_far, ticker = _get_parts_list(inputs, so_far, ticker)
            ticker.tick()
    # Normal inputs
    elif isinstance(part, Input) and not part.is_magic:
        so_far[ticker.value].append(part)
    # Everything else
    else:
        so_far = _append(so_far, part)
    return so_far, ticker
def transformations(self, relationship="all"):
    """Get all the transformations of this info.

    Return a list of transformations involving this info.
    ``relationship`` can be "parent" (in which case only transformations
    where the info is the ``info_in`` are returned), "child" (in which
    case only transformations where the info is the ``info_out`` are
    returned) or ``all`` (in which case any transformations where the
    info is the ``info_out`` or the ``info_in`` are returned). The
    default is ``all``.

    :raises ValueError: for any other ``relationship`` value.
    """
    if relationship not in ["all", "parent", "child"]:
        # BUGFIX: the two message fragments previously ran together
        # without any separator ("...relationship fooRelationship...").
        raise ValueError(
            "You cannot get transformations of relationship {}. "
            "Relationship can only be parent, child or all."
            .format(relationship))
    if relationship == "all":
        # Successful transformations where this info is on either side.
        return Transformation.query.filter(
            and_(
                Transformation.failed == false(),
                or_(
                    Transformation.info_in == self,
                    Transformation.info_out == self,
                ),
            )
        ).all()
    if relationship == "parent":
        return Transformation.query.filter_by(
            info_in_id=self.id, failed=False).all()
    if relationship == "child":
        return Transformation.query.filter_by(
            info_out_id=self.id, failed=False).all()
def set_image(self, image):
    """Set the display buffer to a Python Image Library image.

    The image is converted to 1-bit color; non-zero color values light
    the corresponding LEDs.

    :raises ValueError: if the image is not exactly 8x16 pixels.
    """
    imwidth, imheight = image.size
    if imwidth != 8 or imheight != 16:
        raise ValueError('Image must be an 8x16 pixels in size.')
    # Convert image to 1 bit color and grab all the pixels.
    pix = image.convert('1').load()
    # Loop through each pixel and write the display buffer pixel.
    # BUGFIX: use range() instead of the Python-2-only xrange(), which
    # raises NameError on Python 3; behavior on Python 2 is unchanged.
    for x in range(8):
        for y in range(16):
            # Any non-zero pixel value lights the LED.
            self.set_pixel(x, y, 0 if pix[(x, y)] == 0 else 1)
def get_plugin_actions(self):
    """Return a list of actions related to the plugin."""
    self.new_project_action = create_action(
        self, _("New Project..."), triggered=self.create_new_project)
    self.open_project_action = create_action(
        self, _("Open Project..."), triggered=lambda v: self.open_project())
    self.close_project_action = create_action(
        self, _("Close Project"), triggered=self.close_project)
    self.delete_project_action = create_action(
        self, _("Delete Project"), triggered=self.delete_project)
    self.clear_recent_projects_action = create_action(
        self, _("Clear this list"), triggered=self.clear_recent_projects)
    self.edit_project_preferences_action = create_action(
        self, _("Project Preferences"), triggered=self.edit_project_preferences)
    self.recent_project_menu = QMenu(_("Recent Projects"), self)
    if self.main is not None:
        # Register the actions in the main window's Projects menu.
        self.main.projects_menu_actions += [
            self.new_project_action,
            MENU_SEPARATOR,
            self.open_project_action,
            self.close_project_action,
            self.delete_project_action,
            MENU_SEPARATOR,
            self.recent_project_menu,
            self.toggle_view_action,
        ]
        self.setup_menu_actions()
    return []
def sims_by_vec(self, vec, normalize=None):
    """Find the documents most similar to a given vector
    (an already processed document)."""
    if normalize is None:
        normalize = self.qindex.normalize
    # Temporarily override the index's normalize flag for this query,
    # remembering the previous value so it can be restored afterwards.
    previous = self.qindex.normalize
    self.qindex.normalize = normalize
    self.qindex.num_best = self.topsims
    sims = self.qindex[vec]
    self.qindex.normalize = previous
    return self.sims2scores(sims)
def solve(A, b):
    r"""Solve the linear equations :math:`\mathrm A \mathbf x = \mathbf b`.

    Args:
        A (array_like): Coefficient matrix.
        b (array_like): Ordinate values.

    Returns:
        :class:`numpy.ndarray`: Solution ``x``.
    """
    A = asarray(A, float)
    b = asarray(b, float)
    n = A.shape[0]
    if n == 1:
        # 1x1 system: invert the single coefficient directly.
        with errstate(divide="ignore"):
            inv = array([[1.0 / A[0, 0]]])
        if not isfinite(inv[0, 0]):
            raise LinAlgError("Division error.")
        return dot(inv, b)
    if n == 2:
        # 2x2 system: closed-form inverse via the adjugate matrix.
        a00, a01 = A[0, 0], A[0, 1]
        a10, a11 = A[1, 0], A[1, 1]
        adj = array([[a11, -a01], [-a10, a00]])
        with errstate(divide="ignore"):
            adj /= a00 * a11 - a01 * a10
        if not npy_all(isfinite(adj)):
            raise LinAlgError("Division error.")
        return dot(adj, b)
    # Fall back to the general solver for larger systems.
    return _solve(A, b)
def authors(self, *usernames):
    """Return the entries written by the given usernames.

    When multiple usernames are provided, they operate as an "OR" query.
    """
    field = "author__{}".format(User.USERNAME_FIELD)
    if len(usernames) == 1:
        return self.filter(**{field: usernames[0]})
    return self.filter(**{"{}__in".format(field): usernames})
def _positionalArgumentKeyValueList ( self , originalConstructorExpectedArgList , syntheticMemberList , argTuple ) :
"""Transforms args tuple to a dictionary mapping argument names to values using original constructor
positional args specification , then it adds synthesized members at the end if they are not already present .
: type syntheticMemberList : list ( SyntheticMember )
: type argTuple : tuple""" | # First , the list of expected arguments is set to original constructor ' s arg spec .
expectedArgList = copy . copy ( originalConstructorExpectedArgList )
# . . . then we append members that are not already present .
for syntheticMember in syntheticMemberList :
memberName = syntheticMember . memberName ( )
if memberName not in expectedArgList :
expectedArgList . append ( memberName )
# Makes a list of tuples ( argumentName , argumentValue ) with each element of each list ( expectedArgList , argTuple )
# until the shortest list ' s end is reached .
positionalArgumentKeyValueList = list ( zip ( expectedArgList , argTuple ) )
# Add remanining arguments ( those that are not expected by the original constructor ) .
for argumentValue in argTuple [ len ( positionalArgumentKeyValueList ) : ] :
positionalArgumentKeyValueList . append ( ( None , argumentValue ) )
return positionalArgumentKeyValueList |
def landing_target_encode(self, time_usec, target_num, frame, angle_x,
                          angle_y, distance, size_x, size_y):
    '''Encode the location of a landing area captured from a downward
    facing camera.

    time_usec  : Timestamp (micros since boot or Unix epoch) (uint64_t)
    target_num : The ID of the target if multiple targets are present (uint8_t)
    frame      : MAV_FRAME enum specifying whether the following fields are earth-frame, body-frame, etc. (uint8_t)
    angle_x    : X-axis angular offset (in radians) of the target from the center of the image (float)
    angle_y    : Y-axis angular offset (in radians) of the target from the center of the image (float)
    distance   : Distance to the target from the vehicle in meters (float)
    size_x     : Size in radians of target along x-axis (float)
    size_y     : Size in radians of target along y-axis (float)
    '''
    return MAVLink_landing_target_message(
        time_usec, target_num, frame, angle_x, angle_y,
        distance, size_x, size_y)
def _adjust_overlap ( positions_list , index , direction ) :
'''Increase overlap to the right or left of an index .
: param positions _ list : list of overlap positions
: type positions _ list : list
: param index : index of the overlap to increase .
: type index : int
: param direction : which side of the overlap to increase - left or right .
: type direction : str
: returns : A list of overlap positions ( 2 - element lists )
: rtype : list
: raises : ValueError if direction isn ' t \' left \' or \' right \' .''' | if direction == 'left' :
positions_list [ index + 1 ] -= 1
elif direction == 'right' :
positions_list [ index ] += 1
else :
raise ValueError ( 'direction must be \'left\' or \'right\'.' )
return positions_list |
def genKw(w, msk, z):
    """Generate key Kw using key-selector @w, master secret key @msk,
    and table value @z.

    @returns Kw as a BigInt.
    """
    # Hash the inputs into a string of bytes.
    digest = hmac(msk, z + w, tag="TAG_PYTHIA_KW")
    # Convert the bytes into a long value no larger than the order of
    # Gt, then wrap it as a BigInt value.
    return BigInt(longFromString(digest) % long(orderGt()))
def _randomize_speed(base_speed: int, sigma: int = None) -> int:
    """Create a variation in wind speed.

    Args:
        base_speed: base wind speed
        sigma: sigma value for gaussian variation

    Returns: random wind speed
    """
    # Default sigma is a quarter of the base speed.
    int_sigma = int(base_speed / 4) if sigma is None else sigma
    val = MissionWeather._gauss(base_speed, int_sigma)
    # Clamp the result to the [0, 50] range.
    if val < 0:
        return 0
    return min(val, 50)
def write_omega_scan_config(channellist, fobj, header=True):
    """Write a `ChannelList` to an Omega-pipeline scan configuration file.

    This method is dumb and assumes the channels are sorted in the
    right order already.
    """
    # Accept either an open file-like object or a path to open ourselves.
    if isinstance(fobj, FILE_LIKE):
        close = False
    else:
        fobj = open(fobj, 'w')
        close = True
    try:
        if header:
            print('# Q Scan configuration file', file=fobj)
            print('# Generated with GWpy from a ChannelList', file=fobj)
        group = None
        for channel in channellist:
            # Emit a section header whenever the channel group changes.
            if channel.group != group:
                group = channel.group
                print('\n[%s]' % group, file=fobj)
            print("", file=fobj)
            print_omega_channel(channel, file=fobj)
    finally:
        # Only close files we opened ourselves.
        if close:
            fobj.close()
def run_clingo(
    draco_query: List[str],
    constants: Dict[str, str] = None,
    files: List[str] = None,
    relax_hard=False,
    silence_warnings=False,
    debug=False,
) -> Tuple[str, str]:
    """Run draco and return stderr and stdout."""
    # Default arguments (None is used to avoid mutable defaults).
    files = files or DRACO_LP
    if relax_hard and "hard-integrity.lp" in files:
        files.remove("hard-integrity.lp")
    constants = constants or {}
    options = ["--outf=2", "--quiet=1,2,2"]
    if silence_warnings:
        options.append("--warn=no-atom-undefined")
    for name, value in constants.items():
        options.append(f"-c {name}={value}")
    cmd = ["clingo"] + options
    logger.debug("Command: %s", " ".join(cmd))
    proc = subprocess.Popen(
        args=cmd,
        stdin=subprocess.PIPE,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
    )
    program = "\n".join(draco_query)
    file_names = [os.path.join(DRACO_LP_DIR, f) for f in files]
    asp_program = b"\n".join(map(load_file, file_names)) + program.encode("utf8")
    if debug:
        # Dump the program so the run can be replayed manually.
        with tempfile.NamedTemporaryFile(mode="w", delete=False) as fd:
            fd.write(program)
        logger.info('Debug ASP with "clingo %s %s"', " ".join(file_names), fd.name)
    stdout, stderr = proc.communicate(asp_program)
    return (stderr, stdout)
def filtered(self, indices):
    """:param indices:
        a subset of indices in the range [0 .. tot_sites - 1]
    :returns:
        a filtered SiteCollection instance if `indices` is a proper subset
        of the available indices, otherwise returns the full SiteCollection
    """
    # selecting everything (or nothing to select) is a no-op
    if indices is None or len(indices) == len(self):
        return self
    # build a new instance without running __init__
    subset = object.__new__(self.__class__)
    ordered = numpy.uint32(sorted(indices))
    subset.array = self.array[ordered]
    subset.complete = self.complete
    return subset
async def schemes(dev: Device):
    """Print supported uri schemes."""
    # query the device, then echo one scheme per line
    for uri_scheme in await dev.get_schemes():
        click.echo(uri_scheme)
def _get_rule_changes(rules, _rules):
    '''Given a list of desired rules (rules) and existing rules (_rules) return
    a list of rules to delete (to_delete) and to create (to_create).

    Each desired rule is validated first; a rule absent from the existing
    ruleset goes into to_create, and an existing rule absent from the desired
    set goes into to_delete.
    '''
    to_delete = []
    to_create = []
    # for each rule in state file
    # 1. validate rule
    # 2. determine if rule exists in existing security group rules
    for rule in rules:
        try:
            # Index directly so a missing key raises KeyError; the previous
            # rule.get() returned None and made the intended error below
            # unreachable (it reported "Invalid ip_protocol None" instead).
            ip_protocol = six.text_type(rule['ip_protocol'])
        except KeyError:
            raise SaltInvocationError('ip_protocol, to_port, and from_port are'
                                      ' required arguments for security group'
                                      ' rules.')
        supported_protocols = ['tcp', '6', 6, 'udp', '17', 17, 'icmp', '1', 1, 'all', '-1', -1]
        # accept the known names/numbers, or any numeric protocol <= 255
        if ip_protocol not in supported_protocols and (not '{0}'.format(ip_protocol).isdigit() or int(ip_protocol) > 255):
            raise SaltInvocationError('Invalid ip_protocol {0} specified in security group rule.'.format(ip_protocol))
        # For the 'all' case, we need to change the protocol name to '-1'.
        if ip_protocol == 'all':
            rule['ip_protocol'] = '-1'
        cidr_ip = rule.get('cidr_ip', None)
        group_name = rule.get('source_group_name', None)
        group_id = rule.get('source_group_group_id', None)
        # exactly one source specification is allowed: a CIDR, or a group
        # (by name or by id, but not both)
        if cidr_ip and (group_id or group_name):
            raise SaltInvocationError('cidr_ip and source groups can not both'
                                      ' be specified in security group rules.')
        if group_id and group_name:
            raise SaltInvocationError('Either source_group_group_id or'
                                      ' source_group_name can be specified in'
                                      ' security group rules, but not both.')
        if not (cidr_ip or group_id or group_name):
            raise SaltInvocationError('cidr_ip, source_group_group_id, or'
                                      ' source_group_name must be provided for'
                                      ' security group rules.')
        # for each rule in existing security group ruleset determine if
        # new rule exists
        if not any(_check_rule(rule, _rule) for _rule in _rules):
            to_create.append(rule)
    # for each rule in existing security group configuration
    # 1. determine if rules needed to be deleted
    for _rule in _rules:
        if not any(_check_rule(rule, _rule) for rule in rules):
            # Can only supply name or id, not both. Since we're deleting
            # entries, it doesn't matter which we pick.
            _rule.pop('source_group_name', None)
            to_delete.append(_rule)
    log.debug('Rules to be deleted: %s', to_delete)
    log.debug('Rules to be created: %s', to_create)
    return (to_delete, to_create)
def get_string_scope(self, code, resource=None):
    """Returns a `Scope` object for the given code.

    :param code: source code string to analyze
    :param resource: optional rope resource associated with the code
    """
    # Thin delegation to rope's libutils helper; `self` is not used.
    return rope.base.libutils.get_string_scope(code, resource)
def export_csv(self, filename, delimiter=',', line_terminator='\n', header=True, quote_level=csv.QUOTE_NONNUMERIC, double_quote=True, escape_char='\\', quote_char='\"', na_rep='', file_header='', file_footer='', line_prefix='', _no_prefix_on_first_value=False, **kwargs):
    """Writes an SFrame to a CSV file.

    Parameters
    ----------
    filename : string
        The location to save the CSV.
    delimiter : string, optional
        This describes the delimiter used for writing csv files.
    line_terminator : string, optional
        The newline character.
    header : bool, optional
        If true, the column names are emitted as a header.
    quote_level : csv.QUOTE_ALL | csv.QUOTE_NONE | csv.QUOTE_NONNUMERIC, optional
        The quoting level. If csv.QUOTE_ALL, every field is quoted.
        If csv.QUOTE_NONE, no field is quoted. If csv.QUOTE_NONNUMERIC, only
        non-numeric fields are quoted.
    double_quote : bool, optional
        If True, quotes are escaped as two consecutive quotes.
    escape_char : string, optional
        Character which begins a C escape sequence.
    quote_char : string, optional
        Character used to quote fields.
    na_rep : string, optional
        The value used to denote a missing value.
    file_header : string, optional
        A string printed to the start of the file.
    file_footer : string, optional
        A string printed to the end of the file.
    line_prefix : string, optional
        A string printed at the start of each value line.
    """
    # Accept the pandas spellings of these options as aliases.
    delimiter = kwargs.pop('sep', delimiter)
    quote_char = kwargs.pop('quotechar', quote_char)
    double_quote = kwargs.pop('doublequote', double_quote)
    line_terminator = kwargs.pop('lineterminator', line_terminator)
    if kwargs:
        raise TypeError("Unexpected keyword arguments " + str(list(kwargs.keys())))
    write_csv_options = {
        'delimiter': delimiter,
        'escape_char': escape_char,
        'double_quote': double_quote,
        'quote_char': quote_char,
        'header': header,
        'line_terminator': line_terminator,
        'na_value': na_rep,
        'file_header': file_header,
        'file_footer': file_footer,
        'line_prefix': line_prefix,
        # undocumented option. Disables line prefix on the first value line
        '_no_prefix_on_first_value': _no_prefix_on_first_value,
    }
    # Map the csv-module quoting constants onto the internal quote levels;
    # an unrecognized constant leaves 'quote_level' unset (as before).
    internal_quote_levels = {
        csv.QUOTE_MINIMAL: 0,
        csv.QUOTE_ALL: 1,
        csv.QUOTE_NONNUMERIC: 2,
        csv.QUOTE_NONE: 3,
    }
    if quote_level in internal_quote_levels:
        write_csv_options['quote_level'] = internal_quote_levels[quote_level]
    url = _make_internal_url(filename)
    self.__proxy__.save_as_csv(url, write_csv_options)
def _sortValueIntoGroup ( groupKeys , groupLimits , value ) :
"""returns the Key of the group a value belongs to
: param groupKeys : a list / tuple of keys ie [ ' 1-3 ' , ' 3-5 ' , ' 5-8 ' , ' 8-10 ' , ' 10 + ' ]
: param groupLimits : a list of the limits for the group [ 1,3,5,8,10 , float ( ' inf ' ) ] note the first value is an absolute
minimum and the last an absolute maximum . You can therefore use float ( ' inf ' )
: param value :
: return :""" | if not len ( groupKeys ) == len ( groupLimits ) - 1 :
raise ValueError ( 'len(groupKeys) must equal len(grouplimits)-1 got \nkeys:{0} \nlimits:{1}' . format ( groupKeys , groupLimits ) )
if math . isnan ( value ) :
return 'Uncertain'
# TODO add to other if bad value or outside limits
keyIndex = None
if value == groupLimits [ 0 ] : # if value is = = minimum skip the comparison
keyIndex = 1
elif value == groupLimits [ - 1 ] : # if value is = = minimum skip the comparison
keyIndex = len ( groupLimits ) - 1
else :
for i , limit in enumerate ( groupLimits ) :
if value < limit :
keyIndex = i
break
if keyIndex == 0 : # below the minimum
raise BelowLimitsError ( 'Value {0} below limit {1}' . format ( value , groupLimits [ 0 ] ) )
if keyIndex is None :
raise AboveLimitsError ( 'Value {0} above limit {1}' . format ( value , groupLimits [ - 1 ] ) )
return groupKeys [ keyIndex - 1 ] |
def NextPage(self, page=None):
    """Advance the LIMIT clause of the AWQL to the next page.

    This method is meant to be used with HasNext(). When using DataService,
    ``page`` is needed, as its paging mechanism is different from other
    services. For details, see
    https://developers.google.com/adwords/api/docs/guides/bid-landscapes#paging_through_results

    Args:
      page: An optional dict-like page returned in an API response, where the
        type depends on the configured SOAP client. The page contains the
        'totalNumEntries' key whose value represents the total number of
        results from making the query to the AdWords API services. This page
        is required when using this method with DataService.

    Returns:
      This service query object.

    Raises:
      ValueError: If the start index of this object is None, meaning that the
        LIMIT clause hasn't been set before.
    """
    if self._start_index is None:
        raise ValueError('Cannot page through query with no LIMIT clause.')
    # DataService has a different paging mechanism: step by the number of
    # bid landscape points contained in the page rather than by page size.
    landscape_points = None
    if page and self._PAGE_TYPE in page and page[self._PAGE_TYPE] in self._BID_LANDSCAPE_PAGES:
        landscape_points = sum(
            len(bid_landscape[self._LANDSCAPE_POINTS])
            for bid_landscape in page[self._ENTRIES])
    # fall back to the configured page size when no landscape count applies
    self._start_index += landscape_points or self._page_size
    return self
def min_base_quality(self):
    '''The minimum of the base qualities. In the case of a deletion, in which
    case there are no bases in this PileupElement, the minimum is taken
    over the sequenced bases immediately before and after the deletion.'''
    if self.base_qualities:
        return min(self.base_qualities)
    # Mid-deletion: no sequenced bases here, so take the minimum over the
    # in-bounds flanking positions instead.
    assert self.offset_start == self.offset_end
    qualities = self.alignment.query_qualities
    flanking = [qualities[offset]
                for offset in (self.offset_start - 1, self.offset_start)
                if 0 <= offset < len(qualities)]
    return min(flanking)
def _get_fault_type_dummy_variables ( self , rup ) :
"""Fault type ( Strike - slip , Normal , Thrust / reverse ) is
derived from rake angle .
Rakes angles within 30 of horizontal are strike - slip ,
angles from 30 to 150 are reverse , and angles from
-30 to - 150 are normal .
Note that the ' Unspecified ' case is not considered ,
because rake is always given .""" | U , SS , NS , RS = 0 , 0 , 0 , 0
if np . abs ( rup . rake ) <= 30.0 or ( 180.0 - np . abs ( rup . rake ) ) <= 30.0 : # strike - slip
SS = 1
elif rup . rake > 30.0 and rup . rake < 150.0 : # reverse
RS = 1
else : # normal
NS = 1
return U , SS , NS , RS |
def get_collections_for_image(self, image_id):
    """Get identifier of all collections that contain a given image.

    Parameters
    ----------
    image_id : string
        Unique identifier of image object

    Returns
    -------
    List(string)
        List of image collection identifier
    """
    # Only active collections referencing the image are of interest
    query = {'active': True, 'images.identifier': image_id}
    return [str(document['_id']) for document in self.collection.find(query)]
def _send(self, method, route, data, params, **kwargs):
    """Send request of type `method` to `route`."""
    route = self._fmt_route(route, params)
    log(_("sending {} request to {}").format(method.upper(), route))
    # resolve the client's method (get/post/...) by name
    client_call = getattr(self._client, method.lower())
    try:
        self.response = client_call(route, data=data, **kwargs)
    except BaseException as e:
        # surface any exception raised inside the application under test
        log(_("exception raised in application: {}: {}").format(type(e).__name__, e))
        raise Failure(_("application raised an exception (rerun with --log for more details)"))
    return self
def send_ack(self):
    """Send an ack message"""
    max_id = self.proto.max_id
    if self.last_ack == max_id:
        # nothing new to acknowledge
        return
    LOGGER.debug("ack (%d)", max_id)
    self.last_ack = max_id
    self.send_message(f"4{to_json([max_id])}")
def create(self, **kwargs):
    """Creates a new statement matching the keyword arguments specified.
    Returns the created statement.

    Keyword arguments are passed straight through to the Statement model,
    except for ``tags`` (a list of tag names, deduplicated here) and the
    derived ``search_text`` / ``search_in_response_to`` fields, which are
    filled in from the tagger when not supplied.
    """
    Statement = self.get_model('statement')
    Tag = self.get_model('tag')
    session = self.Session()
    # tags are handled separately from the rest of the statement fields
    tags = set(kwargs.pop('tags', []))
    if 'search_text' not in kwargs:
        kwargs['search_text'] = self.tagger.get_bigram_pair_string(kwargs['text'])
    if 'search_in_response_to' not in kwargs:
        in_response_to = kwargs.get('in_response_to')
        if in_response_to:
            kwargs['search_in_response_to'] = self.tagger.get_bigram_pair_string(in_response_to)
    statement = Statement(**kwargs)
    for tag_name in tags:
        # reuse an existing tag row when one with this name already exists
        tag = session.query(Tag).filter_by(name=tag_name).first()
        if not tag:
            # Create the tag
            tag = Tag(name=tag_name)
        statement.tags.append(tag)
    session.add(statement)
    # flush + refresh so the statement carries DB-assigned values before
    # it is converted to a plain object
    session.flush()
    session.refresh(statement)
    statement_object = self.model_to_object(statement)
    self._session_finish(session)
    return statement_object
def ensure_mapping_format(variables):
    """ensure variables are in mapping format.

    Args:
        variables (list/dict): original variables

    Returns:
        dict: ensured variables in dict format

    Examples:
        >>> variables = [
                {"a": 1},
                {"b": 2}
            ]
        >>> print(ensure_mapping_format(variables))
            {
                "a": 1,
                "b": 2
            }
    """
    if isinstance(variables, dict):
        # already a mapping: return unchanged
        return variables
    elif isinstance(variables, list):
        # merge the list of one-key dicts into a single mapping
        merged = {}
        for mapping_item in variables:
            merged.update(mapping_item)
        return merged
    else:
        raise exceptions.ParamsError("variables format error!")
def expect_token(lexer: Lexer, kind: TokenKind) -> Token:
    """Expect the next token to be of the given kind.

    If the next token is of the given kind, return that token after advancing
    the lexer. Otherwise, do not change the parser state and throw an error.
    """
    token = lexer.token
    if token.kind != kind:
        # leave the lexer untouched and report what was found instead
        raise GraphQLSyntaxError(
            lexer.source, token.start,
            f"Expected {kind.value}, found {token.kind.value}")
    lexer.advance()
    return token
def is_dataframe(obj):
    """Returns True if the given object is a Pandas Data Frame.

    Parameters
    ----------
    obj : instance
        The object to test whether or not is a Pandas DataFrame.
    """
    try:
        # This is the best method of type checking
        from pandas import DataFrame
    except ImportError:
        # Pandas is not a dependency, so fall back to a name-based check
        return obj.__class__.__name__ == "DataFrame"
    return isinstance(obj, DataFrame)
def unsubscribe(self, destination=None, id=None, headers=None, **keyword_headers):
    """Unsubscribe from a destination by either id or the destination name.

    :param str destination: the name of the topic or queue to unsubscribe from
    :param str id: the unique identifier of the topic or queue to unsubscribe from
    :param dict headers: a map of any additional headers the broker requires
    :param keyword_headers: any additional headers the broker requires
    """
    assert id is not None or destination is not None, "'id' or 'destination' is required"
    headers = utils.merge_headers([headers, keyword_headers])
    # attach whichever identifiers were actually supplied
    for header_key, value in ((HDR_ID, id), (HDR_DESTINATION, destination)):
        if value:
            headers[header_key] = value
    self.send_frame(CMD_UNSUBSCRIBE, headers)
def import_data(self, data):
    """Import additional data for tuning.

    Parameters
    ----------
    data :
        a list of dictionaries, each of which has at least two keys,
        'parameter' and 'value'
    """
    _completed_num = 0
    for trial_info in data:
        # progress is logged before this record is counted, so the first
        # message reads "0 / N"
        logger.info("Importing data, current processing progress %s / %s" % (_completed_num, len(data)))
        _completed_num += 1
        # every record must carry both the parameter set and its result
        assert "parameter" in trial_info
        _params = trial_info["parameter"]
        assert "value" in trial_info
        _value = trial_info['value']
        if not _value:
            # NOTE(review): this truthiness check also skips a legitimate
            # metric value of 0 / 0.0, not only None/empty — confirm intended
            logger.info("Useless trial data, value is %s, skip this trial data." % _value)
            continue
        self.supplement_data_num += 1
        # synthesize a parameter id unique among imported records
        _parameter_id = '_'.join(["ImportData", str(self.supplement_data_num)])
        self.total_data.append(_params)
        # replay the imported trial through the normal result-handling path
        self.receive_trial_result(parameter_id=_parameter_id, parameters=_params, value=_value)
    logger.info("Successfully import data to metis tuner.")
def set(string, target_level, indent_string=" ", indent_empty_lines=False):
    """Sets indentation of a single/multi-line string.

    Splits the string into lines, re-indents them in place via
    ``set_lines``, and joins the result back with newlines.
    """
    indented_lines = string.splitlines()
    set_lines(indented_lines, target_level,
              indent_string=indent_string,
              indent_empty_lines=indent_empty_lines)
    return "\n".join(indented_lines)
def name(self):
    """The name of this instance"""
    try:
        return self._name
    except AttributeError:
        # first access: build and cache a unique "module.Class[serial]" name
        unique_name = "%s.%s[%i]" % (
            self.__module__, self.__class__.__name__, next(Loggable.__ids))
        self._name = unique_name
        return unique_name
def btc_make_p2sh_address(script_hex):
    """Make a P2SH address from a hex script"""
    # decode hex -> raw script bytes, hash160 it, then encode the address
    script_bin = binascii.unhexlify(script_hex)
    script_hash = hashing.bin_hash160(script_bin)
    return bin_hash160_to_address(script_hash, version_byte=multisig_version_byte)
def truncate_loc(self, character, location, branch, turn, tick):
    """Remove future data about a particular location.

    Truncates every cache keyed on ``(character, location)`` so that nothing
    after ``(turn, tick)`` on ``branch`` survives, and deletes matching
    settings/presettings entries.

    Return True if I deleted anything, False otherwise.
    """
    r = False
    # trim the branches cache for this (character, location) pair
    branches_turns = self.branches[character, location][branch]
    branches_turns.truncate(turn)
    if turn in branches_turns:
        bttrn = branches_turns[turn]
        if bttrn.future(tick):
            bttrn.truncate(tick)
            r = True
    # trim the per-key caches the same way, turn level then tick level
    keyses = self.keys[character, location]
    for keysbranches in keyses.values():
        if branch not in keysbranches:
            continue
        keysbranch = keysbranches[branch]
        if keysbranch.future(turn):
            keysbranch.truncate(turn)
            r = True
        if turn in keysbranch:
            keysbranchturn = keysbranch[turn]
            if keysbranchturn.future(tick):
                keysbranchturn.truncate(tick)
                r = True
    if branch in self.settings:
        # purge matching entries from both settings and presettings,
        # removing containers that become empty as a result
        for sets in (self.settings, self.presettings):
            sets_branch = sets[branch]
            if turn in sets_branch:
                sets_turn = sets_branch[turn]
                # iterate over a snapshot since entries are deleted inside
                for tic, setting in list(sets_turn.future(tick).items()):
                    if setting[:2] == (character, location):
                        del sets_turn[tic]
                        r = True
                if not sets_turn:
                    del sets_branch[turn]
                    assert r, "Found an empty cache when I didn't delete anything"
            for trn, tics in list(sets_branch.future(turn).items()):
                for tic, setting in list(tics.future(tick).items()):
                    if setting[:2] == (character, location):
                        del tics[tic]
                        r = True
                if not tics:
                    del sets_branch[trn]
                    assert r, "Found an empty cache when I didn't delete anything"
            if not sets_branch:
                del sets[branch]
                assert r, "Found an empty cache when I didn't delete anything"
    # invalidate the shallow lookup cache after any truncation
    self.shallowest = OrderedDict()
    return r
def xyz(self):
    """Return all particle coordinates in this compound.

    Returns
    -------
    pos : np.ndarray, shape=(n, 3), dtype=float
        Array with the positions of all particles.
    """
    if not self.children:
        # leaf compound: its own position is the only coordinate
        return np.expand_dims(self._pos, axis=0)
    # flatten every particle's coordinates, then reshape into (n, 3)
    flat_coords = itertools.chain.from_iterable(
        particle.pos for particle in self.particles())
    return np.fromiter(flat_coords, dtype=float).reshape((-1, 3))
def bind_column(model, name, column, force=False, recursive=False, copy=False) -> Column:
    """Bind a column to the model with the given name.

    This method is primarily used during ``BaseModel.__init_subclass__``,
    although it can be used to easily attach a new column to an existing
    model:

    .. code-block:: python

        import bloop.models

        class User(BaseModel):
            id = Column(String, hash_key=True)
            email = Column(String, dynamo_name="e")

        bound = bloop.models.bind_column(User, "email", email)
        assert bound is email

        # rebind with force, and use a copy
        bound = bloop.models.bind_column(User, "email", email, force=True, copy=True)
        assert bound is not email

    If an existing index refers to this column, it will be updated to point
    to the new column using :meth:`~bloop.models.refresh_index`, including
    recalculating the index projection. Meta attributes including
    ``Meta.columns``, ``Meta.hash_key``, etc. will be updated if necessary.

    If ``name`` or the column's ``dynamo_name`` conflicts with an existing
    column or index on the model, raises :exc:`~bloop.exceptions.InvalidModel`
    unless ``force`` is True. If ``recursive`` is ``True`` and there are
    existing subclasses of ``model``, a copy of the column will attempt to
    bind to each subclass. The recursive calls will not force the bind, and
    will always use a new copy. If ``copy`` is ``True`` then a copy of the
    provided column is used. This uses a shallow copy via
    :meth:`~bloop.models.Column.__copy__`.

    :param model: The model to bind the column to.
    :param name: The name to bind the column as. In effect, used for
        ``setattr(model, name, column)``
    :param column: The column to bind to the model.
    :param force: Unbind existing columns or indexes with the same name or
        dynamo_name. Default is False.
    :param recursive: Bind to each subclass of this model. Default is False.
    :param copy: Use a copy of the column instead of the column directly.
        Default is False.
    :return: The bound column. This is a new column when ``copy`` is True,
        otherwise the input column.
    """
    if not subclassof(model, BaseModel):
        raise InvalidModel(f"{model} is not a subclass of BaseModel")
    meta = model.Meta
    if copy:
        column = copyfn(column)
    # TODO elif column.model is not None: logger.warning(f"Trying to rebind column bound to {column.model}")
    column._name = name
    safe_repr = unbound_repr(column)
    # Guard against name, dynamo_name collisions; if force=True, unbind any matches
    same_dynamo_name = (
        util.index(meta.columns, "dynamo_name").get(column.dynamo_name) or
        util.index(meta.indexes, "dynamo_name").get(column.dynamo_name))
    same_name = (
        meta.columns_by_name.get(column.name) or
        util.index(meta.indexes, "name").get(column.name))
    if column.hash_key and column.range_key:
        raise InvalidModel(f"Tried to bind {safe_repr} as both a hash and range key.")
    if force:
        if same_name:
            unbind(meta, name=column.name)
        if same_dynamo_name:
            unbind(meta, dynamo_name=column.dynamo_name)
    else:
        if same_name:
            raise InvalidModel(
                f"The column {safe_repr} has the same name as an existing column "
                f"or index {same_name}. Did you mean to bind with force=True?")
        if same_dynamo_name:
            raise InvalidModel(
                f"The column {safe_repr} has the same dynamo_name as an existing "
                f"column or index {same_name}. Did you mean to bind with force=True?")
        if column.hash_key and meta.hash_key:
            raise InvalidModel(
                f"Tried to bind {safe_repr} but {meta.model} "
                f"already has a different hash_key: {meta.hash_key}")
        if column.range_key and meta.range_key:
            raise InvalidModel(
                f"Tried to bind {safe_repr} but {meta.model} "
                f"already has a different range_key: {meta.range_key}")
    # success!
    # --- update the model and its Meta bookkeeping ---
    column.model = meta.model
    meta.columns.add(column)
    meta.columns_by_name[name] = column
    setattr(meta.model, name, column)
    if column.hash_key:
        meta.hash_key = column
        meta.keys.add(column)
    if column.range_key:
        meta.range_key = column
        meta.keys.add(column)
    # re-derive every index projection against the updated column set
    try:
        for index in meta.indexes:
            refresh_index(meta, index)
    except KeyError as e:
        raise InvalidModel(f"Binding column {column} removed a required column for index {unbound_repr(index)}") from e
    if recursive:
        # best-effort bind of a fresh copy to each subclass; conflicts are
        # skipped rather than forced
        for subclass in util.walk_subclasses(meta.model):
            try:
                bind_column(subclass, name, column, force=False, recursive=False, copy=True)
            except InvalidModel:
                pass
    return column
def memoize_nullary(f):
    """Memoizes a function that takes no arguments. The memoization lasts
    only as long as we hold a reference to the returned function."""
    def cached():
        # compute once, then reuse the attribute-stashed result
        if not hasattr(cached, 'retval'):
            cached.retval = f()
        return cached.retval
    return cached
def _bake_css(link):
    """Takes a link element and turns it into an inline style link if applicable.

    Fixes vs. the previous version:
    - operator precedence: `a and b or c or d` matched href-less
      rel=stylesheet links and then crashed on link["href"]
    - `link["rel"] is "stylesheet"` / `is "text/css"` were identity
      comparisons against string literals (always False); use equality,
      and accept the list form BeautifulSoup uses for the multi-valued
      `rel` attribute
    - regex patterns are now raw strings
    """
    href = link.attrs.get("href")
    is_css = (
        (href is not None and re.search(r"\.css$", href))
        or link.attrs.get("rel") in ("stylesheet", ["stylesheet"])
        or link.attrs.get("type") == "text/css"
    )
    # without an href there is nothing to inline, regardless of rel/type
    if href is None or not is_css:
        return
    if re.match(r"https?://", href):
        css_data = _load_url(href).read()
    else:
        css_data = _load_file(href).read()
    link.clear()
    if USING_PYTHON2:
        link.string = css_data
    else:
        link.string = str(css_data)
    # rewrite <link ...> into an inline <style> element
    link.name = "style"
    del link["rel"]
    del link["href"]
def _vec_lnqmed_residuals ( self , catchments ) :
"""Return ln ( QMED ) model errors for a list of catchments
: param catchments : List of gauged catchments
: type catchments : list of : class : ` Catchment `
: return : Model errors
: rtype : list of float""" | result = np . empty ( len ( catchments ) )
for index , donor in enumerate ( catchments ) :
result [ index ] = self . _lnqmed_residual ( donor )
return result |
def sg_get_context():
    r"""Get current context information

    Returns:
      tf.sg_opt class object which contains all context information
    """
    global _context
    # merge every context on the stack into a single option object
    merged = tf.sg_opt()
    for ctx in _context:
        merged += ctx
    return merged
def extract_dmg(self, path='.'):
    """Extract builds with .dmg extension.

    Will only work if `hdiutil` is available (i.e. on Darwin); on other
    platforms the downloaded .dmg is copied to the destination unmodified.

    @type path: str
    @param path: destination directory for the extracted .app bundle
        (or for the raw target.dmg on non-Darwin platforms)
    """
    # download target into a temp .dmg file; mkstemp's fd is not needed
    dmg_fd, dmg_fn = tempfile.mkstemp(prefix='fuzzfetch-', suffix='.dmg')
    os.close(dmg_fd)
    # temp mountpoint for hdiutil
    out_tmp = tempfile.mkdtemp(prefix='fuzzfetch-', suffix='.tmp')
    try:
        _download_url(self.artifact_url('dmg'), dmg_fn)
        if std_platform.system() == 'Darwin':
            LOG.info('.. extracting')
            subprocess.check_call(['hdiutil', 'attach', '-quiet', '-mountpoint', out_tmp, dmg_fn])
            try:
                # expect exactly one .app bundle inside the image
                apps = [mt for mt in os.listdir(out_tmp) if mt.endswith('app')]
                assert len(apps) == 1
                shutil.copytree(os.path.join(out_tmp, apps[0]), os.path.join(path, apps[0]), symlinks=True)
            finally:
                # always detach the image, even if the copy failed
                subprocess.check_call(['hdiutil', 'detach', '-quiet', out_tmp])
        else:
            LOG.warning('.. can\'t extract target.dmg on %s', std_platform.system())
            shutil.copy(dmg_fn, os.path.join(path, 'target.dmg'))
    finally:
        # clean up the mountpoint and the downloaded image
        shutil.rmtree(out_tmp, onerror=onerror)
        os.unlink(dmg_fn)
def process_file(f, stop_tag=DEFAULT_STOP_TAG, details=True, strict=False, debug=False):
    """Process an image file (expects an open file object).

    This is the function that has to deal with all the arbitrary nasty bits
    of the EXIF standard.

    :param f: open file object positioned at the start of the image
    :param stop_tag: tag name at which IFD dumping stops
    :param details: if True, also decode MakerNotes and extract thumbnails
    :param strict: passed through to ExifHeader
    :param debug: enables extra work, including experimental XMP parsing
    :return: dict of found tags, or {} when no EXIF data is recognized
    """
    # by default do not fake an EXIF beginning
    fake_exif = 0
    # determine whether it's a JPEG or TIFF
    data = f.read(12)
    if data[0:4] in [b'II*\x00', b'MM\x00*']:
        # it's a TIFF file
        logger.debug("TIFF format recognized in data[0:4]")
        f.seek(0)
        endian = f.read(1)
        f.read(1)
        offset = 0
    elif data[0:2] == b'\xFF\xD8':
        # it's a JPEG file
        logger.debug("JPEG format recognized data[0:2]=0x%X%X", ord_(data[0]), ord_(data[1]))
        base = 2
        logger.debug("data[2]=0x%X data[3]=0x%X data[6:10]=%s", ord_(data[2]), ord_(data[3]), data[6:10])
        # skip any leading JFIF-style segments before the EXIF data
        while ord_(data[2]) == 0xFF and data[6:10] in (b'JFIF', b'JFXX', b'OLYM', b'Phot'):
            length = ord_(data[4]) * 256 + ord_(data[5])
            logger.debug(" Length offset is %s", length)
            f.read(length - 8)
            # fake an EXIF beginning of file
            # I don't think this is used. --gd
            data = b'\xFF\x00' + f.read(10)
            fake_exif = 1
            if base > 2:
                logger.debug(" Added to base")
                base = base + length + 4 - 2
            else:
                logger.debug(" Added to zero")
                base = length + 4
        logger.debug(" Set segment base to 0x%X", base)
        # Big ugly patch to deal with APP2 (or other) data coming before APP1
        f.seek(0)
        # in theory, this could be insufficient since 64K is the maximum size --gd
        data = f.read(base + 4000)
        # base = 2
        # walk the JPEG segment chain until the APP1/Exif segment is found
        while 1:
            logger.debug(" Segment base 0x%X", base)
            if data[base:base + 2] == b'\xFF\xE1':
                # APP1
                logger.debug(" APP1 at base 0x%X", base)
                logger.debug(" Length: 0x%X 0x%X", ord_(data[base + 2]), ord_(data[base + 3]))
                logger.debug(" Code: %s", data[base + 4:base + 8])
                if data[base + 4:base + 8] == b"Exif":
                    logger.debug(" Decrement base by 2 to get to pre-segment header (for compatibility with later code)")
                    base -= 2
                    break
                increment = increment_base(data, base)
                logger.debug(" Increment base by %s", increment)
                base += increment
            elif data[base:base + 2] == b'\xFF\xE0':
                # APP0
                logger.debug(" APP0 at base 0x%X", base)
                logger.debug(" Length: 0x%X 0x%X", ord_(data[base + 2]), ord_(data[base + 3]))
                logger.debug(" Code: %s", data[base + 4:base + 8])
                increment = increment_base(data, base)
                logger.debug(" Increment base by %s", increment)
                base += increment
            elif data[base:base + 2] == b'\xFF\xE2':
                # APP2
                logger.debug(" APP2 at base 0x%X", base)
                logger.debug(" Length: 0x%X 0x%X", ord_(data[base + 2]), ord_(data[base + 3]))
                logger.debug(" Code: %s", data[base + 4:base + 8])
                increment = increment_base(data, base)
                logger.debug(" Increment base by %s", increment)
                base += increment
            elif data[base:base + 2] == b'\xFF\xEE':
                # APP14
                logger.debug(" APP14 Adobe segment at base 0x%X", base)
                logger.debug(" Length: 0x%X 0x%X", ord_(data[base + 2]), ord_(data[base + 3]))
                logger.debug(" Code: %s", data[base + 4:base + 8])
                increment = increment_base(data, base)
                logger.debug(" Increment base by %s", increment)
                base += increment
                logger.debug(" There is useful EXIF-like data here, but we have no parser for it.")
            elif data[base:base + 2] == b'\xFF\xDB':
                logger.debug(" JPEG image data at base 0x%X No more segments are expected.", base)
                break
            elif data[base:base + 2] == b'\xFF\xD8':
                # APP12
                logger.debug(" FFD8 segment at base 0x%X", base)
                logger.debug(" Got 0x%X 0x%X and %s instead", ord_(data[base]), ord_(data[base + 1]), data[4 + base:10 + base])
                logger.debug(" Length: 0x%X 0x%X", ord_(data[base + 2]), ord_(data[base + 3]))
                logger.debug(" Code: %s", data[base + 4:base + 8])
                increment = increment_base(data, base)
                logger.debug(" Increment base by %s", increment)
                base += increment
            elif data[base:base + 2] == b'\xFF\xEC':
                # APP12
                logger.debug(" APP12 XMP (Ducky) or Pictureinfo segment at base 0x%X", base)
                logger.debug(" Got 0x%X and 0x%X instead", ord_(data[base]), ord_(data[base + 1]))
                logger.debug(" Length: 0x%X 0x%X", ord_(data[base + 2]), ord_(data[base + 3]))
                logger.debug("Code: %s", data[base + 4:base + 8])
                increment = increment_base(data, base)
                logger.debug(" Increment base by %s", increment)
                base += increment
                logger.debug(" There is useful EXIF-like data here (quality, comment, copyright), but we have no parser for it.")
            else:
                try:
                    increment = increment_base(data, base)
                    logger.debug(" Got 0x%X and 0x%X instead", ord_(data[base]), ord_(data[base + 1]))
                except IndexError:
                    logger.debug(" Unexpected/unhandled segment type or file content.")
                    return {}
                else:
                    logger.debug(" Increment base by %s", increment)
                    base += increment
        # position just past the segment header; then verify an EXIF-like
        # marker actually follows
        f.seek(base + 12)
        if ord_(data[2 + base]) == 0xFF and data[6 + base:10 + base] == b'Exif':
            # detected EXIF header
            offset = f.tell()
            endian = f.read(1)
            # HACK TEST: endian = 'M'
        elif ord_(data[2 + base]) == 0xFF and data[6 + base:10 + base + 1] == b'Ducky':
            # detected Ducky header.
            logger.debug("EXIF-like header (normally 0xFF and code): 0x%X and %s", ord_(data[2 + base]), data[6 + base:10 + base + 1])
            offset = f.tell()
            endian = f.read(1)
        elif ord_(data[2 + base]) == 0xFF and data[6 + base:10 + base + 1] == b'Adobe':
            # detected APP14 (Adobe)
            logger.debug("EXIF-like header (normally 0xFF and code): 0x%X and %s", ord_(data[2 + base]), data[6 + base:10 + base + 1])
            offset = f.tell()
            endian = f.read(1)
        else:
            # no EXIF information
            logger.debug("No EXIF header expected data[2+base]==0xFF and data[6+base:10+base]===Exif (or Duck)")
            logger.debug("Did get 0x%X and %s", ord_(data[2 + base]), data[6 + base:10 + base + 1])
            return {}
    else:
        # file format not recognized
        logger.debug("File format not recognized.")
        return {}
    endian = chr(ord_(endian[0]))
    # deal with the EXIF info we found
    logger.debug("Endian format is %s (%s)", endian, {'I': 'Intel', 'M': 'Motorola', '\x01': 'Adobe Ducky', 'd': 'XMP/Adobe unknown'}[endian])
    hdr = ExifHeader(f, endian, offset, fake_exif, strict, debug, details)
    ifd_list = hdr.list_ifd()
    thumb_ifd = False
    ctr = 0
    # by convention the first IFD is the main image, the second a thumbnail
    for ifd in ifd_list:
        if ctr == 0:
            ifd_name = 'Image'
        elif ctr == 1:
            ifd_name = 'Thumbnail'
            thumb_ifd = ifd
        else:
            ifd_name = 'IFD %d' % ctr
        logger.debug('IFD %d (%s) at offset %s:', ctr, ifd_name, ifd)
        hdr.dump_ifd(ifd, ifd_name, stop_tag=stop_tag)
        ctr += 1
    # EXIF IFD
    exif_off = hdr.tags.get('Image ExifOffset')
    if exif_off:
        logger.debug('Exif SubIFD at offset %s:', exif_off.values[0])
        hdr.dump_ifd(exif_off.values[0], 'EXIF', stop_tag=stop_tag)
    # deal with MakerNote contained in EXIF IFD
    # (Some apps use MakerNote tags but do not use a format for which we
    # have a description, do not process these).
    if details and 'EXIF MakerNote' in hdr.tags and 'Image Make' in hdr.tags:
        hdr.decode_maker_note()
    # extract thumbnails
    if details and thumb_ifd:
        hdr.extract_tiff_thumbnail(thumb_ifd)
        hdr.extract_jpeg_thumbnail()
    # parse XMP tags (experimental)
    if debug and details:
        xmp_string = b''
        # Easy we already have them
        if 'Image ApplicationNotes' in hdr.tags:
            logger.debug('XMP present in Exif')
            xmp_string = make_string(hdr.tags['Image ApplicationNotes'].values)
        # We need to look in the entire file for the XML
        else:
            logger.debug('XMP not in Exif, searching file for XMP info...')
            xml_started = False
            xml_finished = False
            for line in f:
                open_tag = line.find(b'<x:xmpmeta')
                close_tag = line.find(b'</x:xmpmeta>')
                if open_tag != -1:
                    xml_started = True
                    line = line[open_tag:]
                    logger.debug('XMP found opening tag at line position %s' % open_tag)
                if close_tag != -1:
                    logger.debug('XMP found closing tag at line position %s' % close_tag)
                    line_offset = 0
                    if open_tag != -1:
                        line_offset = open_tag
                    line = line[:(close_tag - line_offset) + 12]
                    xml_finished = True
                if xml_started:
                    xmp_string += line
                if xml_finished:
                    break
            logger.debug('XMP Finished searching for info')
        if xmp_string:
            hdr.parse_xmp(xmp_string)
    return hdr.tags
def get_instance(self, payload):
    """Build a SyncListPermissionInstance from an API response payload.

    :param dict payload: Payload response from the API
    :returns: twilio.rest.sync.v1.service.sync_list.sync_list_permission.SyncListPermissionInstance
    :rtype: twilio.rest.sync.v1.service.sync_list.sync_list_permission.SyncListPermissionInstance
    """
    # the service/list context comes from the solution captured at list time
    service_sid = self._solution['service_sid']
    list_sid = self._solution['list_sid']
    return SyncListPermissionInstance(
        self._version,
        payload,
        service_sid=service_sid,
        list_sid=list_sid,
    )
def get_queryset(self):
    """Return the result of the configured ``queryset`` callable.

    Raises ``ImproperlyConfigured`` when the subclass did not define one.
    """
    qs = self.queryset
    if qs is not None:
        return qs()
    raise ImproperlyConfigured("'%s' must define 'queryset'" % self.__class__.__name__)
def render(self, flags: Flags) -> List[Text]:
    """Render every sentence of the list, returning one randomly chosen
    outcome per sentence."""
    return [sentence.render(flags) for sentence in self.sentences]
def get_top_level_categories(parser, token):
    """Retrieve an alphabetical list of all the categories that have no parents.

    Syntax::

        {% get_top_level_categories [using "app.Model"] as categories %}

    Returns a list of categories [<category>, <category>, ...]
    """
    bits = token.split_contents()
    usage = 'Usage: {%% %s [using "app.Model"] as <variable> %%}' % bits[0]
    if len(bits) == 3:
        # {% tag as varname %} - default model
        if bits[1] != 'as':
            raise template.TemplateSyntaxError(usage)
        varname = bits[2]
        model = "categories.category"
    elif len(bits) == 5:
        # {% tag using "app.Model" as varname %} or
        # {% tag as varname using "app.Model" %}
        if bits[1] == 'using' and bits[3] == 'as':
            model = bits[2].strip("'\"")
            varname = bits[4].strip("'\"")
        elif bits[1] == 'as' and bits[3] == 'using':
            varname = bits[2].strip("'\"")
            model = bits[4].strip("'\"")
        else:
            # BUG FIX: the original only raised when *both* keywords were
            # invalid ('and' instead of 'or'), accepting e.g.
            # {% tag as x foo y %} and then mis-assigning the arguments.
            raise template.TemplateSyntaxError(usage)
    else:
        # BUG FIX: other argument counts previously fell through and hit
        # an UnboundLocalError on 'varname'; fail with the usage message.
        raise template.TemplateSyntaxError(usage)
    return TopLevelCategoriesNode(varname, model)
def allocate_ip_for_subnet(self, subnet_id, mac, port_id):
    """Allocate an IP from the specified subnet and create a port.

    Delegates to the DHCP-range allocator, which marks the IP used and
    creates the port with the given MAC (deallocating on failure).

    :returns: (ip, mask, port_id) triple from the allocator
    """
    subnet = self.get_subnet(subnet_id)
    # the allocator handles the reserve / create-port / rollback sequence
    return self.a10_allocate_ip_from_dhcp_range(subnet, "vlan", mac, port_id)
def propagate_lithology_cols(self):
    """Propagate lithologies, geologic_types and geologic_classes data.

    Values flow down from sites to samples and from samples to specimens
    (null / "Not Specified" values in the children are overwritten from
    the parent).  If the sites table itself has gaps afterwards, the same
    columns are propagated back up from samples to sites.
    """
    cols = ['lithologies', 'geologic_types', 'geologic_classes']
    # push values down the hierarchy first
    self.propagate_cols(cols, 'samples', 'sites')
    self.propagate_cols(cols, 'specimens', 'samples')
    # without a sites table there is nothing to propagate upward into
    if 'sites' not in self.tables:
        return
    site_df = self.tables['sites'].df
    # make sure every expected column exists before checking for gaps
    for col in cols:
        if col not in site_df.columns:
            site_df[col] = None
    # any falsy cell (None/empty) means the sites table is incomplete
    if not all(site_df[cols].values.ravel()):
        print('-I- Propagating values up from samples to sites...')
        self.propagate_cols_up(cols, 'sites', 'samples')
def drawing_end(self):
    '''Finish an in-progress line drawing: deliver the drawn line to the
    registered callback, then restore the default popup and clear the
    drawing layer.'''
    from MAVProxy.modules.mavproxy_map import mp_slipmap
    if self.draw_callback is None:
        # no drawing in progress
        return
    self.draw_callback(self.draw_line)
    self.draw_callback = None
    for overlay in (
        mp_slipmap.SlipDefaultPopup(self.default_popup, combine=True),
        mp_slipmap.SlipClearLayer('Drawing'),
    ):
        self.map.add_object(overlay)
def main(args):
    """Generate a ribosome-profiling plot for one transcript.

    Validates the command-line arguments, gathers Ribo-Seq (and optional
    RNA-Seq) counts for the requested transcript, writes the per-position
    counts to a CSV file and renders the final plot.  Error messages are
    additionally written, in a simple format, to the given HTML file.
    """
    # unpack the parsed command-line arguments into locals
    (ribo_file, rna_file, transcript_name, transcriptome_fasta, read_lengths,
     read_offsets, output_path, html_file) = (
        args.ribo_file, args.rna_file, args.transcript_name,
        args.transcriptome_fasta, args.read_lengths, args.read_offsets,
        args.output_path, args.html_file)
    # error messages (simple format) are written to html file
    fh = logging.FileHandler(html_file)
    fh.setLevel(logging.ERROR)
    fh.setFormatter(ErrorLogFormatter('%(message)s'))
    log.addHandler(fh)
    log.debug('Supplied arguments\n{}'.format(
        '\n'.join(['{:<20}: {}'.format(k, v) for k, v in vars(args).items()])))
    log.debug('Testing debugggg')  # NOTE(review): leftover debug line
    # validate every input before doing any real work
    log.info('Checking if required arguments are valid...')
    ribocore.check_required_arguments(
        ribo_file=ribo_file, transcriptome_fasta=transcriptome_fasta,
        transcript_name=transcript_name)
    log.info('Done')
    if rna_file:
        log.info('Checking if RNA-Seq file is valid...')
        ribocore.check_rna_file(rna_file=rna_file)
        log.info('Done')
    log.info('Checking read lengths...')
    ribocore.check_read_lengths(ribo_file=ribo_file, read_lengths=read_lengths)
    log.info('Done')
    log.info('Checking read offsets...')
    ribocore.check_read_offsets(read_offsets=read_offsets)
    log.info('Done')
    log.info('Checking if each read length has a corresponding offset')
    ribocore.check_read_lengths_offsets(read_lengths=read_lengths,
                                        read_offsets=read_offsets)
    log.info('Done')
    log.info('Get sequence and length of the given transcript from FASTA file...')
    record = ribocore.get_fasta_record(transcriptome_fasta, transcript_name)
    transcript_sequence = record[transcript_name]
    transcript_length = len(transcript_sequence)
    log.info('Get ribo-seq read counts and total reads in Ribo-Seq...')
    with ribocore.open_pysam_file(fname=ribo_file, ftype='bam') as bam_fileobj:
        ribo_counts, total_reads = ribocore.get_ribo_counts(
            ribo_fileobj=bam_fileobj, transcript_name=transcript_name,
            read_lengths=read_lengths, read_offsets=read_offsets)
    if not ribo_counts:
        # nothing to plot: report via the HTML error log and abort
        msg = ('No RiboSeq read counts for transcript {}. No plot will be '
               'generated!'.format(transcript_name))
        log.error(msg)
        raise ribocore.RiboPlotError(msg)
    else:
        log.info('Get RNA counts for the given transcript...')
        mrna_counts = {}
        if rna_file:
            try:
                mrna_counts = get_rna_counts(rna_file, transcript_name)
            except OSError as e:
                log.error(e)
                raise
            if not mrna_counts:
                # missing RNA coverage is non-fatal: plot Ribo-Seq only
                log.warn('No RNA counts for this transcript from the given RNA Seq file. '
                         'RNA-Seq coverage will not be generated')
        else:
            log.debug('No RNA-Seq data provided. Not generating coverage')
        log.info('Get start/stop positions in transcript sequence (3 frames)...')
        codon_positions = get_start_stops(transcript_sequence)
        if not os.path.exists(output_path):
            os.mkdir(output_path)
        # write one CSV row per transcript position; positions without
        # reads get zero counts for all three frames
        log.info('Writing RiboSeq read counts for {}'.format(transcript_name))
        with open(os.path.join(output_path, 'RiboCounts.csv'), 'w') as f:
            f.write('"Position","Nucleotide","Frame 1","Frame 2","Frame 3"\n')
            for pos in range(1, transcript_length + 1):
                if pos in ribo_counts:
                    f.write('{0},{1},{2},{3},{4}\n'.format(
                        pos, transcript_sequence[pos - 1], ribo_counts[pos][1],
                        ribo_counts[pos][2], ribo_counts[pos][3]))
                else:
                    f.write('{0},{1},{2},{3},{4}\n'.format(
                        pos, transcript_sequence[pos - 1], 0, 0, 0))
        log.info('Generating RiboPlot...')
        plot_profile(ribo_counts, transcript_name, transcript_length,
                     codon_positions, read_lengths, read_offsets, mrna_counts,
                     color_scheme=args.color_scheme, html_file=args.html_file,
                     output_path=args.output_path)
    log.info('Finished!')
def count_sources_in_cluster(n_src, cdict, rev_dict):
    """Make a vector of the cluster size each source belongs to.

    Parameters
    ----------
    n_src : int
        Number of sources.
    cdict : dict(int : [int,...])
        A dictionary of clusters.  Each key is a source index mapping to
        the list of the other sources in its cluster.
    rev_dict : dict(int : int)
        A single valued dictionary pointing from source index to cluster
        key for each source in a cluster.  Note that a key does not point
        to itself.

    Returns
    -------
    np.ndarray((n_src,), int)
        Number of sources in the cluster a given source belongs to
        (0 for sources in no cluster).
    """
    ret_val = np.zeros((n_src), int)
    for i in range(n_src):
        # sources that are themselves cluster keys are absent from
        # rev_dict, so they act as their own key
        key = rev_dict.get(i, i)
        # BUG FIX: the original used a bare ``except:`` here, which would
        # also swallow KeyboardInterrupt/SystemExit; a missing key simply
        # means the source is in no cluster (count 0).
        ret_val[i] = len(cdict.get(key, ()))
    return ret_val
def _send_command ( self , command ) :
"""Wrapper for self . device . send . command ( ) .
If command is a list will iterate through commands until valid command .""" | try :
if isinstance ( command , list ) :
for cmd in command :
output = self . device . send_command ( cmd )
if "% Invalid" not in output :
break
else :
output = self . device . send_command ( command )
return self . _send_command_postprocess ( output )
except ( socket . error , EOFError ) as e :
raise ConnectionClosedException ( str ( e ) ) |
def set_distributed_assembled(self, irn_loc, jcn_loc, a_loc):
    """Set the distributed assembled matrix (coordinates, then values).

    Distributed assembled matrices require setting icntl(18) != 0.
    """
    # the row/column structure must be registered before the values
    self.set_distributed_assembled_rows_cols(irn_loc, jcn_loc)
    self.set_distributed_assembled_values(a_loc)
def prepare_exclude_file(items, base_file, chrom=None):
    """Prepare a BED file of regions to exclude from SV calling.

    Excludes high depth and centromere regions which contribute to long
    run times and false positive structural variant calls.

    :param items: list of sample data dictionaries
    :param base_file: path used to derive the output BED file name
    :param chrom: optional chromosome to restrict the exclusion file to
    :return: path to the exclusion BED file
    """
    # annotate samples with high-depth genome exclusion regions first
    items = shared.add_highdepth_genome_exclusion(items)
    out_file = "%s-exclude%s.bed" % (utils.splitext_plus(base_file)[0],
                                     "-%s" % chrom if chrom else "")
    # skip the work if the (possibly compressed) output already exists
    if not utils.file_exists(out_file) and not utils.file_exists(out_file + ".gz"):
        with shared.bedtools_tmpdir(items[0]):
            with file_transaction(items[0], out_file) as tx_out_file:
                # Get a bedtool for the full region if no variant regions
                # NOTE(review): ``callable`` is a project module here, not
                # the Python builtin
                want_bedtool = callable.get_ref_bedtool(
                    tz.get_in(["reference", "fasta", "base"], items[0]),
                    items[0]["config"], chrom)
                want_bedtool = pybedtools.BedTool(
                    shared.subset_variant_regions(want_bedtool.saveas().fn,
                                                  chrom, tx_out_file, items))
                # drop any explicitly excluded SV regions from the wanted set
                sv_exclude_bed = _get_sv_exclude_file(items)
                if sv_exclude_bed and len(want_bedtool) > 0:
                    want_bedtool = want_bedtool.subtract(sv_exclude_bed, nonamecheck=True).saveas()
                full_bedtool = callable.get_ref_bedtool(
                    tz.get_in(["reference", "fasta", "base"], items[0]),
                    items[0]["config"])
                # the exclusion file is everything *not* wanted; an empty
                # wanted set means the whole genome is excluded
                if len(want_bedtool) > 0:
                    full_bedtool.subtract(want_bedtool, nonamecheck=True).saveas(tx_out_file)
                else:
                    full_bedtool.saveas(tx_out_file)
    return out_file
def _nodedev_event_lifecycle_cb(conn, dev, event, detail, opaque):
    '''Forward libvirt node-device lifecycle events onto the salt event bus.'''
    event_data = {
        'nodedev': {'name': dev.name()},
        'event': _get_libvirt_enum_string('VIR_NODE_DEVICE_EVENT_', event),
        'detail': 'unknown',  # currently unused
    }
    _salt_send_event(opaque, conn, event_data)
def get_media_url(self, context):
    """Return the media URL, secure when the current page view uses SSL.

    The USE_SSL setting can force HTTPS (True) or HTTP (False); when it
    is None, the request found in ``context`` decides.  NOTE: not all
    backends implement SSL media — those simply return an unencrypted URL.
    """
    use_ssl = msettings['USE_SSL']
    if use_ssl is None:
        # no override configured: inspect the current request
        is_secure = self.is_secure(context)
    else:
        is_secure = use_ssl
    if is_secure:
        return client.media_url(with_ssl=True)
    return client.media_url()
def to_json(self, root_id=0, output=None):
    """Recursively dump the (sub-)tree rooted at ``root_id`` as a dict.

    Parameters
    ----------
    root_id : int
        Root id of the sub-tree to dump.
    output : dict, optional
        Kept for backward compatibility only; the value is ignored and
        rebuilt from the node at ``root_id``.

    Returns
    -------
    dict
        A tree in JSON format, starting at the root node, with keys:

        - node_id: ID of the node.
        - left_id / right_id: IDs of the children (None if absent).
        - split_feature_column / split_feature_index: feature column and
          index (within that column) on which the decision is made.
        - is_leaf: whether this node is a leaf.
        - node_type: node type (categorical, numerical, leaf etc.).
        - value: prediction (if leaf), decision split point (otherwise).
        - left / right: nested JSON representation of the children.
    """
    _raise_error_if_not_of_type(root_id, [int, long], "root_id")
    _numeric_param_check_range("root_id", root_id, 0, self.num_nodes - 1)
    node = self.nodes[root_id]
    # BUG FIX: the signature previously used a mutable default ({}) for
    # ``output``.  The argument is rebuilt from the node here anyway, so
    # the default is now the conventional None.
    output = node.to_dict()
    if node.left_id is not None:
        output['left'] = self.to_json(node.left_id, output)
    if node.right_id is not None:
        output['right'] = self.to_json(node.right_id, output)
    return output
def socket(self):
    '''Return the request socket, lazily creating and configuring it on
    first use.'''
    if hasattr(self, '_socket'):
        return self._socket
    # first call: build and connect a new REQ socket
    self._socket = self.context.socket(zmq.REQ)
    if hasattr(zmq, 'RECONNECT_IVL_MAX'):
        self._socket.setsockopt(zmq.RECONNECT_IVL_MAX, 5000)
    self._set_tcp_keepalive()
    if self.master.startswith('tcp://['):
        # Hint PF type if bracket enclosed IPv6 address
        if hasattr(zmq, 'IPV6'):
            self._socket.setsockopt(zmq.IPV6, 1)
        elif hasattr(zmq, 'IPV4ONLY'):
            self._socket.setsockopt(zmq.IPV4ONLY, 0)
    self._socket.linger = self.linger
    if self.id_:
        self._socket.setsockopt(zmq.IDENTITY, self.id_)
    self._socket.connect(self.master)
    return self._socket
def FlatArrow(line1, line2, c="m", alpha=1, tipSize=1, tipWidth=1):
    """Build a 2D arrow in 3D space by joining two close lines.

    :param line1: first border line (an Actor or a sequence of points)
    :param line2: second border line (an Actor or a sequence of points)
    :param c: color
    :param alpha: transparency
    :param tipSize: scale factor for the arrow tip length
    :param tipWidth: scale factor for the arrow tip width

    .. hint:: |flatarrow| |flatarrow.py|_

    NOTE(review): when plain point lists are passed in, the tip points are
    appended to them in place — confirm callers do not reuse these lists.
    """
    if isinstance(line1, Actor):
        line1 = line1.coordinates()
    if isinstance(line2, Actor):
        line2 = line2.coordinates()
    # end points of each border line
    sm1, sm2 = np.array(line1[-1]), np.array(line2[-1])
    # half the gap between the line ends, scaled by the tip width
    v = (sm1 - sm2) / 3 * tipWidth
    p1 = sm1 + v
    p2 = sm2 - v
    pm1 = (sm1 + sm2) / 2  # midpoint of the line ends
    pm2 = (np.array(line1[-2]) + np.array(line2[-2])) / 2  # midpoint one step back
    pm12 = pm1 - pm2  # local direction the arrow points along
    tip = pm12 / np.linalg.norm(pm12) * np.linalg.norm(v) * 3 * tipSize / tipWidth + pm1
    line1.append(p1)
    line1.append(tip)
    line2.append(p2)
    line2.append(tip)
    resm = max(100, len(line1))
    actor = Ribbon(line1, line2, alpha=alpha, c=c, res=(resm, 1)).phong()
    # Ribbon registered itself in collectable_actors; replace that entry
    # with the finished arrow actor
    settings.collectable_actors.pop()
    settings.collectable_actors.append(actor)
    return actor
def splitext(path):
    "Same as os.path.splitext() but faster."
    sep = rightmost_separator(path, os.sep)
    dot = path.rfind('.')
    # An ext is only real if it appears after the last separator and has
    # at least one non-digit char (so "lib.1.2" keeps no extension)
    if dot <= sep or containsOnly(path[dot:], "0123456789."):
        return path, ""
    return path[:dot], path[dot:]
def _get_options_for_model(self, model, opts_class=None, **options):
    """Return the translation options for ``model``, building and caching
    them (including fields inherited from superclasses) on first request.
    """
    if model not in self._registry:
        # Create a new options type for backwards compatibility.
        opts_name = "%sTranslationOptions" % model.__name__
        opts = type(opts_name, (opts_class or TranslationOptions,), options)(model)
        # Fields for translation may be inherited from abstract
        # superclasses, so merge options from every functional
        # (``_meta``-bearing) base; anything without _meta is not a model.
        for base in model.__bases__:
            if hasattr(base, '_meta'):
                opts.update(self._get_options_for_model(base))
        # Cache options for all models -- we may want to compute options
        # of registered subclasses of unregistered models.
        self._registry[model] = opts
    return self._registry[model]
def apply_entities_as_html(text, entities):
    """Format text as HTML, taking care of escaping special characters.
    The returned value can be passed to :meth:`.Bot.sendMessage` with the
    appropriate ``parse_mode``.

    :param text:
        plain text
    :param entities:
        a list of `MessageEntity <https://core.telegram.org/bots/api#messageentity>`_ objects
    """
    # BUG FIX: the escape table mapped each character to itself (an
    # HTML-unescaping artifact); restore the proper HTML entity escapes
    # required by Telegram's HTML parse mode.
    escapes = {
        '<': '&lt;',
        '>': '&gt;',
        '&': '&amp;',
    }
    formatters = {
        'bold': lambda s, e: '<b>' + s + '</b>',
        'italic': lambda s, e: '<i>' + s + '</i>',
        'text_link': lambda s, e: '<a href="' + e['url'] + '">' + s + '</a>',
        'text_mention': lambda s, e: '<a href="tg://user?id=' + str(e['user']['id']) + '">' + s + '</a>',
        'code': lambda s, e: '<code>' + s + '</code>',
        'pre': lambda s, e: '<pre>' + s + '</pre>',
    }
    return _apply_entities(text, entities, escapes, formatters)
def with_(contextmanager, do):
    """Emulate a ``with`` statement, performing an operation within context.

    :param contextmanager: Context manager to use for the ``with`` statement
    :param do: Operation to perform: callable that receives the ``as`` value
    :return: Result of the operation

    Example::

        # read all lines from given list of ``files``
        all_lines = sum((with_(open(filename), do=dotcall('readlines'))
                         for filename in files), [])
    """
    ensure_contextmanager(contextmanager)
    ensure_callable(do)
    with contextmanager as value:
        return do(value)
def warning(self, msg, *args, **kwargs):
    """Log ``msg % args`` with the WARNING severity level."""
    self._log("WARNING", msg, args, kwargs)
def fit(self, X, y=None):
    """Fit the inverse-covariance metric on data matrix ``X`` (n x d).

    ``y`` is unused; it exists for scikit-learn API compatibility.
    """
    X = self._prepare_inputs(X, ensure_min_samples=2)
    M = np.cov(X, rowvar=False)
    # a single feature yields a 0-d covariance: invert the scalar directly
    M = 1. / M if M.ndim == 0 else np.linalg.inv(M)
    self.transformer_ = transformer_from_metric(np.atleast_2d(M))
    return self
def _remove_trailing_spaces ( text : str ) -> str :
"""Remove trailing spaces and tabs
: param text : Text to clean up
: return :""" | clean_text = str ( )
for line in text . splitlines ( True ) : # remove trailing spaces ( 0x20 ) and tabs ( 0x09)
clean_text += line . rstrip ( "\x09\x20" )
return clean_text |
def _write_training_metrics ( self ) :
"""Write all CSV metrics
: return :""" | for brain_name in self . trainers . keys ( ) :
if brain_name in self . trainer_metrics :
self . trainers [ brain_name ] . write_training_metrics ( ) |
def check_stripe_api_key(app_configs=None, **kwargs):
    """Django system check: verify the Stripe live/test API keys are
    configured correctly for the active mode."""
    from . import settings as djstripe_settings

    messages = []
    if not djstripe_settings.STRIPE_SECRET_KEY:
        messages.append(checks.Critical(
            "Could not find a Stripe API key.",
            hint="Add STRIPE_TEST_SECRET_KEY and STRIPE_LIVE_SECRET_KEY to your settings.",
            id="djstripe.C001",
        ))
    elif djstripe_settings.STRIPE_LIVE_MODE:
        # live mode must use a live secret/restricted key
        if not djstripe_settings.LIVE_API_KEY.startswith(("sk_live_", "rk_live_")):
            messages.append(checks.Critical(
                "Bad Stripe live API key.",
                hint='STRIPE_LIVE_SECRET_KEY should start with "sk_live_"',
                id="djstripe.C002",
            ))
    elif not djstripe_settings.TEST_API_KEY.startswith(("sk_test_", "rk_test_")):
        # test mode must use a test secret/restricted key
        messages.append(checks.Critical(
            "Bad Stripe test API key.",
            hint='STRIPE_TEST_SECRET_KEY should start with "sk_test_"',
            id="djstripe.C003",
        ))
    return messages
def fit_transform(self, X, y=None):
    """Fit the imputer on ``X`` and return the imputed matrix.

    Note: all imputers support ``fit_transform``; only some (like
    IterativeImputer) additionally support inductive use via ``fit`` on
    ``X_train`` followed by ``transform`` on a new ``X_test``.
    """
    X_original, missing_mask = self.prepare_input_data(X)
    observed_mask = ~missing_mask
    X = X_original.copy()
    if self.normalizer is not None:
        X = self.normalizer.fit_transform(X)
    # pre-fill the missing entries, then let the solver refine them
    X_filled = self.fill(X, missing_mask, inplace=True)
    if not isinstance(X_filled, np.ndarray):
        raise TypeError(
            "Expected %s.fill() to return NumPy array but got %s" % (
                self.__class__.__name__, type(X_filled)))
    X_result = self.solve(X_filled, missing_mask)
    if not isinstance(X_result, np.ndarray):
        raise TypeError(
            "Expected %s.solve() to return NumPy array but got %s" % (
                self.__class__.__name__, type(X_result)))
    X_result = self.project_result(X=X_result)
    # originally-observed entries are returned untouched
    X_result[observed_mask] = X_original[observed_mask]
    return X_result
def create_alert_policy(self, policy_name):
    """Create an alert policy in NewRelic and return its id.

    Raises on any non-success HTTP status from the NewRelic API.
    """
    payload = {
        'policy': {
            'incident_preference': 'PER_POLICY',
            'name': policy_name,
        }
    }
    response = requests.post(
        'https://api.newrelic.com/v2/alerts_policies.json',
        headers=self.auth_header,
        data=json.dumps(payload),
    )
    response.raise_for_status()
    policy_id = response.json()['policy']['id']
    # pick up the newly created policy in the local cache
    self.refresh_all_alerts()
    return policy_id
def convert_tree(self, element1, element2=None):
    '''convert_tree

    High-level api: Recursively convert a cxml tree to an internal schema
    tree.

    Parameters
    ----------
    element1 : `Element`
        The node to be converted.
    element2 : `Element`
        The new node being constructed; created from ``element1`` when
        omitted.

    Returns
    -------
    Element
        ``element2`` after conversion.
    '''
    if element2 is None:
        # root node: its 'name' attribute becomes the tag unchanged
        root_attrib = deepcopy(element1.attrib)
        root_tag = root_attrib.pop('name')
        element2 = etree.Element(root_tag, root_attrib)
    for child in element1.findall('node'):
        child_attrib = deepcopy(child.attrib)
        # child names may carry a prefix that is expanded to a URL
        child_tag = self.prefix_to_url(child_attrib.pop('name'))
        new_child = etree.SubElement(element2, child_tag, child_attrib)
        self.convert_tree(child, new_child)
    return element2
def get_trips(self, timestamp, start, via, destination, departure=True, prev_advices=1, next_advices=1):
    """Fetch trip possibilities for these parameters.

    Wraps http://webservices.ns.nl/ns-api-treinplanner?<parameters> with:
        fromStation, toStation, dateTime (e.g. 2012-02-21T15:50),
        departure (true = depart at timestamp, false = arrive at timestamp),
        previousAdvices, nextAdvices

    NOTE(review): the ``departure`` flag is never added to the request
    URL here — confirm the API default is the intended behaviour.
    """
    # pick the Dutch UTC offset, DST-aware
    timezonestring = '+0100'
    if is_dst('Europe/Amsterdam'):
        timezonestring = '+0200'
    url = 'http://webservices.ns.nl/ns-api-treinplanner?'
    url = url + 'fromStation=' + start
    url = url + '&toStation=' + destination
    if via:
        url = url + '&via=' + via
    if len(timestamp) == 5:
        # Format of HH:MM - api needs yyyy-mm-ddThh:mm; assume today
        timestamp = time.strftime("%Y-%m-%d") + 'T' + timestamp
        # requested_time = datetime.strptime(timestamp, "%Y-%m-%dT%H:%M")
        # TODO: DST/normal time
        requested_time = load_datetime(timestamp + timezonestring, "%Y-%m-%dT%H:%M%z")
    else:
        # timestamp arrived as dd-mm-yyyy HH:MM
        # requested_time = datetime.strptime(timestamp, "%d-%m-%Y %H:%M")
        requested_time = load_datetime(timestamp + timezonestring, "%d-%m-%Y %H:%M%z")
        # convert to the ISO form the API expects
        timestamp = datetime.strptime(timestamp, "%d-%m-%Y %H:%M").strftime("%Y-%m-%dT%H:%M")
    url = url + '&previousAdvices=' + str(prev_advices)
    url = url + '&nextAdvices=' + str(next_advices)
    url = url + '&dateTime=' + timestamp
    raw_trips = self._request('GET', url)
    return self.parse_trips(raw_trips, requested_time)
def refresh(self):
    """Refresh the server and its child objects.

    Drops all cached information and re-fetches it from the server via
    the hpssacli/ssacli command, rebuilding the controller objects.

    :raises: HPSSAOperationError, if hpssacli/ssacli operation failed.
    """
    raid_info = _convert_to_dict(self._get_all_details())
    self.controllers = [Controller(key, value, self)
                        for key, value in raid_info.items()]
    self.last_updated = time.time()
def enrich_relations(rdf, enrich_mappings, use_narrower, use_transitive):
    """Enrich the SKOS relations according to SKOS semantics, including
    subproperties of broader and symmetric related properties.

    If use_narrower is True, include inverse narrower relations for all
    broader relations.  If use_narrower is False, instead remove all
    narrower relations, replacing them with inverse broader relations.
    If use_transitive is True, calculate transitive hierarchical
    relationships (broaderTransitive, and also narrowerTransitive if
    use_narrower is True) and include them in the model; otherwise any
    existing transitive relations are removed.
    """
    # 1. first enrich mapping relationships (because they affect regular ones)
    if enrich_mappings:
        infer.skos_symmetric_mappings(rdf)
        infer.skos_hierarchical_mappings(rdf, use_narrower)
    # 2. then enrich regular relationships
    # related <-> related
    infer.skos_related(rdf)
    # broaderGeneric -> broader + inverse narrowerGeneric
    for s, o in rdf.subject_objects(SKOSEXT.broaderGeneric):
        rdf.add((s, SKOS.broader, o))
    # broaderPartitive -> broader + inverse narrowerPartitive
    for s, o in rdf.subject_objects(SKOSEXT.broaderPartitive):
        rdf.add((s, SKOS.broader, o))
    infer.skos_hierarchical(rdf, use_narrower)
    # transitive closure: broaderTransitive and narrowerTransitive
    if use_transitive:
        infer.skos_transitive(rdf, use_narrower)
    else:
        # transitive relationships are not wanted, so remove them
        for s, o in rdf.subject_objects(SKOS.broaderTransitive):
            rdf.remove((s, SKOS.broaderTransitive, o))
        for s, o in rdf.subject_objects(SKOS.narrowerTransitive):
            rdf.remove((s, SKOS.narrowerTransitive, o))
    # finally, fix up hasTopConcept / topConceptOf inferences
    infer.skos_topConcept(rdf)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.