signature stringlengths 29 44.1k | implementation stringlengths 0 85.2k |
|---|---|
def get_siblings_treepos(self, treepos):
    """Return the treepositions of all siblings of *treepos*.

    A sibling is any child of the same parent other than *treepos*
    itself.  The root (a node without a parent) has no siblings, so an
    empty list is returned in that case.
    """
    parent = self.get_parent_treepos(treepos)
    if parent is None:
        return []
    return [pos for pos in self.get_children_treepos(parent) if pos != treepos]
def process(self, request, item):
    """Process a PayPal direct payment.

    Builds the WPP parameter dict from the form's cleaned data plus the
    given *item*, then issues either a single payment or a recurring
    payments profile, depending on whether ``billingperiod`` is present.

    Returns ``True`` on success and ``False`` when PayPal reports a
    failure.
    """
    warn_untested()
    from paypal.pro.helpers import PayPalWPP
    wpp = PayPalWPP(request)

    # Assemble the request parameters from the validated form data.
    params = self.cleaned_data
    params['creditcardtype'] = self.fields['acct'].card_type
    params['expdate'] = self.cleaned_data['expdate'].strftime("%m%Y")
    params['ipaddress'] = request.META.get("REMOTE_ADDR", "")
    params.update(item)

    recurring = 'billingperiod' in params
    try:
        if recurring:
            wpp.createRecurringPaymentsProfile(params, direct=True)
        else:
            wpp.doDirectPayment(params)
    except PayPalFailure:
        return False
    return True
def to_python(self, data):
    """Convert *data*, adding support for the txtinfo format.

    Falls back to the txtinfo-to-jsoninfo conversion when the parent
    parser cannot handle the input.
    """
    try:
        parsed = super(OlsrParser, self).to_python(data)
    except ConversionException as exc:
        parsed = self._txtinfo_to_jsoninfo(exc.data)
    return parsed
def copy(self, newdoc=None, idsuffix=""):
    """Return a deep copy of this element and all its children.

    Parameters:
        newdoc (:class:`Document`): The document the copy should be
            associated with.
        idsuffix (str or bool): If a string, it is appended to the ID of
            the copy (prevents duplicate IDs when making copies for the
            same document).  If ``True``, a random suffix is generated.

    Returns:
        A copy of the element.
    """
    if idsuffix is True:
        # One random 32-bit hash per copy; the same suffix is reused for
        # all children of the copied element.
        idsuffix = ".copy." + "%08x" % random.getrandbits(32)
    duplicate = deepcopy(self)
    if idsuffix:
        duplicate.addidsuffix(idsuffix)
    duplicate.setparents()
    duplicate.setdoc(newdoc)
    return duplicate
def show_menu(title, options, default=None, height=None, width=None, multiselect=False, precolored=False):
    """Show an interactive menu in the terminal.

    Arguments:
        options: list of menu options
        default: initial option to highlight
        height: maximum height of the menu
        width: maximum width of the menu
        multiselect: allow multiple items to be selected?
        precolored: allow strings with embedded ANSI commands

    Returns:
        * If multiselect is True, a list of selected options.
        * If multiselect is False, the selected option.
        * If an option is a 2-tuple, the first item is displayed and the
          second item is returned.
        * If the menu is cancelled (Esc pressed), None.

    Notes:
        * OptionGroup objects may be passed in ``options`` to create
          sub-headers in the menu.
    """
    plugins = [FilterPlugin()]
    has_groups = any(isinstance(opt, OptionGroup) for opt in options)
    if has_groups:
        plugins.append(OptionGroupPlugin())
    if title:
        plugins.append(TitlePlugin(title))
    if precolored:
        plugins.append(PrecoloredPlugin())
    menu = Termenu(options, default=default, height=height, width=width,
                   multiselect=multiselect, plugins=plugins)
    return menu.show()
def _set_edge_loop_detection_native(self, v, load=False):
    """Setter method for edge_loop_detection_native, mapped from YANG variable
    /interface/ethernet/edge_loop_detection_native (container).

    If this variable is read-only (config: false) in the source YANG file,
    then _set_edge_loop_detection_native is considered a private method.
    Backends looking to populate this variable should do so via calling
    thisObj._set_edge_loop_detection_native() directly.

    NOTE(review): this looks like pyangbind-generated code — do not edit by
    hand; regenerate from the YANG model instead.
    """
    # Unwrap values that carry their own YANG type wrapper.
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        # Re-wrap the value in the generated container type; raises if the
        # value is not compatible with the container schema.
        t = YANGDynClass(v, base=edge_loop_detection_native.edge_loop_detection_native, is_container='container', presence=False, yang_name="edge-loop-detection-native", rest_name="", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None, u'sort-priority': u'RUNNCFG_INTERFACE_LEVEL_ELD'}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='container', is_config=True)
    except (TypeError, ValueError):
        # Surface a structured error describing the expected generated type.
        raise ValueError({'error-string': """edge_loop_detection_native must be of a type compatible with container""", 'defined-type': "container", 'generated-type': """YANGDynClass(base=edge_loop_detection_native.edge_loop_detection_native, is_container='container', presence=False, yang_name="edge-loop-detection-native", rest_name="", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None, u'sort-priority': u'RUNNCFG_INTERFACE_LEVEL_ELD'}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='container', is_config=True)""", })
    self.__edge_loop_detection_native = t
    # Notify the parent object (if it supports change notification).
    if hasattr(self, '_set'):
        self._set()
def _get_forums_for_user(self, user, perm_codenames, use_tree_hierarchy=False):
    """Return all the forums that satisfy the given list of permission codenames.

    User and group forum permissions are used.  A forum is returned only if
    *all* of the requested permission codenames are granted for it.

    If the ``use_tree_hierarchy`` keyword argument is set, the granted forums
    are filtered so that a forum which has an ancestor outside the granted
    set is not returned.
    """
    # Results are memoized per (permissions, user) pair on this handler.
    granted_forums_cache_key = '{}__{}'.format(
        ':'.join(perm_codenames),
        user.id if not user.is_anonymous else 'anonymous',
    )
    if granted_forums_cache_key in self._granted_forums_cache:
        return self._granted_forums_cache[granted_forums_cache_key]
    forums = self._get_all_forums()
    # First check if the user is a superuser and if so, return the forum
    # queryset immediately.
    if user.is_superuser:  # pragma: no cover
        forum_objects = forums
    else:
        # Generate the appropriate queryset filter in order to handle both
        # authenticated users and anonymous users.
        user_kwargs_filter = {'anonymous_user': True} if user.is_anonymous else {'user': user}
        # Get all the user permissions for the considered user.
        user_perms = (
            UserForumPermission.objects
            .filter(**user_kwargs_filter)
            .filter(permission__codename__in=perm_codenames))
        # The first thing to do is to compute three lists of permissions: one
        # containing only globally granted permissions, one containing granted
        # permissions associated with specific forums, and one containing
        # non-granted permissions (also associated with specific forums).
        globally_granted_user_perms = list(
            filter(lambda p: p.has_perm and p.forum_id is None, user_perms))
        per_forum_granted_user_perms = list(
            filter(lambda p: p.has_perm and p.forum_id is not None, user_perms))
        per_forum_nongranted_user_perms = list(
            filter(lambda p: not p.has_perm and p.forum_id is not None, user_perms))
        # Using the previous lists we compute the forum ids for which
        # permissions are explicitly not granted.  Note that a permission that
        # is explicitly granted for a user wins over a "non granted" permission
        # for the same forum: explicit grants always take precedence.
        granted_user_forum_ids = [p.forum_id for p in per_forum_granted_user_perms]
        nongranted_forum_ids = [
            p.forum_id for p in per_forum_nongranted_user_perms
            if p.forum_id not in granted_user_forum_ids]
        required_perm_codenames_count = len(perm_codenames)
        initial_forum_ids = [f.id for f in forums]
        # Build a dictionary associating each forum ID of the initial queryset
        # with the set of permissions granted for that forum.
        granted_permissions_per_forum = collections.defaultdict(set)
        for perm in per_forum_granted_user_perms:
            granted_permissions_per_forum[perm.forum_id].add(perm.permission_id)
        for forum_id in initial_forum_ids:
            granted_permissions_per_forum[forum_id].update(
                [perm.permission_id for perm in globally_granted_user_perms])
        if not user.is_anonymous:
            user_model = get_user_model()
            # Get all the group permissions for the considered user.
            group_perms = (
                GroupForumPermission.objects
                .filter(**{'group__{0}'.format(user_model.groups.field.related_query_name()): user})
                .filter(permission__codename__in=perm_codenames))
            # Again, compute three lists of permissions, this time for group
            # permissions: globally granted, per-forum granted, and per-forum
            # non-granted.
            globally_granted_group_perms = list(
                filter(lambda p: p.has_perm and p.forum_id is None, group_perms))
            per_forum_granted_group_perms = list(
                filter(lambda p: p.has_perm and p.forum_id is not None, group_perms))
            per_forum_nongranted_group_perms = list(
                filter(lambda p: not p.has_perm and p.forum_id is not None, group_perms))
            # Update the list of forum ids for which permissions are explicitly
            # not granted (explicit group grants again take precedence).
            granted_group_forum_ids = [p.forum_id for p in per_forum_granted_group_perms]
            nongranted_forum_ids += [
                p.forum_id for p in per_forum_nongranted_group_perms
                if p.forum_id not in granted_group_forum_ids]
            # Update the per-forum granted-permission sets (built above at the
            # user level) with the permissions granted via the user's groups.
            for perm in per_forum_granted_group_perms:
                granted_permissions_per_forum[perm.forum_id].add(perm.permission_id)
            for forum_id in initial_forum_ids:
                granted_permissions_per_forum[forum_id].update(
                    [perm.permission_id for perm in globally_granted_group_perms])
        # Keep only the forum IDs whose granted-permission set covers every
        # requested permission codename; other forums are thrown away.
        for forum_id in list(granted_permissions_per_forum):
            if len(granted_permissions_per_forum[forum_id]) < required_perm_codenames_count:
                del granted_permissions_per_forum[forum_id]
        # Filter the initial queryset using the forums associated with the
        # granted permissions and the explicit denial list.
        forum_objects = [
            f for f in forums
            if f.id in granted_permissions_per_forum and f.id not in nongranted_forum_ids]
        # Authenticated users additionally receive the default member
        # permissions, except for forums explicitly denied above.
        if (not user.is_anonymous and set(perm_codenames).issubset(
                set(machina_settings.DEFAULT_AUTHENTICATED_USER_FORUM_PERMISSIONS))):
            forum_objects += [
                f for f in forums
                if f.id not in nongranted_forum_ids and f not in forum_objects]
    if use_tree_hierarchy:
        forum_objects = self._filter_granted_forums_using_tree(forum_objects)
    self._granted_forums_cache[granted_forums_cache_key] = forum_objects
    return forum_objects
def vtmlrender(vtmarkup, plain=None, strict=False, vtmlparser=VTMLParser()):
    """Look for vt100 markup and render vt opcodes into a VTMLBuffer.

    :param vtmarkup: markup to render; a ``VTMLBuffer`` is returned as-is
        (or as plain text when *plain* is truthy).
    :param plain: when truthy, return the plain-text form of the buffer.
    :param strict: when truthy, re-raise parse errors instead of falling
        back to the unparsed input.
    :param vtmlparser: parser instance; the shared default is deliberately
        reused across calls and reset in the ``finally`` block.
    """
    if isinstance(vtmarkup, VTMLBuffer):
        return vtmarkup.plain() if plain else vtmarkup
    try:
        vtmlparser.feed(vtmarkup)
        vtmlparser.close()
    except Exception:
        # Was a bare ``except:`` — narrowed to Exception so that
        # KeyboardInterrupt/SystemExit are no longer swallowed by the
        # best-effort fallback below.
        if strict:
            raise
        buf = VTMLBuffer()
        buf.append_str(str(vtmarkup))
        return buf
    else:
        buf = vtmlparser.getvalue()
        return buf.plain() if plain else buf
    finally:
        vtmlparser.reset()
def _set_data ( self , ** kwargs ) :
"""Sets data from given parameters
Old values are deleted .
If a paremeter is not given , nothing is changed .
Parameters
shape : 3 - tuple of Integer
\t Grid shape
grid : Dict of 3 - tuples to strings
\t Cell content
attributes : List of 3 - tuples
\t Cell attributes
row _ heights : Dict of 2 - tuples to float
\t ( row , tab ) : row _ height
col _ widths : Dict of 2 - tuples to float
\t ( col , tab ) : col _ width
macros : String
\t Macros from macro list""" | if "shape" in kwargs :
self . shape = kwargs [ "shape" ]
if "grid" in kwargs :
self . dict_grid . clear ( )
self . dict_grid . update ( kwargs [ "grid" ] )
if "attributes" in kwargs :
self . attributes [ : ] = kwargs [ "attributes" ]
if "row_heights" in kwargs :
self . row_heights = kwargs [ "row_heights" ]
if "col_widths" in kwargs :
self . col_widths = kwargs [ "col_widths" ]
if "macros" in kwargs :
self . macros = kwargs [ "macros" ] |
def table_maker(subset, ind1, ind2, row_labels, col_labels, title):
    """Print a 2x2 contingency table plus percentage tables and a Fisher
    exact test p-value.

    ``subset`` provides a subsetted boolean of items to consider.  If no
    subset, you can use all with ``np.ones_like(ind1) == 1``.

    ``ind1`` is used to subset rows, e.g. ``log2fc > 0``; row_labels might
    be ``['upregulated', 'others']``.

    ``ind2`` is used to subset cols; col_labels might be
    ``['bound', 'unbound']``.
    """
    # Counts for the four quadrants of the 2x2 table.
    table = [
        sum(subset & ind1 & ind2),
        sum(subset & ind1 & ~ind2),
        sum(subset & ~ind1 & ind2),
        sum(subset & ~ind1 & ~ind2),
    ]
    # Converted from Python 2 print statements (syntax errors under
    # Python 3) to the print() function.
    print()
    print(title)
    print('-' * len(title))
    print(print_2x2_table(table, row_labels=row_labels, col_labels=col_labels))
    print(print_row_perc_table(table, row_labels=row_labels, col_labels=col_labels))
    print(print_col_perc_table(table, row_labels=row_labels, col_labels=col_labels))
    print(fisher.pvalue(*table))
def to_str(string):
    """Return the given string (byte string or Unicode string) converted
    to native-str: a byte string on Python 2, a Unicode string on
    Python 3.

    Returns ``None`` if ``string`` is ``None``.

    :param str string: the string to convert to native-str
    :rtype: native-str
    """
    if string is None:
        return None
    if isinstance(string, str):
        # Already the native string type.
        return string
    # Otherwise convert between bytes and text for the running major version.
    return string.encode("utf-8") if PY2 else string.decode("utf-8")
def check_node_position(self, parent_id, position, on_same_branch, db_session=None, *args, **kwargs):
    """Validate a node position for the given parent.

    Delegates to the underlying service, which raises an exception when
    the position is not valid.

    :param parent_id: id of the parent node
    :param position: position to check
    :param on_same_branch: indicates that we are checking the same branch
    :param db_session: optional database session
    :return: result of the service-level check
    """
    service = self.service
    return service.check_node_position(
        parent_id=parent_id,
        position=position,
        on_same_branch=on_same_branch,
        db_session=db_session,
        *args,
        **kwargs)
def chart_type(cls, plot):
    """Return the member of :ref:`XlChartType` that corresponds to the
    chart type of *plot*.

    Raises:
        NotImplementedError: if *plot*'s class has no known mapping.
    """
    dispatch = {
        'AreaPlot': cls._differentiate_area_chart_type,
        'Area3DPlot': cls._differentiate_area_3d_chart_type,
        'BarPlot': cls._differentiate_bar_chart_type,
        'BubblePlot': cls._differentiate_bubble_chart_type,
        'DoughnutPlot': cls._differentiate_doughnut_chart_type,
        'LinePlot': cls._differentiate_line_chart_type,
        'PiePlot': cls._differentiate_pie_chart_type,
        'RadarPlot': cls._differentiate_radar_chart_type,
        'XyPlot': cls._differentiate_xy_chart_type,
    }
    plot_cls_name = plot.__class__.__name__
    if plot_cls_name not in dispatch:
        raise NotImplementedError("chart_type() not implemented for %s" % plot_cls_name)
    return dispatch[plot_cls_name](plot)
def call_cmd(cmdlist, stdin=None):
    """Get a shell command's output, error message and return value and
    immediately return.

    .. warning::
        This returns with the first screen content for interactive commands.

    :param cmdlist: shell command to call, already split into a list accepted
        by :meth:`subprocess.Popen`
    :type cmdlist: list of str
    :param stdin: string to pipe to the process
    :type stdin: str, bytes, or None
    :return: triple of stdout, stderr, return value of the shell command
    :rtype: str, str, int
    """
    termenc = urwid.util.detected_encoding
    if isinstance(stdin, str):
        # Encode text input with the detected terminal encoding.
        stdin = stdin.encode(termenc)
    try:
        # Lazy %-args instead of eager string formatting (avoids work when
        # debug logging is disabled).
        logging.debug("Calling %s", cmdlist)
        proc = subprocess.Popen(
            cmdlist,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
            stdin=subprocess.PIPE if stdin is not None else None)
    except OSError as e:
        # Command could not be started at all (e.g. not found).
        out = b''
        err = e.strerror
        ret = e.errno
    else:
        out, err = proc.communicate(stdin)
        ret = proc.returncode
    out = string_decode(out, termenc)
    err = string_decode(err, termenc)
    return out, err, ret
def ynnm(n, m):
    """Initial value for the recursion formula.

    Computes the starting coefficient used by the spherical-harmonic
    recursion, based only on the degree *n* and the absolute order |m|.

    :param n: degree (non-negative int)
    :param m: order (int); only the absolute value is used
    :return: float initial value; ``0.0`` when ``n < |m|``
    """
    a = 1.0 / np.sqrt(4.0 * np.pi)
    pm = np.abs(m)
    if n < pm:
        # Degree smaller than |order|: the coefficient vanishes.
        return 0.0
    if n == 0:
        return a
    out = a
    # xrange (Python 2 only) replaced with range for Python 3 compatibility.
    for k in range(1, n + 1):
        out *= np.sqrt((2.0 * k + 1.0) / 8.0 / k)
    if n != pm:
        for k in range(n - 1, pm - 1, -1):
            out *= np.sqrt((n + k + 1.0) / (n - k))
    return out
def game_events(game_id, innings_endpoint=False):
    """Return the list of Inning objects for the game matching *game_id*.

    ``innings_endpoint``: bool — use the more detailed ``innings`` API
    endpoint.  The resulting objects carry additional, undocumented data
    properties, but may also be missing properties expected by the user.
    """
    raw = mlbgame.events.game_events(game_id, innings_endpoint)
    return [mlbgame.events.Inning(raw[key], key) for key in raw]
def delete(self, task_id):
    """Delete a task from a TaskQueue.

    Accepts either a raw task id or a ``RegisteredTask`` instance.  When
    worker threads exist, the deletion is queued; otherwise it is executed
    synchronously against the API.  Returns ``self`` for chaining.
    """
    if isinstance(task_id, RegisteredTask):
        task_id = task_id.id

    def cloud_delete(api):
        # Deferred so the same callable works queued or inline.
        api.delete(task_id)

    if len(self._threads):
        self.put(cloud_delete)
    else:
        cloud_delete(self._api)
    return self
def _normalize_params ( image , width , height , crop ) :
"""Normalize params and calculate aspect .""" | if width is None and height is None :
raise ValueError ( "Either width or height must be set. Otherwise " "resizing is useless." )
if width is None or height is None :
aspect = float ( image . width ) / float ( image . height )
if crop :
raise ValueError ( "Cropping the image would be useless since only " "one dimention is give to resize along." )
if width is None :
width = int ( round ( height * aspect ) )
else :
height = int ( round ( width / aspect ) )
return ( width , height , crop ) |
def modifiers(self):
    """For verb phrases (VP), yields a list of the nearest adjectives and adverbs."""
    if self._modifiers is None:
        # Iterate over all the chunks and attach modifiers to their VP-anchor.
        # NOTE: the first loop resets ``_modifiers`` on *every* chunk in the
        # sentence (including ``self``), so ``self._modifiers`` is populated
        # as a side effect of the second loop rather than assigned directly.
        is_modifier = lambda ch: ch.type in ("ADJP", "ADVP") and ch.relation is None
        for chunk in self.sentence.chunks:
            chunk._modifiers = []
        for chunk in filter(is_modifier, self.sentence.chunks):
            # Attach each free ADJP/ADVP chunk to its nearest VP chunk.
            anchor = chunk.nearest("VP")
            if anchor:
                anchor._modifiers.append(chunk)
    return self._modifiers
def _addModuleInfo(self, moduleInfo):
    """Append a line with module info to the editor.

    :param moduleInfo: either a string or a module info object.  A string
        is wrapped as ``ImportedModuleInfo(moduleInfo)`` first.
    """
    info = mi.ImportedModuleInfo(moduleInfo) if is_a_string(moduleInfo) else moduleInfo
    line = "{:15s}: {}".format(info.name, info.verboseVersion)
    self.editor.appendPlainText(line)
    # Keep the GUI responsive while many lines are appended.
    QtWidgets.QApplication.instance().processEvents()
def download(self, url, save_path, header=None, redownload=False):
    """Download *url* to *save_path* (currently does not use the proxied
    driver).

    TODO: Be able to use cookies just like headers are used here.

    :param url: URL to fetch; protocol-relative ``//`` URLs get ``http:``
        prepended.
    :param save_path: destination path; must not be None.
    :param header: optional dict of extra headers merged over the driver's
        headers.  (The previous implementation overwrote this argument with
        ``self.get_headers()`` and then performed a no-op
        ``header.update(header)``, silently discarding caller headers; the
        mutable ``{}`` default is also replaced with None.)
    :param redownload: when False, an already-existing file is reused.
    :return: the path of the file that was saved, or None on failure.
    """
    if save_path is None:
        logger.error("save_path cannot be None")
        return None
    # Start from the current web driver's headers and let explicitly passed
    # headers override them.
    request_headers = self.get_headers()
    if header:
        request_headers.update(header)
    logger.debug("Download {url} to {save_path}".format(url=url, save_path=save_path))
    save_location = cutil.norm_path(save_path)
    if redownload is False:
        # See if we already have the file
        if os.path.isfile(save_location):
            logger.debug("File {save_location} already exists".format(save_location=save_location))
            return save_location
    # Create the dir path on disk
    cutil.create_path(save_location)
    if url.startswith('//'):
        url = "http:" + url
    try:
        with urllib.request.urlopen(urllib.request.Request(url, headers=request_headers)) as response, \
                open(save_location, 'wb') as out_file:
            out_file.write(response.read())
    except urllib.error.HTTPError as e:
        save_location = None
        # We do not need to show the user 404 errors
        if e.code != 404:
            logger.exception("Download Http Error {url}".format(url=url))
    except Exception:
        save_location = None
        logger.exception("Download Error: {url}".format(url=url))
    return save_location
def concretize_load_idx(self, idx, strategies=None):
    """Concretize a load index.

    :param idx: An expression for the index.
    :param strategies: A list of concretization strategies (to override the
        default).
    :returns: A list of concrete indexes.
    """
    if isinstance(idx, int):
        # Already concrete — nothing to do.
        return [idx]
    if not self.state.solver.symbolic(idx):
        # Concrete bitvector: evaluate it once.
        return [self.state.solver.eval(idx)]
    chosen = strategies if strategies is not None else self.load_strategies
    return self._apply_concretization_strategies(idx, chosen, 'load')
def add_json(self, json_obj, **kwargs):
    """Add a json-serializable Python dict as a json file to IPFS.

    .. code-block:: python

        >>> c.add_json({'one': 1, 'two': 2, 'three': 3})
        'QmVz9g7m5u3oHiNKHj2CJX1dbG1gtismRS3g9NaPBBLbob'

    Parameters
    ----------
    json_obj : dict
        A json-serializable Python dictionary.

    Returns
    -------
    str
        Hash of the added IPFS object.
    """
    payload = encoding.Json().encode(json_obj)
    return self.add_bytes(payload, **kwargs)
def pop(self):
    """Remove a random element from the collection and return it.

    # Returns
    `object`
    > A random object from the collection.

    Raises ``KeyError`` when the collection is empty.
    """
    try:
        return self._collection.pop()
    except KeyError:
        # Re-raise with a friendlier message; suppress the original context.
        message = "Nothing left in the {}: '{}'.".format(type(self).__name__, self)
        raise KeyError(message) from None
def to_value_list(original_strings, corenlp_values=None):
    """Convert a list of strings to a list of Values.

    Args:
        original_strings (list[basestring])
        corenlp_values (list[basestring or None])

    Returns:
        list[Value]: deduplicated values.
    """
    assert isinstance(original_strings, (list, tuple, set))
    if corenlp_values is None:
        return list({to_value(x) for x in original_strings})
    assert isinstance(corenlp_values, (list, tuple, set))
    assert len(original_strings) == len(corenlp_values)
    return list({to_value(x, y) for (x, y) in zip(original_strings, corenlp_values)})
def rationalize(flt: float, denominators: Set[int] = None) -> Fraction:
    """Convert a floating point number to a Fraction with a small
    denominator.

    Args:
        flt: A floating point number.
        denominators: Collection of standard denominators.  Default is
            1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 12, 16, 32, 64, 128, 256, 512,
            1024, 2048, 4096, 8192.

    Raises:
        ValueError: If the float cannot be rationalized.
    """
    allowed = _DENOMINATORS if denominators is None else denominators
    frac = Fraction.from_float(flt).limit_denominator()
    if frac.denominator not in allowed:
        raise ValueError('Cannot rationalize')
    return frac
def get_pandasframe(self):
    """Load data and return it as a pandas frame.

    When a single dataset is selected, its dimensions are loaded first;
    otherwise data is gathered across all datasets.
    """
    if not self.dataset:
        return self._get_pandasframe_across_datasets()
    self._load_dimensions()
    return self._get_pandasframe_one_dataset()
def get_relationship_query_session_for_family(self, family_id):
    """Get the ``OsidSession`` associated with the relationship query
    service for the given family.

    arg:    family_id (osid.id.Id): the ``Id`` of the family
    return: (osid.relationship.RelationshipQuerySession) - a
            ``RelationshipQuerySession``
    raise:  NotFound - no ``Family`` found by the given ``Id``
    raise:  NullArgument - ``family_id`` is ``null``
    raise:  OperationFailed - unable to complete request
    raise:  Unimplemented - ``supports_relationship_query()`` or
            ``supports_visible_federation()`` is ``false``
    *compliance: optional -- This method must be implemented if
    ``supports_relationship_query()`` and
    ``supports_visible_federation()`` are ``true``.*
    """
    if not self.supports_relationship_query():
        raise errors.Unimplemented()
    # TODO: also check that the catalog Id exists, otherwise raise
    # errors.NotFound.
    # pylint: disable=no-member
    return sessions.RelationshipQuerySession(family_id, runtime=self._runtime)
def mouseMoved(self, viewPos):
    """Update the probe text with the values under the cursor.

    Draws a vertical line and a symbol at the position of the probe.
    Connected as a Qt slot, so it must never let an exception escape
    (see the broad ``except`` at the bottom).

    :param viewPos: cursor position in scene coordinates (QtCore.QPointF).
    """
    try:
        check_class(viewPos, QtCore.QPointF)
        show_data_point = False  # shows the data point as a circle in the cross hair plots
        self.crossPlotRow, self.crossPlotCol = None, None
        # Reset the probe UI to the "no data" state; it is re-enabled below
        # only when the cursor is over valid data.
        self.probeLabel.setText("<span style='color: #808080'>no data at cursor</span>")
        self.crossLineHorizontal.setVisible(False)
        self.crossLineVertical.setVisible(False)
        self.crossLineHorShadow.setVisible(False)
        self.crossLineVerShadow.setVisible(False)
        self.horCrossPlotItem.clear()
        self.verCrossPlotItem.clear()
        if (self._hasValidData() and self.slicedArray is not None
                and self.viewBox.sceneBoundingRect().contains(viewPos)):
            # Calculate the row and column at the cursor. We use math.floor because the pixel
            # corners of the image lie at integer values (and not the centers of the pixels).
            scenePos = self.viewBox.mapSceneToView(viewPos)
            row, col = math.floor(scenePos.y()), math.floor(scenePos.x())
            row, col = int(row), int(col)  # Needed in Python 2
            nRows, nCols = self.slicedArray.shape
            if (0 <= row < nRows) and (0 <= col < nCols):
                self.viewBox.setCursor(Qt.CrossCursor)
                self.crossPlotRow, self.crossPlotCol = row, col
                index = tuple([row, col])
                valueStr = to_string(self.slicedArray[index],
                                     masked=self.slicedArray.maskAt(index),
                                     maskFormat='<masked>')
                txt = "pos = ({:d}, {:d}), value = {}".format(row, col, valueStr)
                self.probeLabel.setText(txt)
                # Show cross section at the cursor pos in the line plots
                if self.config.horCrossPlotCti.configValue:
                    self.crossLineHorShadow.setVisible(True)
                    self.crossLineHorizontal.setVisible(True)
                    self.crossLineHorShadow.setPos(row)
                    self.crossLineHorizontal.setPos(row)
                    # Line plot of cross section row.
                    # First determine which points are connected or separated by masks/nans.
                    rowData = self.slicedArray.data[row, :]
                    connected = np.isfinite(rowData)
                    if is_an_array(self.slicedArray.mask):
                        connected = np.logical_and(connected, ~self.slicedArray.mask[row, :])
                    else:
                        # Scalar mask: a truthy mask disconnects everything.
                        connected = (np.zeros_like(rowData) if self.slicedArray.mask else connected)
                    # Replace infinite values with nans because PyQtGraph can't handle them
                    rowData = replaceMaskedValueWithFloat(rowData, np.isinf(rowData),
                                                          np.nan, copyOnReplace=True)
                    horPlotDataItem = self.config.crossPenCti.createPlotDataItem()
                    horPlotDataItem.setData(rowData, connect=connected)
                    self.horCrossPlotItem.addItem(horPlotDataItem)
                    # Vertical line in hor-cross plot
                    crossLineShadow90 = pg.InfiniteLine(angle=90, movable=False, pen=self.crossShadowPen)
                    crossLineShadow90.setPos(col)
                    self.horCrossPlotItem.addItem(crossLineShadow90, ignoreBounds=True)
                    crossLine90 = pg.InfiniteLine(angle=90, movable=False, pen=self.crossPen)
                    crossLine90.setPos(col)
                    self.horCrossPlotItem.addItem(crossLine90, ignoreBounds=True)
                    if show_data_point:
                        crossPoint90 = pg.PlotDataItem(symbolPen=self.crossPen)
                        crossPoint90.setSymbolBrush(QtGui.QBrush(self.config.crossPenCti.penColor))
                        crossPoint90.setSymbolSize(10)
                        crossPoint90.setData((col,), (rowData[col],))
                        self.horCrossPlotItem.addItem(crossPoint90, ignoreBounds=True)
                    self.config.horCrossPlotRangeCti.updateTarget()  # update auto range
                    del rowData  # defensive programming
                if self.config.verCrossPlotCti.configValue:
                    self.crossLineVerShadow.setVisible(True)
                    self.crossLineVertical.setVisible(True)
                    self.crossLineVerShadow.setPos(col)
                    self.crossLineVertical.setPos(col)
                    # Line plot of cross section column.
                    # First determine which points are connected or separated by masks/nans.
                    colData = self.slicedArray.data[:, col]
                    connected = np.isfinite(colData)
                    if is_an_array(self.slicedArray.mask):
                        connected = np.logical_and(connected, ~self.slicedArray.mask[:, col])
                    else:
                        # Scalar mask: a truthy mask disconnects everything.
                        connected = (np.zeros_like(colData) if self.slicedArray.mask else connected)
                    # Replace infinite values with nans because PyQtGraph can't handle them
                    colData = replaceMaskedValueWithFloat(colData, np.isinf(colData),
                                                          np.nan, copyOnReplace=True)
                    verPlotDataItem = self.config.crossPenCti.createPlotDataItem()
                    verPlotDataItem.setData(colData, np.arange(nRows), connect=connected)
                    self.verCrossPlotItem.addItem(verPlotDataItem)
                    # Horizontal line in ver-cross plot
                    crossLineShadow0 = pg.InfiniteLine(angle=0, movable=False, pen=self.crossShadowPen)
                    crossLineShadow0.setPos(row)
                    self.verCrossPlotItem.addItem(crossLineShadow0, ignoreBounds=True)
                    crossLine0 = pg.InfiniteLine(angle=0, movable=False, pen=self.crossPen)
                    crossLine0.setPos(row)
                    self.verCrossPlotItem.addItem(crossLine0, ignoreBounds=True)
                    if show_data_point:
                        crossPoint0 = pg.PlotDataItem(symbolPen=self.crossPen)
                        crossPoint0.setSymbolBrush(QtGui.QBrush(self.config.crossPenCti.penColor))
                        crossPoint0.setSymbolSize(10)
                        crossPoint0.setData((colData[row],), (row,))
                        self.verCrossPlotItem.addItem(crossPoint0, ignoreBounds=True)
                    self.config.verCrossPlotRangeCti.updateTarget()  # update auto range
                    del colData  # defensive programming
    except Exception as ex:
        # In contrast to _drawContents, this function is a slot and thus must not throw
        # exceptions. The exception is logged. Perhaps we should clear the cross plots, but
        # this could, in turn, raise exceptions.
        if DEBUGGING:
            raise
        else:
            logger.exception(ex)
def sort(self, key_or_list, direction=None):
    """Sort this cursor's documents in place (pymongo ``Cursor.sort`` clone).

    :param key_or_list: a list/tuple of ``(key, direction)`` pairs,
        e.g. ``[('user_number', -1)]``, or a single field name string;
        dotted keys ("a.b") descend into nested documents
    :param direction: sorting direction, 1 or -1; only allowed when
        ``key_or_list`` is a single field name (defaults to 1, ascending)
    :return: self, to allow chaining
    """
    # --- checking input format: normalise to a list of (key, direction) ---
    sort_specifier = list()
    if isinstance(key_or_list, list):
        if direction is not None:
            raise ValueError('direction can not be set separately '
                             'if sorting by multiple fields.')
        for pair in key_or_list:
            if not (isinstance(pair, list) or isinstance(pair, tuple)):
                raise TypeError('key pair should be a list or tuple.')
            if not len(pair) == 2:
                raise ValueError('Need to be (key, direction) pair')
            # NOTE: ``basestring`` means this module targets Python 2 (or
            # defines a compatibility alias elsewhere in the file).
            if not isinstance(pair[0], basestring):
                raise TypeError('first item in each key pair must '
                                'be a string')
            if not isinstance(pair[1], int) or not abs(pair[1]) == 1:
                raise TypeError('bad sort specification.')
        sort_specifier = key_or_list
    elif isinstance(key_or_list, basestring):
        if direction is not None:
            if not isinstance(direction, int) or not abs(direction) == 1:
                raise TypeError('bad sort specification.')
        else:
            # default ASCENDING
            direction = 1
        sort_specifier = [(key_or_list, direction)]
    else:
        raise ValueError('Wrong input, pass a field name and a direction,'
                         ' or pass a list of (key, direction) pairs.')
    # --- sorting ---
    # Multi-key sort is implemented as a sequence of single-key sorts.
    # Stability across keys is kept manually via "sections": documents that
    # compared equal on all keys sorted so far share a section id, and that
    # id becomes the primary component of the next sort key.
    _cursordat = self.cursordat
    total = len(_cursordat)
    pre_sect_stack = list()
    for pair in sort_specifier:
        is_reverse = bool(1 - pair[1])  # True when direction == -1
        value_stack = list()
        for index, data in enumerate(_cursordat):
            # get field value: walk the dotted path into the document
            not_found = None
            for key in pair[0].split('.'):
                not_found = True
                if isinstance(data, dict) and key in data:
                    data = copy.deepcopy(data[key])
                    not_found = False
                elif isinstance(data, list):
                    if not is_reverse and len(data) == 1:
                        # MongoDB treat [{data}] as {data}
                        # when finding fields
                        if isinstance(data[0], dict) and key in data[0]:
                            data = copy.deepcopy(data[0][key])
                            not_found = False
                    elif is_reverse:
                        # MongoDB will keep finding field in reverse mode
                        for _d in data:
                            if isinstance(_d, dict) and key in _d:
                                data = copy.deepcopy(_d[key])
                                not_found = False
                                break
                if not_found:
                    break
            # parsing data for sorting
            if not_found:
                # treat no match as None
                data = None
            value = self._order(data, is_reverse)
            # read previous section
            pre_sect = pre_sect_stack[index] if pre_sect_stack else 0
            # inverse if in reverse mode
            # for keeping order as ASCENDING after sort
            pre_sect = (total - pre_sect) if is_reverse else pre_sect
            _ind = (total - index) if is_reverse else index
            value_stack.append((pre_sect, value, _ind))
        # sorting cursor data
        value_stack.sort(reverse=is_reverse)
        ordereddat = list()
        sect_stack = list()
        sect_id = -1
        last_dat = None
        for dat in value_stack:
            # restore the original index if in reverse mode
            _ind = (total - dat[-1]) if is_reverse else dat[-1]
            ordereddat.append(_cursordat[_ind])
            # define section: bump the id whenever the sort value changes,
            # to maintain the sorting result in next level sorting
            if not dat[1] == last_dat:
                sect_id += 1
            sect_stack.append(sect_id)
            last_dat = dat[1]
        # save result for next level sorting
        _cursordat = ordereddat
        pre_sect_stack = sect_stack
    # done
    self.cursordat = _cursordat
    return self
def Tracer_CMFR_N(t_seconds, t_bar, C_bar, N):
    """Model concentration for N completely mixed flow reactors in series.

    Used by Solver_CMFR_N. All inputs and outputs are unitless. This is the
    model function f(x, ...): the independent variable comes first, followed
    by the parameters to fit.

    :param t_seconds: list of times
    :param t_bar: average time spent in the reactor
    :param C_bar: average concentration, (mass of tracer)/(reactor volume)
    :param N: number of CMFRs in series, must be greater than 1
    :return: the model concentration as a function of time

    :Examples:

    >>> from aguaclara.research.environmental_processes_analysis import Tracer_CMFR_N
    >>> from aguaclara.core.units import unit_registry as u
    >>> Tracer_CMFR_N([1, 2, 3, 4, 5]*u.s, 5*u.s, 10*u.mg/u.L, 3)
    <Quantity([2.96358283 6.50579498 8.03352597 7.83803116 6.72125423], 'milligram / liter')>
    """
    # Scale time by the mean residence time, then scale the dimensionless
    # exit-age distribution by the average concentration.
    dimensionless_time = t_seconds / t_bar
    return C_bar * E_CMFR_N(dimensionless_time, N)
def cpu_speed(self, silent=False):
    """Retrieve the CPU speed of the target.

    If the target does not support CPU frequency detection, this function
    will return ``0``.

    Args:
      self (JLink): the ``JLink`` instance
      silent (bool): ``True`` if the CPU detection should not report errors
        to the error handler on failure.

    Returns:
      The measured CPU frequency on success, otherwise ``0`` if the core
      does not support CPU frequency detection.

    Raises:
      JLinkException: on hardware error (negative DLL return code).
    """
    frequency = self._dll.JLINKARM_MeasureCPUSpeedEx(-1, 1, int(silent))
    if frequency >= 0:
        return frequency
    raise errors.JLinkException(frequency)
def _generic_matrix_calc(fn, trees, normalise, min_overlap=4, overlap_fail_value=0, show_progress=True):
    """(fn, trees, normalise)
    Calculates all pairwise distances between trees given in the parameter 'trees'.
    Distance functions:
        eucdist_matrix
        geodist_matrix
        rfdist_matrix
        wrfdist_matrix
    These wrap the leafset-checking functions. If the faster non-leafset-checking
    functions are needed, do this:
        scipy.spatial.distance(['getDistance'(t1.phylotree, t2.phylotree, normalise)
                                for (t1, t2) in itertools.combinations(trees, 2)])
    for your choice of 'getDistance' out of:
        getEuclideanDistance
        getGeodesicDistance
        getRobinsonFouldsDistance
        getWeightedRobinsonFouldsDistance

    :param fn: pairwise distance function forwarded to _generic_distance_calc
    :param trees: list or tuple, or some other iterable container type containing Tree objects
    :param normalise: boolean
    :param min_overlap: int -- minimum leafset overlap required for a distance
    :param overlap_fail_value: value recorded when the overlap check fails
    :param show_progress: bool -- display a progress bar while computing
    :return: numpy.array -- square symmetric distance matrix
    """
    # All unordered pairs; n*(n-1)/2 jobs in total.
    jobs = itertools.combinations(trees, 2)
    results = []
    if show_progress:
        pbar = setup_progressbar('Calculating tree distances',
                                 0.5 * len(trees) * (len(trees) - 1))
        pbar.start()
    for i, (t1, t2) in enumerate(jobs):
        results.append(_generic_distance_calc(fn, t1, t2, normalise,
                                              min_overlap, overlap_fail_value))
        if show_progress:
            pbar.update(i)
    if show_progress:
        pbar.finish()
    # Convert the condensed distance vector into a square matrix.
    return scipy.spatial.distance.squareform(results)
def name(self):
    """:return: (shortest) name of this reference - it may contain path components"""
    # Drop the first two path components ("refs/heads", "refs/tags",
    # "refs/remotes"); short paths such as "refs/HEAD" are returned as-is.
    parts = self.path.split('/')
    return self.path if len(parts) < 3 else '/'.join(parts[2:])
def rotate(self, nDegrees):
    """Rotate the image by a given number of degrees.

    Parameters:
        | nDegrees - degrees to rotate by (images start at zero degrees).
        | Positive numbers are clockwise, negative are counter-clockwise.
    """
    # Accumulate onto the current angle, then re-render with all current
    # transform settings.
    self.angle += nDegrees
    self._transmogrophy(
        self.angle, self.percent, self.scaleFromCenter, self.flipH, self.flipV)
def umount(self, source):
    """Unmount a partition.

    :param source: full partition path like /dev/sda1
    :raises RuntimeError: when the remote umount job does not report SUCCESS
    """
    arguments = {'source': source}
    # Validate the argument shape before dispatching the remote call.
    self._umount_chk.check(arguments)
    job = self._client.raw('disk.umount', arguments)
    outcome = job.get()
    if outcome.state != 'SUCCESS':
        raise RuntimeError('failed to umount partition: %s' % outcome.stderr)
def _check_uri ( cls , uri ) :
"""Check whether a URI is compatible with a : class : ` . Driver `
subclass . When called from a subclass , execution simply passes
through if the URI scheme is valid for that class . If invalid ,
a ` ValueError ` is raised .
: param uri : URI to check for compatibility
: raise : ` ValueError ` if URI scheme is incompatible""" | parsed = urlparse ( uri )
if parsed . scheme != cls . uri_scheme :
raise ValueError ( "%s objects require the %r URI scheme" % ( cls . __name__ , cls . uri_scheme ) ) |
def A_term(i, r, u, l1, l2, PAx, PBx, CPx, gamma):
    """THO eq. 2.18

    >>> A_term(0,0,0,0,0,0,0,0,1)
    1.0
    >>> A_term(0,0,0,0,1,1,1,1,1)
    1.0
    >>> A_term(1,0,0,0,1,1,1,1,1)
    -1.0
    >>> A_term(0,0,0,1,1,1,1,1,1)
    1.0
    >>> A_term(1,0,0,1,1,1,1,1,1)
    -2.0
    >>> A_term(2,0,0,1,1,1,1,1,1)
    1.0
    >>> A_term(2,0,1,1,1,1,1,1,1)
    -0.5
    >>> A_term(2,1,0,1,1,1,1,1,1)
    0.5
    """
    # Sign and numerator factors of the THO 2.18 term.
    value = (pow(-1, i)
             * binomial_prefactor(i, l1, l2, PAx, PBx)
             * pow(-1, u)
             * factorial(i)
             * pow(CPx, i - 2 * r - 2 * u)
             * pow(0.25 / gamma, r + u))
    # Divide sequentially (matches the original evaluation order exactly,
    # preserving float rounding behaviour).
    for k in (r, u, i - 2 * r - 2 * u):
        value /= factorial(k)
    return value
def merge(self, other):
    """Add requirements from 'other' metadata.rb into this one.

    :param other: another :class:`MetadataRb` whose ``depends`` entries are
        appended to this file when not already present
    :return: dict representation of this metadata after the merge
    :raises TypeError: if ``other`` is not a :class:`MetadataRb`
    """
    if not isinstance(other, MetadataRb):
        # BUG FIX: the message args were passed as a second TypeError argument
        # (comma) instead of being %-interpolated into the string.
        raise TypeError("MetadataRb to merge should be a 'MetadataRb' "
                        "instance, not %s." % type(other))
    current = self.to_dict()
    new = other.to_dict()
    # compare and gather cookbook dependencies missing from this file
    meta_writelines = ['%s\n' % self.depends_statement(cbn, meta)
                      for cbn, meta in new.get('depends', {}).items()
                      if cbn not in current.get('depends', {})]
    self.write_statements(meta_writelines)
    return self.to_dict()
def to_vec3(self):
    """Convert this vector4 instance into a vector3 instance.

    When ``w`` is non-zero the result is divided by it (perspective divide);
    a ``w`` of zero leaves the x/y/z components untouched.
    """
    result = Vector3()
    result.x, result.y, result.z = self.x, self.y, self.z
    if self.w != 0:
        result /= self.w
    return result
def main():
    """Main CLI application entry point."""
    parser = get_parser()
    argcomplete.autocomplete(parser, always_complete_options=False)
    args = parser.parse_args()
    setup_logger(level=args.log_level)
    runnable_commands = ('aggregate', 'show-closed-prs', 'show-all-prs')
    try:
        if args.config and args.command in runnable_commands:
            run(args)
        else:
            # No config or unsupported command: show usage instead.
            parser.print_help()
    except KeyboardInterrupt:
        # Exit quietly on Ctrl-C.
        pass
def get(self):
    """Constructs a TaskActionsContext

    :returns: twilio.rest.autopilot.v1.assistant.task.task_actions.TaskActionsContext
    :rtype: twilio.rest.autopilot.v1.assistant.task.task_actions.TaskActionsContext
    """
    solution = self._solution
    return TaskActionsContext(
        self._version,
        assistant_sid=solution['assistant_sid'],
        task_sid=solution['task_sid'],
    )
def _method_scope(input_layer, name):
    """Creates a nested set of name and id scopes and avoids repeats.

    Generator helper (used as a context manager): yields ``(scope, var_scope)``
    for the duration of the wrapped method call.

    Args:
      input_layer: layer whose graph and scope are entered.
      name: name for the inner scope.
    """
    global _in_method_scope
    # pylint: disable=protected-access
    with input_layer.g.as_default(), \
         scopes.var_and_name_scope(
             None if _in_method_scope else input_layer._scope), \
         scopes.var_and_name_scope((name, None)) as (scope, var_scope):
        was_in_method_scope = _in_method_scope
        try:
            # BUG FIX: the flag was saved and "restored" but never actually
            # set, so nested calls could never detect that they were already
            # inside a method scope; also restore via finally so an exception
            # in the wrapped body cannot leave the flag stale.
            _in_method_scope = True
            yield scope, var_scope
        finally:
            _in_method_scope = was_in_method_scope
def eliminate_nested(input_tup):
    """Return ``input_tup`` with any directly nested tuples dropped.

    Examples:
        >>> eliminate_nested((1, 5, 7, (4, 6), 10))
        (1, 5, 7, 10)
        >>> eliminate_nested((2, 6, 8, (5, 7), 11))
        (2, 6, 8, 11)
        >>> eliminate_nested((3, 7, 9, (6, 8), 12))
        (3, 7, 9, 12)

    Args:
        input_tup: Tuple that may contain nested tuples.

    Returns:
        Tuple: The input tuple with the nested tuples removed.
    """
    # Keep only non-tuple elements; nested tuples are discarded, not flattened.
    return tuple(element for element in input_tup
                 if not isinstance(element, tuple))
def _parse(self):
    """Given self.resource, split information from the CTS API.

    Side effects: keeps the raw reply in ``self.response``, narrows
    ``self.resource`` to the first ``tei:TEI`` passage node, stores the
    previous/next passage ids, and ingests the citation scheme when one is
    advertised and none is set yet.

    :return: None
    """
    self.response = self.resource
    # Narrow the resource down to the TEI payload of the ti:passage reply.
    self.resource = self.resource.xpath("//ti:passage/tei:TEI", namespaces=XPATH_NAMESPACES)[0]
    # Previous/next passage identifiers advertised by the API reply.
    self._prev_id, self._next_id = _SharedMethod.prevnext(self.response)
    # Only ingest a citation mapping when none is set and the reply actually
    # carries ti:citation elements (top-level ones only, per the xpath).
    if not self.citation.is_set() and len(self.resource.xpath("//ti:citation", namespaces=XPATH_NAMESPACES)):
        self.citation = CtsCollection.XmlCtsCitation.ingest(self.response, xpath=".//ti:citation[not(ancestor::ti:citation)]")
def df_drop_duplicates(df, ignore_key_pattern="time"):
    """Drop duplicate rows, ignoring columns whose name contains a pattern.

    :param df: input dataframe
    :param ignore_key_pattern: columns matching this pattern are excluded
        from the duplicate comparison
    :return: dataframe with duplicate rows removed
    """
    # Columns matched by the pattern are not used for the comparison.
    ignored_keys = list_contains(df.keys(), ignore_key_pattern)
    comparison_keys = [key for key in df.keys() if key not in ignored_keys]
    return df.drop_duplicates(comparison_keys)
def _create_extractors(cls, metrics):
    """Build metrics extractors according to the `metrics` config.

    :param metrics: Benchmark `metrics` configuration section
    :return: dict mapping category name to a StdExtractor over that
        category's metrics
    """
    grouped = {}
    # Group entries by their 'category' attribute (default is "standard").
    for metric_name, config in six.iteritems(metrics):
        category = config.get('category', StdBenchmark.DEFAULT_CATEGORY)
        grouped.setdefault(category, {})[metric_name] = config
    # One StdExtractor instance per category, fed that category's metrics.
    return dict((category, StdExtractor(category_metrics))
                for category, category_metrics in six.iteritems(grouped))
def SelectPoint():
    """Opens an eDNA point picker, where the user can select a single tag.

    :return: selected tag name (decoded from the DLL's buffer; presumably an
        empty string when nothing is selected -- TODO confirm against the
        eDNA API documentation)
    """
    # Define all required variables in the correct ctypes format: the DLL
    # writes the chosen tag into pszPoint (up to 20 bytes) and nPoint carries
    # the buffer capacity.
    pszPoint = create_string_buffer(20)
    nPoint = c_ushort(20)
    # Opens the (blocking) point picker dialog.
    dna_dll.DnaSelectPoint(byref(pszPoint), nPoint)
    tag_result = pszPoint.value.decode('utf-8')
    return tag_result
def _FormatMessage ( self , event ) :
"""Formats the message .
Args :
event ( EventObject ) : event .
Returns :
str : message field .
Raises :
NoFormatterFound : if no event formatter can be found to match the data
type in the event .""" | message , _ = self . _output_mediator . GetFormattedMessages ( event )
if message is None :
data_type = getattr ( event , 'data_type' , 'UNKNOWN' )
raise errors . NoFormatterFound ( 'Unable to find event formatter for: {0:s}.' . format ( data_type ) )
return message |
def find_elements(self, selector, by=By.CSS_SELECTOR, limit=0):
    """Returns a list of matching WebElements.

    If "limit" is set and > 0, will only return that many elements.
    """
    self.wait_for_ready_state_complete()
    # Auto-detect the selector strategy from the selector's shape.
    if page_utils.is_xpath_selector(selector):
        by = By.XPATH
    if page_utils.is_link_text_selector(selector):
        selector = page_utils.get_link_text_from_selector(selector)
        by = By.LINK_TEXT
    matches = self.driver.find_elements(by=by, value=selector)
    if limit and limit > 0:
        # Slicing is a no-op when fewer than `limit` matches were found.
        matches = matches[:limit]
    return matches
def get_session(self, token=None, signature=None):
    '''Build a `rauth.OAuth1Session` for this service.

    With a ``token`` tuple of ``(access_token, access_token_secret)`` the
    session carries those credentials; otherwise a bare session is created
    from the :class:`rauth.OAuth1Service.consumer_key` and
    :class:`rauth.OAuth1Service.consumer_secret` stored on the service.

    :param token: A tuple of strings parameterizing the session instance.
    :type token: tuple
    '''
    if token is None:  # pragma: no cover
        return self.session_obj(self.consumer_key,
                                self.consumer_secret,
                                signature=signature or self.signature_obj,
                                service=self)
    access_token, access_token_secret = token
    return self.session_obj(self.consumer_key,
                            self.consumer_secret,
                            access_token,
                            access_token_secret,
                            signature or self.signature_obj,
                            service=self)
def pexpire(self, key, timeout):
    """Set ``key``'s time to live in milliseconds (like ``expire`` but ms).

    .. note:: **Time complexity**: ``O(1)``

    :param key: The key to set an expiration for
    :type key: :class:`str`, :class:`bytes`
    :param int timeout: The number of milliseconds to set the timeout to
    :rtype: bool
    :raises: :exc:`~tredis.exceptions.RedisError`
    """
    # Redis wants the timeout as an ASCII-encoded byte string.
    command = [b'PEXPIRE', key, ascii(timeout).encode('ascii')]
    return self._execute(command, 1)
def scope(self, framebuffer, enable_only=None, *, textures=(), uniform_buffers=(), storage_buffers=()) -> 'Scope':
    '''Create a :py:class:`Scope` object.

    Args:
        framebuffer (Framebuffer): The framebuffer to use when entering.
        enable_only (int): The enable_only flags to set when entering.

    Keyword Args:
        textures (list): List of (texture, binding) tuples.
        uniform_buffers (list): List of (buffer, binding) tuples.
        storage_buffers (list): List of (buffer, binding) tuples.
    '''
    # Unwrap the Python wrappers into their low-level mglo handles.
    mglo_textures = tuple((texture.mglo, binding) for texture, binding in textures)
    mglo_uniforms = tuple((buffer.mglo, binding) for buffer, binding in uniform_buffers)
    mglo_storage = tuple((buffer.mglo, binding) for buffer, binding in storage_buffers)
    # Bypass Scope.__init__ and wire the fields directly (same pattern the
    # library uses for other context-created objects).
    instance = Scope.__new__(Scope)
    instance.mglo = self.mglo.scope(framebuffer.mglo, enable_only,
                                    mglo_textures, mglo_uniforms, mglo_storage)
    instance.ctx = self
    instance.extra = None
    return instance
def session_update(self, session, *_):
    """Record the sqlalchemy object states in the middle of session.

    Accumulates the session's new/dirty/deleted objects onto its pending
    sets so the events can be published once in session_commit.
    """
    self._session_init(session)
    # In-place set union keeps any existing aliases to the pending sets valid.
    for pending_attr, tracked in (('pending_write', session.new),
                                  ('pending_update', session.dirty),
                                  ('pending_delete', session.deleted)):
        getattr(session, pending_attr).update(tracked)
    self.logger.debug("%s - session_update" % session.meepo_unique_id)
def find_genusspecific_allele_list(profiles_file, target_genus):
    """Look up the gene/allele list recorded for a genus in a profiles file.

    Each line of the profiles file looks like ``Genus:allele1,allele2,...,``
    (the trailing comma yields an empty final field, which is discarded).

    :param profiles_file: Path to profiles file.
    :param target_genus: genus whose allele list should be returned.
    :return: List of gene/allele combinations for the species-specific
        database (empty when the genus is absent).
    """
    alleles = list()
    with open(profiles_file) as profiles:
        for raw_line in profiles:
            fields = raw_line.rstrip().split(':')
            if fields[0] == target_genus:
                # Drop the empty field produced by the trailing comma.
                alleles = fields[1].split(',')[:-1]
    return alleles
def __set_interval(self, value):
    '''Sets the treatment interval.

    @param value: Interval instance to store
    @raise ValueError: if ``value`` is not an ``Interval``
    '''
    # BUG FIX: the original tested ``isinstance(self, Interval)`` -- the
    # owning object instead of the incoming value -- so invalid values were
    # never rejected (and valid calls could raise spuriously).
    if not isinstance(value, Interval):
        raise ValueError("'value' must be of type Interval")
    self.__interval = value
def subject(self):
    """Normalized subject.

    Only used for debugging and human-friendly logging.
    """
    # Fetch the subject from the first message and collapse all whitespace
    # runs (including newlines from header folding) into single spaces.
    raw_subject = self.message.get('Subject', '')
    return re.sub(r'\s+', ' ', raw_subject)
def get_agents(self, pool_id, agent_name=None, include_capabilities=None, include_assigned_request=None, include_last_completed_request=None, property_filters=None, demands=None):
    """GetAgents.

    [Preview API] Get a list of agents.

    :param int pool_id: The agent pool containing the agents
    :param str agent_name: Filter on agent name
    :param bool include_capabilities: Whether to include the agents' capabilities in the response
    :param bool include_assigned_request: Whether to include details about the agents' current work
    :param bool include_last_completed_request: Whether to include details about the agents' most recent completed work
    :param [str] property_filters: Filter which custom properties will be returned
    :param [str] demands: Filter by demands the agents can satisfy
    :rtype: [TaskAgent]
    """
    # URL route parameters (path segments).
    route_values = {}
    if pool_id is not None:
        route_values['poolId'] = self._serialize.url('pool_id', pool_id, 'int')
    # Optional query-string parameters; only serialized when supplied.
    query_parameters = {}
    if agent_name is not None:
        query_parameters['agentName'] = self._serialize.query('agent_name', agent_name, 'str')
    if include_capabilities is not None:
        query_parameters['includeCapabilities'] = self._serialize.query('include_capabilities', include_capabilities, 'bool')
    if include_assigned_request is not None:
        query_parameters['includeAssignedRequest'] = self._serialize.query('include_assigned_request', include_assigned_request, 'bool')
    if include_last_completed_request is not None:
        query_parameters['includeLastCompletedRequest'] = self._serialize.query('include_last_completed_request', include_last_completed_request, 'bool')
    if property_filters is not None:
        # List filters are transmitted as a single comma-separated string.
        property_filters = ",".join(property_filters)
        query_parameters['propertyFilters'] = self._serialize.query('property_filters', property_filters, 'str')
    if demands is not None:
        demands = ",".join(demands)
        query_parameters['demands'] = self._serialize.query('demands', demands, 'str')
    response = self._send(http_method='GET',
                          location_id='e298ef32-5878-4cab-993c-043836571f42',
                          version='5.1-preview.1',
                          route_values=route_values,
                          query_parameters=query_parameters)
    return self._deserialize('[TaskAgent]', self._unwrap_collection(response))
def _ParseDateTimeValue ( self , parser_mediator , date_time_value ) :
"""Parses a date time value .
Args :
parser _ mediator ( ParserMediator ) : mediates interactions between parsers
and other components , such as storage and dfvfs .
date _ time _ value ( str ) : date time value
( CSSM _ DB _ ATTRIBUTE _ FORMAT _ TIME _ DATE ) in the format : " YYYYMMDDhhmmssZ " .
Returns :
dfdatetime . TimeElements : date and time extracted from the value or None
if the value does not represent a valid string .""" | if date_time_value [ 14 ] != 'Z' :
parser_mediator . ProduceExtractionWarning ( 'invalid date and time value: {0!s}' . format ( date_time_value ) )
return None
try :
year = int ( date_time_value [ 0 : 4 ] , 10 )
month = int ( date_time_value [ 4 : 6 ] , 10 )
day_of_month = int ( date_time_value [ 6 : 8 ] , 10 )
hours = int ( date_time_value [ 8 : 10 ] , 10 )
minutes = int ( date_time_value [ 10 : 12 ] , 10 )
seconds = int ( date_time_value [ 12 : 14 ] , 10 )
except ( TypeError , ValueError ) :
parser_mediator . ProduceExtractionWarning ( 'invalid date and time value: {0!s}' . format ( date_time_value ) )
return None
time_elements_tuple = ( year , month , day_of_month , hours , minutes , seconds )
try :
return dfdatetime_time_elements . TimeElements ( time_elements_tuple = time_elements_tuple )
except ValueError :
parser_mediator . ProduceExtractionWarning ( 'invalid date and time value: {0!s}' . format ( date_time_value ) )
return None |
def guess_headers(self):
    """Attempt to guess what headers may be required in order to use this
    type. Returns `guess_headers` of all children recursively.

    * If the typename is in the :const:`KNOWN_TYPES` dictionary, use the
      header specified there
    * If it's an STL type, include <{type}>
    * If it exists in the ROOT namespace and begins with T,
      include <{type}.h>
    """
    # Strip pointer markers; headers are looked up by the bare type name.
    name = self.name.replace("*", "")
    headers = []
    if name in KNOWN_TYPES:
        headers.append(KNOWN_TYPES[name])
    elif name in STL:
        # STL types map directly to their standard header.
        headers.append('<{0}>'.format(name))
    elif hasattr(ROOT, name) and name.startswith("T"):
        headers.append('<{0}.h>'.format(name))
    elif '::' in name:
        # Namespaced types map "A::B" onto the header path <A/B.h>.
        headers.append('<{0}.h>'.format(name.replace('::', '/')))
    elif name == 'allocator':
        headers.append('<memory>')
    else:
        try:
            # is this just a basic type?
            CPPGrammar.BASIC_TYPE.parseString(name, parseAll=True)
        except ParseException as e:
            # nope... I don't know what it is
            log.warning("unable to guess headers required for {0}".format(name))
    if self.params:
        for child in self.params:
            # NOTE(review): accessed without parentheses -- guess_headers is
            # presumably a property on template-parameter nodes; confirm.
            headers.extend(child.guess_headers)
    # remove duplicates (order is not preserved by the set round-trip)
    return list(set(headers))
def cleanup_classes(rdf):
    """Remove unnecessary class definitions: definitions of SKOS classes or
    unused classes. If a class is also a skos:Concept or skos:Collection,
    remove the 'classness' of it but leave the Concept/Collection.

    :param rdf: rdflib graph, mutated in place.
    """
    for t in (OWL.Class, RDFS.Class):
        for cl in rdf.subjects(RDF.type, t):
            # SKOS classes may be safely removed
            if cl.startswith(SKOS):
                logging.debug("removing SKOS class definition: %s", cl)
                replace_subject(rdf, cl, None)
                continue
            # if there are instances of the class, keep the class def
            if rdf.value(None, RDF.type, cl, any=True) is not None:
                continue
            # if the class is used in a domain/range/equivalentClass
            # definition, keep the class def
            if rdf.value(None, RDFS.domain, cl, any=True) is not None:
                continue
            if rdf.value(None, RDFS.range, cl, any=True) is not None:
                continue
            if rdf.value(None, OWL.equivalentClass, cl, any=True) is not None:
                continue
            # if the class is also a skos:Concept or skos:Collection, only
            # remove its rdf:type
            if (cl, RDF.type, SKOS.Concept) in rdf or (cl, RDF.type, SKOS.Collection) in rdf:
                logging.debug("removing classiness of %s", cl)
                rdf.remove((cl, RDF.type, t))
            else:
                # remove it completely
                logging.debug("removing unused class definition: %s", cl)
                replace_subject(rdf, cl, None)
def eval_agg_call(self, exp):
    "helper for eval_callx; evaluator for CallX that consume multiple rows"
    if not isinstance(self.c_row, list):
        raise TypeError('aggregate function expected a list of rows')
    if len(exp.args.children) != 1:
        raise ValueError('aggregate function expected a single value', exp.args)
    # intentional: unpacking errors if len != 1
    arg, = exp.args.children
    values = [Evaluator(row, self.nix, self.tables).eval(arg)
              for row in self.c_row]
    if not values:
        return None
    # Dispatch table replaces the if/elif chain.
    reducers = {'min': min, 'max': max, 'count': len}
    if exp.f not in reducers:
        raise NotImplementedError('unk_func', exp.f)
    return reducers[exp.f](values)
def _get_coordinatenames ( self ) :
"""Create ordered list of coordinate names""" | validnames = ( "direction" , "spectral" , "linear" , "stokes" , "tabular" )
self . _names = [ "" ] * len ( validnames )
n = 0
for key in self . _csys . keys ( ) :
for name in validnames :
if key . startswith ( name ) :
idx = int ( key [ len ( name ) : ] )
self . _names [ idx ] = name
n += 1
# reverse as we are c order in python
self . _names = self . _names [ : n ] [ : : - 1 ]
if len ( self . _names ) == 0 :
raise LookupError ( "Coordinate record doesn't contain valid coordinates" ) |
def _downgrade_v3(op):
    """Downgrade assets db by adding a not null constraint on
    ``equities.first_traded``.

    SQLite cannot add a constraint in place, so the table is rebuilt:
    create ``_new_equities`` with the constraint, copy over the rows that
    satisfy it, drop the old table and rename.
    """
    op.create_table(
        '_new_equities',
        sa.Column('sid', sa.Integer, unique=True, nullable=False, primary_key=True, ),
        sa.Column('symbol', sa.Text),
        sa.Column('company_symbol', sa.Text),
        sa.Column('share_class_symbol', sa.Text),
        sa.Column('fuzzy_symbol', sa.Text),
        sa.Column('asset_name', sa.Text),
        sa.Column('start_date', sa.Integer, default=0, nullable=False),
        sa.Column('end_date', sa.Integer, nullable=False),
        sa.Column('first_traded', sa.Integer, nullable=False),
        sa.Column('auto_close_date', sa.Integer),
        sa.Column('exchange', sa.Text),
    )
    # Rows with a NULL first_traded cannot satisfy the new NOT NULL
    # constraint and are dropped by the filtered copy.
    op.execute(
        """
        insert into _new_equities
        select * from equities
        where equities.first_traded is not null
        """,
    )
    op.drop_table('equities')
    op.rename_table('_new_equities', 'equities')
    # we need to make sure the indices have the proper names after the rename
    op.create_index('ix_equities_company_symbol', 'equities', ['company_symbol'], )
    op.create_index('ix_equities_fuzzy_symbol', 'equities', ['fuzzy_symbol'], )
def update_payload(self, fields=None):
    """Wrap submitted data within an extra dict."""
    payload = super(JobTemplate, self).update_payload(fields)
    # The API expects 'effective_user' nested under an 'ssh' sub-dict.
    ssh_user = payload.pop(u'effective_user', None)
    wrapped = {u'job_template': payload}
    if ssh_user:
        payload[u'ssh'] = {u'effective_user': ssh_user}
    return wrapped
def get_schema(self, schema_id):
    """Retrieves the schema with the given schema_id from the registry
    and returns it as a `dict`.
    """
    url = self._url('/schemas/ids/{}', schema_id)
    response = requests.get(url)
    raise_if_failed(response)
    # The registry wraps the schema as a JSON string inside the reply body.
    return json.loads(response.json()['schema'])
def delete(login):
    '''Delete user account

    login : string
        login name

    CLI Example:

    .. code-block:: bash

        salt '*' pdbedit.delete wash'''
    # Guard clause: nothing to do when the account does not exist.
    if login not in list_users(False):
        return {login: 'absent'}
    res = __salt__['cmd.run_all'](
        'pdbedit --delete {login}'.format(login=_quote_args(login)),
    )
    if res['retcode'] > 0:
        return {login: res['stderr'] if 'stderr' in res else res['stdout']}
    return {login: 'deleted'}
def aaa_config_aaa_authentication_login_first(self, **kwargs):
    """Auto Generated Code

    Builds the config/aaa-config/aaa/authentication/login/first XML tree and
    hands it to the callback (``kwargs['callback']`` or ``self._callback``).
    """
    config = ET.Element("config")
    aaa_config = ET.SubElement(config, "aaa-config",
                               xmlns="urn:brocade.com:mgmt:brocade-aaa")
    # Build the nested aaa/authentication/login chain.
    login = ET.SubElement(
        ET.SubElement(ET.SubElement(aaa_config, "aaa"), "authentication"),
        "login")
    ET.SubElement(login, "first").text = kwargs.pop('first')
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
def stats(path, hash_type='sha256', follow_symlinks=True):
    '''Return a dict containing the stats about a given file

    Under Windows, `gid` will equal `uid` and `group` will equal `user`.
    While a file in Windows does have a 'primary group', this rarely used
    attribute generally has no bearing on permissions unless intentionally
    configured and is only used to support Unix compatibility features (e.g.
    Services For Unix, NFS services).

    Salt, therefore, remaps these properties to keep some kind of
    compatibility with Unix behavior. If the 'primary group' is required, it
    can be accessed in the `pgroup` and `pgid` properties.

    Args:
        path (str): The path to the file or directory
        hash_type (str): The type of hash to return
        follow_symlinks (bool):
            If the object specified by ``path`` is a symlink, get attributes of
            the linked file instead of the symlink itself. Default is True

    Returns:
        dict: A dictionary of file/directory stats

    CLI Example:

    .. code-block:: bash

        salt '*' file.stats /etc/passwd
    '''
    # This is to mirror the behavior of file.py. `check_file_meta` expects an
    # empty dictionary when the file does not exist
    if not os.path.exists(path):
        raise CommandExecutionError('Path not found: {0}'.format(path))

    # Symlink resolution requires Vista/Server 2008 (NT 6.0) or later.
    if follow_symlinks and sys.getwindowsversion().major >= 6:
        path = _resolve_symlink(path)

    pstat = os.stat(path)

    ret = {}
    ret['inode'] = pstat.st_ino
    # don't need to resolve symlinks again because we've already done that
    ret['uid'] = get_uid(path, follow_symlinks=False)
    # maintain the illusion that group is the same as user as states need this
    ret['gid'] = ret['uid']
    ret['user'] = uid_to_user(ret['uid'])
    ret['group'] = ret['user']
    # the real Windows "primary group" is still exposed separately
    ret['pgid'] = get_pgid(path, follow_symlinks)
    ret['pgroup'] = gid_to_group(ret['pgid'])
    ret['atime'] = pstat.st_atime
    ret['mtime'] = pstat.st_mtime
    ret['ctime'] = pstat.st_ctime
    ret['size'] = pstat.st_size
    ret['mode'] = six.text_type(oct(stat.S_IMODE(pstat.st_mode)))
    if hash_type:
        ret['sum'] = get_sum(path, hash_type)
    # classify the path; these S_IS* checks are not chained with elif, so the
    # last matching test wins (e.g. 'link' overrides 'file' when the stat was
    # taken on the symlink itself)
    ret['type'] = 'file'
    if stat.S_ISDIR(pstat.st_mode):
        ret['type'] = 'dir'
    if stat.S_ISCHR(pstat.st_mode):
        ret['type'] = 'char'
    if stat.S_ISBLK(pstat.st_mode):
        ret['type'] = 'block'
    if stat.S_ISREG(pstat.st_mode):
        ret['type'] = 'file'
    if stat.S_ISLNK(pstat.st_mode):
        ret['type'] = 'link'
    if stat.S_ISFIFO(pstat.st_mode):
        ret['type'] = 'pipe'
    if stat.S_ISSOCK(pstat.st_mode):
        ret['type'] = 'socket'
    ret['target'] = os.path.realpath(path)
    return ret
def _add_arguments(self):
    """Register the command-line options on the wrapped argparse parser.

    Registration order matters for ``--help`` output, so the sequence
    below is preserved exactly.
    """
    add = self._parser.add_argument
    add('-v', '--version',
        action='store_true',
        help="show program's version number and exit")
    add('-a', '--alias',
        nargs='?',
        const=get_alias(),
        help='[custom-alias-name] prints alias for current shell')
    add('-l', '--shell-logger',
        action='store',
        help='log shell output to the file')
    add('--enable-experimental-instant-mode',
        action='store_true',
        help='enable experimental instant mode, use on your own risk')
    add('-h', '--help',
        action='store_true',
        help='show this help message and exit')
    # Mutually exclusive options are registered by a dedicated helper.
    self._add_conflicting_arguments()
    add('-d', '--debug',
        action='store_true',
        help='enable debug output')
    # Hidden option (suppressed from --help output).
    add('--force-command', action='store', help=SUPPRESS)
    add('command',
        nargs='*',
        help='command that should be fixed')
def copy_files_to(src_fpath_list, dst_dpath=None, dst_fpath_list=None,
                  overwrite=False, verbose=True, veryverbose=False):
    """Copy a list of files in parallel.

    Destinations are given either as one directory (``dst_dpath``) or as
    an explicit per-source list (``dst_fpath_list``); exactly one of the
    two must be provided.  Files whose destination already exists are
    skipped unless ``overwrite`` is True.
    """
    from utool import util_list
    from utool import util_parallel
    if verbose:
        print('[util_path] +--- COPYING FILES ---')
        print('[util_path] * len(src_fpath_list) = %r' % (len(src_fpath_list)))
        print('[util_path] * dst_dpath = %r' % (dst_dpath,))
    if dst_fpath_list is None:
        # Destination dir given: mirror the source basenames inside it.
        ensuredir(dst_dpath, verbose=veryverbose)
        dst_fpath_list = [join(dst_dpath, basename(src)) for src in src_fpath_list]
    else:
        assert dst_dpath is None, 'dst_dpath was specified but overrided'
        assert len(dst_fpath_list) == len(src_fpath_list), 'bad correspondence'
    exists_list = [exists(dst) for dst in dst_fpath_list]
    if verbose:
        print('[util_path] * %d files already exist dst_dpath' % (sum(exists_list),))
    if overwrite:
        dst_fpath_list_ = dst_fpath_list
        src_fpath_list_ = src_fpath_list
    else:
        # Restrict the copy to files missing at their destination.
        notexists_list = util_list.not_list(exists_list)
        dst_fpath_list_ = util_list.compress(dst_fpath_list, notexists_list)
        src_fpath_list_ = util_list.compress(src_fpath_list, notexists_list)
    args_list = zip(src_fpath_list_, dst_fpath_list_)
    _gen = util_parallel.generate2(_copy_worker, args_list,
                                   ntasks=len(src_fpath_list_))
    success_list = list(_gen)
    if verbose:
        print('[util_path] * Copied %d / %d' % (sum(success_list), len(src_fpath_list)))
        print('[util_path] L___ DONE COPYING FILES ___')
def credential_delete_simulate(self, *ids):
    """Show the relationships and dependencies for one or more credentials.

    :param ids: one or more credential ids
    """
    creds = [{"id": str(cred_id)} for cred_id in ids]
    return self.raw_query("credential", "deleteSimulate",
                          data={"credentials": creds})
def receive_pong(self, pong: Pong):
    """Handle an incoming Pong message.

    Looks up the async result registered for the matching Ping and,
    if found, marks it successful; otherwise logs the stray pong.
    """
    key = ('ping', pong.nonce, pong.sender)
    waiter = self.messageids_to_asyncresults.get(key)
    if waiter is None:
        # No Ping was waiting on this nonce/sender pair.
        self.log_healthcheck.warn(
            'Unknown pong received',
            message_id=key,
        )
        return
    self.log_healthcheck.debug(
        'Pong received',
        sender=pex(pong.sender),
        message_id=pong.nonce,
    )
    waiter.set(True)
def get_all_comments(chebi_ids):
    '''Return the comments of all given ids as one flat list.'''
    comments = []
    for chebi_id in chebi_ids:
        comments.extend(get_comments(chebi_id))
    return comments
def change_vlan_id(self, original, new):
    """Change VLAN ID for a single VLAN, cluster VLAN or inline
    interface. When changing a single or cluster FW vlan, you
    can specify the original VLAN and new VLAN as either single
    int or str value. If modifying an inline interface VLAN when
    the interface pair has two different VLAN identifiers per
    interface, use a str value in form: '10-11' (original), and
    '20-21' (new).

    Single VLAN id::

        >>> engine = Engine('singlefw')
        >>> itf = engine.interface.get(1)
        >>> itf.vlan_interfaces()
        [PhysicalVlanInterface(vlan_id=11), PhysicalVlanInterface(vlan_id=10)]
        >>> itf.change_vlan_id(11, 100)
        >>> itf.vlan_interfaces()
        [PhysicalVlanInterface(vlan_id=100), PhysicalVlanInterface(vlan_id=10)]

    Inline interface with unique VLAN on each interface pair::

        >>> itf = engine.interface.get(2)
        >>> itf.vlan_interfaces()
        [PhysicalVlanInterface(vlan_id=2-3)]
        >>> itf.change_vlan_id('2-3', '20-30')
        >>> itf.vlan_interfaces()
        [PhysicalVlanInterface(vlan_id=20-30)]

    :param str,int original: original VLAN to change.
    :param str,int new: new VLAN identifier/s.
    :raises InterfaceNotFound: VLAN not found
    :raises UpdateElementFailed: failed updating the VLAN id
    :return: None
    """
    # Locate the VLAN sub-interface; raises InterfaceNotFound if absent.
    vlan = self.vlan_interface.get_vlan(original)
    # 'new' may be a pair like '20-30' for inline interfaces; only the
    # first element names this interface's own identifier.
    newvlan = str(new).split('-')
    # interface_id has the form '<interface>.<vlan>'; swap the VLAN part.
    splitted = vlan.interface_id.split('.')
    vlan.interface_id = '{}.{}'.format(splitted[0], newvlan[0])
    for interface in vlan.interfaces:
        if isinstance(interface, InlineInterface):
            # Inline interfaces get the full (possibly paired) value.
            interface.change_vlan_id(new)
        else:
            interface.change_vlan_id(newvlan[0])
    # Persist the modified interface definition.
    self.update()
def _site_login(self, repo):
    """Log the user configured for the repo into the wiki.

    In test mode the login call is skipped, but the base page is still
    recorded so subsequent page operations have a target.

    :arg repo: an instance of config.RepositorySettings with wiki credentials.
    """
    try:
        if not self.testmode:
            self.site.login(repo.wiki["user"], repo.wiki["password"])
    except LoginError as e:
        # BUG FIX: exceptions are not subscriptable in Python 3, so the old
        # ``e[1]['result']`` raised TypeError inside the handler.  Use the
        # args tuple, falling back to the exception itself.
        if len(e.args) > 1 and isinstance(e.args[1], dict) and 'result' in e.args[1]:
            print(e.args[1]['result'])
        else:
            print(e)
    self.basepage = repo.wiki["basepage"]
def markowitz_portfolio(cov_mat, exp_rets, target_ret, allow_short=False, market_neutral=False):
    """Computes a Markowitz portfolio.

    Parameters
    ----------
    cov_mat: pandas.DataFrame
        Covariance matrix of asset returns.
    exp_rets: pandas.Series
        Expected asset returns (often historical returns).
    target_ret: float
        Target return of portfolio.
    allow_short: bool, optional
        If 'False' construct a long-only portfolio.
        If 'True' allow shorting, i.e. negative weights.
    market_neutral: bool, optional
        If 'False' sum of weights equals one.
        If 'True' sum of weights equal zero, i.e. create a
        market neutral portfolio (implies allow_short=True).

    Returns
    -------
    weights: pandas.Series
        Optimal asset weights.
    """
    # --- input validation ---
    if not isinstance(cov_mat, pd.DataFrame):
        raise ValueError("Covariance matrix is not a DataFrame")
    if not isinstance(exp_rets, pd.Series):
        raise ValueError("Expected returns is not a Series")
    if not isinstance(target_ret, float):
        raise ValueError("Target return is not a float")
    if not cov_mat.index.equals(exp_rets.index):
        raise ValueError("Indices do not match")
    if market_neutral and not allow_short:
        # Market neutrality cannot hold with long-only weights.
        warnings.warn("A market neutral portfolio implies shorting")
        allow_short = True
    n = len(cov_mat)
    # Quadratic objective: minimize x' P x with P the covariance matrix.
    P = opt.matrix(cov_mat.values)
    q = opt.matrix(0.0, (n, 1))
    # Inequality constraints Gx <= h (cvxopt convention, hence the sign flips).
    if not allow_short:
        # exp_rets * x >= target_ret and x >= 0
        G = opt.matrix(np.vstack((-exp_rets.values, -np.identity(n))))
        h = opt.matrix(np.vstack((-target_ret, +np.zeros((n, 1)))))
    else:
        # exp_rets * x >= target_ret
        G = opt.matrix(-exp_rets.values).T
        h = opt.matrix(-target_ret)
    # Equality constraint Ax = b: weights sum to one (zero if market neutral).
    A = opt.matrix(1.0, (1, n))
    if not market_neutral:
        b = opt.matrix(1.0)
    else:
        b = opt.matrix(0.0)
    # Solve the quadratic program.
    optsolvers.options['show_progress'] = False
    sol = optsolvers.qp(P, q, G, h, A, b)
    if sol['status'] != 'optimal':
        # The (possibly inexact) solution is still returned below.
        warnings.warn("Convergence problem")
    # Put weights into a labeled series
    weights = pd.Series(sol['x'], index=cov_mat.index)
    return weights
def _add_rhoa(df, spacing):
    """Compute K factors for the given spacing and add apparent resistivities.

    Adds ``k`` and ``rho_a`` columns to ``df``; when complex impedances
    (``Zt``) are present, also adds ``rho_a_complex``.  Returns ``df``.
    """
    k = redaK.compute_K_analytical(df, spacing=spacing)
    df['k'] = k
    df['rho_a'] = df['r'] * k
    if 'Zt' in df.columns:
        df['rho_a_complex'] = df['Zt'] * k
    return df
def get_split_pos_lines(data, cgi_input, header):
    """Advance across split alleles and return data from each.

    CGI var files report heterozygous sites allele-by-allele: all rows
    for allele "1" come first, then the rows for allele "2".  Collect
    both groups and also return the first line belonging to neither
    (the remainder line).
    """
    def _next_fields():
        # Read one line and split it into tab-separated fields.
        return cgi_input.readline().decode('utf-8').rstrip('\n').split("\t")

    allele_col = header['allele']
    s1_data = [data]
    s2_data = []
    fields = _next_fields()
    while fields[allele_col] == "1":
        s1_data.append(fields)
        fields = _next_fields()
    while fields[allele_col] == "2":
        s2_data.append(fields)
        fields = _next_fields()
    return s1_data, s2_data, fields
def merge_lists(src, new):
    """Merge ``new`` into ``src`` element-wise, returning the result in ``new``.

    The shorter list is padded in place with ``None`` to match the longer
    one.  ``None`` entries in ``new`` keep the corresponding ``src``
    value; nested dicts and lists are merged recursively.
    """
    shorter, longer = (src, new) if len(src) < len(new) else (new, src)
    shorter += [None] * (len(longer) - len(shorter))
    for idx, item in enumerate(new):
        src_item = src[idx]
        if isinstance(item, dict) and isinstance(src_item, dict):
            new[idx] = merge_dicts(src_item, item)
        elif isinstance(item, list) and isinstance(src_item, list):
            new[idx] = merge_lists(src_item, item)
        elif item is not None:
            new[idx] = item
        else:
            new[idx] = src_item
    return new
def _remove_advices(target, advices, ctx):
    """Remove advices from input target.

    :param advices: advices to remove. If None, remove all advices.
    """
    if ctx is not None:
        # Only proceed when the target is intercepted for this exact ctx.
        _, intercepted_ctx = get_intercepted(target)
        if intercepted_ctx is None or intercepted_ctx is not ctx:
            return
    interception_fn = _get_function(target)
    target_advices = getattr(interception_fn, _ADVICES, None)
    if target_advices is not None:
        if advices is None:
            # Remove everything.
            target_advices = []
        else:
            target_advices = [advice for advice in target_advices if advice not in advices]
        if target_advices:
            # Some advices remain: store the filtered list back.
            setattr(interception_fn, _ADVICES, target_advices)
        else:
            # No advices left: drop the attribute and undo the interception.
            delattr(interception_fn, _ADVICES)
            _unapply_interception(target, ctx=ctx)
def generic_html(self, result, errors):
    """Render any object as a simple HTML table.

    Non-containers become a one-column "Value" table; mappings become a
    two-column key/value table.  Returns a dict with the rendered body
    and mimetype.
    """
    h1 = htmlize(type(result))
    out = []
    result = pre_process_json(result)
    if not hasattr(result, 'items'):
        # Non-container (including plain strings): single "Value" column.
        header = "<tr><th>Value</th></tr>"
        if type(result) is list:
            result = htmlize_list(result)
        else:
            result = htmlize(result)
        out = ["<tr><td>" + result + "</td></tr>"]
    elif hasattr(result, 'lower'):
        # String-like object that nevertheless exposes ``items``.
        # BUG FIX: ``header`` was left undefined on this path, which made
        # the template render below raise NameError.
        header = "<tr><th>Value</th></tr>"
        out = ["<tr><td>" + result + "</td></tr>"]
    else:
        # Mapping: two-column key/value table.
        header = "<tr><th>Key</th><th>Value</th></tr>"
        for key, value in result.items():
            v = htmlize(value)
            row = "<tr><td>{0}</td><td>{1}</td></tr>".format(key, v)
            out.append(row)
    env = Environment(loader=PackageLoader('giotto'))
    template = env.get_template('generic.html')
    rendered = template.render({'header': h1, 'table_header': header, 'table_body': out})
    return {'body': rendered, 'mimetype': 'text/html'}
def get_doctypes(self, default_doctypes=DEFAULT_DOCTYPES):
    """Return the list of doctypes to use.

    The most recent 'doctypes' step wins; otherwise fall back to the
    bound mapping type, and finally to ``default_doctypes``.
    """
    for action, value in reversed(self.steps):
        if action != 'doctypes':
            continue
        return list(value)
    if self.type is None:
        return default_doctypes
    return [self.type.get_mapping_type_name()]
def triples_to_graph(self, triples, top=None):
    """Create a Graph from *triples* considering codec configuration.

    The Graph class does not know about information in the codec,
    so if Graph instantiation depends on special `TYPE_REL` or
    `TOP_VAR` values, use this function instead of instantiating
    a Graph object directly. This is also where edge
    normalization (de-inversion) and value type conversion occur
    (via handle_triple()).

    Args:
        triples: an iterable of (lhs, relation, rhs) triples
        top: node identifier of the top node

    Returns:
        a Graph object
    """
    # Fall back to the first triple's source as the top node.
    inferred_top = triples[0][0] if triples else None
    ts = []
    for triple in triples:
        if triple[0] == self.TOP_VAR and triple[1] == self.TOP_REL:
            # A special TOP triple overrides the inferred top instead of
            # becoming a regular graph triple.
            inferred_top = triple[2]
        else:
            ts.append(self.handle_triple(*triple))
    # Run the explicit top value (possibly None) through the same
    # conversion machinery before using it.
    top = self.handle_triple(self.TOP_VAR, self.TOP_REL, top).target
    return Graph(ts, top=top or inferred_top)
def get_device_topology(self, id_or_uri):
    """Retrieve the topology information for the rack specified by ID or URI.

    Args:
        id_or_uri: Can be either the resource ID or the resource URI.

    Return:
        dict: Device topology.
    """
    topology_uri = "{}/deviceTopology".format(self._client.build_uri(id_or_uri))
    return self._client.get(topology_uri)
def get_json(self, instance=True):
    '''get_json

    High-level api: get_json returns json_val of the config node.

    Parameters
    ----------
    instance : `bool`
        True if only one instance of list or leaf-list is required. False if
        all instances of list or leaf-list are needed.

    Returns
    -------
    str
        A string in JSON format.
    '''
    def get_json_instance(node):
        # Parker-style XML-to-JSON conversion with ordered keys.
        pk = Parker(xml_fromstring=_fromstring, dict_type=OrderedDict)
        default_ns = {}
        nodes = [node] + node.findall('.//')
        for item in nodes:
            # Locate item's actual parent so it can inherit that parent's
            # recorded default namespace.
            parents = [p for p in node.findall('.//{}/..'.format(item.tag)) if item in p.findall('*')]
            if parents and id(parents[0]) in default_ns:
                default_url = default_ns[id(parents[0])]
                ns, tag = self.device.convert_tag(default_url, item.tag, dst=Tag.JSON_NAME)
            else:
                ns, tag = self.device.convert_tag('', item.tag, dst=Tag.JSON_NAME)
            # Remember this node's namespace for its children, then rewrite
            # the tag into its JSON name.
            default_ns[id(item)] = ns
            item.tag = tag
        return pk.data(node, preserve_root=True)

    def convert_node(node):
        # lxml.etree does not allow tag name like oc-if:enable
        # so it is converted to xml.etree.ElementTree
        string = etree.tostring(node, encoding='unicode', pretty_print=False)
        return ElementTree.fromstring(string)

    if instance:
        return json.dumps(get_json_instance(convert_node(self.node)))
    else:
        # Collect all sibling instances that share this node's tag.
        nodes = [n for n in self.node.getparent().iterchildren(tag=self.node.tag)]
        if len(nodes) > 1:
            return json.dumps([get_json_instance(convert_node(n)) for n in nodes])
        else:
            return json.dumps(get_json_instance(convert_node(nodes[0])))
def _extract(self, raw: str, station: str = None) -> str:
    """Extract the raw report string from an XML response."""
    parsed = parsexml(raw)
    rtype = self.rtype.upper()
    try:
        payload = parsed['response']['data'][rtype]
    except KeyError:
        raise self.make_err(raw)
    # The payload may be a single report dict or a list of them.
    if isinstance(payload, dict):
        report = payload['raw_text']
    elif isinstance(payload, list) and payload:
        report = payload[0]['raw_text']
    else:
        raise self.make_err(raw, '"raw_text"')
    # Strip a leading report-type token (e.g. "METAR " or "SPECI ").
    for prefix in (rtype, 'SPECI'):
        if report.startswith(prefix + ' '):
            report = report[len(prefix) + 1:]
    return report
def get(self, sid):
    """Constructs a DeploymentContext

    :param sid: A string that uniquely identifies the Deployment.

    :returns: twilio.rest.preview.deployed_devices.fleet.deployment.DeploymentContext
    :rtype: twilio.rest.preview.deployed_devices.fleet.deployment.DeploymentContext
    """
    fleet_sid = self._solution['fleet_sid']
    return DeploymentContext(
        self._version,
        fleet_sid=fleet_sid,
        sid=sid,
    )
def _prepare_connection(**kwargs):
    '''Prepare the connection with the remote network device, and clean up the key
    value pairs, removing the args used for the connection init.
    '''
    # Start from the proxy/pillar 'pyeapi' options and let CLI kwargs win.
    pyeapi_kwargs = __salt__['config.get']('pyeapi', {})
    pyeapi_kwargs.update(kwargs)  # merge the CLI args with the opts/pillar
    # Split connection-init arguments from the remaining function kwargs.
    init_kwargs, fun_kwargs = __utils__['args.prepare_kwargs'](pyeapi_kwargs, PYEAPI_INIT_KWARGS)
    if 'transport' not in init_kwargs:
        # Default to the HTTPS eAPI transport.
        init_kwargs['transport'] = 'https'
    conn = pyeapi.client.connect(**init_kwargs)
    node = pyeapi.client.Node(conn, enablepwd=init_kwargs.get('enablepwd'))
    return node, fun_kwargs
def tar_to_bigfile ( self , fname , outfile ) :
"""Convert tar of multiple FASTAs to one file .""" | fnames = [ ]
tmpdir = mkdtemp ( )
# Extract files to temporary directory
with tarfile . open ( fname ) as tar :
tar . extractall ( path = tmpdir )
for root , _ , files in os . walk ( tmpdir ) :
fnames += [ os . path . join ( root , fname ) for fname in files ]
# Concatenate
with open ( outfile , "w" ) as out :
for infile in fnames :
for line in open ( infile ) :
out . write ( line )
os . unlink ( infile )
# Remove temp dir
shutil . rmtree ( tmpdir ) |
def get_container_name(self, repo: str, branch: str, git_repo: Repo):
    """Return the name of the container used for this repo/branch.

    The name combines a stable repository id, the branch name and the
    short hash of the currently checked-out commit.
    """
    repo_id = self._arca.repo_id(repo)
    short_hash = self._arca.current_git_hash(repo, branch, git_repo, short=True)
    return "arca_{}_{}_{}".format(repo_id, branch, short_hash)
def modify_key_parity(key):
    """Return ``key`` with every byte adjusted to pass the parity check.

    The incoming key is expected to be hex data binary representation,
    e.g. ``b'E7A3C8B1'``.  Bytes that already satisfy ``parityOf`` are
    kept; offending bytes are incremented (modulo 256) until the parity
    check passes.
    """
    validated_key = b''
    for byte in key:
        if parityOf(int(byte)) == -1:
            # BUG FIX: the first increment must also wrap modulo 256,
            # otherwise a failing 0xFF byte produced the value 256 and
            # made ``bytes([...])`` raise ValueError.
            byte_candidate = divmod(int(byte) + 1, 256)[1]
            while parityOf(byte_candidate) == -1:
                byte_candidate = divmod(byte_candidate + 1, 256)[1]
            validated_key += bytes([byte_candidate])
        else:
            validated_key += bytes([byte])
    return validated_key
def track_from_url(url, timeout=DEFAULT_ASYNC_TIMEOUT):
    """Create a track object from a public http URL.

    NOTE: Does not create the detailed analysis for the Track; call
    ``Track.get_analysis()`` for that.

    Args:
        url: A string giving the URL to read from. This must be on a
            public machine accessible by HTTP.

    Example:
        >>> t = track.track_from_url("http://www.miaowmusic.com/mp3/Miaow-01-Tempered-song.mp3")
        <Track>
    """
    return _upload(dict(url=url), timeout, data=None)
def create(cls, object_type=None, object_uuid=None, **kwargs):
    """Create a new record identifier.

    :param object_type: The object type. (Default: ``None``)
    :param object_uuid: The object UUID. (Default: ``None``)
    """
    # Callers must always supply the PID value itself.
    assert 'pid_value' in kwargs
    kwargs.setdefault('status', cls.default_status)
    if object_type and object_uuid:
        # A fully specified object is created directly in REGISTERED state.
        kwargs['status'] = PIDStatus.REGISTERED
    return super(OAIIDProvider, cls).create(object_type=object_type, object_uuid=object_uuid, **kwargs)
def set_hostname(hostname):
    '''Set the hostname of the windows minion, requires a restart before this will
    be updated.

    .. versionadded:: 2016.3.0

    Args:
        hostname (str): The hostname to set

    Returns:
        bool: ``True`` if successful, otherwise ``False``

    CLI Example:

    .. code-block:: bash

        salt 'minion-id' system.set_hostname newhostname
    '''
    # WMI requires COM to be initialised on the calling thread.
    with salt.utils.winapi.Com():
        conn = wmi.WMI()
        # Rename the machine via the Win32_ComputerSystem WMI class.
        comp = conn.Win32_ComputerSystem()[0]
        return comp.Rename(Name=hostname)
def filter(self, func):
    """Create a Catalog of a subset of entries based on a condition.

    Note that, whatever specific class this is performed on, the return
    instance is a Catalog. The entries are passed unmodified, so they
    will still reference the original catalog instance and include its
    details such as directory.

    Parameters
    ----------
    func : function
        This should take a CatalogEntry and return True or False. Those
        items returning True will be included in the new Catalog, with
        the same entry names.

    Returns
    -------
    New Catalog
    """
    selected = {}
    for name, entry in self.items():
        if func(entry):
            selected[name] = entry
    return Catalog.from_dict(selected)
def to_tess(obj):
    '''to_tess(obj) yields a Tesselation object that is equivalent to obj; if obj is
    already a tesselation object and no changes are requested then obj is returned
    unmolested.

    The following objects can be converted into tesselations:
      * a tesselation object
      * a mesh or topology object (yields their tess objects)
      * a 3 x n or n x 3 matrix of integers (the faces)
      * a tuple of coordinates and faces that can be passed to to_mesh
    '''
    if is_tess(obj):
        return obj
    if is_mesh(obj) or is_topo(obj):
        return obj.tess
    # Fallbacks: obj may be a raw face matrix, or something mesh-like.
    try:
        return tess(obj)
    except Exception:
        pass
    try:
        return to_mesh(obj).tess
    except Exception:
        pass
    raise ValueError('Could not convert argument to tesselation object')
def _encode_ndef_uri_type(self, data):
    """Prepend the one-byte NDEF URI Identifier Code to ``data``.

    Well-known prefixes (such as "http://") are replaced by their
    one-byte code; 0x00 is used when no prefix matches.
    """
    code_byte = 0x0
    for code, prefix in uri_identifiers:
        head = data[:len(prefix)].decode('latin-1').lower()
        if head == prefix:
            code_byte = code
            data = data[len(prefix):]
            break
    return yubico_util.chr_byte(code_byte) + data
def organization_membership_delete(self, id, **kwargs):
    "https://developer.zendesk.com/rest_api/docs/core/organization_memberships#delete-membership"
    api_path = "/api/v2/organization_memberships/{id}.json".format(id=id)
    return self.call(api_path, method="DELETE", **kwargs)
def remove_na(x, y=None, paired=False, axis='rows'):
    """Remove missing values along a given axis in one or more (paired)
    numpy arrays.

    Parameters
    ----------
    x, y : 1D or 2D arrays
        Data. ``x`` and ``y`` must have the same number of dimensions.
        ``y`` can be None to only remove missing values in ``x``.
    paired : bool
        Indicates if the measurements are paired or not.
    axis : str
        Axis or axes along which missing values are removed.
        Can be 'rows' or 'columns'. This has no effect if ``x`` and
        ``y`` are one-dimensional arrays.

    Returns
    -------
    x, y : np.ndarray
        Data without missing values.

    Examples
    --------
    >>> x = [6.4, 3.2, 4.5, np.nan]
    >>> y = [2.3, np.nan, 5.2, 4.6]
    >>> remove_na(x, y, paired=True)
    (array([6.4, 4.5]), array([2.3, 5.2]))
    """
    # --- input validation ---
    x = np.asarray(x)
    assert x.size > 1, 'x must have more than one element.'
    assert axis in ['rows', 'columns'], 'axis must be rows or columns.'
    # --- unpaired / degenerate y cases ---
    if y is None:
        return _remove_na_single(x, axis=axis)
    if isinstance(y, (int, float, str)):
        # Scalar y: clean x only and pass y through.
        return _remove_na_single(x, axis=axis), y
    if isinstance(y, (list, np.ndarray)):
        y = np.asarray(y)
        if y.size == 1:
            # Single-element y: pass it through unchanged.
            return _remove_na_single(x, axis=axis), y
    if x.ndim != y.ndim or paired is False:
        # Independent arrays (or mismatched dimensions): clean separately.
        return (_remove_na_single(x, axis=axis),
                _remove_na_single(y, axis=axis))
    # --- paired arrays with matching dimensions ---
    if x.ndim == 1:
        x_mask = ~np.isnan(x)
        y_mask = ~np.isnan(y)
    else:
        mask_ax = 1 if axis == 'rows' else 0
        x_mask = ~np.any(np.isnan(x), axis=mask_ax)
        y_mask = ~np.any(np.isnan(y), axis=mask_ax)
    # Drop positions that are missing in either array, but only when
    # at least one missing value is present.
    if ~x_mask.all() or ~y_mask.all():
        keep = np.logical_and(x_mask, y_mask)
        comp_ax = 0 if x.ndim == 1 else (0 if axis == 'rows' else 1)
        x = x.compress(keep, axis=comp_ax)
        y = y.compress(keep, axis=comp_ax)
    return x, y
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.