signature stringlengths 29 44.1k | implementation stringlengths 0 85.2k |
|---|---|
def queue_raw_jobs(queue, params_list, **kwargs):
    """Enqueue a batch of jobs on a raw queue.

    :param queue: name of the raw queue to enqueue on
    :param params_list: iterable of raw job parameters
    :param kwargs: extra options forwarded to ``Queue.enqueue_raw_jobs``
    """
    from .queue import Queue
    # Build the queue wrapper and enqueue in a single expression.
    Queue(queue).enqueue_raw_jobs(params_list, **kwargs)
def get_slab(self, shift=0, tol=0.1, energy=None):
    """This method takes in shift value for the c lattice direction and
    generates a slab based on the given shift. You should rarely use this
    method. Instead, it is used by other generation algorithms to obtain
    all slabs.

    Arg:
        shift (float): A shift value in Angstrom that determines how much a
            slab should be shifted.
        tol (float): Tolerance to determine primitive cell.
        energy (float): An energy to assign to the slab.

    Returns:
        (Slab) A Slab object with a particular shifted oriented unit cell.
    """
    h = self._proj_height
    p = h / self.parent.lattice.d_hkl(self.miller_index)
    # Layer counts are expressed either in numbers of hkl planes or in
    # Angstrom, depending on self.in_unit_planes.
    if self.in_unit_planes:
        nlayers_slab = int(math.ceil(self.min_slab_size / p))
        nlayers_vac = int(math.ceil(self.min_vac_size / p))
    else:
        nlayers_slab = int(math.ceil(self.min_slab_size / h))
        nlayers_vac = int(math.ceil(self.min_vac_size / h))
    nlayers = nlayers_slab + nlayers_vac
    species = self.oriented_unit_cell.species_and_occu
    props = self.oriented_unit_cell.site_properties
    # Replicate per-site properties once per slab layer.
    props = {k: v * nlayers_slab for k, v in props.items()}
    frac_coords = self.oriented_unit_cell.frac_coords
    # Apply the shift along c and wrap coordinates back into [0, 1).
    frac_coords = np.array(frac_coords) + np.array([0, 0, -shift])[None, :]
    frac_coords -= np.floor(frac_coords)
    a, b, c = self.oriented_unit_cell.lattice.matrix
    # Extend c so the cell holds all slab layers plus the vacuum layers.
    new_lattice = [a, b, nlayers * c]
    frac_coords[:, 2] = frac_coords[:, 2] / nlayers
    all_coords = []
    for i in range(nlayers_slab):
        fcoords = frac_coords.copy()
        fcoords[:, 2] += i / nlayers
        all_coords.extend(fcoords)
    slab = Structure(new_lattice, species * nlayers_slab, all_coords,
                     site_properties=props)
    scale_factor = self.slab_scale_factor
    # Whether or not to orthogonalize the structure
    if self.lll_reduce:
        lll_slab = slab.copy(sanitize=True)
        mapping = lll_slab.lattice.find_mapping(slab.lattice)
        # Fold the LLL mapping into the accumulated scale factor.
        scale_factor = np.dot(mapping[2], scale_factor)
        slab = lll_slab
    # Whether or not to center the slab layer around the vacuum
    if self.center_slab:
        avg_c = np.average([c[2] for c in slab.frac_coords])
        slab.translate_sites(list(range(len(slab))), [0, 0, 0.5 - avg_c])
    if self.primitive:
        prim = slab.get_primitive_structure(tolerance=tol)
        if energy is not None:
            # Rescale the energy by the primitive/original volume ratio.
            energy = prim.volume / slab.volume * energy
        slab = prim
    # Reorient the lattice to get the correct reduced cell
    ouc = self.oriented_unit_cell.copy()
    if self.primitive:
        # find a reduced ouc
        slab_l = slab.lattice
        ouc = ouc.get_primitive_structure(
            constrain_latt={"a": slab_l.a, "b": slab_l.b,
                            "alpha": slab_l.alpha, "beta": slab_l.beta,
                            "gamma": slab_l.gamma})
    return Slab(slab.lattice, slab.species_and_occu, slab.frac_coords,
                self.miller_index, ouc, shift, scale_factor, energy=energy,
                site_properties=slab.site_properties,
                reorient_lattice=self.reorient_lattice)
def on_select_fit(self, event):
    """Picks out the fit selected in the fit combobox and sets it to the
    current fit of the GUI then calls the select function of the fit to
    set the GUI's bounds boxes and alter other such parameters

    Parameters
    ----------
    event : the wx.ComboBoxEvent that triggers this function

    Alters
    ------
    current_fit, fit_box selection, tmin_box selection, tmax_box selection
    """
    fit_val = self.fit_box.GetValue()
    # No fit data recorded for this specimen, or the user picked 'None':
    # reset the selection widgets and clear the current fit.
    if self.s not in self.pmag_results_data['specimens'] or not self.pmag_results_data['specimens'][self.s] or fit_val == 'None':
        self.clear_boxes()
        self.current_fit = None
        self.fit_box.SetStringSelection('None')
        self.tmin_box.SetStringSelection('')
        self.tmax_box.SetStringSelection('')
    else:
        try:
            fit_num = list(map(lambda x: x.name, self.pmag_results_data['specimens'][self.s])).index(fit_val)
        except ValueError:
            # Name not found in the fit list; -1 falls back to the last fit.
            fit_num = -1
        # NOTE(review): select() presumably updates self.current_fit and the
        # bound boxes -- confirm against the fit class.
        self.pmag_results_data['specimens'][self.s][fit_num].select()
    if self.ie_open:
        self.ie.change_selected(self.current_fit)
def list_comments(self, page=0):
    """Get the comments of current object

    :param page: the page starting at 0
    :return: the comments
    :rtype: list
    """
    from highton.models.comment import Comment
    params = {'page': int(page) * self.COMMENT_OFFSET}
    # Build the nested endpoint, fetch, parse, then decode into Comments.
    endpoint = self.ENDPOINT + '/' + str(self.id) + '/' + Comment.ENDPOINT
    response = self._get_request(endpoint=endpoint, params=params)
    root = self.element_from_string(response.text)
    return fields.ListField(name=self.ENDPOINT, init_class=Comment).decode(root)
def _choose_package_version(self):  # type: () -> Union[str, None]
    """Tries to select a version of a required package.

    Returns the name of the package whose incompatibilities should be
    propagated by _propagate(), or None indicating that version solving is
    complete and a solution has been found.
    """
    unsatisfied = self._solution.unsatisfied
    if not unsatisfied:
        # Nothing left to decide: version solving is complete.
        return

    # Prefer packages with as few remaining versions as possible,
    # so that if a conflict is necessary it's forced quickly.
    def _get_min(dependency):
        if dependency.name in self._use_latest:
            # If we're forced to use the latest version of a package, it
            # effectively only has one version to choose from.
            return 1
        if dependency.name in self._locked:
            # Locked packages likewise have a single candidate.
            return 1
        try:
            return len(self._provider.search_for(dependency))
        except ValueError:
            return 0
    if len(unsatisfied) == 1:
        dependency = unsatisfied[0]
    else:
        # NOTE(review): min(*unsatisfied, ...) unpacks the list; equivalent
        # to min(unsatisfied, ...) since len(unsatisfied) > 1 here.
        dependency = min(*unsatisfied, key=_get_min)
    locked = self._get_locked(dependency.name)
    if locked is None or not dependency.constraint.allows(locked.version):
        try:
            packages = self._provider.search_for(dependency)
        except ValueError as e:
            # The package could not be found at all: record the cause and
            # let unit propagation handle it.
            self._add_incompatibility(Incompatibility([Term(dependency, True)], PackageNotFoundCause(e)))
            return dependency.name
        try:
            version = packages[0]
        except IndexError:
            version = None
    else:
        version = locked
    if version is None:
        # If there are no versions that satisfy the constraint,
        # add an incompatibility that indicates that.
        self._add_incompatibility(Incompatibility([Term(dependency, True)], NoVersionsCause()))
        return dependency.name
    version = self._provider.complete_package(version)
    conflict = False
    for incompatibility in self._provider.incompatibilities_for(version):
        self._add_incompatibility(incompatibility)
        # If an incompatibility is already satisfied, then selecting version
        # would cause a conflict.
        # We'll continue adding its dependencies, then go back to
        # unit propagation which will guide us to choose a better version.
        conflict = conflict or all([term.dependency.name == dependency.name or self._solution.satisfies(term) for term in incompatibility.terms])
    if not conflict:
        self._solution.decide(version)
        self._log("selecting {} ({})".format(version.name, version.full_pretty_version))
    return dependency.name
def serialize(self):
    """Convert Document to python dictionary."""
    # Serialize every child element, then wrap them in a document dict.
    serialized_elements = [element.serialize() for element in self.elements]
    return {'type': 'document', 'elements': serialized_elements}
def floating_ip_disassociate(self, server_name, floating_ip):
    '''Disassociate a floating IP from server

    .. versionadded:: 2016.3.0
    '''
    nt_ks = self.compute_conn
    server_ = self.server_by_name(server_name)
    # Re-fetch by id to get a live nova Server object we can mutate.
    server = nt_ks.servers.get(server_.__dict__['id'])
    server.remove_floating_ip(floating_ip)
    # Return this IP's entry from the refreshed floating IP listing.
    return self.floating_ip_list()[floating_ip]
def calculate_dc_coefficients ( contour ) :
"""Calculate the : math : ` A _ 0 ` and : math : ` C _ 0 ` coefficients of the elliptic Fourier series .
: param numpy . ndarray contour : A contour array of size ` ` [ M x 2 ] ` ` .
: return : The : math : ` A _ 0 ` and : math : ` C _ 0 ` coefficients .
: rtype : tuple""" | dxy = np . diff ( contour , axis = 0 )
dt = np . sqrt ( ( dxy ** 2 ) . sum ( axis = 1 ) )
t = np . concatenate ( [ ( [ 0. , ] ) , np . cumsum ( dt ) ] )
T = t [ - 1 ]
xi = np . cumsum ( dxy [ : , 0 ] ) - ( dxy [ : , 0 ] / dt ) * t [ 1 : ]
A0 = ( 1 / T ) * np . sum ( ( ( dxy [ : , 0 ] / ( 2 * dt ) ) * np . diff ( t ** 2 ) ) + xi * dt )
delta = np . cumsum ( dxy [ : , 1 ] ) - ( dxy [ : , 1 ] / dt ) * t [ 1 : ]
C0 = ( 1 / T ) * np . sum ( ( ( dxy [ : , 1 ] / ( 2 * dt ) ) * np . diff ( t ** 2 ) ) + delta * dt )
# A0 and CO relate to the first point of the contour array as origin .
# Adding those values to the coefficients to make them relate to true origin .
return contour [ 0 , 0 ] + A0 , contour [ 0 , 1 ] + C0 |
def get_logger(name):
    """Return a logger with the specified name.

    The logging level comes from the LOGLEVEL environment variable,
    defaulting to INFO when the variable is unset.
    """
    level = getenv('LOGLEVEL', 'INFO')
    configured = logging.getLogger(name)
    configured.setLevel(level)
    return configured
def __initialize_ui(self):
    """Initializes the Widget ui."""
    LOGGER.debug("> Initializing Application toolBar!")
    self.setIconSize(QSize(UiConstants.default_toolbar_icon_size, UiConstants.default_toolbar_icon_size))
    # Pin the toolbar to the top and make it immovable.
    self.setAllowedAreas(Qt.TopToolBarArea)
    self.setFloatable(False)
    self.setMovable(False)
    self.set_layout_default_geometry()
    self.setObjectName("toolBar")
    self.setWindowTitle("{0} - toolBar".format(Constants.application_name))
    self.set_toolbar_children_widgets()
    # Signals / Slots.
    self.__container.layouts_manager.layout_stored.connect(self.__layouts_manager__layout_stored)
    self.__container.layouts_manager.layout_restored.connect(self.__layouts_manager__layout_restored)
def format_full_name(first_name: str, last_name: str, max_length: int = 20):
    """Limits name length to specified length. Tries to keep the name as
    human-readable and natural as possible.

    :param first_name: First name
    :param last_name: Last name
    :param max_length: Maximum length
    :return: Full name or a shortened version depending on length
    """
    # commas are not allowed in limited names
    first_name = first_name.replace(',', ' ')
    last_name = last_name.replace(',', ' ')
    original_full_name = first_name + ' ' + last_name
    # accept short full names as is
    if len(original_full_name) <= max_length:
        return original_full_name

    def _shortenings():
        # Progressively more aggressive reductions, mildest first.
        fn = first_name.split(' ')[0]                # drop middle names
        yield fn, last_name
        fn = re.split(r'[\s\-]', fn)[0]              # drop combined first-name parts
        yield fn, last_name
        ln = re.split(r'[\s\-]', last_name)[0]       # drop multi-part last-name parts
        yield fn, ln
        yield fn, ln[:1]                             # last name reduced to one letter

    full_name = original_full_name
    for fn, ln in _shortenings():
        full_name = fn + ' ' + ln
        if len(full_name) <= max_length:
            return full_name
    raise Exception('Failed to shorten name {}'.format(original_full_name))
def all(self, res):
    "Get resources using a filter condition"
    # NOTE: this method shadows the builtin all() within the class namespace.
    B = get_backend()
    # Resolve the concrete resource type, then fetch matching objects.
    return B.get_objects(B.get_concrete(res))
def _get_params ( mapper_spec , allowed_keys = None , allow_old = True ) :
"""Obtain input reader parameters .
Utility function for input readers implementation . Fetches parameters
from mapreduce specification giving appropriate usage warnings .
Args :
mapper _ spec : The MapperSpec for the job
allowed _ keys : set of all allowed keys in parameters as strings . If it is not
None , then parameters are expected to be in a separate " input _ reader "
subdictionary of mapper _ spec parameters .
allow _ old : Allow parameters to exist outside of the input _ reader
subdictionary for compatability .
Returns :
mapper parameters as dict
Raises :
BadReaderParamsError : if parameters are invalid / missing or not allowed .""" | if "input_reader" not in mapper_spec . params :
message = ( "Input reader's parameters should be specified in " "input_reader subdictionary." )
if not allow_old or allowed_keys :
raise errors . BadReaderParamsError ( message )
params = mapper_spec . params
params = dict ( ( str ( n ) , v ) for n , v in params . iteritems ( ) )
else :
if not isinstance ( mapper_spec . params . get ( "input_reader" ) , dict ) :
raise errors . BadReaderParamsError ( "Input reader parameters should be a dictionary" )
params = mapper_spec . params . get ( "input_reader" )
params = dict ( ( str ( n ) , v ) for n , v in params . iteritems ( ) )
if allowed_keys :
params_diff = set ( params . keys ( ) ) - allowed_keys
if params_diff :
raise errors . BadReaderParamsError ( "Invalid input_reader parameters: %s" % "," . join ( params_diff ) )
return params |
def from_ff_and_topologies(cls, box, ff, topologies, atom_style="full"):
    """Constructor building LammpsData from a ForceField object and a
    list of Topology objects. Do not support intermolecular
    topologies since a Topology object includes data for ONE
    molecule or structure only.

    Args:
        box (LammpsBox): Simulation box.
        ff (ForceField): ForceField object with data for Masses and
            force field sections.
        topologies ([Topology]): List of Topology objects with data
            for Atoms, Velocities and topology sections.
        atom_style (str): Output atom_style. Default to "full".
    """
    atom_types = set.union(*[t.species for t in topologies])
    assert atom_types.issubset(ff.maps["Atoms"].keys()), "Unknown atom type found in topologies"
    items = dict(box=box, atom_style=atom_style, masses=ff.masses, force_field=ff.force_field)
    mol_ids, charges, coords, labels = [], [], [], []
    # Only collect velocities when the first topology carries them;
    # otherwise the Velocities section is omitted entirely.
    v_collector = [] if topologies[0].velocities else None
    topo_collector = {"Bonds": [], "Angles": [], "Dihedrals": [], "Impropers": []}
    topo_labels = {"Bonds": [], "Angles": [], "Dihedrals": [], "Impropers": []}
    for i, topo in enumerate(topologies):
        if topo.topologies:
            # Offset site indices by sites already accumulated (1-based ids).
            shift = len(labels)
            for k, v in topo.topologies.items():
                topo_collector[k].append(np.array(v) + shift + 1)
                topo_labels[k].extend([tuple([topo.type_by_sites[j] for j in t]) for t in v])
        if isinstance(v_collector, list):
            v_collector.append(topo.velocities)
        mol_ids.extend([i + 1] * len(topo.sites))
        labels.extend(topo.type_by_sites)
        coords.append(topo.sites.cart_coords)
        # Default to zero charges when the topology provides none.
        q = [0.0] * len(topo.sites) if not topo.charges else topo.charges
        charges.extend(q)
    atoms = pd.DataFrame(np.concatenate(coords), columns=["x", "y", "z"])
    atoms["molecule-ID"] = mol_ids
    atoms["q"] = charges
    atoms["type"] = list(map(ff.maps["Atoms"].get, labels))
    # LAMMPS ids are 1-based.
    atoms.index += 1
    atoms = atoms[ATOMS_HEADERS[atom_style]]
    velocities = None
    if v_collector:
        velocities = pd.DataFrame(np.concatenate(v_collector), columns=SECTION_HEADERS["Velocities"])
        velocities.index += 1
    # Keep only topology kinds that actually have entries.
    topology = {k: None for k, v in topo_labels.items() if len(v) > 0}
    for k in topology:
        df = pd.DataFrame(np.concatenate(topo_collector[k]), columns=SECTION_HEADERS[k][1:])
        df["type"] = list(map(ff.maps[k].get, topo_labels[k]))
        if any(pd.isnull(df["type"])):
            # Throw away undefined topologies
            warnings.warn("Undefined %s detected and removed" % k.lower())
            df.dropna(subset=["type"], inplace=True)
            df.reset_index(drop=True, inplace=True)
        df.index += 1
        topology[k] = df[SECTION_HEADERS[k]]
    topology = {k: v for k, v in topology.items() if not v.empty}
    items.update({"atoms": atoms, "velocities": velocities, "topology": topology})
    return cls(**items)
def read(filename):
    """Read a file relative to setup.py location."""
    import os
    # Resolve relative to this file's directory, not the current CWD.
    base_dir = os.path.dirname(os.path.abspath(__file__))
    full_path = os.path.join(base_dir, filename)
    with open(full_path) as handle:
        return handle.read()
def plot_wigner2d(iradon_output, bin_centres, cmap=_cm.cubehelix_r, figsize=(6, 6)):
    """Plots the wigner space representation as a 2D heatmap.

    Parameters
    ----------
    iradon_output : ndarray
        2d array of size (histbins x histbins)
    bin_centres : ndarray
        positions of the bin centres
    cmap : matplotlib.cm.cmap, optional (default=cm.cubehelix_r)
        color map to use for Wigner
    figsize : tuple, optional (default=(6, 6))
        tuple defining size of figure created

    Returns
    -------
    fig : matplotlib.figure.Figure object
        figure showing the wigner function
    ax : matplotlib.axes.Axes object
        axes containing the object
    """
    xx, yy = _np.meshgrid(bin_centres, bin_centres)
    # Marginal distributions along each axis.
    resid1 = iradon_output.sum(axis=0)
    resid2 = iradon_output.sum(axis=1)
    wigner_marginal_seperation = 0.001
    left, width = 0.2, 0.65 - 0.1
    # left = left side of hexbin and hist_x
    bottom, height = 0.1, 0.65 - 0.1
    # bottom = bottom of hexbin and hist_y
    bottom_h = height + bottom + wigner_marginal_seperation
    left_h = width + left + wigner_marginal_seperation
    cbar_pos = [0.03, bottom, 0.05, 0.02 + width]
    rect_wigner = [left, bottom, width, height]
    rect_histx = [left, bottom_h, width, 0.2]
    rect_histy = [left_h, bottom, 0.2, height]
    # start with a rectangular Figure
    fig = _plt.figure(figsize=figsize)
    axWigner = _plt.axes(rect_wigner)
    axHistx = _plt.axes(rect_histx)
    axHisty = _plt.axes(rect_histy)
    pcol = axWigner.pcolor(xx, yy, iradon_output, cmap=cmap)
    binwidth = bin_centres[1] - bin_centres[0]
    axHistx.bar(bin_centres, resid2, binwidth)
    axHisty.barh(bin_centres, resid1, binwidth)
    _plt.setp(axHistx.get_xticklabels(), visible=False)
    # sets x ticks to be invisible while keeping gridlines
    _plt.setp(axHisty.get_yticklabels(), visible=False)
    # sets x ticks to be invisible while keeping gridlines
    for tick in axHisty.get_xticklabels():
        tick.set_rotation(-90)
    cbaraxes = fig.add_axes(cbar_pos)
    # This is the position for the colorbar
    # cbar = _plt.colorbar(axp, cax=cbaraxes)
    cbar = fig.colorbar(pcol, cax=cbaraxes, drawedges=False)
    # , orientation="horizontal"
    cbar.solids.set_edgecolor("face")
    cbar.solids.set_rasterized(True)
    cbar.ax.set_yticklabels(cbar.ax.yaxis.get_ticklabels(), y=0, rotation=45)
    # cbar.set_label(cbarlabel, labelpad=-25, y=1.05, rotation=0)
    # Symmetric plot limits around zero, from the widest bin centre.
    plotlimits = _np.max(_np.abs(bin_centres))
    axWigner.axis((-plotlimits, plotlimits, -plotlimits, plotlimits))
    axHistx.set_xlim(axWigner.get_xlim())
    axHisty.set_ylim(axWigner.get_ylim())
    return fig, axWigner, axHistx, axHisty, cbar
def getMusicAlbumList(self, tagtype=0, startnum=0, pagingrow=100):
    """GetMusicAlbumList

    Args:
        tagtype: ???
        startnum: start index for paging
        pagingrow: number of rows per page

    Returns:
        False: Failed to get property
    """
    # NOTE(review): this posts to nurls['setProperty'] even though the
    # method fetches an album list -- looks like a copy/paste of another
    # endpoint; confirm the correct nurls key.
    url = nurls['setProperty']
    data = {'userid': self.user_id, 'useridx': self.useridx, 'tagtype': tagtype, 'startnum': startnum, 'pagingrow': pagingrow, }
    r = self.session.post(url=url, data=data)
    return resultManager(r.text)
def get_version(module):
    """Return package version as listed in `__version__`.

    :param module: module name; ``<module>.py`` is read relative to the
        current working directory.
    :return: the version string
    :raises AttributeError: if no ``__version__`` assignment is found
        (``re.search`` returns None).
    """
    # Use a context manager so the file handle is closed deterministically;
    # the original open() leaked the handle to the garbage collector.
    with open('{0}.py'.format(module)) as fp:
        init_py = fp.read()
    return re.search("__version__ = ['\"]([^'\"]+)['\"]", init_py).group(1)
def AddPort(self, protocol, port, port_to=None):
    """Add and commit a single port.

    # Add single port
    >>> clc.v2.Server("WA1BTDIX01").PublicIPs().public_ips[0].AddPort(protocol='TCP',port='22').WaitUntilComplete()

    # Add port range
    >>> clc.v2.Server("WA1BTDIX01").PublicIPs().public_ips[0].AddPort(protocol='UDP',port='10000',port_to='15000').WaitUntilComplete()
    """
    # Record the new port locally, then commit via Update(); the request
    # object it returns is handed back to the caller.
    self.ports.append(Port(self, protocol, port, port_to))
    return (self.Update())
def links(cls, page):
    """return all links on a page, including potentially rel= links."""
    # Scan the raw page text with the class's href regex and convert
    # each match into a URL, yielding lazily.
    for match in cls.HREF_RE.finditer(page):
        yield cls.href_match_to_url(match)
async def get_decryption_aes_key(self, key: bytes, material_description: Dict[str, Any]) -> bytes:
    """Get decryption key for a given S3 object

    :param key: Base64 decoded version of x-amz-key-v2
    :param material_description: JSON decoded x-amz-matdesc
    :return: Raw AES key bytes
    """
    # Abstract hook: concrete key providers must override this.
    raise NotImplementedError()
def parse_log(file_path):
    """Parse a CISM output log and extract some information.

    Args:
        file_path: absolute path to the log file
    Return:
        A dictionary created by the elements object corresponding to
        the results of the bit for bit testing
    """
    if not os.path.isfile(file_path):
        return elements.error("Output Log", "Could not open file: " + file_path.split(os.sep)[-1])
    headers = ["Converged Iterations", "Avg. Iterations to Converge", "Processor Count", "Dycore Type"]
    with open(file_path, 'r') as f:
        dycore_types = {"0": "Glide", "1": "Glam", "2": "Glissade", "3": "Albany_felix", "4": "BISICLES"}
        curr_step = 0
        proc_count = 0
        iter_number = 0
        converged_iters = []
        iters_to_converge = []
        for line in f:
            split = line.split()
            if ('CISM dycore type' in line):
                # The value may follow a bare '=' on the next line.
                if line.split()[-1] == '=':
                    dycore_type = dycore_types[next(f).strip()]
                else:
                    dycore_type = dycore_types[line.split()[-1]]
            elif ('total procs' in line):
                proc_count += int(line.split()[-1])
            elif ('Nonlinear Solver Step' in line):
                curr_step = int(line.split()[4])
            elif ('Compute ice velocities, time = ' in line):
                # New velocity solve: record the last nonlinear step count.
                converged_iters.append(curr_step)
                curr_step = float(line.split()[-1])
            elif ('"SOLVE_STATUS_CONVERGED"' in line):
                split = line.split()
                iters_to_converge.append(int(split[split.index('"SOLVE_STATUS_CONVERGED"') + 2]))
            elif ("Compute dH/dt" in line):
                iters_to_converge.append(int(iter_number))
            elif len(split) > 0 and split[0].isdigit():
                # Bare iteration-count line; remember the latest number seen.
                iter_number = split[0]
        if iters_to_converge == []:
            # No explicit convergence markers; fall back to the last
            # iteration number encountered.
            iters_to_converge.append(int(iter_number))
        data = {"Dycore Type": dycore_type, "Processor Count": proc_count, "Converged Iterations": len(converged_iters), "Avg. Iterations to Converge": np.mean(iters_to_converge)}
        return elements.table("Output Log", headers, data)
def union(union_a, union_b):
    """Union of two vector layers.

    Issue https://github.com/inasafe/inasafe/issues/3186

    :param union_a: The vector layer for the union.
    :type union_a: QgsVectorLayer
    :param union_b: The vector layer for the union.
    :type union_b: QgsVectorLayer
    :return: The clip vector layer.
    :rtype: QgsVectorLayer

    .. versionadded:: 4.0
    """
    output_layer_name = union_steps['output_layer_name']
    output_layer_name = output_layer_name % (union_a.keywords['layer_purpose'], union_b.keywords['layer_purpose'])
    keywords_union_1 = union_a.keywords
    keywords_union_2 = union_b.keywords
    inasafe_fields_union_1 = keywords_union_1['inasafe_fields']
    inasafe_fields_union_2 = keywords_union_2['inasafe_fields']
    # NOTE(review): update() mutates inasafe_fields_union_1 (and therefore
    # union_a.keywords['inasafe_fields']) in place -- confirm intended.
    inasafe_fields = inasafe_fields_union_1
    inasafe_fields.update(inasafe_fields_union_2)
    parameters = {'INPUT': union_a, 'OVERLAY': union_b, 'OUTPUT': 'memory:'}
    # TODO implement callback through QgsProcessingFeedback object
    initialize_processing()
    feedback = create_processing_feedback()
    context = create_processing_context(feedback=feedback)
    result = processing.run('native:union', parameters, context=context)
    if result is None:
        raise ProcessingInstallationError
    union_layer = result['OUTPUT']
    union_layer.setName(output_layer_name)
    # use to avoid modifying original source
    union_layer.keywords = dict(union_a.keywords)
    union_layer.keywords['inasafe_fields'] = inasafe_fields
    union_layer.keywords['title'] = output_layer_name
    union_layer.keywords['layer_purpose'] = 'aggregate_hazard'
    union_layer.keywords['hazard_keywords'] = keywords_union_1.copy()
    union_layer.keywords['aggregation_keywords'] = keywords_union_2.copy()
    fill_hazard_class(union_layer)
    check_layer(union_layer)
    return union_layer
def conceptual_info(subsystem):
    """Return the conceptual information for a |Subsystem|.

    This is the distance from the subsystem's |CauseEffectStructure| to the
    null concept.
    """
    # Distance between the subsystem's full CES and an empty CES
    # (the null concept), rounded to the configured precision.
    ci = ces_distance(ces(subsystem), CauseEffectStructure((), subsystem=subsystem))
    return round(ci, config.PRECISION)
def check_selection_for_save(self, task, releasetype, descriptor):
    """Emit warnings if the descriptor is None or the current file
    is of a different task.

    :param task: the selected task
    :type task: :class:`djadapter.models.Task`
    :param releasetype: the releasetype to save (probably work)
    :type releasetype: str
    :param descriptor: the descriptor
    :type descriptor: str
    :returns: True if check was successfull.
    :rtype: bool
    :raises: None
    """
    if not descriptor:
        self.statusbar.showMessage("Please provide a descriptor!")
        return False
    try:
        # The descriptor must be strictly alphanumeric.
        jukedj.validators.alphanum_vld(descriptor)
    except ValidationError:
        self.statusbar.showMessage("Descriptor contains characters other than alphanumerical ones.")
        return False
    cur = self.get_current_file()
    # Saving across a different task or releasetype is not supported.
    if cur and task != cur.task:
        self.statusbar.showMessage("Task is different. Not supported atm!")
        return False
    elif cur and releasetype != cur.releasetype:
        self.statusbar.showMessage("Releasetype is different. Not supported atm!")
        return False
    return True
def snmp_server_user_ipv4_acl(self, **kwargs):
    """Auto Generated Code"""
    config = ET.Element("config")
    snmp_server = ET.SubElement(config, "snmp-server", xmlns="urn:brocade.com:mgmt:brocade-snmp")
    user = ET.SubElement(snmp_server, "user")
    # Populate the key elements in order: username, then ipv4-acl.
    for tag, arg in (("username", "username"), ("ipv4-acl", "ipv4_acl")):
        node = ET.SubElement(user, tag)
        node.text = kwargs.pop(arg)
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
def set_current_filename(self, filename, focus=True):
    """Set current filename and return the associated editor instance.

    Returns None implicitly when the filename is not in the stack.
    """
    index = self.has_filename(filename)
    if index is not None:
        if focus:
            self.set_stack_index(index)
        editor = self.data[index].editor
        if focus:
            editor.setFocus()
        else:
            # Keep history ordering up to date without stealing focus.
            self.stack_history.remove_and_append(index)
        return editor
def load(self, key=None):
    # type: (Hashable) -> Promise
    """Loads a key, returning a `Promise` for the value represented by that key."""
    if key is None:
        # NOTE(review): the concatenated message lacks a space after the
        # comma; left untouched since it is a runtime string.
        raise TypeError(("The loader.load() function must be called with a value," + "but got: {}.").format(key))
    cache_key = self.get_cache_key(key)
    # If caching and there is a cache-hit, return cached Promise.
    if self.cache:
        cached_promise = self._promise_cache.get(cache_key)
        if cached_promise:
            return cached_promise
    # Otherwise, produce a new Promise for this value.
    promise = Promise(partial(self.do_resolve_reject, key))  # type: ignore
    # If caching, cache this promise.
    if self.cache:
        self._promise_cache[cache_key] = promise
    return promise
def points_with_surrounding_gaps(points):
    """Insert stopper points around gaps in an increasing integer sequence.

    Any gap gets a stopper value at each end so a graph drawn from the
    result shows correct zero ranges, without filling in every missing
    value. For example:
    input:  [1, 2, 3, 10, 11, 13]
    output: [0, 1, 2, 3, 4, 9, 10, 11, 12, 13]
    (a stopper also precedes the first point, relative to -1)
    """
    padded = []
    previous = -1
    for current in points:
        gap = current - previous
        if gap == 2:
            # Exactly one missing value: a single stopper fills the gap.
            padded.append(previous + 1)
        elif gap != 1:
            # Wider gap: one stopper after the previous point and one
            # just before the current point.
            padded.append(previous + 1)
            padded.append(current - 1)
        padded.append(current)
        previous = current
    return padded
def content(self):
    """Return the answer content as processed HTML.

    :return: answer content
    :rtype: str
    """
    # Locate the answer wrapper div, extract the editable content node,
    # then run it through the project's HTML post-processing.
    answer_wrap = self.soup.find('div', id='zh-question-answer-wrap')
    content = answer_wrap.find('div', class_='zm-editable-content')
    content = answer_content_process(content)
    return content
def only(self, *fields):
    '''Restricts the fields to be fetched when mapping. Set to `__model` to fetch all fields define in the ModelIndex.'''
    s = self._clone()
    # The sentinel '__model' means "fetch everything defined on the model".
    if len(fields) == 1 and fields[0] == '__model':
        s._only = '__model'
    else:
        s._only = fields
    return s
def report(self, simulation, state):
    """Generate a report.

    Parameters
    ----------
    simulation : Simulation
        The Simulation to generate a report for
    state : State
        The current state of the simulation
    """
    if not self._initialized:
        # Capture baselines on the first call so rates are measured
        # relative to when reporting started.
        self._initial_clock_time = datetime.now()
        self._initial_simulation_time = state.getTime()
        self._initial_steps = simulation.currentStep
        self._initialized = True
    steps = simulation.currentStep
    time = datetime.now() - self._initial_clock_time
    days = time.total_seconds() / 86400.0
    ns = (state.getTime() - self._initial_simulation_time).value_in_unit(u.nanosecond)
    margin = ' ' * self.margin
    ns_day = ns / days
    # Estimated wall-clock seconds remaining, extrapolated linearly.
    delta = ((self.total_steps - steps) * time.total_seconds()) / steps
    # remove microseconds to have cleaner output
    remaining = timedelta(seconds=int(delta))
    percentage = 100.0 * steps / self.total_steps
    if ns_day:
        template = '{}{}/{} steps ({:.1f}%) - {} left @ {:.1f} ns/day \r'
    else:
        # Shorter template; str.format ignores the extra positional args.
        template = '{}{}/{} steps ({:.1f}%) \r'
    report = template.format(margin, steps, self.total_steps, percentage, remaining, ns_day)
    self._out.write(report)
    if hasattr(self._out, 'flush'):
        self._out.flush()
def select(self, selection_specs=None, **selection):
    """Applies selection by dimension name

    Applies a selection along the dimensions of the object using
    keyword arguments. The selection may be narrowed to certain
    objects using selection_specs. For container objects the
    selection will be applied to all children as well.

    Selections may select a specific value, slice or set of values:

    * value: Scalar values will select rows along with an exact
      match, e.g.: ds.select(x=3)
    * slice: Slices may be declared as tuples of the upper and
      lower bound, e.g.: ds.select(x=(0, 3))
    * values: A list of values may be selected using a list or
      set, e.g.: ds.select(x=[0, 1, 2])

    Args:
        selection_specs: List of specs to match on
            A list of types, functions, or type[.group][.label]
            strings specifying which objects to apply the
            selection on.
        **selection: Dictionary declaring selections by dimension
            Selections can be scalar values, tuple ranges, lists
            of discrete values and boolean arrays

    Returns:
        Returns an Dimensioned object containing the selected data
        or a scalar if a single value was selected
    """
    # Normalize a single spec into a list.
    if selection_specs is not None and not isinstance(selection_specs, (list, tuple)):
        selection_specs = [selection_specs]
    # Keep only selections on known dimensions (plus the special
    # 'selection_mask' key).
    selection = {dim: sel for dim, sel in selection.items() if dim in self.dimensions() + ['selection_mask']}
    # If the specs exclude this object, or nothing remains to select,
    # return self unchanged.
    if (selection_specs and not any(self.matches(sp) for sp in selection_specs) or not selection):
        return self
    data = self.interface.select(self, **selection)
    if np.isscalar(data):
        return data
    else:
        return self.clone(data)
def get_intern_pattern(self, url=None):
    """Get pattern for intern URL matching.

    @return: non-empty regex pattern or None
    @rtype: String or None
    """
    target = url
    if target is None:
        # Derive the URL from this object's base/ref/parent URLs.
        target = absolute_url(self.base_url, self.base_ref, self.parent_url)
    # Delegate to the module-level helper of the same name.
    return get_intern_pattern(target) if target else None
def extend_hierarchy(levels, strength, CF, keep):
    """Extend the multigrid hierarchy by one coarser level."""
    def unpack_arg(v):
        # Accept either `name` or `(name, kwargs)` specifications.
        if isinstance(v, tuple):
            return v[0], v[1]
        return v, {}

    A = levels[-1].A

    # Compute the strength-of-connection matrix C, where larger
    # C[i, j] denote stronger couplings between i and j.
    fn, kwargs = unpack_arg(strength)
    strength_dispatch = {
        'symmetric': symmetric_strength_of_connection,
        'classical': classical_strength_of_connection,
        'distance': distance_strength_of_connection,
        'ode': evolution_strength_of_connection,
        'evolution': evolution_strength_of_connection,
        'energy_based': energy_based_strength_of_connection,
        'algebraic_distance': algebraic_distance,
        'affinity': affinity_distance,
    }
    if fn is None:
        C = A
    elif fn in strength_dispatch:
        C = strength_dispatch[fn](A, **kwargs)
    else:
        raise ValueError('unrecognized strength of connection method: %s' % str(fn))

    # Generate the C/F splitting
    fn, kwargs = unpack_arg(CF)
    splitting_dispatch = {
        'RS': split.RS,
        'PMIS': split.PMIS,
        'PMISc': split.PMISc,
        'CLJP': split.CLJP,
        'CLJPc': split.CLJPc,
        'CR': CR,
    }
    if fn in splitting_dispatch:
        splitting = splitting_dispatch[fn](C, **kwargs)
    else:
        raise ValueError('unknown C/F splitting method (%s)' % CF)

    # Generate the interpolation matrix that maps from the coarse-grid
    # to the fine-grid.
    P = direct_interpolation(A, C, splitting)
    # Generate the restriction matrix that maps from the fine-grid to
    # the coarse-grid.
    R = P.T.tocsr()

    # Store relevant information for this level.
    if keep:
        levels[-1].C = C                  # strength of connection matrix
        levels[-1].splitting = splitting  # C/F splitting
    levels[-1].P = P                      # prolongation operator
    levels[-1].R = R                      # restriction operator

    levels.append(multilevel_solver.level())
    # Form the next level through the Galerkin product.
    A = R * A * P
    levels[-1].A = A
def _validate(self, val):
    """Validate ``val`` against ``self.class_``.

    ``val`` must be None (permitted only when ``self.allow_None`` is true),
    an instance of ``self.class_`` if ``self.is_instance`` is True, or a
    subclass of ``self.class_`` if ``self.is_instance`` is False.

    Raises:
        ValueError: if ``val`` fails the check.
    """
    if isinstance(self.class_, tuple):
        class_name = '(%s)' % ', '.join(cl.__name__ for cl in self.class_)
    else:
        class_name = self.class_.__name__
    # Accept a permitted None up front so the checks below never see it.
    if val is None and self.allow_None:
        return
    if self.is_instance:
        if not isinstance(val, self.class_):
            raise ValueError("Parameter '%s' value must be an instance of %s, not '%s'" % (self.name, class_name, val))
    else:
        # Fix: guard with isinstance(val, type) — issubclass() raises
        # TypeError on non-class arguments (e.g. None when allow_None is
        # False), which callers expecting ValueError would not catch.
        if not (isinstance(val, type) and issubclass(val, self.class_)):
            # Fix: report self.name (as the instance branch does) rather
            # than val.__name__, which crashes for non-class values.
            raise ValueError("Parameter '%s' must be a subclass of %s, not '%s'" % (self.name, class_name, getattr(val, '__name__', val)))
def get_assets(self):
    """Gets the asset list resulting from a search.

    return: (osid.repository.AssetList) - the asset list
    raise:  IllegalState - the list has already been retrieved
    *compliance: mandatory -- This method must be implemented.*
    """
    already_fetched = self.retrieved
    if already_fetched:
        # The result list is single-use; a second retrieval is an error.
        raise errors.IllegalState('List has already been retrieved.')
    self.retrieved = True
    asset_list = objects.AssetList(self._results, runtime=self._runtime)
    return asset_list
def get(self, key):
    """Look up a single value in the pickled cache file.

    Args:
        key (str): Key to retrieve the value.
    Returns: the stored value, or None when the key is absent.
    """
    self._create_file_if_none_exists()
    with open(self.filename, 'rb') as handle:
        stored = pickle.load(handle)
    return stored.get(key, None)
def _try_prop_method(self, instance, value, method_name):
    """Run ``method_name`` on each union prop, returning the first success.

    Errors from every failing prop are gathered and reported together
    through ``self.error`` if none of the props succeeds.
    """
    collected = []
    for candidate in self.props:
        try:
            return getattr(candidate, method_name)(instance, value)
        except GENERIC_ERRORS as err:
            # Accumulate detailed messages when the error exposes them.
            if hasattr(err, 'error_tuples'):
                collected.extend(tup.message for tup in err.error_tuples)
    if collected:
        extra = 'Possible explanation:' + ''.join(
            '\n - {}'.format(message) for message in collected)
    else:
        extra = ''
    self.error(instance, value, extra=extra)
def _w_sigma_delta(self, sigma, delta):
    """Invert the variance relation to recover the w parameter.

    :param sigma: standard deviation
    :param delta: skewness-related parameter
    :return: w parameter
    """
    denom = 1 - 2 * delta ** 2 / np.pi
    return np.sqrt(sigma ** 2 / denom)
def get_instance(self, payload):
    """Build an instance of AssignedAddOnExtensionInstance.

    :param dict payload: Payload response from the API
    :returns: twilio.rest.api.v2010.account.incoming_phone_number.assigned_add_on.assigned_add_on_extension.AssignedAddOnExtensionInstance
    :rtype: twilio.rest.api.v2010.account.incoming_phone_number.assigned_add_on.assigned_add_on_extension.AssignedAddOnExtensionInstance
    """
    solution = self._solution
    return AssignedAddOnExtensionInstance(
        self._version,
        payload,
        account_sid=solution['account_sid'],
        resource_sid=solution['resource_sid'],
        assigned_add_on_sid=solution['assigned_add_on_sid'],
    )
def _import_model(models, crumbs):
    """Change the nested items of the paleoModel data. Overwrite the data in-place.

    :param list models: Metadata
    :param str crumbs: Crumbs
    :return dict _models: Metadata
    """
    # Fix: the entry log call was '"enter import_model".format(crumbs)' —
    # no placeholder, so crumbs was silently dropped. Match the exit message.
    logger_jsons.info("enter import_model: {}".format(crumbs))
    _models = OrderedDict()
    try:
        # Keep each original dictionary, but replace the three main entries below.
        for _idx, model in enumerate(models):
            # Direct replacement of the table columns. No table name,
            # no table work needed.
            if "summaryTable" in model:
                model["summaryTable"] = _idx_table_by_name(
                    model["summaryTable"], "{}{}{}".format(crumbs, _idx, "summary"))
            if "ensembleTable" in model:
                model["ensembleTable"] = _idx_table_by_name(
                    model["ensembleTable"], "{}{}{}".format(crumbs, _idx, "ensemble"))
            if "distributionTable" in model:
                model["distributionTable"] = _idx_table_by_name(
                    model["distributionTable"], "{}{}{}".format(crumbs, _idx, "distribution"))
            _table_name = "{}{}".format(crumbs, _idx)
            _models[_table_name] = model
    except Exception as e:
        logger_jsons.error("import_model: {}".format(e))
        print("Error: import_model: {}".format(e))
    logger_jsons.info("exit import_model: {}".format(crumbs))
    return _models
def get_subdomain_ops_at_txid(txid, db_path=None, zonefiles_dir=None):
    """Static method for getting the list of subdomain operations accepted
    at a given txid. Includes unaccepted subdomain operations.
    """
    opts = get_blockstack_opts()
    if not is_subdomains_enabled(opts):
        return []
    # Fall back to configured paths when none are given explicitly.
    db_path = opts['subdomaindb_path'] if db_path is None else db_path
    zonefiles_dir = opts['zonefiles'] if zonefiles_dir is None else zonefiles_dir
    database = SubdomainDB(db_path, zonefiles_dir)
    return database.get_subdomain_ops_at_txid(txid)
def __parseResponse(self, result):
    """Parse the server response into a list of row dictionaries.

    Values are coerced according to the SQL column type reported by
    ``self.getOutputColumn``: ``int*`` -> int, ``float*`` -> float,
    ``timestamp*`` -> datetime (fractional seconds re-added as
    microseconds); anything else passes through unchanged.
    """
    response = []
    for data in result['data']:
        row = {}
        for key, raw in data.items():
            column = self.getOutputColumn(key)
            # Fix: renamed local `type` (shadowed the builtin) and replaced
            # `!= None` comparisons with `is not None`.
            col_type = column.getSqlColumnType() if column is not None else None
            if col_type is not None and col_type.startswith('int'):
                row[key] = int(raw)
            elif col_type is not None and col_type.startswith('float'):
                row[key] = float(raw)
            elif col_type is not None and col_type.startswith('timestamp'):
                # Split off the fractional seconds and re-add them as a timedelta.
                dt, mSecs = raw.split(".")
                parsed = datetime.strptime(dt, "%Y-%m-%dT%H:%M:%S")
                row[key] = parsed + timedelta(microseconds=int(mSecs))
            else:
                row[key] = raw
        response.append(row)
    return response
def swap_atom_keys(self, swap_dict, dict_key='atom_ids'):
    """Swap a force field atom id for another user-defined value.

    This modifies all values in :attr:`MolecularSystem.system['atom_ids']`
    that match the criteria, and can be used to decipher a whole forcefield
    when an appropriate dictionary is passed.

    Example
    -------
    Exchange all atom ids 'he' for 'H'::

        pywindow.molecular.MolecularSystem.swap_atom_keys({'he': 'H'})

    Parameters
    ----------
    swap_dict : :class:`dict`
        A dictionary containing force field atom ids (keys) to be swapped
        with the corresponding values.
    dict_key : :class:`str`
        A key in :attr:`MolecularSystem.system` on which to perform the
        swap. (default='atom_ids')

    Returns
    -------
    None : :class:`NoneType`
    """
    # Similar situation to the one from decipher_atom_keys: fall back to
    # 'elements' when no force field atom ids are present in the system.
    if 'atom_ids' not in self.system.keys():
        dict_key = 'elements'
    entries = self.system[dict_key]
    for position in range(len(entries)):
        # Inner loop over all swap keys preserves the original chained-swap
        # semantics when a replacement value is itself a key.
        for old, new in swap_dict.items():
            if entries[position] == old:
                entries[position] = new
def cover(self):
    """Album cover as a :class:`Picture` object (created lazily on first access)."""
    cached = self._cover
    if not cached:
        cached = Picture(self._cover_url, self._connection)
        self._cover = cached
    return cached
def humanize_filesize(value):
    """Return a humanized file size string for ``value`` bytes."""
    value = float(value)
    if value == 1:
        return '1 Byte'
    elif value < 1024:
        # Fix: the original had a second, identical `elif value < 1024`
        # branch (returning '%dB') that was unreachable dead code; removed.
        return '%d Bytes' % value
    for i, s in enumerate(SUFFIXES):
        unit = 1024 ** (i + 2)
        if value < unit:
            return '%.1f %s' % ((1024 * value / unit), s)
    # Fallback: value exceeds the largest suffix; reuse the last unit/suffix.
    return '%.1f %s' % ((1024 * value / unit), s)
def stationary_distribution_sensitivity(T, j):
    r"""Calculate the sensitivity matrix for entry j of the stationary
    distribution vector given transition matrix T.

    Parameters
    ----------
    T : numpy.ndarray, shape=(n, n)
        Transition matrix
    j : int
        entry of the stationary distribution for which the sensitivity
        is to be computed

    Returns
    -------
    x : ndarray, shape=(n, n)
        Sensitivity matrix for entry ``j`` around transition matrix T.
        Reversibility is not assumed.

    Remark
    ------
    Note that this function uses a different normalization convention
    for the sensitivity compared to eigenvector_sensitivity. See there
    for further information.
    """
    n = len(T)
    left_ev = numpy.ones(n)
    right_ev = stationary_distribution(T)
    Tt = numpy.transpose(T)
    rhs = numpy.zeros(n)
    rhs[j] = 1.0
    # Shifted system (T^t - 1*I), augmented with the all-ones row so the
    # solution is normalized (the sum over rEV is constant).
    shifted = Tt - 1.0 * numpy.identity(n)
    shifted = numpy.concatenate((shifted, [left_ev]))
    solution = numpy.linalg.lstsq(numpy.transpose(shifted), rhs, rcond=-1)
    phi = numpy.delete(solution[0], -1)
    sensitivity = (-numpy.outer(right_ev, phi)
                   + numpy.dot(phi, right_ev) * numpy.outer(right_ev, left_ev))
    return sensitivity
def text_dict_write(fpath, dict_):
    """Very naive, but readable way of storing a dictionary on disk.

    FIXME: This broke on RoseMary's big dataset for unknown reasons (the
    read-back gave bad syntax, and the SyntaxError did not seem to be
    excepted).
    """
    rendered = util_str.repr4(dict_, strvals=False)
    if VERBOSE:
        print('[cache] ' + str(rendered))
    util_io.write_to(fpath, rendered)
def get_feedback(self, feedback=None, **kwargs):
    """[NOT IMPLEMENTED]

    :raises NotImplementedError: because it isn't
    """
    # Fix: the filtering logic below the raise was unreachable dead code;
    # it is kept here only as a sketch of the intended implementation.
    # if feedback is not None:
    #     kwargs['feedback'] = feedback
    # kwargs['context'] = 'feedback'
    # return self.filter(**kwargs)
    raise NotImplementedError
def findPrimerBidi(primer, seq):
    """Look for a primer in a sequence and in its reverse complement.

    @param primer: A C{str} primer sequence.
    @param seq: A BioPython C{Bio.Seq} sequence.
    @return: A C{tuple} of two lists. The first holds (zero-based) ascending
        offsets into the sequence where the primer matches. The second holds
        ascending offsets into the original sequence where the primer matches
        the reverse complement of the sequence. Either list may be empty when
        there are no matches.
    """
    # We reverse complement the primer (not the sequence) to find the
    # reverse matches: this lets us reuse findPrimer and handle overlapping
    # matches correctly.
    forwardOffsets = findPrimer(primer, seq)
    reverseOffsets = findPrimer(reverse_complement(primer), seq)
    return forwardOffsets, reverseOffsets
def iso8601_datetime(d):
    """Return a string representation of a date that the Twilio API understands.

    Format is YYYY-MM-DDTHH:MM:SSZ. The unset sentinel and plain strings are
    returned unchanged; any other type yields None.
    """
    if d == values.unset:
        return d
    if isinstance(d, (datetime.datetime, datetime.date)):
        return d.strftime('%Y-%m-%dT%H:%M:%SZ')
    if isinstance(d, str):
        return d
def _getUniqueName(self, fileName, jobStoreID=None, sourceFunctionName="x"):
    """Create a unique file name within a jobStore directory or tmp directory.

    :param fileName: A file name; may be a full path, only the basename is used.
    :param jobStoreID: If given, the returned path lies in the jobStore
        directory; otherwise the tmp directory is used.
    :param sourceFunctionName: Name of the function that generated this file
        (defaults to "x" for non-normal names). Used for tracking files.
    :return: The full path with a unique file name.
    """
    handle, tempPath = self._getTempFile(jobStoreID)
    os.close(handle)
    os.unlink(tempPath)
    # Drop the .tmp extension, then splice in the source function name
    # and the requested file's basename.
    root, _ext = os.path.splitext(tempPath)
    candidate = '{}-{}-{}'.format(root, sourceFunctionName, os.path.basename(fileName))
    if os.path.exists(tempPath):
        return tempPath
    # Give up; just return the derived name to avoid conflicts.
    return candidate
def get_tab(self, tab_name, allow_disabled=False):
    """Return a specific tab from this tab group.

    Returns ``None`` when the tab is missing, not allowed, or disabled.
    A disabled (but allowed) tab can still be returned by passing
    ``allow_disabled=True``.
    """
    candidate = self._tabs.get(tab_name, None)
    if not candidate or not candidate._allowed:
        return None
    if candidate._enabled or allow_disabled:
        return candidate
    return None
def option(func, *args, **attrs):
    """Args:
        func (function): Function defining this option
        *args: Optional extra short flag name
        **attrs: Optional attr overrides provided by caller

    Returns:
        function: Click decorator
    """
    # Without click installed there is nothing to decorate with.
    if click is None:
        return func

    def decorator(wrapped):
        flag_name = attrs.pop("name", func.__name__.replace("_", "-"))
        attrs.setdefault("help", func.__doc__)
        attrs.setdefault("required", False)
        if not attrs.get("is_flag"):
            # Value-taking options get sensible display defaults.
            attrs.setdefault("show_default", True)
            attrs.setdefault("metavar", flag_name.replace("-", "_").upper())
            attrs.setdefault("type", str)
        if not flag_name.startswith("-"):
            flag_name = "--%s" % flag_name
        return click.option(flag_name, *args, **attrs)(wrapped)

    return decorator
def _clean_flags(args, caller):
    '''Sanitize flags passed into df'''
    if args is None:
        return ''
    allowed = ('a', 'B', 'h', 'H', 'i', 'k', 'l', 'P', 't', 'T', 'x', 'v')
    cleaned = []
    for flag in args:
        if flag not in allowed:
            raise CommandExecutionError('Invalid flag passed to {0}'.format(caller))
        cleaned.append(flag)
    return ''.join(cleaned)
def profile(ids=None, track_ids=None, buckets=None, limit=False):
    """Get the profiles for multiple songs at once.

    Args:
        ids (str or list): a song ID or list of song IDs

    Kwargs:
        track_ids (str or list): a track ID or list of track IDs
        buckets (list): strings specifying which buckets to retrieve
        limit (bool): whether to limit the results to one of the id
            spaces specified in buckets

    Returns:
        A list of Song objects, e.g.::

            >>> song_ids = ['SOBSLVH12A8C131F38', 'SOXMSGY1338A5D5873']
            >>> songs = song.profile(song_ids, buckets=['audio_summary'])
            >>> songs[0].audio_summary
            {u'danceability': 0.6454..., u'duration': 255.92..., ...}
    """
    def as_list(value):
        # Accept a bare id or a list of ids.
        return value if isinstance(value, list) else [value]

    kwargs = {}
    if ids:
        kwargs['id'] = as_list(ids)
    if track_ids:
        kwargs['track_id'] = as_list(track_ids)
    buckets = buckets or []
    if buckets:
        kwargs['bucket'] = buckets
    if limit:
        kwargs['limit'] = 'true'
    result = util.callm("%s/%s" % ('song', 'profile'), kwargs)
    return [Song(**util.fix(s_dict)) for s_dict in result['response']['songs']]
def _evolve_kwargs(self):
    """Collect this object's non-None evolve parameters as keyword arguments.

    Intended to be passed on to ``algorithm.evolve(...)``.

    Returns:
        dict: the subset of the recognized attributes whose values are
        not None.
    """
    valid_evolve_kwargs = ('max_generations', 'max_evaluations', 'pop_size',
                           'neighborhood_size', 'tournament_size', 'mutation_rate')
    filtered_evolve_kwargs = dict()
    for key in valid_evolve_kwargs:
        attr_value = getattr(self, key)
        if attr_value is not None:
            filtered_evolve_kwargs[key] = attr_value
    # Fix: the original built this dict and then returned a hard-coded `{}`
    # (apparently a debugging leftover), discarding all collected arguments.
    return filtered_evolve_kwargs
def set_state(self, state, enable):
    """Set the state.

    :param state: the ``States`` member to change.
    :param enable: desired on/off value for the state.
    :return: ``True`` when the change was queued (or the state already
        matches); ``False`` when the state cannot be changed this way.
    """
    is_enabled = self.get_state(state)
    if is_enabled == enable:
        # Already in the desired state; nothing to send.
        return True
    key = None
    desired_states = [{'state': state, 'enabled': not is_enabled}]
    if state == States.FILTER_LOW_SPEED:
        if not self._multi_speed_pump:
            # Low-speed filtering only exists on multi-speed pumps.
            return False
        # Send the FILTER key once.
        # If the pump is in high speed, it will switch to low speed.
        # If the pump is off the retry mechanism will send an additional
        # FILTER key to switch into low speed.
        # If the pump is in low speed then we pretend the pump is off;
        # the retry mechanism will send an additional FILTER key
        # to switch into high speed.
        key = Keys.FILTER
        desired_states.append({'state': States.FILTER, 'enabled': True})
    else:
        # See if this state has a corresponding Key
        try:
            key = Keys[state.name]
        except KeyError:
            # TODO: send the appropriate combination of keys
            # to enable the state
            return False
    frame = self._get_key_event_frame(key)
    # Queue it to send immediately following the reception
    # of a keep-alive packet in an attempt to avoid bus collisions.
    self._send_queue.put({'frame': frame, 'desired_states': desired_states, 'retries': 10})
    return True
def update(self, ell, k):
    """Update the posterior and estimates after a label is sampled.

    Parameters
    ----------
    ell : int
        sampled label: 0 or 1
    k : int
        index of stratum where the label was sampled
    """
    # Beta-posterior bookkeeping: a label of 1 bumps alpha, 0 bumps beta.
    self.alpha_[k] = self.alpha_[k] + ell
    self.beta_[k] = self.beta_[k] + (1 - ell)
    self._calc_theta()
    if self.store_variance:
        self._calc_var_theta()
def publish(self):
    """Start asynchronous publishing.

    Publishing takes into account all available and currently
    toggled plug-ins and instances.
    """
    def get_data():
        model = self.data["models"]["item"]
        # Communicate with host to retrieve current plugins and instances.
        # This can potentially take a very long time; it is run
        # asynchronously and initiates processing once complete.
        host_plugins = dict((p.id, p) for p in self.host.cached_discover)
        host_context = dict((i.id, i) for i in self.host.cached_context)
        plugins = list()
        instances = list()
        for plugin in models.ItemIterator(model.plugins):
            # Exclude Collectors — publishing re-runs everything after
            # collection, not collection itself.
            if pyblish.lib.inrange(number=plugin.order, base=pyblish.api.Collector.order):
                continue
            plugins.append(host_plugins[plugin.id])
        for instance in models.ItemIterator(model.instances):
            instances.append(host_context[instance.id])
        return plugins, instances

    def on_data_received(args):
        # args is the (plugins, instances) pair from get_data.
        self.run(*args, callback=on_finished)

    def on_finished():
        # Notify the host that publishing has completed.
        self.host.emit("published", context=None)

    util.defer(get_data, callback=on_data_received)
def monitor_deletion():
    """Build helpers for checking correct deletion of weakref-able objects.

    Example usage::

        monitor, is_alive = monitor_deletion()
        obj = set()
        monitor(obj, "obj")
        assert is_alive("obj")      # True because there is a ref to `obj`
        del obj
        assert not is_alive("obj")  # True because `obj` is deleted
    """
    registry = {}

    def _make_callback(name):
        # Invoked by the weakref machinery when the monitored object dies.
        def _callback(weak):
            del registry[name]
        return _callback

    def monitor(item, name):
        registry[name] = ref(item, _make_callback(name))

    def is_alive(name):
        return registry.get(name, None) is not None

    return monitor, is_alive
def tag(self: object, tokens: List[str]):
    """Docs (mostly) inherited from TaggerI; cf.
    https://www.nltk.org/_modules/nltk/tag/api.html#TaggerI.tag

    Two tweaks:
    1. Properly handle 'verbose' listing of the current tagger in
       the case of None (i.e. ``if tag: etc.``)
    2. Keep track of taggers and change the return value depending on
       the 'verbose' flag

    :rtype: list
    :type tokens: list
    :param tokens: List of tokens to tag
    """
    tags = []
    taggers = []
    for index in range(len(tokens)):
        current_tag, current_tagger = self.tag_one(tokens, index, tags)
        tags.append(current_tag)
        # Only record the tagger when it actually produced a tag.
        if current_tag:
            taggers.append(str(current_tagger))
        else:
            taggers.append(None)
    if self.VERBOSE:
        return list(zip(tokens, tags, taggers))
    return list(zip(tokens, tags))
def getUpperDetectionLimit(self):
    """Returns the Upper Detection Limit (UDL) that applies to this
    analysis in particular. If no value is set, or the analysis service
    doesn't allow manual input of detection limits, returns the value set
    by default in the Analysis Service.
    """
    if self.isUpperDetectionLimit():
        result = self.getResult()
        try:
            # In this case, the result itself is the UDL.
            return float(result)
        except (TypeError, ValueError):
            # Fix: the warning previously said "lower detection limit" /
            # "LDL" (copy-paste from the lower-limit sibling) and
            # misspelled "Returning".
            logger.warn("The result for the analysis %s is an upper "
                        "detection limit, but not floatable: '%s'. "
                        "Returning AS's default UDL." % (self.id, result))
    return AbstractBaseAnalysis.getUpperDetectionLimit(self)
def updateUserTone(conversationPayload, toneAnalyzerPayload, maintainHistory):
    """updateUserTone processes the Tone Analyzer payload to pull out the emotion,
    writing and social tones, and identify the meaningful tones (i.e.,
    those tones that meet the specified thresholds).
    The conversationPayload json object is updated to include these tones.

    @param conversationPayload json object returned by the Watson Conversation
        Service
    @param toneAnalyzerPayload json object returned by the Watson Tone Analyzer
        Service
    @param maintainHistory whether tone history is kept on the user object
    @returns conversationPayload where the user object has been updated with tone
        information from the toneAnalyzerPayload
    """
    emotionTone = None
    writingTone = None
    socialTone = None
    # Ensure a context object exists before attaching user state to it.
    if 'context' not in conversationPayload:
        conversationPayload['context'] = {}
    if 'user' not in conversationPayload['context']:
        # NOTE(review): this replaces the whole context with initUser()'s
        # return value; presumably initUser() returns {'user': ...} —
        # confirm, otherwise this may have been meant for context['user'].
        conversationPayload['context'] = initUser()
    # For convenience sake, define a variable for the user object
    user = conversationPayload['context']['user']
    # Extract the tones - emotion, writing and social
    if toneAnalyzerPayload and toneAnalyzerPayload['document_tone']:
        for toneCategory in toneAnalyzerPayload['document_tone']['tone_categories']:
            if toneCategory['category_id'] == EMOTION_TONE_LABEL:
                emotionTone = toneCategory
            if toneCategory['category_id'] == LANGUAGE_TONE_LABEL:
                writingTone = toneCategory
            if toneCategory['category_id'] == SOCIAL_TONE_LABEL:
                socialTone = toneCategory
    # Per-tone threshold handling / history maintenance is delegated.
    updateEmotionTone(user, emotionTone, maintainHistory)
    updateWritingTone(user, writingTone, maintainHistory)
    updateSocialTone(user, socialTone, maintainHistory)
    conversationPayload['context']['user'] = user
    return conversationPayload
def log_predictive_density(self, y_test, mu_star, var_star, Y_metadata=None):
    """Log predictive density of ``y_test`` under a Gaussian with mean
    ``mu_star`` and variance ``var_star + self.variance``.

    Assumes independence across data points.
    """
    total_var = var_star + self.variance
    residual = y_test - mu_star
    return -0.5 * (np.log(2 * np.pi) + np.log(total_var)
                   + np.square(residual) / total_var)
def get_merge_rules(schema=None):
    """Returns merge rules as key-value pairs, in which the key is a JSON path
    as a tuple, and the value is a list of merge properties whose values are
    `true`.
    """
    if not schema:
        # Default to the release schema of the latest tag.
        schema = get_release_schema_url(get_tags()[-1])
    if isinstance(schema, dict):
        deref_schema = jsonref.JsonRef.replace_refs(schema)
    else:
        deref_schema = _get_merge_rules_from_url_or_path(schema)
    return dict(_get_merge_rules(deref_schema['properties']))
def is_uniformly_weighted(self):
    """``True`` if the weighting is the same for all space points."""
    try:
        # Return the cached result from a previous call.
        return self.__is_uniformly_weighted
    except AttributeError:
        pass
    bdry_fracs = self.partition.boundary_cell_fractions
    result = (np.allclose(bdry_fracs, 1.0)
              or self.exponent == float('inf')
              or not getattr(self.tspace, 'is_weighted', False))
    self.__is_uniformly_weighted = result
    return result
def subst_dict(target, source):
    """Create a dictionary for substitution of special construction variables.

    This translates the following special arguments:

    target - the target (object or array of objects), used to generate
             the TARGET and TARGETS construction variables
    source - the source (object or array of objects), used to generate
             the SOURCES and SOURCE construction variables
    """
    # Fix: renamed the local `dict` to `subst` to stop shadowing the builtin.
    subst = {}
    if target:
        def get_tgt_subst_proxy(thing):
            try:
                subst_proxy = thing.get_subst_proxy()
            except AttributeError:
                # probably a string, just return it
                subst_proxy = thing
            return subst_proxy
        tnl = NLWrapper(target, get_tgt_subst_proxy)
        subst['TARGETS'] = Targets_or_Sources(tnl)
        subst['TARGET'] = Target_or_Source(tnl)
        # This is a total cheat, but hopefully this dictionary goes
        # away soon anyway. We just let these expand to $TARGETS
        # because that's "good enough" for the use of ToolSurrogates
        # (see test/ToolSurrogate.py) to generate documentation.
        subst['CHANGED_TARGETS'] = '$TARGETS'
        subst['UNCHANGED_TARGETS'] = '$TARGETS'
    else:
        subst['TARGETS'] = NullNodesList
        subst['TARGET'] = NullNodesList
    if source:
        def get_src_subst_proxy(node):
            try:
                rfile = node.rfile
            except AttributeError:
                pass
            else:
                node = rfile()
            try:
                return node.get_subst_proxy()
            except AttributeError:
                # probably a String, just return it
                return node
        snl = NLWrapper(source, get_src_subst_proxy)
        subst['SOURCES'] = Targets_or_Sources(snl)
        subst['SOURCE'] = Target_or_Source(snl)
        # Same cheat as above: expand to $SOURCES for ToolSurrogates.
        subst['CHANGED_SOURCES'] = '$SOURCES'
        subst['UNCHANGED_SOURCES'] = '$SOURCES'
    else:
        subst['SOURCES'] = NullNodesList
        subst['SOURCE'] = NullNodesList
    return subst
def elementTypeName(self):
    """String representation of the element type."""
    field = self._h5Dataset.dtype.fields[self.nodeName]
    return str(field[0])
def parse(content):
    """Parse the content of a .env file (a line-delimited KEY=value format)
    into a dictionary mapping keys to values.
    """
    values = {}
    for line in content.splitlines():
        tokens = list(shlex.shlex(line, posix=True))
        # A well-formed assignment needs at least NAME, '=', VALUE.
        if len(tokens) < 3:
            continue
        name, op = tokens[0], tokens[1]
        if op != '=':
            continue
        if not re.match(r'[A-Za-z_][A-Za-z_0-9]*', name):
            continue
        value = ''.join(tokens[2:])
        # Decode literal escape sequences for newlines and tabs.
        value = value.replace(r'\n', '\n').replace(r'\t', '\t')
        values[name] = value
    return values
def item(self, infohash, prefetch=None, cache=False):
    """Fetch a single item by its info hash."""
    matches = self.items(infohash, prefetch, cache)
    return next(matches)
def explained_variance(returns, values):
    """Calculate how much of the variance in ``returns`` the ``values`` explain."""
    residual_variance = torch.var(returns - values)
    ratio = residual_variance / torch.var(returns)
    return (1 - ratio).item()
def _random_stochastic_matrix(m, n, k=None, sparse=False, format='csr', random_state=None):
    """Generate a "non-square stochastic matrix" of shape (m, n), which
    contains as rows m probability vectors of length n with k nonzero
    entries.

    For other parameters, see `random_stochastic_matrix`.
    """
    if k is None:
        k = n
    # m probability vectors of dimension k, shape (m, k)
    probvecs = probvec(m, k, random_state=random_state)
    if k == n:
        # Rows already span the full width; no scattering required.
        if sparse:
            return scipy.sparse.coo_matrix(probvecs).asformat(format)
        return probvecs
    # k < n: scatter each row's k entries into randomly chosen columns.
    rows = np.repeat(np.arange(m), k)
    cols = sample_without_replacement(n, k, num_trials=m,
                                     random_state=random_state).ravel()
    data = probvecs.ravel()
    if sparse:
        coo = scipy.sparse.coo_matrix((data, (rows, cols)), shape=(m, n))
        return coo.asformat(format)
    P = np.zeros((m, n))
    P[rows, cols] = data
    return P
def open_outside_spyder(self, fnames):
    """Open file outside Spyder with the appropriate application.

    If this does not work, open the unknown file in Spyder as a text file.
    """
    for name in sorted(fnames):
        uri = file_uri(name)
        if not programs.start_file(uri):
            # Fall back to Spyder's own editor.
            self.sig_edit.emit(uri)
def read_handle(self, handle: int) -> bytes:
    """Read a handle from the device.

    :raises BluetoothBackendException: when no device is connected.
    """
    if self.is_connected():
        return self._device.char_read_handle(handle)
    raise BluetoothBackendException('Not connected to device!')
def render_linked_css(self, css_files: Iterable[str]) -> str:
    """Default method used to render the final css links for the
    rendered webpage.

    Override this method in a sub-classed controller to change the output.
    """
    paths = []   # ordered, de-duplicated css paths
    seen = set()  # type: Set[str]
    for css_path in css_files:
        if not is_absolute(css_path):
            css_path = self.static_url(css_path)
        if css_path in seen:
            continue
        seen.add(css_path)
        paths.append(css_path)
    tags = ('<link href="' + escape.xhtml_escape(p) + '" '
            'type="text/css" rel="stylesheet"/>' for p in paths)
    return "".join(tags)
def container_state(name=None, remote_addr=None, cert=None, key=None, verify_cert=True):
    '''Get container state

    name:
        Name of the container; when None, the state of every container
        on the server is returned.

    remote_addr:
        An URL to a remote Server, you also have to give cert and key if
        you provide remote_addr and its a TCP Address!

        Examples:
            https://myserver.lan:8443
            /var/lib/mysocket.sock

    cert:
        PEM Formatted SSL Certificate.

        Examples:
            ~/.config/lxc/client.crt

    key:
        PEM Formatted SSL Key.

        Examples:
            ~/.config/lxc/client.key

    verify_cert: True
        Wherever to verify the cert, this is by default True
        but in the most cases you want to set it off as LXD
        normaly uses self-signed certificates.
    '''
    client = pylxd_client_get(remote_addr, cert, key, verify_cert)

    if name is None:
        containers = client.containers.all()
    else:
        try:
            containers = [client.containers.get(name)]
        except pylxd.exceptions.LXDAPIException:
            raise SaltInvocationError(
                'Container \'{0}\' not found'.format(name)
            )

    states = []
    for container in containers:
        # Fix: removed a dead `state = {}` assignment that was immediately
        # overwritten by the real state object.
        state = container.state()
        # Snapshot every public attribute of the state object.
        states.append({
            container.name: {
                k: getattr(state, k)
                for k in dir(state)
                if not k.startswith('_')
            }
        })
    return states
def deprecated(message):
    """Deprecated function decorator.

    The returned decorator wraps a function so every call emits a
    DeprecationWarning with *message*, and prefixes the function's
    docstring with the message.

    :param message: warning text shown to callers of the deprecated function.
    :return: a decorator that wraps the target function.
    """
    import functools

    def wrapper(fn):
        # Fix: use functools.wraps (as the original TODO suggested) so
        # __module__, __qualname__, __dict__ and __wrapped__ are preserved
        # in addition to __name__.
        @functools.wraps(fn)
        def deprecated_method(*args, **kargs):
            # stacklevel=2 points the warning at the caller, not this shim.
            warnings.warn(message, DeprecationWarning, 2)
            return fn(*args, **kargs)

        # Keep the original doc visible but lead with the deprecation note.
        deprecated_method.__doc__ = "%s\n\n%s" % (message, fn.__doc__)
        return deprecated_method
    return wrapper
def close_stream(self):
    """Close the underlying socket and notify on_close listeners.

    No-op when the connection is already closed.
    """
    if self.is_connected:
        self.stream.close()
        self.state = DISCONNECTED
        self.on_close.send(self)
def make_attribute_list(self, node):
    """Produces the "Attributes" section in class docstrings for public
    member variables (attributes)."""
    # Collect public member variables only.
    attributes = [
        n for n in self.get_specific_subnodes(node, 'memberdef', recursive=2)
        if n.attributes['kind'].value == 'variable'
        and n.attributes['prot'].value == 'public'
    ]
    if not attributes:
        return
    self.add_text(['\n', 'Attributes', '\n', '----------'])
    for n in attributes:
        name = self.extract_text(self.get_specific_subnodes(n, 'name'))
        type_text = self.extract_text(self.get_specific_subnodes(n, 'type'))
        self.add_text(['\n* ', '`', name, '`', ' : '])
        self.add_text(['`', type_text, '`'])
        self.add_text(' \n')
        # Indent the member's brief/detailed description under the bullet.
        self.subnode_parse(n, pieces=[''], indent=4,
                           restrict=['briefdescription', 'detaileddescription'])
def plotly_app_identifier(name=None, slug=None, da=None, postfix=None):
    'Return a slug-friendly identifier'
    da, app = _locate_daapp(name, slug, da)
    identifier = app.slugified_id()
    if not postfix:
        return identifier
    return "%s-%s" % (identifier, postfix)
def _readfile(self, filename):
    """Read content of specified NEWS file into ``self.content``.

    :param filename: path of the NEWS file to read.
    """
    # Fix: context manager guarantees the file handle is closed even if
    # readlines() raises; the original leaked the handle on error.
    with open(filename) as f:
        self.content = f.readlines()
def mlon2mlt(self, mlon, datetime, ssheight=50 * 6371):
    """Computes the magnetic local time at the specified magnetic longitude
    and UT.

    Parameters
    ----------
    mlon : array_like
        Magnetic longitude (apex and quasi-dipole longitude are always
        equal)
    datetime : :class:`datetime.datetime`
        Date and time
    ssheight : float, optional
        Altitude in km to use for converting the subsolar point from
        geographic to magnetic coordinates. A high altitude is used
        to ensure the subsolar point is mapped to high latitudes, which
        prevents the South-Atlantic Anomaly (SAA) from influencing the MLT.

    Returns
    -------
    mlt : ndarray or float
        Magnetic local time [0, 24)

    Notes
    -----
    To compute the MLT, we find the apex longitude of the subsolar point at
    the given time. Then the MLT of the given point will be computed from
    the separation in magnetic longitude from this point (1 hour = 15
    degrees).
    """
    subsol_glat, subsol_glon = helpers.subsol(datetime)
    _, subsol_alon = self.geo2apex(subsol_glat, subsol_glon, ssheight)
    # np.float64 will ensure lists are converted to arrays
    return (180 + np.float64(mlon) - subsol_alon) / 15 % 24
def code_from_ipynb(nb, markdown=False):
    """Get the code for a given notebook.

    :param nb: parsed ipynb file, passed in as a dictionary.
    :param markdown: when True, markdown cells are included as ``#``
        comments; when False (default) they are skipped.
    :return: a single executable Python source string.
    """
    code = PREAMBLE
    for cell in nb['cells']:
        if cell['cell_type'] == 'code':
            # transform the input to executable Python
            code += ''.join(cell['source'])
        # Fix: the ``markdown`` flag was previously ignored, so markdown
        # cells were always emitted regardless of the caller's choice.
        if cell['cell_type'] == 'markdown' and markdown:
            code += '\n# ' + '# '.join(cell['source'])
        # We want a blank newline after each cell's output.
        # And the last line of source doesn't have a newline usually.
        code += '\n\n'
    return code
def create_response(version, status, headers):
    """Create a HTTP response header."""
    # Status line first, then "Name: value" pairs, then the blank line
    # that terminates the header section.
    parts = ['HTTP/{} {}\r\n'.format(version, status)]
    for name, value in headers:
        parts.extend((name, ': ', value, '\r\n'))
    parts.append('\r\n')
    return s2b(''.join(parts))
def find_in_subgraph_by_name(subgraph, target_name, target_package, nodetype):
    """Find an entry in a subgraph by name. Any mapping that implements
    .items() and maps unique id -> something can be used as the subgraph.

    Names are like:
        '{nodetype}.{target_package}.{target_name}'

    You can use `None` for the package name as a wildcard.
    """
    # First match wins; None when nothing matches.
    return next(
        (model for name, model in subgraph.items()
         if id_matches(name, target_name, target_package, nodetype, model)),
        None,
    )
def getDateReceived(self):
    """Used to populate catalog values.

    Returns the date the Analysis Request this analysis belongs to was
    received. If the analysis was created after, then returns the date
    the analysis was created.
    """
    request = self.getRequest()
    if not request:
        return None
    ar_date = request.getDateReceived()
    # Prefer the analysis creation date when it postdates the AR receipt.
    if ar_date and self.created() > ar_date:
        return self.created()
    return ar_date
def max_size(self):
    """Gets the largest size of the object over all timesteps.

    Returns:
        Maximum size of the object in pixels
    """
    # Each mask's sum is its pixel count; take the largest across timesteps.
    per_step = [mask.sum() for mask in self.masks]
    return np.array(per_step).max()
def add_url(self, issuer, url, **kwargs):
    """Add a set of keys by url. This method will create a
    :py:class:`oidcmsg.key_bundle.KeyBundle` instance with the
    url as source specification. If no file format is given it's assumed
    that what's on the other side is a JWKS.

    :param issuer: Who issued the keys
    :param url: Where can the key/-s be found
    :param kwargs: extra parameters for instantiating KeyBundle
    :return: A :py:class:`oidcmsg.oauth2.keybundle.KeyBundle` instance
    """
    if not url:
        raise KeyError("No url given")

    # Local development endpoints skip certificate verification.
    is_localhost = "/localhost:" in url or "/localhost/" in url
    verify = False if is_localhost else self.verify_ssl
    kb = self.keybundle_cls(source=url, verify_ssl=verify,
                            httpc=self.httpc, **kwargs)
    kb.update()
    self.add_kb(issuer, kb)
    return kb
def delete_comment(repo: GithubRepository, comment_id: int) -> None:
    """Delete an issue comment on GitHub.

    References:
        https://developer.github.com/v3/issues/comments/#delete-a-comment
    """
    # NOTE(review): passing the token as a query parameter exposes it in
    # logs; GitHub now prefers the Authorization header — confirm upstream.
    endpoint = ("https://api.github.com/repos/{}/{}/issues/comments/{}"
                "?access_token={}".format(repo.organization, repo.name,
                                          comment_id, repo.access_token))
    response = requests.delete(endpoint)
    if response.status_code != 204:
        message = 'Comment delete failed. Code: {}. Content: {}.'.format(
            response.status_code, response.content)
        raise RuntimeError(message)
def graph_attention(q, k, v, bias, dropout_rate=0.0, image_shapes=None, name=None, make_image_summary=True, save_weights_to=None, dropout_broadcast_dims=None, adjacency_matrix=None, num_edge_types=5):
  """graph attention.

  Args:
    q: a Tensor with shape [batch, heads, length_q, depth_k]
    k: a Tensor with shape [batch, heads, length_kv, depth_k]
    v: a Tensor with shape [batch, heads, length_kv, depth_v]
    bias: bias Tensor (see attention_bias())
    dropout_rate: a floating point number
    image_shapes: optional tuple of integer scalars.
      see comments for attention_image_summary()
    name: an optional string
    make_image_summary: True if you want an image summary.
    save_weights_to: an optional dictionary to capture attention weights
      for vizualization; the weights tensor will be appended there under
      a string key created from the variable scope (including name).
    dropout_broadcast_dims: an optional list of integers less than 4
      specifying in which dimensions to broadcast the dropout decisions.
      saves memory.
    adjacency_matrix: optional matrix of [batch, length, length] ids indicating
      edge type
    num_edge_types: an int indicating number of edge types

  Returns:
    A Tensor of shape [batch, length, depth(q)]
  """
  with tf.variable_scope(name, default_name="dot_product_attention", values=[q, k, v]) as scope:
    # Content-based scores: [batch, num_heads, query_length, memory_length].
    logits = tf.matmul(q, k, transpose_b=True)
    if adjacency_matrix is not None:
      # Add edge-type-dependent scores on top of the content scores.
      key_head_depth = common_layers.shape_list(q)[-1]
      adjacency_vectors = make_edge_vectors(adjacency_matrix, num_edge_types, key_head_depth, name=name)
      # transposing q to be [batch, length_q, heads, depth_k]
      # to allow for matmul with [batch, length_q, length_q, depth_k]
      q_t = tf.transpose(q, [0, 2, 1, 3])
      adj_logits = tf.matmul(q_t, adjacency_vectors, transpose_b=True)
      # Transpose back so the head axis lines up with `logits` again.
      logits += tf.transpose(adj_logits, [0, 2, 1, 3])
      # [batch, depth, num_nodes, num_nodes]
    if bias is not None:
      logits += bias
    # Normalize scores into attention weights over the memory axis.
    weights = tf.nn.softmax(logits, name="attention_weights")
    if save_weights_to is not None:
      # Exposed for visualization, keyed by the full variable-scope name.
      save_weights_to[scope.name] = weights
    # dropping out the attention links for each of the heads
    weights = common_layers.dropout_with_broadcast_dims(weights, 1.0 - dropout_rate, broadcast_dims=dropout_broadcast_dims)
    if common_layers.should_generate_summaries() and make_image_summary:
      common_attention.attention_image_summary(weights, image_shapes)
    # Weighted sum of the values.
    return tf.matmul(weights, v)
def headers_as_dict(cls, resp):
    """Turns an array of response headers into a dictionary."""
    if six.PY2:
        # Py2 httplib exposes raw "Name: value" strings; split once on ':'.
        pairs = (header.split(':', 1) for header in resp.msg.headers)
    else:
        # Py3 email.message keeps already-parsed (name, value) pairs.
        pairs = resp.msg._headers
    return {name: value.strip() for name, value in pairs}
def url(self):
    """Path to local catalog file"""
    # First entry of ``main.value`` is the catalog's file name.
    filename = self.main.value[0]
    return os.path.join(self.path, filename)
def set_access_control(self, mode, onerror=None):
    """Enable use of access control lists at connection setup if mode
    is X.EnableAccess, disable if it is X.DisableAccess."""
    request.SetAccessControl(mode=mode,
                             display=self.display,
                             onerror=onerror)
def start_fitting(self):
    """Launches the fitting routine on another thread."""
    # Queue used to hand results back from the worker thread.
    self.queue = queue.Queue()
    self.peak_vals = []
    self.fit_thread = QThread()
    # must be assigned as an instance variable, not local, as otherwise thread is garbage
    # collected immediately at the end of the function before it runs
    self.fitobj = self.do_fit(str(self.data_filepath.text()), self.matplotlibwidget, self.queue, self.peak_vals, self.peak_locs)
    # Move the worker onto the thread before wiring signals, so its slots
    # execute in the worker thread's event loop.
    self.fitobj.moveToThread(self.fit_thread)
    self.fit_thread.started.connect(self.fitobj.run)
    # clean up. quit thread after script is finished
    self.fitobj.finished.connect(self.fit_thread.quit)
    self.fitobj.status.connect(self.update_status)
    self.fit_thread.start()
def check_session(self):
    """Make sure a session is open.

    If it's not and autosession is turned on, create a new session
    automatically. If it's not and autosession is off, raise an exception.
    """
    if self.session is not None:
        return
    if not self.autosession:
        raise RuntimeError("must open a session before modifying %s" % self)
    self.open_session()
def total_write_throughput(self):
    """Combined write throughput of table and global indexes"""
    index_total = sum(index.write_throughput
                      for index in itervalues(self.global_indexes))
    return self.write_throughput + index_total
def find_hbd(self, all_atoms, hydroph_atoms):
    """Find all possible strong and weak hydrogen bonds donors (all hydrophobic C-H pairings).

    :param all_atoms: candidate atoms for regular (strong) donors.
    :param hydroph_atoms: hydrophobic carbons considered as weak donors.
    :return: list of ``hbonddonor`` namedtuples
        (d, d_orig_atom, d_orig_idx, h, type).
    """
    # Fix: removed vestigial dead code (`in_ring = False` immediately
    # followed by `if not in_ring:`) which could never exclude anything.
    donor_pairs = []
    data = namedtuple('hbonddonor', 'd d_orig_atom d_orig_idx h type')
    # Regular donors: H-bond donor atoms outside alternative conformations,
    # paired with each attached donor hydrogen.
    for donor in [a for a in all_atoms
                  if a.OBAtom.IsHbondDonor() and a.idx not in self.altconf]:
        for adj_atom in [a for a in pybel.ob.OBAtomAtomIter(donor.OBAtom)
                         if a.IsHbondDonorH()]:
            d_orig_idx = self.Mapper.mapid(donor.idx, mtype=self.mtype, bsid=self.bsid)
            d_orig_atom = self.Mapper.id_to_atom(d_orig_idx)
            donor_pairs.append(data(d=donor, d_orig_atom=d_orig_atom,
                                    d_orig_idx=d_orig_idx,
                                    h=pybel.Atom(adj_atom), type='regular'))
    # Weak donors: hydrophobic carbons paired with their attached hydrogens
    # (atomic number 1).
    for carbon in hydroph_atoms:
        for adj_atom in [a for a in pybel.ob.OBAtomAtomIter(carbon.atom.OBAtom)
                         if a.GetAtomicNum() == 1]:
            d_orig_idx = self.Mapper.mapid(carbon.atom.idx, mtype=self.mtype, bsid=self.bsid)
            d_orig_atom = self.Mapper.id_to_atom(d_orig_idx)
            donor_pairs.append(data(d=carbon, d_orig_atom=d_orig_atom,
                                    d_orig_idx=d_orig_idx,
                                    h=pybel.Atom(adj_atom), type='weak'))
    return donor_pairs
def detectIphone(self):
    """Return detection of an iPhone

    Detects if the current device is an iPhone.
    """
    # The iPad and iPod touch say they're an iPhone! So let's disambiguate.
    claims_iphone = UAgentInfo.deviceIphone in self.__userAgent
    return claims_iphone and not (self.detectIpad() or self.detectIpod())
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.