signature
stringlengths 29
44.1k
| implementation
stringlengths 0
85.2k
|
|---|---|
def get_fragment(self, list_of_indextuples, give_only_index=False, use_lookup=None):
    """Get the indices of the atoms in a fragment.

    ``list_of_indextuples`` contains all bonds from the molecule to the
    fragment.  ``[(1, 3), (2, 4)]`` means, for example, that the fragment is
    connected over two bonds: atom 1 (molecule) to atom 3 (fragment) and
    atom 2 (molecule) to atom 4 (fragment).

    Args:
        list_of_indextuples (list): Bonds as ``(molecule_index, fragment_index)``.
        give_only_index (bool): If ``True`` a set of indices is returned.
            Otherwise a new Cartesian instance.
        use_lookup (bool): Use a lookup variable for
            :meth:`~chemcoord.Cartesian.get_bonds`.  The default is
            specified in ``settings['defaults']['use_lookup']``.

    Returns:
        A set of indices or a new Cartesian instance.
    """
    if use_lookup is None:
        use_lookup = settings['defaults']['use_lookup']
    # The molecule-side atom of each bond is excluded so the search stays
    # inside the fragment.  (Previously the loop variable shadowed the
    # builtin ``tuple``.)
    exclude = {molecule_index for molecule_index, _ in list_of_indextuples}
    index_of_atom = list_of_indextuples[0][1]
    fragment_index = self.get_coordination_sphere(
        index_of_atom, exclude=exclude, n_sphere=float('inf'),
        only_surface=False, give_only_index=True, use_lookup=use_lookup)
    if give_only_index:
        return fragment_index
    return self.loc[fragment_index, :]
|
def set_rate(rate):
    """Define the ideal rate at which computation is to be performed.

    :arg rate: the frequency in Hertz
    :type rate: int or float
    :raises TypeError: if argument 'rate' is not int or float
    """
    # Single isinstance call with a tuple replaces the chained checks.
    if not isinstance(rate, (int, float)):
        raise TypeError("argument to set_rate is expected to be int or float")
    global loop_duration
    # The loop period is the reciprocal of the frequency.
    loop_duration = 1.0 / rate
|
def get_calls(self, job_name):
    '''Read the file registered under ``job_name`` and return its CallEdge array.'''
    yaml_config = self.file_index.get_by_name(job_name).yaml
    return self.get_calls_from_dict(yaml_config, from_name=job_name)
|
def attr_to_dict(obj, attr, dct):
    """Copy ``attr`` from ``obj`` into ``dct`` when the attribute exists.

    :param obj: source object
    :param attr: attribute name to look up on ``obj``
    :param dct: target dictionary, updated in place
    :return: the (possibly updated) dictionary
    """
    # EAFP: attempt the lookup and ignore a missing attribute.
    try:
        dct[attr] = getattr(obj, attr)
    except AttributeError:
        pass
    return dct
|
def change_email(self, old_email, new_email):
    """Change the email of the current account.

    :param old_email: The current email
    :param new_email: The new email to set
    """
    log.info("[+] Changing account email to '{}'".format(new_email))
    request = account.ChangeEmailRequest(self.password, old_email, new_email)
    return self._send_xmpp_element(request)
|
def angles(self) -> Tuple[float]:
    """Return the lattice angles (alpha, beta, gamma) in degrees."""
    matrix = self._matrix
    lengths = self.lengths
    cosines = np.zeros(3)
    for i in range(3):
        j, k = (i + 1) % 3, (i + 2) % 3
        # Cosine of the angle between the two *other* lattice vectors,
        # clamped into [-1, 1] by abs_cap to guard against rounding.
        cosines[i] = abs_cap(dot(matrix[j], matrix[k]) / (lengths[j] * lengths[k]))
    degrees = np.arccos(cosines) * 180.0 / pi
    return tuple(degrees.tolist())
|
def database_list_folder(object_id, input_params=None, always_retry=True, **kwargs):
    """Invokes the /database-xxxx/listFolder API method.

    For more info, see:
    https://wiki.dnanexus.com/API-Specification-v1.0.0/Databases#API-method%3A-%2Fdatabase-xxxx%2FlistFolder

    :param object_id: the database object ID ('database-xxxx')
    :param input_params: dict of API input parameters (defaults to empty)
    :param always_retry: whether the request may be retried safely
    :return: the decoded API response from DXHTTPRequest
    """
    # A mutable default argument ({}) is shared across all calls; use None
    # as the sentinel and build a fresh dict per call instead.
    if input_params is None:
        input_params = {}
    return DXHTTPRequest('/%s/listFolder' % object_id, input_params,
                         always_retry=always_retry, **kwargs)
|
def dimensionless_contents(streams, kdims, no_duplicates=True):
    """Return the stream parameter names not associated with any key dimension."""
    all_names = stream_parameters(streams, no_duplicates)
    return [param for param in all_names if param not in kdims]
|
def loads(self, config, content, prefer=None):
    """Load an instance from serialized content via the preferred handler.

    :param class config: The config class to load into
    :param str content: The content to load from
    :param str prefer: The preferred serialization module name
    :raises ValueError: If the load handler does not provide a handler method
    :return: A dictionary converted from the given content
    :rtype: dict
    """
    loader = self._prefer_package(prefer)
    hook_name = f"on_{loader}_loads"
    hook = getattr(self, hook_name, None)
    if callable(hook):
        return hook(self.handler, config, content)
    raise ValueError(
        f"no loads handler for {self.imported!r}, requires method "
        f"{hook_name!r} in {self!r}"
    )
|
def has_obstory_metadata(self, status_id):
    """Check for the presence of the given metadata item.

    :param string status_id: The metadata item ID
    :return: True if we have a metadata item with this ID, False otherwise
    """
    self.con.execute('SELECT 1 FROM archive_metadata WHERE publicId=%s;', (status_id,))
    rows = self.con.fetchall()
    return bool(rows)
|
def run(configobj=None):
    """TEAL interface for the `acscteforwardmodel` function."""
    # Forward the TEAL config entries to the underlying function as kwargs.
    options = {key: configobj[key]
               for key in ('exec_path', 'time_stamps', 'verbose', 'quiet', 'single_core')}
    acscteforwardmodel(configobj['input'], **options)
|
def _read_protos(self, size):
    """Read the next-layer protocol type.

    Positional arguments:
        * size -- int, buffer size

    Returns:
        * str -- next layer's protocol name (None if unknown)
    """
    ethertype = self._read_unpack(size)
    return ETHERTYPE.get(ethertype)
|
def check_diag(self, jac, name):
    """Check matrix ``jac`` for diagonal elements that equal 0.

    Logs (at debug level) the positions and variable names of any
    near-zero diagonal entries found in the Jacobian ``jac``.

    :param jac: Jacobian matrix; assumed to expose ``.size`` as
        (rows, cols) and flat single-index slicing (e.g. a cvxopt
        matrix) -- TODO confirm against callers.
    :param name: key into ``system.varname.__dict__`` used to resolve
        human-readable names for the offending rows.
    """
    system = self.system
    pos = []
    names = []
    pairs = ''
    size = jac.size
    # Flat slice walking the main diagonal: start 0, stop rows**2,
    # step rows + 1 (assumes a square matrix with flat indexing).
    diag = jac[0:size[0] ** 2:size[0] + 1]
    for idx in range(size[0]):
        # Anything within 1e-8 of zero is treated as a zero pivot.
        if abs(diag[idx]) <= 1e-8:
            pos.append(idx)
    for idx in pos:
        names.append(system.varname.__dict__[name][idx])
    if len(names) > 0:
        for i, j in zip(pos, names):
            pairs += '{0}: {1}\n'.format(i, j)
        logger.debug('Jacobian diagonal check:')
        logger.debug(pairs)
|
def _digits(minval, maxval):
    """Digits needed to comfortably display values in [minval, maxval]."""
    if minval == maxval:
        return 3
    # One digit more than the magnitude of the span, clamped to [2, 10].
    span_magnitude = int(1 + abs(np.log10(maxval - minval)))
    return min(10, max(2, span_magnitude))
|
def _is_node_an_element(self, node):
    """Return True if the given node is an ElementTree Element, False otherwise.

    The check can be tricky when the cElementTree implementation is used,
    hence the duck-typing fallback.
    """
    # Try the simplest approach first; works for plain old ElementTree.
    if isinstance(node, BaseET.Element):
        return True
    # For cElementTree we need to be more cunning (or find a better way):
    # duck-type on the element API instead of the concrete class.
    if hasattr(node, 'makeelement') and isinstance(node.tag, basestring):
        return True
    # Previously fell off the end and returned None implicitly; make the
    # negative result an explicit False.
    return False
|
def _read_stderr(self):
    '''Continuously read stderr for error messages.

    Old-style (``yield from``) asyncio coroutine: loops while the child
    process is still running, forwarding each stderr line to
    ``self._stderr_callback`` when one is set.  Unexpected errors are
    logged and re-raised.
    '''
    try:
        while self._process.returncode is None:
            line = yield from self._process.stderr.readline()
            if not line:
                # EOF: the stderr pipe closed; stop reading.
                break
            if self._stderr_callback:
                yield from self._stderr_callback(line)
    except Exception:
        _logger.exception('Unhandled read stderr exception.')
        raise
|
def do_db(self, arg):
    """[~thread] db <register> - show memory contents as bytes
    [~thread] db <register-register> - show memory contents as bytes
    [~thread] db <register> <size> - show memory contents as bytes
    [~process] db <address> - show memory contents as bytes
    [~process] db <address-address> - show memory contents as bytes
    [~process] db <address> <size> - show memory contents as bytes"""
    # Render the requested memory range as a hex byte dump, then remember
    # this command so a bare repeat re-runs the same display.
    self.print_memory_display(arg, HexDump.hexblock)
    self.last_display_command = self.do_db
|
def print_all(self):
    """Print a formatted summary of the repository's git information.

    :return: None
    """
    template = (
        "\n\n# Git information \n"
        "-------------------------------------------\n"
        " Branch :\t{0}\n"
        " Version:\t{1}\n"
        " Summary:\t{2}\n"
        "-------------------------------------------\n\n"
    )
    print(template.format(self.get_branch(), str(self.get_version()), self.repo.commit().summary))
|
def create_image(self, df_dir_path, image, use_cache=False):
    """create image: get atomic-reactor sdist tarball, build image and tag it

    Copies the Dockerfile directory and the atomic-reactor tarball into a
    throwaway temp dir, builds the image from it, waits for the build to
    finish, and always cleans the temp dir up afterwards.

    :param df_dir_path: path to a directory containing the Dockerfile
    :param image: image name to tag the built image with
    :param use_cache: whether to let the builder use its layer cache
    :return: None
    :raises RuntimeError: if ``df_dir_path`` is not an existing directory
    """
    logger.debug("creating build image: df_dir_path = '%s', image = '%s'", df_dir_path, image)
    if not os.path.isdir(df_dir_path):
        raise RuntimeError("Directory '%s' does not exist." % df_dir_path)
    tmpdir = tempfile.mkdtemp()
    # Unique subdirectories so concurrent builds cannot collide.
    df_tmpdir = os.path.join(tmpdir, 'df-%s' % uuid.uuid4())
    git_tmpdir = os.path.join(tmpdir, 'git-%s' % uuid.uuid4())
    os.mkdir(df_tmpdir)
    logger.debug("tmp dir with dockerfile '%s' created", df_tmpdir)
    os.mkdir(git_tmpdir)
    logger.debug("tmp dir with atomic-reactor '%s' created", git_tmpdir)
    try:
        # Copy only the top-level files of the Dockerfile dir (glob('*')
        # does not recurse) next to the reactor tarball.
        for f in glob(os.path.join(df_dir_path, '*')):
            shutil.copy(f, df_tmpdir)
            logger.debug("cp '%s' -> '%s'", f, df_tmpdir)
        logger.debug("df dir: %s", os.listdir(df_tmpdir))
        reactor_tarball = self.get_reactor_tarball_path(tmpdir=git_tmpdir)
        reactor_tb_path = os.path.join(df_tmpdir, DOCKERFILE_REACTOR_TARBALL_NAME)
        shutil.copy(reactor_tarball, reactor_tb_path)
        image_name = ImageName.parse(image)
        logs_gen = self.tasker.build_image_from_path(df_tmpdir, image_name, use_cache=use_cache)
        # Block until the build finishes (or fails).
        wait_for_command(logs_gen)
    finally:
        # Always remove the temp tree, even when the build raised.
        shutil.rmtree(tmpdir)
|
import re
def swap_whitespace_and_underscore(input_string):
    """Replace whitespaces in a string with underscores, or vice versa.

    If the string contains at least one space, every space becomes an
    underscore; otherwise every underscore becomes a space.

    Examples:
        swap_whitespace_and_underscore('Jumanji The Jungle') -> 'Jumanji_The_Jungle'
        swap_whitespace_and_underscore('The Avengers') -> 'The_Avengers'
        swap_whitespace_and_underscore('Fast_and_Furious') -> 'Fast and Furious'

    Args:
        input_string: A string whose words are separated by spaces or underscores.

    Returns:
        The string with spaces turned into underscores, or underscores
        turned into spaces.
    """
    contains_space = " " in input_string
    pattern, replacement = (" ", "_") if contains_space else ("_", " ")
    return re.sub(pattern, replacement, input_string)
|
def selectPeerToIntroduce(self, otherPeers):
    """Choose a peer to introduce.

    Return the first peer from ``otherPeers`` that has not been introduced
    yet (recording it), or None if every candidate is already known.
    """
    known = self.otherPeers
    for candidate in otherPeers:
        if candidate not in known:
            known.append(candidate)
            return candidate
    return None
|
def list(self, status=values.unset, unique_name=values.unset, date_created_after=values.unset, date_created_before=values.unset, limit=None, page_size=None):
    """Lists RoomInstance records from the API as a list.

    Unlike stream(), this operation is eager and will load `limit` records
    into memory before returning.

    :param RoomInstance.RoomStatus status: Only show Rooms with the given status.
    :param unicode unique_name: Only show Rooms with the provided Name.
    :param datetime date_created_after: Only show Rooms that started on or after this date, given as YYYY-MM-DD.
    :param datetime date_created_before: Only show Rooms that started before this date, given as YYYY-MM-DD.
    :param int limit: Upper limit for the number of records to return. list() guarantees
                      never to return more than limit. Default is no limit.
    :param int page_size: Number of records to fetch per request; defaults to 50.
                          If no page_size is given but a limit is, list() reads the
                          limit with the most efficient page size, i.e. min(limit, 1000).
    :returns: list of records, at most ``limit`` long
    :rtype: list[twilio.rest.video.v1.room.RoomInstance]
    """
    record_stream = self.stream(
        status=status,
        unique_name=unique_name,
        date_created_after=date_created_after,
        date_created_before=date_created_before,
        limit=limit,
        page_size=page_size,
    )
    # Materialize eagerly (a comprehension also avoids touching the name
    # ``list``, which this method shadows).
    return [record for record in record_stream]
|
def related_domains(self, domains):
    """Get domain names seen requested within 60 seconds of the given domains.

    Args:
        domains: an enumerable of domain-name strings
    Returns:
        An enumerable of [domain name, scores]
    """
    endpoint = 'opendns-related_domains'
    url_template = u'links/name/{0}.json'
    return self._multi_get(endpoint, url_template, domains)
|
def recursive_replace(list_, target, repl=-1):
    r"""Recursively replace every occurrence of ``target`` in nested lists.

    Sublists (and ndarrays) are descended into; matching scalar items are
    replaced with ``repl``.  A new (nested) list is returned.
    """
    result = []
    for item in list_:
        if isinstance(item, (list, np.ndarray)):
            result.append(recursive_replace(item, target, repl))
        elif item == target:
            result.append(repl)
        else:
            result.append(item)
    return result
|
def __xd_iterator_pass_on(arr, view, fun):
    """Like xd_iterator, but each fun return value is passed on to the next
    call and only the last one is returned.

    :param arr: ndarray to iterate over
    :param view: dimensions taken whole (not iterated element-wise)
    :param fun: callable ``fun(subvolume, passon)``; its return value is
        fed back in as ``passon`` for the next subvolume
    :return: the return value of the final ``fun`` call (None for empty arr)
    """
    # create list of iterations: dims in `view` are taken whole
    iterations = [[None] if dim in view else list(range(arr.shape[dim]))
                  for dim in range(arr.ndim)]
    # iterate, create slicer, execute function and thread the result through
    passon = None
    for indices in itertools.product(*iterations):
        slicer = [slice(None) if idx is None else slice(idx, idx + 1)
                  for idx in indices]
        # Index with a tuple (indexing ndarrays with a *list* of slices was
        # removed in modern NumPy) and squeeze via the ndarray method
        # instead of the removed scipy.squeeze alias.
        passon = fun(arr[tuple(slicer)].squeeze(), passon)
    return passon
|
def setup_logger(logger=None, level='INFO'):
    """Setup logging for CLI use.

    Tries to avoid adding handlers twice, just to be safe.

    Parameters
    ----------
    logger : :py:class:`Logger`
        logger instance for tmuxp; defaults to the root logger
    """
    target = logger if logger else logging.getLogger()
    if target.handlers:
        # Handlers already configured; nothing to do.
        return
    channel = logging.StreamHandler()
    channel.setFormatter(log.DebugLogFormatter())
    # channel.setFormatter(log.LogFormatter())
    target.setLevel(level)
    target.addHandler(channel)
|
def static(self, max_steps, step_size):
    """Set Back-off to Static.

    Set the sampler parameters for static back-off.  Invalid inputs are
    reported via ``print`` and either coerced to safe values or cause an
    early ``return 0``.

    Inputs:
        max_steps:
            maximum optimization steps to be taken (coerced to int >= 0)
        step_size:
            the step size of the back-off (coerced to float in (0, 1))
    """
    self._dynamic = False
    # begin checks
    try:
        self._max_steps = int(max_steps)
    except:  # NOTE(review): bare except also swallows KeyboardInterrupt; (TypeError, ValueError) intended
        print("Error: Input 1 (max_steps) has to be an int.")
        return 0
    try:
        assert self._max_steps >= 0
    except:  # NOTE(review): assert-based validation is stripped under -O
        print("Warning: Input 1 (max_steps) has to be non-negative.")
        print("Setting max_steps to 0.")
        self._max_steps = 0
    if max_steps > 0:
        # Validate step_size only when steps will actually be taken.
        try:
            assert step_size == float(step_size)
        except AssertionError:
            print("Warning: Input 2 (step_size) is not a float. Converted.")
            step_size = float(step_size)
        except:  # non-numeric step_size (TypeError/ValueError) lands here
            print("Error: Input 2 (step_size) has to be a float.")
            return 0
        try:
            assert 0. < step_size < 1.
        except:
            print("Warning: Input 2 (step_size) has to be between 0 and 1.")
            print("Setting step_size to 0.2.")
            step_size = 0.2
    self._step_size = step_size
    # Warn when the cumulative back-off underflows double precision.
    if step_size ** max_steps < 10 ** (-15):
        print("Warning: Back-off gets dangerously small.")
|
def getmtime(self, filepath):
    """Get the last time that the file was modified."""
    if not self.is_ssh(filepath):
        return os.path.getmtime(filepath)
    # Remote file: make sure the SFTP session is alive, then stat it.
    self._check_ftp()
    remote_path = self._get_remote(filepath)
    return self.ftp.stat(remote_path).st_mtime
|
def element_should_be_visible(self, locator, loglevel='INFO'):
    """Verifies that element identified with locator is visible.

    Key attributes for arbitrary elements are `id` and `name`. See
    `introduction` for details about locating elements.

    New in AppiumLibrary 1.4.5
    """
    element = self._element_find(locator, True, True)
    if element.is_displayed():
        return
    # Dump the page source at the requested level before failing.
    self.log_source(loglevel)
    raise AssertionError("Element '%s' should be visible but did not" % locator)
|
def shapeexprlabel_to_IRI(self, shapeExprLabel: ShExDocParser.ShapeExprLabelContext) -> Union[ShExJ.BNODE, ShExJ.IRIREF]:
    """shapeExprLabel: iri | blankNode"""
    iri_node = shapeExprLabel.iri()
    if iri_node:
        return self.iri_to_iriref(iri_node)
    # Not an IRI, so by the grammar it must be a blank node.
    return ShExJ.BNODE(shapeExprLabel.blankNode().getText())
|
def update_status(self, reset=False):
    """Update device status.

    :param reset: force an update even when a healthy update timer is
        already pending.
    """
    # Skip when a healthy update timer is already pending, unless forced.
    if self.healthy_update_timer and not reset:
        return
    # get device features only once
    if not self.device_features:
        self.handle_features(self.get_features())
    # Get status from device to register / keep alive UDP
    self.handle_status()
    # Schedule next execution
    self.setup_update_timer()
|
def pauli_string(self, qubits=None):
    """Return this PauliTerm as an operator string, without its coefficient.

    If a list of qubits is provided, each character in the resulting string
    represents a Pauli operator on the corresponding qubit.  If qubit
    indices are not provided, only the non-identity operators are emitted,
    in order — which is rarely what you want, so providing no qubits is
    deprecated.

    >>> p = PauliTerm("X", 0) * PauliTerm("Y", 1, 1.j)
    >>> p.pauli_string()
    "XY"
    >>> p.pauli_string(qubits=[0])
    >>> p.pauli_string(qubits=[0, 2])
    "XI"

    :param list qubits: The list of qubits to represent, given as ints.
        If None, defaults to all qubits in this PauliTerm.
    :return: The string representation of this PauliTerm, sans coefficient
    """
    if qubits is None:
        warnings.warn("Please provide a list of qubits when using PauliTerm.pauli_string", DeprecationWarning)
        qubits = self.get_qubits()
    return ''.join(map(self.__getitem__, qubits))
|
def decrypt(v, key=None, keyfile=None):
    """Decrypt a string.

    (Docstring previously said "Encrypt", but this calls ``cipher.decrypt``.)

    :param v: the encrypted value to decrypt
    :param key: explicit key material (optional)
    :param keyfile: path to a file containing the key (optional)
    :return: the decrypted value
    """
    cipher = functions.get_cipher(key, keyfile)
    return cipher.decrypt(v)
|
def accept_token(self, require_token=False, scopes_required=None, render_errors=True):
    """Use this to decorate view functions that should accept OAuth2 tokens,
    this will most likely apply to API functions.

    Tokens are accepted as a Bearer Authorization header, as part of the
    query URL (access_token value) or a POST form value (access_token).

    Note that this only works if a token introspection url is configured,
    as that URL will be queried for the validity and scopes of a token.

    :param require_token: Whether a token is required for the current
        function.  If this is True, we will abort the request if there
        was no valid token provided.
    :type require_token: bool
    :param scopes_required: List of scopes that are required to be
        granted by the token before being allowed to call the protected
        function.
    :type scopes_required: list
    :param render_errors: Whether or not to eagerly render error objects
        as JSON API responses.  Set to False to pass the error object back
        unmodified for later rendering.
    :type render_errors: bool

    .. versionadded:: 1.0
    """
    def wrapper(view_func):
        @wraps(view_func)
        def decorated(*args, **kwargs):
            token = None
            # Prefer the Authorization header; form/query values below
            # override it when present.
            if 'Authorization' in request.headers and request.headers['Authorization'].startswith('Bearer '):
                token = request.headers['Authorization'].split(None, 1)[1].strip()
            if 'access_token' in request.form:
                token = request.form['access_token']
            elif 'access_token' in request.args:
                token = request.args['access_token']
            validity = self.validate_token(token, scopes_required)
            # An invalid token is tolerated when require_token is False.
            if (validity is True) or (not require_token):
                return view_func(*args, **kwargs)
            else:
                response_body = {'error': 'invalid_token', 'error_description': validity}
                if render_errors:
                    response_body = json.dumps(response_body)
                return response_body, 401, {'WWW-Authenticate': 'Bearer'}
        return decorated
    return wrapper
|
def create_database(destroy_existing=False):
    """Create the database file and tables if they don't exist.

    :param destroy_existing: when True, delete any existing database file
        first so a fresh one is created.  (Previously this flag was
        accepted but silently ignored.)
    """
    if destroy_existing and os.path.exists(DB_NAME):
        logger.info('Destroying existing database: {0}'.format(DB_NAME))
        os.remove(DB_NAME)
    if not os.path.exists(DB_NAME):
        logger.info('Create database: {0}'.format(DB_NAME))
        # Touch the file, then create the schema.
        open(DB_NAME, 'a').close()
        Show.create_table()
        Episode.create_table()
        Setting.create_table()
|
def entry_point():
    """The entry point that the CLI is executed from.

    Exits with status 1 (after printing a red error) on any NotifierException.
    """
    try:
        provider_group_factory()
        notifiers_cli(obj={})
    except NotifierException as e:
        # Known notifier errors: print in red and exit non-zero.
        click.secho(f"ERROR: {e.message}", bold=True, fg="red")
        exit(1)
|
def disconnect_async(self, conn_id, callback):
    """Asynchronously disconnect from a device that has previously been connected.

    Args:
        conn_id (int): a unique identifier for this connection on the
            DeviceManager that owns this adapter.
        callback (callable): A function called as
            ``callback(conn_id, adapter_id, success, failure_reason)``
            when the disconnection finishes.  Disconnection can only
            either succeed or timeout.
    """
    try:
        context = self.conns.get_context(conn_id)
    except ArgumentError:
        # Unknown connection: report failure via the callback instead of raising.
        callback(conn_id, self.id, False, "Could not find connection information")
        return
    # Register the pending disconnection (with timeout) before publishing,
    # so the broker's response cannot race the bookkeeping.
    self.conns.begin_disconnection(conn_id, callback, self.get_config('default_timeout'))
    topics = context['topics']
    disconn_message = {'key': context['key'], 'client': self.name, 'type': 'command', 'operation': 'disconnect'}
    self.client.publish(topics.action, disconn_message)
|
def _encode_msg(self, start_pos, offset, timestamp, key, value, attributes=0):
    """Encode msg data into the ``msg_buffer``, which should be allocated
    to at least the size of this message.

    Writes a Kafka v0/v1 message: header, then length-prefixed key, then
    length-prefixed value (-1 length marks a null key/value), and finally
    back-fills the CRC into the header.

    :return: the CRC32 of the encoded message
    """
    magic = self._magic
    buf = self._buffer
    pos = start_pos
    # Write key and value first; the header is back-filled below.
    pos += self.KEY_OFFSET_V0 if magic == 0 else self.KEY_OFFSET_V1
    if key is None:
        # Null key is encoded as length -1 with no payload bytes.
        struct.pack_into(">i", buf, pos, -1)
        pos += self.KEY_LENGTH
    else:
        key_size = len(key)
        struct.pack_into(">i", buf, pos, key_size)
        pos += self.KEY_LENGTH
        buf[pos: pos + key_size] = key
        pos += key_size
    if value is None:
        struct.pack_into(">i", buf, pos, -1)
        pos += self.VALUE_LENGTH
    else:
        value_size = len(value)
        struct.pack_into(">i", buf, pos, value_size)
        pos += self.VALUE_LENGTH
        buf[pos: pos + value_size] = value
        pos += value_size
    length = (pos - start_pos) - self.LOG_OVERHEAD
    # Write msg header. Note, that Crc will be updated later.
    if magic == 0:
        self.HEADER_STRUCT_V0.pack_into(buf, start_pos, offset, length, 0, magic, attributes)
    else:
        self.HEADER_STRUCT_V1.pack_into(buf, start_pos, offset, length, 0, magic, attributes, timestamp)
    # Calculate CRC over everything after the magic-offset prefix,
    # via a zero-copy memoryview slice.
    crc_data = memoryview(buf)[start_pos + self.MAGIC_OFFSET:]
    crc = calc_crc32(crc_data)
    struct.pack_into(">I", buf, start_pos + self.CRC_OFFSET, crc)
    return crc
|
def get(self, project_name, updatetime=None, md5sum=None):
    '''Get the project data object; return None if it does not exist.'''
    # Periodically re-scan the full project list.
    if time.time() - self.last_check_projects > self.CHECK_PROJECTS_INTERVAL:
        self._check_projects()
    # Refresh this single project when its metadata looks stale.
    if self._need_update(project_name, updatetime, md5sum):
        self._update_project(project_name)
    return self.projects.get(project_name, None)
|
def interpolate(G, f_subsampled, keep_inds, order=100, reg_eps=0.005, **kwargs):
    r"""Interpolate a graph signal.

    Parameters
    ----------
    G : Graph
    f_subsampled : ndarray
        A graph signal on the graph G.
    keep_inds : ndarray
        List of indices on which the signal is sampled.
    order : int
        Degree of the Chebyshev approximation (default = 100).
    reg_eps : float
        The regularized graph Laplacian is $\bar{L} = L + \epsilon I$.
        A smaller epsilon may lead to better regularization,
        but will also require a higher order Chebyshev approximation.

    Returns
    -------
    f_interpolated : ndarray
        Interpolated graph signal on the full vertex set of G.

    References
    ----------
    See :cite:`pesenson2009variational`
    """
    L_reg = G.L + reg_eps * sparse.eye(G.N)
    # Reuse precomputed reduction/kernel from G.mr when available.
    # NOTE(review): getattr's default argument is evaluated eagerly, so
    # kron_reduction / Filter are computed even when the cached attribute
    # exists -- confirm whether that cost is intended.
    K_reg = getattr(G.mr, 'K_reg', kron_reduction(L_reg, keep_inds))
    green_kernel = getattr(G.mr, 'green_kernel', filters.Filter(G, lambda x: 1. / (reg_eps + x)))
    alpha = K_reg.dot(f_subsampled)
    try:
        # Multi-channel signal: shape (N, Nv).
        Nv = np.shape(f_subsampled)[1]
        f_interpolated = np.zeros((G.N, Nv))
    except IndexError:
        # Single-channel signal.
        f_interpolated = np.zeros((G.N))
    f_interpolated[keep_inds] = alpha
    return _analysis(green_kernel, f_interpolated, order=order, **kwargs)
|
def _netstat_bsd():
    '''Return netstat information for BSD flavors.

    Shells out to ``netstat`` (per address family on NetBSD, per protocol
    elsewhere), parses the columns into dicts, then annotates each entry
    with user/program info resolved from the kernel network tables.
    '''
    ret = []
    if __grains__['kernel'] == 'NetBSD':
        for addr_family in ('inet', 'inet6'):
            # tail -n+3 strips the two netstat header lines.
            cmd = 'netstat -f {0} -an | tail -n+3'.format(addr_family)
            out = __salt__['cmd.run'](cmd, python_shell=True)
            for line in out.splitlines():
                comps = line.split()
                entry = {'proto': comps[0], 'recv-q': comps[1], 'send-q': comps[2], 'local-address': comps[3], 'remote-address': comps[4]}
                if entry['proto'].startswith('tcp'):
                    # Only TCP rows carry a state column.
                    entry['state'] = comps[5]
                ret.append(entry)
    else:  # Lookup TCP connections
        cmd = 'netstat -p tcp -an | tail -n+3'
        out = __salt__['cmd.run'](cmd, python_shell=True)
        for line in out.splitlines():
            comps = line.split()
            ret.append({'proto': comps[0], 'recv-q': comps[1], 'send-q': comps[2], 'local-address': comps[3], 'remote-address': comps[4], 'state': comps[5]})
        # Lookup UDP connections
        cmd = 'netstat -p udp -an | tail -n+3'
        out = __salt__['cmd.run'](cmd, python_shell=True)
        for line in out.splitlines():
            comps = line.split()
            ret.append({'proto': comps[0], 'recv-q': comps[1], 'send-q': comps[2], 'local-address': comps[3], 'remote-address': comps[4]})
    # Add in user and program info
    ppid = _ppid()
    if __grains__['kernel'] == 'OpenBSD':
        netinfo = _netinfo_openbsd()
    elif __grains__['kernel'] in ('FreeBSD', 'NetBSD'):
        netinfo = _netinfo_freebsd_netbsd()
    for idx in range(len(ret)):
        local = ret[idx]['local-address']
        remote = ret[idx]['remote-address']
        proto = ret[idx]['proto']
        try:  # Make a pointer to the info for this connection for easier
            # reference below
            ptr = netinfo[local][remote][proto]
        except KeyError:
            # No kernel info for this connection; leave it unannotated.
            continue
        # Get the pid-to-ppid mappings for this connection
        conn_ppid = dict((x, y) for x, y in six.iteritems(ppid) if x in ptr)
        try:  # Master pid for this connection will be the pid whose ppid isn't
            # in the subset dict we created above
            master_pid = next(iter(x for x, y in six.iteritems(conn_ppid) if y not in ptr))
        except StopIteration:
            continue
        ret[idx]['user'] = ptr[master_pid]['user']
        ret[idx]['program'] = '/'.join((master_pid, ptr[master_pid]['cmd']))
    return ret
|
def copy(self):
    """Return a shallow copy."""
    # Clone the operations dict so mutations do not leak between copies.
    cloned_ops = self.operations.copy()
    return self.__class__(cloned_ops, self.collection, self.document)
|
def transaction_type(self, value):
    """Set the TransactionType (with input validation).

    ``None`` is always accepted; any other value must be listed in
    ``ALLOWED_TRANSACTION_TYPES``.
    """
    if value is not None and value not in self.ALLOWED_TRANSACTION_TYPES:
        raise AttributeError('%s transaction_type element must be one of %s not %s' % (self.__class__.__name__, ','.join(self.ALLOWED_TRANSACTION_TYPES), value,))
    self._transaction_type = value
|
def _list_records(self, rtype=None, name=None, content=None):
    """List all records.

    ``rtype``, ``name`` and ``content`` filter the result; type and name
    are pushed into the API query, content is filtered client-side.
    An empty list is returned if no records are found.
    """
    filter_query = {}
    if rtype:
        filter_query["record_type"] = rtype
    if name:
        filter_query["name"] = self._relative_name(name)
    payload = self._get("/v1/domains/{0}/records".format(self.domain), query_params=filter_query,)
    records = []
    for data in payload:
        record = data["record"]
        if content and record["content"] != content:
            continue
        # An empty record name denotes the apex of the domain.
        rname = self.domain if record["name"] == "" else ".".join((record["name"], self.domain))
        processed_record = {
            "type": record["record_type"],
            "name": rname,
            "ttl": record["ttl"],
            "content": record["content"],
            "id": record["id"],
        }
        if record["prio"]:
            processed_record["options"] = {"mx": {"priority": record["prio"]}}
        records.append(processed_record)
    LOGGER.debug("list_records: %s", records)
    return records
|
def print_success(msg):
    """Print an informational success message.

    (Docstring previously said "warning"; the output is tagged [INFO].)
    On POSIX terminals the message is wrapped in ANSI colour codes;
    elsewhere it is printed plainly.
    """
    if IS_POSIX:
        print(u"%s[INFO] %s%s" % (ANSI_OK, msg, ANSI_END))
    else:
        print(u"[INFO] %s" % (msg))
|
def addIndex(self, index):
    """Adds the inputted index to this table schema.

    :param index: <orb.Index>
    """
    # Point the index back at this schema, then register it by name.
    index.setSchema(self)
    registry = self.__indexes
    registry[index.name()] = index
|
def population_observer(population, num_generations, num_evaluations, args):
    """Print the current population, fitness-sorted, to the screen.

    .. Arguments:
       population -- the population of Individuals
       num_generations -- the number of elapsed generations
       num_evaluations -- the number of candidate solution evaluations
       args -- a dictionary of keyword arguments
    """
    divider = '----------------------------------------------------------------------------'
    # Sort in place, best (highest fitness) first.
    population.sort(reverse=True)
    print(divider)
    print(' Current Population')
    print(divider)
    for individual in population:
        print(str(individual))
    print(divider)
|
def short_label(self):
    """str: A short description of the group.

    >>> device.group.short_label
    'Kitchen + 1'
    """
    names = sorted(member.player_name for member in self.members)
    label = names[0]
    extras = len(names) - 1
    if extras > 0:
        # Alphabetically-first member plus a count of the rest.
        label += " + {}".format(extras)
    return label
|
def report(self):
    """Present all information that was gathered in an html file that
    allows browsing the results.

    Builds an HTML fragment with sections for copied files, executed
    commands, alerts and custom text.  Returns utf-8 bytes on Python 2
    and str on Python 3.
    """
    # make this prettier
    html = u'<hr/><a name="%s"></a>\n' % self.name()
    # Intro
    html = html + "<h2> Plugin <em>" + self.name() + "</em></h2>\n"
    # Files
    if len(self.copied_files):
        html = html + "<p>Files copied:<br><ul>\n"
        for afile in self.copied_files:
            html = html + '<li><a href="%s">%s</a>' % (u'..' + _to_u(afile['dstpath']), _to_u(afile['srcpath']))
            if afile['symlink'] == "yes":
                html = html + " (symlink to %s)" % _to_u(afile['pointsto'])
            html = html + '</li>\n'
        html = html + "</ul></p>\n"
    # Command Output
    if len(self.executed_commands):
        html = html + "<p>Commands Executed:<br><ul>\n"
        # convert file name to relative path from our root
        # don't use relpath - these are HTML paths not OS paths.
        for cmd in self.executed_commands:
            if cmd["file"] and len(cmd["file"]):
                cmd_rel_path = u"../" + _to_u(self.commons['cmddir']) + "/" + _to_u(cmd['file'])
                html = html + '<li><a href="%s">%s</a></li>\n' % (cmd_rel_path, _to_u(cmd['exe']))
            else:
                # No output file was recorded; list the command only.
                html = html + '<li>%s</li>\n' % (_to_u(cmd['exe']))
        html = html + "</ul></p>\n"
    # Alerts
    if len(self.alerts):
        html = html + "<p>Alerts:<br><ul>\n"
        for alert in self.alerts:
            html = html + '<li>%s</li>\n' % _to_u(alert)
        html = html + "</ul></p>\n"
    # Custom Text
    if self.custom_text != "":
        html = html + "<p>Additional Information:<br>\n"
        html = html + _to_u(self.custom_text) + "</p>\n"
    if six.PY2:
        # Python 2 callers expect encoded bytes.
        return html.encode('utf8')
    else:
        return html
|
def build_rotation(vec3):
    """Build a rotation matrix.

    :param vec3: a Vector3 whose components are the rotation amounts about
        the x, y and z axes.
    :return: the combined Matrix4 rotation (X, then Y, then Z).
    :raises ValueError: if vec3 is not a Vector3
    """
    if not isinstance(vec3, Vector3):
        # The error previously referred to a non-existent 'rotAmt'
        # parameter; name the actual argument instead.
        raise ValueError("vec3 must be a Vector3")
    return Matrix4.x_rotate(vec3.x) * Matrix4.y_rotate(vec3.y) * Matrix4.z_rotate(vec3.z)
|
def is_domain_class_domain_attribute(ent, attr_name):
    """Checks if the given attribute name is a resource attribute (i.e., either
    a member or an aggregate attribute) of the given registered resource.

    :param ent: registered resource (entity) class to inspect.
    :param attr_name: name of the attribute to look up.
    :return: ``True`` if the attribute is not a TERMINAL attribute.
    """
    attr = get_domain_class_attribute(ent, attr_name)
    # NOTE(review): this compares the attribute *object* itself against the
    # TERMINAL kind constant, which would seemingly always be unequal;
    # presumably ``attr.kind != RESOURCE_ATTRIBUTE_KINDS.TERMINAL`` was
    # intended — confirm against the attribute class before relying on this.
    return attr != RESOURCE_ATTRIBUTE_KINDS.TERMINAL
|
def findAction(self, text):
    """Looks up the action based on the inputed text.

    :return     <QAction> || None
    """
    # Match against either the object name or the display text.
    matches = (act for act in self.actionGroup().actions()
               if text in (act.objectName(), act.text()))
    return next(matches, None)
|
def create_one_time_invoice(self, charges):
    '''Create an invoice from charges executed immediately.

    Charges should be a list of charges to execute immediately. Each
    value in the charges list should be a dictionary with the
    following keys:

    code
        Your code for this charge. This code will be displayed in the
        user's invoice and is limited to 36 characters.
    quantity
        A positive integer quantity. If not provided this value will
        default to 1.
    each_amount
        Positive or negative integer or decimal with two digit precision.
        A positive number will create a charge (debit). A negative number
        will create a credit.
    description
        An optional description for this charge which will be displayed on
        the user's invoice.

    :return: the parsed response of the ``invoices/new`` API call.
    '''
    data = {}
    for n, charge in enumerate(charges):
        # Normalize the amount to exactly two decimal places.
        each_amount = Decimal(charge['each_amount']).quantize(Decimal('.01'))
        data['charges[%d][chargeCode]' % n] = charge['code']
        data['charges[%d][quantity]' % n] = charge.get('quantity', 1)
        data['charges[%d][eachAmount]' % n] = '%.2f' % each_amount
        if 'description' in charge:  # idiomatic membership test (was charge.keys())
            data['charges[%d][description]' % n] = charge['description']
    response = self.product.client.make_request(
        path='invoices/new',
        params={'code': self.code},
        data=data,
    )
    return self.load_data_from_xml(response.content)
|
def imshow(self, array, *args, **kwargs):
    """Display an image, i.e. data on a 2D regular raster.

    If ``array`` is a :class:`~gwpy.types.Array2D` (e.g. a
    :class:`~gwpy.spectrogram.Spectrogram`), then the defaults are
    _different_ to those in the upstream
    :meth:`~matplotlib.axes.Axes.imshow` method. Namely, the defaults are

    - ``origin='lower'`` (coordinates start in lower-left corner)
    - ``aspect='auto'`` (pixels are not forced to be square)
    - ``interpolation='none'`` (no image interpolation is used)

    In all other usage, the defaults from the upstream matplotlib method
    are unchanged.

    Parameters
    ----------
    array : array-like or PIL image
        The image data.

    *args, **kwargs
        All arguments and keywords are passed to the inherited
        :meth:`~matplotlib.axes.Axes.imshow` method.

    See Also
    --------
    matplotlib.axes.Axes.imshow
        for details of the image rendering
    """
    if isinstance(array, Array2D):
        # gwpy 2D array types get the special defaults described above.
        return self._imshow_array2d(array, *args, **kwargs)
    image = super(Axes, self).imshow(array, *args, **kwargs)
    # Rescale the data limits so the newly added image is fully visible.
    self.autoscale(enable=None, axis='both', tight=None)
    return image
|
def send(self, pkt):
    """Send a packet.

    Routes ``pkt`` to pick the output interface, binds the BPF handle to
    that interface if necessary, prepends the link-layer header and writes
    the resulting frame.

    :param pkt: the Scapy packet to send.
    :raises Scapy_Exception: if the BIOCSETIF ioctl fails for the interface.
    """
    # Use the routing table to find the output interface
    iff = pkt.route()[0]
    if iff is None:
        iff = conf.iface
    # Assign the network interface to the BPF handle
    if self.assigned_interface != iff:
        try:
            fcntl.ioctl(self.outs, BIOCSETIF, struct.pack("16s16x", iff.encode()))  # noqa: E501
        except IOError:
            raise Scapy_Exception("BIOCSETIF failed on %s" % iff)
        self.assigned_interface = iff
    # Build the frame; guessed_cls() supplies the link-layer header class.
    frame = raw(self.guessed_cls() / pkt)
    pkt.sent_time = time.time()
    # Send the frame
    L2bpfSocket.send(self, frame)
|
def base_dir(self):
    """str: GEM-PRO project folder (``<root_dir>/<id>``), or ``None``
    when no root directory has been configured."""
    if not self.root_dir:
        return None
    return op.join(self.root_dir, self.id)
|
def get_gene_associations(model):
    """Create gene association for class :class:`.GeneDeletionStrategy`.

    Yields ``(reaction_id, expression)`` pairs, where the expression is a
    :class:`psamm.expression.boolean.Expression` representing the
    relationship between the reaction and its genes. Reactions without
    gene information are skipped. This helper should be called when
    creating :class:`.GeneDeletionStrategy` objects.

    Args:
        model: :class:`psamm.datasource.native.NativeModel`.
    """
    for rxn in model.reactions:
        genes = rxn.genes
        if genes is None:
            continue
        if isinstance(genes, string_types):
            # Already a boolean-expression string.
            expression = boolean.Expression(genes)
        else:
            # A collection of gene IDs: all of them are required (AND).
            expression = boolean.Expression(
                boolean.And(*(boolean.Variable(g) for g in genes)))
        yield rxn.id, expression
|
def update_dist_caches(dist_path, fix_zipimporter_caches):
    """Fix any globally cached `dist_path` related data.

    `dist_path` should be a path of a newly installed egg distribution (zipped
    or unzipped).

    sys.path_importer_cache contains finder objects that have been cached when
    importing data from the original distribution. Any such finders need to be
    cleared since the replacement distribution might be packaged differently,
    e.g. a zipped egg distribution might get replaced with an unzipped egg
    folder or vice versa. Having the old finders cached may then cause Python
    to attempt loading modules from the replacement distribution using an
    incorrect loader.

    zipimport.zipimporter objects are Python loaders charged with importing
    data packaged inside zip archives. If stale loaders referencing the
    original distribution are left behind, they can fail to load modules from
    the replacement distribution. E.g. if an old zipimport.zipimporter instance
    is used to load data from a new zipped egg archive, it may cause the
    operation to attempt to locate the requested data in the wrong location -
    one indicated by the original distribution's zip archive directory
    information. Such an operation may then fail outright, e.g. report having
    read a 'bad local file header', or even worse, it may fail silently &
    return invalid data.

    zipimport._zip_directory_cache contains cached zip archive directory
    information for all existing zipimport.zipimporter instances and all such
    instances connected to the same archive share the same cached directory
    information.

    If asked, and the underlying Python implementation allows it, we can fix
    all existing zipimport.zipimporter instances instead of having to track
    them down and remove them one by one, by updating their shared cached zip
    archive directory information. This, of course, assumes that the
    replacement distribution is packaged as a zipped egg.

    If not asked to fix existing zipimport.zipimporter instances, we still do
    our best to clear any remaining zipimport.zipimporter related cached data
    that might somehow later get used when attempting to load data from the new
    distribution and thus cause such load operations to fail. Note that when
    tracking down such remaining stale data, we can not catch every conceivable
    usage from here, and we clear only those that we know of and have found to
    cause problems if left alive. Any remaining caches should be updated by
    whomever is in charge of maintaining them, i.e. they should be ready to
    handle us replacing their zip archives with new distributions at runtime.

    :param dist_path: path of the newly installed egg distribution.
    :param fix_zipimporter_caches: when True, update existing
        zipimport.zipimporter instances in place; when False, only clear
        their stale cached data.
    """
    # There are several other known sources of stale zipimport.zipimporter
    # instances that we do not clear here, but might if ever given a reason to
    # do so:
    # * Global setuptools pkg_resources.working_set (a.k.a. 'master working
    #   set') may contain distributions which may in turn contain their
    #   zipimport.zipimporter loaders.
    # * Several zipimport.zipimporter loaders held by local variables further
    #   up the function call stack when running the setuptools installation.
    # * Already loaded modules may have their __loader__ attribute set to the
    #   exact loader instance used when importing them. Python 3.4 docs state
    #   that this information is intended mostly for introspection and so is
    #   not expected to cause us problems.
    normalized_path = normalize_path(dist_path)
    _uncache(normalized_path, sys.path_importer_cache)
    if fix_zipimporter_caches:
        _replace_zip_directory_cache_data(normalized_path)
    else:
        # Here, even though we do not want to fix existing and now stale
        # zipimporter cache information, we still want to remove it. Related to
        # Python's zip archive directory information cache, we clear each of
        # its stale entries in two phases:
        #   1. Clear the entry so attempting to access zip archive information
        #      via any existing stale zipimport.zipimporter instances fails.
        #   2. Remove the entry from the cache so any newly constructed
        #      zipimport.zipimporter instances do not end up using old stale
        #      zip archive directory information.
        # This whole stale data removal step does not seem strictly necessary,
        # but has been left in because it was done before we started replacing
        # the zip archive directory information cache content if possible, and
        # there are no relevant unit tests that we can depend on to tell us if
        # this is really needed.
        _remove_and_clear_zip_directory_cache_data(normalized_path)
|
def in_casapy(helper, caltable=None, selectcals={}, plotoptions={}, xaxis=None, yaxis=None, figfile=None):
    """Drive the casapy calibration plotter on ``caltable``.

    This function is run inside the weirdo casapy IPython environment! A
    strange set of modules is available, and the
    `pwkit.environments.casa.scripting` system sets up a very particular
    environment to allow encapsulated scripting.
    """
    if caltable is None:
        raise ValueError('caltable')
    # Only pop up a GUI when no output file was requested.
    show_gui = figfile is None
    plotter = helper.casans.cp
    helper.casans.tp.setgui(show_gui)
    plotter.open(caltable)
    plotter.selectcal(**selectcals)
    plotter.plotoptions(**plotoptions)
    plotter.plot(xaxis, yaxis)
    if not show_gui:
        plotter.savefig(figfile)
    else:
        import pylab as pl
        pl.show()
|
def community_post_comment_delete(self, post_id, id, **kwargs):
    """Delete a comment from a community post.

    https://developer.zendesk.com/rest_api/docs/help_center/post_comments#delete-comment
    """
    endpoint = "/api/v2/community/posts/{post_id}/comments/{id}.json".format(
        post_id=post_id, id=id)
    return self.call(endpoint, method="DELETE", **kwargs)
|
def block(self, userId, minute):
    """Ban a user (rate limited to 100 calls per second).

    @param userId: the user ID (required).
    @param minute: ban duration in minutes, 43200 at most (required).
    @return code: response code, 200 means success.
    @return errorMessage: error message.
    """
    # Response descriptor; field descriptions are kept as served by the API.
    desc = {"name": "CodeSuccessReslut", "desc": " http 成功返回结果", "fields": [{"name": "code", "type": "Integer", "desc": "返回码,200 为正常。"}, {"name": "errorMessage", "type": "String", "desc": "错误信息。"}]}
    r = self.call_api(method=('API', 'POST', 'application/x-www-form-urlencoded'), action='/user/block.json', params={"userId": userId, "minute": minute})
    return Response(r, desc)
|
def flasher(msg, severity=None):
    """Flask's flash if available, logging call if not."""
    try:
        flash(msg, severity)
    except RuntimeError:
        # Outside a request context: route the message to the logger,
        # mapping 'danger' severity to the error level.
        log = logging.error if severity == 'danger' else logging.info
        log(msg)
|
def _ValidateTimeRange(timerange):
    """Validates a timerange argument.

    :param timerange: a sequence of exactly two optional
        ``rdfvalue.RDFDatetime`` values ``(start, end)``.
    :raises ValueError: if ``timerange`` does not contain exactly 2 items.
    :raises TypeError: if either endpoint is neither an ``RDFDatetime``
        nor ``None`` (via ``precondition.AssertOptionalType``).
    """
    # NOTE(review): the original docstring claimed this "always returns
    # non-None timerange", but the function only validates and returns None.
    if len(timerange) != 2:
        raise ValueError("Timerange should be a sequence with 2 items.")
    (start, end) = timerange
    precondition.AssertOptionalType(start, rdfvalue.RDFDatetime)
    precondition.AssertOptionalType(end, rdfvalue.RDFDatetime)
|
def play(self, sox_effects=()):
    """Play the segment.

    Pipes the MP3 audio data into a ``sox`` subprocess for playback,
    trimming a short span from both ends.

    :param sox_effects: extra SoX effect arguments appended to the command.
    :raises RuntimeError: if the sox process exits with a non-zero status.
    """
    audio_data = self.getAudioData()
    logging.getLogger().info("Playing speech segment (%s): '%s'" % (self.lang, self))
    cmd = ["sox", "-q", "-t", "mp3", "-"]
    if sys.platform.startswith("win32"):
        # sox on Windows needs an explicit output device type.
        cmd.extend(("-t", "waveaudio"))
    cmd.extend(("-d", "trim", "0.1", "reverse", "trim", "0.07", "reverse"))
    # "trim", "0.25", "-0.1"
    cmd.extend(sox_effects)
    logging.getLogger().debug("Start player process")
    p = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.DEVNULL)
    p.communicate(input=audio_data)
    if p.returncode != 0:
        raise RuntimeError()
    logging.getLogger().debug("Done playing")
|
def _parseIntegerArgument ( args , key , defaultValue ) :
"""Attempts to parse the specified key in the specified argument
dictionary into an integer . If the argument cannot be parsed ,
raises a BadRequestIntegerException . If the key is not present ,
return the specified default value ."""
|
ret = defaultValue
try :
if key in args :
try :
ret = int ( args [ key ] )
except ValueError :
raise exceptions . BadRequestIntegerException ( key , args [ key ] )
except TypeError :
raise Exception ( ( key , args ) )
return ret
|
def _get_ax_freq ( ax ) :
"""Get the freq attribute of the ax object if set .
Also checks shared axes ( eg when using secondary yaxis , sharex = True
or twinx )"""
|
ax_freq = getattr ( ax , 'freq' , None )
if ax_freq is None : # check for left / right ax in case of secondary yaxis
if hasattr ( ax , 'left_ax' ) :
ax_freq = getattr ( ax . left_ax , 'freq' , None )
elif hasattr ( ax , 'right_ax' ) :
ax_freq = getattr ( ax . right_ax , 'freq' , None )
if ax_freq is None : # check if a shared ax ( sharex / twinx ) has already freq set
shared_axes = ax . get_shared_x_axes ( ) . get_siblings ( ax )
if len ( shared_axes ) > 1 :
for shared_ax in shared_axes :
ax_freq = getattr ( shared_ax , 'freq' , None )
if ax_freq is not None :
break
return ax_freq
|
def read_filtering_config(self):
    """Read configuration options in section "filtering".

    Populates ``self.config`` from the parsed INI data:

    - ``ignorewarnings``: comma-separated list, stripped and lowercased.
    - ``ignore``: one link pattern per line, compiled strictly and
      appended to ``externlinks``.
    - ``nofollow``: like ``ignore`` but compiled non-strictly.
    - ``internlinks``: a single pattern appended to ``internlinks``.
    - ``checkextern``: boolean flag.
    """
    section = "filtering"
    if self.has_option(section, "ignorewarnings"):
        self.config['ignorewarnings'] = [f.strip().lower() for f in self.get(section, 'ignorewarnings').split(',')]
    if self.has_option(section, "ignore"):
        for line in read_multiline(self.get(section, "ignore")):
            pat = get_link_pat(line, strict=1)
            self.config["externlinks"].append(pat)
    if self.has_option(section, "nofollow"):
        for line in read_multiline(self.get(section, "nofollow")):
            pat = get_link_pat(line, strict=0)
            self.config["externlinks"].append(pat)
    if self.has_option(section, "internlinks"):
        pat = get_link_pat(self.get(section, "internlinks"))
        self.config["internlinks"].append(pat)
    self.read_boolean_option(section, "checkextern")
|
def csrf_failure(request, reason=''):
    """CSRF-failure view which converts the failed POST request into a GET
    and calls the original view with a sensible error message presented
    to the user.

    :param request: the HttpRequest
    :param reason: non-localised failure description
    """
    if _csrf_failed_view.no_moj_csrf:
        # The view opted out of the custom handling: use Django's default.
        from django.views.csrf import csrf_failure
        return csrf_failure(request, reason=reason)
    # present a sensible error message to users
    if reason == REASON_NO_CSRF_COOKIE:
        reason = _('Please try again.') + ' ' + _('Make sure you haven’t disabled cookies.')
    elif reason == REASON_NO_REFERER:
        reason = _('Please try again.') + ' ' + _('Make sure you are using a modern web browser ' 'such as Firefox or Google Chrome.')
    else:
        reason = _('Your browser failed a security check.') + ' ' + _('Please try again.')
    messages.error(request, reason)
    # convert into GET request and show view again
    request.method = 'GET'
    request.POST = QueryDict()
    # call the original view but set response status to forbidden
    response = _csrf_failed_view.callback(request, *_csrf_failed_view.args, **_csrf_failed_view.kwargs)
    if response.status_code == 200:
        response.status_code = 403
    return response
|
def calc_rfc_sfc_v1(self):
    """Calculate the corrected fractions rainfall/snowfall and total
    precipitation.

    Required control parameters:
      |NmbZones|
      |RfCF|
      |SfCF|

    Calculated flux sequences:
      |RfC|
      |SfC|

    Basic equations:
      :math:`RfC = RfCF \\cdot FracRain` \n
      :math:`SfC = SfCF \\cdot (1 - FracRain)`

    Examples:

        Assume five zones with different temperatures and hence
        different fractions of rainfall and total precipitation:

        >>> from hydpy.models.hland import *
        >>> parameterstep('1d')
        >>> nmbzones(5)
        >>> fluxes.fracrain = 0.0, 0.25, 0.5, 0.75, 1.0

        With no rainfall and no snowfall correction (implied by the
        respective factors being one), the corrected fraction related
        to rainfall is identical with the original fraction and the
        corrected fraction related to snowfall behaves opposite:

        >>> rfcf(1.0)
        >>> sfcf(1.0)
        >>> model.calc_rfc_sfc_v1()
        >>> fluxes.rfc
        rfc(0.0, 0.25, 0.5, 0.75, 1.0)
        >>> fluxes.sfc
        sfc(1.0, 0.75, 0.5, 0.25, 0.0)

        With a negative rainfall correction of 20% and a positive
        snowfall correction of 20% the corrected fractions are:

        >>> rfcf(0.8)
        >>> sfcf(1.2)
        >>> model.calc_rfc_sfc_v1()
        >>> fluxes.rfc
        rfc(0.0, 0.2, 0.4, 0.6, 0.8)
        >>> fluxes.sfc
        sfc(1.2, 0.9, 0.6, 0.3, 0.0)
    """
    con = self.parameters.control.fastaccess
    flu = self.sequences.fluxes.fastaccess
    for k in range(con.nmbzones):
        # Rain fraction scaled by the rainfall correction factor.
        flu.rfc[k] = flu.fracrain[k] * con.rfcf[k]
        # Snow fraction (complement of rain) scaled by its own factor.
        flu.sfc[k] = (1. - flu.fracrain[k]) * con.sfcf[k]
|
def check_if_numbers_are_consecutive(list_):
    """Returns True if numbers in the list are consecutive.

    Consecutive means each element is exactly one greater than its
    predecessor; empty and single-element lists are trivially consecutive.

    :param list_: list of integers
    :return: Boolean
    """
    # zip() stops at the shorter iterable, so slicing the first operand is
    # unnecessary; the redundant `True if ... else False` ternary is dropped.
    return all(second - first == 1 for first, second in zip(list_, list_[1:]))
|
def showtraceback(self, *args, **kwargs):
    """Display the exception that just occurred.

    Writes the formatted traceback directly to ``sys.stderr`` instead of
    going through ``sys.excepthook`` (see PY-12600) and records the
    exception in ``sys.last_type`` / ``sys.last_value`` /
    ``sys.last_traceback`` for post-mortem debugging.
    """
    # Override for avoid using sys.excepthook PY-12600
    try:
        type, value, tb = sys.exc_info()
        sys.last_type = type
        sys.last_value = value
        sys.last_traceback = tb
        tblist = traceback.extract_tb(tb)
        # Drop the first frame: it is the interpreter machinery, not user code.
        del tblist[:1]
        lines = traceback.format_list(tblist)
        if lines:
            lines.insert(0, "Traceback (most recent call last):\n")
        lines.extend(traceback.format_exception_only(type, value))
    finally:
        # Break reference cycles involving the traceback object.
        tblist = tb = None
    sys.stderr.write(''.join(lines))
|
def notify_update(self, x, y, width, height):
    """Informs about an update.

    Gets called by the display object where this buffer is
    registered.

    in x of type int
    in y of type int
    in width of type int
    in height of type int
    """
    # Validate every coordinate with an identical per-parameter message.
    for name, value in (('x', x), ('y', y), ('width', width), ('height', height)):
        if not isinstance(value, baseinteger):
            raise TypeError("%s can only be an instance of type baseinteger" % name)
    self._call("notifyUpdate", in_p=[x, y, width, height])
|
def _parse_url ( host , provided_protocol = None ) :
"""Process the provided host and protocol to return them in a standardized
way that can be subsequently used by Cytomine methods .
If the protocol is not specified , HTTP is the default .
Only HTTP and HTTPS schemes are supported .
Parameters
host : str
The host , with or without the protocol
provided _ protocol : str ( " http " , " http : / / " , " https " , " https : / / " )
The default protocol - used only if the host value does not specify one
Return
( host , protocol ) : tuple
The host and protocol in a standardized way ( host without protocol ,
and protocol in ( " http " , " https " ) )
Examples
> > > Cytomine . _ parse _ url ( " localhost - core " )
( " localhost - core " , " http " )
> > > Cytomine . _ parse _ url ( " https : / / demo . cytomine . coop " , " http " )
( " demo . cytomine . coop " , " https " )"""
|
protocol = "http"
# default protocol
if host . startswith ( "http://" ) :
protocol = "http"
elif host . startswith ( "https://" ) :
protocol = "https"
elif provided_protocol is not None :
provided_protocol = provided_protocol . replace ( "://" , "" )
if provided_protocol in ( "http" , "https" ) :
protocol = provided_protocol
host = host . replace ( "http://" , "" ) . replace ( "https://" , "" )
if host . endswith ( "/" ) :
host = host [ : - 1 ]
return host , protocol
|
def parse_keyring(self, namespace=None):
    """Find settings from keyring.

    Looks up each known option in the system keyring under ``namespace``
    (defaulting to the program name) and returns a mapping from option
    destination to the coerced secret value.
    """
    found = {}
    if not keyring:
        # keyring backend unavailable: nothing to read.
        return found
    service = namespace or self.prog
    for option in self._options:
        secret = keyring.get_password(service, option.name)
        if secret:
            found[option.dest] = option.type(secret)
    return found
|
def get_as_csv(self, output_file_path: Optional[str] = None) -> str:
    """Returns the table object as a CSV string.

    :param output_file_path: The output file to save the CSV to, or None.
    :return: CSV representation of the table.
    """
    if output_file_path:
        # BUG FIX: the file must be opened readable ('w+') because the CSV
        # text is read back below; mode 'w' raised io.UnsupportedOperation
        # on output.read().  newline='' is required by the csv module so
        # the row terminators are not mangled by newline translation.
        output = open(output_file_path, 'w+', newline='')
    else:
        output = StringIO()
    try:
        csv_writer = csv.writer(output)
        csv_writer.writerow(self.columns)
        for row in self.rows:
            csv_writer.writerow(row)
        output.seek(0)
        return output.read()
    finally:
        output.close()
|
def setup_logging(default_path='logging.yaml', env_key='LOG_CFG'):
    """Setup logging configuration.

    Loads a ``dictConfig`` mapping from the YAML file named by the
    ``env_key`` environment variable, falling back to ``default_path``.
    """
    path = default_path
    value = os.getenv(env_key, None)
    if value:
        path = value
    if os.path.exists(path):
        with open(path, 'rt') as f:
            configs = yaml.safe_load(f.read())
        logging.config.dictConfig(configs)
    else:
        # NOTE(review): `config` is not defined in this function; unless a
        # module-level `config` dict exists, this branch raises NameError.
        # Presumably `logging.basicConfig()` or a default config dict was
        # intended — confirm.
        logging.config.dictConfig(config)
|
def from_file(cls, fp, format_=None, fps=None, **kwargs):
    """Read subtitle file from file object.

    See :meth:`SSAFile.load()` for full description.

    Note:
        This is a low-level method. Usually, one of :meth:`SSAFile.load()`
        or :meth:`SSAFile.from_string()` is preferable.

    Arguments:
        fp (file object): A file object, ie. :class:`io.TextIOBase` instance.
            Note that the file must be opened in text mode (as opposed to binary).

    Returns:
        SSAFile
    """
    if format_ is None:
        # Autodetect subtitle format, then read again using correct parser.
        # The file might be a pipe and we need to read it twice,
        # so just buffer everything.
        content = fp.read()
        format_ = autodetect_format(content[:10000])
        fp = io.StringIO(content)
    parser = get_format_class(format_)
    subs = cls()  # an empty subtitle file
    subs.format = format_
    subs.fps = fps
    parser.from_file(subs, fp, format_, fps=fps, **kwargs)
    return subs
|
def set_hflip(self, val):
    """Apply a horizontal-flip setting to every image in the animation.

    :param val: truthy to flip all frames horizontally, falsy to unflip.
    """
    for frame in self.images:
        frame.h_flip = val
    self.__horizontal_flip = val
|
def cluster_resources(self):
    """Get the current total cluster resources.

    Note that this information can grow stale as nodes are added to or
    removed from the cluster.

    Returns:
        A dictionary mapping resource name to the total quantity of that
        resource in the cluster.
    """
    totals = defaultdict(int)
    # Only count resources from live clients.
    for client in self.client_table():
        if client["IsInsertion"]:
            for name, quantity in client["Resources"].items():
                totals[name] += quantity
    return dict(totals)
|
def user_addmedia(userids, active, mediatypeid, period, sendto, severity, **kwargs):
    '''Add new media to multiple users.

    .. versionadded:: 2016.3.0

    :param userids: ID of the user that uses the media
    :param active: Whether the media is enabled (0 enabled, 1 disabled)
    :param mediatypeid: ID of the media type used by the media
    :param period: Time when the notifications can be sent as a time period
    :param sendto: Address, user name or other identifier of the recipient
    :param severity: Trigger severities to send notifications about
    :param _connection_user: Optional - zabbix user (can also be set in opts or pillar, see module's docstring)
    :param _connection_password: Optional - zabbix password (can also be set in opts or pillar, see module's docstring)
    :param _connection_url: Optional - url of zabbix frontend (can also be set in opts, pillar, see module's docstring)

    :return: IDs of the created media.

    CLI Example:

    .. code-block:: bash

        salt '*' zabbix.user_addmedia 4 active=0 mediatypeid=1 period='1-7,00:00-24:00' sendto='support2@example.com'
        severity=63
    '''
    conn_args = _login(**kwargs)
    ret = {}
    try:
        if conn_args:
            method = 'user.addmedia'
            params = {"users": []}
            # Users
            if not isinstance(userids, list):
                userids = [userids]
            for user in userids:
                params['users'].append({"userid": user})
            # Medias
            params['medias'] = [{"active": active, "mediatypeid": mediatypeid, "period": period, "sendto": sendto, "severity": severity}, ]
            ret = _query(method, params, conn_args['url'], conn_args['auth'])
            return ret['result']['mediaids']
        else:
            # No connection arguments: fall through to the empty result.
            raise KeyError
    except KeyError:
        return ret
|
def delete(self, **context):
    """Removes this record from the database.  If the dryRun flag is
    specified then the command will be logged and not executed.

    :note   From version 0.6.0 on, this method now accepts a mutable
            keyword dictionary of values.  You can supply any member
            value for either the <orb.LookupOptions> or
            <orb.Context>, as well as the keyword 'lookup' to
            an instance of <orb.LookupOptions> and 'options' for
            an instance of the <orb.Context>

    :return <int> number of records removed (0 or 1)
    """
    if not self.isRecord():
        # Nothing persisted yet, so there is nothing to delete.
        return 0
    event = orb.events.DeleteEvent(record=self, context=self.context(**context))
    if self.processEvent(event):
        self.onDelete(event)
    if event.preventDefault:
        # An event handler vetoed the deletion.
        return 0
    if self.__delayed:
        # Force-load the record before mutating its state.
        self.__delayed = False
        self.read()
    with WriteLocker(self.__dataLock):
        self.__loaded.clear()
    context = self.context(**context)
    conn = context.db.connection()
    _, count = conn.delete([self], context)
    # clear out the old values
    if count == 1:
        col = self.schema().column(self.schema().idColumn())
        with WriteLocker(self.__dataLock):
            self.__values[col.name()] = (None, None)
    return count
|
def full_block_key(self):
    """Returns the "correct" usage key value with the run filled in."""
    key = self.block_key
    if key.run is not None:
        return key
    # pylint: disable=unexpected-keyword-arg,no-value-for-parameter
    return key.replace(course_key=self.course_key)
|
def retain_error(self, error, frame=None):
    """Adds details of an error to the report.

    :param error: The error exception to add to the report.
    :param frame: Optional stack frame; when given, the error is labeled
        as a timeout and that frame's stack is recorded, otherwise the
        current exception traceback is used.
    """
    if frame is not None:
        stack = "\n".join(traceback.format_stack(frame))
        self.labels.add("@iopipe/timeout")
    else:
        stack = traceback.format_exc()
        self.labels.add("@iopipe/error")
    self.report["errors"] = {
        "name": type(error).__name__,
        "message": "{}".format(error),
        "stack": stack,
    }
|
def clean_username(self):
    """Ensure the username doesn't exist or contain invalid chars.

    We limit it to slugifiable chars since it's used as the slug
    for the user's profile view.

    :return: the validated username.
    :raises forms.ValidationError: if the username contains characters
        that would not survive slugification, or is already taken
        (case-insensitively) by another user.
    """
    username = self.cleaned_data.get("username")
    if username.lower() != slugify(username).lower():
        raise forms.ValidationError(ugettext("Username can only contain letters, numbers, dashes " "or underscores."))
    lookup = {"username__iexact": username}
    try:
        # Exclude self so editing an existing user doesn't trip the check.
        User.objects.exclude(id=self.instance.id).get(**lookup)
    except User.DoesNotExist:
        return username
    raise forms.ValidationError(ugettext("This username is already registered"))
|
def get_work(self, worker_id, available_gb=None, lease_time=None, work_spec_names=None, max_jobs=None):
    '''obtain a WorkUnit instance based on available memory for the
    worker process.

    :param worker_id: unique identifier string for a worker to
      which a WorkUnit will be assigned, if available.
    :param available_gb: number of gigabytes of RAM available to
      this worker
    :param lease_time: how many seconds to lease a WorkUnit
    :param int max_jobs: maximum number of work units to return (default 1)
    :param work_spec_names: limit to queue from one work_spec. NOT
      IMPLEMENTED. this implementation will return work from any work spec.
    '''
    # NOTE: this is Python 2 code (dict.iteritems is used below).
    if not isinstance(available_gb, (int, float)):
        raise ProgrammerError('must specify available_gb')
    if (max_jobs is not None) and (max_jobs != 1):
        logger.error('redis rejester does not support max_jobs. ignoring and getting 1')
    if lease_time is None:
        lease_time = self.default_lifetime
    work_unit = None
    try:
        with self.registry.lock(identifier=self.worker_id) as session:
            ## use the simple niceness algorithm described in
            ## http://en.wikipedia.org/wiki/Nice_(Unix)
            ## where each job gets a (20 - niceness) share
            nice_levels = session.pull(NICE_LEVELS)
            for work_spec_name, nice in nice_levels.iteritems():
                # Clamp niceness to [-19, 19] and convert to a share weight.
                nice = min(19, nice)
                nice = max(-19, nice)
                nice = 20 - nice
                nice_levels[work_spec_name] = nice
            while nice_levels:
                # Weighted random choice of a work spec by its share.
                total_nice = sum(nice_levels.values())
                score = random.randrange(total_nice)
                work_spec_name = None
                total_score = 0
                for wsn, nice in nice_levels.iteritems():
                    total_score += nice
                    if total_score > score:
                        work_spec_name = wsn
                        break
                assert work_spec_name is not None
                nice_levels.pop(work_spec_name)
                ## verify sufficient memory
                work_spec = session.get(WORK_SPECS, work_spec_name)
                if available_gb < work_spec['min_gb']:
                    if self.enough_memory:
                        logger.info('Not enough memory to run work ' 'spec %s (need %.1f GiB, have ' '%.1f GiB) but running anyways', work_spec_name, work_spec['min_gb'], available_gb)
                    else:
                        logger.info('Not enough memory to run work ' 'spec %s (need %.1f GiB, have ' '%.1f GiB)', work_spec_name, work_spec['min_gb'], available_gb)
                        # Skip this spec and try the next one by weight.
                        continue
                ## try to get a task
                wu_expires = time.time() + lease_time
                _work_unit = session.getitem_reset(WORK_UNITS_ + work_spec_name, priority_max=time.time(), new_priority=wu_expires, lock=worker_id, )
                if _work_unit:
                    logger.info('work unit %r', _work_unit)
                    work_unit = WorkUnit(self.registry, work_spec_name, _work_unit[0], _work_unit[1], worker_id=worker_id, expires=wu_expires, default_lifetime=self.default_lifetime, )
                    break
    except (LockError, EnvironmentError):
        logger.error('took to long to get work', exc_info=True)
    logger.debug('get_work %r', work_unit)
    return work_unit
|
def shutdown_abort():
    '''Abort a shutdown. Only available while the dialog box is being
    displayed to the user. Once the shutdown has initiated, it cannot be
    aborted.

    Returns:
        bool: ``True`` if successful, otherwise ``False``

    CLI Example:

    .. code-block:: bash

        salt 'minion-id' system.shutdown_abort
    '''
    try:
        win32api.AbortSystemShutdown('127.0.0.1')
    except pywintypes.error as exc:
        # Log each component of the pywin32 error tuple separately.
        number, context, message = exc.args
        log.error('Failed to abort system shutdown')
        log.error('nbr: %s', number)
        log.error('ctx: %s', context)
        log.error('msg: %s', message)
        return False
    return True
|
def save_element_as_image_file(self, selector, file_name, folder=None):
    """Take a screenshot of a single element and write it to a PNG file.

    If no folder is specified, the image is saved in the current folder.
    A ``.png`` extension is appended to ``file_name`` when missing.
    """
    element = self.find_element(selector)
    png_data = element.screenshot_as_png
    # Reject names that are empty before the first dot (e.g. "" or ".png").
    if len(file_name.split('.')[0]) < 1:
        raise Exception("Error: file_name length must be > 0.")
    if not file_name.endswith(".png"):
        file_name += ".png"
    destination = file_name
    if folder:
        # Strip a single trailing slash, then only use the folder if
        # anything is left (a bare "/" falls back to the current folder).
        if folder.endswith("/"):
            folder = folder[:-1]
        if folder:
            self.create_folder(folder)
            destination = "%s/%s" % (folder, file_name)
    with open(destination, "wb") as image_file:
        image_file.write(png_data)
|
def json2pattern(s):
    """Convert JSON format to a query pattern.

    Handles mongo shell notation without quoted key names. Returns the
    canonicalized pattern string, or ``None`` if the input cannot be
    parsed as JSON after normalization.
    """
    # Make valid JSON by wrapping unquoted field names in double quotes.
    s = re.sub(r'([{,])\s*([^,{\s\'"]+)\s*:', ' \\1 "\\2" : ', s)
    # Translate shell-only values (ObjectId, ISODate, ...) into JSON.
    s = shell2json(s)
    # Collapse scalar values to 1 where possible, to get rid of things
    # like new Date(...).
    s = re.sub(r'([:,\[])\s*([^{}\[\]"]+?)\s*([,}\]])', '\\1 1 \\3', s)
    try:
        doc = json.loads(s, object_hook=_decode_pattern_dict)
        # Sorted keys + fixed separators give a stable, comparable string.
        return json.dumps(doc, sort_keys=True, separators=(', ', ': '))
    except ValueError:
        return None
|
def load(self, filename):
    """Load a frequency list from file (in the format produced by the
    save method).

    Each line is expected to be tab-separated with the item type in the
    first column and its count in the second; extra columns are ignored.

    :param filename: path to the frequency-list file (UTF-8 encoded)
    """
    # ``with`` guarantees the handle is closed even if a line is malformed
    # (the original leaked the handle on exception).
    with io.open(filename, 'r', encoding='utf-8') as f:
        for line in f:
            fields = line.strip().split("\t")
            item_type, count = fields[:2]
            # NOTE(review): ``count`` is still a string here; self.count is
            # assumed to accept string counts — confirm against its definition.
            self.count(item_type, count)
|
def _normalize_percent_rgb ( value ) :
"""Normalize ` ` value ` ` for use in a percentage ` ` rgb ( ) ` ` triplet , as
follows :
* If ` ` value ` ` is less than 0 % , convert to 0 % .
* If ` ` value ` ` is greater than 100 % , convert to 100 % .
Examples :
> > > _ normalize _ percent _ rgb ( ' 0 % ' )
> > > _ normalize _ percent _ rgb ( ' 100 % ' )
'100 % '
> > > _ normalize _ percent _ rgb ( ' 62 % ' )
'62 % '
> > > _ normalize _ percent _ rgb ( ' - 5 % ' )
> > > _ normalize _ percent _ rgb ( ' 250 % ' )
'100 % '
> > > _ normalize _ percent _ rgb ( ' 85.49 % ' )
'85.49 % '"""
|
percent = value . split ( '%' ) [ 0 ]
percent = float ( percent ) if '.' in percent else int ( percent )
if 0 <= percent <= 100 :
return '%s%%' % percent
if percent < 0 :
return '0%'
if percent > 100 :
return '100%'
|
def configure_root_iam_credentials(self, access_key, secret_key, region=None, iam_endpoint=None, sts_endpoint=None, max_retries=-1, mount_point=DEFAULT_MOUNT_POINT):
    """Configure the root IAM credentials used to communicate with AWS.

    Root credentials can reach the Vault server several ways, highest
    precedence first (the official AWS SDK sources the non-payload ones):

    * Static credentials provided to the API as a payload
    * AWS_ACCESS_KEY, AWS_SECRET_KEY and AWS_REGION environment variables
      on the server
    * Shared credentials files
    * Assigned IAM role or ECS task role credentials

    Existing credentials are overwritten. This endpoint does not verify
    that the supplied credentials are valid or sufficiently privileged.

    Supported methods:
        POST: /{mount_point}/config/root. Produces: 204 (empty body)

    :param access_key: Specifies the AWS access key ID.
    :type access_key: str | unicode
    :param secret_key: Specifies the AWS secret access key.
    :type secret_key: str | unicode
    :param region: Specifies the AWS region. If not set it will use the
        AWS_REGION env var, AWS_DEFAULT_REGION env var, or us-east-1 in
        that order.
    :type region: str | unicode
    :param iam_endpoint: Specifies a custom HTTP IAM endpoint to use.
    :type iam_endpoint: str | unicode
    :param sts_endpoint: Specifies a custom HTTP STS endpoint to use.
    :type sts_endpoint: str | unicode
    :param max_retries: Number of max retries the client should use for
        recoverable errors. The default (-1) falls back to the AWS SDK's
        default behavior.
    :type max_retries: int
    :param mount_point: The "path" the method/backend was mounted on.
    :type mount_point: str | unicode
    :return: The response of the request.
    :rtype: requests.Response
    """
    payload = dict(
        access_key=access_key,
        secret_key=secret_key,
        region=region,
        iam_endpoint=iam_endpoint,
        sts_endpoint=sts_endpoint,
        max_retries=max_retries,
    )
    api_path = '/v1/{mount_point}/config/root'.format(mount_point=mount_point)
    return self._adapter.post(url=api_path, json=payload)
|
def get_by_slug(tag_slug):
    '''Get label by slug.

    Returns the first matching TabTag record, or ``False`` when no
    record has the given slug.
    '''
    matches = TabTag.select().where(TabTag.slug == tag_slug)
    if matches:
        return matches.get()
    return False
|
def keyPressEvent(self, event):
    "press ESCAPE to quit the application"
    if event.key() == Qt.Key_Escape:
        self.app.quit()
|
def print_colormaps(cmaps, N=256, returnrgb=True, savefiles=False):
    '''Sample colormaps at N points and optionally save the RGB values
    to text files (one ``<name>-rgb.txt`` per colormap).

    :param cmaps: iterable of colormap callables (each with a ``name``)
    :param N=256: number of samples per colormap
    :param returnrgb=True: whether to return the list of RGB arrays
    :param savefiles=False: whether to write each array to a text file
    '''
    rgb_arrays = []
    # One shared sample grid over [0, 1] for every colormap.
    samples = np.linspace(0, 1, N)
    for cmap in cmaps:
        # Drop the alpha channel, keeping only the RGB columns.
        rgb_values = cmap(samples)[np.newaxis, :, :3][0]
        if savefiles:
            np.savetxt(cmap.name + '-rgb.txt', rgb_values)
        rgb_arrays.append(rgb_values)
    if returnrgb:
        return rgb_arrays
|
def _startup(self):
    """Called during __init__. Check consistency of schema in database with
    classes in memory. Load all Python modules for stored items, and load
    version information for upgrader service to run later.

    NOTE(review): this is Python 2 code (``print`` statements,
    ``dict.iteritems``) — it will not run on Python 3 as-is.
    """
    typesToCheck = []
    # Pass 1: walk every (oid, module, typename, version) row persisted in
    # the schema table and build the (typename, version) -> oid map.
    for oid, module, typename, version in self.querySchemaSQL(_schema.ALL_TYPES):
        if self.debug:
            print
            print 'SCHEMA:', oid, module, typename, version
        # If no in-memory class is registered for this typename yet, import
        # its module — registration happens as an import side effect.
        if typename not in _typeNameToMostRecentClass:
            try:
                namedAny(module)
            except ValueError as err:
                raise ImportError('cannot find module ' + module, str(err))
        self.typenameAndVersionToID[typename, version] = oid
    # Can't call this until typenameAndVersionToID is populated, since this
    # depends on building a reverse map of that.
    persistedSchema = self._loadTypeSchema()
    # Pass 2: now that we have persistedSchema, loop over everything again
    # and prepare old types.
    for (typename, version), typeID in self.typenameAndVersionToID.iteritems():
        cls = _typeNameToMostRecentClass.get(typename)
        if cls is not None:
            if version != cls.schemaVersion:
                # Stored version lags the current class: stage an old-version
                # representation so the upgrader can migrate it later.
                typesToCheck.append(self._prepareOldVersionOf(typename, version, persistedSchema))
            else:
                typesToCheck.append(cls)
    # Verify the persisted schema matches each class (current or prepared-old).
    for cls in typesToCheck:
        self._checkTypeSchemaConsistency(cls, persistedSchema)
    # Schema is consistent! Now, if I forgot to create any indexes last
    # time I saw this table, do it now...
    extantIndexes = self._loadExistingIndexes()
    for cls in typesToCheck:
        self._createIndexesFor(cls, extantIndexes)
    self._upgradeManager.checkUpgradePaths()
|
def compare_two_documents(kls, doc1, doc2):
    """Compare two documents, yielding diff lines (or a parse-error message).

    String inputs are parsed as JSON first. List-valued principals and the
    Action/NotAction/Resource/NotResource keys are sorted before diffing so
    that ordering differences coming back from AWS do not register as
    changes.
    """
    parsed_docs = []
    for raw in (doc1, doc2):
        document = raw
        if isinstance(raw, string_types):
            try:
                document = json.loads(raw)
            except (ValueError, TypeError) as error:
                log.warning("Failed to convert doc into a json object\terror=%s", error)
                yield error.args[0]
                return
        parsed_docs.append(document)
    first, second = parsed_docs

    # Ordering the principals because the ordering amazon gives me hates me
    def sort_statement(statement):
        for principal in (statement.get("Principal", None), statement.get("NotPrincipal", None)):
            if principal:
                for principal_type in ("AWS", "Federated", "Service"):
                    if principal_type in principal and type(principal[principal_type]) is list:
                        principal[principal_type] = sorted(principal[principal_type])

    def sort_key(statement, key):
        if key in statement and type(statement[key]) is list:
            statement[key] = sorted(statement[key])

    def canonicalize(statement):
        # Sort principals plus every list-valued action/resource key in place.
        sort_statement(statement)
        for key in ("Action", "NotAction", "Resource", "NotResource"):
            sort_key(statement, key)

    for document in (first, second):
        if "Statement" in document:
            statements = document["Statement"]
            if type(statements) is dict:
                canonicalize(statements)
            else:
                for statement in statements:
                    canonicalize(statement)

    difference = diff(first, second, fromfile="current", tofile="new").stringify()
    if difference and (not first or not second or first != second):
        for line in difference.split('\n'):
            yield line
|
def notify_modified(room, event, user):
    """Notifies about the modification of a chatroom.

    :param room: the chatroom
    :param event: the event
    :param user: the user performing the action
    """
    template = get_plugin_template_module('emails/modified.txt', chatroom=room, event=event, user=user)
    _send(event, template)
|
def merge_coords_for_inplace_math(objs, priority_vars=None):
    """Merge coordinate variables without worrying about alignment.

    This function is used for merging variables in coordinates.py.
    """
    variable_dicts = expand_variable_dicts(objs)
    merged = merge_variables(variable_dicts, priority_vars)
    # Sanity check: MultiIndex level names must be unique after the merge.
    assert_unique_multiindex_level_names(merged)
    return merged
|
def set_keyspace(self, keyspace):
    """Switch the keyspace used for subsequent requests on this
    CassandraClusterPool.

    Returns a Deferred that fires once it can be verified that
    connections can successfully use the new keyspace. If changing a
    connection to that keyspace fails, the Deferred errbacks and the
    keyspace used for future requests is left unchanged.

    Requests made between calling this method and the returned Deferred
    firing may run in either the old or the new keyspace. If multiple
    keyspaces are needed at the same time in the same app, consider the
    specialized CassandraKeyspaceConnection interface provided by the
    keyspaceConnection method.
    """
    # Push a real set_keyspace on some (any) connection: if it succeeds
    # there, it is likely to succeed everywhere, and vice versa. Don't
    # wait for all connections to change — some may be busy with long
    # blocking tasks, and the keyspace might change again before they
    # finish anyway.
    d = self.pushRequest(ManagedThriftRequest('set_keyspace', keyspace))

    def _remember(_):
        # Only record the new keyspace after the probe request succeeded.
        self.keyspace = keyspace

    d.addCallback(_remember)
    return d
|
def url_to_fn(url):
    """Convert `url` to the filename used to download the datasets.

    ``http://kitakitsune.org/xe`` -> ``kitakitsune.org_xe``.

    Args:
        url (str): URL of the resource.

    Returns:
        str: Filesystem-safe name derived from the URL.
    """
    # Strip the scheme, drop any query string, then replace the characters
    # that are unsafe in filenames.
    for scheme in ("http://", "https://"):
        url = url.replace(scheme, "")
    url = url.partition("?")[0]
    return url.replace("%", "_").replace("/", "_")
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.