signature
stringlengths 29
44.1k
| implementation
stringlengths 0
85.2k
|
|---|---|
def from_epsg(cls, code):
    """Build an EPSG-like CRS mapping from an integer code.

    Note: the input code is not validated against an EPSG database.

    :param code: positive EPSG code (anything ``int()`` accepts).
    :raises ValueError: if the code is zero or negative.
    :return: instance of ``cls`` wrapping the ``init``/``no_defs`` mapping.
    """
    numeric = int(code)
    if numeric <= 0:
        raise ValueError("EPSG codes are positive integers")
    mapping = {'init': "epsg:{}".format(code), 'no_defs': True}
    return cls(mapping)
|
def _interval ( dates ) :
"""Return the distance between all dates and 0 if they are different"""
|
interval = ( dates [ 1 ] - dates [ 0 ] ) . days
last = dates [ 0 ]
for dat in dates [ 1 : ] :
if ( dat - last ) . days != interval :
return 0
last = dat
return interval
|
def get_numwords():
    """Build the lookup needed to convert number words into integers.

    Relies on the module-level ``UNITS``, ``TENS`` and ``SCALES`` word lists.

    :return: ``(all_numbers, numwords)`` where ``all_numbers`` is a regex
        alternation matching any known number word and ``numwords`` maps each
        word to a ``(scale, increment)`` tuple.
    """
    # 'and' is a pure connector; 'a'/'an' act as the number one.
    numwords = {'and': (1, 0), 'a': (1, 1), 'an': (1, 1)}
    for idx, word in enumerate(UNITS):
        numwords[word] = (1, idx)
    for idx, word in enumerate(TENS):
        numwords[word] = (1, idx * 10)
    for idx, word in enumerate(SCALES):
        # "hundred" (idx 0) scales by 10**2; then thousand, million, ...
        numwords[word] = (10 ** (idx * 3 or 2), 0)
    # Bug fix: the original used Python 2-only ``ur''`` literals, which are
    # a syntax error on Python 3; plain raw strings behave identically here.
    all_numbers = r'|'.join(r'\b%s\b' % i for i in numwords.keys() if i)
    return all_numbers, numwords
|
def _makes_clone ( _func , * args , ** kw ) :
"""A decorator that returns a clone of the current object so that
we can re - use the object for similar requests ."""
|
self = args [ 0 ] . _clone ( )
_func ( self , * args [ 1 : ] , ** kw )
return self
|
def log_random_sleep(self, minimum=3.0, scale=1.0, hints=None):
    """Sleep a random amount of time and debug-log the actual duration.

    :param minimum: minimum sleep time, forwarded to ``helper.random_sleep``.
    :param scale: scale factor for the random part of the sleep.
    :param hints: optional label included in the log line.
    """
    label = '{} slept'.format(hints) if hints else 'slept'
    started = time.time()
    helper.random_sleep(minimum, scale)
    elapsed = self.color_log(time.time() - started)
    log.debug('{} {} {}s'.format(self.symbols.get('sleep', ''), label, elapsed))
|
def get_event_teams_attendees(self, id, team_id, **data):
    """GET /events/:id/teams/:team_id/attendees/

    Returns :format:`attendees` for a single :format:`teams`.

    :param id: event identifier.
    :param team_id: team identifier.
    :param data: extra query parameters forwarded to the request.
    """
    # Bug fix: the original formatted "{0}" twice, so team_id was ignored
    # and the event id was reused as the team id in the URL.
    return self.get("/events/{0}/teams/{1}/attendees/".format(id, team_id), data=data)
|
def convert_str_to_int(tuple_of_str: tuple) -> tuple:
    """Convert a tuple of string tuples to a tuple of integer tuples.

    Generalized: inner tuples may have any length (the original silently
    dropped everything past the first two elements).

    Examples:
        >>> convert_str_to_int((('333', '33'), ('1416', '55')))
        ((333, 33), (1416, 55))
        >>> convert_str_to_int((('999', '99'), ('1000', '500')))
        ((999, 99), (1000, 500))

    Args:
        tuple_of_str (tuple): Tuple of tuples of numeric strings.

    Returns:
        tuple: Tuple of tuples of integers.
    """
    return tuple(tuple(int(value) for value in group) for group in tuple_of_str)
|
def iter_branches(self, number=-1, etag=None):
    """Iterate over the branches in this repository.

    :param int number: (optional) number of branches to return; the default
        of -1 returns all branches.
    :param str etag: (optional) ETag from a previous request to the same
        endpoint.
    :returns: generator of
        :class:`Branch <github3.repos.branch.Branch>`\\ es
    """
    branches_url = self._build_url('branches', base_url=self._api)
    return self._iter(int(number), branches_url, Branch, etag=etag)
|
def data_response(self):
    """Return the fitted (masked) data of all active bands as one 1d array.

    :return: 1d numpy array concatenating the ``data_response`` of every
        band whose ``_compute_bool`` flag is True (empty list if none).
    """
    collected = []
    for band in range(self._num_bands):
        if self._compute_bool[band] is not True:
            continue
        band_data = self._imageModel_list[band].data_response
        # The first active band seeds the array; later ones are appended.
        collected = band_data if collected == [] else np.append(collected, band_data)
    return collected
|
def printcodelist(thing, to=sys.stdout, heading=None):
    '''Write the disassembly lines of *thing* to *to* (default: stdout).

    *thing* may be a list (assumed to be a CodeList or a manually assembled
    list of code tuples), a Code object, or anything that
    ``_get_a_code_object_from`` accepts.

    Python 3 detail: file-like objects that accept ``str`` all expose an
    ``encoding`` attribute (even StringIO, where it may be empty); objects
    without one are treated as binary and receive UTF-8 encoded bytes.

    :param thing: the code-ish object to disassemble and print.
    :param to: writable file-like object (text or binary).
    :param heading: optional heading, written first as ``===heading===``.
    :raises ValueError: if *thing* cannot be reduced to a code list.
    '''
    # If we were passed a list, assume it is a CodeList or a manually
    # assembled list of code tuples; otherwise reduce it to one.
    if not isinstance(thing, list):
        if isinstance(thing, Code):
            thing = thing.code
        else:
            # Convert various sources to a code object.
            thing = _get_a_code_object_from(thing)
            try:
                thing = Code.from_code(thing).code
            except Exception as e:
                raise ValueError('Invalid input to printcodelist') from e
    # We have a CodeList or equivalent; render the whole disassembly.
    whole_thang = str(thing)
    is_text = hasattr(to, 'encoding')
    if heading:
        # Bug fix: the original encoded only `heading` to bytes and then
        # concatenated it with str delimiters, raising TypeError whenever a
        # heading was written to a binary destination. Encode the complete
        # heading line instead.
        header_line = '===' + heading + '===\n'
        if not is_text:
            header_line = header_line.encode('UTF-8')
        to.write(header_line)
    if not is_text:
        whole_thang = whole_thang.encode('UTF-8')
    to.write(whole_thang)
|
def get_coordinate_selection(self, selection, out=None, fields=None):
    """Retrieve individual items by their integer coordinates.

    Parameters
    ----------
    selection : tuple
        One integer (coordinate) array per dimension of the array.
    out : ndarray, optional
        If given, load the selected data directly into this array.
    fields : str or sequence of str, optional
        For structured dtypes, field(s) to extract data for.

    Returns
    -------
    out : ndarray
        Data for the requested selection; its shape equals the broadcast
        shape of the coordinate arrays.

    Notes
    -----
    Also known as point selection (vectorized / inner indexing), and
    available via the ``vindex`` property. Slices are not supported and a
    coordinate array must be provided for every dimension. Coordinate
    arrays may be multidimensional and are broadcast against each other
    before being applied.

    See Also
    --------
    get_basic_selection, set_basic_selection, get_mask_selection,
    set_mask_selection, get_orthogonal_selection, set_orthogonal_selection,
    set_coordinate_selection, vindex, oindex, __getitem__, __setitem__
    """
    # Refresh metadata unless it is cached.
    if not self._cache_metadata:
        self._load_metadata()
    # Validate requested fields against our dtype.
    check_fields(fields, self._dtype)
    indexer = CoordinateIndexer(selection, self)
    # The selection machinery works on flat output, so flatten any
    # caller-provided buffer and restore the broadcast shape afterwards.
    if out is not None:
        out = out.reshape(-1)
    result = self._get_selection(indexer=indexer, out=out, fields=fields)
    return result.reshape(indexer.sel_shape)
|
def assign_bin(start, stop):
    """Given an interval `start:stop`, return the smallest bin in which it fits.

    :arg int start, stop: Interval positions (zero-based, open-ended).
    :return: Smallest bin containing `start:stop`.
    :rtype: int
    :raise OutOfRangeError: If `start:stop` exceeds the range of the binning
        scheme (propagated from ``range_per_level``).
    """
    try:
        # The first level at which the interval maps to a single bin
        # (range start == range end) is the smallest containing bin.
        return next(dropwhile(lambda r: r[0] != r[1], range_per_level(start, stop)))[0]
    except StopIteration:
        # Bug fix: corrected the typo "occured" -> "occurred".
        raise Exception('An unexpected error occurred in assigning a bin')
|
def clear_branding(self):
    """Removes the branding, restoring the default value.

    raise: NoAccess - ``Metadata.isRequired()`` is ``true`` or
        ``Metadata.isReadOnly()`` is ``true``
    *compliance: mandatory -- This method must be implemented.*
    """
    # Guard clauses mirror the original short-circuit evaluation order.
    if self.get_branding_metadata().is_read_only():
        raise NoAccess()
    if self.get_branding_metadata().is_required():
        raise NoAccess()
    self.my_osid_object_form._my_map['brandingIds'] = self._branding_default
|
def set_gps_global_origin_send(self, target_system, latitude, longitude, altitude, force_mavlink1=False):
    '''As local waypoints exist, the global MISSION reference allows to
    transform between the local coordinate frame and the global (GPS)
    coordinate frame. This can be necessary when e.g. in- and outdoor
    settings are connected and the MAV should move from in- to outdoor.

    target_system : System ID (uint8_t)
    latitude      : Latitude (WGS84), in degrees * 1E7 (int32_t)
    longitude     : Longitude (WGS84), in degrees * 1E7 (int32_t)
    altitude      : Altitude (AMSL), in meters * 1000 (positive for up) (int32_t)
    '''
    message = self.set_gps_global_origin_encode(target_system, latitude, longitude, altitude)
    return self.send(message, force_mavlink1=force_mavlink1)
|
def update_replication_schedule(self, schedule_id, schedule):
    """Update a replication schedule.

    @param schedule_id: The id of the schedule to update.
    @param schedule: The modified schedule.
    @return: The updated replication schedule.
    @since: API v3
    """
    path = "replications/%s" % schedule_id
    return self._put(path, ApiReplicationSchedule, data=schedule, api_version=3)
|
def extract_pdbid(string):
    """Extract a PDB ID (digit followed by three alphanumerics) from a string.

    :param string: text possibly containing a PDB ID.
    :return: the lowercase PDB ID, or "UnknownProtein" when none is found.
    """
    match = re.search("[0-9][0-9a-z]{3}", string.lower())
    if match is None:
        return "UnknownProtein"
    return match.group()
|
def get_concept(entity):
    """Return a Concept built from an Eidos entity.

    :param entity: Eidos entity dict with a 'canonicalName' key.
    :return: Concept named after the entity's canonical name, grounded via
        ``EidosProcessor.get_groundings``.
    """
    # The canonical name becomes the Concept's name.
    groundings = EidosProcessor.get_groundings(entity)
    return Concept(entity['canonicalName'], db_refs=groundings)
|
def import_backend(config):
    """Import and instantiate the configured Backend class.

    :param config: dict with 'backend' (dotted path "pkg.module.ClassName")
        and 'settings' (passed to the class constructor).
    :return: initialized backend instance.
    """
    module_path, _, class_name = config['backend'].rpartition('.')
    module = importlib.import_module(module_path)
    backend_cls = getattr(module, class_name)
    return backend_cls(config['settings'])
|
def upgrade():
    """Upgrade database: create the ``workflows_workflow`` and
    ``workflows_object`` tables.

    Fixes over the original revision:
    - the ``uuid`` column default is the ``uuid.uuid4`` callable, not a
      single UUID value computed once when the module is imported;
    - the duplicated ``id_user`` column definition on ``workflows_object``
      is removed.
    """
    def _json_type():
        # One fresh type instance per column, matching the original layout.
        return JSONType().with_variant(
            postgresql.JSON(none_as_null=True),
            'postgresql',
        )

    op.create_table(
        'workflows_workflow',
        sa.Column('uuid', UUIDType, primary_key=True, nullable=False,
                  default=uuid.uuid4),
        sa.Column('name', sa.String(255), default='Default workflow',
                  nullable=False),
        sa.Column('created', sa.DateTime, default=datetime.now,
                  nullable=False),
        sa.Column('modified', sa.DateTime, default=datetime.now,
                  onupdate=datetime.now, nullable=False),
        sa.Column('id_user', sa.Integer, default=0, nullable=False),
        sa.Column('extra_data', _json_type(), default=lambda: dict(),
                  nullable=False),
        sa.Column('status', ChoiceType(WorkflowStatus, impl=sa.Integer()),
                  default=WorkflowStatus.NEW, nullable=False),
    )
    op.create_table(
        'workflows_object',
        sa.Column('id', sa.Integer, primary_key=True),
        sa.Column('data', _json_type(), default=lambda: dict(),
                  nullable=False),
        sa.Column('extra_data', _json_type(), default=lambda: dict(),
                  nullable=False),
        sa.Column('id_workflow', UUIDType,
                  sa.ForeignKey('workflows_workflow.uuid', ondelete='CASCADE'),
                  nullable=True, index=True),
        sa.Column('status', ChoiceType(ObjectStatus, impl=sa.Integer()),
                  default=ObjectStatus.INITIAL, nullable=False, index=True),
        sa.Column('id_parent', sa.Integer,
                  sa.ForeignKey('workflows_object.id', ondelete='CASCADE'),
                  default=None, index=True),
        sa.Column('id_user', sa.Integer, default=0, nullable=False),
        sa.Column('created', sa.DateTime, default=datetime.now,
                  nullable=False),
        sa.Column('modified', sa.DateTime, default=datetime.now,
                  onupdate=datetime.now, nullable=False),
        sa.Column('data_type', sa.String(150), default='', nullable=True,
                  index=True),
        sa.Column('callback_pos', _json_type(), default=lambda: list(),
                  nullable=True),
    )
|
def warning(self, text):
    """Log a WARN-level message, prefixed with ``self.message_prefix``.

    :param text: message body, appended to the instance's prefix.
    """
    self.logger.warning("{}{}".format(self.message_prefix, text))
|
def _at_return(self, calculator, rule, scope, block):
    """Implements ``@return``.

    Evaluates the directive's argument and unwinds to the enclosing
    function handler by raising ``SassReturn`` carrying the value.

    :param calculator: expression calculator used to evaluate the argument.
    :param rule: current rule (unused here).
    :param scope: current scope (unused here).
    :param block: parsed block whose ``argument`` is the return expression.
    :raises SassReturn: always — the caller catches it to obtain the value.
    """
    # TODO should assert this only happens within a @function
    ret = calculator.calculate(block.argument)
    raise SassReturn(ret)
|
def init_from_files(vocab_file, files, target_vocab_size, threshold, min_count=None, file_byte_limit=1e6, reserved_tokens=None):
    """Create subtoken vocabulary based on files, and save vocab to file.

    Args:
      vocab_file: String name of vocab file to store subtoken vocabulary.
      files: List of file paths that will be used to generate vocabulary.
      target_vocab_size: target vocabulary size to generate.
      threshold: int threshold of vocabulary size to accept.
      min_count: int minimum number of times a subtoken must appear in the
        files before it is added to the vocabulary; if None, the value is
        found using binary search.
      file_byte_limit: (Default 1e6) Maximum number of bytes of sample text
        that will be drawn from the files.
      reserved_tokens: List of string tokens that are guaranteed to be at
        the beginning of the subtoken vocabulary list.

    Returns:
      Subtokenizer object
    """
    reserved_tokens = RESERVED_TOKENS if reserved_tokens is None else reserved_tokens
    if tf.gfile.Exists(vocab_file):
        tf.logging.info("Vocab file already exists (%s)" % vocab_file)
    else:
        tf.logging.info("Begin steps to create subtoken vocabulary...")
        token_counts = _count_tokens(files, file_byte_limit)
        alphabet = _generate_alphabet_dict(token_counts)
        subtoken_list = _generate_subtokens_with_target_vocab_size(
            token_counts, alphabet, target_vocab_size, threshold, min_count,
            reserved_tokens)
        tf.logging.info(
            "Generated vocabulary with %d subtokens." % len(subtoken_list))
        mlperf_log.transformer_print(
            key=mlperf_log.PREPROC_VOCAB_SIZE, value=len(subtoken_list))
        _save_vocab_file(vocab_file, subtoken_list)
    return Subtokenizer(vocab_file)
|
def cover(ctx, html=False):
    '''Run tests suite with coverage'''
    if html:
        params = '--cov-report term --cov-report html'
    else:
        params = ''
    with ctx.cd(ROOT):
        ctx.run('pytest --cov flask_fs {0}'.format(params), pty=True)
|
def _split_constraints(constraints, concrete=True):
    """Returns independent constraints, split from this Frontend's `constraints`.

    Constraints are first split on top-level ``And``s, then grouped into
    connected components by shared variables (sets are merged and shared via
    the two side maps below). Variable-free constraints are collected
    separately as "concrete" constraints.

    :param constraints: iterable of constraint ASTs to partition.
    :param concrete: when True, append a ``({'CONCRETE'}, [...])`` entry
        holding the variable-free constraints (if any).
    :return: list of ``(variable-name set, constraint list)`` tuples, one
        per independent group.
    """
    # Flatten conjunctions so each element is an atomic constraint.
    splitted = []
    for i in constraints:
        splitted.extend(i.split(['And']))
    l.debug("... splitted of size %d", len(splitted))
    concrete_constraints = []
    # variable name -> set of variable names connected to it (shared object)
    variable_connections = {}
    # variable name -> set of constraint indexes in that component
    constraint_connections = {}
    for n, s in enumerate(splitted):
        l.debug("... processing constraint with %d variables", len(s.variables))
        connected_variables = set(s.variables)
        connected_constraints = {n}
        # Variable-free constraints go to the "concrete" bucket.
        if len(connected_variables) == 0:
            concrete_constraints.append(s)
        # Merge in every component already associated with our variables.
        for v in s.variables:
            if v in variable_connections:
                connected_variables |= variable_connections[v]
            if v in constraint_connections:
                connected_constraints |= constraint_connections[v]
        # Point every variable of the merged component at the shared sets.
        for v in connected_variables:
            variable_connections[v] = connected_variables
            constraint_connections[v] = connected_constraints
    # Deduplicate: every variable of a component maps to equal sets.
    unique_constraint_sets = set()
    for v in variable_connections:
        unique_constraint_sets.add((frozenset(variable_connections[v]), frozenset(constraint_connections[v])))
    results = []
    for v, c_indexes in unique_constraint_sets:
        results.append((set(v), [splitted[c] for c in c_indexes]))
    if concrete and len(concrete_constraints) > 0:
        results.append(({'CONCRETE'}, concrete_constraints))
    return results
|
def get_unique_file_path_suffix(file_path, separator='-', i=0):
    """Return the minimum number to suffix the file to not overwrite one.

    Example: /tmp/a.txt exists.
      - With file_path='/tmp/b.txt' will return 0.
      - With file_path='/tmp/a.txt' will return 1 (/tmp/a-1.txt)

    :param file_path: The file to check.
    :type file_path: str
    :param separator: The separator to add before the suffix.
    :type separator: str
    :param i: The minimum suffix to check.
    :type i: int
    :return: The minimum suffix you should add to not overwrite a file.
    :rtype: int
    """
    # Iterative rewrite: the original recursed through the hard-coded class
    # name OsmDownloaderDialog, coupling this helper to that class and
    # risking the recursion limit; it also wrapped a single pre-formatted
    # string in a pointless os.path.join call.
    root, extension = os.path.splitext(file_path)
    while True:
        if i == 0:
            candidate = file_path
        else:
            candidate = '%s%s%s%s' % (root, separator, i, extension)
        if not os.path.isfile(candidate):
            return i
        i += 1
|
def adc(self, other: 'BitVector', carry: Bit) -> tp.Tuple['BitVector', Bit]:
    """Add with carry.

    Returns a two-element tuple of the form (result, carry).
    """
    T = type(self)
    rhs = _coerce(T, other)
    carry_bit = _coerce(T.unsized_t[1], carry)
    # Widen both operands by one bit so the sum's top bit is the carry-out.
    lhs_ext = self.zext(1)
    rhs_ext = rhs.zext(1)
    carry_ext = carry_bit.zext(T.size)
    total = lhs_ext + rhs_ext + carry_ext
    return total[0:-1], total[-1]
|
def build_list_type_validator(item_validator):
    """Return a validator applying *item_validator* to each element of a
    list (the value itself is first checked with ``validate_list``)."""
    def _validate(value):
        checked = validate_list(value)
        return [item_validator(element) for element in checked]
    return _validate
|
def parse_collection(self, item_file_prefix: str, base_item_type: Type[T], item_name_for_log: str = None, file_mapping_conf: FileMappingConfiguration = None, options: Dict[str, Dict[str, Any]] = None) -> Dict[str, T]:
    """Main method to parse a collection of items of type 'base_item_type'.

    :param item_file_prefix: location prefix of the files to parse.
    :param base_item_type: type of each item in the collection.
    :param item_name_for_log: optional item label used only in log output.
    :param file_mapping_conf: optional file-mapping configuration.
    :param options: optional parser options, keyed per parser.
    :return: dictionary mapping item names to parsed items.
    """
    # -- item_name_for_log: default to empty and validate it is a str
    item_name_for_log = item_name_for_log or ''
    check_var(item_name_for_log, var_types=str, var_name='item_name_for_log')
    # creating the wrapping dictionary type
    collection_type = Dict[str, base_item_type]
    if len(item_name_for_log) > 0:
        # trailing space so the label reads naturally in the log message
        item_name_for_log = item_name_for_log + ' '
    self.logger.debug('**** Starting to parse ' + item_name_for_log + 'collection of <' + get_pretty_type_str(base_item_type) + '> at location ' + item_file_prefix + ' ****')
    # common steps: delegate to the generic single-item parser
    return self._parse__item(collection_type, item_file_prefix, file_mapping_conf, options=options)
|
def getResiduals(self):
    """Regress out fixed effects and return the residuals.

    Builds the fixed-effect design matrix from the per-term Kronecker
    products ``kron(A_i.T, F_i)``, regresses the column-major vectorized
    ``self.Y`` on it, and reshapes the residuals back to ``self.Y``'s shape.

    :return: residual matrix with the same shape as ``self.Y``.
    """
    # Design matrix: one block of Ki columns per term.
    X = np.zeros((self.N * self.P, self.n_fixed_effs))
    ip = 0
    for i in range(self.n_terms):
        Ki = self.A[i].shape[0] * self.F[i].shape[1]
        X[:, ip:ip + Ki] = np.kron(self.A[i].T, self.F[i])
        ip += Ki
    # Vectorize Y column-major ('F') to match the kron column layout.
    y = np.reshape(self.Y, (self.Y.size, 1), order='F')
    RV = regressOut(y, X)
    RV = np.reshape(RV, self.Y.shape, order='F')
    return RV
|
def form_field_definitions(self):
    """Hook optional_login extractor if necessary for form defaults.

    NOTE(review): this method shadows the module-level
    ``form_field_definitions`` object it deep-copies from.

    :return: copy of the default user form schema; when the auth uid
        differs from the login attribute, the login field's validation
        chain is prefixed with '*optional_login'.
    """
    schema = copy.deepcopy(form_field_definitions.user)
    uid, login = self._get_auth_attrs()
    if uid != login:
        # Fall back to the 'default' spec if the login attribute has no
        # dedicated entry in the schema.
        field = schema.get(login, schema['default'])
        if field['chain'].find('*optional_login') == -1:
            # Prepend the extractor only once.
            field['chain'] = '%s:%s' % ('*optional_login', field['chain'])
        if not field.get('custom'):
            field['custom'] = dict()
        field['custom']['optional_login'] = (['context.optional_login'], [], [], [], [])
        schema[login] = field
    return schema
|
def ReadPathInfoHistory(self, client_id, path_type, components):
    """Reads a collection of hash and stat entries for a given path.

    Args:
      client_id: An identifier string for a client.
      path_type: A type of a path to retrieve path history for.
      components: A tuple of path components corresponding to the path to
        retrieve information for.

    Returns:
      A list of `rdf_objects.PathInfo` ordered by timestamp in ascending
      order.
    """
    # Delegate to the batch API with a single-path request.
    return self.ReadPathInfosHistories(client_id, path_type, [components])[components]
|
def setup(app):
    """Called by Sphinx during phase 0 (initialization).

    :param app: Sphinx application object.
    :returns: Extension version.
    :rtype: dict
    """
    app.add_config_value('disqus_shortname', None, True)
    app.add_directive('disqus', DisqusDirective)
    app.add_node(DisqusNode, html=(DisqusNode.visit, DisqusNode.depart))
    static_dir = os.path.relpath(STATIC_DIR, app.confdir)
    app.config.html_static_path.append(static_dir)
    app.connect('html-page-context', event_html_page_context)
    return {'version': __version__}
|
def build_re_pattern_from_intervals(intervals: IntervalListType) -> BuiltInReType:
    """Convert codepoint intervals into a compiled character-class pattern.

    :param intervals: Unicode codepoint intervals as (low, high) pairs.
    """
    ranges = ''.join('{}-{}'.format(chr(low), chr(high)) for low, high in intervals)
    return re.compile('[' + ranges + ']+', re.UNICODE)
|
def md5_hash_file(fh):
    """Return the hex md5 digest of the given file-like object.

    Reads in 8 KiB chunks so arbitrarily large files stay memory-bounded.
    """
    digest = hashlib.md5()
    chunk = fh.read(8192)
    while chunk:
        digest.update(chunk)
        chunk = fh.read(8192)
    return digest.hexdigest()
|
def get_value(self, key, args, kwargs):
    """Override: substitute {ATTRIBUTE} with attributes of our ``_item``.

    Falls back to the standard Formatter lookup when ``_item`` has no
    attribute named *key*.
    """
    try:
        return getattr(self._item, key)
    except AttributeError:
        return super().get_value(key, args, kwargs)
|
def delete(self, cluster):
    """Deletes the cluster from memory.

    :param cluster: cluster to delete
    :type cluster: :py:class:`elasticluster.cluster.Cluster`
    """
    try:
        del self.clusters[cluster.name]
    except KeyError:
        raise ClusterNotFound("Unable to delete non-existent cluster %s" % cluster.name)
|
def match_range(self, field, start=None, stop=None, inclusive=True, required=True, new_group=False):
    """Add a ``field:[some range]`` term to the query.

    Matches will have a ``value`` in the range in the ``field``.

    Arguments:
        field (str): The field to check for the value, namespaced with
            Elasticsearch dot syntax (e.g. ``"mdf.source_name"``).
        start (str or int): Lower bound, or ``None`` for no lower bound.
            **Default:** ``None``.
        stop (str or int): Upper bound, or ``None`` for no upper bound.
            **Default:** ``None``.
        inclusive (bool): If ``True``, include ``start`` and ``stop`` in
            the search; if ``False``, exclude them. **Default:** ``True``.
        required (bool): ``True`` adds the term with ``AND``, ``False``
            with ``OR``. **Default:** ``True``.
        new_group (bool): If ``True``, separate the term into a new
            parenthetical group. **Default:** ``False``.

    Returns:
        SearchHelper: Self
    """
    # None means unbounded ("*") on that side.
    lower = "*" if start is None else start
    upper = "*" if stop is None else stop
    # A fully unbounded range is just an existence check.
    if lower == "*" and upper == "*":
        return self.match_exists(field, required=required, new_group=new_group)
    open_bracket, close_bracket = ("[", "]") if inclusive else ("{", "}")
    value = open_bracket + str(lower) + " TO " + str(upper) + close_bracket
    return self.match_field(field, value, required=required, new_group=new_group)
|
def get_dummy_dynamic_run(nsamples, **kwargs):
    """Generate dummy data for a dynamic nested sampling run.

    Loglikelihood values of points are generated from a uniform distribution
    in (0, 1), sorted, scaled by logl_range and shifted by logl_start (if it
    is not -np.inf). Theta values of each point are each generated from a
    uniform distribution in (0, 1).

    Parameters
    ----------
    nsamples: int
        Number of samples in thread.
    nthread_init: int
        Number of threads in the initial run (starting at logl=-np.inf).
    nthread_dyn: int
        Number of threads in the dynamic run (starting at randomly chosen
        points in the initial run).
    ndim: int, optional
        Number of dimensions.
    seed: int, optional
        If not False, the seed is set with np.random.seed(seed).
    logl_range: float, optional
        Scale factor applied to logl values.
    """
    # NOTE(review): the docstring historically mentioned a logl_start
    # kwarg, but it is never popped here, so passing it raises TypeError
    # below — confirm whether that is intended.
    seed = kwargs.pop('seed', False)
    ndim = kwargs.pop('ndim', 2)
    nthread_init = kwargs.pop('nthread_init', 2)
    nthread_dyn = kwargs.pop('nthread_dyn', 3)
    logl_range = kwargs.pop('logl_range', 1)
    if kwargs:
        raise TypeError('Unexpected **kwargs: {0}'.format(kwargs))
    # Initial run: threads all start from logl = -inf.
    init = get_dummy_run(nthread_init, nsamples, ndim=ndim, seed=seed, logl_start=-np.inf, logl_range=logl_range)
    # Dynamic threads start at logl values sampled from the initial run.
    dyn_starts = list(np.random.choice(init['logl'], nthread_dyn, replace=True))
    threads = nestcheck.ns_run_utils.get_run_threads(init)
    # Seed must be False here so it is not set again for each thread
    threads += [get_dummy_thread(nsamples, ndim=ndim, seed=False, logl_start=start, logl_range=logl_range) for start in dyn_starts]
    # make sure the threads have unique labels and combine them
    for i, _ in enumerate(threads):
        threads[i]['thread_labels'] = np.full(nsamples, i)
    run = nestcheck.ns_run_utils.combine_threads(threads)
    # To make sure the thread labelling is done the same way it would be
    # when processing a dead points file, transform into dead points first.
    samples = nestcheck.write_polychord_output.run_dead_birth_array(run)
    return nestcheck.data_processing.process_samples_array(samples)
|
def get_connection_logging(self, loadbalancer):
    """Returns the connection logging setting for the given load balancer."""
    uri = "/loadbalancers/%s/connectionlogging" % utils.get_id(loadbalancer)
    resp, body = self.api.method_get(uri)
    setting = body.get("connectionLogging", {})
    return setting.get("enabled", False)
|
def get_agent_queues(self, project=None, queue_name=None, action_filter=None):
    """GetAgentQueues.

    [Preview API] Get a list of agent queues.

    :param str project: Project ID or project name
    :param str queue_name: Filter on the agent queue name
    :param str action_filter: Filter by whether the calling user has use or manage permissions
    :rtype: [TaskAgentQueue]
    """
    # Only serialize route/query parameters the caller actually provided.
    route_values = {}
    if project is not None:
        route_values['project'] = self._serialize.url('project', project, 'str')
    query_parameters = {}
    if queue_name is not None:
        query_parameters['queueName'] = self._serialize.query('queue_name', queue_name, 'str')
    if action_filter is not None:
        query_parameters['actionFilter'] = self._serialize.query('action_filter', action_filter, 'str')
    # location_id identifies the Agent Queues REST endpoint.
    response = self._send(http_method='GET', location_id='900fa995-c559-4923-aae7-f8424fe4fbea', version='5.1-preview.1', route_values=route_values, query_parameters=query_parameters)
    return self._deserialize('[TaskAgentQueue]', self._unwrap_collection(response))
|
def merge(self, resource_type, resource_properties):
    """Adds global properties to the resource, if necessary.

    No-op when no global properties are registered for this resource type.

    :param string resource_type: Type of the resource (Ex: AWS::Serverless::Function)
    :param dict resource_properties: Properties of the resource that need to be merged
    :return dict: Merged properties of the resource
    """
    try:
        global_props = self.template_globals[resource_type]
    except KeyError:
        # Nothing to do. Return the template unmodified.
        return resource_properties
    return global_props.merge(resource_properties)
|
def shareWeights(self, network, listOfLayerNamePairs=None):
    """Share weights with another network.

    The link is broken after a randomize or change of size. Layers must
    have the same names and sizes for shared connections in both networks.

    Example: ``net.shareWeights(otherNet, [["hidden", "output"]])``

    This takes the weights between the hidden and output layers of
    otherNet and shares them with net; the bias values of
    ``otherNet["output"]`` are likewise shared with ``net["output"]``.
    If no list is given, all weights are shared.

    :param network: the network to share weights with.
    :param listOfLayerNamePairs: list of [fromLayerName, toLayerName]
        pairs; defaults to every connection of this network.
    :raises AttributeError: if layer names or sizes do not match.
    """
    # Fix: compare with "is None" (identity) rather than "== None".
    if listOfLayerNamePairs is None:
        listOfLayerNamePairs = [[c.fromLayer.name, c.toLayer.name] for c in self.connections]
    if self.verbosity > 1:
        print("sharing weights:", self.name, listOfLayerNamePairs)
    # First, check that every requested pair exists in both networks with
    # matching layer sizes.
    count = 0
    for (fromLayerName, toLayerName) in listOfLayerNamePairs:
        for myConn in self.connections:
            if myConn.fromLayer.name == fromLayerName and myConn.toLayer.name == toLayerName:
                for otherConn in network.connections:
                    if otherConn.fromLayer.name == fromLayerName and otherConn.toLayer.name == toLayerName:
                        if (myConn.fromLayer.size != otherConn.fromLayer.size) or (myConn.toLayer.size != otherConn.toLayer.size):
                            raise AttributeError("shareSomeWeights: layer sizes did not match")
                        count += 1
    if count != len(listOfLayerNamePairs):
        raise AttributeError("shareSomeWeights: layer names did not match")
    # Ok, now let's share! Mark both networks as holding shared weights.
    self.sharedWeights = 1
    network.sharedWeights = 1
    # Share connection weight matrices.
    for (fromLayerName, toLayerName) in listOfLayerNamePairs:
        for myConn in self.connections:
            if myConn.fromLayer.name == fromLayerName and myConn.toLayer.name == toLayerName:
                for otherConn in network.connections:
                    if otherConn.fromLayer.name == fromLayerName and otherConn.toLayer.name == toLayerName:
                        myConn.weight = otherConn.weight
    # Share bias vectors of the destination layers.
    for (fromLayerName, toLayerName) in listOfLayerNamePairs:
        for myLayer in self.layers:
            if myLayer.name == toLayerName:
                for otherLayer in network.layers:
                    if otherLayer.name == toLayerName:
                        myLayer.weight = otherLayer.weight
|
def _set_text_from_file_dialog ( self , text ) :
"""Sets text making it a * * relative path * *"""
|
text = os . path . relpath ( text , "." )
self . edit . setText ( text )
self . dialog_path = text
self . _act_on_change ( )
|
def list_inverse_take ( list_ , index_list ) :
    r"""Scatter ``list_`` back into the unsorted order described by ``index_list``.

    ``index_list`` gives, for each position of the sorted domain, the index
    it came from in the unsorted domain; the result places every item back
    at its original slot.  Logically equivalent to
    ``ut.take(list_, ut.argsort(index_list))``, but faster.

    Args:
        list_ (list): list in sorted domain
        index_list (list): index list of the unsorted domain

    Returns:
        list: the input list in the unsorted domain

    CommandLine:
        python -m utool.util_list --test-list_inverse_take

    Example:
        >>> # ENABLE_DOCTEST
        >>> from utool.util_list import *  # NOQA
        >>> import utool as ut
        >>> rank_list = [3, 2, 4, 1, 9, 2]
        >>> prop_list = [0, 1, 2, 3, 4, 5]
        >>> index_list = ut.argsort(rank_list)
        >>> sorted_prop_list = ut.take(prop_list, index_list)
        >>> output_list_ = list_inverse_take(sorted_prop_list, index_list)
        >>> assert output_list_ == prop_list
    """
    unsorted = [None] * len(index_list)
    for slot, value in zip(index_list, list_):
        unsorted[slot] = value
    return unsorted
|
def build_opener ( self ) :
    """Build a urllib2 opener, wiring in a proxy handler when one is configured.

    Uses the Python 2 ``urllib2`` API.

    @return: OpenerDirector
    """
    http_handler = urllib2 . HTTPHandler ( )
    # debuglevel = self . transport . debug
    # No proxy configured: a plain HTTP opener is enough.
    if util . empty ( self . transport . proxy_url ) :
        return urllib2 . build_opener ( http_handler )
    # Map a scheme key to the proxy URL.  NOTE(review): the key is the first
    # four characters of the URL ("http"), so an "https://..." proxy URL would
    # yield the truncated key "http" -- confirm this is intended.
    proxy_handler = urllib2 . ProxyHandler ( { self . transport . proxy_url [ : 4 ] : self . transport . proxy_url } )
    return urllib2 . build_opener ( http_handler , proxy_handler )
|
def remote_sys_name_uneq_store ( self , remote_system_name ) :
    """Persist *remote_system_name* when it differs from the cached value.

    Returns True when the stored name was updated, False otherwise.
    """
    if remote_system_name == self.remote_system_name:
        return False
    self.remote_system_name = remote_system_name
    return True
|
def endElement ( self , name ) :
    """Close the element ``name``: parse its content and attach it to the parent.

    A node with child elements contributes ``self.current`` as its value; a
    leaf node contributes its accumulated character data, parsed via
    ``_parse_node_data``.  The parent context saved by the matching start
    handler is then restored from the stack.
    """
    if self . current : # we have nested elements
        obj = self . current
    else : # text only node: concatenate the buffered character data
        text = '' . join ( self . chardata ) . strip ( )
        obj = self . _parse_node_data ( text )
    # Restore the parent node and its chardata buffer, then hang the finished
    # element off the parent.
    newcurrent , self . chardata = self . stack . pop ( )
    self . current = self . _element_to_node ( newcurrent , name , obj )
|
def version ( which = "num" ) :
    """Get the Coconut version.

    *which* selects the flavour (a key of ``VERSIONS``); an unknown key
    raises ``CoconutException`` listing the valid choices.
    """
    if which not in VERSIONS:
        raise CoconutException(
            "invalid version type " + ascii(which),
            extra="valid versions are " + ", ".join(VERSIONS),
        )
    return VERSIONS[which]
|
def enabled ( name , ** kwargs ) :
    '''Check to see if the named service is enabled to start on boot

    CLI Example:

    .. code-block:: bash

        salt '*' service.enabled <service name>
    '''
    # The property that reveals whether a service is enabled can only be
    # queried using the full FMRI, so resolve the FMRI first and then ask
    # svccfg for the general/enabled property.
    fmri_cmd = '/usr/bin/svcs -H -o FMRI {0}'.format(name)
    fmri = __salt__['cmd.run'](fmri_cmd, python_shell=False)
    cmd = '/usr/sbin/svccfg -s {0} listprop general/enabled'.format(fmri)
    comps = __salt__['cmd.run'](cmd, python_shell=False).split()
    # Expected output shape: "general/enabled boolean true".  Guarding on the
    # length also treats empty/error output as "not enabled" instead of
    # raising IndexError, and the comparison replaces the verbose
    # if/return-True/else/return-False block.
    return len(comps) > 2 and comps[2] == 'true'
|
def pick_action_todo ( ) :
    """Pick the next action for the AI player; only for testing and AI -
    user will usually choose an action.

    Walks ``things_to_do`` in order; the first entry that wins its dice roll
    and whose character resource ("energy" or "gold") exceeds the entry's
    threshold is returned.  Falls back to ``actions[3]``.

    Sort of works
    """
    for ndx , todo in enumerate ( things_to_do ) : # print ( ' todo = ' , todo )
        # Probabilistic gate: the entry's "chance" must win a dice roll.
        if roll_dice ( todo [ "chance" ] ) :
            cur_act = actions [ get_action_by_name ( todo [ "name" ] ) ]
            # Resource gates: the character needs strictly more than
            # WHERE_VAL of the named resource for the action to be picked.
            if todo [ "WHERE_COL" ] == "energy" and my_char [ "energy" ] > todo [ "WHERE_VAL" ] :
                return cur_act
            if todo [ "WHERE_COL" ] == "gold" and my_char [ "gold" ] > todo [ "WHERE_VAL" ] :
                return cur_act
    # Nothing qualified: default action (index 3).
    return actions [ 3 ]
|
def set ( self , section , option , value = '' ) :
    '''This is overridden from the RawConfigParser merely to change the
    default value for the ' value ' argument .'''
    # Validate first -- _string_check presumably rejects non-string values;
    # confirm against its definition.
    self . _string_check ( value )
    super ( GitConfigParser , self ) . set ( section , option , value )
|
def train_sub ( sess , x , y , bbox_preds , x_sub , y_sub , nb_classes , nb_epochs_s , batch_size , learning_rate , data_aug , lmbda , aug_batch_size , rng , img_rows = 28 , img_cols = 28 , nchannels = 1 ) :
    """This function creates the substitute by alternatively
    augmenting the training data and training the substitute.
    :param sess: TF session
    :param x: input TF placeholder
    :param y: output TF placeholder
    :param bbox_preds: output of black-box model predictions
    :param x_sub: initial substitute training data
    :param y_sub: initial substitute training labels
    :param nb_classes: number of output classes
    :param nb_epochs_s: number of epochs to train substitute model
    :param batch_size: size of training batches
    :param learning_rate: learning rate for training
    :param data_aug: number of times substitute training data is augmented
    :param lmbda: lambda from arxiv.org/abs/1602.02697
    :param aug_batch_size: batch size used during Jacobian augmentation
    :param rng: numpy.random.RandomState instance
    :param img_rows: unused here -- TODO confirm callers rely on it
    :param img_cols: unused here -- TODO confirm callers rely on it
    :param nchannels: unused here -- TODO confirm callers rely on it
    :return: tuple (model_sub, preds_sub) -- the substitute model and its
        logits tensor
    """
    # Define TF model graph for the substitute model
    model_sub = ModelSubstitute ( 'model_s' , nb_classes )
    preds_sub = model_sub . get_logits ( x )
    loss_sub = CrossEntropy ( model_sub , smoothing = 0 )
    print ( "Defined TensorFlow model graph for the substitute." )
    # Define the Jacobian symbolically using TensorFlow
    grads = jacobian_graph ( preds_sub , x , nb_classes )
    # Train the substitute and augment dataset alternatively
    for rho in xrange ( data_aug ) :
        print ( "Substitute training epoch #" + str ( rho ) )
        train_params = { 'nb_epochs' : nb_epochs_s , 'batch_size' : batch_size , 'learning_rate' : learning_rate }
        # Silence per-batch training logs from cleverhans while training.
        with TemporaryLogLevel ( logging . WARNING , "cleverhans.utils.tf" ) :
            train ( sess , loss_sub , x_sub , to_categorical ( y_sub , nb_classes ) , init_all = False , args = train_params , rng = rng , var_list = model_sub . get_params ( ) )
        # If we are not at last substitute training iteration , augment dataset
        if rho < data_aug - 1 :
            print ( "Augmenting substitute training data." )
            # Perform the Jacobian augmentation.  The sign of the step flips
            # after the first three augmentation rounds.
            lmbda_coef = 2 * int ( int ( rho / 3 ) != 0 ) - 1
            x_sub = jacobian_augmentation ( sess , x , x_sub , y_sub , grads , lmbda_coef * lmbda , aug_batch_size )
            print ( "Labeling substitute training data." )
            # Label the newly generated synthetic points using the black-box.
            # y_sub is doubled as a placeholder; the second half is
            # overwritten with the black-box labels computed below.
            y_sub = np . hstack ( [ y_sub , y_sub ] )
            x_sub_prev = x_sub [ int ( len ( x_sub ) / 2 ) : ]
            eval_params = { 'batch_size' : batch_size }
            bbox_val = batch_eval ( sess , [ x ] , [ bbox_preds ] , [ x_sub_prev ] , args = eval_params ) [ 0 ]
            # Note here that we take the argmax because the adversary
            # only has access to the label ( not the probabilities ) output
            # by the black - box model
            y_sub [ int ( len ( x_sub ) / 2 ) : ] = np . argmax ( bbox_val , axis = 1 )
    return model_sub , preds_sub
|
def close ( self ) :
    """Closes the gzip with care to handle multiple members.

    Safe to call more than once: a closed stream has ``fileobj`` set to None.
    """
    if self . fileobj is None :
        return
    if self . mode == WRITE :
        # Finish the member currently being written before dropping the
        # underlying file object.
        self . close_member ( )
        self . fileobj = None
    elif self . mode == READ :
        self . fileobj = None
    # Only close the file object that this instance opened itself.
    if self . myfileobj :
        self . myfileobj . close ( )
        self . myfileobj = None
|
def jsonapi ( f ) :
    """Declare the view as a JSON API method.

    This converts the view return value into a :cls:JsonResponse.
    The following return types are supported:
        - tuple: a tuple of (response, status, headers)
        - any other object is converted to JSON
    """
    @wraps(f)
    def jsonified_view(*args, **kwargs):
        return make_json_response(f(*args, **kwargs))
    return jsonified_view
|
def pop_max_vocab_size ( params : Params ) -> Union [ int , Dict [ str , int ] ] :
    """Pop ``max_vocab_size`` off ``params`` and normalise its type.

    ``max_vocab_size`` limits the size of the vocabulary, not including the
    @@UNKNOWN@@ token.  The popped value may be absent (returns ``None``), a
    nested ``Params`` standing in for a ``Dict[str, int]`` (returned as a
    plain dict), or an int / int-like string such as one produced by
    environment-variable substitution (returned as ``int``).
    """
    size = params.pop("max_vocab_size", None)
    if size is None:
        return None
    if isinstance(size, Params):
        # The Dict[str, int] case: unwrap the nested Params object.
        return size.as_dict()
    # The int / str case.
    return int(size)
|
def endElementNS ( self , name , qname ) :
    """End a previously started element . ` name ` must be a ` ` ( namespace _ uri ,
    localname ) ` ` tuple and ` qname ` is ignored ."""
    # A namespace prefix declared for the next element must have been
    # consumed by a matching start before an end is legal.
    if self . _ns_prefixes_floating_out :
        raise RuntimeError ( "namespace prefix has not been closed" )
    if self . _pending_start_element == name :
        # The start tag has not been flushed yet: collapse start+end into a
        # self-closing tag.
        self . _pending_start_element = False
        self . _write ( b"/>" )
    else :
        self . _write ( b"</" )
        self . _write ( self . _qname ( name ) . encode ( "utf-8" ) )
        self . _write ( b">" )
    # Restore the namespace bookkeeping captured by the matching start.
    self . _curr_ns_map , self . _ns_prefixes_floating_out , self . _ns_counter = self . _ns_map_stack . pop ( )
|
def query ( self , query , stacked = False ) :
    """TRANSLATE JSON QUERY EXPRESSION ON SINGLE TABLE TO SQL QUERY

    Returns the post-processed query result (also stored on ``query.data``).
    """
    # Local import, presumably to avoid a circular import -- TODO confirm.
    from jx_base . query import QueryOp
    query = QueryOp . wrap ( query )
    sql , post = self . _subquery ( query , isolate = False , stacked = stacked )
    # ``post`` is applied to the generated SQL -- presumably it executes it
    # and shapes the rows; confirm against ``_subquery``.
    query . data = post ( sql )
    return query . data
|
def submit ( ** kwargs ) :
    """Shortcut that takes an alert to evaluate and makes the appropriate API
    call based on the results.

    :param kwargs: A list of keyword arguments
    :type kwargs: dict
    """
    # Both keys are mandatory; report the first one missing.
    for required, message in (('alert', 'Alert required'), ('value', 'Value required')):
        if required not in kwargs:
            raise ValueError(message)
    # The alert callable decides whether the value is in a failing state;
    # the remaining kwargs (including 'value') are forwarded either way.
    alert = kwargs.pop('alert')
    handler = fail if alert(kwargs['value']) else ok
    handler(kwargs)
|
def trades ( self , pair , limit = 150 , ignore_invalid = 0 ) :
    """This method provides the information about the last trades .
    : param str or iterable pair : pair ( ex . ' btc _ usd ' or [ ' btc _ usd ' , ' eth _ usd ' ] )
    : param limit : how many orders should be displayed ( 150 by default , max 5000 )
    : param int ignore _ invalid : ignore non - existing pairs"""
    # Thin delegation to the generic public-API helper.
    return self . _public_api_call ( 'trades' , pair = pair , limit = limit , ignore_invalid = ignore_invalid )
|
def _sameFrag ( f , g ) :
"""returns 1 if two ParaFrags map out the same"""
|
if ( hasattr ( f , 'cbDefn' ) or hasattr ( g , 'cbDefn' ) or hasattr ( f , 'lineBreak' ) or hasattr ( g , 'lineBreak' ) ) :
return 0
for a in ( 'fontName' , 'fontSize' , 'textColor' , 'backColor' , 'rise' , 'underline' , 'strike' , 'link' ) :
if getattr ( f , a , None ) != getattr ( g , a , None ) :
return 0
return 1
|
def iter_segments ( obj , neurite_filter = None , neurite_order = NeuriteIter . FileOrder ) :
    '''Return an iterator to the segments in a collection of neurites
    Parameters :
        obj : neuron , population , neurite , section , or iterable containing neurite objects
        neurite_filter : optional top level filter on properties of neurite neurite objects
        neurite_order : order upon which neurite should be iterated . Values :
            - NeuriteIter . FileOrder : order of appearance in the file
            - NeuriteIter . NRN : NRN simulator order : soma - > axon - > basal - > apical
    Note :
        This is a convenience function provided for generic access to
        neuron segments . It may have a performance overhead WRT custom - made
        segment analysis functions that leverage numpy and section - wise iteration .'''
    # A bare Section iterates as itself; anything else is expanded to its
    # sections via iter_sections.
    sections = iter ( ( obj , ) if isinstance ( obj , Section ) else iter_sections ( obj , neurite_filter = neurite_filter , neurite_order = neurite_order ) )
    # A segment is each consecutive pair of points within a section.
    return chain . from_iterable ( zip ( sec . points [ : - 1 ] , sec . points [ 1 : ] ) for sec in sections )
|
def _draw_text ( self , pos , text , font , ** kw ) :
"""Remember a single drawable tuple to paint later ."""
|
self . drawables . append ( ( pos , text , font , kw ) )
|
def sdiff ( self , * other_sets ) :
    """Difference between Sets.

    Returns the members of this set that appear in none of ``other_sets``
    (Redis SDIFF semantics; the previous docstring incorrectly described
    this as a union/intersection).  Uses Redis.sdiff.
    """
    return self . db . sdiff ( [ self . key ] + [ s . key for s in other_sets ] )
|
def get_panels ( self ) :
    """Return the Panel instances registered with this dashboard, in order.

    Panel grouping information is not included.
    """
    # Flatten the group mapping, preserving the groups' iteration order.
    return [panel
            for group in self.get_panel_groups().values()
            for panel in group]
|
def _subscribe_all ( self ) :
    """Subscribes all streams to their input .
    Subscribes all plugins to all their inputs .
    Subscribes all plugin outputs to the plugin ."""
    for stream in ( self . inbound_streams + self . outbound_streams ) :
        for input_ in stream . inputs :
            # Skip unset inputs and bare ints -- ints presumably denote a
            # different kind of input handled elsewhere; TODO confirm.
            if not type ( input_ ) is int and input_ is not None :
                self . _subscribe ( stream , input_ )
    for plugin in self . plugins :
        for input_ in plugin . inputs :
            self . _subscribe ( plugin , input_ )
        for output in plugin . outputs : # Find output stream instance
            subscriber = next ( ( x for x in self . outbound_streams if x . name == output ) , None )
            if subscriber is None :
                # Misconfigured output name: warn rather than fail.
                log . warn ( 'The outbound stream {} does not ' 'exist so will not receive messages ' 'from {}' . format ( output , plugin ) )
            else :
                self . _subscribe ( subscriber , plugin . name )
|
def hessian ( self , x , y , kwargs , diff = diff ) :
    """computes the differentials f_xx, f_yy, f_xy from f_x and f_y
    using one-sided (forward) finite differences of the deflection angles.

    NOTE(review): the default step is the module-level ``diff`` captured at
    function definition time, not looked up per call.

    :return: f_xx, f_xy, f_yx, f_yy
    """
    # Deflection at the evaluation point and at points stepped in x and y.
    alpha_ra , alpha_dec = self . alpha ( x , y , kwargs )
    alpha_ra_dx , alpha_dec_dx = self . alpha ( x + diff , y , kwargs )
    alpha_ra_dy , alpha_dec_dy = self . alpha ( x , y + diff , kwargs )
    # Forward-difference approximations of the four partial derivatives.
    dalpha_rara = ( alpha_ra_dx - alpha_ra ) / diff
    dalpha_radec = ( alpha_ra_dy - alpha_ra ) / diff
    dalpha_decra = ( alpha_dec_dx - alpha_dec ) / diff
    dalpha_decdec = ( alpha_dec_dy - alpha_dec ) / diff
    f_xx = dalpha_rara
    f_yy = dalpha_decdec
    f_xy = dalpha_radec
    f_yx = dalpha_decra
    return f_xx , f_xy , f_yx , f_yy
|
def search_dependencies ( self ) :
    """Returns a list of other modules that this module and its members and executables
    depend on in order to run correctly . This differs from the self . dependencies attribute
    that just has explicit module names from the ' use ' clauses ."""
    result = [ self . module . name ]
    # First we look at the explicit use references from this module and all
    # its dependencies until the chain terminates .
    stack = self . needs
    while len ( stack ) > 0 :
        module = stack . pop ( )
        if module in result :
            continue
        self . parent . load_dependency ( module , True , True , False )
        if module in self . parent . modules :
            for dep in self . parent . modules [ module ] . needs :
                # Dependencies may be qualified ("module.member"); only the
                # module part matters here.
                modname = dep . split ( "." ) [ 0 ]
                if modname not in result :
                    result . append ( modname )
                    if modname not in stack :
                        stack . append ( modname )
    # Add any incidentals from the automatic construction of code . These can be from
    # executables that use special precision types without importing them or from
    # derived types . Same applies to the local members of this module .
    for ekey , anexec in list ( self . executables . items ( ) ) :
        for dep in anexec . search_dependencies ( ) :
            if dep is not None and dep not in result :
                result . append ( dep )
    for member in list ( self . members . values ( ) ) :
        dep = member . dependency ( )
        if dep is not None and dep not in result :
            result . append ( dep )
    return result
|
def get_expr_summ_id ( self , experiment_id , time_slide_id , veto_def_name , datatype , sim_proc_id = None ) :
    """Return the expr_summ_id of the first row whose experiment_id,
    time_slide_id, veto_def_name, and datatype match the given values.

    If ``sim_proc_id`` is given, only the injection run with that
    ``sim_proc_id`` matches.  Returns None when no row matches.
    """
    wanted = (experiment_id, time_slide_id, veto_def_name, datatype, sim_proc_id)
    for row in self:
        key = (row.experiment_id, row.time_slide_id, row.veto_def_name,
               row.datatype, row.sim_proc_id)
        if key == wanted:
            return row.experiment_summ_id
    # No matching row in the table.
    return None
|
def jsonify_timedelta ( value ) :
    """Converts a ``datetime.timedelta`` to an ISO 8601 duration
    string for JSON-ification.

    :param value: something to convert
    :type value: datetime.timedelta
    :return: the value after conversion
    :rtype: unicode
    """
    assert isinstance(value, datetime.timedelta)
    # Decompose the total seconds into days / hours / minutes / seconds.
    total = value.total_seconds()
    minutes, seconds = divmod(total, 60)
    hours, minutes = divmod(minutes, 60)
    days, hours = divmod(hours, 24)
    days, hours, minutes = int(days), int(hours), int(minutes)
    seconds = round(seconds, 6)
    # Date portion: only present when there are whole days.
    date_part = '%sD' % days if days else ''
    # Time portion: a unit is emitted once any larger unit has been emitted.
    time_part = u'T'
    show = bool(date_part) or bool(hours)
    if show:
        time_part += '{:02}H'.format(hours)
    show = show or bool(minutes)
    if show:
        time_part += '{:02}M'.format(minutes)
    if seconds.is_integer():
        secs = '{:02}'.format(int(seconds))
    else:
        # 9 chars long with leading 0 and 6 digits after the decimal point,
        # then drop trailing zeros.
        secs = ('%09.6f' % seconds).rstrip('0')
    time_part += '{}S'.format(secs)
    return u'P' + date_part + time_part
|
def add ( self , submission , archive , _ ) :
    """Add a new submission to the repo ( it is put on the queue and will be
    saved asynchronously )."""
    # Tuple layout consumed by the async saver: (submission, result, grade,
    # problems, tests, custom, archive).
    self . queue . put ( ( submission , submission [ "result" ] , submission [ "grade" ] , submission [ "problems" ] , submission [ "tests" ] , submission [ "custom" ] , archive ) )
|
def redirect_from_callback ( self ) :
    '''Redirect to the callback URL after a successful authentication .'''
    # The original location is carried through the OAuth dance in ``state``.
    state = toolkit . request . params . get ( 'state' )
    came_from = get_came_from ( state )
    # Issue an HTTP 302 back to where the user started.
    toolkit . response . status = 302
    toolkit . response . location = came_from
|
def build_html_listbox ( lst , nme ) :
    """Return the HTML for a multi-select listbox named *nme*.

    Each entry of *lst* becomes an ``<option>`` row (rendered via ``str``).

    :param lst: iterable of option values
    :param nme: value for the ``name`` attribute of the ``<select>``
    :return: HTML snippet as a string
    """
    # Build the option rows once and join them, instead of the original
    # quadratic string concatenation in a loop (which also used the
    # ambiguous variable name ``l``).
    options = ''.join(' <option>' + str(item) + '</option>\n' for item in lst)
    return '<select name="' + nme + '" multiple="multiple">\n' + options + '</select>\n'
|
def commits ( self , branch , since = 0 , to = None ) :
    """For given branch return a dict of commits keyed by hexsha.

    Each commit contains basic information about itself.

    :param branch: git branch
    :type branch: [str]{}
    :param since: minimal timestamp for commit's commit date
    :type since: int
    :param to: maximal timestamp for commit's commit date; defaults to
        "now + 1 day", computed per call
    :type to: int
    """
    # Bug fix: the original default ``to=int(time.time()) + 86400`` was
    # evaluated once at import time, freezing the upper bound for the
    # lifetime of the process.  Compute it at call time instead.
    if to is None:
        to = int(time.time()) + 86400
    # checkout the branch
    self.repo.create_head(branch, "refs/remotes/origin/%s" % branch)
    since_str = datetime.datetime.fromtimestamp(since).strftime('%Y-%m-%d %H:%M:%S')
    commits = {}
    for commit in self.repo.iter_commits(branch, since=since_str):
        # filter out all commits younger than ``to``
        if commit.committed_date > to:
            continue
        commits[commit.hexsha] = self._commitData(commit)
    return commits
|
def list_prefix ( arg , opts , shell_opts ) :
    """List prefixes matching ' arg '.

    Performs a paged smart search against the NIPAP backend and prints the
    matching prefixes as a column-formatted table on stdout.
    """
    search_string = ''
    if type ( arg ) == list or type ( arg ) == tuple :
        search_string = ' ' . join ( arg )
    v = get_vrf ( opts . get ( 'vrf_rt' ) , default_var = 'default_list_vrf_rt' , abort = True )
    if v . rt == 'all' :
        vrf_text = 'any VRF'
        vrf_q = None
    else :
        # Restrict the search to the requested VRF.
        vrf_text = vrf_format ( v )
        vrf_q = { 'operator' : 'equals' , 'val1' : 'vrf_rt' , 'val2' : v . rt }
    print ( "Searching for prefixes in %s..." % vrf_text )
    # All displayable columns and their header titles; widths are filled in
    # later from the first result page.
    col_def = { 'added' : { 'title' : 'Added' } , 'alarm_priority' : { 'title' : 'Alarm Prio' } , 'authoritative_source' : { 'title' : 'Auth source' } , 'children' : { 'title' : 'Children' } , 'comment' : { 'title' : 'Comment' } , 'customer_id' : { 'title' : 'Customer ID' } , 'description' : { 'title' : 'Description' } , 'expires' : { 'title' : 'Expires' } , 'free_addresses' : { 'title' : 'Free addresses' } , 'monitor' : { 'title' : 'Monitor' } , 'last_modified' : { 'title' : 'Last mod' } , 'node' : { 'title' : 'Node' } , 'order_id' : { 'title' : 'Order ID' } , 'pool_name' : { 'title' : 'Pool name' } , 'prefix' : { 'title' : 'Prefix' } , 'status' : { 'title' : 'Status' } , 'tags' : { 'title' : '#' } , 'total_addresses' : { 'title' : 'Total addresses' } , 'type' : { 'title' : '' } , 'used_addresses' : { 'title' : 'Used addresses' } , 'vlan' : { 'title' : 'VLAN' } , 'vrf_rt' : { 'title' : 'VRF RT' } , }
    # default columns
    columns = [ 'vrf_rt' , 'prefix' , 'type' , 'tags' , 'node' , 'order_id' , 'customer_id' , 'description' ]
    # custom columns ? prefer shell opts , then look in config file
    custom_columns = None
    if shell_opts . columns and len ( shell_opts . columns ) > 0 :
        custom_columns = shell_opts . columns
    elif cfg . get ( 'global' , 'prefix_list_columns' ) :
        custom_columns = cfg . get ( 'global' , 'prefix_list_columns' )
    # parse custom columns
    if custom_columns : # Clear out default columns , unless user whishes to append
        if custom_columns [ 0 ] != '+' :
            columns = [ ]
        # read in custom columns; the csv reader honours backslash escapes
        for col in list ( csv . reader ( [ custom_columns . lstrip ( '+' ) or '' ] , escapechar = '\\' ) ) [ 0 ] :
            col = col . strip ( )
            if col not in col_def :
                print ( "Invalid column:" , col , file = sys . stderr )
                sys . exit ( 1 )
            columns . append ( col )
    offset = 0
    # small initial limit for " instant " result
    limit = 50
    prefix_str = ""
    # Paged fetch loop: keep requesting until a short page signals the end.
    while True :
        res = Prefix . smart_search ( search_string , { 'parents_depth' : - 1 , 'include_neighbors' : True , 'offset' : offset , 'max_result' : limit } , vrf_q )
        if offset == 0 : # first time in loop ?
            if shell_opts . show_interpretation :
                print ( "Query interpretation:" )
                _parse_interp_prefix ( res [ 'interpretation' ] )
            if res [ 'error' ] :
                print ( "Query failed: %s" % res [ 'error_message' ] )
                return
            if len ( res [ 'result' ] ) == 0 :
                print ( "No addresses matching '%s' found." % search_string )
                return
            # guess column width by looking at the initial result set
            for p in res [ 'result' ] :
                for colname , col in col_def . items ( ) :
                    val = getattr ( p , colname , '' )
                    col [ 'width' ] = max ( len ( colname ) , col . get ( 'width' , 0 ) , len ( str ( val ) ) )
                # special handling of a few columns
                col_def [ 'vrf_rt' ] [ 'width' ] = max ( col_def [ 'vrf_rt' ] . get ( 'width' , 8 ) , len ( str ( p . vrf . rt ) ) )
                col_def [ 'prefix' ] [ 'width' ] = max ( col_def [ 'prefix' ] . get ( 'width' , 0 ) - 12 , p . indent * 2 + len ( p . prefix ) ) + 12
                try :
                    col_def [ 'pool_name' ] [ 'width' ] = max ( col_def [ 'pool_name' ] . get ( 'width' , 8 ) , len ( str ( p . pool . name ) ) )
                except :
                    pass
            # override certain column widths
            col_def [ 'type' ] [ 'width' ] = 1
            col_def [ 'tags' ] [ 'width' ] = 2
            col_header_data = { }
            # build prefix formatting string
            for colname , col in [ ( k , col_def [ k ] ) for k in columns ] :
                prefix_str += "{%s:<%d} " % ( colname , col [ 'width' ] )
                col_header_data [ colname ] = col [ 'title' ]
            column_header = prefix_str . format ( ** col_header_data )
            print ( column_header )
            print ( "" . join ( "=" for i in range ( len ( column_header ) ) ) )
        for p in res [ 'result' ] :
            if p . display == False :
                continue
            col_data = { }
            try :
                for colname , col in col_def . items ( ) :
                    col_data [ colname ] = str ( getattr ( p , colname , None ) )
                # overwrite some columns due to special handling
                col_data [ 'tags' ] = '-'
                if len ( p . tags ) > 0 :
                    col_data [ 'tags' ] = '#%d' % len ( p . tags )
                try :
                    col_data [ 'pool_name' ] = p . pool . name
                except :
                    pass
                # Indent the prefix to show the tree structure.
                col_data [ 'prefix' ] = "" . join ( " " for i in range ( p . indent ) ) + p . display_prefix
                col_data [ 'type' ] = p . type [ 0 ] . upper ( )
                col_data [ 'vrf_rt' ] = p . vrf . rt or '-'
                print ( prefix_str . format ( ** col_data ) )
            except UnicodeEncodeError as e :
                print ( "\nCrazy encoding for prefix %s\n" % p . prefix , file = sys . stderr )
        # A short page means we have seen the last result.
        if len ( res [ 'result' ] ) < limit :
            break
        offset += limit
        # let consecutive limit be higher to tax the XML - RPC backend less
        limit = 200
|
def _signal_handler ( self , signum , frame ) :
    """Method called when handling signals.

    Optionally dumps the generated Home Assistant configuration to the file
    given on the command line, then closes the connection and exits.
    """
    if self . _options . config :
        with open ( self . _options . config , "w" ) as cfg :
            yaml . dump ( self . _home_assistant_config ( ) , cfg )
        print ( "Dumped home assistant configuration at" , self . _options . config )
    self . _connection . close ( )
    sys . exit ( 0 )
|
def pixelizeCatalog ( infiles , config , force = False ) :
    """Break catalog into chunks by healpix pixel.

    Parameters:
    -----------
    infiles : List of input files
    config  : Configuration file
    force   : Overwrite existing files (deprecated)

    Returns:
    --------
    None
    """
    nside_catalog = config['coords']['nside_catalog']
    nside_pixel = config['coords']['nside_pixel']
    coordsys = config['coords']['coordsys'].upper()
    outdir = mkdir(config['catalog']['dirname'])
    filenames = config.getFilenames()
    lon_field = config['catalog']['lon_field'].upper()
    lat_field = config['catalog']['lat_field'].upper()
    # ADW: It would probably be better (and more efficient) to do the
    # pixelizing and the new column insertion separately.
    for i, filename in enumerate(infiles):
        logger.info('(%i/%i) %s' % (i + 1, len(infiles), filename))
        data = fitsio.read(filename)
        logger.info("%i objects found" % len(data))
        if not len(data):
            continue
        # Bug fix: materialize as a list.  Under Python 3 ``map`` returns a
        # one-shot iterator, so the two membership tests below would consume
        # it and the second test would silently give the wrong answer.
        columns = list(map(str.upper, data.dtype.names))
        names, arrs = [], []
        if (lon_field in columns) and (lat_field in columns):
            lon, lat = data[lon_field], data[lat_field]
        elif coordsys == 'GAL':
            msg = "Columns '%s' and '%s' not found." % (lon_field, lat_field)
            msg += "\nConverting from RA,DEC"
            logger.warning(msg)
            lon, lat = cel2gal(data['RA'], data['DEC'])
            names += [lon_field, lat_field]
            arrs += [lon, lat]
        elif coordsys == 'CEL':
            msg = "Columns '%s' and '%s' not found." % (lon_field, lat_field)
            msg += "\nConverting from GLON,GLAT"
            # Bug fix: the message was built but never emitted in this
            # branch (the GAL branch logs it).
            logger.warning(msg)
            lon, lat = gal2cel(data['GLON'], data['GLAT'])
            names += [lon_field, lat_field]
            arrs += [lon, lat]
        # Healpix pixel indices at the catalog and pixel resolutions.
        cat_pix = ang2pix(nside_catalog, lon, lat)
        pix_pix = ang2pix(nside_pixel, lon, lat)
        cat_pix_name = 'PIX%i' % nside_catalog
        pix_pix_name = 'PIX%i' % nside_pixel
        try:
            names += [cat_pix_name, pix_pix_name]
            arrs += [cat_pix, pix_pix]
            data = mlab.rec_append_fields(data, names=names, arrs=arrs)
        except ValueError as e:
            logger.warn(str(e) + '; not adding column.')
        # data [ cat _ pix _ name ] = cat _ pix
        # data [ pix _ pix _ name ] = pix _ pix
        for pix in np.unique(cat_pix):
            logger.debug("Processing pixel %s" % pix)
            # Rows belonging to this catalog-level healpix pixel.
            arr = data[cat_pix == pix]
            outfile = filenames.data['catalog'][pix]
            if not os.path.exists(outfile):
                logger.debug("Creating %s" % outfile)
                out = fitsio.FITS(outfile, mode='rw')
                out.write(arr)
                hdr = healpix.header_odict(nside=nside_catalog, coord=coordsys[0])
                for key in ['PIXTYPE', 'ORDERING', 'NSIDE', 'COORDSYS']:
                    out[1].write_key(*list(hdr[key].values()))
                out[1].write_key('PIX', pix, comment='HEALPIX pixel for this file')
            else:
                out = fitsio.FITS(outfile, mode='rw')
                out[1].append(arr)
            logger.debug("Writing %s" % outfile)
            out.close()
|
def process ( self , session : AppSession ) :
    '''Populate the visits from the CDX into the URL table .'''
    if not session . args . warc_dedup :
        return
    iterable = wpull . warc . format . read_cdx ( session . args . warc_dedup , encoding = session . args . local_encoding or 'utf-8' )
    missing_url_msg = _ ( 'The URL ("a") is missing from the CDX file.' )
    missing_id_msg = _ ( 'The record ID ("u") is missing from the CDX file.' )
    missing_checksum_msg = _ ( 'The SHA1 checksum ("k") is missing from the CDX file.' )
    counter = 0
    def visits ( ) :
        # Generator feeding (url, record_id, checksum) triples into the URL
        # table; the required fields are validated on the first record only.
        nonlocal counter
        checked_fields = False
        for record in iterable :
            if not checked_fields :
                if 'a' not in record :
                    raise ValueError ( missing_url_msg )
                if 'u' not in record :
                    raise ValueError ( missing_id_msg )
                if 'k' not in record :
                    raise ValueError ( missing_checksum_msg )
                checked_fields = True
            yield record [ 'a' ] , record [ 'u' ] , record [ 'k' ]
            counter += 1
    url_table = session . factory [ 'URLTable' ]
    # add_visits drains the generator, which is what advances ``counter``;
    # the log line below therefore reports the final record count.
    url_table . add_visits ( visits ( ) )
    _logger . info ( __ ( gettext . ngettext ( 'Loaded {num} record from CDX file.' , 'Loaded {num} records from CDX file.' , counter ) , num = counter ) )
|
def close ( self ) :
    """Release references to collected error objects so memory can be freed."""
    for collection in ('_target_context_errors', '_query_context_errors', '_general_errors'):
        setattr(self, collection, None)
    # Alignment errors hold their own resources: close each before dropping
    # the list.
    for alignment_error in self._alignment_errors:
        alignment_error.close()
    self._alignment_errors = None
|
def from_socket ( controller , host = None , port = None , track_path = None , log_level = logging . ERROR ) :
    """Create rocket instance using socket connector.

    :param controller: controller object driven by the sync data
    :param host: host passed to ``SocketConnector``
    :param port: port passed to ``SocketConnector``
    :param track_path: where track data is stored
    :param log_level: logging level for the rocket instance
    :return: a ``Rocket`` whose ``connector`` is a ``SocketConnector``
    """
    rocket = Rocket ( controller , track_path = track_path , log_level = log_level )
    # The connector is given the rocket's own tracks collection.
    rocket . connector = SocketConnector ( controller = controller , tracks = rocket . tracks , host = host , port = port )
    return rocket
|
def _build_session ( username , password , trans_label = None ) :
    '''Create a session to be used when connecting to iControl REST.'''
    session = requests.session()
    session.auth = (username, password)
    # BIG-IP devices commonly present self-signed certificates.
    session.verify = False
    session.headers.update({'Content-Type': 'application/json'})
    if trans_label:
        # pull the trans id from the grain; attaching it joins this session
        # to the named transaction (a falsy id yields a None header, as
        # before).
        trans_id = __salt__['grains.get']('bigip_f5_trans:{label}'.format(label=trans_label))
        session.headers.update({'X-F5-REST-Coordination-Id': trans_id or None})
    return session
|
def allow_domain ( self , domain , to_ports = None , secure = True ) :
    """Allows access from ` ` domain ` ` , which may be either a full
    domain name , or a wildcard ( e . g . , ` ` * . example . com ` ` , or simply
    ` ` * ` ` ) . Due to security concerns , it is strongly recommended
    that you use explicit domains rather than wildcards .
    For socket policy files , pass a list of ports or port ranges
    as the keyword argument ` ` to _ ports ` ` . As with ` ` domain ` ` , a
    wildcard value - - ` ` * ` ` - - will allow all ports .
    To disable Flash ' s requirement of security matching ( e . g . ,
    retrieving a policy via HTTPS will require that SWFs also be
    retrieved via HTTPS ) , pass ` ` secure = False ` ` . Due to security
    concerns , it is strongly recommended that you not disable
    this .

    Raises TypeError when the policy's site-control forbids new grants.
    """
    if self . site_control == SITE_CONTROL_NONE :
        raise TypeError ( METAPOLICY_ERROR . format ( "allow a domain" ) )
    # A later grant for the same domain overwrites any earlier one.
    self . domains [ domain ] = { 'to_ports' : to_ports , 'secure' : secure }
|
def read_metadata ( self , key ) :
    """return the meta data array for this key, or None when no metadata
    node exists for it"""
    # The nested getattr guards both a missing ``meta`` group and a missing
    # ``key`` entry under it.
    if getattr ( getattr ( self . group , 'meta' , None ) , key , None ) is not None :
        return self . parent . select ( self . _get_metadata_path ( key ) )
    return None
|
def buffer ( self , distance ) :
    """Return a cylinder primitive which covers the source cylinder
    by distance: radius is inflated by distance, height by twice
    the distance (one ``distance`` per end).

    Parameters
    ------------
    distance : float
      Distance to inflate cylinder radius and height

    Returns
    ------------
    buffered : Cylinder
      Cylinder primitive inflated by distance
    """
    distance = float(distance)
    primitive = self.primitive
    return Cylinder(height=primitive.height + 2.0 * distance,
                    radius=primitive.radius + distance,
                    transform=primitive.transform.copy())
|
def replace_namespaced_custom_object_status(self, group, version, namespace, plural, name, body, **kwargs):
    """replace status of the specified namespace scoped custom object

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.replace_namespaced_custom_object_status(group, version, namespace, plural, name, body, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str group: the custom resource's group (required)
    :param str version: the custom resource's version (required)
    :param str namespace: The custom resource's namespace (required)
    :param str plural: the custom resource's plural name. For TPRs this would be lowercase plural kind. (required)
    :param str name: the custom object's name (required)
    :param object body: (required)
    :return: object
        If the method is called asynchronously,
        returns the request thread.
    """
    # Callers of this convenience wrapper only want the response data,
    # never the full (data, status, headers) envelope.
    kwargs['_return_http_data_only'] = True
    # Sync and async requests both delegate to the *_with_http_info
    # helper; in the async case it hands back the request thread.
    return self.replace_namespaced_custom_object_status_with_http_info(
        group, version, namespace, plural, name, body, **kwargs)
|
def invert(self, pos=None):
    """Invert one or many bits from 0 to 1 or vice versa.

    pos -- Either a single bit position or an iterable of bit positions.
           Negative numbers are treated in the same way as slice indices.
           If omitted, every bit is inverted.

    Raises IndexError if pos < -self.len or pos >= self.len.
    """
    if pos is None:
        self._invert_all()
        return
    # collections.Iterable moved to collections.abc in Python 3.3 and
    # was removed from the collections namespace in Python 3.10, where
    # the old spelling raises AttributeError.  Prefer the abc location
    # and fall back for very old interpreters.
    try:
        from collections.abc import Iterable
    except ImportError:  # pragma: no cover - Python < 3.3
        from collections import Iterable
    if not isinstance(pos, Iterable):
        pos = (pos,)
    length = self.len
    for p in pos:
        if p < 0:
            # Negative positions count back from the end, like slices.
            p += length
        if not 0 <= p < length:
            raise IndexError("Bit position {0} out of range.".format(p))
        self._invert(p)
|
def U(self):
    """Returns a distributed matrix whose columns are the left
    singular vectors of the SingularValueDecomposition if computeU
    was set to be True; returns None otherwise."""
    java_matrix = self.call("U")
    if java_matrix is None:
        return None
    # Dispatch on the JVM-side class name to pick the Python wrapper.
    mat_name = java_matrix.getClass().getSimpleName()
    if mat_name == "RowMatrix":
        return RowMatrix(java_matrix)
    if mat_name == "IndexedRowMatrix":
        return IndexedRowMatrix(java_matrix)
    raise TypeError("Expected RowMatrix/IndexedRowMatrix got %s" % mat_name)
|
def _find_new_additions ( self ) :
"""Find any nodes in the graph that need to be added to the internal
queue and add them .
Callers must hold the lock ."""
|
for node , in_degree in self . graph . in_degree_iter ( ) :
if not self . _already_known ( node ) and in_degree == 0 :
self . inner . put ( ( self . _scores [ node ] , node ) )
self . queued . add ( node )
|
def get_my_contacts(self):
    """Fetches list of added contacts

    :return: List of contacts
    :rtype: list[Contact]
    """
    raw_contacts = self.wapi_functions.getMyContacts()
    contacts = []
    for raw_contact in raw_contacts:
        contacts.append(Contact(raw_contact, self))
    return contacts
|
def _systemd_notify_once ( ) :
"""Send notification once to Systemd that service is ready .
Systemd sets NOTIFY _ SOCKET environment variable with the name of the
socket listening for notifications from services .
This method removes the NOTIFY _ SOCKET environment variable to ensure
notification is sent only once ."""
|
notify_socket = os . getenv ( 'NOTIFY_SOCKET' )
if notify_socket :
if notify_socket . startswith ( '@' ) : # abstract namespace socket
notify_socket = '\0%s' % notify_socket [ 1 : ]
sock = socket . socket ( socket . AF_UNIX , socket . SOCK_DGRAM )
with contextlib . closing ( sock ) :
try :
sock . connect ( notify_socket )
sock . sendall ( b'READY=1' )
del os . environ [ 'NOTIFY_SOCKET' ]
except EnvironmentError :
LOG . debug ( "Systemd notification failed" , exc_info = True )
|
def snooze_alert(self, id, **kwargs):  # noqa: E501
    """Snooze a specific alert for some number of seconds  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.snooze_alert(id, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str id: (required)
    :param int seconds:
    :return: ResponseContainerAlert
        If the method is called asynchronously,
        returns the request thread.
    """
    # Callers only want the response payload, never the raw HTTP data.
    kwargs['_return_http_data_only'] = True
    # Sync and async both delegate to the *_with_http_info helper;
    # in the async case it hands back the request thread directly.
    return self.snooze_alert_with_http_info(id, **kwargs)  # noqa: E501
|
def _run_with_different_python(executable):
    """Run bootstrap.py with a different python executable"""
    # Re-invoke the current command line -- minus the virtualenv flag --
    # under the requested interpreter, and propagate its exit status.
    command = [executable]
    command.extend(arg for arg in sys.argv if arg != VIRTUALENV_OPTION)
    print("Running bootstrap.py with {0}".format(executable))
    exit(subprocess.call(command))
|
def initialise(self, projectname: str, xmlfile: str) -> None:
    """Initialise a *HydPy* project based on the given XML configuration
    file agreeing with `HydPyConfigMultipleRuns.xsd`.

    We use the `LahnH` project and its rather complex XML configuration
    file `multiple_runs.xml` as an example (module |xmltools| provides
    information on interpreting this file):

    >>> from hydpy.core.examples import prepare_full_example_1
    >>> prepare_full_example_1()
    >>> from hydpy import print_values, TestIO
    >>> from hydpy.exe.servertools import ServerState
    >>> state = ServerState()
    >>> with TestIO():  # doctest: +ELLIPSIS
    ...     state.initialise('LahnH', 'multiple_runs.xml')
    Start HydPy project `LahnH` (...).
    Read configuration file `multiple_runs.xml` (...).
    Interpret the defined options (...).
    Interpret the defined period (...).
    Read all network files (...).
    Activate the selected network (...).
    Read the required control files (...).
    Read the required condition files (...).
    Read the required time series files (...).

    After initialisation, all defined exchange items are available:

    >>> for item in state.parameteritems:
    ...     print(item)
    SetItem('alpha', 'hland_v1', 'control.alpha', 0)
    SetItem('beta', 'hland_v1', 'control.beta', 0)
    SetItem('lag', 'hstream_v1', 'control.lag', 0)
    SetItem('damp', 'hstream_v1', 'control.damp', 0)
    AddItem('sfcf_1', 'hland_v1', 'control.sfcf', 'control.rfcf', 0)
    AddItem('sfcf_2', 'hland_v1', 'control.sfcf', 'control.rfcf', 0)
    AddItem('sfcf_3', 'hland_v1', 'control.sfcf', 'control.rfcf', 1)
    >>> for item in state.conditionitems:
    ...     print(item)
    SetItem('sm_lahn_2', 'hland_v1', 'states.sm', 0)
    SetItem('sm_lahn_1', 'hland_v1', 'states.sm', 1)
    SetItem('quh', 'hland_v1', 'logs.quh', 0)
    >>> for item in state.getitems:
    ...     print(item)
    GetItem('hland_v1', 'fluxes.qt')
    GetItem('hland_v1', 'fluxes.qt.series')
    GetItem('hland_v1', 'states.sm')
    GetItem('hland_v1', 'states.sm.series')
    GetItem('nodes', 'nodes.sim.series')

    The initialisation also memorises the initial conditions of
    all elements:

    >>> for element in state.init_conditions:
    ...     print(element)
    land_dill
    land_lahn_1
    land_lahn_2
    land_lahn_3
    stream_dill_lahn_2
    stream_lahn_1_lahn_2
    stream_lahn_2_lahn_3

    Initialisation also prepares all selected series arrays and
    reads the required input data:

    >>> print_values(
    ...     state.hp.elements.land_dill.model.sequences.inputs.t.series)
    -0.298846, -0.811539, -2.493848, -5.968849, -6.999618
    >>> state.hp.nodes.dill.sequences.sim.series
    InfoArray([ nan,  nan,  nan,  nan,  nan])
    """
    # Each setup step announces itself (with a timestamp) so remote
    # callers can follow a potentially long-running initialisation.
    write = commandtools.print_textandtime
    write(f'Start HydPy project `{projectname}`')
    hp = hydpytools.HydPy(projectname)
    write(f'Read configuration file `{xmlfile}`')
    interface = xmltools.XMLInterface(xmlfile)
    write('Interpret the defined options')
    interface.update_options()
    write('Interpret the defined period')
    interface.update_timegrids()
    write('Read all network files')
    hp.prepare_network()
    write('Activate the selected network')
    hp.update_devices(interface.fullselection)
    write('Read the required control files')
    hp.init_models()
    write('Read the required condition files')
    interface.conditions_io.load_conditions()
    write('Read the required time series files')
    # Series arrays must be prepared (allocated) before loading data.
    interface.series_io.prepare_series()
    interface.exchange.prepare_series()
    interface.series_io.load_series()
    # Keep the fully prepared project and the exchange items available
    # for subsequent server requests.
    self.hp = hp
    self.parameteritems = interface.exchange.parameteritems
    self.conditionitems = interface.exchange.conditionitems
    self.getitems = interface.exchange.getitems
    # Per-run result containers, filled lazily while serving requests.
    self.conditions = {}
    self.parameteritemvalues = collections.defaultdict(lambda: {})
    self.modifiedconditionitemvalues = collections.defaultdict(lambda: {})
    self.getitemvalues = collections.defaultdict(lambda: {})
    # Memorise the initial conditions so individual runs can be reset.
    self.init_conditions = hp.conditions
    self.timegrids = {}
|
def show_highlight(self, artist):
    """Show the highlight for a given artist, creating it on first use."""
    # Kept as a separate method so subclasses can override it easily.
    if artist not in self.highlights:
        self.highlights[artist] = self.create_highlight(artist)
    else:
        self.highlights[artist].set_visible(True)
    return self.highlights[artist]
|
def check_instance(function):
    """Wrapper that tests the type of _session.

    Purpose: This decorator function is used by all functions within
        the Jaide class that interact with a device to ensure the
        proper session type is in use. If it is not, it will
        attempt to migrate _session to that type before moving
        to the originally requested function.

        **NOTE:** This function is a decorator, and should not be
        used directly. All other methods in this class that touch
        the Junos device are wrapped by this function to ensure the
        proper connection type is used.

    @param function: the function that is being wrapped around
    @type function: function

    @returns: the originally requested function
    @rtype: function
    """
    def wrapper(self, *args, **kwargs):
        # Map each wrapped method name to the session class it needs.
        func_trans = {"commit": manager.Manager, "compare_config": manager.Manager, "commit_check": manager.Manager, "device_info": manager.Manager, "diff_config": manager.Manager, "health_check": manager.Manager, "interface_errors": manager.Manager, "op_cmd": paramiko.client.SSHClient, "shell_cmd": paramiko.client.SSHClient, "scp_pull": paramiko.client.SSHClient, "scp_push": paramiko.client.SSHClient}
        # when doing an operational command, logging in as root
        # brings you to shell, so we need to enter the device as a shell
        # connection, and move to cli to perform the command
        # this is a one-off because the isinstance() check will be bypassed
        if self.username == "root" and function.__name__ == "op_cmd":
            if not self._session:
                self.conn_type = "paramiko"
                self.connect()
            if not self._shell:
                self.conn_type = "root"
                self.connect()
            # check if we're in the cli
            self.shell_to_cli()
        # Have to call shell command separately, since we are using _shell
        # for comparison, not _session.
        elif function.__name__ == 'shell_cmd':
            if not self._shell:
                self.conn_type = "shell"
                self.connect()
            # check if we're in shell.
            self.cli_to_shell()
        if isinstance(self._session, func_trans[function.__name__]):
            # If they're doing SCP, we have to check for both _session and
            # _scp
            if function.__name__ in ['scp_pull', 'scp_push']:
                if not isinstance(self._scp, SCPClient):
                    self.conn_type = "scp"
                    self.connect()
        else:
            # Wrong session type for this call: drop the current
            # connection and reconnect with the required type.
            self.disconnect()
            if function.__name__ == "op_cmd":
                self.conn_type = "paramiko"
            elif function.__name__ in ["scp_pull", "scp_push"]:
                self.conn_type = "scp"
            else:
                self.conn_type = "ncclient"
            self.connect()
        # Session now matches; run the originally requested method.
        return function(self, *args, **kwargs)
    return wrapper
|
def find_username_from_user_id(session, user_id):
    """Look up the username belonging to a MAL user ID.

    :type session: :class:`myanimelist.session.Session`
    :param session: A valid MAL session.

    :type user_id: int
    :param user_id: The user ID for which we want to look up a username.

    :raises: :class:`.InvalidUserError`

    :rtype: str
    :return: The given user's username.
    """
    query = urllib.urlencode({'id': int(user_id)})
    page_html = session.session.get(u'http://myanimelist.net/comments.php?' + query).text
    soup = bs4.BeautifulSoup(page_html)
    # The comments page titles itself "<username>'s Comments"; a
    # missing suffix means the ID did not resolve to a user.
    heading = soup.find('h1')
    if "'s Comments" not in heading.text:
        raise InvalidUserError(user_id, message="Invalid user ID given when looking up username")
    return heading.text.replace("'s Comments", "")
|
def rename(self, path, destination, **kwargs):
    """Renames Path src to Path dst.

    :returns: true if rename is successful
    :rtype: bool
    """
    response = self._put(path, 'RENAME', destination=destination, **kwargs)
    return _json(response)['boolean']
|
def get_feature_report(self, report_id, length):
    """Get a feature report from the device.

    :param report_id: The Report ID of the report to be read
    :type report_id: int
    :param length: Number of data bytes to read (excluding the ID byte)
    :return: The report data
    :rtype: str/bytes
    """
    self._check_device_status()
    # The buffer carries the report ID in byte 0, so it must be one
    # byte larger than the requested payload.
    size = length + 1
    data_ptr = ffi.new("unsigned char[]", size)
    data_buf = ffi.buffer(data_ptr, size)
    data_buf[0] = report_id
    result = hidapi.hid_get_feature_report(self._device, data_ptr, size)
    if result == -1:
        raise IOError("Failed to get feature report from HID device: {0}".format(self._get_last_error_string()))
    # Strip the leading report ID byte before returning the payload.
    return data_buf[1:]
|
def upload(self, *args, **kwargs):
    """Convenience method for attaching files to a fetched record

    :param args: args to pass along to `Attachment.upload`
    :param kwargs: kwargs to pass along to `Attachment.upload`
    :return: upload response object
    """
    attachments = self._resource.attachments
    return attachments.upload(self['sys_id'], *args, **kwargs)
|
def _init_template(self, cls, base_init_template):
    '''This would be better as an override for Gtk.Widget

    Runs the base template initialisation, binds every declared
    template child to an instance attribute of the same name, and
    warns about @GtkTemplate.Callback methods the template never
    connected.
    '''
    # TODO: could disallow using a metaclass.. but this is good enough
    # .. if you disagree, feel free to fix it and issue a PR :)
    if self.__class__ is not cls:
        raise TypeError("Inheritance from classes with @GtkTemplate decorators " "is not allowed at this time")
    # Populated by the signal-connection machinery while
    # base_init_template runs below.
    connected_signals = set()
    self.__connected_template_signals__ = connected_signals
    base_init_template(self)
    for name in self.__gtemplate_widgets__:
        widget = self.get_template_child(cls, name)
        # Bind the child directly on the instance dict so normal
        # attribute access works.
        self.__dict__[name] = widget
        if widget is None:
            # Bug: if you bind a template child, and one of them was
            # not present, then the whole template is broken (and
            # it's not currently possible for us to know which
            # one is broken either -- but the stderr should show
            # something useful with a Gtk-CRITICAL message)
            raise AttributeError("A missing child widget was set using " "GtkTemplate.Child and the entire " "template is now broken (widgets: %s)" % ', '.join(self.__gtemplate_widgets__))
    # Warn (don't fail) about declared callbacks that were never
    # referenced by the template.
    for name in self.__gtemplate_methods__.difference(connected_signals):
        errmsg = ("Signal '%s' was declared with @GtkTemplate.Callback " + "but was not present in template") % name
        warnings.warn(errmsg, GtkTemplateWarning)
|
def send(self, sender: PytgbotApiBot):
    """Send the message via pytgbot.

    :param sender: The bot instance to send with.
    :type  sender: pytgbot.bot.Bot

    :rtype: PytgbotApiMessage
    """
    # Gather every invoice field up front, then hand the whole batch
    # to pytgbot in a single call.
    invoice_fields = dict(
        title=self.title,
        description=self.description,
        payload=self.payload,
        provider_token=self.provider_token,
        start_parameter=self.start_parameter,
        currency=self.currency,
        prices=self.prices,
        chat_id=self.receiver,
        reply_to_message_id=self.reply_id,
        provider_data=self.provider_data,
        photo_url=self.photo_url,
        photo_size=self.photo_size,
        photo_width=self.photo_width,
        photo_height=self.photo_height,
        need_name=self.need_name,
        need_phone_number=self.need_phone_number,
        need_email=self.need_email,
        need_shipping_address=self.need_shipping_address,
        send_phone_number_to_provider=self.send_phone_number_to_provider,
        send_email_to_provider=self.send_email_to_provider,
        is_flexible=self.is_flexible,
        disable_notification=self.disable_notification,
        reply_markup=self.reply_markup,
    )
    return sender.send_invoice(**invoice_fields)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.