signature
stringlengths 29
44.1k
| implementation
stringlengths 0
85.2k
|
|---|---|
def serverinfo(url='http://localhost:8080/manager', timeout=180):
    '''
    Return details about the Tomcat server.

    url : http://localhost:8080/manager
        the URL of the server manager webapp
    timeout : 180
        timeout for HTTP request

    CLI Examples:

    .. code-block:: bash

        salt '*' tomcat.serverinfo
        salt '*' tomcat.serverinfo http://localhost:8080/manager
    '''
    data = _wget('serverinfo', {}, url, timeout=timeout)
    if data['res'] is False:
        return {'error': data['msg']}

    ret = {}
    # The first line of the manager response is the status banner
    # ("OK - Server info"), not a key/value pair.
    data['msg'].pop(0)
    for line in data['msg']:
        # Skip any line that carries no key/value pair at all.
        if ':' not in line:
            continue
        # BUGFIX: split on the first colon only -- values such as versions or
        # paths may themselves contain colons and were previously truncated.
        key, value = line.split(':', 1)
        ret[key.strip()] = value.strip()
    return ret
|
def create_scope(self, name, status=ScopeStatus.ACTIVE, description=None, tags=None, start_date=None,
                 due_date=None, team=None, **kwargs):
    """Create a Scope.

    This will create a scope if the client has the right to do so. Sufficient permissions to create a
    scope are a superuser, a user in the `GG:Configurators` group or `GG:Managers` group.

    .. versionadded: 2.6

    :param name: Name of the scope
    :type name: basestring
    :param status: choose one of the :class:`enums.ScopeStatus`, defaults to `ScopeStatus.ACTIVE`
    :type status: basestring or None
    :param description: (optional) Description of the scope
    :type description: basestring or None
    :param tags: (optional) List of tags to be added to the new scope
    :type tags: list or None
    :param start_date: (optional) start date of the scope. Will default to 'now' if not provided.
    :type start_date: datetime.datetime or None
    :param due_date: (optional) due date of the scope
    :type due_date: datetime.datetime or None
    :param team: (optional) team_id or Team object to assign membership of scope to a team.
    :type team: basestring or :class:`models.Team` or None
    :param kwargs: optional additional search arguments
    :type kwargs: dict or None
    :return: the created :class:`models.Scope`
    :raises IllegalArgumentError: when an argument has an invalid type or value
    :raises APIError: In case of failure of the creation of new Scope
    """
    if not isinstance(name, (str, text_type)):
        raise IllegalArgumentError("'Name' should be provided as a string, was provided as '{}'".format(type(name)))
    if status not in ScopeStatus.values():
        raise IllegalArgumentError("Please provide a valid scope status, please use one of `enums.ScopeStatus`. "
                                   "Got: '{}'".format(status))
    if description and not isinstance(description, (str, text_type)):
        raise IllegalArgumentError("'Description' should be provided as a string, was provided as '{}'"
                                   .format(type(description)))
    if tags and not isinstance(tags, list):
        raise IllegalArgumentError("'Tags' should be provided as a list, was provided as '{}'".format(type(tags)))
    if tags and not all(isinstance(t, (str, text_type)) for t in tags):
        raise IllegalArgumentError("Each tag in the list of tags should be provided as a string")

    if not start_date:
        # The API expects a start date; default to "now" when not provided.
        start_date = datetime.datetime.now()
    if not tags:
        tags = list()

    data_dict = {
        'name': name,
        'status': status,
        'text': description,
        'tags': tags,
    }

    # start_date is always set here (defaulted above), so validate and serialise unconditionally.
    # BUGFIX: the old fallthrough stored a raw datetime object instead of an ISO string, and the
    # naive-datetime warning wrongly referred to the 'duedate'.
    if isinstance(start_date, datetime.datetime):
        if not start_date.tzinfo:
            warnings.warn("The start_date '{}' is naive and not timezone aware, use pytz.timezone info. "
                          "This date is interpreted as UTC time.".format(start_date.isoformat(sep=' ')))
        data_dict['start_date'] = start_date.isoformat(sep='T')
    else:
        raise IllegalArgumentError('Start date should be a datetime.datetime() object')

    if due_date is not None:
        if isinstance(due_date, datetime.datetime):
            if not due_date.tzinfo:
                warnings.warn("The duedate '{}' is naive and not timezone aware, use pytz.timezone info. "
                              "This date is interpreted as UTC time.".format(due_date.isoformat(sep=' ')))
            data_dict['due_date'] = due_date.isoformat(sep='T')
        else:
            raise IllegalArgumentError('Due date should be a datetime.datetime() object')

    if team is not None:
        if isinstance(team, Team):
            team_id = team.id
        elif is_uuid(team):
            team_id = team
        elif isinstance(team, (text_type, string_types)):
            # Resolve a team name to its id via the API.
            team_id = self.team(name=team).id
        else:
            raise IllegalArgumentError("'Team' should be provided as a `models.Team` object or UUID or team name, "
                                       "was provided as a {}".format(type(team)))
        data_dict['team'] = team_id

    # Inject additional kwargs for those cases where extra options are needed.
    data_dict.update(kwargs)

    response = self._request('POST', self._build_url('scopes'), data=data_dict)

    if response.status_code != requests.codes.created:  # pragma: no cover
        raise APIError("Could not create scope, {}:\n\n{}'".format(str(response), response.json()))

    return Scope(response.json()['results'][0], client=self)
|
def event(self, name, payload=None, coalesce=True):
    """Send an event to the cluster.

    Can take an optional payload as well, which will be sent in the form
    that it's provided.
    """
    body = {'Name': name, 'Payload': payload, 'Coalesce': coalesce}
    return self.connection.call('event', body, expect_body=False)
|
def get_wd_data2(self):
    """Read data-model-2.5 data from self.WD into an ErMagicBuilder object.

    Called by get_dm_and_wd.
    """
    busy = wx.BusyInfo('Reading in data from current working directory, please wait...')
    # wx.Yield()
    print('-I- Read in any available data from working directory (data model 2)')
    self.er_magic = builder.ErMagicBuilder(self.WD, data_model=self.data_model)
    # Dropping the reference dismisses the busy-cursor dialog.
    del busy
|
async def _reload_message(self):
    """Re-fetch this message to reload the sender and chat entities,
    along with their input versions.
    """
    try:
        chat = await self.get_input_chat() if self.is_channel else None
        msg = await self._client.get_messages(chat, ids=self.id)
    except ValueError:
        # We may not have the input chat/get message failed
        return

    if not msg:
        # The message may be deleted and it will be None
        return

    # Copy every (input) entity reference over from the fresh message.
    for attr in ('_sender', '_input_sender', '_chat', '_input_chat',
                 '_via_bot', '_via_input_bot', '_forward', '_action_entities'):
        setattr(self, attr, getattr(msg, attr))
|
def get_modpath_from_modname(modname, prefer_pkg=False, prefer_main=False):
    """Return the path to a module's source without importing it directly.

    Args:
        modname (str): dotted module name
        prefer_pkg (bool): if True, return the package directory instead of
            its ``__init__.py`` / ``__main__.py`` file.
        prefer_main (bool): if True, prefer ``__main__.py`` over
            ``__init__.py`` when such a file exists.

    Returns:
        str: path to the module source file (or package dir with prefer_pkg)

    Raises:
        ImportError: if the module cannot be found at all.

    SeeAlso:
        get_modpath
    """
    from os.path import dirname, basename, join, exists
    initname = '__init__.py'
    mainname = '__main__.py'
    if modname in sys.modules:
        # Already imported: trust the module's own __file__.
        modpath = sys.modules[modname].__file__.replace('.pyc', '.py')
    else:
        import pkgutil
        loader = pkgutil.find_loader(modname)
        if loader is None:
            # BUGFIX: previously this fell through to an AttributeError.
            raise ImportError('Cannot find module: {!r}'.format(modname))
        # BUGFIX: modern loaders expose get_filename(); only legacy
        # pkgutil.ImpLoader objects had a ``.filename`` attribute.
        if hasattr(loader, 'get_filename'):
            modpath = loader.get_filename(modname).replace('.pyc', '.py')
        else:
            modpath = loader.filename.replace('.pyc', '.py')
    if '.' not in basename(modpath):
        # A bare package directory was returned; point at its __init__.py.
        modpath = join(modpath, initname)
    if prefer_pkg:
        if modpath.endswith(initname) or modpath.endswith(mainname):
            modpath = dirname(modpath)
    if prefer_main:
        if modpath.endswith(initname):
            main_modpath = modpath[:-len(initname)] + mainname
            if exists(main_modpath):
                modpath = main_modpath
    return modpath
|
def CLASSDEF(self, node):
    """Check names used in a class definition, including its decorators,
    base classes, and the body of its definition.

    Additionally, add its name to the current scope.
    """
    # Decorators and base classes are evaluated in the enclosing scope.
    for expr in node.decorator_list:
        self.handleNode(expr, node)
    for expr in node.bases:
        self.handleNode(expr, node)
    if not PY2:
        # Python 3 class definitions may also carry keywords (metaclass=...).
        for expr in node.keywords:
            self.handleNode(expr, node)
    self.push_scope(ClassScope)
    if self.settings.get('run_doctests', False):
        self.defer_function(lambda: self.handle_doctests(node))
    for statement in node.body:
        self.handleNode(statement, node)
    self.pop_scope()
    self.add_binding(node, ClassDefinition(node.name, node))
|
def et2lst(et, body, lon, typein, timlen=_default_len_out, ampmlen=_default_len_out):
    """Given an ephemeris epoch, compute the local solar time for
    an object on the surface of a body at a specified longitude.

    http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/et2lst_c.html

    :param et: Epoch in seconds past J2000 epoch.
    :type et: float
    :param body: ID-code of the body of interest.
    :type body: int
    :param lon: Longitude of surface point (RADIANS).
    :type lon: float
    :param typein: Type of longitude "PLANETOCENTRIC", etc.
    :type typein: str
    :param timlen: Available room in output time string.
    :type timlen: int
    :param ampmlen: Available room in output ampm string.
    :type ampmlen: int
    :return:
        Local hour on a "24 hour" clock,
        Minutes past the hour,
        Seconds past the minute,
        String giving local time on 24 hour clock,
        String giving time on A.M./P.M. scale.
    :rtype: tuple
    """
    # Convert the Python arguments into their ctypes equivalents.
    c_et = ctypes.c_double(et)
    c_body = ctypes.c_int(body)
    c_lon = ctypes.c_double(lon)
    c_typein = stypes.stringToCharP(typein)
    c_timlen = ctypes.c_int(timlen)
    c_ampmlen = ctypes.c_int(ampmlen)
    # Output slots and string buffers for the CSPICE call.
    hour = ctypes.c_int()
    minute = ctypes.c_int()
    second = ctypes.c_int()
    time_buf = stypes.stringToCharP(c_timlen)
    ampm_buf = stypes.stringToCharP(c_ampmlen)
    libspice.et2lst_c(c_et, c_body, c_lon, c_typein, c_timlen, c_ampmlen,
                      ctypes.byref(hour), ctypes.byref(minute), ctypes.byref(second),
                      time_buf, ampm_buf)
    return (hour.value, minute.value, second.value,
            stypes.toPythonString(time_buf), stypes.toPythonString(ampm_buf))
|
def set_help(self):
    """Set help text markup.

    Only applies when the field defines help text AND the "_help" attribute
    is enabled; otherwise the values dict is left untouched.
    """
    help_text = self.field.help_text
    if help_text and self.attrs.get("_help"):
        self.values["help"] = HELP_TEMPLATE.format(help_text)
|
def setup(template_paths=None, autoescape=False, cache_size=100, auto_reload=True, bytecode_cache=True):
    """Set up the Jinja environment.

    eg. sketch.jinja.setup({
        'app': self.config.paths['app_template_basedir'],
        'sketch': self.config.paths['sketch_template_dir'],
    })

    :param template_paths: Dictionary of paths to templates
        (template_name => template_path), or a single path string which is
        treated as the 'site' template set.
    :param autoescape: Autoescape
    :param cache_size: Template cache size
    :param auto_reload: Auto-reload templates on change
    :param bytecode_cache: Enable the bytecode cache (currently disabled, see TODO)
    :return: True on success, False when no template sets are configured
    """
    global _jinja_env, _jinja_loaders
    # BUGFIX: the default was a shared mutable dict ({}); use None instead.
    if template_paths is None:
        template_paths = {}
    if not _jinja_env:
        _jinja_env = JinjaEnviroment(autoescape=autoescape, cache_size=cache_size,
                                     auto_reload=auto_reload, bytecode_cache=None)
        # @TODO alter so Marshall is not used
        # if bytecode_cache and GAE_CACHE:
        #     _jinja_env.bytecode_cache = GAEMemcacheBytecodeCache()
    if isinstance(template_paths, str):
        # A single path was given; treat it as the 'site' template set.
        template_paths = {'site': template_paths}
    if len(template_paths) < 1:
        logging.exception('Sketch: jinja.setup: no template sets configured')
        return False
    if len(template_paths) == 1:
        # BUGFIX: dict.keys()[0] fails on py3; next(iter(...)) works on both.
        template_set_name = next(iter(template_paths))
        tp = template_paths[template_set_name]
        # Re-use a cached loader for a path we have seen before.
        if tp in _jinja_loaders:
            _jinja_env.loader = _jinja_loaders[tp]
        else:
            _jinja_env.loader = _jinja_loaders[tp] = jinja2.FileSystemLoader(tp)
        return True
    if len(template_paths) > 1:
        loaders = {}
        for dirn, path in template_paths.items():
            loaders[dirn] = jinja2.FileSystemLoader(path)
        _jinja_env.loader = SubdirLoader(loaders)
        return True
    logging.error('Sketch: jinja.setup: no template sets configured (fallthrough)')
    logging.error(_jinja_loaders)
|
def batch_add_ips(ips):
    """Add each IP in *ips* to the database if it is not already present.

    :param ips: list of dotted-quad IP strings
    :type ips: list
    :return: number of newly created IP records
    :rtype: int
    """
    created_count = 0
    # For each ip, check if already existent; if not, add it.
    for ip in ips:
        seg_0, seg_1, seg_2, seg_3 = ip.split('.')
        _, was_created = IP.objects.get_or_create(seg_0=seg_0, seg_1=seg_1,
                                                  seg_2=seg_2, seg_3=seg_3)
        if was_created:
            created_count += 1
    return created_count
|
def get_color_list(method_list):
    """Return one random RGB color tuple per entry in *method_list*.

    Each color is a 3-tuple of floats in [0, 1), used for drawing lines.
    """
    # One randomly chosen color per method.
    return [tuple(random() for _ in range(3)) for _ in method_list]
|
def create_from_path(self):
    """Create a file loader from the file extension of the URL path.

    Supported file extensions are as follows:

        ``"csv"``: :py:class:`~.CsvTableTextLoader`
        ``"xls"``/``"xlsx"``: :py:class:`~.ExcelTableFileLoader`
        ``"htm"``/``"html"``/``"asp"``/``"aspx"``: :py:class:`~.HtmlTableTextLoader`
        ``"json"``: :py:class:`~.JsonTableTextLoader`
        ``"jsonl"``/``"ldjson"``/``"ndjson"``: :py:class:`~.JsonLinesTableTextLoader`
        ``"ltsv"``: :py:class:`~.LtsvTableTextLoader`
        ``"md"``: :py:class:`~.MarkdownTableTextLoader`
        ``"sqlite"``/``"sqlite3"``: :py:class:`~.SqliteFileLoader`
        ``"tsv"``: :py:class:`~.TsvTableTextLoader`

    :return: Loader that coincides with the file extension of the URL.
    :raises pytablereader.UrlError: If unacceptable URL format.
    :raises pytablereader.LoaderNotFoundError:
        |LoaderNotFoundError_desc| loading the URL.
    """
    import requests

    path = urlparse(self.__url).path
    try:
        extension = get_extension(path.rstrip("/"))
    except InvalidFilePathError:
        raise UrlError("url must include path")

    logger.debug("TableUrlLoaderFactory: extension={}".format(extension))

    mapping = self._get_extension_loader_mapping()
    loader_class = self._get_loader_class(mapping, extension)
    try:
        self._fetch_source(loader_class)
    except requests.exceptions.ProxyError as e:
        raise ProxyError(e)

    loader = self._create_from_extension(extension)
    logger.debug("TableUrlLoaderFactory: loader={}".format(loader.format_name))
    return loader
|
def generic_type_name(v):
    """Return a descriptive type name that isn't Python specific.

    For example, an int type will return 'integer' rather than 'int'.
    """
    if isinstance(v, AstExampleRef):
        return "reference"
    if v is None:
        return 'null'
    # Integral must be tested before Real since integrals are reals too.
    checks = (
        (numbers.Integral, 'integer'),
        (numbers.Real, 'float'),
        ((tuple, list), 'list'),
        (six.string_types, 'string'),
    )
    for type_spec, label in checks:
        if isinstance(v, type_spec):
            return label
    return type(v).__name__
|
def main(argv=None):
    """Make a confidence report and save it to disk."""
    try:
        # Expect exactly two entries: the script name and the model filepath.
        _script, filepath = argv
    except ValueError:
        raise ValueError(argv)
    make_confidence_report(filepath=filepath, test_start=FLAGS.test_start,
                           test_end=FLAGS.test_end, which_set=FLAGS.which_set,
                           report_path=FLAGS.report_path,
                           mc_batch_size=FLAGS.mc_batch_size,
                           nb_iter=FLAGS.nb_iter,
                           base_eps_iter=FLAGS.base_eps_iter,
                           batch_size=FLAGS.batch_size,
                           save_advx=FLAGS.save_advx)
|
def setChecked(src, ids=None, dpth=0, key=''):
    """Recursively walk *src* and flag items whose "id" is in *ids*.

    Matching dict items (found inside lists) get ``checked`` and ``select``
    set to True, in place.

    :param src: nested structure of dicts and lists to scan
    :param ids: collection of ids to mark as checked (defaults to empty)
    :param dpth: current recursion depth (informational only)
    :param key: key under which *src* was found in its parent dict
    """
    if ids is None:
        # BUGFIX: avoid the mutable-default-argument pitfall (was ids=[]).
        ids = []
    if isinstance(src, dict):
        # BUGFIX: .items() replaces the py2-only .iteritems().
        for key, value in src.items():
            setChecked(value, ids, dpth + 1, key)
    elif isinstance(src, list):
        for litem in src:
            # BUGFIX: isinstance(litem, dict) replaces py2-only types.DictType.
            if isinstance(litem, dict):
                if "id" in litem and litem["id"] in ids:
                    litem["checked"] = True
                    litem["select"] = True
                setChecked(litem, ids, dpth + 2)
|
def conv(self, num_out_channels, k_height, k_width, d_height=1, d_width=1, mode="SAME", input_layer=None,
         num_channels_in=None, use_batch_norm=None, stddev=None, activation="relu", bias=0.0):
    """Construct a conv2d layer on top of cnn.

    :param num_out_channels: number of output channels of the convolution.
    :param k_height: kernel height.
    :param k_width: kernel width.
    :param d_height: vertical stride.
    :param d_width: horizontal stride.
    :param mode: padding mode; "SAME_RESNET" selects explicit ResNet-style padding,
        any other value is passed through to the conv implementation.
    :param input_layer: input tensor; defaults to self.top_layer.
    :param num_channels_in: number of input channels; defaults to self.top_size.
    :param use_batch_norm: whether to apply batch norm; defaults to self.use_batch_norm.
    :param stddev: if given, use a truncated-normal kernel initializer with this stddev.
    :param activation: "relu", "tanh", "linear" or None.
    :param bias: bias initializer value, or None to skip the bias add.
    :return: the activated output tensor (also stored as self.top_layer).
    :raises KeyError: if ``activation`` is not one of the supported values.
    """
    if input_layer is None:
        input_layer = self.top_layer
    if num_channels_in is None:
        num_channels_in = self.top_size
    kernel_initializer = None
    if stddev is not None:
        kernel_initializer = tf.truncated_normal_initializer(stddev=stddev)
    # Each conv layer gets a unique, sequentially numbered variable scope.
    name = "conv" + str(self.counts["conv"])
    self.counts["conv"] += 1
    with tf.variable_scope(name):
        strides = [1, d_height, d_width, 1]
        if self.data_format == "NCHW":
            # Reorder the NHWC stride vector into NCHW layout.
            strides = [strides[0], strides[3], strides[1], strides[2]]
        if mode != "SAME_RESNET":
            conv = self._conv2d_impl(input_layer, num_channels_in, num_out_channels,
                                     kernel_size=[k_height, k_width], strides=[d_height, d_width],
                                     padding=mode, kernel_initializer=kernel_initializer)
        else:  # Special padding mode for ResNet models
            if d_height == 1 and d_width == 1:
                # With unit strides, plain SAME padding gives the same result.
                conv = self._conv2d_impl(input_layer, num_channels_in, num_out_channels,
                                         kernel_size=[k_height, k_width], strides=[d_height, d_width],
                                         padding="SAME", kernel_initializer=kernel_initializer)
            else:
                rate = 1  # Unused (for 'a trous' convolutions)
                # Compute explicit asymmetric padding, then convolve with VALID.
                kernel_height_effective = k_height + (k_height - 1) * (rate - 1)
                pad_h_beg = (kernel_height_effective - 1) // 2
                pad_h_end = kernel_height_effective - 1 - pad_h_beg
                kernel_width_effective = k_width + (k_width - 1) * (rate - 1)
                pad_w_beg = (kernel_width_effective - 1) // 2
                pad_w_end = kernel_width_effective - 1 - pad_w_beg
                padding = [[0, 0], [pad_h_beg, pad_h_end], [pad_w_beg, pad_w_end], [0, 0]]
                if self.data_format == "NCHW":
                    padding = [padding[0], padding[3], padding[1], padding[2]]
                input_layer = tf.pad(input_layer, padding)
                conv = self._conv2d_impl(input_layer, num_channels_in, num_out_channels,
                                         kernel_size=[k_height, k_width], strides=[d_height, d_width],
                                         padding="VALID", kernel_initializer=kernel_initializer)
        if use_batch_norm is None:
            use_batch_norm = self.use_batch_norm
        if not use_batch_norm:
            if bias is not None:
                biases = self.get_variable("biases", [num_out_channels], self.variable_dtype, self.dtype,
                                           initializer=tf.constant_initializer(bias))
                biased = tf.reshape(tf.nn.bias_add(conv, biases, data_format=self.data_format),
                                    conv.get_shape())
            else:
                biased = conv
        else:
            # batch_norm() reads the current top layer, so publish conv first.
            self.top_layer = conv
            self.top_size = num_out_channels
            biased = self.batch_norm(**self.batch_norm_config)
        if activation == "relu":
            conv1 = tf.nn.relu(biased)
        elif activation == "linear" or activation is None:
            conv1 = biased
        elif activation == "tanh":
            conv1 = tf.nn.tanh(biased)
        else:
            raise KeyError("Invalid activation type \"%s\"" % activation)
        self.top_layer = conv1
        self.top_size = num_out_channels
        return conv1
|
def prepare_exception(obj, messages=None, response=None, verbs=None):
    """Prepare exception params or only an exception message.

    parameters:
        obj: exception object to decorate with ``data``, ``response`` and
            ``verbs`` attributes, or a falsy value to only build the message.
        messages: list of strings, that will be separated by new line
        response: response from a request to SFDC REST API
        verbs: list of options about verbosity (only 'method+url' is known)

    returns the joined message string.
    """
    # pylint:disable=too-many-branches
    verbs = set(verbs or [])
    known_options = ['method+url']
    if messages is None:
        messages = []
    if isinstance(messages, (text_type, str)):
        # A single message string is promoted to a one-element list.
        messages = [messages]
    assert isinstance(messages, list)
    assert not verbs.difference(known_options)
    data = None
    # a boolean from a failed response is False, though error messages in json should be decoded
    if response is not None and 'json' in response.headers.get('Content-Type', '') and response.text:
        data = json.loads(response.text)
    if data:
        # Only the first error entry is expanded; others are just counted.
        data_0 = data[0]
        if 'errorCode' in data_0:
            subreq = ''
            if 'referenceId' in data_0:
                subreq = " (in subrequest {!r})".format(data_0['referenceId'])
            messages = [data_0['errorCode'] + subreq] + messages
        if data_0.get('fields'):
            messages.append('FIELDS: {}'.format(data_0['fields']))
        if len(data) > 1:
            messages.append('MORE_ERRORS ({})'.format(len(data)))
    if 'method+url' in verbs:
        method = response.request.method
        url = response.request.url
        if len(url) > 100:
            # Truncate very long URLs to keep the message readable.
            url = url[:100] + '...'
        data_info = ''
        if (method in ('POST', 'PATCH') and
                (not response.request.body or 'json' not in response.request.headers['content-type'])):
            data_info = ' (without json request data)'
        messages.append('in {} "{}"{}'.format(method, url, data_info))
    separ = '\n '
    if not PY3:
        # On py2, encode unicode messages so they can be joined with str.
        messages = [x if isinstance(x, str) else x.encode('utf-8') for x in messages]
    # Indent embedded newlines so multi-line messages stay visually grouped.
    messages = [x.replace('\n', separ) for x in messages]
    message = separ.join(messages)
    if obj:
        obj.data = data
        obj.response = response
        obj.verbs = verbs
    return message
|
def trace_region_count(self):
    """Retrieve the number of available trace regions.

    Args:
        self (JLink): the ``JLink`` instance.

    Returns:
        Count of the number of available trace regions.

    Raises:
        JLinkException: if the DLL call reports failure.
    """
    count = ctypes.c_uint32(0)
    command = enums.JLinkTraceCommand.GET_NUM_REGIONS
    status = self._dll.JLINKARM_TRACE_Control(command, ctypes.byref(count))
    if status == 1:
        raise errors.JLinkException('Failed to get trace region count.')
    return count.value
|
def getdminfo(self, columnname=None):
    """Get data manager info.

    Each column in a table is stored using a data manager. A storage manager
    stores the data physically in a file. A virtual column engine is a data
    manager that does not store data but calculates it on the fly (e.g.
    scaling floats to short to reduce storage needs).

    By default this method returns a dict telling the data managers used.
    Each field in the dict is a dict containing:

    - NAME telling the (unique) name of the data manager
    - TYPE telling the type of data manager (e.g. TiledShapeStMan)
    - SEQNR telling the sequence number of the data manager
      (is 'i' in table.f<i> for storage managers)
    - SPEC is a dict holding the data manager specification
    - COLUMNS is a list giving the columns stored by this data manager

    When giving a column name the data manager info of that particular column
    is returned (without the COLUMNS field). It can, for instance, be used
    when adding a column using :func:`addcols` that should use the same data
    manager type as an existing column. However, when doing that care should
    be taken to change the NAME because each data manager name has to be
    unique.
    """
    info = self._getdminfo()
    if columnname is None:
        return info
    # Locate the data manager record holding the requested column and return
    # a copy of it without the COLUMNS field.
    for entry in info.values():
        if columnname in entry["COLUMNS"]:
            result = entry.copy()
            del result['COLUMNS']
            return result
    raise KeyError("Column " + columnname + " does not exist")
|
def unweave(target, *advices):
    """Unweave advices from input target.

    :param target: object from which the advices are removed.
    :param advices: advices to remove; plain callables are wrapped into
        ``Advice`` instances first.
    """
    # Normalize plain callables into Advice instances (lazily, via generator).
    advices = (advice if isinstance(advice, Advice) else Advice(advice) for advice in advices)
    # NOTE(review): this re-invokes ``unweave`` itself with the normalized
    # advices, which looks like unbounded recursion since the Advice-wrapping
    # is idempotent -- presumably a different helper (or a decorated original)
    # was intended here; confirm against the aspect library's API.
    unweave(target=target, *advices)
|
def spellcheck_region(region_lines, valid_words_dictionary=None, technical_words_dictionary=None,
                      user_dictionary_words=None):
    """Perform spellcheck on each word in :region_lines:.

    Each word will be checked for existence in :valid_words_dictionary:.
    If it is not in :valid_words_dictionary: then corrections will be
    suggested.

    If the word isn't one which is an ordinary word, then it will be checked
    against the available symbols in :technical_words_dictionary:. If it is
    not in :technical_words_dictionary: then corrections will be suggested.

    Yields one error object per offending word.
    """
    user_dictionary_words = user_dictionary_words or set()
    spellcheckable_words_regex = re.compile(_SPELLCHECKABLE_WORDS)
    # PERF: compile the symbol-word pattern once, instead of re-compiling it
    # for every symbol word inside the inner loop.
    valid_symbol_words_regex = re.compile(_VALID_SYMBOL_WORDS)

    line_offset = 0
    for line in region_lines:
        for col_offset, word in _split_line_with_offsets(line):
            word = word.strip()
            if len(word) == 0:
                continue
            # If this word exists in the user dictionary, then always allow
            # it, even if it might be technical in nature
            if word in user_dictionary_words:
                continue
            if (valid_words_dictionary and spellcheckable_words_regex.match(word)):
                error = _error_if_word_invalid(word, valid_words_dictionary,
                                               technical_words_dictionary,
                                               line_offset, col_offset)
                if error:
                    yield error
            # Check for symbols appearing in comments or docstrings.
            elif technical_words_dictionary:
                for symbol_word in _split_into_symbol_words(word):
                    if not valid_symbol_words_regex.match(symbol_word):
                        continue
                    error = _error_if_symbol_unused(symbol_word, technical_words_dictionary,
                                                    line_offset, col_offset)
                    if error:
                        yield error
        line_offset += 1
|
def _read_mode_utopt(self, size, kind):
    """Read User Timeout option.

    Positional arguments:
        * size - int, length of option
        * kind - int, 28 (User Timeout Option)

    Returns:
        * dict -- extracted User Timeout (TIMEOUT) option

    Structure of TCP TIMEOUT [RFC 5482]:

         0                   1                   2                   3
         0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
        |   Kind = 28   |   Length = 4  |G|        User Timeout         |

        Octets  Bits  Name                     Description
          0       0   tcp.timeout.kind         Kind (28)
          1       8   tcp.timeout.length       Length (4)
          2      16   tcp.timeout.granularity  Granularity
          2      17   tcp.timeout.timeout      User Timeout
    """
    # temp is read via the project's file-reading helper; it is indexed and
    # parsed with int(..., base=2) below, so it presumably holds a bit string
    # -- TODO confirm against _read_fileng's return type.
    temp = self._read_fileng(size)
    # NOTE(review): 'timeout' is built from temp[0:], which still includes the
    # granularity bit (temp[0]); per RFC 5482 the timeout value is the 15 bits
    # after G, so temp[1:] may have been intended -- confirm before changing.
    data = dict(
        kind=kind,
        length=size,
        granularity='minutes' if int(temp[0]) else 'seconds',
        timeout=bytes(chr(int(temp[0:], base=2)), encoding='utf-8'),
    )
    return data
|
def check_range(number, min_r, max_r, name=""):
    """Check that *number*, coerced to float, lies within [min_r, max_r].

    :param number: value to check; converted with float()
    :param min_r: lower bound (inclusive)
    :param max_r: upper bound (inclusive)
    :param name: label used in the error message
    :return: the value as a float
    :raises FFmpegNormalizeError: if the value is outside the range
    :raises ValueError: if the value cannot be converted to float
    """
    # The previous try/except only re-raised the same exception and had an
    # unreachable `pass` after the return; both removed.
    number = float(number)
    if number < min_r or number > max_r:
        raise FFmpegNormalizeError("{} must be within [{},{}]".format(name, min_r, max_r))
    return number
|
def is_connected(self):
    """Return `True` if the Xmrs represents a connected graph.

    Subgraphs can be connected through things like arguments, QEQs, and
    label equalities.

    :raises XmrsError: if the Xmrs is empty, or if the traversal reaches
        nodeids that are not part of this Xmrs.
    """
    nids = set(self._nodeids)  # the nids left to find
    if len(nids) == 0:
        raise XmrsError('Cannot compute connectedness of an empty Xmrs.')
    # build a basic dict graph of relations
    edges = []
    # label connections: all nodes sharing a label are mutually connected
    for lbl in self.labels():
        lblset = self.labelset(lbl)
        edges.extend((x, y) for x in lblset for y in lblset if x != y)
    # argument connections
    _vars = self._vars
    for nid in nids:
        for rarg, tgt in self.args(nid).items():
            if tgt not in _vars:
                continue
            if IVARG_ROLE in _vars[tgt]['refs']:
                # the target is the intrinsic variable of some node(s)
                tgtnids = list(_vars[tgt]['refs'][IVARG_ROLE])
            elif tgt in self._hcons:
                # the target is a hole; follow the handle constraint (QEQ)
                # to the nodes under its low label
                tgtnids = list(self.labelset(self.hcon(tgt)[2]))
            elif 'LBL' in _vars[tgt]['refs']:
                # the target is itself used as a label
                tgtnids = list(_vars[tgt]['refs']['LBL'])
            else:
                tgtnids = []
            # connections are bidirectional
            edges.extend((nid, t) for t in tgtnids if nid != t)
            edges.extend((t, nid) for t in tgtnids if nid != t)
    # adjacency sets, seeded with every known nodeid
    g = {nid: set() for nid in nids}
    for x, y in edges:
        g[x].add(y)
    connected_nids = _bfs(g)
    if connected_nids == nids:
        return True
    elif connected_nids.difference(nids):
        raise XmrsError('Possibly bogus nodeids: {}'.format(', '.join(connected_nids.difference(nids))))
    return False
|
def GetParserAndPluginNames(cls, parser_filter_expression=None):
    """Retrieves the parser and parser plugin names.

    Args:
        parser_filter_expression (Optional[str]): parser filter expression,
            where None represents all parsers and plugins.

    Returns:
        list[str]: parser and parser plugin names.
    """
    names = []
    for parser_name, parser_class in cls.GetParsers(
            parser_filter_expression=parser_filter_expression):
        names.append(parser_name)
        if not parser_class.SupportsPlugins():
            continue
        # Plugin names are reported as "<parser>/<plugin>".
        names.extend('{0:s}/{1:s}'.format(parser_name, plugin_name)
                     for plugin_name, _ in parser_class.GetPlugins())
    return names
|
def create_environment(self, name, default=False, zone=None):
    """Create an environment in this organization and return the
    Environment object.
    """
    from qubell.api.private.environment import Environment
    return Environment.new(router=self._router, organization=self,
                           name=name, default=default, zone_id=zone)
|
def child_elements(self, by=By.ID, value=None, el_class=None):
    """Alias for ``find_elements``.

    :param by: locator strategy (defaults to By.ID)
    :param value: locator value
    :param el_class: element class to wrap results in
    :return: initialized list of page elements
    """
    element, selector = define_selector(by, value, el_class)
    result_list = elements.PageElementsList(selector, element)
    return self._init_element(result_list)
|
def _get_next_empty_bitmap ( self ) :
"""Returns the next empty entry .
Returns :
int : The value of the empty entry"""
|
# TODO probably not the best way , redo
for i , byte in enumerate ( self . _bitmap ) :
if byte != 255 :
for offset in range ( 8 ) :
if not byte & ( 1 << offset ) :
return ( i * 8 ) + offset
|
def _recv_callback(self, msg):
    """Handle a message arriving from a Mongrel2 server.

    The payload is expected to be a valid Request String.
    """
    request = MongrelRequest.parse(msg[0])
    # The connection object wires itself up on construction; no further
    # bookkeeping is needed here.
    MongrelConnection(request, self._sending_stream, self.request_callback,
                      no_keep_alive=self.no_keep_alive, xheaders=self.xheaders)
|
def move_nodes_constrained(self, partition, constrained_partition, consider_comms=None):
    """Move nodes to alternative communities for *refining* the partition.

    Parameters
    ----------
    partition
        The partition for which to move nodes.
    constrained_partition
        The partition within which we may move nodes.
    consider_comms
        If ``None`` uses :attr:`refine_consider_comms`, but can be set to
        something else.

    Returns
    -------
    float
        Improvement in quality function.

    Notes
    -----
    The idea is to constrain the movement of nodes to alternative communities
    to another partition. In other words, if there is a partition ``P`` which
    we want to refine, we can then initialize a new singleton partition, and
    move nodes in that partition constrained to ``P``.

    See Also
    --------
    :func:`Optimiser.move_nodes`
    :func:`Optimiser.merge_nodes_constrained`

    Examples
    --------
    >>> G = ig.Graph.Famous('Zachary')
    >>> optimiser = la.Optimiser()
    >>> partition = la.ModularityVertexPartition(G)
    >>> diff = optimiser.optimise_partition(partition)
    >>> refine_partition = la.ModularityVertexPartition(G)
    >>> diff = optimiser.move_nodes_constrained(refine_partition, partition)
    """
    comms = self.refine_consider_comms if consider_comms is None else consider_comms
    improvement = _c_leiden._Optimiser_move_nodes_constrained(
        self._optimiser, partition._partition, constrained_partition._partition, comms)
    # The C call mutates the underlying membership; sync the Python wrapper.
    partition._update_internal_membership()
    return improvement
|
def host_install(self, user_name, host_names, ssh_port=None, password=None,
                 private_key=None, passphrase=None, parallel_install_count=None,
                 cm_repo_url=None, gpg_key_custom_url=None,
                 java_install_strategy=None, unlimited_jce=None):
    """Install Cloudera Manager Agent on a set of hosts.

    The installer connects to the hosts via SSH and logs in either directly
    as root or as another user with password-less sudo privileges.

    @param user_name: The username used to authenticate with the hosts.
    @param host_names: List of hostnames (FQDN) or IP addresses to configure
                       for use with Cloudera Manager.
    @param ssh_port: SSH port. If unset, defaults to 22.
    @param password: The password used to authenticate with the hosts. Specify
                     either this or a private key. For password-less login,
                     use an empty string as password.
    @param private_key: The private key to authenticate with the hosts.
                        Specify either this or a password.
    @param passphrase: The passphrase associated with the private key
                       (optional).
    @param parallel_install_count: Number of simultaneous installations.
                                   Defaults to 10 server-side.
    @param cm_repo_url: The Cloudera Manager repository URL to use (optional).
    @param gpg_key_custom_url: The Cloudera Manager public GPG key URL
                               (optional).
    @param java_install_strategy: Added in v8: 'AUTO' (default, CM installs
                                  required JDKs, possibly overwriting existing
                                  ones) or 'NONE' (use an existing JDK).
    @param unlimited_jce: Added in v8: install unlimited-strength JCE policy
                          files. If unset, defaults to false.
    @return: Information about the submitted command.
    @since: API v6
    """
    host_install_args = {}
    if user_name:
        host_install_args['userName'] = user_name
    if host_names:
        host_install_args['hostNames'] = host_names
    if ssh_port:
        host_install_args['sshPort'] = ssh_port
    # An empty string is a *valid* password (documented password-less login),
    # so test against None instead of truthiness: `if password:` silently
    # dropped the empty-password case.
    if password is not None:
        host_install_args['password'] = password
    if private_key:
        host_install_args['privateKey'] = private_key
    if passphrase:
        host_install_args['passphrase'] = passphrase
    if parallel_install_count:
        host_install_args['parallelInstallCount'] = parallel_install_count
    if cm_repo_url:
        host_install_args['cmRepoUrl'] = cm_repo_url
    if gpg_key_custom_url:
        host_install_args['gpgKeyCustomUrl'] = gpg_key_custom_url
    if java_install_strategy is not None:
        host_install_args['javaInstallStrategy'] = java_install_strategy
    if unlimited_jce:
        host_install_args['unlimitedJCE'] = unlimited_jce
    return self._cmd('hostInstall', data=host_install_args)
|
def resultsFor(self, ps):
    """Retrieve a list of all results associated with the given parameters.

    :param ps: the parameters
    :returns: a list of results, which may be empty
    """
    key = self._parametersAsIndex(ps)
    # Pending jobs are stored alongside results as bare job ids (anything
    # that is not a dict), so keep only the dict entries. dict.get avoids
    # the redundant `key in d.keys()` membership test plus second lookup.
    return [res for res in self._results.get(key, []) if isinstance(res, dict)]
|
def connect_post_node_proxy_with_path(self, name, path, **kwargs):  # noqa: E501
    """connect_post_node_proxy_with_path  # noqa: E501

    connect POST requests to proxy of Node  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.connect_post_node_proxy_with_path(name, path, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str name: name of the NodeProxyOptions (required)
    :param str path: path to the resource (required)
    :param str path2: Path is the URL path to use for the current proxy request to node.
    :return: str
        If the method is called asynchronously, returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    # Both the sync and async paths delegate to the same helper; with
    # async_req set the helper returns the request thread, otherwise the
    # deserialized data.
    return self.connect_post_node_proxy_with_path_with_http_info(name, path, **kwargs)  # noqa: E501
|
def get_uncompleted_tasks(self):
    """Return a list of all uncompleted tasks in this project.

    .. warning:: Requires Todoist premium.

    :return: A list of all uncompleted tasks in this project.
    :rtype: list of :class:`pytodoist.todoist.Task`

    >>> from pytodoist import todoist
    >>> user = todoist.login('john.doe@gmail.com', 'password')
    >>> project = user.get_project('PyTodoist')
    >>> project.add_task('Install PyTodoist')
    >>> uncompleted_tasks = project.get_uncompleted_tasks()
    >>> for task in uncompleted_tasks:
    ...     task.complete()
    """
    # Uncompleted == every task that is not in the completed set.
    completed = self.get_completed_tasks()
    return [task for task in self.get_tasks() if task not in completed]
|
def paint(self, painter, option, widget):
    """Overloads the paint method from QGraphicsPathItem to handle custom
    drawing of the path using this item's pens and polygons.

    :param painter: <QPainter> active painter for this item
    :param option: <QGraphicsItemStyleOption> (unused, required by Qt)
    :param widget: <QWidget> (unused, required by Qt)
    """
    # following the arguments required by Qt
    # pylint: disable-msg=W0613
    painter.setOpacity(self.opacity())
    # pick the pen that reflects the item state; disabled takes
    # precedence over the selection highlight
    if not self.isEnabled():
        pen = QPen(self.disabledPen())
    elif self.isSelected():
        pen = QPen(self.highlightPen())
    else:
        pen = QPen(self.pen())
    # keep the optional text label in sync with the item's opacity and
    # slightly darken its color relative to the chosen pen
    if self._textItem:
        self._textItem.setOpacity(self.opacity())
        self._textItem.setDefaultTextColor(pen.color().darker(110))
    # rebuild first if necessary
    if self.isDirty():
        self.setPath(self.rebuild())
    # store the initial hint so it can be restored after painting
    hint = painter.renderHints()
    painter.setRenderHint(painter.Antialiasing)
    pen.setWidthF(1.25)
    painter.setPen(pen)
    painter.drawPath(self.path())
    # redraw the polys to force-fill them (open polygons are skipped)
    for poly in self._polygons:
        if not poly.isClosed():
            continue
        painter.setBrush(pen.color())
        painter.drawPolygon(poly)
    # restore the render hints
    painter.setRenderHints(hint)
|
def read_with_buffer(self, command, fct=0, ext=0):
    """Read device data using the buffered read command (ZK6: 1503).

    :param command: device command id to execute through the buffer
    :param fct: command-specific function argument (device dependent)
    :param ext: command-specific extra argument (device dependent)
    :returns: tuple ``(data, size)`` — raw byte payload and its length
    :raises ZKErrorResponse: if the device rejects the buffered read
    """
    # TCP transports can carry larger chunks than UDP
    if self.tcp:
        MAX_CHUNK = 0xFFc0
    else:
        MAX_CHUNK = 16 * 1024
    # <bhii: flag, command id, fct, ext
    command_string = pack('<bhii', 1, command, fct, ext)
    if self.verbose:
        print("rwb cs", command_string)
    response_size = 1024
    data = []
    start = 0
    cmd_response = self.__send_command(1503, command_string, response_size)
    if not cmd_response.get('status'):
        raise ZKErrorResponse("RWB Not supported")
    if cmd_response['code'] == const.CMD_DATA:
        # device answered with the payload inline (no chunked reads needed)
        if self.tcp:
            if self.verbose:
                print("DATA! is {} bytes, tcp length is {}".format(len(self.__data), self.__tcp_length))
            # the TCP frame header accounts for 8 bytes; fetch any
            # remainder that did not arrive with the first read
            if len(self.__data) < (self.__tcp_length - 8):
                need = (self.__tcp_length - 8) - len(self.__data)
                if self.verbose:
                    print("need more data: {}".format(need))
                more_data = self.__recieve_raw_data(need)
                return b''.join([self.__data, more_data]), len(self.__data) + len(more_data)
            else:
                if self.verbose:
                    print("Enough data")
                size = len(self.__data)
                return self.__data, size
        else:
            size = len(self.__data)
            return self.__data, size
    # otherwise the reply announces the total size; read it in chunks
    size = unpack('I', self.__data[1:5])[0]
    if self.verbose:
        print("size fill be %i" % size)
    remain = size % MAX_CHUNK
    packets = (size - remain) // MAX_CHUNK
    # should be size / 16k
    if self.verbose:
        print("rwb: #{} packets of max {} bytes, and extra {} bytes remain".format(packets, MAX_CHUNK, remain))
    for _wlk in range(packets):
        data.append(self.__read_chunk(start, MAX_CHUNK))
        start += MAX_CHUNK
    if remain:
        data.append(self.__read_chunk(start, remain))
        start += remain
    # tell the device the buffered transfer is finished
    self.free_data()
    if self.verbose:
        print("_read w/chunk %i bytes" % start)
    return b''.join(data), start
|
def guess_timefmt(datestr):
    """Try to guess the format a date is written in.

    Locale-independent layouts are tried first, each with '-', '.', ' '
    or '/' as separator: ``YYYY-MM-DD``, ``DD-MM-YYYY``, ``DD-mmm-YYYY``
    and the seasonal variants where the year is the literal ``XXXX`` or
    the configurable seasonal key (default ``9999``).

    Locale-dependent layouts such as ``DD/MM/YYYY``, ``mmm DD YYYY``
    (Apr 21 2002) and ``Mmmmm DD YYYY`` (April 21 2002) are attempted
    last; there is no guarantee they will match.

    A time component, when present, must follow ``%H:%M:%S.%f`` (or a
    truncated variant) and be separated from the date by a space or 'T'.

    :param datestr: the date string to analyse; numeric input yields None
    :returns: a ``datetime.strptime`` format string, or None when no
        supported format matches
    """
    if isinstance(datestr, float) or isinstance(datestr, int):
        return None
    seasonal_key = str(config.get('DEFAULT', 'seasonal_key', '9999'))
    # Replace 'T' with space to handle ISO times.
    dt_delim = 'T' if datestr.find('T') > 0 else ' '
    separators = ['-', '.', ' ', '/']
    date_parts = [
        ['%Y', '%m', '%d'], ['%d', '%m', '%Y'], ['%d', '%b', '%Y'],
        ['XXXX', '%m', '%d'], ['%d', '%m', 'XXXX'], ['%d', '%b', 'XXXX'],
        [seasonal_key, '%m', '%d'], ['%d', '%m', seasonal_key],
        ['%d', '%b', seasonal_key],
    ]
    timeformats = ['%H:%M:%S.%f', '%H:%M:%S', '%H:%M',
                   '%H:%M:%S.%f000Z', '%H:%M:%S.%fZ']
    # Check if a time is indicated or not.
    usetime = False
    for timefmt in timeformats:
        try:
            datetime.strptime(datestr.split(dt_delim)[-1].strip(), timefmt)
            usetime = True
            break
        except ValueError:
            usetime = False

    def matches(fmt):
        # True when the whole string parses under fmt.
        try:
            datetime.strptime(datestr, fmt)
            return True
        except ValueError:
            return False

    def try_datefmt(datefmt):
        # Return the first matching (date [+ time]) format or None.
        if usetime:
            for timefmt in timeformats:
                complfmt = datefmt + dt_delim + timefmt
                if matches(complfmt):
                    return complfmt
            return None
        return datefmt if matches(datefmt) else None

    # Check the simple ones:
    for parts in date_parts:
        for sep in separators:
            found = try_datefmt(sep.join(parts))
            if found is not None:
                return found
    # Check for other formats:
    custom_formats = ['%d/%m/%Y', '%b %d %Y', '%B %d %Y',
                      '%d/%m/XXXX', '%d/%m/' + seasonal_key]
    for fmt in custom_formats:
        found = try_datefmt(fmt)
        if found is not None:
            return found
    return None
|
def add_endpoint(self):
    '''Register the REST endpoints on the application.'''
    for rule in ('/rest/mavlink/<path:arg>', '/rest/mavlink/'):
        self.app.add_url_rule(rule, 'rest', self.request)
|
def get_object_or_404(queryset, *args, **kwargs):
    """Replacement for the ``rest_framework.generics`` and
    ``django.shortcuts`` analogues: fetch a single object from *queryset*
    or raise ``Http404`` when lookup fails."""
    try:
        obj = queryset.get(*args, **kwargs)
    except (ValueError, TypeError, DoesNotExist, ValidationError):
        # Bad lookup values and missing rows are both reported as 404.
        raise Http404()
    else:
        return obj
|
def compute_overlaps(self, *args):
    """Compute the overlap of each row of ``_Y`` against the matching
    row of ``_Y_rec``, using the catalog PSD."""
    psd = self._catalog_object.psd
    return [
        _overlap(self._Y[row, :], self._Y_rec[row, :], psd)
        for row in range(self._Y.shape[0])
    ]
|
def _generate_struct_class_m(self, struct):
    """Defines an Obj C implementation file that represents a struct in Stone.

    Emits, in order: the imports, the API-object ``@implementation``
    (constructors, serialization, description, copy, hash, equality
    sections) and finally the companion serializer ``@implementation``.
    """
    self.emit()
    self._generate_imports_m(self._get_imports_m(struct, default_imports=['DBStoneSerializers', 'DBStoneValidators']))
    struct_name = fmt_class_prefix(struct)
    self.emit('#pragma mark - API Object')
    self.emit()
    # main @implementation block for the struct class
    with self.block_m(struct_name):
        self.emit('#pragma mark - Constructors')
        self.emit()
        self._generate_struct_cstor(struct)
        self._generate_struct_cstor_default(struct)
        self.emit('#pragma mark - Serialization methods')
        self.emit()
        self._generate_serializable_funcs(struct_name)
        self.emit('#pragma mark - Description method')
        self.emit()
        self._generate_description_func(struct_name)
        self.emit('#pragma mark - Copyable method')
        self.emit()
        self._generate_copyable_func()
        self.emit('#pragma mark - Hash method')
        self.emit()
        self._generate_hash_func(struct)
        self.emit('#pragma mark - Equality method')
        self.emit()
        self._generate_equality_func(struct)
        self.emit()
    self.emit()
    self.emit('#pragma mark - Serializer Object')
    self.emit()
    # companion serializer @implementation block
    with self.block_m(fmt_serial_class(struct_name)):
        self._generate_struct_serializer(struct)
        self._generate_struct_deserializer(struct)
|
def run(self, stat_name, criticity, commands, repeat, mustache_dict=None):
    """Run the commands (in background).

    - stat_name: plugin_name (+ header)
    - criticity: criticity of the trigger
    - commands: a list of command lines with optional {{mustache}} tags
    - repeat: if True, repeat the action even when already triggered
    - mustache_dict: plugin stats (can be used within {{mustache}})

    Return True if the commands have been ran.
    """
    already_done = self.get(stat_name) == criticity and not repeat
    if already_done or not self.start_timer.finished():
        # Action already executed => Exit
        return False
    logger.debug("{} action {} for {} ({}) with stats {}".format(
        "Repeat" if repeat else "Run", commands, stat_name, criticity, mustache_dict))
    # Run all actions in background
    for cmd in commands:
        # Replace {{arg}} by the dict one (Thk to {Mustache})
        cmd_full = pystache.render(cmd, mustache_dict) if pystache_tag else cmd
        # Execute the action
        logger.info("Action triggered for {} ({}): {}".format(stat_name, criticity, cmd_full))
        logger.debug("Stats value for the trigger: {}".format(mustache_dict))
        try:
            Popen(cmd_full, shell=True)
        except OSError as err:
            logger.error("Can't execute the action ({})".format(err))
    self.set(stat_name, criticity)
    return True
|
def exists(self, filename):
    """Report whether a file exists on all distribution points.

    Determines file type by extension.

    Args:
        filename: Filename you wish to check. (No path! e.g.:
            "AdobeFlashPlayer-14.0.0.176.pkg")

    Returns:
        Boolean: True only when every child repo reports the file present.
    """
    # all() short-circuits on the first repo missing the file, avoiding
    # needless checks against the remaining distribution points.
    return all(repo.exists(filename) for repo in self._children)
|
def put_object(self, obj):  # TODO consider putting into a ES class
    r"""Wrapper for es.index, determines metadata needed to index from obj.

    If you have a raw object json string you can hard code these:
    index is .kibana (as of kibana4);
    id can be A-Za-z0-9\- and must be unique;
    doc_type is either visualization, dashboard, search
    or for settings docs: config, or index-pattern.
    """
    # NOTE: the original placed this docstring *after* the pr_dbg call,
    # so it was never a real docstring; it is now first.
    self.pr_dbg('put_obj: %s' % self.json_dumps(obj))
    # Validate required metadata; error wording kept identical to the
    # original ('index' for '_index', the field name for the rest).
    required = (('_index', 'index'), ('_id', '_id'),
                ('_type', '_type'), ('_source', '_source'))
    for field, label in required:
        if obj[field] is None or obj[field] == "":
            raise Exception("Invalid Object, no %s" % label)
    self.connect_es()
    # Make sure the target index exists; ignore 400 (already exists).
    self.es.indices.create(index=obj['_index'], ignore=400, timeout="2m")
    try:
        resp = self.es.index(index=obj['_index'], id=obj['_id'], doc_type=obj['_type'], body=obj['_source'], timeout="2m")
    except RequestError as e:
        self.pr_err('RequestError: %s, info: %s' % (e.error, e.info))
        raise
    return resp
|
async def requests_get(self, path: str, **kwargs) -> ClientResponse:
    """Requests GET wrapper in order to use API parameters.

    :param path: the request path
    :param kwargs: forwarded as query-string parameters
    :return: the aiohttp response object (status 200)
    :raises DuniterError: when the node returns a parseable error body
    :raises ValueError: for any other non-200 response
    """
    # Build the URL once; the original recomputed reverse_url() a second
    # time just for the log line.
    url = self.reverse_url(self.connection_handler.http_scheme, path)
    logging.debug("Request : {0}".format(url))
    response = await self.connection_handler.session.get(url, params=kwargs, headers=self.headers, proxy=self.connection_handler.proxy, timeout=15)
    if response.status != 200:
        try:
            error_data = parse_error(await response.text())
            raise DuniterError(error_data)
        except (TypeError, jsonschema.ValidationError):
            # Body did not match the error schema: surface raw status/text.
            raise ValueError('status code != 200 => %d (%s)' % (response.status, (await response.text())))
    return response
|
def parse(cls, compoundIdStr):
    """Parses the specified compoundId string and returns an instance
    of this CompoundId class.

    :param compoundIdStr: obfuscated compound identifier string.
    :returns: an instance of ``cls`` built from the decoded ID fields.
    :raises: An ObjectWithIdNotFoundException if parsing fails. This is
        because this method is a client-facing method, and if a malformed
        identifier (under our internal rules) is provided, the response
        should be that the identifier does not exist.
    """
    # NOTE: `basestring` implies this module targets Python 2.
    if not isinstance(compoundIdStr, basestring):
        raise exceptions.BadIdentifierException(compoundIdStr)
    try:
        deobfuscated = cls.deobfuscate(compoundIdStr)
    except TypeError:
        # When a string that cannot be converted to base64 is passed
        # as an argument, b64decode raises a TypeError. We must treat
        # this as an ID not found error.
        raise exceptions.ObjectWithIdNotFoundException(compoundIdStr)
    try:
        encodedSplits = cls.split(deobfuscated)
        splits = [cls.decode(split) for split in encodedSplits]
    except (UnicodeDecodeError, ValueError):
        # Sometimes base64 decoding succeeds but we're left with
        # unicode gibberish. This is also an IdNotFound.
        raise exceptions.ObjectWithIdNotFoundException(compoundIdStr)
    # pull the differentiator out of the splits before instantiating
    # the class, if the differentiator exists
    fieldsLength = len(cls.fields)
    if cls.differentiator is not None:
        differentiatorIndex = cls.fields.index(cls.differentiatorFieldName)
        if differentiatorIndex < len(splits):
            del splits[differentiatorIndex]
        else:
            # differentiator declared for this class but absent from the ID
            raise exceptions.ObjectWithIdNotFoundException(compoundIdStr)
        fieldsLength -= 1
    if len(splits) != fieldsLength:
        # wrong number of components for this compound ID type
        raise exceptions.ObjectWithIdNotFoundException(compoundIdStr)
    return cls(None, *splits)
|
def product(target, prop1, prop2, **kwargs):
    r"""Calculates the product of multiple property values.

    Parameters
    ----------
    target : OpenPNM Object
        The object which this model is associated with. This controls the
        length of the calculated array, and also provides access to other
        necessary properties.
    prop1 : string
        The name of the first argument
    prop2 : string
        The name of the second argument

    Notes
    -----
    Additional properties can be specified beyond just ``prop1`` and
    ``prop2`` by including additional arguments in the function call
    (i.e. ``prop3='pore.foo'``).
    """
    # Multiply the named properties together in call order.
    prop_names = [prop1, prop2] + list(kwargs.values())
    result = target[prop_names[0]]
    for name in prop_names[1:]:
        result = result * target[name]
    return result
|
def process_temporary_file(self, tmp_file):
    """Truncate the filename if necessary, save the model and return a
    201 JSON response carrying the file's uuid."""
    name = tmp_file.filename
    if len(name) > 100:
        # Keep the extension; trim the stem so the result fits in 100 chars.
        stem = name[:name.rfind(".")]
        tmp_file.filename = "%s.%s" % (stem[:99 - len(tmp_file.extension)], tmp_file.extension)
    tmp_file.save()
    payload = json.dumps({'uuid': str(tmp_file.uuid)})
    response = HttpResponse(payload, status=201)
    response['Content-type'] = "text/plain"
    return response
|
def calc_cost(y, yhat, cost_matrix):
    """Calculate the mean cost with the given cost matrix.

    y : ground truth labels
    yhat : estimated labels
    cost_matrix : array-like, shape=(n_classes, n_classes)
        Entry (i, j) is the cost of the ground truth being class i while
        predicting class j.
    """
    rows = list(y)
    cols = list(yhat)
    # Fancy-index the per-sample costs, then average.
    return np.mean(cost_matrix[rows, cols])
|
def handle_json_GET_neareststops(self, params):
    """Return a list of the nearest 'limit' stops to 'lat', 'lon'"""
    latitude = float(params.get('lat'))
    longitude = float(params.get('lon'))
    count = int(params.get('limit'))
    nearest = self.server.schedule.GetNearestStops(lat=latitude, lon=longitude, n=count)
    return [StopToTuple(stop) for stop in nearest]
|
def _make_boundary ( self ) :
"""creates a boundary for multipart post ( form post )"""
|
if self . PY2 :
return '===============%s==' % uuid . uuid4 ( ) . get_hex ( )
elif self . PY3 :
return '===============%s==' % uuid . uuid4 ( ) . hex
else :
from random import choice
digits = "0123456789"
letters = "abcdefghijklmnopqrstuvwxyz"
return '===============%s==' % '' . join ( choice ( letters + digits ) for i in range ( 15 ) )
|
def common(self, other):
    """Return two objects with the same dimensions if they lie in the same
    orthogonal plane.

    Dimensions missing from either side, and dimensions where both values
    are effectively zero (within ``_EPSILON``), are dropped. For example,
    ``Location(pop=1, snap=2).common(Location(crackle=1, snap=3))`` yields
    the pair restricted to ``snap``.
    """
    reduced_self = None
    reduced_other = None
    for dim in set(self.keys()) | set(other.keys()):
        mine = self.get(dim, None)
        theirs = other.get(dim, None)
        # axis is missing in one or the other
        if mine is None or theirs is None:
            continue
        # both values are (numerically) zero
        if -_EPSILON < mine < _EPSILON and -_EPSILON < theirs < _EPSILON:
            continue
        if reduced_self is None:
            reduced_self = self.__class__()
        if reduced_other is None:
            reduced_other = self.__class__()
        reduced_self[dim] = self[dim]
        reduced_other[dim] = other[dim]
    return reduced_self, reduced_other
|
def _send_container_healthcheck_sc(self, containers_by_id):
    """Send health service checks for containers."""
    for container in containers_by_id.itervalues():
        tags = self._get_tags(container, HEALTHCHECK)
        # First (tag, whitelist-rule) pair that matches triggers exactly
        # one service check for this container; any() short-circuits just
        # like the original double break.
        matched = any(
            re.match(rule, tag)
            for tag in tags
            for rule in self.whitelist_patterns
        )
        if matched:
            self._submit_healthcheck_sc(container)
|
def change_task_size(self, size):
    """Blocking request to change the number of running tasks.

    :param size: new task-pool size; coerced with int()
    :return: True on success, False when *size* is not a non-negative int
    """
    self._pause.value = True
    self.log.debug("About to change task size to {0}".format(size))
    try:
        size = int(size)
    except ValueError:
        self.log.error("Cannot change task size, non integer size provided")
        return False
    if size < 0:
        self.log.error("Cannot change task size, less than 0 size provided")
        return False
    # BUG FIX: the original assigned `self.max_tasks = size` *before*
    # comparing size to self.max_tasks, so both resize branches compared
    # size against itself and could never run. Compare against the
    # previous value instead.
    previous = self.max_tasks
    self.max_tasks = size
    if size < previous:
        diff = previous - size
        self.log.debug("Reducing size offset by {0}".format(diff))
        # Wait until enough tasks are free to retire `diff` of them.
        while True:
            self._update_tasks()
            if len(self.free_tasks) >= diff:
                for _ in range(diff):
                    task_id = self.free_tasks.pop(0)
                    del self.current_tasks[task_id]
                break
            time.sleep(0.5)
        if not size:
            # Shrinking to zero means a full reset; stay paused.
            self._reset_and_pause()
            return True
    elif size > previous:
        diff = size - previous
        for _ in range(diff):
            task_id = str(uuid.uuid4())
            self.current_tasks[task_id] = {}
            self.free_tasks.append(task_id)
    self._pause.value = False
    self.log.debug("Task size changed to {0}".format(size))
    return True
|
def get_as_bytes(self, s3_path):
    """Get the contents of an object stored in S3 as bytes.

    :param s3_path: URL for target S3 location
    :return: File contents as pure bytes
    """
    bucket, key = self._path_to_bucket_and_key(s3_path)
    return self.s3.Object(bucket, key).get()['Body'].read()
|
def on_size(self, event):
    '''handle window size changes'''
    state = self.state
    self.need_redraw = True
    if not state.report_size_changes:
        return
    # tell owner the new size, but only when it actually changed
    new_size = self.frame.GetSize()
    if new_size == self.last_size:
        return
    self.last_size = new_size
    state.out_queue.put(MPImageNewSize(new_size))
|
def formatbyindex(string, fg=None, bg=None, indices=None):
    """Wrap color syntax around the characters of *string* selected by
    *indices* and return the result.

    fg and bg specify foreground- and background colors, respectively.
    Consecutive indices are grouped so each run is wrapped once.
    """
    # Avoid the shared-mutable-default pitfall; None keeps the old
    # "no indices" calling convention intact.
    if indices is None:
        indices = []
    if not string or not indices or (fg is bg is None):
        return string
    result, pos = '', 0
    # Group consecutive indices: (enum index - value) is constant within a
    # run. The lambda syntax is necessary to support both Python 2 and 3.
    for _, group in itertools.groupby(enumerate(sorted(indices)), lambda x: x[0] - x[1]):
        run = list(map(operator.itemgetter(1), group))
        start, end = run[0], run[-1] + 1
        if start < len(string):
            result += string[pos:start]
            result += formatcolor(string[start:end], fg, bg)
            pos = end
    if pos < len(string):
        result += string[pos:]
    return result
|
def print_textandtime(text: str) -> None:
    """Print the given string followed by the current date and time with
    microsecond precision, for logging purposes.

    The output has the shape ``<text> (YYYY-MM-DD HH:MM:SS.ffffff).``.
    """
    timestring = format(datetime.datetime.now(), '%Y-%m-%d %H:%M:%S.%f')
    print(f'{text} ({timestring}).')
|
def _deserialize(self, response):
    """Attempt to deserialize resource from response.

    :param requests.Response response: latest REST call response.
    :returns: the deserialized resource, or None if no status-code
        variant produced one.
    """
    # Hacking response with initial status_code: get_outputs() dispatches
    # on response.status_code, so temporarily pretend this response has the
    # status code of the initial request.
    previous_status = response.status_code
    response.status_code = self.initial_status_code
    resource = self.get_outputs(response)
    response.status_code = previous_status
    # Hack for Storage or SQL, to workaround the bug in the Python generator:
    # retry deserialization pretending the response was a 200 then a 201.
    if resource is None:
        previous_status = response.status_code
        for status_code_to_test in [200, 201]:
            try:
                response.status_code = status_code_to_test
                resource = self.get_outputs(response)
            except ClientException:
                # this status-code variant failed to deserialize; try next
                pass
            else:
                # first variant that deserializes without raising wins
                # (returned even if get_outputs produced None)
                return resource
            finally:
                # always restore the real status code on the response
                response.status_code = previous_status
    return resource
|
def to_dict(self):
    """Pack the load averages into a nicely-keyed dictionary."""
    return {meta.display: meta.value for meta in self.intervals.values()}
|
def _shutdown_multicast_socket(self):
    """Shutdown multicast socket.

    Drops the multicast group membership, removes the socket from the
    listening set, closes it and clears the reference — in that order.

    :rtype: None
    """
    self.debug("()")
    self._drop_membership_multicast_socket()
    # stop select()/listen bookkeeping on this socket before closing it
    self._listening.remove(self._multicast_socket)
    self._multicast_socket.close()
    # clear the reference so later teardown/checks see no socket
    self._multicast_socket = None
|
def action_remove(cls, request, category_list):
    """Handles `remove` action from CategoryList editor.

    Removes an actual category if a target object is not set for the list.
    Removes a tie-to-category object if a target object is set for the list.

    :param Request request: Django request object
    :param CategoryList category_list: CategoryList object to operate upon.
    :return: True on success otherwise an exception from SitecatsException
        family is raised.
    """
    # the editor must explicitly allow removals
    if not category_list.editor.allow_remove:
        raise SitecatsSecurityException('`action_remove()` is not supported by parent `%s`category.' % category_list.alias)
    category_id = int(request.POST.get('category_id', 0))
    if not category_id:
        raise SitecatsSecurityException('Unsupported `category_id` value - `%s` - is passed to `action_remove()`.' % category_id)
    category = get_cache().get_category_by_id(category_id)
    if not category:
        raise SitecatsSecurityException('Unable to get `%s` category in `action_remove()`.' % category_id)
    cat_ident = category.alias or category.id
    # locked categories can never be removed through the editor
    if category.is_locked:
        raise SitecatsSecurityException('`action_remove()` is not supported by `%s` category.' % cat_ident)
    # the target must be a direct child of the list's category
    if category.parent_id != category_list.get_id():
        raise SitecatsSecurityException('`action_remove()` is unable to remove `%s`: ' 'not a child of parent `%s` category.' % (cat_ident, category_list.alias))
    min_num = category_list.editor.min_num
    def check_min_num(num):
        # enforce the editor's minimum-subcategories constraint: removing
        # one item from `num` must not drop below min_num
        if min_num is not None and num - 1 < min_num:
            subcats_str = ungettext_lazy('subcategory', 'subcategories', min_num)
            error_msg = _('Unable to remove "%(target_category)s" category from "%(parent_category)s": ' 'parent category requires at least %(num)s %(subcats_str)s.') % {'target_category': category.title, 'parent_category': category_list.get_title(), 'num': min_num, 'subcats_str': subcats_str}
            raise SitecatsValidationError(error_msg)
    child_ids = get_cache().get_child_ids(category_list.alias)
    check_min_num(len(child_ids))
    if category_list.obj is None:
        # Remove category itself and children.
        category.delete()
    else:
        # Remove just a category-to-object tie.
        # TODO filter user/status
        check_min_num(category_list.obj.get_ties_for_categories_qs(child_ids).count())
        category_list.obj.remove_from_category(category)
    return True
|
def get_small_file(context, path):
    """Basic in-memory caching module fetcher. This generates one roundtrip
    for every previously unseen file, so it is only a temporary solution.

    :param context:
        Context we should direct FileService requests to. For now (and
        probably forever) this is just the top-level Mitogen connection
        manager process.
    :param path:
        Path to fetch from FileService, must previously have been
        registered by a privileged context using the `register` command.
    :returns:
        Bytestring file data.
    """
    pool = mitogen.service.get_or_create_pool(router=context.router)
    push_file_service = pool.get_service(u'mitogen.service.PushFileService')
    return push_file_service.get(path)
|
def set_state(self, updater=None, **kwargs):
    """Update the datastore.

    :param func | dict updater: (state) => state_change or dict state_change
    :rtype: Iterable[tornado.concurrent.Future]
    """
    # Resolve the change set: callable -> computed, dict -> as-is,
    # otherwise fall back to keyword arguments.
    if callable(updater):
        state_change = updater(self)
    else:
        state_change = kwargs if updater is None else updater
    results = []
    for key, value in state_change.items():
        results.extend(self.set(key, value))
    return results
|
def sixlowpan_fragment(packet, datagram_tag=1):
    """Split a packet into different links to transmit as 6lowpan packets.

    Usage example:

    >>> ipv6 = .....(very big packet)
    >>> pkts = sixlowpan_fragment(ipv6, datagram_tag=0x17)
    >>> send = [Dot15d4()/Dot15d4Data()/x for x in pkts]
    >>> wireshark(send)
    """
    if not packet.haslayer(IPv6):
        raise Exception("SixLoWPAN only fragments IPv6 packets !")
    payload = raw(packet[IPv6])
    total_size = len(payload)
    # Small enough to fit a single frame: no fragmentation headers needed.
    if total_size <= MAX_SIZE:
        return [packet]
    frags = [payload[off:off + MAX_SIZE] for off in range(0, total_size, MAX_SIZE)]
    frags[0] = LoWPANFragmentationFirst(datagramTag=datagram_tag, datagramSize=total_size) / frags[0]  # noqa: E501
    # Subsequent fragments carry their offset in units of 8 bytes.
    for idx in range(1, len(frags)):
        frags[idx] = LoWPANFragmentationSubsequent(datagramTag=datagram_tag, datagramSize=total_size, datagramOffset=MAX_SIZE // 8 * idx) / frags[idx]  # noqa: E501
    return frags
|
def get_mean_and_stddevs(self, sites, rup, dists, imt, stddev_types):
    """See :meth:`superclass method
    <.base.GroundShakingIntensityModel.get_mean_and_stddevs>`
    for spec of input and result values."""
    # Coefficients for the requested intensity measure type.
    C = self.COEFFS[imt]
    # Magnitude term is centred on Mw 6.
    mag = rup.mag - 6
    # Effective distance: Joyner-Boore distance with near-source
    # saturation term c7.
    d = np.sqrt(dists.rjb ** 2 + C['c7'] ** 2)
    mean = np.zeros_like(d)
    mean += C['c1'] + C['c2'] * mag + C['c3'] * mag ** 2 + C['c6']
    # Within 100 km: geometric-spreading term only.
    idx = d <= 100.
    mean[idx] = mean[idx] + C['c5'] * np.log10(d[idx])
    # Beyond 100 km: spreading evaluated at 100 km plus additional
    # distance-dependent attenuation terms.
    idx = d > 100.
    mean[idx] = (mean[idx] + C['c5'] * np.log10(100.) - np.log10(d[idx] / 100.) + C['c4'] * (d[idx] - 100.))
    # convert from log10 to ln and from cm/s**2 to g
    mean = np.log((10.0 ** (mean - 2.0)) / g)
    stddevs = self._get_stddevs(C, stddev_types, dists.rjb.shape[0])
    return mean, stddevs
|
def _central2 ( f , fx , x , h ) :
"""Eq . 8"""
|
n = len ( x )
ee = np . diag ( h )
dtype = np . result_type ( fx )
g = np . empty ( n , dtype = dtype )
gg = np . empty ( n , dtype = dtype )
for i in range ( n ) :
g [ i ] = f ( x + ee [ i ] )
gg [ i ] = f ( x - ee [ i ] )
hess = np . empty ( ( n , n ) , dtype = dtype )
np . outer ( h , h , out = hess )
for i in range ( n ) :
for j in range ( i , n ) :
hess [ i , j ] = ( f ( x + ee [ i , : ] + ee [ j , : ] ) + f ( x - ee [ i , : ] - ee [ j , : ] ) - g [ i ] - g [ j ] + fx - gg [ i ] - gg [ j ] + fx ) / ( 2 * hess [ j , i ] )
hess [ j , i ] = hess [ i , j ]
return hess
|
def noise(mesh, magnitude=None):
    """Add random noise to every vertex of a mesh.

    Makes no effort to maintain topology or sanity. Each vertex coordinate
    is displaced by a uniform random amount in ``[-magnitude/2, magnitude/2)``
    (note: uniform, not gaussian, despite the function's history).

    Parameters
    ----------
    mesh : Trimesh object (will not be mutated)
    magnitude : float, maximum displacement per axis.
        Defaults to ``mesh.scale / 100.0``.

    Returns
    -------
    permutated : Trimesh object, input mesh with noise applied
    """
    if magnitude is None:
        magnitude = mesh.scale / 100.0
    jitter = (np.random.random(mesh.vertices.shape) - .5) * magnitude
    displaced = mesh.vertices.copy() + jitter
    # Re-order the faces randomly as well.
    shuffled_tris = np.random.permutation(displaced[mesh.faces])
    result_type = util.type_named(mesh, 'Trimesh')
    return result_type(**triangles_module.to_kwargs(shuffled_tris))
|
def post(self, path, data):
    """Call the Infoblox device to post the data for the path passed in.

    :param str path: The request path appended to the base request URL
    :param dict data: The data for the post; ``None`` is sent as ``{}``
    :rtype: requests.Response
    """
    LOGGER.debug('Posting data: %r', data)
    # NOTE(review): verify=False disables TLS certificate validation —
    # presumably to accept the device's self-signed cert; confirm intended.
    return self.session.post(self._request_url(path), data=json.dumps(data or {}), headers=self.HEADERS, auth=self.auth, verify=False)
|
def listar_por_id(self, id):
    """Fetch a piece of equipment from its identifier.

    :param id: Equipment ID.
    :return: Dictionary with the following structure::

        {'equipamento': {'id': <id_equipamento>,
                         'nome': <nome_equipamento>,
                         'id_tipo_equipamento': <id_tipo_equipamento>,
                         'nome_tipo_equipamento': <nome_tipo_equipamento>,
                         'id_modelo': <id_modelo>,
                         'nome_modelo': <nome_modelo>,
                         'id_marca': <id_marca>,
                         'nome_marca': <nome_marca>}}

    :raise EquipamentoNaoExisteError: No equipment registered with the given id.
    :raise InvalidParameterError: The equipment id is null or empty.
    :raise DataBaseError: networkapi failed to access the database.
    :raise XMLError: networkapi failed to generate the response XML.
    """
    if id is None:
        raise InvalidParameterError(u'O id do equipamento não foi informado.')
    request_url = 'equipamento/id/' + urllib.quote(id) + '/'
    code, xml = self.submit(None, 'GET', request_url)
    return self.response(code, xml)
|
def set_option(self, optionname, value):
    """Set the named option to *value*, preserving the original type.

    :param optionname: name of the option to set
    :param value: new value; coerced to the default value's type when safe
    :returns: True if the option exists and was set, False otherwise
    """
    for name, parms in zip(self.opt_names, self.opt_parms):
        if name != optionname:
            continue
        # Ensure that the resulting type of the set option matches that of
        # the default value. This prevents a string option from being
        # coerced to int simply because it holds a numeric value (e.g. a
        # password). See PR #1526 and Issue #1597.
        defaulttype = type(parms['enabled'])
        if defaulttype != type(value) and defaulttype != type(None):
            value = defaulttype(value)
        parms['enabled'] = value
        return True
    # BUG FIX: the previous implementation returned False as soon as the
    # FIRST option name failed to match, so only the first option was ever
    # settable. Report failure only after all options have been checked.
    return False
|
def get_all_anchors_fpn(strides=None, sizes=None):
    """Build anchors for every FPN level.

    Returns:
        [anchors]: each anchors is a SxSx NUM_ANCHOR_RATIOS x4 array.
    """
    # Fall back to the configured FPN strides / RPN sizes.
    strides = cfg.FPN.ANCHOR_STRIDES if strides is None else strides
    sizes = cfg.RPN.ANCHOR_SIZES if sizes is None else sizes
    assert len(strides) == len(sizes)
    # One anchor field per (stride, size) pair, i.e. per pyramid level.
    return [get_all_anchors(stride=lvl_stride, sizes=(lvl_size,))
            for lvl_stride, lvl_size in zip(strides, sizes)]
|
def list_node(self, **kwargs):
    """list or watch objects of kind Node.

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, pass ``async_req=True``::

        >>> thread = api.list_node(async_req=True)
        >>> result = thread.get()

    :param async_req bool
    :param str pretty: If 'true', then the output is pretty printed.
    :param str _continue: Continuation token from a previous paginated list
        call; must be used with otherwise-identical query parameters.
    :param str field_selector: Restrict returned objects by their fields.
    :param str label_selector: Restrict returned objects by their labels.
    :param int limit: Maximum number of responses for a list call; when more
        items exist the server sets ``continue`` on the list metadata.
    :param str resource_version: Show changes after this resource version
        (watch) or control read freshness (list).
    :param int timeout_seconds: Timeout for the list/watch call.
    :param bool watch: Stream add/update/remove notifications instead of
        returning a single list. Specify resourceVersion.
    :return: V1NodeList. If the method is called asynchronously, returns the
        request thread.
    """
    kwargs['_return_http_data_only'] = True
    # Async callers get the thread back; sync callers get the payload.
    # Both paths delegate to the *_with_http_info variant.
    if kwargs.get('async_req'):
        return self.list_node_with_http_info(**kwargs)
    return self.list_node_with_http_info(**kwargs)
|
def __to_file(self, message_no):
    """Write a single message to file.

    :param message_no: index into ``self.messages`` of the message to dump
    :returns: the name of the file written
    """
    # Derive the output file name from the message number.
    filename = self.__create_file_name(message_no)
    try:
        # Honour the per-message encoding when writing the output text.
        with codecs.open(filename, mode='w', encoding=self.messages[message_no].encoding) as file__:
            file__.write(self.messages[message_no].output)
    except IOError as excep:
        # Python 2 print statements: report the failure and abort.
        # NOTE(review): "Unable for open" reads like a typo for
        # "Unable to open" — user-facing text left unchanged here.
        print 'Unable for open the file \'{0}\' for writing. The ' 'following exception was raised:'.format(filename)
        print excep
        print 'Exiting!'
        sys.exit(2)
    return filename
|
def get_savename_from_varname(varname, varname_prefix=None, savename_prefix=None):
    """Map a graph variable name to the name used to save it.

    Args:
        varname (str): a variable name in the graph
        varname_prefix (str): an optional prefix (without a trailing ``/``)
            stripped from ``varname`` when present
        savename_prefix (str): an optional prefix prepended to every savename
    Returns:
        str: the name used to save the variable
    """
    savename = varname
    if varname_prefix is not None and savename.startswith(varname_prefix):
        # Drop the prefix plus the '/' separator that follows it.
        savename = savename[len(varname_prefix) + 1:]
    if savename_prefix is not None:
        savename = '{}/{}'.format(savename_prefix, savename)
    return savename
|
def start(self, min_nodes=None):
    """Starts up all the instances in the cloud.

    To speed things up, all instances are started in a separate thread.
    To make sure ElastiCluster is not stopped during creation of an
    instance, it will overwrite the sigint handler. As soon as the last
    started instance is returned and saved to the repository, sigint is
    executed as usual.

    A VM instance is considered 'up and running' as soon as an SSH
    connection can be established. If the startup timeout is reached
    before all instances are started, ElastiCluster stops the cluster
    and terminates all VM instances.

    This method is blocking and might take some time depending on the
    amount of instances to start.

    :param min_nodes: minimum number of nodes to start in case the quota
                      is reached before all instances are up
    :type min_nodes: dict [node_kind] = number
    """
    nodes = self.get_all_nodes()
    log.info("Starting cluster nodes ...")
    # DO_NOT_FORK is a debugging aid: start VMs one at a time instead of
    # through the thread pool.
    if log.DO_NOT_FORK:
        nodes = self._start_nodes_sequentially(nodes)
    else:
        nodes = self._start_nodes_parallel(nodes, self.thread_pool_max_size)
    # checkpoint cluster state
    self.repository.save_or_update(self)
    not_started_nodes = self._check_starting_nodes(nodes, self.startup_timeout)
    # now that all nodes are up, checkpoint cluster state again
    self.repository.save_or_update(self)
    # Try to connect to each node to gather IP addresses and SSH host keys
    log.info("Checking SSH connection to nodes ...")
    pending_nodes = nodes - not_started_nodes
    self._gather_node_ip_addresses(pending_nodes, self.startup_timeout)
    # It might be possible that the node.connect() call updated
    # the `preferred_ip` attribute, so, let's save the cluster
    # again.
    self.repository.save_or_update(self)
    # A lot of things could go wrong when starting the cluster. To
    # ensure a stable cluster fitting the needs of the user in terms of
    # cluster size, we check the minimum nodes within the node groups to
    # match the current setup.
    min_nodes = self._compute_min_nodes(min_nodes)
    self._check_cluster_size(min_nodes)
|
def ask_bool(question: str, default: bool = True) -> bool:
    """Ask a yes/no question on stdin.

    :param question: prompt shown to the user
    :param default: value returned on an empty answer (also controls which
        letter is capitalised in the prompt)
    :returns: True for "y"/"yes" (case-insensitive), ``default`` for an
        empty answer, False for anything else
    """
    default_q = "Y/n" if default else "y/N"
    answer = input("{0} [{1}]: ".format(question, default_q))
    lower = answer.lower()
    if not lower:
        return default
    # BUG FIX: previously only the exact string "y" counted as yes, so a
    # full "yes" answer was silently treated as "no".
    return lower in ("y", "yes")
|
async def plonks(self, ctx):
    """Show the members banned from the bot in this server."""
    guild = ctx.message.server
    # IDs of plonked users recorded for this guild.
    banned_ids = self.config.get('plonks', {}).get(guild.id, [])
    # Resolve IDs to member objects, dropping users who have left.
    resolved = filter(None, map(guild.get_member, banned_ids))
    members = '\n'.join(map(str, resolved))
    if not members:
        await self.bot.responses.failure(message='No members are banned in this server.')
    else:
        await self.bot.responses.basic(title="Plonked Users:", message=members)
|
def list_buckets(self, offset=0, limit=100):
    """List the current user's buckets, ordered by label.

    :param offset: pagination offset
    :param limit: page size; the Zenobase API breaks above 100
    :raises ValueError: if ``limit`` exceeds 100
    :returns: the decoded API response
    """
    # TODO: If limit > 100, do multiple fetches
    if limit > 100:
        # ValueError is more precise than a bare Exception and, being an
        # Exception subclass, keeps any existing handlers working.
        raise ValueError("Zenobase can't handle limits over 100")
    return self._get("/users/{}/buckets/?order=label&offset={}&limit={}".format(self.client_id, offset, limit))
|
def read_tf_records(batch_size, tf_records, num_repeats=1, shuffle_records=True, shuffle_examples=True, shuffle_buffer_size=None, interleave=True, filter_amount=1.0):
    """Build a tf.data pipeline of batched examples from TFRecord files.

    Args:
        batch_size: batch size to return
        tf_records: a list of tf_record filenames
        num_repeats: how many times the data should be read (default: once)
        shuffle_records: whether to shuffle the order of files read
        shuffle_examples: whether to shuffle the tf.Examples
        shuffle_buffer_size: how big of a buffer to fill before shuffling;
            required when ``shuffle_examples`` is True
        interleave: whether to interleave examples from multiple tf_records
        filter_amount: what fraction of records to keep
    Returns:
        a tf dataset of batched tensors
    Raises:
        ValueError: if ``shuffle_examples`` is set without a buffer size
    """
    if shuffle_examples and not shuffle_buffer_size:
        raise ValueError("Must set shuffle buffer size if shuffling examples")
    tf_records = list(tf_records)
    if shuffle_records:
        random.shuffle(tf_records)
    record_list = tf.data.Dataset.from_tensor_slices(tf_records)
    # compression_type here must agree with write_tf_examples
    map_func = functools.partial(tf.data.TFRecordDataset, buffer_size=8 * 1024 * 1024, compression_type='ZLIB')
    if interleave:
        # cycle_length = how many tfrecord files are read in parallel
        # The idea is to shuffle both the order of the files being read,
        # and the examples being read from the files.
        dataset = record_list.apply(tf.contrib.data.parallel_interleave(map_func, cycle_length=64, sloppy=True))
    else:
        dataset = record_list.flat_map(map_func)
    if filter_amount < 1.0:
        # Randomly keep each record with probability ``filter_amount``.
        dataset = dataset.filter(lambda _: tf.random_uniform([]) < filter_amount)
    dataset = dataset.repeat(num_repeats)
    if shuffle_examples:
        dataset = dataset.shuffle(buffer_size=shuffle_buffer_size)
    dataset = dataset.batch(batch_size)
    return dataset
|
def get_dashboard(self, id, **kwargs):
    """Retrieve a (v2) dashboard by id."""
    # Thin delegation to the generic object lookup on the dashboard endpoint.
    return self._get_object_by_name(self._DASHBOARD_ENDPOINT_SUFFIX, id, **kwargs)
|
def _prepare_consume_payload(did, service_agreement_id, service_definition_id, signature, consumer_address):
    """Prepare a payload to send to `Brizo`.

    :param did: DID, str
    :param service_agreement_id: Service Agreement Id, str
    :param service_definition_id: identifier of the service inside the asset
        DDO (DID document), str
    :param signature: the signed agreement message hash which includes
        conditions and their parameters values and other details of the
        agreement, str
    :param consumer_address: ethereum address of the consumer signing this
        agreement, hex-str
    :return: JSON-encoded payload, str (note: ``json.dumps`` output, not a
        dict as previously documented)
    """
    return json.dumps({'did': did, 'serviceAgreementId': service_agreement_id, ServiceAgreement.SERVICE_DEFINITION_ID: service_definition_id, 'signature': signature, 'consumerAddress': consumer_address})
|
def _set_profile(self, v, load=False):
    """Setter method for profile, mapped from YANG variable /protocol/lldp/profile (list).

    If this variable is read-only (config: false) in the source YANG file,
    then _set_profile is considered as a private method. Backends looking
    to populate this variable should do so via calling
    thisObj._set_profile() directly.

    :param v: value to set; coerced through ``v._utype`` when present
    :param load: unused here; kept for generated-setter signature parity
    :raises ValueError: if ``v`` cannot be coerced to the generated list type
    """
    # Unwrap typed values produced by the generated bindings.
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        # Coerce the value into the generated YANG list container type;
        # the keyword soup below is emitted by pyangbind and must match
        # the YANG model exactly.
        t = YANGDynClass(v, base=YANGListType("profile_name", profile.profile, yang_name="profile", rest_name="profile", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='profile-name', extensions={u'tailf-common': {u'cli-full-command': None, u'info': u'The LLDP Profile table.', u'callpoint': u'lldp_global_profile_conf'}}), is_container='list', yang_name="profile", rest_name="profile", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'info': u'The LLDP Profile table.', u'callpoint': u'lldp_global_profile_conf'}}, namespace='urn:brocade.com:mgmt:brocade-lldp', defining_module='brocade-lldp', yang_type='list', is_config=True)
    except (TypeError, ValueError):
        raise ValueError({'error-string': """profile must be of a type compatible with list""", 'defined-type': "list", 'generated-type': """YANGDynClass(base=YANGListType("profile_name",profile.profile, yang_name="profile", rest_name="profile", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='profile-name', extensions={u'tailf-common': {u'cli-full-command': None, u'info': u'The LLDP Profile table.', u'callpoint': u'lldp_global_profile_conf'}}), is_container='list', yang_name="profile", rest_name="profile", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'info': u'The LLDP Profile table.', u'callpoint': u'lldp_global_profile_conf'}}, namespace='urn:brocade.com:mgmt:brocade-lldp', defining_module='brocade-lldp', yang_type='list', is_config=True)""", })
    self.__profile = t
    # Notify the change-tracking machinery when it is available.
    if hasattr(self, '_set'):
        self._set()
|
def create(model, trust_region, entropy_coefficient, q_coefficient, max_grad_norm, discount_factor, rho_cap=10.0, retrace_rho_cap=1.0, average_model_alpha=0.99, trust_region_delta=1.0):
    """Vel factory function.

    Builds an :class:`AcerPolicyGradient` from the supplied
    hyper-parameters; ``model`` is forwarded as the model factory.
    """
    return AcerPolicyGradient(trust_region=trust_region, model_factory=model, entropy_coefficient=entropy_coefficient, q_coefficient=q_coefficient, rho_cap=rho_cap, retrace_rho_cap=retrace_rho_cap, max_grad_norm=max_grad_norm, discount_factor=discount_factor, average_model_alpha=average_model_alpha, trust_region_delta=trust_region_delta)
|
def get_coordinate_system(self):
    """Check ``self.Data`` for available coordinate systems.

    Returns
    -------
    initial_coordinate, coordinate_list : str, list
        e.g. 'geographic', ['specimen', 'geographic']
    """
    available = ['specimen']
    initial = 'specimen'
    for spec in self.specimens:
        record = self.Data[spec]
        # Geographic data present for any specimen also makes it the
        # preferred initial system.
        if 'geographic' not in available and record['zijdblock_geo']:
            available.append('geographic')
            initial = 'geographic'
        if 'tilt-corrected' not in available and record['zijdblock_tilt']:
            available.append('tilt-corrected')
    return initial, available
|
def filter_for_probability(key: str, population: Union[pd.DataFrame, pd.Series, Index], probability: Array, index_map: IndexMap = None) -> Union[pd.DataFrame, pd.Series, Index]:
    """Decide an event outcome for each individual in a population from probabilities.

    Given a population (or its index) and per-individual probabilities of some
    event, return the sub-population for whom the event occurred.

    Parameters
    ----------
    key :
        A string used to create a seed for the random number generation.
    population :
        A view on the simulants for which we are determining the outcome
        of an event.
    probability :
        A 1d list of probabilities, aligned with ``population``
        (``len(population) == len(probability)``).
    index_map :
        A mapping between the provided index and an integer index into the
        random number array.

    Returns
    -------
    pd.core.generic.PandasObject
        The sub-population for whom the event occurred; same type as
        ``population``.
    """
    if population.empty:
        return population
    # Indexes are used directly; frames/series contribute their index.
    idx = population if isinstance(population, pd.Index) else population.index
    draws = random(key, idx, index_map)
    chosen = np.array(draws < probability)
    return population[chosen]
|
def _get_model_metadata(model_class, metadata, version=None):
    """Return user-defined metadata as a dict, augmented with the fields
    every model should carry (toolkit version, model type and, optionally,
    a model version)."""
    from turicreate import __version__
    base = {'turicreate_version': __version__, 'type': model_class}
    if version is not None:
        base['version'] = str(version)
    # User-supplied entries win over the defaults on key collision.
    base.update(metadata)
    return base
|
def HasTable(self, table_name):
    """Determines if a specific table exists.

    Args:
        table_name (str): table name.

    Returns:
        bool: True if the table exists.

    Raises:
        RuntimeError: if the database is not opened.
    """
    if not self._connection:
        raise RuntimeError('Cannot determine if table exists database not opened.')
    query = self._HAS_TABLE_QUERY.format(table_name)
    self._cursor.execute(query)
    # A truthy row means the table was found.
    return bool(self._cursor.fetchone())
|
def connect(self, output_name, input_method):
    """Connect an output to any callable object.

    :py:meth:`on_connect` is called after the connection is made to
    allow components to do something when an output is connected.

    :param str output_name: the output to connect. Must be a member
        of :py:attr:`~Component.outputs`.
    :param callable input_method: the thread-safe callable to invoke
        when :py:meth:`send` is called.
    :raises RuntimeError: if the component is already running.
    """
    self.logger.debug('connect "%s"', output_name)
    # Wiring may only change while the component is stopped.
    if self.running():
        raise RuntimeError('Cannot connect running component')
    self._component_connections[output_name].append(input_method)
    self.on_connect(output_name)
|
def trace_generator(trace, start=0, stop=None, step=1):
    """Return a generator yielding values from the object's trace.

    Ex:
        T = trace_generator(theta.trace)
        T.next()
        for t in T: ...

    :param trace: object exposing ``length()`` and ``gettrace(slicing=...)``
    :param start: first index to yield
    :param stop: index to stop before; defaults to the full trace length
    :param step: stride between yielded samples
    """
    i = start
    # BUG FIX: ``stop = stop or np.inf`` treated an explicit ``stop=0`` as
    # "no limit"; only substitute infinity when stop is actually None.
    stop = np.inf if stop is None else stop
    size = min(trace.length(), stop)
    while i < size:
        index = slice(i, i + 1)
        yield trace.gettrace(slicing=index)[0]
        i += step
|
def __SendChunk(self, start, additional_headers=None):
    """Send the specified chunk.

    :param start: byte offset of this chunk within the upload stream.
    :param additional_headers: optional dict of extra HTTP headers to send.
    :returns: the response from ``__SendMediaRequest``.
    """
    self.EnsureInitialized()
    # When the total size is unknown the body is a live stream and must
    # not be logged.
    no_log_body = self.total_size is None
    request = http_wrapper.Request(url=self.url, http_method='PUT')
    if self.__gzip_encoded:
        request.headers['Content-Encoding'] = 'gzip'
        body_stream, read_length, exhausted = compression.CompressStream(self.stream, self.chunksize)
        end = start + read_length
        # If the stream length was previously unknown and the input stream
        # is exhausted, then we're at the end of the stream.
        if self.total_size is None and exhausted:
            self.__total_size = end
    elif self.total_size is None:
        # For the streaming resumable case, we need to detect when
        # we're at the end of the stream.
        body_stream = buffered_stream.BufferedStream(self.stream, start, self.chunksize)
        end = body_stream.stream_end_position
        if body_stream.stream_exhausted:
            self.__total_size = end
        # TODO: Here, change body_stream from a stream to a string object,
        # which means reading a chunk into memory. This works around
        # https://code.google.com/p/httplib2/issues/detail?id=176 which can
        # cause httplib2 to skip bytes on 401's for file objects.
        # Rework this solution to be more general.
        body_stream = body_stream.read(self.chunksize)
    else:
        # Known total size: send a bounded slice of the stream.
        end = min(start + self.chunksize, self.total_size)
        body_stream = stream_slice.StreamSlice(self.stream, end - start)
    # TODO(craigcitro): Think about clearer errors on "no data in
    # stream".
    request.body = body_stream
    request.headers['Content-Type'] = self.mime_type
    if no_log_body:
        # Disable logging of streaming body.
        # TODO: Remove no_log_body and rework as part of a larger logs
        # refactor.
        request.loggable_body = '<media body>'
    if self.total_size is None:
        # Streaming resumable upload case, unknown total size.
        range_string = 'bytes %s-%s/*' % (start, end - 1)
    elif end == start:
        # End of an upload with 0 bytes left to send; just finalize.
        range_string = 'bytes */%s' % self.total_size
    else:
        # Normal resumable upload case with known sizes.
        range_string = 'bytes %s-%s/%s' % (start, end - 1, self.total_size)
    request.headers['Content-Range'] = range_string
    if additional_headers:
        request.headers.update(additional_headers)
    return self.__SendMediaRequest(request, end)
|
def sanitize_parse_mode ( mode ) :
"""Converts the given parse mode into an object with
` ` parse ` ` and ` ` unparse ` ` callable properties ."""
|
if not mode :
return None
if callable ( mode ) :
class CustomMode :
@ staticmethod
def unparse ( text , entities ) :
raise NotImplementedError
CustomMode . parse = mode
return CustomMode
elif ( all ( hasattr ( mode , x ) for x in ( 'parse' , 'unparse' ) ) and all ( callable ( x ) for x in ( mode . parse , mode . unparse ) ) ) :
return mode
elif isinstance ( mode , str ) :
try :
return { 'md' : markdown , 'markdown' : markdown , 'htm' : html , 'html' : html } [ mode . lower ( ) ]
except KeyError :
raise ValueError ( 'Unknown parse mode {}' . format ( mode ) )
else :
raise TypeError ( 'Invalid parse mode type {}' . format ( mode ) )
|
def _compute_and_write_row_block(i, left_matrix, right_matrix, train_indices_out_path, test_indices_out_path, remove_empty_rows):
    """Compute row block (shard) of the expansion for row i of the left_matrix.

    Compute a shard of the randomized Kronecker product and dump it on the
    fly. The standard Kronecker product between A (m, n) and B (p, q) is the
    (m p, n q) matrix of blocks a_ij B; here each block-wise operation
    a_ij B is randomized (as in https://arxiv.org/pdf/1901.08910.pdf,
    section III.4) by an operator F(a_ij, B, w_ij) which
      1) shuffles rows and columns of B independently at random, and
      2) drops elements of B with rate 1 - a_ij.
    Shard i consists of [F(a_i1, B, w_i1), ..., F(a_in, B, w_in)].

    Args:
        i: index of the shard; rows i*p to (i+1)*p of the full synthetic
            matrix are computed and dumped to file.
        left_matrix: sparse SciPy csr matrix with values in [0, 1].
        right_matrix: sparse SciPy coo signed binary matrix; +1 values are
            the train set and -1 values the test set.
        train_indices_out_path: path to the output train file; '_i' is
            appended as the shard suffix. The shard is a pickled list of
            per-user index lists.
        test_indices_out_path: path to the output test file; same format
            and suffix convention as the train file.
        remove_empty_rows: whether to drop synthetic rows that end up with
            no train or no test interactions.

    Returns:
        (num_removed_rows, metadata, train_metadata, test_metadata): the
        number of rows dropped because of dropout, followed by
        SparseMatrixMetadata for the overall, train and test shard.
    """
    kron_blocks = []
    num_rows = 0
    num_removed_rows = 0
    num_interactions = 0
    num_train_interactions = 0
    num_test_interactions = 0
    # Construct blocks: one randomized copy of right_matrix per column j.
    for j in xrange(left_matrix.shape[1]):
        # a_ij acts as a keep-probability, so dropout is its complement.
        dropout_rate = 1.0 - left_matrix[i, j]
        kron_block = shuffle_sparse_coo_matrix(right_matrix, dropout_rate)
        if not set(kron_block.data).issubset({1, -1}):
            raise ValueError("Values of sparse matrix should be -1 or 1 but are: ", set(kron_block.data))
        kron_blocks.append(kron_block)
        logging.info("Done with element (%d, %d)", i, j)
    rows_to_write = sparse.hstack(kron_blocks).tocoo()
    # Split the signed matrix: +1 entries are train, -1 entries are test.
    train_rows_to_write = util.sparse_where_equal(rows_to_write, 1)
    test_rows_to_write = util.sparse_where_equal(rows_to_write, -1)
    logging.info("Producing data set row by row")
    all_train_items_to_write = []
    all_test_items_to_write = []
    # Write Kronecker product line per line.
    for k in xrange(right_matrix.shape[0]):
        train_items_to_write = train_rows_to_write.getrow(k).indices
        test_items_to_write = test_rows_to_write.getrow(k).indices
        # for users with > 1 test items, keep only the first one
        if len(test_items_to_write) > 1:
            test_items_to_write = test_items_to_write[:1]
        num_train = train_items_to_write.shape[0]
        num_test = test_items_to_write.shape[0]
        if remove_empty_rows and ((not num_train) or (not num_test)):
            logging.info("Removed empty output row %d.", i * left_matrix.shape[0] + k)
            num_removed_rows += 1
            continue
        num_rows += 1
        num_interactions += num_train + num_test
        num_train_interactions += num_train
        num_test_interactions += num_test
        all_train_items_to_write.append(train_items_to_write)
        all_test_items_to_write.append(test_items_to_write)
        if k % 1000 == 0:
            logging.info("Done producing data set row %d.", k)
    logging.info("Done producing data set row by row.")
    # Rows are offset by the shard index so that shards concatenate into
    # one consistent global matrix.
    util.savez_two_column(all_train_items_to_write, row_offset=(i * right_matrix.shape[0]), file_name=train_indices_out_path + ("_%d" % i))
    util.savez_two_column(all_test_items_to_write, row_offset=(i * right_matrix.shape[0]), file_name=test_indices_out_path + ("_%d" % i))
    num_cols = rows_to_write.shape[1]
    metadata = SparseMatrixMetadata(num_interactions=num_interactions, num_rows=num_rows, num_cols=num_cols)
    train_metadata = SparseMatrixMetadata(num_interactions=num_train_interactions, num_rows=num_rows, num_cols=num_cols)
    test_metadata = SparseMatrixMetadata(num_interactions=num_test_interactions, num_rows=num_rows, num_cols=num_cols)
    logging.info("Done with left matrix row %d.", i)
    logging.info("%d interactions written in shard.", num_interactions)
    logging.info("%d rows removed in shard.", num_removed_rows)
    logging.info("%d train interactions written in shard.", num_train_interactions)
    logging.info("%d test interactions written in shard.", num_test_interactions)
    return (num_removed_rows, metadata, train_metadata, test_metadata)
|
def lib_dir ( self ) :
    """Return standard library directory path used by RPM libs .

    The result is computed once from the installed ``librpm.so*`` location
    and cached on ``self._lib_dir`` for subsequent calls.

    :raises InstallError: if no ``librpm.so*`` can be located.
    """
    # Fast path: reuse the cached value if it was already resolved.
    if self . _lib_dir :
        return self . _lib_dir
    matches = glob . glob ( "/usr/lib/*/librpm.so*" )
    if not matches :
        raise InstallError ( "Can not find lib directory." )
    # The directory containing the first matching shared object.
    self . _lib_dir = os . path . dirname ( matches [ 0 ] )
    return self . _lib_dir
|
def init_atom_feed ( self , feed ) :
    """Initializing an atom feed `feedgen.feed.FeedGenerator` given a feed object

    :param feed: a feed object
    :return: an atom feed `feedgen.feed.FeedGenerator`
    """
    # Both the feed id and the self-link point at the same route.
    feed_url = self . request . route_url ( self . get_atom_feed_url , id = feed . id )
    generator = FeedGenerator ( )
    generator . id ( id = feed_url )
    generator . link ( href = feed_url , rel = 'self' )
    generator . language ( 'nl-BE' )
    # Wire up pagination links to the neighbouring feeds.
    for direction in ( 'previous' , 'next' ) :
        self . link_to_sibling ( feed , direction , generator )
    return generator
|
def titles ( self , key , value ) :
    """Populate the ``titles`` key.

    Builds one title record from the MARC subfields ``a`` (title),
    ``b`` (subtitle) and ``9`` (source); missing subfields become ``None``.

    For non-245 fields the record is returned directly.  For 245 fields
    it is inserted at the front of ``self['titles']`` (creating the list
    if needed) and ``None`` is returned, so the main title is always first.
    """
    # Build the record once instead of duplicating the literal per branch.
    title = {
        'source' : value . get ( '9' ) ,
        'subtitle' : value . get ( 'b' ) ,
        'title' : value . get ( 'a' ) ,
    }
    if not key . startswith ( '245' ) :
        return title
    self . setdefault ( 'titles' , [ ] ) . insert ( 0 , title )
|
def _get_translation_field_names ( ) :
    """Returns Translation base model field names ( excepted " id " field ) ."""
    # Imported lazily to avoid touching Django models at module import time.
    from . models import Translation
    field_names = [ field . name for field in Translation . _meta . get_fields ( ) ]
    # Drop the auto primary key; callers only want the translatable fields.
    field_names . remove ( "id" )
    return field_names
|
def kvlclient ( self ) :
    '''Return a thread local ``kvlayer`` client .

    The client is created lazily on first access and cached on
    ``self._kvlclient`` for later calls.
    '''
    client = self . _kvlclient
    if client is None :
        # First access: build the client and remember it.
        client = kvlayer . client ( )
        self . _kvlclient = client
    return client
|
def load_from_package ( ) :
    '''Try to load category ranges from module .

    Reads the pickled ``cache/unicategories.cache`` resource shipped with
    the package and returns its data when both the data version and the
    module version match the current ones; otherwise emits a warning and
    returns ``None``.  Missing ``pkg_resources`` or a missing cache file
    are silently ignored.

    : returns : category ranges dict or None
    : rtype : None or dict of RangeGroup'''
    try :
        import pkg_resources
        f = pkg_resources . resource_stream ( meta . __app__ , 'cache/unicategories.cache' )
        try :
            # NOTE: the cache is generated by this package itself at install
            # time, so unpickling it is trusted input.
            dversion , mversion , data = pickle . load ( f )
        finally :
            # resource_stream returns an open file object; close it so the
            # handle is not leaked (the original code never closed it).
            f . close ( )
        if dversion == data_version and mversion == module_version :
            return data
        warnings . warn ( 'Unicode unicategories database is outdated. ' 'Please reinstall unicategories module to regenerate it.' if dversion < data_version else 'Incompatible unicategories database. ' 'Please reinstall unicategories module to regenerate it.' )
    except ( ValueError , EOFError ) :
        # Unpickling failed: the cache format is unreadable.
        warnings . warn ( 'Incompatible unicategories database. ' 'Please reinstall unicategories module to regenerate it.' )
    except ( ImportError , FileNotFoundError ) :
        # No pkg_resources or no cache resource: nothing to load.
        pass
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.