signature stringlengths 29 44.1k | implementation stringlengths 0 85.2k |
|---|---|
async def apply_command(self, cmd):
    """Apply a command, running its pre- and post-hooks around it.

    Calls the command's ``prehook`` (if any), then :meth:`cmd.apply`,
    and finally the ``posthook`` (if any) when apply succeeded.
    Exceptions raised by ``apply`` are routed to ``self._error_handler``.

    :param cmd: an applicable command (``None`` is silently ignored)
    :type cmd: :class:`~alot.commands.Command`
    """
    # FIXME: What are we guarding for here? We don't mention that None is
    # allowed as a value for cmd.
    if not cmd:
        return
    if cmd.prehook:
        await cmd.prehook(ui=self, dbm=self.dbman, cmd=cmd)
    try:
        if asyncio.iscoroutinefunction(cmd.apply):
            await cmd.apply(self)
        else:
            cmd.apply(self)
    except Exception as exc:
        self._error_handler(exc)
    else:
        if cmd.posthook:
            logging.info('calling post-hook')
            await cmd.posthook(ui=self, dbm=self.dbman, cmd=cmd)
def clear_intersection(self, other_dict):
    """Remove matching key-value pairs from this scope's locals and globals.

    An entry is removed only when both the key and the *identity* of the
    stored value match an entry in ``other_dict``.  This allows cleanup of
    temporary variables that may have washed up into this Scope.

    Arguments:
        other_dict: a :class:`dict` used to determine scope clearance.

    Returns:
        Scope: the updated scope (``self``).
    """
    missing = object()  # sentinel so a stored None cannot false-match
    for name, obj in other_dict.items():
        for mapping in (self.globals, self.locals):
            if mapping.get(name, missing) is obj:
                del mapping[name]
    return self
def nl_socket_set_buffer_size(sk, rxbuf, txbuf):
    """Set socket buffer size of Netlink socket.

    https://github.com/thom311/libnl/blob/libnl3_2_25/lib/socket.c#L675

    Sets the socket buffer size of a Netlink socket to the specified values
    `rxbuf` and `txbuf`. Providing a value of 0 assumes a good default value.

    Positional arguments:
    sk -- Netlink socket (nl_sock class instance).
    rxbuf -- new receive socket buffer size in bytes (integer).
    txbuf -- new transmit socket buffer size in bytes (integer).

    Returns:
    0 on success or a negative error code.
    """
    if rxbuf <= 0:
        rxbuf = 32768
    if txbuf <= 0:
        txbuf = 32768
    if sk.s_fd == -1:
        return -NLE_BAD_SOCK
    # Send buffer first, then receive buffer, mirroring libnl's order.
    for option, size in ((socket.SO_SNDBUF, txbuf), (socket.SO_RCVBUF, rxbuf)):
        try:
            sk.socket_instance.setsockopt(socket.SOL_SOCKET, option, size)
        except OSError as exc:
            return -nl_syserr2nlerr(exc.errno)
    sk.s_flags |= NL_SOCK_BUFSIZE_SET
    return 0
def auto_discretize(self, max_freq=50., wave_frac=0.2):
    """Subdivide the layers to capture strain variation.

    Parameters
    ----------
    max_freq : float
        Maximum frequency of interest [Hz].
    wave_frac : float
        Fraction of wavelength required. Typically 1/3 to 1/5.

    Returns
    -------
    profile : Profile
        A new profile with modified layer thicknesses.
    """
    subdivided = []
    for layer in self:
        if not layer.soil_type.is_nonlinear:
            # Linear layers are carried over unchanged.
            subdivided.append(layer)
            continue
        # Target thickness: a fraction of the shortest wavelength of interest.
        opt_thickness = layer.shear_vel / max_freq * wave_frac
        count = int(np.ceil(layer.thickness / opt_thickness))
        piece = layer.thickness / count
        subdivided.extend(
            Layer(layer.soil_type, piece, layer.shear_vel)
            for _ in range(count)
        )
    return Profile(subdivided, wt_depth=self.wt_depth)
def time(ctx: Context, command: str):
    """Run *command* in a shell, report its timing, and exit with its code."""
    with timer.Timing(verbose=True):
        completed = run(command, shell=True)
    ctx.exit(completed.returncode)
def delete_insight(self, project_key, insight_id):
    """Delete an existing insight.

    :param project_key: Project identifier, in the form of
        projectOwner/projectId
    :type project_key: str
    :param insight_id: Insight unique id
    :type insight_id: str
    :raises RestApiException: If a server error occurs

    Examples
    --------
    >>> import datadotworld as dw
    >>> api_client = dw.api_client()
    >>> del_insight = api_client.delete_insight(
    ...     'username/project', 'insightid')  # doctest: +SKIP
    """
    owner, project_id = parse_dataset_key(project_key)
    try:
        self._insights_api.delete_insight(owner, project_id, insight_id)
    except _swagger.rest.ApiException as e:
        # Wrap transport-level failures in the public exception type.
        raise RestApiError(cause=e)
def _naturalize_numbers ( self , string ) :
"""Makes any integers into very zero - padded numbers .
e . g . ' 1 ' becomes ' 000001 ' .""" | def naturalize_int_match ( match ) :
return '%08d' % ( int ( match . group ( 0 ) ) , )
string = re . sub ( r'\d+' , naturalize_int_match , string )
return string |
def _coerce_method ( converter ) :
"""Install the scalar coercion methods .""" | def wrapper ( self ) :
if len ( self ) == 1 :
return converter ( self . iloc [ 0 ] )
raise TypeError ( "cannot convert the series to " "{0}" . format ( str ( converter ) ) )
wrapper . __name__ = "__{name}__" . format ( name = converter . __name__ )
return wrapper |
def create_namespaced_network_policy(self, namespace, body, **kwargs):
    """create a NetworkPolicy

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.create_namespaced_network_policy(namespace, body, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param V1beta1NetworkPolicy body: (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :param str dry_run: When present, indicates that modifications should not be persisted.
    :param str field_manager: fieldManager is a name associated with the actor or entity
        that is making these changes.
    :return: V1beta1NetworkPolicy
        If the method is called asynchronously, returns the request thread.
    """
    # Both the async and sync paths delegate to the same _with_http_info
    # call; async returns the request thread, sync returns the data.
    kwargs['_return_http_data_only'] = True
    return self.create_namespaced_network_policy_with_http_info(
        namespace, body, **kwargs)
def add_value_option(self, *args, **kwargs):
    """Add a value option.

    The last positional argument is the metavar; the rest are option
    strings passed through to the optparse parser.

    @keyword dest: Destination attribute, derived from long option name if not given.
    @keyword action: How to handle the option.
    @keyword help: Option description.
    @keyword default: If given, add this value to the help string.
    """
    kwargs['metavar'] = args[-1]
    if 'dest' not in kwargs:
        # Derive the attribute name from the first long option.
        long_opt = next(o for o in args if o.startswith("--"))
        kwargs['dest'] = long_opt.replace("--", "").replace("-", "_")
    if kwargs.get('default'):
        kwargs['help'] += " [%s]" % kwargs['default']
    self.parser.add_option(*args[:-1], **kwargs)
def _authenticate ( self ) :
"""Authenticate with netcup server . Must be called first .""" | login_info = self . _apicall ( 'login' )
self . api_session_id = login_info [ 'apisessionid' ]
if not self . api_session_id :
raise Exception ( 'Login failed' )
# query ttl and verify access to self . domain :
zone_info = self . _apicall ( 'infoDnsZone' , domainname = self . domain )
self . zone_ttl = zone_info [ 'ttl' ] |
def transformFilter(actor, transformation):
    """Transform a ``vtkActor`` and return a new object.

    Accepts either a raw ``vtkPolyData`` or an actor-like object exposing
    ``polydata()``; in the latter case the actor's display properties are
    copied onto the result.
    """
    poly_filter = vtk.vtkTransformPolyDataFilter()
    poly_filter.SetTransform(transformation)
    if isinstance(actor, vtk.vtkPolyData):
        poly_filter.SetInputData(actor)
        prop = None
    else:
        poly_filter.SetInputData(actor.polydata())
        prop = vtk.vtkProperty()
        prop.DeepCopy(actor.GetProperty())
    poly_filter.Update()
    transformed = Actor(poly_filter.GetOutput())
    if prop:
        transformed.SetProperty(prop)
    return transformed
def b58decode_check(v: str) -> bytes:
    '''Decode and verify the checksum of a Base58 encoded string.

    The trailing 4 bytes are the checksum: the first 4 bytes of the
    double-SHA256 of the payload. Raises ValueError on mismatch.
    '''
    decoded = b58decode(v)
    payload, checksum = decoded[:-4], decoded[-4:]
    expected = sha256(sha256(payload).digest()).digest()[:4]
    if checksum != expected:
        raise ValueError("Invalid checksum")
    return payload
def parse(url):
    """Parses a cache URL.

    Returns a Django-style cache config dict with at least ``BACKEND``
    and usually ``LOCATION``; any remaining query-string arguments are
    merged into the config (upper-cased keys).
    """
    config = {}
    url = urlparse.urlparse(url)
    # Handle python 2.6 broken url parsing
    path, query = url.path, url.query
    if '?' in path and query == '':
        path, query = path.split('?', 1)
    # Query-string args become upper-cased config keys; multi-valued
    # params are joined with ';'.
    cache_args = dict([(key.upper(), ';'.join(val)) for key, val in urlparse.parse_qs(query).items()])
    # Update with environment configuration.
    backend = BACKENDS.get(url.scheme)
    if not backend:
        raise Exception('Unknown backend: "{0}"'.format(url.scheme))
    config['BACKEND'] = BACKENDS[url.scheme]
    redis_options = {}
    if url.scheme == 'hiredis':
        redis_options['PARSER_CLASS'] = 'redis.connection.HiredisParser'
    # File based (no host part => unix socket or filesystem path)
    if not url.netloc:
        if url.scheme in ('memcached', 'pymemcached', 'djangopylibmc'):
            config['LOCATION'] = 'unix:' + path
        elif url.scheme in ('redis', 'hiredis'):
            # A trailing integer in the path selects the redis db number.
            match = re.match(r'.+?(?P<db>\d+)', path)
            if match:
                db = match.group('db')
                path = path[:path.rfind('/')]
            else:
                db = '0'
            config['LOCATION'] = 'unix:%s:%s' % (path, db)
        else:
            config['LOCATION'] = path
    # URL based
    else:
        # Handle multiple hosts
        config['LOCATION'] = ';'.join(url.netloc.split(','))
        if url.scheme in ('redis', 'hiredis'):
            if url.password:
                redis_options['PASSWORD'] = url.password
            # Specifying the database is optional, use db 0 if not specified.
            db = path[1:] or '0'
            port = url.port if url.port else 6379
            config['LOCATION'] = "redis://%s:%s/%s" % (url.hostname, port, db)
    if redis_options:
        config.setdefault('OPTIONS', {}).update(redis_options)
    if url.scheme == 'uwsgicache':
        config['LOCATION'] = config.get('LOCATION', 'default') or 'default'
    # Pop special options from cache_args
    # https://docs.djangoproject.com/en/1.10/topics/cache/#cache-arguments
    options = {}
    for key in ['MAX_ENTRIES', 'CULL_FREQUENCY']:
        val = cache_args.pop(key, None)
        if val is not None:
            options[key] = int(val)
    if options:
        config.setdefault('OPTIONS', {}).update(options)
    config.update(cache_args)
    return config
def gof_plot(simdata, trueval, name=None, bins=None, format='png', suffix='-gof', path='./', fontmap=None, verbose=0):
    """Plots histogram of replicated data, indicating the location of the observed data.

    :Arguments:
        simdata: array or PyMC object
            Trace of simulated data or the PyMC stochastic object containing trace.
        trueval: numeric
            True (observed) value of the data
        bins: int or string
            The number of bins, or a preferred binning method. Available methods
            include 'doanes', 'sturges' and 'sqrt' (defaults to 'sqrt').
        format (optional): string
            Graphic output format (defaults to png).
        suffix (optional): string
            Filename suffix.
        path (optional): string
            Specifies location for saving plots (defaults to local directory).
        fontmap (optional): dict
            Font map for plot.
    """
    if fontmap is None:
        fontmap = {1: 10, 2: 8, 3: 6, 4: 5, 5: 4}
    if not isinstance(simdata, ndarray):
        # Can't just try and catch because ndarray objects also have
        # `trace` method.
        simdata = simdata.trace()
    # BUG FIX: previously `ndim(simdata == 2)` — the ndim of a boolean
    # comparison array, which is truthy for any non-scalar simdata.  The
    # intent is to detect a 2-d simdata array and recurse per column.
    if ndim(trueval) == 1 and ndim(simdata) == 2:
        # Iterate over more than one set of data
        for i in range(len(trueval)):
            n = name or 'MCMC'
            gof_plot(simdata[:, i], trueval[i], '%s[%i]' % (n, i), bins=bins, format=format, suffix=suffix, path=path, fontmap=fontmap, verbose=verbose)
        return
    if verbose > 0:
        print_('Plotting', (name or 'MCMC') + suffix)
    figure()
    # Specify number of bins
    if bins is None:
        bins = 'sqrt'
    uniquevals = len(unique(simdata))
    # For each named method: use the number of unique values when small
    # (<= 25), otherwise fall back to the method's estimate.
    if bins == 'sturges':
        bins = uniquevals * (uniquevals <= 25) or _sturges(len(simdata))
    elif bins == 'doanes':
        bins = uniquevals * (uniquevals <= 25) or _doanes(simdata, len(simdata))
    elif bins == 'sqrt':
        bins = uniquevals * (uniquevals <= 25) or _sqrt_choice(len(simdata))
    elif isinstance(bins, int):
        pass  # explicit bin count, use as given
    else:
        raise ValueError('Invalid bins argument in gof_plot')
    # Generate histogram
    hist(simdata, bins)
    # Plot options
    xlabel(name or 'Value', fontsize='x-small')
    ylabel("Frequency", fontsize='x-small')
    # Smaller tick labels
    tlabels = gca().get_xticklabels()
    setp(tlabels, 'fontsize', fontmap[1])
    tlabels = gca().get_yticklabels()
    setp(tlabels, 'fontsize', fontmap[1])
    # Plot vertical line at location of true data value
    axvline(x=trueval, linewidth=2, color='r', linestyle='dotted')
    if not os.path.exists(path):
        os.mkdir(path)
    if not path.endswith('/'):
        path += '/'
    # Save to file
    savefig("%s%s%s.%s" % (path, name or 'MCMC', suffix, format))
def read(self, visibility_timeout=None):
    """Read a single message from the queue.

    :type visibility_timeout: int
    :param visibility_timeout: The timeout for this message in seconds

    :rtype: :class:`boto.sqs.message.Message`
    :return: A single message or None if queue is empty
    """
    messages = self.get_messages(1, visibility_timeout)
    return messages[0] if len(messages) == 1 else None
def pull(self):
    """This action does some state checking (adds a object in the session
    that will identify this chat participant and adds a coroutine to manage
    it's state) and gets new messages or bail out in 10 seconds if there are
    no messages."""
    # NOTE: Python 2 / cogen coroutine-style WSGI action — values are
    # yielded to the cogen framework and results come back via the environ.
    if not 'client' in session or session['client'].dead:
        # First call (or previous client expired): register a new Client
        # keyed by the route id and schedule its watcher coroutine.
        client = Client(str(request.environ['pylons.routes_dict']['id']))
        print 'Adding new client:', client
        session['client'] = client
        session.save()
        yield request.environ['cogen.core'].events.AddCoro(client.watch, prio=priority.CORO)
        return
    else:
        client = session['client']
    # Block (cooperatively) for up to 10 seconds waiting for messages.
    yield request.environ['cogen.call'](client.messages.get)(timeout=10)
    if isinstance(request.environ['cogen.wsgi'].result, events.OperationTimeout):
        # No messages within the timeout: respond with an empty body.
        pass
    elif isinstance(request.environ['cogen.wsgi'].result, Exception):
        import traceback
        traceback.print_exception(*request.environ['cogen.wsgi'].exception)
    else:
        # Got messages: emit them CRLF-separated.
        yield "%s\r\n" % '\r\n'.join(request.environ['cogen.wsgi'].result)
def set(obj, glob, value, separator="/", afilter=None):
    """Given a path glob, set all existing elements in the document
    to the given value. Returns the number of elements changed.

    Note: intentionally shadows the builtin ``set`` to mirror the
    dpath public API.
    """
    changed = 0
    globlist = __safe_path__(glob, separator)
    # Iterate lazily so each match is set as it is discovered.
    for match_path in _inner_search(obj, globlist, separator):
        dpath.path.set(obj, match_path, value, create_missing=False, afilter=afilter)
        changed += 1
    return changed
def _divide ( divisor , remainder , quotient , remainders , base , precision = None ) :
"""Given a divisor and dividend , continue until precision in is reached .
: param int divisor : the divisor
: param int remainder : the remainder
: param int base : the base
: param precision : maximum number of fractional digits to compute
: type precision : int or NoneType
: returns : the remainder
: rtype : int
` ` quotient ` ` and ` ` remainders ` ` are set by side effects
Complexity : O ( precision ) if precision is not None else O ( divisor )""" | # pylint : disable = too - many - arguments
indices = itertools . count ( ) if precision is None else range ( precision )
for _ in indices :
if remainder == 0 or remainder in remainders :
break
remainders . append ( remainder )
( quot , rem ) = divmod ( remainder , divisor )
quotient . append ( quot )
if quot > 0 :
remainder = rem * base
else :
remainder = remainder * base
return remainder |
def create_plugin(self, name, plugin_data_dir, gzip=False):
    """Create a new plugin.

    Args:
        name (string): The name of the plugin. The ``:latest`` tag is
            optional, and is the default if omitted.
        plugin_data_dir (string): Path to the plugin data directory.
            Plugin data directory must contain the ``config.json``
            manifest file and the ``rootfs`` directory.
        gzip (bool): Compress the context using gzip. Default: False

    Returns:
        ``True`` if successful
    """
    url = self._url('/plugins/create')
    archive_files = set(utils.build.walk(plugin_data_dir, []))
    with utils.create_archive(root=plugin_data_dir, gzip=gzip,
                              files=archive_files) as archive:
        response = self._post(url, params={'name': name}, data=archive)
    self._raise_for_status(response)
    return True
def load_arguments(self, command):
    """Load the arguments for the specified command.

    :param command: The command to load arguments for
    :type command: str
    """
    from knack.arguments import ArgumentsContext

    self.cli_ctx.raise_event(EVENT_CMDLOADER_LOAD_ARGUMENTS,
                             cmd_tbl=self.command_table, command=command)
    try:
        self.command_table[command].load_arguments()
    except KeyError:
        # Unknown command: nothing to load.
        return
    # ensure global 'cmd' is ignored
    with ArgumentsContext(self, '') as arg_ctx:
        arg_ctx.ignore('cmd')
    self._apply_parameter_info(command, self.command_table[command])
def _get_facet_chempots ( self , facet ) :
"""Calculates the chemical potentials for each element within a facet .
Args :
facet : Facet of the phase diagram .
Returns :
{ element : chempot } for all elements in the phase diagram .""" | complist = [ self . qhull_entries [ i ] . composition for i in facet ]
energylist = [ self . qhull_entries [ i ] . energy_per_atom for i in facet ]
m = [ [ c . get_atomic_fraction ( e ) for e in self . elements ] for c in complist ]
chempots = np . linalg . solve ( m , energylist )
return dict ( zip ( self . elements , chempots ) ) |
def is_collection(item):
    """Returns True if the item is a collection class: list, tuple, set, frozenset
    or any other class that resembles one of these (using abstract base classes).

    >>> is_collection(0)
    False
    >>> is_collection(0.1)
    False
    >>> is_collection('')
    False
    >>> is_collection({})
    False
    >>> is_collection({}.keys())
    True
    >>> is_collection([])
    True
    >>> is_collection(())
    True
    >>> is_collection(set())
    True
    >>> is_collection(frozenset())
    True
    >>> from coaster.utils import InspectableSet
    >>> is_collection(InspectableSet({1, 2}))
    True
    """
    # FIX: collections.Set / collections.Sequence were deprecated aliases
    # removed in Python 3.10; the ABCs live in collections.abc.
    return not isinstance(item, six.string_types) and isinstance(
        item, (collections.abc.Set, collections.abc.Sequence))
def reset_ttl(self, other):
    """Copy *other*'s TTL and creation time onto this record."""
    self.created, self.ttl = other.created, other.ttl
def event_actions(self):
    """Take actions for timed events.

    Returns
    -------
    None
    """
    if not self.switch:
        return
    system = self.system
    system.Breaker.apply(self.t)
    # Apply every model that has an event scheduled at the current time.
    for model_name in system.check_event(self.t):
        system.__dict__[model_name].apply(self.t)
    system.dae.rebuild = True
    self.switch = False
def get_release_info(self, name, version):  # type: (str, str) -> dict
    """Return the release information given a package name and a version.

    The information is returned from the cache if it exists
    or retrieved from the remote server.
    """
    if self._disable_cache:
        # Caching disabled: always hit the remote source.
        return self._get_release_info(name, version)
    # Fetch-or-compute: the lambda only runs on a cache miss.
    cached = self._cache.remember_forever("{}:{}".format(name, version), lambda: self._get_release_info(name, version))
    cache_version = cached.get("_cache_version", "0.0.0")
    # NOTE(review): compares a parsed constraint to CACHE_VERSION with `!=`;
    # presumably relies on the constraint type's equality semantics — confirm
    # against parse_constraint's documentation.
    if parse_constraint(cache_version) != self.CACHE_VERSION:
        # The cache must be updated
        self._log("The cache for {} {} is outdated. Refreshing.".format(name, version), level="debug", )
        cached = self._get_release_info(name, version)
        self._cache.forever("{}:{}".format(name, version), cached)
    return cached
def get_symbol(units) -> str:
    """Get default symbol type.

    Parameters
    ----------
    units : string
        Units.

    Returns
    -------
    string
        LaTeX formatted symbol.
    """
    units_kind = kind(units)
    if units_kind == "energy":
        # Energy-like units each have a specific conventional symbol.
        energy_symbols = {
            "nm": r"\lambda",
            "wn": r"\bar\nu",
            "eV": r"\hslash\omega",
            "Hz": r"f",
            "THz": r"f",
            "GHz": r"f",
        }
        return energy_symbols.get(units, "E")
    # Other kinds map directly; unknown kinds fall back to the kind name.
    kind_symbols = {
        "delay": r"\tau",
        "fluence": r"\mathcal{F}",
        "pulse_width": r"\sigma",
        "temperature": r"T",
    }
    return kind_symbols.get(units_kind, units_kind)
def remove_user_from_group(uid, gid):
    """Removes a user from a group within DCOS Enterprise.

    :param uid: user id
    :type uid: str
    :param gid: group id
    :type gid: str
    """
    acl_url = urljoin(_acl_url(), 'groups/{}/users/{}'.format(gid, uid))
    try:
        response = http.delete(acl_url)
        # 204 No Content is the only expected success response.
        assert response.status_code == 204
    except dcos.errors.DCOSBadRequest:
        # Membership did not exist; treat removal as already done.
        pass
def directive(apply_globally=False, api=None):
    """A decorator that registers a single hug directive."""
    def decorator(directive_method):
        if apply_globally:
            # Register under the underscored name for every API.
            hug.defaults.directives[underscore(directive_method.__name__)] = directive_method
        else:
            target_api = hug.API(api) if api else hug.api.from_object(directive_method)
            target_api.add_directive(directive_method)
        directive_method.directive = True
        return directive_method

    return decorator
def make_assignment(instr, queue, stack):
    """Make an ast.Assign node.

    For multiple assignment (e.g. 'a = b = c'), each LHS expression except
    the last is preceded by a DUP_TOP instruction, so targets are collected
    while DUP_TOPs appear, followed by exactly one final target.
    """
    value = make_expr(stack)
    targets = []
    current = instr
    while isinstance(current, instrs.DUP_TOP):
        targets.append(make_assign_target(queue.popleft(), queue, stack))
        current = queue.popleft()
    targets.append(make_assign_target(current, queue, stack))
    return ast.Assign(targets=targets, value=value)
def get_property_chain_axioms(self, nid):
    """Retrieves property chain axioms for a class id.

    Arguments
    ---------
    nid : str
        Node identifier for relation to be queried

    Returns
    -------
    list of PropertyChainAxiom
    """
    axioms = self.all_property_chain_axioms
    if axioms is None:
        return []
    return [axiom for axiom in axioms if axiom.predicate_id == nid]
def drawrectangle(self, xa, xb, ya, yb, colour=None, label=None):
    """Draws a 1-pixel wide frame AROUND the region you specify. Same convention as for crop().

    :param xa, xb, ya, yb: region bounds (crop() convention)
    :param colour: outline (and label) colour; falls back to the default colour
    :param label: optional text drawn just below the rectangle
    """
    self.checkforpilimage()
    colour = self.defaultcolour(colour)
    self.changecolourmode(colour)
    self.makedraw()
    (pilxa, pilya) = self.pilcoords((xa, ya))
    (pilxb, pilyb) = self.pilcoords((xb, yb))
    # The corners are mixed because the y axis is flipped in PIL coords.
    self.draw.rectangle([(pilxa, pilyb - 1), (pilxb + 1, pilya)], outline=colour)
    # FIX: was `label != None`; `is not None` is the correct identity test.
    if label is not None:  # Then we write it:
        self.loadlabelfont()
        textwidth = self.draw.textsize(label, font=self.labelfont)[0]
        self.draw.text(((pilxa + pilxb) / 2.0 - float(textwidth) / 2.0 + 1, pilya + 2), label, fill=colour, font=self.labelfont)
def batchget(self, agent_id, media_type, offset=0, count=20):
    """Fetch a paginated list of permanent media assets.

    Details:
    https://qydev.weixin.qq.com/wiki/index.php?title=%E8%8E%B7%E5%8F%96%E7%B4%A0%E6%9D%90%E5%88%97%E8%A1%A8

    :param agent_id: id of the enterprise application
    :param media_type: media file type, one of mpnews (rich text),
        image, voice, video or file
    :param offset: offset into the full asset list; 0 returns from the
        first asset
    :param count: number of assets to return, between 1 and 20
    :return: JSON response data
    """
    payload = {
        'agent_id': agent_id,
        'type': media_type,
        'offset': offset,
        'count': count,
    }
    return self._post('material/batchget', data=payload)
def _ReadFlowResponseCounts(self, request_keys, cursor=None):
    """Reads counts of responses for the given requests.

    Builds a single grouped query with one OR-ed condition per request key
    and returns a mapping from (client_id, flow_id, request_id) to the
    number of responses recorded for requests still needing processing.
    """
    query = """
      SELECT
        flow_requests.client_id, flow_requests.flow_id,
        flow_requests.request_id, COUNT(*)
      FROM flow_responses, flow_requests
      WHERE ({conditions}) AND
            flow_requests.client_id = flow_responses.client_id AND
            flow_requests.flow_id = flow_responses.flow_id AND
            flow_requests.request_id = flow_responses.request_id AND
            flow_requests.needs_processing = FALSE
      GROUP BY
        flow_requests.client_id,
        flow_requests.flow_id,
        flow_requests.request_id
    """
    condition_template = """
      (flow_requests.client_id=%s AND
       flow_requests.flow_id=%s AND
       flow_requests.request_id=%s)"""
    conditions = [condition_template] * len(request_keys)
    args = []
    # Client and flow ids are stored as integers in the DB; convert on the
    # way in and back again when reading results out.
    for client_id, flow_id, request_id in request_keys:
        args.append(db_utils.ClientIDToInt(client_id))
        args.append(db_utils.FlowIDToInt(flow_id))
        args.append(request_id)
    query = query.format(conditions=" OR ".join(conditions))
    cursor.execute(query, args)
    response_counts = {}
    for (client_id_int, flow_id_int, request_id, count) in cursor.fetchall():
        request_key = (db_utils.IntToClientID(client_id_int), db_utils.IntToFlowID(flow_id_int), request_id)
        response_counts[request_key] = count
    return response_counts
def default_depart(self, mdnode):
    """Default node depart handler.

    If there is a matching ``visit_<type>`` method for a container node,
    then we should make sure to back up to its parent element when the
    node is exited.
    """
    if not mdnode.is_container():
        return
    if hasattr(self, 'visit_{0}'.format(mdnode.t)):
        self.current_node = self.current_node.parent
    else:
        warn("Container node skipped: type={0}".format(mdnode.t))
def prepare_initial(self, X):
    """Prepare the initial embedding which can be optimized as needed.

    Parameters
    ----------
    X : np.ndarray
        The data matrix to be embedded.

    Returns
    -------
    TSNEEmbedding
        An unoptimized :class:`TSNEEmbedding` object, prepared for
        optimization.
    """
    # If initial positions are given in an array, use a copy of that
    if isinstance(self.initialization, np.ndarray):
        init_checks.num_samples(self.initialization.shape[0], X.shape[0])
        init_checks.num_dimensions(self.initialization.shape[1], self.n_components)
        embedding = np.array(self.initialization)
        variance = np.var(embedding, axis=0)
        if any(variance > 1e-4):
            log.warning("Variance of embedding is greater than 0.0001. Initial " "embeddings with high variance may have display poor convergence.")
    elif self.initialization == "pca":
        embedding = initialization_scheme.pca(X, self.n_components, random_state=self.random_state)
    elif self.initialization == "random":
        embedding = initialization_scheme.random(X, self.n_components, random_state=self.random_state)
    else:
        raise ValueError(f"Unrecognized initialization scheme `{self.initialization}`.")
    # Nearest-neighbour affinities govern the attractive forces.
    affinities = PerplexityBasedNN(X, self.perplexity, method=self.neighbors_method, metric=self.metric, metric_params=self.metric_params, n_jobs=self.n_jobs, random_state=self.random_state, )
    gradient_descent_params = {  # Degrees of freedom of the Student's t-distribution. The
        # suggestion degrees_of_freedom = n_components - 1 comes from [3]_.
        "dof": max(self.n_components - 1, 1), "negative_gradient_method": self.negative_gradient_method, "learning_rate": self.learning_rate,  # By default, use the momentum used in unexaggerated phase
        "momentum": self.final_momentum,  # Barnes-Hut params
        "theta": self.theta,  # Interpolation params
        "n_interpolation_points": self.n_interpolation_points, "min_num_intervals": self.min_num_intervals, "ints_in_interval": self.ints_in_interval, "n_jobs": self.n_jobs,  # Callback params
        "callbacks": self.callbacks, "callbacks_every_iters": self.callbacks_every_iters, }
    return TSNEEmbedding(embedding, affinities=affinities, random_state=self.random_state, **gradient_descent_params, )
def param_mean(ns_run, logw=None, simulate=False, param_ind=0,
               handle_indexerror=False):
    """Mean of a single parameter (single component of theta).

    Parameters
    ----------
    ns_run : dict
        Nested sampling run dict (see the data_processing module
        docstring for more details).
    logw : None or 1d numpy array, optional
        Log weights of samples.
    simulate : bool, optional
        Passed to ns_run_utils.get_logw if logw needs to be calculated.
    param_ind : int, optional
        Index of the parameter (column of ns_run['theta']) for which the
        mean should be calculated.
    handle_indexerror : bool, optional
        Return nan rather than raising an IndexError if param_ind >= ndim.
        Useful when applying the same estimators to data sets of
        different dimensions.

    Returns
    -------
    float
    """
    if logw is None:
        logw = nestcheck.ns_run_utils.get_logw(ns_run, simulate=simulate)
    # Shift by the max log weight for numerical stability before exp.
    rel_weights = np.exp(logw - logw.max())
    try:
        weighted = rel_weights * ns_run['theta'][:, param_ind]
    except IndexError:
        if handle_indexerror:
            return np.nan
        raise
    return np.sum(weighted) / np.sum(rel_weights)
def update(self):
    """Handle update events on bokeh server."""
    # Only the most recent queued widget event matters; drain the queue.
    if not self._queue:
        return
    dim, widget_type, attr, old, new = self._queue[-1]
    self._queue = []
    dim_label = dim.pprint_label
    label, widget = self.widgets[dim_label]
    if widget_type == 'label':
        # The label widget changed: propagate its value to the main widget.
        if isinstance(label, AutocompleteInput):
            value = [new]
            widget.value = value
        else:
            widget.value = float(new)
    elif label:
        # The main widget changed: refresh the label text to match.
        lookups = self.lookups.get(dim_label)
        if not self.editable:
            if lookups:
                new = lookups[widget.value][1]
            label.text = '<b>%s</b>' % dim.pprint_value_string(new)
        elif isinstance(label, AutocompleteInput):
            text = lookups[new][1]
            label.value = text
        else:
            label.value = dim.pprint_value(new)
    # Assemble the plot key from the current value of every widget.
    key = []
    for dim, (label, widget) in self.widgets.items():
        lookups = self.lookups.get(dim)
        if label and lookups:
            val = lookups[widget.value][0]
        else:
            val = widget.value
        key.append(val)
    key = wrap_tuple_streams(tuple(key), self.plot.dimensions, self.plot.streams)
    self.plot.update(key)
    self._active = False
def cart2polar(x, y, center=(0, 0)):
    """Transform Cartesian coordinates (x, y) into polar coordinates
    (r, phi) in the frame of the lens center.

    :param x: x coordinate(s); scalar or array
    :param y: y coordinate(s); scalar or array
    :param center: rotation point, sequence of length 2 (defaults to origin).
        Previously a mutable module-level ``np.array`` default; a tuple
        avoids the shared-mutable-default pitfall and indexes identically.
    :returns: tuple ``(r, phi)`` matching the shape of the inputs
    """
    dx = x - center[0]
    dy = y - center[1]
    r = np.sqrt(dx**2 + dy**2)
    phi = np.arctan2(dy, dx)
    return r, phi
def certify_iterable_schema(value, schema=None, required=True):
    """Certify an iterable against a schema.

    :param iterable value:
        The iterable to certify against the schema.
    :param iterable schema:
        The schema to use: one certifier callable per position.
    :param bool required:
        Whether the value can't be `None`. Defaults to True.
    :return:
        The validated iterable.
    :rtype:
        iterable
    """
    # NOTE(review): despite the docstring, no explicit return is made —
    # the function validates by raising; confirm intent upstream.
    if schema is None:
        return
    if len(schema) != len(value):
        raise CertifierValueError(
            "encountered {extra} extra items".format(extra=len(value) - len(schema)),
            value=value,
            required=required,
        )
    for index, certifier in enumerate(schema):
        try:
            certifier(value=value[index])
        except CertifierError as exc:
            six.raise_from(
                CertifierValueError(
                    message="invalid value {value!r} for item {index}".format(
                        index=index, value=value[index]),
                    value=value,
                    required=required,
                ),
                exc,
            )
def POST_AUTH(self):  # pylint: disable=arguments-differ
    """POST request

    Handles authentication-binding updates from the user's preferences
    page: either start binding a new auth method ("auth_binding" field)
    or revoke an existing one ("revoke_auth_binding" field). Renders the
    bindings preferences template with an optional error message.
    """
    msg = ""
    error = False
    # The current session user must exist in the database; otherwise 404.
    user_data = self.database.users.find_one({"username": self.user_manager.session_username()})
    if not user_data:
        raise web.notfound()
    user_input = web.input()
    auth_methods = self.user_manager.get_auth_methods()
    if "auth_binding" in user_input:
        auth_binding = user_input["auth_binding"]
        if auth_binding not in auth_methods.keys():
            error = True
            msg = _("Incorrect authentication binding.")
        elif auth_binding not in user_data.get("bindings", {}):
            # Not bound yet: redirect to the sign-in page of the chosen
            # method to establish the binding.
            raise web.seeother("/auth/signin/" + auth_binding)
    elif "revoke_auth_binding" in user_input:
        auth_id = user_input["revoke_auth_binding"]
        if auth_id not in auth_methods.keys():
            error = True
            msg = _("Incorrect authentication binding.")
        elif len(user_data.get("bindings", {}).keys()) > 1 or "password" in user_data:
            # Safe to revoke: the user keeps at least one other way to
            # log in (another binding or a password).
            # NOTE(review): find_one_and_update returns the pre-update
            # document by default, so the revoked binding may still show
            # on the rendered page until the next reload — confirm.
            user_data = self.database.users.find_one_and_update({"username": self.user_manager.session_username()}, {"$unset": {"bindings." + auth_id: 1}})
        else:
            error = True
            msg = _("You must set a password before removing all bindings.")
    bindings = user_data.get("bindings", {})
    return self.template_helper.get_renderer().preferences.bindings(bindings, auth_methods, msg, error)
def use_npm_ci(path):
    """Return True if ``npm ci`` should be used in lieu of ``npm install``."""
    # https://docs.npmjs.com/cli/ci#description
    lockfiles = ('package-lock.json', 'npm-shrinkwrap.json')
    has_lockfile = any(
        os.path.isfile(os.path.join(path, name)) for name in lockfiles
    )
    if not has_lockfile:
        return False
    # npm ci is only usable when the installed npm version supports it.
    with open(os.devnull, 'w') as fnull:
        exit_code = subprocess.call(
            [NPM_BIN, 'ci', '-h'], stdout=fnull, stderr=subprocess.STDOUT
        )
    return exit_code == 0
def add_special_file(self, mask, path, from_quick_server, ctype=None):
    """Adds a special file that might have a different actual path than
    its address.

    Parameters
    ----------
    mask : string
        The URL that must be matched to perform this request.
    path : string
        The actual file path.
    from_quick_server : bool
        If set the file path is relative to *this* script otherwise it
        is relative to the process.
    ctype : string
        Optional content type.
    """
    if from_quick_server:
        full_path = os.path.join(os.path.dirname(__file__), path)
    else:
        full_path = path

    def read_file(_req, _args):
        # Re-read the file on every request so changes are picked up.
        with open(full_path, 'rb') as fin:
            return Response(fin.read(), ctype=ctype)

    self.add_text_get_mask(mask, read_file)
    self.set_file_argc(mask, 0)
def _spawn(self, func, *args, **kwargs):
    """Spawn a handler function as a gevent greenlet.

    :param func: A callable to call.
    :param args: Positional arguments for ``func``.
    :param kwargs: Keyword arguments for ``func``.
    """
    # Fire-and-forget: the greenlet reference is intentionally dropped.
    gevent.spawn(func, *args, **kwargs)
def setup(self):
    """Set up filesystem-in-user-space (FUSE) mounts for http and https
    so that we can retrieve tiles from remote sources.

    Creates the http/https mount points and the disk-cache directory
    (paths configured on the instance) if needed, then mounts an HttpFs
    on each in a separate process.
    """
    from simple_httpfs import HttpFs
    # Make sure the mount points and the cache directory exist.
    if not op.exists(self.http_directory):
        os.makedirs(self.http_directory)
    if not op.exists(self.https_directory):
        os.makedirs(self.https_directory)
    if not op.exists(self.diskcache_directory):
        os.makedirs(self.diskcache_directory)
    # Unmount any stale mounts from a previous run before mounting anew.
    self.teardown()
    disk_cache_size = 2 ** 25  # 32 MiB
    # NOTE(review): disk_cache_dir is never used below (HttpFs receives
    # self.diskcache_directory directly) — dead assignment.
    disk_cache_dir = self.diskcache_directory
    lru_capacity = 400
    print("self.diskcache_directory", self.diskcache_directory, op.exists(self.diskcache_directory),)
    def start_fuse(directory, protocol):
        # Runs in a child process; foreground=False daemonizes the FUSE
        # mount so the child can exit once the mount is established.
        print("starting fuse")
        fuse = FUSE(HttpFs(protocol, disk_cache_size=disk_cache_size, disk_cache_dir=self.diskcache_directory, lru_capacity=lru_capacity,), directory, foreground=False, allow_other=True)
    proc1 = mp.Process(target=start_fuse, args=[self.http_directory, 'http'])
    proc1.start()
    proc1.join()
    proc2 = mp.Process(target=start_fuse, args=[self.https_directory, 'https'])
    proc2.start()
    proc2.join()
def start_scan(self, active):
    """Start the scanning task."""
    # Delegate to the background command task, then record our state.
    command = ['_start_scan', active]
    self._command_task.sync_command(command)
    self.scanning = True
def page(request):
    """Adds the current page to the template context and runs its
    ``set_helpers`` method. This was previously part of
    ``PageMiddleware``, but moved to a context processor so that
    we could assign these template context variables without
    the middleware depending on Django's ``TemplateResponse``.
    """
    current = getattr(request, "page", None)
    if not isinstance(current, Page):
        return {}
    # set_helpers has always expected the current template context, but
    # here we're just passing in our context dict with enough variables
    # to satisfy it.
    context = {"request": request, "page": current, "_current_page": current}
    current.set_helpers(context)
    return context
def add_var_opt(self, opt, value, short=False):
    """Add a variable (macro) option for this node. If the option
    specified does not exist in the CondorJob, it is added so the submit
    file will be correct when written.

    @param opt: option name.
    @param value: value of the option for this node in the DAG.
    @param short: whether the option is a short (single-dash) option.
    """
    # Strip characters that are not valid in a Condor macro name.
    sanitized = self.__bad_macro_chars.sub(r'', opt)
    self.__opts['macro' + sanitized] = value
    # Make sure the job itself knows about the option.
    self.__job.add_var_opt(opt, short)
def __extract_model_summary_value(model, value):
    """Extract a model summary field value.

    :param model: model object exposing ``_get`` for field lookup
    :param value: either a ``_precomputed_field`` wrapper or a field name
    :return: the field value, with floats rounded to 4 decimal places
    """
    if isinstance(value, _precomputed_field):
        field_value = value.field
    else:
        field_value = model._get(value)
    if isinstance(field_value, float):
        # Bug fix: the original used a bare ``except:`` which would have
        # swallowed even KeyboardInterrupt/SystemExit; keep only a narrow
        # guard (round on a float should not fail in practice).
        try:
            field_value = round(field_value, 4)
        except (TypeError, ValueError):
            pass
    return field_value
def recurse(self, fn, *args):
    """Calls fn on a hypercat and all its child hypercats (not resources)."""
    fn(self, *args)
    for child in self.items:
        if isinstance(child, Hypercat):
            # Bug fix: the original called self.recurse(i, *args), which
            # treated the child as the callback and re-walked self's own
            # items forever (infinite recursion); recurse into the child.
            child.recurse(fn, *args)
def _parse_apps_to_ignore(self):
    """Parse the applications to ignore in the config.

    Returns:
        set: option names from the ``applications_to_ignore`` section,
        or an empty set when the section is absent (ignore nothing by
        default).
    """
    section_title = 'applications_to_ignore'
    if not self._parser.has_section(section_title):
        return set()
    return set(self._parser.options(section_title))
def encrypt(self, msg):
    """Encrypt a message with AES-CTR and prepend a signature.

    Layout of the returned blob: signature || iv || ciphertext.
    """
    # Fresh random IV per message; it doubles as the initial counter.
    iv = self.random_bytes(AES.block_size)
    counter = Counter.new(AES.block_size * 8, initial_value=self.bin2long(iv))
    aes = AES.AESCipher(self._cipherkey, AES.MODE_CTR, counter=counter)
    payload = iv + aes.encrypt(msg)
    return self.sign(payload) + payload
def _reset(self) -> None:
    """Reset per-search state so the instance can run another search."""
    # Fresh random project name and empty bookkeeping lists.
    self.project: str = namesgenerator.get_random_name()
    self._processed: List = []
    self.results: List = []
def transfer(self, volume, source, dest, **kwargs):
    """
    Transfer will move a volume of liquid from a source location(s)
    to a dest location(s). It is a higher-level command, incorporating
    other :any:`Pipette` commands, like :any:`aspirate` and
    :any:`dispense`, designed to make protocol writing easier at the
    cost of specificity.

    Parameters
    ----------
    volume : number, list, or tuple
        The amount of volume to remove from each `sources` :any:`Placeable`
        and add to each `targets` :any:`Placeable`. If `volume` is a list,
        each volume will be used for the sources/targets at the
        matching index. If `volume` is a tuple with two elements,
        like `(20, 100)`, then a list of volumes will be generated with
        a linear gradient between the two volumes in the tuple.
    source : Placeable or list
        Single :any:`Placeable` or list of :any:`Placeable`\\ s, from where
        liquid will be :any:`aspirate`\\ ed.
    dest : Placeable or list
        Single :any:`Placeable` or list of :any:`Placeable`\\ s, where
        liquid will be :any:`dispense`\\ ed.
    new_tip : str
        The number of clean tips this transfer command will use. If
        'never', no tips will be picked up nor dropped. If 'once', a
        single tip will be used for all commands. If 'always', a new tip
        will be used for each transfer. Default is 'once'.
    trash : boolean
        If `False` (default behavior) tips will be returned to their
        tip rack. If `True` and a trash container has been attached
        to this `Pipette`, then the tip will be sent to the trash
        container.
    touch_tip : boolean
        If `True`, a :any:`touch_tip` will occur following each
        :any:`aspirate` and :any:`dispense`. If set to `False` (default),
        no :any:`touch_tip` will occur.
    blow_out : boolean
        If `True`, a :any:`blow_out` will occur following each
        :any:`dispense`, but only if the pipette has no liquid left in it.
        If set to `False` (default), no :any:`blow_out` will occur.
    mix_before : tuple
        Specify the number of repetitions volume to mix, and a :any:`mix`
        will proceed each :any:`aspirate` during the transfer and dispense.
        The tuple's values is interpreted as (repetitions, volume).
    mix_after : tuple
        Specify the number of repetitions volume to mix, and a :any:`mix`
        will following each :any:`dispense` during the transfer or
        consolidate. The tuple's values is interpreted as
        (repetitions, volume).
    carryover : boolean
        If `True` (default), any `volumes` that exceed the maximum volume
        of this `Pipette` will be split into multiple smaller volumes.
    repeat : boolean
        (Only applicable to :any:`distribute` and :any:`consolidate`) If
        `True` (default), sequential :any:`aspirate` volumes will be
        combined into one tip for the purpose of saving time. If `False`,
        all volumes will be transferred seperately.
    gradient : lambda
        Function for calculated the curve used for gradient volumes.
        When `volumes` is a tuple of length 2, it's values are used
        to create a list of gradient volumes. The default curve for
        this gradient is linear (lambda x: x), however a method can
        be passed with the `gradient` keyword argument to create a
        custom curve.

    Returns
    -------
    This instance of :class:`Pipette`.

    Examples
    --------
    >>> from opentrons import instruments, labware, robot # doctest: +SKIP
    >>> robot.reset() # doctest: +SKIP
    >>> plate = labware.load('96-flat', '5') # doctest: +SKIP
    >>> p300 = instruments.P300_Single(mount='right') # doctest: +SKIP
    >>> p300.transfer(50, plate[0], plate[1]) # doctest: +SKIP
    """
    # Note: currently it varies whether the pipette should have a tip on
    # or not depending on the parameters for this call, so we cannot
    # create a very reliable assertion on tip status
    kwargs['mode'] = kwargs.get('mode', 'transfer')
    # Normalize touch_tip=True to the sentinel -1 understood downstream.
    touch_tip = kwargs.get('touch_tip', False)
    if touch_tip is True:
        touch_tip = -1
    kwargs['touch_tip'] = touch_tip
    # Map the new_tip policy onto the number of tips the plan may use.
    tip_options = {'once': 1, 'never': 0, 'always': float('inf')}
    tip_option = kwargs.get('new_tip', 'once')
    tips = tip_options.get(tip_option)
    if tips is None:
        raise ValueError('Unknown "new_tip" option: {}'.format(tip_option))
    # Build the low-level aspirate/dispense plan, then execute it.
    plan = self._create_transfer_plan(volume, source, dest, **kwargs)
    self._run_transfer_plan(tips, plan, **kwargs)
    return self
def compute(self, inputVector, learn, activeArray, burstingColumns, predictedCells):
    """This is the primary public method of the class. This function takes an input
    vector and outputs the indices of the active columns.

    New parameters defined here:
    @param inputVector: The active cells from a Temporal Memory
    @param learn: A Boolean specifying whether learning will be
                  performed
    @param activeArray: An array representing the active columns
                        produced by this method (filled in place)
    @param burstingColumns: A numpy array with numColumns elements having
                            binary values with 1 representing a
                            currently bursting column in Temporal Memory.
    @param predictedCells: A numpy array with numInputs elements. A 1
                           indicates that this cell switching from
                           predicted state in the previous time step to
                           active state in the current timestep
    @return: numpy array with the indices of the active columns
    """
    assert (numpy.size(inputVector) == self._numInputs)
    assert (numpy.size(predictedCells) == self._numInputs)
    self._updateBookeepingVars(learn)
    inputVector = numpy.array(inputVector, dtype=realDType)
    predictedCells = numpy.array(predictedCells, dtype=realDType)
    # NOTE(review): reshape(-1) returns a new view which is discarded
    # here; presumably inputVector is already 1-D — confirm.
    inputVector.reshape(-1)
    if self._spVerbosity > 3:
        print " Input bits: ", inputVector.nonzero()[0]
        print " predictedCells: ", predictedCells.nonzero()[0]
    # Phase 1: Calculate overlap scores
    # The overlap score has 4 components:
    # (1) Overlap between correctly predicted input cells and pooling TP cells
    # (2) Overlap between active input cells and all TP cells
    #     (like standard SP calculation)
    # (3) Overlap between correctly predicted input cells and all TP cells
    # (4) Overlap from bursting columns in TM and all TP cells
    # 1) Calculate pooling overlap
    if self.usePoolingRule:
        overlapsPooling = self._calculatePoolingActivity(predictedCells, learn)
        if self._spVerbosity > 4:
            print "usePoolingRule: Overlaps after step 1:"
            print " ", overlapsPooling
    else:
        overlapsPooling = 0
    # 2) Calculate overlap between active input cells and connected synapses
    overlapsAllInput = self._calculateOverlap(inputVector)
    # 3) overlap with predicted inputs
    # NEW: Isn't this redundant with 1) and 2)? This looks at connected synapses
    # only.
    # If 1) is called with learning=False connected synapses are used and
    # it is somewhat redundant although there is a boosting factor in 1) which
    # makes 1's effect stronger. If 1) is called with learning=True it's less
    # redundant
    overlapsPredicted = self._calculateOverlap(predictedCells)
    if self._spVerbosity > 4:
        print "Overlaps with all inputs:"
        print " Number of On Bits: ", inputVector.sum()
        print " ", overlapsAllInput
        print "Overlaps with predicted inputs:"
        print " ", overlapsPredicted
    # 4) consider bursting columns
    if self.useBurstingRule:
        overlapsBursting = self._calculateBurstingColumns(burstingColumns)
        if self._spVerbosity > 4:
            print "Overlaps with bursting inputs:"
            print " ", overlapsBursting
    else:
        overlapsBursting = 0
    overlaps = (overlapsPooling + overlapsPredicted + overlapsAllInput + overlapsBursting)
    # Apply boosting when learning is on
    if learn:
        boostedOverlaps = self._boostFactors * overlaps
        if self._spVerbosity > 4:
            print "Overlaps after boosting:"
            print " ", boostedOverlaps
    else:
        boostedOverlaps = overlaps
    # Apply inhibition to determine the winning columns
    activeColumns = self._inhibitColumns(boostedOverlaps)
    if learn:
        self._adaptSynapses(inputVector, activeColumns, predictedCells)
        self._updateDutyCycles(overlaps, activeColumns)
        self._bumpUpWeakColumns()
        self._updateBoostFactors()
        if self._isUpdateRound():
            self._updateInhibitionRadius()
            self._updateMinDutyCycles()
    # Write the winners into the caller-supplied output array.
    activeArray.fill(0)
    if activeColumns.size > 0:
        activeArray[activeColumns] = 1
    # update pooling state of cells
    activeColumnIndices = numpy.where(overlapsPredicted[activeColumns] > 0)[0]
    activeColWithPredictedInput = activeColumns[activeColumnIndices]
    numUnPredictedInput = float(len(burstingColumns.nonzero()[0]))
    # NOTE(review): this counts ALL elements of predictedCells rather
    # than only the nonzero ones — confirm whether len() vs nonzero()
    # is intended here.
    numPredictedInput = float(len(predictedCells))
    fracUnPredicted = numUnPredictedInput / (numUnPredictedInput + numPredictedInput)
    self._updatePoolingState(activeColWithPredictedInput, fracUnPredicted)
    if self._spVerbosity > 2:
        activeColumns.sort()
        print "The following columns are finally active:"
        print " ", activeColumns
        print "The following columns are in pooling state:"
        print " ", self._poolingActivation.nonzero()[0]
        # print "Inputs to pooling columns"
        # print " ", overlapsPredicted[self._poolingColumns]
    return activeColumns
def get_bigram_pair_string(self, text):
    """Return a string of text containing part-of-speech, lemma pairs."""
    # For very short inputs, strip punctuation first (unless that would
    # leave nothing).
    if len(text) <= 2:
        without_punctuation = text.translate(self.punctuation_table)
        if len(without_punctuation) >= 1:
            text = without_punctuation
    document = self.nlp(text)
    if len(text) <= 2:
        return ' '.join(token.lemma_.lower() for token in document)
    # Prefer alphabetic non-stopword tokens; relax to all alphabetic
    # tokens when too few remain to form a bigram.
    tokens = [token for token in document if token.is_alpha and not token.is_stop]
    if len(tokens) < 2:
        tokens = [token for token in document if token.is_alpha]
    pairs = [
        '{}:{}'.format(previous.pos_, current.lemma_.lower())
        for previous, current in zip(tokens, tokens[1:])
    ]
    if not pairs:
        # Fall back to plain lemmas when no bigram could be built.
        pairs = [token.lemma_.lower() for token in document]
    return ' '.join(pairs)
def format_prompt(prompt=None, default=None, enable_quit=False, quit_string='q', quit_message='(enter q to Quit)'):
    """Format the prompt.

    :param prompt: the prompt message.
    :param default: the default answer if user does not provide a response.
    :param enable_quit: specifies whether the user can cancel out of the input prompt.
    :param quit_string: the string which the user must input in order to quit.
    :param quit_message: the message to explain how to quit.
    :return: the formatted prompt string, or None when no prompt was given.
    """
    if prompt is None:
        return None
    # Normalize: drop trailing whitespace and any trailing colon, since
    # a single colon is re-appended at the end.
    base = prompt.rstrip().rstrip(':')
    if enable_quit:
        base = "{0} {1}".format(base, quit_message)
    if default:
        base = "{0} [{1}]".format(base, default)
    return "{0}: ".format(base)
def sni_certs(self):
    """.. versionadded:: 2.2.0

    :return: Return a tuple of :py:class:`~.SSLSNICertificate` instances for each of the certificates that are configured.
    :rtype: tuple
    """
    entries = self._ssl_sni_entries
    # SNI must be supported by the SSL layer and entries must exist.
    if entries is None or not g_ssl_has_server_sni:
        return ()
    return tuple(entry.certificate for entry in entries.values())
def tag_labels(self):
    """Tag named entity labels in the ``words`` layer."""
    # Morphological analysis is a prerequisite for NER tagging.
    if not self.is_tagged(ANALYSIS):
        self.tag_analysis()
    # Lazily create the NER tagger on first use.
    if self.__ner_tagger is None:
        self.__ner_tagger = load_default_ner_tagger()
    self.__ner_tagger.tag_document(self)
    return self
def set_azure_secret_access_key(config_fpath, container, az_secret_access_key):
    """Write the Azure secret access key for a storage container to the
    dtool config file.

    :param config_fpath: path to the dtool config file
    :param container: azure storage container name
    :param az_secret_access_key: azure secret access key for the container
    """
    # Config keys are namespaced per container.
    key = AZURE_KEY_PREFIX + container
    return write_config_value_to_file(key, az_secret_access_key, config_fpath)
def symmetric_difference(self, other):
    """Constructs an unminimized DFA recognizing
    the symmetric difference of the languages of two given DFAs.

    Args:
        other (DFA): The other DFA that will be used
            for the symmetric difference operation
    Returns:
        DFA: The resulting DFA
    """
    # XOR of acceptance over the product automaton yields the symmetric
    # difference of the two languages.
    self.cross_product(other, bool.__xor__)
    return self
def _save_potentials(self, directory):
    """Save potentials to a directory, one file per configuration."""
    print('saving potentials')
    nr_configs = self.configs.configs.shape[0]
    # Zero-pad file numbers so they sort lexicographically.
    digits = int(np.ceil(np.log10(nr_configs)))
    template = 'pot{0:0' + '{0}'.format(digits) + '}.dat'
    for index in range(nr_configs):
        pot_data = self.get_potential(index)
        filename = directory + os.sep + template.format(index + 1)
        nodes = self.grid.nodes['sorted'][:, 1:3]
        all_data = np.hstack((
            nodes,
            pot_data[0][:, np.newaxis],
            pot_data[1][:, np.newaxis],
        ))
        with open(filename, 'wb') as fid:
            np.savetxt(fid, all_data)
def safe_mkdir_for(path, clean=False):
    """Ensure that the parent directory for a file is present.

    If it's not there, create it. If it is, no-op.
    """
    parent_dir = os.path.dirname(path)
    safe_mkdir(parent_dir, clean=clean)
def get_agent_queues_by_ids(self, queue_ids, project=None, action_filter=None):
    """GetAgentQueuesByIds.
    [Preview API] Get a list of agent queues by their IDs
    :param [int] queue_ids: A comma-separated list of agent queue IDs to retrieve
    :param str project: Project ID or project name
    :param str action_filter: Filter by whether the calling user has use or manage permissions
    :rtype: [TaskAgentQueue]
    """
    route_values = {}
    query_parameters = {}
    if project is not None:
        route_values['project'] = self._serialize.url('project', project, 'str')
    if queue_ids is not None:
        # The REST API expects the IDs as a single comma-separated string.
        ids_as_string = ",".join(map(str, queue_ids))
        query_parameters['queueIds'] = self._serialize.query('queue_ids', ids_as_string, 'str')
    if action_filter is not None:
        query_parameters['actionFilter'] = self._serialize.query('action_filter', action_filter, 'str')
    response = self._send(
        http_method='GET',
        location_id='900fa995-c559-4923-aae7-f8424fe4fbea',
        version='5.1-preview.1',
        route_values=route_values,
        query_parameters=query_parameters,
    )
    return self._deserialize('[TaskAgentQueue]', self._unwrap_collection(response))
def linked_parameters(self):
    """Get a dictionary with all parameters in this model in a linked status.
    A parameter is in a linked status if it is linked to another parameter
    (i.e. it is forced to have the same value of the other parameter), or
    if it is linked with another parameter or an independent variable
    through a law.

    :return: dictionary of linked parameters
    """
    # Refresh the parameter list first.
    self._update_parameters()
    # A parameter with an auxiliary variable is, by definition, linked.
    linked = collections.OrderedDict(
        (name, parameter)
        for name, parameter in self._parameters.iteritems()
        if parameter.has_auxiliary_variable()
    )
    return linked
def relpath(self, current_file, rel_path):
    """Compute an absolute path from the current file and a relative path."""
    base_dir = os.path.dirname(os.path.abspath(current_file))
    return os.path.abspath(os.path.join(base_dir, rel_path))
def get_aes_mode(mode):
    """Return pycrypto's AES mode constant, raise if not supported.

    :param mode: mode name such as "ctr" or "cbc" (case-insensitive)
    :return: the corresponding ``AES.MODE_*`` constant
    :raises ValueError: if the installed pycrypto/pycryptodome does not
        support the requested mode
    """
    aes_mode_attr = "MODE_{}".format(mode.upper())
    try:
        aes_mode = getattr(AES, aes_mode_attr)
    except AttributeError:
        # Raise ValueError (a subclass of Exception, so existing callers
        # catching Exception still work) instead of the original bare
        # Exception, which is an anti-pattern.
        raise ValueError("Pycrypto/pycryptodome does not seem to support {}. ".format(aes_mode_attr) + "If you use pycrypto, you need a version >= 2.7a1 (or a special branch).")
    return aes_mode
def colored_line(x, y, c, **kwargs):
    """Create a multi-colored line.

    Takes a set of points and turns them into a collection of lines
    colored by another array.

    Parameters
    ----------
    x : array-like
        x-axis coordinates
    y : array-like
        y-axis coordinates
    c : array-like
        values used for color-mapping
    kwargs : dict
        Other keyword arguments passed to :class:`matplotlib.collections.LineCollection`

    Returns
    -------
    The created :class:`matplotlib.collections.LineCollection` instance.
    """
    # Mask out any NaN values (a NaN in any of x/y/c drops the point).
    nan_mask = ~(np.isnan(x) | np.isnan(y) | np.isnan(c))
    x = x[nan_mask]
    y = y[nan_mask]
    c = c[nan_mask]
    # Paste values end to end: [x0..xn, y0..yn]
    # NOTE(review): bare ``concatenate`` — presumably numpy's, imported
    # at module top; confirm.
    points = concatenate([x, y])
    # Exploit numpy's strides to present a view of these points without copying.
    # Dimensions are (segment, start/end, x/y). Since x and y are concatenated
    # back to back, moving between segments only moves one item; moving start
    # to end is only an item; the move between x and y moves from one half of
    # the array to the other.
    num_pts = points.size // 2
    final_shape = (num_pts - 1, 2, 2)
    final_strides = (points.itemsize, points.itemsize, num_pts * points.itemsize)
    segments = np.lib.stride_tricks.as_strided(points, shape=final_shape, strides=final_strides)
    # Create a LineCollection from the segments and set it to colormap based on c
    lc = LineCollection(segments, **kwargs)
    lc.set_array(c)
    return lc
def send(self, commands):
    """Ship commands to the daemon.

    Arguments:
        commands: e.g., '?WATCH={{'enable':true,'json':true}}' | '?VERSION;' | '?DEVICES;' | '?DEVICE;' | '?POLL;'
    """
    try:
        # Python 3: encode the command string to bytes first.
        payload = bytes(commands, encoding='utf-8')
    except TypeError:
        # 2.7 chokes on 'bytes' and 'encoding='
        payload = commands
    try:
        self.streamSock.send(payload)
    except (OSError, IOError) as error:  # HEY MOE, LEAVE THIS ALONE FOR NOW!
        sys.stderr.write(f'\nAGPS3 send command fail with {error}\n')
def delete_countries_geo_zone_by_id(cls, countries_geo_zone_id, **kwargs):
    """Delete CountriesGeoZone

    Delete an instance of CountriesGeoZone by its ID.
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async=True

    >>> thread = api.delete_countries_geo_zone_by_id(countries_geo_zone_id, async=True)
    >>> result = thread.get()

    :param async bool
    :param str countries_geo_zone_id: ID of countriesGeoZone to delete. (required)
    :return: None
        If the method is called asynchronously,
        returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    # Both the async (request thread) and sync (unwrapped data) paths
    # return the result of the same underlying call.
    return cls._delete_countries_geo_zone_by_id_with_http_info(countries_geo_zone_id, **kwargs)
def pull_env_credential(env, param, value):
    """Dissects a keyring credential lookup string from the supernova config file
    and returns the username/password combo.

    :param env: environment name the credential belongs to
    :param param: parameter name within that environment
    :param value: raw config value; either the literal "USE_KEYRING" or
        "USE_KEYRING['global identifier']"
    :return: tuple of (username, password)
    """
    # Matches USE_KEYRING['identifier'] / USE_KEYRING["identifier"];
    # \x27/\x22 are the single/double quote characters, and \1 enforces
    # that the closing quote matches the opening one.
    # NOTE(review): re.match returns None for any other malformed value,
    # making .group(2) below raise AttributeError — confirm callers
    # validate the value first.
    rex = "USE_KEYRING\[([\x27\x22])(.*)\\1\]"
    # This is the old-style, per-environment keyring credential
    if value == "USE_KEYRING":
        username = utils.assemble_username(env, param)
    # This is the new-style, global keyring credential that can be applied
    # to multiple environments
    else:
        global_identifier = re.match(rex, value).group(2)
        username = utils.assemble_username('global', global_identifier)
    return (username, password_get(username))
def _loadstr(ins):
    """Loads a string value from a memory address."""
    is_temporal, output = _str_oper(ins.quad[2], no_exaf=True)
    if not is_temporal:
        # Non-temporary strings must be copied before use.
        REQUIRES.add('loadstr.asm')
        output.append('call __LOADSTR')
    output.append('push hl')
    return output
def _update_panic_status(self, status=None):
    """Updates the panic status of the alarm panel.

    :param status: status to use to update
    :type status: boolean
    :returns: boolean indicating the new status
    """
    if status is None:
        return
    old_status = self._panic_status
    if status != old_status:
        self._panic_status = status
        # Only fire the callback once an initial status has been seen.
        if old_status is not None:
            self.on_panic(status=status)
    return self._panic_status
def validate(self):
    """validate: Makes sure content node is valid
    Args: None
    Returns: boolean indicating if content node is valid
    """
    # Type checks for all attributes this node type adds.
    checks = (
        (self.author, str, "Assumption Failed: Author is not a string"),
        (self.aggregator, str, "Assumption Failed: Aggregator is not a string"),
        (self.provider, str, "Assumption Failed: Provider is not a string"),
        (self.files, list, "Assumption Failed: Files is not a list"),
        (self.questions, list, "Assumption Failed: Questions is not a list"),
        (self.extra_fields, dict, "Assumption Failed: Extra fields is not a dict"),
    )
    for attr_value, expected_type, message in checks:
        assert isinstance(attr_value, expected_type), message
    return super(TreeNode, self).validate()
def predict(self, Xnew=None, filteronly=False, include_likelihood=True, balance=None, **kw):
    """Predict mean and variance at ``Xnew``.

    Inputs:
    balance: bool
        Whether to balance or not the model as a whole; falls back to
        ``self.balance`` when None.
    """
    use_balance = self.balance if balance is None else balance
    # Run the Kalman filter to get the state
    mean, variance = self._raw_predict(Xnew, filteronly=filteronly, p_balance=use_balance)
    if include_likelihood:
        # Add the observation noise variance to the state variance.
        variance += float(self.likelihood.variance)
    # Return mean and variance
    return mean, variance
def _clone(self, deepcopy=True, base=None):
    """Internal clone helper.

    :param deepcopy: whether to deep-copy the cloned cursor options
    :param base: pre-built cursor to copy state onto; when None a fresh
        one is created (re-using an explicit session if one was given)
    :return: the cloned cursor
    """
    if not base:
        if self.__explicit_session:
            base = self._clone_base(self.__session)
        else:
            base = self._clone_base(None)
    # Only these cursor options are carried over to the clone.
    values_to_clone = ("spec", "projection", "skip", "limit", "max_time_ms", "max_await_time_ms", "comment", "max", "min", "ordering", "explain", "hint", "batch_size", "max_scan", "manipulate", "query_flags", "modifiers", "collation")
    # Attribute names are name-mangled ('_Cursor__<name>'); strip that
    # 9-character prefix before matching against values_to_clone.
    data = dict((k, v) for k, v in iteritems(self.__dict__) if k.startswith('_Cursor__') and k[9:] in values_to_clone)
    if deepcopy:
        data = self._deepcopy(data)
    base.__dict__.update(data)
    return base
def SpamsumDistance(ssA, ssB):
    '''returns the spamsum distance between ssA and ssB

    if they use a different block size, assume maximum distance
    otherwise returns the LevDistance

    :raises ValueError: if either argument is not a spamsum signature
    '''
    mA = re.match(r'^(\d+)[:](.*)$', ssA)
    mB = re.match(r'^(\d+)[:](.*)$', ssB)
    if mA is None or mB is None:
        # Bug fix: the original did ``raise "..."`` — raising a plain
        # string is a TypeError on Python 3, so the intended message was
        # never delivered; raise a proper exception instead.
        raise ValueError("do not appear to be spamsum signatures")
    if mA.group(1) != mB.group(1):
        # Different block sizes: assume the maximum possible distance.
        return max([len(mA.group(2)), len(mB.group(2))])
    else:
        return LevDistance(mA.group(2), mB.group(2))
def _setupaA(self, pot=None, type='staeckel', **kwargs):
    """NAME:

       _setupaA

    PURPOSE:

       set up an actionAngle module for this Orbit

    INPUT:

       pot - potential

       type= ('staeckel') type of actionAngle module to use

          1) 'adiabatic'

          2) 'staeckel'

          3) 'isochroneApprox'

          4) 'spherical'

    OUTPUT:

       None (sets self._aA)

    HISTORY:

       2010-11-30 - Written - Bovy (NYU)

       2013-11-27 - Re-written in terms of new actionAngle modules - Bovy (IAS)

       2017-12-25 - Changed default method to 'staeckel' and automatic delta estimation - Bovy (UofT)
    """
    # Re-use the existing actionAngle instance unless pot/type changed.
    if hasattr(self, '_aA'):
        if not self._resetaA(pot=pot, type=type):
            return None
    if pot is None:
        try:
            pot = self._pot
        except AttributeError:
            raise AttributeError("Integrate orbit or specify pot=")
    self._aAPot = pot
    self._aAType = type
    # Setup
    if self._aAType.lower() == 'adiabatic':
        self._aA = actionAngle.actionAngleAdiabatic(pot=self._aAPot, **kwargs)
    elif self._aAType.lower() == 'staeckel':
        try:
            # Automatically estimate the focal length delta; z is nudged
            # away from exactly zero (by +/- 1e-10, keeping its sign) to
            # avoid a degenerate estimate at the midplane.
            delta = kwargs.pop('delta', actionAngle.estimateDeltaStaeckel(self._aAPot, self.R(use_physical=False), self.z(use_physical=False) + (nu.fabs(self.z(use_physical=False)) < 1e-8) * (2. * (self.z(use_physical=False) >= 0) - 1.) * 1e-10))
            # try to make sure this is not 0
        except PotentialError as e:
            if 'deriv' in str(e):
                raise PotentialError('Automagic calculation of delta parameter for Staeckel approximation failed because the necessary second derivatives of the given potential are not implemented; set delta= explicitly')
            elif 'non-axi' in str(e):
                raise PotentialError('Automagic calculation of delta parameter for Staeckel approximation failed because the given potential is not axisymmetric; pass an axisymmetric potential instead')
            else:  # pragma: no cover
                raise
        if delta < 1e-6:
            # Nearly spherical potential: fall back to the spherical module.
            self._setupaA(pot=pot, type='spherical')
        else:
            self._aA = actionAngle.actionAngleStaeckel(pot=self._aAPot, delta=delta, **kwargs)
    elif self._aAType.lower() == 'isochroneapprox':
        from galpy.actionAngle import actionAngleIsochroneApprox
        self._aA = actionAngleIsochroneApprox(pot=self._aAPot, **kwargs)
    elif self._aAType.lower() == 'spherical':
        self._aA = actionAngle.actionAngleSpherical(pot=self._aAPot, **kwargs)
    return None
def parse():
    """Parse the command-line arguments of the BabelFy entity tagger.

    Returns the parsed arguments as a plain dict (argparse dests as keys).
    """
    arg_parser = argparse.ArgumentParser(
        description='BabelFy Entity Tagger',
        formatter_class=argparse.RawTextHelpFormatter)
    # The input text and the input file are mutually exclusive sources.
    source = arg_parser.add_mutually_exclusive_group()
    source.add_argument('-t', '--text', metavar='',
                        help='text to be annotated by BabelFy API')
    source.add_argument('-tf', '--text-file', metavar='',
                        help='path to the file containing the input text')
    arg_parser.add_argument('-key', '--api-key', metavar='', required=False,
                            help='BabelFy API key')
    # Boolean switches, declared table-style to keep them uniform.
    for short_name, long_name, description in (
            ('-e', '--entities', 'get entity data'),
            ('-ae', '--all-entities', 'get entity and non-entity data'),
            ('-m', '--merged-entities', 'get merged entities only'),
            ('-am', '--all-merged-entities', 'get all merged entities'),
            ('-p', '--print', 'dump all babelfy data to stdout')):
        arg_parser.add_argument(short_name, long_name, help=description,
                                required=False, action='store_true')
    arg_parser.add_argument('-ex', '--export', metavar='', required=False,
                            help='filename of the output file')
    return vars(arg_parser.parse_args())
def tai_jd(self, jd):
    """Build a `Time` from a TAI Julian date.

    Supply the International Atomic Time (TAI) as a Julian date:

    >>> t = ts.tai_jd(2456675.56640625)
    >>> t.tai
    2456675.56640625
    >>> t.tai_calendar()
    (2014, 1, 18, 1, 35, 37.5)
    """
    tai_value = _to_array(jd)
    # Time is constructed from Terrestrial Time, so shift TAI by the
    # constant TT-TAI offset; the raw TAI value is cached on the object.
    time = Time(self, tai_value + tt_minus_tai)
    time.tai = tai_value
    return time
def user_agents(self):
    """Retrieve user-agents, sorted by most common to least common."""
    # Name the two expressions once so the query reads naturally.
    agent = PageView.headers['User-Agent']
    hits = fn.Count(PageView.id)
    query = self.get_query().select(agent, hits)
    query = query.group_by(agent)
    query = query.order_by(hits.desc())
    return query.tuples()
def _create_fw_fab_dev_te(self, tenant_id, drvr_name, fw_dict):
    """Prepares the Fabric and configures the device.

    Calls the fabric class to prepare the fabric when a firewall is
    created, then asks the device manager to configure the device and
    records the final result in the database.
    """
    fw_id = fw_dict.get('fw_id')
    is_fw_virt = self.is_device_virtual()
    # Guard clause: without a prepared fabric there is nothing to do.
    if not self.fabric.prepare_fabric_fw(tenant_id, fw_dict, is_fw_virt,
                                         fw_constants.RESULT_FW_CREATE_INIT):
        LOG.error("Prepare Fabric failed")
        return
    self.update_fw_db_final_result(fw_id, (fw_constants.RESULT_FW_CREATE_DONE))
    if self.create_fw_device(tenant_id, fw_id, fw_dict):
        self.fwid_attr[tenant_id].fw_drvr_created(True)
        self.update_fw_db_dev_status(fw_id, 'SUCCESS')
        LOG.info("FW device create returned success for tenant %s", tenant_id)
    else:
        LOG.error("FW device create returned failure for tenant %s", tenant_id)
def transition_matrix_non_reversible(C):
    """Row-normalize the count matrix ``C`` into a (sparse) transition matrix.

    Raises ValueError if any row of ``C`` sums to zero, since such a row
    cannot be normalized.
    """
    if not scipy.sparse.issparse(C):
        C = scipy.sparse.csr_matrix(C)
    row_totals = C.tocsr().sum(axis=1)
    # catch div by zero
    if np.min(row_totals) == 0.0:
        raise ValueError("matrix C contains rows with sum zero.")
    inverse_totals = np.array(1. / row_totals).flatten()
    scaling = scipy.sparse.diags(inverse_totals, 0)
    return scaling * C
def _parse_response(resp):
    """Split an SCGI response into its XMLRPC payload and parsed headers.

    Returns a ``(payload, headers)`` tuple; raises SCGIException when the
    CRLF-CRLF header delimiter is missing.
    """
    # Assume they care for standards and send us CRLF (not just LF)
    try:
        raw_headers, payload = resp.split("\r\n\r\n", 1)
    except (TypeError, ValueError) as exc:
        raise SCGIException("No header delimiter in SCGI response of length %d (%s)" % (len(resp), exc,))
    headers = _parse_headers(raw_headers)
    declared_length = headers.get("Content-Length")
    # Check length, just in case the transport is bogus
    if declared_length is not None:
        assert len(payload) == int(declared_length)
    return payload, headers
def collect_spans(ast: AST) -> List[Tuple[str, Tuple[int, int]]]:
    """Collect a flattened list of spans of BEL syntax types.

    Provides a simple list of BEL syntax-type spans for highlighting.
    Function names, NSArgs, NS prefixes, NS values and StrArgs are tagged.

    Args:
        ast: AST of BEL assertion

    Returns:
        List[Tuple[str, Tuple[int, int]]]: list of span objects (<type>, (<start>, <end>))
    """
    spans = []
    # Recurse into each top-level assertion component, in order.
    for component in ("subject", "object", "nested"):
        if ast.get(component, False):
            spans.extend(collect_spans(ast[component]))
    if ast.get("function", False):
        log.debug(f"Processing function")
        spans.append(("Function", ast["function"]["name_span"]))
        log.debug(f"Spans: {spans}")
    if ast.get("args", False):
        for arg in ast["args"]:
            log.debug(f"Arg {arg}")
            if arg.get("function", False):
                # Recurse arg function
                log.debug(f"Recursing on arg function")
                results = collect_spans(arg)
                log.debug(f"Results {results}")
                spans.extend(results)
            elif arg.get("nsarg", False):
                log.debug(f"Processing NSArg Arg {arg}")
                spans.append(("NSArg", arg["span"]))
                spans.append(("NSPrefix", arg["nsarg"]["ns_span"]))
                spans.append(("NSVal", arg["nsarg"]["ns_val_span"]))
            elif arg["type"] == "StrArg":
                spans.append(("StrArg", arg["span"]))
    log.debug(f"Spans: {spans}")
    return spans
def get_by_id(self, business_id, **url_params):
    """Make a request to the business details endpoint. More info at
    https://www.yelp.com/developers/documentation/v3/business

    Args:
        business_id (str): The business alias (i.e. yelp-san-francisco) or
            ID (i.e. 4kMBvIEWPxWkWKFN__8SxQ).
        **url_params: Dict corresponding to business API params
            https://www.yelp.com/developers/documentation/v3/business

    Returns:
        yelp.obj.business.Business object that wraps the response.
    """
    endpoint = BUSINESS_PATH.format(business_id=business_id)
    raw_response = self.client._make_request(endpoint, url_params=url_params)
    return Business(raw_response)
def CopyConfig(self):
    """Make a complete new copy of the current config.

    This includes all options as they currently are. If you want a base
    config with defaults use MakeNewConfig.

    Returns:
      A new config object with the same data as self.
    """
    duplicate = self.MakeNewConfig()
    # Deep-copy every piece of per-instance state so the copy is fully
    # independent of the original.
    for attribute in ("raw_data", "files", "secondary_config_parsers",
                      "writeback", "writeback_data", "global_override",
                      "context_descriptions", "constants", "initialized"):
        setattr(duplicate, attribute, copy.deepcopy(getattr(self, attribute)))
    return duplicate
def get(self, vrf=None):
    """Returns the OSPF routing configuration.

    Args:
        vrf (str): VRF name to return OSPF routing config for.

    Returns:
        dict with keys router_id, vrf, networks, ospf_process_id,
        redistribution and shutdown describing the OSPF process, or
        None when no matching ``router ospf`` block exists.
    """
    block_re = '^router ospf .*'
    if vrf:
        block_re += ' vrf %s' % vrf
    config = self.get_block(block_re)
    if not config:
        return None
    # Each parser contributes one (or more) keys to the response dict.
    parsers = (self._parse_router_id, self._parse_vrf,
               self._parse_networks, self._parse_ospf_process_id,
               self._parse_redistribution, self._parse_shutdown)
    response = dict()
    for parser in parsers:
        response.update(parser(config))
    return response
def column_types(self):
    """Return a dict mapping column name to type for all columns in table"""
    return {column.name: column.type for column in self.sqla_columns}
def strseq(prefix: str, first: int, last: int, suffix: str = "") -> List[str]:
    """Makes a string of the format ``<prefix><number><suffix>`` for every number
    from ``first`` to ``last`` inclusive, and returns them as a list."""
    sequence = []
    for number in range(first, last + 1):
        sequence.append(strnum(prefix, number, suffix))
    return sequence
def mem_size(self):
    """Return the memory size in bytes of the remote host.

    Runs the platform's memory-size command over the shell connection
    and parses its standard output as an integer.

    Raises:
        ValueError: if stdout is not a valid integer.
    """
    result = self.shell.execute(self.commands.mem_size.value)
    stdout = self.shell.decode(result['stdout'])
    # FIX: the original also decoded result['stderr'] into a local that
    # was never used; that dead work has been removed.  A failing command
    # surfaces as a ValueError from int() below.
    return int(stdout)
def hist_results(self):
    """Process FLASh numeric histograms and add the frequency-polygon section.

    Returns the number of histogram reports found after sample filtering.
    """
    self.hist_data = OrderedDict()
    for histfile in self.find_log_files('flash/hist'):
        self.hist_data.update(self.parse_hist_files(histfile))
    # ignore sample names
    self.hist_data = self.ignore_samples(self.hist_data)
    if self.hist_data:
        try:
            log.info("Found %d histogram reports", len(self.hist_data))
            self.add_section(
                name='Frequency polygons of merged read lengths',
                anchor='flash-histogram',
                description='This plot is made from the numerical histograms output by FLASh.',
                plot=self.freqpoly_plot(self.hist_data))
        except UserWarning:
            # A UserWarning from plotting is silently ignored, as before.
            pass
        except Exception as err:
            log.error(err)
            log.debug(traceback.format_exc())
    return len(self.hist_data)
def protege_data(datas_str, sens):
    """Used to crypt/decrypt data before saving locally.

    Override if security is needed; this default implementation only
    converts between str and bytes (UTF-8):
    bytes -> str when decrypting, str -> bytes when crypting.

    :param datas_str: when crypting str, when decrypting bytes
    :param sens: True to crypt, False to decrypt
    """
    if sens:
        return datas_str.encode("utf8")
    return datas_str.decode("utf8")
def voropp_for_non_orthorhombic_cells(_a, q='%v', voropp_path=VOROPP_PATH, fast=False, dump=None):
    """Run voro++ on current configuration and return selected quantities.

    Parameter *q* can be a single voro++ output quantity or a list of them.
    Run 'voro++ -hc' to see options. Will take care of Lees-Edwards boundary
    conditions by embedding a sheared cell in its periodic images and then
    throwing away the border (will hence be slower).

    Parameters
    ----------
    _a : Atoms
        Atomic configuration; a copy is made, the input is not modified.
    q : str or list of str
        voro++ output quantity specifier(s), e.g. '%v'.
    voropp_path : str
        Path to the voro++ executable.
    fast : bool
        If True, embed only 2 copies of the box per direction instead of
        3 (faster, but could fail for strongly sheared cells).
    dump : str, optional
        If given, write the embedded snapshot to this file for debugging.

    Returns
    -------
    numpy array with the requested per-atom quantities for the original
    atoms (1d for a single quantity, 2d otherwise).
    """
    # FIX: the original body used Python 2-only constructs ('print >> f'
    # statement syntax and the removed 'unicode' builtin), which fail on
    # Python 3 — the interpreter the rest of this file (f-strings) needs.
    # Make a copy because we will modify the Atoms object
    a = _a.copy()
    nat = len(a)
    # Wrap into cell
    a.set_scaled_positions(a.get_scaled_positions() % 1.0)
    # shear_dx should go into the cell
    if 'shear_dx' in a.info:
        lx, ly, lz = a.get_cell().diagonal()
        cx, cy, cz = a.get_cell()
        assert abs(cz[0]) < 1e-12
        assert abs(cz[1]) < 1e-12
        shear_dx = a.info['shear_dx']
        cz[0] = shear_dx[0]
        cz[1] = shear_dx[1]
        a.set_cell([cx, cy, cz], scale_atoms=False)
        a.set_scaled_positions(a.get_scaled_positions() % 1.0)
    cx, cy, cz = a.get_cell()
    if fast:
        # Make 2 copies of the box in each direction. Could fail!
        a *= (2, 2, 2)
        # Translate such that the box with the lowest indices sits in the middle
        a.translate(cx / 2 + cy / 2 + cz / 2)
    else:
        # Make 3 copies of the box in each direction
        a *= (3, 3, 3)
        # Translate such that the box with the lowest indices sits in the middle
        a.translate(cx + cy + cz)
    # Wrap back to box
    a.set_scaled_positions(a.get_scaled_positions() % 1.0)
    # Get enclosing box
    lower, upper = get_enclosing_orthorhombic_box(a.get_cell())
    elx, ely, elz = upper - lower
    # Shift and set cell such that the general system is enclosed in the
    # orthorhombic box
    a.translate(-lower)
    a.set_cell([elx, ely, elz], scale_atoms=False)
    # Dump snapshot for debugging purposes
    if dump:
        write(dump, a)
    # Do Voronoi analysis: write one "id x y z" line per atom.
    x, y, z = a.get_positions().T
    with open('tmp.voronoi', 'w') as f:
        for jn, (jx, jy, jz) in enumerate(zip(x, y, z)):
            print(jn, jx, jy, jz, file=f)
    if isinstance(q, str):
        c = q
    else:
        # Prefix each requested quantity with '%' and join with spaces.
        c = ' '.join('%' + item for item in q)
    os.system('{0} -o -p -c "%i {1}" 0 {2} 0 {3} 0 {4} tmp.voronoi'.format(voropp_path, c, elx, ely, elz))
    r = np.loadtxt('tmp.voronoi.vol', unpack=True)
    os.remove('tmp.voronoi')
    os.remove('tmp.voronoi.vol')
    # Sort particles according to their ids
    r = r[:, np.array(r[0, :], dtype=int)]
    # Use only the lowest indices (i.e. the box in the middle)
    if r.shape[0] == 2:
        return r[1, :nat]
    else:
        return r[1:, :nat]
def _get_neighbor_feat_idx ( self , n_features , feat_idx , abs_corr_mat ) :
"""Get a list of other features to predict ` ` feat _ idx ` ` .
If self . n _ nearest _ features is less than or equal to the total
number of features , then use a probability proportional to the absolute
correlation between ` ` feat _ idx ` ` and each other feature to randomly
choose a subsample of the other features ( without replacement ) .
Parameters
n _ features : int
Number of features in ` ` X ` ` .
feat _ idx : int
Index of the feature currently being imputed .
abs _ corr _ mat : ndarray , shape ( n _ features , n _ features )
Absolute correlation matrix of ` ` X ` ` . The diagonal has been zeroed
out and each feature has been normalized to sum to 1 . Can be None .
Returns
neighbor _ feat _ idx : array - like
The features to use to impute ` ` feat _ idx ` ` .""" | if ( self . n_nearest_features is not None and self . n_nearest_features < n_features ) :
p = abs_corr_mat [ : , feat_idx ]
neighbor_feat_idx = self . random_state_ . choice ( np . arange ( n_features ) , self . n_nearest_features , replace = False , p = p )
else :
inds_left = np . arange ( feat_idx )
inds_right = np . arange ( feat_idx + 1 , n_features )
neighbor_feat_idx = np . concatenate ( ( inds_left , inds_right ) )
return neighbor_feat_idx |
def setup_panel_params(self, coord):
    """Calculate the x & y range & breaks information for each panel.

    Parameters
    ----------
    coord : coord
        Coordinate system of the plot.
    """
    if not self.panel_scales_x:
        raise PlotnineError('Missing an x scale')
    if not self.panel_scales_y:
        raise PlotnineError('Missing a y scale')
    self.panel_params = []
    for scale_x_num, scale_y_num in self.layout[['SCALE_X', 'SCALE_Y']].itertuples(index=False):
        # The layout table stores 1-based scale indices.
        scale_x = self.panel_scales_x[scale_x_num - 1]
        scale_y = self.panel_scales_y[scale_y_num - 1]
        self.panel_params.append(coord.setup_panel_params(scale_x, scale_y))
def load_mgh(filename, to='auto'):
    '''load_mgh(filename) yields the MGHImage referenced by the given filename by using the
    nibabel.freesurfer.mghformat.load function.

    The optional argument 'to' may be used to coerce the resulting data to a particular
    format; the following arguments are understood:
      * 'header' will yield just the image header
      * 'data' will yield the image's data-array
      * 'field' will yield a squeezed version of the image's data-array and will raise an
        error if the data object has more than 2 non-unitary dimensions (appropriate for
        loading surface properties stored in image files)
      * 'affine' will yield the image's affine transformation
      * 'image' will yield the raw image object
      * 'auto' is equivalent to 'image' unless the image has no more than 2 non-unitary
        dimensions, in which case it is assumed to be a surface-field and the return value
        is equivalent to the 'field' value.
    '''
    img = fsmgh.load(filename)
    to = to.lower()
    if to == 'image':
        return img
    if to == 'data':
        return img.get_data()
    if to == 'affine':
        return img.affine
    if to == 'header':
        return img.header
    if to == 'field':
        field_data = np.squeeze(img.get_data())
        if len(field_data.shape) > 2:
            raise ValueError('image requested as field has more than 2 non-unitary dimensions')
        return field_data
    if to in ['auto', 'automatic']:
        unique_dims = set(img.dataobj.shape)
        # Squeeze to a field only when the shape mixes unitary and (at
        # most two distinct) non-unitary dimensions.
        if 1 < len(unique_dims) < 4 and 1 in unique_dims:
            return np.squeeze(img.get_data())
        return img
    raise ValueError('unrecognized \'to\' argument \'%s\'' % to)
def mac_address_table_static_interface_type ( self , ** kwargs ) :
"""Auto Generated Code""" | config = ET . Element ( "config" )
mac_address_table = ET . SubElement ( config , "mac-address-table" , xmlns = "urn:brocade.com:mgmt:brocade-mac-address-table" )
static = ET . SubElement ( mac_address_table , "static" )
mac_address_key = ET . SubElement ( static , "mac-address" )
mac_address_key . text = kwargs . pop ( 'mac_address' )
forward_key = ET . SubElement ( static , "forward" )
forward_key . text = kwargs . pop ( 'forward' )
interface_name_key = ET . SubElement ( static , "interface-name" )
interface_name_key . text = kwargs . pop ( 'interface_name' )
vlan_key = ET . SubElement ( static , "vlan" )
vlan_key . text = kwargs . pop ( 'vlan' )
vlanid_key = ET . SubElement ( static , "vlanid" )
vlanid_key . text = kwargs . pop ( 'vlanid' )
interface_type = ET . SubElement ( static , "interface-type" )
interface_type . text = kwargs . pop ( 'interface_type' )
callback = kwargs . pop ( 'callback' , self . _callback )
return callback ( config ) |
def birth_inds_given_contours(birth_logl_arr, logl_arr, **kwargs):
    """Maps the iso-likelihood contours on which points were born to the
    index of the dead point on this contour.

    MultiNest and PolyChord use different values to identify the inital live
    points which were sampled from the whole prior (PolyChord uses -1e+30
    and MultiNest -0.179769313486231571E+309). However in each case the first
    dead point must have been sampled from the whole prior, so for either
    package we can use

    init_birth = birth_logl_arr[0]

    If there are many points with the same logl_arr and dup_assert is False,
    these points are randomly assigned an order (to ensure results are
    consistent, random seeding is used).

    Parameters
    ----------
    logl_arr: 1d numpy array
        logl values of each point.
    birth_logl_arr: 1d numpy array
        Birth contours - i.e. logl values of the iso-likelihood contour from
        within each point was sampled (on which it was born).
    dup_assert: bool, optional
        See ns_run_utils.check_ns_run_logls docstring.
    dup_warn: bool, optional
        See ns_run_utils.check_ns_run_logls docstring.

    Returns
    -------
    birth_inds: 1d numpy array of ints
        Step at which each element of logl_arr was sampled. Points sampled
        from the whole prior are assigned value -1.
    """
    dup_assert = kwargs.pop('dup_assert', False)
    dup_warn = kwargs.pop('dup_warn', False)
    if kwargs:
        raise TypeError('Unexpected **kwargs: {0}'.format(kwargs))
    assert logl_arr.ndim == 1, logl_arr.ndim
    assert birth_logl_arr.ndim == 1, birth_logl_arr.ndim
    # Check for duplicate logl values (if specified by dup_assert or dup_warn)
    nestcheck.ns_run_utils.check_ns_run_logls(
        {'logl': logl_arr}, dup_assert=dup_assert, dup_warn=dup_warn)
    # Random seed so results are consistent if there are duplicate logls
    state = np.random.get_state()  # Save random state before seeding
    np.random.seed(0)
    # Calculate birth inds
    init_birth = birth_logl_arr[0]
    # Every point must be born on a contour at or below its own logl
    assert np.all(birth_logl_arr <= logl_arr), (
        logl_arr[birth_logl_arr > logl_arr])
    # nan marks "not yet assigned"; points born from the whole prior get -1
    birth_inds = np.full(birth_logl_arr.shape, np.nan)
    birth_inds[birth_logl_arr == init_birth] = -1
    for i, birth_logl in enumerate(birth_logl_arr):
        if not np.isnan(birth_inds[i]):
            # birth ind has already been assigned
            continue
        dup_deaths = np.where(logl_arr == birth_logl)[0]
        if dup_deaths.shape == (1,):
            # death index is unique
            birth_inds[i] = dup_deaths[0]
            continue
        # The remainder of this loop deals with the case that multiple points
        # have the same logl value (=birth_logl). This can occur due to limited
        # precision, or for likelihoods with contant regions. In this case we
        # randomly assign the duplicates birth steps in a manner
        # that provides a valid division into nested sampling runs
        dup_births = np.where(birth_logl_arr == birth_logl)[0]
        assert dup_deaths.shape[0] > 1, dup_deaths
        if np.all(birth_logl_arr[dup_deaths] != birth_logl):
            # If no points both are born and die on this contour, we can just
            # randomly assign an order
            np.random.shuffle(dup_deaths)
            inds_to_use = dup_deaths
        else:
            # If some points are both born and die on the contour, we need to
            # take care that the assigned birth inds do not result in some
            # points dying before they are born
            try:
                inds_to_use = sample_less_than_condition(
                    dup_deaths, dup_births)
            except ValueError:
                raise ValueError((
                    'There is no way to allocate indexes dup_deaths={} such '
                    'that each is less than dup_births={}.').format(
                        dup_deaths, dup_births))
        try:
            # Add our selected inds_to_use values to the birth_inds array
            # Note that dup_deaths (and hence inds to use) may have more
            # members than dup_births, because one of the duplicates may be
            # the final point in a thread. We therefore include only the first
            # dup_births.shape[0] elements
            birth_inds[dup_births] = inds_to_use[:dup_births.shape[0]]
        except ValueError:
            warnings.warn((
                'for logl={}, the number of points born (indexes='
                '{}) is bigger than the number of points dying '
                '(indexes={}). This indicates a problem with your '
                'nested sampling software - it may be caused by '
                'a bug in PolyChord which was fixed in PolyChord '
                'v1.14, so try upgrading. I will try to give an '
                'approximate allocation of threads but this may '
                'fail.').format(birth_logl, dup_births, inds_to_use),
                UserWarning)
            extra_inds = np.random.choice(
                inds_to_use, size=dup_births.shape[0] - inds_to_use.shape[0])
            inds_to_use = np.concatenate((inds_to_use, extra_inds))
            np.random.shuffle(inds_to_use)
            birth_inds[dup_births] = inds_to_use[:dup_births.shape[0]]
    # Every point must have been assigned a birth index by now
    assert np.all(~np.isnan(birth_inds)), np.isnan(birth_inds).sum()
    np.random.set_state(state)  # Reset random state
    return birth_inds.astype(int)
def from_dict(data, ctx):
    """Instantiate a new LiquidityRegenerationSchedule from a dict (generally
    from loading a JSON response).

    The data used to instantiate the LiquidityRegenerationSchedule is a
    shallow copy of the dict passed in, with any complex child types
    instantiated appropriately.
    """
    fields = data.copy()
    steps = fields.get('steps')
    if steps is not None:
        # Hydrate each raw step dict into its transaction object.
        fields['steps'] = [
            ctx.transaction.LiquidityRegenerationScheduleStep.from_dict(step, ctx)
            for step in steps
        ]
    return LiquidityRegenerationSchedule(**fields)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.