signature
stringlengths 29
44.1k
| implementation
stringlengths 0
85.2k
|
|---|---|
def add_text(self, text, x, y, **kws):
    """Add a text annotation to the plot at position (x, y).

    Any extra keyword arguments are forwarded unchanged to the panel.
    """
    self.panel.add_text(text, x, y, **kws)
|
def add_circle(self, center_lat=None, center_lng=None, radius=None, **kwargs):
    """Adds a circle dict to the Map.circles attribute.

    The circle in a sphere is called "spherical cap" and is defined in the
    Google Maps API by at least the center coordinates and its radius, in
    meters. A circle has color and opacity both for the border line and the
    inside area.

    It accepts a circle dict representation as well.

    Args:
        center_lat (float): The circle center latitude
        center_lng (float): The circle center longitude
        radius (float): The circle radius, in meters

    Raises:
        AttributeError: if center coordinates or radius are missing.

    .. _Circle:
        https://developers.google.com/maps/documentation/javascript/reference#Circle
    """
    kwargs.setdefault('center', {})
    # BUGFIX: compare against None explicitly — 0.0 is a valid latitude
    # (equator) and longitude (Greenwich meridian) but is falsy, so the
    # previous truthiness checks silently dropped those coordinates.
    if center_lat is not None:
        kwargs['center']['lat'] = center_lat
    if center_lng is not None:
        kwargs['center']['lng'] = center_lng
    if radius is not None:
        kwargs['radius'] = radius
    if set(('lat', 'lng')) != set(kwargs['center'].keys()):
        raise AttributeError('circle center coordinates required')
    if 'radius' not in kwargs:
        raise AttributeError('circle radius definition required')
    # Default styling: red circle with semi-transparent border and fill.
    kwargs.setdefault('stroke_color', '#FF0000')
    kwargs.setdefault('stroke_opacity', .8)
    kwargs.setdefault('stroke_weight', 2)
    kwargs.setdefault('fill_color', '#FF0000')
    kwargs.setdefault('fill_opacity', .3)
    self.circles.append(kwargs)
|
def discover(glob_pattern):
    """Find all files matching the given glob_pattern, parse them, and
    return a list of environments, e.g. for ``requirements/*.in``::

        [{'name': 'base', 'refs': set()},
         {'name': 'test', 'refs': {'base'}},
         ...]
    """
    paths = glob.glob(glob_pattern)
    # Map each environment name to the .in file it was derived from.
    name_to_path = {extract_env_name(path): path for path in paths}
    environments = [
        {'name': env_name, 'refs': Environment.parse_references(in_path)}
        for env_name, in_path in name_to_path.items()
    ]
    return order_by_refs(environments)
def delete_role(resource_root, service_name, name, cluster_name="default"):
    """Delete a role by name.

    @param resource_root: The root Resource object.
    @param service_name: Service name
    @param name: Role name
    @param cluster_name: Cluster name
    @return: The deleted ApiRole object
    """
    role_path = _get_role_path(cluster_name, service_name, name)
    return call(resource_root.delete, role_path, ApiRole)
def getunicode(self, name, default=None, encoding=None):
    '''Return the value as a unicode string, or the default.'''
    try:
        raw = self[name]
    except KeyError:
        return default
    try:
        return self._fix(raw, encoding)
    except UnicodeError:
        return default
def basemz(df):
    """Return a Trace of the m/z of the most abundant ion at each time."""
    # Column index of the per-row maximum, mapped back to m/z values.
    max_col_idx = df.values.argmax(axis=1)
    mzs = np.array(df.columns)[max_col_idx]
    return Trace(mzs, df.index, name='basemz')
def summarize_address_range(first, last):
    """Summarize a network range given the first and last IP addresses.

    Example:
        >>> summarize_address_range(IPv4Address('1.1.1.0'),
        ...                         IPv4Address('1.1.1.130'))
        [IPv4Network('1.1.1.0/25'), IPv4Network('1.1.1.128/31'),
         IPv4Network('1.1.1.130/32')]

    Args:
        first: the first IPv4Address or IPv6Address in the range.
        last: the last IPv4Address or IPv6Address in the range.

    Returns:
        The address range collapsed to a list of IPv4Network's or
        IPv6Network's.

    Raise:
        TypeError:
            If the first and last objects are not IP addresses.
            If the first and last objects are not the same version.
        ValueError:
            If the last object is not greater than the first.
            If the version is not 4 or 6.
    """
    if not (isinstance(first, _BaseIP) and isinstance(last, _BaseIP)):
        raise TypeError('first and last must be IP addresses, not networks')
    if first.version != last.version:
        raise TypeError("%s and %s are not of the same version" % (str(first), str(last)))
    if first > last:
        raise ValueError('last IP address must be greater than first')
    networks = []
    # Pick the network class matching the address version.
    if first.version == 4:
        ip = IPv4Network
    elif first.version == 6:
        ip = IPv6Network
    else:
        raise ValueError('unknown IP version')
    ip_bits = first._max_prefixlen
    first_int = first._ip
    last_int = last._ip
    # Greedy algorithm: repeatedly emit the largest CIDR block that starts
    # at first_int and does not extend past last_int, then advance past it.
    while first_int <= last_int:
        # Alignment of first_int bounds the largest candidate block size.
        nbits = _count_righthand_zero_bits(first_int, ip_bits)
        current = None
        while nbits >= 0:
            addend = 2 ** nbits - 1
            # current is the last address of the candidate block.
            current = first_int + addend
            nbits -= 1
            if current <= last_int:
                break  # the candidate block fits inside the range
        prefix = _get_prefix_length(first_int, current, ip_bits)
        net = ip('%s/%d' % (str(first), prefix))
        networks.append(net)
        # Stop at the all-ones address to avoid walking past the end of
        # the address space.
        if current == ip._ALL_ONES:
            break
        first_int = current + 1
        first = IPAddress(first_int, version=first._version)
    return networks
|
def register(self, es, append=None, modulo=None):
    """Register a `CMAEvolutionStrategy` instance for logging.

    ``append=True`` appends to previous data logged under the same name;
    by default previous data are overwritten.
    """
    if not isinstance(es, CMAEvolutionStrategy):
        raise TypeError("only class CMAEvolutionStrategy can be "
                        "registered for logging")
    self.es = es
    # Only override stored options when explicitly provided.
    if append is not None:
        self.append = append
    if modulo is not None:
        self.modulo = modulo
    self.registered = True
    return self  # allow call chaining
def arc(pRA, pDecl, sRA, sDecl, mcRA, lat):
    """Return the arc of direction between a Promissor and a Significator.

    Uses the generic proportional semi-arc method.
    """
    pDArc, pNArc = utils.dnarcs(pDecl, lat)
    sDArc, sNArc = utils.dnarcs(sDecl, lat)
    # Choose meridian and arcs: MC with diurnal arcs when the significator
    # is above the horizon, otherwise IC with nocturnal arcs.
    if utils.isAboveHorizon(sRA, sDecl, mcRA, lat):
        mdRA, sArc, pArc = mcRA, sDArc, pDArc
    else:
        mdRA = angle.norm(mcRA + 180)
        sArc, pArc = sNArc, pNArc
    # Distances of promissor and significator to the chosen meridian.
    pDist = angle.closestdistance(mdRA, pRA)
    sDist = angle.closestdistance(mdRA, sRA)
    # The promissor should come after the significator (in degrees).
    if pDist < sDist:
        pDist += 360
    # Normalize the meridian distances by the respective semi-arcs.
    sPropDist = sDist / (sArc / 2.0)
    pPropDist = pDist / (pArc / 2.0)
    # The arc is how much of the promissor's semi-arc is needed to reach
    # the significator.
    return (pPropDist - sPropDist) * (pArc / 2.0)
def create_local_arrays(reified_arrays, array_factory=None):
    """Create arrays from the definitions in the reified_arrays dictionary.

    Arguments:
        reified_arrays: dict
            Dictionary keyed on array name, with values exposing ``shape``
            and ``dtype``. Can be obtained via cube.arrays(reify=True).

    Keyword Arguments:
        array_factory: callable
            A function used to create array objects. Its signature should
            be ``array_factory(shape, dtype)`` and it should return a
            constructed array of the supplied shape and data type.
            If None, numpy.empty will be used.

    Returns:
        A dictionary of array objects, keyed on array names.
    """
    # By default, create (uninitialised) numpy arrays.
    if array_factory is None:
        array_factory = np.empty
    # BUGFIX: dict.iteritems() exists only on Python 2; items() works on
    # both Python 2 and Python 3.
    return {n: array_factory(ra.shape, ra.dtype)
            for n, ra in reified_arrays.items()}
def assigned_add_ons(self):
    """Access the assigned_add_ons.

    :returns: twilio.rest.api.v2010.account.incoming_phone_number.assigned_add_on.AssignedAddOnList
    :rtype: twilio.rest.api.v2010.account.incoming_phone_number.assigned_add_on.AssignedAddOnList
    """
    # Lazily build the list resource on first access and cache it.
    if self._assigned_add_ons is None:
        self._assigned_add_ons = AssignedAddOnList(
            self._version,
            account_sid=self._solution['account_sid'],
            resource_sid=self._solution['sid'],
        )
    return self._assigned_add_ons
def _set_mct_l2ys_state(self, v, load=False):
    """Setter method for mct_l2ys_state, mapped from YANG variable
    /mct_l2ys_state (container).

    If this variable is read-only (config: false) in the source YANG file,
    then _set_mct_l2ys_state is considered as a private method. Backends
    looking to populate this variable should do so via calling
    thisObj._set_mct_l2ys_state() directly.

    YANG Description: MCT L2sys Operational Information
    """
    # NOTE(review): 'load' is accepted but unused in this body — it appears
    # to exist for signature symmetry with other generated setters; confirm.
    # Unwrap values that carry their own YANG type constructor.
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        # Re-wrap the value in a YANGDynClass bound to this container's
        # schema metadata; raises if v is not container-compatible.
        t = YANGDynClass(v, base=mct_l2ys_state.mct_l2ys_state, is_container='container', presence=False, yang_name="mct-l2ys-state", rest_name="mct-l2ys-state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'l2sys-mct-l2ys', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-l2sys-operational', defining_module='brocade-l2sys-operational', yang_type='container', is_config=True)
    except (TypeError, ValueError):
        raise ValueError({'error-string': """mct_l2ys_state must be of a type compatible with container""", 'defined-type': "container", 'generated-type': """YANGDynClass(base=mct_l2ys_state.mct_l2ys_state, is_container='container', presence=False, yang_name="mct-l2ys-state", rest_name="mct-l2ys-state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'l2sys-mct-l2ys', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-l2sys-operational', defining_module='brocade-l2sys-operational', yang_type='container', is_config=True)""", })
    self.__mct_l2ys_state = t
    # Notify the parent object that a member changed, if supported.
    if hasattr(self, '_set'):
        self._set()
def parse_content_type_header(value):
    """maintype "/" subtype *( ";" parameter )

    The maintype and subtype are tokens. Theoretically they could
    be checked against the official IANA list + x-token, but we
    don't do that.
    """
    ctype = ContentType()
    # NOTE(review): 'recover' is assigned but never used in this function.
    recover = False
    if not value:
        ctype.defects.append(errors.HeaderMissingRequiredValue("Missing content type specification"))
        return ctype
    try:
        token, value = get_token(value)
    except errors.HeaderParseError:
        # Could not parse a maintype token: record the defect and try to
        # salvage any parameters from the remainder.
        ctype.defects.append(errors.InvalidHeaderDefect("Expected content maintype but found {!r}".format(value)))
        _find_mime_parameters(ctype, value)
        return ctype
    ctype.append(token)
    # XXX: If we really want to follow the formal grammar we should make
    # maintype and subtype specialized TokenLists here. Probably not worth it.
    if not value or value[0] != '/':
        ctype.defects.append(errors.InvalidHeaderDefect("Invalid content type"))
        if value:
            _find_mime_parameters(ctype, value)
        return ctype
    ctype.maintype = token.value.strip().lower()
    ctype.append(ValueTerminal('/', 'content-type-separator'))
    value = value[1:]
    try:
        token, value = get_token(value)
    except errors.HeaderParseError:
        # Same salvage strategy as above, for the subtype.
        ctype.defects.append(errors.InvalidHeaderDefect("Expected content subtype but found {!r}".format(value)))
        _find_mime_parameters(ctype, value)
        return ctype
    ctype.append(token)
    ctype.subtype = token.value.strip().lower()
    if not value:
        return ctype
    if value[0] != ';':
        ctype.defects.append(errors.InvalidHeaderDefect("Only parameters are valid after content type, but " "found {!r}".format(value)))
        # The RFC requires that a syntactically invalid content-type be treated
        # as text/plain. Perhaps we should postel this, but we should probably
        # only do that if we were checking the subtype value against IANA.
        del ctype.maintype, ctype.subtype
        _find_mime_parameters(ctype, value)
        return ctype
    ctype.append(ValueTerminal(';', 'parameter-separator'))
    ctype.append(parse_mime_parameters(value[1:]))
    return ctype
def autocorrelation(self, x, lag):
    """As in tsfresh `autocorrelation <https://github.com/blue-yonder/tsfresh/blob/master/tsfresh/feature_extraction/feature_calculators.py#L1457>`_

    Calculates the autocorrelation of the specified lag, according to the
    `formula <https://en.wikipedia.org/wiki/Autocorrelation#Estimation>`_:

    .. math::

        \\frac{1}{(n-l)\\sigma^{2}} \\sum_{t=1}^{n-l} (X_{t} - \\mu)(X_{t+l} - \\mu)

    where :math:`n` is the length of the time series :math:`X_i`,
    :math:`\\sigma^2` its variance and :math:`\\mu` its mean;
    :math:`l` denotes the lag.

    :param x: the time series to calculate the feature of
    :type x: pandas.Series
    :param lag: the lag
    :type lag: int
    :return: the value of this feature
    :rtype: float
    """
    # A missing lag defaults to 0.
    effective_lag = 0 if lag is None else lag
    value = feature_calculators.autocorrelation(x, effective_lag)
    logging.debug("autocorrelation by tsfresh calculated")
    return value
def get_config_var_data(self, index, offset):
    """Get a chunk of data for a config variable."""
    entries = self.config_database.entries
    # Indices are 1-based; 0 or past-the-end is invalid.
    if index == 0 or index > len(entries):
        return [Error.INVALID_ARRAY_KEY, b'']
    entry = entries[index - 1]
    if not entry.valid:
        return [ConfigDatabaseError.OBSOLETE_ENTRY, b'']
    if offset >= len(entry.data):
        return [Error.INVALID_ARRAY_KEY, b'']
    # Return at most 16 bytes starting at the requested offset.
    chunk = entry.data[offset:offset + 16]
    return [Error.NO_ERROR, chunk]
def decode_from_bytes(cls, data):
    """Decode an AMQP message from a bytearray.

    The returned message will not have a delivery context and will
    therefore be considered to be in an "already settled" state.

    :param data: The AMQP wire-encoded bytes to decode.
    :type data: bytes or bytearray
    """
    decoded = c_uamqp.decode_message(len(data), data)
    return cls(message=decoded)
def blame(self, committer=True, by='repository', ignore_globs=None, include_globs=None):
    """Returns the blame from the current HEAD of the repositories as a DataFrame.

    The DataFrame is grouped by committer name, so it will be the sum of all
    contributions to all repositories by each committer. As with the commit
    history method, extensions and ignore_dirs parameters can be passed to
    exclude certain directories, or focus on certain file extensions. The
    DataFrame will have the columns:

     * committer
     * loc

    :param committer: (optional, default=True) true if committer should be reported, false if author
    :param by: (optional, default=repository) whether to group by repository or by file
    :param ignore_globs: (optional, default=None) a list of globs to ignore, default none excludes nothing
    :param include_globs: (optinal, default=None) a list of globs to include, default of None includes everything.
    :return: DataFrame
    """
    import pandas as pd  # local import keeps the fix self-contained

    # Collect per-repo blames and concatenate once.
    # BUGFIX: DataFrame.append was deprecated and removed in pandas 2.0;
    # pd.concat is the supported replacement.
    blames = []
    for repo in self.repos:
        try:
            blames.append(repo.blame(committer=committer, by=by,
                                     ignore_globs=ignore_globs,
                                     include_globs=include_globs))
        except GitCommandError:
            print('Warning! Repo: %s couldnt be blamed' % (repo,))
    df = pd.concat(blames) if blames else None
    # Move the committer/author and file levels out of the index.
    df = df.reset_index(level=1)
    df = df.reset_index(level=1)
    # Aggregate lines of code per committer (or author), optionally per file.
    if committer:
        if by == 'repository':
            df = df.groupby('committer').agg({'loc': np.sum})
        elif by == 'file':
            df = df.groupby(['committer', 'file']).agg({'loc': np.sum})
    else:
        if by == 'repository':
            df = df.groupby('author').agg({'loc': np.sum})
        elif by == 'file':
            df = df.groupby(['author', 'file']).agg({'loc': np.sum})
    df = df.sort_values(by=['loc'], ascending=False)
    return df
async def connect(self, url, headers=None, transports=None,
                  engineio_path='engine.io'):
    """Connect to an Engine.IO server.

    :param url: The URL of the Engine.IO server. It can include custom
                query string parameters if required by the server.
    :param headers: A dictionary with custom headers to send with the
                    connection request.
    :param transports: The list of allowed transports. Valid transports
                       are ``'polling'`` and ``'websocket'``. If not
                       given, the polling transport is connected first,
                       then an upgrade to websocket is attempted.
    :param engineio_path: The endpoint where the Engine.IO server is
                          installed. The default value is appropriate for
                          most cases.

    Note: this method is a coroutine.

    Example usage::

        eio = engineio.Client()
        await eio.connect('http://localhost:5000')
    """
    if self.state != 'disconnected':
        raise ValueError('Client is not in a disconnected state')
    # BUGFIX: avoid a mutable default argument for ``headers``.
    if headers is None:
        headers = {}
    valid_transports = ['polling', 'websocket']
    if transports is not None:
        # Accept a single transport name as a plain string.
        if isinstance(transports, six.text_type):
            transports = [transports]
        transports = [transport for transport in transports
                      if transport in valid_transports]
        if not transports:
            raise ValueError('No valid transports provided')
    self.transports = transports or valid_transports
    self.queue = self.create_queue()
    # Dispatch to _connect_polling or _connect_websocket.
    return await getattr(
        self, '_connect_' + self.transports[0])(url, headers, engineio_path)
|
def _expand(self, pos):
    """Splits sublists that are more than double the load level.

    Updates the index when the sublist length is less than double the load
    level. This requires incrementing the nodes in a traversal from the
    leaf node to the root. For an example traversal see self._loc.
    """
    _lists = self._lists
    _keys = self._keys
    _index = self._index
    if len(_keys[pos]) > self._dual:
        # Sublist is oversized: split off everything past _load into a new
        # sublist inserted at pos + 1.
        _maxes = self._maxes
        _load = self._load
        _lists_pos = _lists[pos]
        _keys_pos = _keys[pos]
        half = _lists_pos[_load:]
        half_keys = _keys_pos[_load:]
        del _lists_pos[_load:]
        del _keys_pos[_load:]
        # The truncated sublist's maximum is now its last remaining key.
        _maxes[pos] = _keys_pos[-1]
        _lists.insert(pos + 1, half)
        _keys.insert(pos + 1, half_keys)
        _maxes.insert(pos + 1, half_keys[-1])
        # The structural change invalidates the positional index; clear it
        # so it will be rebuilt lazily.
        del _index[:]
    else:
        if _index:
            # One item was added without splitting: increment the subtree
            # counts on the path from this leaf up to the root.
            child = self._offset + pos
            while child:
                _index[child] += 1
                child = (child - 1) >> 1
            _index[0] += 1
def import_family(self, rfa_file):
    """Append an import-family entry to the journal.

    This instructs Revit to import a family into the opened model.

    Args:
        rfa_file (str): full path of the family file
    """
    entry = templates.IMPORT_FAMILY.format(family_file=rfa_file)
    self._add_entry(entry)
def compute_path(self):
    """Compute the min cost path between the two waves, and return it.

    Return the computed path as a tuple with two elements, each being a
    :class:`numpy.ndarray` (1D) of ``int`` indices::

        ([r_1, r_2, ..., r_k], [s_1, s_2, ..., s_k])

    where ``r_i`` are the indices in the real wave and ``s_i`` are the
    indices in the synthesized wave, and ``k`` is the length of the min
    cost path.

    Return ``None`` if the accumulated cost matrix cannot be computed
    because one of the two waves is empty after masking (if requested).

    :rtype: tuple (see above)
    :raises: RuntimeError: if both the C extension and
             the pure Python code did not succeed.
    """
    self._setup_dtw()
    if self.dtw is None:
        self.log(u"Inner self.dtw is None => returning None")
        return None
    self.log(u"Computing path...")
    wave_path = self.dtw.compute_path()
    self.log(u"Computing path... done")
    self.log(u"Translating path to full wave indices...")
    # Split (real, synt) pairs into two parallel index arrays.
    real_indices = numpy.array([t[0] for t in wave_path])
    synt_indices = numpy.array([t[1] for t in wave_path])
    if self.rconf.mmn:
        # Masked-middle mode: map masked indices back to full-wave indices.
        self.log(u"Translating real indices with masked_middle_map...")
        real_indices = self.real_wave_mfcc.masked_middle_map[real_indices]
        # Anchor the path start at the end of the real wave's head.
        real_indices[0] = self.real_wave_mfcc.head_length
        self.log(u"Translating real indices with masked_middle_map... done")
        self.log(u"Translating synt indices with masked_middle_map...")
        synt_indices = self.synt_wave_mfcc.masked_middle_map[synt_indices]
        self.log(u"Translating synt indices with masked_middle_map... done")
    else:
        # No masking: only the real wave's head offset must be added.
        self.log(u"Translating real indices by adding head_length...")
        real_indices += self.real_wave_mfcc.head_length
        self.log(u"Translating real indices by adding head_length... done")
        self.log(u"Nothing to do with synt indices")
    self.log(u"Translating path to full wave indices... done")
    return (real_indices, synt_indices)
|
def do_tap(self, x, y):
    """Simulate a tap (click) operation.

    Args:
        x, y (int): position in scaled coordinates
    """
    # Convert to real device coordinates before tapping.
    real_x = x / self.scale
    real_y = y / self.scale
    self.session.tap(real_x, real_y)
def create_heroku_connect_schema(using=DEFAULT_DB_ALIAS):
    """Create Heroku Connect schema.

    Note:
        This function is only meant to be used for local development.
        In a production environment the schema will be created by
        Heroku Connect.

    Args:
        using (str): Alias for database connection.

    Returns:
        bool: ``True`` if the schema was created, ``False`` if the
            schema already exists.
    """
    connection = connections[using]
    with connection.cursor() as cursor:
        cursor.execute(_SCHEMA_EXISTS_QUERY, [settings.HEROKU_CONNECT_SCHEMA])
        schema_exists = cursor.fetchone()[0]
        if schema_exists:
            return False
        # AsIs injects the schema name unquoted: identifiers cannot be
        # passed as regular (quoted) query parameters.
        cursor.execute("CREATE SCHEMA %s;", [AsIs(settings.HEROKU_CONNECT_SCHEMA)])
    with connection.schema_editor() as editor:
        for model in get_heroku_connect_models():
            editor.create_model(model)
        # Needs PostgreSQL and database superuser privileges (which is the case on Heroku):
        editor.execute('CREATE EXTENSION IF NOT EXISTS "hstore";')
        # Local import avoids a circular import at module load time —
        # TODO confirm against the package layout.
        from heroku_connect.models import (TriggerLog, TriggerLogArchive)
        for cls in [TriggerLog, TriggerLogArchive]:
            editor.create_model(cls)
    return True
|
def prune_by_ngram_count(self, minimum=None, maximum=None, label=None):
    """Removes results rows whose total n-gram count (across all
    works bearing this n-gram) is outside the range specified by
    `minimum` and `maximum`.

    For each text, the count used as part of the sum across all
    works is the maximum count across the witnesses for that work.

    If `label` is specified, the works checked are restricted to
    those associated with `label`.

    :param minimum: minimum n-gram count
    :type minimum: `int`
    :param maximum: maximum n-gram count
    :type maximum: `int`
    :param label: optional label to restrict requirement to
    :type label: `str`
    """
    self._logger.info('Pruning results by n-gram count')

    def calculate_total(group):
        # Per n-gram: sum, over works, of the maximum count among that
        # work's witnesses; broadcast it to every row of the group.
        work_grouped = group.groupby(constants.WORK_FIELDNAME, sort=False)
        total_count = work_grouped[constants.COUNT_FIELDNAME].max().sum()
        group['total_count'] = pd.Series([total_count] * len(group.index), index=group.index)
        return group

    # self._matches may be empty, in which case not only is there
    # no point trying to do the pruning, but it will raise an
    # exception due to referencing the column 'total_count' which
    # won't have been added. Therefore just return immediately.
    if self._matches.empty:
        return
    matches = self._matches
    if label is not None:
        matches = matches[matches[constants.LABEL_FIELDNAME] == label]
    matches = matches.groupby(constants.NGRAM_FIELDNAME, sort=False).apply(calculate_total)
    ngrams = None
    # NOTE(review): truthiness tests mean minimum=0 / maximum=0 are
    # treated as "not specified"; also, calling with neither bound set
    # leaves ngrams as None for the isin() below — confirm intended.
    if minimum:
        ngrams = matches[matches['total_count'] >= minimum][constants.NGRAM_FIELDNAME].unique()
    if maximum:
        max_ngrams = matches[matches['total_count'] <= maximum][constants.NGRAM_FIELDNAME].unique()
        if ngrams is None:
            ngrams = max_ngrams
        else:
            ngrams = list(set(ngrams) & set(max_ngrams))
    # Keep only rows whose n-gram survived the count filters.
    self._matches = self._matches[self._matches[constants.NGRAM_FIELDNAME].isin(ngrams)]
|
def setup_logger(debug, color):
    """Configure the 'exifread' logger."""
    log_level = logging.DEBUG if debug else logging.INFO
    logger = logging.getLogger('exifread')
    logger.addHandler(Handler(log_level, debug, color))
    logger.setLevel(log_level)
|
def text_to_data(self, text, elt, ps):
    '''Convert text into typecode specific data (an xsd:duration value).

    Returns None when text is None; raises EvaluateException when the
    text is not a legal duration.
    '''
    if text is None:
        return None
    m = Duration.lex_pattern.match(text)
    if m is None:
        raise EvaluateException('Illegal duration', ps.Backtrace(elt))
    d = m.groupdict()
    # A 'T' separator without any time components is illegal.
    if d['T'] and (d['h'] is None and d['m'] is None and d['s'] is None):
        raise EvaluateException('Duration has T without time')
    try:
        retval = _dict_to_tuple(d)
    # BUGFIX: 'except ValueError, e' is Python-2-only syntax; 'as'
    # works on Python 2.6+ and Python 3.
    except ValueError as e:
        raise EvaluateException(str(e))
    # Wrap in the configured Python class, if any.
    if self.pyclass is not None:
        return self.pyclass(retval)
    return retval
|
def read(self, size):
    """Read bytes from the stream, blocking to match the sample rate.

    Args:
        size: number of bytes to read from the stream.
    """
    # Sleep off any remaining time budget from the previous read so data
    # is delivered at (approximately) the configured sample rate.
    remaining = self._sleep_until - time.time()
    if remaining > 0:
        time.sleep(remaining)
    self._sleep_until = time.time() + self._sleep_time(size)
    if self._wavep:
        data = self._wavep.readframes(size)
    else:
        data = self._fp.read(size)
    # At the end of the audio stream, pad the remainder with silence (zeros).
    return data if data else b'\x00' * size
def get_thread_block_dimensions(params, block_size_names=None):
    """Return the thread block size from tuning params, by naming convention."""
    names = block_size_names or default_block_size_names
    # Each axis defaults when the tuning params omit it: x -> 256, y/z -> 1.
    bx = int(params.get(names[0], 256))
    by = int(params.get(names[1], 1))
    bz = int(params.get(names[2], 1))
    return (bx, by, bz)
def metadata(self, filename):
    '''Get some metadata for a given file.

    Can vary from a backend to another but some are always present:
    - `filename`: the base filename (without the path/prefix)
    - `url`: the file public URL
    - `checksum`: a checksum expressed in the form `algo:hash`
    - `mime`: the mime type
    - `modified`: the last modification date
    '''
    info = self.backend.metadata(filename)
    # Normalise the always-present keys on top of the backend data.
    info['filename'] = os.path.basename(filename)
    info['url'] = self.url(filename, external=True)
    return info
def result(self, timeout=None):
    """Return the last result of the callback chain, or raise the last
    exception thrown and not caught by an errback.

    This will block until the result is available. If a timeout is given
    and the call times out, raise a TimeoutError. If SIGINT is caught
    while waiting, raises CancelledError. If cancelled while waiting,
    raises CancelledError. This acts much like a
    pythonfutures.Future.result() call except the entire callback
    processing chain is performed first.
    """
    self._do_wait(timeout)
    if not self._exception:
        return self._result
    # Failure: _result holds the exception to raise; drop the stored
    # traceback references first.
    self._last_exception.exception = None
    self._last_exception.tb_info = None
    raise self._result
def host(self, hostname=None):
    """Get or set host (IPv4/IPv6 or hostname like 'plc.domain.net').

    :param hostname: hostname or IPv4/IPv6 address or None for get value
    :type hostname: str or None
    :returns: hostname or None if set fail
    :rtype: str or None
    """
    # Getter path, or setting the same value again: no work to do.
    if (hostname is None) or (hostname == self.__hostname):
        return self.__hostname
    # When the hostname changes, ensure the old socket is closed.
    self.close()
    # IPv4?
    try:
        socket.inet_pton(socket.AF_INET, hostname)
        self.__hostname = hostname
        return self.__hostname
    except socket.error:
        pass
    # IPv6?
    try:
        socket.inet_pton(socket.AF_INET6, hostname)
        self.__hostname = hostname
        return self.__hostname
    except socket.error:
        pass
    # DNS name?
    # BUGFIX: use a raw string — '\.' and '\-' are invalid escape
    # sequences in a normal string literal (DeprecationWarning today,
    # SyntaxError in future Python versions).
    if re.match(r'^[a-z][a-z0-9\.\-]+$', hostname):
        self.__hostname = hostname
        return self.__hostname
    else:
        return None
def resolve(self):
    """Resolves references in this model.

    Operates on a copy of the model; the resolved copy is returned.
    """
    model = self.copy()
    # Resolve references inside every component type.
    for ct in model.component_types:
        model.resolve_component_type(ct)
    # "Fatten" any components not already fattened.
    for c in model.components:
        if c.id not in model.fat_components:
            model.add(model.fatten_component(c))
    # NOTE(review): 'ct' here is the leftover loop variable from the
    # component-types loop above (source indentation was ambiguous);
    # confirm whether this constants loop should be nested per type.
    for c in ct.constants:
        c2 = c.copy()
        c2.numeric_value = model.get_numeric_value(c2.value, c2.dimension)
        model.add(c2)
    return model
def add(self, key, value, expire=0, noreply=None):
    """The memcached "add" command.

    Args:
        key: str, see class docs for details.
        value: str, see class docs for details.
        expire: optional int, number of seconds until the item is expired
            from the cache, or zero for no expiry (the default).
        noreply: optional bool, True to not wait for the reply (defaults
            to self.default_noreply).

    Returns:
        If noreply is True, the return value is always True. Otherwise the
        return value is True if the value was stored, and False if it was
        not (because the key already existed).
    """
    effective_noreply = self.default_noreply if noreply is None else noreply
    results = self._store_cmd(b'add', {key: value}, expire, effective_noreply)
    return results[key]
def __at_om_to_im(self, om):
    """Convert an "outer" access mode to an "inner" access mode.

    Returns a tuple of:
    (<original outer mode>, <system access mode>, <is append>,
     <is universal newlines>).
    """
    original_om = om
    # A leading 'U' requests universal-newline handling.
    if om[0] == 'U':
        om = om[1:]
        is_um = True
    else:
        is_um = False
    # Map the remaining mode string to (system flags, is-append).
    mode_table = {
        'r': (O_RDONLY, False),
        'w': (O_WRONLY | O_CREAT | O_TRUNC, False),
        'a': (O_WRONLY | O_CREAT, False),
        'r+': (O_RDWR | O_CREAT, False),
        'w+': (O_RDWR | O_CREAT | O_TRUNC, False),
        'a+': (O_RDWR | O_CREAT, True),
    }
    entry = mode_table.get(om)
    if entry is None:
        raise Exception("Outer access mode [%s] is invalid." % (original_om))
    sys_mode, is_append = entry
    return (original_om, sys_mode, is_append, is_um)
def repr_tree(self):
    """Reconstruct the represented tree as a DiGraph to preserve the
    current rootedness."""
    import utool as ut
    import networkx as nx
    graph = nx.DiGraph()
    # Walk consecutive value pairs; add each edge oriented u -> v unless
    # the reverse orientation is already present.
    for u, v in ut.itertwo(self.values()):
        if not graph.has_edge(v, u):
            graph.add_edge(u, v)
    return graph
def _loadData(self, data):
    """Load attribute values from a Plex XML response element."""
    self._data = data
    attrib = data.attrib
    self.listType = 'video'
    # Timestamps are converted to datetime; numeric fields are cast to int.
    self.addedAt = utils.toDatetime(attrib.get('addedAt'))
    self.key = attrib.get('key', '')
    self.lastViewedAt = utils.toDatetime(attrib.get('lastViewedAt'))
    self.librarySectionID = attrib.get('librarySectionID')
    self.ratingKey = utils.cast(int, attrib.get('ratingKey'))
    self.summary = attrib.get('summary')
    self.thumb = attrib.get('thumb')
    self.title = attrib.get('title')
    # titleSort falls back to the plain title when absent.
    self.titleSort = attrib.get('titleSort', self.title)
    self.type = attrib.get('type')
    self.updatedAt = utils.toDatetime(attrib.get('updatedAt'))
    self.viewCount = utils.cast(int, attrib.get('viewCount', 0))
def compose_headers(self, req, headers=None, opt=None, as_dict=False):
    """A utility to compose headers from pyswagger.io.Request and customized headers.

    :param req: the request object; its ``header`` dict is always included
    :param headers: customized headers, either a dict or a list of (key, value) tuples
    :param opt: client options; ``BaseClient.join_headers`` requests that
        duplicate header keys be joined into one comma-separated value
    :param as_dict: return a dict instead of a list of tuples
    :return: list of tuple (key, value) when as_dict is False, else dict
    """
    if headers is None:
        return list(req.header.items()) if not as_dict else req.header
    opt = opt or {}
    join_headers = opt.pop(BaseClient.join_headers, None)
    if as_dict and not join_headers:
        # Fast path for the common case. Copy instead of updating the
        # caller's dict in place (previously req.header leaked into the
        # headers object passed in by the caller).
        merged = dict(headers)
        merged.update(req.header)
        return merged
    # include Request.header
    aggregated_headers = list(req.header.items())
    # include customized headers
    if isinstance(headers, list):
        aggregated_headers.extend(headers)
    elif isinstance(headers, dict):
        aggregated_headers.extend(headers.items())
    else:
        raise Exception('unknown type as header: {}'.format(str(type(headers))))
    if join_headers:
        # Collapse duplicate keys into a single comma-separated value,
        # preserving first-seen key order.
        joined = {}
        for key, value in aggregated_headers:
            joined[key] = ','.join([joined[key], value]) if key in joined else value
        aggregated_headers = list(joined.items())
    return dict(aggregated_headers) if as_dict else aggregated_headers
|
def toggle_shade(self, shade):
    """Overlay (or remove) a semi-transparent shade on the tile's image.

    Inputs:
        shade - key of the shade to toggle on or off. Blue and red
                shades are available by default.
    """
    # Flip the requested shade's active flag.
    entry = self.shades[shade]
    entry[0] = 0 if entry[0] else 1
    # Redraw: base picture first, then every currently-active overlay.
    self.image.blit(self.pic, (0, 0))
    for name in self.shades:
        if self.shades[name][0]:
            self.image.blit(self.shades[name][1], (0, 0))
|
def key(
    seq: Sequence,
    tooth: Callable[[Sequence], str] = (
        lambda seq: str(random.SystemRandom().choice(seq)).strip()
    ),
    nteeth: int = 6,
    delimiter: str = ' ',
) -> str:
    """Concatenate ``nteeth`` strings produced by ``tooth`` using ``delimiter``."""
    parts = [tooth(seq) for _ in range(nteeth)]
    return delimiter.join(parts)
|
def find_bad_footnote_urls(tagged_lines, include_tags=None):
    """Find lines in the list of 2-tuples of adoc-tagged lines that contain bad footnotes (only urls)

    >>> sections = get_tagged_sections(BOOK_PATH)
    >>> tagged_lines = list(sections[0][1])
    >>> find_bad_footnote_urls(tagged_lines)
    [[30, 'https://spacy.io/usage/linguistic-features#rule-based-morphology']]
    """
    section_baddies = []
    logger.debug(tagged_lines[:2])
    for lineno, (tag, line) in enumerate(tagged_lines):
        # Inspect a line when no tag filter applies, or when its tag
        # matches one of the requested tags exactly or by prefix.
        wanted = (
            tag is None
            or include_tags is None
            or tag in include_tags
            or any(tag.startswith(t) for t in include_tags)
        )
        if not wanted:
            continue
        line_baddies = get_line_bad_footnotes(line=line, tag=tag)
        # First element appears to be the line itself; keep only the urls.
        if line_baddies and len(line_baddies) > 1:
            section_baddies.append([lineno] + line_baddies[1:])
    return section_baddies
|
def is_uid(str):
    """Check whether a string is a 16-character identifier made of
    hex digits and dots.

    Input: string to check
    Output: True if UID, otherwise False
    """
    import re
    if len(str) != 16:
        return False
    # Valid when no character outside [.a-f0-9] is present (case-insensitive).
    return re.search(r'[^\.a-f0-9]', str.lower()) is None
|
def count(start=0, step=1, *, interval=0):
    """Generate consecutive numbers indefinitely.

    Optional starting point and increment default to ``0`` and ``1``.
    An optional ``interval`` can be given to space the values out.
    """
    numbers = from_iterable.raw(itertools.count(start, step))
    if not interval:
        return numbers
    return time.spaceout.raw(numbers, interval)
|
def sample(self, start_state=None, size=1, return_type="dataframe"):
    """Sample from the Markov Chain.

    Parameters:
        start_state: dict or array-like iterable
            Representing the starting states of the variables. If None is
            passed, a random start_state is chosen.
        size: int
            Number of samples to be generated.
        return_type: string (dataframe | recarray)
            Return type for samples, either of 'dataframe' or 'recarray'.
            Defaults to 'dataframe'

    Returns
        sampled: A pandas.DataFrame or a numpy.recarray object depending
            upon return_type argument; the generated samples

    Examples:
    >>> from pgmpy.factors import DiscreteFactor
    >>> from pgmpy.sampling import GibbsSampling
    >>> from pgmpy.models import MarkovModel
    >>> model = MarkovModel([('A', 'B'), ('C', 'B')])
    >>> factor_ab = DiscreteFactor(['A', 'B'], [2, 2], [1, 2, 3, 4])
    >>> factor_cb = DiscreteFactor(['C', 'B'], [2, 2], [5, 6, 7, 8])
    >>> model.add_factors(factor_ab, factor_cb)
    >>> gibbs = GibbsSampling(model)
    >>> gibbs.sample(size=4, return_type='dataframe')
       A  B  C
    0  0  1  1
    1  1  0  0
    2  1  1  0
    3  1  1  1
    """
    # Initialise the chain: use the supplied start_state when given,
    # otherwise draw a random state only if none is set yet.
    if start_state is None and self.state is None:
        self.state = self.random_state()
    elif start_state is not None:
        self.set_start_state(start_state)
    # One integer field per variable in the output record array.
    types = [(var_name, 'int') for var_name in self.variables]
    sampled = np.zeros(size, dtype=types).view(np.recarray)
    # The current state is emitted as the first sample.
    sampled[0] = tuple([st for var, st in self.state])
    for i in range(size - 1):
        # One Gibbs sweep: resample each variable conditioned on the
        # current values of all the other variables.
        for j, (var, st) in enumerate(self.state):
            other_st = tuple(st for v, st in self.state if var != v)
            next_st = sample_discrete(list(range(self.cardinalities[var])), self.transition_models[var][other_st])[0]
            self.state[j] = State(var, next_st)
        sampled[i + 1] = tuple([st for var, st in self.state])
    return _return_samples(return_type, sampled)
|
def _on_cluster_discovery(self, future):
    """Invoked when the Redis server has responded to the ``CLUSTER_NODES``
    command.

    :param future: The future containing the response from Redis
    :type future: tornado.concurrent.Future
    """
    LOGGER.debug('_on_cluster_discovery(%r)', future)
    common.maybe_raise_exception(future)
    for node in future.result():
        name = '{}:{}'.format(node.ip, node.port)
        if name not in self._cluster:
            # First time we see this node: open a new connection to it.
            self._create_cluster_connection(node)
            continue
        LOGGER.debug('Updating cluster connection info for %s:%s', node.ip, node.port)
        conn = self._cluster[name]
        conn.set_slots(node.slots)
        conn.set_read_only('slave' in node.flags)
    self._discovery = True
|
def create_ver_browser(self, layout):
    """Create a version browser and insert it into the given layout.

    :param layout: the layout to insert the browser into
    :type layout: QLayout
    :returns: the created browser
    :rtype: :class:`jukeboxcore.gui.widgets.browser.ComboBoxBrowser`
    :raises: None
    """
    browser = ComboBoxBrowser(1, headers=['Version:'])
    # Insert at index 1 so the browser sits after the first widget.
    layout.insertWidget(1, browser)
    return browser
|
def selective_download(name, oldest, newest):
    """Download feed enclosures between the *newest* and *oldest* indices.

    Note: RSS feeds are counted backwards, default newest is 0, the most recent.

    :param name: feed name, resolved into a feed URL by ``resolve_name``
    :param oldest: index (from the newest end) of the oldest item wanted
    :param newest: index of the newest item wanted (0 == most recent)
    """
    # resolve_name() appears to expect bytes — encode on Python 3 only.
    if six.PY3:
        name = name.encode("utf-8")
    feed = resolve_name(name)
    if six.PY3:
        feed = feed.decode()
    d = feedparser.parse(feed)
    logger.debug(d)
    # Probe the requested index so we can fail early with a clear message.
    try:
        d.entries[int(oldest)]
    except IndexError:
        print("Error feed does not contain this many items.")
        print("Hitman thinks there are %d items in this feed." % len(d.entries))
        return
    for url in [q.enclosures[0]['href'] for q in d.entries[int(newest):int(oldest)]]:
        # iterate over urls in feed from newest to oldest feed items.
        url = str(url)
        with Database("downloads") as db:
            # Skip enclosures already downloaded (keyed by the filename part).
            if url.split('/')[-1] not in db:
                # download(url, name, feed)
                with Database("settings") as settings:
                    if 'dl' in settings:
                        dl_dir = settings['dl']
                    else:
                        # Default download directory is ~/Downloads.
                        dl_dir = os.path.join(os.path.expanduser("~"), "Downloads")
                requests_get(url, dl_dir)
|
def random_string(**kwargs):
    """Generate a random string, by default 10 characters drawn from
    digits and ascii lowercase letters.

    Keyword arguments:
        length -- number of characters (default 10)
        pool   -- characters to draw from; must be a sequence of strings
                  (default digits + ascii lowercase)
    """
    length = kwargs.get('length', 10)
    alphabet = kwargs.get('pool') or string.digits + string.ascii_lowercase
    rng = random.SystemRandom()
    return ''.join(rng.choice(alphabet) for _ in range(length))
|
def save(self, data):
    """Save a document or list of documents.

    Returns False for empty input, True otherwise; raises when no
    database has been selected.
    """
    if not self.is_connected:
        raise Exception("No database selected")
    if not data:
        return False
    if isinstance(data, dict):
        # Wrap a plain dict in a couchdb Document before creating it.
        document = couchdb.Document()
        document.update(data)
        self.db.create(document)
    elif isinstance(data, (couchdb.Document, list)):
        # Documents and lists of documents go through bulk update.
        self.db.update(data)
    return True
|
def parse_changelog(args: Any) -> Tuple[str, str]:
    """Return an updated changelog and the list of changes."""
    with open("CHANGELOG.rst", "r") as file:
        content = file.read()
    # header = everything through the "Unreleased" underline,
    # changes = the unreleased entries, tail = the rest of the file.
    match = re.match(
        pattern=r"(.*?Unreleased\n---+\n)(.+?)(\n*[^\n]+\n---+\n.*)",
        string=content,
        flags=re.DOTALL,
    )
    assert match
    header, changes, tail = match.groups()
    # New section title "<tag> - <ISO date>", underlined with dashes.
    tag = "%s - %s" % (args.tag, datetime.date.today().isoformat())
    tagged = "\n%s\n%s\n%s" % (tag, "-" * len(tag), changes)
    if args.verbose:
        print(tagged)
    return "".join((header, tagged, tail)), changes
|
def create(self, phone_number, sms_capability, account_sid=values.unset,
           friendly_name=values.unset, unique_name=values.unset,
           cc_emails=values.unset, sms_url=values.unset,
           sms_method=values.unset, sms_fallback_url=values.unset,
           sms_fallback_method=values.unset, status_callback_url=values.unset,
           status_callback_method=values.unset,
           sms_application_sid=values.unset, address_sid=values.unset,
           email=values.unset, verification_type=values.unset,
           verification_document_sid=values.unset):
    """Create a new HostedNumberOrderInstance

    :param unicode phone_number: An E164 formatted phone number.
    :param bool sms_capability: Specify SMS capability to host.
    :param unicode account_sid: Account Sid.
    :param unicode friendly_name: A human readable description of this resource.
    :param unicode unique_name: A unique, developer assigned name of this HostedNumberOrder.
    :param unicode cc_emails: A list of emails.
    :param unicode sms_url: SMS URL.
    :param unicode sms_method: SMS Method.
    :param unicode sms_fallback_url: SMS Fallback URL.
    :param unicode sms_fallback_method: SMS Fallback Method.
    :param unicode status_callback_url: Status Callback URL.
    :param unicode status_callback_method: Status Callback Method.
    :param unicode sms_application_sid: SMS Application Sid.
    :param unicode address_sid: Address sid.
    :param unicode email: Email.
    :param HostedNumberOrderInstance.VerificationType verification_type: Verification Type.
    :param unicode verification_document_sid: Verification Document Sid
    :returns: Newly created HostedNumberOrderInstance
    :rtype: twilio.rest.preview.hosted_numbers.hosted_number_order.HostedNumberOrderInstance
    """
    # values.of() drops any parameter still set to values.unset, so only
    # explicitly supplied fields are sent to the API.
    data = values.of({
        'PhoneNumber': phone_number,
        'SmsCapability': sms_capability,
        'AccountSid': account_sid,
        'FriendlyName': friendly_name,
        'UniqueName': unique_name,
        # serialize.map applies the identity to each entry of the list.
        'CcEmails': serialize.map(cc_emails, lambda e: e),
        'SmsUrl': sms_url,
        'SmsMethod': sms_method,
        'SmsFallbackUrl': sms_fallback_url,
        'SmsFallbackMethod': sms_fallback_method,
        'StatusCallbackUrl': status_callback_url,
        'StatusCallbackMethod': status_callback_method,
        'SmsApplicationSid': sms_application_sid,
        'AddressSid': address_sid,
        'Email': email,
        'VerificationType': verification_type,
        'VerificationDocumentSid': verification_document_sid,
    })
    payload = self._version.create('POST', self._uri, data=data, )
    return HostedNumberOrderInstance(self._version, payload, )
|
def _FlowProcessingRequestHandlerLoop ( self , handler ) :
"""The main loop for the flow processing request queue ."""
|
while not self . flow_processing_request_handler_stop :
try :
msgs = self . _LeaseFlowProcessingReqests ( )
if msgs :
for m in msgs :
self . flow_processing_request_handler_pool . AddTask ( target = handler , args = ( m , ) )
else :
time . sleep ( self . _FLOW_REQUEST_POLL_TIME_SECS )
except Exception as e : # pylint : disable = broad - except
logging . exception ( "_FlowProcessingRequestHandlerLoop raised %s." , e )
break
|
def density_2d(self, x, y, Rs, rho0, r_core, center_x=0, center_y=0):
    """Projected two-dimensional NFW profile with core (kappa * Sigma_crit).

    The docstring previously documented parameters (``R``, ``r200``) that
    do not exist in the signature; it now matches the actual arguments.

    :param x: x-coordinate(s) of interest
    :type x: float / numpy array
    :param y: y-coordinate(s) of interest
    :type y: float / numpy array
    :param Rs: scale radius
    :type Rs: float
    :param rho0: density normalization (characteristic density)
    :type rho0: float
    :param r_core: core radius
    :type r_core: float
    :param center_x: profile center, x-coordinate
    :param center_y: profile center, y-coordinate
    :return: projected density at R = sqrt((x-cx)^2 + (y-cy)^2)
    """
    x_ = x - center_x
    y_ = y - center_y
    R = np.sqrt(x_ ** 2 + y_ ** 2)
    # Dimensionless core radius and projected radius (in units of Rs).
    b = r_core * Rs ** -1
    # Renamed local: the original rebound the parameter `x` here.
    x_nfw = R * Rs ** -1
    Fx = self._F(x_nfw, b)
    return 2 * rho0 * Rs * Fx
|
def prepare_bucket(self):
    """Resets and creates the destination bucket
    (only called if --create is true).

    :return:
    """
    self.logger.info('Deleting old bucket first')
    del_url = '{0}/buckets/{1}'.format(self.cluster_prefix, self.bucket)
    r = self._htsess.delete(del_url)
    try:
        r.raise_for_status()
    except Exception:
        # Deletion may legitimately fail (e.g. the bucket does not exist
        # yet): log and continue with creation. Was a bare `except:`,
        # which also swallowed SystemExit/KeyboardInterrupt.
        self.logger.exception("Couldn't delete bucket")
    cr_url = '{0}/buckets'.format(self.cluster_prefix)
    data = {
        'name': self.bucket,
        'ramQuotaMB': '{0}'.format(self.quota),
        'bucketType': 'couchbase',
        'authType': 'sasl',
        'saslPassword': '',
        'replicaNumber': '0',
    }
    # Creation errors, unlike deletion errors, are fatal.
    r = self._htsess.post(cr_url, data)
    r.raise_for_status()
|
def dump(self, config, instance, file_object, prefer=None, **kwargs):
    """An abstract method that dumps to a given file object.

    :param class config: The config class of the instance
    :param object instance: The instance to dump
    :param file file_object: The file object to dump to
    :param str prefer: The preferred serialization module name
    """
    serialized = self.dumps(config, instance, prefer=prefer, **kwargs)
    file_object.write(serialized)
|
def buildcss(app, buildpath, imagefile):
    """Create CSS file."""
    cfg = app.config
    # Defaults, each overridable through the extension's config values.
    div = cfg.sphinxmark_div if cfg.sphinxmark_div != 'default' else 'body'
    repeat = 'no-repeat' if cfg.sphinxmark_repeat is False else 'repeat-y'
    attachment = 'fixed' if cfg.sphinxmark_fixed is True else 'scroll'
    position = 'center'
    border = cfg.sphinxmark_border
    if border in ('left', 'right'):
        css = template('border', div=div, image=imagefile, side=border)
    else:
        css = template('watermark', div=div, image=imagefile, repeat=repeat,
                       position=position, attachment=attachment)
    LOG.debug('[sphinxmark] Template: ' + css)
    cssname = 'sphinxmark.css'
    with open(os.path.join(buildpath, cssname), 'w') as f:
        f.write(css)
    return cssname
|
def metric_detail(slug, with_data_table=False):
    """Template Tag to display a metric's *current* detail.

    * ``slug`` -- the metric's unique slug
    * ``with_data_table`` -- if True, prints the raw data in a table.
    """
    r = get_r()
    granularities = list(r._granularities())
    metric_values = r.get_metric(slug)
    # Pair each granularity with its current value, preserving order.
    metrics_data = [(g, metric_values[g]) for g in granularities]
    return {
        'granularities': [g.title() for g in granularities],
        'slug': slug,
        'metrics': metrics_data,
        'with_data_table': with_data_table,
    }
|
def updateIDs(self, ginfo, tag=None, debug=False):
    """Ensure all players' playerIDs are correct given the game's info.

    :param ginfo: SC2APIProtocol.ResponseGameInfo; its ``player_info``
        entries are matched against the configured players.
    :param tag: label used only in debug output
    :param debug: when True, print each successful match
    :raises c.UnknownPlayer: when a reported player cannot be matched
    """
    # SC2APIProtocol.ResponseGameInfo attributes:
    #     map_name
    #     mod_names
    #     local_map_path
    #     player_info
    #     start_raw
    #     options
    thisPlayer = self.whoAmI()
    # parse ResponseGameInfo.player_info to validate player information
    # (SC2APIProtocol.PlayerInfo) against the specified configuration
    for pInfo in ginfo.player_info:
        pID = pInfo.player_id
        if pID == thisPlayer.playerID:
            continue
        # already updated
        pCon = c.types.PlayerControls(pInfo.type)
        rReq = c.types.SelectRaces(pInfo.race_requested)
        # ensure joined player is identified appropriately
        for p in self.players:
            if p.playerID and p.playerID != pID:
                continue
            # if this non-matching player already has a set playerID, it can't match
            if p.control == pCon and p.selectedRace == rReq:
                # matched player
                p.playerID = pID
                # updated player IDs should be saved into the game configuration
                if debug:
                    print("[%s] match contains %s." % (tag, p))
                pID = 0
                # declare that the player has been identified
                break
    # A non-zero pID after the loop means the last reported player was
    # never matched against any configured player.
    if pID:
        raise c.UnknownPlayer("could not match %s %s %s to any " "existing player of %s" % (pID, pCon, rReq, self.players))
|
def get_docker_memory(self, container_id, all_stats):
    """Return the container MEMORY.

    Input: container_id is the full container id;
           all_stats is the output of the stats method of the Docker API
    Output: a dict {'usage': ..., 'limit': ..., 'max_usage': ...}
    """
    ret = {}
    # 'rss'/'cache' do not exist anymore with Docker 1.11 (issue #848),
    # so only the top-level memory_stats counters are read.
    try:
        memory_stats = all_stats['memory_stats']
        ret['usage'] = memory_stats['usage']
        ret['limit'] = memory_stats['limit']
        ret['max_usage'] = memory_stats['max_usage']
    except (KeyError, TypeError) as e:
        # all_stats does not carry MEM information for this container
        logger.debug("docker plugin - Cannot grab MEM usage for container {} ({})".format(container_id, e))
        logger.debug(all_stats)
    return ret
|
def handle_client_stream(self, stream, is_unix=False):
    """Handles stream of data received from client.

    Reads until EOF (or timeout / SSL error), dispatches the received
    command and writes the response back on the same stream.

    :param stream: connected unix socket or SSL-wrapped stream
    :param is_unix: True for a plain unix socket (recv/send); False for
        an SSL stream (read/write).
    """
    assert stream
    data = []
    stream.settimeout(2)
    while True:
        try:
            buf = stream.recv(1024) if is_unix else stream.read(1024)
            if not buf:
                break
            data.append(buf)
        except (AttributeError, ValueError) as message:
            logger.error(message)
            return
        except ssl.SSLError as exception:
            # BUG FIX: `exception[0]` is not subscriptable on Python 3
            # and raised TypeError inside this handler; log the
            # exception object itself instead.
            logger.debug('Error: %s', exception)
            break
        except socket.timeout as exception:
            logger.debug('Error: %s', exception)
            break
    data = b''.join(data)
    if not data:
        logger.debug("Empty client stream")
        return
    try:
        response = self.handle_command(data)
    except OSPDError as exception:
        response = exception.as_xml()
        logger.debug('Command error: %s', exception.message)
    except Exception:
        # Unknown failure: report a generic fatal error to the client.
        logger.exception('While handling client command:')
        exception = OSPDError('Fatal error', 'error')
        response = exception.as_xml()
    send_method = stream.send if is_unix else stream.write
    self.write_to_stream(send_method, response)
|
def marshal_value(datatype, value):
    """Marshal a given string into a relevant Python type given the uPnP datatype.

    Assumes that the value has been pre-validated, so performs no checks.
    Returns a tuple pair of a boolean to say whether the value was
    marshalled and the (un)marshalled value.
    """
    # Find the first marshal function whose type set covers this datatype.
    marshaller = next(
        (func for types, func in MARSHAL_FUNCTIONS if datatype in types), None)
    if marshaller is None:
        return False, value
    return True, marshaller(value)
|
def unwatch(connection, volume_id):
    """Remove watching of a volume.

    :type connection: boto.ec2.connection.EC2Connection
    :param connection: EC2 connection object
    :type volume_id: str
    :param volume_id: VolumeID to remove from the watchlist
    :returns: bool -- True if the watch removal was successful
    """
    try:
        volume = connection.get_all_volumes(volume_ids=[volume_id])[0]
        volume.remove_tag('AutomatedEBSSnapshots')
    except EC2ResponseError as error:
        # BUG FIX: previously the error was swallowed and the function
        # still logged success and returned True, contradicting the
        # documented return contract.
        logger.warning('Unable to remove {} from the watchlist: {}'.format(volume_id, error))
        return False
    logger.info('Removed {} from the watchlist'.format(volume_id))
    return True
|
def _select_designated_port(self, root_port):
    """DESIGNATED_PORT is a port of the side near the root bridge
    of each link. It is determined by the cost of each path, etc,
    same as ROOT_PORT.

    :param root_port: the bridge's current root port; its designated
        priority message is the reference for comparisons.
    :returns: list of ofport numbers selected as designated ports.
    """
    d_ports = []
    root_msg = root_port.designated_priority
    for port in self.ports.values():
        port_msg = port.designated_priority
        # Skip disabled ports and the root port itself.
        if (port.state is PORT_STATE_DISABLE or port.ofport.port_no == root_port.ofport.port_no):
            continue
        if (port_msg is None or (port_msg.root_id.value != root_msg.root_id.value)):
            # No BPDU seen, or the neighbor claims a different root:
            # this side becomes designated.
            d_ports.append(port.ofport.port_no)
        else:
            # Compare our root-path info against the neighbor's
            # (its path cost minus this port's cost, per the received BPDU).
            result = Stp.compare_root_path(root_msg.root_path_cost, port_msg.root_path_cost - port.path_cost, self.bridge_id.value, port_msg.designated_bridge_id.value, port.port_id.value, port_msg.designated_port_id.value)
            if result is SUPERIOR:
                d_ports.append(port.ofport.port_no)
    return d_ports
|
def page_slice(self):
    """Return the query (from, size) tuple (0-based), or None when no
    query is set."""
    if self.query is None:
        return None
    # Defaults mirror Elasticsearch paging: from=0, size=10.
    return self.query.get("from", 0), self.query.get("size", 10)
|
def toFile(self, filename):
    """Save the suffix array instance including all features attached in
    filename. Accept any filename following the _open conventions,
    for example if it ends with .gz the file created will be a compressed
    GZip file.

    Side effects: sets ``self.toFileTime`` (seconds spent saving) and
    ``self.sizeOfSavedFile`` (bytes, or "-1" when stdout was used).
    NOTE: this is Python 2 code (``print >>`` syntax).
    """
    start = _time()
    fd = _open(filename, "w")
    # Core state first, then one (values, default) pair per attached feature.
    savedData = [self.string, self.unit, self.voc, self.vocSize, self.SA, self.features]
    for featureName in self.features:
        featureValues = getattr(self, "_%s_values" % featureName)
        featureDefault = getattr(self, "%s_default" % featureName)
        savedData.append((featureValues, featureDefault))
    fd.write(_dumps(savedData, _HIGHEST_PROTOCOL))
    fd.flush()
    try:
        self.sizeOfSavedFile = getsize(fd.name)
    except OSError:
        # if stdout is used there is no regular file to stat
        self.sizeOfSavedFile = "-1"
    self.toFileTime = _time() - start
    if _trace:
        print >> _stderr, "toFileTime %.2fs" % self.toFileTime
    if _trace:
        print >> _stderr, "sizeOfSavedFile %sb" % self.sizeOfSavedFile
    fd.close()
|
def all_table_names_in_database(self, cache=False, cache_timeout=None, force=False):
    """Parameters need to be passed as keyword arguments.

    ``cache``/``cache_timeout``/``force`` are accepted for interface
    compatibility; fetching is delegated to the database engine spec.
    """
    if self.allow_multi_schema_metadata_fetch:
        return self.db_engine_spec.fetch_result_sets(self, 'table')
    return []
|
def search_videohub(cls, query, filters=None, status=None, sort=None, size=None, page=None):
    """searches the videohub given a query and applies given filters and other bits

    :see: https://github.com/theonion/videohub/blob/master/docs/search/post.md
    :see: https://github.com/theonion/videohub/blob/master/docs/search/get.md

    :param query: query terms to search by
    :type query: str
    :example query: "brooklyn hipsters"  # although, this is a little redundant...

    :param filters: video field value restrictions
    :type filters: dict
    :default filters: None
    :example filters: {"channel": "onion"} or {"series": "Today NOW"}

    :param status: limit the results to videos that are published, scheduled, draft
    :type status: str
    :default status: None
    :example status: "published" or "draft" or "scheduled"

    :param sort: video field related sorting
    :type sort: dict
    :default sort: None
    :example sort: {"title": "desc"} or {"description": "asc"}

    :param size: the page size (number of results)
    :type size: int
    :default size: None
    :example size: {"size": 20}

    :param page: the page number of the results
    :type page: int
    :default page: None
    :example page: {"page": 2}  # note, you should use `size` in conjunction with `page`

    :return: a dictionary of results and meta information
    :rtype: dict
    """
    # construct url (overridable via settings.VIDEOHUB_API_SEARCH_URL)
    url = getattr(settings, "VIDEOHUB_API_SEARCH_URL", cls.DEFAULT_VIDEOHUB_API_SEARCH_URL)
    # construct auth headers
    headers = {"Content-Type": "application/json", "Authorization": settings.VIDEOHUB_API_TOKEN, }
    # construct payload; optional arguments are only added when supplied
    payload = {"query": query, }
    if filters:
        assert isinstance(filters, dict)
        payload["filters"] = filters
    if status:
        assert isinstance(status, six.string_types)
        # status is expressed as an extra entry in the filters mapping
        payload.setdefault("filters", {})
        payload["filters"]["status"] = status
    if sort:
        assert isinstance(sort, dict)
        payload["sort"] = sort
    if size:
        assert isinstance(size, (six.string_types, int))
        payload["size"] = size
    if page:
        assert isinstance(page, (six.string_types, int))
        payload["page"] = page
    # send request
    res = requests.post(url, data=json.dumps(payload), headers=headers)
    # raise if not 200
    if res.status_code != 200:
        res.raise_for_status()
    # parse and return response
    return json.loads(res.content)
|
def remove_storage(self, storage):
    """Remove Storage from a Server.

    The Storage must be a reference to an object in
    Server.storage_devices or the method will throw an Exception.
    A Storage from get_storage(uuid) will not work as it is missing
    the 'address' property.
    """
    if hasattr(storage, 'address'):
        # Detach on the API side first, then drop our local reference.
        self.cloud_manager.detach_storage(server=self.uuid, address=storage.address)
        self.storage_devices.remove(storage)
        return
    raise Exception(('Storage does not have an address. ' 'Access the Storage via Server.storage_devices ' 'so they include an address. ' '(This is due how the API handles Storages)'))
|
def _validate_scales(self, proposal):
    """Validates the `scales` based on the mark's scaled attributes metadata.

    First checks for missing scales and then for 'rtype' compatibility.
    At this stage all values in self.scales are already known to be
    Scale instances.
    """
    scales = proposal.value
    for name in self.trait_names(scaled=True):
        trait = self.traits()[name]
        if name in scales:
            # Scale present: its range type must match the trait's 'rtype'.
            if scales[name].rtype != trait.get_metadata('rtype'):
                raise TraitError("Range type mismatch for scale %s." % name)
        elif not trait.allow_none:
            # Scale missing for a mandatory (non-None) data attribute.
            raise TraitError("Missing scale for data attribute %s." % name)
    return scales
|
def detach_lb_from_subnets(self, name, subnets):
    """Detaches load balancer from one or more subnets.

    :type name: string
    :param name: The name of the Load Balancer

    :type subnets: List of strings
    :param subnets: The name of the subnet(s) to detach.

    :rtype: List of strings
    :return: An updated list of subnets for this Load Balancer.
    """
    params = {'LoadBalancerName': name}
    self.build_list_params(params, subnets, 'Subnets.member.%d')
    # BUG FIX: the ELB API action is 'DetachLoadBalancerFromSubnets';
    # the previous 'Dettach...' spelling is not a valid action name.
    return self.get_list('DetachLoadBalancerFromSubnets', params, None)
|
def ensure_dtraj_list(dtrajs):
    r"""Makes sure that dtrajs is a list of discrete trajectories (array of int)"""
    if not isinstance(dtrajs, list):
        # A single trajectory object: normalize and wrap it.
        return [ensure_dtraj(dtrajs)]
    if is_list_of_int(dtrajs):
        # A flat list of ints is one trajectory, not a list of them.
        return [np.array(dtrajs, dtype=int)]
    # Normalize each element in place and return the same list object.
    for idx, traj in enumerate(dtrajs):
        dtrajs[idx] = ensure_dtraj(traj)
    return dtrajs
|
def put(self, entity):
    """Remember an entity's state to be saved during :meth:`commit`.

    .. note::
        Any existing properties for the entity will be replaced by those
        currently set on this instance. Already-stored properties which do
        not correspond to keys set on this instance will be removed from
        the datastore.

    .. note::
        Property values which are "text" ('unicode' in Python2, 'str' in
        Python3) map to 'string_value' in the datastore; values which are
        "bytes" ('str' in Python2, 'bytes' in Python3) map to 'blob_value'.

    When an entity has a partial key, calling :meth:`commit` sends it as
    an ``insert`` mutation and the key is completed. On return,
    the key for the ``entity`` passed in is updated to match the key ID
    assigned by the server.

    :type entity: :class:`google.cloud.datastore.entity.Entity`
    :param entity: the entity to be saved.

    :raises: :class:`~exceptions.ValueError` if the batch is not in
             progress, if entity has no key assigned, or if the key's
             ``project`` does not match ours.
    """
    # Guard clauses: batch state, key presence, then project match.
    if self._status != self._IN_PROGRESS:
        raise ValueError("Batch must be in progress to put()")
    if entity.key is None:
        raise ValueError("Entity must have a key")
    if self.project != entity.key.project:
        raise ValueError("Key must be from same project as batch")
    if entity.key.is_partial:
        # Track entities with partial keys so their IDs can be filled in
        # from the commit response.
        pb = self._add_partial_key_entity_pb()
        self._partial_key_entities.append(entity)
    else:
        pb = self._add_complete_key_entity_pb()
    _assign_entity_to_pb(pb, entity)
|
def run(self):
    """Download npm packages required by package.json and extract required
    files from them; fall back to prebuilt js files when npm is missing.
    """
    # NOTE: the docstring above was previously a dead string statement in
    # the middle of the function body.
    has_npm = npm_installation_check()
    if has_npm:
        run_npm_install()
    else:
        print("Warning: npm not installed using prebuilded js files!", file=sys.stderr)
    for js in JS_FILES:
        downloaded_js_name = os.path.join(TOP_DIR, js)
        installed_js_name = os.path.join(TOP_DIR, "sphinx_hwt", "html", js)
        if has_npm:
            # The npm build must have produced the file before we copy it.
            assert os.path.exists(downloaded_js_name), downloaded_js_name
            os.makedirs(os.path.dirname(installed_js_name), exist_ok=True)
            copyfile(downloaded_js_name, installed_js_name)
            print("copy generated from NPM packages", installed_js_name)
        elif os.path.exists(installed_js_name):
            print("using prebuilded", installed_js_name)
        else:
            raise Exception("Can not find npm,"
                            " which is required for the installation "
                            "and this package has no prebuilt js files")
|
def write(notebook, file_or_stream, fmt, version=nbformat.NO_CONVERT, **kwargs):
    """Write a notebook to a file"""
    # u'' + ... forces a text (unicode) result for Python 2 compatibility.
    content = u'' + writes(notebook, fmt, version, **kwargs)
    file_or_stream.write(content)
    if content.endswith(u'\n'):
        return
    # Add final newline #165
    file_or_stream.write(u'\n')
|
def _trim_value(self, value):
    """Trim double quotes off the ends of a value, un-escaping inner
    double quotes and literal backslashes. Also convert escapes to unicode.
    If the string is not quoted, return it unmodified.
    """
    if value[0] != '"':
        return value
    assert value[-1] == '"'
    # Strip the surrounding quotes, undo the two simple escapes, then
    # hand the rest to the generic unescape machinery.
    inner = value[1:-1].replace('\\"', '"').replace("\\\\", "\\")
    return Parser._unescape_re.sub(Parser._unescape_fn, inner)
|
def _bytes_to_uint_48 ( self , bytes_ ) :
"""Converts an array of 6 bytes to a 48bit integer .
: param data : bytearray to be converted to a 48bit integer
: type data : bytearray
: return : the integer
: rtype : int"""
|
return ( ( bytes_ [ 0 ] * pow ( 2 , 40 ) ) + ( bytes_ [ 1 ] * pow ( 2 , 32 ) ) + ( bytes_ [ 2 ] * pow ( 2 , 24 ) ) + ( bytes_ [ 3 ] << 16 ) + ( bytes_ [ 4 ] << 8 ) + bytes_ [ 4 ] )
|
def projection(radius=5e-6, sphere_index=1.339, medium_index=1.333,
               wavelength=550e-9, pixel_size=1e-7, grid_size=(80, 80),
               center=(39.5, 39.5)):
    """Optical path difference projection of a dielectric sphere.

    Parameters
    ----------
    radius: float
        Radius of the sphere [m]
    sphere_index: float
        Refractive index of the sphere
    medium_index: float
        Refractive index of the surrounding medium
    wavelength: float
        Vacuum wavelength of the imaging light [m]
    pixel_size: float
        Pixel size [m]
    grid_size: tuple of floats
        Resulting image size in x and y [px]
    center: tuple of floats
        Center position in image coordinates [px]

    Returns
    -------
    qpi: qpimage.QPImage
        Quantitative phase data set
    """
    cx, cy = center
    # pixel grid as a broadcastable column/row pair
    xx = np.arange(grid_size[0]).reshape(-1, 1)
    yy = np.arange(grid_size[1]).reshape(1, -1)
    # squared distance from the sphere surface in pixel coordinates
    radius_px = radius / pixel_size
    sq = radius_px ** 2 - (xx - cx) ** 2 - (yy - cy) ** 2
    # projected thickness of the sphere at each pixel [m]
    thickness = np.zeros_like(sq)
    inside = sq > 0
    thickness[inside] = 2 * np.sqrt(sq[inside]) * pixel_size
    # phase = delta_n * 2*pi * z / wavelength
    phase = (sphere_index - medium_index) * 2 * np.pi * thickness / wavelength
    meta_data = {"pixel size": pixel_size,
                 "wavelength": wavelength,
                 "medium index": medium_index,
                 "sim center": center,
                 "sim radius": radius,
                 "sim index": sphere_index,
                 "sim model": "projection",
                 }
    return qpimage.QPImage(data=phase, which_data="phase",
                           meta_data=meta_data)
|
def start_sync(self):
    """Start all the synchronization loops (sensor/effector controllers).

    Does nothing when synchronization is already running.
    """
    if self._syncing:
        return
    for controller in self._controllers:
        controller.start()
    for controller in self._controllers:
        controller.wait_to_start()
    self._primitive_manager.start()
    self._primitive_manager._running.wait()
    self._syncing = True
    logger.info('Starting robot synchronization.')
|
def show_code(self, x, file=None):
    """Print details of methods, functions, or code to *file*.

    If *file* is not provided, the output is printed on stdout.
    """
    # Delegate to the module-level helper, supplying the opcode/bytecode
    # version associated with this instance (self.opc.version).
    return _show_code(x, self.opc.version, file)
|
def auth_refresh(self, apikey=None, secret=None, email=None, password=None):
    """Renew the authentication token manually (POST to the /auth interface).

    :param apikey: Unique identifier for authorized use of the API
    :type apikey: str or None
    :param secret: The secret password corresponding to the API key.
    :type secret: str or None
    :param email: Email to use for authentication
    :type email: str or None
    :param password: Password corresponding to email
    :type password: str or None
    :returns: None
    """
    token = self.auth_token(apikey=apikey, secret=secret,
                            email=email, password=password)
    self._headers["Authorization"] = "Bearer %s" % token
    self._auth_token = token
    self._last_auth = datetime.utcnow()
|
def linsert(self, key, pivot, value, before=False):
    """Insert *value* into the list stored at *key*, either before or
    after the reference value *pivot*.
    """
    position = b'BEFORE' if before else b'AFTER'
    return self.execute(b'LINSERT', key, position, pivot, value)
|
def process_git_configs(git_short=''):
    """Retrieve _application.json_ files from GitLab.

    Args:
        git_short (str): Short Git representation of repository, e.g.
            forrest/core.

    Returns:
        collections.defaultdict: Configurations stored for each environment
        found.
    """
    LOG.info('Processing application.json files from GitLab "%s".', git_short)
    lookup = FileLookup(git_short=git_short)
    configs = process_configs(
        lookup,
        RUNWAY_BASE_PATH + '/application-master-{env}.json',
        RUNWAY_BASE_PATH + '/pipeline.json')
    # Record which commit of the config repository produced these settings.
    master_commit = lookup.project.commits.get('master')
    commit_id = master_commit.attributes['id']
    LOG.info('Commit ID used: %s', commit_id)
    configs['pipeline']['config_commit'] = commit_id
    return configs
|
def webhook():
    """Handle requests the Flask server receives at the `/webhook` URL.

    Most of the time the request is a genuine webhook notification from
    Nylas, but it could also be a fake notification from someone trying to
    fool our app — so the signature must be verified before processing.
    """
    # When first registering the webhook, Nylas probes the URL with a GET
    # request; echoing back the `challenge` parameter proves it is valid.
    if request.method == "GET" and "challenge" in request.args:
        print(" * Nylas connected to the webhook!")
        return request.args["challenge"]
    # Otherwise this is a POST notification; check the signature to decide
    # whether it is genuine or fake.
    is_genuine = verify_signature(
        message=request.data,
        key=app.config["NYLAS_OAUTH_CLIENT_SECRET"].encode("utf8"),
        signature=request.headers.get("X-Nylas-Signature"),
    )
    if not is_genuine:
        return "Signature verification failed!", 401
    # Genuine notification. Processing a delta might be slow or fail, so
    # push each one onto the Celery queue and reply to Nylas immediately.
    payload = request.get_json()
    for delta in payload["deltas"]:
        process_delta.delay(delta)
    # All deltas queued — acknowledge the notification.
    return "Deltas have been queued", 200
|
def init_app(self, app):
    """Bind this extension to a Sanic *app*.

    Validates ``app.config.GRIDFS_SETTINGS`` (a dict mapping bucket name to
    a ``(dburl, collection)`` pair, where ``dburl`` is either a connection
    string or a dict of client keyword arguments) and registers listeners
    that open the GridFS buckets on server start and close them on stop.

    :param app: the Sanic application instance
    :return: self, so the call can be chained
    :raises ValueError: when GRIDFS_SETTINGS is missing or not a dict
    """
    if app.config.GRIDFS_SETTINGS and isinstance(app.config.GRIDFS_SETTINGS, dict):
        self.GRIDFS_SETTINGS = app.config.GRIDFS_SETTINGS
        self.app = app
    else:
        # Bug fix: the message referred to GRIDFS_URIS, but the config key
        # actually read above is GRIDFS_SETTINGS.
        raise ValueError(
            "nonstandard sanic config GRIDFS_SETTINGS,GRIDFS_SETTINGS must be "
            "a Dict[Bucket_name,Tuple[dburl,collection]]")

    @app.listener("before_server_start")
    async def init_mongo_connection(app, loop):
        for bucket_name, (dburl, collection) in app.config.GRIDFS_SETTINGS.items():
            if isinstance(dburl, str):
                bucket = GridFSBucket(dburl, ioloop=loop, collection=collection).bucket
            else:
                # dburl is a dict of keyword arguments for the client.
                bucket = GridFSBucket(ioloop=loop, collection=collection, **dburl).bucket
            self.GridFSs[bucket_name] = bucket

    @app.listener("before_server_stop")
    async def sub_close(app, loop):
        log.info("mongo connection {numbr}".format(numbr=len(self.GridFSs)))
        for bucket_name, bucket in self.GridFSs.items():
            # Bug fix: ``close`` was referenced but never called, so the
            # underlying client connections were never actually closed.
            bucket.client.close()
            log.info("{bucket_name} connection closed".format(bucket_name=bucket_name))

    if "extensions" not in app.__dir__():
        app.extensions = {}
    app.extensions['SanicGridFS'] = self
    app.GridFS = self.GridFSs
    return self
|
def get_api_keys_of_account_group(self, account_id, group_id, **kwargs):  # noqa: E501
    """Get API keys of a group.  # noqa: E501

    An endpoint for listing the API keys of the group with details.
    **Example usage:** `curl https://api.us-east-1.mbedcloud.com/v3/accounts/{accountID}/policy-groups/{groupID}/api-keys -H 'Authorization: Bearer API_KEY'`  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass asynchronous=True

    >>> thread = api.get_api_keys_of_account_group(account_id, group_id, asynchronous=True)
    >>> result = thread.get()

    :param asynchronous bool
    :param str account_id: Account ID. (required)
    :param str group_id: The ID of the group whose API keys are retrieved. (required)
    :param int limit: The number of results to return (2-1000), default is 50.
    :param str after: The entity ID to fetch after the given one.
    :param str order: The order of the records based on creation time, ASC or DESC; by default ASC
    :param str include: Comma separated additional data to return. Currently supported: total_count
    :return: ApiKeyInfoRespList
        If the method is called asynchronously,
        returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    call = self.get_api_keys_of_account_group_with_http_info
    if kwargs.get('asynchronous'):
        # Asynchronous: hand back the request thread directly.
        return call(account_id, group_id, **kwargs)  # noqa: E501
    # Synchronous: unwrap and return the response data.
    (data) = call(account_id, group_id, **kwargs)  # noqa: E501
    return data
|
def set_actor(user, sender, instance, signal_duid, **kwargs):
    """Signal receiver with an extra, required 'user' kwarg.

    This method becomes a real (valid) signal receiver when it is curried
    with the actor.
    """
    if not hasattr(threadlocal, 'auditlog'):
        return
    # Ignore signals originating from a different request/thread context.
    if signal_duid != threadlocal.auditlog['signal_duid']:
        return
    try:
        app_label, model_name = settings.AUTH_USER_MODEL.split('.')
        auth_user_model = apps.get_model(app_label, model_name)
    except ValueError:
        # AUTH_USER_MODEL was not of the form "app.Model"; fall back to
        # the default django auth user model.
        auth_user_model = apps.get_model('auth', 'user')
    if sender == LogEntry and isinstance(user, auth_user_model) and instance.actor is None:
        instance.actor = user
        instance.remote_addr = threadlocal.auditlog['remote_addr']
|
def record_markdown(text, cellid):
    """Record the specified markdown text to the acorn database.

    Args:
        text (str): the *raw* markdown text entered into the cell in the
            ipython notebook.
        cellid: identifier of the notebook cell the text came from.
    """
    from acorn.logging.database import record
    from time import time
    ekey = "nb-{}".format(cellid)
    global _cellid_map
    if cellid not in _cellid_map:
        from acorn.logging.database import active_db
        from difflib import SequenceMatcher
        from acorn.logging.diff import cascade
        taskdb = active_db()
        if ekey not in taskdb.entities:
            # Compute a new ekey if possible with the most similar markdown
            # cell in the database.
            possible = [k for k in taskdb.entities if k[0:3] == "nb-"]
            maxkey, maxvalue = None, 0.
            for pkey in possible:
                sequence = [e["c"] for e in taskdb.entities[pkey]]
                state = ''.join(cascade(sequence))
                matcher = SequenceMatcher(a=state, b=text)
                ratio = matcher.quick_ratio()
                if ratio > maxvalue and ratio > 0.5:
                    maxkey, maxvalue = pkey, ratio
            # We expect the similarity to be at least 0.5; otherwise we decide
            # that it is a new cell.
            if maxkey is not None:
                # Bug fix: previously assigned `pkey` (whatever key the loop
                # ended on) instead of `maxkey` (the best-matching key).
                ekey = maxkey
        _cellid_map[cellid] = ekey
    ekey = _cellid_map[cellid]
    entry = {"m": "md", "a": None, "s": time(), "r": None, "c": text}
    record(ekey, entry, diff=True)
|
def CUnescape(text):
    """Unescape a text string with C-style escape sequences to UTF-8 bytes."""

    def ReplaceHex(m):
        # Only replace the match if the number of leading back slashes is odd.
        # i.e. the slash itself is not escaped.
        if len(m.group(1)) & 1:
            # Pad the single hex digit to two digits ('\xf' -> '\x0f');
            # group(1) is the run of backslashes, group(2) the hex digit.
            return m.group(1) + 'x0' + m.group(2)
        return m.group(0)
    # This is required because the 'string_escape' encoding doesn't
    # allow single-digit hex escapes (like '\xf').
    result = _CUNESCAPE_HEX.sub(ReplaceHex, text)
    if str is bytes:  # PY2
        return result.decode('string_escape')
    # PY3: first map characters above 0x7f through the escape table, then
    # round-trip through the codecs below so that the escape sequences decode
    # to the intended byte values.
    result = ''.join(_cescape_highbit_to_str[ord(c)] for c in result)
    return (result.encode('ascii')  # Make it bytes to allow decode.
            .decode('unicode_escape')
            # Make it bytes again to return the proper type.
            .encode('raw_unicode_escape'))
|
def rotate(self, l, u):
    """Rotate this vector in place by *l* radians around the axis *u*
    (assumed to be a unit vector — TODO confirm with callers), using the
    Rodrigues rotation formula. Returns self for chaining.
    """
    c = math.cos(l)
    s = math.sin(l)
    t = 1 - c
    # Rows of the rotation matrix applied to (self.x, self.y, self.z).
    nx = (c + u.x * u.x * t) * self.x + (u.x * u.y * t - u.z * s) * self.y + (u.x * u.z * t + u.y * s) * self.z
    ny = (u.y * u.x * t + u.z * s) * self.x + (c + u.y * u.y * t) * self.y + (u.y * u.z * t - u.x * s) * self.z
    nz = (u.z * u.x * t - u.y * s) * self.x + (u.z * u.y * t + u.x * s) * self.y + (c + u.z * u.z * t) * self.z
    self.x, self.y, self.z = nx, ny, nz
    return self
|
def optimize_structure_handler(rule, handler):
    """Produce an "optimized" version of handler for the dispatcher to
    limit reference lookups.

    The returned ``runner`` is a *generator function*: the unreachable
    ``yield`` below deliberately makes it one, so calling it yields a
    generator whose single step invokes ``handler`` and yields nothing.
    """

    def runner(walk, dispatcher, node):
        handler(dispatcher, node)
        return
        # NOTE(review): do not remove this yield — without it ``runner`` is a
        # plain function and presumably no longer matches the generator
        # interface the dispatcher expects; confirm against the dispatcher.
        yield  # pragma: no cover
    return runner
|
def cli(ctx, list, dir, files, project_dir, sayno):
    """Manage verilog examples.\n
    Install with `apio install examples`"""
    # NOTE: `list`, `dir` and `files` shadow builtins, but they are the CLI
    # option names bound by click and cannot be renamed without changing the
    # command-line interface.
    exit_code = 0
    if list:
        # --list: print the available examples.
        exit_code = Examples().list_examples()
    elif dir:
        # --dir: copy a whole example directory into the project.
        exit_code = Examples().copy_example_dir(dir, project_dir, sayno)
    elif files:
        # --files: copy individual example files into the project.
        exit_code = Examples().copy_example_files(files, project_dir, sayno)
    else:
        # No option given: show the command help plus usage examples.
        click.secho(ctx.get_help())
        click.secho(Examples().examples_of_use_cad())
    ctx.exit(exit_code)
|
def change_dcv(gandi, resource, dcv_method):
    """Change the DCV for a running certificate operation.

    *resource* can be a CN or an ID.
    """
    ids = gandi.certificate.usable_ids(resource)
    if len(ids) > 1:
        # Ambiguous resource: list the candidates and bail out.
        gandi.echo('Will not update, %s is not precise enough.' % resource)
        gandi.echo(' * cert : ' + '\n * cert : '.join([str(id_) for id_ in ids]))
        return
    id_ = ids[0]
    operations = gandi.oper.list({'cert_id': id_})
    if not operations:
        gandi.echo('Can not find any operation for this certificate.')
        return
    operation = operations[0]
    wrong_step = (operation['step'] != 'RUN'
                  and operation['params']['inner_step'] != 'comodo_oper_updated')
    if wrong_step:
        gandi.echo('This certificate operation is not in the good step to '
                   'update the DCV method.')
        return
    gandi.certificate.change_dcv(operation['id'], dcv_method)
    cert = gandi.certificate.info(id_)
    gandi.certificate.advice_dcv_method(operation['params']['csr'],
                                        cert['package'],
                                        operation['params'].get('altnames'),
                                        dcv_method,
                                        cert_id=id_)
|
def check_sender_and_entity_handle_match(sender_handle, entity_handle):
    """Ensure that sender and entity handles match.

    The sender has already been verified when receiving the payload, but the
    payload itself could claim a different author — Diaspora carries the
    sender both in the payload headers AND in the object — so the two must
    agree.
    """
    if sender_handle == entity_handle:
        return True
    logger.warning("sender_handle and entity_handle don't match, aborting! sender_handle: %s, entity_handle: %s",
                   sender_handle, entity_handle)
    return False
|
def appendPath(self, path):
    """Append the inputted path to the end of sys.path, provided the path
    does not already exist in it.

    :param path
    :type str

    :return bool: True when the path was appended, False otherwise
    """
    normalized = os.path.normcase(nstr(path)).strip()
    # Reject empty paths, the current directory, and duplicates.
    if not normalized or normalized == '.' or normalized in sys.path:
        return False
    sys.path.append(normalized)
    self._addedpaths.append(normalized)
    return True
|
def continuous_binary_search(f, lo, hi, gap=1e-4):
    """Binary search over a real interval.

    :param f: boolean monotone function with f(hi) = True
    :param lo: lower bound of the interval
    :param hi: upper bound, with hi >= lo
    :param float gap: precision of the answer
    :returns: first value x in [lo, hi] such that f(x),
              x is computed up to some precision
    :complexity: `O(log((hi - lo) / gap))`
    """
    # Shrink [lo, hi] until it is narrower than gap; the invariant is that
    # f is False at lo (or lo is the original bound) and True at hi.
    while hi - lo > gap:
        midpoint = (lo + hi) / 2.
        if f(midpoint):
            hi = midpoint
        else:
            lo = midpoint
    return lo
|
def _get_course_descriptor_path(self, courseid):
    """
    :param courseid: the course id of the course
    :raise InvalidNameException, CourseNotFoundException
    :return: the path to the descriptor of the course
    """
    if not id_checker(courseid):
        raise InvalidNameException("Course with invalid name: " + courseid)
    course_fs = self.get_course_fs(courseid)
    # Prefer YAML descriptors, falling back to the legacy JSON format.
    for descriptor in ("course.yaml", "course.json"):
        if course_fs.exists(descriptor):
            return courseid + "/" + descriptor
    raise CourseNotFoundException()
|
def _fetch_remote_json(service_url, params=None, use_http_post=False):
    """Retrieve a JSON object from a URL.

    Floats in the response are parsed as ``Decimal`` to avoid precision loss.
    """
    request_url, response = _fetch_remote(service_url, params or {}, use_http_post)
    if six.PY3:
        body = response.read().decode('utf-8')
        return (request_url, json.loads(body, parse_float=Decimal))
    return (request_url, json.load(response, parse_float=Decimal))
|
def find_by_hash(self, hash=None, book=-1):
    '''Search notes for a given (possibly abbreviated) hash.

    When *hash* is None, all notes (filtered by *book*) are returned.
    A negative *book* means "any book with book > 0".
    Returns a list of dicts, one per matching note.
    '''
    if hash:
        self.fyi("nota.find_by_hash() with abbreviated hash %s; book=%s" % (hash, book))
    # Bug fix: `rows` was unbound if the query below failed, causing a
    # NameError right after the error was reported.
    rows = []
    try:
        if book < 0:
            rows = self.cur.execute("SELECT noteId, hash FROM note WHERE book > 0;").fetchall()
        else:
            rows = self.cur.execute("SELECT noteId, hash FROM note WHERE book=?;", [book]).fetchall()
    except Exception:
        self.error("nota.find_by_hash() cannot look up note list")
    # Possibly save time by finding IDs first.
    noteIds = []
    if hash:
        prefix_len = len(hash)
        for r in rows:
            if hash == r[1][0:prefix_len]:
                noteIds.append((r[0],))
    else:
        for r in rows:
            noteIds.append((r[0],))
    self.fyi("noteIds: %s" % noteIds)
    rval = []
    for n in noteIds:
        try:
            note = self.cur.execute(
                "SELECT noteId, authorId, date, title, content, due, privacy, modified, hash, book FROM note WHERE noteId=?;",
                n).fetchone()
        except Exception:
            self.warning("Problem extracting note %s from database" % n)
            # Bug fix: was a bare `next` expression, which evaluates the
            # builtin and discards it — a no-op instead of skipping the note.
            continue
        if note:
            keywordIds = []
            keywordIds.extend(self.con.execute(
                "SELECT keywordid FROM notekeyword WHERE notekeyword.noteid=?;", n))
            keywords = []
            for k in keywordIds:
                keywords.append(self.cur.execute(
                    "SELECT keyword FROM keyword WHERE keywordId=?;", k).fetchone()[0])
            rval.append({"noteId": note[0],
                         "title": note[3],
                         "keywords": keywords,
                         "content": note[4],
                         "due": note[5],
                         "privacy": note[6],
                         "date": note[2],
                         "modified": note[7],
                         "hash": note[8],
                         "book": note[9]})
    return rval
|
def cycle_data(self, verbose=False, result_cycle=None, result_size=None, result_edges=None, changelog=True):
    """Get data from JIRA for cycle/flow times and story points size change.

    Build a numerically indexed data frame with the following 'fixed'
    columns: `key`, 'url', 'issue_type', `summary`, `status`, and
    `resolution` from JIRA, as well as the value of any fields set in
    the `fields` dict in `settings`. If `known_values` is set (a dict of
    lists, with field names as keys and a list of known values for each
    field as values) and a field in `fields` contains a list of values,
    only the first value in the list of known values will be used.

    If 'query_attribute' is set in `settings`, a column with this name
    will be added, and populated with the `value` key, if any, from each
    criteria block under `queries` in settings.

    In addition, `cycle_time` will be set to the time delta between the
    first `accepted`-type column and the first `complete` column, or None.

    The remaining columns are the names of the items in the configured
    cycle, in order.

    Each cell contains the last date/time stamp when the relevant status
    was set.

    If an item moves backwards through the cycle, subsequent date/time
    stamps in the cycle are erased.

    :param verbose: when True, print diagnostics about unknown statuses
        and backwards moves
    :param result_cycle: unused on input; the cycle DataFrame is returned
    :param result_size: unused on input; the size-history DataFrame is returned
    :param result_edges: unused on input; the issue-link DataFrame is returned
    :param changelog: passed through to find_issues()
    :return: tuple (result_cycle, result_size, result_edges)
    """
    # Workflow step names, plus the subsets counting as "accepted" (work
    # started) and "complete" (work finished).
    cycle_names = [s['name'] for s in self.settings['cycle']]
    accepted_steps = set(s['name'] for s in self.settings['cycle'] if s['type'] == StatusTypes.accepted)
    completed_steps = set(s['name'] for s in self.settings['cycle'] if s['type'] == StatusTypes.complete)
    # Column -> {'data': values, 'dtype': pandas dtype} accumulator for the
    # cycle DataFrame built at the end.
    series = {'key': {'data': [], 'dtype': str},
              'url': {'data': [], 'dtype': str},
              'issue_type': {'data': [], 'dtype': str},
              'summary': {'data': [], 'dtype': str},
              'status': {'data': [], 'dtype': str},
              'resolution': {'data': [], 'dtype': str},
              'cycle_time': {'data': [], 'dtype': 'timedelta64[ns]'},
              'completed_timestamp': {'data': [], 'dtype': 'datetime64[ns]'},
              'created_timestamp': {'data': [], 'dtype': 'datetime64[ns]'}}
    # Size-history rows are spooled to a CSV buffer and re-read at the end.
    if sys.platform.startswith('win'):
        buffer = open("cycledata.tmp", "w+", 1)
        # Opens a file for writing only in binary format. Overwrites the file if the file exists.
        # buffering value is 1
        # Windows users seem to have a problem with spooled file
    else:
        buffer = tempfile.SpooledTemporaryFile(max_size=50000, mode='w+t')
    # issuelinks = open("issuelinks.csv", "w+", 1)
    # df_edges = pd.DataFrame()
    # df_edges = pd.DataFrame(columns=['Source', 'OutwardLink', 'Target', 'Inwardlink', 'LinkType'])
    # df_edges.to_csv(issuelinks, columns=['Source', 'OutwardLink', 'Target', 'Inwardlink', 'LinkType'], header=True, index=None, sep='\t', encoding='utf-8')
    df_size_history = pd.DataFrame(columns=['key', 'fromDate', 'toDate', 'size'])
    df_size_history.to_csv(buffer, columns=['key', 'fromDate', 'toDate', 'size'], header=True, index=None, sep='\t', encoding='utf-8')
    # One datetime column per workflow step, plus any custom fields and the
    # optional query attribute.
    for cycle_name in cycle_names:
        series[cycle_name] = {'data': [], 'dtype': 'datetime64[ns]'}
    for name in self.fields.keys():
        series[name] = {'data': [], 'dtype': 'object'}
    if self.settings['query_attribute']:
        series[self.settings['query_attribute']] = {'data': [], 'dtype': str}
    for criteria in self.settings['queries']:
        for issue in self.find_issues(criteria, order='updatedDate DESC', verbose=verbose, changelog=changelog):
            # Deal with the differences in strings between Python 2 & 3
            if (sys.version_info > (3, 0)):
                # Python 3 code in this block
                item = {'key': issue.key,
                        'url': "%s/browse/%s" % (self.jira._options['server'], issue.key,),
                        'issue_type': issue.fields.issuetype.name,
                        'summary': issue.fields.summary,  # .encode('utf-8'),
                        'status': issue.fields.status.name,
                        'resolution': issue.fields.resolution.name if issue.fields.resolution else None,
                        'cycle_time': None,
                        'completed_timestamp': None,
                        'created_timestamp': issue.fields.created[:19]}
            else:
                # Python 2 code in this block
                item = {'key': issue.key,
                        'url': "%s/browse/%s" % (self.jira._options['server'], issue.key,),
                        'issue_type': issue.fields.issuetype.name,
                        'summary': issue.fields.summary.encode('utf-8'),
                        'status': issue.fields.status.name,
                        'resolution': issue.fields.resolution.name if issue.fields.resolution else None,
                        'cycle_time': None,
                        'completed_timestamp': None,
                        'created_timestamp': issue.fields.created[:19]}
            for name, field_name in self.fields.items():
                item[name] = self.resolve_field_value(issue, name, field_name)
            if self.settings['query_attribute']:
                item[self.settings['query_attribute']] = criteria.get('value', None)
            for cycle_name in cycle_names:
                item[cycle_name] = None
            # Get the relationships for this issue
            edges = []
            # Source, Target, Inward Link, Outward Link, Type
            issuelinks = issue.fields.issuelinks
            # It is seems that having an Epic Parent does not record an Epic Link, just the name "Epic Name"
            # Creating Epic relationship requires more work. Also each Jira instance will have different customfields for Epic data
            # Remove this code.
            # issueEpic = issue.fields.customfield_10008 if issue.fields.customfield_10008 else None  # Epic Link
            # if issueEpic is not None:
            #     data = {'Source': issueEpic, 'Target': issue.key, 'InwardLink': 'Belongs to Epic', 'OutwardLink': 'Issue in Epic', 'LinkType': 'EpicIssue'}
            #     edges.append(data)
            for link in issuelinks:
                inwardissue = None
                outwardissue = None
                # A link has either an inwardIssue or an outwardIssue; the
                # attribute access raises when the direction is absent.
                try:
                    inwardissue = link.inwardIssue.key
                except:
                    outwardissue = link.outwardIssue.key
                if inwardissue is not None:
                    data = {'LinkID': link.id, 'Source': inwardissue, 'Target': issue.key, 'InwardLink': link.type.inward, 'OutwardLink': link.type.outward, 'LinkType': link.type.name}
                else:
                    data = {'LinkID': link.id, 'Source': issue.key, 'Target': outwardissue, 'InwardLink': link.type.inward, 'OutwardLink': link.type.outward, 'LinkType': link.type.name}
                edges.append(data)
            if len(edges) > 0:
                # df_edges is created lazily on the first issue with links.
                try:
                    df_edges
                except NameError:
                    # print('Not found')
                    df_edges = pd.DataFrame(edges)
                else:
                    # NOTE(review): DataFrame.append is deprecated/removed in
                    # pandas >= 2.0; pd.concat is the replacement — confirm
                    # the pinned pandas version.
                    df_links = pd.DataFrame(edges)
                    df_edges = df_edges.append(df_links)
                    # = pd.DataFrame(edges)
            # Got all the relationships for this issue
            rows = []
            try:
                for snapshot in self.iter_size_changes(issue):
                    data = {'key': snapshot.key, 'fromDate': snapshot.date, 'size': snapshot.size}
                    rows.append(data)
                df = pd.DataFrame(rows)
                # Create the toDate column: each change is valid until the
                # next one; the last change is open-ended until "now".
                df_toDate = df['fromDate'].shift(-1)
                df_toDate.loc[len(df_toDate) - 1] = datetime.datetime.now(pytz.utc)
                df['toDate'] = df_toDate
            except:
                df = pd.DataFrame(columns=['key', 'fromDate', 'toDate', 'size'])
            # Round Down datetimes to full dates
            df['fromDate'] = df['fromDate'].apply(lambda dt: datetime.datetime(dt.year, dt.month, dt.day))
            df['toDate'] = df['toDate'].apply(lambda dt: datetime.datetime(dt.year, dt.month, dt.day))
            # If we only have one row of size changes and current issue has a size then it must have been created with a size value at creation.
            # This size will not be recorded in the size_change record.
            # Hence update the single row we have with the current issue size.
            # Get Story Points size changes history
            # If condition is met update the size cell
            # NOTE(review): `item` is a dict, so getattr(item, 'StoryPoints',
            # None) is always None and this branch can never run; the
            # commented item['StoryPoints'] form was probably intended —
            # confirm before relying on this backfill.
            if getattr(item, 'StoryPoints', None) is not None and (df.shape[0] == 1):
                # if (item['StoryPoints'] is not None) and (len(df) == 1):
                df.loc[df.index[0], 'size'] = item['StoryPoints']
            # Append to csv file
            df.to_csv(buffer, columns=['key', 'fromDate', 'toDate', 'size'], header=None, mode='a', sep='\t', date_format='%Y-%m-%d', encoding='utf-8')
            # print(rows)
            # If the first column in item lifecycle was scipted put the created data in it.
            if item[cycle_names[0]] is None:
                item[cycle_names[0]] = dateutil.parser.parse(item['created_timestamp'])
                # item['created_timestamp']
                # Figure out why the first Column does not have created date
                # print(dateutil.parser.parse(item['created_timestamp']))
            # Record date of status changes
            for snapshot in self.iter_changes(issue, True):
                snapshot_cycle_step = self.settings['cycle_lookup'].get(snapshot.status.lower(), None)
                if snapshot_cycle_step is None:
                    if verbose:
                        print(issue.key, "transitioned to unknown JIRA status", snapshot.status)
                    continue
                snapshot_cycle_step_name = snapshot_cycle_step['name']
                # Keep the first time we entered a step
                if item[snapshot_cycle_step_name] is None:
                    item[snapshot_cycle_step_name] = snapshot.date
                # Wipe any subsequent dates, in case this was a move backwards
                found_cycle_name = False
                for cycle_name in cycle_names:
                    if not found_cycle_name and cycle_name == snapshot_cycle_step_name:
                        found_cycle_name = True
                        continue
                    elif found_cycle_name and item[cycle_name] is not None:
                        if verbose:
                            print(issue.key, "moved backwards to", snapshot_cycle_step_name, "wiping date for subsequent step", cycle_name)
                        item[cycle_name] = None
            # Wipe timestamps if items have moved backwards; calculate cycle time
            previous_timestamp = None
            accepted_timestamp = None
            completed_timestamp = None
            for cycle_name in cycle_names:
                if item[cycle_name] is not None:
                    previous_timestamp = item[cycle_name]
                if accepted_timestamp is None and previous_timestamp is not None and cycle_name in accepted_steps:
                    accepted_timestamp = previous_timestamp
                if completed_timestamp is None and previous_timestamp is not None and cycle_name in completed_steps:
                    completed_timestamp = previous_timestamp
            if accepted_timestamp is not None and completed_timestamp is not None:
                item['cycle_time'] = completed_timestamp - accepted_timestamp
                item['completed_timestamp'] = completed_timestamp
            # Push this issue's values into the per-column accumulators.
            for k, v in item.items():
                series[k]['data'].append(v)
    # Assemble the cycle DataFrame from the accumulated columns.
    data = {}
    for k, v in series.items():
        data[k] = pd.Series(v['data'], dtype=v['dtype'])
    result_cycle = pd.DataFrame(data,
                                columns=['key', 'url', 'issue_type', 'summary', 'status', 'resolution'] +
                                        sorted(self.fields.keys()) +
                                        ([self.settings['query_attribute']] if self.settings['query_attribute'] else []) +
                                        ['cycle_time', 'completed_timestamp'] + cycle_names)
    # Re-read the spooled size-history CSV into a DataFrame.
    result_size = pd.DataFrame()
    buffer.seek(0)
    # NOTE(review): DataFrame.from_csv was removed in pandas 1.0 in favour of
    # pd.read_csv — confirm the pinned pandas version.
    result_size = result_size.from_csv(buffer, sep='\t')
    buffer.close()
    # df_edges only exists if at least one issue had links.
    try:
        df_edges
    except NameError:
        # print('Not found')
        df_edges = pd.DataFrame()
    try:
        df_edges = df_edges[['Source', 'OutwardLink', 'Target', 'InwardLink', 'LinkType', 'LinkID']]
        # Specify dataframe sort order
        # df_edges.to_csv("myedges.csv", sep='\t', index=False, encoding='utf-8')
    except KeyError:
        print('Info: No issue edges found.')
    result_edges = df_edges
    # There maybe no result_size data is we might not have any change history
    try:
        result_size.set_index('key')
    except KeyError:
        result_size = pd.DataFrame(index=['key'], columns=['fromDate', 'toDate', 'size'])
    result_size['toDate'] = pd.to_datetime(result_size['toDate'], format=('%Y-%m-%d'))
    result_size['fromDate'] = pd.to_datetime(result_size['fromDate'], format=('%Y-%m-%d'))
    return result_cycle, result_size, result_edges
|
def _parse_planar_geometry_surface(self, node):
    """Parse a planar geometry surface from its four corner nodes."""
    corners = []
    # Corner order is significant: it matches from_corner_points().
    for corner_name in ["topLeft", "topRight", "bottomRight", "bottomLeft"]:
        corner = getattr(node, corner_name)
        corners.append(geo.Point(corner["lon"], corner["lat"], corner["depth"]))
    top_left, top_right, bottom_right, bottom_left = corners
    return geo.PlanarSurface.from_corner_points(top_left, top_right,
                                                bottom_right, bottom_left)
|
def emitSortingChanged(self, index):
    """Emit the sortingChanged signal when the user clicks a sortable column.

    :param index | <int>
    """
    # Do nothing while signals are blocked or sorting is disabled.
    if self.signalsBlocked() or not self.isSortingEnabled():
        return
    self.sortingChanged.emit(index, self.header().sortIndicatorOrder())
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.