signature
stringlengths 29
44.1k
| implementation
stringlengths 0
85.2k
|
|---|---|
def repr_feature(feature, max_keys=100, indent=8, lexigraphic=False):
    '''Generate a pretty-printed string for a feature.

    Currently implemented:
    * StringCounter

    :param max_keys: truncate long counters to this many keys
    :param indent: indent multi-line displays by this many spaces
    :param lexigraphic: instead of sorting counters by count (default),
        sort keys lexigraphically
    '''
    if isinstance(feature, bytes):
        # Show bytes decoded when they are valid UTF-8; otherwise fall back
        # to repr() for arbitrary binary data.  The original used a bare
        # ``except:``, which also swallowed unrelated errors.
        try:
            return feature.decode('utf8')
        except UnicodeDecodeError:
            return repr(feature)
    if isinstance(feature, str):
        # Text is already printable as-is (replaces the Py2 ``unicode``
        # branch, which raised NameError on Py3 and was shadowed by the
        # str/bytes branch anyway).
        return feature
    if isinstance(feature, StringCounter):
        return repr_stringcounter(feature, max_keys, indent, lexigraphic)
    return repr(feature)
|
def send_messages(self, email_messages):
    """Send one or more EmailMessage objects via Amazon SES.

    :param email_messages: iterable of Django ``EmailMessage`` objects.
    :return: the number of messages successfully sent, or ``None`` when
        there is nothing to send or the SES connection could not be opened.
    """
    if not email_messages:
        return
    # Remember whether we opened the connection here so we can close it
    # again on the way out.
    new_conn_created = self.open()
    if not self.connection:
        # Failed silently
        return
    num_sent = 0
    # Optional fixed return-path (bounce address); falls back to each
    # message's from_email when unset.
    source = settings.AWS_SES_RETURN_PATH
    for message in email_messages:
        # SES Configuration sets. If the AWS_SES_CONFIGURATION_SET setting
        # is not None, append the appropriate header to the message so that
        # SES knows which configuration set it belongs to.
        # If settings.AWS_SES_CONFIGURATION_SET is a callable, pass it the
        # message object and dkim settings and expect it to return a string
        # containing the SES Configuration Set name.
        if (settings.AWS_SES_CONFIGURATION_SET and 'X-SES-CONFIGURATION-SET' not in message.extra_headers):
            if callable(settings.AWS_SES_CONFIGURATION_SET):
                message.extra_headers['X-SES-CONFIGURATION-SET'] = settings.AWS_SES_CONFIGURATION_SET(message, dkim_domain=self.dkim_domain, dkim_key=self.dkim_key, dkim_selector=self.dkim_selector, dkim_headers=self.dkim_headers)
            else:
                message.extra_headers['X-SES-CONFIGURATION-SET'] = settings.AWS_SES_CONFIGURATION_SET
        # Automatic throttling. Assumes that this is the only SES client
        # currently operating. The AWS_SES_AUTO_THROTTLE setting is a
        # factor to apply to the rate limit, with a default of 0.5 to stay
        # well below the actual SES throttle.
        # Set the setting to 0 or None to disable throttling.
        if self._throttle:
            global recent_send_times
            now = datetime.now()
            # Get and cache the current SES max-per-second rate limit
            # returned by the SES API.
            rate_limit = self.get_rate_limit()
            logger.debug(u"send_messages.throttle rate_limit='{}'".format(rate_limit))
            # Prune from recent_send_times anything more than a few seconds
            # ago. Even though SES reports a maximum per-second, the way
            # they enforce the limit may not be on a one-second window.
            # To be safe, we use a two-second window (but allow 2 times the
            # rate limit) and then also have a default rate limit factor of
            # 0.5 so that we really limit the one-second amount in two
            # seconds.
            window = 2.0  # seconds
            window_start = now - timedelta(seconds=window)
            new_send_times = []
            for time in recent_send_times:
                if time > window_start:
                    new_send_times.append(time)
            recent_send_times = new_send_times
            # If the number of recent send times in the last 1/_throttle
            # seconds exceeds the rate limit, add a delay.
            # Since I'm not sure how Amazon determines at exactly what
            # point to throttle, better be safe than sorry and let in, say,
            # half of the allowed rate.
            if len(new_send_times) > rate_limit * window * self._throttle:
                # Sleep the remainder of the window period.
                delta = now - new_send_times[0]
                # Manual timedelta-to-seconds conversion (pre-dates
                # timedelta.total_seconds availability).
                total_seconds = (delta.microseconds + (delta.seconds + delta.days * 24 * 3600) * 10 ** 6) / 10 ** 6
                delay = window - total_seconds
                if delay > 0:
                    sleep(delay)
            recent_send_times.append(now)
            # end of throttling
        try:
            response = self.connection.send_raw_email(source=source or message.from_email, destinations=message.recipients(), raw_message=dkim_sign(message.message().as_string(), dkim_key=self.dkim_key, dkim_domain=self.dkim_domain, dkim_selector=self.dkim_selector, dkim_headers=self.dkim_headers))
            message.extra_headers['status'] = 200
            message.extra_headers['message_id'] = response['SendRawEmailResponse']['SendRawEmailResult']['MessageId']
            message.extra_headers['request_id'] = response['SendRawEmailResponse']['ResponseMetadata']['RequestId']
            num_sent += 1
            if 'X-SES-CONFIGURATION-SET' in message.extra_headers:
                logger.debug(u"send_messages.sent from='{}' recipients='{}' message_id='{}' request_id='{}' ses-configuration-set='{}'".format(message.from_email, ", ".join(message.recipients()), message.extra_headers['message_id'], message.extra_headers['request_id'], message.extra_headers['X-SES-CONFIGURATION-SET']))
            else:
                logger.debug(u"send_messages.sent from='{}' recipients='{}' message_id='{}' request_id='{}'".format(message.from_email, ", ".join(message.recipients()), message.extra_headers['message_id'], message.extra_headers['request_id']))
        except SESConnection.ResponseError as err:
            # Store failure information so to post process it if required
            error_keys = ['status', 'reason', 'body', 'request_id', 'error_code', 'error_message']
            for key in error_keys:
                message.extra_headers[key] = getattr(err, key, None)
            if not self.fail_silently:
                raise
    if new_conn_created:
        self.close()
    return num_sent
|
def ilist(self, in_list=None):
    """Return a list that uses this server's IRC casemapping.

    All strings in this list are lowercased using the server's casemapping
    before inserting them into the list, and the ``in`` operator takes
    casemapping into account.

    :param in_list: optional iterable of initial members. The original
        mutable default ``[]`` was replaced with ``None`` so the default
        list object is not shared across calls.
    """
    if in_list is None:
        in_list = []
    new_list = IList(in_list)
    new_list.set_std(self.features.get('casemapping'))
    if not self._casemap_set:
        # Casemapping not settled yet; track this list so it can be
        # updated once the server's casemapping is known.
        self._imaps.append(new_list)
    return new_list
|
def update(self, data, default=False):
    """Apply every non-None key/value pair from ``data`` to this Config.

    :param data: a ``Mapping``-like object exposing ``items()`` for
        iterating through key-value pairs.
    :param default: when ``True``, updated settings also record the new
        value as their :attr:`~Setting.default` (provided it is valid).
    """
    for key, val in data.items():
        if val is None:
            # None means "leave this setting untouched".
            continue
        self.set(key, val, default)
|
def send(self, data):
    """Send ``data`` across the data channel to the remote peer.

    :param data: payload; must be ``str`` or ``bytes``.
    :raises InvalidStateError: if the channel is not open.
    :raises ValueError: if ``data`` is of an unsupported type.
    """
    if self.readyState != 'open':
        raise InvalidStateError
    if isinstance(data, (str, bytes)):
        self.transport._data_channel_send(self, data)
    else:
        raise ValueError('Cannot send unsupported data type: %s' % type(data))
|
def google_cloud_datastore_delete_expired_sessions(dormant_for=86400, limit=500):
    """Delete expired sessions from Google Cloud Datastore.

    A session is expired if its expires date is set and has passed, or if it
    has not been accessed for a given period of time.

    :param dormant_for: seconds since last access to delete sessions,
        defaults to 24 hours.
    :type dormant_for: int
    :param limit: amount to delete in one call of the method; the maximum
        and default is the NDB fetch limit of 500.
    :type limit: int
    :return: True when fewer than ``limit`` sessions were deleted (i.e. no
        further batches are pending).
    """
    from vishnu.backend.client.google_cloud_datastore import TABLE_NAME
    from google.cloud import datastore
    from datetime import datetime
    from datetime import timedelta

    now = datetime.utcnow()
    dormant_cutoff = now - timedelta(seconds=dormant_for)
    client = datastore.Client()

    # Sessions idle since the cutoff.
    dormant_query = client.query(kind=TABLE_NAME)
    dormant_query.add_filter("last_accessed", "<=", dormant_cutoff)
    dormant_results = dormant_query.fetch(limit=limit)

    # Sessions whose explicit expiry has passed.
    expired_query = client.query(kind=TABLE_NAME)
    expired_query.add_filter("expires", "<=", now)
    expired_results = expired_query.fetch(limit=limit)

    # Merge the two result sets, avoiding duplicate keys.
    keys = [entity.key for entity in dormant_results]
    for entity in expired_results:
        if entity.key not in keys:
            keys.append(entity.key)

    client.delete_multi(keys)
    return len(keys) < limit
|
def antenna_uvw(uvw, antenna1, antenna2, chunks, nr_of_antenna, check_missing=False, check_decomposition=False, max_err=100):
    """Compute per-antenna UVW coordinates from baseline UVW coordinates.

    Baselines (``uvw``, ``antenna1``, ``antenna2``) are logically grouped
    into chunks, e.g. ``chunks = np.array([6, 5], dtype=np.int32)`` describes
    two consecutive chunks of 6 and 5 baselines:

    .. code-block:: python

        ant_uvw = antenna_uvw(uvw, ant1, ant2, chunks, nr_of_antenna=4)

    Within each chunk, the first antenna of the first baseline is chosen as
    the origin of the antenna coordinate system and the second antenna is set
    to the negative of that baseline's UVW coordinate. Subsequent antenna
    coordinates are derived iteratively from the first two, so baseline
    ordering within a chunk is irrelevant. Antennae whose coordinates cannot
    be derived are set to NaN.

    Parameters
    ----------
    uvw : np.ndarray
        Baseline UVW coordinates of shape (row, 3)
    antenna1 : np.ndarray
        Baseline first antenna of shape (row,)
    antenna2 : np.ndarray
        Baseline second antenna of shape (row,)
    chunks : np.ndarray
        Number of baselines per unique timestep, shape (chunks,);
        ``np.sum(chunks) == row`` should hold.
    nr_of_antenna : int
        Total number of antenna in the solution.
    check_missing : bool, optional
        If True, raise an exception when UVW coordinates could not be
        computed for all antenna (i.e. some were NaN). Defaults to False.
    check_decomposition : bool, optional
        If True, check that the decomposition reproduces ``uvw``, i.e. that
        ``ant_uvw[c, ant1, :] - ant_uvw[c, ant2, :] == uvw[s:e, :]`` where
        ``s`` and ``e`` are the start and end rows of chunk ``c``.
        Defaults to False.
    max_err : int, optional
        Maximum number of errors reported when checking for missing antenna
        or inaccurate decompositions. Defaults to 100.

    Returns
    -------
    np.ndarray
        Antenna UVW coordinates of shape (chunks, nr_of_antenna, 3)
    """
    per_antenna = _antenna_uvw(uvw, antenna1, antenna2, chunks, nr_of_antenna)
    if check_missing:
        _raise_missing_antenna_errors(per_antenna, max_err=max_err)
    if check_decomposition:
        _raise_decomposition_errors(uvw, antenna1, antenna2, chunks, per_antenna, max_err=max_err)
    return per_antenna
|
def iter(self, *, dates=None, start=None, stop=None, step=None, strict=True, **kwargs):
    """Ephemeris generator based on the data of this one, but with different dates.

    Keyword Arguments:
        dates (list of :py:class:`~beyond.dates.date.Date`): Dates from which to iterate over
        start (Date or None): Date of the first point
        stop (Date, timedelta or None): Date of the last point
        step (timedelta or None): Step to use during the computation. Use the same step as
            `self` if `None`
        listeners (list of :py:class:`~beyond.orbits.listeners.Listener`):
        strict (bool): If True, the method will raise a ValueError if ``start`` or ``stop`` is
            not in the range of the ephemeris. If False, it will take the closest point in each
            case.
    Yield:
        :py:class:`Orbit`:

    There are two ways to use the iter() method.

    If *dates* is defined, it should be an iterable of dates. This could be
    a generator as per :py:meth:`Date.range <beyond.dates.date.Date.range>`, or a list.

    .. code-block:: python

        # Create two successive ranges of dates, with different steps
        dates = list(Date.range(Date(2019, 3, 23), Date(2019, 3, 24), timedelta(minutes=3)))
        dates.extend(Date.range(Date(2019, 3, 24), Date(2019, 3, 25), timedelta(minutes=10), inclusive=True))
        ephem.iter(dates=dates)

    The alternative is the use of the *start*, *stop* and *step* keyword arguments,
    which work exactly as :code:`Date.range(start, stop, step, inclusive=True)`.
    If one of *start*, *stop* or *step* is ``None`` it keeps the same property
    as the generating ephemeris.

    .. code-block:: python

        # In the examples below, we consider the 'ephem' object to be an ephemeris starting on
        # 2017-01-01 00:00:00 UTC and ending 2017-01-02 00:00:00 UTC (included) with a fixed
        # step of 3 minutes.

        # These two calls will generate exactly the same points starting at 00:00 and ending at
        # 12:00, as 12:02 does not fall on a date included in the original 'ephem' object.
        ephem.iter(stop=Date(2017, 1, 1, 12))
        ephem.iter(stop=Date(2017, 1, 1, 12, 2))

        # Similarly, these calls will generate the same points starting at 12:00 and ending at
        # 00:00, as 11:58 does not fall on a date included in the 'ephem' object.
        ephem.iter(start=Date(2017, 1, 1, 11, 58))
        ephem.iter(start=Date(2017, 1, 1, 12))

        # This call will generate an ephemeris which is a subpart of the initial one
        ephem.iter(start=Date(2017, 1, 1, 8), stop=Date(2017, 1, 1, 16))
    """
    # To allow for a loose control of the dates we have to compute
    # the real starting date of the iterator
    listeners = kwargs.get('listeners', [])
    if dates:
        # Explicit-dates mode: propagate at each provided date.
        for date in dates:
            orb = self.propagate(date)
            # Listeners
            for listen_orb in self.listen(orb, listeners):
                yield listen_orb
            yield orb
    else:
        # Range mode: derive start/stop from self where unset, enforcing
        # or clamping to the ephemeris bounds depending on `strict`.
        real_start = None
        if start is None:
            start = self.start
        elif start < self.start:
            if strict:
                raise ValueError("Start date not in range")
            else:
                # Clamping is deferred via real_start: when `stop` is a
                # timedelta it must be resolved relative to the *requested*
                # start, not the clamped one.
                real_start = self.start
        if stop is None:
            stop = self.stop
        else:
            if isinstance(stop, timedelta):
                # Relative stop: anchor it on the requested start date.
                stop = start + stop
            if stop > self.stop:
                if strict:
                    raise ValueError("Stop date not in range")
                else:
                    stop = self.stop
        if real_start is not None:
            # Apply the deferred clamp now that `stop` is resolved.
            start = real_start
        if step is None:
            # The step stays the same as the original ephemeris: replay
            # the recorded points that fall inside [start, stop].
            for orb in self:
                if orb.date < start:
                    continue
                if orb.date > stop:
                    break
                # Listeners
                for listen_orb in self.listen(orb, listeners):
                    yield listen_orb
                # yield a copy of the recorded orbit to avoid later modification
                # which could have dire consequences
                yield orb.copy()
        else:
            # Create an ephemeris with a different step than the original
            # by propagating at each regularly-spaced date.
            date = start
            while date <= stop:
                orb = self.propagate(date)
                # Listeners
                for listen_orb in self.listen(orb, listeners):
                    yield listen_orb
                yield orb
                date += step
|
def repay_funding(self, amount, currency):
    """Repay funding. Repays the oldest funding records first.

    Args:
        amount (int): Amount of currency to repay
        currency (str): The currency, e.g. USD

    Returns:
        Not specified by cbpro.
    """
    payload = json.dumps({'amount': amount, 'currency': currency})
    return self._send_message('post', '/funding/repay', data=payload)
|
def validateRegex(value, regex, flags=0, blank=False, strip=None, allowlistRegexes=None, blocklistRegexes=None, excMsg=None):
    """Raise ValidationException unless *value* matches *regex*; return the matched text.

    This is similar to calling inputStr() with the allowlistRegexes keyword
    argument, but validateRegex() lets you pass regex flags such as
    re.IGNORECASE or re.VERBOSE, or a compiled regex object directly.
    To check whether a string is itself a valid regular expression, call
    validateRegexStr() instead.

    * value (str): The value being validated.
    * regex (str, regex): The regular expression to match the value against.
    * flags (int): Identical to the flags argument in re.compile(). Pass re.VERBOSE et al here.
    * blank (bool): If True, a blank string will be accepted. Defaults to False.
    * strip (bool, str, None): If None, whitespace is stripped from value. If a str, the characters in it are stripped from value. If False, nothing is stripped.
    * allowlistRegexes (Sequence, None): A sequence of regex str that will explicitly pass validation.
    * blocklistRegexes (Sequence, None): A sequence of regex str or (regex_str, response_str) tuples that, if matched, will explicitly fail validation.
    * excMsg (str): A custom message to use in the raised ValidationException.

    >>> pysv.validateRegex('cat bat rat', r'(cat)|(dog)|(moose)', re.IGNORECASE)
    'cat'
    >>> pysv.validateRegex('He said "Hello".', r'"(.*?)"', re.IGNORECASE)
    '"Hello"'
    """
    # Validate parameters.
    _validateGenericParameters(blank=blank, strip=strip, allowlistRegexes=allowlistRegexes, blocklistRegexes=blocklistRegexes)
    returnNow, value = _prevalidationCheck(value, blank, strip, allowlistRegexes, blocklistRegexes, excMsg)
    if returnNow:
        return value

    # Search value with regex, whether regex is a str or compiled pattern.
    if isinstance(regex, str):
        # TODO - check flags to see they're valid regex flags.
        matched = re.compile(regex, flags).search(value)
    elif isinstance(regex, REGEX_TYPE):
        matched = regex.search(value)
    else:
        raise PySimpleValidateException('regex must be a str or regex object')

    if matched is not None:
        return matched.group()
    _raiseValidationException(_('%r does not match the specified pattern.') % (_errstr(value)), excMsg)
|
def _instance_callable(obj):
    """Return True if *obj* is callable.

    For classes, return True if instances would be callable.
    """
    if not isinstance(obj, ClassTypes):
        # Already an instance: callable iff it exposes __call__.
        return getattr(obj, '__call__', None) is not None

    if six.PY3:
        # *could* be broken by a class overriding __mro__ or __dict__ via
        # a metaclass
        return any(base.__dict__.get('__call__') is not None
                   for base in (obj,) + obj.__mro__)

    # Python 2: use __bases__ instead of __mro__ so old-style classes work.
    if obj.__dict__.get('__call__') is not None:
        return True
    return any(_instance_callable(base) for base in obj.__bases__)
|
def find_all_template(im_source, im_search, threshold=0.5, maxcnt=0, rgb=False, bgremove=False):
    '''Locate every occurrence of a template image with cv2.matchTemplate.

    Uses pixel matching to find pictures.

    Args:
        im_source: the image/material to search within (BGR ndarray).
        im_search: the picture to look for (BGR ndarray).
        threshold: matches scoring below this similarity are ignored.
        maxcnt: maximum number of matches to return; 0 means unlimited.
        rgb: if True, match each BGR channel separately and combine the
            score maps with weights (0.3, 0.3, 0.4).
        bgremove: if True, apply Canny edge extraction first to suppress
            the background.

    Returns:
        A list of found matches, each a dict with keys ``result`` (center
        point), ``rectangle`` (four corner points) and ``confidence``.

    Raises:
        IOError: when file read error.
    '''
    # method = cv2.TM_CCORR_NORMED
    # method = cv2.TM_SQDIFF_NORMED
    method = cv2.TM_CCOEFF_NORMED
    if rgb:
        # Match each colour channel separately, then blend the score maps.
        s_bgr = cv2.split(im_search)  # Blue Green Red
        i_bgr = cv2.split(im_source)
        weight = (0.3, 0.3, 0.4)
        resbgr = [0, 0, 0]
        for i in range(3):  # bgr
            resbgr[i] = cv2.matchTemplate(i_bgr[i], s_bgr[i], method)
        res = resbgr[0] * weight[0] + resbgr[1] * weight[1] + resbgr[2] * weight[2]
    else:
        s_gray = cv2.cvtColor(im_search, cv2.COLOR_BGR2GRAY)
        i_gray = cv2.cvtColor(im_source, cv2.COLOR_BGR2GRAY)
        # Edge extraction (implements the background-removal feature)
        if bgremove:
            s_gray = cv2.Canny(s_gray, 100, 200)
            i_gray = cv2.Canny(i_gray, 100, 200)
        res = cv2.matchTemplate(i_gray, s_gray, method)
    w, h = im_search.shape[1], im_search.shape[0]
    result = []
    # Repeatedly take the best remaining match, then flood-fill it out of
    # the score map so the next iteration finds the next-best match.
    while True:
        min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)
        if method in [cv2.TM_SQDIFF, cv2.TM_SQDIFF_NORMED]:
            # For SQDIFF methods the best match is the minimum score.
            top_left = min_loc
        else:
            top_left = max_loc
        if DEBUG:
            print('templmatch_value(thresh:%.1f) = %.3f' % (threshold, max_val))  # not show debug
        if max_val < threshold:
            break
        # calculate middle point
        middle_point = (top_left[0] + w / 2, top_left[1] + h / 2)
        result.append(dict(result=middle_point, rectangle=(top_left, (top_left[0], top_left[1] + h), (top_left[0] + w, top_left[1]), (top_left[0] + w, top_left[1] + h)), confidence=max_val))
        if maxcnt and len(result) >= maxcnt:
            break
        # floodfill the already found area
        cv2.floodFill(res, None, max_loc, (-1000,), max_val - threshold + 0.1, 1, flags=cv2.FLOODFILL_FIXED_RANGE)
    return result
|
def _open(self):
    """Bind to the LDAP server, upgrading the connection with TLS first.

    Re-raises ``ldap.CONNECT_ERROR`` (after logging) if the TLS
    negotiation fails.
    """
    try:
        self.ldap.start_tls_s()
    # pylint: disable=no-member
    except ldap.CONNECT_ERROR:
        # pylint: enable=no-member
        logging.error('Unable to establish a connection to the LDAP server, ' + 'please check the connection string ' + 'and ensure the remote certificate is signed by a trusted authority.')
        raise
    # Simple (user/password) bind once the channel is encrypted.
    self.ldap.simple_bind_s(self.user, self.password)
|
def mpl_outside_legend(ax, **kwargs):
    """Place a legend box outside (to the right of) a matplotlib Axes.

    The Axes is shrunk to 75% of its width to make room for the legend.

    :param ax: the Axes instance to modify.
    :param kwargs: forwarded to ``ax.legend``.
    """
    # Shrink the axes horizontally so the legend fits beside it.
    pos = ax.get_position()
    ax.set_position([pos.x0, pos.y0, pos.width * 0.75, pos.height])
    # Put a legend to the right of the current axis.
    ax.legend(loc='upper left', bbox_to_anchor=(1, 1), **kwargs)
|
def filename_to_task_id(fname):
    """Map a shard filename back to the task id that created it, assuming 1k tasks.

    Filenames look like ``<base>-<split>-<shard>...``; this matches the
    order and size in WikisumBase.out_filepaths.
    """
    base = os.path.basename(fname)
    # Offset of each split's shards within the 1k task space.
    offsets = {"train": 0, "dev": 800, "test": 900}
    parts = base.split("-")
    return int(parts[2]) + offsets[parts[1]]
|
def from_notebook_node(self, nb, resources=None, **kw):
    """Uses nbconvert's HTMLExporter to generate HTML, with slight modifications.

    Notes
    -----
    This exporter will only save cells generated with Altair/Vega if they have an SVG image type
    stored with them. This data is only stored if our fork of `ipyvega` is installed or the onecodex
    renderer is used -- otherwise, they will be low-resolution PNGs, which will not be exported.
    """
    # Work on a copy so the caller's notebook object is left untouched.
    nb = copy.deepcopy(nb)
    # setup our dictionary that's accessible from within jinja templates
    if resources is None:
        resources = {"metadata": {}}
    elif "metadata" not in resources:
        resources["metadata"] = {}
    # iterate over cells in the notebook and transform data as necessary
    do_not_insert_date = False
    for cell in nb.cells:
        if cell["cell_type"] == "code":
            for out in cell["outputs"]:
                # base64 encode SVGs otherwise Weasyprint can't render them. delete other
                # types of output in jupyter-vega cells (e.g., image/png, or javascript)
                if out.get("metadata") and out["metadata"].get("jupyter-vega"):
                    for mimetype in out.get("data", []):
                        if mimetype == "image/svg+xml":
                            img = b64encode(bytes(out["data"]["image/svg+xml"], encoding="UTF-8")).decode()
                            img = '<img src="data:image/svg+xml;charset=utf-8;base64,%s">' % (img,)
                            out["data"] = {"image/svg+xml": img}
                            break
                    else:
                        # No SVG stored with this vega cell: drop its output.
                        out["data"] = {}
                # transfer text/css blocks to HTML <head> tag
                elif out.get("metadata") and out["metadata"].get("onecodex") == "head.style":
                    for mimetype in out.get("data", []):
                        if mimetype == "text/css":
                            style_block = '<style type="text/css">{}</style>'.format(out["data"]["text/css"])
                            head_block = (resources["metadata"].get("head_block", "") + style_block)
                            resources["metadata"]["head_block"] = head_block
                            break
                    # we don't want this to be output as text, so clear it
                    out["data"] = {"text/plain": ""}
                # if there's a custom date specified, don't insert it
                elif out.get("metadata") and out["metadata"].get("onecodex") == "customdate":
                    do_not_insert_date = True
    # add one codex logo unless told not to
    if not os.environ.get("ONE_CODEX_REPORT_NO_LOGO", False):
        img = b64encode(bytes(open(os.path.join(ASSETS_PATH, "one_codex_logo.png"), "rb").read())).decode()
        img = "data:image/png;charset=utf-8;base64,%s" % (img,)
        logo_html = report.set_logo(img, position="right")._repr_mimebundle_()["text/html"]
        head_block = resources["metadata"].get("head_block", "") + logo_html
        resources["metadata"]["head_block"] = head_block
    # add today's date unless told not to (i.e. a custom date was specified)
    if not do_not_insert_date:
        date_div = report.set_date()._repr_mimebundle_()[0]["text/html"]
        head_block = resources["metadata"].get("head_block", "") + date_div
        resources["metadata"]["head_block"] = head_block
    # embed the default CSS
    css = open(os.path.join(ASSETS_PATH, CSS_TEMPLATE_FILE), "r").read()
    css = '<style type="text/css">{}</style>'.format(css)
    head_block = resources["metadata"].get("head_block", "") + css
    resources["metadata"]["head_block"] = head_block
    # tag this report for traceability, if run from notebook service. these will be transferred
    # to PDF metadata if the HTML output of this function is used as input for PDF generation
    meta_tags = [("dcterms.created", datetime.datetime.now(pytz.utc).isoformat())]
    user_uuid = os.environ.get("ONE_CODEX_USER_UUID")
    if user_uuid is not None:
        meta_tags.append(("author", "one_codex_user_uuid_{}".format(user_uuid)))
    nb_uuid = os.environ.get("ONE_CODEX_NOTEBOOK_UUID")
    if nb_uuid is not None:
        meta_tags.append(("author", "one_codex_notebook_uuid_{}".format(nb_uuid)))
    meta_html = ""
    for meta_name, meta_val in meta_tags:
        meta_html += '<meta name="{}" content="{}" />\n'.format(meta_name, meta_val)
    head_block = resources["metadata"].get("head_block", "") + meta_html
    resources["metadata"]["head_block"] = head_block
    output, resources = super(OneCodexHTMLExporter, self).from_notebook_node(nb, resources=resources, **kw)
    return output, resources
|
def get_part(self, vertex_in, vertices_border):
    """Flood-fill the vertices connected to *vertex_in*, stopping at a border.

    Lists all vertices that are connected to ``vertex_in`` but are not
    included in, or 'behind', ``vertices_border``.

    :param vertex_in: starting vertex; always part of the result.
    :param vertices_border: vertices that block expansion; they are never
        added to the result.
    :return: set of vertices in the connected part.
    """
    part = {vertex_in}
    frontier = set(self.neighbors[vertex_in])
    while frontier:
        candidate = frontier.pop()
        if candidate in vertices_border:
            # Border vertices stop the flood fill.
            continue
        part.add(candidate)
        # Queue the not-yet-visited neighbors of the new vertex.
        frontier |= set(self.neighbors[candidate]) - part
    return part
|
def do_function(self, prov, func, kwargs):
    '''Perform a function against a cloud provider.

    :param prov: provider lookup string used to find a single matching
        alias/driver pair.
    :param func: name of the function defined by the driver module.
    :param kwargs: keyword arguments forwarded to the function, if any.
    :return: ``{alias: {driver: <function result>}}``.
    :raises SaltCloudSystemExit: if more than one provider matches, or the
        driver does not define the requested function.
    '''
    matches = self.lookup_providers(prov)
    if len(matches) > 1:
        # Ambiguous lookup: force the caller to specify alias:driver.
        raise SaltCloudSystemExit('More than one results matched \'{0}\'. Please specify ' 'one of: {1}'.format(prov, ', '.join(['{0}:{1}'.format(alias, driver) for (alias, driver) in matches])))
    alias, driver = matches.pop()
    # Driver functions are registered under '<driver>.<func>'.
    fun = '{0}.{1}'.format(driver, func)
    if fun not in self.clouds:
        raise SaltCloudSystemExit('The \'{0}\' cloud provider alias, for the \'{1}\' driver, does ' 'not define the function \'{2}\''.format(alias, driver, func))
    log.debug('Trying to execute \'%s\' with the following kwargs: %s', fun, kwargs)
    # Temporarily inject the active provider name into the cloud module's
    # globals for the duration of the call.
    with salt.utils.context.func_globals_inject(self.clouds[fun], __active_provider_name__=':'.join([alias, driver])):
        if kwargs:
            return {alias: {driver: self.clouds[fun](call='function', kwargs=kwargs)}}
        return {alias: {driver: self.clouds[fun](call='function')}}
|
def lsst_doc_shortlink_role(name, rawtext, text, lineno, inliner, options=None, content=None):
    """Link to LSST documents given their handle, using LSST's ls.st link shortener.

    Example::

        :ldm:`151`
    """
    options = {} if not options else options
    content = [] if not content else content
    label = '{0}-{1}'.format(name.upper(), text)
    url = 'https://ls.st/{0}-{1}'.format(name, text)
    node = nodes.reference(text=label, refuri=url, **options)
    return [node], []
|
def delete_domain(self, domain_or_name):
    """Delete a SimpleDB domain.

    .. caution:: This will delete the domain and all items within the domain.

    :type domain_or_name: string or :class:`boto.sdb.domain.Domain` object.
    :param domain_or_name: Either the name of a domain or a Domain object
    :rtype: bool
    :return: True if successful
    """
    _, domain_name = self.get_domain_and_name(domain_or_name)
    return self.get_status('DeleteDomain', {'DomainName': domain_name})
|
def get_channel_info(self):
    """Return a dictionary of channel information key/value pairs.

    Values are passed through ``_clean``; the ``channel_number`` entry is
    additionally coerced to ``int``.
    """
    self.assert_open()
    raw_attrs = self.handle[self.global_key + 'channel_id'].attrs.items()
    info = {name: _clean(val) for name, val in raw_attrs}
    info['channel_number'] = int(info['channel_number'])
    return info
|
def parse_datetime(value):
    """Parse *value* into a ``datetime.datetime`` instance.

    Args:
        value: A timestamp. This can be a string or datetime.datetime
            value; a falsy value (``None``, empty string, 0) yields
            ``None``.
    """
    if not value:
        return None
    if isinstance(value, datetime.datetime):
        # Already parsed: pass through unchanged.
        return value
    return dateutil.parser.parse(value)
|
def magic_plan_b(filename):
    '''Determine a file's MIME description with the ``file`` command.

    Use this in instances where python-magic is MIA and can't be installed
    for whatever reason.

    :param filename: path of the file to inspect.
    :return: MIME string, e.g. ``'text/plain; charset=us-ascii'``.
    '''
    # Pass the argv as a list: the previous shlex.split() on a concatenated
    # string broke for filenames containing spaces or shell-quoting
    # characters.
    cmd = ['file', '--mime-type', '--mime-encoding', filename]
    stdout, _ = Popen(cmd, stdout=PIPE).communicate()
    stdout = stdout.decode("utf-8")
    # Output is "<filename>: <mime>"; keep only the mime portion.
    mime_str = stdout.split(filename + ': ')[1].strip()
    return mime_str
|
def version(versioninfo=False):
    '''.. versionadded:: 2015.8.0

    Returns the version of Git installed on the minion

    versioninfo : False
        If ``True``, return the version in a versioninfo list (e.g.
        ``[2, 5, 0]``)

    CLI Example:

    .. code-block:: bash

        salt myminion git.version
    '''
    # Both keys are cached in __context__ so 'git --version' runs at most
    # once per minion process.
    contextkey = 'git.version'
    contextkey_info = 'git.versioninfo'
    if contextkey not in __context__:
        try:
            version_ = _git_run(['git', '--version'])['stdout']
        except CommandExecutionError as exc:
            log.error('Failed to obtain the git version (error follows):\n%s', exc)
            version_ = 'unknown'
        try:
            # Output looks like 'git version 2.x.y'; keep the last token.
            __context__[contextkey] = version_.split()[-1]
        except IndexError:
            # Somehow git --version returned no stdout while not raising an
            # error. Should never happen but we should still account for this
            # possible edge case.
            log.error('Running \'git --version\' returned no stdout')
            __context__[contextkey] = 'unknown'
    if not versioninfo:
        return __context__[contextkey]
    if contextkey_info not in __context__:
        # Set ptr to the memory location of __context__[contextkey_info] to
        # prevent repeated dict lookups
        ptr = __context__.setdefault(contextkey_info, [])
        for part in __context__[contextkey].split('.'):
            try:
                ptr.append(int(part))
            except ValueError:
                # Non-numeric component (e.g. 'unknown'); keep as string.
                ptr.append(part)
    return __context__[contextkey_info]
|
def grab_names_from_emails(email_list):
    """Return a dictionary mapping email addresses to display names.

    Each address is looked up in the staff API/JSON (``STAFF_LIST``),
    whose entries look like::

        'email': 'foo@bar.net',
        'fullName': 'Frank Oo'

    Addresses with no staff entry map to themselves, so every input
    email appears as a key in the result.

    :param email_list: iterable of email address strings
    :return: dict of ``{email: full name or email}``
    """
    all_staff = STAFF_LIST
    emails_names = {}
    for email in email_list:
        for person in all_staff:
            if email == person['email'] and email not in emails_names:
                emails_names[email] = person['fullName']
    # Fall back to the raw address for anyone not found in the staff
    # list.  (A direct membership test replaces the original O(n^2)
    # inner scan over the dict.)
    for email in email_list:
        if email not in emails_names:
            emails_names[email] = email
    return emails_names
|
def _addAccountRights(sidObject, user_right):
    '''helper function to add an account right to a user

    :param sidObject: PySID object identifying the target account
    :param user_right: name of the right to grant, e.g. ``SeServiceLogonRight``
    :return: True on success, False on failure or when no SID was supplied
    '''
    # Bail out early: the original implementation fell through and
    # implicitly returned None (not False) when sidObject was falsy.
    if not sidObject:
        return False
    try:
        _polHandle = win32security.LsaOpenPolicy(None, win32security.POLICY_ALL_ACCESS)
        user_rights_list = [user_right]
        win32security.LsaAddAccountRights(_polHandle, sidObject, user_rights_list)
        return True
    # TODO: This needs to be more specific
    except Exception as e:
        log.exception('Error attempting to add account right, exception was %s', e)
        return False
|
def _connect_secureish(*args, **kwargs):
    """Connect using the safest available options.

    Encryption is always turned on; certificate validation is enabled as
    well when the installed boto is new enough to support it (2.6.0+).
    Versions below 2.6 don't accept the validate_certs option to
    S3Connection, and enabling it via configuration option just seems to
    cause an error.
    """
    boto_version = tuple(int(piece) for piece in boto.__version__.split('.'))
    if boto_version >= (2, 6, 0):
        kwargs['validate_certs'] = True
    kwargs['is_secure'] = True
    # Pull out our private option before handing the rest to boto.
    auth_region_name = kwargs.pop('auth_region_name', None)
    conn = connection.S3Connection(*args, **kwargs)
    if auth_region_name:
        conn.auth_region_name = auth_region_name
    return conn
|
def create_port(name, network, device_id=None, admin_state_up=True, profile=None):
    '''Creates a new port

    CLI Example:

    .. code-block:: bash

        salt '*' neutron.create_port network-name port-name

    :param name: Name of port to create
    :param network: Network name or ID
    :param device_id: ID of device (Optional)
    :param admin_state_up: Set admin state up to true or false,
        default: true (Optional)
    :param profile: Profile to build on (Optional)
    :return: Created port information
    '''
    neutron_interface = _auth(profile)
    return neutron_interface.create_port(name, network, device_id, admin_state_up)
|
def factoring_qaoa(n_step, num, minimizer=None, sampler=None, verbose=True):
    """Do the factoring QAOA.

    :param n_step: The number of steps of QAOA
    :param num: The number to be factored
    :param minimizer: Optional classical minimizer forwarded to Vqe
    :param sampler: Optional sampler forwarded to Vqe
    :returns: ``(Vqe instance for the QAOA ansatz, bit-separator function)``
    """
    def get_nbit(n):
        # Smallest m such that 2**m >= n, i.e. bits needed to encode n.
        m = 1
        while 2 ** m < n:
            m += 1
        return m
    # Candidate factors p and q are encoded in n1_bits and n2_bits qubits
    # sized around sqrt(num).
    n1_bits = get_nbit(int(num ** 0.5)) - 1
    n2_bits = get_nbit(int(num ** 0.5))
    def mk_expr(offset, n):
        # Builds 1 + sum_i 2**(i+1) * q(i+offset): an odd-valued candidate
        # factor as a Pauli expression over qubits [offset, offset+n).
        expr = pauli.Expr.from_number(1)
        for i in range(n):
            expr = expr + 2 ** (i + 1) * q(i + offset)
        return expr
    def bitseparator(bits):
        # Decode a measured bitstring back into the two factors (p, q),
        # mirroring the 1 + sum 2**m encoding used in mk_expr.
        assert len(bits) == n1_bits + n2_bits
        p = 1
        m = 1
        for b in bits[:n1_bits]:
            if b:
                p += 2 ** m
            m += 1
        q = 1
        m = 1
        for b in bits[n1_bits:]:
            if b:
                q += 2 ** m
            m += 1
        return p, q
    # Energy of this Hamiltonian is zero exactly when num == p * q.
    hamiltonian = (num - mk_expr(0, n1_bits) * mk_expr(n1_bits, n2_bits)) ** 2
    return vqe.Vqe(vqe.QaoaAnsatz(hamiltonian, n_step), minimizer, sampler), bitseparator
|
def unformat_rule(celf, rule):
    "converts a match rule string from the standard syntax to a dict of {key : value} entries."
    # A dict passes through unchanged.  A string is parsed character by
    # character with a small state machine (states come from celf.PARSE):
    #   EXPECT_NAME           -- accumulating an attribute name
    #   EXPECT_UNQUOTED_VALUE -- accumulating a value after '='
    #   EXPECT_QUOTED_VALUE   -- inside '...' (commas etc. are literal)
    #   EXPECT_ESCAPED        -- just saw a backslash in an unquoted value
    if isinstance(rule, dict):
        pass
    elif isinstance(rule, str):
        PARSE = celf.PARSE
        parsed = {}
        chars = iter(rule)
        state = PARSE.EXPECT_NAME
        curname = None
        curval = None
        while True:
            ch = next(chars, None)
            if ch == None:
                # End of input: flush the final attribute, or complain if
                # we were left mid-escape or mid-quote.
                if state == PARSE.EXPECT_ESCAPED:
                    raise SyntaxError("missing character after backslash")
                elif state == PARSE.EXPECT_QUOTED_VALUE:
                    raise SyntaxError("missing closing apostrophe")
                else:
                    # state in (PARSE.EXPECT_NAME, PARSE.EXPECT_UNQUOTED_VALUE)
                    if curname != None:
                        if curval != None:
                            if curname in parsed:
                                raise SyntaxError("duplicated attribute “%s”" % curname)
                            # end if
                            parsed[curname] = curval
                        else:
                            raise SyntaxError("missing value for attribute “%s”" % curname)
                        # end if
                    # end if
                # end if
                break
            # end if
            if state == PARSE.EXPECT_ESCAPED:
                # Backslash only escapes an apostrophe; anything else keeps
                # the backslash literally, followed by the character.
                if ch == "'":
                    usech = ch
                    nextch = None
                else:
                    usech = "\\"
                    nextch = ch
                # end if
                ch = usech
                if curval == None:
                    curval = ch
                else:
                    curval += ch
                # end if
                ch = nextch
                # None indicates already processed
                state = PARSE.EXPECT_UNQUOTED_VALUE
            # end if
            if ch != None:
                if ch == "," and state != PARSE.EXPECT_QUOTED_VALUE:
                    # Comma terminates a name=value pair.
                    if state == PARSE.EXPECT_UNQUOTED_VALUE:
                        if curname in parsed:
                            raise SyntaxError("duplicated attribute “%s”" % curname)
                        # end if
                        if curval == None:
                            curval = ""
                        # end if
                        parsed[curname] = curval
                        curname = None
                        curval = None
                        state = PARSE.EXPECT_NAME
                    else:
                        raise SyntaxError("unexpected comma")
                    # end if
                elif ch == "\\" and state != PARSE.EXPECT_QUOTED_VALUE:
                    if state == PARSE.EXPECT_UNQUOTED_VALUE:
                        state = PARSE.EXPECT_ESCAPED
                    else:
                        raise SyntaxError("unexpected backslash")
                    # end if
                elif ch == "=" and state != PARSE.EXPECT_QUOTED_VALUE:
                    # '=' switches from the name to its value.
                    if curname == None:
                        raise SyntaxError("empty attribute name")
                    # end if
                    if state == PARSE.EXPECT_NAME:
                        state = PARSE.EXPECT_UNQUOTED_VALUE
                    else:
                        raise SyntaxError("unexpected equals sign")
                    # end if
                elif ch == "'":
                    # Apostrophes toggle quoted mode inside a value.
                    if state == PARSE.EXPECT_UNQUOTED_VALUE:
                        state = PARSE.EXPECT_QUOTED_VALUE
                    elif state == PARSE.EXPECT_QUOTED_VALUE:
                        state = PARSE.EXPECT_UNQUOTED_VALUE
                    else:
                        raise SyntaxError("unexpected apostrophe")
                    # end if
                else:
                    # Ordinary character: append to the name or the value.
                    if state == PARSE.EXPECT_NAME:
                        if curname == None:
                            curname = ch
                        else:
                            curname += ch
                        # end if
                    elif state in (PARSE.EXPECT_QUOTED_VALUE, PARSE.EXPECT_UNQUOTED_VALUE):
                        if curval == None:
                            curval = ch
                        else:
                            curval += ch
                        # end if
                    else:
                        raise AssertionError("shouldn’t occur: parse state %s" % repr(state))
                    # end if
                # end if
            # end if
        # end while
        rule = parsed
    else:
        raise TypeError("rule “%s” must be a dict or string" % repr(rule))
    # end if
    return rule
|
def validate_usage(self, key_usage, extended_key_usage=None, extended_optional=False):
    """Validates the certificate path and that the certificate is valid for
    the key usage and extended key usage purposes specified.

    :param key_usage:
        A set of unicode strings of the required key usage purposes. Valid
        values include:

         - "digital_signature"
         - "non_repudiation"
         - "key_encipherment"
         - "data_encipherment"
         - "key_agreement"
         - "key_cert_sign"
         - "crl_sign"
         - "encipher_only"
         - "decipher_only"

    :param extended_key_usage:
        A set of unicode strings of the required extended key usage
        purposes. These must be either dotted number OIDs, or one of the
        following extended key usage purposes:

         - "server_auth"
         - "client_auth"
         - "code_signing"
         - "email_protection"
         - "ipsec_end_system"
         - "ipsec_tunnel"
         - "ipsec_user"
         - "time_stamping"
         - "ocsp_signing"
         - "wireless_access_points"

        An example of a dotted number OID:

         - "1.3.6.1.5.5.7.3.1"

    :param extended_optional:
        A bool - if the extended_key_usage extension may be omitted and still
        considered valid

    :raises:
        certvalidator.errors.PathValidationError - when an error occurs validating the path
        certvalidator.errors.RevokedError - when the certificate or another certificate in its path has been revoked
        certvalidator.errors.InvalidCertificateError - when the certificate is not valid for the usages specified

    :return:
        A certvalidator.path.ValidationPath object of the validated
        certificate validation path
    """
    # Build/check the chain first; populates self._context and self._path.
    self._validate_path()
    # Delegate the usage checks to the module-level validate_usage().
    validate_usage(self._context, self._certificate, key_usage, extended_key_usage, extended_optional)
    return self._path
|
def write_temp_file(content):
    """Writes some content into a temporary file and returns it.

    :param content: The file content; ``str`` is encoded as UTF-8,
        ``bytes`` is written verbatim
    :returns: The temporary file (removed automatically when closed)
    :rtype: file-like object
    """
    # NamedTemporaryFile opens its file in binary mode, so writing a str
    # would raise TypeError on Python 3 -- encode text transparently.
    if isinstance(content, str):
        content = content.encode('utf-8')
    f_temp = NamedTemporaryFile(delete=True)
    f_temp.file.write(content)
    # Flush so the content is visible to readers opening the file by name
    # while the handle is still open.
    f_temp.file.flush()
    return f_temp
|
def read_object_counter(node_uri, epoch_field, dry_run=False):
    """Reads the object counter for the given epoch/field on the specified node.

    @param node_uri: URI of the node whose counter property is read
    @param epoch_field: epoch/field identifier the counter is tagged with
    @param dry_run: when True, read the dry-run variant of the counter tag
    @return: the current object count.
    """
    # The counter is stored as a node property; build_counter_tag maps the
    # epoch/field (and dry-run flag) to the property name.
    return get_property(node_uri, build_counter_tag(epoch_field, dry_run), ossos_base=True)
|
def get_markable(self, markable_id):
    """Return the markable object for the supplied identifier.

    @type markable_id: string
    @param markable_id: term identifier
    @return: a Cmarkable wrapper, or None when the id is unknown
    """
    # Unknown identifiers yield None rather than raising.
    if markable_id not in self.idx:
        return None
    return Cmarkable(self.idx[markable_id], self.type)
|
def pick_sdf(filename, directory=None):
    """Returns a full path to the chosen SDF file. The supplied file
    is not expected to contain a recognised SDF extension; this is added
    automatically.

    If a file with the extension `.sdf.gz` or `.sdf` is found, the full
    path to it (including that extension) is returned. If this fails,
    `None` is returned.

    :param filename: The SDF file basename, whose path is required.
    :type filename: ``str``
    :param directory: An optional directory.
        If not provided it is calculated automatically.
    :type directory: ``str``
    :return: The full path to the file including its extension,
        or None if it does not exist
    :rtype: ``str``
    """
    if directory is None:
        directory = utils.get_undecorated_calling_module()
        # If the 'cwd' is not '/output' (which indicates we're in a Container)
        # then remove the CWD and the anticipated '/'
        # from the front of the module
        if os.getcwd() not in ['/output']:
            directory = directory[len(os.getcwd()) + 1:]
    file_path = os.path.join(directory, filename)
    # Prefer the compressed variant when both exist.
    if os.path.isfile(file_path + '.sdf.gz'):
        return file_path + '.sdf.gz'
    elif os.path.isfile(file_path + '.sdf'):
        return file_path + '.sdf'
    # Couldn't find a suitable SDF file
    return None
|
def delete(self):
    """Deletes a folder from the Exchange store. ::

        folder = service.folder().get_folder(id)
        print("Deleting folder: %s" % folder.display_name)
        folder.delete()
    """
    # A folder that was never created on the server has no id to delete.
    if not self.id:
        raise TypeError(u"You can't delete a folder that hasn't been created yet.")
    request_body = soap_request.delete_folder(self)
    self.service.send(request_body)  # noqa
    # TODO: verify deletion
    # Clear the cached server identifiers now that the folder is gone.
    self._id = None
    self._change_key = None
    return None
|
def candidates(self):
    """Generates list of candidates from the DOM."""
    dom = self.dom
    # Nothing to score on a missing or empty document.
    if dom is None or len(dom) == 0:
        return None
    likely, unlikely = find_candidates(dom)
    # Prune subtrees rooted at nodes judged unlikely to hold content.
    drop_nodes_with_parents(unlikely)
    return likely
|
def get_or_create_user(self, username, password):
    '''Get or create the given user, refreshing its attributes from
    Active Directory.'''
    # Pull name/email/group information from AD first.
    info = self.get_ad_info(username, password)
    self.debug("INFO found: {}".format(info))
    # Fetch the existing Django user, or start a fresh one.
    try:
        user = User.objects.get(username=username)
    except User.DoesNotExist:
        user = User(username=username)
    # Refresh basic attributes from the directory.
    user.first_name = info.get('first_name', '')
    user.last_name = info.get('last_name', '')
    user.email = info.get('email', '')
    # Staff/superuser status mirrors membership in any domain's
    # 'Domain Admins' group.
    is_admin = any('Domain Admins' in members for members in info['groups'].values())
    user.is_staff = is_admin
    user.is_superuser = is_admin
    # Refresh the password
    user.set_password(password)
    # Validate the selected user and gotten information
    user = self.validate(user, info)
    if user:
        self.debug("User got validated!")
        # Autosave the user until this point
        user.save()
        # Synchronize user
        self.synchronize(user, info)
    else:
        self.debug("User didn't pass validation!")
    # Finally return user
    return user
|
def save(self):
    """Persist config changes by rewriting the configuration file."""
    with open(self._config_file_path, 'w') as config_fp:
        self._config_parser.write(config_fp)
|
def index_name(table, columns):
    """Generate an artificial index name.

    The name is deterministic: it embeds the table name plus the first 16
    hex digits of the SHA-1 of the '||'-joined column list.
    """
    signature = '||'.join(columns)
    digest = sha1(signature.encode('utf-8')).hexdigest()
    return 'ix_%s_%s' % (table, digest[:16])
|
def left_overlaps(self, other, min_overlap_size=1):
    """Does this VariantSequence overlap another on the left side?"""
    # The alt alleles must be identical for the sequences to combine.
    if self.alt != other.alt:
        return False
    # Only consider strings that overlap like:
    #   self:  ppppAssss
    #   other:   ppAsssss
    # i.e. other must not have a longer prefix, nor a shorter suffix,
    # than self.
    if len(other.prefix) > len(self.prefix) or len(other.suffix) < len(self.suffix):
        return False
    # Do the flanking sequences actually agree on the shared region?
    flanks_agree = (
        self.prefix.endswith(other.prefix)
        and other.suffix.startswith(self.suffix))
    shared_length = (
        min(len(self.prefix), len(other.prefix))
        + min(len(other.suffix), len(self.suffix))
        + len(self.alt))
    return flanks_agree and shared_length >= min_overlap_size
|
def run(self, data_dir=None):
    """Run all scheduled experiments and consolidate their results.

    Note: this function will check the experiments directory for a
    special file, scheduler.info, that details how often each
    experiment should be run and the last time the experiment was
    run. If the time since the experiment was run is shorter than
    the scheduled interval in seconds, then the experiment will
    not be run.

    :param data_dir: optional override for the data directory (used by
        the Android build)
    :return:
    """
    # XXX: android build needs this. refactor
    if data_dir:
        centinel_home = data_dir
        self.config['dirs']['results_dir'] = os.path.join(centinel_home, 'results')
    logging.info('Centinel started.')
    if not os.path.exists(self.config['dirs']['results_dir']):
        logging.warn("Creating results directory in "
                     "%s" % (self.config['dirs']['results_dir']))
        os.makedirs(self.config['dirs']['results_dir'])
    logging.debug("Results directory: %s" % (self.config['dirs']['results_dir']))
    # load scheduler information
    sched_filename = os.path.join(self.config['dirs']['experiments_dir'], 'scheduler.info')
    logging.debug("Loading scheduler file.")
    sched_info = {}
    if os.path.exists(sched_filename):
        with open(sched_filename, 'r') as file_p:
            try:
                sched_info = json.load(file_p)
            except Exception as exp:
                # A corrupt scheduler file aborts the whole run.
                logging.error("Failed to load the "
                              "scheduler: %s" % str(exp))
                return
        logging.debug("Scheduler file loaded.")
    logging.debug("Processing the experiment schedule.")
    for name in sched_info:
        # check if we should preempt on the experiment (if the
        # time to run next is greater than the current time) and
        # store the last run time as now
        # Note: if the experiment is not in the scheduler, then it
        # will not be run at all.
        run_next = sched_info[name]['last_run']
        run_next += sched_info[name]['frequency']
        if run_next > time.time():
            # NOTE: long() makes this Python-2-only code.
            run_next_str = datetime.fromtimestamp(long(run_next))
            logging.debug("Skipping %s, it will "
                          "be run on or after %s." % (name, run_next_str))
            continue
        # backward compatibility with older-style scheduler
        if 'python_exps' not in sched_info[name]:
            self.run_exp(name=name)
        else:
            exps = sched_info[name]['python_exps'].items()
            for python_exp, exp_config in exps:
                logging.debug("Running %s." % python_exp)
                self.run_exp(name=python_exp, exp_config=exp_config, schedule_name=name)
                logging.debug("Finished running %s." % python_exp)
        sched_info[name]['last_run'] = time.time()
    logging.debug("Updating timeout values in scheduler.")
    # write out the updated last run times
    with open(sched_filename, 'w') as file_p:
        json.dump(sched_info, file_p, indent=2, separators=(',', ': '))
    self.consolidate_results()
    logging.info("Finished running experiments. "
                 "Look in %s for results." % (self.config['dirs']['results_dir']))
|
def unstructure_attrs_asdict(self, obj):  # type: (Any) -> Dict[str, Any]
    """Our version of `attrs.asdict`, so we can call back to us."""
    dispatch = self._unstructure_func.dispatch
    result = self._dict_factory()
    # Walk the attrs-declared fields and unstructure each value through
    # the converter registered for its runtime class.
    for attribute in obj.__class__.__attrs_attrs__:
        value = getattr(obj, attribute.name)
        result[attribute.name] = dispatch(value.__class__)(value)
    return result
|
def _show_tables(self, *args):
    """print the existing tables within the 'doc' schema"""
    server_version = self.connection.lowest_server_version
    # Older CrateDB servers expose the schema under a different column
    # name and have no table_type column at all.
    if server_version >= TABLE_SCHEMA_MIN_VERSION:
        schema_column = "table_schema"
    else:
        schema_column = "schema_name"
    type_clause = ""
    if server_version >= TABLE_TYPE_MIN_VERSION:
        type_clause = " AND table_type = 'BASE TABLE'"
    self._exec(
        "SELECT format('%s.%s', {schema}, table_name) AS name "
        "FROM information_schema.tables "
        "WHERE {schema} NOT IN ('sys','information_schema', 'pg_catalog')"
        "{table_filter}".format(schema=schema_column, table_filter=type_clause))
|
def _is_converged ( self , dual_threshold = None , integrality_gap_threshold = None ) :
"""This method checks the integrality gap to ensure either :
* we have found a near to exact solution or
* stuck on a local minima .
Parameters
dual _ threshold : double
This sets the minimum width between the dual objective decrements . If the decrement is lesser
than the threshold , then that means we have stuck on a local minima .
integrality _ gap _ threshold : double
This sets the threshold for the integrality gap below which we say that the solution
is satisfactory .
References
code presented by Sontag in 2012 here : http : / / cs . nyu . edu / ~ dsontag / code / README _ v2 . html"""
|
# Find the new objective after the message updates
new_dual_lp = sum ( [ np . amax ( self . objective [ obj ] . values ) for obj in self . objective ] )
# Update the dual _ gap as the difference between the dual objective of the previous and the current iteration .
self . dual_gap = abs ( self . dual_lp - new_dual_lp )
# Update the integrality _ gap as the difference between our best result vs the dual objective of the lp .
self . integrality_gap = abs ( self . dual_lp - self . best_int_objective )
# As the decrement of the dual _ lp gets very low , we assume that we might have stuck in a local minima .
if dual_threshold and self . dual_gap < dual_threshold :
return True
# Check the threshold for the integrality gap
elif integrality_gap_threshold and self . integrality_gap < integrality_gap_threshold :
return True
else :
self . dual_lp = new_dual_lp
return False
|
def find_path_join_using_plus(node):
    """Finds joining path with plus"""
    # Match the shape  <expr> + <sep> + <expr>  where <sep> is the string
    # literal '/' or '\\' (i.e. manual path concatenation).
    if not (isinstance(node, ast.BinOp) and isinstance(node.op, ast.Add)):
        return False
    left = node.left
    if not (isinstance(left, ast.BinOp) and isinstance(left.op, ast.Add)):
        return False
    separator = left.right
    return isinstance(separator, ast.Str) and separator.s in ['/', "\\"]
|
def _import_next_layer(self, proto, length):
    """Import next layer extractor.

    Positional arguments:
        * proto -- str, next layer protocol name
        * length -- int, valid (not padding) length

    Returns:
        * the parsed next-layer object (NoPayload when length is zero)
    """
    # When the extraction target is 'null'/'None' we only need the raw
    # payload bytes; otherwise defer to the full protocol analyser.
    if self._exproto == 'null' and self._exlayer == 'None':
        from pcapkit.protocols.raw import Raw as NextLayer
    else:
        from pcapkit.foundation.analysis import analyse as NextLayer
    # from pcapkit.foundation.analysis import analyse as NextLayer
    if length == 0:
        next_ = NoPayload()
    elif self._onerror:
        # beholder_ng wraps the extractor so that parse errors are caught
        # instead of aborting the whole extraction.
        next_ = beholder_ng(NextLayer)(self._file, length, _termination=self._sigterm)
    else:
        next_ = NextLayer(self._file, length, _termination=self._sigterm)
    return next_
|
def _generate_docstring_return_section(self, return_vals, header, return_element_name, return_element_type, placeholder, indent):
    """Generate the Returns section of a function/method docstring.

    Heuristically inspects the collected return expressions and emits one
    documented element per returned value (or tuple position) when they
    can be determined unambiguously, falling back to ``placeholder``
    lines otherwise.
    """
    # If all return values are None, return none
    non_none_vals = [return_val for return_val in return_vals if return_val and return_val != 'None']
    if not non_none_vals:
        return header + indent + 'None.'
    # Get only values with matching brackets that can be cleaned up
    non_none_vals = [return_val.strip(' ()\t\n').rstrip(',') for return_val in non_none_vals]
    # Collapse quoted literals so commas/brackets inside strings don't
    # confuse the bracket matching below.
    non_none_vals = [re.sub('([\"\'])(?:(?=(\\\\?))\\2.)*?\\1', '"string"', return_val) for return_val in non_none_vals]
    unambiguous_vals = []
    for return_val in non_none_vals:
        try:
            cleaned_val = self.find_top_level_bracket_locations(return_val)
        except IndexError:
            # Unbalanced brackets: skip this return expression.
            continue
        unambiguous_vals.append(cleaned_val)
    if not unambiguous_vals:
        return header + placeholder
    # If remaining are a mix of tuples and not, return single placeholder
    single_vals, tuple_vals = [], []
    for return_val in unambiguous_vals:
        (tuple_vals.append(return_val) if ',' in return_val else single_vals.append(return_val))
    if single_vals and tuple_vals:
        return header + placeholder
    # If return values are tuples of different length, return a placeholder
    if tuple_vals:
        num_elements = [return_val.count(',') + 1 for return_val in tuple_vals]
        if num_elements.count(num_elements[0]) != len(num_elements):
            return header + placeholder
        num_elements = num_elements[0]
    else:
        num_elements = 1
    # If all have the same len but some ambiguous return that placeholders
    if len(unambiguous_vals) != len(non_none_vals):
        return header + '\n'.join([placeholder for __ in range(num_elements)])
    # Handle tuple (or single) values position by position
    return_vals_grouped = zip(*[[return_element.strip() for return_element in return_val.split(',')] for return_val in unambiguous_vals])
    return_elements_out = []
    for return_vals_group in return_vals_grouped:
        return_elements_out.append(self.parse_return_elements(return_vals_group, return_element_name, return_element_type, placeholder))
    return header + '\n'.join(return_elements_out)
|
def accounts_balances(self, accounts):
    """Returns how many RAW is owned and how many have not yet been
    received by **accounts** list

    :param accounts: list of accounts to return balances for
    :type accounts: list of str

    :raises: :py:exc:`nano.rpc.RPCException`

    >>> rpc.accounts_balances(
    ...     accounts=[
    ...         "xrb_3e3j5tkog48pnny9dmfzj1r16pg8t1e76dz5tmac6iq689wyjfpi00000",
    ...         "xrb_3i1aq1cchnmbn9x5rsbap8b15akfh7wj7pwskuzi7ahz8oq6cobd99d4r3b7"
    ...     ]
    ... )
    {
        "xrb_3e3j5tkog48pnny9dmfzj1r16pg8t1e76dz5tmac6iq689wyjfpi00000": {
            "balance": 10000, "pending": 10000
        },
        "xrb_3i1aq1cchnmbn9x5rsbap8b15akfh7wj7pwskuzi7ahz8oq6cobd99d4r3b7": {
            "balance": 100000, "pending": 0
        }
    }
    """
    accounts = self._process_value(accounts, 'list')
    response = self.call('accounts_balances', {"accounts": accounts})
    balances_by_account = response.get('balances') or {}
    # The RPC returns amounts as strings; normalize every field to int.
    for balances in balances_by_account.values():
        for field in balances:
            balances[field] = int(balances[field])
    return balances_by_account
|
def getSamplingWorkflowEnabled(self):
    """Returns True if the sample of this Analysis Request has to be
    collected by the laboratory personnel
    """
    # A template, when present, overrides the lab-wide setup value.
    template = self.getTemplate()
    return (template.getSamplingRequired() if template
            else self.bika_setup.getSamplingWorkflowEnabled())
|
def to_json(self):
    """Returns an input shard state for the remaining inputs.

    Returns:
        A json-izable version of the remaining InputReader.
    """
    # Extend the parent reader's serialized state with the reduce-phase
    # bookkeeping: the key being reduced and its accumulated values.
    result = super(_ReducerReader, self).to_json()
    result["current_key"] = self.encode_data(self.current_key)
    result["current_values"] = self.encode_data(self.current_values)
    return result
|
def get_json(environ):
    '''Return the request body deserialized from JSON

    :raises HTTPError 406: if the request Content-Type is not JSON
    :raises HTTPError 400: if the body is not valid JSON
    '''
    content_type = environ.get('CONTENT_TYPE', '')
    # Compare only the media type: a Content-Type header may legitimately
    # carry parameters, e.g. 'application/json; charset=utf-8', which the
    # previous strict equality check wrongly rejected.
    if content_type.split(';')[0].strip() != 'application/json':
        raise HTTPError(406, 'JSON required')
    try:
        return salt.utils.json.loads(read_body(environ))
    except ValueError as exc:
        raise HTTPError(400, exc)
|
def add_phase(self):
    """Context manager for when adding all the tokens"""
    # Hand control back to the caller so it can append tokens; the
    # cleanup below runs when the with-block exits.
    yield self
    # Make sure we output eveything
    self.finish_hanging()
    # Remove trailing indents and dedents
    while len(self.result) > 1 and self.result[-2][0] in (INDENT, ERRORTOKEN, NEWLINE):
        self.result.pop(-2)
|
def fetch_token(self, **kwargs):
    """Fetch a new token using the supplied code.

    :param str code: A previously obtained auth code (passed through
        ``kwargs`` to the underlying session).

    The client secret is filled in automatically when not supplied.

    NOTE(review): ``token_url`` is not defined in this method, so it is
    presumably a module-level constant -- verify.
    """
    if 'client_secret' not in kwargs:
        kwargs.update(client_secret=self.client_secret)
    return self.session.fetch_token(token_url, **kwargs)
|
def get_tracks_from_album(album_name):
    '''Gets tracks from an album using Spotify's API

    :param album_name: album title to search for
    :return: list of track-name strings of the first matching album
    '''
    spotify = spotipy.Spotify()
    # Take only the first search hit; an IndexError below means no album
    # matched the query.
    album = spotify.search(q='album:' + album_name, limit=1)
    album_id = album['tracks']['items'][0]['album']['id']
    results = spotify.album_tracks(album_id=str(album_id))
    # Comprehension replaces the manual append loop.
    return [item['name'] for item in results['items']]
|
def update(self, *args, **kwargs):
    """update() method will *recursively* update nested dict:

    >>> d = Dict({'a': {'b': {'c': 3, 'd': 4}, 'h': 4}})
    >>> d.update({'a': {'b': {'c': '888'}}})
    {'a': {'b': {'c': '888', 'd': 4}, 'h': 4}}

    please use update_dict() if you do not want this behaviour
    """
    for source in args:
        # Empty/falsy positional arguments are silently ignored.
        if not source:
            continue
        if isinstance(source, dict):
            for key, value in source.items():
                self._update_kv(key, value)
        elif isinstance(source, (list, tuple)) and (not isinstance(source[0], (list, tuple))):
            # A single flat (key, value) pair, e.g. ['k', 1].
            self._update_kv(source[0], source[1])
        elif isinstance(source, (list, tuple)) or isgenerator(source):
            # A sequence or generator of (key, value) pairs.
            for key, value in source:
                self._update_kv(key, value)
        else:
            raise TypeError("update does not understand "
                            "{0} types".format(type(source)))
    for key, value in kwargs.items():
        self._update_kv(key, value)
|
def baseline(y_true, y_score=None):
    """Number of positive labels divided by number of labels,
    or zero if there are no labels
    """
    # Empty label vector: avoid a division by zero.
    if not len(y_true):
        return 0.0
    return np.nansum(y_true) / count(y_true, countna=False)
|
def printClassTree(self, element=None, showids=False, labels=False, showtype=False):
    """Print nicely into stdout the class tree of an ontology

    Note: indentation is made so that ids up to 3 digits fit in, plus a space.
    [123]1--
    [1]123--
    [12]12--
    """
    TYPE_MARGIN = 11  # length for owl:class etc..
    if element:
        printGenericTree(element, 0, showids, labels, showtype, TYPE_MARGIN)
    else:
        # First call (no element given): walk every top-level class.
        for top in self.toplayer_classes:
            printGenericTree(top, 0, showids, labels, showtype, TYPE_MARGIN)
|
def get_setter(cls, prop_name,  # @NoSelf
               user_setter=None, setter_takes_name=False, user_getter=None, getter_takes_name=False):
    """The setter follows the rules of the getter. First search
    for property variable, then logical custom setter. If no setter
    is found, None is returned (i.e. the property is read-only.)
    """
    has_prop_variable = cls.has_prop_attribute(prop_name)
    # WARNING! These are deprecated
    has_specific_setter = hasattr(cls, SET_PROP_NAME % {'prop_name': prop_name})
    has_general_setter = hasattr(cls, SET_GENERIC_NAME)
    # No way to set the value at all -> the property is read-only.
    if not (has_prop_variable or has_specific_setter or has_general_setter or user_setter):
        return None
    if has_prop_variable:
        # A concrete attribute wins over any logical setter.
        if has_specific_setter or user_setter:
            logger.warning("In class %s.%s ignoring custom logical "
                           "setter for property '%s' as a "
                           "corresponding attribute exists"
                           % (cls.__module__, cls.__name__, prop_name))
            user_setter = user_getter = None
            setter_takes_name = getter_takes_name = False
    else:
        if user_setter:
            pass
        else:
            # Wrap the (deprecated) class-level setters so they can be
            # called uniformly through user_setter below.
            if has_specific_setter:
                def __setter(self, val):
                    _setter = getattr(self, SET_PROP_NAME % {'prop_name': prop_name})
                    _setter(val)
                    return
                user_setter = __setter
                # user_setter = getattr(cls, SET_PROP_NAME % \
                #                       {'prop_name': prop_name})
                setter_takes_name = False
            else:
                assert has_general_setter
                def __setter(self, name, val):
                    _setter = getattr(self, SET_GENERIC_NAME)
                    _setter(name, val)
                    return
                user_setter = __setter
                # user_setter = getattr(cls, SET_GENERIC_NAME)
                setter_takes_name = True
    # the final setter is a combination of a basic setter, and
    # the getter (see how inner_{getter,setter} are used in
    # _setter below)
    _inner_setter = PropertyMeta.get_setter(cls, prop_name, user_setter, setter_takes_name, user_getter, getter_takes_name)
    _inner_getter = type(cls).get_getter(cls, prop_name, user_getter, getter_takes_name)
    def _setter(self, val):
        # NOTE(review): _notify_stack guards against re-entrant
        # notification; the guarded extent below was reconstructed from
        # stripped indentation -- verify against upstream gtkmvc.
        curr_frame = len(self._notify_stack)
        if prop_name not in self._notify_stack:
            self._notify_stack.append(prop_name)
            old = _inner_getter(self)
            new = type(self).create_value(prop_name, val, self)
            # to track dependencies
            olds = self.__before_property_value_change__(prop_name) if self._has_observer() else ()
            self._notify_stack.extend(map(operator.itemgetter(1), olds))
            # this is the unique place where the value is set:
            _inner_setter(self, new)
            if type(self).check_value_change(old, new):
                self._reset_property_notification(prop_name, old)
                self.notify_property_value_change(prop_name, old, val)
            # to notify dependencies
            self.__after_property_value_change__(prop_name, olds)
        # Restore the stack to its depth on entry.
        del self._notify_stack[curr_frame:]
    return _setter
|
def rooms(request, template="rooms.html"):
    """Homepage - lists all rooms."""
    return render(request, template, {"rooms": ChatRoom.objects.all()})
|
def make_table(data, col_names):
    """Render rows of strings as an RST-formatted grid table.

    Code for this table generator comes from
    http://stackoverflow.com/a/11350643
    """
    n_cols = len(data[0])
    assert n_cols == len(col_names)
    # Column width: widest cell in the column, widened to fit the header.
    widths = [max(len(row[i]) for row in data) for i in range(n_cols)]
    widths = [max(width, len(cname)) for width, cname in zip(widths, col_names)]
    formatter = ' '.join('{:<%d}' % width for width in widths)
    divider = formatter.format(*['=' * width for width in widths])
    header = formatter.format(*col_names)
    body = '\n'.join(formatter.format(*row) for row in data)
    return '\n'.join((divider, header, divider, body, divider))
|
def read_wavefront(fname_obj):
    """Returns mesh dictionary along with their material dictionary from a
    wavefront (.obj and/or .mtl) file.
    """
    geoms = read_objfile(fname_obj)
    # Scan the .obj for an 'mtllib' directive naming the material file.
    fname_mtl = ''
    for line in open(fname_obj):
        if not line:
            continue
        pieces = line.strip().split(' ', 1)
        if len(pieces) < 2:
            continue
        keyword, payload = pieces[0], pieces[1]
        if 'mtllib' in keyword:
            fname_mtl = payload
            break
    if fname_mtl:
        # The .mtl path is relative to the .obj's directory.
        materials = read_mtlfile(path.join(path.dirname(fname_obj), fname_mtl))
        for geom in geoms.values():
            geom['material'] = materials[geom['usemtl']]
    return geoms
|
def _find_script ( script_name ) :
"""Find the script .
If the input is not a file , then $ PATH will be searched ."""
|
if os . path . isfile ( script_name ) :
return script_name
path = os . getenv ( 'PATH' , os . defpath ) . split ( os . pathsep )
for dir in path :
if dir == '' :
continue
fn = os . path . join ( dir , script_name )
if os . path . isfile ( fn ) :
return fn
print >> sys . stderr , 'Could not find script {0}' . format ( script_name )
raise SystemExit ( 1 )
|
def or_filter(self, filter_or_string, *args, **kwargs):
    """Append a clause to this query's :class:`~es_fluent.filters.core.Or`
    filter, creating the ``Or`` filter first when none exists yet.

    :return: the ``Or`` filter instance holding the new clause
    """
    existing = self.find_filter(Or)
    if existing is None:
        # No Or clause yet -- create and register one.
        existing = Or()
        self.filters.append(existing)
    new_clause = build_filter(filter_or_string, *args, **kwargs)
    existing.add_filter(new_clause)
    return existing
|
def export(self, name, columns, points):
    """Publish each (column, point) pair to the MQTT broker.

    Topic is built as topic/hostname/name/<sanitized column parts>.
    """
    allowed_chars = '_-' + string.ascii_letters + string.digits

    def sanitize(text):
        # Replace every character outside the whitelist with '_'.
        return ''.join(c if c in allowed_chars else '_' for c in text)

    for column, value in zip(columns, points):
        try:
            parts = [self.topic, self.hostname, name]
            parts.extend(sanitize(piece) for piece in column.split('.'))
            self.client.publish('/'.join(parts), value)
        except Exception as e:
            logger.error("Can not export stats to MQTT server (%s)" % e)
|
def run_graphviz(program, code, options=[], format='png'):
    """Run a graphviz *program* on *code* and return the rendered bytes.

    Copied from
    https://github.com/tkf/ipython-hierarchymagic/blob/master/hierarchymagic.py

    :param program: graphviz executable name (e.g. ``'dot'``)
    :param code: graph source text (``str``)
    :param options: extra command-line options (read-only, never mutated)
    :param format: output format passed via ``-T``
    :return: rendered image data (``bytes``)
    :raises RuntimeError: when the program exits with non-zero status
    """
    import os
    from subprocess import Popen, PIPE

    dot_args = [program] + options + ['-T', format]
    if os.name == 'nt':
        # Avoid opening a shell window on Windows.
        #   * https://github.com/tkf/ipython-hierarchymagic/issues/1
        #   * http://stackoverflow.com/a/2935727/727827
        p = Popen(dot_args, stdout=PIPE, stdin=PIPE, stderr=PIPE,
                  creationflags=0x08000000)
    else:
        p = Popen(dot_args, stdout=PIPE, stdin=PIPE, stderr=PIPE)

    wentwrong = False
    try:
        # Graphviz may close standard input when an error occurs,
        # resulting in a broken pipe on communicate().
        stdout, stderr = p.communicate(code.encode('utf-8'))
    except OSError as err:
        # On Python 3, IOError is an alias of OSError, so the original's
        # separate `except IOError` branch was unreachable dead code;
        # handle both errno values in one handler instead.
        if err.errno not in (EPIPE, EINVAL):
            raise
        wentwrong = True
    if wentwrong:
        # Read the output streams directly to recover the error message(s).
        stdout, stderr = p.stdout.read(), p.stderr.read()
        p.wait()
    if p.returncode != 0:
        raise RuntimeError('dot exited with error:\n[stderr]\n{0}'
                           .format(stderr.decode('utf-8')))
    return stdout
|
def _expand_json ( self , j ) :
"""Decompress the BLOB portion of the usernotes .
Arguments :
j : the JSON returned from the wiki page ( dict )
Returns a Dict with the ' blob ' key removed and a ' users ' key added"""
|
decompressed_json = copy . copy ( j )
decompressed_json . pop ( 'blob' , None )
# Remove BLOB portion of JSON
# Decode and decompress JSON
compressed_data = base64 . b64decode ( j [ 'blob' ] )
original_json = zlib . decompress ( compressed_data ) . decode ( 'utf-8' )
decompressed_json [ 'users' ] = json . loads ( original_json )
# Insert users
return decompressed_json
|
def framesToFrameRange(frames, sort=True, zfill=0, compress=False):
    """Convert an iterable of frames into a frame-range string.

    Args:
        frames (collections.Iterable): sequence of frames to process
        sort (bool): sort the sequence before processing
        zfill (int): width for zero padding
        compress (bool): remove any duplicates before processing

    Returns:
        str:
    """
    if compress:
        frames = unique(set(), frames)
    frame_list = list(frames)
    # Trivial cases first: nothing, or a single padded frame.
    if not frame_list:
        return ''
    if len(frame_list) == 1:
        return pad(frame_list[0], zfill)
    if sort:
        frame_list.sort()
    return ','.join(FrameSet.framesToFrameRanges(frame_list, zfill))
|
def gettempdir():
    """Accessor for tempfile.tempdir."""
    # Lazily initialise the module-level ``tempdir`` with double-checked
    # locking: the unlocked check keeps the common path cheap, and the
    # re-check under ``_once_lock`` ensures exactly one thread computes
    # the default directory.
    global tempdir
    if tempdir is None:
        _once_lock.acquire()
        try:
            if tempdir is None:
                tempdir = _get_default_tempdir()
        finally:
            _once_lock.release()
    return tempdir
|
def fix_config(self, options):
    """Fixes the options, if necessary. I.e., it adds all required elements to the dictionary.

    :param options: the options to fix
    :type options: dict
    :return: the (potentially) fixed options
    :rtype: dict
    """
    options = super(ClassSelector, self).fix_config(options)
    # Table of (option, default value, help text) handled uniformly.
    defaults = (
        ("index", "last",
         "The class index (1-based number); 'first' and 'last' are accepted as well (string)."),
        ("unset", False,
         "Whether to unset the class index (bool)."),
    )
    for opt, default, help_text in defaults:
        if opt not in options:
            options[opt] = default
        if opt not in self.help:
            self.help[opt] = help_text
    return options
|
def grid_distortion(img, num_steps=10, xsteps=None, ysteps=None,
                    interpolation=cv2.INTER_LINEAR,
                    border_mode=cv2.BORDER_REFLECT_101, value=None):
    """Distort *img* by remapping it through a perturbed regular grid.

    Reference:
    http://pythology.blogspot.sg/2014/03/interpolation-on-regular-distorted-grid.html

    :param img: input image (H x W [x C] array)
    :param num_steps: number of grid cells per axis
    :param xsteps: per-cell horizontal stretch factors (one per grid step)
    :param ysteps: per-cell vertical stretch factors (one per grid step)
    :param interpolation: cv2 interpolation flag for the remap
    :param border_mode: cv2 border mode for pixels mapped outside the image
    :param value: border value used with constant border modes
    :return: the distorted image
    """
    # Mutable-default fix: the original declared `xsteps=[]`/`ysteps=[]`.
    # `None` sentinels preserve the observable behavior (an empty list
    # still raises IndexError when indexed, exactly as before) without
    # sharing a mutable object across calls.
    if xsteps is None:
        xsteps = []
    if ysteps is None:
        ysteps = []
    height, width = img.shape[:2]

    def _axis_map(size, steps):
        # Per-pixel source coordinates for one axis: walk the grid cells,
        # stretching each by its step factor, clamping the final cell.
        step = size // num_steps
        coords = np.zeros(size, np.float32)
        prev = 0
        for idx, start in enumerate(range(0, size, step)):
            end = start + step
            if end > size:
                end = size
                cur = size
            else:
                cur = prev + step * steps[idx]
            coords[start:end] = np.linspace(prev, cur, end - start)
            prev = cur
        return coords

    xx = _axis_map(width, xsteps)
    yy = _axis_map(height, ysteps)
    map_x, map_y = np.meshgrid(xx, yy)
    map_x = map_x.astype(np.float32)
    map_y = map_y.astype(np.float32)
    return cv2.remap(img, map_x, map_y, interpolation=interpolation,
                     borderMode=border_mode, borderValue=value)
|
def create_snapshot(self, name):
    """POST /:login/machines/:id/snapshots

    Create a snapshot for this machine's current state with the given
    `name`.

    :param name: identifier for snapshot
    :type name: :py:class:`basestring`

    :rtype: :py:class:`smartdc.machine.Snapshot`
    """
    payload, _ = self.datacenter.request(
        'POST', self.path + '/snapshots', data={'name': name})
    return Snapshot(machine=self, data=payload, name=name)
|
def simulate_phases(self, phase_map: Dict[Tuple[int, ...], float]):
    """Simulate a set of phase gates on the xmon architecture.

    Args:
        phase_map: A map from a tuple of indices to a value, one for each
            phase gate being simulated.  A one-index key is a Z phase gate
            on that qubit; a two-index key is a |11> phasing gate on the
            qubits at the two indices.  The rotation angle is pi times the
            mapped value.
    """
    # Start from a clean scratchpad on every shard.
    self._pool.map(_clear_scratch, self._shard_num_args())
    # Dispatch on gate arity; keys other than 1 or 2 are ignored.
    accumulators = {
        1: _single_qubit_accumulate_into_scratch,
        2: _two_qubit_accumulate_into_scratch,
    }
    for indices, half_turns in phase_map.items():
        shard_args = self._shard_num_args(
            {'indices': indices, 'half_turns': half_turns})
        worker = accumulators.get(len(indices))
        if worker is not None:
            self._pool.map(worker, shard_args)
    # Exponentiate the accumulated phases into the state.
    self._pool.map(_apply_scratch_as_phase, self._shard_num_args())
|
def tolist(self):
    """The fastest way to convert the vector into a python list."""
    result = []
    # Collect the tree-stored elements first, then append the tail buffer.
    self._fill_list(self._root, self._shift, result)
    result.extend(self._tail)
    return result
|
def get(self):
    """Flushes the call_queue and returns the data.

    Note: Since this object is a simple wrapper, just return the data.

    Returns:
        The object that was `put`.
    """
    if not self.call_queue:
        return self.data.copy()
    # Applying the identity function forces any queued calls to run.
    return self.apply(lambda df: df).data
|
def get_selected_terms(term_doc_matrix, scores, num_term_to_keep=None):
    '''Pick the set of terms to display.

    Parameters
    ----------
    term_doc_matrix : TermDocMatrix or descendant
    scores : array-like
        Same length as number of terms in TermDocMatrix.
    num_term_to_keep : int, default=4000.
        Should be > 0.  Number of terms to keep.  Will keep between
        num_terms_to_keep/2 and num_terms_to_keep.

    Returns
    -------
    set, terms that should be shown
    '''
    num_term_to_keep = AutoTermSelector._add_default_num_terms_to_keep(num_term_to_keep)
    freq_df = term_doc_matrix.get_term_freq_df()
    freq_df['count'] = freq_df.sum(axis=1)
    freq_df['score'] = scores
    # Union of three selection strategies: by score, by background
    # frequency, and by document frequency.
    return (AutoTermSelector._get_score_terms(num_term_to_keep, freq_df)
            | AutoTermSelector._get_background_terms(num_term_to_keep, term_doc_matrix)
            | AutoTermSelector._get_frequent_terms(num_term_to_keep, freq_df))
|
def _get_block_publisher(self, state_hash):
    """Returns the block publisher based on the consensus module set by the
    "sawtooth_settings" transaction family.

    Args:
        state_hash (str): The current state root hash for reading settings.

    Raises:
        InvalidGenesisStateError: if any errors occur getting the
            BlockPublisher.
    """
    # Read-only view of global state at the given root; the consensus
    # factory consults the on-chain settings stored there.
    state_view = self._state_view_factory.create_view(state_hash)
    try:
        class BatchPublisher:
            # Stand-in batch publisher for genesis: publishing is
            # forbidden outright because there is no network yet.
            def send(self, transactions):
                # Consensus implementations are expected to have handling
                # in place for genesis operation.  This should include
                # adding any authorization and registrations required
                # for the genesis node to the Genesis Batch list and
                # detecting validation of the Genesis Block and handle it
                # correctly.  Batch publication is not allowed during
                # genesis operation since there is no network to validate
                # the batch yet.
                raise InvalidGenesisConsensusError(
                    'Consensus cannot send transactions during genesis.')

        consensus = ConsensusFactory.get_configured_consensus_module(
            NULL_BLOCK_IDENTIFIER, state_view)
        return consensus.BlockPublisher(
            BlockCache(self._block_store),
            state_view_factory=self._state_view_factory,
            batch_publisher=BatchPublisher(),
            data_dir=self._data_dir,
            config_dir=self._config_dir,
            validator_id=self._identity_signer.get_public_key().as_hex())
    except UnknownConsensusModuleError as e:
        # Surface as a genesis-specific error for the caller.
        raise InvalidGenesisStateError(e)
|
def _get_table_info(self):
    """Database-specific method to get field names"""
    self.rowid = None
    self.fields = []
    self.field_info = {}
    self.cursor.execute('DESCRIBE %s' % self.name)
    # DESCRIBE yields one row per column: (name, type, null, key, default, extra)
    for field, typ, null, key, default, extra in self.cursor.fetchall():
        self.fields.append(field)
        self.field_info[field] = {
            'type': typ,
            'NOT NULL': null,
            'key': key,
            'DEFAULT': default,
            'extra': extra,
        }
        # MySQL flags its auto-increment column in "extra"; remember it
        # as the row-id column.
        if extra == 'auto_increment':
            self.rowid = field
|
def get(self, path, params=None):
    """Perform GET request"""
    response = requests.get(
        url=self.url + path, params=params, timeout=self.timeout)
    # Raise on 4xx/5xx before attempting to decode the body.
    response.raise_for_status()
    return response.json()
|
def _at_warn(self, calculator, rule, scope, block):
    """Implements @warn: evaluate the directive's argument and log it."""
    value = calculator.calculate(block.argument)
    # `Logger.warn` is a deprecated alias of `warning`; the canonical
    # name has identical behavior and avoids a DeprecationWarning.
    log.warning(repr(value))
|
def review_metadata_csv(filedir, input_filepath):
    """Check validity of metadata fields in a metadata CSV.

    :param filedir: path of the directory containing the data files the
        metadata describes
    :param input_filepath: path of the metadata CSV file to review
    :return: the single-/multi-user review result, ``False`` when the CSV
        fails to parse, or ``None`` when the header's first column is
        neither 'filename' nor 'project_member_id'
    """
    # First make sure the CSV parses at all.
    try:
        metadata = load_metadata_csv(input_filepath)
    except ValueError as e:
        print_error(e)
        return False
    with open(input_filepath) as f:
        csv_in = csv.reader(f)
        header = next(csv_in)
        n_headers = len(header)
        # The first header column decides the CSV layout:
        # 'filename' -> single user's files, 'project_member_id' -> many users.
        if header[0] == 'filename':
            res = review_metadata_csv_single_user(filedir, metadata, csv_in, n_headers)
            return res
        if header[0] == 'project_member_id':
            res = review_metadata_csv_multi_user(filedir, metadata, csv_in, n_headers)
            return res
|
def if_else(self, pred, likely=None):
    """A context manager which sets up two conditional basic blocks based
    on the given predicate (a i1 value).

    A tuple of context managers is yield'ed.  Each context manager
    acts as a if_then() block.

    *likely* has the same meaning as in if_then().

    Typical use::
        with builder.if_else(pred) as (then, otherwise):
            with then:
                # emit instructions for when the predicate is true
            with otherwise:
                # emit instructions for when the predicate is false
    """
    bb = self.basic_block
    # Three new blocks: the two branch targets plus a common merge point.
    bbif = self.append_basic_block(name=_label_suffix(bb.name, '.if'))
    bbelse = self.append_basic_block(name=_label_suffix(bb.name, '.else'))
    bbend = self.append_basic_block(name=_label_suffix(bb.name, '.endif'))
    br = self.cbranch(pred, bbif, bbelse)
    if likely is not None:
        # Branch-weight metadata, 99:1 in favour of the likely side.
        br.set_weights([99, 1] if likely else [1, 99])
    then = self._branch_helper(bbif, bbend)
    otherwise = self._branch_helper(bbelse, bbend)
    yield then, otherwise
    # Both arms branch to the merge block; continue emitting there.
    self.position_at_end(bbend)
|
def resource_reaches_status(self, resource, resource_id,
                            expected_stat='available',
                            msg='resource', max_wait=120):
    """Poll an openstack resource until it reaches an expected status.

    Useful to confirm that nova instances, cinder vols, snapshots,
    glance images, heat stacks and other resources eventually reach the
    expected status.

    :param resource: pointer to os resource type, ex: heat_client.stacks
    :param resource_id: unique id for the openstack resource
    :param expected_stat: status to expect resource to reach
    :param msg: text to identify purpose in logging
    :param max_wait: maximum wait time in seconds
    :returns: True if successful, False if status is not reached
    """
    attempts = 0
    status = resource.get(resource_id).status
    # Poll every 4 seconds, so the attempt budget is max_wait / 4.
    while status != expected_stat and attempts < (max_wait / 4):
        self.log.debug('{} status check: '
                       '{} [{}:{}] {}'.format(msg, attempts, status,
                                              expected_stat, resource_id))
        time.sleep(4)
        status = resource.get(resource_id).status
        attempts += 1
    self.log.debug('{}: expected, actual status = {}, '
                   '{}'.format(msg, status, expected_stat))
    if status == expected_stat:
        return True
    self.log.debug('{} never reached expected status: '
                   '{}'.format(resource_id, expected_stat))
    return False
|
def to_wgs84(east, north, crs):
    """Convert (east, north) coordinates in any CRS to WGS84.

    :param east: east coordinate
    :type east: float
    :param north: north coordinate
    :type north: float
    :param crs: CRS enum constants
    :type crs: constants.CRS
    :return: latitude and longitude coordinates in WGS84 system
    :rtype: float, float
    """
    point = (east, north)
    return transform_point(point, crs, CRS.WGS84)
|
def add_line(preso, x1, y1, x2, y2, width="3pt", color="red"):
    """Append an arrow line (``draw:line``) to the last slide of *preso*.

    Stroke width and arrow-marker sizes are scaled from *width* using the
    ratios observed in LibreOffice-generated styles: a 3pt line uses a
    0.106cm stroke, a 0.459cm end marker and a 0.359cm start marker
    (``style:graphic-properties`` with ``draw:marker-end="Arrow"``).

    :param preso: presentation object; the line style is registered on it
        and the line node is appended to its last slide's page
    :param x1, y1, x2, y2: endpoint coordinates, e.g. "6.35cm"
    :param width: line width in points, e.g. "3pt"
    :param color: stroke color, e.g. "red" or "#ed1c24"
    """
    # cm-per-pt ratios derived from LibreOffice output for a 3pt line.
    CM_PER_PT_MARKER_END = .459 / 3
    CM_PER_PT_MARKER_START = .359 / 3
    CM_PER_PT_STROKE = .106 / 3
    points = float(width[:width.index("pt")])
    attribs = {
        "svg:stroke-width": "{}cm".format(points * CM_PER_PT_STROKE),
        "svg:stroke-color": color,  # "#ed1c24",
        "draw:marker-start-width": "{}cm".format(points * CM_PER_PT_MARKER_START),
        "draw:marker-end": "Arrow",
        "draw:marker-end-width": "{}cm".format(points * CM_PER_PT_MARKER_END),
        "draw:fill": "none",
        "draw:textarea-vertical-align": "middle",
    }
    style = LineStyle(**attribs)
    preso.add_style(style)
    line_attrib = {
        "draw:style-name": style.name,
        "draw:layer": "layout",
        "svg:x1": x1,
        "svg:y1": y1,
        "svg:x2": x2,
        "svg:y2": y2,
    }
    preso.slides[-1]._page.append(el("draw:line", attrib=line_attrib))
|
def collect_usage_pieces(self, ctx):
    """Insert "SCRIPT [--]" immediately before the trailing "[ARGV]..."."""
    pieces = super(ProfilingCommand, self).collect_usage_pieces(ctx)
    assert pieces[-1] == '[ARGV]...'
    # Splice both new pieces in front of the final element, so the
    # usage reads: ... SCRIPT [--] [ARGV]...
    pieces[-1:-1] = ['SCRIPT', '[--]']
    return pieces
|
def find_mode(data):
    """Return the QR Code mode (an integer constant) appropriate for `data`.

    :param bytes data: Data to check.
    :rtype: int
    """
    # Probe the modes from most to least restrictive encoding.
    if data.isdigit():
        return consts.MODE_NUMERIC
    elif is_alphanumeric(data):
        return consts.MODE_ALPHANUMERIC
    elif is_kanji(data):
        return consts.MODE_KANJI
    else:
        return consts.MODE_BYTE
|
def _attr_display(value, html_element):
    '''Set the display value'''
    # Map the recognised CSS keywords to Display members; anything
    # else falls back to inline.
    mapping = {'block': Display.block, 'none': Display.none}
    html_element.display = mapping.get(value, Display.inline)
|
def field_singleton_schema(  # noqa: C901 (ignore complexity)
    field: Field,
    *,
    by_alias: bool,
    model_name_map: Dict[Type['main.BaseModel'], str],
    schema_overrides: bool = False,
    ref_prefix: Optional[str] = None,
) -> Tuple[Dict[str, Any], Dict[str, Any]]:
    """This function is indirectly used by ``field_schema()``, you should probably be using that function.

    Take a single Pydantic ``Field``, and return its schema and any additional definitions from sub-models.
    """
    ref_prefix = ref_prefix or default_prefix
    definitions: Dict[str, Any] = {}
    if field.sub_fields:
        # Compound fields (e.g. Union members) are handled by a dedicated helper.
        return field_singleton_sub_fields_schema(
            field.sub_fields,
            by_alias=by_alias,
            model_name_map=model_name_map,
            schema_overrides=schema_overrides,
            ref_prefix=ref_prefix,
        )
    if field.type_ is Any:
        return {}, definitions  # no restrictions
    if is_callable_type(field.type_):
        raise SkipField(f'Callable {field.name} was excluded from schema since JSON schema has no equivalent type.')
    f_schema: Dict[str, Any] = {}
    if issubclass(field.type_, Enum):
        f_schema.update({'enum': [item.value for item in field.type_]})  # type: ignore
        # Don't return immediately, to allow adding specific types
    # Copy declared validation constraints into their JSON-schema keyword
    # equivalents (regex patterns are unwrapped to their source string).
    for field_name, schema_name in validation_attribute_to_schema_keyword.items():
        field_value = getattr(field.type_, field_name, None)
        if field_value is not None:
            if field_name == 'regex':
                field_value = field_value.pattern
            f_schema[schema_name] = field_value
    for type_, t_schema in field_class_to_schema_enum_enabled:
        if issubclass(field.type_, type_):
            f_schema.update(t_schema)
            break
    # Return schema, with or without enum definitions
    if f_schema:
        return f_schema, definitions
    for type_, t_schema in field_class_to_schema_enum_disabled:
        if issubclass(field.type_, type_):
            return t_schema, definitions
    # Handle dataclass-based models
    field_type = field.type_
    if lenient_issubclass(getattr(field_type, '__pydantic_model__', None), main.BaseModel):
        field_type = cast(Type['dataclasses.DataclassType'], field_type)
        field_type = field_type.__pydantic_model__
    if issubclass(field_type, main.BaseModel):
        sub_schema, sub_definitions = model_process_schema(
            field_type, by_alias=by_alias, model_name_map=model_name_map, ref_prefix=ref_prefix)
        definitions.update(sub_definitions)
        if not schema_overrides:
            # Reference the sub-model via $ref; its schema travels in definitions.
            model_name = model_name_map[field_type]
            definitions[model_name] = sub_schema
            return {'$ref': f'{ref_prefix}{model_name}'}, definitions
        else:
            return sub_schema, definitions
    raise ValueError(f'Value not declarable with JSON Schema, field: {field}')
|
async def grab(self, *, countries=None, limit=0):
    """Gather proxies from the providers without checking.

    :param list countries: (optional) List of ISO country codes
        where should be located proxies
    :param int limit: (optional) The maximum number of proxies

    :ref:`Example of usage <proxybroker-examples-grab>`.
    """
    self._countries = countries
    self._limit = limit
    # Schedule the grab coroutine (no checking) and track the task.
    grab_task = asyncio.ensure_future(self._grab(check=False))
    self._all_tasks.append(grab_task)
|
def updatepLvlNextFunc(self):
    '''Create the pLvlNextFunc attribute as a list of linear functions,
    one per period, encoding constant expected permanent income growth
    (slope PermGroFac[t]) at every income level.  Installs the result as
    a time-varying attribute.

    Parameters
    ----------
    None

    Returns
    -------
    None
    '''
    orig_time = self.time_flow
    self.timeFwd()
    # One linear map p -> PermGroFac[t] * p for each period t.
    self.pLvlNextFunc = [
        LinearInterp(np.array([0., 1.]), np.array([0., self.PermGroFac[t]]))
        for t in range(self.T_cycle)
    ]
    self.addToTimeVary('pLvlNextFunc')
    # Restore the original time direction if it was reversed.
    if not orig_time:
        self.timeRev()
|
def main_callback(self, *args, **kwargs):
    """Main callback called when an event is received from an entry point.

    :returns: The entry point's callback.
    :rtype: function
    :raises NotImplementedError: When the entrypoint doesn't have the required attributes.
    """
    # Both attributes are mandatory for a usable entry point.
    if not self.callback:
        raise NotImplementedError('Entrypoints must declare `callback`')
    if not self.settings:
        raise NotImplementedError('Entrypoints must declare `settings`')
    # Reset the bound method's instance db handle before running.
    # NOTE(review): `im_self` exists only on Python 2 bound methods
    # (Python 3 renamed it to `__self__`) -- confirm the target runtime.
    self.callback.im_self.db = None
    # 1. Start all the middlewares
    with self.debug(*args, **kwargs):
        with self.database():
            # 2. `Real` callback
            result = self.callback(*args, **kwargs)  # pylint: disable=not-callable
    return result
|
def bundle(context, name):
    """Add a new bundle."""
    store = context.obj['db']
    # Bundle names are unique; abort when one already exists.
    if store.bundle(name):
        click.echo(click.style('bundle name already exists', fg='yellow'))
        context.abort()
    new_bundle = store.new_bundle(name)
    store.add_commit(new_bundle)
    # Every bundle starts with a default version stamped at creation time.
    new_version = store.new_version(created_at=new_bundle.created_at)
    new_version.bundle = new_bundle
    store.add_commit(new_version)
    click.echo(click.style(
        f"new bundle added: {new_bundle.name} ({new_bundle.id})", fg='green'))
|
def create(self, sid):
    """Create a new ShortCodeInstance

    :param unicode sid: The SID of a Twilio ShortCode resource

    :returns: Newly created ShortCodeInstance
    :rtype: twilio.rest.proxy.v1.service.short_code.ShortCodeInstance
    """
    payload = self._version.create(
        'POST', self._uri, data=values.of({'Sid': sid, }))
    return ShortCodeInstance(
        self._version, payload, service_sid=self._solution['service_sid'], )
|
def wait_ready(self, name, timeout=5.0, sleep_interval=0.2):
    """Wait for a newly created bucket to be ready.

    :param string name: the name to wait for
    :param seconds timeout: the maximum amount of time to wait
    :param seconds sleep_interval: the number of time to sleep
        between each probe
    :raise: :exc:`.CouchbaseError` on internal HTTP error
    :raise: :exc:`NotReadyError` if all nodes could not be
        ready in time
    """
    deadline = time() + timeout
    while True:
        try:
            info = self.bucket_info(name).value
            for node in info['nodes']:
                if node['status'] != 'healthy':
                    raise NotReadyError.pyexc('Not all nodes are healthy')
            # No error and every node healthy -- done.
            return
        except E.CouchbaseError:
            # Give up if another sleep would overshoot the deadline.
            if time() + sleep_interval > deadline:
                raise
            sleep(sleep_interval)
|
def handle(client_message, handle_event_partition_lost=None, to_object=None):
    """Event handler"""
    # Ignore messages of other types, and do nothing without a handler.
    if client_message.get_message_type() != EVENT_PARTITIONLOST:
        return
    if handle_event_partition_lost is None:
        return
    partition_id = client_message.read_int()
    lost_backup_count = client_message.read_int()
    # A False flag means an Address follows in the message body.
    source = None
    if not client_message.read_bool():
        source = AddressCodec.decode(client_message, to_object)
    handle_event_partition_lost(partition_id=partition_id,
                                lost_backup_count=lost_backup_count,
                                source=source)
|
def rr_history(self, query, query_type="A"):
    '''Get the RR (Resource Record) History of the given domain or IP.

    The default query type is 'A' records; A, NS, MX, TXT and CNAME are
    supported.  For details, see
    https://investigate.umbrella.com/docs/api#dnsrr_domain
    '''
    if query_type not in Investigate.SUPPORTED_DNS_TYPES:
        raise Investigate.UNSUPPORTED_DNS_QUERY
    # IP addresses and domains use different endpoints.
    handler = (self._ip_rr_history
               if Investigate.IP_PATTERN.match(query)
               else self._domain_rr_history)
    return handler(query, query_type)
|
def read_spec(filename, fname='', **kwargs):
    """Read FITS or ASCII spectrum.

    Parameters
    ----------
    filename : str or file pointer
        Spectrum file name or pointer.
    fname : str
        Filename.  This is *only* used if ``filename`` is a pointer.
    kwargs : dict
        Keywords acceptable by :func:`read_fits_spec` (if FITS) or
        :func:`read_ascii_spec` (if ASCII).

    Returns
    -------
    header : dict
        Metadata.
    wavelengths, fluxes : `~astropy.units.quantity.Quantity`
        Wavelength and flux of the spectrum.

    Raises
    ------
    synphot.exceptions.SynphotError
        Read failed.
    """
    if isinstance(filename, str):
        fname = filename
    elif not fname:  # pragma: no cover
        raise exceptions.SynphotError('Cannot determine filename.')
    # Dispatch on the extension; str.endswith accepts a tuple of
    # suffixes, replacing the original chained-`or` test.
    if fname.endswith(('fits', 'fit')):
        read_func = read_fits_spec
    else:
        read_func = read_ascii_spec
    return read_func(filename, **kwargs)
|
def list_container_objects(container_name, profile, **libcloud_kwargs):
    '''List container objects (e.g. files) for the given container_id on the given profile

    :param container_name: Container name
    :type container_name: ``str``

    :param profile: The profile key
    :type profile: ``str``

    :param libcloud_kwargs: Extra arguments for the driver's list_container_objects method
    :type libcloud_kwargs: ``dict``

    CLI Example:

    .. code-block:: bash

        salt myminion libcloud_storage.list_container_objects MyFolder profile1
    '''
    conn = _get_driver(profile=profile)
    container = conn.get_container(container_name)
    clean_kwargs = salt.utils.args.clean_kwargs(**libcloud_kwargs)
    # Flatten each driver object into a plain serialisable dict.
    return [
        {
            'name': obj.name,
            'size': obj.size,
            'hash': obj.hash,
            'container': obj.container.name,
            'extra': obj.extra,
            'meta_data': obj.meta_data,
        }
        for obj in conn.list_container_objects(container, **clean_kwargs)
    ]
|
def to_output(data):
    """Convert a WA-KAT frontend dataset to the `MRC`, `MARC` and
    `Dublin core` output formats.

    Conversion is implemented as filling of the MRC template, which is
    then converted to a MARC record.  Dublin core is converted standalone
    from the input dataset.

    :param data: JSON string with the frontend dataset
    :return: dict with keys "fn", "mrc", "oai" and "dc"
    """
    data = json.loads(data)
    # postprocessing
    if "keywords" in data:
        mdt, cz_keywords, en_keywords = compile_keywords(data["keywords"])
        del data["keywords"]
        data["mdt"] = mdt
        data["cz_keywords"] = cz_keywords
        data["en_keywords"] = en_keywords
    data["annotation"] = data["annotation"].replace("\n", " ")
    data["time"] = time  # the module itself, used for date generation
    # convert additional info values to MRC
    ai_key = "additional_info"
    if data.get(ai_key) is None:
        data[ai_key] = {}
    # parse regularity - see https://github.com/WebArchivCZ/WA-KAT/issues/66
    # for details
    fld_008 = data[ai_key].get("008")
    data["regularity"] = fld_008[18] if fld_008 and len(fld_008) > 18 else "-"
    # put lang code to 008
    data["lang_code_008"] = get_008_lang_code(data.get("language"))
    # special format of date - viz #100
    data["date_of_generation"] = _to_date_in_588(time.strftime("%d.%m.%Y"))
    # `.iteritems()` is Python 2 only; `.items()` behaves identically on
    # both Python 2 and 3.
    data[ai_key] = {
        key: "\n".join(item_to_mrc(key, val))
        for key, val in data[ai_key].items()
        if val
    }
    alt_end_date = data[ai_key].get("alt_end_date", None)
    # handle date range in the 008
    from_year, to_year = parse_date_range(data["creation_date"], alt_end_date)
    data["from_year"] = from_year
    data["to_year"] = to_year
    # serialize author
    if data["author"]:
        data["serialized_author"] = serialize_author(data["author"])
    # send data to seeder
    if data.get("url_id"):
        send_update(data["url_id"], data)
    # convert to MRC format
    mrc = render_mrc(data).encode("utf-8")
    # create all output formats
    return {
        "fn": url_to_fn(data["url"]),
        "mrc": mrc,
        "oai": mrc_to_marc(mrc),
        "dc": to_dc(data),
    }
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.