signature
stringlengths 29
44.1k
| implementation
stringlengths 0
85.2k
|
|---|---|
def save_related(self, request, form, formsets, change):
    """Persist the objects related to the parent model form.

    Given the ``HttpRequest``, the parent ``ModelForm`` instance, the
    list of inline formsets and a boolean value based on whether the
    parent is being added or changed, save the related objects to the
    database.  Note that at this point ``save_form()`` and
    ``save_model()`` have already been called.
    """
    # Many-to-many data can only be saved once the parent instance exists.
    form.save_m2m()
    # Delegate each inline formset so subclasses can hook save_formset().
    for inline_formset in formsets:
        self.save_formset(request, form, inline_formset, change=change)
|
def _FormatMessage ( template , parameters ) :
"""Formats the message . Unescapes ' $ $ ' with ' $ ' .
Args :
template : message template ( e . g . ' a = $ 0 , b = $ 1 ' ) .
parameters : substitution parameters for the format .
Returns :
Formatted message with parameters embedded in template placeholders ."""
|
def GetParameter ( m ) :
try :
return parameters [ int ( m . group ( 0 ) [ 1 : ] ) ]
except IndexError :
return INVALID_EXPRESSION_INDEX
parts = template . split ( '$$' )
return '$' . join ( re . sub ( r'\$\d+' , GetParameter , part ) for part in parts )
|
def put(self, device_id: int) -> Device:
    """Update the Device resource identified by ``device_id``.

    :param device_id: primary key of the device to update (aborts the
        request if it does not exist).
    :returns: the updated ``Device`` instance.
    """
    device = self._get_or_abort(device_id)
    self.update(device)
    # BUG FIX: the device must be added to the session *before* committing;
    # the original committed first, so the add was not part of the flushed
    # transaction.
    session.add(device)
    session.commit()
    return device
|
def normalize_pts(pts, ymax, scaler=2):
    """Scale every coordinate and mirror the y axis.

    The flip compensates for the differing origin conventions
    (top-left vs. bottom-left).

    :param pts: iterable of (x, y) pairs.
    :param ymax: height used to mirror the y coordinate.
    :param scaler: multiplicative factor applied to both coordinates.
    :returns: list of transformed (x, y) tuples.
    """
    normalized = []
    for x, y in pts:
        normalized.append((x * scaler, ymax - y * scaler))
    return normalized
|
def rfft2d_freqs(h, w):
    """Computes 2D spectrum frequencies."""
    fy = np.fft.fftfreq(h)[:, None]
    # An rfft keeps w//2 + 1 frequencies; an odd width needs one additional
    # frequency column which is later cut off (1 pixel) by the caller.
    extra = 2 if w % 2 == 1 else 1
    fx = np.fft.fftfreq(w)[: w // 2 + extra]
    return np.sqrt(fx * fx + fy * fy)
|
def samples(self, nsamples, rstate=None):
    """Draw `nsamples` samples randomly distributed within the unit cube.

    Returns
    x : `~numpy.ndarray` with shape (nsamples, ndim)
        A collection of coordinates within the unit cube.
    """
    # Fall back to the global numpy RNG when no state is supplied.
    rng = np.random if rstate is None else rstate
    return np.array([self.sample(rstate=rng) for _ in range(nsamples)])
|
def hist(self, dimension=None, num_bins=20, bin_range=None, adjoin=True, **kwargs):
    """Compute a histogram along the given dimension(s), optionally adjoined.

    Defaults to the first value dimension if present, otherwise falls
    back to the first key dimension.

    Args:
        dimension: Dimension(s) to compute histogram on
        num_bins (int, optional): Number of bins
        bin_range (tuple, optional): Lower and upper bounds of bins
        adjoin (bool, optional): Whether to adjoin histogram

    Returns:
        AdjointLayout of element and histogram or just the histogram
    """
    from ..operation import histogram
    dims = dimension if isinstance(dimension, list) else [dimension]
    # Histograms are built in reverse order so that adjoining below
    # restores the requested left-to-right ordering.
    hists = [
        histogram(self, num_bins=num_bins, bin_range=bin_range,
                  dimension=dim, **kwargs)
        for dim in reversed(dims)
    ]
    if adjoin:
        layout = self
        for h in hists:
            layout = layout << h
        return layout
    if len(dims) > 1:
        return Layout(hists)
    return hists[0]
|
def certificate(self, certificate):
    """Sets the certificate of this V1beta1CertificateSigningRequestStatus.

    If request was approved, the controller will place the issued
    certificate here.  # noqa: E501

    :param certificate: The certificate of this
        V1beta1CertificateSigningRequestStatus.  # noqa: E501
    :type: str
    """
    # None is allowed (no certificate issued yet); anything else must match
    # the base64 pattern below.
    if certificate is not None:
        base64_re = r'^(?:[A-Za-z0-9+\/]{4})*(?:[A-Za-z0-9+\/]{2}==|[A-Za-z0-9+\/]{3}=)?$'  # noqa: E501
        if re.search(base64_re, certificate) is None:
            raise ValueError(r"Invalid value for `certificate`, must be a follow pattern or equal to `/^(?:[A-Za-z0-9+\/]{4})*(?:[A-Za-z0-9+\/]{2}==|[A-Za-z0-9+\/]{3}=)?$/`")  # noqa: E501
    self._certificate = certificate
|
def _set_site(self, v, load=False):
    """Setter method for site, mapped from YANG variable /overlay_gateway/site (list).

    If this variable is read-only (config: false) in the source YANG file,
    then _set_site is considered as a private method. Backends looking to
    populate this variable should do so via calling thisObj._set_site()
    directly.

    YANG Description: Site represents a remote VCS to which tunnel need to be
    setup. Site is identified by a name.
    """
    # Unwrap values that carry their own YANG type converter.
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        # Coerce the value into the generated YANG list type; the extension
        # metadata mirrors the tailf annotations from the YANG model.
        t = YANGDynClass(v, base=YANGListType("name", site.site, yang_name="site", rest_name="site", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='name', extensions={u'tailf-common': {u'info': u'Configure remote extension site', u'cli-no-key-completion': None, u'cli-full-no': None, u'cli-suppress-list-no': None, u'cli-suppress-key-abbreviation': None, u'cli-no-match-completion': None, u'cli-full-command': None, u'callpoint': u'overlay-site-cp'}}), is_container='list', yang_name="site", rest_name="site", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure remote extension site', u'cli-no-key-completion': None, u'cli-full-no': None, u'cli-suppress-list-no': None, u'cli-suppress-key-abbreviation': None, u'cli-no-match-completion': None, u'cli-full-command': None, u'callpoint': u'overlay-site-cp'}}, namespace='urn:brocade.com:mgmt:brocade-tunnels', defining_module='brocade-tunnels', yang_type='list', is_config=True)
    except (TypeError, ValueError):
        # Surface a structured error so callers can report the expected type.
        raise ValueError({'error-string': """site must be of a type compatible with list""", 'defined-type': "list", 'generated-type': """YANGDynClass(base=YANGListType("name",site.site, yang_name="site", rest_name="site", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='name', extensions={u'tailf-common': {u'info': u'Configure remote extension site', u'cli-no-key-completion': None, u'cli-full-no': None, u'cli-suppress-list-no': None, u'cli-suppress-key-abbreviation': None, u'cli-no-match-completion': None, u'cli-full-command': None, u'callpoint': u'overlay-site-cp'}}), is_container='list', yang_name="site", rest_name="site", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure remote extension site', u'cli-no-key-completion': None, u'cli-full-no': None, u'cli-suppress-list-no': None, u'cli-suppress-key-abbreviation': None, u'cli-no-match-completion': None, u'cli-full-command': None, u'callpoint': u'overlay-site-cp'}}, namespace='urn:brocade.com:mgmt:brocade-tunnels', defining_module='brocade-tunnels', yang_type='list', is_config=True)""", })
    self.__site = t
    # Notify the parent tree that this node changed, if supported.
    if hasattr(self, '_set'):
        self._set()
|
def is45(msg):
    """Check if a message is likely to be BDS code 4,5.

    Meteorological hazard report.

    Args:
        msg (String): 28 bytes hexadecimal message string

    Returns:
        bool: True or False
    """
    if allzeros(msg):
        return False
    d = hex2bin(data(msg))
    # Each field carries a status bit (1, 4, 7, 10, 13, 16, 27, 39) that must
    # be consistent with its value bits.
    status_fields = (
        (1, 2, 3),
        (4, 5, 6),
        (7, 8, 9),
        (10, 11, 12),
        (13, 14, 15),
        (16, 17, 26),
        (27, 28, 38),
        (39, 40, 51),
    )
    for status_bit, first, last in status_fields:
        if wrongstatus(d, status_bit, first, last):
            return False
    # Bits 52-56 are reserved and must be zero.
    if bin2int(d[51:56]) != 0:
        return False
    # A decoded temperature outside [-80, 60] is implausible.
    temp = temp45(msg)
    if temp and (temp > 60 or temp < -80):
        return False
    return True
|
def content_ids(params):
    """Does the same thing as `pageviews`, except it includes content ids and
    then optionally filters the response by a list of content ids passed as
    query params.

    Note: this load can be a little heavy and could take a minute.

    :param params: mapping of query-string parameter names to value lists.
    :returns: tuple of (JSON body, HTTP status string).
    """
    # set up default values
    default_from, default_to, yesterday, _ = make_default_times()
    # get params
    try:
        series = params.get("site", [DEFAULT_SERIES])[0]
        from_date = params.get("from", [default_from])[0]
        to_date = params.get("to", [default_to])[0]
        group_by = params.get("group_by", [DEFAULT_GROUP_BY])[0]
        ids = params.get("content_id", [])
    except Exception as e:
        LOGGER.exception(e)
        # BUG FIX: Python 3 exceptions have no `.message` attribute.
        return json.dumps({"error": str(e)}), "500 Internal Error"
    # check the cache
    cache_key = "{}:{}:{}:{}:{}:{}:{}".format(
        memcached_prefix, "contentids.json", series, from_date, to_date, group_by, ids)
    try:
        data = MEMCACHED_CLIENT.get(cache_key)
        if data:
            return data, "200 OK"
    except Exception as e:
        # Cache errors are non-fatal; fall through to the database.
        LOGGER.exception(e)
    # enforce content ids
    if not ids:
        # BUG FIX: removed stray trailing quote from the error message.
        return json.dumps({"error": "you must pass at least one content id"}), "400 Bad Request"
    # parse from date
    from_date = parse_datetime(from_date)
    if from_date is None:
        LOGGER.error("could not parse 'from'")
        return json.dumps({"error": "could not parse 'from'"}), "400 Bad Request"
    # parse to date
    to_date = parse_datetime(to_date)
    if to_date is None:
        LOGGER.error("could not parse 'to'")
        return json.dumps({"error": "could not parse 'to'"}), "400 Bad Request"
    # influx will only keep non-aggregated data for a day, so if the from
    # param is beyond that point we need to update the series name to use the
    # rolled up values
    if from_date < yesterday:
        series = update_series(series, "pageviews", "content_id")
    # format times
    from_date = format_datetime(from_date)
    to_date = format_datetime(to_date)
    # start building the query
    query = ("SELECT content_id, sum(value) as value "
             "FROM {series} "
             "WHERE time > '{from_date}' AND time < '{to_date}' "
             "GROUP BY content_id, time({group_by}) "
             "fill(0);")
    args = {"series": series, "from_date": from_date, "to_date": to_date, "group_by": group_by}
    # send the request
    try:
        res = INFLUXDB_CLIENT.query(query.format(**args))
    # capture errors and send them back along with the query (for
    # inspection / debugging)
    except Exception as e:
        LOGGER.exception(e)
        return json.dumps({"error": str(e), "query": query.format(**args)}), "500 Internal Error"
    # build the response object
    response = flatten_response(res)
    # filter by content ids
    if ids:
        for site, points in response.items():
            # BUG FIX: materialize the filtered points so the result stays
            # JSON-serializable on Python 3 (a lazy `filter` object is not).
            response[site] = [p for p in points if p["content_id"] in ids]
    res = json.dumps(response)
    # cache the response; failures here are non-fatal
    try:
        MEMCACHED_CLIENT.set(cache_key, res, time=MEMCACHED_EXPIRATION)
    except Exception as e:
        LOGGER.exception(e)
    return res, "200 OK"
|
def should_run_now(self, force=False):
    """Returns a boolean determining whether this cron should run now or not!"""
    # BUG FIX: the docstring was previously placed after the first two
    # statements, making it a no-op string rather than the docstring.
    # Imported lazily to avoid import cycles at module load time.
    from django_cron.models import CronJobLog
    cron_job = self.cron_job
    self.user_time = None
    self.previously_ran_successful_cron = None
    # If we pass --force options, we force cron run.
    if force:
        return True
    if cron_job.schedule.run_every_mins is not None:
        # We check last job - success or not.
        last_job = None
        try:
            last_job = CronJobLog.objects.filter(code=cron_job.code).latest('start_time')
        except CronJobLog.DoesNotExist:
            pass
        if last_job:
            if not last_job.is_success and cron_job.schedule.retry_after_failure_mins:
                # A failed run is retried only after the configured back-off.
                if get_current_time() > last_job.start_time + timedelta(minutes=cron_job.schedule.retry_after_failure_mins):
                    return True
                else:
                    return False
        try:
            self.previously_ran_successful_cron = CronJobLog.objects.filter(
                code=cron_job.code, is_success=True, ran_at_time__isnull=True
            ).latest('start_time')
        except CronJobLog.DoesNotExist:
            pass
        if self.previously_ran_successful_cron:
            # Run again only once run_every_mins have elapsed since success.
            if get_current_time() > self.previously_ran_successful_cron.start_time + timedelta(minutes=cron_job.schedule.run_every_mins):
                return True
        else:
            # Never ran successfully before: run now.
            return True
    if cron_job.schedule.run_at_times:
        for time_data in cron_job.schedule.run_at_times:
            user_time = time.strptime(time_data, "%H:%M")
            now = get_current_time()
            actual_time = time.strptime("%s:%s" % (now.hour, now.minute), "%H:%M")
            if actual_time >= user_time:
                # Has this scheduled slot already produced a run today?
                qset = CronJobLog.objects.filter(
                    code=cron_job.code, ran_at_time=time_data, is_success=True
                ).filter(
                    Q(start_time__gt=now) | Q(end_time__gte=now.replace(hour=0, minute=0, second=0, microsecond=0))
                )
                if not qset:
                    self.user_time = time_data
                    return True
    return False
|
def _readPPN ( self , fname , sldir ) :
'''Private method that reads in and organizes the . ppn file
Loads the data of the . ppn file into the variable cols .'''
|
if sldir . endswith ( os . sep ) : # Making sure fname will be formatted correctly
fname = str ( sldir ) + str ( fname )
else :
fname = str ( sldir ) + os . sep + str ( fname )
self . sldir += os . sep
f = open ( fname , 'r' )
lines = f . readlines ( )
for i in range ( len ( lines ) ) :
lines [ i ] = lines [ i ] . strip ( )
cols = [ 'ISOTP' , 'ABUNDANCE_MF' ]
# These are constant , . ppn files have no header to read from
for i in range ( len ( lines ) ) :
if not lines [ i ] . startswith ( 'H' ) :
index = i - 1
break
return cols , index
|
def next_channel_from_routes(
    available_routes: List['RouteState'],
    channelidentifiers_to_channels: Dict,
    transfer_amount: PaymentWithFeeAmount,
    lock_timeout: BlockTimeout,
) -> Optional[NettingChannelState]:
    """Returns the first route that may be used to mediated the transfer.

    The routing service can race with local changes, so the recommended
    routes must be validated.

    Args:
        available_routes: Current available routes that may be used, it's
            assumed that the available_routes list is ordered from best to
            worst.
        channelidentifiers_to_channels: Mapping from channel identifier
            to NettingChannelState.
        transfer_amount: The amount of tokens that will be transferred
            through the given route.
        lock_timeout: Number of blocks until the lock expires, used to
            filter out channels that have a smaller settlement window.

    Returns:
        The next route.
    """
    for candidate_route in available_routes:
        candidate = channelidentifiers_to_channels.get(candidate_route.channel_identifier)
        # Skip routes whose channel is unknown locally or no longer usable.
        if candidate and is_channel_usable(candidate, transfer_amount, lock_timeout):
            return candidate
    return None
|
def insert_into_last_element(html, element):
    """Insert an html element into the last element of another html fragment.

    Example:
        html = '<p>paragraph1</p><p>paragraph2...</p>'
        element = '<a href="/read-more/">read more</a>'
        ---> '<p>paragraph1</p><p>paragraph2...<a href="/read-more/">read more</a></p>'
    """
    # An unparsable element degrades to an empty span rather than failing.
    try:
        new_node = fragment_fromstring(element)
    except (ParserError, TypeError):
        new_node = fragment_fromstring('<span></span>')
    try:
        fragments = fragments_fromstring(html)
        fragments[-1].append(new_node)
        return ''.join(tostring(frag) for frag in fragments)
    except (ParserError, TypeError):
        # Unparsable (or empty) html: nothing sensible to return.
        return ''
|
def interpolate(self, factor, minGlyph, maxGlyph, round=True, suppressError=True):
    """Interpolate the contents of this glyph at location ``factor``
    in a linear interpolation between ``minGlyph`` and ``maxGlyph``.

        >>> glyph.interpolate(0.5, otherGlyph1, otherGlyph2)

    ``factor`` may be a :ref:`type-int-float` or a tuple containing
    two :ref:`type-int-float` values representing x and y factors.

        >>> glyph.interpolate((0.5, 1.0), otherGlyph1, otherGlyph2)

    ``minGlyph`` must be a :class:`BaseGlyph` and will be located at 0.0
    in the interpolation range. ``maxGlyph`` must be a :class:`BaseGlyph`
    and will be located at 1.0 in the interpolation range. If ``round``
    is ``True``, the contents of the glyph will be rounded to integers
    after the interpolation is performed.

        >>> glyph.interpolate(0.5, otherGlyph1, otherGlyph2, round=True)

    This method assumes that ``minGlyph`` and ``maxGlyph`` are completely
    compatible with each other for interpolation. If not, any errors
    encountered will raise a :class:`FontPartsError`. If ``suppressError``
    is ``True``, no exception will be raised and errors will be silently
    ignored.
    """
    factor = normalizers.normalizeInterpolationFactor(factor)
    # Validate both interpolation endpoints, reporting the offending type.
    for endpoint in (minGlyph, maxGlyph):
        if not isinstance(endpoint, BaseGlyph):
            raise TypeError(
                ("Interpolation to an instance of %r can not be "
                 "performed from an instance of %r.")
                % (self.__class__.__name__, endpoint.__class__.__name__))
    round = normalizers.normalizeBoolean(round)
    suppressError = normalizers.normalizeBoolean(suppressError)
    self._interpolate(factor, minGlyph, maxGlyph,
                      round=round, suppressError=suppressError)
|
def list_all_variants(cls, **kwargs):
    """List Variants.

    Return a list of Variants.
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async=True

    >>> thread = api.list_all_variants(async=True)
    >>> result = thread.get()

    :param async bool
    :param int page: page number
    :param int size: page size
    :param str sort: page order
    :return: page[Variant]
        If the method is called asynchronously, returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    # Async mode hands back the request thread directly; sync mode returns
    # the unwrapped data.
    if kwargs.get('async'):
        return cls._list_all_variants_with_http_info(**kwargs)
    data = cls._list_all_variants_with_http_info(**kwargs)
    return data
|
def choose_meas_file(self, event=None):
    """Opens a dialog allowing the user to pick a measurement file.

    :param event: wx event that triggered this handler (unused).
    :returns: the chosen file path, or '' if the dialog was cancelled.
    """
    dlg = wx.FileDialog(self, message="Please choose a measurement file", defaultDir=self.WD, defaultFile="measurements.txt", wildcard="measurement files (*.magic,*.txt)|*.magic;*.txt", style=wx.FD_OPEN | wx.FD_CHANGE_DIR)
    if self.show_dlg(dlg) == wx.ID_OK:
        meas_file = dlg.GetPath()
        dlg.Destroy()
    else:
        meas_file = ''
        # NOTE(review): cancelling sets the data model version to 2.5 as a
        # side effect — confirm this fallback is intended.
        self.data_model = 2.5
        dlg.Destroy()
    return meas_file
|
def capture_url_missing_namespace(self, node):
    """Capture missing namespace in url include."""
    for arg in node.args:
        func = getattr(arg, 'func', None)
        # Only `include(...)` call arguments are relevant.
        if not (isinstance(arg, ast.Call) and isinstance(func, ast.Name) and func.id == 'include'):
            continue
        if any(kw.arg == 'namespace' for kw in arg.keywords):
            # Namespace supplied: nothing to report.
            return
        return DJ05(
            lineno=node.lineno,
            col=node.col_offset,
        )
|
def get_attribute(self, colourmode, mode, name, part=None):
    """Returns the requested attribute.

    :param colourmode: colour mode; in [1, 16, 256]
    :type colourmode: int
    :param mode: ui-mode (e.g. `search`, `thread`...)
    :type mode: str
    :param name: name of the attribute
    :type name: str
    :param part: optional sub-part of the themed entry
    :rtype: urwid.AttrSpec
    """
    entry = self._config[mode][name]
    if part is not None:
        entry = entry[part]
    # Fall back to the dummy theme entry when nothing is configured, then
    # pick the variant matching the active colour mode.
    chosen = entry if entry else DUMMYDEFAULT
    return chosen[self._colours.index(colourmode)]
|
def create_missing(self):
    """Automatically populate additional instance attributes.

    When a new lifecycle environment is created, it must either:

    * Reference a parent lifecycle environment in the tree of lifecycle
      environments via the ``prior`` field, or
    * have a name of "Library".

    Within a given organization, there can only be a single lifecycle
    environment with a name of 'Library'. This lifecycle environment is at
    the root of a tree of lifecycle environments, so its ``prior`` field is
    blank.

    This method finds the 'Library' lifecycle environment within the
    current organization and points to it via the ``prior`` field. This is
    not done if the current lifecycle environment has a name of 'Library'.
    """
    # `super` populates `self.organization`, which the search below needs.
    super(LifecycleEnvironment, self).create_missing()
    # 'Library' is the tree root and never has a prior; an already-set
    # `prior` is likewise respected.
    if self.name == 'Library' or hasattr(self, 'prior'):  # pylint:disable=no-member
        return
    results = self.search({'organization'}, {u'name': u'Library'})
    if len(results) != 1:
        raise APIResponseError(
            u'Could not find the "Library" lifecycle environment for '
            u'organization {0}. Search results: {1}'
            .format(self.organization, results)  # pylint:disable=E1101
        )
    self.prior = results[0]
|
def get_airmass(self, times=None, solar_position=None, model='kastenyoung1989'):
    """Calculate the relative and absolute airmass.

    Automatically chooses zenith or apparent zenith depending on the
    selected model.

    Parameters
    ----------
    times : None or DatetimeIndex, default None
        Only used if solar_position is not provided.
    solar_position : None or DataFrame, default None
        DataFrame with columns 'apparent_zenith', 'zenith'.
    model : str, default 'kastenyoung1989'
        Relative airmass model.

    Returns
    -------
    airmass : DataFrame
        Columns are 'airmass_relative', 'airmass_absolute'.
    """
    if solar_position is None:
        solar_position = self.get_solarposition(times)
    # Select the zenith column the chosen model expects.
    if model in atmosphere.APPARENT_ZENITH_MODELS:
        zenith = solar_position['apparent_zenith']
    elif model in atmosphere.TRUE_ZENITH_MODELS:
        zenith = solar_position['zenith']
    else:
        raise ValueError('{} is not a valid airmass model'.format(model))
    relative = atmosphere.get_relative_airmass(zenith, model)
    # Absolute airmass corrects for site pressure derived from altitude.
    pressure = atmosphere.alt2pres(self.altitude)
    absolute = atmosphere.get_absolute_airmass(relative, pressure)
    airmass = pd.DataFrame(index=solar_position.index)
    airmass['airmass_relative'] = relative
    airmass['airmass_absolute'] = absolute
    return airmass
|
def reload(self):
    """Reload the metadata for this instance.

    For example:

    .. literalinclude:: snippets.py
        :start-after: [START bigtable_reload_instance]
        :end-before: [END bigtable_reload_instance]
    """
    fresh_pb = self._client.instance_admin_client.get_instance(self.name)
    # NOTE: _update_from_pb does not check that the project and
    # instance ID on the response match the request.
    self._update_from_pb(fresh_pb)
|
def write_warning(self, url_data):
    """Write url_data.warning."""
    # Emit the column header, then the wrapped, colourised warning lines.
    header = self.part("warning") + self.spaces("warning")
    self.write(header)
    formatted = [u"[%s] %s" % entry for entry in url_data.warnings]
    self.writeln(self.wrap(formatted, 65), color=self.colorwarning)
|
def obj_update(obj, data: dict, *, update_fields=UNSET, save: bool = True) -> bool:
    """Fancy way to update `obj` with `data` dict.

    Parameters
    ----------
    obj : Django model instance
    data
        The data to update ``obj`` with
    update_fields
        Use your ``update_fields`` instead of our generated one. If you need
        an auto_now or auto_now_add field to get updated, set this to ``None``
        to get the default Django behavior.
    save
        If save=False, then don't actually save. This can be useful if you
        just want to utilize the verbose logging.
        DEPRECATED in favor of the more standard ``update_fields=[]``

    Returns
    -------
    bool
        True if data changed
    """
    for attr_name, new_value in data.items():
        set_field(obj, attr_name, new_value)
    changes = getattr(obj, DIRTY, None)
    if not changes:
        # Nothing was modified: no save, no logging.
        return False
    logger.debug(
        human_log_formatter(changes),
        extra={
            'model': obj._meta.object_name,
            'pk': obj.pk,
            'changes': json_log_formatter(changes),
        },
    )
    if update_fields == UNSET:
        update_fields = [change['field_name'] for change in changes]
    if not save:
        # An empty update_fields tuple makes save() skip the UPDATE query.
        update_fields = ()
    obj.save(update_fields=update_fields)
    delattr(obj, DIRTY)
    return True
|
def snap_momentum_by_name(self, name, velocity, at=None):
    """Changes the velocity of a momentum named `name`.

    :param name: the momentum name.
    :param velocity: a new velocity.
    :param at: the time to snap. (default: now)
    :returns: a momentum updated.
    :raises TypeError: `name` is ``None``.
    :raises KeyError: failed to find a momentum named `name`.
    """
    moment = now_or(at)
    # Expired momenta are discarded before the update takes effect.
    self.forget_past(at=moment)
    return self.update_momentum_by_name(name, velocity=velocity, since=moment)
|
def as_binary(self, content, encoding='utf8'):
    '''Perform content encoding for binary write.'''
    # File-like objects are drained; text is encoded; bytes pass through.
    if hasattr(content, 'read'):
        return content.read()
    if isinstance(content, six.text_type):
        return content.encode(encoding)
    return content
|
def hsetnx(self, key, field, value):
    """Set the value of a hash field, only if the field does not exist."""
    command = b'HSETNX'
    return self.execute(command, key, field, value)
|
def dumper(args, config, transform_func=None):
    """Dumper main function.

    :param args: parsed CLI arguments namespace.
    :param config: configuration passed through to the exporter/submitter.
    :param transform_func: optional per-record transformation hook.
    :returns: exit code — 0 on success or nothing to do, 1 on export
        failure, 2 when submission/verification failed.
    """
    args = process_args(args)
    submit_args = get_submit_args(args)
    submit_outcome = submit_if_ready(args, submit_args, config)
    if submit_outcome is not None:
        # submitted, nothing more to do
        return submit_outcome
    # Timestamp taken *before* importing so the processed records can be
    # marked as exported afterwards using the same cut-off.
    import_time = datetime.datetime.utcnow()
    try:
        records = dump2polarion.import_results(args.input_file, older_than=import_time)
        testrun_id = get_testrun_id(args, config, records.testrun)
        exporter = dump2polarion.XunitExport(testrun_id, records, config, transform_func=transform_func)
        output = exporter.export()
    except NothingToDoException as info:
        # No new results is a normal, successful outcome.
        logger.info(info)
        return 0
    except (EnvironmentError, Dump2PolarionException) as err:
        logger.fatal(err)
        return 1
    if args.output_file or args.no_submit:
        # when no output file is specified, the 'testrun_TESTRUN_ID-TIMESTAMP'
        # file will be created in current directory
        exporter.write_xml(output, args.output_file)
    if not args.no_submit:
        response = dump2polarion.submit_and_verify(output, config=config, **submit_args)
        __, ext = os.path.splitext(args.input_file)
        # Only sqlite inputs get marked as exported, and only on success.
        if ext.lower() in dbtools.SQLITE_EXT and response:
            dbtools.mark_exported_sqlite(args.input_file, import_time)
        return 0 if response else 2
    return 0
|
def _lstrip_word(word, prefix):
    '''Return a copy of the string after the specified prefix was removed
    from the beginning of the string.'''
    text = six.text_type(word)
    if text.startswith(prefix):
        return text[len(prefix):]
    # Prefix absent: hand back the original object untouched.
    return word
|
def _buildNewKeyname ( self , key , prepend ) :
"""Builds a new keyword based on original keyword name and
a prepend string ."""
|
if len ( prepend + key ) <= 8 :
_new_key = prepend + key
else :
_new_key = str ( prepend + key ) [ : 8 ]
return _new_key
|
def get_group(value):
    """group = display-name ":" [group-list] ";" [CFWS]

    Parse an address group from *value*, returning the parsed Group token
    and the unconsumed remainder of the string.
    """
    group = Group()
    token, value = get_display_name(value)
    if not value or value[0] != ':':
        raise errors.HeaderParseError("expected ':' at end of group "
                                      "display name but found '{}'".format(value))
    group.append(token)
    group.append(ValueTerminal(':', 'group-display-name-terminator'))
    value = value[1:]
    if value and value[0] == ';':
        # Empty group: "name:;".
        group.append(ValueTerminal(';', 'group-terminator'))
        return group, value[1:]
    token, value = get_group_list(value)
    group.append(token)
    if not value:
        # BUG FIX: the original recorded this defect and then fell through to
        # `value[0]`, raising IndexError when the header ends inside the
        # group; the elif/else below skips the terminator handling instead.
        group.defects.append(errors.InvalidHeaderDefect("end of header in group"))
    elif value[0] != ';':
        raise errors.HeaderParseError("expected ';' at end of group but found {}".format(value))
    else:
        group.append(ValueTerminal(';', 'group-terminator'))
        value = value[1:]
    if value and value[0] in CFWS_LEADER:
        token, value = get_cfws(value)
        group.append(token)
    return group, value
|
def identify(self, token):
    """Identifies to the websocket endpoint.

    Args:
        token (string): Discord bot token
    """
    # BUG FIX: the documented `token` argument was previously ignored in
    # favor of `self.token`; honor the parameter actually passed in.
    payload = {
        'op': 2,
        'd': {
            'token': token,
            'properties': {
                '$os': sys.platform,
                '$browser': 'legobot',
                '$device': 'legobot',
            },
            'compress': False,
            'large_threshold': 250,
        },
    }
    payload['d']['synced_guilds'] = []
    logger.info("Identifying with the following message: {}".format(payload))
    self.ws.send(json.dumps(payload))
    return
|
def decode_to_pixbuf(image_data, width=None, height=None):
    """Decode an image from memory with GDK-PixBuf.

    The file format is detected automatically.

    :param image_data: A byte string
    :param width: Integer width in pixels or None
    :param height: Integer height in pixels or None
    :returns:
        A tuple of a new :class:`PixBuf` object
        and the name of the detected image format.
    :raises:
        :exc:`ImageLoadingError` if the image data is invalid
        or in an unsupported format.
    """
    # Attach g_object_unref to the loader so it is released when the Python
    # wrapper is garbage-collected.
    loader = ffi.gc(gdk_pixbuf.gdk_pixbuf_loader_new(), gobject.g_object_unref)
    error = ffi.new('GError **')
    if width and height:
        gdk_pixbuf.gdk_pixbuf_loader_set_size(loader, width, height)
    # Each call may populate *error; handle_g_error turns a reported failure
    # into a Python exception.
    handle_g_error(error, gdk_pixbuf.gdk_pixbuf_loader_write(loader, ffi.new('guchar[]', image_data), len(image_data), error))
    handle_g_error(error, gdk_pixbuf.gdk_pixbuf_loader_close(loader, error))
    format_ = gdk_pixbuf.gdk_pixbuf_loader_get_format(loader)
    format_name = (ffi.string(gdk_pixbuf.gdk_pixbuf_format_get_name(format_)).decode('ascii') if format_ != ffi.NULL else None)
    pixbuf = gdk_pixbuf.gdk_pixbuf_loader_get_pixbuf(loader)
    if pixbuf == ffi.NULL:  # pragma: no cover
        raise ImageLoadingError('Not enough image data (got a NULL pixbuf.)')
    return Pixbuf(pixbuf), format_name
|
async def parseform(self, limit=67108864, tostr=True, safename=True):
    '''Parse form data sent as multipart/form-data or application/x-www-form-urlencoded.

    Results are stored on ``self.form`` (field name -> value) and
    ``self.files`` (field name -> {'filename', 'content'}).  In Python 3 the
    keys of form and files are unicode, but values are bytes.
    If the key ends with '[]', it is considered to be a list:
    a=1&b=2&b=3 => {'a': 1, 'b': 3}
    a[]=1&b[]=2&b[]=3 => {'a': [1], 'b': [2,3]}

    :param limit: limit total input size, default to 64MB. None = no limit.
        Note that all the form data is stored in memory (including upload
        files), so it is dangerous to accept a very large input.
    :param tostr: convert values to str in Python 3. Only applies to form
        fields; file contents are always bytes.
    :param safename: if True, extra security checks are performed on
        filenames to reduce known security risks.
    '''
    if tostr:
        def _str(s):
            # Decode bytes with the request encoding; reject undecodable data.
            try:
                if not isinstance(s, str):
                    return s.decode(self.encoding)
                else:
                    return s
            except Exception:
                raise HttpInputException('Invalid encoding in post data: ' + repr(s))
    else:
        def _str(s):
            # Caller wants raw bytes: pass-through.
            return s
    try:
        form = {}
        files = {}
        # If there is not a content-type header, maybe there is not a content.
        if b'content-type' in self.headerdict and self.inputstream is not None:
            contenttype = self.headerdict[b'content-type']
            m = Message()
            # Email library expects string, which is unicode in Python 3
            try:
                m.add_header('Content-Type', str(contenttype.decode('ascii')))
            except UnicodeDecodeError:
                raise HttpInputException('Content-Type has non-ascii characters')
            if m.get_content_type() == 'multipart/form-data':
                # Feed the raw body to the email parser as a synthetic MIME
                # message so it splits the multipart boundaries for us.
                fp = BytesFeedParser()
                fp.feed(b'Content-Type: ' + contenttype + b'\r\n\r\n')
                total_length = 0
                while True:
                    try:
                        await self.inputstream.prepareRead(self.container)
                        data = self.inputstream.readonce()
                        total_length += len(data)
                        if limit is not None and total_length > limit:
                            raise HttpInputException('Data is too large')
                        fp.feed(data)
                    except EOFError:
                        # End of request body.
                        break
                msg = fp.close()
                if not msg.is_multipart() or msg.defects:
                    # Reject the data
                    raise HttpInputException('Not valid multipart/form-data format')
                for part in msg.get_payload():
                    if part.is_multipart() or part.defects:
                        raise HttpInputException('Not valid multipart/form-data format')
                    disposition = part.get_params(header='content-disposition')
                    if not disposition:
                        raise HttpInputException('Not valid multipart/form-data format')
                    disposition = dict(disposition)
                    if 'form-data' not in disposition or 'name' not in disposition:
                        raise HttpInputException('Not valid multipart/form-data format')
                    if 'filename' in disposition:
                        # File upload part: store filename and raw content.
                        name = disposition['name']
                        filename = disposition['filename']
                        if safename:
                            filename = _safename(filename)
                        if name.endswith('[]'):
                            files.setdefault(name[:-2], []).append({'filename': filename, 'content': part.get_payload(decode=True)})
                        else:
                            files[name] = {'filename': filename, 'content': part.get_payload(decode=True)}
                    else:
                        # Plain form field.
                        name = disposition['name']
                        if name.endswith('[]'):
                            form.setdefault(name[:-2], []).append(_str(part.get_payload(decode=True)))
                        else:
                            form[name] = _str(part.get_payload(decode=True))
            elif m.get_content_type() == 'application/x-www-form-urlencoded' or m.get_content_type() == 'application/x-url-encoded':
                if limit is not None:
                    # Read limit+1 bytes so oversize input is detected, not
                    # silently truncated.
                    data = await self.inputstream.read(self.container, limit + 1)
                    if len(data) > limit:
                        raise HttpInputException('Data is too large')
                else:
                    data = await self.inputstream.read(self.container)
                result = parse_qs(data, True)
                def convert(k, v):
                    # parse_qs returns lists of values; keep only the last
                    # value unless the key is explicitly a list ('[]').
                    try:
                        k = str(k.decode('ascii'))
                    except Exception:
                        raise HttpInputException('Form-data key must be ASCII')
                    if not k.endswith('[]'):
                        v = _str(v[-1])
                    else:
                        k = k[:-2]
                        v = [_str(i) for i in v]
                    return (k, v)
                form = dict(convert(k, v) for k, v in result.items())
            else:
                # Other formats, treat like no data
                pass
        self.form = form
        self.files = files
    except Exception as exc:
        # Normalize every parse failure into HttpInputException.
        raise HttpInputException('Failed to parse form-data: ' + str(exc))
|
def send(self):
    """Send the broadcast message.

    :returns: tuple of (:class:`adnpy.models.Message`, :class:`adnpy.models.APIMeta`)
    """
    annotations = []
    entities = {
        'parse_links': self.parse_links or self.parse_markdown_links,
        'parse_markdown_links': self.parse_markdown_links,
    }
    message = {'annotations': annotations, 'entities': entities}
    if self.photo:
        # Upload the photo and attach it as an oembed annotation.
        photo, _photo_meta = _upload_file(self.api, self.photo)
        annotations.append({
            'type': 'net.app.core.oembed',
            'value': {
                '+net.app.core.file': {
                    'file_id': photo.id,
                    'file_token': photo.file_token,
                    'format': 'oembed',
                }
            }
        })
    if self.attachment:
        # Upload the attachment and attach it as a file-list annotation.
        attachment, _attachment_meta = _upload_file(self.api, self.attachment)
        annotations.append({
            'type': 'net.app.core.attachments',
            'value': {
                '+net.app.core.file_list': [{
                    'file_id': attachment.id,
                    'file_token': attachment.file_token,
                    'format': 'metadata',
                }]
            }
        })
    if self.text:
        message['text'] = self.text
    else:
        # A broadcast without text is flagged as machine-only.
        message['machine_only'] = True
    if self.headline:
        annotations.append({
            'type': 'net.app.core.broadcast.message.metadata',
            'value': {'subject': self.headline, },
        })
    if self.read_more_link:
        annotations.append({
            'type': 'net.app.core.crosspost',
            'value': {'canonical_url': self.read_more_link, }
        })
    return self.api.create_message(self.channel_id, data=message)
|
def calculate(self, token_list_x, token_list_y):
    '''Calculate similarity as the cosine similarity of TF-IDF vectors.

    Concrete method.

    Args:
        token_list_x: [token, token, token, ...]
        token_list_y: [token, token, token, ...]

    Returns:
        Similarity; 0.0 when either list is empty or either vector has
        zero norm.
    '''
    if not token_list_x or not token_list_y:
        return 0.0
    # Vocabulary is every distinct token from both documents.
    document_list = list(set(token_list_x) | set(token_list_y))
    tfidf_vectorizer = TfidfVectorizer(document_list)
    vector_list_x = tfidf_vectorizer.vectorize(token_list_x)
    vector_list_y = tfidf_vectorizer.vectorize(token_list_y)
    # Zero-pad the shorter vector so the dot product is defined.
    if len(vector_list_x) > len(vector_list_y):
        vector_list_y.extend([0.0] * (len(vector_list_x) - len(vector_list_y)))
    elif len(vector_list_y) > len(vector_list_x):
        vector_list_x.extend([0.0] * (len(vector_list_y) - len(vector_list_x)))
    dot_prod = np.dot(vector_list_x, vector_list_y)
    norm_x = np.linalg.norm(vector_list_x)
    norm_y = np.linalg.norm(vector_list_y)
    denominator = norm_x * norm_y
    # NumPy division by zero yields nan/inf rather than raising
    # ZeroDivisionError (which the original tried to catch), so guard
    # the denominator explicitly.
    if denominator == 0.0:
        return 0.0
    result = dot_prod / denominator
    # np.isnan() returns numpy.bool_, so the original `is True` check was
    # always False and NaN could leak out; use the truth value directly.
    if np.isnan(result):
        return 0.0
    return result
|
def RgbToHsl(r, g, b):
    '''Convert the color from RGB coordinates to HSL.

    Parameters:
      :r: The Red component value [0...1]
      :g: The Green component value [0...1]
      :b: The Blue component value [0...1]

    Returns:
      The color as an (h, s, l) tuple in the range:
      h[0...360],
      s[0...1],
      l[0...1]

    >>> Color.RgbToHsl(1, 0.5, 0)
    (30.0, 1.0, 0.5)
    '''
    lo = min(r, g, b)
    hi = max(r, g, b)
    l = (hi + lo) / 2.0
    if lo == hi:
        # Achromatic (gray): hue/saturation are meaningless, report zero.
        return (0.0, 0.0, l)
    delta = hi - lo
    s = delta / (hi + lo) if l < 0.5 else delta / (2.0 - hi - lo)
    # Normalized distances of each channel from the maximum.
    dr, dg, db = ((hi - channel) / delta for channel in (r, g, b))
    if hi == r:
        h = db - dg
    elif hi == g:
        h = 2.0 + dr - db
    else:
        h = 4.0 + dg - dr
    return ((h * 60.0) % 360.0, s, l)
|
def address_inline(request, prefix="", country_code=None, template_name="postal/form.html"):
    """Displays postal address with localized fields"""
    # A prefix submitted in the POST data overrides the argument.
    prefix = request.POST.get('prefix', prefix)
    country_prefix = prefix + '-country' if prefix else "country"
    country_code = request.POST.get(country_prefix, country_code)
    form_class = form_factory(country_code=country_code)
    if request.method == "POST":
        # Keep only non-empty submitted values as initial form data.
        data = {key: val for key, val in request.POST.items()
                if val is not None and len(val) > 0}
        data[country_prefix] = country_code
        form = form_class(prefix=prefix, initial=data)
    else:
        form = form_class(prefix=prefix)
    return render_to_string(template_name, RequestContext(request, {"form": form, "prefix": prefix, }))
|
def _extract_table(table_data, current, pc, ts, tt):
    """Use the given table data to create a time series entry for each column in the table.

    :param dict table_data: Table data
    :param dict current: LiPD root data
    :param str pc: paleoData or chronData
    :param list ts: Time series (so far)
    :param tt: Table type marker; passed through to _extract_table_model
        which reacts to "ens"/"summ" tables  # NOTE(review): exact values not visible here — confirm
    :return list ts: Time series (so far)
    """
    current["tableType"] = tt
    # Get root items for this table
    current = _extract_table_root(table_data, current, pc)
    # Add in modelNumber and tableNumber if this is "ens" or "summ" table
    current = _extract_table_model(table_data, current, tt)
    # Add age, depth, and year columns to root if available
    _table_tmp = _extract_special(current, table_data)
    try:
        # Start creating entries using dictionary copies.
        for _col_name, _col_data in table_data["columns"].items():
            # Add column data onto root items. Copy so we don't ruin original data
            _col_tmp = _extract_columns(_col_data, copy.deepcopy(_table_tmp), pc)
            try:
                ts.append(_col_tmp)
            except Exception as e:
                logger_ts.warn("extract_table: Unable to create ts entry, {}".format(e))
    except Exception as e:
        # Missing/malformed "columns" key lands here.
        logger_ts.error("extract_table: {}".format(e))
    return ts
|
def _merge_layout_objs ( obj , subobj ) :
"""Merge layout objects recursively
Note : This function mutates the input obj dict , but it does not mutate
the subobj dict
Parameters
obj : dict
dict into which the sub - figure dict will be merged
subobj : dict
dict that sill be copied and merged into ` obj `"""
|
for prop , val in subobj . items ( ) :
if isinstance ( val , dict ) and prop in obj : # recursion
_merge_layout_objs ( obj [ prop ] , val )
elif ( isinstance ( val , list ) and obj . get ( prop , None ) and isinstance ( obj [ prop ] [ 0 ] , dict ) ) : # append
obj [ prop ] . extend ( val )
else : # init / overwrite
obj [ prop ] = copy . deepcopy ( val )
|
def make_thematic_png(self, outpath=None):
    """Convert a thematic map into png format with a legend.

    :param outpath: if specified, will save the image instead of showing it
    """
    from matplotlib.patches import Patch
    fig, previewax = plt.subplots()
    shape = self.thmap.shape
    previewax.imshow(self.thmap, origin='lower', interpolation='nearest',
                     cmap=self.config.solar_cmap,
                     vmin=-1, vmax=len(self.config.solar_classes) - 1)
    # One legend swatch per solar class color.
    legend_elements = [Patch(facecolor=c, label=sc, edgecolor='k')
                       for sc, c in self.config.solar_colors.items()]
    previewax.legend(handles=legend_elements, fontsize='x-small',
                     bbox_to_anchor=(0., 1.02, 1., .102), loc=3, ncol=2,
                     mode="expand", borderaxespad=0.)
    # BUG FIX: the x-limit previously used shape[0] (rows) for both axes,
    # which clipped/stretched non-square maps; the x extent is the column
    # count shape[1].  Identical behavior for square maps.
    previewax.set_xlim([0, shape[1]])
    previewax.set_ylim([0, shape[0]])
    previewax.set_aspect("equal")
    previewax.set_axis_off()
    if outpath:
        fig.savefig(outpath, dpi=300, transparent=True, bbox_inches='tight', pad_inches=0.)
        plt.close()
    else:
        plt.show()
|
def table_row(text_array, pad=-1):
    """Return a single table row.

    Keyword arguments:
    pad -- The pad should be an array of the same size as the input text array.
           It will be used to format the row's padding (minimum cell width).

    >>> table_row(["First column", "Second", "Third"])
    '| First column | Second | Third |'
    """
    if pad == -1:
        # No explicit padding: every cell collapses to its natural width.
        pad = [0] * len(text_array)
    pieces = ["|"]
    for index, text in enumerate(text_array):
        width = pad[index] + 1
        pieces.append((" " + esc_format(text)).ljust(width) + " |")
    return "".join(pieces)
|
def max_send_data_size(self):
    """The maximum number of octets that can be sent with the
    :meth:`exchange` method in the established operating mode."""
    with self.lock:
        if self.device is None:
            # Device has gone away: report "no such device".
            raise IOError(errno.ENODEV, os.strerror(errno.ENODEV))
        return self.device.get_max_send_data_size(self.target)
|
def _Close ( self ) :
"""Closes the file - like object .
If the file - like object was passed in the init function
the encoded stream file - like object does not control
the file - like object and should not actually close it ."""
|
if not self . _file_object_set_in_init :
self . _file_object . close ( )
self . _file_object = None
self . _decoder = None
self . _decoded_data = b''
self . _encoded_data = b''
|
def require(*args, **kwargs):
    '''Install a set of packages using pip.

    This is designed to be an interface for IPython notebooks that
    replicates the requirements.txt pip format.  This lets notebooks
    specify which versions of packages they need inside the notebook
    itself.

    This function is the general-purpose interface that lets the
    caller specify any version string for any package.
    '''
    # If called with no arguments, returns requirements list
    if not args and not kwargs:
        return freeze()
    # Positional args are taken verbatim; keyword args become
    # '<package><version spec>' requirement strings.
    requirements = list(args) + ['{}{}'.format(name, spec) for name, spec in kwargs.items()]
    pip.main(['install', '-q'] + requirements)
|
def build_props(self):
    """Build the props dictionary."""
    props = {}
    if self.filters:
        # Serialize each filter group to the params of its filters.
        props["filters"] = {group: [flt.params for flt in members]
                            for group, members in self.filters.items()}
    if self.charts:
        props["charts"] = [chart.params for chart in self.charts]
    props["type"] = self.layout
    return props
|
def get_if_raw_addr6(iff):
    """Returns the main global unicast address associated with provided
    interface, in network format.  If no global address is found, None
    is returned.
    """
    # in6_getifaddr() yields (address, scope, interface) tuples; take the
    # first global-scope address on the requested interface.
    for addr, scope, ifname in in6_getifaddr():
        if ifname == iff and scope == IPV6_ADDR_GLOBAL:
            return inet_pton(socket.AF_INET6, addr)
    return None
|
def get_sof_term(self, C, rup):
    """In the case of the upper mantle events separate coefficients
    are considered for normal, reverse and strike-slip."""
    rake = rup.rake
    if -135.0 <= rake <= -45.0:
        # Normal faulting
        return C["FN_UM"]
    if 45.0 < rake < 135.0:
        # Reverse faulting
        return C["FRV_UM"]
    # No adjustment for strike-slip faulting
    return 0.0
|
async def scroll(self, value, mode='relative'):
    """Scroll the cursor in the result set to a new position
    according to mode.  Same as :meth:`Cursor.scroll`, but move cursor
    on server side one by one row.  If you want to move 20 rows forward
    scroll will make 20 queries to move cursor.  Currently only forward
    scrolling is supported.

    :param int value: move cursor to next position according to mode.
    :param str mode: scroll mode, possible modes: `relative` and `absolute`
    :raises NotSupportedError: when the requested move is backwards
    :raises ProgrammingError: on an unknown mode
    """
    self._check_executed()
    if mode == 'relative':
        if value < 0:
            raise NotSupportedError("Backwards scrolling not supported "
                                    "by this cursor")
        # Advance one row per server round trip.
        for _ in range(value):
            await self._read_next()
        self._rownumber += value
    elif mode == 'absolute':
        if value < self._rownumber:
            raise NotSupportedError("Backwards scrolling not supported by this cursor")
        # Convert the absolute target into a forward offset.
        end = value - self._rownumber
        for _ in range(end):
            await self._read_next()
        self._rownumber = value
    else:
        raise ProgrammingError("unknown scroll mode %s" % mode)
|
def async_new_device_callback(self, device):
    """Log that our new device callback worked."""
    _LOGGING.info('New Device: %s cat: 0x%02x subcat: 0x%02x desc: %s, model: %s',
                  device.id, device.cat, device.subcat,
                  device.description, device.model)
    # Subscribe to updates for every state the device exposes.
    for state_key in device.states:
        device.states[state_key].register_updates(self.async_state_change_callback)
|
def get_default_image_build_conf(self):
    """Create a default image build config.

    :rtype: ConfigParser
    :return: Initialized config with defaults
    """
    target = self.koji_target
    vcs_info = self.workflow.source.get_vcs_info()
    ksurl = '{}#{}'.format(vcs_info.vcs_url, vcs_info.vcs_ref)
    base_urls = []
    for repo in self.repos:
        for url in self.extract_base_url(repo):
            # Imagefactory only supports $arch variable.
            url = url.replace('$basearch', '$arch')
            base_urls.append(url)
    install_tree = base_urls[0] if base_urls else ''
    repo = ','.join(base_urls)
    kwargs = {
        'target': target,
        'ksurl': ksurl,
        'install_tree': install_tree,
        'repo': repo,
    }
    config_fp = StringIO(self.DEFAULT_IMAGE_BUILD_CONF.format(**kwargs))
    config = ConfigParser()
    # readfp() is deprecated since Python 3.2 and removed in 3.12;
    # read_file() is the drop-in replacement.
    config.read_file(config_fp)
    self.update_config_from_dockerfile(config)
    return config
|
def export_project(project, temporary_dir, include_images=False, keep_compute_id=False, allow_all_nodes=False, ignore_prefixes=None):
    """Export the project as zip.  It's a ZipStream object.
    The file will be read chunk by chunk when you iterate on
    the zip.

    It will ignore some files like snapshots and

    :param temporary_dir: A temporary dir where to store intermediate data
    :param include_images: passed to _export_project_file; presumably controls
        whether disk images are bundled — confirm in that helper
    :param keep_compute_id: If false replace all compute id by local it's the standard behavior for .gns3project to make them portable
    :param allow_all_nodes: Allow all nodes type to be include in the zip even if not portable default False
    :param ignore_prefixes: NOTE(review): not used in this function body — verify whether it is dead or consumed elsewhere
    :returns: ZipStream object
    """
    # To avoid issue with data not saved we disallow the export of a running topologie
    if project.is_running():
        raise aiohttp.web.HTTPConflict(text="Running topology could not be exported")
    # Make sure we save the project
    project.dump()
    z = zipstream.ZipFile(allowZip64=True)
    if not os.path.exists(project._path):
        raise aiohttp.web.HTTPNotFound(text="The project doesn't exist at location {}".format(project._path))
    # First we process the .gns3 in order to be sure we don't have an error
    for file in os.listdir(project._path):
        if file.endswith(".gns3"):
            images = yield from _export_project_file(project, os.path.join(project._path, file), z, include_images, keep_compute_id, allow_all_nodes, temporary_dir)
    for root, dirs, files in os.walk(project._path, topdown=True):
        files = [f for f in files if not _filter_files(os.path.join(root, f))]
        for file in files:
            path = os.path.join(root, file)
            # Try open the file to make sure it is readable before zipping it
            try:
                open(path).close()
            except OSError as e:
                msg = "Could not export file {}: {}".format(path, e)
                log.warn(msg)
                project.controller.notification.emit("log.warning", {"message": msg})
                continue
            if file.endswith(".gns3"):
                # Already handled (rewritten) by _export_project_file above.
                pass
            else:
                z.write(path, os.path.relpath(path, project._path), compress_type=zipfile.ZIP_DEFLATED)
    # Fetch files that live on remote computes and add them to the archive.
    downloaded_files = set()
    for compute in project.computes:
        if compute.id != "local":
            compute_files = yield from compute.list_files(project)
            for compute_file in compute_files:
                if not _filter_files(compute_file["path"]):
                    # Stream each remote file into a temp file in 512-byte chunks.
                    (fd, temp_path) = tempfile.mkstemp(dir=temporary_dir)
                    f = open(fd, "wb", closefd=True)
                    response = yield from compute.download_file(project, compute_file["path"])
                    while True:
                        data = yield from response.content.read(512)
                        if not data:
                            break
                        f.write(data)
                    response.close()
                    f.close()
                    z.write(temp_path, arcname=compute_file["path"], compress_type=zipfile.ZIP_DEFLATED)
                    downloaded_files.add(compute_file['path'])
    return z
|
def _build_trigram_indices ( trigram_index ) :
"""Build a dictionary of trigrams and their indices from a csv"""
|
result = { }
trigram_count = 0
for key , val in csv . reader ( open ( trigram_index ) ) :
result [ key ] = int ( val )
trigram_count += 1
return result , trigram_count
|
def box(self, x0, y0, width, height):
    """Create a box on ASCII canvas.

    Args:
        x0 (int): x coordinate of the box corner.
        y0 (int): y coordinate of the box corner.
        width (int): box width.
        height (int): box height.
    """
    assert width > 1
    assert height > 1
    # Inclusive coordinates of the far edges.
    x1 = x0 + width - 1
    y1 = y0 + height - 1
    # Horizontal edges.
    for x in range(x0, x1):
        self.point(x, y0, "-")
        self.point(x, y1, "-")
    # Vertical edges.
    for y in range(y0, y1):
        self.point(x0, y, "|")
        self.point(x1, y, "|")
    # Corners last so they overwrite the edge characters.
    for corner_x, corner_y in ((x0, y0), (x1, y0), (x0, y1), (x1, y1)):
        self.point(corner_x, corner_y, "+")
|
def _get_runner(classpath, main, jvm_options, args, executor, cwd, distribution,
                create_synthetic_jar, synthetic_jar_dir):
    """Gets the java runner for execute_java and execute_java_async."""
    executor = executor or SubprocessExecutor(distribution)
    if create_synthetic_jar:
        # Bundle a long classpath into a synthetic manifest-only jar to
        # avoid command-line length limits.
        safe_cp = safe_classpath(classpath, synthetic_jar_dir)
        logger.debug('Bundling classpath {} into {}'.format(':'.join(classpath), safe_cp))
    else:
        safe_cp = classpath
    return executor.runner(safe_cp, main, args=args, jvm_options=jvm_options, cwd=cwd)
|
def _parse_tmx(path):
    """Generates examples from TMX file."""
    def _tuv_lang(tuv):
        # The language attribute is namespace-qualified (e.g. '{...}lang').
        for attr, value in tuv.items():
            if attr.endswith("}lang"):
                return value
        raise AssertionError("Language not found in `tuv` attributes.")

    def _tuv_seg(tuv):
        segments = tuv.findall("seg")
        assert len(segments) == 1, "Invalid number of segments: %d" % len(segments)
        return segments[0].text

    with tf.io.gfile.GFile(path) as f:
        for _, elem in ElementTree.iterparse(f):
            if elem.tag == "tu":
                yield {_tuv_lang(tuv): _tuv_seg(tuv) for tuv in elem.iterfind("tuv")}
                # Free the parsed element to keep memory bounded on large files.
                elem.clear()
|
def format_v1_score_response(response, limit_to_model=None):
    """The response format looks like this::

        "<rev_id>": {
            "<model_name>": <score>,
            "<model_name>": <score>
        },
        "<rev_id>": {
            "<model_name>": <score>,
            "<model_name>": <score>
        }
    """
    response_doc = defaultdict(dict)
    # Successful scores first ...
    for rev_id, rev_scores in response.scores.items():
        for model_name, score in rev_scores.items():
            response_doc[rev_id][model_name] = score
    # ... then formatted errors (these overwrite any score for the same model).
    for rev_id, rev_errors in response.errors.items():
        for model_name, error in rev_errors.items():
            response_doc[rev_id][model_name] = util.format_error(error)
    if limit_to_model is None:
        return util.jsonify(response_doc)
    limited = {rev_id: model_scores[limit_to_model]
               for rev_id, model_scores in response_doc.items()}
    return util.jsonify(limited)
|
def load(self, data, many=None, partial=None):
    """Deserialize a data structure to an object."""
    result = super(ResumptionTokenSchema, self).load(data, many=many, partial=partial)
    # Flatten the kwargs stored inside the resumption token into the
    # top level of the deserialized data.
    token_kwargs = result.data.get('resumptionToken', {}).get('kwargs', {})
    result.data.update(token_kwargs)
    return result
|
def serve(files, immutable, host, port, debug, reload, cors, sqlite_extensions,
          inspect_file, metadata, template_dir, plugins_dir, static, memory,
          config, version_note, help_config, ):
    """Serve up specified SQLite database files with a web UI."""
    if help_config:
        # Print the config option reference and exit without serving.
        formatter = formatting.HelpFormatter()
        with formatter.section("Config options"):
            formatter.write_dl([
                (option.name, '{} (default={})'.format(option.help, option.default))
                for option in CONFIG_OPTIONS
            ])
        click.echo(formatter.getvalue())
        sys.exit(0)
    if reload:
        import hupper
        reloader = hupper.start_reloader("datasette.cli.serve")
        reloader.watch_files(files)
        if metadata:
            reloader.watch_files([metadata.name])
    inspect_data = None
    if inspect_file:
        # Use a context manager so the handle is closed; the original
        # `json.load(open(inspect_file))` leaked it.
        with open(inspect_file) as fp:
            inspect_data = json.load(fp)
    metadata_data = None
    if metadata:
        # `metadata` is an already-open file object supplied by the CLI layer.
        metadata_data = json.loads(metadata.read())
    click.echo("Serve! files={} (immutables={}) on port {}".format(files, immutable, port))
    ds = Datasette(
        files,
        immutables=immutable,
        cache_headers=not debug and not reload,
        cors=cors,
        inspect_data=inspect_data,
        metadata=metadata_data,
        sqlite_extensions=sqlite_extensions,
        template_dir=template_dir,
        plugins_dir=plugins_dir,
        static_mounts=static,
        config=dict(config),
        memory=memory,
        version_note=version_note,
    )
    # Force initial hashing/table counting
    ds.inspect()
    ds.app().run(host=host, port=port, debug=debug)
|
def _get_demand_array_construct(self):
    """Returns a pyparsing construct for an array of power demand data.

    Each matched ``Demand.con`` row is delivered to :meth:`push_demand`
    via a parse action.
    """
    bus_no = integer.setResultsName("bus_no")
    s_rating = real.setResultsName("s_rating")  # MVA
    p_direction = real.setResultsName("p_direction")  # p.u.
    q_direction = real.setResultsName("q_direction")  # p.u.
    p_bid_max = real.setResultsName("p_bid_max")  # p.u.
    p_bid_min = real.setResultsName("p_bid_min")  # p.u.
    p_optimal_bid = Optional(real).setResultsName("p_optimal_bid")
    p_fixed = real.setResultsName("p_fixed")  # $/hr
    p_proportional = real.setResultsName("p_proportional")  # $/MWh
    p_quadratic = real.setResultsName("p_quadratic")  # $/MW^2h
    q_fixed = real.setResultsName("q_fixed")  # $/hr
    q_proportional = real.setResultsName("q_proportional")  # $/MVArh
    q_quadratic = real.setResultsName("q_quadratic")  # $/MVAr^2h
    commitment = boolean.setResultsName("commitment")
    cost_tie_break = real.setResultsName("cost_tie_break")  # $/MWh
    cost_cong_up = real.setResultsName("cost_cong_up")
    cost_cong_down = real.setResultsName("cost_cong_down")
    status = Optional(boolean).setResultsName("status")
    # One row of the Demand.con matrix, terminated by a semicolon.
    demand_data = bus_no + s_rating + p_direction + q_direction + p_bid_max + p_bid_min + p_optimal_bid + p_fixed + p_proportional + p_quadratic + q_fixed + q_proportional + q_quadratic + commitment + cost_tie_break + cost_cong_up + cost_cong_down + status + scolon
    demand_data.setParseAction(self.push_demand)
    # Header 'Demand.con = [ ...' followed by zero or more rows; the
    # closing ']' may follow any row.
    demand_array = Literal("Demand.con") + "=" + "[" + "..." + ZeroOrMore(demand_data + Optional("]" + scolon))
    return demand_array
|
def run_cgmlst(blast_runner, full=False):
    """Perform in silico cgMLST on an input genome.

    Args:
        blast_runner (sistr.src.blast_wrapper.BlastRunner): blastn runner object with genome fasta initialized
        full (bool): use the full cgMLST allele FASTA instead of the centroid alleles

    Returns:
        dict: cgMLST ref genome match, distance to closest ref genome, subspecies and serovar predictions
        dict: marker allele match results (seq, allele name, blastn results)
    """
    from sistr.src.serovar_prediction.constants import genomes_to_serovar
    df_cgmlst_profiles = ref_cgmlst_profiles()
    logging.debug('{} distinct cgMLST330 profiles'.format(df_cgmlst_profiles.shape[0]))
    logging.info('Running BLAST on serovar predictive cgMLST330 alleles')
    cgmlst_fasta_path = CGMLST_CENTROID_FASTA_PATH if not full else CGMLST_FULL_FASTA_PATH
    blast_outfile = blast_runner.blast_against_query(cgmlst_fasta_path)
    logging.info('Reading BLAST output file "{}"'.format(blast_outfile))
    blast_reader = BlastReader(blast_outfile)
    if blast_reader.df is None:
        # No BLAST hits at all: return an "empty" result instead of raising.
        logging.error('No cgMLST330 alleles found!')
        return ({'distance': 1.0, 'genome_match': None, 'serovar': None, 'matching_alleles': 0, 'subspecies': None, 'cgmlst330_ST': None, }, {}, )
    logging.info('Found {} cgMLST330 allele BLAST results'.format(blast_reader.df.shape[0]))
    df_cgmlst_blastn = process_cgmlst_results(blast_reader.df)
    marker_match_results = matches_to_marker_results(df_cgmlst_blastn[df_cgmlst_blastn.is_match])
    contig_blastn_records = alleles_to_retrieve(df_cgmlst_blastn)
    retrieved_marker_alleles = get_allele_sequences(blast_runner.fasta_path, contig_blastn_records, full=full)
    logging.info('Type retrieved_marker_alleles %s', type(retrieved_marker_alleles))
    # Merge exact matches with alleles retrieved from contigs; retrieved
    # alleles overwrite the exact-match entry for the same marker.
    all_marker_results = marker_match_results.copy()
    for marker, res in retrieved_marker_alleles.items():
        all_marker_results[marker] = res
    # Ensure every marker from the reference profiles has an entry.
    for marker in df_cgmlst_profiles.columns:
        if marker not in all_marker_results:
            all_marker_results[marker] = {'blast_result': None, 'name': None, 'seq': None, }
    cgmlst_results = {}
    for marker, res in all_marker_results.items():
        try:
            cgmlst_results[marker] = int(res['name'])
        except:
            # NOTE(review): bare except covers both missing and non-numeric
            # allele names — consider narrowing to (KeyError, TypeError, ValueError).
            logging.error('Missing cgmlst_results for %s', marker)
            logging.debug(res)
    logging.info('Calculating number of matching alleles to serovar predictive cgMLST330 profiles')
    df_relatives = find_closest_related_genome(cgmlst_results, df_cgmlst_profiles)
    genome_serovar_dict = genomes_to_serovar()
    df_relatives['serovar'] = [genome_serovar_dict[genome] for genome in df_relatives.index]
    logging.debug('Top 5 serovar predictive cgMLST profiles:\n{}'.format(df_relatives.head()))
    spp = None
    subspeciation_tuple = cgmlst_subspecies_call(df_relatives)
    if subspeciation_tuple is not None:
        spp, distance, spp_counter = subspeciation_tuple
        logging.info('Top subspecies by cgMLST is "{}" (min dist={}, Counter={})'.format(spp, distance, spp_counter))
    else:
        logging.warning('Subspeciation by cgMLST was not possible!')
    cgmlst_serovar = None
    cgmlst_matching_genome = None
    cgmlst_matching_alleles = 0
    cgmlst_distance = 1.0
    # Only the first (closest) relative is used; the loop breaks immediately.
    for idx, row in df_relatives.iterrows():
        cgmlst_distance = row['distance']
        cgmlst_matching_alleles = row['matching']
        cgmlst_serovar = row['serovar'] if cgmlst_distance <= 1.0 else None
        cgmlst_matching_genome = idx if cgmlst_distance <= 1.0 else None
        logging.info('Top serovar by cgMLST profile matching: "{}" with {} matching alleles, distance={:.1%}'.format(cgmlst_serovar, cgmlst_matching_alleles, cgmlst_distance))
        break
    cgmlst_st = None
    cgmlst_markers_sorted = sorted(all_marker_results.keys())
    cgmlst_allele_names = []
    marker = None
    # The sequence type is defined only when every sorted marker has an
    # allele name; bail out at the first missing one.
    for marker in cgmlst_markers_sorted:
        try:
            aname = all_marker_results[marker]['name']
            if aname:
                cgmlst_allele_names.append(str(aname))
            else:
                break
        except:
            # NOTE(review): bare except — same caveat as above.
            break
    if len(cgmlst_allele_names) == len(cgmlst_markers_sorted):
        cgmlst_st = allele_name('-'.join(cgmlst_allele_names))
        logging.info('cgMLST330 Sequence Type=%s', cgmlst_st)
    else:
        logging.warning('Could not compute cgMLST330 Sequence Type due to missing data (marker %s)', marker)
    return ({'distance': cgmlst_distance, 'genome_match': cgmlst_matching_genome, 'serovar': cgmlst_serovar, 'matching_alleles': cgmlst_matching_alleles, 'subspecies': spp, 'cgmlst330_ST': cgmlst_st, }, all_marker_results, )
|
def cnst_A1(self, X, Xf=None):
    r"""Compute :math:`A_1 \mathbf{x}` component of ADMM problem
    constraint.  In this case :math:`A_1 \mathbf{x} = (\Gamma_0^T \;\;
    \Gamma_1^T \;\; \ldots)^T \mathbf{x}`.

    Parameters
    ----------
    X : array_like
        Input array in the spatial domain.
    Xf : array_like, optional
        DFT of ``X``; computed here when not supplied.
    """
    if Xf is None:
        Xf = sl.rfftn(X, axes=self.cri.axisN)
    # Apply the stacked operators in the frequency domain, then
    # transform back to the spatial domain.
    return sl.irfftn(sl.inner(self.GDf, Xf[..., np.newaxis], axis=self.cri.axisM), self.cri.Nv, self.cri.axisN)
|
def update_trainer(self, trainer, username=None, start_date=None, has_cheated=None,
                   last_cheated=None, currently_cheats=None, statistics=None,
                   daily_goal=None, total_goal=None, prefered=None):
    """Update parts of a trainer in a database."""
    # Snapshot the call arguments before creating any other locals.
    args = locals()
    if not isinstance(trainer, Trainer):
        raise ValueError
    url = api_url + 'trainers/' + str(trainer.id) + '/'
    payload = {'last_modified': maya.now().iso8601()}
    for name, value in args.items():
        if value is None or name in ('self', 'trainer'):
            continue
        if name == 'start_date':
            # Dates are serialized as ISO date strings.
            payload[name] = value.date().isoformat()
        else:
            payload[name] = value
    r = requests.patch(url, data=json.dumps(payload), headers=self.headers)
    print(request_status(r))
    r.raise_for_status()
    return Trainer(r.json())
|
def _line_opt(self):
    """Perform a line search along the current search direction.

    Returns True when the search succeeded (``self.x``, ``self.f`` and
    ``self.step`` are updated), False when the constraint projection
    fails, the direction vanishes, or the line search itself fails
    (state is then reset via ``self._reset_state()``).
    """
    direction = self.search_direction.direction
    if self.constraints is not None:
        try:
            # Project the raw direction onto the feasible subspace.
            direction = self.constraints.project(self.x, direction)
        except ConstraintError:
            self._screen("CONSTRAINT PROJECT FAILED", newline=True)
            return False
    direction_norm = np.linalg.norm(direction)
    if direction_norm == 0:
        # Degenerate search direction; nothing to optimize along.
        return False
    # Search along the unit direction starting from the current point.
    self.line.configure(self.x, direction / direction_norm)
    success, wolfe, qopt, fopt = self.line_search(self.line, self.initial_step_size, self.epsilon)
    if success:
        self.step = qopt * self.line.axis
        # Use the accepted step length as the initial guess next time.
        self.initial_step_size = np.linalg.norm(self.step)
        self.x = self.x + self.step
        self.f = fopt
        if wolfe:
            self._screen("W")
        else:
            # Wolfe condition not met: restart the search direction.
            self._screen(" ")
            self.search_direction.reset()
        return True
    else:
        if self.debug_line:
            # Dump a plot of the objective along the failed search line
            # to help diagnose why the line search did not converge.
            import matplotlib.pyplot as pt
            import datetime
            pt.clf()
            # Sample 101 points over five times the initial step size.
            qs = np.arange(0.0, 100.1) * (5 * self.initial_step_size / 100.0)
            fs = np.array([self.line(q) for q in qs])
            pt.plot(qs, fs)
            pt.xlim(qs[0], qs[-1])
            fdelta = fs.max() - fs.min()
            if fdelta == 0.0:
                # Flat profile: fall back to the mean for a nonzero margin.
                fdelta = fs.mean()
            fmargin = fdelta * 0.1
            pt.ylim(fs.min() - fmargin, fs.max() + fmargin)
            pt.title('fdelta = %.2e fmean = %.2e' % (fdelta, fs.mean()))
            pt.xlabel('Line coordinate, q')
            pt.ylabel('Function value, f')
            pt.savefig('line_failed_%s.png' % (datetime.datetime.now().isoformat()))
        self._reset_state()
        return False
|
def _genenare_callmap_sif ( self , filepath ) :
"""Generate a sif file from the call map .
: param filepath : Path of the sif file
: return : None"""
|
with open ( filepath , "wb" ) as f :
for src , dst in self . callgraph . edges ( ) :
f . write ( "%#x\tDirectEdge\t%#x\n" % ( src , dst ) )
|
def list(self, end_date=values.unset, friendly_name=values.unset, minutes=values.unset, start_date=values.unset, task_channel=values.unset, split_by_wait_time=values.unset, limit=None, page_size=None):
    """Lists TaskQueuesStatisticsInstance records from the API as a list.

    Unlike stream(), this operation is eager and loads up to `limit`
    records into memory before returning them.

    :param datetime end_date: Filter cumulative statistics by an end date.
    :param unicode friendly_name: Filter the TaskQueue stats based on a TaskQueue's name
    :param unicode minutes: Filter cumulative statistics by up to 'x' minutes in the past.
    :param datetime start_date: Filter cumulative statistics by a start date.
    :param unicode task_channel: Filter real-time and cumulative statistics by TaskChannel.
    :param unicode split_by_wait_time: A comma separated values for viewing splits of tasks canceled and accepted above the given threshold in seconds.
    :param int limit: Upper limit for the number of records to return. Default is no limit.
    :param int page_size: Number of records to fetch per request; defaults to 50. When
                          only a limit is given, the most efficient page size is used,
                          i.e. min(limit, 1000).
    :returns: list of TaskQueuesStatisticsInstance records
    :rtype: list[twilio.rest.taskrouter.v1.workspace.task_queue.task_queues_statistics.TaskQueuesStatisticsInstance]
    """
    stream_kwargs = {
        'end_date': end_date,
        'friendly_name': friendly_name,
        'minutes': minutes,
        'start_date': start_date,
        'task_channel': task_channel,
        'split_by_wait_time': split_by_wait_time,
        'limit': limit,
        'page_size': page_size,
    }
    return list(self.stream(**stream_kwargs))
|
def js(self, name=None):
    """Returns all needed Javascript filepaths for given config name (if
    given) or every registred config instead (if no name is given).

    Keyword Arguments:
        name (string): Specific config name to use instead of all.

    Returns:
        list: List of Javascript file paths (base files first, then
        addons, then resolved modes; duplicates removed while keeping
        first-seen order).
    """
    filepaths = copy.copy(settings.CODEMIRROR_BASE_JS)
    configs = self.get_configs(name)
    # Use a distinct loop variable: the previous code reused `name`,
    # shadowing the parameter inside the loops.
    config_names = sorted(configs)
    # Addons first
    for config_name in config_names:
        for addon in configs[config_name].get('addons', []):
            if addon not in filepaths:
                filepaths.append(addon)
    # Process modes
    for config_name in config_names:
        for mode in configs[config_name]['modes']:
            resolved = self.resolve_mode(mode)
            if resolved not in filepaths:
                filepaths.append(resolved)
    return filepaths
|
def _get_inference_input(self, trans_inputs: List[TranslatorInput]) -> Tuple[mx.nd.NDArray, int, Optional[lexicon.TopKLexicon], List[Optional[constrained.RawConstraintList]], List[Optional[constrained.RawConstraintList]], mx.nd.NDArray]:
    """Assembles the numerical data for the batch. This comprises an NDArray for the source sentences,
    the bucket key (padded source length), and a list of raw constraint lists, one for each sentence in the batch,
    an NDArray of maximum output lengths for each sentence in the batch.
    Each raw constraint list contains phrases in the form of lists of integers in the target language vocabulary.

    :param trans_inputs: List of TranslatorInputs.
    :return NDArray of source ids (shape=(batch_size, bucket_key, num_factors)),
            bucket key, lexicon for vocabulary restriction, list of raw constraint
            lists, and list of phrases to avoid, and an NDArray of maximum output
            lengths.
    """
    batch_size = len(trans_inputs)
    # All sentences in the batch are padded to the bucket of the longest one.
    bucket_key = data_io.get_bucket(max(len(inp.tokens) for inp in trans_inputs), self.buckets_source)
    source = mx.nd.zeros((batch_size, bucket_key, self.num_source_factors), ctx=self.context)
    restrict_lexicon = None
    # type: Optional[lexicon.TopKLexicon]
    raw_constraints = [None] * batch_size
    # type: List[Optional[constrained.RawConstraintList]]
    raw_avoid_list = [None] * batch_size
    # type: List[Optional[constrained.RawConstraintList]]
    max_output_lengths = []
    # type: List[int]
    for j, trans_input in enumerate(trans_inputs):
        num_tokens = len(trans_input)
        # Max output length is derived from this sentence's own bucket,
        # not from the batch-wide bucket_key.
        max_output_lengths.append(self.models[0].get_max_output_length(data_io.get_bucket(num_tokens, self.buckets_source)))
        # Factor 0 is the surface token stream.
        source[j, :num_tokens, 0] = data_io.tokens2ids(trans_input.tokens, self.source_vocabs[0])
        factors = trans_input.factors if trans_input.factors is not None else []
        num_factors = 1 + len(factors)
        if num_factors != self.num_source_factors:
            logger.warning("Input %d factors, but model(s) expect %d", num_factors, self.num_source_factors)
        for i, factor in enumerate(factors[:self.num_source_factors - 1], start=1):  # fill in as many factors as there are tokens
            source[j, :num_tokens, i] = data_io.tokens2ids(factor, self.source_vocabs[i])[:num_tokens]
        # Check if vocabulary selection/restriction is enabled:
        # - First, see if the translator input provides a lexicon (used for multiple lexicons)
        # - If not, see if the translator itself provides a lexicon (used for single lexicon)
        # - The same lexicon must be used for all inputs in the batch.
        if trans_input.restrict_lexicon is not None:
            if restrict_lexicon is not None and restrict_lexicon is not trans_input.restrict_lexicon:
                logger.warning("Sentence %s: different restrict_lexicon specified, will overrule previous. " "All inputs in batch must use same lexicon." % trans_input.sentence_id)
            restrict_lexicon = trans_input.restrict_lexicon
        elif self.restrict_lexicon is not None:
            if isinstance(self.restrict_lexicon, dict):
                # This code should not be reachable since the case is checked when creating
                # translator inputs. It is included here to guarantee that the translator can
                # handle any valid input regardless of whether it was checked at creation time.
                logger.warning("Sentence %s: no restrict_lexicon specified for input when using multiple lexicons, " "defaulting to first lexicon for entire batch." % trans_input.sentence_id)
                restrict_lexicon = list(self.restrict_lexicon.values())[0]
            else:
                restrict_lexicon = self.restrict_lexicon
        if trans_input.constraints is not None:
            raw_constraints[j] = [data_io.tokens2ids(phrase, self.vocab_target) for phrase in trans_input.constraints]
        if trans_input.avoid_list is not None:
            raw_avoid_list[j] = [data_io.tokens2ids(phrase, self.vocab_target) for phrase in trans_input.avoid_list]
            # An <unk> inside an avoid phrase usually means the phrase was
            # not preprocessed the same way as the training data.
            if any(self.unk_id in phrase for phrase in raw_avoid_list[j]):
                logger.warning("Sentence %s: %s was found in the list of phrases to avoid; " "this may indicate improper preprocessing.", trans_input.sentence_id, C.UNK_SYMBOL)
    return source, bucket_key, restrict_lexicon, raw_constraints, raw_avoid_list, mx.nd.array(max_output_lengths, ctx=self.context, dtype='int32')
|
def verify_signature(self, signature):
    """Verify a U2F authentication signature against the user's
    registered devices and the challenge stored in the session.

    :param signature: the sign response returned by the client
    :return: dict with a ``status`` key plus details for
        success / invalid signature / clone detection
    """
    devices = [DeviceRegistration.wrap(device) for device in self.__get_u2f_devices()]
    # The challenge is single-use: pop it so it cannot be replayed.
    challenge = session.pop('_u2f_challenge_')
    # (Removed a pointless `finally: pass` that followed this handler.)
    try:
        counter, touch = verify_authenticate(devices, challenge, signature, self.__facets_list)
    except Exception as e:
        if self.__call_fail_sign:
            self.__call_fail_sign(e)
        return {'status': 'failed', 'error': 'Invalid signature!'}
    # A counter that did not advance suggests a cloned device replaying
    # an old counter value.
    if self.verify_counter(signature, counter):
        self.__call_success_sign()
        self.disable_sign()
        return {'status': 'ok', 'counter': counter, 'message': 'Successfully verified your second factor!'}
    else:
        if self.__call_fail_sign:
            # NOTE(review): called without an argument here, unlike the
            # except branch above -- confirm the callback's signature.
            self.__call_fail_sign()
        return {'status': 'failed', 'error': 'Device clone detected!'}
|
def errback(self, failure):
    """Errback every deferred previously produced by this object.

    @param failure: The object which will be passed to the C{errback}
        method of all C{Deferred}s previously produced by this object's
        C{tee} method.
    @raise AlreadyCalledError: If L{callback} or L{errback} has already
        been called on this object.
    """
    self._setResult(failure)
    self._isFailure = True
    for deferred in self._deferreds:
        deferred.errback(failure)
|
async def _write_to_container_stdin(self, write_stream, message):
    """Msgpack-encode *message* and send it, length-prefixed, to the
    container's stdin.

    :param write_stream: asyncio write stream to the stdin of the container
    :param message: dict to be msgpacked and sent
    """
    packed = msgpack.dumps(message, encoding="utf8", use_bin_type=True)
    self._logger.debug("Sending %i bytes to container", len(packed))
    # 4-byte native unsigned-int length prefix, then the payload.
    header = struct.pack('I', len(packed))
    write_stream.write(header)
    write_stream.write(packed)
    await write_stream.drain()
|
def get_cache_subnet_group(name, region=None, key=None, keyid=None, profile=None):
    '''
    Get information about a cache subnet group.

    Returns a dict with snake_cased top-level attributes and a
    ``subnets`` list, or False when the group cannot be retrieved.

    CLI example::

        salt myminion boto_elasticache.get_cache_subnet_group mycache_subnet_group
    '''
    conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
    try:
        csg = conn.describe_cache_subnet_groups(name)
        csg = csg['DescribeCacheSubnetGroupsResponse']
        csg = csg['DescribeCacheSubnetGroupsResult']['CacheSubnetGroups'][0]
    except boto.exception.BotoServerError as e:
        log.error('Failed to get cache subnet group {0}.'.format(name))
        log.debug(e)
        return False
    except (IndexError, TypeError, KeyError):
        # Unexpected response shape from the API.
        log.error('Failed to get cache subnet group {0} (2).'.format(name))
        return False
    # Table-driven renaming; the previous loop also shadowed the `key`
    # parameter with its loop variable.
    renames = {
        'CacheSubnetGroupName': 'cache_subnet_group_name',
        'CacheSubnetGroupDescription': 'cache_subnet_group_description',
        'VpcId': 'vpc_id',
    }
    ret = {}
    for attr, val in six.iteritems(csg):
        if attr in renames:
            ret[renames[attr]] = val
        elif attr == 'Subnets':
            ret['subnets'] = [
                {'subnet_id': subnet['SubnetIdentifier'],
                 'subnet_availability_zone': subnet['SubnetAvailabilityZone']['Name']}
                for subnet in val
            ]
        else:
            # Pass any unrecognized attributes through unchanged.
            ret[attr] = val
    return ret
|
def verification_add(self, domain_resource_id, port, is_ssl):
    """Sends a POST to /1.0/verifications/ using this post-data:

        {"domain_href": "/1.0/domains/2",
         "port": 80,
         "ssl": false}

    :param domain_resource_id: The domain id to verify
    :param port: The TCP port
    :param is_ssl: Boolean indicating if we should use ssl
    :return: The newly created resource
    """
    payload = {
        "domain_href": self.build_api_path('domains', domain_resource_id),
        "port": port,
        # The API expects the string forms 'true'/'false', not JSON booleans.
        "ssl": 'true' if is_ssl else 'false',
    }
    return self.create_resource(self.build_full_url(self.VERIFICATIONS), payload)
|
def _print_lines ( self , lines , start , breaks = ( ) , frame = None ) :
"""Print a range of lines ."""
|
if frame :
current_lineno = frame . f_lineno
exc_lineno = self . tb_lineno . get ( frame , - 1 )
else :
current_lineno = exc_lineno = - 1
for lineno , line in enumerate ( lines , start ) :
s = str ( lineno ) . rjust ( 3 )
if len ( s ) < 4 :
s += ' '
if lineno in breaks :
s += 'B'
else :
s += ' '
if lineno == current_lineno :
s += '->'
elif lineno == exc_lineno :
s += '>>'
self . message ( s + '\t' + line . rstrip ( ) )
|
def dict_to_path(as_dict):
    """Turn a pure dict into a dict containing entity objects that
    can be sent directly to a Path constructor.

    Parameters
    ----------
    as_dict : dict
        Has keys: 'vertices', 'entities'

    Returns
    -------
    kwargs : dict
        Has keys: 'vertices', 'entities'
    """
    # Start from a shallow copy so the caller's dict is not mutated.
    kwargs = as_dict.copy()
    # Map each serialized entity type to its constructor.
    constructors = {'Arc': Arc, 'Line': Line}
    kwargs['entities'] = [
        constructors[entity['type']](points=entity['points'], closed=entity['closed'])
        for entity in as_dict['entities']
    ]
    return kwargs
|
def request(self, path, args=None, files=None, opts=None, stream=False, decoder=None, headers=None, data=None):
    """Makes an HTTP request to the IPFS daemon.

    This function returns the contents of the HTTP response from the IPFS
    daemon.

    Raises
    ------
    ~ipfsapi.exceptions.ErrorResponse
    ~ipfsapi.exceptions.ConnectionError
    ~ipfsapi.exceptions.ProtocolError
    ~ipfsapi.exceptions.StatusError
    ~ipfsapi.exceptions.TimeoutError

    Parameters
    ----------
    path : str
        The REST command path to send
    args : list
        Positional parameters to be sent along with the HTTP request
        (defaults to an empty list)
    files : :class:`io.RawIOBase` | :obj:`str` | :obj:`list`
        The file object(s) or path(s) to stream to the daemon
    opts : dict
        Query string paramters to be sent along with the HTTP request
        (defaults to an empty dict)
    decoder : str
        The encoder to use to parse the HTTP response
    headers : dict
        Extra HTTP headers (defaults to an empty dict)
    """
    # Previously these used mutable default arguments ([] / {}), which
    # are shared across calls; normalize None to fresh containers instead.
    args = [] if args is None else args
    files = [] if files is None else files
    opts = {} if opts is None else opts
    headers = {} if headers is None else headers
    url = self.base + path
    params = [('stream-channels', 'true')]
    params.extend(opts.items())
    params.extend(('arg', arg) for arg in args)
    # Use POST when a request body is present, GET otherwise.
    method = 'post' if (files or data) else 'get'
    parser = encoding.get_encoding(decoder if decoder else "none")
    return self._request(method, url, params, parser, stream, files, headers, data)
|
def process(self, items_block):
    """Return items as they come, updating their metadata__enriched_on field.

    :param items_block: iterable of hits
    :return: ProcessResults namedtuple containing:
        - processed: number of processed hits (kept at 0: items are
          passed through, not consumed)
        - out_items: a list containing items ready to be written
    """
    out_items = []
    for hit in items_block:
        # Stamp every hit unconditionally. This was previously guarded by
        # an erroneous `if __name__ == '__main__':` check, so the field
        # was only ever set when the module was run as a script.
        hit['_source']['metadata__enriched_on'] = datetime.datetime_utcnow().isoformat()
        out_items.append(hit)
    return self.ProcessResults(processed=0, out_items=out_items)
|
def read_data(self, variable_instance):
    """Read the requested value from the VISA instrument.

    :param variable_instance: variable whose
        ``visavariable.device_property`` selects which query to issue
    :return: parsed value, or None when the instrument is not connected
        or the property is not recognized
    """
    if self.inst is None:
        # Not connected to an instrument.
        return
    if variable_instance.visavariable.device_property.upper() == 'PRESENT_VALUE':
        # Device-specific query command -- presumably "read present
        # value"; confirm against the instrument's manual.
        return self.parse_value(self.inst.query('?U6P0'))
    elif variable_instance.visavariable.device_property.upper() == 'PRESENT_VALUE_MANUAL_C_FREQ':
        freq = VariableProperty.objects.get_property(variable=variable_instance, name='VISA:FREQ')
        if freq is None:
            # Fall back to a default frequency when none is configured.
            freq = 500
        return self.parse_value(self.inst.query('?MAM1SR9HT3ST2SM%dE' % freq))
    return None
|
def fetcher(date=None, url_pattern=URL_PATTERN):
    """Fetch json data from n.pl.

    Args:
        date (date): day to fetch; defaults to *today*, evaluated at call
            time. (The previous default ``datetime.today()`` was evaluated
            once at import time and went stale in long-running processes.)
        url_pattern (string): default URL_PATTERN

    Returns:
        dict - data from api
    """
    if date is None:
        date = datetime.today()
    api_url = url_pattern % date.strftime('%Y-%m-%d')
    headers = {'Referer': 'http://n.pl/program-tv'}
    raw_result = requests.get(api_url, headers=headers).json()
    return raw_result
|
def get_shape(self):
    """Return a tuple of this array's dimensions. This is done by
    querying the Dim children. Note that once it has been created, it
    is also possible to examine an Array object's .array attribute
    directly, and doing that is much faster."""
    dims = self.getElementsByTagName(ligolw.Dim.tagName)
    shape = [int(dim.pcdata) for dim in dims]
    # Dim elements are stored innermost-first; reverse for numpy order.
    shape.reverse()
    return tuple(shape)
|
def _cholesky(self, A, **kwargs):
    """Cholesky decomposition with automatic L2-regularization retries.

    Attempts the decomposition and, on a non-positive-definite error,
    increases the L2 regularization of the penalty matrix by factors of
    10 (up to ``self._constraint_l2_max``) and retries.

    Parameters
    ----------
    A : np.array

    Returns
    -------
    np.array

    Raises
    ------
    NotPositiveDefiniteError
        If the matrix remains non-positive-definite at the maximum
        allowed regularization.
    """
    # create appropriate-size diagonal matrix
    if sp.sparse.issparse(A):
        diag = sp.sparse.eye(A.shape[0])
    else:
        diag = np.eye(A.shape[0])
    constraint_l2 = self._constraint_l2
    while constraint_l2 <= self._constraint_l2_max:
        try:
            L = cholesky(A, **kwargs)
            # Remember the regularization level that finally worked.
            self._constraint_l2 = constraint_l2
            return L
        except NotPositiveDefiniteError:
            if self.verbose:
                warnings.warn('Matrix is not positive definite. \n' 'Increasing l2 reg by factor of 10.', stacklevel=2)
            # Swap the current regularization term for one 10x stronger.
            # NOTE(review): this assumes A already contains
            # constraint_l2 * diag on entry; otherwise the first
            # subtraction removes regularization that was never added --
            # confirm with the callers.
            A -= constraint_l2 * diag
            constraint_l2 *= 10
            A += constraint_l2 * diag
    raise NotPositiveDefiniteError('Matrix is not positive \n' 'definite.')
|
def clearSelection(self):
    """Clears the selected text for this edit."""
    first = None
    editors = self.editors()
    for editor in editors:
        if not editor.selectedText():
            continue
        # Remember the first editor that actually had a selection.
        first = first or editor
        # backspace() removes the current selection from the editor.
        editor.backspace()
    # NOTE(review): setFocus() is called on every editor in turn; only
    # the last call can have a lasting effect before focus is handed to
    # `first` below -- confirm whether this loop is intentional (e.g. to
    # force focus events) or left over.
    for editor in editors:
        editor.setFocus()
    if first:
        first.setFocus()
|
def create_plane(width=1, height=1, width_segments=1, height_segments=1, direction='+z'):
    """Generate vertices & indices for a filled and outlined plane.

    Parameters
    ----------
    width : float
        Plane width.
    height : float
        Plane height.
    width_segments : int
        Plane segments count along the width.
    height_segments : float
        Plane segments count along the height.
    direction: unicode
        ``{'-x', '+x', '-y', '+y', '-z', '+z'}``
        Direction the plane will be facing.

    Returns
    -------
    vertices : array
        Array of vertices suitable for use as a VertexBuffer.
    faces : array
        Indices to use to produce a filled plane.
    outline : array
        Indices to use to produce an outline of the plane.

    Raises
    ------
    ValueError
        If *direction* is not one of the six supported values.
        (Previously an unrecognized direction crashed with an obscure
        NameError because ``shift`` was never assigned.)

    References
    ----------
    .. [1] Cabello, R. (n.d.). PlaneBufferGeometry.js. Retrieved May 12, 2015,
       from http://git.io/vU1Fh
    """
    x_grid = width_segments
    y_grid = height_segments
    x_grid1 = x_grid + 1
    y_grid1 = y_grid + 1
    # Positions, normals and texcoords, built flat then reshaped below.
    positions = np.zeros(x_grid1 * y_grid1 * 3)
    normals = np.zeros(x_grid1 * y_grid1 * 3)
    texcoords = np.zeros(x_grid1 * y_grid1 * 2)
    # Grid coordinates centered around the origin.
    y = np.arange(y_grid1) * height / y_grid - height / 2
    x = np.arange(x_grid1) * width / x_grid - width / 2
    positions[::3] = np.tile(x, y_grid1)
    positions[1::3] = -np.repeat(y, x_grid1)
    # The canonical plane faces +z before reorientation.
    normals[2::3] = 1
    texcoords[::2] = np.tile(np.arange(x_grid1) / x_grid, y_grid1)
    texcoords[1::2] = np.repeat(1 - np.arange(y_grid1) / y_grid, x_grid1)
    # Faces and outline.
    faces, outline = [], []
    for i_y in range(y_grid):
        for i_x in range(x_grid):
            a = i_x + x_grid1 * i_y
            b = i_x + x_grid1 * (i_y + 1)
            c = (i_x + 1) + x_grid1 * (i_y + 1)
            d = (i_x + 1) + x_grid1 * i_y
            # Two triangles per grid cell, four outline edges.
            faces.extend(((a, b, d), (b, c, d)))
            outline.extend(((a, b), (b, c), (c, d), (d, a)))
    positions = np.reshape(positions, (-1, 3))
    texcoords = np.reshape(texcoords, (-1, 2))
    normals = np.reshape(normals, (-1, 3))
    faces = np.reshape(faces, (-1, 3)).astype(np.uint32)
    outline = np.reshape(outline, (-1, 2)).astype(np.uint32)
    direction = direction.lower()
    if direction in ('-x', '+x'):
        shift, neutral_axis = 1, 0
    elif direction in ('-y', '+y'):
        shift, neutral_axis = -1, 1
    elif direction in ('-z', '+z'):
        shift, neutral_axis = 0, 2
    else:
        # Fail fast with a clear message instead of a NameError below.
        raise ValueError("Invalid direction: %r; expected one of "
                         "'-x', '+x', '-y', '+y', '-z', '+z'" % direction)
    sign = -1 if '-' in direction else 1
    # Reorient the +z plane by cycling the coordinate axes.
    positions = np.roll(positions, shift, -1)
    normals = np.roll(normals, shift, -1) * sign
    # Per-vertex RGBA colors: positions normalized to [0, 1] with the
    # facing axis zeroed and alpha set to 1.
    colors = np.ravel(positions)
    colors = np.hstack((np.reshape(np.interp(colors, (np.min(colors), np.max(colors)), (0, 1)), positions.shape), np.ones((positions.shape[0], 1))))
    colors[..., neutral_axis] = 0
    vertices = np.zeros(positions.shape[0], [('position', np.float32, 3), ('texcoord', np.float32, 2), ('normal', np.float32, 3), ('color', np.float32, 4)])
    vertices['position'] = positions
    vertices['texcoord'] = texcoords
    vertices['normal'] = normals
    vertices['color'] = colors
    return vertices, faces, outline
|
def gpg_version(sp=subprocess):
    """Return the version of the local GnuPG binary, as bytes.

    Runs ``gpg --version`` and parses the first output line down to the
    bare version. (The previous docstring incorrectly described this as
    returning a keygrip.)
    """
    args = gpg_command(['--version'])
    output = check_output(args=args, sp=sp)
    line = output.split(b'\n')[0]
    # b'gpg (GnuPG) 2.1.11'
    line = line.split(b' ')[-1]
    # b'2.1.11'
    line = line.split(b'-')[0]
    # remove trailing version parts
    # Also strip a leading 'v' prefix, if present.
    return line.split(b'v')[-1]
|
def set_url(self):
    '''Open the Douban page of the currently playing song's album.'''
    import webbrowser
    # The stored album path uses escaped slashes; unescape before use.
    album_path = self.data.playingsong['album'].replace('\/', '/')
    webbrowser.open("http://music.douban.com" + album_path)
|
def write_assoc(self, assoc):
    """Write a single association to a line in the output file.

    Entries flagged with a truthy ``header`` key are skipped.
    """
    if assoc.get("header", False):
        return
    subj = assoc['subject']
    # Split e.g. 'UniProtKB:P12345' into the DB prefix and the local id.
    db, db_object_id = self._split_prefix(subj)
    rel = assoc['relation']
    qualifier = rel['id']
    if assoc['negated']:
        # GAF encodes negation by prefixing the qualifier with 'NOT|'.
        qualifier = 'NOT|' + qualifier
    goid = assoc['object']['id']
    ev = assoc['evidence']
    # Map the ECO class reference to a GAF evidence code.
    evidence = self.ecomap.coderef_to_ecoclass(ev['type'])
    withfrom = "|".join(ev['with_support_from'])
    reference = "|".join(ev['has_supporting_reference'])
    date = assoc['date']
    assigned_by = assoc['provided_by']
    annotation_properties = ''
    # TODO
    # NOTE(review): raises KeyError when 'interacting_taxon' is absent --
    # confirm whether upstream always populates it.
    interacting_taxon_id = assoc['interacting_taxon']
    vals = [db, db_object_id, qualifier, goid, reference, evidence, withfrom, interacting_taxon_id, # TODO
            date, assigned_by, self._extension_expression(assoc['object_extensions']), annotation_properties]
    self._write_row(vals)
|
def xnormpath(path):
    """Cross-platform version of os.path.normpath (operates on bytes)."""
    # Normalize once, turn Windows backslashes into forward slashes,
    # then normalize again to fold anything the replacement exposed.
    folded = posixpath.normpath(path)
    folded = folded.replace(b'\\', b'/')
    return posixpath.normpath(folded)
|
def claim(self, owner, access):
    """Claim the lock (lock must be available)."""
    debuglog("%s claim(%s, %s)" % (self, owner, access.mode))
    assert owner is not None
    assert self.isAvailable(owner, access), "ask for isAvailable() first"
    assert isinstance(access, LockAccess)
    assert access.mode in ['counting', 'exclusive']
    # Drop any pending wait entries for this owner before taking ownership.
    still_waiting = []
    for entry in self.waiting:
        if entry[0] is not owner:
            still_waiting.append(entry)
    self.waiting = still_waiting
    self._addOwner(owner, access)
    debuglog(" %s is claimed '%s'" % (self, access.mode))
|
def infer_devices(devices=None):
    """Returns the list of devices that multi-replica code should use.

    :param devices: list of string device names, e.g. ["/GPU:0"]
        If the user specifies this, `infer_devices` checks that it is
        valid, and then uses this user-specified list.
        If the user does not specify this, infer_devices uses:
        - All available GPUs, if there are any
        - CPU otherwise
    """
    if devices is None:
        devices = get_available_gpus()
        if len(devices) == 0:
            warnings.warn("No GPUS, running on CPU")
            # Set device to empty string, tf will figure out whether to use
            # XLA or not, etc., automatically
            devices = [""]
    else:
        # A user-supplied list must not be empty.
        assert len(devices) > 0
    for device in devices:
        # Device names must be strings.
        assert isinstance(device, six.string_types), type(device)
    return devices
|
def walk(self, head=None):
    """Do a breadth-first walk of the graph, yielding
    ``(num, previous, siblings)`` for each node, starting at `head`
    (defaults to the root node).
    """
    # deque gives O(1) FIFO operations; the previous list-based queue
    # paid O(n) for every insert(0, ...).
    from collections import deque
    head = head or self._root_node
    queue = deque([head])
    while queue:
        node = queue.popleft()
        yield node.num, node.previous, node.siblings
        for child in node.siblings:
            # Only enqueue children that exist in the graph.
            if child in self._graph:
                queue.append(self._graph[child])
|
def _parse_reports_by_type(self):
    """Return a data dictionary keyed by sample name.

    Each Picard validation log is dispatched on its first line:
    'No errors found', VERBOSE (ERROR/WARNING prefix) or SUMMARY.
    """
    data = dict()
    for file_meta in self.find_log_files('picard/sam_file_validation', filehandles=True):
        sample = file_meta['s_name']
        if sample in data:
            log.debug("Duplicate sample name found! Overwriting: {}".format(sample))
        fh = file_meta['f']
        first_line = fh.readline().rstrip()
        # Rewind so the chosen parser sees the whole file.
        fh.seek(0)
        if 'No errors found' in first_line:
            parsed = _parse_no_error_report()
        elif first_line.startswith(('ERROR', 'WARNING')):
            parsed = _parse_verbose_report(fh)
        else:
            parsed = _parse_summary_report(fh)
        data[sample] = parsed
    return data
|
def AddSerializedFile(self, serialized_file_desc_proto):
    """Adds the FileDescriptorProto and its types to this pool.

    Args:
      serialized_file_desc_proto: A bytes string, serialization of the
        FileDescriptorProto to add.
    """
    # pylint: disable=g-import-not-at-top
    from google.protobuf import descriptor_pb2
    proto = descriptor_pb2.FileDescriptorProto.FromString(serialized_file_desc_proto)
    self.Add(proto)
|
def unique_everseen(iterable, filterfalse_=itertools.filterfalse):
    """Yield the unique elements of *iterable*, preserving first-seen order."""
    # Itertools recipes:
    # https://docs.python.org/3/library/itertools.html#itertools-recipes
    seen = set()
    for item in filterfalse_(seen.__contains__, iterable):
        seen.add(item)
        yield item
|
def load_model_data(self, path, model):
    """Loads the data for the specified model from the given path.

    When *path* is a directory: if it contains an ``_all.yml`` file the
    data is first loaded as a collection, then individual data files are
    loaded and the session is committed.
    """
    if os.path.isdir(path):
        # try find a model data collection
        if os.path.isfile(os.path.join(path, '_all.yml')):
            self.load_model_data_collection(path, model)
        self.load_model_data_from_files(path, model)
        self.session.commit()
|
def shelf_from_config(config, **default_init):
    """Build a `Shelf` instance dynamically based on config.

    `config` is a dictionary containing ``shelf_*`` keys as defined in
    :mod:`birding.config`; *default_init* supplies fallback constructor
    kwargs that ``shelf_init`` entries override.
    """
    shelf_cls = import_name(config['shelf_class'], default_ns='birding.shelf')
    init_kwargs = {}
    init_kwargs.update(default_init)
    init_kwargs.update(config['shelf_init'])
    shelf = shelf_cls(**init_kwargs)
    # Apply expiration only when configured and supported by the shelf.
    if 'shelf_expiration' in config and hasattr(shelf, 'set_expiration'):
        shelf.set_expiration(config['shelf_expiration'])
    return shelf
|
def build_rrule(count=None, interval=None, bysecond=None, byminute=None, byhour=None, byweekno=None, bymonthday=None, byyearday=None, bymonth=None, until=None, bysetpos=None, wkst=None, byday=None, freq=None):
    """Build rrule dictionary for vRecur class.

    :param count: int
    :param interval: int
    :param bysecond: int
    :param byminute: int
    :param byhour: int
    :param byweekno: int
    :param bymonthday: int
    :param byyearday: int
    :param bymonth: int
    :param until: datetime
    :param bysetpos: int
    :param wkst: str, two-letter weekday
    :param byday: weekday
    :param freq: str, frequency name ('WEEK', 'MONTH', etc)
    :raises ValueError: if freq is not a recognized vRecur frequency
    :return: dict
    """
    # Validate freq up front; the ValueError is identical to the one the
    # previous 14-branch if-chain raised at the end.
    if freq is not None and freq not in vRecur.frequencies:
        raise ValueError('Frequency value should be one of: {0}'.format(vRecur.frequencies))
    # Table-driven assembly replaces the repetitive if-chain.
    fields = (
        ('COUNT', count),
        ('INTERVAL', interval),
        ('BYSECOND', bysecond),
        ('BYMINUTE', byminute),
        ('BYHOUR', byhour),
        ('BYWEEKNO', byweekno),
        ('BYMONTHDAY', bymonthday),
        ('BYYEARDAY', byyearday),
        ('BYMONTH', bymonth),
        ('UNTIL', until),
        ('BYSETPOS', bysetpos),
        ('WKST', wkst),
        ('BYDAY', byday),
        ('FREQ', freq),
    )
    return {name: value for name, value in fields if value is not None}
|
def discover_by_directory(self, start_directory, top_level_directory=None, pattern='test*.py'):
    """Run test discovery in a directory.

    Parameters
    ----------
    start_directory : str
        The package directory in which to start test discovery.
    top_level_directory : str
        The path to the top-level directory of the project. This is
        the parent directory of the project's top-level Python
        package. When None, it is located automatically.
    pattern : str
        The glob pattern to match the filenames of modules to search
        for tests.
    """
    start_directory = os.path.abspath(start_directory)
    if top_level_directory is None:
        # Walk upwards from the start directory to find the project root.
        top_level_directory = find_top_level_directory(start_directory)
    logger.debug('Discovering tests in directory: start_directory=%r, ' 'top_level_directory=%r, pattern=%r', start_directory, top_level_directory, pattern)
    # Fail early if start_directory cannot be imported from the top level.
    assert_start_importable(top_level_directory, start_directory)
    if top_level_directory not in sys.path:
        # Make the project importable during discovery.
        sys.path.insert(0, top_level_directory)
    tests = self._discover_tests(start_directory, top_level_directory, pattern)
    return self._loader.create_suite(list(tests))
|
def _restore_clipboard_text ( self , backup : str ) :
"""Restore the clipboard content ."""
|
# Pasting takes some time , so wait a bit before restoring the content . Otherwise the restore is done before
# the pasting happens , causing the backup to be pasted instead of the desired clipboard content .
time . sleep ( 0.2 )
self . clipboard . text = backup if backup is not None else ""
|
def _calc_eta ( self ) :
"""Calculates estimated time left until completion ."""
|
elapsed = self . _elapsed ( )
if self . cnt == 0 or elapsed < 0.001 :
return None
rate = float ( self . cnt ) / elapsed
self . eta = ( float ( self . max_iter ) - float ( self . cnt ) ) / rate
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.