signature stringlengths 29 44.1k | implementation stringlengths 0 85.2k |
|---|---|
def toggle_grid(self, evt=None, show=None):
    """Toggle the grid display.

    When ``show`` is None, flip the current state from the configuration;
    otherwise force the grid on/off according to ``show``.
    """
    new_state = (not self.conf.show_grid) if show is None else show
    self.conf.enable_grid(new_state)
def offload_all_service_containers(self, service):
    """Deletes all containers related to the service."""
    def _delete_containers(svc):
        # Applied to every service in the map, children first.
        if not isinstance(svc, Service):
            raise TypeError("service must be an instance of Service.")
        found = self.find_service_containers(svc)
        if found:
            logger.info("Deleting service: {0} containers.".format(svc.name))
            for item in six.itervalues(found):
                item.delete()
    self._service_map(service, _delete_containers, descending=True)
def cleanup(self):
    """Clean up jobs on the remote run location.

    The job ids are read from the submission file, which has to exist
    for obvious reasons.
    """
    task = self.task
    # get job ids from submission data, skipping dummy and missing ids
    job_ids = [
        d["job_id"]
        for d in self.submission_data.jobs.values()
        if d["job_id"] not in (self.submission_data.dummy_job_id, None)
    ]
    if not job_ids:
        return
    # cleanup jobs
    task.publish_message("going to cleanup {} jobs".format(len(job_ids)))
    errors = self.job_manager.cleanup_batch(job_ids)
    # print errors, capped at self.show_errors entries
    # (fixed typo: "occured" -> "occurred")
    if errors:
        print("{} error(s) occurred while cleaning up {} job(s) of task {}:".format(
            len(errors), len(job_ids), task.task_id))
        tmpl = " {}"
        for i, err in enumerate(errors):
            print(tmpl.format(err))
            if i + 1 >= self.show_errors:
                remaining = len(errors) - self.show_errors
                if remaining > 0:
                    print(" ... and {} more".format(remaining))
                break
def _dumpOnlineConf ( self , format ) :
"""dump element configuration json string for online modeling ,
in which control configuration should be overwritten simulation conf ,
e . g . in simuinfo : { ' k1 ' : 10 , ' l ' : 1 } , ctrlinfo : { ' k1 ' : pv _ name , . . . }
the k1 value should be replaced by pv _ name , and from which
the value read into , take note the simuinfo and ctrlinfo
would not be changed .""" | oinfod = { k : v for k , v in self . simuinfo . items ( ) }
for k in ( set ( oinfod . keys ( ) ) & set ( self . ctrlkeys ) ) :
oinfod [ k ] = self . ctrlinfo [ k ]
return { self . name . upper ( ) : { self . typename : oinfod } } |
def currentRecord(self):
    """Return the current record based on the active index from the model.

    :return: <orb.Table> or None when the typed text matches no option.
    """
    completion = nativestring(self._pywidget.text())
    # map() returns an iterator on Python 3, which has no .index();
    # materialize the options first (the original broke under Python 3).
    options = list(map(str, self.model().stringList()))
    try:
        index = options.index(completion)
    except ValueError:
        return None
    return self._records[index]
def pie_plot(df, value='value', category='variable', ax=None, legend=False,
             title=True, cmap=None, **kwargs):
    """Plot data as a pie chart.

    Parameters
    ----------
    df : pd.DataFrame
        Data to plot as a long-form data frame
    value : string, optional
        The column to use for data values (default: value)
    category : string, optional
        The column to use for labels (default: variable)
    ax : matplotlib.Axes, optional
    legend : bool, optional
        Include a legend (default: False)
    title : bool or string, optional
        Display a default or custom title.
        NOTE(review): currently accepted but unused in this function.
    cmap : string, optional
        A colormap to use (default: None)
    kwargs : Additional arguments to pass to the pd.DataFrame.plot() function
    """
    # A pie can only represent one index dimension besides `category`.
    for col in set(SORT_IDX) - set([category]):
        if len(df[col].unique()) > 1:
            msg = 'Can not plot multiple {}s in pie_plot with value={},' + \
                ' category={}'
            raise ValueError(msg.format(col, value, category))
    if ax is None:
        fig, ax = plt.subplots()
    # get data, set negative values to explode
    _df = df.groupby(category)[value].sum()
    where = _df > 0
    explode = tuple(0 if _ else 0.2 for _ in where)
    _df = _df.abs()
    # explicitly get colors
    defaults = default_props(reset=True, num_colors=len(_df.index),
                             colormap=cmap)['color']
    rc = run_control()
    color = []
    for key, c in zip(_df.index, defaults):
        # run-control colors override the colormap defaults
        if 'color' in rc and category in rc['color'] and key in rc['color'][category]:
            c = rc['color'][category][key]
        color.append(c)
    # plot data
    _df.plot(kind='pie', colors=color, ax=ax, explode=explode, **kwargs)
    # add legend
    ax.legend(loc='center left', bbox_to_anchor=(1.0, 0.5), labels=_df.index)
    if not legend:
        ax.legend_.remove()
    # remove label
    ax.set_ylabel('')
    return ax
def add_section(self, section):
    """Create a new section in the configuration.

    Raise DuplicateSectionError if a section by the specified name
    already exists.  Raise ValueError if name is DEFAULT.
    """
    # The order of these checks matters: the DEFAULT-name check takes
    # precedence over the duplicate check (configparser semantics).
    if section == self.default_section:
        raise ValueError('Invalid section name: %r' % section)
    if section in self._sections:
        raise DuplicateSectionError(section)
    # Backing storage for options, plus a view proxy keyed by section name.
    self._sections[section] = self._dict()
    self._proxies[section] = SectionProxy(self, section)
def update(self, **kwargs):
    """Update the Account resource with the specified content.

    Args:
        name (str): Human-readable name for the account

    Returns: the updated Account object (a new wrapper instance).
    """
    updated_resource = self.resource.update(kwargs)
    return self.__class__(updated_resource, self.client, wallet=self.wallet)
def show_image(kwargs, call=None):
    """Show the details of an image."""
    if call != 'function':
        raise SaltCloudSystemExit('The show_image action must be called with -f or --function.')
    image_name = kwargs['image']
    log.info("Showing image %s", image_name)
    vm_info = vb_get_machine(image_name)
    result = {vm_info["name"]: treat_machine_dict(vm_info)}
    # NOTE(review): the name key is removed only after the result dict is
    # built — presumably treat_machine_dict shares this dict; confirm.
    del vm_info["name"]
    return result
def getPeer(self, url):
    """Find a peer by URL and return the first peer record with that URL."""
    matches = list(models.Peer.select().where(models.Peer.url == url))
    if not matches:
        raise exceptions.PeerNotFoundException(url)
    return matches[0]
def skip_class_parameters():
    """Return an autodoc callback that hides duplicate class parameters.

    Can be used with :meth:`add_parametric_object_params`; this removes
    duplicate variables cluttering the sphinx docs.  Only intended to be
    used with *sphinx autodoc*.  In your *sphinx* ``config.py`` file::

        from cqparts.utils.sphinx import skip_class_parameters

        def setup(app):
            app.connect("autodoc-skip-member", skip_class_parameters())
    """
    from ..params import Parameter

    def callback(app, what, name, obj, skip, options):
        # Skip (return True for) any Parameter instance documented on a
        # class; leave everything else to autodoc's default (None).
        if what == 'class' and isinstance(obj, Parameter):
            return True
        return None

    return callback
def retrieveVals(self):
    """Retrieve values for graphs."""
    info = ApacheInfo(self._host, self._port, self._user, self._password,
                      self._statuspath, self._ssl)
    stats = info.getServerStats()
    if self.hasGraph('apache_access'):
        self.setGraphVal('apache_access', 'reqs', stats['Total Accesses'])
    if self.hasGraph('apache_bytes'):
        # Apache reports kBytes; convert to bytes.
        self.setGraphVal('apache_bytes', 'bytes', stats['Total kBytes'] * 1000)
    if self.hasGraph('apache_workers'):
        self.setGraphVal('apache_workers', 'busy', stats['BusyWorkers'])
        self.setGraphVal('apache_workers', 'idle', stats['IdleWorkers'])
        self.setGraphVal('apache_workers', 'max', stats['MaxWorkers'])
def libvlc_video_set_spu_delay(p_mi, i_delay):
    '''Set the subtitle delay. This affects the timing of when the subtitle will
    be displayed. Positive values result in subtitles being displayed later,
    while negative values will result in subtitles being displayed earlier.
    The subtitle delay will be reset to zero each time the media changes.
    @param p_mi: media player.
    @param i_delay: time (in microseconds) the display of subtitles should be delayed.
    @return: 0 on success, -1 on error.
    @version: LibVLC 2.0.0 or later.
    '''
    # Reuse the cached ctypes binding when present; otherwise build it.
    func = _Cfunctions.get('libvlc_video_set_spu_delay', None)
    if func is None:
        func = _Cfunction('libvlc_video_set_spu_delay', ((1,), (1,),), None,
                          ctypes.c_int, MediaPlayer, ctypes.c_int64)
    return func(p_mi, i_delay)
def _compute_acq ( self , x ) :
"""Integrated GP - Lower Confidence Bound""" | means , stds = self . model . predict ( x )
f_acqu = 0
for m , s in zip ( means , stds ) :
f_acqu += - m + self . exploration_weight * s
return f_acqu / ( len ( means ) ) |
def weather_from_dictionary(d):
    """Build a *Weather* object out of a data dictionary.

    Only certain properties of the dictionary are used: if these properties
    are not found or cannot be read, an error is issued.

    :param d: a data dictionary
    :type d: dict
    :returns: a *Weather* instance
    :raises: *KeyError* if it is impossible to find or read the data
        needed to build the instance

    Fixes over the original: several variables could be read while unbound
    (raising NameError instead of the documented KeyError) when the input
    dict lacked every fallback source; they now default sanely or raise
    KeyError explicitly.
    """
    # -- times
    if 'dt' in d:
        reference_time = d['dt']
    elif 'last' in d and 'dt' in d['last']:
        reference_time = d['last']['dt']
    else:
        # Originally fell through unbound (NameError); raise the
        # documented KeyError instead.
        raise KeyError('dt')
    if 'sys' in d and 'sunset' in d['sys']:
        sunset_time = d['sys']['sunset']
    else:
        sunset_time = 0
    if 'sys' in d and 'sunrise' in d['sys']:
        sunrise_time = d['sys']['sunrise']
    else:
        sunrise_time = 0
    # -- calc (fix: default to None so a 'last' without 'calc' cannot
    # leave these unbound)
    dewpoint = humidex = heat_index = None
    if 'calc' in d:
        dewpoint = d['calc'].get('dewpoint')
        humidex = d['calc'].get('humidex')
        heat_index = d['calc'].get('heatindex')
    elif 'last' in d and 'calc' in d['last']:
        calc = d['last']['calc']
        dewpoint = calc.get('dewpoint')
        humidex = calc.get('humidex')
        heat_index = calc.get('heatindex')
    # -- visibility
    if 'visibility' in d:
        vis = d['visibility']
    elif 'last' in d and 'visibility' in d['last']:
        vis = d['last']['visibility']
    else:
        vis = None
    if isinstance(vis, int):
        visibility_distance = vis
    elif vis is not None and 'distance' in vis:
        visibility_distance = vis['distance']
    else:
        visibility_distance = None
    # -- clouds
    clouds = 0
    if 'clouds' in d:
        if isinstance(d['clouds'], (int, float)):
            clouds = d['clouds']
        elif 'all' in d['clouds']:
            clouds = d['clouds']['all']
    # -- rain
    rain = dict()
    if 'rain' in d:
        if isinstance(d['rain'], (int, float)):
            rain = {'all': d['rain']}
        elif d['rain'] is not None:
            rain = d['rain'].copy()
    # -- wind
    wind = dict()
    if 'wind' in d and d['wind'] is not None:
        wind = d['wind'].copy()
    elif 'last' in d and 'wind' in d['last'] and d['last']['wind'] is not None:
        wind = d['last']['wind'].copy()
    if 'speed' in d:
        wind['speed'] = d['speed']
    if 'deg' in d:
        wind['deg'] = d['deg']
    # -- humidity
    if 'humidity' in d:
        humidity = d['humidity']
    elif 'main' in d and 'humidity' in d['main']:
        humidity = d['main']['humidity']
    elif 'last' in d and 'main' in d['last'] and 'humidity' in d['last']['main']:
        humidity = d['last']['main']['humidity']
    else:
        humidity = 0
    # -- snow
    snow = dict()
    if 'snow' in d:
        if isinstance(d['snow'], (int, float)):
            snow = {'all': d['snow']}
        elif d['snow'] is not None:
            snow = d['snow'].copy()
    # -- pressure (fix: default None so it cannot be left unbound)
    atm_press = None
    if 'pressure' in d:
        atm_press = d['pressure']
    elif 'main' in d and 'pressure' in d['main']:
        atm_press = d['main']['pressure']
    elif 'last' in d and 'main' in d['last']:
        atm_press = d['last']['main']['pressure']
    if 'main' in d and 'sea_level' in d['main']:
        sea_level_press = d['main']['sea_level']
    else:
        sea_level_press = None
    pressure = {'press': atm_press, 'sea_level': sea_level_press}
    # -- temperature (fix: default {} so it cannot be left unbound)
    temperature = dict()
    if 'temp' in d:
        if d['temp'] is not None:
            temperature = d['temp'].copy()
    elif 'main' in d and 'temp' in d['main']:
        main = d['main']
        temperature = {
            'temp': main['temp'],
            'temp_kf': main.get('temp_kf'),
            'temp_max': main.get('temp_max'),
            'temp_min': main.get('temp_min'),
        }
    elif 'last' in d and 'main' in d['last']:
        temperature = dict(temp=d['last']['main']['temp'])
    # -- weather status info
    if 'weather' in d:
        status = d['weather'][0]['main']
        detailed_status = d['weather'][0]['description']
        weather_code = d['weather'][0]['id']
        weather_icon_name = d['weather'][0]['icon']
    else:
        status = ''
        detailed_status = ''
        weather_code = 0
        weather_icon_name = ''
    return Weather(reference_time, sunset_time, sunrise_time, clouds, rain,
                   snow, wind, humidity, pressure, temperature, status,
                   detailed_status, weather_code, weather_icon_name,
                   visibility_distance, dewpoint, humidex, heat_index)
def name(self):
    """The descriptive device name as advertised by the kernel
    and/or the hardware itself.

    To get the sysname for this device, use :attr:`sysname`.

    Returns:
        str: The device name.
    """
    raw = self._libinput.libinput_device_get_name(self._handle)
    return string_at(raw).decode()
def _valcache_lookup(self, cache, branch, turn, tick):
    """Return the value at the given time in ``cache``."""
    if branch in cache:
        branch_cache = cache[branch]
        try:
            if turn in branch_cache and branch_cache[turn].rev_gettable(tick):
                return branch_cache[turn][tick]
            elif branch_cache.rev_gettable(turn - 1):
                prior_turn = branch_cache[turn - 1]
                return prior_turn[prior_turn.end]
        except HistoryError as ex:
            # Probably shouldn't ever happen -- empty branches shouldn't be
            # kept in the cache at all -- but it's easy to handle.
            if ex.deleted:
                raise
    # Fall back to the parent branches, nearest first.
    for pb, pr, pt in self.db._iter_parent_btt(branch, turn, tick):
        if pb not in cache:
            continue
        parent_cache = cache[pb]
        if pr in parent_cache and parent_cache[pr].rev_gettable(pt):
            try:
                return parent_cache[pr][pt]
            except HistoryError as ex:
                if ex.deleted:
                    raise
        elif parent_cache.rev_gettable(pr - 1):
            prior = parent_cache[pr - 1]
            try:
                return prior[prior.end]
            except HistoryError as ex:
                if ex.deleted:
                    raise
def del_option(self, section, option):
    """Delete an option if both the section and the option exist.

    Returns a ``(success, payload)`` tuple: the section's remaining
    options on success, or an error message string on failure.
    """
    if not self.config.has_section(section):
        return (False, 'Section: ' + section + ' does not exist')
    if not self.config.has_option(section, option):
        return (False, 'Option: ' + option + ' does not exist')
    self.config.remove_option(section, option)
    return (True, self.config.options(section))
def _load(self, **kwargs):
    """Check that the rule actually exists before proceeding with load."""
    rule_name = kwargs['name']
    exists = self._check_existence_by_collection(
        self._meta_data['container'], rule_name)
    if not exists:
        raise NonExtantPolicyRule(
            'The rule named, {}, does not exist on the device.'.format(rule_name))
    return super(Rules, self)._load(**kwargs)
def put_skeleton_files_on_disk(metadata_type, where, github_template=None, params=None):
    """Generate metadata files (plus -meta.xml) from jinja2 templates.

    :param metadata_type: API name of the metadata type
    :param where: project directory the files are written into
    :param github_template: template descriptor; must provide ``file_name``
    :param params: values rendered into the template; must provide ``api_name``

    Fixes over the original: mutable default argument ``params={}``, a
    bare ``except:`` that also swallowed KeyboardInterrupt/SystemExit, and
    file handles left to the GC instead of ``with`` blocks.
    """
    if params is None:
        params = {}
    api_name = params["api_name"]
    file_name = github_template["file_name"]
    template_source = config.connection.get_plugin_client_setting(
        'mm_template_source', 'joeferraro/MavensMate-Templates/master')
    template_location = config.connection.get_plugin_client_setting(
        'mm_template_location', 'remote')
    try:
        if template_location == 'remote':
            if 'linux' in sys.platform:
                # NOTE(review): linux shells out to wget instead of using
                # urllib2 -- presumably a workaround; confirm before changing.
                template_body = os.popen(
                    "wget https://raw.githubusercontent.com/{0}/{1}/{2} -q -O -"
                    .format(template_source, metadata_type, file_name)).read()
            else:
                template_body = urllib2.urlopen(
                    "https://raw.githubusercontent.com/{0}/{1}/{2}"
                    .format(template_source, metadata_type, file_name)).read()
        else:
            template_body = get_file_as_string(
                os.path.join(template_source, metadata_type, file_name))
    except Exception:
        # Fall back to the templates bundled with the plugin.
        template_body = get_file_as_string(
            os.path.join(config.base_path, config.support_dir, "templates",
                         "github-local", metadata_type, file_name))
    template = env.from_string(template_body)
    file_body = template.render(params)
    metadata_type = get_meta_type_by_name(metadata_type)
    os.makedirs("{0}/{1}".format(where, metadata_type['directoryName']))
    target = "{0}/{1}/{2}".format(where, metadata_type['directoryName'],
                                  api_name + "." + metadata_type['suffix'])
    with open(target, 'w') as f:
        f.write(file_body)
    template = env.get_template('meta.html')
    file_body = template.render(api_name=api_name,
                                sfdc_api_version=SFDC_API_VERSION,
                                meta_type=metadata_type['xmlName'])
    with open(target + "-meta.xml", 'w') as f:
        f.write(file_body)
def trees_by_path ( self , path ) :
"""Search trees by ` path ` .
Args :
path ( str ) : : attr : ` . Tree . path ` property of : class : ` . Tree ` .
Returns :
set : Set of matching : class : ` Tree ` instances .""" | return set ( self . path_db . get ( path , OOSet ( ) ) . keys ( ) ) |
def rsdl_s(self, Yprev, Y):
    """Compute the dual residual vector.

    NOTE(review): neither ``Yprev`` nor ``Y`` is used; the residual is
    derived purely from the current ``U`` -- presumably intended for this
    ADMM variant, confirm against the algorithm derivation.
    """
    return self.rho * self.cnst_AT(self.U)
def _list_nodes_full(location=None):
    '''Return a list of the VMs that in this location'''
    provider = __active_provider_name__ or 'ec2'
    # Provider strings may look like "name:driver"; keep only the name.
    provider = provider.split(':')[0]
    params = {'Action': 'DescribeInstances'}
    instances = aws.query(params, location=location, provider=provider,
                          opts=__opts__, sigver='4')
    if 'error' in instances:
        raise SaltCloudSystemExit(
            'An error occurred while listing nodes: {0}'.format(
                instances['error']['Errors']['Error']['Message']))
    ret = _extract_instance_info(instances)
    __utils__['cloud.cache_node_list'](ret, provider, __opts__)
    return ret
def _evaluate(self, R, z, phi=0., t=0., dR=0, dphi=0):
    """
    NAME:
       _evaluate
    PURPOSE:
       evaluate the potential at (R,z)
    INPUT:
       R - Cylindrical Galactocentric radius
       z - vertical height
       phi - azimuth
       t - time
    OUTPUT:
       potential at (R,z)
    HISTORY:
       2010-04-16 - Written - Bovy (NYU)
       2012-12-26 - New method using Gaussian quadrature between zeros - Bovy (IAS)
    DOCTEST:
       >>> doubleExpPot = DoubleExponentialDiskPotential()
       >>> r = doubleExpPot(1., 0)  # doctest: +ELLIPSIS
       >>> assert (r + 1.89595350484)**2. < 10.**-6.
    """
    if True:
        # Normalize input: accept a scalar R,z pair or arrays; remember
        # which, so the return type matches the input.
        if isinstance(R, float):
            floatIn = True
            R = nu.array([R])
            z = nu.array([z])
        else:
            floatIn = False
        out = nu.empty(len(R))
        indx = (R <= 6.)
        # Radii beyond 6 are handled by the approximation self._kp.
        # (`True ^ indx` is a boolean NOT of the mask.)
        out[True ^ indx] = self._kp(R[True ^ indx], z[True ^ indx])
        R4max = nu.copy(R)
        R4max[(R < 1.)] = 1.
        kmax = self._kmaxFac * self._beta
        for jj in range(len(R)):
            if not indx[jj]:
                continue
            # Truncate the Bessel integral at the zero of J0 nearest to
            # kmax * R.
            maxj0zeroIndx = nu.argmin((self._j0zeros - kmax * R4max[jj]) ** 2.)  # close enough
            # Gauss-Legendre abscissas (ks) and weights mapped onto each
            # interval between consecutive zeros of J0.
            ks = nu.array([0.5 * (self._glx + 1.) * self._dj0zeros[ii + 1] + self._j0zeros[ii]
                           for ii in range(maxj0zeroIndx)]).flatten()
            weights = nu.array([self._glw * self._dj0zeros[ii + 1]
                                for ii in range(maxj0zeroIndx)]).flatten()
            # Integrand of the double-exponential disk potential.
            evalInt = special.jn(0, ks * R[jj]) * (self._alpha ** 2. + ks ** 2.) ** -1.5 * (self._beta * nu.exp(-ks * nu.fabs(z[jj])) - ks * nu.exp(-self._beta * nu.fabs(z[jj]))) / (self._beta ** 2. - ks ** 2.)
            out[jj] = -2. * nu.pi * self._alpha * nu.sum(weights * evalInt)
        if floatIn:
            return out[0]
        else:
            return out
def count(self):
    """Return a count of rows this Query would return."""
    rows = self.rpc_model.search_count(self.domain, context=self.context)
    return rows
def _get_files ( path ) :
"""Returns the list of files contained in path ( recursively ) .""" | ret_val = [ ]
for root , _ , files in os . walk ( path ) :
for f in files :
ret_val . append ( os . path . join ( root , f ) )
return ret_val |
def evaluate_precision_recall(self, dataset,
                              cutoffs=list(range(1, 11, 1)) + list(range(11, 50, 5)),
                              skip_set=None, exclude_known=True, verbose=True,
                              **kwargs):
    """
    Compute a model's precision and recall scores for a particular dataset.

    Parameters
    ----------
    dataset : SFrame
        An SFrame in the same format as the one used during training.
        This will be compared to the model's recommendations, which exclude
        the (user, item) pairs seen at training time.
    cutoffs : list, optional
        A list of cutoff values for which one wants to evaluate precision
        and recall, i.e. the value of k in "precision at k".
    skip_set : SFrame, optional
        Passed to :meth:`recommend` as ``exclude``.
    exclude_known : bool, optional
        Passed to :meth:`recommend` as ``exclude_known``. If True, exclude
        training items from recommendation.
    verbose : bool, optional
        Enables verbose output. Default is verbose.
    **kwargs
        Additional keyword arguments are passed to the recommend function,
        whose returned recommendations are used for evaluating precision
        and recall of the model.

    Returns
    -------
    out : dict
        Contains the precision and recall at each cutoff value and each
        user in ``dataset``.

    Examples
    --------
    >>> import turicreate as tc
    >>> sf = tc.SFrame('https://static.turi.com/datasets/audioscrobbler')
    >>> train, test = tc.recommender.util.random_split_by_user(sf)
    >>> m = tc.recommender.create(train)
    >>> m.evaluate_precision_recall(test)

    See Also
    --------
    turicreate.recommender.util.precision_recall_by_user
    """
    # NOTE(review): `cutoffs` is a mutable default argument; it is only
    # read here (max(cutoffs) and pass-through), never mutated -- confirm
    # before refactoring.
    user_column = self.user_id
    item_column = self.item_id
    assert user_column in dataset.column_names() and item_column in dataset.column_names(), 'Provided data set must have a column pertaining to user ids and \
item ids, similar to what we had during training.'
    dataset = self.__prepare_dataset_parameter(dataset)
    users = dataset[self.user_id].unique()
    # Only the (user, item) pairs are needed for the comparison.
    dataset = dataset[[self.user_id, self.item_id]]
    # Recommend up to the largest requested cutoff per user.
    recs = self.recommend(users=users, k=max(cutoffs), exclude=skip_set,
                          exclude_known=exclude_known, verbose=verbose, **kwargs)
    precision_recall_by_user = self.__proxy__.precision_recall_by_user(dataset, recs, cutoffs)
    ret = {'precision_recall_by_user': precision_recall_by_user}
    # Aggregate to an overall mean precision/recall per cutoff.
    pr_agg = precision_recall_by_user.groupby(
        'cutoff',
        operations={'precision': _Aggregate.MEAN('precision'),
                    'recall': _Aggregate.MEAN('recall')})
    pr_agg = pr_agg[['cutoff', 'precision', 'recall']]
    ret["precision_recall_overall"] = pr_agg.sort("cutoff")
    return ret
def BuildLegacySubject(subject_id, approval_type):
    """Builds a legacy AFF4 urn string for a given subject and approval type."""
    at = rdf_objects.ApprovalRequest.ApprovalType
    templates = {
        at.APPROVAL_TYPE_CLIENT: "aff4:/%s",
        at.APPROVAL_TYPE_HUNT: "aff4:/hunts/%s",
        at.APPROVAL_TYPE_CRON_JOB: "aff4:/cron/%s",
    }
    if approval_type not in templates:
        raise ValueError("Invalid approval type.")
    return templates[approval_type] % subject_id
def convert_to_namespace(file, output, keyword):
    """Convert an annotation file to a namespace file."""
    resource = parse_bel_resource(file)
    definition = resource['AnnotationDefinition']
    write_namespace(
        namespace_keyword=(keyword or definition['Keyword']),
        namespace_name=definition['Keyword'],
        namespace_description=definition['DescriptionString'],
        author_name='Charles Tapley Hoyt',
        namespace_domain=NAMESPACE_DOMAIN_OTHER,
        values=resource['Values'],
        citation_name=resource['Citation']['NameString'],
        file=output,
    )
def face_locations(img, number_of_times_to_upsample=1, model="hog"):
    """Returns an array of bounding boxes of human faces in a image

    :param img: An image (as a numpy array)
    :param number_of_times_to_upsample: How many times to upsample the image looking for faces. Higher numbers find smaller faces.
    :param model: Which face detection model to use. "hog" is less accurate but faster on CPUs. "cnn" is a more accurate
                  deep-learning model which is GPU/CUDA accelerated (if available). The default is "hog".
    :return: A list of tuples of found face locations in css (top, right, bottom, left) order
    """
    detections = _raw_face_locations(img, number_of_times_to_upsample, model)
    if model == "cnn":
        # CNN detections wrap the rectangle in a .rect attribute.
        return [_trim_css_to_bounds(_rect_to_css(det.rect), img.shape)
                for det in detections]
    return [_trim_css_to_bounds(_rect_to_css(det), img.shape)
            for det in detections]
def current_timestamp():
    """Return the current UTC time as an ISO8601-formatted string in the Zulu TZ."""
    # local import so the module-level import block is untouched
    from datetime import timezone
    # datetime.utcnow() is naive and deprecated since Python 3.12; an
    # aware UTC datetime yields the same "YYYY-MM-DDTHH:MM:SS" prefix.
    now = datetime.now(timezone.utc)
    timestamp = now.isoformat()[0:19] + 'Z'
    debug("generated timestamp: {now}".format(now=timestamp))
    return timestamp
def strip(self, text):
    '''Return string with markup tags removed.'''
    tags = []
    results = []

    def _replace(match):
        return self.clear_tag(match, tags, results)

    return self.re_tag.sub(_replace, text)
def make_query(args, other=None, limit=None, strand=None, featuretype=None,
               extra=None, order_by=None, reverse=False,
               completely_within=False):
    """
    Multi-purpose, bare-bones ORM function.

    This function composes queries given some commonly-used kwargs that can
    be passed to FeatureDB methods (like .parents(), .children(),
    .all_features(), .features_of_type()).  It handles, in one place, things
    like restricting to featuretype, limiting to a genomic range, limiting
    to one strand, or returning results ordered by different criteria.

    Additional filtering/subsetting/sorting behavior should be added here.

    (Note: this ended up having better performance (and flexibility) than
    sqlalchemy.)

    This function also provides support for additional JOINs etc (supplied
    via the `other` kwarg) and extra conditional clauses (`extra` kwarg).
    See the `_QUERY` var below for the order in which they are used.

    For example, FeatureDB._relation uses `other` to supply the JOIN
    substatement, and that same method also uses `extra` to supply the
    "relations.level = ?" substatement (see the source for
    FeatureDB._relation for more details).

    `args` contains the arguments that will ultimately be supplied to the
    sqlite3.connection.execute function.  It may be further populated
    below -- for example, if strand="+", then the query will include
    a strand clause, and the strand will be appended to the args.

    `args` can be pre-filled with args that are passed to `other` and
    `extra`.
    """
    _QUERY = ("{_SELECT} {OTHER} {EXTRA} {FEATURETYPE} "
              "{LIMIT} {STRAND} {ORDER_BY}")
    # Construct a dictionary `d` that will be used later as _QUERY.format(**d).
    # Default is just _SELECT, which returns all records in the features
    # table.  (Recall that constants._SELECT gets the fields in the order
    # needed to reconstruct a Feature)
    d = dict(_SELECT=constants._SELECT, OTHER="", FEATURETYPE="", LIMIT="",
             STRAND="", ORDER_BY="", EXTRA="")
    if other:
        d['OTHER'] = other
    if extra:
        d['EXTRA'] = extra
    # If `other` and `extra` take args (that is, they have "?" in them), then
    # they should have been provided in `args`.
    required_args = (d['EXTRA'] + d['OTHER']).count('?')
    if len(args) != required_args:
        raise ValueError('Not enough args (%s) for subquery' % args)
    # Below, if a kwarg is specified, then we create sections of the query --
    # appending to args as necessary.
    #
    # IMPORTANT: the order in which things are processed here is the same as
    # the order of the placeholders in _QUERY.  That is, we need to build the
    # args in parallel with the query to avoid putting the wrong args in the
    # wrong place.
    if featuretype:
        # Handle single or iterables of featuretypes.
        # e.g., "featuretype = 'exon'"
        # or,   "featuretype IN ('exon', 'CDS')"
        if isinstance(featuretype, six.string_types):
            d['FEATURETYPE'] = "features.featuretype = ?"
            args.append(featuretype)
        else:
            d['FEATURETYPE'] = ("features.featuretype IN (%s)"
                                % (','.join(["?" for _ in featuretype])))
            args.extend(featuretype)
    if limit:
        # Restrict to a genomic region.  Makes use of the UCSC binning
        # strategy for performance.
        # `limit` is a string or a tuple of (chrom, start, stop)
        # e.g., "seqid = 'chr2L' AND start > 1000 AND end < 5000"
        if isinstance(limit, six.string_types):
            seqid, startstop = limit.split(':')
            start, end = startstop.split('-')
        else:
            seqid, start, end = limit
        # Identify possible bins
        _bins = bins.bins(int(start), int(end), one=False)
        # Use different overlap conditions
        if completely_within:
            d['LIMIT'] = ("features.seqid = ? AND features.start >= ? "
                          "AND features.end <= ?")
            args.extend([seqid, start, end])
        else:
            d['LIMIT'] = ("features.seqid = ? AND features.start <= ? "
                          "AND features.end >= ?")
            # Note order (end, start)
            args.extend([seqid, end, start])
        # Add bin clause.  See issue #45.
        if len(_bins) < 900:
            d['LIMIT'] += " AND features.bin IN (%s)" % (','.join(map(str, _bins)))
    if strand:
        # e.g., "strand = '+'"
        d['STRAND'] = "features.strand = ?"
        args.append(strand)
    # TODO: implement file_order!
    valid_order_by = constants._gffkeys_extra + ['file_order', 'length']
    _order_by = []
    if order_by:
        # Default is essentially random order.
        # e.g. "ORDER BY seqid, start DESC"
        if isinstance(order_by, six.string_types):
            _order_by.append(order_by)
        else:
            for k in order_by:
                if k not in valid_order_by:
                    raise ValueError("%s not a valid order-by value in %s"
                                     % (k, valid_order_by))
                # There's no length field, so order by end - start
                if k == 'length':
                    k = '(end - start)'
                _order_by.append(k)
        _order_by = ','.join(_order_by)
        if reverse:
            direction = 'DESC'
        else:
            direction = 'ASC'
        d['ORDER_BY'] = 'ORDER BY %s %s' % (_order_by, direction)
    # Ensure only one "WHERE" is included; the rest get "AND".  This is ugly.
    where = False
    if "where" in d['OTHER'].lower():
        where = True
    for i in ['EXTRA', 'FEATURETYPE', 'LIMIT', 'STRAND']:
        if d[i]:
            if not where:
                d[i] = "WHERE " + d[i]
                where = True
            else:
                d[i] = "AND " + d[i]
    return _QUERY.format(**d), args
def message(self, data):
    """Send a message to the framework scheduler.

    These messages are best effort; do not expect a framework message to
    be retransmitted in any reliable fashion.
    """
    log_line = 'Driver sends framework message {}'.format(data)
    logging.info(log_line)
    return self.driver.sendFrameworkMessage(data)
def rsky_distribution(self, rmax=None, smooth=0.1, nbins=100):
    """Distribution of projected separations.

    Returns a :class:`simpledists.Hist_Distribution` object.

    :param rmax: (optional) maximum radius for the distribution; defaults
        to ``self.maxrad`` when defined, otherwise the 99th percentile of
        ``self.Rsky``.
    :param smooth: (optional) smoothing parameter for
        :class:`simpledists.Hist_Distribution`.
    :param nbins: (optional) number of bins for the histogram.
    :return: :class:`simpledists.Hist_Distribution` describing the Rsky
        distribution.
    """
    if rmax is None:
        # Prefer an explicitly configured maximum radius when available.
        rmax = (self.maxrad if hasattr(self, 'maxrad')
                else np.percentile(self.Rsky, 99))
    return dists.Hist_Distribution(self.Rsky.value, bins=nbins,
                                   maxval=rmax, smooth=smooth)
def _unpickle_panel_compat(self, state):  # pragma: no cover
    """Unpickle legacy Panel state and install it on this object."""
    from pandas.io.pickle import _unpickle_array

    vals, items, major, minor = state
    # Decode each axis and the value block with the legacy array unpickler.
    axis_items = _unpickle_array(items)
    axis_major = _unpickle_array(major)
    axis_minor = _unpickle_array(minor)
    values = _unpickle_array(vals)
    panel = Panel(values, axis_items, axis_major, axis_minor)
    self._data = panel._data
def _vertically_size_cells ( rendered_rows ) :
"""Grow row heights to cater for vertically spanned cells that do not
fit in the available space .""" | for r , rendered_row in enumerate ( rendered_rows ) :
for rendered_cell in rendered_row :
if rendered_cell . rowspan > 1 :
row_height = sum ( row . height for row in rendered_rows [ r : r + rendered_cell . rowspan ] )
extra_height_needed = rendered_cell . height - row_height
if extra_height_needed > 0 :
padding = extra_height_needed / rendered_cell . rowspan
for i in range ( r , r + rendered_cell . rowspan ) :
rendered_rows [ i ] . height += padding
return rendered_rows |
def set_pool_quota(service, pool_name, max_bytes=None, max_objects=None):
    """Set byte and/or object quotas on a Ceph pool.

    :param service: The Ceph user name to run the command under
    :type service: str
    :param pool_name: Name of pool
    :type pool_name: str
    :param max_bytes: Maximum bytes quota to apply (0 removes the quota)
    :type max_bytes: int
    :param max_objects: Maximum objects quota to apply (0 removes the quota)
    :type max_objects: int
    :raises: subprocess.CalledProcessError
    """
    cmd = ['ceph', '--id', service, 'osd', 'pool', 'set-quota', pool_name]
    # Compare against None explicitly: a quota of 0 is meaningful to Ceph
    # (it removes the quota) and must not be skipped as falsy.
    if max_bytes is not None:
        cmd.extend(['max_bytes', str(max_bytes)])
    if max_objects is not None:
        cmd.extend(['max_objects', str(max_objects)])
    check_call(cmd)
def get_attribute_value(self, tag_name, attribute, format_value=False, **attribute_filter):
    """Return the first non-None attribute value in xml files which matches
    the tag name and the specific attribute.

    :param str tag_name: specify the tag name
    :param str attribute: specify the attribute
    :param bool format_value: specify if the value needs to be formatted with packagename
    """
    candidates = self.get_all_attribute_value(tag_name, attribute,
                                              format_value, **attribute_filter)
    # First concrete value wins; implicitly returns None when there is none.
    for candidate in candidates:
        if candidate is not None:
            return candidate
def transcript_to_fake_psl_line(self, ref):
    """Convert a mapping to a fake PSL line

    :param ref: reference genome dictionary (chromosome name -> sequence)
    :type ref: dict()
    :return: psl line
    :rtype: string
    """
    self._initialize()
    e = self
    mylen = 0
    matches = 0
    qstartslist = []
    # Accumulate exon lengths; each exon's query start is the running
    # total of the lengths of the exons that precede it.
    for exon in self.exons:
        mylen = exon.rng.length()
        matches += mylen
        qstartslist.append(matches - mylen)
    qstarts = ','.join([str(x) for x in qstartslist]) + ','
    # Build the 21-column tab-separated PSL record. Columns 2-8
    # (mismatches, rep. matches, Ns, gap counts/bases) are zeroed since
    # this is a "fake" perfect alignment.
    oline = str(matches) + "\t"
    oline += "0\t"
    oline += "0\t"
    oline += "0\t"
    oline += "0\t"
    oline += "0\t"
    oline += "0\t"
    oline += "0\t"
    oline += e.get_strand() + "\t"
    oline += e.get_transcript_name() + "\t"
    # 10: query size (= total matched bases)
    oline += str(matches) + "\t"
    # 11: query start
    oline += "0\t"
    # 12: query end
    oline += str(matches) + "\t"
    # 13: target (chromosome) name
    oline += e.get_chrom() + "\t"
    # 14: target size, taken from the reference dictionary
    oline += str(len(ref[e.get_chrom()])) + "\t"
    # 15: target start (the -1 presumably converts 1-based inclusive
    # exon starts to PSL's 0-based half-open coordinates -- confirm
    # against the Range implementation)
    oline += str(e.exons[0].rng.start - 1) + "\t"
    # 16: target end
    oline += str(e.exons[-1].rng.end) + "\t"
    # 17: block count
    oline += str(len(e.exons)) + "\t"
    # 18: comma-terminated block sizes
    oline += ','.join([str(e.exons[x].rng.end - (e.exons[x].rng.start - 1)) for x in range(0, len(e.exons))]) + ',' + "\t"
    # 19: comma-terminated query starts
    oline += qstarts + "\t"
    # 20: comma-terminated target starts
    oline += ','.join([str(x.rng.start - 1) for x in e.exons]) + ','
    # 21 columns total
    return oline
def is_null(*symbols):
    """True if no nodes or all the given nodes are either
    None, NOP or empty blocks. For blocks this applies recursively.
    """
    from symbols.symbol_ import Symbol

    for node in symbols:
        if node is None:
            continue
        if not isinstance(node, Symbol):
            return False
        if node.token == 'NOP':
            continue
        if node.token == 'BLOCK':
            # A block is null only when every child is null.
            if is_null(*node.children):
                continue
            return False
        # Any other token carries real content.
        return False
    return True
def _check_and_handle_includes(self, from_file):
    """Look for an optional INCLUDE section in the given file path. If
    the parser set `paths`, it is cleared so that they do not keep
    showing up when additional files are parsed.

    :param from_file: path of the file currently being parsed; relative
        include paths are resolved against its directory.
    :raises RecursionInConfigFile: if an included file was already read.
    """
    logger.debug("Check/handle includes from %s", from_file)
    try:
        paths = self._parser.get("INCLUDE", "paths")
    except (config_parser.NoSectionError, config_parser.NoOptionError) as exc:
        logger.debug("_check_and_handle_includes: EXCEPTION: %s", exc)
        return
    paths_lines = [p.strip() for p in paths.split("\n")]
    logger.debug("paths = %s (wanted just once; CLEARING)", paths_lines)
    self._parser.remove_option("INCLUDE", "paths")
    for f in paths_lines:
        abspath = (f if os.path.isabs(f)
                   else os.path.abspath(os.path.join(os.path.dirname(from_file), f)))
        use_path = os.path.normpath(abspath)
        if use_path in self._parsed_files:
            # BUG FIX: the message must be %-formatted here; exception
            # constructors do not interpolate logging-style argument tuples.
            raise RecursionInConfigFile(
                "In %s: %s already read" % (from_file, use_path))
        self._parsed_files.append(use_path)
        self._handle_rc_file(use_path)
def delete_user(self, auth, username):
    """Deletes the user with username ``username``. Should only be called if
    the to-be-deleted user has no repositories.

    :param auth.Authentication auth: authentication object, must be admin-level
    :param str username: username of user to delete
    """
    self.delete("/admin/users/{}".format(username), auth=auth)
def _is_molecule_linear ( self , mol ) :
"""Is the molecule a linear one
Args :
mol : The molecule . OpenBabel OBMol object .
Returns :
Boolean value .""" | if mol . NumAtoms ( ) < 3 :
return True
a1 = mol . GetAtom ( 1 )
a2 = mol . GetAtom ( 2 )
for i in range ( 3 , mol . NumAtoms ( ) + 1 ) :
angle = float ( mol . GetAtom ( i ) . GetAngle ( a2 , a1 ) )
if angle < 0.0 :
angle = - angle
if angle > 90.0 :
angle = 180.0 - angle
if angle > self . _angle_tolerance :
return False
return True |
def average_gradients(model):
    """Average gradients across all workers, in place.

    Each parameter's gradient is summed over the process group with an
    all-reduce and then divided by the world size.
    """
    world_size = float(dist.get_world_size())
    for parameter in model.parameters():
        dist.all_reduce(parameter.grad.data, op=dist.reduce_op.SUM, group=0)
        parameter.grad.data /= world_size
def simple_lesk(context_sentence: str, ambiguous_word: str, pos: str = None, lemma=True, stem=False, hyperhypo=True, stop=True, context_is_lemmatized=False, nbest=False, keepscore=False, normalizescore=False, from_cache=True) -> "wn.Synset":
    """Simple Lesk is somewhere in between using more than the
    original Lesk algorithm (1986) and using less signature
    words than adapted Lesk (Banerjee and Pederson, 2002)

    :param context_sentence: String, sentence or document.
    :param ambiguous_word: String, a single word.
    :param pos: String, one of 'a', 'r', 's', 'n', 'v', or None.
    :return: A Synset for the estimated best sense.
    """
    # Work with the lemma of the ambiguous word; bail out early when the
    # word is unknown to WordNet.
    ambiguous_word = lemmatize(ambiguous_word, pos=pos)
    if not wn.synsets(ambiguous_word):
        return None
    # Signature words for each candidate synset.
    signatures = simple_signatures(ambiguous_word, pos, lemma, stem,
                                   hyperhypo, stop, from_cache=from_cache)
    # Tokenize the context (or lemmatize it first when needed), then pick
    # the sense whose signature overlaps the context best.
    if context_is_lemmatized:
        tokens = context_sentence.split()
    else:
        tokens = lemmatize_sentence(context_sentence)
    return compare_overlaps(tokens, signatures, nbest=nbest,
                            keepscore=keepscore, normalizescore=normalizescore)
def getPiLambert(n):
    """Returns a list containing first n digits of Pi."""
    digit_gen = piGenLambert()
    # Pull exactly n digits; a non-positive n yields an empty list.
    digits = [next(digit_gen) for _ in range(n)] if n > 0 else []
    digit_gen.close()
    return digits
def savefig(self, output_path, title, dpi=400, format='png', cmap=None):
    """Write the image to a file using pyplot.

    Parameters
    ----------
    output_path : :obj:`str`
        The directory in which to place the file.
    title : :obj:`str`
        The title of the file in which to save the image.
    dpi : int
        The resolution in dots per inch.
    format : :obj:`str`
        The file format to save. Available options include .png, .pdf, .ps,
        .eps, and .svg.
    cmap : :obj:`Colormap`, optional
        A Colormap object for the pyplot.
    """
    fig = plt.figure()
    try:
        plt.imshow(self.data, cmap=cmap)
        plt.title(title)
        plt.axis('off')
        title_underscore = title.replace(' ', '_')
        plt.savefig(os.path.join(output_path,
                                 '{0}.{1}'.format(title_underscore, format)),
                    dpi=dpi, format=format)
    finally:
        # BUG FIX: close the figure so repeated calls do not accumulate
        # open figures (and their memory) in pyplot's global state.
        plt.close(fig)
def getmu_vertices_stability_phase(self, target_comp, dep_elt, tol_en=1e-2):
    """returns a set of chemical potentials corresponding to the vertices of
    the simplex in the chemical potential phase diagram.
    The simplex is built using all elements in the target_composition
    except dep_elt.
    The chemical potential of dep_elt is computed from the target
    composition energy.
    This method is useful to get the limiting conditions for
    defects computations for instance.

    Args:
        target_comp: A Composition object
        dep_elt: the element for which the chemical potential is computed
            from the energy of
            the stable phase at the target composition
        tol_en: a tolerance on the energy to set

    Returns:
        [{Element: mu}]: An array of conditions on simplex vertices for
        which each element has a chemical potential set to a given
        value. "absolute" values (i.e., not referenced to element energies)
    """
    # Elemental reference energies for every independent element
    # (everything except dep_elt), in the same order as self.elements.
    muref = np.array([self.el_refs[e].energy_per_atom for e in self.elements if e != dep_elt])
    chempot_ranges = self.get_chempot_range_map([e for e in self.elements if e != dep_elt])
    # Pad the target composition with zero amounts so that every element
    # of the phase diagram appears in it.
    for e in self.elements:
        if not e in target_comp.elements:
            target_comp = target_comp + Composition({e: 0.0})
    # Coefficients used to back out the dependent element's potential from
    # the total-energy balance below.
    coeff = [-target_comp[e] for e in self.elements if e != dep_elt]
    for e in chempot_ranges.keys():
        if e.composition.reduced_composition == target_comp.reduced_composition:
            # Rescale the entry energy so it refers to the same amount of
            # dep_elt as the target composition.
            multiplicator = e.composition[dep_elt] / target_comp[dep_elt]
            ef = e.energy / multiplicator
            all_coords = []
            for s in chempot_ranges[e]:
                for v in s._coords:
                    elts = [e for e in self.elements if e != dep_elt]
                    res = {}
                    # Shift the simplex coordinates to absolute chemical
                    # potentials using the elemental references.
                    for i in range(len(elts)):
                        res[elts[i]] = v[i] + muref[i]
                    # dep_elt's potential follows from the energy balance.
                    res[dep_elt] = (np.dot(v + muref, coeff) + ef) / target_comp[dep_elt]
                    # De-duplicate vertices that agree within tol_en on
                    # every element.
                    already_in = False
                    for di in all_coords:
                        dict_equals = True
                        for k in di:
                            if abs(di[k] - res[k]) > tol_en:
                                dict_equals = False
                                break
                        if dict_equals:
                            already_in = True
                            break
                    if not already_in:
                        all_coords.append(res)
            return all_coords
def _process(self, on_commit: UpdateCallable, on_rollback: UpdateCallable) -> Any:
    """Process action. oncommit is a callback to execute action, onrollback is
    a callback to execute if the oncommit() has been called and a rollback
    is required.
    """
    _debug("---> commiting", on_commit)
    result = self._do_with_retry(on_commit)
    # When a transaction is open, record the inverse operation at the front
    # of the newest transaction's rollback log.
    if self._transactions:
        self._transactions[-1].insert(0, on_rollback)
    return result
def _do_connection ( self , wgt , sig , func ) :
"""Make a connection between a GUI widget and a callable .
wgt and sig are strings with widget and signal name
func is a callable for that signal""" | # new style ( we use this )
# self . btn _ name . clicked . connect ( self . on _ btn _ name _ clicked )
# old style
# self . connect ( self . btn _ name , SIGNAL ( ' clicked ( ) ' ) , self . on _ btn _ name _ clicked )
if hasattr ( self , wgt ) :
wgtobj = getattr ( self , wgt )
if hasattr ( wgtobj , sig ) :
sigobj = getattr ( wgtobj , sig )
if isinstance ( sigobj , Signal ) :
sigobj . connect ( func )
return 0
return 1 |
def _zforce ( self , R , z , phi = 0 , t = 0 ) :
"""NAME :
_ zforce
PURPOSE :
evaluate the vertical force at ( R , z , phi )
INPUT :
R - Cylindrical Galactocentric radius
z - vertical height
phi - azimuth
t - time
OUTPUT :
vertical force at ( R , z , phi )
HISTORY :
2016-12-26 - Written - Bovy ( UofT / CCA )""" | r = numpy . sqrt ( R ** 2. + z ** 2. )
out = self . _scf . zforce ( R , z , phi = phi , use_physical = False )
for a , s , ds , H , dH in zip ( self . _Sigma_amp , self . _Sigma , self . _dSigmadR , self . _Hz , self . _dHzdz ) :
out -= 4. * numpy . pi * a * ( ds ( r ) * H ( z ) * z / r + s ( r ) * dH ( z ) )
return out |
def splay(vec):
    """Determine two lengths to split stride the input vector by.

    Returns ``(N1, N2)`` where ``N2`` is a power of two near
    ``sqrt(len(vec))`` and ``N1 = len(vec) // N2``.
    """
    N2 = 2 ** int(numpy.log2(len(vec)) / 2)
    # BUG FIX: integer division. Under Python 3 the original '/' returned a
    # float, but N1 is a count used as a length.
    N1 = len(vec) // N2
    return N1, N2
def load(cls, path):
    """Load a recursive SOM from a JSON file.

    You can use this function to load weights of other SOMs.
    If there are no context weights, they will be set to 0.

    Parameters
    ----------
    path : str
        The path to the JSON file.

    Returns
    -------
    s : cls
        A som of the specified class.
    """
    # BUG FIX: use a context manager so the file handle is closed
    # deterministically instead of leaking until garbage collection.
    with open(path) as file_handle:
        data = json.load(file_handle)
    weights = np.asarray(data['weights'], dtype=np.float64)
    try:
        context_weights = np.asarray(data['context_weights'],
                                     dtype=np.float64)
    except KeyError:
        # Older files carry no context weights; default to all zeros.
        context_weights = np.zeros((len(weights), len(weights)))
    # alpha/beta are optional for backward compatibility with older files.
    try:
        alpha = data['alpha']
        beta = data['beta']
    except KeyError:
        alpha = 1.0
        beta = 1.0
    s = cls(data['map_dimensions'],
            data['data_dimensionality'],
            data['params']['lr']['orig'],
            influence=data['params']['infl']['orig'],
            alpha=alpha,
            beta=beta,
            lr_lambda=data['params']['lr']['factor'],
            infl_lambda=data['params']['infl']['factor'])
    s.weights = weights
    s.context_weights = context_weights
    s.trained = True
    return s
def damaged_by_cut(self, subsystem):
    """Return ``True`` if this MICE is affected by the subsystem's cut.

    The cut affects the MICE if it either splits the MICE's mechanism
    or splits the connections between the purview and mechanism.
    """
    cut = subsystem.cut
    if cut.splits_mechanism(self.mechanism):
        return True
    # Check whether any relevant purview<->mechanism connection is severed.
    severed = (self._relevant_connections(subsystem)
               * cut.cut_matrix(subsystem.network.size))
    return np.any(severed == 1)
def make_relative(self, other):
    """Return a new path that is the equivalent of this one relative to the path
    *other*. Unlike :meth:`relative_to`, this will not throw an error if *self* is
    not a sub-path of *other*; instead, it will use ``..`` to build a relative
    path. This can result in invalid relative paths if *other* contains a
    directory symbolic link.

    If *self* is an absolute path, it is returned unmodified.
    """
    # Absolute paths are already unambiguous; leave them untouched.
    if self.is_absolute():
        return self
    from os.path import relpath
    cls = self.__class__
    return cls(relpath(text_type(self), text_type(cls(other))))
async def connect(self, connection):
    """Connects to the specified given connection using the given auth key."""
    # Connecting twice is a no-op; just tell the user.
    if self._user_connected:
        self._log.info('User is already connected!')
        return
    self._connection = connection
    await self._connect()
    self._user_connected = True
def path_selection_changed(self):
    """Handles when the current index of the combobox changes."""
    index = self.currentIndex()
    if index == SELECT_OTHER:
        # Ask the user for a directory; fall back to CWD when cancelled.
        chosen = self.select_directory()
        if chosen:
            self.add_external_path(chosen)
            self.setCurrentIndex(self.count() - 1)
        else:
            self.setCurrentIndex(CWD)
    elif index == CLEAR_LIST:
        answer = QMessageBox.question(
            self,
            _("Clear other directories"),
            _("Do you want to clear the list of other directories?"),
            QMessageBox.Yes | QMessageBox.No)
        if answer == QMessageBox.Yes:
            self.clear_external_paths()
        self.setCurrentIndex(CWD)
    elif index >= EXTERNAL_PATHS:
        self.external_path = to_text_string(self.itemText(index))
def read_readme():
    """Reads part of the README.rst for use as long_description in setup().

    Returns the text between the "start long description" and
    "end long description" marker lines (both markers excluded).
    """
    # BUG FIX: use a context manager so the file handle is closed
    # deterministically instead of leaking until garbage collection.
    with open("README.rst", "rt") as readme:
        text_lines = readme.read().split("\n")
    ld_i_beg = 0
    while text_lines[ld_i_beg].find("start long description") < 0:
        ld_i_beg += 1
    ld_i_beg += 1
    ld_i_end = ld_i_beg
    while text_lines[ld_i_end].find("end long description") < 0:
        ld_i_end += 1
    return "\n".join(text_lines[ld_i_beg:ld_i_end])
def advance(self):
    """Carry out one iteration of Arnoldi.

    Extends the Krylov basis by one vector: computes A*V[:,k], builds the
    k-th column of the Hessenberg matrix H with the configured
    orthogonalization scheme ('house', 'lanczos' or modified Gram-Schmidt),
    and stores the new basis vector. Sets ``self.invariant`` when the
    subspace stops growing (near-zero subdiagonal entry), and increments
    ``self.iter``.
    """
    if self.iter >= self.maxiter:
        raise ArgumentError('Maximum number of iterations reached.')
    if self.invariant:
        raise ArgumentError('Krylov subspace was found to be invariant '
                            'in the previous iteration.')
    N = self.V.shape[0]
    k = self.iter
    # the matrix-vector multiplication
    Av = self.A * self.V[:, [k]]
    if self.ortho == 'house':
        # Householder: apply all previous reflections to Av, then create a
        # new reflection to zero out its tail.
        for j in range(k + 1):
            Av[j:] = self.houses[j].apply(Av[j:])
            Av[j] *= numpy.conj(self.houses[j].alpha)
        if k + 1 < N:
            house = House(Av[k + 1:])
            self.houses.append(house)
            Av[k + 1:] = house.apply(Av[k + 1:]) * numpy.conj(house.alpha)
            self.H[:k + 2, [k]] = Av[:k + 2]
        else:
            self.H[:k + 1, [k]] = Av[:k + 1]
        # next line is safe due to the multiplications with alpha
        self.H[k + 1, k] = numpy.abs(self.H[k + 1, k])
        # A (relatively) tiny subdiagonal entry signals an invariant
        # subspace ("happy breakdown").
        if self.H[k + 1, k] / numpy.linalg.norm(self.H[:k + 2, :k + 1], 2) <= 1e-14:
            self.invariant = True
        else:
            # Reconstruct the new basis vector by applying the reflections
            # in reverse to the (k+1)-th unit vector.
            vnew = numpy.zeros((N, 1), dtype=self.dtype)
            vnew[k + 1] = 1
            for j in range(k + 1, -1, -1):
                vnew[j:] = self.houses[j].apply(vnew[j:])
            self.V[:, [k + 1]] = vnew * self.houses[-1].alpha
    else:
        # determine vectors for orthogonalization
        start = 0
        # Lanczos? Only orthogonalize against the previous vector and reuse
        # the symmetry H[k-1,k] = H[k,k-1].
        if self.ortho == 'lanczos':
            start = k
            if k > 0:
                self.H[k - 1, k] = self.H[k, k - 1]
                if self.M is not None and not isinstance(self.M, IdentityLinearOperator):
                    Av -= self.H[k, k - 1] * self.P[:, [k - 1]]
                else:
                    Av -= self.H[k, k - 1] * self.V[:, [k - 1]]
        # (double) modified Gram-Schmidt; self.reorthos extra passes for
        # numerical stability.
        for reortho in range(self.reorthos + 1):
            # orthogonalize
            for j in range(start, k + 1):
                alpha = inner(self.V[:, [j]], Av, ip_B=self.ip_B)[0, 0]
                if self.ortho == 'lanczos':
                    # check if alpha is real
                    if abs(alpha.imag) > 1e-10:
                        warnings.warn('Iter {0}: abs(alpha.imag) = {1} > 1e-10. '
                                      'Is your operator self-adjoint in the '
                                      'provided inner product?'
                                      .format(self.iter, abs(alpha.imag)))
                    alpha = alpha.real
                self.H[j, k] += alpha
                if self.M is not None:
                    Av -= alpha * self.P[:, [j]]
                else:
                    Av -= alpha * self.V[:, [j]]
        if self.M is not None:
            # Preconditioned case: track both Av and M*Av.
            MAv = self.M * Av
            self.H[k + 1, k] = norm(Av, MAv, ip_B=self.ip_B)
        else:
            self.H[k + 1, k] = norm(Av, ip_B=self.ip_B)
        # Same invariance test as in the Householder branch.
        if self.H[k + 1, k] / numpy.linalg.norm(self.H[:k + 2, :k + 1], 2) <= 1e-14:
            self.invariant = True
        else:
            if self.M is not None:
                self.P[:, [k + 1]] = Av / self.H[k + 1, k]
                self.V[:, [k + 1]] = MAv / self.H[k + 1, k]
            else:
                self.V[:, [k + 1]] = Av / self.H[k + 1, k]
    # increase iteration counter
    self.iter += 1
def get_smtp_header(self):
    """Returns the SMTP formatted header of the line.

    :rtype: string
    :return: The SMTP header.
    """
    # Recipient lists are folded onto continuation lines, one address each.
    recipients_to = ',\r\n '.join(self.get_to())
    recipients_cc = ',\r\n '.join(self.get_cc())
    recipients_bcc = ',\r\n '.join(self.get_bcc())
    return ("From: %s\r\n" % self.get_sender()
            + "To: %s\r\n" % recipients_to
            + "Cc: %s\r\n" % recipients_cc
            + "Bcc: %s\r\n" % recipients_bcc
            + "Subject: %s\r\n" % self.get_subject())
def wb_db010(self, value=None):
    """Corresponds to IDD Field `wb_db010`

    mean coincident wet-bulb temperature to
    Dry-bulb temperature corresponding to 1.0% annual cumulative frequency of occurrence (warm conditions)

    Args:
        value (float): value for IDD Field `wb_db010`
            Unit: C
            if `value` is None it will not be checked against the
            specification and is assumed to be a missing value

    Raises:
        ValueError: if `value` is not a valid value
    """
    # None marks a missing value and bypasses validation.
    if value is None:
        self._wb_db010 = None
        return
    try:
        numeric = float(value)
    except ValueError:
        raise ValueError('value {} need to be of type float '
                         'for field `wb_db010`'.format(value))
    self._wb_db010 = numeric
def list(self, id):
    """Fetch info about a specific list.

    Returns a `list dict`_.
    """
    unpacked_id = self.__unpack_id(id)
    return self.__api_request('GET', '/api/v1/lists/{0}'.format(unpacked_id))
def get_consumption(self, deviceid, timerange="10"):
    """Return all available energy consumption data for the device.

    You need to divide watt_values by 100 and volt_values by 1000
    to get the "real" values.

    :param deviceid: id of the home-automation device to query
    :param timerange: one of "10", "24h", "month" or "year"
    :return: dict
    :raises ValueError: if ``timerange`` is not a supported value
    """
    tranges = ("10", "24h", "month", "year")
    if timerange not in tranges:
        raise ValueError("Unknown timerange. Possible values are: {0}".format(tranges))
    url = self.base_url + "/net/home_auto_query.lua"
    # Query the box's home-automation endpoint; raise_for_status() turns
    # HTTP errors into exceptions.
    response = self.session.get(url, params={'sid': self.sid, 'command': 'EnergyStats_{0}'.format(timerange), 'id': deviceid, 'xhr': 0, }, timeout=15)
    response.raise_for_status()
    data = response.json()
    result = {}
    # Single result values: map the device's CamelCase keys to snake_case
    # keys in the result dict (all parsed as int).
    values_map = {'MM_Value_Amp': 'mm_value_amp', 'MM_Value_Power': 'mm_value_power', 'MM_Value_Volt': 'mm_value_volt', 'EnStats_average_value': 'enstats_average_value', 'EnStats_max_value': 'enstats_max_value', 'EnStats_min_value': 'enstats_min_value', 'EnStats_timer_type': 'enstats_timer_type', 'sum_Day': 'sum_day', 'sum_Month': 'sum_month', 'sum_Year': 'sum_year', }
    for avm_key, py_key in values_map.items():
        result[py_key] = int(data[avm_key])
    # Stats counts: the box reports EnStats_count numbered series entries
    # (EnStats_watt_value_1 .. _N); collect them into flat lists.
    count = int(data["EnStats_count"])
    watt_values = [None for i in range(count)]
    volt_values = [None for i in range(count)]
    for i in range(1, count + 1):
        watt_values[i - 1] = int(data["EnStats_watt_value_{}".format(i)])
        volt_values[i - 1] = int(data["EnStats_volt_value_{}".format(i)])
    result['watt_values'] = watt_values
    result['volt_values'] = volt_values
    return result
def iter_tiles(self, include_controller=True):
    """Iterate over all tiles in this device in order.

    The ordering is by tile address which places the controller tile
    (address 8) first in the list.

    Args:
        include_controller (bool): Include the controller tile in the
            results.

    Yields:
        int, EmulatedTile: A tuple with the tile address and tile object.
    """
    for address in sorted(self._tiles):
        # Address 8 is the controller tile.
        if not include_controller and address == 8:
            continue
        yield address, self._tiles[address]
def populate_user(self):
    """Populates the Django user object using the default bind credentials."""
    user = None
    try:
        # self.attrs is only non-None when this user could actually be
        # loaded from the LDAP directory, which filters out nonexistent
        # users.
        if self.attrs is not None:
            self._get_or_create_user(force_populate=True)
            user = self._user
    except ldap.LDAPError as e:
        results = ldap_error.send(self.backend.__class__,
                                  context='populate_user', exception=e)
        # Only warn when no receiver handled the signal.
        if not results:
            logger.warning(u"Caught LDAPError while authenticating %s: %s",
                           self._username, pprint.pformat(e))
    except Exception as e:
        logger.error(u"Caught Exception while authenticating %s: %s",
                     self._username, pprint.pformat(e))
        logger.error(''.join(traceback.format_tb(sys.exc_info()[2])))
        raise
    return user
def run(self):
    """Main loop.

    Polls the keymap forever, dispatching changed key states to the
    handler (optionally via the transformer) and sleeping between polls.
    """
    while True:
        keymap = self._okeymap.get_keymap()
        # Nothing changed (or the keymap could not be read): just sleep
        # and poll again.
        if keymap == self._last_keymap or not keymap:
            sleep(self._sleep_time)
            continue
        keys = self._okeymap.get_keys(keymap)
        # Only dispatch when the 'regular' key set changed since the last
        # poll (presumably the non-modifier keys -- confirm with the
        # keymap implementation).
        if (keys['regular'] and keys['regular'] != self._last_keys['regular']):
            if self._transformer:
                transformed_keys = self._transformer.transform(keys)
                # A transformer may veto the event by returning None.
                if transformed_keys is not None:
                    self._handler.handle(transformed_keys)
            else:
                self._handler.handle(keys)
        # Remember the state so the next poll only reacts to changes.
        self._last_keymap = keymap
        self._last_keys = keys
        sleep(self._sleep_time)
def strip_vl_extension(filename):
    """Strip the vega-lite extension (either .vl.json or .json) from filename.

    Returns the filename unchanged when it carries neither extension.
    """
    # Check the longer suffix first so 'x.vl.json' strips '.vl.json'
    # rather than just '.json'.
    for ext in ('.vl.json', '.json'):
        if filename.endswith(ext):
            return filename[:-len(ext)]
    # Explicit post-loop return instead of the original for/else, whose
    # binding was ambiguous and could return early for '.json' inputs.
    return filename
def remove_data(self, request, pk=None):
    """Remove data from collection."""
    collection = self.get_object()
    payload = request.data
    # The request must name the data objects to detach.
    if 'ids' not in payload:
        return Response({"error": "`ids`parameter is required"},
                        status=status.HTTP_400_BAD_REQUEST)
    for data_id in payload['ids']:
        collection.data.remove(data_id)
    return Response()
def getEvents(self):
    """Gets all events from Redunda and returns them.

    :returns: Returns a dictionary of the events which were fetched.
    """
    url = "https://redunda.sobotics.org/events.json"
    # POST the instance key; Redunda identifies this bot by it.
    payload = parse.urlencode({"key": self.key}).encode()
    response = request.urlopen(request.Request(url, payload))
    return json.loads(response.read().decode("utf-8"))
def save_configuration(self, configuration_file):
    '''Saving configuration

    Parameters
    ----------
    configuration_file : string
        Filename of the configuration file.
    '''
    # A name that is not an open pytables file and does not end in .h5 is
    # written as plain text; everything else goes to HDF5. The isinstance
    # check runs first so splitext is never applied to a File object.
    if not isinstance(configuration_file, tb.file.File):
        extension = os.path.splitext(configuration_file)[1].strip().lower()
        if extension != ".h5":
            return save_configuration_to_text_file(self, configuration_file)
    return save_configuration_to_hdf5(self, configuration_file)
def build(self, builder):
    """Build XML by appending to builder."""
    attributes = dict(LocationOID=str(self.oid))
    builder.start("LocationRef", attributes)
    builder.end("LocationRef")
def find_by_field(self, table, field, field_value):
    '''Query the database for records matching the given condition.

    Args:
        table: table name (str)
        field: column name
        field_value: value the column must equal

    Returns:
        On success: [dict] the matching records
        On failure: -1, with the error information printed

    .. note:: SECURITY (review): the SQL statement is built by string
       interpolation, so ``table``, ``field`` and ``field_value`` must
       come from trusted sources only -- untrusted input enables SQL
       injection. Consider switching ``field_value`` to a parameterized
       query if ``self.query`` supports bound parameters.
    '''
    sql = "select * from {} where {} = '{}'".format(table, field, field_value)
    res = self.query(sql)
    return res
def max_version(self):
    """Version with the most downloads.

    :return: A tuple of the form (version, n_downloads)
    """
    downloads = self.version_downloads
    if not downloads:
        # No release data available at all.
        return None, 0
    version, count = max(downloads.items(), key=lambda kv: kv[1])
    return version, count
def get_user():
    '''Get the current user.'''
    if HAS_PWD:
        # POSIX: resolve the effective UID to a user name.
        name = pwd.getpwuid(os.geteuid()).pw_name
    elif HAS_WIN_FUNCTIONS and salt.utils.win_functions.HAS_WIN32:
        name = salt.utils.win_functions.get_current_user()
    else:
        raise CommandExecutionError(
            'Required external library (pwd or win32api) not installed')
    return salt.utils.stringutils.to_unicode(name)
def start_end_from_segments(segment_file):
    """Return the start and end time arrays from a segment file.

    Parameters
    ----------
    segment_file: xml segment file

    Returns
    -------
    start: numpy.ndarray
    end: numpy.ndarray
    """
    from glue.ligolw.ligolw import LIGOLWContentHandler as handler
    lsctables.use_in(handler)
    document = ligolw_utils.load_filename(segment_file, False,
                                          contenthandler=handler)
    seg_table = table.get_table(document, lsctables.SegmentTable.tableName)

    def _column(name):
        # Pull one table column into a numpy array.
        return numpy.array(seg_table.getColumnByName(name))

    # Combine integer seconds with the nanosecond columns into float times.
    start = _column('start_time') + _column('start_time_ns') * 1e-9
    end = _column('end_time') + _column('end_time_ns') * 1e-9
    return start, end
def simple_in_memory_settings(cls):
    """Decorator that returns a class that "persists" data in-memory. Mostly
    useful for testing.

    :param cls: the class whose features should be persisted in-memory
    :return: A new class that will persist features in memory
    """
    # A fresh Settings class (and thus a fresh in-memory database) is
    # created per decorated class so instances do not share state.
    class Settings(ff.PersistenceSettings):
        id_provider = ff.UuidProvider()
        key_builder = ff.StringDelimitedKeyBuilder()
        database = ff.InMemoryDatabase(key_builder=key_builder)
    class Model(cls, Settings):
        pass
    # Masquerade as the original class for nicer reprs and pickling lookups.
    Model.__name__ = cls.__name__
    Model.__module__ = cls.__module__
    return Model
def register_module_alias(self, alias, module_path, after_init=False):
    """Adds an alias for a module.

    http://uwsgi-docs.readthedocs.io/en/latest/PythonModuleAlias.html

    :param str|unicode alias:
    :param str|unicode module_path:
    :param bool after_init: add a python module alias after uwsgi module initialization
    """
    option = 'post-pymodule-alias' if after_init else 'pymodule-alias'
    self._set(option, '{}={}'.format(alias, module_path), multi=True)
    return self._section
def check_new_round(self, hours=24, tournament=1):
    """Check if a new round has started within the last `hours`.

    Args:
        hours (int, optional): timeframe to consider, defaults to 24
        tournament (int): ID of the tournament (optional, defaults to 1)

    Returns:
        bool: True if a new round has started, False otherwise.

    Example:
        >>> NumerAPI().check_new_round()
        False
    """
    # "number: 0" asks the API for the most recent round.
    query = '''
        query($tournament: Int!) {
          rounds(tournament: $tournament
                 number: 0) {
            number
            openTime
          }
        }
    '''
    response = self.raw_query(query, {'tournament': tournament})
    round_info = response['data']['rounds'][0]
    if round_info is None:
        return False
    open_time = utils.parse_datetime_string(round_info['openTime'])
    cutoff = (datetime.datetime.utcnow().replace(tzinfo=pytz.utc)
              - datetime.timedelta(hours=hours))
    return open_time > cutoff
def _load_plugin(self, plugin_script, args=None, config=None):
    """Load the plugin (script), init it and add to the _plugin dict.

    :param plugin_script: file name of the plugin module
        (e.g. ``glances_cpu.py``)
    :param args: command line arguments forwarded to the plugin
    :param config: configuration object, only forwarded to the plugins
        that accept one
    """
    # The key is the plugin name
    # for example, the file glances_xxx.py
    # generate self._plugins_list["xxx"] = ...
    name = plugin_script[len(self.header):-3].lower()
    try:
        # Import the plugin
        plugin = __import__(plugin_script[:-3])
        # Init and add the plugin to the dictionary
        if name in ('help', 'amps', 'ports', 'folders'):
            # These plugins also take the configuration object.
            self._plugins[name] = plugin.Plugin(args=args, config=config)
        else:
            self._plugins[name] = plugin.Plugin(args=args)
        # Set the disable_<name> to False by default
        if self.args is not None:
            setattr(self.args, 'disable_' + name, getattr(self.args, 'disable_' + name, False))
    except Exception as e:
        # If a plugin can not be loaded, display a critical message
        # on the console but do not crash
        logger.critical("Error while initializing the {} plugin ({})".format(name, e))
        logger.error(traceback.format_exc())
def error(self, coro):
    """A decorator that registers a coroutine as a local error handler.

    A local error handler is an :func:`.on_command_error` event limited to
    a single command. However, the :func:`.on_command_error` is still
    invoked afterwards as the catch-all.

    Parameters
    -----------
    coro: :ref:`coroutine <coroutine>`
        The coroutine to register as the local error handler.

    Raises
    -------
    TypeError
        The coroutine passed is not actually a coroutine.
    """
    # Reject plain callables early; the handler must be awaitable.
    if not asyncio.iscoroutinefunction(coro):
        raise TypeError('The error handler must be a coroutine.')
    self.on_error = coro
    return coro
def add_logger(self, cb, level='NORMAL', filters='ALL'):
    '''Add a callback to receive log events from this component.

    @param cb The callback function to receive log events. It must have the
        signature cb(name, time, source, level, message), where name is the
        name of the component the log record came from, time is a
        floating-point time stamp, source is the name of the logger that
        provided the log record, level is the log level of the record and
        message is a text string.
    @param level The maximum level of log records to receive.
    @param filters Filter the objects from which to receive log messages.
    @return An ID for this logger. Use this ID in future operations such as
        removing this logger.
    @raises AddLoggerError'''
    with self._mutex:
        # Create a servant that forwards log records to the callback.
        obs = sdo.RTCLogger(self, cb)
        # A fresh UUID identifies this logger registration.
        uuid_val = uuid.uuid4()
        intf_type = obs._this()._NP_RepositoryId
        # Pack the level/filter options into an SDO NVList for the profile.
        props = {'logger.log_level': level, 'logger.filter': filters}
        props = utils.dict_to_nvlist(props)
        sprof = SDOPackage.ServiceProfile(id=uuid_val.get_bytes(),
            interface_type=intf_type, service=obs._this(), properties=props)
        conf = self.object.get_configuration()
        res = conf.add_service_profile(sprof)
        if res:
            # Registration accepted: remember the servant under its UUID.
            self._loggers[uuid_val] = obs
            return uuid_val
        # The component refused the service profile.
        raise exceptions.AddLoggerError(self.name)
def buildDescriptor(self, dir=None, configuration='Development', args=None, suppressOutput=False):
    """Builds the editor modules for the Unreal project or plugin in the
    specified directory, using the specified build configuration.

    :param dir: directory containing the project/plugin descriptor; defaults
        to the current working directory *at call time* (the previous default
        of ``os.getcwd()`` was evaluated once at import time, freezing the
        interpreter's startup directory)
    :param configuration: build configuration name; must be one of
        self.validBuildConfigurations()
    :param args: extra arguments passed through to UnrealBuildTool (defaults
        to a fresh empty list per call; a mutable ``[]`` default would be
        shared between calls)
    :param suppressOutput: if True, capture UBT output instead of echoing it
    :raises UnrealManagerException: if the build configuration is invalid
    """
    # Resolve defaults at call time, not at definition time
    if dir is None:
        dir = os.getcwd()
    if args is None:
        args = []
    # Verify that an Unreal project or plugin exists in the specified directory
    descriptor = self.getDescriptor(dir)
    descriptorType = 'project' if self.isProject(descriptor) else 'plugin'
    # If the project or plugin is Blueprint-only, there is no C++ code to build
    if not os.path.exists(os.path.join(dir, 'Source')):
        Utility.printStderr('Pure Blueprint {}, nothing to build.'.format(descriptorType))
        return
    # Verify that the specified build configuration is valid
    if configuration not in self.validBuildConfigurations():
        raise UnrealManagerException('invalid build configuration "' + configuration + '"')
    # Generate the arguments to pass to UBT
    target = self.getDescriptorName(descriptor) + 'Editor' if self.isProject(descriptor) else 'UE4Editor'
    baseArgs = ['-{}='.format(descriptorType) + descriptor]
    # Perform the build
    self._runUnrealBuildTool(target, self.getPlatformIdentifier(), configuration, baseArgs + args, capture=suppressOutput)
def from_text_list(name, ttl, rdclass, rdtype, text_rdatas):
    """Create an RRset with the given name, TTL, class and type, populated
    from rdatas supplied in text form.

    @rtype: dns.rrset.RRset object"""
    # Accept textual forms for the name, class and type and normalise them
    # to their dnspython representations before building the RRset.
    if isinstance(name, (str, unicode)):
        name = dns.name.from_text(name, None)
    if isinstance(rdclass, (str, unicode)):
        rdclass = dns.rdataclass.from_text(rdclass)
    if isinstance(rdtype, (str, unicode)):
        rdtype = dns.rdatatype.from_text(rdtype)
    rrset = RRset(name, rdclass, rdtype)
    rrset.update_ttl(ttl)
    # Parse each textual rdata and attach it to the set.
    for text in text_rdatas:
        rrset.add(dns.rdata.from_text(rrset.rdclass, rrset.rdtype, text))
    return rrset
def add_node_set_configuration(self, param_name, node_to_value):
    """Assign a model parameter value to every node in the given mapping.

    :param param_name: parameter identifier (as specified by the chosen model)
    :param node_to_value: dictionary mapping each node to a parameter value"""
    # Delegate each (node, value) pair to the single-node setter.
    for node_id, value in future.utils.iteritems(node_to_value):
        self.add_node_configuration(param_name, node_id, value)
def Validate(self, value):
    """Validate a potential list.

    Rejects strings (iterable but not list-like) and any non-list/tuple
    value, then validates every element with the configured validator.
    """
    if isinstance(value, string_types):
        # Strings iterate over characters; they must not pass as lists.
        raise TypeValueError("Value must be an iterable not a string.")
    if not isinstance(value, (list, tuple)):
        raise TypeValueError("%r not a valid List" % value)
    # Every element must itself validate against our element type.
    return [self.validator.Validate(item) for item in value]
def create_fleet(Name=None, ImageName=None, InstanceType=None, ComputeCapacity=None, VpcConfig=None, MaxUserDurationInSeconds=None, DisconnectTimeoutInSeconds=None, Description=None, DisplayName=None, EnableDefaultInternetAccess=None):
    """Creates a new AppStream fleet.

    See also: AWS API Documentation.

    :type Name: string
    :param Name: [REQUIRED] A unique identifier for the fleet.
    :type ImageName: string
    :param ImageName: [REQUIRED] Unique name of the image used by the fleet.
    :type InstanceType: string
    :param InstanceType: [REQUIRED] The instance type of compute resources
        for the fleet. Fleet instances are launched from this instance type.
    :type ComputeCapacity: dict
    :param ComputeCapacity: [REQUIRED] The parameters for the capacity
        allocated to the fleet. ``DesiredInstances`` (integer, REQUIRED) is
        the desired number of streaming instances.
    :type VpcConfig: dict
    :param VpcConfig: The VPC configuration for the fleet. ``SubnetIds``
        (list of strings) is the list of subnets to which a network
        interface is established from the fleet instance.
    :type MaxUserDurationInSeconds: integer
    :param MaxUserDurationInSeconds: The maximum time for which a streaming
        session can run, in seconds (600-57600).
    :type DisconnectTimeoutInSeconds: integer
    :param DisconnectTimeoutInSeconds: The time after disconnection when a
        session is considered to have ended, in seconds (60-57600). A user
        who reconnects within this interval is connected back to their
        previous session.
    :type Description: string
    :param Description: The description of the fleet.
    :type DisplayName: string
    :param DisplayName: The display name of the fleet.
    :type EnableDefaultInternetAccess: boolean
    :param EnableDefaultInternetAccess: Enables or disables default Internet
        access for the fleet.
    :rtype: dict
    :return: A dict with a ``Fleet`` key describing the created fleet:
        Arn, Name, DisplayName, Description, ImageName, InstanceType,
        ComputeCapacityStatus (Desired/Running/InUse/Available),
        MaxUserDurationInSeconds, DisconnectTimeoutInSeconds,
        State ('STARTING'|'RUNNING'|'STOPPING'|'STOPPED'), VpcConfig,
        CreatedTime, FleetErrors (ErrorCode/ErrorMessage pairs) and
        EnableDefaultInternetAccess.
    """
    # Generated stub: the real implementation is supplied by the service
    # client at runtime.
    pass
def _dyn_loader(self, module: str, kwargs: dict):
    """Dynamically load a module file and return an instance of its Module class.

    :param module: base name of the module (file "<module>.py" must exist in
        this package's "modules" directory)
    :param kwargs: keyword arguments forwarded to the Module constructor
        (the previous ``kwargs: str`` annotation was wrong -- the value is
        unpacked with ``**`` and must be a mapping)
    :raises Exception: if the named module file is not present
    :return: a new instance of the module's ``Module`` class
    """
    package_directory: str = os.path.dirname(os.path.abspath(__file__))
    # os.path.join instead of "+ '/modules'" keeps the path portable
    modules_dir: str = os.path.join(package_directory, "modules")
    filename: str = module + ".py"
    # Refuse anything that is not a known module file in the package
    if filename not in os.listdir(modules_dir):
        raise Exception("Module %s is not valid" % filename)
    import_path: str = "%s.%s" % (self.MODULE_PATH, module)
    imported = import_module(import_path)
    module_cls = getattr(imported, 'Module')
    return module_cls(**kwargs)
def DOM_setFileInputFiles(self, files, **kwargs):
    """Sets files for the given file input element.

    Function path: DOM.setFileInputFiles (domain DOM, method setFileInputFiles).
    WARNING: this protocol function is marked 'Experimental'!

    Required arguments:
        'files' (type: array) -> Array of file paths to set.
    Optional keyword arguments:
        'nodeId' (type: NodeId) -> Identifier of the node.
        'backendNodeId' (type: BackendNodeId) -> Identifier of the backend node.
        'objectId' (type: Runtime.RemoteObjectId) -> JavaScript object id of
            the node wrapper.
    The protocol command has no return value; the result of
    synchronous_command is returned as-is.
    """
    assert isinstance(files, (list, tuple)
        ), "Argument 'files' must be of type '['list', 'tuple']'. Received type: '%s'" % type(files)
    allowed = ['nodeId', 'backendNodeId', 'objectId']
    given = list(kwargs.keys())
    assert all((key in allowed) for key in given
        ), "Allowed kwargs are ['nodeId', 'backendNodeId', 'objectId']. Passed kwargs: %s" % given
    return self.synchronous_command('DOM.setFileInputFiles', files=files, **kwargs)
def setup_menu(self):
    """Build and return the widget's context menu, holding a Copy action."""
    # The action is kept on self so shortcuts stay alive with the widget.
    self.copy_action = create_action(
        self, _('Copy'),
        shortcut=keybinding('Copy'),
        icon=ima.icon('editcopy'),
        triggered=self.copy,
        context=Qt.WidgetShortcut)
    context_menu = QMenu(self)
    add_actions(context_menu, [self.copy_action])
    return context_menu
def export_losses_by_event(ekey, dstore):
    """Export the losses_by_event dataset of the datastore as a CSV file.

    :param ekey: export key, i.e. a pair (datastore key, fmt)
    :param dstore: datastore object
    """
    oq = dstore['oqparam']
    writer = writers.CsvWriter(fmt=writers.FIVEDIGITS)
    dest = dstore.build_fname('losses_by_event', '', 'csv')
    if oq.calculation_mode.startswith('scenario'):
        # One row per event id, one column per loss type.
        dtlist = [('eid', U64)] + oq.loss_dt_list()
        arr = dstore['losses_by_event'].value[['eid', 'loss']]
        writer.save(arr.copy().view(dtlist), dest)
    elif oq.calculation_mode == 'ebrisk':
        tagcol = dstore['assetcol/tagcol']
        lbe = dstore['losses_by_event'].value
        lbe.sort(order='eid')
        # Tag dictionary used to label the aggregated loss table.
        dic = dict(tagnames=['event_id', 'loss_type'] + oq.aggregate_by)
        for tagname in oq.aggregate_by:
            dic[tagname] = getattr(tagcol, tagname)
        # '?' acts as the placeholder for index 0 of each tag axis.
        dic['event_id'] = ['?'] + list(lbe['eid'])
        dic['loss_type'] = ('?',) + oq.loss_dt().names
        aw = hdf5.ArrayWrapper(lbe['loss'], dic)  # shape (E, L, T...)
        writer.save(aw.to_table(), dest)
    else:
        dtlist = [('event_id', U64), ('rup_id', U32), ('year', U32)] + oq.loss_dt_list()
        eids = dstore['losses_by_event']['eid']
        year_of = year_dict(dstore['events']['eid'], oq.investigation_time, oq.ses_seed)
        arr = numpy.zeros(len(dstore['losses_by_event']), dtlist)
        arr['event_id'] = eids
        # NOTE(review): the rupture id appears to live in the upper 32 bits
        # of the event id -- confirm the eid encoding before relying on it.
        arr['rup_id'] = arr['event_id'] / TWO32
        arr['year'] = [year_of[eid] for eid in eids]
        loss = dstore['losses_by_event']['loss'].T  # shape (L, E)
        for losses, loss_type in zip(loss, oq.loss_dt().names):
            arr[loss_type] = losses
        writer.save(arr, dest)
    return writer.getsaved()
def get_rotations(self):
    """Return all rotation matrices; for centrosymmetric crystals the
    inverted set is stacked after the plain rotations."""
    rots = self.rotations
    if not self.centrosymmetric:
        return rots
    return np.vstack((rots, -rots))
def deleted(self, src, path):
    """Update the reference tree when a handled file is deleted."""
    # NOTE(review): a parents entry may be None -- presumably "no recorded
    # parents"; confirm with the code that populates self.parents.
    if self.parents[path] is not None:
        for parent in self.parents[path]:
            # Detach the deleted file from each parent's children list.
            self.children[parent].remove(path)
            if not self.children[parent]:
                # Drop parents that no longer reference any children.
                del self.children[parent]
        # Forget the deleted file's own parent record.
        del self.parents[path]
def _release ( self ) :
"""Destroy self since closures cannot be called again .""" | del self . funcs
del self . variables
del self . variable_values
del self . satisfied |
def convert_to_shape(x):
    """Convert the input to a Shape.

    Args:
        x: a Shape, a string describing one, or None.
    Returns:
        a Shape, or None when x is None.
    Raises:
        ValueError: if x cannot be converted to a Shape.
    """
    # None and ready-made Shapes pass through unchanged.
    if x is None or isinstance(x, Shape):
        return x
    if isinstance(x, str):
        x = _parse_string_to_list_of_pairs(x, seconds_to_int=True)
    return Shape(x)
def is60(msg):
    """Check whether a message is likely to carry BDS code 6,0.

    Args:
        msg (String): 28 bytes hexadecimal message string
    Returns:
        bool: True or False
    """
    if allzeros(msg):
        return False
    d = hex2bin(data(msg))
    # Status bits sit at positions 1, 13, 24, 35 and 46; each one guards the
    # field spanning the (start, end) range that follows it.
    for status_bit, field_start, field_end in (
            (1, 2, 12), (13, 14, 23), (24, 25, 34), (35, 36, 45), (46, 47, 56)):
        if wrongstatus(d, status_bit, field_start, field_end):
            return False
    # Reject physically implausible decoded values.
    ias = ias60(msg)
    if ias is not None and ias > 500:
        return False
    mach = mach60(msg)
    if mach is not None and mach > 1:
        return False
    vr_baro = vr60baro(msg)
    if vr_baro is not None and abs(vr_baro) > 6000:
        return False
    vr_ins = vr60ins(msg)
    if vr_ins is not None and abs(vr_ins) > 6000:
        return False
    return True
def describe_configs(self, config_resources, include_synonyms=False):
    """Fetch configuration parameters for one or more Kafka resources.

    :param config_resources: A list of ConfigResource objects.
        Any keys in ConfigResource.configs dict will be used to filter the
        result. Setting the configs dict to None will get all values. An
        empty dict will get zero values (as per Kafka protocol).
    :param include_synonyms: If True, return synonyms in response. Not
        supported by all versions. Default: False.
    :return: Appropriate version of DescribeConfigsResponse class.
    """
    # Select the highest DescribeConfigs protocol version the broker supports.
    version = self._matching_api_version(DescribeConfigsRequest)
    if version == 0:
        # v0 has no include_synonyms field, so requesting synonyms must fail.
        if include_synonyms:
            raise IncompatibleBrokerVersion(
                "include_synonyms requires DescribeConfigsRequest >= v1, which is not supported by Kafka {}."
                .format(self.config['api_version']))
        request = DescribeConfigsRequest[version](
            resources=[self._convert_describe_config_resource_request(config_resource)
                       for config_resource in config_resources])
    elif version == 1:
        request = DescribeConfigsRequest[version](
            resources=[self._convert_describe_config_resource_request(config_resource)
                       for config_resource in config_resources],
            include_synonyms=include_synonyms)
    else:
        raise NotImplementedError(
            "Support for DescribeConfigs v{} has not yet been added to KafkaAdminClient."
            .format(version))
    # Send via the least-loaded node and return the pending response.
    return self._send_request_to_node(self._client.least_loaded_node(), request)
def add_transition(self, from_state_id, from_outcome, to_state_id, to_outcome, transition_id=None):
    """Add a transition to the container state.

    Note: either the to_state_id or the to_outcome needs to be "None".

    :param from_state_id: the source state of the transition
    :param from_outcome: the outcome id of the source state to connect the transition to
    :param to_state_id: the target state of the transition
    :param to_outcome: the target outcome id of a container state
    :param transition_id: an optional transition id for the new transition
    :return: the id of the newly added transition
    """
    transition_id = self.check_transition_id(transition_id)
    # Start transitions are stored with both from_state_id and from_outcome
    # set to None.
    if from_state_id == self.state_id and from_outcome is None:
        from_state_id = None
    self.transitions[transition_id] = Transition(
        from_state_id, from_outcome, to_state_id, to_outcome, transition_id, self)
    # Wake up every state waiting for a transition to be connected.
    with self._transitions_cv:
        self._transitions_cv.notify_all()
    return transition_id
def cluster(self, method, **kwargs):
    """Cluster the tribe.

    Splits the templates of this tribe into groups and returns one new
    Tribe per group, each of which could be stacked.

    :type method: str
    :param method:
        Method of stacking, see :mod:`eqcorrscan.utils.clustering`
    :return: List of tribes.

    .. rubric:: Example
    """
    from eqcorrscan.utils import clustering
    cluster_func = getattr(clustering, method)
    grouped_tribes = []
    if method in ['space_cluster', 'space_time_cluster']:
        catalog = Catalog([t.event for t in self.templates])
        for group in cluster_func(catalog, **kwargs):
            sub_tribe = Tribe()
            # Collect the templates whose event landed in this group.
            for event in group:
                sub_tribe.templates.extend(
                    [t for t in self.templates if t.event == event])
            grouped_tribes.append(sub_tribe)
    return grouped_tribes
def safe_decode(text, incoming=None, errors='strict'):
    """Decode incoming text/bytes using *incoming* unless already unicode.

    This function was copied from novaclient.openstack.strutils.

    :param incoming: text's current encoding
    :param errors: errors handling policy; see
        http://docs.python.org/2/library/codecs.html for valid values
    :returns: text, or a unicode *incoming*-decoded representation of it
    :raises TypeError: if text is not an instance of str
    """
    if not isinstance(text, (six.string_types, six.binary_type)):
        raise TypeError("%s can't be decoded" % type(text))
    if isinstance(text, six.text_type):
        # Already unicode: nothing to decode.
        return text
    encoding = incoming or (sys.stdin.encoding or sys.getdefaultencoding())
    try:
        return text.decode(encoding, errors)
    except UnicodeDecodeError:
        # The guessed codec could not handle the bytes. This mostly happens
        # when LANG is unset and the interpreter falls back to ASCII/ANSI
        # codecs that cannot decode non-ASCII input; retry with UTF-8,
        # which is an ASCII superset.
        return text.decode('utf-8', errors)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.