signature stringlengths 29 44.1k | implementation stringlengths 0 85.2k |
|---|---|
def view(self, start, end, max_items=None, *args, **kwargs):
    """Implement the CalendarView option to FindItem.

    Unlike filter(), which only returns the master CalendarItem for recurring
    items, view() unfolds recurring items and returns all CalendarItem
    occurrences, as one would normally expect when presenting a calendar.

    Supports the same semantics as filter(), except that 'start' and 'end'
    are required and denote the timespan of the view: all items overlapping
    the timespan are returned (items that end exactly on 'start' are also
    returned, for some reason). EWS does not allow combining CalendarView
    with search restrictions (filter and exclude).

    :param start: start of the view's timespan
    :param end: end of the view's timespan
    :param max_items: optional maximum number of items returned in this view
    """
    query_set = QuerySet(self).filter(*args, **kwargs)
    query_set.calendar_view = CalendarView(
        start=start, end=end, max_items=max_items)
    return query_set
def load_from_file(path, fmt=None, is_training=True):
    """Load question-passage pairs from a SQuAD JSON file or a TSV file.

    :param path: path to the data file
    :param fmt: 'squad' or 'csv' (tab-separated); defaults to 'squad'
    :param is_training: if True, SQuAD answers are unfolded into character
        offsets (one output pair per answer); otherwise answers are omitted
    :return: list of dicts with keys 'passage', 'question', 'id' and, for
        SQuAD training data, 'answer_begin'/'answer_end'
    :raises ValueError: if fmt is not 'squad' or 'csv'
    """
    if fmt is None:
        fmt = 'squad'
    # Raise instead of assert: asserts are stripped under `python -O`.
    if fmt not in ('squad', 'csv'):
        raise ValueError('input format must be squad or csv')
    qp_pairs = []
    if fmt == 'squad':
        with open(path) as data_file:
            data = json.load(data_file)['data']
        for doc in data:
            for paragraph in doc['paragraphs']:
                passage = paragraph['context']
                for qa_pair in paragraph['qas']:
                    question = qa_pair['question']
                    qa_id = qa_pair['id']
                    if not is_training:
                        qp_pairs.append(
                            {'passage': passage,
                             'question': question,
                             'id': qa_id})
                    else:
                        # One training example per annotated answer span.
                        for answer in qa_pair['answers']:
                            answer_begin = int(answer['answer_start'])
                            answer_end = answer_begin + len(answer['text'])
                            qp_pairs.append(
                                {'passage': passage,
                                 'question': question,
                                 'id': qa_id,
                                 'answer_begin': answer_begin,
                                 'answer_end': answer_end})
    else:
        with open(path, newline='') as csvfile:
            reader = csv.reader(csvfile, delimiter='\t')
            # Use the 0-based row index as a synthetic id.
            for line_num, row in enumerate(reader):
                qp_pairs.append(
                    {'passage': row[1], 'question': row[0], 'id': line_num})
    return qp_pairs
def _enumerate_directions(x):
    """For an n-dimensional tensor, return tensors that enumerate each axis.

    >>> x = np.zeros([2, 3, 4])  # or any other tensor
    >>> i, j, k = _enumerate_directions(x)
    >>> result = i + 2 * j + 3 * k

    result[i, j, k] = i + 2 * j + 3 * k, and also has the same shape as
    result. Works very similarly to numpy.ogrid (open indexing grid).
    """
    backend = get_backend(x)
    full_shape = backend.shape(x)
    rank = len(full_shape)
    directions = []
    for axis, length in enumerate(full_shape):
        # A range along this axis, shaped to broadcast against the tensor.
        broadcast_shape = [1] * rank
        broadcast_shape[axis] = length
        directions.append(
            backend.reshape(backend.arange(0, length), broadcast_shape))
    return directions
def get_trans_reg(self, name: Text, default: Any = None) -> Any:
    """Convenience accessor for one entry of the transition register.

    :param name: Name of the register entry you want to see
    :param default: What to return when the entry is missing
    """
    transitions = self.register.get(Register.TRANSITION, {})
    return transitions.get(name, default)
def bind(self, instance, auto=False):
    """Bind dependencies to an instance's methods.

    :param instance: object whose methods receive dependency injection
    :param auto: follow updates of the DI container and refresh the binds
        once we get something new
    :return: the same instance, for chaining
    """
    # Collect every plain function defined anywhere in the MRO.
    methods = []
    for klass in inspect.getmro(type(instance)):
        for attr_name, attr in klass.__dict__.items():
            if inspect.isfunction(attr):
                methods.append((attr_name, attr))
    try:
        # Resolve all dependencies first, then inject: a KeyError during
        # resolution must not leave some methods already called.
        endpoint_deps = [(func, self.entrypoint_deps(func))
                         for _, func in methods]
        for func, deps in endpoint_deps:
            if len(deps) > 0:
                func(instance, **deps)
    except KeyError:
        pass
    if auto and instance not in self.current_scope.get_auto_bind_list():
        self.current_scope.auto_bind(instance)
    return instance
def get_persistent_items(self):
    """Return attached container items and persistent container configs.

    Each returned item is in the format ``(config name, instance/attached
    name)``, where the instance name can also be ``None``.

    :return: Lists of attached items.
    :rtype: (list[(unicode | str, unicode | str)],
             list[unicode | str, unicode | str | NoneType])
    """
    attached_items = []
    for container, config in self:
        for attached in config.attaches:
            attached_items.append((container, attached))
    persistent_containers = []
    for container, config in self:
        if config.persistent:
            # A config without explicit instances yields a single None slot.
            for instance in config.instances or [None]:
                persistent_containers.append((container, instance))
    return attached_items, persistent_containers
def X(self, i, j=slice(None, None, None)):
    """Compute the design matrix at the given *PLD* order and indices.

    The columns are the *PLD* vectors for the target at the corresponding
    order, computed as the product of the fractional pixel flux of all sets
    of :py:obj:`n` pixels, where :py:obj:`n` is the *PLD* order.

    :param i: the PLD order minus one (products run over ``i + 1`` vectors)
    :param j: index or slice selecting the cadences to use (default: all)
    """
    # Fractional pixel flux, normalized per cadence.
    X1 = self.fpix[j] / self.norm[j].reshape(-1, 1)
    # np.prod replaces np.product, which was deprecated and removed in
    # NumPy 2.0.
    X = np.prod(list(multichoose(X1.T, i + 1)), axis=1).T
    if self.X1N is not None:
        # Append the neighbor design matrix raised to the current order.
        return np.hstack([X, self.X1N[j] ** (i + 1)])
    else:
        return X
def create(zpool, *vdevs, **kwargs):
    '''.. versionadded:: 2015.5.0

    Create a simple zpool, a mirrored zpool, a zpool having nested VDEVs, a
    hybrid zpool with cache, spare and log drives, or a zpool with RAIDZ-1,
    RAIDZ-2 or RAIDZ-3.

    zpool : string
        Name of storage pool
    vdevs : string
        One or more devices
    force : boolean
        Forces use of vdevs, even if they appear in use or specify a
        conflicting replication level.
    mountpoint : string
        Sets the mount point for the root dataset
    altroot : string
        Equivalent to "-o cachefile=none,altroot=root"
    properties : dict
        Additional pool properties
    filesystem_properties : dict
        Additional filesystem properties
    createboot : boolean
        create a boot partition

        .. versionadded:: 2018.3.0

        .. warning:
            This is only available on illumos and Solaris

    CLI Examples:

    .. code-block:: bash

        salt '*' zpool.create myzpool /path/to/vdev1 [...] [force=True|False]
        salt '*' zpool.create myzpool mirror /path/to/vdev1 /path/to/vdev2 [...] [force=True|False]
        salt '*' zpool.create myzpool raidz1 /path/to/vdev1 /path/to/vdev2 raidz2 /path/to/vdev3 /path/to/vdev4 /path/to/vdev5 [...] [force=True|False]
        salt '*' zpool.create myzpool mirror /path/to/vdev1 [...] mirror /path/to/vdev2 /path/to/vdev3 [...] [force=True|False]
        salt '*' zpool.create myhybridzpool mirror /tmp/file1 [...] log mirror /path/to/vdev1 [...] cache /path/to/vdev2 [...] spare /path/to/vdev3 [...] [force=True|False]

    .. note::

        Zpool and filesystem properties can be specified at the time of
        creation of the pool by passing the additional arguments
        "properties" and "filesystem_properties", each with their respective
        values in the form of a python dictionary:

        .. code-block:: text

            properties="{'property1': 'value1', 'property2': 'value2'}"
            filesystem_properties="{'property1': 'value1', 'property2': 'value2'}"

        Example:

        .. code-block:: bash

            salt '*' zpool.create myzpool /path/to/vdev1 [...] properties="{'property1': 'value1', 'property2': 'value2'}"
    '''
    ## Configure pool
    # NOTE: initialize the defaults
    zpool_flags = []
    zpool_opts = {}
    zpool_target = []

    # NOTE: push pool and filesystem properties
    pool_properties = kwargs.get('properties', {})
    filesystem_properties = kwargs.get('filesystem_properties', {})

    # NOTE: set extra config based on kwargs
    if kwargs.get('force', False):
        zpool_flags.append('-f')
    if kwargs.get('createboot', False) or 'bootsize' in pool_properties:
        zpool_flags.append('-B')
    if kwargs.get('altroot', False):
        zpool_opts['-R'] = kwargs.get('altroot')
    if kwargs.get('mountpoint', False):
        zpool_opts['-m'] = kwargs.get('mountpoint')

    # NOTE: append the pool name and specifications
    zpool_target.append(zpool)
    zpool_target.extend(vdevs)

    ## Create storage pool
    res = __salt__['cmd.run_all'](
        __utils__['zfs.zpool_command'](
            command='create',
            flags=zpool_flags,
            opts=zpool_opts,
            pool_properties=pool_properties,
            filesystem_properties=filesystem_properties,
            target=zpool_target,
        ),
        python_shell=False,
    )
    ret = __utils__['zfs.parse_command_result'](res, 'created')
    if ret['created']:
        ## NOTE: lookup zpool status for vdev config
        ret['vdevs'] = _clean_vdev_config(
            __salt__['zpool.status'](zpool=zpool)[zpool]['config'][zpool],
        )
    return ret
def disable_process_breakpoints(self, dwProcessId):
    """Disables all breakpoints for the given process.

    @type  dwProcessId: int
    @param dwProcessId: Process global ID.
    """
    # Code breakpoints.
    for code_bp in self.get_process_code_breakpoints(dwProcessId):
        self.disable_code_breakpoint(dwProcessId, code_bp.get_address())
    # Page breakpoints.
    for page_bp in self.get_process_page_breakpoints(dwProcessId):
        self.disable_page_breakpoint(dwProcessId, page_bp.get_address())
    # Hardware breakpoints are per-thread, so walk the process' threads.
    if self.system.has_process(dwProcessId):
        aProcess = self.system.get_process(dwProcessId)
    else:
        # Unknown process: build a snapshot of it to enumerate threads.
        aProcess = Process(dwProcessId)
        aProcess.scan_threads()
    for aThread in aProcess.iter_threads():
        dwThreadId = aThread.get_tid()
        for hw_bp in self.get_thread_hardware_breakpoints(dwThreadId):
            self.disable_hardware_breakpoint(dwThreadId, hw_bp.get_address())
def request(self, method, *, path=None, json=None, params=None, headers=None,
            timeout=None, backoff_cap=None, **kwargs):
    """Perform an HTTP request with the given parameters.

    Implements exponential backoff. If ``ConnectionError`` occurs, a
    timestamp equal to now + the default delay (``BACKOFF_DELAY``) is
    assigned to the object. The timestamp is in UTC. Next time the function
    is called, it either waits until the timestamp has passed or raises
    ``TimeoutError``. If ``ConnectionError`` occurs two or more times in a
    row, the retry count is incremented and the new timestamp is now + the
    default delay multiplied by two to the power of the number of retries.
    A successful request removes the backoff timestamp and resets the retry
    count to zero.

    Args:
        method (str): HTTP method (e.g.: ``'GET'``).
        path (str): API endpoint path (e.g.: ``'/transactions'``).
        json (dict): JSON data to send along with the request.
        params (dict): Dictionary of URL (query) parameters.
        headers (dict): Optional headers to pass to the request.
        timeout (int): Optional timeout in seconds.
        backoff_cap (int): The maximal allowed backoff delay in seconds
            to be assigned to a node.
        kwargs: Optional keyword arguments.
    """
    backoff_timedelta = self.get_backoff_timedelta()
    if timeout is not None and timeout < backoff_timedelta:
        # Cannot honour the timeout once the backoff wait is accounted for.
        raise TimeoutError
    if backoff_timedelta > 0:
        time.sleep(backoff_timedelta)
    if timeout is not None:
        # The time spent backing off counts against the caller's timeout.
        timeout -= backoff_timedelta
    connection_error = None
    try:
        response = self._request(
            method=method,
            timeout=timeout,
            url=self.node_url + path if path else self.node_url,
            json=json,
            params=params,
            headers=headers,
            **kwargs,
        )
    except ConnectionError as err:
        connection_error = err
        raise
    finally:
        # Success clears the backoff; failure extends it.
        self.update_backoff_time(
            success=connection_error is None, backoff_cap=backoff_cap)
    return response
def action_delete(self, ids):
    """Delete selected sessions, refusing to remove the current one."""
    current_selected = any(
        SessionActivity.is_current(sid_s=session_id) for session_id in ids)
    if current_selected:
        # Removing the current session would log the user out.
        flash('You could not remove your current session', 'error')
        return
    for session_id in ids:
        delete_session(sid_s=session_id)
    db.session.commit()
def separated(p, sep, mint, maxt=None, end=None):
    '''Repeat a parser `p` separated by `sep` between `mint` and `maxt` times.

    When `end` is None, a trailing separator is optional.
    When `end` is True, a trailing separator is required.
    When `end` is False, a trailing separator is not allowed.

    MATCHES AS MUCH AS POSSIBLE.

    Return list of values returned by `p`.
    '''
    # A falsy maxt (None or 0) means "exactly mint repetitions".
    maxt = maxt if maxt else mint

    @Parser
    def sep_parser(text, index):
        cnt, values, res = 0, Value.success(index, []), None
        while cnt < maxt:
            # For optional/forbidden trailing separators, the separator is
            # parsed *before* every `p` except the first.
            if end in [False, None] and cnt > 0:
                res = sep(text, index)
                if res.status:  # `sep` found, consume it (advance index)
                    index, values = res.index, Value.success(res.index, values.value)
                elif cnt < mint:
                    return res
                    # error: need more elements, but no `sep` found.
                else:
                    break
            res = p(text, index)
            if res.status:
                values = values.aggregate(Value.success(res.index, [res.value]))
                index, cnt = res.index, cnt + 1
            elif cnt >= mint:
                break
            else:
                return res
                # error: need more elements, but no `p` found.
            # For required trailing separators, the separator is parsed
            # *after* every `p`.
            if end is True:
                res = sep(text, index)
                if res.status:
                    index, values = res.index, Value.success(res.index, values.value)
                else:
                    return res
                    # error: trailing `sep` not found
            if cnt >= maxt:
                break
        return values
    return sep_parser
def _F(self, X):
    """Analytic solution of the projection integral.

    :param X: R/Rs, a scalar or an array of values
    :type X: float >0 (or array thereof)
    """
    if isinstance(X, (int, float)):
        # Scalar path: pick the closed form for the relevant regime.
        if 0 < X < 1:
            a = 1 / (X ** 2 - 1) * (
                1 - 2 / np.sqrt(1 - X ** 2)
                * np.arctanh(np.sqrt((1 - X) / (1 + X))))
        elif X == 1:
            a = 1. / 3
        elif X > 1:
            a = 1 / (X ** 2 - 1) * (
                1 - 2 / np.sqrt(X ** 2 - 1)
                * np.arctan(np.sqrt((X - 1) / (1 + X))))
        else:  # X == 0: evaluate the X < 1 branch at a tiny epsilon
            c = 0.0000001
            a = 1 / (-1) * (
                1 - 2 / np.sqrt(1)
                * np.arctanh(np.sqrt((1 - c) / (1 + c))))
    else:
        # Array path: apply the same piecewise formulas via boolean masks.
        a = np.empty_like(X)
        inner = (X < 1) & (X > 0)
        x = X[inner]
        a[inner] = 1 / (x ** 2 - 1) * (
            1 - 2 / np.sqrt(1 - x ** 2)
            * np.arctanh(np.sqrt((1 - x) / (1 + x))))
        a[X == 1] = 1. / 3.
        outer = X > 1
        x = X[outer]
        a[outer] = 1 / (x ** 2 - 1) * (
            1 - 2 / np.sqrt(x ** 2 - 1)
            * np.arctan(np.sqrt((x - 1) / (1 + x))))
        c = 0.0000001
        a[X == 0] = 1 / (-1) * (
            1 - 2 / np.sqrt(1) * np.arctanh(np.sqrt((1 - c) / (1 + c))))
    return a
def pretty(self):
    '''Return a string like '/foo/bar.py:230 in foo.bar.my_func'.'''
    location = '{}:{}'.format(self.filename, self.line_number)
    qualified_name = '{}.{}'.format(self.module_name, self.function_name)
    return '{} in {}'.format(location, qualified_name)
def key_absent(name, use_32bit_registry=False):
    r'''.. versionadded:: 2015.5.4

    Ensure a registry key is removed. This will remove the key, subkeys, and
    all value entries.

    Args:
        name (str):
            A string representing the full path to the key to be removed to
            include the hive and the keypath. The hive can be any of the
            following:
                - HKEY_LOCAL_MACHINE or HKLM
                - HKEY_CURRENT_USER or HKCU
                - HKEY_USER or HKU

        use_32bit_registry (bool):
            Use the 32bit portion of the registry. Applies only to 64bit
            windows. 32bit Windows will ignore this parameter. Default is
            False.

    Returns:
        dict: A dictionary showing the results of the registry operation.

    CLI Example:

        The following example will delete the ``SOFTWARE\DeleteMe`` key in
        the ``HKEY_LOCAL_MACHINE`` hive including all its subkeys and value
        pairs.

        .. code-block:: yaml

            remove_key_demo:
              reg.key_absent:
                - name: HKEY_CURRENT_USER\SOFTWARE\DeleteMe

        In the above example the path is interpreted as follows:
            - ``HKEY_CURRENT_USER`` is the hive
            - ``SOFTWARE\DeleteMe`` is the key
    '''
    ret = {'name': name,
           'result': True,
           'changes': {},
           'comment': ''}
    hive, key = _parse_key(name)

    # Nothing to do if the key is already gone.
    key_exists = __utils__['reg.read_value'](
        hive=hive, key=key,
        use_32bit_registry=use_32bit_registry)['success']
    if not key_exists:
        ret['comment'] = '{0} is already absent'.format(name)
        return ret

    ret['changes'] = {
        'reg': {'Removed': {'Key': r'{0}\{1}'.format(hive, key)}}}

    # Test mode: report what would change without touching the registry.
    if __opts__['test']:
        ret['result'] = None
        return ret

    # Delete the key and everything under it, then verify it is gone.
    __utils__['reg.delete_key_recursive'](
        hive=hive, key=key, use_32bit_registry=use_32bit_registry)
    still_present = __utils__['reg.read_value'](
        hive=hive, key=key,
        use_32bit_registry=use_32bit_registry)['success']
    if still_present:
        ret['result'] = False
        ret['changes'] = {}
        ret['comment'] = 'Failed to remove registry key {0}'.format(name)
    return ret
def despike(self, window_length=33, samples=True, z=2):
    """Clip samples deviating from a rolling median by more than z sigma.

    Args:
        window_length (int): window length in samples. Default 33 (or 5 m
            for most curves sampled at 0.1524 m intervals).
        samples (bool): window length is in samples. Use False for a window
            length given in metres.
        z (float): Z score.

    Returns:
        Curve.
    """
    # Convert a window given in metres to samples if necessary.
    divisor = 1 if samples else self.step
    window_length //= divisor
    # Express the z threshold in the curve's units.
    threshold = z * np.nanstd(self)
    curve_sm = self._rolling_window(window_length, np.median)
    spikes_up = np.where(np.nan_to_num(self - curve_sm) > threshold)[0]
    spikes_down = np.where(np.nan_to_num(curve_sm - self) > threshold)[0]
    out = np.copy(self)
    params = self.__dict__.copy()
    # Clamp outliers to the rolling median +/- the threshold.
    out[spikes_up] = curve_sm[spikes_up] + threshold
    out[spikes_down] = curve_sm[spikes_down] - threshold
    return Curve(out, params=params)
def get_date(context, value):
    """Try to return a DateTime.DateTime object for *value*.

    Accepts DateTime, datetime, or a date string; returns None when the
    value cannot be converted.
    """
    if not value:
        return None
    if isinstance(value, DateTime):
        return value
    if isinstance(value, datetime):
        return dt2DT(value)
    if not isinstance(value, basestring):
        return None

    def parse_with(date_string, fmt):
        # Return a datetime, or None when fmt is empty or does not match.
        if not fmt:
            return None
        try:
            struct_time = strptime(date_string, fmt)
            return datetime(*struct_time[:6])
        except ValueError:
            return None

    def locale_format(key, context):
        fmt = context.translate(key, domain="senaite.core", mapping={})
        # TODO: Is this replacement below strictly necessary?
        return fmt.replace(r"${", '%').replace('}', '')

    # Try prioritized formats: locale formats first, then ISO-ish fallbacks.
    formats = [
        locale_format("date_format_long", context),
        locale_format("date_format_short", context),
        "%Y-%m-%d %H:%M",
        "%Y-%m-%d",
        "%Y-%m-%d %H:%M:%S",
    ]
    for fmt in formats:
        parsed = parse_with(value, fmt)
        if not parsed:
            continue
        parsed = dt2DT(parsed)
        if parsed.timezoneNaive():
            # Use local timezone for tz naive strings
            # see http://dev.plone.org/plone/ticket/10141
            zone = parsed.localZone(safelocaltime(parsed.timeTime()))
            parts = parsed.parts()[:-1] + (zone,)
            parsed = DateTime(*parts)
        return parsed
    logger.warn("Unable to convert {} to datetime".format(value))
    return None
def plot_kmf(df, condition_col, censor_col, survival_col, strata_col=None,
             threshold=None, title=None, xlabel=None, ylabel=None, ax=None,
             with_condition_color="#B38600", no_condition_color="#A941AC",
             with_condition_label=None, no_condition_label=None,
             color_map=None, label_map=None, color_palette="Set1",
             ci_show=False, print_as_title=False):
    """Plot survival curves by splitting the dataset into two groups based on
    condition_col. Report results for a log-rank test (if two groups are
    plotted) or CoxPH survival analysis (if >2 groups) for association with
    survival.

    Regarding definition of groups:
        If condition_col is numeric, values are split into 2 groups.
            - if threshold is defined, the groups are split on being > or <
              condition_col
            - if threshold == 'median', the threshold is set to the median
              of condition_col
        If condition_col is categorical or string, results are plotted for
        each unique value in the dataset.
        If condition_col is None, results are plotted for all observations.

    Currently, if `strata_col` is given, the results are repeated among each
    stratum of the df. A truly "stratified" analysis is not yet supported
    but may be soon.

    Parameters
    ----------
    df: dataframe
    condition_col: string, column which contains the condition to split on
    survival_col: string, column which contains the survival time
    censor_col: string
    strata_col: optional string, denoting column containing data to
        stratify by (default: None)
    threshold: int or string; if int, condition_col is thresholded at int;
        if 'median', condition_col thresholded at its median;
        if 'median-per-strata', and if stratified analysis,
        then condition_col thresholded by strata
    title: Title for the plot, default None
    ax: an existing matplotlib ax, optional, default None
        note: not currently supported when `strata_col` is not None
    with_condition_color: str, hex code color for the with-condition curve
    no_condition_color: str, hex code color for the no-condition curve
    with_condition_label: str, optional, label for True condition case
    no_condition_label: str, optional, label for False condition case
    color_map: dict, optional, mapping of hex-values to condition text
        in the form of {value_name: color_hex_code}.
        defaults to `sb.color_palette` using `default_color_palette` name,
        or *_condition_color options in case of boolean operators.
    label_map: dict, optional, mapping of labels to condition text.
        defaults to "condition_name=condition_value", or *_condition_label
        options in case of boolean operators.
    color_palette: str, optional, name of sb.color_palette to use
        if color_map not provided.
    print_as_title: bool, optional, whether or not to print text
        within the plot's title vs. stdout, default False
    """
    # set reasonable default threshold value depending on type of condition_col
    if threshold is None:
        if df[condition_col].dtype != "bool" and np.issubdtype(df[condition_col].dtype, np.number):
            threshold = "median"
    # check inputs for threshold for validity
    elif isinstance(threshold, numbers.Number):
        logger.debug("threshold value is numeric")
    elif threshold not in ("median", "median-per-strata"):
        raise ValueError("invalid input for threshold. Must be numeric, None, 'median', or 'median-per-strata'.")
    elif threshold == "median-per-strata" and strata_col is None:
        raise ValueError("threshold given was 'median-per-strata' and yet `strata_col` was None. Did you mean 'median'?")
    # construct kwarg dict to pass to _plot_kmf_single.
    # start with args that do not vary according to strata_col
    arglist = dict(condition_col=condition_col, survival_col=survival_col,
                   censor_col=censor_col, threshold=threshold,
                   with_condition_color=with_condition_color,
                   no_condition_color=no_condition_color,
                   with_condition_label=with_condition_label,
                   no_condition_label=no_condition_label,
                   color_map=color_map, label_map=label_map,
                   xlabel=xlabel, ylabel=ylabel, ci_show=ci_show,
                   color_palette=color_palette,
                   print_as_title=print_as_title)
    # if strata_col is None, pass all parameters to _plot_kmf_single
    if strata_col is None:
        arglist.update(dict(df=df, title=title, ax=ax))
        return _plot_kmf_single(**arglist)
    else:
        # prepare for stratified analysis
        if threshold == "median":
            # by default, "median" threshold should be intra-strata median
            arglist["threshold"] = df[condition_col].dropna().median()
        elif threshold == "median-per-strata":
            arglist["threshold"] = "median"
        # create axis/subplots for stratified results
        if ax is not None:
            raise ValueError("ax not supported with stratified analysis.")
        n_strata = len(df[strata_col].unique())
        f, ax = plt.subplots(n_strata, sharex=True)
        # create results dict to hold per-strata results
        results = dict()
        # call _plot_kmf_single for each of the strata
        for i, (strat_name, strat_df) in enumerate(df.groupby(strata_col)):
            if n_strata == 1:
                arglist["ax"] = ax
            else:
                arglist["ax"] = ax[i]
            subtitle = "{}: {}".format(strata_col, strat_name)
            arglist["title"] = subtitle
            arglist["df"] = strat_df
            # NOTE(review): recursive call; strata_col is not in arglist so
            # the recursion takes the single (non-stratified) branch.
            results[subtitle] = plot_kmf(**arglist)
            [print(desc) for desc in results[subtitle].desc]
        if title:
            f.suptitle(title)
        return results
def dtype(self):
    """Pixel data type, with the file's byte order applied."""
    pixels = self.label['IsisCube']['Core']['Pixels']
    order = self.BYTE_ORDERS[pixels['ByteOrder']]
    base_type = self.PIXEL_TYPES[pixels['Type']]
    return base_type.newbyteorder(order)
def _gettables(self):
    """Return a list of hdf5 tables named PyMCsamples."""
    groups = self._h5file.list_nodes("/")
    if not groups:
        return []
    # Only groups whose names start with 'chain' hold sample tables.
    return [group.PyMCsamples for group in groups
            if group._v_name[:5] == 'chain']
def _load_texture(file_name, resolver):
    """Load a texture from a file into a PIL image.

    :param file_name: name of the texture file
    :param resolver: object whose ``get`` returns the raw file bytes
    :return: the decoded PIL image
    """
    raw_data = resolver.get(file_name)
    return PIL.Image.open(util.wrap_as_stream(raw_data))
def upload(state, host, hostname, filename, remote_filename=None,
           use_remote_sudo=False, ssh_keyscan=False, ssh_user=None):
    '''Upload files to other servers using ``scp``.

    + hostname: hostname to upload to
    + filename: file to upload
    + remote_filename: where to upload the file to (defaults to ``filename``)
    + use_remote_sudo: upload to a temporary location and move using sudo
    + ssh_keyscan: execute ``ssh.keyscan`` before uploading the file
    + ssh_user: connect with this user
    '''
    remote_filename = remote_filename or filename
    # Work out the scp target: host, or user@host.
    connection_target = (
        '@'.join((ssh_user, hostname)) if ssh_user else hostname)
    if ssh_keyscan:
        yield keyscan(state, host, hostname)
    if not use_remote_sudo:
        # No remote sudo needed - scp straight to the destination.
        yield 'scp {0} {1}:{2}'.format(
            filename, connection_target, remote_filename)
    else:
        # Need sudo on the remote side: scp to a temp path, then sudo-move.
        temp_remote_filename = state.get_temp_filename()
        yield 'scp {0} {1}:{2}'.format(
            filename, connection_target, temp_remote_filename)
        yield command(state, host, connection_target,
                      'sudo mv {0} {1}'.format(
                          temp_remote_filename, remote_filename))
def norm_int_dict(int_dict):
    """Normalize the values of the given dict of int values.

    Parameters
    ----------
    int_dict : dict
        A dict object mapping each key to an int value. (The original
        docstring said ``list``, which was wrong.)

    Returns
    -------
    dict
        A dict where each key is mapped to its relative part in the sum of
        all dict values.

    Example
    -------
    >>> dict_obj = {'a': 3, 'b': 5, 'c': 2}
    >>> result = norm_int_dict(dict_obj)
    >>> print(sorted(result.items()))
    [('a', 0.3), ('b', 0.5), ('c', 0.2)]
    """
    total = sum(int_dict.values())
    # NOTE: like the original, a non-empty dict whose values sum to zero
    # raises ZeroDivisionError; an empty dict returns {}.
    return {key: value / total for key, value in int_dict.items()}
def _AbandonInactiveProcessingTasks(self):
    """Marks processing tasks that exceed the inactive time as abandoned.

    This method does not lock the manager and should be called by a method
    holding the manager lock.
    """
    if not self._tasks_processing:
        return
    cutoff = time.time() - self._TASK_INACTIVE_TIME
    cutoff = int(cutoff * definitions.MICROSECONDS_PER_SECOND)
    # Collect first, then abandon, so the dict is not modified while
    # iterating over it.
    tasks_to_abandon = []
    for task_identifier, task in self._tasks_processing.items():
        if task.last_processing_time < cutoff:
            logger.debug('Abandoned processing task: {0:s}.'.format(
                task_identifier))
            self.SampleTaskStatus(task, 'abandoned_processing')
            tasks_to_abandon.append((task_identifier, task))
    for task_identifier, task in tasks_to_abandon:
        self._tasks_abandoned[task_identifier] = task
        del self._tasks_processing[task_identifier]
def on_KeyPress(self, event):
    '''Adjust the distance between pitch markers and toggle HUD widgets.'''
    key = event.GetKeyCode()
    if key == wx.WXK_UP:
        self.dist10deg += 0.1
        print('Dist per 10 deg: %.1f' % self.dist10deg)
    elif key == wx.WXK_DOWN:
        self.dist10deg -= 0.1
        if self.dist10deg <= 0:
            # Keep the spacing strictly positive.
            self.dist10deg = 0.1
        print('Dist per 10 deg: %.1f' % self.dist10deg)
    # Toggle widget groups on number keys 1-6 (key codes 49-54).
    elif key == 49:
        self.toggleWidgets([self.modeText, self.wpText])
    elif key == 50:
        self.toggleWidgets([self.batOutRec, self.batInRec, self.voltsText,
                            self.ampsText, self.batPerText])
    elif key == 51:
        self.toggleWidgets([self.rollText, self.pitchText, self.yawText])
    elif key == 52:
        self.toggleWidgets([self.airspeedText, self.altitudeText,
                            self.climbRateText])
    elif key == 53:
        self.toggleWidgets([self.altHistRect, self.altPlot, self.altMarker,
                            self.altText2])
    elif key == 54:
        self.toggleWidgets([self.headingTri, self.headingText,
                            self.headingNorthTri, self.headingNorthText,
                            self.headingWPTri, self.headingWPText])
    # Redraw the Matplotlib canvas and refresh the window.
    self.canvas.draw()
    self.canvas.Refresh()
    self.Refresh()
    self.Update()
def ajax(authenticated=True, data_required=False, json_encoder=json.JSONEncoder):
    """Decorator to allow the wrapped view to exist in an AJAX environment.

    Provide a decorator to wrap a view method so that it may exist in an
    entirely AJAX environment:

    - data decoded from JSON as input and data coded as JSON as output
    - result status is coded in the HTTP status code; any non-2xx response
      data will be coded as a JSON string, otherwise the response type
      (always JSON) is specific to the method called.

    If authenticated is true then we'll make sure the current user is
    authenticated.

    If data_required is true then we'll assert that there is a JSON body
    present.

    The wrapped view method should return either:

    - JSON serialisable data
    - an object of the django http.HttpResponse subclass (one of
      JSONResponse or CreatedResponse is suggested)
    - nothing

    Methods returning nothing (or None explicitly) will result in a 204
    "NO CONTENT" being returned to the caller.
    """
    def decorator(function, authenticated=authenticated, data_required=data_required):
        @functools.wraps(function, assigned=decorators.available_attrs(function))
        def _wrapped(self, request, *args, **kw):
            if authenticated and not request.user.is_authenticated:
                return JSONResponse('not logged in', 401)
            if not request.is_ajax():
                return JSONResponse('request must be AJAX', 400)
            # decode the JSON body if present
            request.DATA = None
            if request.body:
                try:
                    request.DATA = jsonutils.loads(request.body)
                except (TypeError, ValueError) as e:
                    return JSONResponse('malformed JSON request: %s' % e, 400)
            if data_required:
                if not request.DATA:
                    return JSONResponse('request requires JSON body', 400)
            # invoke the wrapped function, handling exceptions sanely
            try:
                data = function(self, request, *args, **kw)
                if isinstance(data, http.HttpResponse):
                    return data
                elif data is None:
                    return JSONResponse('', status=204)
                return JSONResponse(data, json_encoder=json_encoder)
            except http_errors as e:
                # exception was raised with a specific HTTP status;
                # probe the conventional attribute names for it
                for attr in ['http_status', 'code', 'status_code']:
                    if hasattr(e, attr):
                        http_status = getattr(e, attr)
                        break
                else:
                    # for/else: no status attribute found on the exception
                    LOG.exception('HTTP exception with no status/code')
                    return JSONResponse(str(e), 500)
                return JSONResponse(str(e), http_status)
            except Exception as e:
                LOG.exception('error invoking apiclient')
                return JSONResponse(str(e), 500)
        return _wrapped
    return decorator
def get_changelog(repo_path, from_commit=None):
    """Generate an RPM-compatible changelog for a git repository.

    Args:
        repo_path (str): path to the git repo.
        from_commit (str): refspec (partial commit hash, tag, branch, full
            refspec, partial refspec) to start the changelog from; ``None``
            means the whole history.

    Returns:
        str: RPM-compatible changelog, newest entry first.
    """
    repo = dulwich.repo.Repo(repo_path)
    tags = get_tags(repo)
    refs = get_refs(repo)
    entries = []
    maj_version = feat_version = fix_version = 0
    # Walk oldest -> newest so versions can be accumulated incrementally.
    include = from_commit is None
    for commit_sha, children in reversed(get_children_per_first_parent(repo_path).items()):
        commit = repo.get_object(commit_sha)
        maj_version, feat_version, fix_version = get_version(
            commit=commit,
            tags=tags,
            maj_version=maj_version,
            feat_version=feat_version,
            fix_version=fix_version,
        )
        version = '%s.%s.%s' % (maj_version, feat_version, fix_version)
        if not include:
            # Start emitting once the requested starting point is reached.
            include = (
                commit_sha.startswith(from_commit)
                or fuzzy_matches_refs(from_commit, refs.get(commit_sha, []))
            )
        if include:
            entry = pretty_commit(commit, version)
            for child in children:
                entry += pretty_commit(repo.get_object(child), version=None)
            entries.append(entry)
    # Entries were collected oldest-first; the changelog wants newest-first.
    return '\n'.join(reversed(entries))
def placeholder_plugin_filter(self, request, queryset):
    """Restrict *queryset* to the active GLL language.

    This is only used on models which use placeholders from the django-cms.
    """
    if not request:
        return queryset
    if not GLL.is_active:
        return queryset
    return queryset.filter(language=GLL.language_code)
def _iter_convert_to_object(self, iterable):
    """Convert tree entries to object instances.

    *iterable* yields ``(binsha, mode, name)`` tuples; each is converted to
    the respective object representation via ``self._map_id_to_type``.

    :raise TypeError: when an entry's mode has no registered object type.
    """
    for binsha, mode, name in iterable:
        entry_path = join_path(self.path, name)
        try:
            # The high 4 bits of the mode select the object type.
            yield self._map_id_to_type[mode >> 12](self.repo, binsha, mode, entry_path)
        except KeyError:
            raise TypeError("Unknown mode %o found in tree data for path '%s'" % (mode, entry_path))
def scramble_string(s, key):
    """Scramble a puzzle solution string with a 4-digit key.

    *s* is the puzzle's solution in column-major order, omitting black
    squares (e.g. for the puzzle ``C A T`` the solution is ``CATAR``).
    *key* is a 4-digit number in the range 1000 <= key <= 9999.
    """
    digits = key_digits(key)
    for digit in digits:
        # shift each char by each digit in the key, in sequence
        s = shift(s, digits)
        # cut the sequence around the current key digit
        s = s[digit:] + s[:digit]
        # do a 1:1 shuffle of the 'deck'
        s = shuffle(s)
    return s
def full_travel_count(self):
    """Number of tacho counts in the full travel of the motor.

    When combined with the ``count_per_m`` attribute, this value can be used
    to calculate the maximum travel distance of the motor (linear motors
    only).
    """
    self._full_travel_count, result = self.get_cached_attr_int(
        self._full_travel_count, 'full_travel_count')
    return result
def _kbos_from_survey_sym_model_input_file(model_file):
    """Load a Survey Simulator model file as a list of ephem EllipticalBody objects.

    @param model_file: path/URI of the model file; opened via ``storage``,
        which handles both VOSpace and local files.
    @return: list of ``ephem.EllipticalBody`` instances, one per data line.
    """
    lines = storage.open_vos_or_local(model_file).read().split('\n')
    kbos = []
    for line in lines:
        if len(line) == 0 or line[0] == '#':  # skip initial column descriptors and the final blank line
            continue
        kbo = ephem.EllipticalBody()
        values = line.split()
        kbo.name = values[8]
        # NOTE(review): j and k are kept as strings, unlike the float
        # conversions below -- confirm downstream consumers expect that.
        kbo.j = values[9]
        kbo.k = values[10]
        # Orbital elements, in the attribute slots ephem.EllipticalBody expects.
        kbo._a = float(values[0])
        kbo._e = float(values[1])
        kbo._inc = float(values[2])
        kbo._Om = float(values[3])
        kbo._om = float(values[4])
        kbo._M = float(values[5])
        kbo._H = float(values[6])
        # 2453157.5 JD (2004-06-01 00:00 UTC); ephem dates are offsets from
        # ephem's own epoch, hence the subtraction of julian_date(0).
        epoch = ephem.date(2453157.50000 - ephem.julian_date(0))
        kbo._epoch_M = epoch
        kbo._epoch = epoch
        kbos.append(kbo)
    return kbos
def _initialize ( self , show_bounds , reset_camera , outline ) :
"""Outlines the input dataset and sets up the scene""" | self . plotter . subplot ( * self . loc )
if outline is None :
self . plotter . add_mesh ( self . input_dataset . outline_corners ( ) , reset_camera = False , color = vtki . rcParams [ 'outline_color' ] , loc = self . loc )
elif outline :
self . plotter . add_mesh ( self . input_dataset . outline ( ) , reset_camera = False , color = vtki . rcParams [ 'outline_color' ] , loc = self . loc )
# add the axis labels
if show_bounds :
self . plotter . show_bounds ( reset_camera = False , loc = loc )
if reset_camera :
cpos = self . plotter . get_default_cam_pos ( )
self . plotter . camera_position = cpos
self . plotter . reset_camera ( )
self . plotter . camera_set = False |
def mutate(self, p_mutate):
    """Simulate mutation of this individual's DNA.

    p_mutate: per-bit probability that a mutation occurs; a mutated bit is
    redrawn from ``GENETIC_MATERIAL_OPTIONS`` until it differs from the
    original.
    """
    mutated = []
    for bit in self.dna:
        if random.random() < p_mutate:
            # redraw until the replacement actually differs
            replacement = bit
            while replacement == bit:
                replacement = random.choice(self.GENETIC_MATERIAL_OPTIONS)
            bit = replacement
        mutated.append(bit)
    self.dna = ''.join(mutated)
def import_locations(self, zone_file):
    """Parse zoneinfo zone description data files.

    ``import_locations()`` returns a list of :class:`Zone` objects.

    It expects data files in the ``zone.tab`` format::

        AN	+1211-06900	America/Curacao
        AO	-0848+01314	Africa/Luanda
        AQ	-7750+16636	Antarctica/McMurdo	McMurdo Station, Ross Island

    Files containing the data in this format can be found in the
    :file:`zone.tab` file that is normally found in
    :file:`/usr/share/zoneinfo` on UNIX-like systems, or from the `standard
    distribution site`_.

    When processed by ``import_locations()`` a ``list`` object of the
    following style will be returned::

        [Zone(None, None, "AN", "America/Curacao", None),
         Zone(None, None, "AO", "Africa/Luanda", None),
         Zone(None, None, "AO", "Antartica/McMurdo",
              ["McMurdo Station", "Ross Island"])]

    Args:
        zone_file (iter): ``zone.tab`` data to read
    Returns:
        list: Locations as :class:`Zone` objects
    Raises:
        FileFormatError: Unknown file format

    .. _standard distribution site: ftp://elsie.nci.nih.gov/pub/
    """
    self._zone_file = zone_file
    field_names = ('country', 'location', 'zone', 'comments')
    # zone.tab is TAB-delimited; the raw string below holds a literal tab.
    data = utils.prepare_csv_read(zone_file, field_names, delimiter=r"	")
    # '#' in the first column marks a comment line -- skip those.
    for row in (x for x in data if not x['country'].startswith('#')):
        if row['comments']:
            # the comments column holds a comma-separated location list
            row['comments'] = row['comments'].split(', ')
        self.append(Zone(**row))
def checar(cliente_sat):
    """Check every registered alert (see :func:`registrar`) against the SAT
    equipment's operational status, returning the list of active alerts.

    :param cliente_sat: a :class:`satcfe.clientelocal.ClienteSATLocal` or
        :class:`satcfe.clientesathub.ClienteSATHub` instance on which the
        operational-status query will be invoked.

    :rtype: list
    """
    resposta = cliente_sat.consultar_status_operacional()
    ativos = []
    for classe_alerta in AlertaOperacao.alertas_registrados:
        candidato = classe_alerta(resposta)
        if candidato.checar():
            ativos.append(candidato)
    return ativos
def create(vm_):
    '''
    Create a single VM from a data dict

    CLI Example:

    .. code-block:: bash

        salt-cloud -p proxmox-ubuntu vmhostname
    '''
    try:
        # Check for required profile parameters before sending any API calls.
        if vm_['profile'] and config.is_profile_configured(
                __opts__,
                __active_provider_name__ or 'proxmox',
                vm_['profile'],
                vm_=vm_) is False:
            return False
    except AttributeError:
        pass

    ret = {}

    __utils__['cloud.fire_event'](
        'event',
        'starting create',
        'salt/cloud/{0}/creating'.format(vm_['name']),
        args=__utils__['cloud.filter_event']('creating', vm_, ['name', 'profile', 'provider', 'driver']),
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport'])

    log.info('Creating Cloud VM %s', vm_['name'])

    # Optionally resolve the VM's IP via DNS when no explicit IP was given.
    if 'use_dns' in vm_ and 'ip_address' not in vm_:
        use_dns = vm_['use_dns']
        if use_dns:
            from socket import gethostbyname, gaierror
            try:
                ip_address = gethostbyname(six.text_type(vm_['name']))
            except gaierror:
                log.debug('Resolving of %s failed', vm_['name'])
            else:
                vm_['ip_address'] = six.text_type(ip_address)

    try:
        newid = _get_next_vmid()
        data = create_node(vm_, newid)
    except Exception as exc:
        log.error(
            'Error creating %s on PROXMOX\n\n'
            'The following exception was thrown when trying to '
            'run the initial deployment: \n%s',
            vm_['name'], exc,
            # Show the traceback if the debug logging level is enabled
            exc_info_on_loglevel=logging.DEBUG
        )
        return False

    ret['creation_data'] = data
    name = vm_['name']        # hostname which we know
    if 'clone' in vm_ and vm_['clone'] is True:
        vmid = newid
    else:
        vmid = data['vmid']   # vmid which we have received
    host = data['node']       # host which we have received
    nodeType = data['technology']  # VM tech (Qemu / OpenVZ)
    # NOTE(review): nodeType is never used below -- confirm it can be dropped.

    if 'agent_get_ip' not in vm_ or vm_['agent_get_ip'] == 0:
        # Determine which IP to use in order of preference:
        if 'ip_address' in vm_:
            ip_address = six.text_type(vm_['ip_address'])
        elif 'public_ips' in data:
            ip_address = six.text_type(data['public_ips'][0])  # first IP
        elif 'private_ips' in data:
            ip_address = six.text_type(data['private_ips'][0])  # first IP
        else:
            raise SaltCloudExecutionFailure("Could not determine an IP address to use")

    # wait until the vm has been created so we can start it
    if not wait_for_created(data['upid'], timeout=300):
        return {'Error': 'Unable to create {0}, command timed out'.format(name)}

    if 'clone' in vm_ and vm_['clone'] is True and vm_['technology'] == 'qemu':
        # If we cloned a machine, see if we need to reconfigure any of the options such as net0,
        # ide2, etc. This enables us to have a different cloud-init ISO mounted for each VM that's
        # brought up
        log.info('Configuring cloned VM')

        # Modify the settings for the VM one at a time so we can see any problems with the values
        # as quickly as possible
        for setting in 'sockets', 'cores', 'cpulimit', 'memory', 'onboot', 'agent':
            if setting in vm_:  # if the property is set, use it for the VM request
                postParams = {}
                postParams[setting] = vm_[setting]
                query('post', 'nodes/{0}/qemu/{1}/config'.format(vm_['host'], vmid), postParams)

        # cloud-init settings
        for setting in 'ciuser', 'cipassword', 'sshkeys', 'nameserver', 'searchdomain':
            if setting in vm_:  # if the property is set, use it for the VM request
                postParams = {}
                postParams[setting] = vm_[setting]
                query('post', 'nodes/{0}/qemu/{1}/config'.format(vm_['host'], vmid), postParams)

        # IDE devices (ide0..ide2) are plain config updates.
        for setting_number in range(3):
            setting = 'ide{0}'.format(setting_number)
            if setting in vm_:
                postParams = {}
                postParams[setting] = vm_[setting]
                query('post', 'nodes/{0}/qemu/{1}/config'.format(vm_['host'], vmid), postParams)

        # SATA disks (sata0..sata4): when the disk already exists, move and/or
        # resize it to match the profile; otherwise just set the config.
        for setting_number in range(5):
            setting = 'sata{0}'.format(setting_number)
            if setting in vm_:
                vm_config = query('get', 'nodes/{0}/qemu/{1}/config'.format(vm_['host'], vmid))
                if setting in vm_config:
                    setting_params = vm_[setting]
                    setting_storage = setting_params.split(':')[0]
                    setting_size = _stringlist_to_dictionary(setting_params)['size']
                    vm_disk_params = vm_config[setting]
                    vm_disk_storage = vm_disk_params.split(':')[0]
                    vm_disk_size = _stringlist_to_dictionary(vm_disk_params)['size']
                    # if storage is different, move the disk
                    if setting_storage != vm_disk_storage:
                        postParams = {}
                        postParams['disk'] = setting
                        postParams['storage'] = setting_storage
                        postParams['delete'] = 1
                        node = query('post', 'nodes/{0}/qemu/{1}/move_disk'.format(vm_['host'], vmid), postParams)
                        data = _parse_proxmox_upid(node, vm_)
                        # wait until the disk has been moved
                        if not wait_for_task(data['upid'], timeout=300):
                            return {'Error': 'Unable to move disk {0}, command timed out'.format(setting)}
                    # if the requested size differs, resize the disk
                    if setting_size != vm_disk_size:
                        postParams = {}
                        postParams['disk'] = setting
                        postParams['size'] = setting_size
                        query('put', 'nodes/{0}/qemu/{1}/resize'.format(vm_['host'], vmid), postParams)
                else:
                    postParams = {}
                    postParams[setting] = vm_[setting]
                    query('post', 'nodes/{0}/qemu/{1}/config'.format(vm_['host'], vmid), postParams)

        # SCSI disks (scsi0..scsi12): same move/resize handling as SATA above.
        for setting_number in range(13):
            setting = 'scsi{0}'.format(setting_number)
            if setting in vm_:
                vm_config = query('get', 'nodes/{0}/qemu/{1}/config'.format(vm_['host'], vmid))
                if setting in vm_config:
                    setting_params = vm_[setting]
                    setting_storage = setting_params.split(':')[0]
                    setting_size = _stringlist_to_dictionary(setting_params)['size']
                    vm_disk_params = vm_config[setting]
                    vm_disk_storage = vm_disk_params.split(':')[0]
                    vm_disk_size = _stringlist_to_dictionary(vm_disk_params)['size']
                    # if storage is different, move the disk
                    if setting_storage != vm_disk_storage:
                        postParams = {}
                        postParams['disk'] = setting
                        postParams['storage'] = setting_storage
                        postParams['delete'] = 1
                        node = query('post', 'nodes/{0}/qemu/{1}/move_disk'.format(vm_['host'], vmid), postParams)
                        data = _parse_proxmox_upid(node, vm_)
                        # wait until the disk has been moved
                        if not wait_for_task(data['upid'], timeout=300):
                            return {'Error': 'Unable to move disk {0}, command timed out'.format(setting)}
                    # if the requested size differs, resize the disk
                    if setting_size != vm_disk_size:
                        postParams = {}
                        postParams['disk'] = setting
                        postParams['size'] = setting_size
                        query('put', 'nodes/{0}/qemu/{1}/resize'.format(vm_['host'], vmid), postParams)
                else:
                    postParams = {}
                    postParams[setting] = vm_[setting]
                    query('post', 'nodes/{0}/qemu/{1}/config'.format(vm_['host'], vmid), postParams)

        # net strings are a list of comma seperated settings. We need to merge the settings so that
        # the setting in the profile only changes the settings it touches and the other settings
        # are left alone. An example of why this is necessary is because the MAC address is set
        # in here and generally you don't want to alter or have to know the MAC address of the new
        # instance, but you may want to set the VLAN bridge for example
        for setting_number in range(20):
            setting = 'net{0}'.format(setting_number)
            if setting in vm_:
                data = query('get', 'nodes/{0}/qemu/{1}/config'.format(vm_['host'], vmid))
                # Generate a dictionary of settings from the existing string
                new_setting = {}
                if setting in data:
                    new_setting.update(_stringlist_to_dictionary(data[setting]))
                # Merge the new settings (as a dictionary) into the existing dictionary to get the
                # new merged settings
                new_setting.update(_stringlist_to_dictionary(vm_[setting]))
                # Convert the dictionary back into a string list
                postParams = {setting: _dictionary_to_stringlist(new_setting)}
                query('post', 'nodes/{0}/qemu/{1}/config'.format(vm_['host'], vmid), postParams)

        # ipconfig entries get the same merge treatment; ipconfig0 may be
        # synthesized from vm_['ip_address'] (assumed /24 -- see format below).
        for setting_number in range(20):
            setting = 'ipconfig{0}'.format(setting_number)
            if setting in vm_:
                data = query('get', 'nodes/{0}/qemu/{1}/config'.format(vm_['host'], vmid))
                # Generate a dictionary of settings from the existing string
                new_setting = {}
                if setting in data:
                    new_setting.update(_stringlist_to_dictionary(data[setting]))
                # Merge the new settings (as a dictionary) into the existing dictionary to get the
                # new merged settings
                if setting_number == 0 and 'ip_address' in vm_:
                    if 'gw' in _stringlist_to_dictionary(vm_[setting]):
                        new_setting.update(_stringlist_to_dictionary('ip={0}/24,gw={1}'.format(vm_['ip_address'], _stringlist_to_dictionary(vm_[setting])['gw'])))
                    else:
                        new_setting.update(_stringlist_to_dictionary('ip={0}/24'.format(vm_['ip_address'])))
                else:
                    new_setting.update(_stringlist_to_dictionary(vm_[setting]))
                # Convert the dictionary back into a string list
                postParams = {setting: _dictionary_to_stringlist(new_setting)}
                query('post', 'nodes/{0}/qemu/{1}/config'.format(vm_['host'], vmid), postParams)

    # VM has been created. Starting..
    if not start(name, vmid, call='action'):
        log.error('Node %s (%s) failed to start!', name, vmid)
        raise SaltCloudExecutionFailure

    # Wait until the VM has fully started
    log.debug('Waiting for state "running" for vm %s on %s', vmid, host)
    if not wait_for_state(vmid, 'running'):
        return {'Error': 'Unable to start {0}, command timed out'.format(name)}

    # For QEMU VMs, we can get the IP Address from qemu-agent
    if 'agent_get_ip' in vm_ and vm_['agent_get_ip'] == 1:
        def __find_agent_ip(vm_):
            log.debug("Waiting for qemu-agent to start...")
            endpoint = 'nodes/{0}/qemu/{1}/agent/network-get-interfaces'.format(vm_['host'], vmid)
            interfaces = query('get', endpoint)
            # If we get a result from the agent, parse it
            if 'result' in interfaces:
                for interface in interfaces['result']:
                    if_name = interface['name']
                    # Only check ethernet type interfaces, as they are not returned in any order
                    if if_name.startswith('eth') or if_name.startswith('ens'):
                        for if_addr in interface['ip-addresses']:
                            ip_addr = if_addr['ip-address']
                            # Ensure interface has a valid IPv4 address
                            if if_addr['ip-address-type'] == 'ipv4' and ip_addr is not None:
                                return six.text_type(ip_addr)
            raise SaltCloudExecutionFailure

        # We have to wait for a bit for qemu-agent to start
        try:
            ip_address = __utils__['cloud.wait_for_fun'](__find_agent_ip, vm_=vm_)
        except (SaltCloudExecutionTimeout, SaltCloudExecutionFailure) as exc:
            try:
                # If VM was created but we can't connect, destroy it.
                destroy(vm_['name'])
            except SaltCloudSystemExit:
                pass
            finally:
                raise SaltCloudSystemExit(six.text_type(exc))

    log.debug('Using IP address %s', ip_address)

    ssh_username = config.get_cloud_config_value('ssh_username', vm_, __opts__, default='root')
    ssh_password = config.get_cloud_config_value('password', vm_, __opts__,)

    ret['ip_address'] = ip_address
    ret['username'] = ssh_username
    ret['password'] = ssh_password

    vm_['ssh_host'] = ip_address
    vm_['password'] = ssh_password
    ret = __utils__['cloud.bootstrap'](vm_, __opts__)

    # Report success!
    log.info('Created Cloud VM \'%s\'', vm_['name'])
    log.debug('\'%s\' VM creation details:\n%s', vm_['name'], pprint.pformat(data))

    __utils__['cloud.fire_event'](
        'event',
        'created instance',
        'salt/cloud/{0}/created'.format(vm_['name']),
        args=__utils__['cloud.filter_event']('created', vm_, ['name', 'profile', 'provider', 'driver']),
        sock_dir=__opts__['sock_dir'],
    )

    return ret
def get_host(self, endpoint_or_address):
    """Find a host in the metadata for a specific endpoint.

    If a string inet address is passed instead of an :class:`EndPoint`,
    every host is scanned to match its ``broadcast_rpc_address``.
    """
    if isinstance(endpoint_or_address, EndPoint):
        return self._hosts.get(endpoint_or_address)
    return self._get_host_by_address(endpoint_or_address)
def send_message(self, message, sign=True):
    """Send the given message over the connection's transport.

    @type message: OmapiMessage
    @param sign: whether the message needs to be signed before sending
    @raises OmapiError:
    @raises socket.error:
    """
    if sign:
        authenticator = self.authenticators[self.defauth]
        message.sign(authenticator)
    logger.debug("sending %s", LazyStr(message.dump_oneline))
    self.transport.write(message.as_string())
def tournament_name2number(self, name):
    """Translate a tournament name to its tournament number.

    Args:
        name (str): tournament name to translate

    Returns:
        int or None: number of the tournament, or ``None`` if unknown.
    """
    mapping = {entry['name']: entry['tournament']
               for entry in self.get_tournaments()}
    return mapping.get(name)
def set_rest_notification(self, url, hit_type_id):
    """Set a REST endpoint to recieve notifications about the HIT.

    The newer AWS MTurk API does not support this feature, which means we
    cannot use boto3 here. Instead, we make the call manually against the
    legacy endpoint after assembling a properly signed request.

    :param url: destination URL that will receive the notifications
    :param hit_type_id: the HITTypeId the notification is attached to
    :return: True when the legacy API reported the request as valid
    """
    ISO8601 = "%Y-%m-%dT%H:%M:%SZ"
    # Versions are fixed by the legacy SetHITTypeNotification API contract.
    notification_version = "2006-05-05"
    API_version = "2014-08-15"
    # Payload keys/values must match the legacy API exactly; the signature
    # below is computed over this data, so do not reorder or rename fields.
    data = {
        "AWSAccessKeyId": self.aws_key,
        "HITTypeId": hit_type_id,
        "Notification.1.Active": "True",
        "Notification.1.Destination": url,
        "Notification.1.EventType.1": "AssignmentAccepted",
        "Notification.1.EventType.2": "AssignmentAbandoned",
        "Notification.1.EventType.3": "AssignmentReturned",
        "Notification.1.EventType.4": "AssignmentSubmitted",
        "Notification.1.EventType.5": "HITReviewable",
        "Notification.1.EventType.6": "HITExpired",
        "Notification.1.Transport": "REST",
        "Notification.1.Version": notification_version,
        "Operation": "SetHITTypeNotification",
        "SignatureVersion": "1",
        "Timestamp": time.strftime(ISO8601, time.gmtime()),
        "Version": API_version,
    }
    query_string, signature = self._calc_old_api_signature(data)
    # The signature must be URL-quoted in the body but raw in the dict.
    body = query_string + "&Signature=" + urllib.parse.quote_plus(signature)
    data["Signature"] = signature
    headers = {
        "Content-Type": "application/x-www-form-urlencoded; charset=UTF-8",
        "Content-Length": str(len(body)),
        "Host": self.legacy_host,
    }
    resp = requests.post("https://" + self.legacy_host, headers=headers, data=body)
    # The legacy API signals success inside the XML response body.
    return "<IsValid>True</IsValid>" in resp.text
def _toolkit_repr_print(model, fields, section_titles, width=None):
    """Display a toolkit repr according to some simple rules.

    Parameters
    ----------
    model : Turi Create model
    fields : list of lists of tuples
        Each tuple should be (display_name, field_name), where field_name
        can be a string or a _precomputed_field object. An _SFrame entry is
        rendered as an embedded table instead.
    section_titles : list of str
        Section titles, one per list in the ``fields`` arg.
    width : int, optional
        Column width for the keys; computed from the longest key if omitted.

    Returns
    -------
    str : the formatted, newline-joined repr text.
    """
    assert len(section_titles) == len(fields), \
        "The number of section titles ({0}) ".format(len(section_titles)) + \
        "doesn't match the number of groups of fields, {0}.".format(len(fields))
    # Accumulate a mix of (key, value) tuples and literal strings; tuples get
    # column-aligned at the end, strings pass through unchanged.
    out_fields = [("Class", model.__class__.__name__), ""]
    # Record the max_width so that if width is not provided, we calculate it.
    max_width = len("Class")
    for index, (section_title, field_list) in enumerate(zip(section_titles, fields)):
        # Add in the section header.
        out_fields += [section_title, "-" * len(section_title)]
        # Add in all the key-value pairs
        for f in field_list:
            if isinstance(f, tuple):
                f = (str(f[0]), f[1])
                out_fields.append((f[0], __extract_model_summary_value(model, f[1])))
                max_width = max(max_width, len(f[0]))
            elif isinstance(f, _SFrame):
                # Embedded tables are surrounded by blank lines.
                out_fields.append("")
                out_fields += _make_repr_table_from_sframe(f)
                out_fields.append("")
            else:
                raise TypeError("Type of field %s not recognized." % str(f))
        # Add in the empty footer.
        out_fields.append("")
    if width is None:
        width = max_width
    # Now, go through and format the key_value pairs nicely.
    def format_key_pair(key, value):
        # A list-valued key is rendered as a comma-joined string.
        if type(key) is list:
            key = ','.join(str(k) for k in key)
        return key.ljust(width, ' ') + ' : ' + str(value)
    out_fields = [s if type(s) is str else format_key_pair(*s) for s in out_fields]
    return '\n'.join(out_fields)
def callback(self, filename, lines, **kwargs):
    """Publish *lines* one by one to the configured queue/topic.

    :param filename: source file the lines were read from (used when
        formatting the message).
    :param lines: iterable of lines to publish.
    :raises TransportException: when formatting or sending a line fails.
    """
    timestamp = self.get_timestamp(**kwargs)
    if kwargs.get('timestamp', False):
        del kwargs['timestamp']
    for line in lines:
        try:
            import warnings
            with warnings.catch_warnings():
                # escalate formatting warnings to errors so bad messages
                # are caught here rather than sent
                warnings.simplefilter('error')
                m = self.format(filename, line, timestamp, **kwargs)
                self.logger.debug("Sending message " + m)
                self.conn.send(destination=self.queue, body=m)
        # BUGFIX: was the Python-2-only `except Exception, e:` syntax, which
        # is a SyntaxError on Python 3; `as e` works on both.
        except Exception as e:
            self.logger.error(e)
            try:
                raise TransportException(e)
            except AttributeError:
                raise TransportException('Unspecified exception encountered')
def request_get_user(self, user_ids) -> dict:
    """Fetch users by ID via the ``users.get`` API method (no authorization
    required)."""
    params = {'user_ids': user_ids}
    result = self.session.send_method_request('users.get', params)
    self.check_for_errors('users.get', params, result)
    return result
def delete_consumer_group(self, project, logstore, consumer_group):
    """Delete a consumer group.

    :type project: string
    :param project: project name
    :type logstore: string
    :param logstore: logstore name
    :type consumer_group: string
    :param consumer_group: consumer group name
    :return: DeleteConsumerGroupResponse
    """
    resource = "/logstores/" + logstore + "/consumergroups/" + consumer_group
    headers = {"x-log-bodyrawsize": '0'}
    resp, header = self._send("DELETE", project, None, resource, {}, headers)
    return DeleteConsumerGroupResponse(header, resp)
def validate_plugin(self, plugin_class, experimental=False):
    """Verify that *plugin_class* may execute under this policy.

    IndependentPlugin is always allowed; ExperimentalPlugin only when
    *experimental* is set.
    """
    allowed = [IndependentPlugin]
    allowed.extend(self.valid_subclasses)
    if experimental:
        allowed.append(ExperimentalPlugin)
    return any(issubclass(plugin_class, candidate) for candidate in allowed)
def close(self):
    """Close the connection if it is open.

    Always returns True; closing an already-closed connection is a no-op.
    """
    if self.connected:
        self._close()
        self.connected = False
        self.log("Closed Connection {}", self.connection_config.interface_name)
    return True
def send_query(self, ID, methodname, returnable, *args, **kwargs):
    """Encode an RPC call request and send it over the wire.

    Parameters:
        ID (str): task identifier
        methodname (str): name of the remote method to invoke
        returnable (bool): whether a result is expected back
        args (Any): positional arguments for the remote method
        kwargs (Any): keyword arguments for the remote method

    Return:
        bool: True when the request was sent without error.
    """
    payload = self._make_query(ID, methodname, returnable, *args, **kwargs)
    self._send_query(payload)
    # Register a future so the eventual reply can be matched to this task.
    future = self.loop.create_future()
    self.tasks[ID] = future
    return True
def ssh_sa_ssh_server_mac(self, **kwargs):
    """Auto Generated Code.

    Build the Brocade ssh-server MAC configuration XML and hand it to the
    callback (``kwargs['callback']`` or ``self._callback``).
    """
    config = ET.Element("config")
    ssh_sa = ET.SubElement(config, "ssh-sa",
                           xmlns="urn:brocade.com:mgmt:brocade-sec-services")
    server = ET.SubElement(ET.SubElement(ssh_sa, "ssh"), "server")
    mac_element = ET.SubElement(server, "mac")
    mac_element.text = kwargs.pop('mac')
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
def _parse_param ( key , val ) :
"""Parse the query param looking for sparse fields params
Ensure the ` val ` or what will become the sparse fields
is always an array . If the query param is not a sparse
fields query param then return None .
: param key :
the query parameter key in the request ( left of = )
: param val :
the query parameter val in the request ( right of = )
: return :
tuple of resource type to implement the sparse
fields on & a array of the fields .""" | regex = re . compile ( r'fields\[([A-Za-z]+)\]' )
match = regex . match ( key )
if match :
if not isinstance ( val , list ) :
val = val . split ( ',' )
fields = [ field . lower ( ) for field in val ]
rtype = match . groups ( ) [ 0 ] . lower ( )
return rtype , fields |
def camel_to_snake(name):
    """Convert a CamelCase identifier to snake_case.

    Based on http://stackoverflow.com/questions/1175208/
    elegant-python-function-to-convert-camelcase-to-camel-case
    """
    partially_converted = FIRST_CAP_RE.sub(r'\1_\2', name)
    return ALL_CAP_RE.sub(r'\1_\2', partially_converted).lower()
def join_url_params(dic):
    """Build the URL query-string suffix (``?key1=value1&key2=value2``) for a
    dict of parameters; keys are emitted in sorted order.

    :param:
        * dic: (dict) parameter key/value pairs
    :return:
        * result: (string) the assembled query-string suffix

    Example::

        print('--- splice_url_params demo ---')
        dic1 = {'key1': 'value1', 'key2': 'value2'}
        print(splice_url_params(dic1))
        print('---')

    Output::

        --- splice_url_params demo ---
        ?key1=value1&key2=value2
    """
    ordered = OrderedDict(sorted(dic.items()))
    return '?' + urlencode(ordered)
def get_spherical_bounding_box(lons, lats):
    """Given a collection of points find and return the bounding box,
    as a pair of longitudes and a pair of latitudes.

    Parameters define longitudes and latitudes of a point collection
    respectively in a form of lists or numpy arrays.

    :return:
        A tuple of four items. These items represent western, eastern,
        northern and southern borders of the bounding box respectively.
        Values are floats in decimal degrees.
    :raises ValueError:
        If points collection has the longitudinal extent of more than
        180 degrees (it is impossible to define a single hemisphere
        bound to poles that would contain the whole collection).
    """
    north, south = numpy.max(lats), numpy.min(lats)
    west, east = numpy.min(lons), numpy.max(lons)
    # longitudes are expected to already be normalized to [-180, 180]
    assert (-180 <= west <= 180) and (-180 <= east <= 180), (west, east)
    if get_longitudinal_extent(west, east) < 0:
        # points are lying on both sides of the international date line
        # (meridian 180). the actual west longitude is the lowest positive
        # longitude and east one is the highest negative.
        if hasattr(lons, 'flatten'):
            # fixes test_surface_crossing_international_date_line
            lons = lons.flatten()
        west = min(lon for lon in lons if lon > 0)
        east = max(lon for lon in lons if lon < 0)
        # verify every point falls inside the wrapped [west, east] interval
        if not all((get_longitudinal_extent(west, lon) >= 0
                    and get_longitudinal_extent(lon, east) >= 0)
                   for lon in lons):
            raise ValueError('points collection has longitudinal extent '
                             'wider than 180 deg')
    return SphericalBB(west, east, north, south)
def last_sleep_breakdown(self):
    """Return durations of sleep stages for the last complete session.

    Reads ``self.intervals[1]['stages']`` (the previous, i.e. last
    complete, session) and sums the duration per stage.

    :return: dict with keys 'awake', 'light', 'deep', 'rem' mapping to
        total durations, or None when no complete session with stage
        data is available.
    """
    try:
        stages = self.intervals[1]['stages']
    except (IndexError, KeyError):
        # IndexError: fewer than two intervals recorded (original code
        # only caught KeyError and would crash here); KeyError: the
        # session carries no stage data.
        return None
    breakdown = {'awake': 0, 'light': 0, 'deep': 0, 'rem': 0}
    for stage in stages:
        name = stage['stage']
        # unknown stage names are ignored, matching the original if/elif chain
        if name in breakdown:
            breakdown[name] += stage['duration']
    return breakdown
def matches(self, spec):
    """Match a specification against the current Plot.

    A type is matched with ``isinstance``; any other callable is invoked
    with the plot itself and its result returned.

    :raises ValueError: if *spec* is neither a type nor a callable.
    """
    if isinstance(spec, type):
        return isinstance(self, spec)
    if callable(spec):
        return spec(self)
    raise ValueError("Matching specs have to be either a type or a callable.")
def delete(self, record):
    """Delete a record from the search index.

    :param record: Record instance.
    """
    index, doc_type = self.record_to_index(record)
    record_id = str(record.id)
    return self.client.delete(
        id=record_id,
        index=index,
        doc_type=doc_type,
    )
def OnTimer(self, event):
    """Update all frozen cells because of timer call"""
    self.timer_updating = True
    # select the whole sheet: from the origin to the grid's (rows, cols) shape
    sheet_shape = self.grid.code_array.shape[:2]
    whole_sheet = Selection([(0, 0)], [sheet_shape], [], [], [])
    self.grid.actions.refresh_selected_frozen_cells(whole_sheet)
    self.grid.ForceRefresh()
def _extract_core_semantics(self, docs):
    """Extract core semantics for a list of documents.

    Returns the per-document core semantics together with a deduplicated
    list of every concept represented across all documents.
    """
    per_doc = []
    seen = set()
    for doc in docs:
        core = self._process_doc(doc)
        per_doc.append(core)
        seen.update(concept for concept, _weight in core)
    return per_doc, list(seen)
def _encrypt(self, archive):
    """Encrypt the compressed archive using GPG.

    If encryption fails for any reason, it should be logged by sos but not
    cause execution to stop. The assumption is that the unencrypted archive
    would still be of use to the user, and/or that the end user has another
    means of securing the archive.

    Returns the name of the encrypted archive, or raises an exception to
    signal that encryption failed and the unencrypted archive name should
    be used.
    """
    arc_name = archive.replace("sosreport-", "secured-sosreport-")
    arc_name += ".gpg"
    enc_cmd = "gpg --batch -o %s " % arc_name
    env = None
    if self.enc_opts["key"]:
        # need to assume a trusted key here to be able to encrypt the
        # archive non-interactively
        enc_cmd += "--trust-model always -e -r %s " % self.enc_opts["key"]
    if self.enc_opts["password"]:
        # prevent change of gpg options using a long password, but also
        # prevent the addition of quote characters to the passphrase
        passwd = "%s" % self.enc_opts["password"].replace('\'"', '')
        env = {"sos_gpg": passwd}
        enc_cmd += "-c --passphrase-fd 0 "
    # append the archive exactly once, after all options. The original code
    # appended it in the key branch AND after wrapping the password command,
    # so with both key and password set the file name appeared twice (once
    # in the middle of the options), and in the password-only case it ended
    # up outside the quoted bash -c string.
    enc_cmd += archive
    if self.enc_opts["password"]:
        # pipe the passphrase in on stdin; the whole gpg pipeline, archive
        # name included, must live inside the quoted bash -c string
        enc_cmd = "/bin/bash -c \"echo $sos_gpg | %s\"" % enc_cmd
    r = sos_get_command_output(enc_cmd, timeout=0, env=env)
    if r["status"] == 0:
        return arc_name
    elif r["status"] == 2:
        if self.enc_opts["key"]:
            msg = "Specified key not in keyring"
        else:
            msg = "Could not read passphrase"
    else:
        # TODO: report the actual error from gpg. Currently, we cannot as
        # sos_get_command_output() does not capture stderr
        msg = "gpg exited with code %s" % r["status"]
    raise Exception(msg)
def file_transfer_protocol_send(self, target_network, target_system, target_component, payload, force_mavlink1=False):
    '''File transfer message

    target_network    : Network ID (0 for broadcast) (uint8_t)
    target_system     : System ID (0 for broadcast) (uint8_t)
    target_component  : Component ID (0 for broadcast) (uint8_t)
    payload           : Variable length payload. The length is defined by
                        the remaining message length when subtracting the
                        header and other fields. The content of this block
                        is opaque unless you understand the encoding
                        message_type; the particular encoding used can be
                        extension specific. (uint8_t)
    '''
    message = self.file_transfer_protocol_encode(
        target_network, target_system, target_component, payload)
    return self.send(message, force_mavlink1=force_mavlink1)
def jsonstrlen(self, name, path=Path.rootPath()):
    """Return the length of the string JSON value under ``path`` at key
    ``name``.
    """
    # NOTE(review): the default path is evaluated once at definition time,
    # matching the original signature.
    args = ['JSON.STRLEN', name, str_path(path)]
    return self.execute_command(*args)
def _schemaPrepareInsert(self, store):
    """Prepare each attribute in my schema for insertion into a given store,
    either by upgrade or by creation. This makes sure all references point
    to this store and all relative paths point to this store's files
    directory.
    """
    for _name, attribute in self.getSchema():
        attribute.prepareInsert(self, store)
def maps_get_default_rules_output_rules_policyname(self, **kwargs):
    """Auto Generated Code

    Builds the <maps_get_default_rules>/<output>/<rules>/<policyname>
    XML payload from kwargs and dispatches it through the callback
    (defaulting to self._callback).
    """
    config = ET.Element("maps_get_default_rules")
    output_node = ET.SubElement(config, "output")
    rules_node = ET.SubElement(output_node, "rules")
    policyname_node = ET.SubElement(rules_node, "policyname")
    policyname_node.text = kwargs.pop('policyname')
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
def forward_lmm(snps, pheno, K=None, covs=None, qvalues=False, threshold=5e-8, maxiter=2, test='lrt', **kw_args):
    """Univariate fixed-effects association test with forward selection.

    Args:
        snps:   [N x S] SP.array of S SNPs for N individuals (test SNPs)
        pheno:  [N x 1] SP.array of 1 phenotype for N individuals
        K:      [N x N] SP.array of LMM-covariance/kinship coefficients
                (optional). If not provided an identity matrix is used,
                i.e. plain linear-regression analysis is performed.
        covs:   [N x D] SP.array of D covariates for N individuals.
                Defaults to a single intercept column of ones.
        qvalues: if True, use FDR q-values instead of p-values as the
                selection score.
        threshold: (float) score threshold for inclusion in forward
                selection (default 5e-8)
        maxiter: (int) maximum number of association scans. The first scan
                is without inclusion, so at most maxiter-1 inclusions can
                be performed. (default 2)
        test:   'lrt' for likelihood ratio test (default) or 'f' for F-test
        **kw_args: forwarded to the underlying LMM constructor

    Returns:
        lm:      limix LMM object, with the finally selected covariates set
        RV:      dict with:
                 'iadded'  - indices of SNPs included, in inclusion order
                 'pvadded' - p-values of included SNPs just before inclusion
                 'pvall'   - [maxiter x S] SP.array of p-values, one row
                             per iteration
                 'qvall'/'qvadded' - q-value analogues (only if qvalues)
    """
    if K is None:
        # identity kinship: equivalent to linear regression
        K = SP.eye(snps.shape[0])
    if covs is None:
        # intercept-only covariate matrix
        covs = SP.ones((snps.shape[0], 1))
    lm = simple_lmm(snps, pheno, K=K, covs=covs, test=test, **kw_args)
    pvall = SP.zeros((maxiter, snps.shape[1]))
    pv = lm.getPv()
    pvall[0:1, :] = pv
    imin = pv.argmin()
    niter = 1
    # start stuff
    iadded = []
    pvadded = []
    qvadded = []
    if qvalues:
        assert pv.shape[0] == 1, "This is untested with the fdr package. pv.shape[0]==1 failed"
        qvall = SP.zeros((maxiter, snps.shape[1]))
        qv = FDR.qvalues(pv)
        qvall[0:1, :] = qv
        score = qv.min()
    else:
        score = pv.min()
    # greedily add the best-scoring SNP as a covariate and re-scan, until the
    # score no longer clears the threshold or the iteration budget runs out
    while (score < threshold) and niter < maxiter:
        t0 = time.time()
        iadded.append(imin)
        pvadded.append(pv[0, imin])
        if qvalues:
            qvadded.append(qv[0, imin])
        covs = SP.concatenate((covs, snps[:, imin:(imin + 1)]), 1)
        lm.setCovs(covs)
        lm.process()
        pv = lm.getPv()
        pvall[niter:niter + 1, :] = pv
        imin = pv.argmin()
        if qvalues:
            qv = FDR.qvalues(pv)
            qvall[niter:niter + 1, :] = qv
            score = qv.min()
        else:
            score = pv.min()
        t1 = time.time()
        print(("finished GWAS testing in %.2f seconds" % (t1 - t0)))
        niter = niter + 1
    RV = {}
    RV['iadded'] = iadded
    RV['pvadded'] = pvadded
    RV['pvall'] = pvall
    if qvalues:
        RV['qvall'] = qvall
        RV['qvadded'] = qvadded
    return lm, RV
def create_widget(self):
    """Create the underlying widget.

    A dialog is not a subclass of view, hence we don't set name as widget
    or children will try to use it as their parent.
    """
    declaration = self.declaration
    popup_style = declaration.style or '@style/Widget.DeviceDefault.PopupMenu'
    self.window = PopupWindow(self.get_context(), None, 0, popup_style)
    self.showing = False
def channel_close(
        self,
        registry_address: PaymentNetworkID,
        token_address: TokenAddress,
        partner_address: Address,
        retry_timeout: NetworkTimeout = DEFAULT_RETRY_TIMEOUT,
):
    """Close a channel opened with `partner_address` for the given
    `token_address`.

    Race condition, this can fail if channel was closed externally.
    """
    # delegate to the batch variant with a single-element partner list
    self.channel_batch_close(
        registry_address=registry_address,
        token_address=token_address,
        partner_addresses=[partner_address],
        retry_timeout=retry_timeout,
    )
def right_click_high_equalarea(self, event):
    """Toggle between zoom and pan effects for the high equal area on
    right click.

    Parameters
    ----------
    event : the wx.MouseEvent that triggered the call of this function

    Alters
    ------
    high_EA_setting, toolbar4 setting
    """
    if event.LeftIsDown():
        return
    mode = self.high_EA_setting
    if mode == "Zoom":
        self.high_EA_setting = "Pan"
        toggle = lambda: self.toolbar4.pan('off')
    elif mode == "Pan":
        self.high_EA_setting = "Zoom"
        toggle = lambda: self.toolbar4.zoom()
    else:
        # unknown mode: nothing to toggle
        return
    try:
        toggle()
    except TypeError:
        # toolbar may not accept the call in its current state; ignore
        pass
def check_container(self, container_path, container_format=None, config_string=None):
    """Check whether the given container is well-formed.

    Verifies that the container exists, that it carries (or is given) a
    TXT/XML configuration, and that the configuration describes a valid
    job. All failures are recorded in the returned result rather than
    raised.

    :param string container_path: the path of the container to be checked
    :param container_format: the format of the container
    :type container_format: :class:`~aeneas.container.ContainerFormat`
    :param string config_string: the configuration string generated by the wizard
    :rtype: :class:`~aeneas.validator.ValidatorResult`
    """
    self.log([u"Checking container '%s'", container_path])
    self.result = ValidatorResult()
    if self._are_safety_checks_disabled(u"check_container"):
        # safety checks disabled: return an empty, passing result
        return self.result
    if not (gf.file_exists(container_path) or gf.directory_exists(container_path)):
        self._failed(u"Container '%s' not found." % container_path)
        return self.result
    container = Container(container_path, container_format)
    try:
        self.log(u"Checking container has config file")
        if config_string is not None:
            # a wizard-supplied config string takes precedence over any
            # config file stored inside the container
            self.log(u"Container with config string from wizard")
            self.check_config_txt(config_string, is_config_string=True)
        elif container.has_config_xml:
            self.log(u"Container has XML config file")
            contents = container.read_entry(container.entry_config_xml)
            if contents is None:
                self._failed(u"Unable to read the contents of XML config file.")
                return self.result
            self.check_config_xml(contents)
        elif container.has_config_txt:
            self.log(u"Container has TXT config file")
            contents = container.read_entry(container.entry_config_txt)
            if contents is None:
                self._failed(u"Unable to read the contents of TXT config file.")
                return self.result
            self.check_config_txt(contents, is_config_string=False)
        else:
            self._failed(u"Container does not have a TXT or XML configuration file.")
        self.log(u"Checking we have a valid job in the container")
        if not self.result.passed:
            return self.result
        self.log(u"Analyze the contents of the container")
        analyzer = AnalyzeContainer(container)
        if config_string is not None:
            job = analyzer.analyze(config_string=config_string)
        else:
            job = analyzer.analyze()
        self._check_analyzed_job(job, container)
    except OSError:
        # reading entries from the container failed at the OS level
        self._failed(u"Unable to read the contents of the container.")
    return self.result
def _get_jwt_for_audience(self, audience):
    """Get a JWT for a given audience.

    If there is already an existing, non-expired token in the cache for
    the audience, that token is used. Otherwise, a new token will be
    created and cached.

    Args:
        audience (str): The intended audience.

    Returns:
        bytes: The encoded JWT.
    """
    token, expiry = self._cache.get(audience, (None, None))
    # a cached entry is usable only if present and not yet expired
    usable = token is not None and expiry >= _helpers.utcnow()
    if not usable:
        token, expiry = self._make_jwt_for_audience(audience)
        self._cache[audience] = token, expiry
    return token
def start_completion(self, buffer_name=None, select_first=False, select_last=False, insert_common_part=False, complete_event=None):
    """Start asynchronous autocompletion of this buffer.

    (This will do nothing if a previous completion was still in progress.)

    NOTE(review): the ``complete_event`` parameter is accepted but never
    used; a fresh CompleteEvent is always constructed below.
    """
    target = buffer_name or self.current_buffer_name
    completer = self._async_completers.get(target)
    if not completer:
        return
    completer(
        select_first=select_first,
        select_last=select_last,
        insert_common_part=insert_common_part,
        complete_event=CompleteEvent(completion_requested=True),
    )
def create_ltp_package(aleph_record, book_id, ebook_fn, data, url, urn_nbn=None):
    """Create LTP package as it is specified in specification v1.0 as I
    understand it.

    Args:
        aleph_record (str): XML containing full aleph record.
        book_id (str): UUID of the book.
        ebook_fn (str): Original filename of the ebook.
        data (str/bytes): Ebook's content.
        url (str): URL of the publication used when the URL can't be found
            in `aleph_record`.
        urn_nbn (str, default None): URN:NBN.

    Returns:
        str: Name of the package's directory in ``/tmp``.
    """
    root_dir, orig_dir, meta_dir = _create_package_hierarchy(book_id=book_id)
    # create original file
    original_fn = os.path.join(orig_dir, fn_composers.original_fn(book_id, ebook_fn))
    with open(original_fn, "wb") as f:
        f.write(data)
    # create metadata files - one MODS record per volume
    metadata_filenames = []
    records = marcxml2mods(marc_xml=aleph_record, uuid=book_id, url=url)
    for cnt, mods_record in enumerate(records):
        fn = os.path.join(meta_dir, fn_composers.volume_fn(cnt))
        with open(fn, "w") as f:
            f.write(mods_record)
        metadata_filenames.append(fn)
    # collect md5 sums over everything written so far
    md5_fn = os.path.join(root_dir, fn_composers.checksum_fn(book_id))
    checksums = checksum_generator.generate_hashfile(root_dir)
    with open(md5_fn, "w") as f:
        f.write(checksums)
    # create info file describing the whole package
    info_fn = os.path.join(root_dir, fn_composers.info_fn(book_id))
    with open(info_fn, "w") as f:
        f.write(info_composer.compose_info(root_dir=root_dir, files=[original_fn] + metadata_filenames, hash_fn=md5_fn, aleph_record=aleph_record, urn_nbn=urn_nbn, ))
    return root_dir
def getVATAmount(self):
    """Compute the VAT amount from the price and the system-configured
    VAT percentage.
    """
    price = float(self.getPrice())
    vat_rate = float(self.getVAT()) / 100
    return price * vat_rate
def _set_traffic_class_exp_state(self, v, load=False):
    """Setter method for traffic_class_exp_state, mapped from YANG variable
    /traffic_class_exp_state (container).

    If this variable is read-only (config: false) in the source YANG file,
    then _set_traffic_class_exp_state is considered as a private method.
    Backends looking to populate this variable should do so via calling
    thisObj._set_traffic_class_exp_state() directly.

    YANG Description: traffic_class_exp
    """
    if hasattr(v, "_utype"):
        # unwrap a typed proxy into its underlying value before validation
        v = v._utype(v)
    try:
        t = YANGDynClass(v, base=traffic_class_exp_state.traffic_class_exp_state, is_container='container', presence=False, yang_name="traffic-class-exp-state", rest_name="traffic-class-exp-state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'qos-traffic-class-exp', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-qos-operational', defining_module='brocade-qos-operational', yang_type='container', is_config=True)
    except (TypeError, ValueError):
        # re-raise with the generated-type description so callers can see
        # what the YANG model expected
        raise ValueError({'error-string': """traffic_class_exp_state must be of a type compatible with container""", 'defined-type': "container", 'generated-type': """YANGDynClass(base=traffic_class_exp_state.traffic_class_exp_state, is_container='container', presence=False, yang_name="traffic-class-exp-state", rest_name="traffic-class-exp-state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'qos-traffic-class-exp', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-qos-operational', defining_module='brocade-qos-operational', yang_type='container', is_config=True)""", })
    self.__traffic_class_exp_state = t
    if hasattr(self, '_set'):
        self._set()
def do_round(value, precision=0, method='common'):
    """Round the number to a given precision. The first
    parameter specifies the precision (default is ``0``), the
    second the rounding method:

    - ``'common'`` rounds either up or down
    - ``'ceil'`` always rounds up
    - ``'floor'`` always rounds down

    If you don't specify a method ``'common'`` is used.

    .. sourcecode:: jinja

        {{ 42.55|round }}
            -> 43.0
        {{ 42.55|round(1, 'floor') }}
            -> 42.5

    Note that even if rounded to 0 precision, a float is returned. If
    you need a real integer, pipe it through `int`:

    .. sourcecode:: jinja

        {{ 42.55|round|int }}
            -> 43
    """
    if method not in ('common', 'ceil', 'floor'):
        raise FilterArgumentError('method must be common, ceil or floor')
    if method == 'common':
        return round(value, precision)
    # ceil/floor: scale, apply, and scale back
    rounder = math.ceil if method == 'ceil' else math.floor
    scale = 10 ** precision
    return rounder(value * scale) / scale
def get_current_user(self, **params):
    """Fetch the authenticated user.

    https://developers.coinbase.com/api/v2#show-current-user
    """
    raw = self._get('v2', 'user', params=params)
    return self._make_api_object(raw, CurrentUser)
def _process_pathway_disease(self, limit):
    """Link KEGG pathway identifiers to the diseases associated with them.

    Since we model diseases as processes, we make a triple saying that
    the pathway may be causally upstream of or within the disease process.

    :param limit: maximum number of rows to process, or None for no limit
        (only applied outside test mode)
    :return: None
    """
    LOG.info("Processing KEGG pathways to disease ids")
    if self.test_mode:
        graph = self.testgraph
    else:
        graph = self.graph
    line_counter = 0
    raw = '/'.join((self.rawdir, self.files['pathway_disease']['file']))
    with open(raw, 'r', encoding="iso-8859-1") as csvfile:
        filereader = csv.reader(csvfile, delimiter='\t', quotechar='\"')
        for row in filereader:
            line_counter += 1
            (disease_id, kegg_pathway_num) = row
            # in test mode, restrict to the configured test pathways
            if self.test_mode and kegg_pathway_num not in self.test_ids['pathway']:
                continue
            disease_id = 'KEGG-' + disease_id
            # will look like KEGG-path:map04130 or KEGG-path:hsa04130
            pathway_id = 'KEGG-' + kegg_pathway_num
            graph.addTriple(pathway_id, self.globaltt['causally upstream of or within'], disease_id)
            if not self.test_mode and limit is not None and line_counter > limit:
                break
    return
def get_Sb(data):
    """Return the VGP scatter (Sb) for a data set.

    Each record supplies a VGP latitude; records with a nonzero
    ``average_k`` also contribute a within-site scatter correction based
    on their site latitude and number of samples.
    """
    scatter_sum = 0.
    count = 0.
    for rec in data:
        delta = 90. - abs(rec['vgp_lat'])
        if rec['average_k'] != 0:
            k = rec['average_k']
            lat_rad = rec['average_lat'] * np.pi / 180.  # latitude in radians
            Nsi = rec['average_nn']
            sin_sq = np.sin(lat_rad) ** 2
            K = k / (2. * (1. + 3. * sin_sq) / (5. - 3. * sin_sq))
            Sw = 81. / np.sqrt(K)
        else:
            # no within-site statistics available: no correction
            Sw, Nsi = 0, 1.
        scatter_sum += delta ** 2. - (Sw ** 2) / Nsi
        count += 1.
    return np.sqrt(scatter_sum / float(count - 1.))
def datasets_update(self, dataset_name, dataset_info):
    """Update the Dataset info.

    Args:
        dataset_name: the name of the dataset to update as a tuple of
            components.
        dataset_info: the Dataset resource with updated fields.
    """
    endpoint = Api._ENDPOINT + (Api._DATASETS_PATH % dataset_name)
    return datalab.utils.Http.request(
        endpoint, method='PUT', data=dataset_info, credentials=self._credentials)
def main():
    """App entry point: validate the interpreter, collect requirements and
    package metadata, and run setuptools' setup()."""
    # Check python version - this installer only supports Python 3
    if sys.version_info < (3, 0, 0):
        sys.stderr.write('You need python 3.0 or later to run this script!' + os.linesep)
        exit(1)
    # Generate requires - pick the platform-specific requirements file
    if platform.system() == 'Windows':
        requirements_file = 'windows.txt'
    else:
        requirements_file = 'base.txt'
    requirements_file = os.path.join('requirements', requirements_file)
    with open(requirements_file) as requirements_reader:
        requires = requirements_reader.read().splitlines()
    # Get package description from the README
    with open('README.rst') as readme_reader:
        long_description = readme_reader.read()
    # Describe installer
    settings = {
        'name': 'pyspectator',
        'version': '1.2.1',
        'author': 'Maxim Grischuk',
        'author_email': 'uzumaxy@gmail.com',
        'maintainer': 'Maxim Grischuk',
        'maintainer_email': 'uzumaxy@gmail.com',
        'packages': ['pyspectator'],
        'url': 'https://github.com/it-geeks-club/pyspectator',
        'download_url': 'https://github.com/it-geeks-club/pyspectator/releases',
        'license': 'BSD',
        'description': 'pyspectator is a Python cross-platform tool for ' 'monitoring OS resources.',
        'long_description': long_description,
        'install_requires': requires,
        'keywords': ['pyspectator', 'spectator', 'monitoring', 'tool', 'statistic', 'stats', 'computer', 'pc', 'server', 'mem', 'memory', 'network', 'net', 'io', 'processor', 'cpu', 'hdd', 'hard', 'disk', 'drive'],
        'platforms': 'Platform Independent',
        'package_data': {'pyspectator': ['LICENSE', 'README.rst']},
        'scripts': ['console.py'],
        'tests_require': ['pytest>=2.6.2'],
        'cmdclass': {'test': PyTest},
        'classifiers': [
            'Development Status :: 5 - Production/Stable',
            'Environment :: Console',
            'Environment :: MacOS X',
            'Environment :: Win32 (MS Windows)',
            'Intended Audience :: Developers',
            'Intended Audience :: Information Technology',
            'Intended Audience :: System Administrators',
            'License :: OSI Approved :: BSD License',
            'Natural Language :: English',
            'Operating System :: MacOS :: MacOS X',
            'Operating System :: Microsoft :: Windows :: Windows 7',
            'Operating System :: Microsoft :: Windows :: Windows NT/2000',
            'Operating System :: Microsoft :: Windows :: Windows Server 2003',
            'Operating System :: Microsoft :: Windows :: Windows Server 2008',
            'Operating System :: Microsoft :: Windows :: Windows Vista',
            'Operating System :: Microsoft :: Windows :: Windows XP',
            'Operating System :: Microsoft',
            'Operating System :: OS Independent',
            'Operating System :: POSIX :: BSD :: FreeBSD',
            'Operating System :: POSIX :: Linux',
            'Operating System :: POSIX :: SunOS/Solaris',
            'Operating System :: POSIX',
            'Programming Language :: C',
            'Programming Language :: Python :: 3',
            'Programming Language :: Python :: 3.0',
            'Programming Language :: Python :: 3.1',
            'Programming Language :: Python :: 3.2',
            'Programming Language :: Python :: 3.3',
            'Programming Language :: Python :: 3.4',
            'Programming Language :: Python :: 3.5',
            'Programming Language :: Python :: Implementation :: CPython',
            'Programming Language :: Python',
            'Topic :: Software Development :: Libraries :: Python Modules',
            'Topic :: Software Development :: Libraries',
            'Topic :: System :: Benchmark',
            'Topic :: System :: Hardware',
            'Topic :: System :: Monitoring',
            'Topic :: System :: Networking :: Monitoring',
            'Topic :: System :: Networking',
            'Topic :: System :: Systems Administration',
            'Topic :: Utilities',
        ],
    }
    setup(**settings)
def execute(self, stmt, args):
    """Execute a statement, returning a cursor. For internal use only."""
    self.logger(stmt, args)
    # NOTE(review): the cursor is returned after its context manager has
    # exited, same as the original - presumably the manager commits rather
    # than closes; confirm against self.cursor()'s implementation.
    with self.cursor() as cur:
        cur.execute(stmt, args)
        return cur
def cluster(dupes, threshold=.5, max_components=30000):
    '''Takes in a list of duplicate pairs and clusters them in to a
    list records that all refer to the same entity based on a given
    threshold

    Keyword arguments:
        threshold -- number between 0 and 1 (default is .5). lowering the
                     number will increase precision, raising it will
                     increase recall

    Yields (record_id_tuple, confidence_scores) pairs, one per cluster.
    '''
    distance_threshold = 1 - threshold
    dupe_sub_graphs = connected_components(dupes, max_components)
    for sub_graph in dupe_sub_graphs:
        if len(sub_graph) > 1:
            # hierarchical (centroid-linkage) clustering over the condensed
            # distance matrix of this connected component
            i_to_id, condensed_distances, N = condensedDistance(sub_graph)
            linkage = fastcluster.linkage(condensed_distances, method='centroid', preserve_input=True)
            # cut the dendrogram at the distance threshold
            partition = hcluster.fcluster(linkage, distance_threshold, criterion='distance')
            clusters = defaultdict(list)
            for i, cluster_id in enumerate(partition):
                clusters[cluster_id].append(i)
            for cluster in viewvalues(clusters):
                if len(cluster) > 1:
                    scores = confidences(cluster, condensed_distances, N)
                    yield tuple(i_to_id[i] for i in cluster), scores
        else:
            # single pair: emit it directly when its score clears the threshold
            (ids, score), = sub_graph
            if score > threshold:
                yield tuple(ids), (score,) * 2
def estimate_umbrella_sampling ( us_trajs , us_dtrajs , us_centers , us_force_constants , md_trajs = None , md_dtrajs = None , kT = None , maxiter = 10000 , maxerr = 1.0E-15 , save_convergence_info = 0 , estimator = 'wham' , lag = 1 , dt_traj = '1 step' , init = None , init_maxiter = 10000 , init_maxerr = 1.0E-8 , width = None , ** kwargs ) :
r"""This function acts as a wrapper for ` ` tram ( ) ` ` , ` ` dtram ( ) ` ` , ` ` mbar ( ) ` ` , and ` ` wham ( ) ` ` and
handles the calculation of bias energies ( ` ` bias ` ` ) and thermodynamic state trajectories
( ` ` ttrajs ` ` ) when the data comes from umbrella sampling and ( optional ) unbiased simulations .
Parameters
us _ trajs : list of N arrays , each of shape ( T _ i , d )
List of arrays , each having T _ i rows , one for each time step , and d columns where d is the
dimensionality of the subspace in which umbrella sampling was applied . Often d = 1 , and thus
us _ trajs will be a list of 1d - arrays .
us _ dtrajs : list of N int arrays , each of shape ( T _ i , )
The integers are indexes in 0 , . . . , n - 1 enumerating the n discrete states or the bins the
umbrella sampling trajectory is in at any time .
us _ centers : list of N floats or d - dimensional arrays of floats
List or array of N center positions . Each position must be a d - dimensional vector . For 1d
umbrella sampling , one can simply pass a list of centers , e . g . [ - 5.0 , - 4.0 , - 3.0 , . . . ] .
us _ force _ constants : list of N floats or d - or dxd - dimensional arrays of floats
The force constants used in the umbrellas , unit - less ( e . g . kT per squared length unit ) . For
multidimensional umbrella sampling , the force matrix must be used .
md _ trajs : list of M arrays , each of shape ( T _ i , d ) , optional , default = None
Unbiased molecular dynamics simulations ; format like us _ trajs .
md _ dtrajs : list of M int arrays , each of shape ( T _ i , )
The integers are indexes in 0 , . . . , n - 1 enumerating the n discrete states or the bins the
unbiased trajectory is in at any time .
kT : float or None , optional , default = None
Use this attribute if the supplied force constants are NOT unit - less ; kT must have the same
energy unit as the force constants .
maxiter : int , optional , default = 10000
The maximum number of self - consistent iterations before the estimator exits unsuccessfully .
maxerr : float , optional , default = 1.0E - 15
Convergence criterion based on the maximal free energy change in a self - consistent
iteration step .
save _ convergence _ info : int , optional , default = 0
Every save _ convergence _ info iteration steps , store the actual increment
and the actual loglikelihood ; 0 means no storage .
estimator : str , optional , default = ' wham '
Specify one of the available estimators
| ' wham ' : use WHAM
| ' mbar ' : use MBAR
| ' dtram ' : use the discrete version of TRAM
| ' tram ' : use TRAM
lag : int or list of int , optional , default = 1
Integer lag time at which transitions are counted . Providing a list of lag times will
trigger one estimation per lag time .
dt _ traj : str , optional , default = ' 1 step '
Description of the physical time corresponding to the lag . May be used by analysis
algorithms such as plotting tools to pretty - print the axes . By default ' 1 step ' , i . e .
there is no physical time unit . Specify by a number , whitespace and unit . Permitted
units are ( * is an arbitrary string ) :
| ' fs ' , ' femtosecond * '
| ' ps ' , ' picosecond * '
| ' ns ' , ' nanosecond * '
| ' us ' , ' microsecond * '
| ' ms ' , ' millisecond * '
| ' s ' , ' second * '
init : str , optional , default = None
Use a specific initialization for the self - consistent iteration :
| None : use a hard - coded guess for free energies and Lagrangian multipliers
| ' wham ' : perform a short WHAM estimate to initialize the free energies ( only with dtram )
| ' mbar ' : perform a short MBAR estimate to initialize the free energies ( only with tram )
init _ maxiter : int , optional , default = 10000
The maximum number of self - consistent iterations during the initialization .
init _ maxerr : float , optional , default = 1.0E - 8
Convergence criterion for the initialization .
width : array - like of float , optional , default = None
Specify periodicity for individual us _ traj dimensions . Each positive entry will make the
corresponding feature periodic and use the given value as width . None / zero values will be
treated as non - periodic .
* * kwargs : dict , optional
You can use this to pass estimator - specific named parameters to the chosen estimator , which
are not already coverd by ` ` estimate _ umbrella _ sampling ( ) ` ` .
Returns
A : class : ` MultiThermModel < pyemma . thermo . models . multi _ therm . MultiThermModel > ` or : class : ` MEMM < pyemma . thermo . models . memm . MEMM > ` object or list thereof
The requested estimator / model object , i . e . , WHAM , MBAR , DTRAM or TRAM . If multiple lag times
are given , a list of objects is returned ( one MEMM per lag time ) .
Example
We look at a 1D umbrella sampling simulation with two umbrellas at 1.1 and 1.3 on the reaction
coordinate with spring constant of 1.0 ; additionally , we have two unbiased simulations .
We start with a joint clustering and use TRAM for the estimation :
> > > from pyemma . coordinates import cluster _ regspace as regspace
> > > from pyemma . thermo import estimate _ umbrella _ sampling as estimate _ us
> > > import numpy as np
> > > us _ centers = [ 1.1 , 1.3]
> > > us _ force _ constants = [ 1.0 , 1.0]
> > > us _ trajs = [ np . array ( [ 1.0 , 1.1 , 1.2 , 1.1 , 1.0 , 1.1 ] ) . reshape ( ( - 1 , 1 ) ) , np . array ( [ 1.3 , 1.2 , 1.3 , 1.4 , 1.4 , 1.3 ] ) . reshape ( ( - 1 , 1 ) ) ]
> > > md _ trajs = [ np . array ( [ 0.9 , 1.0 , 1.1 , 1.2 , 1.3 , 1.4 ] ) . reshape ( ( - 1 , 1 ) ) , np . array ( [ 1.5 , 1.4 , 1.3 , 1.4 , 1.4 , 1.5 ] ) . reshape ( ( - 1 , 1 ) ) ]
> > > cluster = regspace ( data = us _ trajs + md _ trajs , max _ centers = 10 , dmin = 0.15)
> > > us _ dtrajs = cluster . dtrajs [ : 2]
> > > md _ dtrajs = cluster . dtrajs [ 2 : ]
> > > centers = cluster . clustercenters
> > > tram = estimate _ us ( us _ trajs , us _ dtrajs , us _ centers , us _ force _ constants , md _ trajs = md _ trajs , md _ dtrajs = md _ dtrajs , estimator = ' tram ' , lag = 1)
> > > tram . f # doctest : + ELLIPSIS
array ( [ 0.63 . . . , 1.60 . . . , 1.31 . . . ] )
See : class : ` MultiThermModel < pyemma . thermo . models . multi _ therm . MultiThermModel > `
or : class : ` MEMM < pyemma . thermo . models . memm . MEMM > ` for a full documentation .
. . autoclass : : pyemma . thermo . models . multi _ therm . MultiThermModel
: members :
: undoc - members :
. . rubric : : Methods
. . autoautosummary : : pyemma . thermo . models . multi _ therm . MultiThermModel
: methods :
. . rubric : : Attributes
. . autoautosummary : : pyemma . thermo . models . multi _ therm . MultiThermModel
: attributes :
. . autoclass : : pyemma . thermo . models . memm . MEMM
: members :
: undoc - members :
. . rubric : : Methods
. . autoautosummary : : pyemma . thermo . models . memm . MEMM
: methods :
. . rubric : : Attributes
. . autoautosummary : : pyemma . thermo . models . memm . MEMM
: attributes :""" | from . util import get_umbrella_sampling_data as _get_umbrella_sampling_data
# sanity checks
if estimator not in [ 'wham' , 'mbar' , 'dtram' , 'tram' ] :
raise ValueError ( "unsupported estimator: %s" % estimator )
if not isinstance ( us_trajs , ( list , tuple ) ) :
raise ValueError ( "The parameter us_trajs must be a list of numpy.ndarray objects" )
if not isinstance ( us_centers , ( list , tuple ) ) :
raise ValueError ( "The parameter us_centers must be a list of floats or numpy.ndarray objects" )
if not isinstance ( us_force_constants , ( list , tuple ) ) :
raise ValueError ( "The parameter us_force_constants must be a list of floats or numpy.ndarray objects" )
if len ( us_trajs ) != len ( us_centers ) :
raise ValueError ( "Unmatching number of umbrella sampling trajectories and centers: %d!=%d" % ( len ( us_trajs ) , len ( us_centers ) ) )
if len ( us_trajs ) != len ( us_force_constants ) :
raise ValueError ( "Unmatching number of umbrella sampling trajectories and force constants: %d!=%d" % ( len ( us_trajs ) , len ( us_force_constants ) ) )
if len ( us_trajs ) != len ( us_dtrajs ) :
raise ValueError ( "Number of continuous and discrete umbrella sampling trajectories does not " + "match: %d!=%d" % ( len ( us_trajs ) , len ( us_dtrajs ) ) )
i = 0
for traj , dtraj in zip ( us_trajs , us_dtrajs ) :
if traj . shape [ 0 ] != dtraj . shape [ 0 ] :
raise ValueError ( "Lengths of continuous and discrete umbrella sampling trajectories with " + "index %d does not match: %d!=%d" % ( i , len ( us_trajs ) , len ( us_dtrajs ) ) )
i += 1
if md_trajs is not None :
if not isinstance ( md_trajs , ( list , tuple ) ) :
raise ValueError ( "The parameter md_trajs must be a list of numpy.ndarray objects" )
if md_dtrajs is None :
raise ValueError ( "You have provided md_trajs, but md_dtrajs is None" )
if md_dtrajs is None :
md_dtrajs = [ ]
else :
if md_trajs is None :
raise ValueError ( "You have provided md_dtrajs, but md_trajs is None" )
if len ( md_trajs ) != len ( md_dtrajs ) :
raise ValueError ( "Number of continuous and discrete unbiased trajectories does not " + "match: %d!=%d" % ( len ( md_trajs ) , len ( md_dtrajs ) ) )
i = 0
for traj , dtraj in zip ( md_trajs , md_dtrajs ) :
if traj . shape [ 0 ] != dtraj . shape [ 0 ] :
raise ValueError ( "Lengths of continuous and discrete unbiased trajectories with " + "index %d does not match: %d!=%d" % ( i , len ( md_trajs ) , len ( md_dtrajs ) ) )
i += 1
# data preparation
ttrajs , btrajs , umbrella_centers , force_constants , unbiased_state = _get_umbrella_sampling_data ( us_trajs , us_centers , us_force_constants , md_trajs = md_trajs , kT = kT , width = width )
estimator_obj = None
# estimation
if estimator == 'wham' :
estimator_obj = wham ( ttrajs , us_dtrajs + md_dtrajs , _get_averaged_bias_matrix ( btrajs , us_dtrajs + md_dtrajs ) , maxiter = maxiter , maxerr = maxerr , save_convergence_info = save_convergence_info , dt_traj = dt_traj )
elif estimator == 'mbar' :
allowed_keys = [ 'direct_space' ]
parsed_kwargs = dict ( [ ( i , kwargs [ i ] ) for i in allowed_keys if i in kwargs ] )
estimator_obj = mbar ( ttrajs , us_dtrajs + md_dtrajs , btrajs , maxiter = maxiter , maxerr = maxerr , save_convergence_info = save_convergence_info , dt_traj = dt_traj , ** parsed_kwargs )
elif estimator == 'dtram' :
allowed_keys = [ 'count_mode' , 'connectivity' ]
parsed_kwargs = dict ( [ ( i , kwargs [ i ] ) for i in allowed_keys if i in kwargs ] )
estimator_obj = dtram ( ttrajs , us_dtrajs + md_dtrajs , _get_averaged_bias_matrix ( btrajs , us_dtrajs + md_dtrajs ) , lag , unbiased_state = unbiased_state , maxiter = maxiter , maxerr = maxerr , save_convergence_info = save_convergence_info , dt_traj = dt_traj , init = init , init_maxiter = init_maxiter , init_maxerr = init_maxerr , ** parsed_kwargs )
elif estimator == 'tram' :
allowed_keys = [ 'count_mode' , 'connectivity' , 'connectivity_factor' , 'nn' , 'direct_space' , 'N_dtram_accelerations' , 'equilibrium' , 'overcounting_factor' , 'callback' ]
parsed_kwargs = dict ( [ ( i , kwargs [ i ] ) for i in allowed_keys if i in kwargs ] )
estimator_obj = tram ( ttrajs , us_dtrajs + md_dtrajs , btrajs , lag , unbiased_state = unbiased_state , maxiter = maxiter , maxerr = maxerr , save_convergence_info = save_convergence_info , dt_traj = dt_traj , init = init , init_maxiter = init_maxiter , init_maxerr = init_maxerr , ** parsed_kwargs )
# adding thermodynamic state information and return results
try :
estimator_obj . umbrella_centers = umbrella_centers
estimator_obj . force_constants = force_constants
except AttributeError :
for obj in estimator_obj :
obj . umbrella_centers = umbrella_centers
obj . force_constants = force_constants
return estimator_obj |
def get_api ( i ) :
"""Input : {
( path ) - path to module , if comes from access function
or
( module _ uoa ) - if comes from CMD
( func ) - func for API
( out ) - output
Output : {
return - return code = 0 , if successful
> 0 , if error
( error ) - error text if return > 0
title - title string
desc - original description
module - module name
api - api as string
line - line in found module""" | p = i . get ( 'path' , '' )
f = i . get ( 'func' , '' )
o = i . get ( 'out' , '' )
muoa = i . get ( 'module_uoa' , '' )
t = ''
# last function description ( if redirect to another API )
t_orig = ''
# original function description
l = 0
# API line
a = ''
# accumulated API
# Resolve the module path from its UOA when no explicit path was given.
if p == '' and muoa != '' :
rx = load ( { 'module_uoa' : cfg [ 'module_name' ] , 'data_uoa' : muoa } )
if rx [ 'return' ] > 0 :
return rx
p = rx [ 'path' ]
# Fall back to the CK kernel file when neither path nor module UOA is set.
if p == '' :
p1 = os . path . dirname ( os . path . dirname ( work [ 'dir_default_repo' ] ) )
p = os . path . join ( p1 , cfg [ 'file_kernel_py' ] )
if not os . path . isfile ( p ) :
return { 'return' : 1 , 'error' : 'kernel not found in ' + p }
else :
p = os . path . join ( p , 'module.py' )
# Load the module source and scan it line by line for the function definition.
if os . path . isfile ( p ) :
rx = load_text_file ( { 'text_file' : p , 'split_to_list' : 'yes' } )
if rx [ 'return' ] > 0 :
return rx
lst = rx [ 'lst' ]
k = - 1
# Manual index loop (not a for) because the scan restarts from the top
# when a docstring redirects to another function ( k is reset to - 1 ) .
while k < len ( lst ) - 1 :
k += 1
q = lst [ k ]
if q . find ( 'def ' + f + '(' ) >= 0 or q . find ( 'def ' + f + ' (' ) >= 0 or q . find ( 'def\t' + f + '(' ) >= 0 or q . find ( 'def\t' + f + ' (' ) >= 0 :
# Walk upwards and collect the '#' comment block immediately above
# the definition ; it serves as the function description .
j = k - 1
if j >= 0 and lst [ j ] . strip ( ) == '' :
j -= 1
x = 'x'
while j >= 0 and x != '' and not x . startswith ( '###' ) :
x = lst [ j ] . strip ( )
if x != '' and not x . startswith ( '###' ) :
if x == '#' :
x = ' '
elif x . startswith ( '# ' ) :
x = x [ 2 : ]
t = x + '\n' + t
j -= 1
if t != '' :
l = j + 2
if t_orig == '' :
t_orig = t
# Find starting point of an API
j = k + 1
if j < len ( lst ) and lst [ j ] . find ( '"""' ) >= 0 :
j += 1
# Check if redirect to another function
restart = False
if j < len ( lst ) :
x = lst [ j ] . strip ( )
# A docstring of the form: see " other _ func " redirects the API lookup .
if x . lower ( ) . startswith ( "see" ) :
z1 = x . find ( '"' )
if z1 > 0 :
z2 = x . find ( '"' , z1 + 1 )
if z2 > 0 :
f = x [ z1 + 1 : z2 ]
# new function name
k = - 1
restart = True
# restart search for new function
if not restart :
# Accumulate docstring lines until the closing triple quote .
x = ''
while x . find ( '"""' ) < 0 and j < len ( lst ) :
x = lst [ j ]
if x . find ( '"""' ) < 0 :
a += x + '\n'
j += 1
# Neither a description nor an API body was found for the function .
if t == '' and a == '' :
return { 'return' : 1 , 'error' : 'function not found' }
dd = t_orig . strip ( )
# Render the result on the console or as simple HTML , depending on 'out' .
if o == 'con' :
out ( 'Description: ' + dd )
out ( '' )
out ( 'Module: ' + p )
out ( '' )
out ( 'Line: ' + str ( l ) )
out ( '' )
out ( 'API:' )
out ( a )
elif o == 'web' :
out ( '<B>Function:</B> ' + t + '<BR>' )
out ( '<BR>' )
out ( '<B>Module:</B> ' + p + '<BR>' )
out ( '<BR>' )
out ( '<B>API:</B><BR>' )
out ( '<pre>' )
out ( a )
out ( '</pre><BR>' )
return { 'return' : 0 , 'title' : t , 'desc' : dd , 'module' : p , 'api' : a , 'line' : l }
def _inputLine(self, prompt, **kwargs):
    """Draw *prompt* on the bottom screen line and return the text the user enters."""
    self.inInput = True
    # The right-status width constrains how much room the prompt gets.
    right_width = self.drawRightStatus(self.scr, self.sheets[0])
    bottom_row = self.windowHeight - 1
    prompt_width = clipdraw(self.scr, bottom_row, 0, prompt, 0,
                            w=self.windowWidth - right_width - 1)
    entered = self.editText(bottom_row, prompt_width,
                            self.windowWidth - prompt_width - right_width - 2,
                            attr=colors.color_edit_cell,
                            unprintablechar=options.disp_unprintable,
                            truncchar=options.disp_truncator,
                            **kwargs)
    self.inInput = False
    return entered
def is_mass_balanced(reaction):
    """Confirm that a reaction is mass balanced."""
    totals = defaultdict(int)
    for met, stoich in iteritems(reaction.metabolites):
        # A metabolite without any elemental formula makes the balance
        # undecidable, so the reaction cannot be confirmed as balanced.
        if met.elements is None or len(met.elements) == 0:
            return False
        for element, count in iteritems(met.elements):
            totals[element] += stoich * count
    # Balanced means every element nets out to exactly zero.
    return all(net == 0 for net in itervalues(totals))
def match_file(pattern, filename):
    '''Match a pattern against every line of a file.

    Returns a REX object holding all matches found in the file, or None
    when no pattern is given or the file is empty.
    '''
    # Validate user data.
    if pattern is None:
        return None
    if os.stat(filename).st_size == 0:
        return None
    rexobj = REX(pattern, filename)
    rexpatstr = reformat_pattern(pattern)
    rexpat = re.compile(rexpatstr)
    rexobj.rex_patternstr = rexpatstr
    rexobj.rex_pattern = rexpat
    # Context manager guarantees the handle is closed even if read() raises
    # (the original open/read/close could leak the file descriptor).
    with open(filename, 'r') as sfile:
        data = sfile.read()
    # enumerate replaces the hand-rolled 1-based line counter.
    for line_count, line in enumerate(data.splitlines(), start=1):
        mobj = rexpat.match(line)
        if mobj:
            populate_resobj(rexobj, mobj, line_count)
    return rexobj
def remove(self, key):
    """Clear a column value in the object.

    Note that this happens immediately: it does not wait for save() to
    be called.
    """
    # Issue the delete operation against the server first, then drop the
    # attribute locally so the in-memory object stays in sync.
    delete_op = {key: {'__op': 'Delete'}}
    self.__class__.PUT(self._absolute_url, **delete_op)
    del self.__dict__[key]
def add_untagged_ok(self, text: MaybeBytes, code: Optional[ResponseCode] = None) -> None:
    """Add an untagged ``OK`` response.

    See Also:
        :meth:`.add_untagged`, :class:`ResponseOk`

    Args:
        text: The response text.
        code: Optional response code.
    """
    # Untagged responses carry the '*' tag per the IMAP wire format.
    self.add_untagged(ResponseOk(b'*', text, code))
def match(self, location):
    """Check if the given location "matches".

    :param location: The :class:`Location` object to try to match.
    :returns: :data:`True` if the two locations are on the same system and
              the :attr:`directory` can be matched as a filename pattern or
              a literal match on the normalized pathname.
    """
    if self.ssh_alias != location.ssh_alias:
        # Never match locations on other systems.
        return False
    elif self.have_wildcards:
        # Match filename patterns using fnmatch().
        return fnmatch.fnmatch(location.directory, self.directory)
    else:
        # Compare normalized directory pathnames. Use dedicated local
        # names; the original rebound `self` to a string, which shadows
        # the instance reference and invites subtle bugs.
        ours = os.path.normpath(self.directory)
        theirs = os.path.normpath(location.directory)
        return ours == theirs
def mtf_image_transformer_cifar_mp_4x ( ) :
"""Model-parallel CIFAR parameters (4-way model split, 8-way batch split).""" | hparams = mtf_image_transformer_base_cifar ( )
# NOTE(review): the original docstring said "Data parallel", but mesh_shape
# splits the model dimension 4 ways -- consistent with the _mp_4x name.
hparams . mesh_shape = "model:4;batch:8"
# d_ff and heads are laid out along the model dimension of the mesh.
hparams . layout = "batch:batch;d_ff:model;heads:model"
hparams . batch_size = 32
hparams . num_heads = 8
hparams . d_ff = 8192
return hparams
def sync_hue_db(self, *servers):
    """Synchronize the Hue server's database.

    @param servers: Name of Hue Server roles to synchronize. Not required starting with API v10.
    @return: List of submitted commands.
    """
    # Before API v10 the sync had to be issued per Hue Server role;
    # from v10 on a single service-level command suffices.
    if self._get_resource_root().version < 10:
        return self._role_cmd('hueSyncDb', servers)
    return self._cmd('hueSyncDb', api_version=10)
def _concat(self, egdfs):
    """Concatenate evaluated group dataframes.

    Parameters
    ----------
    egdfs : iterable
        Evaluated dataframes

    Returns
    -------
    edata : pandas.DataFrame
        Evaluated data
    """
    frames = list(egdfs)
    combined = pd.concat(frames, axis=0, ignore_index=False, copy=False)
    # groupby can mixup the rows. We try to maintain the original
    # order, but we can only do that if the result has a one to
    # one relationship with the original
    restorable = (self.keep_index
                  and not any(combined.index.duplicated())
                  and len(combined.index) == len(self.data.index))
    if restorable:
        combined = combined.sort_index()
    else:
        combined.reset_index(drop=True, inplace=True)
    # Maybe this should happen in the verb functions
    if self.keep_groups and self.groups:
        combined = GroupedDataFrame(combined, groups=self.groups)
    return combined
def file_resolve(backend, filepath):
    """Mark a conflicted file as resolved, so that a merge can be completed"""
    # Resolution only makes sense from inside a recipe checkout.
    if DKRecipeDisk.find_recipe_name() is None:
        raise click.ClickException('You must be in a recipe folder.')
    click.secho("%s - Resolving conflicts" % get_datetime())
    for path in filepath:
        # Fail fast on a bad path before contacting the backend.
        if not os.path.exists(path):
            raise click.ClickException('%s does not exist' % path)
        check_and_print(DKCloudCommandRunner.resolve_conflict(path))
def callback_url(self, request):
    """the url to go back after the external service call

    :param request: contains the current session
    :type request: dict
    :rtype: string
    """
    # 'FooService' -> 'foo', which names the 'foo_callback' URL route.
    provider = self.service.split('Service')[1].lower()
    target = reverse('{service}_callback'.format(service=provider))
    return '{0}://{1}{2}'.format(request.scheme, request.get_host(), target)
def is_purrlog(path):
    """Check whether *path* refers to a valid purrlog.

    The path must be an existing directory containing either at least one
    subdirectory named entry-YYYYMMDD-HHMMSS, or the file "dirconfig".
    """
    if not os.path.isdir(path):
        return False
    # entry-<8 digits>-<6 digits>, i.e. entry-YYYYMMDD-HHMMSS
    entry_glob = os.path.join(path, "entry-" + "[0-9]" * 8 + "-" + "[0-9]" * 6)
    entry_dirs = [p for p in glob.glob(entry_glob) if os.path.isdir(p)]
    if entry_dirs:
        return True
    return os.path.exists(os.path.join(path, "dirconfig"))
def loss ( args ) :
"""% prog loss a . b . i1 . blocks [ a . b - genomic . blast ]
Extract likely gene loss candidates between genome a and b .""" | p = OptionParser ( loss . __doc__ )
p . add_option ( "--bed" , default = False , action = "store_true" , help = "Genomic BLAST is in bed format [default: %default]" )
p . add_option ( "--gdist" , default = 20 , type = "int" , help = "Gene distance [default: %default]" )
p . add_option ( "--bdist" , default = 20000 , type = "int" , help = "Base pair distance [default: %default]" )
p . set_beds ( )
opts , args = p . parse_args ( args )
if len ( args ) not in ( 1 , 2 ) :
sys . exit ( not p . print_help ( ) )
# args: the blocks file (required) and an optional genomic BLAST file.
blocksfile = args [ 0 ]
emptyblast = ( len ( args ) == 1 )
# Without a BLAST file, create an empty placeholder so the code below
# always has a file to read; it is removed again at the end.
if emptyblast :
genomicblast = "empty.blast"
sh ( "touch {0}" . format ( genomicblast ) )
else :
genomicblast = args [ 1 ]
gdist , bdist = opts . gdist , opts . bdist
qbed , sbed , qorder , sorder , is_self = check_beds ( blocksfile , p , opts )
blocks = [ ]
fp = open ( blocksfile )
genetrack = { }
proxytrack = { }
# Blocks file: two columns, query gene -> anchored subject gene (or '.').
for row in fp :
a , b = row . split ( )
genetrack [ a ] = b
blocks . append ( ( a , b ) )
# Group consecutive rows that share the same subject (last column).
data = [ ]
for key , rows in groupby ( blocks , key = lambda x : x [ - 1 ] ) :
rows = list ( rows )
data . append ( ( key , rows ) )
imax = len ( data ) - 1
# For each run of unanchored queries ('.') flanked by two anchored runs,
# derive a proxy genomic region spanning the flanking subjects (padded by
# bdist), provided the flanks are close enough (same seqid, <= gdist genes).
for i , ( key , rows ) in enumerate ( data ) :
if i == 0 or i == imax :
continue
if key != '.' :
continue
before , br = data [ i - 1 ]
after , ar = data [ i + 1 ]
bi , bx = sorder [ before ]
ai , ax = sorder [ after ]
dist = abs ( bi - ai )
if bx . seqid != ax . seqid or dist > gdist :
continue
start , end = range_minmax ( ( ( bx . start , bx . end ) , ( ax . start , ax . end ) ) )
start , end = max ( start - bdist , 1 ) , end + bdist
proxy = ( bx . seqid , start , end )
for a , b in rows :
proxytrack [ a ] = proxy
# Refine each proxy region with the genomic BLAST hits: tag "S" when a hit
# overlaps the proxy region, "NS" otherwise; keep the best hit's region.
tags = { }
if opts . bed :
bed = Bed ( genomicblast , sorted = False )
# NOTE: rebinds 'key' from the grouping loop above; harmless here.
key = lambda x : gene_name ( x . accn . rsplit ( "." , 1 ) [ 0 ] )
for query , bb in groupby ( bed , key = key ) :
bb = list ( bb )
if query not in proxytrack :
continue
proxy = proxytrack [ query ]
tag = "NS"
best_b = bb [ 0 ]
for b in bb :
hsp = ( b . seqid , b . start , b . end )
if range_overlap ( proxy , hsp ) :
tag = "S"
best_b = b
break
hsp = ( best_b . seqid , best_b . start , best_b . end )
proxytrack [ query ] = hsp
tags [ query ] = tag
else :
# Same refinement, but reading standard BLAST output instead of BED.
blast = Blast ( genomicblast )
for query , bb in blast . iter_hits ( ) :
bb = list ( bb )
query = gene_name ( query )
if query not in proxytrack :
continue
proxy = proxytrack [ query ]
tag = "NS"
best_b = bb [ 0 ]
for b in bb :
hsp = ( b . subject , b . sstart , b . sstop )
if range_overlap ( proxy , hsp ) :
tag = "S"
best_b = b
break
hsp = ( best_b . subject , best_b . sstart , best_b . sstop )
proxytrack [ query ] = hsp
tags [ query ] = tag
# Emit one line per query gene: seqid, accession, [tag]region
# where the tag is "[S]", "[NS]" or "[NF]" (no hit found).
for b in qbed :
accn = b . accn
target_region = genetrack [ accn ]
if accn in proxytrack :
target_region = region_str ( proxytrack [ accn ] )
if accn in tags :
ptag = "[{0}]" . format ( tags [ accn ] )
else :
ptag = "[NF]"
target_region = ptag + target_region
print ( "\t" . join ( ( b . seqid , accn , target_region ) ) )
# Clean up the placeholder BLAST file created above.
if emptyblast :
sh ( "rm -f {0}" . format ( genomicblast ) )
def search_lxc_bridges():
    '''
    Search which bridges are potentially available as LXC bridges

    CLI Example:

    .. code-block:: bash

        salt '*' lxc.search_lxc_bridges
    '''
    bridges = __context__.get('lxc.bridges', None)
    # A falsy cache means either this is the first call or lxc was not yet
    # installed the first time around, so (re)compute the candidate list.
    if not bridges:
        candidates = set([DEFAULT_BR])
        active = set()
        try:
            ret = __salt__['cmd.run_all']('brctl show')
            # Skip the header line; continuation lines start with a space.
            for line in ret['stdout'].splitlines()[1:]:
                if not line.startswith(' '):
                    active.add(line.split()[0].strip())
        except (SaltInvocationError, CommandExecutionError):
            pass
        for ifc, ip in six.iteritems(__grains__.get('ip_interfaces', {})):
            if ifc in active:
                candidates.add(ifc)
            elif os.path.exists(
                    '/sys/devices/virtual/net/{0}/bridge'.format(ifc)):
                candidates.add(ifc)

        # Prefer interfaces with 'lxc' in the name, then 'br0', then the rest.
        def _bridge_rank(name):
            if 'lxc' in name:
                return '{0}_{1}'.format('a', name)
            if name == 'br0':
                return '{0}_{1}'.format('c', name)
            return '{0}_{1}'.format('z', name)

        bridges = sorted(candidates, key=_bridge_rank)
    __context__['lxc.bridges'] = bridges
    return bridges
def get_raw_data(self, times=5):
    """Take readings until ``times`` valid values have been collected.

    :param times: how many measures to collect
    :type times: int
    :return: the list of collected readings
    :rtype: list
    """
    self._validate_measure_count(times)
    readings = []
    while len(readings) < times:
        value = self._read()
        # False and -1 mark failed reads; keep sampling until enough good
        # values arrive. (Equality test is deliberate: 0 == False is also
        # treated as a failed read, matching the original semantics.)
        if value not in (False, -1):
            readings.append(value)
    return readings
def read_in_config(self):
    """Vyper will discover and load the configuration file from disk
    and key/value stores, searching in one of the defined paths.
    """
    log.info("Attempting to read in config file")
    config_type = self._get_config_type()
    if config_type not in constants.SUPPORTED_EXTENSIONS:
        raise errors.UnsupportedConfigError(config_type)
    # Read the whole file, then reset and repopulate the config dict.
    with open(self._get_config_file()) as handle:
        contents = handle.read()
    self._config = {}
    return self._unmarshall_reader(contents, self._config)
def extract_frame(self):
    """Pulls one complete frame off the buffer and returns it.

    If there is no complete message in the buffer, returns None.

    Note that the buffer can contain more than one message. You
    should therefore call this method in a loop (or use iterator
    functionality exposed by class) until None returned.

    @return: The next complete frame in the buffer.
    @rtype: L{stomp.frame.Frame}
    """
    # (The large block of commented-out legacy byte-level parsing that
    # previously lived here was dead code; Frame.from_buffer() is the
    # live implementation and has been for a while.)
    # Position the buffer at the first unconsumed byte before parsing.
    self._buffer.seek(self._pointer, 0)
    try:
        frame = Frame.from_buffer(self._buffer)
        # Remember how far we consumed so the next call resumes here.
        self._pointer = self._buffer.tell()
    except (IncompleteFrame, EmptyBuffer):
        # Not enough data yet: rewind so the partial frame is re-read
        # intact once more bytes arrive.
        self._buffer.seek(self._pointer, 0)
        return None
    return frame
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.