signature
stringlengths 29
44.1k
| implementation
stringlengths 0
85.2k
|
|---|---|
def get_body_from_message(message, maintype, subtype):
    """Collect and decode the text of every message part whose content type
    matches ``maintype``/``subtype``, skipping attachment parts.

    Parts are decoded with their declared charset ('replace' on errors);
    unknown or broken charsets fall back to ASCII with replacement.
    """
    pieces = []
    for part in message.walk():
        disposition = part.get('content-disposition', '')
        if disposition.startswith('attachment;'):
            continue
        if (part.get_content_maintype(), part.get_content_subtype()) != (maintype, subtype):
            continue
        charset = part.get_content_charset()
        payload = part.get_payload(decode=True)
        if not charset:
            # No declared charset: interpret conservatively as ASCII.
            pieces.append(payload.decode('ascii', 'replace'))
            continue
        try:
            pieces.append(payload.decode(charset, 'replace'))
        except LookupError:
            pieces.append(payload.decode('ascii', 'replace'))
            logger.warning('Unknown encoding %s encountered while decoding '
                           'text payload. Interpreting as ASCII with '
                           'replacement, but some data may not be '
                           'represented as the sender intended.', charset)
        except ValueError:
            pieces.append(payload.decode('ascii', 'replace'))
            logger.warning('Error encountered while decoding text '
                           'payload from an incorrectly-constructed '
                           'e-mail; payload was converted to ASCII with '
                           'replacement, but some data may not be '
                           'represented as the sender intended.')
    return six.text_type('').join(pieces)
|
def vector_analysis(vector, coordinates, elements_vdw, increment=1.0):
    """Analyse a sampling vector's path for window analysis purpose.

    Walks along ``vector`` in steps of length ``increment`` and, at each
    sampled point, measures the clearance to the nearest atom surface
    (centre distance minus van der Waals radius).

    :param vector: displacement vector to sample along (numpy array).
    :param coordinates: atom coordinates -- assumes shape (n_atoms, n_dims);
        TODO confirm against callers.
    :param elements_vdw: van der Waals radii matching ``coordinates``.
    :param increment: sampling step length along the vector.
    :return: ``np.array([dist, diameter, *narrowest_point, *vector])`` when
        the entire path is unobstructed; otherwise falls through and
        returns ``None`` implicitly.
    """
    # Calculate number of chunks if vector length is divided by increment.
    # NOTE(review): a vector shorter than `increment` gives chunks == 0 and
    # the next line divides by zero -- presumably callers guarantee longer
    # vectors; confirm.
    chunks = int(np.linalg.norm(vector) // increment)
    # Create a single chunk (one step along the vector).
    chunk = vector / chunks
    # Calculate set of points on vector's path every increment
    # (including the origin).
    vector_pathway = np.array([chunk * i for i in range(chunks + 1)])
    # Minimum surface clearance at each sampled point: distance from the
    # point to every atom centre minus that atom's vdW radius.
    analysed_vector = np.array([
        np.amin(euclidean_distances(coordinates, i.reshape(1, -1)) - elements_vdw)
        for i in vector_pathway
    ])
    if all(i > 0 for i in analysed_vector):
        # Path never intersects an atom; report its narrowest point.
        pos = np.argmin(analysed_vector)
        # As first argument we need to give the distance from the origin.
        dist = np.linalg.norm(chunk * pos)
        # NOTE(review): clearance is doubled, presumably to express a window
        # diameter rather than a radius -- confirm intent.
        return np.array([dist, analysed_vector[pos] * 2, *chunk * pos, *vector])
|
def to_transformation_matrix(translation, orientation_matrix=None):
    """Converts a tuple (translation_vector, orientation_matrix) to a 4x4
    homogeneous transformation matrix.

    Parameters
    ----------
    translation : numpy.array
        The translation of your frame presented as a 3D vector.
    orientation_matrix : numpy.array, optional
        The orientation of your frame, presented as a 3x3 matrix.
        Defaults to a 3x3 zero matrix (the historical default of this
        function); pass ``np.eye(3)`` for a pure translation.

    Returns
    -------
    numpy.array
        4x4 matrix with ``orientation_matrix`` in the upper-left 3x3 block
        and ``translation`` in the last column.
    """
    # Avoid a mutable default argument (a single shared array across all
    # calls); build the default inside the call instead. Behavior for
    # callers is unchanged.
    if orientation_matrix is None:
        orientation_matrix = np.zeros((3, 3))
    matrix = np.eye(4)
    matrix[:-1, :-1] = orientation_matrix
    matrix[:-1, -1] = translation
    return matrix
|
def _is_kvm_hyper():
    '''Returns a bool whether or not this node is a KVM hypervisor'''
    try:
        with salt.utils.files.fopen('/proc/modules') as fp_:
            loaded_modules = salt.utils.stringutils.to_unicode(fp_.read())
    except IOError:
        # No /proc/modules? Are we on Windows? Or Solaris?
        return False
    if 'kvm_' not in loaded_modules:
        return False
    # KVM module is loaded; also require a running libvirtd process.
    return 'libvirtd' in __salt__['cmd.run'](__grains__['ps'])
|
def setCurrentRule(self, rule):
    """Sets the current query rule for this widget, updating its widget
    editor if the types do not match.

    :param rule | <QueryRule> || None
    """
    if self.currentRule() == rule:
        return
    self._currentRule = rule
    # Remember which operator the user had selected so it can be restored
    # when the new rule also supports it.
    previous_operator = self.uiOperatorDDL.currentText()
    self.uiOperatorDDL.blockSignals(True)
    self.uiOperatorDDL.clear()
    if rule:
        self.uiOperatorDDL.addItems(rule.operators())
        restored = self.uiOperatorDDL.findText(previous_operator)
        if restored != -1:
            self.uiOperatorDDL.setCurrentIndex(restored)
    self.uiOperatorDDL.blockSignals(False)
    self.updateEditor()
|
def _get_struct_string ( self ) :
"""Get the STRING structure ."""
|
data = [ ]
while True :
t = self . _src . read ( 1 )
if t == b'\x00' :
break
data . append ( t )
val = b'' . join ( data )
return val . decode ( "utf8" )
|
def yn(n, x, context=None):
    """Return the value of the second kind Bessel function of order ``n`` at
    ``x``.

    ``n`` should be a Python integer.
    """
    converted = BigFloat._implicit_convert(x)
    return _apply_function_in_current_context(
        BigFloat, mpfr.mpfr_yn, (n, converted), context,
    )
|
def update_status_with_media(self, **params):  # pragma: no cover
    """Updates the authenticating user's current status and attaches media
    for upload. In other words, it creates a Tweet with a picture attached.

    Docs:
    https://developer.twitter.com/en/docs/tweets/post-and-engage/api-reference/post-statuses-update_with_media
    """
    deprecation_message = ('This method is deprecated. You should use '
                           'Twython.upload_media instead.')
    warnings.warn(deprecation_message, TwythonDeprecationWarning, stacklevel=2)
    return self.post('statuses/update_with_media', params=params)
|
def set_volume(self, volume):
    """Set receiver volume via HTTP get command.

    Volume is send in a format like -50.0.
    Minimum is -80.0, maximum at 18.0
    """
    if not -80 <= volume <= 18:
        raise ValueError("Invalid volume")
    try:
        response = self.send_get_command(self._urls.command_set_volume % volume)
    except requests.exceptions.RequestException:
        _LOGGER.error("Connection error: set volume command not sent.")
        return False
    return bool(response)
|
def create_table(self, table_name, primary_id=None, primary_type=None):
    """Create a new table.

    Either loads a table or creates it if it doesn't exist yet. You can
    define the name and type of the primary key field, if a new table is to
    be created. The default is to create an auto-incrementing integer,
    ``id``. You can also set the primary key to be a string or big integer.
    The caller will be responsible for the uniqueness of ``primary_id`` if
    it is defined as a text type.

    Returns a :py:class:`Table <dataset.Table>` instance.
    ::

        table = db.create_table('population')
        # custom id and type
        table2 = db.create_table('population2', 'age')
        table3 = db.create_table('population3',
                                 primary_id='city',
                                 primary_type=db.types.text)
        # custom length of String
        table4 = db.create_table('population4',
                                 primary_id='city',
                                 primary_type=db.types.string(25))
        # no primary key
        table5 = db.create_table('population5',
                                 primary_id=False)
    """
    assert not isinstance(primary_type, six.string_types), \
        'Text-based primary_type support is dropped, use db.types.'
    table_name = normalize_table_name(table_name)
    with self.lock:
        # Cache newly created tables so later calls return the same object.
        if table_name not in self._tables:
            self._tables[table_name] = Table(
                self,
                table_name,
                primary_id=primary_id,
                primary_type=primary_type,
                auto_create=True,
            )
        return self._tables.get(table_name)
|
def authenticate(self, request: AxesHttpRequest, username: str = None, password: str = None, **kwargs: dict):
    """Checks user lockout status and raise a PermissionDenied if user is not allowed to log in.

    This method interrupts the login flow and inserts error message directly to the
    ``response_context`` attribute that is supplied as a keyword argument.

    :keyword response_context: kwarg that will be have its ``error`` attribute updated with context.
    :raises AxesBackendRequestParameterRequired: if request parameter is not passed.
    :raises AxesBackendPermissionDenied: if user is already locked out.
    """
    if request is None:
        raise AxesBackendRequestParameterRequired(
            'AxesBackend requires a request as an argument to authenticate')
    credentials = get_credentials(username=username, password=password, **kwargs)
    if AxesProxyHandler.is_allowed(request, credentials):
        return
    # Locked out: don't try to authenticate. Update response_context so the
    # caller gets a "why" back, then abort the flow with an exception.
    response_context = kwargs.get('response_context', {})
    response_context['error'] = get_lockout_message()
    # Raising stops the authentication flow at django.contrib.auth.authenticate,
    # which catches backend PermissionDenied errors and emits the
    # user-login-failed signal; axes.signals.log_user_login_failed logs the
    # attempt and raises a second exception that bubbles up the middleware
    # stack, where axes.middleware.AxesMiddleware turns it into an
    # HTTP 403 Forbidden reply.
    raise AxesBackendPermissionDenied(
        'AxesBackend detected that the given user is locked out')
|
def get_environ(self, key, default=None, cast=None):
    """Get value from environment variable using os.environ.get

    :param key: The name of the setting value, will always be upper case
    :param default: In case of not found it will be returned
    :param cast: Should cast in to @int, @float, @bool or @json?
        or cast must be true to use cast inference
    :return: The value if found, default or None
    """
    data = self.environ.get(key.upper(), default)
    # Only truthy values are cast; falsy data (None, '', 0) is returned
    # untouched.
    if data:
        if cast in converters:
            data = converters.get(cast)(data)
        if cast is True:
            data = parse_conf_data(data, tomlfy=True)
    return data
|
def parse(cls, on_str, force_type=None):  # @ReservedAssignment
    """Parse a string into one of the object number classes.

    :param on_str: object-number string; its first character encodes the
        type unless ``force_type`` is given.
    :param force_type: optional type character overriding ``on_str[0]``.
    :return: a ``DatasetNumber``, ``TableNumber``, ``PartitionNumber``,
        ``ColumnNumber``, ``GeneralNumber1``/``GeneralNumber2`` instance,
        or ``None`` when ``on_str`` is ``None``.
    :raises NotObjectNumberError: for malformed or unrecognized input.
    """
    # Keep the untouched input around for error messages; on_str itself is
    # consumed piecewise below.
    on_str_orig = on_str
    if on_str is None:
        return None
    if not on_str:
        raise NotObjectNumberError("Got null input")
    if not isinstance(on_str, string_types):
        raise NotObjectNumberError("Must be a string. Got a {} ".format(type(on_str)))
    # if isinstance(on_str, unicode):
    #     dataset = on_str.encode('ascii')
    if force_type:
        # NOTE(review): assumes a caller passing force_type supplies on_str
        # WITHOUT a leading type character (no strip here) -- confirm.
        type_ = force_type
    else:
        # First character encodes the object type; strip it from the body.
        type_ = on_str[0]
        on_str = on_str[1:]
    if type_ not in list(cls.NDS_LENGTH.keys()):
        raise NotObjectNumberError("Unknown type character '{}' for '{}'".format(type_, on_str_orig))
    # Dataset-number digit count: whatever remains after this type's
    # fixed-length suffix.
    ds_length = len(on_str) - cls.NDS_LENGTH[type_]
    if ds_length not in cls.DATASET_LENGTHS:
        raise NotObjectNumberError("Dataset string '{}' has an unfamiliar length: {}".format(on_str_orig, ds_length))
    # (dataset digits, revision digits, assignment class) for this length.
    ds_lengths = cls.DATASET_LENGTHS[ds_length]
    assignment_class = ds_lengths[2]
    try:
        dataset = int(ObjectNumber.base62_decode(on_str[0:ds_lengths[0]]))
        if ds_lengths[1]:
            # The revision is a trailing fixed-width field.
            i = len(on_str) - ds_lengths[1]
            revision = int(ObjectNumber.base62_decode(on_str[i:]))
            on_str = on_str[0:i]
            # remove the revision
        else:
            revision = None
        # Drop the dataset digits, leaving only the type-specific payload.
        on_str = on_str[ds_lengths[0]:]
        if type_ == cls.TYPE.DATASET:
            return DatasetNumber(dataset, revision=revision, assignment_class=assignment_class)
        elif type_ == cls.TYPE.TABLE:
            table = int(ObjectNumber.base62_decode(on_str))
            return TableNumber(DatasetNumber(dataset, assignment_class=assignment_class), table, revision=revision)
        elif type_ == cls.TYPE.PARTITION:
            partition = int(ObjectNumber.base62_decode(on_str))
            return PartitionNumber(DatasetNumber(dataset, assignment_class=assignment_class), partition, revision=revision)
        elif type_ == cls.TYPE.COLUMN:
            # Column numbers embed a table number before the column digits.
            table = int(ObjectNumber.base62_decode(on_str[0:cls.DLEN.TABLE]))
            column = int(ObjectNumber.base62_decode(on_str[cls.DLEN.TABLE:]))
            return ColumnNumber(TableNumber(DatasetNumber(dataset, assignment_class=assignment_class), table), column, revision=revision)
        elif type_ == cls.TYPE.OTHER1 or type_ == cls.TYPE.CONFIG:
            return GeneralNumber1(on_str_orig[0], DatasetNumber(dataset, assignment_class=assignment_class), int(ObjectNumber.base62_decode(on_str[0:cls.DLEN.OTHER1])), revision=revision)
        elif type_ == cls.TYPE.OTHER2:
            # OTHER2 carries two payload fields back to back.
            return GeneralNumber2(on_str_orig[0], DatasetNumber(dataset, assignment_class=assignment_class), int(ObjectNumber.base62_decode(on_str[0:cls.DLEN.OTHER1])), int(ObjectNumber.base62_decode(on_str[cls.DLEN.OTHER1:cls.DLEN.OTHER1 + cls.DLEN.OTHER2])), revision=revision)
        else:
            raise NotObjectNumberError('Unknown type character: ' + type_ + ' in ' + str(on_str_orig))
    except Base62DecodeError as e:
        raise NotObjectNumberError('Unknown character: ' + str(e))
|
def create_decision_tree(data, attributes, class_attr, fitness_func, wrapper, **kwargs):
    """Returns a new decision tree based on the examples given.

    Recursively picks the attribute that best splits ``data`` and builds a
    ``Node`` per split, returning a class-distribution leaf (``CDist`` or
    ``DDist``) when the records are homogeneous enough or no attributes
    remain.
    """
    # NOTE(review): split_attr/split_val are read but never used in this
    # body -- presumably kept for API symmetry with the recursive calls
    # below; confirm.
    split_attr = kwargs.get('split_attr', None)
    split_val = kwargs.get('split_val', None)
    assert class_attr not in attributes
    node = None
    data = list(data) if isinstance(data, Data) else data
    if wrapper.is_continuous_class:
        stop_value = CDist(seq=[r[class_attr] for r in data])
        # For a continuous class case, stop if all the remaining records have
        # a variance below the given threshold.
        stop = wrapper.leaf_threshold is not None and stop_value.variance <= wrapper.leaf_threshold
    else:
        stop_value = DDist(seq=[r[class_attr] for r in data])
        # For a discrete class, stop if all remaining records have the same
        # classification.
        stop = len(stop_value.counts) <= 1
    if not data or len(attributes) <= 0:
        # If the dataset is empty or the attributes list is empty, return the
        # default value. The target attribute is not in the attributes list, so
        # we need not subtract 1 to account for the target attribute.
        if wrapper:
            wrapper.leaf_count += 1
        return stop_value
    elif stop:
        # If all the records in the dataset have the same classification,
        # return that classification.
        if wrapper:
            wrapper.leaf_count += 1
        return stop_value
    else:
        # Choose the next best attribute to best classify our data.
        best = choose_attribute(data, attributes, class_attr, fitness_func, method=wrapper.metric)
        # Create a new decision tree/node with the best attribute and an empty
        # dictionary object -- we'll fill that up next.
        # tree = {best: {}}
        node = Node(tree=wrapper, attr_name=best)
        node.n += len(data)
        # Create a new decision tree/sub-node for each of the values in the
        # best attribute field.
        for val in get_values(data, best):
            # Create a subtree for the current value under the "best" field,
            # recursing on the matching records minus the chosen attribute.
            subtree = create_decision_tree([r for r in data if r[best] == val], [attr for attr in attributes if attr != best], class_attr, fitness_func, split_attr=best, split_val=val, wrapper=wrapper)
            # Add the new subtree to the empty dictionary object in our new
            # tree/node we just created.
            if isinstance(subtree, Node):
                node._branches[val] = subtree
            elif isinstance(subtree, (CDist, DDist)):
                node.set_leaf_dist(attr_value=val, dist=subtree)
            else:
                raise Exception("Unknown subtree type: %s" % (type(subtree),))
    return node
|
def __accept_reject(self, prompt, accepted_text, rejected_text, display_rejected):
    """Return a boolean value for accept/reject.

    Posts a confirmation message box on the UI thread, blocks the calling
    (non-UI) thread until the user answers, then restores the cancel row.
    """
    # Event used to block this thread until one of the callbacks fires.
    accept_event = threading.Event()
    # Single-element list acts as a mutable cell shared with the callbacks.
    result_ref = [False]

    def perform():
        # Runs on the UI thread via the queued-task mechanism below.
        def accepted():
            result_ref[0] = True
            accept_event.set()

        def rejected():
            result_ref[0] = False
            accept_event.set()

        self.__message_column.remove_all()
        pose_confirmation_message_box(self.ui, self.__message_column, prompt, accepted, rejected, accepted_text, rejected_text, display_rejected)
        # self.__message_column.add(self.__make_cancel_row())

    with self.__lock:
        self.__q.append(perform)
    # Schedule the queued work on the UI thread, then wait for the answer.
    self.document_controller.add_task("ui_" + str(id(self)), self.__handle_output_and_q)
    accept_event.wait()

    def update_message_column():
        # Restore the cancel row once the confirmation box is answered.
        self.__message_column.remove_all()
        self.__message_column.add(self.__make_cancel_row())

    self.document_controller.add_task("ui_" + str(id(self)), update_message_column)
    return result_ref[0]
|
def est_credible_region(self, level=0.95, return_outside=False, modelparam_slice=None):
    """Returns an array containing particles inside a credible region of a
    given level, such that the described region has probability mass
    no less than the desired level.

    Particles in the returned region are selected by including the highest-
    weight particles first until the desired credibility level is reached.

    :param float level: Credibility level to report.
    :param bool return_outside: If `True`, the return value is a tuple
        of the those particles within the credible region, and the rest
        of the posterior particle cloud.
    :param slice modelparam_slice: Slice over which model parameters
        to consider.

    :rtype: :class:`numpy.ndarray`, shape ``(n_credible, n_mps)``,
        where ``n_credible`` is the number of particles in the credible
        region and ``n_mps`` corresponds to the size of ``modelparam_slice``.
        If ``return_outside`` is ``True``, this method instead
        returns tuple ``(inside, outside)`` where ``inside`` is as
        described above, and ``outside`` has shape
        ``(n_particles - n_credible, n_mps)``.
    :return: An array of particles inside the estimated credible region. Or,
        if ``return_outside`` is ``True``, both the particles inside and the
        particles outside, as a tuple.
    """
    # Which slice of modelparams to take.
    s_ = np.s_[modelparam_slice] if modelparam_slice is not None else np.s_[:]
    mps = self.particle_locations[:, s_]
    # Sort the particles by weight: `id_sort` puts particle_weights in
    # descending order.
    id_sort = np.argsort(self.particle_weights)[::-1]
    # Cumulative sum of the sorted weights.
    cumsum_weights = np.cumsum(self.particle_weights[id_sort])
    # Mask of particles whose running mass is still at or below `level`:
    # all(cumsum_weights[id_cred] <= level) by construction.
    id_cred = cumsum_weights <= level
    # Adding the next particle guarantees the region's mass reaches `level`.
    # Guard the index: when every particle is already included (e.g.
    # level >= 1, or level >= total weight), there is no "next" particle --
    # the original code raised IndexError here.
    n_cred = int(np.sum(id_cred))
    if n_cred < id_cred.size:
        id_cred[n_cred] = True
    # Permute the particles into sorted order, then select the credible
    # (and, optionally, the non-credible) ones.
    if return_outside:
        return (mps[id_sort][id_cred], mps[id_sort][np.logical_not(id_cred)])
    else:
        return mps[id_sort][id_cred]
|
def folder_path(preferred_mode, check_other_mode, key):
    '''This function implements all heuristics and workarounds for messed up
    KNOWNFOLDERID registry values. It's also verbose (OutputDebugStringW)
    about whether fallbacks worked or whether they would have worked if
    check_other_mode had been allowed.

    :param preferred_mode: 'user' or 'system' -- which lookup table to try
        first.
    :param check_other_mode: whether falling back to the other mode's path
        is permitted.
    :param key: known-folder key, e.g. 'documents'.
    :return: a usable path, or None when every candidate excepted or the
        fallback was not permitted.
    '''
    other_mode = 'system' if preferred_mode == 'user' else 'user'
    # dirs_src entries are (path, exception) pairs; a recorded exception
    # means the lookup for that mode/key failed.
    path, exception = dirs_src[preferred_mode][key]
    if not exception:
        return path
    logger.info("WARNING: menuinst key: '%s'\n" "                  path: '%s'\n" "  .. excepted with: '%s' in knownfolders.py, implementing workarounds .." % (key, path, type(exception).__name__))
    # Since I have seen 'user', 'documents' set as '\\vmware-host\Shared Folders\Documents'
    # when there's no such server, we check 'user', 'profile' + '\Documents' before maybe
    # trying the other_mode (though I have chickened out on that idea).
    if preferred_mode == 'user' and key == 'documents':
        user_profile, exception = dirs_src['user']['profile']
        if not exception:
            path = join(user_profile, 'Documents')
            # Only accept the workaround if the folder is writable.
            if os.access(path, os.W_OK):
                logger.info("  .. worked-around to: '%s'" % (path))
                return path
    path, exception = dirs_src[other_mode][key]
    # Do not fall back to something we cannot write to.
    if exception:
        if check_other_mode:
            logger.info("  .. despite 'check_other_mode'\n" "     and 'other_mode' 'path' of '%s'\n" "     it excepted with: '%s' in knownfolders.py" % (path, type(exception).__name__))
        else:
            logger.info("  .. 'check_other_mode' is False,\n" "     and 'other_mode' 'path' is '%s'\n" "     but it excepted anyway with: '%s' in knownfolders.py" % (path, type(exception).__name__))
        return None
    if not check_other_mode:
        logger.info("  .. due to lack of 'check_other_mode' not picking\n" "     non-excepting path of '%s'\n in knownfolders.py" % (path))
        return None
    return path
|
def posterior_to_xarray(self):
    """Convert posterior samples to xarray."""
    samples = self.posterior
    if not isinstance(samples, dict):
        raise TypeError("DictConverter.posterior is not a dictionary")
    # Warn when log_likelihood is misplaced: stats functions expect it in
    # sample_stats rather than in the posterior group.
    if "log_likelihood" in samples:
        message = ("log_likelihood found in posterior."
                   " For stats functions log_likelihood needs to be in sample_stats.")
        warnings.warn(message, SyntaxWarning)
    return dict_to_dataset(samples, library=None, coords=self.coords, dims=self.dims)
|
def _get_sections(request):
    """Returns list of sections (horizontal cut, base level)."""
    sections = []
    for section_url in SECTIONS:
        crumb = find_crumb(request, section_url)
        if not crumb:
            continue
        # The root section is current only on an exact path match; any other
        # section is current for every path beneath it. The flag is only
        # ever set, never cleared.
        if section_url == '/':
            if request.path == section_url:
                crumb.is_current = True
        elif request.path.startswith(section_url):
            crumb.is_current = True
        sections.append(crumb)
    return sections
|
def convert_ram_sp_wf(ADDR_WIDTH=8, DATA_WIDTH=8):
    '''Convert RAM: Single-Port, Write-First'''
    # Build dummy signals of the requested widths purely to drive the
    # Verilog conversion of the ram_sp_wf design.
    clk = Signal(bool(0))
    write_enable = Signal(bool(0))
    address = Signal(intbv(0)[ADDR_WIDTH:])
    data_in = Signal(intbv(0)[DATA_WIDTH:])
    data_out = Signal(intbv(0)[DATA_WIDTH:])
    toVerilog(ram_sp_wf, clk, write_enable, address, data_in, data_out)
|
def unmount(self, force=None, auth_no_user_interaction=None):
    """Unmount filesystem."""
    # filter_opt drops options that were left as None.
    options = filter_opt({
        'force': ('b', force),
        'auth.no_user_interaction': ('b', auth_no_user_interaction),
    })
    return self._M.Filesystem.Unmount('(a{sv})', options)
|
def PopEvent(self):
    """Pops an event from the heap.

    Returns:
        tuple: containing:

            str: identifier of the event MACB group or None if the event
                cannot be grouped.
            str: identifier of the event content.
            EventObject: event.
    """
    try:
        macb_group, content_identifier, event = heapq.heappop(self._heap)
    except IndexError:
        # Empty heap.
        return None
    # An empty MACB group identifier means the event cannot be grouped.
    if macb_group == '':
        macb_group = None
    return macb_group, content_identifier, event
|
def timeout(seconds, error_message=None):
    """Timeout checking just for Linux-like platform, not working in Windows platform.

    Decorator factory: the wrapped function is interrupted with a
    ``TimeoutError`` if it runs longer than ``seconds``.

    :param seconds: alarm delay in whole seconds (passed to ``signal.alarm``).
    :param error_message: optional message for the raised ``TimeoutError``;
        defaults to a message naming the wrapped function.
    """
    def decorated(func):
        def _handle_timeout(signum, frame):
            # SIGALRM handler: log the timeout and where it fired, then raise.
            errmsg = error_message or 'Timeout: The action <%s> is timeout!' % func.__name__
            import inspect
            stack_frame = inspect.stack()[4]
            file_name = os.path.basename(stack_frame[1])
            line_no = stack_frame[2]
            method_name = stack_frame[3]
            code_text = ','.join(stack_frame[4])
            stack_info = 'Stack: %s, %s:%s >%s' % (method_name, file_name, line_no, code_text)
            sys.stderr.write(errmsg + '\n')
            sys.stderr.write(stack_info + '\n')
            raise TimeoutError(errmsg)

        @sysx.platform(sysx.UNIX_LIKE, case_false_wraps=func)
        def wrapper(*args, **kwargs):
            # BUG FIX: the original stored the call result in a module-level
            # variable via `global result`, which leaked state between
            # decorated functions and was not thread-safe (the `result = ""`
            # closure variable it also declared was never read). Returning
            # directly preserves the observable behavior.
            signal.signal(signal.SIGALRM, _handle_timeout)
            signal.alarm(seconds)
            try:
                return func(*args, **kwargs)
            finally:
                # Always cancel the pending alarm, even when func raised.
                signal.alarm(0)

        return functools.wraps(func)(wrapper)
    return decorated
|
def find_lt(array, x):
    """Find rightmost value less than x.

    :type array: list
    :param array: an iterable object that supports index access; must be
        sorted in ascending order.
    :param x: a comparable value
    :raises ValueError: if no element is smaller than ``x``.

    Example::

        >>> find_lt([0, 1, 2, 3], 2.5)
        2

    (Finds the largest value that is smaller than x.)
    """
    position = bisect.bisect_left(array, x)
    if position == 0:
        # Every element is >= x.
        raise ValueError
    return array[position - 1]
|
def get_assigned_value(self, name):
    """Get the assigned value of an attribute.

    Get the underlying value of an attribute. If value has not
    been set, will not return the default for the field.

    Args:
        name: Name of attribute to get.

    Returns:
        Value of attribute, None if it has not been set.
    """
    cls = type(self)
    try:
        field = cls.field_by_name(name)
    except KeyError:
        raise AttributeError(
            'Message %s has no field %s' % (cls.__name__, name))
    # Unset fields have no entry in the tag map, so .get() yields None.
    return self.__tags.get(field.number)
|
def _finalize_axis(self, key, **kwargs):
    """Extends the ElementPlot _finalize_axis method to set appropriate
    labels, and axes options for 3D Plots.
    """
    axis = self.handles['axis']
    self.handles['fig'].set_frameon(False)
    axis.grid(self.show_grid)
    # Camera position: elevation/azimuth angles plus viewing distance.
    axis.view_init(elev=self.elevation, azim=self.azimuth)
    axis.dist = self.distance
    # A disabled axis (set to None) has its line and label hidden rather
    # than being removed from the plot.
    if self.xaxis is None:
        axis.w_xaxis.line.set_lw(0.)
        axis.w_xaxis.label.set_text('')
    if self.yaxis is None:
        axis.w_yaxis.line.set_lw(0.)
        axis.w_yaxis.label.set_text('')
    if self.zaxis is None:
        axis.w_zaxis.line.set_lw(0.)
        axis.w_zaxis.label.set_text('')
    if self.disable_axes:
        axis.set_axis_off()
    # set_axis_bgcolor was replaced by set_facecolor in newer matplotlib.
    if mpl_version <= '1.5.9':
        axis.set_axis_bgcolor(self.bgcolor)
    else:
        axis.set_facecolor(self.bgcolor)
    return super(Plot3D, self)._finalize_axis(key, **kwargs)
|
async def do_upload(context, files):
    """Upload artifacts and return status.

    Returns the integer status of the upload.

    args:
        context (scriptworker.context.Context): the scriptworker context.
        files (list of str): list of files to be uploaded as artifacts

    Raises:
        Exception: on unexpected exception.

    Returns:
        int: exit status
    """
    status = 0
    try:
        await upload_artifacts(context, files)
    except ScriptWorkerException as e:
        # Known failure mode: escalate the exit status to the worse level.
        status = worst_level(status, e.exit_code)
        log.error("Hit ScriptWorkerException: {}".format(e))
    except aiohttp.ClientError as e:
        # Network-level failure: treat the task as intermittent.
        status = worst_level(status, STATUSES['intermittent-task'])
        log.error("Hit aiohttp error: {}".format(e))
    except Exception as e:
        # Anything unexpected is logged and re-raised for the caller.
        log.exception("SCRIPTWORKER_UNEXPECTED_EXCEPTION upload {}".format(e))
        raise
    return status
|
def get_variant_id(variant_dict=None, variant_line=None):
    """Build a variant id.

    The variant id is a string made of CHROM_POS_REF_ALT.

    Args:
        variant_dict (dict): A variant dictionary
        variant_line (str): A raw tab-separated VCF variant line

    Returns:
        variant_id (str)
    """
    if variant_dict:
        fields = (variant_dict['CHROM'], variant_dict['POS'],
                  variant_dict['REF'], variant_dict['ALT'])
    elif variant_line:
        # VCF column order: CHROM=0, POS=1, ID=2, REF=3, ALT=4.
        columns = variant_line.rstrip().split('\t')
        fields = (columns[0], columns[1], columns[3], columns[4])
    else:
        raise Exception("Have to provide variant dict or variant line")
    return '_'.join(fields)
|
def adaptive(u_kn, N_k, f_k, tol=1.0e-12, options=None):
    """Determine dimensionless free energies by a combination of Newton-Raphson
    iteration and self-consistent iteration.

    Picks whichever method gives the lowest gradient.
    Is slower than NR since it calculates the log norms twice each iteration.

    OPTIONAL ARGUMENTS
    tol (float between 0 and 1) - relative tolerance for convergence (default 1.0e-12)
    options: dictionary of options
        gamma (float between 0 and 1) - incrementor for NR iterations (default 1.0).
            Usually not changed now, since adaptively switch.
        maximum_iterations (int) - maximum number of Newton-Raphson iterations
            (default 250: either NR converges or doesn't, pretty quickly)
        verbose (boolean) - verbosity level for debug output

    NOTES
    This method determines the dimensionless free energies by
    minimizing a convex function whose solution is the desired
    estimator. The original idea came from the construction of a
    likelihood function that independently reproduced the work of
    Geyer (see [1] and Section 6 of [2]). This can alternatively be
    formulated as a root-finding algorithm for the Z-estimator. More
    details of this procedure will follow in a subsequent paper. Only
    those states with nonzero counts are include in the estimation
    procedure.

    REFERENCES
    See Appendix C.2 of [1].
    """
    # BUG FIX: the declared default is options=None, but the original code
    # immediately called options.setdefault(...), raising AttributeError
    # whenever the caller omitted the argument. Fall back to an empty dict.
    if options is None:
        options = {}
    # put the defaults here in case we get passed an 'options' dictionary that is only partial
    options.setdefault('verbose', False)
    options.setdefault('maximum_iterations', 250)
    options.setdefault('print_warning', False)
    options.setdefault('gamma', 1.0)
    gamma = options['gamma']
    doneIterating = False
    if options['verbose'] == True:
        print("Determining dimensionless free energies by Newton-Raphson / self-consistent iteration.")
        if tol < 1.5e-15:
            print("Tolerance may be too close to machine precision to converge.")
    # keep track of Newton-Raphson and self-consistent iterations
    nr_iter = 0
    sci_iter = 0
    f_sci = np.zeros(len(f_k), dtype=np.float64)
    f_nr = np.zeros(len(f_k), dtype=np.float64)
    # Perform Newton-Raphson iterations (with sci computed on the way)
    for iteration in range(0, options['maximum_iterations']):
        g = mbar_gradient(u_kn, N_k, f_k)
        # Objective function gradient
        H = mbar_hessian(u_kn, N_k, f_k)
        # Objective function hessian
        Hinvg = np.linalg.lstsq(H, g, rcond=-1)[0]
        Hinvg -= Hinvg[0]
        f_nr = f_k - gamma * Hinvg
        # self-consistent iteration gradient norm and saved log sums.
        f_sci = self_consistent_update(u_kn, N_k, f_k)
        f_sci = f_sci - f_sci[0]
        # zero out the minimum
        g_sci = mbar_gradient(u_kn, N_k, f_sci)
        gnorm_sci = np.dot(g_sci, g_sci)
        # newton raphson gradient norm and saved log sums.
        g_nr = mbar_gradient(u_kn, N_k, f_nr)
        gnorm_nr = np.dot(g_nr, g_nr)
        # we could save the gradient, for the next round, but it's not too expensive to
        # compute since we are doing the Hessian anyway.
        if options['verbose']:
            print("self consistent iteration gradient norm is %10.5g, Newton-Raphson gradient norm is %10.5g" % (gnorm_sci, gnorm_nr))
        # decide which directon to go depending on size of gradient norm.
        # The first two iterations are always self-consistent.
        f_old = f_k
        if (gnorm_sci < gnorm_nr or sci_iter < 2):
            f_k = f_sci
            sci_iter += 1
            if options['verbose']:
                if sci_iter < 2:
                    print("Choosing self-consistent iteration on iteration %d" % iteration)
                else:
                    print("Choosing self-consistent iteration for lower gradient on iteration %d" % iteration)
        else:
            f_k = f_nr
            nr_iter += 1
            if options['verbose']:
                print("Newton-Raphson used on iteration %d" % iteration)
        div = np.abs(f_k[1:])
        # what we will divide by to get relative difference
        zeroed = np.abs(f_k[1:]) < np.min([10 ** -8, tol])
        # check which values are near enough to zero, hard coded max for now.
        div[zeroed] = 1.0
        # for these values, use absolute values.
        max_delta = np.max(np.abs(f_k[1:] - f_old[1:]) / div)
        if np.isnan(max_delta) or (max_delta < tol):
            doneIterating = True
            break
    if doneIterating:
        if options['verbose']:
            print('Converged to tolerance of {:e} in {:d} iterations.'.format(max_delta, iteration + 1))
            print('Of {:d} iterations, {:d} were Newton-Raphson iterations and {:d} were self-consistent iterations'.format(iteration + 1, nr_iter, sci_iter))
            if np.all(f_k == 0.0):
                # all f_k appear to be zero
                print('WARNING: All f_k appear to be zero.')
    else:
        print('WARNING: Did not converge to within specified tolerance.')
        if options['maximum_iterations'] <= 0:
            print("No iterations ran be cause maximum_iterations was <= 0 ({})!".format(options['maximum_iterations']))
        else:
            print('max_delta = {:e}, tol = {:e}, maximum_iterations = {:d}, iterations completed = {:d}'.format(max_delta, tol, options['maximum_iterations'], iteration))
    return f_k
|
def get(self, section, val):
    """Look up a setting, falling back to the defaults.

    `section` (mandatory) (string) the section name in the config, e.g. ``"agent"``
    `val` (mandatory) (string) the setting name within the section, e.g. ``"host"``
    `Returns` the configured value, else the default value, else ``None``.
    """
    key = val.lower()
    config = self.__config
    if section in config and key in config[section]:
        # explicit configuration wins over defaults
        return config[section][key]
    defaults = self.__defaults
    if section in defaults and key in defaults[section]:
        return defaults[section][key]
    return None
|
def append(name, value, convert=False, delimiter=DEFAULT_TARGET_DELIM):
    '''.. versionadded:: 2014.7.0

    Append a value to a list in the grains config file. The grain that is
    being appended to (name) must exist before the new value can be added.

    name
        The grain name
    value
        The value to append
    convert
        If convert is True, convert non-list contents into a list.
        If convert is False and the grain contains non-list contents, an
        error is given. Defaults to False.
    delimiter
        A delimiter different from the default can be provided.

        .. versionadded:: v2015.8.2

    .. code-block:: yaml

        grain_name:
          grains.append:
            - value: to_be_appended
    '''
    name = re.sub(delimiter, DEFAULT_TARGET_DELIM, name)
    ret = {'name': name, 'changes': {}, 'result': True, 'comment': ''}
    grain = __salt__['grains.get'](name, None)
    # Grains can legitimately hold a None value (definition without a
    # value), so also accept any key present in the minion's grains.
    if not grain and name not in __grains__:
        ret['result'] = False
        ret['comment'] = 'Grain {0} does not exist'.format(name)
        return ret
    if isinstance(grain, list):
        if value in grain:
            ret['comment'] = ('Value {1} is already in the list '
                              'for grain {0}'.format(name, value))
            return ret
        if __opts__['test']:
            ret['result'] = None
            ret['comment'] = ('Value {1} in grain {0} is set to '
                              'be added'.format(name, value))
            ret['changes'] = {'added': value}
            return ret
        __salt__['grains.append'](name, value)
    elif convert is True:
        if __opts__['test']:
            ret['result'] = None
            ret['comment'] = ('Grain {0} is set to be converted '
                              'to list and value {1} will be '
                              'added'.format(name, value))
            ret['changes'] = {'added': value}
            return ret
        # wrap the scalar (or start a fresh list for a None grain), then append
        grain = [] if grain is None else [grain]
        grain.append(value)
        __salt__['grains.setval'](name, grain)
    else:
        ret['result'] = False
        ret['comment'] = 'Grain {0} is not a valid list'.format(name)
        return ret
    ret['comment'] = 'Value {1} was added to grain {0}'.format(name, value)
    ret['changes'] = {'added': value}
    return ret
|
def remove_duplicate_sg(security_groups):
    """Remove duplicate Security Groups that share a same name alias.

    Args:
        security_groups (list): security group ids to compare against
            SECURITYGROUP_REPLACEMENTS.

    Returns:
        list: the same list with duplicate aliases removed in place.
    """
    for preferred, alias in SECURITYGROUP_REPLACEMENTS.items():
        # when both the canonical id and its alias are present, drop the alias
        if preferred in security_groups and alias in security_groups:
            LOG.info('Duplicate SG found. Removing %s in favor of %s.', alias, preferred)
            security_groups.remove(alias)
    return security_groups
|
def _setSerialTimeout(self, timeout, device, message):
    """Set the serial timeout on the hardware device.

    :Parameters:
      timeout : `float` or `int`
        The timeout value as defined by the hardware manual; snapped to
        the nearest supported key.
      device : `int`
        Integer hardware device ID, only used with the Pololu Protocol.
      message : `bool`
        If `True` a text message is returned, otherwise the integer
        stored in the Qik.

    :Returns:
      Status of the configuration write.
    """
    # snap the requested timeout to the closest value the hardware supports
    nearest = min(self._timeoutKeys, key=lambda k: abs(k - timeout))
    encoded = self._timeoutToValue.get(nearest, 0)
    self._deviceConfig[device]['timeout'] = nearest
    return self._setConfig(self.SERIAL_TIMEOUT, encoded, device, message)
|
def asm(code, addr=0, syntax=None, target=None, gnu_binutils_prefix=None):
    """Assemble statements into machine readable code.

    Args:
        code (str): The statements to assemble.
        addr (int): The memory address where the code will run.
        syntax (AsmSyntax): The input assembler syntax for x86. Defaults to
            nasm, ignored on other platforms.
        target (~pwnypack.target.Target): The target architecture. The
            global target is used if this argument is ``None``.
        gnu_binutils_prefix (str): When the syntax is AT&T, gnu binutils'
            as and ld will be used. By default, it selects
            ``arm-*-as/ld`` for 32bit ARM targets,
            ``aarch64-*-as/ld`` for 64 bit ARM targets,
            ``i386-*-as/ld`` for 32bit X86 targets and
            ``amd64-*-as/ld`` for 64bit X86 targets (all for various
            flavors of ``*``). This option allows you to pick a different
            toolchain. The prefix should always end with a '-' (or be
            empty).

    Returns:
        bytes: The assembled machine code.

    Raises:
        SyntaxError: If the assembler statements are invalid.
        NotImplementedError: If an unsupported target platform is specified.

    Example:
        >>> from pwny import *
        >>> asm('''
        ...     pop rdi
        ...     ret
        ... ''', target=Target(arch=Target.Arch.x86, bits=64))
        b'_\\xc3'
    """
    if target is None:
        target = pwnypack.target.target
    # x86 defaults to nasm syntax; `syntax` is ignored for other arches.
    if syntax is None and target.arch is pwnypack.target.Target.Arch.x86:
        syntax = AsmSyntax.nasm
    if HAVE_KEYSTONE and WANT_KEYSTONE:
        # Preferred path: assemble in-process using the keystone engine.
        ks_mode = 0
        ks_syntax = None
        if target.arch is pwnypack.target.Target.Arch.x86:
            ks_arch = keystone.KS_ARCH_X86
            if target.bits is pwnypack.target.Target.Bits.bits_32:
                ks_mode |= keystone.KS_MODE_32
            else:
                ks_mode |= keystone.KS_MODE_64
            if syntax is AsmSyntax.nasm:
                ks_syntax = keystone.KS_OPT_SYNTAX_NASM
            elif syntax is AsmSyntax.intel:
                ks_syntax = keystone.KS_OPT_SYNTAX_INTEL
            else:
                ks_syntax = keystone.KS_OPT_SYNTAX_ATT
        elif target.arch is pwnypack.target.Target.Arch.arm:
            if target.bits is pwnypack.target.Target.Bits.bits_32:
                ks_arch = keystone.KS_ARCH_ARM
                if target.mode & pwnypack.target.Target.Mode.arm_thumb:
                    ks_mode |= keystone.KS_MODE_THUMB
                else:
                    ks_mode |= keystone.KS_MODE_ARM
                if target.mode & pwnypack.target.Target.Mode.arm_v8:
                    ks_mode |= keystone.KS_MODE_V8
                if target.mode & pwnypack.target.Target.Mode.arm_m_class:
                    ks_mode |= keystone.KS_MODE_MICRO
                if target.endian is pwnypack.target.Target.Endian.little:
                    ks_mode |= keystone.KS_MODE_LITTLE_ENDIAN
                else:
                    ks_mode |= keystone.KS_MODE_BIG_ENDIAN
            else:
                # 64-bit ARM: always assembled little-endian here.
                ks_arch = keystone.KS_ARCH_ARM64
                ks_mode |= keystone.KS_MODE_LITTLE_ENDIAN
        else:
            raise NotImplementedError('Unsupported syntax or target platform.')
        ks = keystone.Ks(ks_arch, ks_mode)
        if ks_syntax is not None:
            ks.syntax = ks_syntax
        try:
            data, insn_count = ks.asm(code, addr)
        except keystone.KsError as e:
            import traceback
            traceback.print_exc()
            raise SyntaxError(e.message)
        return b''.join(six.int2byte(b) for b in data)
    if target.arch is pwnypack.target.Target.Arch.x86 and syntax is AsmSyntax.nasm:
        # Fallback 1: shell out to the nasm binary for x86 nasm syntax.
        with tempfile.NamedTemporaryFile() as tmp_asm:
            tmp_asm.write(('bits %d\norg %d\n%s' % (target.bits.value, addr, code)).encode('utf-8'))
            tmp_asm.flush()
            tmp_bin_fd, tmp_bin_name = tempfile.mkstemp()
            os.close(tmp_bin_fd)
            try:
                p = subprocess.Popen(
                    ['nasm', '-o', tmp_bin_name, '-f', 'bin', tmp_asm.name, ],
                    stdout=subprocess.PIPE,
                    stderr=subprocess.PIPE,
                )
                stdout, stderr = p.communicate()
                if p.returncode:
                    raise SyntaxError(stderr.decode('utf-8'))
                tmp_bin = open(tmp_bin_name, 'rb')
                result = tmp_bin.read()
                tmp_bin.close()
                return result
            finally:
                try:
                    os.unlink(tmp_bin_name)
                except OSError:
                    pass
    elif target.arch in (pwnypack.target.Target.Arch.x86, pwnypack.target.Target.Arch.arm):
        # Fallback 2: drive GNU binutils (as + ld) for AT&T/intel x86 and ARM.
        preamble = ''
        as_flags = []
        ld_flags = []
        if target.arch is pwnypack.target.Target.Arch.x86:
            if target.bits == 32:
                binutils_arch = 'i386'
            else:
                binutils_arch = 'amd64'
            if syntax is AsmSyntax.intel:
                preamble = '.intel_syntax noprefix\n'
            # x86 is linked straight to a flat binary (checked again below).
            ld_flags.extend(['--oformat', 'binary'])
        else:
            if target.bits == 32:
                binutils_arch = 'arm'
                if target.mode & pwnypack.target.Target.Mode.arm_v8:
                    as_flags.append('-march=armv8-a')
                elif target.mode & pwnypack.target.Target.Mode.arm_m_class:
                    as_flags.append('-march=armv7m')
            else:
                binutils_arch = 'aarch64'
            if target.endian is pwnypack.target.Target.Endian.little:
                as_flags.append('-mlittle-endian')
                ld_flags.append('-EL')
            else:
                as_flags.append('-mbig-endian')
                ld_flags.append('-EB')
            if target.mode & pwnypack.target.Target.Mode.arm_thumb:
                as_flags.append('-mthumb')
        if gnu_binutils_prefix is None:
            gnu_binutils_prefix = find_binutils_prefix(binutils_arch)
        tmp_out_fd, tmp_out_name = tempfile.mkstemp()
        try:
            os.close(tmp_out_fd)
            # assemble: source on stdin, object file to tmp_out_name
            p = subprocess.Popen(
                ['%sas' % gnu_binutils_prefix, '-o', tmp_out_name] + as_flags,
                stdin=subprocess.PIPE,
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE,
            )
            stdout, stderr = p.communicate((preamble + code).encode('utf-8'))
            if p.returncode:
                raise SyntaxError(stderr.decode('utf-8'))
            tmp_bin_fd, tmp_bin_name = tempfile.mkstemp()
            try:
                os.close(tmp_bin_fd)
                # link at the requested load address
                p = subprocess.Popen(
                    ['%sld' % gnu_binutils_prefix, '-Ttext', str(addr), ] + ld_flags + ['-o', tmp_bin_name, tmp_out_name, ],
                    stdout=subprocess.PIPE,
                    stderr=subprocess.PIPE,
                )
                stdout, stderr = p.communicate()
                if p.returncode:
                    raise SyntaxError(stderr.decode('utf-8'))
                if 'binary' in ld_flags:
                    # flat binary output (x86 path): return the file as-is
                    tmp_bin = open(tmp_bin_name, 'rb')
                    result = tmp_bin.read()
                    tmp_bin.close()
                    return result
                else:
                    # ELF output (ARM path): extract the .text section
                    tmp_bin = ELF(tmp_bin_name)
                    return tmp_bin.get_section_header('.text').content
            finally:
                try:
                    os.unlink(tmp_bin_name)
                except OSError:
                    pass
        finally:
            try:
                os.unlink(tmp_out_name)
            except OSError:
                pass  # pragma: no cover
    else:
        raise NotImplementedError('Unsupported syntax or target platform.')
|
def qualify(self):
    """Convert attribute values that are references to other objects
    into I{qref}, qualified using the default document namespace.

    Since many wsdls are written improperly: when the document does not
    define a default namespace, the schema target namespace is used to
    qualify references.
    """
    ns = self.root.defaultNamespace()
    if Namespace.none(ns):
        # no default namespace declared; fall back to the schema tns
        ns = self.schema.tns
    for attr in self.autoqualified():
        ref = getattr(self, attr)
        if ref is None or isqref(ref):
            # nothing to convert (unset or already qualified)
            continue
        qref = qualify(ref, self.root, ns)
        log.debug('%s, convert %s="%s" to %s', self.id, attr, ref, qref)
        setattr(self, attr, qref)
|
def with_json_path(self, path, field=None):
    """Annotate Storage objects with a specific JSON path.

    :param path: Path to get inside the stored object, either a list of
        path components or a comma-separated string
    :param field: Optional output field name; derived from the path
        components when omitted
    """
    if field is None:
        field = '_'.join(['json'] + json_path_components(path))
    annotation = {field: JsonGetPath('json', path)}
    return self.defer('json').annotate(**annotation)
|
def unmarshall(values):
    """Transform a response payload from DynamoDB to a native dict.

    :param dict values: The response payload from DynamoDB
    :rtype: dict
    :raises ValueError: if an unsupported type code is encountered
    """
    # every top-level attribute is unmarshalled independently
    return {key: _unmarshall_dict(values[key]) for key in values}
|
def sudo_remove_dirtree(dir_name):
    """Remove a directory tree as a superuser.

    This function is necessary to clean up directories created from inside
    a Docker container, since they are usually written as root and thus
    have to be removed as root.

    Args:
        dir_name: name of the directory to remove.

    Raises:
        WorkerError: if the ``sudo rm -rf`` invocation fails.
    """
    try:
        subprocess.check_output(['sudo', 'rm', '-rf', dir_name])
    except subprocess.CalledProcessError as e:
        # BUG FIX: the message was built from two adjacent literals
        # ('Can' 't remove ...') which concatenated to "Cant remove ...".
        raise WorkerError("Can't remove directory {0}".format(dir_name), e)
|
def hmget(self, name, keys, *args):
    """Return the values stored in the given hash fields.

    :param name: str the name of the redis key
    :param keys: fields to fetch (additional fields may follow in *args)
    :return: Future()
    """
    encode = self.memberparse.encode
    fields = list(self._parse_values(keys, args))
    with self.pipe as pipe:
        future = Future()
        response = pipe.hmget(self.redis_key(name), [encode(k) for k in fields])

        def on_result():
            # decode each raw value against the field it belongs to
            future.set([self._value_decode(k, v) for k, v in zip(fields, response.result)])

        pipe.on_execute(on_result)
        return future
|
def all_languages():
    """Compile a list of all available language translations."""
    found = []
    for entry in os.listdir(localedir):
        # strip territory/encoding/modifier suffixes: "de_DE.UTF-8@euro" -> "de"
        code = entry.split('_')[0].split('.')[0].split('@')[0]
        looks_like_lang = 2 <= len(code) <= 3 and all(c.islower() for c in code)
        if looks_like_lang and code != 'all':
            found.append(entry)
    found.sort()
    # English is the built-in source language, always available
    found.append('en')
    l10n_log('Registered languages:', found, lvl=verbose)
    return found
|
def _rewrite_interpreter(self, path):
    """Look up and render the ``ansible_*_interpreter`` variable matching
    the interpreter binary extracted from the script's interpreter line.

    :param str path:
        Absolute UNIX path to original interpreter.
    :returns:
        Shell fragment prefix used to execute the script via "/bin/sh -c".
        While `ansible_*_interpreter` documentation suggests shell isn't
        involved here, the vanilla implementation uses it and that use is
        exploited in common playbooks.
    """
    interp = os.path.basename(path).strip()
    key = u'ansible_%s_interpreter' % interp
    if key not in self._inv.task_vars:
        # no override configured; keep the original interpreter path
        return path
    template = self._inv.task_vars[key]
    return mitogen.utils.cast(self._inv.templar.template(template))
|
def command(cls, name=None):
    """A decorator to convert a function to a command.

    A command's docstring must be a docopt usage string (see docopt.org).

    Commands receive three arguments:
    * opts: a dictionary output by docopt
    * bot: the Bot instance handling the command (eg for storing state between commands)
    * event: the Slack event that triggered the command (eg for finding the message's sender)

    Additional options may be passed in as keyword arguments:
    * name: the string used to execute the command (no spaces allowed)

    They must return one of three things:
    * a string of response text, sent via the RTM api to the originating channel.
    * None, to send no response.
    * a dictionary of kwargs for https://api.slack.com/methods/chat.postMessage,
      for complex messages such as custom link text or DMs (higher latency
      than RTM; use only when necessary).
    """
    # adapted from https://github.com/docopt/docopt/blob/master/examples/interactive_example.py
    def decorator(func):
        @functools.wraps(func)
        def _cmd_wrapper(rest, *args, **kwargs):
            # first docstring line is the docopt usage pattern
            usage = _cmd_wrapper.__doc__.partition('\n')[0]
            try:
                opts = docopt(usage, rest)
            except (SystemExit, DocoptExit) as e:
                # arguments did not match the usage pattern
                return str(e)
            return func(opts, *args, **kwargs)

        cls.commands[name or func.__name__] = _cmd_wrapper
        return _cmd_wrapper

    return decorator
|
def get_instance(self, payload):
    """Build an instance of AssistantInitiationActionsInstance.

    :param dict payload: Payload response from the API
    :returns: twilio.rest.preview.understand.assistant.assistant_initiation_actions.AssistantInitiationActionsInstance
    :rtype: twilio.rest.preview.understand.assistant.assistant_initiation_actions.AssistantInitiationActionsInstance
    """
    assistant_sid = self._solution['assistant_sid']
    return AssistantInitiationActionsInstance(self._version, payload, assistant_sid=assistant_sid)
|
def get_phase(self):
    """Get phase of the pod.

    :return: PodPhase enum
    """
    if self.phase == PodPhase.TERMINATING:
        # terminating is sticky; don't refresh from the live status
        return self.phase
    self.phase = PodPhase.get_from_string(self.get_status().phase)
    return self.phase
|
def recover(options):
    """Recover from an existing export run.

    Scan the output file backwards for the last boundary between events,
    truncate the file there and return the timestamp so the export can
    restart from that point.

    :param options: parsed options; ``options.kwargs`` must contain
        ``output`` (path of the export file) and ``omode`` (event format).
    :return: timestamp of the last complete event, or 0 when none found.
    """
    event_format = options.kwargs['omode']
    buffer_size = 64 * 1024
    # BUG FIX: last_time was previously unbound when the file was smaller
    # than one buffer (the scan loop never ran), raising NameError on return.
    last_time = 0
    fptr_eof = 0
    with open(options.kwargs['output'], "r+") as fpd:
        fpd.seek(0, 2)  # seek to end
        fptr = max(fpd.tell() - buffer_size, 0)
        while fptr > 0:
            fpd.seek(fptr)
            event_buffer = fpd.read(buffer_size)
            (event_start, next_event_start, last_time) = get_event_start(event_buffer, event_format)
            if event_start != -1:
                # found the last complete event boundary
                fptr_eof = event_start + fptr
                break
            fptr = fptr - buffer_size
        if fptr < 0:
            # walked past the start without finding a valid event: start over
            fptr_eof = 0
            last_time = 0
        # drop everything after the last complete event and terminate the line
        fpd.truncate(fptr_eof)
        fpd.seek(fptr_eof)
        fpd.write("\n")
    return last_time
|
def rectangles_from_points(S):
    """How many rectangles can be formed from a set of points.

    Two point pairs are the diagonals of a rectangle exactly when they
    share the same midpoint and the same squared length, so pairs are
    grouped by that signature and combinations counted.

    :param S: list of points, as coordinate pairs
    :returns: the number of rectangles
    :complexity: :math:`O(n^2)`
    """
    count = 0
    groups = {}
    for j, (qx, qy) in enumerate(S):
        for (px, py) in S[:j]:
            # doubled midpoint avoids fractions; squared length avoids sqrt
            signature = ((px + qx, py + qy), (px - qx) ** 2 + (py - qy) ** 2)
            bucket = groups.setdefault(signature, [])
            # each earlier pair with the same signature forms one rectangle
            count += len(bucket)
            bucket.append(None)
    return count
|
def extract_name_max_chars(name, max_chars=64, blank=" "):
    """Extract at most ``max_chars`` of ``name``, truncated to the nearest word.

    :param name: path to edit
    :param max_chars: max chars of new name
    :param blank: char that represents the blank between words
    :return: name edited to contain at most max_chars
    """
    trimmed = name.strip()
    if len(trimmed) <= max_chars:
        return trimmed
    trimmed = trimmed[:max_chars]  # hard cap on length
    boundary = trimmed.rfind(blank)
    if boundary > 0:
        # back up to the last full word; keep the raw cut when no blank exists
        trimmed = trimmed[:boundary]
    return trimmed
|
def create_processing_context(feedback):
    """Create a default processing context.

    :param feedback: Linked processing feedback object
    :type feedback: QgsProcessingFeedback
    :return: Processing context
    :rtype: QgsProcessingContext
    """
    processing_context = QgsProcessingContext()
    processing_context.setFeedback(feedback)
    processing_context.setProject(QgsProject.instance())
    # InaSAFE runs its own geometry validation beforehand, so Processing's
    # invalid-geometry filtering is skipped here.
    processing_context.setInvalidGeometryCheck(QgsFeatureRequest.GeometryNoCheck)
    return processing_context
|
def new_message(self, message):
    """Handle one incoming bus message and notify subscribers.

    :return: None
    """
    self.logger.info("New message: " + str(message))
    # simple status messages only need a log line at the right level
    notices = (
        (velbus.BusActiveMessage, self.logger.info, "Velbus active message received"),
        (velbus.ReceiveReadyMessage, self.logger.info, "Velbus receive ready message received"),
        (velbus.BusOffMessage, self.logger.error, "Velbus bus off message received"),
        (velbus.ReceiveBufferFullMessage, self.logger.error, "Velbus receive buffer full message received"),
    )
    for message_type, log_fn, text in notices:
        if isinstance(message, message_type):
            log_fn(text)
    if isinstance(message, velbus.ModuleTypeMessage):
        self.logger.debug("Module type response received")
        name = message.module_name()
        address = message.address
        m_type = message.module_type
        if name == "Unknown":
            # unrecognized module: log and stop; subscribers are NOT notified
            self.logger.warning("Unknown module (code: " + str(message.module_type) + ')')
            return
        if name in velbus.ModuleRegistry:
            module = velbus.ModuleRegistry[name](m_type, name, address, self)
            self._modules[address] = module
        else:
            self.logger.warning("Module " + name + " is not yet supported.")
    for subscriber in self.__subscribers:
        subscriber(message)
|
def execute(self):
    """Execute this generator regarding its current configuration."""
    if not self.direct:
        self.write_and_log()
        if self.watch:
            from landslide.watcher import watch
            self.log(u"Watching %s\n" % self.watch_dir)
            watch(self.watch_dir, self.write_and_log)
        return
    # direct mode: render to stdout; PDF cannot be streamed this way
    if self.file_type == 'pdf':
        raise IOError(u"Direct output mode is not available for PDF export")
    print(self.render().encode(self.encoding))
|
def _is_plugin_disabled(plugin):
    """Determine if the provided plugin is disabled from running for the
    active task."""
    entry = _registered.get(plugin.name)
    if not entry:
        # unregistered plugins are considered enabled
        return False
    _, props = entry
    return bool(props.get('disabled'))
|
def list(cls, **kwargs):
    """List commands by issuing a GET request to the /command endpoint.

    Args:
        `**kwargs`: filters for the commands, such as:
            * command_type - HiveQuery, PrestoQuery, etc. (title case)
            * status - failed, success, etc
            * name
            * command_id
            * qbol_user_id
            * command_source
            * page
            * cluster_label
            * session_id, etc

    For example - Command.list(command_type="HiveQuery", status="success")
    """
    conn = Qubole.agent()
    # drop falsy filters; send None instead of an empty params dict
    params = {key: val for key, val in kwargs.items() if val}
    return conn.get(cls.rest_entity_path, params=params or None)
|
def get_all_hosts(resource_root, view=None):
    """Get all hosts.

    @param resource_root: The root Resource object.
    @param view: Optional view name forwarded as a query parameter.
    @return: A list of ApiHost objects.
    """
    params = dict(view=view) if view else None
    return call(resource_root.get, HOSTS_PATH, ApiHost, True, params=params)
|
def pid_max(self):
    """Get the maximum PID value.

    On Linux, the value is read from the `/proc/sys/kernel/pid_max` file.
    From `man 5 proc`: the default value, 32768, matches earlier kernels;
    on 64-bit systems pid_max can be set up to 2^22 (PID_MAX_LIMIT).

    If the file is unreadable or unavailable, returns None.

    Some other OSes:
        - On FreeBSD and macOS the maximum is 99999.
        - On OpenBSD >= 6.0 the maximum is 99999 (was 32766).
        - On NetBSD the maximum is 30000.

    :returns: int or None
    """
    if not LINUX:
        # no portable source for this value on other platforms
        return None
    # XXX: waiting for https://github.com/giampaolo/psutil/issues/720
    try:
        with open('/proc/sys/kernel/pid_max', 'rb') as f:
            return int(f.read())
    except (OSError, IOError):
        return None
|
def write_memory(self, *, region_name: str, offset: int = 0, value=None):
    """Write a value into a memory region on the QAM at a specified offset.

    :param region_name: Name of the declared memory region on the QAM.
    :param offset: Integer offset into the memory region to write to.
    :param value: Value to store at the indicated location.
    """
    # writes are only valid once a program has been loaded (or has run)
    assert self.status in ['loaded', 'done']
    self._variables_shim[ParameterAref(name=region_name, index=offset)] = value
    return self
|
def revoke_all(cls, cur, schema_name, roles):
    """Revoke all privileges on the schema and on all of its tables,
    sequences and functions for the given role(s)."""
    targets = (
        'SCHEMA {0}',
        'ALL TABLES IN SCHEMA {0}',
        'ALL SEQUENCES IN SCHEMA {0}',
        'ALL FUNCTIONS IN SCHEMA {0}',
    )
    # one batched statement, identical to the original concatenated SQL
    statement = ''.join('REVOKE ALL ON ' + t + ' FROM {1};' for t in targets)
    cur.execute(statement.format(schema_name, roles))
|
def update_domain(self, service_id, version_number, name_key, **kwargs):
    """Update the domain for a particular service and version."""
    endpoint = "/service/%s/version/%d/domain/%s" % (service_id, version_number, name_key)
    form_body = self._formdata(kwargs, FastlyDomain.FIELDS)
    content = self._fetch(endpoint, method="PUT", body=form_body)
    return FastlyDomain(self, content)
|
def load(stream, overrides=None, **kwargs):
    """Load a YAML configuration from a string or file-like object.

    Parameters
    ----------
    stream : str or object
        Either a string containing valid YAML or a file-like object
        supporting the .read() interface.
    overrides : dict, optional
        Overrides to apply; keys are dot-delimited paths to the target
        parameter, e.g. "model.corruptor.corruption_level".

    Returns
    -------
    graph : dict or object
        The dictionary or object (if the top-level element specified a
        Python object to instantiate).

    Notes
    -----
    Other keyword arguments are passed on to `yaml.load`.
    """
    global is_initialized
    if not is_initialized:
        initialize()
    if isinstance(stream, basestring):
        string = stream
    else:
        # NOTE: readlines() keeps trailing newlines, so this join doubles
        # blank lines; preserved as-is (harmless to the YAML parser).
        string = '\n'.join(stream.readlines())
    # processed_string = preprocess(string)
    # SECURITY NOTE: yaml.load without an explicit safe Loader can build
    # arbitrary objects; only feed it trusted configuration.
    proxy_graph = yaml.load(string, **kwargs)
    from . import init
    init(**proxy_graph.get('init', {}))
    if overrides is not None:
        handle_overrides(proxy_graph, overrides)
    return instantiate_all(proxy_graph)
|
def validate(self, data):
    """Validate data using the defined sub schemas/expressions, ensuring
    all values are valid.

    :param data: data to be validated against each sub schema in turn.
    :return: the validated data
    """
    # compile every sub schema first, then thread the data through them
    compiled = [
        self._schema(sub, error=self._error, ignore_extra_keys=self._ignore_extra_keys)
        for sub in self._args
    ]
    for sub_schema in compiled:
        data = sub_schema.validate(data)
    return data
|
def prune_old(self):
    """Remove published directories older than the expiry window."""
    expire_limit = int(time.time()) - (86400 * self.expire)
    logger.info('Pruning directories older than %d days', self.expire)
    if not os.path.isdir(self.pubdir):
        logger.warning('Dir %r not found -- skipping pruning', self.pubdir)
        return
    for entry in os.listdir(self.pubdir):
        logger.debug('Found: %r', entry)
        full_path = os.path.join(self.pubdir, entry)
        if not os.path.isdir(full_path):
            logger.info('%r is not a directory. Skipping.', entry)
            continue
        try:
            # the directory name itself encodes its timestamp via dirmask
            stamp = time.mktime(time.strptime(entry, self.dirmask))
        except ValueError as e:
            logger.info('Dir %r did not match dirmask %r: %r', entry, self.dirmask, e)
            logger.info('Skipping %r', entry)
            continue
        if stamp < expire_limit:
            shutil.rmtree(full_path)
            logger.info('File Publisher: Pruned old dir: %r', entry)
        else:
            logger.info('%r is still active', entry)
    logger.info('Finished with pruning')
|
def check_running(self):
    '''Check if a pid file exists and if it is associated with
    a running process.'''
    if not self.check_pidfile():
        return False
    pid = self.get_pidfile()
    if not salt.utils.platform.is_windows():
        if self.check_pidfile() and self.is_daemonized(pid) and os.getppid() != pid:
            return True
    else:
        # no os.getppid() on Windows; use salt.utils.win_functions.get_parent_pid
        if self.check_pidfile() and self.is_daemonized(pid) and salt.utils.win_functions.get_parent_pid() != pid:
            return True
    return False
|
def parse_prompts(etc_folder):
    """Read PROMPTS and prompts-original and return both as dictionaries
    (utterance id as key)."""
    prompts = textfile.read_key_value_lines(os.path.join(etc_folder, 'PROMPTS'), separator=' ')
    prompts_orig = textfile.read_key_value_lines(os.path.join(etc_folder, 'prompts-original'), separator=' ')
    # PROMPTS keys may be full paths; keep only the last path component
    # as the id (keys without '/' are left unchanged by the split).
    prompts = {key.split('/')[-1]: value for key, value in prompts.items()}
    return prompts, prompts_orig
|
def getTransitionActor(obj, action_id):
    """Return the actor that performed a given transition. If the transition
    has not been performed, or the current user has no privileges, return
    None.

    :return: the username of the user that performed the passed-in transition
    :type: string
    """
    history = getReviewHistory(obj)
    matches = (event.get('actor') for event in history if event.get('action') == action_id)
    return next(matches, None)
|
def readpipe(self, chunk=None):
    """Return an iterator over STDIN, line by line.

    If ``chunk`` is set to a positive non-zero integer, the reads are
    performed in chunks of that many lines and yielded as lists; any
    partially-filled chunk is flushed at EOF. Otherwise the lines are
    yielded one by one.
    """
    read = []
    while True:
        line = sys.stdin.readline()
        if not line:
            # EOF: flush any partially-filled chunk before stopping
            if read:
                yield read
            return
        if not chunk:
            yield line
        else:
            read.append(line)
            if len(read) == chunk:
                yield read
                # BUG FIX: start a fresh list; the original never reset it,
                # so only the first chunk was ever yielded (and it kept
                # mutating afterwards). Also removed an unreachable second
                # `return` at EOF.
                read = []
|
def upgrade_plugin(self, name, remote, privileges):
    """Upgrade an installed plugin.

    Args:
        name (string): Name of the plugin to upgrade. The ``:latest``
            tag is optional and is the default if omitted.
        remote (string): Remote reference to upgrade to. The ``:latest``
            tag is optional and is the default if omitted.
        privileges (:py:class:`list`): A list of privileges the user
            consents to grant to the plugin. Can be retrieved using
            :py:meth:`~plugin_privileges`.

    Returns:
        An iterable object streaming the decoded API logs
    """
    url = self._url('/plugins/{0}/upgrade', name)
    headers = {}
    # attach registry credentials for the remote's registry, when configured
    registry, repo_name = auth.resolve_repository_name(remote)
    auth_header = auth.get_config_header(self, registry)
    if auth_header:
        headers['X-Registry-Auth'] = auth_header
    response = self._post_json(url, params={'remote': remote}, headers=headers, data=privileges, stream=True)
    self._raise_for_status(response)
    return self._stream_helper(response, decode=True)
|
def _pick_cluster_host(self, value):
    """Select the Redis cluster host for the specified value.

    :param mixed value: The value to use when looking for the host
    :rtype: tredis.client._Connection
    """
    # map the key to its cluster hash slot
    slot_id = crc16.crc16(self._encode_resp(value[1])) % HASH_SLOTS
    for host, conn in self._cluster.items():
        for slot_range in conn.slots:
            if slot_range[0] <= slot_id <= slot_range[1]:
                return conn
    LOGGER.debug('Host not found for %r, returning first connection', value)
    # deterministic fallback: lowest-sorted host
    first_host = sorted(self._cluster.keys())[0]
    return self._cluster[first_host]
|
def commit(self, force=False, partial=False, device_and_network=False,
           policy_and_objects=False, vsys="", no_vsys=False, delay_factor=0.1):
    """Commit the candidate configuration.

    Commit the entered configuration. Raises ValueError with the failure
    output if the commit fails. Automatically enters configuration mode.

    default:
        command_string = commit
    (device_and_network or policy_and_objects or vsys or no_vsys) and not partial:
        raises ValueError
    """
    delay_factor = self.select_delay_factor(delay_factor)
    if (device_and_network or policy_and_objects or vsys or no_vsys) and not partial:
        raise ValueError(
            "'partial' must be True when using "
            "device_and_network or policy_and_objects "
            "or vsys or no_vsys."
        )
    # Select proper command string based on arguments provided
    command_string = "commit"
    commit_marker = "configuration committed successfully"
    if force:
        command_string += " force"
    if partial:
        command_string += " partial"
        # The guard above guarantees these options are only set when
        # partial=True, so nesting them here preserves behavior.
        if vsys:
            command_string += " {0}".format(vsys)
        if device_and_network:
            command_string += " device-and-network"
        if policy_and_objects:
            # BUG FIX: previously appended " device-and-network" here
            # (copy-paste), so a policy-and-objects partial commit silently
            # committed the wrong scope.
            command_string += " policy-and-objects"
        if no_vsys:
            command_string += " no-vsys"
        command_string += " excluded"
    # Enter config mode (if necessary)
    output = self.config_mode()
    output += self.send_command_expect(
        command_string,
        strip_prompt=False,
        strip_command=False,
        expect_string="100%",
        delay_factor=delay_factor,
    )
    if commit_marker not in output.lower():
        raise ValueError(
            "Commit failed with the following errors:\n\n{0}".format(output)
        )
    return output
|
def get_mesos_task(task_name):
    """Get a mesos task with a specific task name"""
    # Scan the current task list and return the first name match, or None
    # when the list is unavailable or no task matches.
    all_tasks = get_mesos_tasks()
    if all_tasks is None:
        return None
    return next((task for task in all_tasks if task['name'] == task_name), None)
|
def create_group(self, group_name, path='/'):
    """Create a group.

    :type group_name: string
    :param group_name: The name of the new group

    :type path: string
    :param path: The path to the group (Optional). Defaults to /.
    """
    return self.get_response(
        'CreateGroup',
        {'GroupName': group_name, 'Path': path},
    )
|
def _copy_id_str_old ( self ) :
'''Return the string to execute ssh - copy - id'''
|
if self . passwd : # Using single quotes prevents shell expansion and
# passwords containing ' $ '
return "{0} {1} '{2} -p {3} {4} {5}@{6}'" . format ( 'ssh-copy-id' , '-i {0}.pub' . format ( self . priv ) , self . _passwd_opts ( ) , self . port , self . _ssh_opts ( ) , self . user , self . host )
return None
|
def compute_collections(self):
    """Finds the collections (clusters, chains) that exist in parsed_response.

    Modified:
        - self.collection_sizes: populated with a list of integers indicating
          the number of units belonging to each collection
        - self.collection_indices: populated with a list of strings indicating
          the indices of each element of each collection
        - self.collection_list: populated with a list of lists, each list
          containing Unit objects belonging to each collection

    There are two types of collections currently implemented:
        - cluster: every entry in a cluster is sufficiently similar to every
          other entry
        - chain: every entry in a chain is sufficiently similar to adjacent
          entries

    Similarity between words is calculated using the compute_similarity_score
    method. Scores between words are then thresholded and binarized using
    empirically-derived thresholds (see: ???). Overlap of clusters is allowed
    (a word can be part of multiple clusters), but overlapping chains are not
    possible, as any two adjacent words with a lower-than-threshold similarity
    breaks the chain. Clusters subsumed by other clusters are not counted.
    Singletons, i.e., clusters of size 1, are included in this analysis.

    .. todo: Find source for thresholding values.
    """
    # Pick the similarity threshold: an explicit override wins; otherwise it
    # is derived from the task type and the active similarity measure.
    if self.custom_threshold:
        self.similarity_threshold = self.custom_threshold
    elif self.type == "PHONETIC":
        if self.current_similarity_measure == "phone":
            # Empirically-derived per-letter thresholds for single-phone
            # similarity (source unknown — see todo above).
            phonetic_similarity_thresholds = {
                'a': 0.222222222222, 'b': 0.3, 'c': 0.2857142857134,
                'd': 0.3, 'e': 0.25, 'f': 0.333333333333,
                'g': 0.2857142857142857, 'h': 0.333333333333, 'i': 0.3,
                'j': 0.3, 'k': 0.3, 'l': 0.333333333333,
                'm': 0.333333333333, 'n': 0.2857142857142857,
                'o': 0.222222222222, 'p': 0.2857142857134,
                'q': 0.4285714285714286, 'r': 0.3, 's': 0.2857142857134,
                't': 0.2857142857134, 'u': 0.3076923076923077,
                'v': 0.333333333333, 'w': 0.333333333333,
                'x': 0.2857142857134, 'y': 0.333333333333,
                'z': 0.333333333333,
            }
            self.similarity_threshold = phonetic_similarity_thresholds[self.letter]
        elif self.current_similarity_measure == "biphone":
            # Biphone similarity is binary, so only an exact match counts.
            self.similarity_threshold = 1
    elif self.type == "SEMANTIC":
        if self.current_similarity_measure == "lsa":
            if self.category == 'animals':
                # Empirical LSA thresholds keyed by the clustering parameter
                # (stringified); 'animals' is the only category covered here.
                thresholds = {
                    '50': 0.229306542684, '51': 0.22594687203200001,
                    '52': 0.22403235205800001, '53': 0.214750475853,
                    '54': 0.210178113675, '55': 0.209214667474,
                    '56': 0.204037629443, '57': 0.203801260742,
                    '58': 0.203261303516, '59': 0.20351336452999999,
                    '60': 0.19834361415999999, '61': 0.19752806852999999,
                    '62': 0.191322450624, '63': 0.194312302459,
                    '64': 0.188165419858, '65': 0.18464545450299999,
                    '66': 0.18478136731399999, '67': 0.178950849271,
                    '68': 0.17744175606199999, '69': 0.17639888996299999,
                    '70': 0.17537403274400001, '71': 0.17235091169799999,
                    '72': 0.17115875396499999, '73': 0.17262141635100001,
                    '74': 0.16580303697500001, '75': 0.16416843492800001,
                    '76': 0.166395146381, '77': 0.162961462955,
                    '78': 0.161888890545, '79': 0.160416925579,
                    '80': 0.157132807023, '81': 0.15965395155699999,
                    '82': 0.155974588379, '83': 0.15606832182700001,
                    '84': 0.14992240019899999, '85': 0.15186462595399999,
                    '86': 0.14976638614599999, '87': 0.14942388535199999,
                    '88': 0.14740916274999999, '89': 0.14821336952600001,
                    '90': 0.14188941422699999, '91': 0.14039515298300001,
                    '92': 0.14125100827199999, '93': 0.140135804694,
                    '94': 0.13933483465099999, '95': 0.139679588617,
                    '96': 0.13569859464199999, '97': 0.135394351192,
                    '98': 0.13619473881800001, '99': 0.136671316751,
                    '100': 0.135307208304,
                }
                self.similarity_threshold = thresholds[str(self.clustering_parameter)]
        elif self.current_similarity_measure == "custom":
            self.similarity_threshold = self.custom_threshold
    if not self.quiet:
        print "Similarity threshold:", self.similarity_threshold
    # Try to grow one collection starting at every position in the response.
    for index, unit in enumerate(self.parsed_response):
        next_word_index = index + 1
        collection = [unit]
        # begin current collection
        collection_index = [index]
        # begin current collection index list
        collection_terminus_found = False
        while not collection_terminus_found:
            if next_word_index < len(self.parsed_response):  # Check whether last word in attempt has been read
                test = False
                if self.current_collection_type == "cluster":  # Check whether next word is related to
                    # every other word in cluster
                    unit2 = self.parsed_response[next_word_index]
                    test = all([self.compute_similarity_score(unit2, other_unit) >= self.similarity_threshold for other_unit in collection])
                elif self.current_collection_type == "chain":  # check whether the word is related to the one before it
                    # remember that we're testing words at the end of the chain, and creating new links
                    unit1 = self.parsed_response[next_word_index - 1]
                    unit2 = self.parsed_response[next_word_index]
                    test = self.compute_similarity_score(unit1, unit2) >= self.similarity_threshold
                if test:  # add NEXT word
                    collection.append(self.parsed_response[next_word_index])
                    collection_index.append(next_word_index)
                    next_word_index += 1
                else:  # Check whether cluster is subsequence of cluster
                    # already added to list
                    # NOTE(review): the membership test below is a substring
                    # check against str(list), not a true subsequence check —
                    # e.g. "1 2" also matches inside "11 22"; confirm intended.
                    collection_index = ' '.join([str(w) for w in collection_index])
                    if collection_index not in str(self.collection_indices):
                        self.collection_indices.append(collection_index)
                        self.collection_sizes.append(len(collection))
                    collection_terminus_found = True
            else:  # Execute if word is last word in attempt
                collection_index = ' '.join([str(w) for w in collection_index])
                if collection_index not in str(self.collection_indices):
                    self.collection_indices.append(collection_index)
                    self.collection_sizes.append(len(collection))
                collection_terminus_found = True
    # Get a list of collections and their positions in the response.
    for index in self.collection_indices:
        collection = []
        for i in index.split():
            collection.append(self.parsed_response[int(i)])
        self.collection_list.append(collection)
|
def get_parent(brain_or_object, catalog_search=False):
    """Locate the parent object of the content/catalog brain.

    The `catalog_search` switch uses the `portal_catalog` to do a search and
    return a brain instead of the full parent object. If that search yields
    no results, the full parent object is returned instead.

    :param brain_or_object: A single catalog brain or content object
    :type brain_or_object: ATContentType/DexterityContentType/CatalogBrain
    :param catalog_search: Use a catalog query to find the parent object
    :type catalog_search: bool
    :returns: parent object
    :rtype: ATContentType/DexterityContentType/PloneSite/CatalogBrain
    """
    # The portal is its own parent.
    if is_portal(brain_or_object):
        return get_portal()

    if not catalog_search:
        return get_object(brain_or_object).aq_parent

    parent_path = get_parent_path(brain_or_object)
    # The portal object itself is not indexed like regular content.
    if parent_path == get_path(get_portal()):
        return get_portal()

    catalog = get_portal_catalog()
    brains = catalog(path={"query": parent_path, "depth": 0})
    if brains:
        return brains[0]
    # No catalog entry for the parent: fall back to the full object.
    return get_object(brain_or_object).aq_parent
|
def get(self, path=None, url_kwargs=None, **kwargs):
    """Sends a GET request.

    :param path:
        The HTTP path (either absolute or relative).
    :param url_kwargs:
        Parameters to override in the generated URL. See `~hyperlink.URL`.
    :param **kwargs:
        Optional arguments that ``request`` takes.
    :return: response object
    """
    # Build the final URL first, then delegate to the underlying session.
    target = self._url(path, url_kwargs)
    return self._session.get(target, **kwargs)
|
def select_configuration(self, obresult):
    """Select instrument configuration based on OB.

    Resolution order: the INSCONF keyword of the first sample frame
    (matched first as a uuid key, then against configuration names),
    then the observation date against each configuration's validity
    window, and finally the 'default' configuration when no sample
    frame is available.
    """
    logger = logging.getLogger(__name__)
    logger.debug('calling default configuration selector')
    # get first possible image
    ref = obresult.get_sample_frame()
    extr = self.datamodel.extractor_map['fits']
    if ref:  # get INSCONF configuration
        result = extr.extract('insconf', ref)
        if result:  # found the keyword, try to match
            logger.debug('found insconf config uuid=%s', result)
            # Use insconf as uuid key
            if result in self.configurations:
                return self.configurations[result]
            else:  # Additional check for conf.name
                # for/else: the else runs when the loop finds no matching
                # name, so an unresolvable insconf value raises.
                for conf in self.configurations.values():
                    if conf.name == result:
                        return conf
                else:
                    raise KeyError('insconf {} does not match any config'.format(result))
        # If not, try to match by DATE
        date_obs = extr.extract('observation_date', ref)
        for key, conf in self.configurations.items():
            if key == 'default':  # skip default
                continue
            # An open-ended window (date_end is None) accepts any later date.
            if conf.date_end is not None:
                upper_t = date_obs < conf.date_end
            else:
                upper_t = True
            if upper_t and (date_obs >= conf.date_start):
                logger.debug('found date match, config uuid=%s', key)
                return conf
        # NOTE(review): when a frame exists but neither insconf nor the
        # observation date matches, this falls through returning None
        # implicitly — confirm callers handle that.
    else:
        logger.debug('no match, using default configuration')
        return self.configurations['default']
|
def add_sink(self, sink):
    """Add a vehicle data sink to the instance. ``sink`` should be a
    sub-class of ``DataSink`` or at least have a ``receive(message,
    **kwargs)`` method.

    The sink will be started if it is startable (i.e. it has a ``start()``
    method).
    """
    if sink is None:
        return
    self.sinks.add(sink)
    # Startable sinks get kicked off immediately.
    start = getattr(sink, 'start', None)
    if start is not None:
        start()
|
def do_folder_update_metadata(client, args):
    """Update folder metadata from parsed CLI arguments; always returns True."""
    # Forward every CLI field straight to the API call.
    client.update_folder_metadata(
        args.uri,
        foldername=args.foldername,
        description=args.description,
        mtime=args.mtime,
        privacy=args.privacy,
        privacy_recursive=args.recursive,
    )
    return True
|
def dumps(obj, pretty=False, escaped=True):
    """Serialize ``obj`` to a VDF formatted ``str``."""
    # Validate arguments up front so errors surface before any output is
    # generated.
    if not isinstance(obj, dict):
        raise TypeError("Expected data to be an instance of``dict``")
    for flag, label in ((pretty, 'pretty'), (escaped, 'escaped')):
        if not isinstance(flag, bool):
            raise TypeError("Expected {0} to be of type bool".format(label))
    return ''.join(_dump_gen(obj, pretty, escaped))
|
def get_removed_obs_importance(self, obslist_dict=None, reset_zero_weight=False):
    """get a dataframe of the posterior uncertainty
    as a result of losing some observations

    Parameters
    ----------
    obslist_dict : dict
        dictionary of groups of observations that are to be treated as
        lost.  key values become row labels in the returned dataframe.
        If None, then test every (nonzero weight - see reset_zero_weight)
        observation
    reset_zero_weight : bool or float
        a flag to reset observations with zero weight in obslist_dict.
        If the value of reset_zero_weights can be cast to a float, then
        that value will be assigned to zero weight obs.  Otherwise, zero
        weight obs will be given a weight of 1.0

    Returns
    -------
    pandas.DataFrame
        a dataframe with index of obslist_dict.keys() and columns of
        forecast names.  The values in the dataframe are the posterior
        variances of the forecasts resulting from losing the information
        contained in obslist_dict[key value]

    Note
    ----
    all observations listed in obslist_dict with zero weights will be
    dropped unless reset_zero_weight is set

    Example
    -------
    ``>>> import pyemu``
    ``>>> sc = pyemu.Schur(jco="pest.jcb")``
    ``df = sc.get_removed_obs_importance()``
    """
    # Normalize obslist_dict: a plain list becomes a self-keyed dict.
    if obslist_dict is not None:
        if type(obslist_dict) == list:
            obslist_dict = dict(zip(obslist_dict, obslist_dict))
    elif reset_zero_weight is False and self.pst.nnz_obs == 0:
        raise Exception("not resetting weights and there are no non-zero weight obs to remove")
    reset = False
    if reset_zero_weight is not False:
        # Weight resets are only implemented for diagonal obs covariance.
        if not self.obscov.isdiagonal:
            raise NotImplementedError("cannot reset weights for non-" + "diagonal obscov")
        reset = True
        # NOTE(review): bare except — any non-float-castable value silently
        # falls back to a weight of 1.0 (this matches the documented
        # contract, but also swallows unrelated errors).
        try:
            weight = float(reset_zero_weight)
        except:
            weight = 1.0
        self.logger.statement("resetting zero weights to {0}".format(weight))
    # make copies of the original obscov and pst
    org_obscov = self.obscov.get(self.obscov.row_names)
    org_pst = self.pst.get()
    self.log("calculating importance of observations")
    if reset and obslist_dict is None:
        # Reset every zero-weight obs that exists in both the jacobian
        # and the obs covariance.
        obs = self.pst.observation_data
        onames = [name for name in self.pst.zero_weight_obs_names if name in self.jco.obs_names and name in self.obscov.row_names]
        obs.loc[onames, "weight"] = weight
    if obslist_dict is None:
        obslist_dict = dict(zip(self.pst.nnz_obs_names, self.pst.nnz_obs_names))
    elif reset:
        self.pst.observation_data.index = self.pst.observation_data.obsnme
        for name, obslist in obslist_dict.items():
            self.log("resetting weights in obs in group {0}".format(name))
            self.pst._adjust_weights_by_list(obslist, weight)
            self.log("resetting weights in obs in group {0}".format(name))
    # Ensure every case maps to a list of observation names.
    for case, obslist in obslist_dict.items():
        if not isinstance(obslist, list):
            obslist = [obslist]
        obslist_dict[case] = obslist
    if reset:
        # Rebuild obscov from the (possibly re-weighted) pst.
        self.log("resetting self.obscov")
        self.reset_obscov(self.pst)
        self.log("resetting self.obscov")
    results = {}
    names = ["base"]
    # "base" row: posterior variance with all observations retained.
    for forecast, pt in self.posterior_forecast.items():
        results[forecast] = [pt]
    for case_name, obslist in obslist_dict.items():
        if not isinstance(obslist, list):
            obslist = [obslist]
        names.append(case_name)
        self.log("calculating importance of observations by removing: " + str(obslist) + '\n')
        # check for missing names
        missing_onames = [oname for oname in obslist if oname not in self.jco.obs_names]
        if len(missing_onames) > 0:
            raise Exception("case {0} has observation names ".format(case_name) + "not found: " + ','.join(missing_onames))
        # find the set difference between obslist and jco obs names
        # diff_onames = [oname for oname in self.jco.obs_names if oname not in obslist]
        diff_onames = [oname for oname in self.nnz_obs_names if oname not in obslist and oname not in self.forecast_names]
        # calculate the increase in forecast variance by not using the obs
        # in obslist
        case_post = self.get(par_names=self.jco.par_names, obs_names=diff_onames).posterior_forecast
        for forecast, pt in case_post.items():
            results[forecast].append(pt)
    df = pd.DataFrame(results, index=names)
    self.log("calculating importance of observations by removing: " + str(obslist) + '\n')
    if reset:
        # Restore the original obscov and pst so the instance is unchanged.
        self.reset_obscov(org_obscov)
        self.reset_pst(org_pst)
    return df
|
def gammaRDD(sc, shape, scale, size, numPartitions=None, seed=None):
    """Generates an RDD comprised of i.i.d. samples from the Gamma
    distribution with the input shape and scale.

    :param sc: SparkContext used to create the RDD.
    :param shape: shape (> 0) parameter for the Gamma distribution
    :param scale: scale (> 0) parameter for the Gamma distribution
    :param size: Size of the RDD.
    :param numPartitions: Number of partitions in the RDD (default: `sc.defaultParallelism`).
    :param seed: Random seed (default: a random long integer).
    :return: RDD of float comprised of i.i.d. samples ~ Gamma(shape, scale).

    >>> from math import sqrt
    >>> shape = 1.0
    >>> scale = 2.0
    >>> expMean = shape * scale
    >>> expStd = sqrt(shape * scale * scale)
    >>> x = RandomRDDs.gammaRDD(sc, shape, scale, 1000, seed=2)
    >>> stats = x.stats()
    >>> stats.count()
    1000
    >>> abs(stats.mean() - expMean) < 0.5
    True
    >>> abs(stats.stdev() - expStd) < 0.5
    True
    """
    # Coerce the distribution parameters once, then delegate to the JVM.
    gamma_shape = float(shape)
    gamma_scale = float(scale)
    return callMLlibFunc("gammaRDD", sc._jsc, gamma_shape, gamma_scale,
                         size, numPartitions, seed)
|
def str2date(self, datestr):
    """Try to parse a date from a string.

    The cached default template is tried first; on failure every known
    template is tried in order, and the first one that succeeds becomes
    the new default so subsequent calls with the same format take the
    fast path. Raises ``NoMatchingTemplateError`` when nothing matches.

    :param datestr: a string representing a date
    :type datestr: str
    :return: a datetime.date object

    Usage::

        >>> from weatherlab.lib.timelib.timewrapper import timewrapper
        >>> timewrapper.str2date("12/15/2014")
        datetime.date(2014, 12, 15)
    """
    # Fast path: most inputs share one format, so try the cached template.
    # BUG FIX: the original bare ``except:`` also swallowed SystemExit and
    # KeyboardInterrupt; strptime only raises ValueError (format mismatch)
    # or TypeError (non-string input), so catch exactly those.
    try:
        return datetime.strptime(datestr, self.default_date_template).date()
    except (ValueError, TypeError):
        pass
    # Slow path: probe every known template; cache the first that works.
    for template in self.date_templates:
        try:
            a_datetime = datetime.strptime(datestr, template)
        except (ValueError, TypeError):
            continue
        self.default_date_template = template
        return a_datetime.date()
    raise NoMatchingTemplateError(datestr)
|
def clear_trash(cookie, tokens):
    '''Empty the recycle bin, permanently deleting every file in it.'''
    url = ''.join([
        const.PAN_API_URL,
        'recycle/clear?channel=chunlei&clienttype=0&web=1',
        '&t=', util.timestamp(),
        '&bdstoken=', tokens['bdstoken'],
    ])
    # The endpoint requires a POST; the request body is intentionally empty.
    req = net.urlopen(
        url,
        headers={'Cookie': cookie.header_output()},
        data=''.encode(),
    )
    if not req:
        return None
    return json.loads(req.data.decode())
|
def _validate_image_datatype ( self , img_array ) :
"""Only uint8 and uint16 images are currently supported ."""
|
if img_array . dtype != np . uint8 and img_array . dtype != np . uint16 :
msg = ( "Only uint8 and uint16 datatypes are currently supported " "when writing." )
raise RuntimeError ( msg )
|
def list_all_refund_operations(cls, **kwargs):
    """List RefundOperations.

    Return a list of RefundOperations. This method makes a synchronous
    HTTP request by default. To make an asynchronous HTTP request, pass
    async=True:

    >>> thread = api.list_all_refund_operations(async=True)
    >>> result = thread.get()

    :param async bool
    :param int page: page number
    :param int size: page size
    :param str sort: page order
    :return: page[RefundOperation], or the request thread when called
        asynchronously.
    """
    kwargs['_return_http_data_only'] = True
    # Both modes delegate to the same helper: in async mode it returns the
    # request thread, otherwise the materialized page of results.
    return cls._list_all_refund_operations_with_http_info(**kwargs)
|
def creation_time(self):
    """dfdatetime.DateTimeValues: creation time or None if not available."""
    # Raw creation timestamp from the underlying pyfsapfs file entry.
    timestamp = self._fsapfs_file_entry.get_creation_time_as_integer()
    # NOTE(review): the docstring promises None when unavailable, but the
    # value is always wrapped — presumably APFSTime tolerates a None/invalid
    # timestamp internally; confirm against dfdatetime.
    return dfdatetime_apfs_time.APFSTime(timestamp=timestamp)
|
def version(self):
    """What's the version of this database? Found in metadata attached
    by datacache when creating this database.
    """
    cursor = self.connection.execute(
        "SELECT version FROM %s" % METADATA_TABLE_NAME)
    row = cursor.fetchone()
    # Missing metadata row means a pre-versioning database.
    if not row:
        return 0
    return int(row[0])
|
def load_chk(filename):
    '''Load a checkpoint file

    Argument:
      | filename -- the file to load from

    The return value is a dictionary whose keys are field labels and the
    values can be None, string, integer, float, boolean or an array of
    strings, integers, booleans or floats.

    The file format is similar to the Gaussian fchk format, but has the extra
    feature that the shapes of the arrays are also stored.
    '''
    with open(filename) as f:
        result = {}
        while True:
            line = f.readline()
            if line == '':
                # End of file.
                break
            if len(line) < 54:
                raise IOError('Header lines must be at least 54 characters long.')
            # Fixed-column header layout: label in columns 0-39, kind tag in
            # columns 47-51, scalar value (or array shape) from column 53 on.
            key = line[:40].strip()
            kind = line[47:52].strip()
            value = line[53:-1]
            # discard newline
            if kind == 'str':
                result[key] = value
            elif kind == 'int':
                result[key] = int(value)
            elif kind == 'bln':
                result[key] = value.lower() in ['true', '1', 'yes']
            elif kind == 'flt':
                result[key] = float(value)
            elif kind[3:5] == 'ar':
                # Array field ('strar', 'intar', 'blnar', 'fltar'): the header
                # value holds the comma-separated shape and the data follow on
                # subsequent whitespace-separated lines.
                if kind[:3] == 'str':
                    dtype = np.dtype('U22')
                elif kind[:3] == 'int':
                    dtype = int
                elif kind[:3] == 'bln':
                    dtype = bool
                elif kind[:3] == 'flt':
                    dtype = float
                else:
                    raise IOError('Unsupported kind: %s' % kind)
                shape = tuple(int(i) for i in value.split(','))
                array = np.zeros(shape, dtype)
                if array.size > 0:
                    # Fill the flat view element by element until the declared
                    # size is reached; running out of lines is an error.
                    work = array.ravel()
                    counter = 0
                    while True:
                        short = f.readline().split()
                        if len(short) == 0:
                            raise IOError('Insufficient data')
                        for s in short:
                            # bool must be tested before callable(): bool is
                            # callable too, but bool('False') would be True.
                            if dtype == bool:
                                work[counter] = s.lower() in ['true', '1', 'yes']
                            elif callable(dtype):
                                work[counter] = dtype(s)
                            else:
                                work[counter] = s
                            counter += 1
                            if counter == array.size:
                                break
                        if counter == array.size:
                            break
                result[key] = array
            elif kind == 'none':
                result[key] = None
            else:
                raise IOError('Unsupported kind: %s' % kind)
        return result
|
def predict(parameters, X):
    """Using the learned parameters, predicts a class for each example in X.

    Arguments:
        parameters -- python dictionary containing your parameters
        X -- input data of size (n_x, m)

    Returns:
        predictions -- vector of predictions of our model (red: 0 / blue: 1)
    """
    # Forward-propagate to get activation probabilities, then binarize at 0.5.
    A2, _ = forward_propagation(X, parameters)
    predictions = np.array([int(prob > 0.5) for prob in A2[0]])
    return predictions
|
def beacon(config):
    '''Check on different service status reported by the django-server-status
    library.

    .. code-block:: yaml

        beacons:
          http_status:
            - sites:
                example-site-1:
                  url: "https://example.com/status"
                  timeout: 30
                  content-type: json
                  status:
                    - value: 400
                      comp: <
                    - value: 300
                      comp: '>='
                  content:
                    - path: 'certificate:status'
                      value: down
                      comp: '=='
                    - path: 'status_all'
                      value: down
                      comp: '=='
            - interval: 10
    '''
    ret = []
    _config = {}
    list(map(_config.update, config))
    for site, site_config in _config.get('sites', {}).items():
        url = site_config.pop('url')
        content_type = site_config.pop('content_type', 'json')
        try:
            r = requests.get(url, timeout=site_config.pop('timeout', 30))
        except requests.exceptions.RequestException as e:
            # BUG FIX: the original fell through here with ``r`` unbound,
            # raising NameError on the next statement. Skip this site.
            log.info("Request failed: %s", e)
            continue
        # BUG FIX: the original tested the *bound method*
        # ``r.raise_for_status`` (always truthy), so every response — even a
        # 200 — was reported invalid and its checks were skipped. Only flag
        # genuine HTTP error statuses.
        if not r.ok:
            log.info('[-] Response from status endpoint was invalid: '
                     '%s', r.status_code)
            _failed = {'status_code': r.status_code, 'url': url}
            ret.append(_failed)
            continue
        # Evaluate each configured check against the response.
        for attr, checks in site_config.items():
            for check in checks:
                log.debug('[+] response_item: %s', attr)
                attr_path = check.get('path', '')
                comp = comparisons[check['comp']]
                expected_value = check['value']
                if attr_path:
                    received_value = salt.utils.data.traverse_dict_and_list(
                        attr_func_map[attr](r), attr_path)
                else:
                    received_value = attr_func_map[attr](r)
                if received_value is None:
                    log.info('[-] No data found at location %s for url %s',
                             attr_path, url)
                    continue
                log.debug('[+] expected_value: %s', expected_value)
                log.debug('[+] received_value: %s', received_value)
                if not comp(expected_value, received_value):
                    _failed = {
                        'expected': expected_value,
                        'received': received_value,
                        'url': url,
                        'path': attr_path,
                    }
                    ret.append(_failed)
    return ret
|
def add_cmdline_arg(args, arg, *values):
    """Adds a command line argument *arg* to a list of arguments *args*, e.g.
    as returned from :py:func:`global_cmdline_args`. When *arg* exists, *args*
    is returned unchanged. Otherwise a new list is returned with *arg*
    appended at the end, followed by the optional *values*. Example:

    .. code-block:: python

        args = global_cmdline_values()
        # -> ["--local-scheduler"]

        add_cmdline_arg(args, "--local-scheduler")
        # -> ["--local-scheduler"]

        add_cmdline_arg(args, "--workers", 4)
        # -> ["--local-scheduler", "--workers", 4]
    """
    # Existing flag: hand back the original object untouched.
    if arg in args:
        return args
    # Build a fresh list so the caller's sequence is never mutated.
    extended = list(args)
    extended.append(arg)
    extended.extend(values)
    return extended
|
def make_tornado_app(self):
    """Creates a :py:class:`tornado.web.Application` instance that respects
    the JSON RPC 2.0 specs and exposes the designated methods. Can be used
    in tests to obtain the Tornado application.

    :return: a :py:class:`tornado.web.Application` instance
    """
    rpc_route = (self.endpoint, TornadoJsonRpcHandler, {"microservice": self})
    handlers = [rpc_route]
    # Let subclasses contribute extra and static routes before the app is
    # constructed.
    self._add_extra_handlers(handlers)
    self._add_static_handlers(handlers)
    return Application(handlers, template_path=self.template_dir)
|
def url_to_filename(url, index='index.html', alt_char=False):
    '''Return a filename from a URL.

    Args:
        url (str): The URL.
        index (str): If a filename could not be derived from the URL path,
            use index instead. For example, ``/images/`` will return
            ``index.html``.
        alt_char (bool): If True, the character for the query deliminator
            will be ``@`` intead of ``?``.

    This function does not include the directories and does not sanitize
    the filename.

    Returns:
        str
    '''
    assert isinstance(url, str), 'Expect str. Got {}.'.format(type(url))
    parts = urllib.parse.urlsplit(url)
    # Last path segment; an empty one (trailing slash) falls back to index.
    filename = parts.path.rsplit('/', 1)[-1] or index
    if parts.query:
        delimiter = '@' if alt_char else '?'
        filename = '{0}{1}{2}'.format(filename, delimiter, parts.query)
    return filename
|
def _set_digraph_a(self, char):
    '''Sets the currently active character, in case it is (potentially)
    the first part of a digraph.
    '''
    # Record the character itself (CV is a module-level category constant —
    # presumably a consonant/vowel class; confirm in the defining module).
    self._set_char(char, CV)
    # Remember this character's digraph lookup info so a following character
    # can complete the digraph; di_a_lt is a module-level table keyed by char.
    self.active_dgr_a_info = di_a_lt[char]
|
def add_page(self, orientation=''):
    """Start a new page.

    Closes the current page (emitting the footer) if one is open, begins a
    new one, and re-emits the drawing state (line width, font, colors) that
    was in effect before the break, since header/footer may change it.
    """
    if (self.state == 0):
        # Document not started yet; open() initializes output state.
        self.open()
    family = self.font_family
    if self.underline:
        style = self.font_style + 'U'
    else:
        style = self.font_style
    size = self.font_size_pt
    # Snapshot the current graphics state so it can be restored on the new
    # page after header/footer run.
    lw = self.line_width
    dc = self.draw_color
    fc = self.fill_color
    tc = self.text_color
    cf = self.color_flag
    if (self.page > 0):  # Page footer
        self.in_footer = 1
        self.footer()
        self.in_footer = 0
        # close page
        self._endpage()
    # Start new page
    self._beginpage(orientation)
    # Set line cap style to square
    self._out('2 J')
    # Set line width
    self.line_width = lw
    self._out(sprintf('%.2f w', lw * self.k))
    # Set font
    if (family):
        self.set_font(family, style, size)
    # Set colors
    self.draw_color = dc
    if (dc != '0 G'):
        self._out(dc)
    self.fill_color = fc
    if (fc != '0 g'):
        self._out(fc)
    self.text_color = tc
    self.color_flag = cf
    # Page header
    self.header()
    # Restore line width (the header may have changed it)
    if (self.line_width != lw):
        self.line_width = lw
        self._out(sprintf('%.2f w', lw * self.k))
    # Restore font
    if (family):
        self.set_font(family, style, size)
    # Restore colors (again, in case the header changed them)
    if (self.draw_color != dc):
        self.draw_color = dc
        self._out(dc)
    if (self.fill_color != fc):
        self.fill_color = fc
        self._out(fc)
    self.text_color = tc
    self.color_flag = cf
|
def list_billing(region, filter_by_kwargs):
    """List available billing metrics.

    :param region: AWS region name to connect to.
    :param filter_by_kwargs: single-entry dict of {dimension_name: value}
        used to filter metrics on their Dimensions; ``ServiceName`` is the
        only really valuable key. A falsy value selects metrics *without*
        that dimension (i.e. the account-total ``EstimatedCharges``).
        Falsy/empty disables filtering entirely.
    :returns: list of matching CloudWatch metric objects.
    """
    conn = boto.ec2.cloudwatch.connect_to_region(region)
    metrics = conn.list_metrics(metric_name='EstimatedCharges')
    # Filtering is based on metric Dimensions. Only really valuable one is
    # ServiceName.
    if filter_by_kwargs:
        # FIX: ``dict.keys()[0]`` only works on Python 2 (dict views are not
        # subscriptable on 3); next(iter(...)) behaves identically on both.
        filter_key = next(iter(filter_by_kwargs))
        filter_value = filter_by_kwargs[filter_key]
        if filter_value:
            filtered_metrics = [
                x for x in metrics
                if x.dimensions.get(filter_key)
                and x.dimensions.get(filter_key)[0] == filter_value
            ]
        else:
            # ServiceName='' selects the dimension-less account total.
            filtered_metrics = [
                x for x in metrics if not x.dimensions.get(filter_key)
            ]
    else:
        filtered_metrics = metrics
    return filtered_metrics
|
def get_version(db, name):
    """Query database and return migration version. WARNING: side effecting
    function! if no version information can be found, any existing database
    matching the passed one's name will be deleted and recreated.

    :param db: connetion object
    :param name: associated name
    :returns: current migration version
    """
    try:
        row = db.fetchone(GET_VERSION_SQL, dict(name=name))
    except psycopg2.ProgrammingError as exc:
        # A missing version table means the schema predates migrations:
        # rebuild from scratch. Any other programming error propagates.
        if 'does not exist' in str(exc):
            return recreate(db, name)
        raise
    if row is None:
        # First run: record and report version (0, 0).
        set_version(db, name, 0, 0)
        return (0, 0)
    return unpack_version(row['version'])
|
def write_mount_cache(real_name, device, mkmnt, fstype, mount_opts):
    '''
    .. versionadded:: 2018.3.0

    Provide information if the path is mounted

    :param real_name: The real name of the mount point where the device is mounted.
    :param device: The device that is being mounted.
    :param mkmnt: Whether or not the mount point should be created.
    :param fstype: The file system that is used.
    :param mount_opts: Additional options used when mounting the device.
    :return: Boolean if message was sent successfully.

    CLI Example:

    .. code-block:: bash

        salt '*' mount.write_mount_cache /mnt/share /dev/sda1 False ext4 defaults,nosuid
    '''
    cache = salt.utils.mount.read_cache(__opts__)
    # Ensure the cache holds a 'mounts' mapping before recording the entry.
    if not cache:
        cache = {'mounts': {}}
    elif 'mounts' not in cache:
        cache['mounts'] = {}
    cache['mounts'][real_name] = {
        'device': device,
        'fstype': fstype,
        'mkmnt': mkmnt,
        'opts': mount_opts,
    }
    if not salt.utils.mount.write_cache(cache, __opts__):
        raise CommandExecutionError('Unable to write mount cache.')
    return True
|
def from_summary_line(cls, summaryLine, version=4, existing_object=None):
    '''Build (or update) an object from one whitespace-separated summary line.

    Summary format:
    object mag stdev dist ..E nobs time av_xres av_yres max_x max_y
    a ..E e ..E i ..E node ..E argperi ..E M ..E ra_dis dec_dis

    :param summaryLine: one summary record as a string.
    :param version: summary-format version; only 4 is supported.
    :param existing_object: optional object to update in place instead of
        constructing a new one; its ``name`` must match column 0.
    :raises ValueError: if summaryLine is empty.
    :raises TypeError: if the line does not have exactly 25 columns.
    :raises VersionError: for an unsupported format version.
    '''
    if not summaryLine:
        raise ValueError('No summary line given')
    if version == 4:
        params = summaryLine.split()
        if len(params) != 25:
            # print() with a single argument behaves identically on
            # Python 2 and 3, unlike the old 'print params' statement
            # which is a SyntaxError on Python 3.
            print(params)
            raise TypeError('Expected 25 columns, {0} given'.format(len(params)))
        # Columns 1-2 (mean mag / stdev) and 23-24 (discovery ra/dec) are
        # assigned as attributes below; the remainder feed the constructor.
        input_params = params[0:1] + params[3:23]
        if not existing_object:
            retval = cls(*input_params)
        else:
            assert isinstance(existing_object, tno)
            assert existing_object.name == params[0]
            retval = existing_object
        retval.mean_mag = float(params[1])
        retval.mean_mag_stdev = float(params[2])
        retval.ra_discov = float(params[23])
        retval.dec_discov = float(params[24])
    else:
        raise VersionError('Unknown version "{0}"'.format(version))
    assert retval
    return retval
|
def restore_state(self):
    """Read last state of GUI from configuration file."""
    # Repopulate the output-directory field with the last saved path.
    self.output_directory.setText(setting('directory', '', expected_type=str))
|
def component(self, *components):
    r"""When search() is called it will limit results to items in a component.

    :param components: component names to restrict the search to; each is
        appended to the accumulated component list
    :returns: :class:`Search`
    """
    # Accumulate every requested component, then return self for chaining.
    self._component.extend(components)
    return self
|
def scale(val, src, dst):
    """Scale value from src range to dst range, clipping to dst bounds.

    If value outside bounds, it is clipped and set to
    the low or high bound of dst.

    Ex:
        scale(0, (0.0, 99.0), (-1.0, 1.0)) == -1.0
        scale(-5, (0.0, 99.0), (-1.0, 1.0)) == -1.0

    :param val: value to rescale.
    :param src: (low, high) source range.
    :param dst: (low, high) destination range.
    :returns: val mapped linearly from src onto dst, clipped to dst bounds.
    """
    if val < src[0]:
        return dst[0]
    if val > src[1]:
        return dst[1]
    # float() guards against integer truncation under Python 2 when every
    # argument is an int (e.g. scale(1, (0, 2), (0, 10)) must be 5.0, not 0).
    return (float(val - src[0]) / (src[1] - src[0])) * (dst[1] - dst[0]) + dst[0]
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.