signature
stringlengths 29
44.1k
| implementation
stringlengths 0
85.2k
|
|---|---|
def stop(self):
    """Stop the systemd service unit, tolerating an already-stopped unit.

    ``systemctl stop`` exits with code 5 when the unit is not loaded or
    running; that case is logged at debug level and otherwise ignored.
    """
    stop_unit = sh.systemctl.stop
    try:
        stop_unit(self.name)
    except sh.ErrorReturnCode_5:
        # Exit code 5: unit not loaded/active -- nothing to stop.
        self.logger.debug('Service not running.')
|
def from_json(cls, stream, json_data):
    """Create a new DataPoint object from device cloud JSON data.

    :param DataStream stream: the :class:`~DataStream` out of which this data is coming
    :param dict json_data: deserialized JSON data from Device Cloud about this device
    :raises ValueError: if the data is malformed
    :return: (:class:`~DataPoint`) newly created :class:`~DataPoint`
    """
    decode = _get_decoder_method(stream.get_data_type())
    return cls(
        # These are actually properties of the stream, not the data point.
        stream_id=stream.get_stream_id(),
        data_type=stream.get_data_type(),
        units=stream.get_units(),
        # And these are part of the data point itself.
        data=decode(json_data.get("data")),
        description=json_data.get("description"),
        timestamp=json_data.get("timestampISO"),
        server_timestamp=json_data.get("serverTimestampISO"),
        quality=json_data.get("quality"),
        location=json_data.get("location"),
        dp_id=json_data.get("id"),
    )
|
def print_status(self):
    """Return a multi-line text snapshot of the current status:
    registered state machines, queued events, and active timers."""
    lines = ['=== State Machines: ===\n']
    for stm in Driver._stms_by_id.values():
        lines.append(' - {} in state {}\n'.format(stm.id, stm.state))
    lines.append('=== Events in Queue: ===\n')
    lines.extend(
        ' - {} for {} with args:{} kwargs:{}\n'.format(
            event['id'], event['stm'].id, event['args'], event['kwargs'])
        for event in self._event_queue.queue
        if event is not None)
    lines.append('=== Active Timers: {} ===\n'.format(len(self._timer_queue)))
    lines.extend(
        ' - {} for {} with timeout {}\n'.format(
            timer['id'], timer['stm'].id, timer['timeout'])
        for timer in self._timer_queue)
    lines.append('=== ================ ===\n')
    return ''.join(lines)
|
def match_subselectors(self, el, selectors):
    """Return ``True`` only if *el* matches every selector in *selectors*.

    :param el: element to test
    :param selectors: iterable of selectors, each passed to ``match_selectors``
    :return: bool

    Short-circuits on the first failing selector; the previous version kept
    evaluating every remaining selector after a failure.
    """
    return all(self.match_selectors(el, sel) for sel in selectors)
|
def queue_draw_item(self, *items):
    """Extends the base class method to allow Ports to be passed as item.

    :param items: Items that are to be redrawn
    """
    redraw_items = []
    for candidate in items:
        if isinstance(candidate, Element):
            redraw_items.append(candidate)
        elif hasattr(candidate, 'parent'):
            # Non-Element items (e.g. Ports) are redrawn via their parent.
            redraw_items.append(candidate.parent)
    super(ExtendedGtkView, self).queue_draw_item(*redraw_items)
|
def add_task_attribute(self, name, **attrs):
    """Add a new Task attribute and return a :class:`TaskAttribute` object.

    :param name: name of the :class:`TaskAttribute`
    :param attrs: optional attributes for :class:`TaskAttribute`
    """
    attributes_api = TaskAttributes(self.requester)
    return attributes_api.create(self.id, name, **attrs)
|
def get_bin_and_lib(self, x64=False, native=False):
    """Return the MSVC bin path and the library search paths.

    :param x64: select the 64-bit toolchain instead of the 32-bit one
    :param native: append the SDK's native libs for the chosen architecture;
        otherwise append the SDK's regular lib/lib64 paths
    :return: tuple ``(msvc_bin, lib_paths)``
    """
    if x64:
        msvc_bin, lib_paths = self.bin64, self.lib64
    else:
        msvc_bin, lib_paths = self.bin32, self.lib
    # ``+=`` extends the bound list attribute in place, so the SDK paths
    # are appended onto self.lib64 / self.lib as well.
    if native:
        lib_paths += self.sdk.get_lib('x64' if x64 else 'x86', native=True)
    else:
        lib_paths += getattr(self.sdk, 'lib64' if x64 else 'lib')
    return msvc_bin, lib_paths
|
def artifact2destination(self, artifact, descriptor):
    """Translate an artifact into a receiver location.

    :param artifact: The Base64 encoded SAML artifact
    :param descriptor: descriptor kind used to pick the
        ``<descriptor>_descriptor`` entry of the entity -- presumably
        ``"idp"`` or ``"sp"``; confirm against callers
    :return: the matching artifact resolution service location, or ``None``
    """
    _art = base64.b64decode(artifact)
    assert _art[:2] == ARTIFACT_TYPECODE
    # Bytes 2-4 hold the endpoint index; fall back to hex decoding when the
    # raw bytes are not directly int-convertible.
    try:
        endpoint_index = str(int(_art[2:4]))
    except ValueError:
        endpoint_index = str(int(hexlify(_art[2:4])))
    # Bytes 4-24 are the source id identifying the entity.
    entity = self.sourceid[_art[4:24]]
    for desc in entity["%s_descriptor" % descriptor]:
        for srv in desc["artifact_resolution_service"]:
            if srv["index"] == endpoint_index:
                # Return on the first match; the previous inner-only
                # ``break`` kept scanning later descriptors and could
                # overwrite an already-found location.
                return srv["location"]
    return None
|
def add_rule(self, name, callable_):
    """Makes rule 'name' available to all subsequently loaded Jamfiles.

    Calling that rule will relay to 'callable_'.
    """
    assert isinstance(name, basestring)
    assert callable(callable_)
    self.project_rules_.add_rule(name, callable_)
|
def get_report(self, linewise=False, no_lines=False):
    """Return a string describing all the errors collected so far (the report).

    :param linewise: produce the line-by-line report variant
    :param no_lines: omit line details from the standard report;
        ignored when ``linewise`` is True
    """
    if linewise:
        return self._get_linewise_report()
    return self._get_report(not no_lines)
|
async def _create_proxy_connection(self, req, *args, **kwargs):
    """Dispatch proxy connections by scheme: HTTP proxies go through
    aiohttp's implementation, anything else through our SOCKS handler.

    args/kwargs can contain different elements (traces, timeout, ...)
    depending on aiohttp version.
    """
    if req.proxy.scheme != 'http':
        return await self._create_socks_connection(req=req)
    return await super()._create_proxy_connection(req, *args, **kwargs)
|
def setup_regex(self):
    """Sets up the patterns and regex objects for parsing the docstrings.

    All patterns are now raw strings: the previous plain strings contained
    sequences like ``"\\s"`` that raise invalid-escape-sequence warnings on
    modern Python. The pattern values themselves are unchanged.
    """
    # Valid XML tags that represent known docstrings that we can work with.
    self.keywords = ["summary", "usage", "errors", "member",
                     "group", "local", "comments", "parameter"]
    # Extract the contents of docstrings minus the !! and any leading spaces.
    self._RX_DOCS = r"^\s*!!(?P<docstring>.+?)$"
    self.RE_DOCS = re.compile(self._RX_DOCS, re.M)
    # Cross references in the documentation.
    self._RX_REFS = r"@CREF\[(?P<reference>[^\]]+)\]"
    self.RE_REFS = re.compile(self._RX_REFS)
    # Match first lines of declarations for code elements that can be
    # decorated by docstrings.
    self._RX_DECOR = (r"((?P<type>character|real|type|logical|integer|complex)?"
                      r"(?P<kind>\([a-z0-9_]+\))?)?(,?(?P<modifiers>[^\n]+?))?"
                      r"\s*(?P<codetype>subroutine|function|type|module|interface)\s+(?P<name>[^(]+)")
    self.RE_DECOR = re.compile(self._RX_DECOR, re.I)
    # Docstrings decorating one or more modules in a code file. Since they
    # aren't contained inside any other code element, we can't just use the
    # normal docblocks routines.
    self._RX_MODDOCS = (r"^(?P<docstring>\s*!!.+?)\n\s*module\s+(?P<name>[A-Za-z0-9_]+)"
                        r".+?end\s+module(\s+(?P=name))?")
    self.RE_MODDOCS = re.compile(self._RX_MODDOCS, re.DOTALL | re.I)
|
def metrics(self) -> list:
    """List of metrics to track for this learning process."""
    tracked = [
        "policy_loss",
        "value_loss",
        "policy_entropy",
        "approx_kl_divergence",
        "clip_fraction",
        "grad_norm",
        "advantage_norm",
        "explained_variance",
    ]
    return [AveragingNamedMetric(name) for name in tracked]
|
def resource_property(klass, name, **kwargs):
    """Builds a resource object property.

    Registers *name* in ``klass.PROPERTIES`` and attaches a property that
    stores its value on the instance as ``_<name>``. ``default`` supplies
    the getter fallback; with ``readonly=True`` no setter is installed.
    """
    klass.PROPERTIES[name] = kwargs
    private_attr = '_%s' % name
    default = kwargs.get('default', None)

    def getter(self):
        return getattr(self, private_attr, default)

    if kwargs.get('readonly', False):
        setattr(klass, name, property(getter))
        return

    def setter(self, value):
        setattr(self, private_attr, value)

    setattr(klass, name, property(getter, setter))
|
def build_contact(request, slug=""):
    """Builds appropriate contact form based on options set in the
    contact_form controller.

    :param request: current request; POST data and FILES feed the form
    :param slug: slug of the ContactFormController driving the behaviour
        (404 if no such controller exists)
    :return: rendered response -- the form page, or the success page after
        a valid submission
    """
    controller = get_object_or_404(ContactFormController, slug=slug)
    site = Site.objects.get_current()
    UserModel = get_user_model()
    user = request.user
    form = ContactForm(request.POST or None, request.FILES or None, controller=controller)
    # If we know the user, fill in the name and email and lock the fields.
    if user.is_authenticated:
        # First, resolve username for tango and non-tango sites.
        try:
            name = user.display_name
        except AttributeError:
            name = user.username
        form.fields['sender_name'].widget.attrs['readonly'] = 'true'
        form.fields['sender_name'].initial = name
        form.fields['sender_email'].widget.attrs['readonly'] = 'true'
        form.fields['sender_email'].initial = user.email
    if form.is_valid():
        if controller.store_in_db:
            # TODO: sanitize submission.
            new_msg = Contact(**form.cleaned_data)
            new_msg.controller = controller
            new_msg.site = site
            if controller.override_subject:
                # We're overriding the subject.
                new_msg.subject = controller.override_subject
            new_msg.save()
        if controller.send_emails:
            form_data = form.cleaned_data
            # Subject precedence: controller override, then the submitted
            # subject, then a generated fallback.
            if controller.override_subject:
                subject = controller.override_subject
            elif 'subject' in form_data:
                subject = form_data['subject']
            else:
                subject = "{} message from {}".format(controller.name, form_data['sender_name'])
            body = "{} \n\n {}".format(form_data['body'], form_data['sender_name'])
            if controller.request_contact_info:
                body += "\nAddress: {} \nCity: {} \nState: {} \nPhone: {}".format(form_data['contact_address'], form_data['contact_city'], form_data['contact_state'], form_data['contact_phone'])
            if controller.email_options == '2':
                # Recipient chosen from a selectable list: resolve a
                # username to an email, otherwise use the raw value.
                try:
                    to = [UserModel.objects.get(username=form.cleaned_data['to']).email]
                except Exception:
                    to = [form.cleaned_data['to']]
            if controller.email_options == '1':
                # Fixed recipient list configured on the controller.
                to = [r.email for r in controller.recipients.all()]
                for r in controller.other_recipients.all():
                    to.append(r.email)
            # NOTE(review): if email_options is neither '1' nor '2', `to` is
            # unbound here -- presumably other option values never enable
            # send_emails; confirm against the controller model.
            if 'send_a_copy' in form.cleaned_data:
                to.append(form.cleaned_data['sender_email'])
            mail = EmailMessage(subject=subject, body=body, from_email=form.cleaned_data['sender_email'], to=to)
            if 'photo' in request.FILES:
                photo = request.FILES['photo']
                mail.attach(photo.name, photo.read(), photo.content_type)
            mail.send()
        # NOTE(review): 'success_url' is passed where render() expects a
        # template name -- looks suspicious; confirm a template literally
        # named 'success_url' exists, or that this should be a redirect.
        return render(request, 'success_url', {'controller': controller})
    return render(request, 'contact/form.html', {'form': form, 'site': site, 'controller': controller})
|
def _read_para_ack_data ( self , code , cbit , clen , * , desc , length , version ) :
"""Read HIP ACK _ DATA parameter .
Structure of HIP ACK _ DATA parameter [ RFC 6078 ] :
0 1 2 3
0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
| Type | Length |
| Acked Sequence number /
Octets Bits Name Description
0 0 ack _ data . type Parameter Type
1 15 ack _ data . critical Critical Bit
2 16 ack _ data . length Length of Contents
4 32 ack _ data . ack Acked Sequence number"""
|
if clen % 4 != 0 :
raise ProtocolError ( f'HIPv{version}: [Parano {code}] invalid format' )
_ackn = list ( )
for _ in range ( clen // 4 ) :
_ackn . append ( self . _read_unpack ( 4 ) )
ack_data = dict ( type = desc , critical = cbit , length = clen , ack = tuple ( _ackn ) , )
return ack_data
|
def get_all_completed_tasks(self, api_token, **kwargs):
    """Return a list of a user's completed tasks.

    .. warning:: Requires Todoist premium.

    :param api_token: The user's login api_token.
    :type api_token: str
    :param project_id: Filter the tasks by project.
    :type project_id: str
    :param limit: The maximum number of tasks to return
        (default ``30``, max ``50``).
    :type limit: int
    :param offset: Used for pagination if there are more tasks than limit.
    :type offset: int
    :param from_date: Return tasks with a completion date on or older than
        from_date. Formatted as ``2007-4-29T10:13``.
    :type from_date: str
    :param to: Return tasks with a completion date on or less than
        to_date. Formatted as ``2007-4-29T10:13``.
    :type to: str
    :return: The HTTP response to the request.
    :rtype: :class:`requests.Response`

    >>> from pytodoist.api import TodoistAPI
    >>> api = TodoistAPI()
    >>> response = api.login('john.doe@gmail.com', 'password')
    >>> user_info = response.json()
    >>> user_api_token = user_info['api_token']
    >>> response = api.get_all_completed_tasks(user_api_token)
    >>> completed_tasks = response.json()
    """
    return self._get('get_all_completed_items', {'token': api_token}, **kwargs)
|
def lookup_subclass(cls, d):
    """Look up a class based on a serialized dictionary containing a typeid.

    Args:
        d (dict): Dictionary with key "typeid"

    Returns:
        Serializable subclass

    Raises:
        FieldError: if "typeid" is missing or is not a registered typeid
    """
    try:
        typeid = d["typeid"]
    except KeyError:
        raise FieldError("typeid not present in keys %s" % list(d))
    subclass = cls._subcls_lookup.get(typeid, None)
    if not subclass:
        raise FieldError("'%s' not a valid typeid" % typeid)
    return subclass
|
def _handle_if_block(tokens, tokens_len, body_index, function_call):
    """Special handler for if-blocks.

    If blocks are special because they can have multiple bodies and have
    multiple terminating keywords for each of those sub-bodies.

    :param tokens: token stream being parsed
    :param tokens_len: total number of tokens
    :param body_index: index at which the if-block's body starts
    :param function_call: the parsed call that opened the block
    :return: tuple ``(next_index, IfBlock)``
    """
    # First handle the if statement and body.
    next_index, if_statement = _IF_BLOCK_IF_HANDLER(tokens, tokens_len, body_index, function_call)
    elseif_statements = []
    else_statement = None
    footer = None
    # Keep going until we hit endif.
    while True:
        # Back up a bit until we found out what terminated the if statement
        # body.
        assert _RE_END_IF_BODY.match(tokens[next_index].content)
        terminator = tokens[next_index].content.lower()
        if terminator == "endif":
            next_index, footer = _handle_function_call(tokens, tokens_len, next_index)
            break
        # elseif/else open a new sub-body; parse their header call first.
        next_index, header = _handle_function_call(tokens, tokens_len, next_index)
        if terminator == "elseif":
            next_index, elseif_stmnt = _ELSEIF_BLOCK_HANDLER(tokens, tokens_len, next_index + 1, header)
            elseif_statements.append(elseif_stmnt)
        elif terminator == "else":
            next_index, else_statement = _ELSE_BLOCK_HANDLER(tokens, tokens_len, next_index + 1, header)
    # The loop only exits via the "endif" branch, so a footer must exist.
    assert footer is not None
    return next_index, IfBlock(if_statement=if_statement, elseif_statements=elseif_statements, else_statement=else_statement, footer=footer, line=if_statement.line, col=if_statement.col, index=body_index)
|
def subscribe(self, sr):
    """Login required. Send POST to subscribe to a subreddit.

    If ``sr`` is the name of the subreddit, a GET request is sent to
    retrieve the full id of the subreddit, which is necessary for this API
    call. Returns True or raises :class:`exceptions.UnexpectedResponse`
    if a non-"truthy" value is in the response.

    URL: ``http://www.reddit.com/api/subscribe/``

    :param sr: full id of subreddit or name of subreddit (full id is preferred)
    """
    full_id = sr if sr.startswith('t5_') else self.subreddit(sr).name
    response = self.post('api', 'subscribe', data=dict(action='sub', sr=full_id))
    return assert_truthy(response)
|
def set_type(self, value):
    """Setter for the type attribute.

    :raises MalFormattedSource: if *value* is not an allowed source type
    """
    if value in self.types_available:
        self._type = value
    else:
        raise MalFormattedSource(
            "Sources field 'type' should be in one of %s" % (self.types_available))
|
def auto_convert_cell_no_flags(cell, units=None, parens_as_neg=True):
    '''Performs a first step conversion of the cell to check
    its type or try to convert if a valid conversion exists.
    This version of conversion doesn't flag changes nor store
    cell units.

    Args:
        cell: the cell value to inspect/convert
        units: The dictionary holder for cell units.
        parens_as_neg: Converts numerics surrounded by parens to
            negative values
    '''
    # `is not None` replaces `!= None`: identity is the correct None test
    # and avoids invoking a custom __eq__ on dict-like unit holders.
    units = units if units is not None else {}
    return auto_convert_cell(flagable=Flagable(), cell=cell, position=None, worksheet=0, flags={}, units=units, parens_as_neg=parens_as_neg)
|
def handle_scene(self, obj):
    """Handle a scene event.

    This function applies a blocking wait at the start of a scene.

    :param obj: A :py:class:`~turberfield.dialogue.model.Model.Shot` object.
    :return: The supplied object.
    """
    banner = "{t.dim}{scene}{t.normal}".format(scene=obj.scene.capitalize(), t=self.terminal)
    # Three trailing newlines visually separate the scene heading.
    print(banner, end="\n" * 3, file=self.terminal.stream)
    time.sleep(self.pause)
    return obj
|
def tag(words: List[str], corpus: str = "pud") -> List[Tuple[str, str]]:
    """POS-tag a list of words.

    Takes a ``list`` of words and returns a ``list`` of ``(word, pos_tag)``
    tuples, e.g. ``[('คำ', 'ชนิดคำ'), ('คำ', 'ชนิดคำ'), ...]``.

    :param words: tokenized words to tag
    :param corpus: ``"orchid"`` or ``"pud"``; any other value falls back
        to the "pud" default
    :return: list of ``(word, tag)`` tuples; empty input yields ``[]``

    Improvements over the previous version: the two ~25-branch if/elif
    chains are replaced by one mapping and its inverse (the chains also
    contained unreachable duplicate ``"="`` branches, now removed), and the
    caller's ``words`` list is no longer mutated in place.
    """
    if not words:
        return []
    if corpus != "orchid":
        # Default: use "pud" as the corpus.
        return _PUD_TAGGER.tag(words)
    # The ORCHID tagger cannot handle bare punctuation tokens, so each
    # special character is encoded as a named token before tagging and
    # decoded back afterwards.
    encode = {
        " ": "<space>",
        "+": "<plus>",
        "-": "<minus>",
        "=": "<equal>",
        ",": "<comma>",
        "$": "<dollar>",
        ".": "<full_stop>",
        "(": "<left_parenthesis>",
        ")": "<right_parenthesis>",
        '"': "<quotation>",
        "@": "<at_mark>",
        "&": "<ampersand>",
        "{": "<left_curly_bracket>",
        "^": "<circumflex_accent>",
        "?": "<question_mark>",
        "<": "<less_than>",
        ">": "<greater_than>",
        "!": "<exclamation>",
        "’": "<apostrophe>",
        ":": "<colon>",
        "*": "<asterisk>",
        ";": "<semi_colon>",
        "/": "<slash>",
    }
    decode = {token: char for char, token in encode.items()}
    # Work on an encoded copy instead of rewriting the caller's list.
    encoded = [encode.get(word, word) for word in words]
    tagged = _ORCHID_TAGGER.tag(encoded)
    return [(decode.get(word, word), pos) for word, pos in tagged]
|
def runExperimentPool ( numObjects , numLocations , numFeatures , numColumns , networkType = [ "MultipleL4L2Columns" ] , longDistanceConnectionsRange = [ 0.0 ] , numWorkers = 7 , nTrials = 1 , pointRange = 1 , numPoints = 10 , numInferenceRpts = 1 , l2Params = None , l4Params = None , resultsName = "convergence_results.pkl" ) :
"""Allows you to run a number of experiments using multiple processes .
For each parameter except numWorkers , pass in a list containing valid values
for that parameter . The cross product of everything is run , and each
combination is run nTrials times .
Returns a list of dict containing detailed results from each experiment .
Also pickles and saves the results in resultsName for later analysis .
Example :
results = runExperimentPool (
numObjects = [ 10 ] ,
numLocations = [ 5 ] ,
numFeatures = [ 5 ] ,
numColumns = [ 2,3,4,5,6 ] ,
numWorkers = 8,
nTrials = 5)"""
|
# Create function arguments for every possibility
args = [ ]
for c in reversed ( numColumns ) :
for o in reversed ( numObjects ) :
for l in numLocations :
for f in numFeatures :
for n in networkType :
for p in longDistanceConnectionsRange :
for t in range ( nTrials ) :
args . append ( { "numObjects" : o , "numLocations" : l , "numFeatures" : f , "numColumns" : c , "trialNum" : t , "pointRange" : pointRange , "numPoints" : numPoints , "networkType" : n , "longDistanceConnections" : p , "plotInferenceStats" : False , "settlingTime" : 3 , "numInferenceRpts" : numInferenceRpts , "l2Params" : l2Params , "l4Params" : l4Params } )
print "{} experiments to run, {} workers" . format ( len ( args ) , numWorkers )
# Run the pool
if numWorkers > 1 :
pool = Pool ( processes = numWorkers )
result = pool . map ( runExperiment , args )
else :
result = [ ]
for arg in args :
result . append ( runExperiment ( arg ) )
# print " Full results : "
# pprint . pprint ( result , width = 150)
# Pickle results for later use
with open ( resultsName , "wb" ) as f :
cPickle . dump ( result , f )
return result
|
def __getPluginInfo(self, plugin, package_abspath, package_name):
    """Organize plugin information.

    :param plugin: the imported plugin module
    :param package_abspath: absolute filesystem path of the plugin package
    :param package_name: importable name of the plugin package
    :raises VersionError: if ``plugin.__version__`` is not valid semver
    :returns: dict: plugin info
    """
    if not isValidSemver(plugin.__version__):
        raise VersionError("The plugin version does not conform to the standard named %s" % package_name)
    # Optional metadata attributes default to None when the plugin module
    # does not define them (getattr replaces four repetitive try/except
    # AttributeError blocks).
    url = getattr(plugin, "__url__", None)
    plugin_license = getattr(plugin, "__license__", None)
    license_file = getattr(plugin, "__license_file__", None)
    readme_file = getattr(plugin, "__readme_file__", None)
    # Plugin state: the module-level `__state__` value is the default, but
    # on-disk marker files take priority, and DISABLED overrides ENABLED.
    plugin_state = getattr(plugin, "__state__", "enabled")
    if os.path.isfile(os.path.join(package_abspath, "ENABLED")):
        plugin_state = "enabled"
    if os.path.isfile(os.path.join(package_abspath, "DISABLED")):
        plugin_state = "disabled"
    return {
        "plugin_name": plugin.__plugin_name__,
        "plugin_package_name": package_name,
        "plugin_package_abspath": package_abspath,
        "plugin_description": plugin.__description__,
        "plugin_version": plugin.__version__,
        "plugin_author": plugin.__author__,
        "plugin_url": url,
        "plugin_license": plugin_license,
        "plugin_license_file": license_file,
        "plugin_readme_file": readme_file,
        "plugin_state": plugin_state,
        "plugin_tpl_path": os.path.join(package_abspath, "templates"),
        "plugin_ats_path": os.path.join(package_abspath, "static"),
        "plugin_tep": {},
        "plugin_hep": {},
        "plugin_bep": {},
        "plugin_yep": {},
    }
|
def build_kernel_to_data(self, Y, knn=None, bandwidth=None, bandwidth_scale=None):
    """Build transition matrix from new data to the graph.

    Creates a transition matrix such that `Y` can be approximated by
    a linear combination of landmarks. Any transformation of the landmarks
    can be trivially applied to `Y` by performing

    `transform_Y = transitions.dot(transform)`

    Parameters
    ----------
    Y : array-like, [n_samples_y, n_features]
        new data for which an affinity matrix is calculated
        to the existing data. `n_features` must match
        either the ambient or PCA dimensions
    knn : int, optional
        defaults to ``self.knn`` when None
    bandwidth : number or callable, optional
        defaults to ``self.bandwidth`` when None; if still None an adaptive
        bandwidth is computed from the knn-th neighbor distances
    bandwidth_scale : number, optional
        multiplier applied to the bandwidth; defaults to
        ``self.bandwidth_scale`` when None

    Returns
    -------
    transitions : array-like, [n_samples_y, self.data.shape[0]]
        Transition matrix from `Y` to `self.data`

    Raises
    ------
    ValueError : if `precomputed` is not `None`, then the graph cannot
        be extended.
    """
    # Fall back to the graph's configured parameters when not overridden.
    if knn is None:
        knn = self.knn
    if bandwidth is None:
        bandwidth = self.bandwidth
    if bandwidth_scale is None:
        bandwidth_scale = self.bandwidth_scale
    if self.precomputed is not None:
        # A precomputed affinity matrix carries no coordinates from which
        # distances to new points could be computed.
        raise ValueError("Cannot extend kernel on precomputed graph")
    else:
        tasklogger.log_start("affinities")
        Y = self._check_extension_shape(Y)
        # Pairwise distances from each new point to every stored point.
        pdx = cdist(Y, self.data_nu, metric=self.distance)
        if bandwidth is None:
            # Adaptive bandwidth: the largest of each row's knn smallest
            # distances, i.e. the distance to the knn-th nearest neighbor.
            knn_dist = np.partition(pdx, knn, axis=1)[:, :knn]
            bandwidth = np.max(knn_dist, axis=1)
        elif callable(bandwidth):
            bandwidth = bandwidth(pdx)
        bandwidth = bandwidth_scale * bandwidth
        # Normalize each row by its bandwidth, then apply the decay kernel.
        pdx = (pdx.T / bandwidth).T
        K = np.exp(-1 * pdx ** self.decay)
        # handle nan -- presumably from zero distance with zero bandwidth
        # (0/0); such points are treated as full affinity. TODO confirm.
        K = np.where(np.isnan(K), 1, K)
        # Sparsify: drop affinities below the threshold.
        K[K < self.thresh] = 0
        tasklogger.log_complete("affinities")
        return K
|
def _build_toc_node(docname, anchor="anchor", text="test text", bullet=False):
    """Create the node structure that Sphinx expects for TOC Tree entries.

    The ``bullet`` argument wraps it in a ``nodes.bullet_list``,
    which is how you nest TOC Tree entries.
    """
    text_node = nodes.Text(text, text)
    reference = nodes.reference("", "", text_node, internal=True, refuri=docname, anchorname="#" + anchor)
    paragraph = addnodes.compact_paragraph("", "", reference)
    list_item = nodes.list_item("", paragraph)
    if bullet:
        return nodes.bullet_list("", list_item)
    return list_item
|
def merge_config(original: Optional[Dict[str, Any]], overrides: Optional[Dict[str, Any]]) -> Dict[str, Any]:
    """Return a copy of the ``original`` configuration dictionary, with
    overrides from ``overrides`` applied.

    This is similar to what :meth:`dict.update` does, but when a dictionary
    is about to be replaced with another dictionary, it instead merges the
    contents.

    If a key in ``overrides`` is a dotted path (ie. ``foo.bar.baz: value``),
    it is assumed to be a shorthand for ``foo: {bar: {baz: value}}``.

    :param original: a configuration dictionary (or ``None``)
    :param overrides: a dictionary containing overriding values to the
        configuration (or ``None``)
    :return: the merge result
    """
    assert check_argument_types()
    merged = original.copy() if original else {}
    for key, value in (overrides or {}).items():
        if '.' in key:
            # Expand the dotted shorthand into a nested dict override.
            head, rest = key.split('.', 1)
            key, value = head, {rest: value}
        existing = merged.get(key)
        if isinstance(existing, dict) and isinstance(value, dict):
            merged[key] = merge_config(existing, value)
        else:
            merged[key] = value
    return merged
|
def __create_proper_names_lexicon(self, docs):
    """Build a frequency lexicon of proper-name lemmas over the document
    collection, recording how many times each proper-name lemma occurred.

    (Originally documented in Estonian: "Moodustab dokumendikollektsiooni
    põhjal pärisnimede sagedussõnastiku.")
    """
    lemma_freq = {}
    for doc in docs:
        for word in doc[WORDS]:
            # 1) Collect the unique proper-name lemmas attached to this
            #    word, if any (POS tag 'H' marks a proper name).
            proper_lemmas = {analysis[ROOT]
                             for analysis in word[ANALYSIS]
                             if analysis[POSTAG] == 'H'}
            # 2) Record the lemma frequencies.
            for lemma in proper_lemmas:
                lemma_freq[lemma] = lemma_freq.get(lemma, 0) + 1
    return lemma_freq
|
def getrange(self, key, start, end, *, encoding=_NOTSET):
    """Get a substring of the string stored at a key.

    :raises TypeError: if start or end is not int
    """
    for label, value in (("start", start), ("end", end)):
        if not isinstance(value, int):
            raise TypeError("%s argument must be int" % label)
    return self.execute(b'GETRANGE', key, start, end, encoding=encoding)
|
def _sampler_n_samples ( self , n_samples ) :
"""Return ( sampler , n _ samplers ) tuples"""
|
sampler_indices = self . rng_ . choice ( range ( len ( self . samplers ) ) , size = n_samples , replace = True , p = self . weights )
return [ ( self . samplers [ idx ] , freq ) for idx , freq in itemfreq ( sampler_indices ) ]
|
def fetch_parent_dir(filepath, n=1):
    '''Returns a parent directory, n places above the input filepath.

    Equivalent to something like: '/home/user/dir'.split('/')[-2] if n=2.
    '''
    path = os.path.realpath(filepath)
    for _ in range(n):
        path = os.path.dirname(path)
    return os.path.basename(path)
|
def createSQL(self, sql, args=()):
    """For use with auto-committing statements such as CREATE TABLE or
    CREATE INDEX.

    Logs the statement when it takes more than two seconds to execute.
    """
    started = time.time()
    self._execSQL(sql, args)
    elapsed = time.time() - started
    if elapsed > 2.0:
        log.msg('Extremely long CREATE: %s' % (elapsed,))
        log.msg(sql)
|
def group_re(self):
    '''Return a regexp pattern with named groups.

    TXT tokens are escaped literally, VAR tokens become named groups,
    and ANON tokens become non-capturing groups.
    '''
    parts = []
    for token, data in self.tokens():
        if token == 'TXT':
            parts.append(re.escape(data))
        elif token == 'VAR':
            parts.append('(?P<%s>%s)' % (data[1], data[0]))
        elif token == 'ANON':
            parts.append('(?:%s)' % data)
    return ''.join(parts)
|
def add_artifact(self, filename, name=None, metadata=None, content_type=None):
    """Add a file as an artifact.

    In Sacred terminology an artifact is a file produced by the experiment
    run. In case of a MongoObserver that means storing the file in the
    database.

    This function can only be called during a run, and just calls the
    :py:meth:`sacred.run.Run.add_artifact` method.

    Parameters
    ----------
    filename : str
        name of the file to be stored as artifact
    name : str, optional
        optionally set the name of the artifact.
        Defaults to the relative file-path.
    metadata : dict, optional
        optionally attach metadata to the artifact.
        This only has an effect when using the MongoObserver.
    content_type : str, optional
        optionally attach a content-type to the artifact.
        This only has an effect when using the MongoObserver.
    """
    run = self.current_run
    assert run is not None, "Can only be called during a run."
    run.add_artifact(filename, name, metadata, content_type)
|
def assert_valid_path(path):
    """Checks if a path is a correct format that Marathon expects.
    Raises ValueError if not valid.

    :param str path: The app id.
    :rtype: str
    """
    if path is None:
        return
    # As seen in:
    # https://github.com/mesosphere/marathon/blob/0c11661ca2f259f8a903d114ef79023649a6f04b/src/main/scala/mesosphere/marathon/state/PathId.scala#L71
    # (loop variable renamed from `id`, which shadowed the builtin)
    for segment in filter(None, path.strip('/').split('/')):
        if not ID_PATTERN.match(segment):
            raise ValueError('invalid path (allowed: lowercase letters, digits, hyphen, "/", ".", ".."): %r' % path)
    return path
|
def _get_right_line_color(self):
    """Return the right border line color as an RGB tuple of 0..1 floats."""
    packed = self.cell_attributes[self.key]["bordercolor_right"]
    return tuple(channel / 255.0 for channel in color_pack2rgb(packed))
|
def spawn_process(self, target, *args):
    """Start *target* in a new daemonized process and track it in the pool.

    :type target: function or class
    :return: the started process
    """
    process = Process(target=target, args=args)
    process.daemon = True
    if target == worker:
        # Workers may be configured to run non-daemonized, and carry a
        # timer (the third positional arg) for monitoring.
        process.daemon = Conf.DAEMONIZE_WORKERS
        process.timer = args[2]
    self.pool.append(process)
    process.start()
    return process
|
def stop(self):
    """Stop tracing. Reinstalls the :ref:`hunter.Tracer.previous` tracer."""
    if self._handler is None:
        # Already stopped; nothing to restore.
        return
    sys.settrace(self._previous)
    self._handler = self._previous = None
    if self.threading_support is None or self.threading_support:
        threading.settrace(self._threading_previous)
        self._threading_previous = None
|
def _is_in_set ( self , inpt , metadata ) :
"""checks if the input is in the metadata ' s * _ set list"""
|
# makes an assumption there is only one _ set in the metadata dict
get_set_methods = [ m for m in dir ( metadata ) if 'get_' in m and '_set' in m ]
set_results = None
for m in get_set_methods :
try :
set_results = getattr ( metadata , m ) ( )
break
except errors . IllegalState :
pass
if set_results is not None and inpt in set_results :
return True
return False
|
def render_response(self):
    """Render as a string formatted for HTTP response headers
    (detailed 'Set-Cookie:' style)."""
    # Apply the optional name/value renderers; .attributes() is
    # responsible for rendering every other cookie attribute.
    name = self.name
    value = self.value
    name_renderer = self.attribute_renderers.get('name', None)
    if name_renderer:
        name = name_renderer(name)
    value_renderer = self.attribute_renderers.get('value', None)
    if value_renderer:
        value = value_renderer(value)
    parts = ['{0}={1}'.format(name, value)]
    for key, val in self.attributes().items():
        # Boolean attributes (e.g. Secure, HttpOnly) render as a bare key.
        parts.append(key if isinstance(val, bool) else '='.join((key, val)))
    return '; '.join(parts)
|
def _mkpart ( root , fs_format , fs_opts , mount_dir ) :
    '''
    Create a single bootable primary partition inside the disk image
    ``root``, format it and install GRUB on it.

    root
        Path to the raw disk image file to partition.
    fs_format
        Filesystem type to create (passed through to ``_mkfs``).
    fs_opts
        Extra filesystem options (passed through to ``_mkfs``).
    mount_dir
        Directory used to temporarily mount the new partition while GRUB
        is installed.

    Returns the ``partition.list`` info of the partitioned loop device.

    .. versionadded:: Beryllium
    '''
    # Write an MSDOS (MBR) partition label onto the image.
    __salt__ [ 'partition.mklabel' ] ( root , 'msdos' )
    # Attach the whole image to the first free loop device.
    loop1 = __salt__ [ 'cmd.run' ] ( 'losetup -f' )
    log . debug ( 'First loop device is %s' , loop1 )
    __salt__ [ 'cmd.run' ] ( 'losetup {0} {1}' . format ( loop1 , root ) )
    part_info = __salt__ [ 'partition.list' ] ( loop1 )
    # Partition starts at 2048*2048 bytes (4 MiB), leaving room at the
    # front of the image for the bootloader.
    start = six . text_type ( 2048 * 2048 ) + 'B'
    end = part_info [ 'info' ] [ 'size' ]
    __salt__ [ 'partition.mkpart' ] ( loop1 , 'primary' , start = start , end = end )
    __salt__ [ 'partition.set' ] ( loop1 , '1' , 'boot' , 'on' )
    part_info = __salt__ [ 'partition.list' ] ( loop1 )
    # Attach a second loop device at the partition's byte offset so the
    # partition itself can be formatted and mounted.
    loop2 = __salt__ [ 'cmd.run' ] ( 'losetup -f' )
    log . debug ( 'Second loop device is %s' , loop2 )
    start = start . rstrip ( 'B' )
    __salt__ [ 'cmd.run' ] ( 'losetup -o {0} {1} {2}' . format ( start , loop2 , loop1 ) )
    _mkfs ( loop2 , fs_format , fs_opts )
    # Mount the fresh filesystem and install GRUB into its /boot.
    __salt__ [ 'mount.mount' ] ( mount_dir , loop2 )
    __salt__ [ 'cmd.run' ] ( ( 'grub-install' , '--target=i386-pc' , '--debug' , '--no-floppy' , '--modules=part_msdos linux' , '--boot-directory={0}/boot' . format ( mount_dir ) , loop1 ) , python_shell = False )
    # Tear down: unmount and detach both loop devices.
    __salt__ [ 'mount.umount' ] ( mount_dir )
    __salt__ [ 'cmd.run' ] ( 'losetup -d {0}' . format ( loop2 ) )
    __salt__ [ 'cmd.run' ] ( 'losetup -d {0}' . format ( loop1 ) )
    return part_info
|
def _convert_coords_to_abmn_X ( data , ** kwargs ) :
"""The syscal only stores positions for the electrodes . Yet , we need to
infer electrode numbers for ( a , b , m , n ) by means of some heuristics . This
heuristic uses the x - coordinates to infer an electrode spacing ( y / z
coordinates are ignored ) . We also assume a constant spacing of electrodes
( i . e . , a gap in electrode positions would indicate unused electrodes ) . This
is usually a good estimate as hardly anybody does change the electrode
positions stored in the Syscal system ( talk to us if you do ) .
Note that this function can use user input to simplify the process by using
a user - supplied x0 value for the smallest electrode position ( corresponding
to electrode 1 ) and a user - supplied spacing ( removing the need to infer
from the positions ) .
Parameters
data : Nx4 array | Nx4 : py : class : ` pandas . DataFrame `
The x positions of a , b , m , n electrodes . N is the number of
measurements
x0 : float , optional
position of first electrode . If not given , then use the smallest
x - position in the data as the first electrode .
spacing : float
electrode spacing . This is important if not all electrodes are used in
a given measurement setup . If not given , then the smallest distance
between electrodes is assumed to be the electrode spacing . Naturally ,
this requires measurements ( or injections ) with subsequent electrodes .
Returns
data _ new : Nx4 : py : class : ` pandas . DataFrame `
The electrode number columns a , b , m , n"""
|
assert data . shape [ 1 ] == 4 , 'data variable must only contain four columns'
x0 = kwargs . get ( 'x0' , data . min ( ) . min ( ) )
electrode_spacing = kwargs . get ( 'spacing' , None )
# try to determine from the data itself
if electrode_spacing is None :
electrode_positions = data . values
electrode_spacing = np . abs ( electrode_positions [ : , 1 : ] - electrode_positions [ : , 0 : - 1 ] ) . min ( )
data_new = pd . DataFrame ( )
data_new [ 'a' ] = ( data . iloc [ : , 0 ] - x0 ) / electrode_spacing + 1
data_new [ 'b' ] = ( data . iloc [ : , 1 ] - x0 ) / electrode_spacing + 1
data_new [ 'm' ] = ( data . iloc [ : , 2 ] - x0 ) / electrode_spacing + 1
data_new [ 'n' ] = ( data . iloc [ : , 3 ] - x0 ) / electrode_spacing + 1
# convert to integers
for col in ( ( 'a' , 'b' , 'm' , 'n' ) ) :
data_new [ col ] = data_new [ col ] . astype ( int )
return data_new
|
def emitDataChanged(self, treeItem):  # TODO: move to BaseTreeItem?
    """Emit dataChanged for the model indices (all columns) of this treeItem."""
    leftIndex, rightIndex = self.indexTupleFromItem(treeItem)
    # Sanity check (TODO: remove): the left index must resolve back to
    # the very item we were given.
    resolvedItem = self.getItem(leftIndex)
    assert resolvedItem is treeItem, "{} != {}".format(resolvedItem, treeItem)
    self.dataChanged.emit(leftIndex, rightIndex)
|
def get_record(self, record_num):
    """Get a Record by record number.

    @type record_num: int
    @param record_num: The record number of the record to fetch.
    @rtype: Record or None
    @return: The record with the requested record number, or None if no
      such record exists.
    """
    for chunk in self.chunks():
        # Only scan chunks whose record-number range can contain the target.
        in_range = (chunk.log_first_record_number()
                    <= record_num
                    <= chunk.log_last_record_number())
        if in_range:
            for record in chunk.records():
                if record.record_num() == record_num:
                    return record
    return None
|
def _match_metric ( self , metric ) :
"""matches the metric path , if the metrics are empty , it shorts to True"""
|
if len ( self . _compiled_filters ) == 0 :
return True
for ( collector , filter_regex ) in self . _compiled_filters :
if collector != metric . getCollectorPath ( ) :
continue
if filter_regex . match ( metric . getMetricPath ( ) ) :
return True
return False
|
def event_return ( events ) :
    '''
    Return events to the CouchDB server.

    Requires that configuration be enabled via the 'event_return'
    option in the master config.

    Example:

    .. code-block:: yaml

        event_return:
          - couchdb
    '''
    log . debug ( 'events data is: %s' , events )
    options = _get_options ( )
    # Check to see if the events database already exists on the server.
    _response = _request ( "GET" , options [ 'url' ] + "_all_dbs" )
    # Events are stored in a dedicated '<db>-events' database.
    event_db = '{}-events' . format ( options [ 'db' ] )
    if event_db not in _response : # Make a PUT request to create the database.
        log . info ( 'Creating database "%s"' , event_db )
        _response = _request ( "PUT" , options [ 'url' ] + event_db , user = options [ 'user' ] , passwd = options [ 'passwd' ] )
        # Confirm that the response back was a simple 'ok': true.
        if 'ok' not in _response or _response [ 'ok' ] is not True :
            # Bail out: without the database none of the events can be stored.
            log . error ( 'Nothing logged! Lost data. Unable to create database "%s"' , event_db )
            return
        log . info ( 'Created database "%s"' , event_db )
    for event in events : # Build the dict document that will be stored for this event.
        log . debug ( 'event data is: %s' , event )
        doc = _generate_event_doc ( event )
        # Make the actual HTTP PUT request to create the doc.
        _response = _request ( "PUT" , options [ 'url' ] + event_db + "/" + doc [ '_id' ] , 'application/json' , salt . utils . json . dumps ( doc ) )
        # Sanity check regarding the response; log (but continue) on failure.
        if 'ok' not in _response or _response [ 'ok' ] is not True :
            log . error ( 'Nothing logged! Lost data. Unable to create document: "%s"' , _response )
|
def _validate_response ( self , response , message , exclude_code = None ) : # pylint : disable = no - self - use
"""validate an api server response
: param dict response : server response to check
: param str message : error message to raise
: param int exclude _ code : error codes to exclude from errorhandling
: return :
" : raises Exception : on error"""
|
if 'code' in response and response [ 'code' ] >= 2000 :
if exclude_code is not None and response [ 'code' ] == exclude_code :
return
raise Exception ( "{0}: {1} ({2})" . format ( message , response [ 'msg' ] , response [ 'code' ] ) )
|
def execute_notebook ( nb_path , pkg_dir , dataframes , write_notebook = False , env = None ) :
    """Execute a notebook after adding the prolog and epilog. Can also add
    %mt_materialize magics to write dataframes to files.

    :param nb_path: path to a notebook.
    :param pkg_dir: Directory to which dataframes are materialized
    :param dataframes: List of names of dataframes to materialize
    :param write_notebook: If True, write the executed notebook next to the
        source as '<name>-executed<ext>'; if a path string, write it there.
    :param env: optional environment dict handed to the AddProlog
        preprocessor.
    :return: a Notebook object
    """
    import nbformat
    from metapack . jupyter . preprocessors import AddEpilog , AddProlog
    from metapack . jupyter . exporters import ExecutePreprocessor , Config
    from os . path import dirname , join , splitext , basename
    from nbconvert . preprocessors . execute import CellExecutionError
    with open ( nb_path , encoding = 'utf8' ) as f :
        nb = nbformat . read ( f , as_version = 4 )
    root , ext = splitext ( basename ( nb_path ) )
    c = Config ( )
    # Inject the prolog/epilog cells before execution.
    nb , resources = AddProlog ( config = c , env = env or { } ) . preprocess ( nb , { } )
    nb , resources = AddEpilog ( config = c , pkg_dir = pkg_dir , dataframes = dataframes , ) . preprocess ( nb , { } )
    def _write_notebook ( nb_path , root , ext , write_notebook ) :
        # Persist the notebook; note this closes over 'nb' from the
        # enclosing scope, so it writes whatever state 'nb' is in when called.
        if write_notebook :
            if write_notebook is True :
                exec_nb_path = join ( dirname ( nb_path ) , root + '-executed' + ext )
            else :
                exec_nb_path = write_notebook
            with open ( exec_nb_path , 'w' , encoding = 'utf8' ) as f :
                nbformat . write ( nb , f )
    # First write: the pre-execution notebook, so a copy exists even if
    # execution fails below.
    _write_notebook ( nb_path , root , ext , write_notebook )
    try :
        ep = ExecutePreprocessor ( config = c )
        # 5-minute per-cell execution timeout.
        ep . timeout = 5 * 60
        nb , _ = ep . preprocess ( nb , { 'metadata' : { 'path' : dirname ( nb_path ) } } )
    except ( CellExecutionError , TimeoutError ) as e :
        # Save the partially executed notebook for inspection, then re-raise.
        err_nb_path = join ( dirname ( nb_path ) , root + '-errors' + ext )
        with open ( err_nb_path , 'w' , encoding = 'utf8' ) as f :
            nbformat . write ( nb , f )
        raise CellExecutionError ( "Errors executing noteboook. See notebook at {} for details.\n{}" . format ( err_nb_path , '' ) )
    except ImportError as e :
        raise NotebookError ( "Failed to import a library required for notebook execution: {}" . format ( str ( e ) ) )
    # Second write: overwrite the pre-execution copy with the executed one.
    _write_notebook ( nb_path , root , ext , write_notebook )
    return nb
|
def get_item_data ( self , session , item , byte_range = None ) :
    """Return a file pointer to the item file. Assumes `item.file_name` points
    to the file on disk.

    :param session: unused here; presumably part of the caller's interface.
    :param item: object exposing file_name, file_type and file_size.
    :param byte_range: optional byte-range string, parsed by
        parse_byte_range and clamped to item.file_size.
    :return: (file-like object, file type, total file size) triple.
    """
    # Parse byte range
    if byte_range is not None :
        begin , end = parse_byte_range ( byte_range , max_byte = item . file_size )
    else :
        begin , end = 0 , item . file_size
    # Open the file
    # NOTE(review): opened read/write ("rb+") although only reads happen
    # below - confirm write access is really required before changing.
    fp = open ( item . file_name , "rb+" )
    if not begin :
        # Range starts at 0: hand back the raw file object unchanged.
        return fp , item . file_type , item . file_size
    elif begin and not end :
        # Open-ended range: seek to the start and let the caller stream
        # the rest of the file.
        fp . seek ( begin )
        return fp , item . file_type , item . file_size
    elif begin and end :
        # Bounded range: materialize just that slice in an in-memory buffer.
        fp . seek ( begin )
        data = fp . read ( end - begin )
        result = cStringIO . StringIO ( data )
        return result , item . file_type , item . file_size
|
def from_response(raw_response):
    """Build the matching error instance from a Yelp Fusion API response.

    The Yelp Fusion API returns error messages with a json body like::

        'error': {
            'code': 'ALL_CAPS_CODE',
            'description': 'Human readable description.'
        }

    Some errors may have additional fields. For example, a validation
    error::

        'error': {
            'code': 'VALIDATION_ERROR',
            'description': "'en_USS' does not match '^[a-z]{2,3}_[A-Z]{2}$'",
            'field': 'locale',
            'instance': 'en_USS'
        }
    """
    error_info = raw_response.json()["error"]
    error_code = error_info["code"]
    if error_code not in _error_map:
        raise NotImplementedError(
            "Unknown error code '{}' returned in Yelp API response. "
            "This code may have been newly added. Please ensure you are "
            "using the latest version of the yelp-python library, and if "
            "so, create a new issue at https://github.com/Yelp/yelp-python "
            "to add support for this error.".format(error_code)
        )
    return _error_map[error_code](raw_response, **error_info)
|
def forward_word_extend_selection(self, e):
    u"""Extend the selection forward to the end of the next word. Words
    are composed of letters and digits."""
    # Delegate the cursor movement to the line buffer, then finalize the
    # editing step.
    self.l_buffer.forward_word_extend_selection(self.argument_reset)
    self.finalize()
|
def _GetDirectory(self):
    """Retrieves a directory.

    Returns:
      VShadowDirectory: a directory, or None if this entry is not a
          directory.
    """
    if self.entry_type == definitions.FILE_ENTRY_TYPE_DIRECTORY:
        return VShadowDirectory(self._file_system, self.path_spec)
    return None
|
def set_scope(self, http_method, scope):
    """Set a scope condition on the resource for an HTTP method.

    Parameters:
        * **http_method (str):** HTTP method like GET, POST, PUT, DELETE
        * **scope (str, list):** the scope of access control; a str for a
          single scope, or a list of strings to replace all scopes at once
    """
    # If a condition for this method already exists, update it in place:
    # a list replaces the scopes, a single string is appended.
    for condition in self.conditions:
        if http_method not in condition['httpMethods']:
            continue
        if isinstance(scope, list):
            condition['scopes'] = scope
        elif isinstance(scope, str) or isinstance(scope, unicode):
            condition['scopes'].append(scope)
        return
    # No condition for this method yet; create a fresh one.
    if isinstance(scope, list):
        new_scopes = scope
    elif isinstance(scope, str) or isinstance(scope, unicode):
        new_scopes = [scope]
    else:
        return
    self.conditions.append({'httpMethods': [http_method], 'scopes': new_scopes})
|
def GetFeedMapping(client, feed, placeholder_type):
    """Gets the Feed Mapping for a given Feed.

    Args:
      client: an AdWordsClient instance.
      feed: the Feed we are retrieving the Feed Mapping for.
      placeholder_type: the Placeholder Type we are looking for.

    Returns:
      A dictionary mapping feed attribute IDs to lists of field IDs.
    """
    feed_mapping_service = client.GetService('FeedMappingService', 'v201809')
    attribute_mappings = {}
    selector = {
        'fields': ['FeedMappingId', 'AttributeFieldMappings'],
        'predicates': [
            {'field': 'FeedId', 'operator': 'EQUALS', 'values': [feed['id']]},
            {'field': 'PlaceholderType', 'operator': 'EQUALS',
             'values': [placeholder_type]},
        ],
        'paging': {'startIndex': 0, 'numberResults': PAGE_SIZE},
    }
    while True:
        page = feed_mapping_service.get(selector)
        if 'entries' in page:
            # Normally a feed attribute is mapped to one field, but it may
            # be mapped to several, so values are collected into lists.
            for feed_mapping in page['entries']:
                for attribute_mapping in feed_mapping['attributeFieldMappings']:
                    attribute_mappings.setdefault(
                        attribute_mapping['feedAttributeId'], []).append(
                            attribute_mapping['fieldId'])
        selector['paging']['startIndex'] += PAGE_SIZE
        if selector['paging']['startIndex'] >= int(page['totalNumEntries']):
            break
    return attribute_mappings
|
def print_extended_help ( ) :
    """Print the detailed <postanalyze> help message describing every
    command-line parameter.

    :return: None
    """
    # All parameter descriptions are wrapped to 85 columns with a tab
    # indent; continuation lines get an extra space.
    w = textwrap . TextWrapper ( )
    w . expand_tabs = False
    w . width = 85
    w . initial_indent = '\t'
    w . subsequent_indent = '\t '
    print ( '' )
    print ( textwrap . fill ( "<postanalyze> Complete parameter list:" , initial_indent = '' ) )
    print ( '' )
    # --input: expected CSV layout, with a worked example.
    cmd = "--input : (required) csv file to split into training and test sets"
    print ( w . fill ( cmd ) )
    cmd = "\t\tColumns should be as follows:"
    print ( w . fill ( cmd ) )
    print ( '' )
    cmd = "\t\t id, status, receptor_1, receptor_2, ..., receptor_N"
    print ( w . fill ( cmd ) )
    cmd = "\t\t CH44, 1, -9.7, -9.3, ..., -10.2"
    print ( w . fill ( cmd ) )
    cmd = "\t\t ZN44, 0, -6.6, -6.1, ..., -6.8"
    print ( w . fill ( cmd ) )
    print ( '' )
    cmd = "\t\tid is a unique molecular identifier"
    print ( w . fill ( cmd ) )
    cmd = "\t\tstatus takes a value of '1' if the molecule is active and '0' otherwise."
    print ( w . fill ( cmd ) )
    cmd = "\t\treceptor_1 through receptor_N are docking scores."
    print ( w . fill ( cmd ) )
    print ( '' )
    outname = "--outname : (required) the prefix of the outputfiles."
    print ( w . fill ( outname ) )
    print ( '' )
    ensemble_list = "--ensemble_list : (required) a list of csv files that contain the queries in\
    the ensemble. For example, 'Ensemble_1_queries.csv Ensemble_2_queries.csv\
    Ensemble_3_queries.csv ...'"
    print ( w . fill ( ensemble_list ) )
    print ( '' )
    compare = "--compare : (optional) Compare the virtual screening results for the\
    ensembles specified after the '--ensemble_list' flag. No more than two ensembles\
    may be specified at once."
    print ( w . fill ( compare ) )
    print ( '' )
    fpf = "--fpf : (optional) Evaluate ensemble performance at the set of specified FPF values. \
    By default, values of '0.0001', '0.001', '0.01', and '0.05' are considered, if they are defined."
    print ( w . fill ( fpf ) )
    print ( '' )
    plot = "--plot : (optional) Generate ROC plots of the input ensembles and their\
    members."
    print ( w . fill ( plot ) )
    print ( '' )
    roc_data = "--write_roc : (optional) if the '--write_roc' flag is set, a 'ROC_DATA' \
    directory will be created, & ROC data points will be written there for each\
    ensemble. The default is not to write ROC data points."
    print ( w . fill ( roc_data ) )
    print ( '' )
|
def with_inverse(points, noise):
    """Smooths a set of points with a forward and a backward Kalman pass.

    It smooths them twice, once in the given order and once in the reverse
    order. The first half of the result is taken from the reverse pass and
    the second half from the forward pass; the point at the split is the
    mean of both passes.

    Args:
        points (:obj:`list` of :obj:`Point`)
        noise (float): Expected noise, the higher it is the more the path
            will be smoothed.
    Returns:
        :obj:`list` of :obj:`Point`
    """
    # Floor division: plain '/' yields a float on Python 3, which cannot
    # be used as a slice index below.
    break_point = len(points) // 2
    # Deep-copy before reversing so the backward pass cannot mutate the
    # caller's points (kalman_filter may modify them in place).
    reversed_points = list(reversed(copy.deepcopy(points)))
    backward = kalman_filter(reversed_points, noise)
    forward = kalman_filter(points, noise)
    result = list(reversed(backward))[:break_point] + forward[break_point:]
    # Blend the two passes at the seam to avoid a discontinuity.
    result[break_point] = point_mean(backward[break_point], forward[break_point])
    return result
|
def simxGetBooleanParameter(clientID, paramIdentifier, operationMode):
    '''Please have a look at the function description/documentation in the V-REP user manual'''
    # The C API writes the parameter into an out-parameter byte; convert
    # it to a Python bool alongside the return code.
    param_value = ct.c_ubyte()
    return_code = c_GetBooleanParameter(
        clientID, paramIdentifier, ct.byref(param_value), operationMode)
    return return_code, bool(param_value.value != 0)
|
def plot_predict ( self , h = 5 , past_values = 20 , intervals = True , ** kwargs ) :
    """Makes forecast with the estimated model

    Parameters
    ----------
    h : int (default : 5)
        How many steps ahead would you like to forecast?

    past_values : int (default : 20)
        How many past observations to show on the forecast graph?

    intervals : Boolean
        Would you like to show 95% prediction intervals for the forecast?

    Returns
    ----------
    - Plot of the forecast
    """
    import matplotlib . pyplot as plt
    import seaborn as sns
    figsize = kwargs . get ( 'figsize' , ( 10 , 7 ) )
    if self . latent_variables . estimated is False :
        raise Exception ( "No latent variables estimated!" )
    else : # Retrieve data , dates and ( transformed ) latent variables
        # NOTE(review): the duplicated assignment target below looks like a
        # typo but is harmless - the same value is simply bound twice.
        scale , shape , skewness = scale , shape , skewness = self . _get_scale_and_shape ( self . latent_variables . get_z_values ( transformed = True ) )
        # Deterministic point forecast: last level plus the last trend
        # carried forward one step at a time.
        forecasted_values = np . zeros ( h )
        for value in range ( 0 , h ) :
            if value == 0 :
                forecasted_values [ value ] = self . states [ 0 ] [ - 1 ] + self . states [ 1 ] [ - 1 ]
            else :
                forecasted_values [ value ] = forecasted_values [ value - 1 ] + self . states [ 1 ] [ - 1 ]
        previous_value = self . data [ - 1 ]
        date_index = self . shift_dates ( h )
        # Monte Carlo simulation of the level/trend evolution, used to
        # derive the prediction-interval band below.
        simulations = 10000
        sim_vector = np . zeros ( [ simulations , h ] )
        for n in range ( 0 , simulations ) :
            rnd_q = np . random . normal ( 0 , np . sqrt ( self . latent_variables . get_z_values ( transformed = True ) [ 0 ] ) , h )
            rnd_q2 = np . random . normal ( 0 , np . sqrt ( self . latent_variables . get_z_values ( transformed = True ) [ 1 ] ) , h )
            exp_0 = np . zeros ( h )
            exp_1 = np . zeros ( h )
            for value in range ( 0 , h ) :
                if value == 0 :
                    exp_0 [ value ] = self . states [ 1 ] [ - 1 ] + self . states [ 0 ] [ - 1 ] + rnd_q [ value ]
                    exp_1 [ value ] = self . states [ 1 ] [ - 1 ] + rnd_q2 [ value ]
                else :
                    exp_0 [ value ] = exp_0 [ value - 1 ] + exp_1 [ value - 1 ] + rnd_q [ value ]
                    exp_1 [ value ] = exp_1 [ value - 1 ] + rnd_q2 [ value ]
            sim_vector [ n ] = self . family . draw_variable ( loc = self . link ( exp_0 ) , shape = shape , scale = scale , skewness = skewness , nsims = exp_0 . shape [ 0 ] )
        sim_vector = np . transpose ( sim_vector )
        forecasted_values = self . link ( forecasted_values )
        plt . figure ( figsize = figsize )
        if intervals == True :
            # Shade the 5th-95th simulated percentile band, anchored at
            # the last observed value so it connects to the data line.
            plt . fill_between ( date_index [ - h - 1 : ] , np . insert ( [ np . percentile ( i , 5 ) for i in sim_vector ] , 0 , previous_value ) , np . insert ( [ np . percentile ( i , 95 ) for i in sim_vector ] , 0 , previous_value ) , alpha = 0.2 , label = "95 C.I." )
        plot_values = np . append ( self . data [ - past_values : ] , forecasted_values )
        plot_index = date_index [ - h - past_values : ]
        plt . plot ( plot_index , plot_values , label = self . data_name )
        plt . title ( "Forecast for " + self . data_name )
        plt . xlabel ( "Time" )
        plt . ylabel ( self . data_name )
        plt . show ( )
|
def is_json(string):
    """Check if a string is a valid json.

    :param string: String to check.
    :type string: str
    :return: True if json, false otherwise
    :rtype: bool
    """
    # NOTE: only JSON objects (dicts) count as valid here - top-level
    # arrays are rejected, matching the existing behavior.
    if not is_full_string(string):
        return False
    if not JSON_WRAPPER_RE.search(string):
        return False
    try:
        return isinstance(json.loads(string), dict)
    except (TypeError, ValueError, OverflowError):
        return False
|
def get_weather_code(self, ip):
    '''Return the weather_code for *ip*, or a falsy lookup result.'''
    record = self.get_all(ip)
    if not record:
        # Propagate the falsy lookup result (e.g. None) unchanged.
        return record
    return record.weather_code
|
def dumps(value, encoding=None):
    """dumps(object, encoding=None) -> string

    This function dumps a python object as a tnetstring.
    """
    # Fragments are collected in reverse order in a deque and joined once
    # at the end - measurably faster than creating intermediate strings.
    # For a simpler (but slower) top-down formulation of the tnetstring
    # format, see the _gdumps() generator instead.
    fragments = deque()
    _rdumpq(fragments, 0, value, encoding)
    return "".join(fragments)
|
def set_maintainer(self, maintainer):
    # type: (Union[hdx.data.user.User, Dict, str]) -> None
    """Set the dataset's maintainer.

    Args:
        maintainer (Union[User, Dict, str]): Either a user id or User metadata from a User object or dictionary.

    Returns:
        None
    """
    if isinstance(maintainer, (hdx.data.user.User, dict)):
        if 'id' not in maintainer:
            # Resolve the user id from the user name via HDX.
            maintainer = hdx.data.user.User.read_from_hdx(
                maintainer['name'], configuration=self.configuration)
        maintainer = maintainer['id']
    elif not isinstance(maintainer, str):
        raise HDXError('Type %s cannot be added as a maintainer!' % type(maintainer).__name__)
    if is_valid_uuid(maintainer) is False:
        raise HDXError('%s is not a valid user id for a maintainer!' % maintainer)
    self.data['maintainer'] = maintainer
|
def install ( pkgs = None , # pylint : disable = R0912 , R0913 , R0914
requirements = None , bin_env = None , use_wheel = False , no_use_wheel = False , log = None , proxy = None , timeout = None , editable = None , find_links = None , index_url = None , extra_index_url = None , no_index = False , mirrors = None , build = None , target = None , download = None , download_cache = None , source = None , upgrade = False , force_reinstall = False , ignore_installed = False , exists_action = None , no_deps = False , no_install = False , no_download = False , global_options = None , install_options = None , user = None , cwd = None , pre_releases = False , cert = None , allow_all_external = False , allow_external = None , allow_unverified = None , process_dependency_links = False , saltenv = 'base' , env_vars = None , use_vt = False , trusted_host = None , no_cache_dir = False , cache_dir = None , no_binary = None , extra_args = None , ** kwargs ) :
'''Install packages with pip
Install packages individually or from a pip requirements file . Install
packages globally or to a virtualenv .
pkgs
Comma separated list of packages to install
requirements
Path to requirements
bin _ env
Path to pip ( or to a virtualenv ) . This can be used to specify the path
to the pip to use when more than one Python release is installed ( e . g .
` ` / usr / bin / pip - 2.7 ` ` or ` ` / usr / bin / pip - 2.6 ` ` . If a directory path is
specified , it is assumed to be a virtualenv .
. . note : :
For Windows , if the pip module is being used to upgrade the pip
package , bin _ env should be the path to the virtualenv or to the
python binary that should be used . The pip command is unable to
upgrade itself in Windows .
use _ wheel
Prefer wheel archives ( requires pip > = 1.4)
no _ use _ wheel
Force to not use wheel archives ( requires pip > = 1.4 , < 10.0.0)
no _ binary
Force to not use binary packages ( requires pip > = 7.0.0)
Accepts either : all : to disable all binary packages , : none : to empty the set ,
or one or more package names with commas between them
log
Log file where a complete ( maximum verbosity ) record will be kept
proxy
Specify a proxy in the form ` ` user : passwd @ proxy . server : port ` ` . Note
that the ` ` user : password @ ` ` is optional and required only if you are
behind an authenticated proxy . If you provide
` ` user @ proxy . server : port ` ` then you will be prompted for a password .
timeout
Set the socket timeout ( default 15 seconds )
editable
install something editable ( e . g .
` ` git + https : / / github . com / worldcompany / djangoembed . git # egg = djangoembed ` ` )
find _ links
URL to search for packages
index _ url
Base URL of Python Package Index
extra _ index _ url
Extra URLs of package indexes to use in addition to ` ` index _ url ` `
no _ index
Ignore package index
mirrors
Specific mirror URL ( s ) to query ( automatically adds - - use - mirrors )
. . warning : :
This option has been deprecated and removed in pip version 7.0.0.
Please use ` ` index _ url ` ` and / or ` ` extra _ index _ url ` ` instead .
build
Unpack packages into ` ` build ` ` dir
target
Install packages into ` ` target ` ` dir
download
Download packages into ` ` download ` ` instead of installing them
download _ cache | cache _ dir
Cache downloaded packages in ` ` download _ cache ` ` or ` ` cache _ dir ` ` dir
source
Check out ` ` editable ` ` packages into ` ` source ` ` dir
upgrade
Upgrade all packages to the newest available version
force _ reinstall
When upgrading , reinstall all packages even if they are already
up - to - date .
ignore _ installed
Ignore the installed packages ( reinstalling instead )
exists _ action
Default action when a path already exists : ( s ) witch , ( i ) gnore , ( w ) ipe ,
( b ) ackup
no _ deps
Ignore package dependencies
no _ install
Download and unpack all packages , but don ' t actually install them
no _ download
Don ' t download any packages , just install the ones already downloaded
( completes an install run with ` ` - - no - install ` ` )
install _ options
Extra arguments to be supplied to the setup . py install command ( e . g .
like ` ` - - install - option = ' - - install - scripts = / usr / local / bin ' ` ` ) . Use
multiple - - install - option options to pass multiple options to setup . py
install . If you are using an option with a directory path , be sure to
use absolute path .
global _ options
Extra global options to be supplied to the setup . py call before the
install command .
user
The user under which to run pip
cwd
Directory from which to run pip
pre _ releases
Include pre - releases in the available versions
cert
Provide a path to an alternate CA bundle
allow _ all _ external
Allow the installation of all externally hosted files
allow _ external
Allow the installation of externally hosted files ( comma separated
list )
allow _ unverified
Allow the installation of insecure and unverifiable files ( comma
separated list )
process _ dependency _ links
Enable the processing of dependency links
env _ vars
Set environment variables that some builds will depend on . For example ,
a Python C - module may have a Makefile that needs INCLUDE _ PATH set to
pick up a header file while compiling . This must be in the form of a
dictionary or a mapping .
Example :
. . code - block : : bash
salt ' * ' pip . install django _ app env _ vars = " { ' CUSTOM _ PATH ' : ' / opt / django _ app ' } "
trusted _ host
Mark this host as trusted , even though it does not have valid or any
HTTPS .
use _ vt
Use VT terminal emulation ( see output while installing )
no _ cache _ dir
Disable the cache .
extra _ args
pip keyword and positional arguments not yet implemented in salt
. . code - block : : yaml
salt ' * ' pip . install pandas extra _ args = " [ { ' - - latest - pip - kwarg ' : ' param ' } , ' - - latest - pip - arg ' ] "
. . warning : :
If unsupported options are passed here that are not supported in a
minion ' s version of pip , a ` No such option error ` will be thrown .
Will be translated into the following pip command :
. . code - block : : bash
pip install pandas - - latest - pip - kwarg param - - latest - pip - arg
CLI Example :
. . code - block : : bash
salt ' * ' pip . install < package name > , < package2 name >
salt ' * ' pip . install requirements = / path / to / requirements . txt
salt ' * ' pip . install < package name > bin _ env = / path / to / virtualenv
salt ' * ' pip . install < package name > bin _ env = / path / to / pip _ bin
Complicated CLI example : :
salt ' * ' pip . install markdown , django editable = git + https : / / github . com / worldcompany / djangoembed . git # egg = djangoembed upgrade = True no _ deps = True'''
|
cmd = _get_pip_bin ( bin_env )
cmd . append ( 'install' )
cleanup_requirements , error = _process_requirements ( requirements = requirements , cmd = cmd , cwd = cwd , saltenv = saltenv , user = user )
if error :
return error
cur_version = version ( bin_env )
if use_wheel :
min_version = '1.4'
max_version = '9.0.3'
too_low = salt . utils . versions . compare ( ver1 = cur_version , oper = '<' , ver2 = min_version )
too_high = salt . utils . versions . compare ( ver1 = cur_version , oper = '>' , ver2 = max_version )
if too_low or too_high :
logger . error ( 'The --use-wheel option is only supported in pip between %s and ' '%s. The version of pip detected is %s. This option ' 'will be ignored.' , min_version , max_version , cur_version )
else :
cmd . append ( '--use-wheel' )
if no_use_wheel :
min_version = '1.4'
max_version = '9.0.3'
too_low = salt . utils . versions . compare ( ver1 = cur_version , oper = '<' , ver2 = min_version )
too_high = salt . utils . versions . compare ( ver1 = cur_version , oper = '>' , ver2 = max_version )
if too_low or too_high :
logger . error ( 'The --no-use-wheel option is only supported in pip between %s and ' '%s. The version of pip detected is %s. This option ' 'will be ignored.' , min_version , max_version , cur_version )
else :
cmd . append ( '--no-use-wheel' )
if no_binary :
min_version = '7.0.0'
too_low = salt . utils . versions . compare ( ver1 = cur_version , oper = '<' , ver2 = min_version )
if too_low :
logger . error ( 'The --no-binary option is only supported in pip %s and ' 'newer. The version of pip detected is %s. This option ' 'will be ignored.' , min_version , cur_version )
else :
if isinstance ( no_binary , list ) :
no_binary = ',' . join ( no_binary )
cmd . extend ( [ '--no-binary' , no_binary ] )
if log :
if os . path . isdir ( log ) :
raise IOError ( '\'{0}\' is a directory. Use --log path_to_file' . format ( log ) )
elif not os . access ( log , os . W_OK ) :
raise IOError ( '\'{0}\' is not writeable' . format ( log ) )
cmd . extend ( [ '--log' , log ] )
if proxy :
cmd . extend ( [ '--proxy' , proxy ] )
if timeout :
try :
if isinstance ( timeout , float ) : # Catch floating point input , exception will be caught in
# exception class below .
raise ValueError ( 'Timeout cannot be a float' )
int ( timeout )
except ValueError :
raise ValueError ( '\'{0}\' is not a valid timeout, must be an integer' . format ( timeout ) )
cmd . extend ( [ '--timeout' , timeout ] )
if find_links :
if isinstance ( find_links , six . string_types ) :
find_links = [ l . strip ( ) for l in find_links . split ( ',' ) ]
for link in find_links :
if not ( salt . utils . url . validate ( link , VALID_PROTOS ) or os . path . exists ( link ) ) :
raise CommandExecutionError ( '\'{0}\' is not a valid URL or path' . format ( link ) )
cmd . extend ( [ '--find-links' , link ] )
if no_index and ( index_url or extra_index_url ) :
raise CommandExecutionError ( '\'no_index\' and (\'index_url\' or \'extra_index_url\') are ' 'mutually exclusive.' )
if index_url :
if not salt . utils . url . validate ( index_url , VALID_PROTOS ) :
raise CommandExecutionError ( '\'{0}\' is not a valid URL' . format ( index_url ) )
cmd . extend ( [ '--index-url' , index_url ] )
if extra_index_url :
if not salt . utils . url . validate ( extra_index_url , VALID_PROTOS ) :
raise CommandExecutionError ( '\'{0}\' is not a valid URL' . format ( extra_index_url ) )
cmd . extend ( [ '--extra-index-url' , extra_index_url ] )
if no_index :
cmd . append ( '--no-index' )
if mirrors : # https : / / github . com / pypa / pip / pull / 2641 / files # diff - 3ef137fb9ffdd400f117a565cd94c188L216
if salt . utils . versions . compare ( ver1 = cur_version , oper = '>=' , ver2 = '7.0.0' ) :
raise CommandExecutionError ( 'pip >= 7.0.0 does not support mirror argument:' ' use index_url and/or extra_index_url instead' )
if isinstance ( mirrors , six . string_types ) :
mirrors = [ m . strip ( ) for m in mirrors . split ( ',' ) ]
cmd . append ( '--use-mirrors' )
for mirror in mirrors :
if not mirror . startswith ( 'http://' ) :
raise CommandExecutionError ( '\'{0}\' is not a valid URL' . format ( mirror ) )
cmd . extend ( [ '--mirrors' , mirror ] )
if build :
cmd . extend ( [ '--build' , build ] )
if target :
cmd . extend ( [ '--target' , target ] )
if download :
cmd . extend ( [ '--download' , download ] )
if download_cache or cache_dir :
cmd . extend ( [ '--cache-dir' if salt . utils . versions . compare ( ver1 = cur_version , oper = '>=' , ver2 = '6.0' ) else '--download-cache' , download_cache or cache_dir ] )
if source :
cmd . extend ( [ '--source' , source ] )
if upgrade :
cmd . append ( '--upgrade' )
if force_reinstall :
cmd . append ( '--force-reinstall' )
if ignore_installed :
cmd . append ( '--ignore-installed' )
if exists_action :
if exists_action . lower ( ) not in ( 's' , 'i' , 'w' , 'b' ) :
raise CommandExecutionError ( 'The exists_action pip option only supports the values ' 's, i, w, and b. \'{0}\' is not valid.' . format ( exists_action ) )
cmd . extend ( [ '--exists-action' , exists_action ] )
if no_deps :
cmd . append ( '--no-deps' )
if no_install :
cmd . append ( '--no-install' )
if no_download :
cmd . append ( '--no-download' )
if no_cache_dir :
cmd . append ( '--no-cache-dir' )
if pre_releases : # Check the locally installed pip version
pip_version = cur_version
# From pip v1.4 the - - pre flag is available
if salt . utils . versions . compare ( ver1 = pip_version , oper = '>=' , ver2 = '1.4' ) :
cmd . append ( '--pre' )
if cert :
cmd . extend ( [ '--cert' , cert ] )
if global_options :
if isinstance ( global_options , six . string_types ) :
global_options = [ go . strip ( ) for go in global_options . split ( ',' ) ]
for opt in global_options :
cmd . extend ( [ '--global-option' , opt ] )
if install_options :
if isinstance ( install_options , six . string_types ) :
install_options = [ io . strip ( ) for io in install_options . split ( ',' ) ]
for opt in install_options :
cmd . extend ( [ '--install-option' , opt ] )
if pkgs :
if not isinstance ( pkgs , list ) :
try :
pkgs = [ p . strip ( ) for p in pkgs . split ( ',' ) ]
except AttributeError :
pkgs = [ p . strip ( ) for p in six . text_type ( pkgs ) . split ( ',' ) ]
pkgs = salt . utils . data . stringify ( salt . utils . data . decode_list ( pkgs ) )
# It ' s possible we replaced version - range commas with semicolons so
# they would survive the previous line ( in the pip . installed state ) .
# Put the commas back in while making sure the names are contained in
# quotes , this allows for proper version spec passing salt > = 0.17.0
cmd . extend ( [ p . replace ( ';' , ',' ) for p in pkgs ] )
elif not any ( [ requirements , editable ] ) : # Starting with pip 10.0.0 , if no packages are specified in the
# command , it returns a retcode 1 . So instead of running the command ,
# just return the output without running pip .
return { 'retcode' : 0 , 'stdout' : 'No packages to install.' }
if editable :
egg_match = re . compile ( r'(?:#|#.*?&)egg=([^&]*)' )
if isinstance ( editable , six . string_types ) :
editable = [ e . strip ( ) for e in editable . split ( ',' ) ]
for entry in editable : # Is the editable local ?
if not ( entry == '.' or entry . startswith ( ( 'file://' , '/' ) ) ) :
match = egg_match . search ( entry )
if not match or not match . group ( 1 ) : # Missing # egg = theEggName
raise CommandExecutionError ( 'You must specify an egg for this editable' )
cmd . extend ( [ '--editable' , entry ] )
if allow_all_external :
cmd . append ( '--allow-all-external' )
if allow_external :
if isinstance ( allow_external , six . string_types ) :
allow_external = [ p . strip ( ) for p in allow_external . split ( ',' ) ]
for pkg in allow_external :
cmd . extend ( [ '--allow-external' , pkg ] )
if allow_unverified :
if isinstance ( allow_unverified , six . string_types ) :
allow_unverified = [ p . strip ( ) for p in allow_unverified . split ( ',' ) ]
for pkg in allow_unverified :
cmd . extend ( [ '--allow-unverified' , pkg ] )
if process_dependency_links :
cmd . append ( '--process-dependency-links' )
if trusted_host :
cmd . extend ( [ '--trusted-host' , trusted_host ] )
if extra_args : # These are arguments from the latest version of pip that
# have not yet been implemented in salt
for arg in extra_args : # It is a keyword argument
if isinstance ( arg , dict ) : # There will only ever be one item in this dictionary
key , val = arg . popitem ( )
# Don ' t allow any recursion into keyword arg definitions
# Don ' t allow multiple definitions of a keyword
if isinstance ( val , ( dict , list ) ) :
raise TypeError ( "Too many levels in: {}" . format ( key ) )
# This is a a normal one - to - one keyword argument
cmd . extend ( [ key , val ] )
# It is a positional argument , append it to the list
else :
cmd . append ( arg )
cmd_kwargs = dict ( saltenv = saltenv , use_vt = use_vt , runas = user )
if kwargs :
cmd_kwargs . update ( kwargs )
if env_vars :
cmd_kwargs . setdefault ( 'env' , { } ) . update ( _format_env_vars ( env_vars ) )
try :
if cwd :
cmd_kwargs [ 'cwd' ] = cwd
if bin_env and os . path . isdir ( bin_env ) :
cmd_kwargs . setdefault ( 'env' , { } ) [ 'VIRTUAL_ENV' ] = bin_env
logger . debug ( 'TRY BLOCK: end of pip.install -- cmd: %s, cmd_kwargs: %s' , cmd , cmd_kwargs )
return __salt__ [ 'cmd.run_all' ] ( cmd , python_shell = False , ** cmd_kwargs )
finally :
_clear_context ( bin_env )
for tempdir in [ cr for cr in cleanup_requirements if cr is not None ] :
if os . path . isdir ( tempdir ) :
shutil . rmtree ( tempdir )
|
def to_array(self):
    """Serialize this InlineQueryResultCachedDocument into a plain dict.

    :return: dictionary representation of this object.
    :rtype: dict
    """
    result = super(InlineQueryResultCachedDocument, self).to_array()
    # 'type' and 'id' are already filled in by the superclass.
    # Mandatory string fields (u() yields unicode on py2, str on py3).
    result['title'] = u(self.title)
    result['document_file_id'] = u(self.document_file_id)
    # Optional string fields are only emitted when set.
    for key in ('description', 'caption', 'parse_mode'):
        value = getattr(self, key)
        if value is not None:
            result[key] = u(value)
    if self.reply_markup is not None:
        # type InlineKeyboardMarkup
        result['reply_markup'] = self.reply_markup.to_array()
    if self.input_message_content is not None:
        # type InputMessageContent
        result['input_message_content'] = self.input_message_content.to_array()
    return result
|
def join(self, column_label, other, other_label=None):
    """Create a new table with the columns of self and other, containing
    rows for all values of a column that appear in both tables (an inner
    join on the given column).

    Args:
        column_label (str): label of the column in self used to join rows
            of ``other``.
        other: Table object to join with self on matching values of
            ``column_label``.

    Kwargs:
        other_label (str): label of the join column in ``other``; defaults
            to ``column_label``.

    Returns:
        New table of self joined with ``other`` by matching values in
        ``column_label`` and ``other_label``, sorted by the join column.
        If either table is empty or the join produces no rows, returns
        None.
    """
    # An inner join with an empty operand is always empty.
    if self.num_rows == 0 or other.num_rows == 0:
        return None
    if not other_label:
        other_label = column_label
    # Index both tables by their join column: value -> list of rows.
    self_rows = self.index_by(column_label)
    other_rows = other.index_by(other_label)
    # Gather joined rows from self_rows that have join values in other_rows:
    # cartesian product of the matching row groups for each shared value.
    joined_rows = []
    for v, rows in self_rows.items():
        if v in other_rows:
            joined_rows += [row + o for row in rows for o in other_rows[v]]
    if not joined_rows:
        return None
    # Build joined table; other's labels are relabeled so nothing collides
    # with self's labels.
    self_labels = list(self.labels)
    other_labels = [self._unused_label(s) for s in other.labels]
    other_labels_map = dict(zip(other.labels, other_labels))
    joined = type(self)(self_labels + other_labels).with_rows(joined_rows)
    # Copy formats from both tables into the joined table.
    joined._formats.update(self._formats)
    for label in other._formats:
        joined._formats[other_labels_map[label]] = other._formats[label]
    # Remove the redundant copy of the join column coming from `other`,
    # but keep its formatting when `self` did not define one.
    del joined[other_labels_map[other_label]]
    if column_label not in self._formats and other_label in other._formats:
        joined._formats[column_label] = other._formats[other_label]
    # Present the join column first and sort by it.
    return joined.move_to_start(column_label).sort(column_label)
|
def cancel_download_task(self, task_id, expires=None, **kwargs):
    """Cancel an offline (cloud) download task.

    :param task_id: ID of the task to cancel.
    :type task_id: str
    :param expires: optional request expiry timestamp; validated by the
        server when supplied.
    :type expires: int
    :return: Response object
    """
    # NOTE: 'cancle_task' (sic) is the method name the remote API expects;
    # the typo is on the server side, do not "fix" it here.
    payload = {'expires': expires, 'task_id': task_id}
    return self._request('services/cloud_dl', 'cancle_task', data=payload, **kwargs)
|
def get_reaction_values(self, reaction_id):
    """Return the stoichiometric values of a reaction as a dictionary.

    :param reaction_id: identifier of the reaction to look up.
    :raises ValueError: if the reaction is not part of this model.
    """
    # Validate against the model's own reaction set before delegating,
    # so unknown ids fail fast with a clear message.
    if reaction_id not in self._reaction_set:
        raise ValueError('Unknown reaction: {}'.format(repr(reaction_id)))
    return self._database.get_reaction_values(reaction_id)
|
async def update_ports(self, ports, ovsdb_ports):
    """Called from the main module to refresh cached OVSDB port information.

    Rebuilds the name->ofport and id->ofport maps, the initial retrieve
    keys and the walker dictionary, then restarts the object walk.  Does
    nothing when neither map changed.  Note: `ports` is not used here.
    """
    # 32-bit ofport values keyed by port name / by logical-port id.
    new_port_names = dict((p['name'], _to32bitport(p['ofport'])) for p in ovsdb_ports)
    new_port_ids = dict((p['id'], _to32bitport(p['ofport'])) for p in ovsdb_ports if p['id'])
    # Nothing changed -> skip the (potentially expensive) re-walk.
    if new_port_names == self._portnames and new_port_ids == self._portids:
        return
    # Mutate the existing dicts in place first (other code may hold
    # references to them) ...
    self._portnames.clear()
    self._portnames.update(new_port_names)
    self._portids.clear()
    self._portids.update(new_port_ids)
    logicalportkeys = [LogicalPort.default_key(id) for id in self._portids]
    # Keys that must be retrieved on (re)start: one per logical port plus
    # the physical port set.
    self._original_initialkeys = logicalportkeys + [PhysicalPortSet.default_key()]
    self._initialkeys = tuple(itertools.chain(self._original_initialkeys, self._append_initialkeys))
    phy_walker = partial(self._physicalport_walker, _portnames=new_port_names)
    log_walker = partial(self._logicalport_walker, _portids=new_port_ids)
    self._walkerdict = dict(itertools.chain(((PhysicalPortSet.default_key(), phy_walker),), ((lgportkey, log_walker) for lgportkey in logicalportkeys)))
    # NOTE(review): ... and then rebind the attributes to the freshly built
    # dicts, discarding the in-place-updated ones above — presumably
    # intentional (walkers capture the new dicts), but worth confirming.
    self._portnames = new_port_names
    self._portids = new_port_ids
    await self.restart_walk()
|
def optimize(self, graph):
    """Fill ``self.distances`` with the squared Euclidean distance between
    every ordered pair of nodes, computed from their 'position' attributes.

    @type  graph: graph
    @param graph: Graph.
    """
    for origin in graph.nodes():
        for target in graph.nodes():
            # Locate the 'position' attribute of each endpoint.
            for attr in graph.node_attributes(origin):
                if attr[0] == 'position':
                    origin_pos = attr[1]
                    break
            for attr in graph.node_attributes(target):
                if attr[0] == 'position':
                    target_pos = attr[1]
                    break
            # Squared distance (no sqrt: fine for relative comparisons).
            squared = sum(
                (float(origin_pos[i]) - float(target_pos[i])) ** 2
                for i in range(len(origin_pos))
            )
            self.distances[(origin, target)] = squared
|
def ticket_forms_reorder(self, data, **kwargs):
    """Reorder ticket forms.

    https://developer.zendesk.com/rest_api/docs/core/ticket_forms#reorder-ticket-forms
    """
    return self.call("/api/v2/ticket_forms/reorder.json", method="PUT", data=data, **kwargs)
|
def get_build_platform():
    """Return this platform's string for platform-specific distributions.

    XXX Currently this is the same as ``sysconfig.get_platform()``, but it
    needs some hacks for Linux and Mac OS X.
    """
    from sysconfig import get_platform
    plat = get_platform()
    # On Mac OS X, sysconfig may report a generic platform string;
    # synthesize a "macosx-<major>.<minor>-<arch>" value instead.
    if sys.platform == "darwin" and not plat.startswith('macosx-'):
        try:
            version_parts = _macosx_vers()
            machine = os.uname()[4].replace(" ", "_")
            return "macosx-%d.%d-%s" % (
                int(version_parts[0]),
                int(version_parts[1]),
                _macosx_arch(machine),
            )
        except ValueError:
            # Someone is running a non-Mac darwin system: fall through to
            # the default value.
            pass
    return plat
|
def check_property(prop, name, **kwargs):
    """Check and parse a property, dispatching to a dedicated checker when
    one exists for ``name``, otherwise falling back to a generic parser.
    """
    # Map of property names to their dedicated checking functions.
    checkers = {
        'color': check_color,
        'alpha': check_alpha,
        'size': check_size,
        'thickness': check_thickness,
        'index': check_index,
        'coordinates': check_coordinates,
        'colormap': check_colormap,
        'bins': check_bins,
        'spec': check_spec,
    }
    checker = checkers.get(name)
    if checker is not None:
        return checker(prop, **kwargs)
    # Generic handling: sequences and scalars go through the 1-d parser,
    # anything else is passed through untouched.
    if isinstance(prop, (list, ndarray)) or isscalar(prop):
        return check_1d(prop, name)
    return prop
|
def split(data: mx.nd.NDArray, num_outputs: int, axis: int = 1, squeeze_axis: bool = False) -> List[mx.nd.NDArray]:
    """Version of mxnet.ndarray.split that always returns a list.

    The original implementation only returns a list if num_outputs > 1:
    https://mxnet.incubator.apache.org/api/python/ndarray/ndarray.html#mxnet.ndarray.split

    Splits an array along a particular axis into multiple sub-arrays.

    :param data: The input.
    :param num_outputs: Number of splits. Note that this should evenly
        divide the length of the axis.
    :param axis: Axis along which to split.
    :param squeeze_axis: If true, removes length-1 axes from the shapes of
        the output arrays.
    :return: List of NDArrays resulting from the split.
    """
    result = data.split(num_outputs=num_outputs, axis=axis, squeeze_axis=squeeze_axis)
    # Normalize: mxnet returns a bare NDArray when num_outputs == 1.
    return [result] if num_outputs == 1 else result
|
def sort_seq_records(self, seq_records):
    """Return ``seq_records`` sorted by gene_code and then by voucher code.

    Dashes in voucher codes are converted to underscores (in place) so the
    dataset will be accepted by Biopython for format conversions.
    """
    # Sanitize voucher codes first; Biopython rejects dashes in taxon names.
    for record in seq_records:
        record.voucher_code = record.voucher_code.replace("-", "_")

    gene_codes = sorted({rec.gene_code for rec in seq_records}, key=str.lower)
    voucher_codes = sorted({rec.voucher_code for rec in seq_records}, key=str.lower)

    # Emit records grouped by gene code, then by voucher code.
    ordered = []
    for gene_code in gene_codes:
        for voucher_code in voucher_codes:
            for record in seq_records:
                if record.gene_code == gene_code and record.voucher_code == voucher_code:
                    ordered.append(record)
    return ordered
|
def get_url_params(url: str, fragment: bool = False) -> dict:
    """Parse the query (or fragment) parameters of a URL into a dict.

    :param url: the URL to parse.
    :param fragment: when True, parse the fragment part instead of the
        query string.
    :return: mapping of parameter names to values.
    """
    parts = urlparse(url)
    source = parts.fragment if fragment else parts.query
    return dict(parse_qsl(source))
|
def getElementsByType(self, type):
    """Retrieve all elements in the hierarchy that are instances of ``type``.

    @type  type: class
    @param type: type of the element
    """
    # Walk the whole hierarchy and keep only matching instances.
    return [element for element in self.getAllElementsOfHirarchy() if isinstance(element, type)]
|
def get_labs(format):
    """Get data for all labs registered on makeinitaly.foundation.

    :param format: output format -- "dict"/"json", "geojson"/"geo",
        "pandas"/"dataframe" or "object"/"obj" (default: objects).
    :return: the labs in the requested representation.
    """
    labs = []
    # Get the first page of data
    wiki = MediaWiki(makeinitaly__foundation_api_url)
    wiki_response = wiki.call({'action': 'query', 'list': 'categorymembers', 'cmtitle': 'Category:Italian_FabLabs', 'cmlimit': '500'})
    if "query-continue" in wiki_response:
        nextpage = wiki_response["query-continue"]["categorymembers"]["cmcontinue"]
    urls = []
    for i in wiki_response["query"]["categorymembers"]:
        urls.append(i["title"].replace(" ", "_"))
    # Load all the Labs in the first page
    for i in urls:
        labs.append(get_single_lab(i))
    # Load all the Labs from the other pages
    while "query-continue" in wiki_response:
        wiki = MediaWiki(makeinitaly__foundation_api_url)
        # NOTE(review): this continuation query uses 'Category:Hackerspace'
        # while the first page used 'Category:Italian_FabLabs' -- looks like
        # a copy/paste slip; confirm which category is intended.
        wiki_response = wiki.call({'action': 'query', 'list': 'categorymembers', 'cmtitle': 'Category:Hackerspace', 'cmlimit': '500', "cmcontinue": nextpage})
        urls = []
        for i in wiki_response["query"]["categorymembers"]:
            urls.append(i["title"].replace(" ", "_"))
        # Load all the Labs
        for i in urls:
            # BUG FIX: previously called get_single_lab(i, data_format), but
            # no 'data_format' name exists in this scope (NameError).
            labs.append(get_single_lab(i))
        if "query-continue" in wiki_response:
            nextpage = wiki_response["query-continue"]["categorymembers"]["cmcontinue"]
        else:
            break
    # Index the lab objects by position, as plain dictionaries
    labs_dict = {}
    for j, k in enumerate(labs):
        labs_dict[j] = k.__dict__
    # Return a dictionary / json
    if format.lower() == "dict" or format.lower() == "json":
        output = labs_dict
    # Return a geojson
    elif format.lower() == "geojson" or format.lower() == "geo":
        labs_list = []
        for l in labs_dict:
            # BUG FIX: labs_dict values are already plain dicts; the old
            # code accessed .__dict__ on them, raising AttributeError.
            single = labs_dict[l]
            # NOTE(review): GeoJSON positions are (longitude, latitude);
            # this emits (latitude, longitude) -- confirm with consumers
            # before changing.
            single_lab = Feature(type="Feature", geometry=Point((single["latitude"], single["longitude"])), properties=single)
            labs_list.append(single_lab)
        output = dumps(FeatureCollection(labs_list))
    # Return a Pandas DataFrame
    elif format.lower() == "pandas" or format.lower() == "dataframe":
        # BUG FIX: same .__dict__ misuse as above; the values are already
        # dicts, so feed them to pandas directly.
        output = pd.DataFrame.from_dict(labs_dict)
        output = output.transpose()
    # Return an object
    elif format.lower() == "object" or format.lower() == "obj":
        output = labs
    # Default: return an object
    else:
        output = labs
    # Return a proper json
    if format.lower() == "json":
        output = json.dumps(labs_dict)
    return output
|
def tag(self, name, user, revision=None, message=None, date=None, **kwargs):
    """Create and return a tag for the given ``revision``.

    :param name: name for new tag
    :param user: full username, i.e.: "Joe Doe <joe.doe@example.com>"
    :param revision: changeset id for which new tag would be created
    :param message: message of the tag's commit
    :param date: date of tag's commit
    :raises TagAlreadyExistError: if tag with same name already exists
    """
    if name in self.tags:
        raise TagAlreadyExistError("Tag %s already exists" % name)
    changeset = self.get_changeset(revision)
    local = kwargs.setdefault('local', False)
    if message is None:
        message = "Added tag %s for changeset %s" % (name, changeset.short_id)
    if date is None:
        date = datetime.datetime.now().ctime()
    try:
        self._repo.tag(name, changeset._ctx.node(), message, local, user, date)
    except Abort as e:
        # BUG FIX: 'except Abort, e' is Python-2-only syntax, and
        # 'e.message' is deprecated; use the 'as' binding and str(e).
        raise RepositoryError(str(e))
    # Reinitialize tags so the new tag is visible immediately.
    self.tags = self._get_tags()
    tag_id = self.tags[name]
    return self.get_changeset(revision=tag_id)
|
def generic_http_header_parser_for(header_name):
    """Build a parser that extracts a request id from an HTTP header.

    :param header_name: name of the header to read from the current request
        context.
    :return: a parser returning the request id, or None when the header is
        missing or empty.
    :rtype: () -> str | None
    """
    def parser():
        # Empty or whitespace-only header values count as "not present".
        value = request.headers.get(header_name, '').strip()
        return value or None
    return parser
|
def process_auth(self):
    """Read and process an SSPI stream from the server.

    Stream info: http://msdn.microsoft.com/en-us/library/dd302844.aspx

    :raises tds_base.Error: when no authentication handler is configured.
    """
    reader = self._reader
    writer = self._writer
    pdu_size = reader.get_smallint()
    if not self.authentication:
        raise tds_base.Error('Got unexpected token')
    # Feed the whole PDU to the authentication handler; any non-empty
    # response packet must be sent back to the server immediately.
    packet = self.authentication.handle_next(readall(reader, pdu_size))
    if packet:
        writer.write(packet)
        writer.flush()
|
def accuracy(input: Tensor, targs: Tensor) -> Rank0Tensor:
    "Compute accuracy with `targs` when `input` is bs * n_classes."
    batch = targs.shape[0]
    # Predicted class = argmax over the last (class) dimension.
    preds = input.argmax(dim=-1).view(batch, -1)
    labels = targs.view(batch, -1)
    return (preds == labels).float().mean()
|
def connect(*cmds, **kwargs):
    """Connect multiple command streams together and yield the final stream.

    Args:
        cmds (list): list of commands to pipe together.  Each command's
            output becomes the next command's input.
        stdin (file-like object): stream to use as the first command's
            standard input.
        env (dict): the environment in which to execute the commands.
            PATH should be defined.  Defaults to ``os.environ``.
        timeout (int): amount of time in seconds to give the pipeline to
            complete.  The ``timeout`` utility must be installed to use
            this feature.

    Yields:
        The output stream for the final command in the pipeline.  It
        should typically be wrapped in a ``reader``.
    """
    stdin = kwargs.get("stdin")
    env = kwargs.get("env", os.environ)
    timeout = kwargs.get("timeout")
    end = len(cmds) - 1  # index of the last command in the pipeline

    @contextmanager
    def inner(idx, inp):
        # Recursively chain the commands: open cmds[idx] fed by `inp`, then
        # either yield its stream (last command) or feed it into the next
        # command.  The nested `with` blocks keep every intermediate stream
        # open until the caller is done with the final one.
        with stream(cmds[idx], stdin=inp, env=env, timeout=timeout) as s:
            if idx == end:
                yield s
            else:
                with inner(idx + 1, s) as c:
                    yield c

    with inner(0, stdin) as s:
        yield s
|
def _format_with_same_year_and_month(format_specifier):
    """Return a version of ``format_specifier`` that renders a date assuming
    it shares the year and month of another date -- usually by omitting
    year and month.

    Projects can override the result by defining a format whose name is
    ``<format_specifier>_SAME_YEAR_SAME_MONTH`` in their ``formats`` spec.
    """
    override_name = format_specifier + "_SAME_YEAR_SAME_MONTH"
    override = get_format(override_name, use_l10n=True)
    if override != override_name:
        # The project supplied an explicit override format -- use it.
        return override
    # The override name didn't resolve to anything (get_format echoes the
    # name back): strip year and month markers from the raw format string
    # with regexes instead.
    without_year = re.sub(YEAR_RE, '', get_format(format_specifier))
    return re.sub(MONTH_RE, '', without_year)
|
def get_jid(jid):
    '''Return the information returned when the specified job id was executed'''
    conn = _get_conn(ret=None)
    cur = conn.cursor()
    # Fetch every minion's full return payload recorded for this job id.
    cur.execute('''SELECT id, full_ret FROM salt_returns WHERE jid = ?''', (jid,))
    rows = cur.fetchall()
    ret = {}
    if rows:
        for minion, full_ret in rows:
            ret[minion] = salt.utils.json.loads(full_ret)
    _close_conn(conn)
    return ret
|
def _parse_timeframe_line ( self , line ) :
"""Parse timeframe line and return start and end timestamps ."""
|
tf = self . _validate_timeframe_line ( line )
if not tf :
raise MalformedCaptionError ( 'Invalid time format' )
return tf . group ( 1 ) , tf . group ( 2 )
|
def classifyplot_from_plotfiles(plot_files, out_csv, outtype="png", title=None, size=None):
    """Create a plot from individual summary CSV files with classification metrics.

    Concatenates the per-file metrics into ``out_csv`` and delegates the
    actual plotting to ``classifyplot_from_valfile``.
    """
    frames = [pd.read_csv(path) for path in plot_files]
    # Collect sample names in first-seen order across all input files.
    samples = []
    for frame in frames:
        for sample in frame["sample"].unique():
            if sample not in samples:
                samples.append(sample)
    combined = pd.concat(frames)
    combined.to_csv(out_csv, index=False)
    return classifyplot_from_valfile(out_csv, outtype, title, size, samples)
|
def check_str_length(str_to_check, limit=MAX_LENGTH):
    """Truncate a string so its UTF-8 encoding fits within ``limit`` bytes.

    :type str_to_check: str
    :param str_to_check: String to check.
    :type limit: int
    :param limit: The upper limit on the encoded byte length.
    :rtype: tuple
    :returns: The string itself if the length was not exceeded, or the
        truncated string otherwise, plus the number of bytes truncated
        (0 when nothing was cut).
    """
    encoded = str_to_check.encode(UTF8)
    truncated_byte_count = 0
    if len(encoded) > limit:
        truncated_byte_count = len(encoded) - limit
        encoded = encoded[:limit]
    # errors='ignore' drops any multi-byte character that the byte-level
    # truncation cut in half.
    return str(encoded.decode(UTF8, errors='ignore')), truncated_byte_count
|
def shrink(image, apikey):
    """Shrink a PNG image by posting its data to the TinyPNG API service.

    The response is a JSON message.  The initial request is authorized
    with HTTP Basic authorization.

    @param image: PNG image bytes sequence
    @param apikey: TinyPNG API key
    """
    def _handle_response(response):
        # Parse the JSON body; on success also fetch the shrunk image.
        body = json.loads(response.read())
        if response.code == TinyPNGResponse.SUCCESS_CODE:
            body['location'] = response.headers.getheader("Location")
            try:
                body['bytes'] = urlopen(body['location']).read()
            except Exception:
                # BUG FIX: was a bare 'except:', which also swallowed
                # KeyboardInterrupt/SystemExit.  Downloading the shrunk
                # image is best-effort; fall back to None on any failure.
                body['bytes'] = None
        return response.code, body

    auth = b64encode(bytes("api:" + apikey)).decode("ascii")
    request = Request(TINYPNG_SHRINK_URL, image)
    request.add_header("Authorization", "Basic %s" % auth)
    try:
        response = urlopen(request)
        (code, response_dict) = _handle_response(response)
    except HTTPError as e:
        # The API signals errors via HTTP status codes; the error body is
        # still a JSON message worth parsing.
        (code, response_dict) = _handle_response(e)
    return TinyPNGResponse(code, **response_dict)
|
def callback(self, event):
    """Callback function to spawn a mini-browser when a feature is clicked.

    Prints each selected feature; refuses to open browsers when more than
    `limit` features are selected at once (Python 2 code: print statement).
    """
    artist = event.artist
    ind = artist.ind
    limit = 5  # max number of selections for which browsers are spawned
    browser = True
    if len(event.ind) > limit:
        print "more than %s genes selected; not spawning browsers" % limit
        browser = False
    for i in event.ind:
        # NOTE(review): `ind` (artist.ind) is indexed with elements of
        # `event.ind`; presumably artist.ind maps picked point indices to
        # feature indices -- confirm against where the artist is built.
        feature = artist.features[ind[i]]
        print feature,
        if browser:
            self.minibrowser.plot(feature)
|
def parse_timespan(timedef):
    """Convert a string timespan definition to seconds, for example converting
    '1m30s' to 90. If *timedef* is already an int, the value will be returned
    unmodified.

    :param timedef: The timespan definition to convert to seconds.
    :type timedef: int, str
    :return: The converted value in seconds.
    :rtype: int
    """
    if isinstance(timedef, int):
        return timedef
    remainder = timedef.lower()
    if remainder.isdigit():
        # A bare number is already a count of seconds.
        return int(remainder)
    if len(remainder) == 0:
        return 0
    unit_seconds = {'w': 604800, 'd': 86400, 'h': 3600, 'm': 60, 's': 1}
    total = -1  # stays negative until at least one unit parses cleanly
    # Consume units strictly in big-to-small order: weeks, days, hours, ...
    for unit in ('w', 'd', 'h', 'm', 's'):
        pieces = remainder.split(unit)
        if len(pieces) == 1:
            # This unit is absent; keep scanning the remaining text.
            remainder = pieces[0]
            continue
        if len(pieces) > 2 or not pieces[0].isdigit():
            # Repeated unit or a non-numeric count: malformed input.
            total = -1
            break
        total = max(total, 0) + int(pieces[0]) * unit_seconds[unit]
        remainder = pieces[1]
        if not len(remainder):
            break
    if total < 0:
        raise ValueError('invalid time format')
    return total
|
def complete(self, uio, dropped=False):
    """Interactively query the user for all missing information (the src
    and dst endpoint lists) in the transaction.

    :param uio: user-interaction object used to prompt for input.
    :param dropped: when False, dropped transactions are left untouched.
    """
    if self.dropped and not dropped:
        # do nothing for dropped xn, unless specifically told to
        return
    for end in ['src', 'dst']:
        if getattr(self, end):
            continue
        # we do not have this information yet -- show context and prompt
        uio.show('\nEnter ' + end + ' for transaction:')
        uio.show('')
        uio.show(self.summary())
        try:
            endpoints = []
            remaining = self.amount
            # Keep collecting (account, amount) pairs until the full
            # transaction amount is covered.
            while remaining:
                account = uio.text(' Enter account', None)
                amount = uio.decimal(' Enter amount', default=remaining, lower=0, upper=remaining)
                endpoints.append(Endpoint(account, amount))
                remaining = self.amount - sum(map(lambda x: x.amount, endpoints))
        except ui.RejectWarning:
            # user bailed out of the prompt
            sys.exit("bye!")
        # flip amounts if it was a src outcome (sources are negative)
        if end == 'src':
            endpoints = map(lambda x: Endpoint(x.account, -x.amount), endpoints)
        # set endpoints
        setattr(self, end, endpoints)
|
def getBool(self, pchSection, pchSettingsKey):
    """Fetch a boolean setting, returning (value, error).

    Users of the system need to provide a proper default in
    default.vrsettings in the resources/settings/ directory of either the
    runtime or the driver_xxx directory.  Otherwise the default will be
    false, 0, 0.0 or "".
    """
    fn = self.function_table.getBool
    error = EVRSettingsError()
    # The underlying C API reports errors through an out-parameter.
    value = fn(pchSection, pchSettingsKey, byref(error))
    return value, error
|
def get(cls, external_id, local_user_id, provider_name, db_session=None):
    """Fetch a row by its composite primary key.

    Will use the existing object in the session if already present.

    :param external_id:
    :param local_user_id:
    :param provider_name:
    :param db_session:
    :return:
    """
    session = get_db_session(db_session)
    # Query.get() consults the session's identity map before the database.
    return session.query(cls.model).get([external_id, local_user_id, provider_name])
|
def check_levels(imls, imt, min_iml=1E-10):
    """Raise a ValueError if the given levels are invalid.

    As a side effect, a leading zero level is replaced in place by *min_iml*
    (the "cutoff"), provided the second level lies above it.

    :param imls: a list of intensity measure levels
    :param imt: the intensity measure type
    :param min_iml: minimum intensity measure level (default 1E-10)

    >>> check_levels([0.1, 0.2], 'PGA')  # ok
    >>> check_levels([], 'PGA')
    Traceback (most recent call last):
       ...
    ValueError: No imls for PGA: []
    >>> check_levels([0.2, 0.1], 'PGA')
    Traceback (most recent call last):
       ...
    ValueError: The imls for PGA are not sorted: [0.2, 0.1]
    >>> check_levels([0.2, 0.2], 'PGA')
    Traceback (most recent call last):
       ...
    ValueError: Found duplicated levels for PGA: [0.2, 0.2]
    """
    if len(imls) < 1:
        raise ValueError('No imls for %s: %s' % (imt, imls))
    elif imls != sorted(imls):
        raise ValueError('The imls for %s are not sorted: %s' % (imt, imls))
    elif len(set(imls)) < len(imls):
        # a set has as many elements as there are distinct levels
        raise ValueError("Found duplicated levels for %s: %s" % (imt, imls))
    elif imls[0] == 0:
        # apply the cutoff: a leading zero must be replaceable by min_iml
        # (guard len(imls) > 1 to avoid an IndexError on imls == [0])
        if len(imls) == 1 or imls[1] <= min_iml:
            raise ValueError("The min_iml %s=%s is larger than the second level "
                             "for %s" % (imt, min_iml, imls))
        imls[0] = min_iml
|
def WriteBlobsWithUnknownHashes(self, blobs_data):
    """Calculates hash ids and writes contents of given data blobs.

    Args:
      blobs_data: An iterable of bytes.

    Returns:
      A list of rdf_objects.BlobID objects with each blob id corresponding
      to an element in the original blobs_data argument.
    """
    # BUGFIX: materialize the iterable exactly once. Previously, a one-shot
    # iterator was consumed by the id computation, so the subsequent
    # zip(blobs_ids, blobs_data) saw an exhausted iterator and silently
    # wrote an empty mapping.
    contents = list(blobs_data)
    blobs_ids = [rdf_objects.BlobID.FromBlobData(d) for d in contents]
    self.WriteBlobs(dict(zip(blobs_ids, contents)))
    return blobs_ids
|
def write(self, timeunit, timepoints) -> None:
    """Open a new NetCDF file temporarily and call method
    |NetCDFVariableBase.write| of all handled |NetCDFVariableBase|
    objects."""
    with netcdf4.Dataset(self.filepath, "w") as ncfile:
        # declare the CF metadata convention version on the new file
        ncfile.Conventions = 'CF-1.6'
        self._insert_timepoints(ncfile, timepoints, timeunit)
        for ncvariable in self.variables.values():
            ncvariable.write(ncfile)
|
def write(self, fileobj=sys.stdout, indent=u""):
    """Recursively write an element and its children to a file."""
    fileobj.write(self.start_tag(indent))
    fileobj.write(u"\n")
    for child in self.childNodes:
        # refuse to serialize children this element type does not allow
        if child.tagName not in self.validchildren:
            raise ElementError("invalid child %s for %s" % (child.tagName, self.tagName))
        child.write(fileobj, indent + Indent)
    if self.pcdata is not None:
        fileobj.write(xmlescape(self.pcdata))
        fileobj.write(u"\n")
    fileobj.write(self.end_tag(indent))
    fileobj.write(u"\n")
|
def _list_locators(self):
    """Lists locators.

    Returns:
        generator of tuple: locator name str, locator header dict
    """
    with _handle_client_exception():
        response = self.client.get_account()
        # response[1] is the list of container headers; the 'name' key is
        # popped out and yielded alongside the remaining header fields
        for header in response[1]:
            name = header.pop('name')
            yield name, header
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.