| signature (string, lengths 29–44.1k) | implementation (string, lengths 0–85.2k) |
|---|---|
def _format_extname ( self , ext ) :
"""Pretty print given extension name and number tuple ."""
|
if ext is None :
outs = ext
else :
outs = '{0},{1}' . format ( ext [ 0 ] , ext [ 1 ] )
return outs
|
def focus_prev_sibling(self):
    """focus previous sibling of currently focussed message in thread tree"""
    mid = self.get_selected_mid()
    localroot = self._sanitize_position((mid,))
    if localroot == self.get_focus()[1]:
        # Currently on the local root: step to the previous sibling, if any.
        target = self._tree.prev_sibling_position(mid)
        if target is not None:
            target = self._sanitize_position((target,))
    else:
        # Not on the local root yet: jump back to it first.
        target = localroot
    if target is not None:
        self.body.set_focus(target)
|
def _run_and_return(self, cmd, sudo=None):
    '''Run a command, show the message to the user if quiet isn't set,
    and return the return code. This is a wrapper for the OCI client
    to run a command and easily return the return code value (what
    the user is ultimately interested in).

    Parameters
        cmd: the command (list) to run.
        sudo: whether to add sudo or not.
    '''
    sudo = self._get_sudo(sudo)
    result = self._run_command(cmd, sudo=sudo, quiet=True, return_result=True)
    # Successful return with no output.
    if len(result) == 0:
        return
    # Show the response to the user, only if not quiet.
    if not self.quiet:
        bot.println(result['message'])
    # Return the state object to the user.
    return result['return_code']
|
def export(self, nidm_version, export_dir, prepend_path):
    """Copy the file over to ``export_dir`` and create corresponding triples.

    :param nidm_version: dict describing the NIDM version (key ``'num'``).
    :param export_dir: Directory to copy the file into; no copy if ``None``.
    :param prepend_path: Path prefix for the source file, or a path ending
        in ``.zip`` when the source lives inside a zip archive.
    """
    if self.path is not None:
        if export_dir is not None:
            # Copy file only if export_dir is not None.
            new_file = os.path.join(export_dir, self.filename)
            if self.path != new_file:
                if prepend_path.endswith('.zip'):
                    # Source lives inside a zip archive: extract, then move
                    # into place under the target filename.
                    with zipfile.ZipFile(prepend_path) as z:
                        extracted = z.extract(str(self.path), export_dir)
                    shutil.move(extracted, new_file)
                else:
                    if prepend_path:
                        file_copied = os.path.join(prepend_path, self.path)
                    else:
                        file_copied = self.path
                    shutil.copy(file_copied, new_file)
                    if self.temporary:
                        os.remove(self.path)
        else:
            new_file = self.path
        if nidm_version['num'] in ["1.0.0", "1.1.0"]:
            loc = Identifier("file://./" + self.filename)
        else:
            loc = Identifier(self.filename)
        self.add_attributes([(NFO['fileName'], self.filename)])
        if export_dir:
            self.add_attributes([(PROV['atLocation'], loc)])
        if nidm_version['num'] in ("1.0.0", "1.1.0"):
            path, org_filename = os.path.split(self.path)
            # BUG FIX: compare filename strings by value (!=) rather than by
            # object identity (`is not`), which is unreliable for strings.
            if (org_filename != self.filename) and (not self.temporary):
                self.add_attributes([(NFO['fileName'], org_filename)])
        if self.is_nifti():
            # Lazily compute checksum/format only when not already set.
            if self.sha is None:
                self.sha = self.get_sha_sum(new_file)
            if self.fmt is None:
                self.fmt = "image/nifti"
            self.add_attributes([
                (CRYPTO['sha512'], self.sha),
                (DCT['format'], self.fmt),
            ])
|
def set_options_from_JSON(self, filename):
    """Load options from a JSON-formatted file.

    :param str filename: File from which to load the options.
    :raises IOError: If ``filename`` does not exist.
    """
    if not os.path.isfile(filename):
        raise IOError("File {0} not found".format(filename))
    # Use a context manager so the file handle is always closed
    # (the original left it open), and strip per-line whitespace
    # before parsing, matching the previous behavior.
    with open(filename) as handle:
        data_str = "".join(line.strip() for line in handle)
    data_dict = json.loads(data_str)
    self.set_options_from_dict(data_dict, filename)
|
def list(self, **params):
    """Retrieve all tags.

    Returns all tags available to the user, according to the parameters
    provided.

    :calls: ``get /tags``
    :param dict params: (optional) Search options.
    :return: List of dictionaries that support attribute-style access,
        which represent the collection of Tags.
    :rtype: list
    """
    response = self.http_client.get("/tags", params=params)
    return response[2]
|
def add_command_line_options(cls, parser):
    """function to inject command line parameters

    Dispatches to the argparse-based injector when the parser exposes
    ``add_argument``; otherwise falls back to the optparse-based one.
    """
    if hasattr(parser, "add_argument"):
        return cls.add_command_line_options_argparse(parser)
    return cls.add_command_line_options_optparse(parser)
|
def top_i_answers(self, i):
    """Yield the top-ranked answers.

    :param int i: how many answers to take from the front.
    :return: generator over Answer objects.
    :rtype: Answer.Iterable
    """
    for rank, answer in enumerate(self.answers):
        if rank >= i:
            return
        yield answer
|
def end_poll(args):
    """Ends a poll.

    Reads ``args.isadmin``, ``args.msg`` (the poll number as text) and
    ``args.session``; returns a human-readable status string.
    """
    # Guard clauses: permission, presence, and validity of the poll number.
    if not args.isadmin:
        return "Nope, not gonna do it."
    if not args.msg:
        return "Syntax: !vote end <pollnum>"
    if not args.msg.isdigit():
        return "Not A Valid Positive Integer."
    poll = get_open_poll(args.session, int(args.msg))
    if poll is None:
        return "That poll doesn't exist or has already been deleted!"
    if poll.active == 0:
        return "Poll already ended!"
    # Deactivate the poll; presumably persisted via args.session by the
    # caller -- confirm against the session handling.
    poll.active = 0
    return "Poll ended!"
|
def resolve_input_references(to_resolve, inputs_to_reference):
    """Resolves input references given in the string to_resolve by using the
    inputs_to_reference.

    See http://www.commonwl.org/user_guide/06-params/index.html for more
    information.

    Example:
        "$(inputs.my_file.nameroot).md" -> "filename.md"

    :param to_resolve: The path to match
    :param inputs_to_reference: Inputs which are used to resolve input
        references like $(inputs.my_input_file.basename).
    :return: A string in which the input references are replaced with
        actual values.
    """
    return ''.join(
        str(resolve_input_reference(chunk, inputs_to_reference))
        if is_input_reference(chunk) else chunk
        for chunk in split_input_references(to_resolve)
    )
|
def ticker(ctx, market):
    """Show ticker of a market"""
    ticker_data = Market(market, bitshares_instance=ctx.bitshares).ticker()
    rows = [["key", "value"]]
    for key, value in ticker_data.items():
        rows.append([key, str(value)])
    print_table(rows)
|
def _ensure_unicode_string(string):
    """Returns a unicode string for string.

    Bytes input is decoded as UTF-8; text input passes through unchanged.

    :param string: The input string.
    :type string: `basestring`
    :returns: A unicode string.
    :rtype: `unicode`
    """
    if isinstance(string, six.text_type):
        return string
    return string.decode('utf-8')
|
def deleted(self):
    """Return datetime.datetime or None if the file isn't deleted."""
    raw = self.folder.attrib.get('deleted', None)
    return None if raw is None else dateutil.parser.parse(str(raw))
|
def rai_to_raw(self, amount):
    """Multiply an rai amount by the rai ratio.

    :param amount: Amount in rai to convert to raw
    :type amount: int
    :raises: :py:exc:`nano.rpc.RPCException`

    >>> rpc.rai_to_raw(amount=1)
    100000
    """
    checked = self._process_value(amount, 'int')
    resp = self.call('rai_to_raw', {"amount": checked})
    return int(resp['amount'])
|
def resolve_tag(name, **kwargs):
    '''
    .. versionadded:: 2017.7.2
    .. versionchanged:: 2018.3.0
        Instead of matching against pulled tags using
        :py:func:`docker.list_tags <salt.modules.dockermod.list_tags>`, this
        function now simply inspects the passed image name using
        :py:func:`docker.inspect_image <salt.modules.dockermod.inspect_image>`
        and returns the first matching tag. If no matching tags are found, it
        is assumed that the passed image is an untagged image ID, and the
        full ID is returned.

    Inspects the specified image name and returns the first matching tag in
    the inspect results. If the specified image is not pulled locally, this
    function will return ``False``.

    name
        Image name to resolve. If the image is found but there are no tags,
        this means that the image name passed was an untagged image. In this
        case the image ID will be returned.

    all : False
        If ``True``, a list of all matching tags will be returned. If the
        image is found but there are no tags, then a list will still be
        returned, but it will simply contain the image ID.

        .. versionadded:: 2018.3.0

    tags
        .. deprecated:: 2018.3.0

    CLI Examples:

    .. code-block:: bash

        salt myminion docker.resolve_tag busybox
        salt myminion docker.resolve_tag centos:7 all=True
        salt myminion docker.resolve_tag c9f378ac27d9
    '''
    # Validate kwargs: anything left after clean_kwargs is invalid.
    kwargs = __utils__['args.clean_kwargs'](**kwargs)
    all_ = kwargs.pop('all', False)
    if kwargs:
        __utils__['args.invalid_kwargs'](kwargs)
    try:
        inspect_result = inspect_image(name)
        tags = inspect_result['RepoTags']
        if all_:
            if tags:
                return tags
            # If the image is untagged, don't return an empty list; return
            # the resolved ID at the end of this function instead.
        else:
            # May raise IndexError when the image has no tags (handled
            # below by falling through to the ID return).
            return tags[0]
    except CommandExecutionError:
        # No matching image pulled locally, or inspect_image otherwise failed
        return False
    except KeyError:
        log.error(
            'Inspecting docker image \'%s\' returned an unexpected data '
            'structure: %s', name, inspect_result)
    except IndexError:
        # The image passed is an untagged image ID
        pass
    return [inspect_result['Id']] if all_ else inspect_result['Id']
|
def coerce_to_decimal(value):
    """Attempt to coerce the value to a Decimal, or raise an error if unable
    to do so."""
    if not isinstance(value, decimal.Decimal):
        try:
            value = decimal.Decimal(value)
        except decimal.InvalidOperation as exc:
            raise GraphQLInvalidArgumentError(exc)
    return value
|
def log_likelihood(self, x, x_dims=(128, 128), standard=False):
    """Calculate Log Likelihood with particular mean and std.

    x must be 2D: [batch_size * eqsamples * iwsamples, num_latent].
    """
    mu, std, h, w = self.params
    # NOTE(review): the leading batch dimension 32 is hard-coded here --
    # confirm it matches the actual batch size of ``x``.
    shape = tf.stack([32, x_dims[0], x_dims[1], self.num_latent, 1, 1])
    x_reshape = tf.reshape(x, shape)
    # Constant term log(1/sqrt(2*pi)) of the Gaussian log-density.
    c = -0.5 * math.log(2 * math.pi)
    if standard is False:
        # Gaussian log-density with mean ``mu`` and std ``std``;
        # the 1e-10 epsilon guards against log(0).
        density = c - tf.log(std + 1e-10) - (x_reshape - mu) ** 2 / (2 * std ** 2)
    else:
        # Standard normal N(0, 1) log-density.
        density = c - x_reshape ** 2 / 2
    # sum over all importance weights. average over all eq_samples
    return tf.reduce_mean(tf.reduce_sum(density, axis=2), axis=(1, 2))
|
def setProperty(self, name, value):
    '''setProperty - Set a style property to a value.

    NOTE: To remove a style, use a value of empty string, or None

    @param name <str> - The style name.
        NOTE: The dash names are expected here, whereas dot-access expects
        the camel case names.
        Example: name = "font-weight" versus the dot-access style.fontWeight
    @param value <str> - The style value, or empty string to remove property
    '''
    if value in ('', None):
        # Removal: tolerate properties that were never set.
        self._styleDict.pop(name, None)
    else:
        self._styleDict[name] = str(value)
|
def convert_to_broker_id(string):
    """Convert string to kafka broker_id.

    Accepts any positive integer or the special value -1; anything else
    raises ``argparse.ArgumentTypeError``.
    """
    error_msg = 'Positive integer or -1 required, {string} given.'.format(string=string)
    try:
        broker_id = int(string)
    except ValueError:
        raise argparse.ArgumentTypeError(error_msg)
    if broker_id != -1 and broker_id <= 0:
        raise argparse.ArgumentTypeError(error_msg)
    return broker_id
|
def delete(self):
    """Delete the draft.

    Issues a ``delete:Message`` request, stores the response on ``self.data``
    and returns ``self`` for chaining.
    """
    self.data = self.session.request("delete:Message", [self.message_id])
    return self
|
def apply_diff(self, diff):
    '''Applies a diff to the label table.

    A ``diff`` is a dictionary with three keys: ``add``, ``delete``
    and ``change``. Each key should map to a list of labels.

    ``add`` corresponds to the labels that are in ``new`` but not in
    ``old``.
    ``delete`` corresponds to the labels that are in ``old`` but not
    in ``new``.
    ``change`` corresponds to the labels that are in both ``old``
    and ``new`` but have different coref/rating values.
    '''
    # add and change are easy---just put the labels.
    # delete is a little trickier. We need to scrub out the impact of
    # the previous label without actually deleting it. For this, we
    # use an unknown coref value.
    # NOTE(review): this assumes ``lab.update(...)`` returns a new label
    # rather than mutating in place and returning None -- confirm against
    # the label class, otherwise Nones get inserted here.
    insert = (diff['add'] + diff['change'] + [lab.update(value=CorefValue.Unknown) for lab in diff['delete']])
    self.put(*insert)
|
def bfs(graph, start):
    """Finds the shortest string using BFS.

    Args:
        graph (DFA): The DFA states
        start (DFA state): The DFA initial state

    Returns:
        str: The shortest string, or None when no final state is reachable.
    """
    # Local import keeps this self-contained; deque gives O(1) pops from
    # the left (list.pop(0) is O(n)).
    from collections import deque

    # Each queue entry is a path: a list of [input_char, state] steps.
    queue = deque([[['', start]]])
    # stateids already expanded; a set gives O(1) membership tests
    # (the original used a list, making each check O(n)).
    visited = set()
    while queue:
        path = queue.popleft()
        # get the last node from the path
        node = path[-1][1]
        if node.stateid in visited:
            continue
        visited.add(node.stateid)
        # A non-infinite final weight marks an accepting state: path found.
        if node.final != TropicalWeight(float('inf')):
            return "".join(step[0] for step in path)
        # Enumerate all adjacent nodes, construct a new path and push it
        # into the queue.
        for arc in node.arcs:
            char = graph.isyms.find(arc.ilabel)
            next_state = graph[arc.nextstate]
            if next_state.stateid not in visited:
                queue.append(path + [[char, next_state]])
|
def get_allcurrencycodes(self):
    """Return an iterable of distinct 3 character ISO 4217 currency codes.

    Yields each code the first time it is encountered, preserving document
    order.
    """
    # Use a set for O(1) de-duplication (the original used list membership,
    # which is O(n) per element).
    seen = set()
    for codeelement in self.currencies[0].iter('Ccy'):
        code = codeelement.text
        if code not in seen:
            seen.add(code)
            yield code
|
def write_fits(data, header, file_name):
    """Combine data and a fits header to write a fits file.

    Parameters
        data : numpy.ndarray
            The data to be written.
        header : astropy.io.fits.hduheader
            The header for the fits file.
        file_name : string
            The file to write.

    Returns
        None
    """
    hdu = fits.PrimaryHDU(data)
    hdu.header = header
    fits.HDUList([hdu]).writeto(file_name, overwrite=True)
    logging.info("Wrote {0}".format(file_name))
|
def update(self, values, copy_instance=False):
    """Updates the configuration with the contents of the given configuration
    object or dictionary.

    In case of a dictionary, only valid attributes for this class are
    considered. Existing attributes are replaced with the new values. The
    object is not cleaned before or after, i.e. may accept invalid input.
    In case of an update by object, that object is cleaned before the
    update, so that updated values should be validated. However,
    already-stored values are not cleaned before or after.

    :param values: Dictionary or ConfigurationObject to update this
        configuration with.
    :type values: dict | ConfigurationObject
    :param copy_instance: Copies lists and dictionaries. Only has an effect
        if ``values`` is a ConfigurationObject.
    :type copy_instance: bool
    """
    if isinstance(values, self.__class__):
        self.update_from_obj(values, copy=copy_instance)
        return
    if isinstance(values, dict):
        self.update_from_dict(values)
        return
    raise ValueError("{0} or dictionary expected; found '{1}'.".format(
        self.__class__.__name__, type(values).__name__))
|
def execute(self, triple_map, output, **kwargs):
    """Method iterates through triple map's predicate object maps
    and processes query.

    Args:
        triple_map (SimpleNamespace): Triple Map
        output: graph-like object supporting ``add((s, p, o))``; generated
            triples are added to it.
    """
    sparql = PREFIX + triple_map.logicalSource.query.format(**kwargs)
    bindings = self.__get_bindings__(sparql)
    iterator = str(triple_map.logicalSource.iterator)
    for binding in bindings:
        entity_dict = binding.get(iterator)
        if isinstance(entity_dict, rdflib.term.Node):
            # Binding already is an rdflib node; use it directly.
            entity = entity_dict
        elif isinstance(entity_dict, dict):
            # SPARQL-JSON style binding: build a BNode or URIRef.
            raw_value = entity_dict.get('value')
            if entity_dict.get('type').startswith('bnode'):
                entity = rdflib.BNode(raw_value)
            else:
                entity = rdflib.URIRef(raw_value)
        # NOTE(review): if the binding is neither a Node nor a dict,
        # ``entity`` below is unbound (NameError) -- confirm bindings can
        # only take those two shapes.
        if triple_map.subjectMap.class_ is not None:
            output.add((entity, rdflib.RDF.type, triple_map.subjectMap.class_))
        sparql_query = self.__construct_compound_query__(triple_map).format(**kwargs)
        properties = self.__get_bindings__(sparql_query)
        for pred_obj_map in triple_map.predicateObjectMap:
            predicate = pred_obj_map.predicate
            if pred_obj_map.constant is not None:
                output.add((entity, predicate, pred_obj_map.constant))
                continue
            # Derive the binding key from the predicate's local name
            # (fragment after '#', else last path segment).
            if "#" in str(predicate):
                key = str(predicate).split("#")[-1]
            else:
                key = str(predicate).split("/")[-1]
            for property_ in properties:
                if key in property_.keys():
                    info = {"about": property_.get(key)}
                    object_ = __get_object__(info)
                    output.add((entity, predicate, object_))
|
def store_push_data(repository, pushes):
    """Store push data in the treeherder database.

    ``pushes`` is a list of push dicts with keys such as ``revision``,
    ``push_timestamp``, ``author`` and ``revisions``. Logs and returns
    early when there is nothing to store.
    """
    if not pushes:
        logger.info("No new pushes to store")
        return
    for push in pushes:
        store_push(repository, push)
|
def adx(self, n, array=False):
    """ADX indicator (Average Directional Index).

    Returns the full series when ``array`` is truthy, otherwise only the
    most recent value.
    """
    values = talib.ADX(self.high, self.low, self.close, n)
    return values if array else values[-1]
|
def delete_autostart_entry():
    """Remove a present autostart entry. If none is found, nothing happens."""
    entry = Path(common.AUTOSTART_DIR) / "autokey.desktop"
    if not entry.exists():
        return
    entry.unlink()
    _logger.info("Deleted old autostart entry: {}".format(entry))
|
def get_by_example_hash(cls, collection, index_id, example_data, allow_multiple=False, skip=None, limit=None):
    """This will find all documents matching a given example, using the
    specified hash index.

    :param collection: Collection instance
    :param index_id: ID of the index which should be used for the query
    :param example_data: The example document
    :param allow_multiple: If the query can return multiple documents
    :param skip: The number of documents to skip in the query
    :param limit: The maximal amount of documents to return. The skip is
        applied before the limit restriction.
    :returns: Single document / Document list
    """
    extra = {
        'index': index_id,
        'skip': skip,
        'limit': limit,
    }
    return cls._construct_query(
        name='by-example-hash',
        collection=collection,
        example=example_data,
        multiple=allow_multiple,
        **extra)
|
def internal_get_next_statement_targets(dbg, seq, thread_id, frame_id):
    '''gets the valid line numbers for use with set next statement

    Builds an XML list of candidate line numbers for the given frame and
    sends it (or an error message) back through the debugger's writer.
    '''
    try:
        frame = dbg.find_frame(thread_id, frame_id)
        if frame is not None:
            code = frame.f_code
            xml = "<xml>"
            if hasattr(code, 'co_lnotab'):
                # Walk the code object's line-number table: every second
                # byte is a line-number delta from the previous entry.
                lineno = code.co_firstlineno
                lnotab = code.co_lnotab
                for i in itertools.islice(lnotab, 1, len(lnotab), 2):
                    if isinstance(i, int):
                        lineno = lineno + i
                    else:
                        # in python 2 elements in co_lnotab are of type str
                        lineno = lineno + ord(i)
                    xml += "<line>%d</line>" % (lineno,)
            else:
                # No line table available: offer only the current line.
                xml += "<line>%d</line>" % (frame.f_lineno,)
            # Drop the frame reference promptly to avoid reference cycles.
            del frame
            xml += "</xml>"
            cmd = dbg.cmd_factory.make_get_next_statement_targets_message(seq, xml)
            dbg.writer.add_command(cmd)
        else:
            cmd = dbg.cmd_factory.make_error_message(seq, "Frame not found: %s from thread: %s" % (frame_id, thread_id))
            dbg.writer.add_command(cmd)
    except:
        # Deliberately broad: any failure is reported back to the IDE
        # instead of crashing the debugger loop.
        cmd = dbg.cmd_factory.make_error_message(seq, "Error resolving frame: %s from thread: %s" % (frame_id, thread_id))
        dbg.writer.add_command(cmd)
|
def aeration_data(DO_column, dirpath):
    """Extract the data from a folder containing tab delimited files of
    aeration data.

    The file must be the original tab delimited file. All text strings below
    the header must be removed from these files. The file names must be the
    air flow rates with units of micromoles/s. An example file name would be
    "300.xls" where 300 is the flow rate in micromoles/s.

    :param DO_column: Index of the column that contains the dissolved oxygen
        concentration data.
    :type DO_column: int
    :param dirpath: Path to the directory containing aeration data you want
        to analyze.
    :type dirpath: string
    :return: named tuple with fields:
        * **filepaths** (*string list*) - All file paths in the directory
          sorted by flow rate
        * **airflows** (*numpy.array*) - Sorted array of air flow rates with
          units of micromole/s
        * **DO_data** (*numpy.array list*) - Sorted list of Numpy arrays of
          dissolved-oxygen concentrations
        * **time_data** (*numpy.array list*) - Sorted list of Numpy arrays
          containing the times with units of seconds
    """
    # return the list of files in the directory
    filenames = os.listdir(dirpath)
    # extract the flowrates from the filenames (text before the first '.')
    airflows = ((np.array([i.split('.', 1)[0] for i in filenames])).astype(np.float32))
    # sort airflows and filenames so that they are in ascending order of flow rates
    idx = np.argsort(airflows)
    airflows = (np.array(airflows)[idx]) * u.umole / u.s
    filenames = np.array(filenames)[idx]
    filepaths = [os.path.join(dirpath, i) for i in filenames]
    # DO_data is a list of numpy arrays; each array can have a different
    # length to accommodate short and long experiments.
    # Cycle through all of the files and extract the column of data with
    # oxygen concentrations and the times.
    DO_data = [column_of_data(i, 0, DO_column, - 1, 'mg/L') for i in filepaths]
    time_data = [(column_of_time(i, 0, - 1)).to(u.s) for i in filepaths]
    aeration_collection = collections.namedtuple('aeration_results', 'filepaths airflows DO_data time_data')
    aeration_results = aeration_collection(filepaths, airflows, DO_data, time_data)
    return aeration_results
|
def _initialize_if_needed():
    """Initialize ``TENSOR_SPACE_IMPLS`` if not already done."""
    global IS_INITIALIZED, TENSOR_SPACE_IMPLS
    if IS_INITIALIZED:
        return
    # pkg_resources has long import time, so only import it lazily here.
    from pkg_resources import iter_entry_points
    for entry_point in iter_entry_points(group='odl.space', name=None):
        try:
            module = entry_point.load()
        except ImportError:
            # Skip implementations whose backend is not installed.
            pass
        else:
            TENSOR_SPACE_IMPLS.update(module.tensor_space_impls())
    IS_INITIALIZED = True
|
def bootstrap_javascript(jquery=None):
    """Return HTML for Bootstrap JavaScript.

    Adjust url in settings. If no url is returned, we don't want this
    statement to return any HTML. This is intended behavior.

    Default value: ``None``

    This value is configurable, see Settings section

    **Tag name**::

        bootstrap_javascript

    **Parameters**:

        :jquery: Truthy to include jQuery as well as Bootstrap

    **Usage**::

        {% bootstrap_javascript %}

    **Example**::

        {% bootstrap_javascript jquery=1 %}
    """
    # Fall back to the configured setting when not explicitly requested.
    if jquery is None:
        jquery = get_bootstrap_setting("include_jquery", False)
    pieces = []
    # NOTE: No async on scripts, not mature enough. See issue #52 and #56
    if jquery:
        jquery_url = bootstrap_jquery_url()
        if jquery_url:
            pieces.append(render_script_tag(jquery_url))
    bootstrap_url = bootstrap_javascript_url()
    if bootstrap_url:
        pieces.append(render_script_tag(bootstrap_url))
    return mark_safe("".join(pieces))
|
def get_pattern_correlations(data):
    """Gets the average correlation between all bits in patterns, across the
    entire dataset.

    Assumes input is a sparse matrix. Weighted by pattern rather than by
    bit; this is the average pairwise correlation for every pattern in the
    data, and is not the average pairwise correlation for all bits that
    ever cooccur. This is a subtle but important difference.
    """
    num_rows = data.nRows()
    patterns = [data.rowNonZeros(row)[0] for row in range(num_rows)]
    # Column-wise (bit-wise) correlation matrix; NaNs from constant
    # columns are mapped to zero.
    correlations = numpy.nan_to_num(numpy.corrcoef(data.toDense(), rowvar=False))
    per_pattern = []
    for pattern in patterns:
        per_pattern.append(
            [correlations[i, j] for i in pattern for j in pattern if i != j])
    return numpy.mean(per_pattern)
|
def drop_column(self, name):
    """Drops a Column from the Table.

    :param name: The name of the column
    :type name: str
    :rtype: Table
    """
    key = self._normalize_identifier(name)
    del self._columns[key]
    return self
|
def _create_environment(config):
    """Constructor for an instance of the environment.

    Args:
        config: Object providing configurations via attributes.

    Raises:
        NotImplementedError: For action spaces other than Box and Discrete.

    Returns:
        Wrapped OpenAI Gym environment.
    """
    # config.env is either a gym id string or a zero-arg factory.
    env = gym.make(config.env) if isinstance(config.env, str) else config.env()
    if config.max_length:
        env = tools.wrappers.LimitDuration(env, config.max_length)
    action_space = env.action_space
    if isinstance(action_space, gym.spaces.Box):
        if config.normalize_ranges:
            env = tools.wrappers.RangeNormalize(env)
        env = tools.wrappers.ClipAction(env)
    elif isinstance(action_space, gym.spaces.Discrete):
        if config.normalize_ranges:
            env = tools.wrappers.RangeNormalize(env, action=False)
    else:
        raise NotImplementedError(
            "Unsupported action space '{}'".format(type(action_space)))
    env = tools.wrappers.ConvertTo32Bit(env)
    return tools.wrappers.CacheSpaces(env)
|
def get_folders(cls, session, mailbox_or_id):
    """List the folders for the mailbox.

    Args:
        mailbox_or_id (helpscout.models.Mailbox or int): Mailbox or the ID
            of the mailbox to get the folders for.

    Returns:
        RequestPaginator(output_type=helpscout.models.Folder): Folders
            iterator.
    """
    mailbox_id = mailbox_or_id.id if isinstance(mailbox_or_id, Mailbox) else mailbox_or_id
    return cls(
        '/mailboxes/%d/folders.json' % mailbox_id,
        session=session,
        out_type=Folder,
    )
|
def cmbuild_from_alignment(aln, structure_string, refine=False, return_alignment=False, params=None):
    """Uses cmbuild to build a CM file given an alignment and structure string.

    - aln: an Alignment object or something that can be used to construct
      one. All sequences must be the same length.
    - structure_string: vienna structure string representing the consensus
      structure for the sequences in aln. Must be the same length as the
      alignment.
    - refine: refine the alignment and realign before building the cm.
      (Default=False)
    - return_alignment: Return (in Stockholm format) alignment file used to
      construct the CM file. This will either be the original alignment and
      structure string passed in, or the refined alignment if --refine was
      used. (Default=False)
      Note: this will be a string that can either be written to a file or
      parsed.
    """
    aln = Alignment(aln)
    if len(structure_string) != aln.SeqLen:
        # BUG FIX: use the call form of raise; the legacy
        # ``raise ValueError, msg`` statement is a SyntaxError on Python 3.
        raise ValueError(
            """Structure string is not same length as alignment. Structure string is %s long. Alignment is %s long."""
            % (len(structure_string), aln.SeqLen))
    struct_dict = {'SS_cons': structure_string}
    # Make new Cmbuild app instance.
    app = Cmbuild(InputHandler='_input_as_paths', WorkingDir='/tmp', params=params)
    # turn on refine flag if True.
    if refine:
        _, tmp_file = mkstemp(dir=app.WorkingDir)
        app.Parameters['--refine'].on(tmp_file)
    # Get alignment in Stockholm format.
    aln_file_string = stockholm_from_alignment(aln, GC_annotation=struct_dict)
    # get path to alignment filename
    aln_path = app._input_as_multiline_string(aln_file_string)
    cm_path = aln_path.split('.txt')[0] + '.cm'
    app.Parameters['-n'].on(cm_path)
    filepaths = [cm_path, aln_path]
    res = app(filepaths)
    cm_file = res['CmFile'].read()
    if return_alignment:
        # If alignment was refined, return refined alignment and structure,
        # otherwise return original alignment and structure.
        if refine:
            aln_file_string = res['Refined'].read()
        res.cleanUp()
        return cm_file, aln_file_string
    # Just return cm_file.
    res.cleanUp()
    return cm_file
|
def four_element_average_dask(d):
    """Average every 4 elements (2x2) in a 2D array."""
    try:
        offset = d.attrs['area'].crop_offset
    except (KeyError, AttributeError):
        # No crop information available: assume no offset.
        offset = (0, 0)
    averaged = d.data.map_blocks(_mean4, offset=offset, dtype=d.dtype)
    return xr.DataArray(averaged, attrs=d.attrs, dims=d.dims, coords=d.coords)
|
def simxReadDistance(clientID, distanceObjectHandle, operationMode):
    '''Please have a look at the function description/documentation in the
    V-REP user manual'''
    minimumDistance = ct.c_float()
    ret = c_ReadDistance(clientID, distanceObjectHandle, ct.byref(minimumDistance), operationMode)
    return ret, minimumDistance.value
|
def fit(self, frequency, recency, n_periods, weights=None, initial_params=None, verbose=False, tol=1e-7, index=None, **kwargs):
    """Fit the BG/BB model.

    Parameters
        frequency: array_like
            Total periods with observed transactions
        recency: array_like
            Period of most recent transaction
        n_periods: array_like
            Number of transaction opportunities. Previously called `n`.
        weights: None or array_like
            Number of customers with given frequency/recency/T, defaults to
            1 if not specified. Fader and Hardie condense the individual RFM
            matrix into all observed combinations of frequency/recency/T.
            This parameter represents the count of customers with a given
            purchase pattern. Instead of calculating individual
            log-likelihood, the log-likelihood is calculated for each
            pattern and multiplied by the number of customers with that
            pattern. Previously called `n_custs`.
        verbose: boolean, optional
            Set to true to print out convergence diagnostics.
        tol: float, optional
            Tolerance for termination of the function minimization process.
        index: array_like, optional
            Index for resulted DataFrame which is accessible via self.data
        kwargs:
            Key word arguments to pass to the scipy.optimize.minimize
            function as options dict

    Returns
        BetaGeoBetaBinomFitter
            fitted and with parameters estimated
    """
    frequency = np.asarray(frequency).astype(int)
    recency = np.asarray(recency).astype(int)
    n_periods = np.asarray(n_periods).astype(int)
    if weights is None:
        weights = np.ones_like(recency)
    else:
        weights = np.asarray(weights)
    _check_inputs(frequency, recency, n_periods)
    log_params_, self._negative_log_likelihood_, self._hessian_ = self._fit((frequency, recency, n_periods, weights, self.penalizer_coef), initial_params, 4, verbose, tol, **kwargs)
    # Parameters are optimized in log-space; exponentiate back.
    self.params_ = pd.Series(np.exp(log_params_), index=["alpha", "beta", "gamma", "delta"])
    self.data = DataFrame({"frequency": frequency, "recency": recency, "n_periods": n_periods, "weights": weights}, index=index)
    # NOTE(review): ``sum(...)`` over a generator of lists without a ``[]``
    # start value raises TypeError when this lambda is invoked -- confirm;
    # likely ``sum([...], [])`` was intended to concatenate the lists.
    self.generate_new_data = lambda size=1: beta_geometric_beta_binom_model(  # Making a large array replicating n by n_custs having n.
        np.array(sum([n_] * n_cust for (n_, n_cust) in zip(n_periods, weights))), *self._unload_params("alpha", "beta", "gamma", "delta"), size=size)
    self.variance_matrix_ = self._compute_variance_matrix()
    self.standard_errors_ = self._compute_standard_errors()
    self.confidence_intervals_ = self._compute_confidence_intervals()
    return self
|
def serialize_unix(attr, **kwargs):
    """Serialize a Datetime object into Unix IntTime format (whole seconds
    since the epoch).

    :param Datetime attr: Object to be serialized.
    :rtype: int
    :raises: SerializationError if format invalid
    """
    # Integers are assumed to already be epoch seconds.
    if isinstance(attr, int):
        return attr
    try:
        if not attr.tzinfo:
            _LOGGER.warning("Datetime with no tzinfo will be considered UTC.")
        epoch_seconds = calendar.timegm(attr.utctimetuple())
        return int(epoch_seconds)
    except AttributeError:
        # Anything without datetime's attributes is rejected outright.
        raise TypeError("Unix time object must be valid Datetime object.")
|
def get_connection_status(self, connection_id):
    """Get status of the connection during Role enforcement.

    Returns the connection's status, or None when the id is not tracked.
    """
    with self._connections_lock:
        try:
            return self._connections[connection_id].status
        except KeyError:
            # Unknown connection id.
            return None
|
def eval_string(self, s):
    """Returns the tristate value of the expression 's', represented as 0, 1,
    and 2 for n, m, and y, respectively. Raises KconfigError if syntax
    errors are detected in 's'. Warns if undefined symbols are referenced.

    As an example, if FOO and BAR are tristate symbols at least one of
    which has the value y, then config.eval_string("y && (FOO || BAR)")
    returns 2 (y).

    To get the string value of non-bool/tristate symbols, use
    Symbol.str_value. eval_string() always returns a tristate value, and
    all non-bool/tristate symbols have the tristate value 0 (n).

    The expression parsing is consistent with how parsing works for
    conditional ('if ...') expressions in the configuration, and matches
    the C implementation. m is rewritten to 'm && MODULES', so
    eval_string("m") will return 0 (n) unless modules are enabled.
    """
    # The parser is optimized to be fast when parsing Kconfig files (where
    # an expression can never appear at the beginning of a line). We have
    # to monkey-patch things a bit here to reuse it.
    self._filename = None
    # Don't include the "if " from below to avoid giving confusing error
    # messages
    self._line = s
    # Prefix with 'if ' so the standard conditional-expression parser can
    # be reused unchanged.
    self._tokens = self._tokenize("if " + s)
    # Skip the 'if' token
    self._tokens_i = 1
    return expr_value(self._expect_expr_and_eol())
|
def require_flush(fun):
    """Decorator for methods that need to query security.

    It ensures all security related operations are flushed to DB, but
    avoids unneeded flushes.
    """
    @wraps(fun)
    def ensure_flushed(service, *args, **kwargs):
        if service.app_state.needs_db_flush:
            session = db.session()
            if not session._flushing:
                # Flush only when security-related rows are actually pending.
                pending_objects = (
                    m
                    for models in (session.new, session.dirty, session.deleted)
                    for m in models
                )
                if any(isinstance(m, (RoleAssignment, SecurityAudit)) for m in pending_objects):
                    session.flush()
            service.app_state.needs_db_flush = False
        return fun(service, *args, **kwargs)

    return ensure_flushed
|
def get(self, key, default=None, remote=False):
    """Overrides dictionary get behavior to retrieve database objects with
    support for returning a default.

    If ``remote=True`` a remote request is made to retrieve the database
    from the remote server; otherwise the client's locally cached database
    object is returned.

    :param str key: Database name used to retrieve the database object.
    :param str default: Default database name.  Defaults to None.
    :param bool remote: Dictates whether the locally cached database is
        returned or a remote request is made.  Defaults to False.
    :returns: Database object
    """
    if remote:
        database = self._DATABASE_CLASS(self, key)
        if not database.exists():
            return default
        # Cache the freshly fetched database locally before returning it.
        super(CouchDB, self).__setitem__(key, database)
        return database
    return super(CouchDB, self).get(key, default)
|
def import_from_string(value):
    """Copy of rest_framework.settings.import_from_string

    Resolves a dotted path such as ``"pkg.module.attr"`` to the named
    attribute, normalizing dashes to underscores first.
    """
    dotted = value.replace('-', '_')
    try:
        module_path, class_name = dotted.rsplit('.', 1)
        return getattr(import_module(module_path), class_name)
    except (ImportError, AttributeError) as ex:
        raise ImportError("Could not import '{}'. {}: {}.".format(dotted, ex.__class__.__name__, ex))
|
def assignrepr_values(values, prefix, width=None, _fakeend=0):
    """Return a prefixed, wrapped and properly aligned string representation
    of the given values using function |repr|.

    >>> from hydpy.core.objecttools import assignrepr_values
    >>> print(assignrepr_values(range(1, 13), 'test(', 20) + ')')
    test(1, 2, 3, 4, 5,
         6, 7, 8, 9, 10,
         11, 12)

    If no width is given, no wrapping is performed:

    >>> print(assignrepr_values(range(1, 13), 'test(') + ')')
    test(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12)

    To circumvent defining too long string representations, make use of the
    ellipsis option:

    >>> from hydpy import pub
    >>> with pub.options.ellipsis(1):
    ...     print(assignrepr_values(range(1, 13), 'test(', 20) + ')')
    test(1, ..., 12)

    >>> with pub.options.ellipsis(5):
    ...     print(assignrepr_values(range(1, 13), 'test(', 20) + ')')
    test(1, 2, 3, 4, 5,
         ..., 8, 9, 10,
         11, 12)

    >>> with pub.options.ellipsis(6):
    ...     print(assignrepr_values(range(1, 13), 'test(', 20) + ')')
    test(1, 2, 3, 4, 5,
         6, 7, 8, 9, 10,
         11, 12)
    """
    # Honour the global ellipsis option: when enabled and the sequence is
    # long enough, show only the first and last `ellipsis_` values.
    ellipsis_ = hydpy.pub.options.ellipsis
    if (ellipsis_ > 0) and (len(values) > 2 * ellipsis_):
        string = (repr_values(values[:ellipsis_]) + ', ...,' + repr_values(values[-ellipsis_:]))
    else:
        string = repr_values(values)
    # Continuation lines are aligned under the end of the prefix.
    blanks = ' ' * len(prefix)
    if width is None:
        # No wrapping requested; the fake-end padding is then pointless.
        wrapped = [string]
        _fakeend = 0
    else:
        width -= len(prefix)
        # `_fakeend` underscores are appended before wrapping and stripped
        # again below, so callers can reserve room for a closing suffix.
        wrapped = textwrap.wrap(string + '_' * _fakeend, width)
    if not wrapped:
        wrapped = ['']
    lines = []
    for (idx, line) in enumerate(wrapped):
        if idx == 0:
            lines.append('%s%s' % (prefix, line))
        else:
            lines.append('%s%s' % (blanks, line))
    string = '\n'.join(lines)
    # Strip the `_fakeend` padding characters appended above.
    return string[:len(string) - _fakeend]
|
def download_contents(self, to_folder=None):
    """This will download each file and folder sequentially.

    Caution when downloading big folder structures.

    :param drive.Folder to_folder: folder where to store the contents
    """
    target = to_folder or Path()
    if not target.exists():
        target.mkdir()
    query = self.new_query().select('id', 'size')
    for item in self.get_items(query=query):
        if item.is_folder and item.child_count > 0:
            # Recurse into non-empty subfolders, mirroring the structure.
            item.download_contents(to_folder=target / item.name)
        else:
            item.download(target)
|
def filter_slaves(self, slaves):
    "Remove slaves that are in an ODOWN or SDOWN state"
    return [
        (slave['ip'], slave['port'])
        for slave in slaves
        if not (slave['is_odown'] or slave['is_sdown'])
    ]
|
def hdrval(cls):
    """Construct dictionary mapping display column title to
    IterationStats entries.
    """
    # Insertion order matters for display: iteration counter first, then
    # the objective-function columns, then the residuals and rho.
    mapping = {'Itn': 'Iter'}
    mapping.update(cls.hdrval_objfun)
    mapping['r'] = 'PrimalRsdl'
    mapping['s'] = 'DualRsdl'
    mapping[u('ρ')] = 'Rho'
    return mapping
|
def add_ssh_options(self, parser):
    """Add the SSH arguments to the `parser`."""
    ssh_arguments = (
        ("--username", dict(metavar='USER', help="Username for the SSH connection.")),
        ("--boot-only", dict(action="store_true", help="Only use the IP addresses on the machine's boot interface.")),
    )
    for flag, options in ssh_arguments:
        parser.add_argument(flag, **options)
|
def require_request_model(cls, validate=True):
    """Makes a handler require that a request body that maps onto the given
    model is provided.  Unless the ``validate`` option is set to ``False``
    the data will be validated against the model's fields.

    The model will be passed to the handler as the last positional argument::

        @require_request_model(Model)
        async def handle_model(request, model):
            return 200, model
    """
    def decorator(handler):
        async def wrapped(request, *args, **kwargs):
            # Build the model straight from the JSON body's keys.
            model = cls(**(await request.json()))
            if validate:
                model.validate()
            return await handler(request, model, *args, **kwargs)
        return wrapped
    return decorator
|
def sort(records: Sequence[Record]) -> List[Record]:
    "Sort records into a canonical order, suitable for comparison."
    # Delegate the ordering to the module-level _record_key so that all
    # callers agree on the same canonical ordering.
    return sorted(records, key=_record_key)
|
def _ReadPropertySet(self, property_set):
    """Reads properties from a property set.

    Sections whose class identifier does not match this plugin's are
    skipped.  Values of multiple matching sections are combined, but
    properties that are already set are never overridden.

    Args:
      property_set (pyolecf.property_set): OLECF property set.
    """
    # Combine the values of multiple property sections
    # but do not override properties that are already set.
    for property_section in property_set.sections:
        if property_section.class_identifier != self._CLASS_IDENTIFIER:
            continue
        for property_value in property_section.properties:
            property_name = self._PROPERTY_NAMES.get(property_value.identifier, None)
            if not property_name:
                # Fall back to the identifier rendered in hexadecimal, e.g.
                # "0x00ff".  The previous format string ('0x{0:04}') printed
                # the value in decimal despite the "0x" prefix.
                property_name = '0x{0:04x}'.format(property_value.identifier)
            value = self._GetValueAsObject(property_value)
            if self._PROPERTY_VALUE_MAPPINGS:
                value_callback_name = self._PROPERTY_VALUE_MAPPINGS.get(property_name, None)
                if value_callback_name:
                    value_callback_method = getattr(self, value_callback_name, None)
                    if value_callback_method:
                        value = value_callback_method(value)
            if property_name in self._DATE_TIME_PROPERTIES:
                properties_dict = self.date_time_properties
                # Date/time values are wrapped as FILETIME timestamps.
                value = dfdatetime_filetime.Filetime(timestamp=value)
            else:
                properties_dict = self._properties
            if property_name not in properties_dict:
                properties_dict[property_name] = value
|
def dict_to_attributes_code(dict_):
    """Given a nested dict, generate a python code equivalent.

    Example:
        >>> d = {'colors': {'red': 1}}
        >>> print(dict_to_attributes_code(d))
        colors.red = 1

    Returns:
        str.
    """
    lines = []
    # `items` (rather than the Python-2-only `iteritems`) keeps this helper
    # working on both Python 2 and Python 3.
    for key, value in dict_.items():
        if isinstance(value, dict):
            # Recurse, then prefix each generated attribute with this key.
            txt = dict_to_attributes_code(value)
            lines_ = txt.split('\n')
            for line in lines_:
                # Continuation lines (already indented) must not be prefixed.
                if not line.startswith(' '):
                    line = "%s.%s" % (key, line)
                lines.append(line)
        else:
            value_txt = pformat(value)
            if '\n' in value_txt:
                # Multi-line value: emit a line continuation and indent it.
                lines.append("%s = \\" % key)
                value_txt = indent(value_txt)
                lines.extend(value_txt.split('\n'))
            else:
                line = "%s = %s" % (key, value_txt)
                lines.append(line)
    return '\n'.join(lines)
|
def get_primary_text(self, item_url):
    """Retrieve the primary text for the given item from the cache.

    :type item_url: String or Item
    :param item_url: the URL of the item, or an Item object
    :rtype: String
    :returns: the primary text
    :raises: ValueError if the primary text is not in the cache
    """
    cursor = self.conn.cursor()
    try:
        cursor.execute("SELECT * FROM primary_texts WHERE item_url=?", (str(item_url),))
        row = cursor.fetchone()
    finally:
        # Always release the cursor, even when the query raises.
        cursor.close()
    if row is None:
        raise ValueError("Item not present in cache")
    # Column 0 is the item URL; column 1 holds the primary text.
    return row[1]
|
def change_event_type(self):
    """Action: change highlighted event's type by cycling through event
    type list.
    """
    if self.current_event is None:
        return
    # Remember the highlight rectangle's parameters so it can be rebuilt
    # after the annotations are redisplayed below.
    hl_params = self.highlight.params
    self.scene.removeItem(self.highlight)
    ev = self.current_event
    new_name = self.parent.notes.change_event_type(name=ev['name'], time=(ev['start'], ev['end']), chan=ev['chan'])
    # Compose the status message before ev['name'] is overwritten.
    msg = "Event from {} to {} changed type from '{}' to '{}'".format(ev['start'], ev['end'], ev['name'], new_name)
    ev['name'] = new_name
    self.current_event = ev
    self.current_etype = new_name
    self.parent.notes.idx_eventtype.setCurrentText(new_name)
    self.parent.statusBar().showMessage(msg)
    self.display_annotations()
    # NOTE(review): the highlight marker is re-created after
    # display_annotations(); presumably the redraw invalidates the old
    # marker — confirm before simplifying.
    self.highlight = RectMarker(*hl_params)
    self.scene.addItem(self.highlight)
|
def export(self):
    """Return a dictionary with all album information.

    Use the :meth:`from_export` method to recreate the :class:`Album`
    object.
    """
    return {
        'id': self.id,
        'name': self.name,
        'artist': self._artist_name,
        'artist_id': self._artist_id,
        'cover': self._cover_url,
    }
|
def parse_segment(text, version=None, encoding_chars=None, validation_level=None, reference=None):
    """Parse the given ER7-encoded segment and return an instance of
    :class:`Segment <hl7apy.core.Segment>`.

    :type text: ``str``
    :param text: the ER7-encoded string containing the segment to be parsed
    :type version: ``str``
    :param version: the HL7 version (e.g. "2.5"), or ``None`` to use the default
        (see :func:`set_default_version <hl7apy.set_default_version>`)
    :type encoding_chars: ``dict``
    :param encoding_chars: a dictionary containing the encoding chars or None
        to use the default (see :func:`set_default_encoding_chars
        <hl7apy.set_default_encoding_chars>`)
    :type validation_level: ``int``
    :param validation_level: the validation level.  Possible values are those
        defined in :class:`VALIDATION_LEVEL <hl7apy.consts.VALIDATION_LEVEL>`
        class or ``None`` to use the default validation level (see
        :func:`set_default_validation_level <hl7apy.set_default_validation_level>`)
    :type reference: ``dict``
    :param reference: a dictionary containing the element structure returned by
        :func:`load_reference <hl7apy.load_reference>`, :func:`find_reference
        <hl7apy.find_reference>` or belonging to a message profile
    :return: an instance of :class:`Segment <hl7apy.core.Segment>`

    >>> segment = "EVN||20080115153000||||20080114003000"
    >>> s = parse_segment(segment)
    >>> print(s)
    <Segment EVN>
    >>> print(s.to_er7())
    EVN||20080115153000||||20080114003000
    """
    version = _get_version(version)
    encoding_chars = _get_encoding_chars(encoding_chars, version)
    validation_level = _get_validation_level(validation_level)
    segment_name = text[:3]
    # For MSH the leading field separator is part of the segment content
    # (it defines the separator itself), so only the name is stripped.
    body = text[3:] if segment_name == 'MSH' else text[4:]
    segment = Segment(segment_name, version=version, validation_level=validation_level, reference=reference)
    segment.children = parse_fields(body, segment_name, version, encoding_chars, validation_level, segment.structure_by_name, segment.allow_infinite_children)
    return segment
|
def load_jsonf(fpath, encoding):
    """Load a JSON document from a file using the given character encoding.

    :param unicode fpath: path of the JSON file
    :param unicode encoding: character encoding of the file
    :rtype: dict | list
    """
    with codecs.open(fpath, encoding=encoding) as json_file:
        content = json_file.read()
    return json.loads(content)
|
def _writen(fd, data):
    """Write all the data to a descriptor, retrying on short writes."""
    remaining = data
    while remaining:
        # os.write may accept fewer bytes than offered; drop what was taken.
        written = os.write(fd, remaining)
        remaining = remaining[written:]
|
def AsociarLiquidacionSecundariaAContrato(self, coe=None, nro_contrato=None, cuit_comprador=None, cuit_vendedor=None, cuit_corredor=None, cod_grano=None, **kwargs):
    "Asociar una Liquidación a un contrato"
    # Associate a secondary settlement (liquidación) with a contract via
    # the AFIP web service.
    auth = {
        'token': self.Token,
        'sign': self.Sign,
        'cuit': self.Cuit,
    }
    response = self.client.lsgAsociarAContrato(
        auth=auth,
        coe=coe,
        nroContrato=nro_contrato,
        cuitComprador=cuit_comprador,
        cuitVendedor=cuit_vendedor,
        cuitCorredor=cuit_corredor,
        codGrano=cod_grano,
    )
    ret = response['oReturn']
    self.__analizar_errores(ret)
    # Analyze the response: store the settlement details when present.
    if 'liquidacion' in ret:
        self.AnalizarLiquidacion(ret['autorizacion'], ret['liquidacion'])
    return True
|
def list_append(self, key, value, create=False, **kwargs):
    """Add an item to the end of a list.

    :param str key: The document ID of the list
    :param value: The value to append
    :param create: Whether the list should be created if it does not
        exist.  Note that this option only works on servers >= 4.6.
        NOTE(review): `create` is currently not consumed by this
        implementation — confirm intended behavior.
    :param kwargs: Additional arguments to :meth:`mutate_in`
    :return: :class:`~.OperationResult`.
    :raise: :cb_exc:`NotFoundError` if the document does not exist
        and `create` was not specified.

    example::

        cb.list_append('a_list', 'hello')
        cb.list_append('a_list', 'world')

    .. seealso:: :meth:`map_add`
    """
    operation = SD.array_append('', value)
    result = self.mutate_in(key, operation, **kwargs)
    return self._wrap_dsop(result)
|
def set_settings_env(executable_folder=None):
    """Add all application folders.

    :param executable_folder: the folder that contains local and
        external_app_repos
    :return:
    """
    executable_folder = executable_folder or get_executable_folder()
    total_settings_path = os.path.join(executable_folder, "local/total_settings.py")
    if os.path.exists(total_settings_path):
        print("Using total settings")
        os.chdir(executable_folder)
        os.environ["DJANGO_SETTINGS_MODULE"] = "local.total_settings"
        os.environ["STATIC_ROOT"] = os.path.join(executable_folder, "static")
        os.environ["MEDIA_ROOT"] = os.path.join(executable_folder, "media")
    else:
        # Fall back to the packaged base settings; keep any ROOT_DIR that
        # the caller already exported.
        os.environ.setdefault('ROOT_DIR', get_folder(get_inspection_frame(2)))
        os.environ["DJANGO_SETTINGS_MODULE"] = "djangoautoconf.base_settings"
|
def server(self, value):
    """Set the connection's server property.

    Args:
        value: New server. String.

    Returns:
        Nothing.
    """
    self._server = value
    # Keep the backing XML element in sync with the attribute.
    self._connectionXML.set('server', value)
|
def location(self) -> Optional[PostLocation]:
    """If the Post has a location, returns PostLocation namedtuple with fields 'id', 'lat' and 'lng' and 'name'."""
    loc = self._field("location")
    if self._location or not loc:
        # Already resolved (cached), or the post simply has no location.
        return self._location
    location_id = int(loc['id'])
    required_keys = ('name', 'slug', 'has_public_page', 'lat', 'lng')
    if not all(k in loc for k in required_keys):
        # The embedded node is incomplete; fetch the full location record.
        endpoint = "explore/locations/{0}/".format(location_id)
        loc = self._context.get_json(endpoint, params={'__a': 1})['graphql']['location']
    self._location = PostLocation(location_id, loc['name'], loc['slug'], loc['has_public_page'], loc['lat'], loc['lng'])
    return self._location
|
def fetch(self, is_dl_forced=False):
    """Fetch this source's files, plus NCBI's gene_group file needed later.

    :param is_dl_forced: force re-download of the source files
    :return:
    """
    self.get_files(is_dl_forced)
    # NCBIGene is instantiated only for its file catalogue; its full
    # fetch() is deliberately skipped (see commented-out call below).
    ncbi = NCBIGene(self.graph_type, self.are_bnodes_skized)
    # ncbi.fetch()
    gene_group = ncbi.files['gene_group']
    self.fetch_from_url(gene_group['url'], '/'.join((ncbi.rawdir, gene_group['file'])), False)
    # load and tag a list of OMIM IDs with types
    # side effect of populating omim replaced
    self.omim_type = self.find_omim_type()
    return
|
def get_prefixed_config(self, section, option, **kwargs):
    """Return the config value for ``option`` prefixed with the workflow
    type, falling back to the value of the plain option when the prefixed
    one is not set.
    """
    cfg = Config.instance()
    fallback = cfg.get_expanded(section, option, **kwargs)
    prefixed_option = "{}_{}".format(self.workflow_type, option)
    return cfg.get_expanded(section, prefixed_option, default=fallback, **kwargs)
|
def disconnect(self, callback: Callable) -> None:
    """Disconnects the given callback.

    The callback will no longer receive events from this signal.
    No action is taken if the callback is not on the list of listener
    callbacks.

    :param callback: the callable to remove
    """
    assert check_argument_types()
    if self.listeners is None:
        return
    try:
        self.listeners.remove(callback)
    except ValueError:
        # Callback was never registered; silently ignore.
        pass
|
def set_header(self, name, value, append=False):
    '''Create a new response header, replacing any previously defined
    headers with the same name.'''
    if append:
        # Delegate to add_header, which preserves existing values.
        self.add_header(name, value)
        return
    self._headers[_hkey(name)] = [str(value)]
|
def random(self):
    """Draw a new value for a stoch conditional on its parents and
    return it.

    Raises an error if no 'random' argument was passed to __init__.
    """
    if not self._random:
        raise AttributeError('Stochastic ' + self.__name__ + ' does not know how to draw its value, see documentation')
    # Evaluate the random function against the parents' current values.
    drawn = self._random(**self.parents.value)
    if self.shape:
        drawn = np.reshape(drawn, self.shape)
    if not self.observed:
        # Only unobserved stochastics adopt the drawn value.
        self.value = drawn
    return drawn
|
def create_log(self, log, scope_identifier, hub_name, plan_id):
    """CreateLog.

    :param :class:`<TaskLog> <azure.devops.v5_0.task.models.TaskLog>` log:
    :param str scope_identifier: The project GUID to scope the request
    :param str hub_name: The name of the server hub: "build" for the Build
        server or "rm" for the Release Management server
    :param str plan_id:
    :rtype: :class:`<TaskLog> <azure.devops.v5_0.task.models.TaskLog>`
    """
    # Only include route values that were actually supplied.
    route_values = {}
    route_params = (
        ('scopeIdentifier', 'scope_identifier', scope_identifier),
        ('hubName', 'hub_name', hub_name),
        ('planId', 'plan_id', plan_id),
    )
    for route_key, param_name, param_value in route_params:
        if param_value is not None:
            route_values[route_key] = self._serialize.url(param_name, param_value, 'str')
    content = self._serialize.body(log, 'TaskLog')
    response = self._send(http_method='POST', location_id='46f5667d-263a-4684-91b1-dff7fdcf64e2', version='5.0', route_values=route_values, content=content)
    return self._deserialize('TaskLog', response)
|
def load_xml(self, xmlfile, **kwargs):
    """Load sources from an XML file.

    Point sources are filtered by the configured ROI radius/size and get
    their angular offsets from the ROI center attached; diffuse sources
    are loaded unconditionally.

    :param xmlfile: path to the XML model file; when it is not an existing
        file, it is looked up in the packaged catalogs directory
    :keyword extdir: directory containing extended-source templates
    :keyword coordsys: coordinate system for the square ROI cut
        (defaults to 'CEL')
    :return: array of the loaded point (non-diffuse) sources
    """
    extdir = kwargs.get('extdir', self.extdir)
    coordsys = kwargs.get('coordsys', 'CEL')
    if not os.path.isfile(xmlfile):
        # Fall back to the catalogs bundled with the package.
        xmlfile = os.path.join(fermipy.PACKAGE_DATA, 'catalogs', xmlfile)
    root = ElementTree.ElementTree(file=xmlfile).getroot()
    diffuse_srcs = []
    srcs = []
    ra, dec = [], []
    for s in root.findall('source'):
        src = Source.create_from_xml(s, extdir=extdir)
        if src.diffuse:
            diffuse_srcs += [src]
        else:
            # ra/dec are kept parallel to srcs so the masks below line up.
            srcs += [src]
            ra += [src['RAJ2000']]
            dec += [src['DEJ2000']]
    src_skydir = SkyCoord(ra=np.array(ra) * u.deg, dec=np.array(dec) * u.deg)
    radec = np.vstack((src_skydir.ra.deg, src_skydir.dec.deg)).T
    glonlat = np.vstack((src_skydir.galactic.l.deg, src_skydir.galactic.b.deg)).T
    # Offsets of each point source from the ROI center: total separation
    # plus projected offsets in celestial and galactic frames.
    offset = self.skydir.separation(src_skydir).deg
    offset_cel = wcs_utils.sky_to_offset(self.skydir, radec[:, 0], radec[:, 1], 'CEL')
    offset_gal = wcs_utils.sky_to_offset(self.skydir, glonlat[:, 0], glonlat[:, 1], 'GAL')
    # Keep only sources passing both the circular and the square ROI cuts.
    m0 = get_skydir_distance_mask(src_skydir, self.skydir, self.config['src_radius'])
    m1 = get_skydir_distance_mask(src_skydir, self.skydir, self.config['src_radius_roi'], square=True, coordsys=coordsys)
    m = (m0 & m1)
    srcs = np.array(srcs)[m]
    for i, s in enumerate(srcs):
        s.data['offset'] = offset[m][i]
        s.data['offset_ra'] = offset_cel[:, 0][m][i]
        s.data['offset_dec'] = offset_cel[:, 1][m][i]
        s.data['offset_glon'] = offset_gal[:, 0][m][i]
        s.data['offset_glat'] = offset_gal[:, 1][m][i]
        self.load_source(s, False, merge_sources=self.config['merge_sources'])
    for i, s in enumerate(diffuse_srcs):
        self.load_source(s, False, merge_sources=self.config['merge_sources'])
    self._build_src_index()
    return srcs
|
def get_columns(self, table_name, column_name=None, after=None, timeout=None):
    """RETURN METADATA COLUMNS

    :param table_name: TABLE WE WANT COLUMNS FOR
    :param column_name: OPTIONAL NAME, IF INTERESTED IN ONLY ONE COLUMN
    :param after: FORCE LOAD, WAITING FOR last_updated TO BE AFTER THIS TIME
    :param timeout: Signal; True when should give up
    :return:
    """
    DEBUG and after and Log.note("getting columns for after {{time}}", time=after)
    table_path = split_field(table_name)
    root_table_name = table_path[0]
    # Resolve the table name to an ES alias, refreshing cluster metadata
    # once if the first lookup comes up empty.
    alias = self._find_alias(root_table_name)
    if not alias:
        self.es_cluster.get_metadata(force=True)
        alias = self._find_alias(root_table_name)
        if not alias:
            Log.error("{{table|quote}} does not exist", table=table_name)
    try:
        table = self.get_table(alias)[0]
        # LAST TIME WE GOT INFO FOR THIS TABLE
        if not table:
            # First sighting of this table: register it and load columns.
            table = TableDesc(name=alias, url=None, query_path=["."], timestamp=Date.MIN)
            with self.meta.tables.locker:
                self.meta.tables.add(table)
            columns = self._reload_columns(table)
            DEBUG and Log.note("columns from reload")
        elif after or table.timestamp < self.es_cluster.metatdata_last_updated:
            # Stale metadata, or the caller explicitly demands fresh data.
            columns = self._reload_columns(table)
            DEBUG and Log.note("columns from reload")
        else:
            columns = self.meta.columns.find(alias, column_name)
            DEBUG and Log.note("columns from find()")
        DEBUG and Log.note("columns are {{ids}}", ids=[id(c) for c in columns])
        columns = jx.sort(columns, "name")
        if after is None:
            # DO NOT WAIT FOR COMPLETE COLUMNS
            return columns
        # WAIT FOR THE COLUMNS TO UPDATE
        while True:
            # Pending: columns not yet refreshed past `after`, or whose
            # cardinality is still unknown (non-struct types only).
            pending = [c for c in columns if after >= c.last_updated or (c.cardinality == None and c.jx_type not in STRUCT)]
            if not pending:
                break
            if timeout:
                Log.error("trying to gets columns timed out")
            if DEBUG:
                if len(pending) > 10:
                    Log.note("waiting for {{num}} columns to update by {{timestamp}}", num=len(pending), timestamp=after)
                else:
                    Log.note("waiting for columns to update by {{timestamp}}; {{columns|json}}", timestamp=after, columns=[c.es_index + "." + c.es_column + " id=" + text_type(id(c)) for c in pending])
            Till(seconds=1).wait()
        return columns
    except Exception as e:
        Log.error("Failure to get columns for {{table}}", table=table_name, cause=e)
    return []
|
def get_coord_system_name(header):
    """Return an appropriate key code for the axes coordinate system by
    examining the FITS header.

    Falls back to 'raw' when neither CTYPE1 nor RA is present, and to
    'icrs' when CTYPE1 matches no known convention.
    """
    try:
        ctype = header['CTYPE1'].strip().upper()
    except KeyError:
        # No CTYPE1: if an "RA" card exists, derive the system from the
        # equinox; otherwise treat the data as having no WCS at all.
        try:
            header['RA']
            return _radecsys_from_equinox(header).lower()
        except KeyError:
            return 'raw'
    if ctype.startswith('GLON-'):
        return 'galactic'
    if ctype.startswith('ELON-'):
        return 'ecliptic'
    if ctype.startswith('RA---'):
        # Prefer an explicit system keyword (old and new spellings).
        for hdkey in ('RADECSYS', 'RADESYS'):
            try:
                return header[hdkey].strip().lower()
            except KeyError:
                continue
        # Neither keyword present: RADESYS defaults to ICRS unless EQUINOX
        # is given alone, in which case it defaults to FK4 prior to 1984
        # and FK5 after 1984.
        return _radecsys_from_equinox(header).lower()
    if ctype.startswith('HPLN-'):
        return 'helioprojective'
    if ctype.startswith('HGLT-'):
        return 'heliographicstonyhurst'
    if ctype in ('PIXEL', 'LINEAR'):
        return 'pixel'
    return 'icrs'


def _radecsys_from_equinox(header):
    """Default RADESYS derived from EQUINOX: FK4 before 1984.0, FK5 from
    1984.0 on, and ICRS when no EQUINOX card is present."""
    try:
        equinox = float(header['EQUINOX'])
    except KeyError:
        return 'ICRS'
    return 'FK4' if equinox < 1984.0 else 'FK5'
|
def console_print_ex(con: tcod.console.Console, x: int, y: int, flag: int, alignment: int, fmt: str,) -> None:
    """Print a string on a console using a blend mode and alignment mode.

    Args:
        con (Console): Any Console instance.
        x (int): Character x position from the left.
        y (int): Character y position from the top.
        flag (int): Blend mode to use.
        alignment (int): Text alignment mode to use.
        fmt (str): The string to print.

    .. deprecated:: 8.5
        Use :any:`Console.print_` instead.
    """
    # Thin wrapper over the C library call; `_console` and `_fmt` adapt the
    # Python objects to their C representations.
    lib.TCOD_console_printf_ex(_console(con), x, y, flag, alignment, _fmt(fmt))
|
def set_country(self, country):
    """This sets the country for detecting license plates. For example,
    setting country to "us" for United States or "eu" for Europe.

    :param country: A unicode/ascii string (Python 2/3) or bytes array (Python 3)
    :return: None
    """
    # Convert to a C char* compatible value before calling into the
    # native library.
    country = _convert_to_charp(country)
    self._set_country_func(self.alpr_pointer, country)
|
def _unescape(v):
    """Unescape characters in a TOML string."""
    i = 0
    backslash = False
    while i < len(v):
        if backslash:
            # v[i] is the character following a backslash.
            backslash = False
            if v[i] in _escapes:
                # Replace the two-character escape (backslash + code) with
                # the real character; `i` now points just past it, so the
                # `continue` below re-examines the following character.
                v = v[:i - 1] + _escape_to_escapedchars[v[i]] + v[i + 1:]
            elif v[i] == '\\':
                # Escaped backslash: drop the first of the two backslashes.
                v = v[:i - 1] + v[i:]
            elif v[i] == 'u' or v[i] == 'U':
                # Unicode escapes are presumably expanded elsewhere; only
                # the introducer is stepped over here.
                i += 1
            else:
                raise TomlDecodeError("Reserved escape sequence used")
            continue
        elif v[i] == '\\':
            backslash = True
        i += 1
    return v
|
def recharge(self, param, must=[APIKEY, MOBILE, SN]):
    '''Recharge mobile data traffic.

    Keys expected in ``param``:
        apikey (required): unique user key,
            e.g. 9b11127a9701975c734b8aee81ee3526
        mobile (required): receiving phone number (mainland China numbers
            only), e.g. 15205201314
        sn (required): unique ID of the data package, e.g. 1008601
        callback_url (optional): URL that receives the status report for
            this recharge
        encrypt (optional): encryption method (deprecated)
        _sign (optional): signature field (deprecated)

    Args:
        param:
    Results:
        Result
    '''
    verification = self.verify_param(param, must)
    if not verification.is_succ():
        return verification
    handler = CommonResultHandler(lambda rsp: {VERSION_V1: rsp[RESULT] if RESULT in rsp else None, VERSION_V2: rsp}[self.version()])
    return self.path('recharge.json').post(param, handler, verification)
|
def assert_no_pending(self, target_rule=None):
    '''Raises a :class:`PendingRequestsLeftException` error if server has
    target rule non-resolved.

    When target_rule argument is omitted, raises if server has any pending
    expectations.  Useful in ``tearDown()`` test method to verify that the
    test had correct expectations.

    :type target_rule: Rule
    :param target_rule: will raise if this rule is left pending
    :raises: :class:`PendingRequestsLeftException`
    '''
    if not target_rule:
        # No specific rule: any remaining expectation is an error.
        if self._rules:
            raise PendingRequestsLeftException()
    elif target_rule in self._rules:
        raise PendingRequestsLeftException()
|
def clean(self):
    """Cleans the input values of this configuration object.

    Fields that have been updated through properties are converted, via
    each property's ``input_func``, to the representation expected by
    consumers (e.g. a single string becomes a one-element list).  If a
    conversion fails, a ``ValueError`` is raised.
    """
    declared_props = self.__class__.CONFIG_PROPERTIES
    for prop_name in self._modified:
        attr_config = declared_props.get(prop_name)
        if attr_config and attr_config.input_func:
            self._config[prop_name] = attr_config.input_func(self._config[prop_name])
    self._modified.clear()
|
def get_urls(self):
    """Add a delete-translation view."""
    urlpatterns = super(TranslatableAdmin, self).get_urls()
    if not self._has_translatable_model():
        return urlpatterns
    opts = self.model._meta
    info = opts.app_label, opts.model_name
    delete_translation_pattern = url(
        r'^(.+)/change/delete-translation/(.+)/$',
        self.admin_site.admin_view(self.delete_translation),
        name='{0}_{1}_delete_translation'.format(*info),
    )
    # The custom pattern is placed before the default admin patterns.
    return [delete_translation_pattern] + urlpatterns
|
def ask_stories(self, raw=False, limit=None):
    """Returns list of item ids of latest Ask HN stories.

    Args:
        limit (int): specifies the number of stories to be returned.
        raw (bool): Flag to indicate whether to transform all
            objects into raw json.

    Returns:
        `list` object containing ids of Ask HN stories.
    """
    stories = self._get_stories('askstories', limit)
    if raw:
        return [story.raw for story in stories]
    return stories
|
def cumulative_value(self, slip, mmax, mag_value, bbar, dbar, beta):
    '''Return the rate of events with M > mag_value.

    Implements Eq. 20 of Anderson & Luco (1983).

    :param float slip:
        Slip rate in mm/yr (converted to cm/yr for the a_3 term)
    :param float mmax:
        Maximum magnitude
    :param float mag_value:
        Magnitude value
    :param float bbar:
        \\bar{b} parameter (effectively = b * log(10.))
    :param float dbar:
        \\bar{d} parameter
    :param float beta:
        Beta value of the formula
    '''
    mag_diff = mmax - mag_value
    a_3 = self._get_a3_value(bbar, dbar, slip / 10., beta, mmax)
    growth_term = np.exp(bbar * mag_diff) - 1.0 - (bbar * mag_diff)
    # Zero rate whenever the requested magnitude reaches or exceeds mmax.
    return a_3 * growth_term * (mag_diff > 0.0)
|
def decode_file_args(self, argv: List[str]) -> List[str]:
    """Expand ``--conf FILE`` references into their file contents.

    The first ``--conf`` pair found is removed from ``argv`` (in place),
    the referenced file is tokenized with ``shlex``, its entries are made
    relative to the conf file location and prepended to the remaining
    options, and the whole list is processed again recursively so that
    conf files may themselves contain ``--conf`` references.

    :param argv: raw options list
    :return: options list with '--conf' references replaced by file contents
    """
    # Stop one short of the end: a trailing '--conf' has no file argument.
    for idx in range(len(argv) - 1):
        if argv[idx] != '--conf':
            continue
        conf_file = argv[idx + 1]
        del argv[idx:idx + 2]
        with open(conf_file) as fh:
            file_args = shlex.split(fh.read())
        # Later (command line) options win over earlier (file) ones because
        # the parser keeps the last occurrence of a repeated option.
        argv = self.fix_rel_paths(file_args, conf_file) + argv
        return self.decode_file_args(argv)
    return argv
|
def on_mouse_motion(x, y, dx, dy):
    """Fired when the mouse moves while no button is pressed."""
    mouse.x = x
    mouse.y = y
    mouse.move()
    window.update_caption(mouse)
|
def smooth(self, n_iter=20, convergence=0.0, edge_angle=15, feature_angle=45, boundary_smoothing=True, feature_smoothing=False, inplace=False):
    """Relax point coordinates using Laplacian smoothing.

    Makes the cells better shaped and the vertices more evenly
    distributed.

    Parameters
    ----------
    n_iter : int
        Number of smoothing iterations.
    convergence : float, optional
        Convergence criterion for the iteration process, in (0, 1).
        Smaller numbers result in more smoothing iterations.
    edge_angle : float, optional
        Edge angle to control smoothing along edges (interior or boundary).
    feature_angle : float, optional
        Feature angle for sharp edge identification.
    boundary_smoothing : bool, optional
        Whether boundary edges are smoothed.
    feature_smoothing : bool, optional
        Whether feature edges are smoothed.
    inplace : bool, optional
        Overwrite this mesh with the result and return nothing.

    Returns
    -------
    mesh : vtki.PolyData
        Smoothed mesh. None when inplace=True.
    """
    smoother = vtk.vtkSmoothPolyDataFilter()
    smoother.SetInputData(self)
    smoother.SetNumberOfIterations(n_iter)
    smoother.SetConvergence(convergence)
    smoother.SetFeatureEdgeSmoothing(feature_smoothing)
    smoother.SetFeatureAngle(feature_angle)
    smoother.SetEdgeAngle(edge_angle)
    smoother.SetBoundarySmoothing(boundary_smoothing)
    smoother.Update()
    result = _get_output(smoother)
    if inplace:
        self.overwrite(result)
        return None
    return result
|
def initialise_shopify_session():
    """Configure the global Shopify session from the app's API credentials.

    Raises ``ImproperlyConfigured`` when either credential setting is missing.
    """
    api_key = settings.SHOPIFY_APP_API_KEY
    api_secret = settings.SHOPIFY_APP_API_SECRET
    if not api_key or not api_secret:
        raise ImproperlyConfigured("SHOPIFY_APP_API_KEY and SHOPIFY_APP_API_SECRET must be set in settings")
    shopify.Session.setup(api_key=api_key, secret=api_secret)
|
def requires_subrepo(func: Callable) -> Callable:
    """Decorator that requires the `git subrepo` command to be accessible before calling the given function.

    :param func: the function to wrap
    :return: the wrapped function
    :raises RuntimeError: if `git subrepo --version` fails when the wrapped function is called
    """
    # Local import keeps the decorator self-contained.
    from functools import wraps

    # `wraps` preserves the wrapped function's __name__/__doc__/signature,
    # which the original implementation lost.
    @wraps(func)
    def decorated(*args, **kwargs):
        try:
            run([GIT_COMMAND, _GIT_SUBREPO_COMMAND, "--version"])
        except RunException as e:
            raise RuntimeError("`git subrepo` does not appear to be working") from e
        return func(*args, **kwargs)
    return decorated
|
def get(self, thing: type):
    """Resolve *thing* to an instance.

    Resolution order: cached instances, registered factories (caching the
    result when the factory is marked as a singleton), class construction,
    then plain callables. Raises ``DiayException`` for anything else.
    """
    if thing in self.instances:
        return self.instances[thing]
    if thing in self.factories:
        factory = self.factories[thing]
        instance = self.get(factory)
        is_singleton = hasattr(factory, '__di__') and factory.__di__['singleton']
        if is_singleton:
            # Remember singleton products so future lookups skip the factory.
            self.instances[thing] = instance
        return instance
    if inspect.isclass(thing):
        return self._call_class_init(thing)
    if callable(thing):
        return self.call(thing)
    raise DiayException('cannot resolve: %r' % thing)
|
def _generate_examples(self, archive):
    """Yield rock/paper/scissors image examples from the archive.

    Args:
        archive: object that iterates over (filename, fileobj) pairs of the zip.

    Yields:
        dicts with the image file object and its corresponding label.
    """
    for fname, fobj in archive:
        match = _NAME_RE.match(fname)
        if not match:
            # Skip any entry whose name doesn't match the expected .png pattern.
            continue
        yield {
            "image": fobj,
            "label": match.group(2).lower(),
        }
|
def thrice(self):
    """Expect the inspected function to have been called exactly three times.

    Return: self (for chaining)
    """
    def _called_three_times():  # pylint: disable=missing-docstring
        # Evaluated lazily when the expectation list is checked.
        return super(SinonExpectation, self).calledThrice
    self.valid_list.append(_called_three_times)
    return self
|
def rebase(self):
    """Change the backing-file entry of the exported disk.

    Please refer to 'qemu-img rebase' manual for more info.
    """
    if self.standalone:
        rebase_msg = 'Merging layered image with base'
    else:
        rebase_msg = 'Rebase'
    with LogTask(rebase_msg):
        if len(self.src_qemu_info) == 1:
            # Base image (doesn't have predecessors) - nothing to rebase.
            return
        if self.standalone:
            # Consolidate the layers and base image into one standalone disk
            # (an empty backing_file flattens the chain).
            utils.qemu_rebase(target=self.dst, backing_file="")
        else:
            if len(self.src_qemu_info) > 2:
                raise utils.LagoUserException('Layered export is currently supported for one ' 'layer only. You can try to use Standalone export.')
            # Put an identifier in the metadata of the copied layer,
            # this identifier will be used later by Lago in order
            # to resolve and download the base image
            parent = self.src_qemu_info[0]['backing-filename']
            # Hack for working with lago images naming convention
            # For example: /var/lib/lago/store/phx_repo:el7.3-base:v1
            # Extract only the image name and the version
            # (in the example el7.3-base:v1)
            parent = os.path.basename(parent)
            try:
                parent = parent.split(':', 1)[1]
            except IndexError:
                # No repo prefix in the name; keep the basename as-is.
                pass
            parent = './{}'.format(parent)
            # safe=False: the new backing file isn't present locally yet.
            utils.qemu_rebase(target=self.dst, backing_file=parent, safe=False)
|
def output(value, address):
    '''int, str -> TxOut

    accepts base58 or bech32 addresses'''
    output_script = addr.to_output_script(address)
    # Value is serialized as an 8-byte little-endian integer.
    serialized_value = utils.i2le_padded(value, 8)
    return tb._make_output(serialized_value, output_script)
|
def find_first_position(sorted_list, target, list_length):
    """Binary-search a sorted list for the leftmost occurrence of ``target``.

    Returns the index of the first occurrence, or -1 when ``target`` is
    not present.

    >>> find_first_position([1, 2, 3, 4, 5, 6, 6], 6, 7)
    5
    >>> find_first_position([1, 2, 2, 2, 3, 4], 2, 6)
    1
    >>> find_first_position([1, 2, 3], 5, 3)
    -1
    """
    lo = 0
    hi = list_length - 1
    first_index = -1
    while lo <= hi:
        mid = (lo + hi) // 2
        if sorted_list[mid] < target:
            lo = mid + 1
        elif sorted_list[mid] > target:
            hi = mid - 1
        else:
            # Record the hit, then keep searching to its left for an
            # earlier occurrence.
            first_index = mid
            hi = mid - 1
    return first_index
|
def bytes_to_bits(bytes_):
    """Convert bytes to a flat list of bits."""
    bits = []
    for byte in bytes_:
        # Iterating a py2-style str yields 1-char strings; map them to ints.
        value = byte if isinstance(byte, int) else ord(byte)
        bits.extend(byte_to_bits(value))
    return bits
|
def _get(self, field):
    """Return the value of a single queryable field of the model.

    Queryable fields include: ``batch_size`` (examples per training
    iteration), ``cluster_id`` (per-point assignment and distance to
    center), ``cluster_info`` (centers, within-cluster squared distances,
    member counts), ``features``, ``max_iterations``, ``method``,
    ``num_clusters``, ``num_examples``, ``num_features``,
    ``num_unpacked_features``, ``training_iterations``, ``training_time``
    and ``unpacked_features``.

    Parameters
    ----------
    field : str
        The name of the field to query.

    Returns
    -------
    out
        Value of the requested field.
    """
    query = {
        'model': self.__proxy__,
        'model_name': self.__name__,
        'field': field,
    }
    return _tc.extensions._kmeans.get_value(query)['value']
|
def validate(opts):
    """Client facing validate function for command line arguments.

    Perform validation operations on opts, a namespace created from
    command line arguments. Returns True if all validation tests are
    successful. If an exception is raised by the validations, this
    gracefully exits the program and leaves a message to the user.

    Required attributes on opts:

    * input: String giving the path to input files
    * output: String giving the path to output destination
    * wrapper: String specifying the wrapper format
    * extensions: List of strings specifying the file extensions to look for
    * overwrite: Boolean specifying whether the original input files should
      be overridden

    :param opts: namespace containing necessary parameters
    :return: True, if all tests are successful
    """
    try:
        return _validate(opts)
    except ValidationException as e:
        print("Command line arguments failed validation:")
        print(e)
        # Exit non-zero so shell callers can detect the failure
        # (previously exited with 0, which signals success).
        sys.exit(1)
    except ValueError as e:
        print("Incorrect type passed into anchorhub.validate_opts.validate()\n")
        print(e)
        sys.exit(1)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.