signature stringlengths 29 44.1k | implementation stringlengths 0 85.2k |
|---|---|
def hash(self):
    """(property) Return a unique MD5 hex digest for the result.

    The digest covers the scalar result fields plus the raw bytes of the
    ``indices`` array, so two results with identical parameters and
    indices hash identically.

    Returns:
        str: 32-character hexadecimal MD5 digest.
    """
    fields = [self.N, self.K, self.X, self.L, self.stat, self.cutoff,
              self.pval, self.pval_thresh, self.escore_pval_thresh]
    # repr() already returns str, so the original str(repr(...)) wrapper
    # was redundant; joined values are unchanged.
    data_str = ';'.join(repr(var) for var in fields)
    data_str += ';'
    data = data_str.encode('UTF-8') + self.indices.tobytes()
    # hexdigest() already returns a str; no str() wrapper needed.
    return hashlib.md5(data).hexdigest()
def Deserialize ( self , reader ) :
"""Deserialize full object .
Args :
reader ( neo . IO . BinaryReader ) :""" | self . AssetId = reader . ReadUInt256 ( )
self . Value = reader . ReadFixed8 ( )
self . ScriptHash = reader . ReadUInt160 ( )
if self . ScriptHash is None :
raise Exception ( "Script hash is required from deserialize!!!!!!!!" ) |
def calculate_overlap(self):
    """Create the array that describes how junctions overlap.

    Returns:
        list: ``[i, j]`` index pairs where junction ``i`` of ``tx_obj1``
        overlaps junction ``j`` of ``tx_obj2`` within ``self.tolerance``.
        Empty when the transcript ranges themselves do not overlap.
    """
    # If the overall transcript ranges don't overlap, no junction pair can.
    if not self.tx_obj1.range.overlaps(self.tx_obj2.range):
        return []
    # enumerate() replaces the original index-based double range(len(...))
    # loop; the produced pairs are identical and in the same order.
    return [[i, j]
            for i, jun1 in enumerate(self.j1)
            for j, jun2 in enumerate(self.j2)
            if jun1.overlaps(jun2, tolerance=self.tolerance)]
def json_encode_default(obj):
    '''Convert datetime.datetime/date values to timestamps.

    :param obj: value to (possibly) convert; non-date values are delegated
        to the module's ``json_encoder.default``.
    '''
    is_datelike = isinstance(obj, (datetime, date))
    converted = dt2ts(obj) if is_datelike else json_encoder.default(obj)
    return to_encoding(converted)
def readspec(filename, read_scan=None):
    """Open a SPEC file and read its content.

    Inputs:
        filename: string
            the file to open
        read_scan: None, 'all' or integer
            the index of scan to be read from the file. If None, no scan
            should be read. If 'all', all scans should be read. If a
            number, just the scan with that number should be read.

    Output:
        the data in the spec file in a dict.
    """
    with open(filename, 'rt') as f:
        sf = {'motors': [], 'maxscannumber': 0}
        sf['originalfilename'] = filename
        # NOTE(review): unused variable, kept for compatibility.
        lastscannumber = None
        # --- header section: consume '#X' lines until the first blank line.
        while True:
            l = f.readline()
            if l.startswith('#F'):
                # file name as recorded inside the SPEC file
                sf['filename'] = l[2:].strip()
            elif l.startswith('#E'):
                # epoch line; also derive a datetime from it
                sf['epoch'] = int(l[2:].strip())
                sf['datetime'] = datetime.datetime.fromtimestamp(sf['epoch'])
            elif l.startswith('#D'):
                sf['datestring'] = l[2:].strip()
            elif l.startswith('#C'):
                sf['comment'] = l[2:].strip()
            elif l.startswith('#O'):
                # motor-name line; the part after the '#On' tag is a
                # whitespace-separated list of motor names
                try:
                    l = l.split(None, 1)[1]
                except IndexError:
                    # '#O' line with no payload: skip it
                    continue
                if 'motors' not in list(sf.keys()):
                    sf['motors'] = []
                sf['motors'].extend([x.strip() for x in l.split(' ')])
            elif not l.strip():
                # empty line signifies the end of the header part; the next
                # line will be a scan. (EOF also yields '' and breaks here.)
                break
        sf['scans'] = {}
        if read_scan is not None:
            if read_scan == 'all':
                nr = None  # None asks readspecscan() for every scan
            else:
                nr = read_scan
            try:
                while True:
                    s = readspecscan(f, nr)
                    if isinstance(s, dict):
                        sf['scans'][s['number']] = s
                        if nr is not None:
                            # a single requested scan was found: stop
                            break
                        sf['maxscannumber'] = max(sf['maxscannumber'],
                                                  s['number'])
                    elif s is not None:
                        # readspecscan returned just a scan number
                        sf['maxscannumber'] = max(sf['maxscannumber'], s)
            except SpecFileEOF:
                pass
        else:
            # no scan requested: only scan the rest of the file for '#S'
            # lines to determine the largest scan number
            while True:
                l = f.readline()
                if not l:
                    break
                if l.startswith('#S'):
                    n = int(l[2:].split()[0])
                    sf['maxscannumber'] = max(sf['maxscannumber'], n)
        # propagate file-level metadata into each loaded scan
        for n in sf['scans']:
            s = sf['scans'][n]
            s['motors'] = sf['motors']
            if 'comment' not in s:
                # NOTE(review): raises KeyError if the header had no '#C'
                # line — confirm all produced files carry one.
                s['comment'] = sf['comment']
            if 'positions' not in s:
                s['positions'] = [None] * len(sf['motors'])
        return sf
def setup_network_agents(self):
    """Initializes agents on nodes of graph and registers them to the SimPy environment."""
    graph = self.env.G
    for node_id in graph.nodes():
        # each node gets its own deep-copied initial state
        initial_state = deepcopy(self.initial_states[node_id])
        graph.node[node_id]['agent'] = self.agent_type(
            environment=self.env, agent_id=node_id, state=initial_state)
def unaccentuate(s):
    """Replace accentuated chars in string by their non accentuated equivalent."""
    # NFKD decomposition splits accented characters into base char +
    # combining mark; dropping the combining marks leaves the base chars.
    decomposed = unicodedata.normalize("NFKD", s)
    kept = [ch for ch in decomposed if not unicodedata.combining(ch)]
    return "".join(kept)
def evaluate(references, estimates, win=1 * 44100, hop=1 * 44100, mode='v4', padding=True):
    """BSS_EVAL images evaluation using the metrics module.

    Parameters
    ----------
    references : np.ndarray, shape=(nsrc, nsampl, nchan)
        array containing true reference sources
    estimates : np.ndarray, shape=(nsrc, nsampl, nchan)
        array containing estimated sources
    win : int, defaults to 44100
        window size in samples
    hop : int
        hop size in samples, defaults to 44100 (no overlap)
    mode : str
        BSSEval version, defaults to `v4`
    padding : bool
        pad/truncate estimates to the reference length first

    Returns
    -------
    SDR, ISR, SIR, SAR : np.ndarray, each shape=(nsrc,)
    """
    references = np.array(references)
    estimates = np.array(estimates)
    if padding:
        references, estimates = pad_or_truncate(references, estimates)
    sdr, isr, sir, sar, _ = metrics.bss_eval(
        references,
        estimates,
        compute_permutation=False,
        window=win,
        hop=hop,
        # v3 is the only framewise-filtered variant
        framewise_filters=(mode == "v3"),
        bsseval_sources_version=False,
    )
    return sdr, isr, sir, sar
def insertData(self, tablename, list_value_pairs, list_SQLCMD_pairs=None):
    """Insert a row into *tablename*.

    - list_value_pairs: (field, value) pairs; values are inserted
      double-quoted as literals
    - list_SQLCMD_pairs: (field, SQL expression) pairs; the expression is
      embedded verbatim (no quotes)

    Returns True on success, False if any field is unknown for the table.
    """
    fields = self._getFieldsInDB(tablename)
    lst_field = []
    lst_value = []
    # plain field/value pairs: value is quoted
    for pair in list_value_pairs:
        if pair[0] not in fields:
            # parenthesized so this works as both py2 statement and py3 call
            print("err: field %s can't be found in the table" % pair[0])
            return False
        lst_field.append(pair[0])
        lst_value.append('"%s"' % pair[1])
    # field/SQL-command pairs: the only difference is the missing quotes
    if list_SQLCMD_pairs is not None:
        for pair in list_SQLCMD_pairs:
            if pair[0] not in fields:
                print("err: field %s can't be found in the table" % pair[0])
                return False
            lst_field.append(pair[0])
            lst_value.append(pair[1])
    # SECURITY NOTE(review): values are interpolated directly into the SQL
    # string with no parameter binding/escaping — do not pass untrusted
    # data to this method.
    SQL = 'INSERT INTO %s (%s) VALUES (%s)' % (
        tablename, ','.join(lst_field), ','.join(lst_value))
    self.execQuery(SQL)
    return True
def get_by_name(config=None, name=None, name_label='name'):
    """Fetches a K8sDeployment by name.

    :param config: A K8sConfig object.
    :param name: The name we want.
    :param name_label: The label key to use for name.
    :return: A list of K8sDeployment objects.
    """
    # Guard clauses: validate arguments before touching the API.
    if name is None:
        raise SyntaxError('Deployment: name: [ {0} ] cannot be None.'.format(name))
    if not isinstance(name, str):
        raise SyntaxError('Deployment: name: [ {0} ] must be a string.'.format(name))
    if config is not None and not isinstance(config, K8sConfig):
        raise SyntaxError('Deployment: config: [ {0} ] must be a K8sConfig'.format(config))
    label_selector = {name_label: name}
    return K8sDeployment(config=config, name=name).list(labels=label_selector)
def get_pending_withdrawals(self, currency=None):
    """Used to view your pending withdrawals.

    Endpoint:
        1.1 NO EQUIVALENT
        2.0 /key/balance/getpendingwithdrawals

    :param currency: String literal for the currency (ie. BTC)
    :type currency: str
    :return: pending withdrawals in JSON
    :rtype: list
    """
    # Only send the currency filter when one was actually provided.
    request_options = {'currencyname': currency} if currency else None
    return self._api_query(
        path_dict={API_V2_0: '/key/balance/getpendingwithdrawals'},
        options=request_options,
        protection=PROTECTION_PRV)
def delete_speaker(self, speaker_uri):
    """Delete a speaker from a collection.

    :param speaker_uri: the URI that references the speaker
    :type speaker_uri: String
    :rtype: Boolean
    :returns: True if the speaker was deleted
    :raises: APIError if the request was not successful
    """
    api_response = self.api_request(speaker_uri, method='DELETE')
    return self.__check_success(api_response)
def create(self):
    """Add backup files to select from."""
    # ^T quits the form.
    self.add_handlers({'^T': self.quit})
    prompt = 'Pick a version to restore from: '
    self.add(npyscreen.Textfield, value=prompt,
             editable=False, color='STANDOUT')
    self.dir_select = self.add(npyscreen.SelectOne,
                               values=self.display_vals,
                               scroll_exit=True, rely=4)
def saliency_map(output, input, name="saliency_map"):
    """Produce a saliency map as described in the paper:
    `Deep Inside Convolutional Networks: Visualising Image Classification
    Models and Saliency Maps <https://arxiv.org/abs/1312.6034>`_.

    The saliency map is the gradient of the max element in output w.r.t input.

    Returns:
        tf.Tensor: the saliency map. Has the same shape as input.
    """
    max_outp = tf.reduce_max(output, 1)
    # tf.gradients returns a list; index it directly. (The original
    # `[:][0]` copied the list before indexing — redundant.)
    saliency_op = tf.gradients(max_outp, input)[0]
    return tf.identity(saliency_op, name=name)
def get_xml(pmc_id):
    """Return the OAI-PMH XML for the article with the given PMC ID.

    :param pmc_id: PMC identifier, with or without the 'PMC' prefix.
    :return: the record XML as a unicode string, or None if the download
        failed or the service reported an error.
    """
    # Accept IDs with or without the 'PMC' prefix.
    if pmc_id.upper().startswith('PMC'):
        pmc_id = pmc_id[3:]
    # Request params for the OAI-PMH GetRecord verb.
    params = {}
    params['verb'] = 'GetRecord'
    params['identifier'] = 'oai:pubmedcentral.nih.gov:%s' % pmc_id
    params['metadataPrefix'] = 'pmc'
    # Submit the request.
    res = requests.get(pmc_url, params)
    if not res.status_code == 200:
        logger.warning("Couldn't download %s" % pmc_id)
        return None
    # Read the bytestream.
    xml_bytes = res.content
    # Check for any XML errors; xml_bytes should still be bytes here.
    tree = ET.XML(xml_bytes, parser=UTB())
    xmlns = "http://www.openarchives.org/OAI/2.0/"
    err_tag = tree.find('{%s}error' % xmlns)
    if err_tag is not None:
        # The service embeds errors as an <error code=...> element.
        err_code = err_tag.attrib['code']
        err_text = err_tag.text
        logger.warning('PMC client returned with error %s: %s'
                       % (err_code, err_text))
        return None
    # If no error, return the XML as a unicode string.
    else:
        return xml_bytes.decode('utf-8')
def get_version():
    """Return formatted version string.

    Returns:
        str: string with project version or empty string.
    """
    updated_is_datelike = isinstance(UPDATED, (date, datetime))
    # Fully-formatted string only when both pieces are present and UPDATED
    # is an actual date/datetime.
    if VERSION and UPDATED and updated_is_datelike:
        return FORMAT_STRING.format(version=VERSION, updated=UPDATED)
    if VERSION:
        return VERSION
    if UPDATED:
        return localize(UPDATED) if updated_is_datelike else ""
    return ""
def create_part(self, parent, model, name=None, **kwargs):
    """Create a new part instance from a given model under a given parent.

    In order to prevent the backend from updating the frontend you may add
    `suppress_kevents=True` as additional keyword=value argument to this
    method. This improves backend performance at the cost of the frontend
    not showing changes until the page is refreshed.

    :param parent: parent part instance of the new instance
    :type parent: :class:`models.Part`
    :param model: target part model on which the new instance is based
    :type model: :class:`models.Part`
    :param name: new part name (defaults to the model's name)
    :type name: basestring
    :param kwargs: (optional) additional keyword=value arguments
    :return: :class:`models.Part` with category `INSTANCE`
    :raises IllegalArgumentError: When the provided arguments are incorrect
    :raises APIError: if the `Part` could not be created
    """
    if parent.category != Category.INSTANCE:
        raise IllegalArgumentError("The parent should be an category 'INSTANCE'")
    if model.category != Category.MODEL:
        raise IllegalArgumentError("The models should be of category 'MODEL'")
    part_name = name or model.name
    payload = {"name": part_name, "parent": parent.id, "model": model.id}
    return self._create_part(action="new_instance", data=payload, **kwargs)
def query_info(self, what):
    """Way to extend the interface.

    in what of type int
    return result of type str
    """
    if not isinstance(what, baseinteger):
        raise TypeError("what can only be an instance of type baseinteger")
    # Delegate to the generic RPC call mechanism.
    return self._call("queryInfo", in_p=[what])
def compress_file(inputfile, filename):
    """Compress input file using gzip and change its name.

    :param inputfile: File to compress
    :type inputfile: ``file`` like object
    :param filename: File's name
    :type filename: ``str``
    :returns: Tuple with compressed file and new file's name
    :rtype: :class:`tempfile.SpooledTemporaryFile`, ``str``
    """
    outputfile = create_spooled_temporary_file()
    new_filename = filename + '.gz'
    gz_stream = gzip.GzipFile(filename=filename, fileobj=outputfile, mode="wb")
    try:
        # Start from the beginning of the input and stream it through gzip.
        inputfile.seek(0)
        copyfileobj(inputfile, gz_stream, settings.TMP_FILE_READ_SIZE)
    finally:
        # Closing flushes the gzip trailer into outputfile.
        gz_stream.close()
    return outputfile, new_filename
def append(self, *values):
    """Append values at the end of the list.

    Allows chaining.

    Args:
        values: values to be appended at the end.

    Example:
        >>> from ww import l
        >>> lst = l([])
        >>> lst.append(1).append(2, 3)
        [1, 2, 3]
    """
    # Appending each value in order is exactly list.extend with the
    # packed tuple of values.
    list.extend(self, values)
    return self
def dot_product_unmasked_attention_local_2d_tpu(
        q, k, v, bias, max_relative_position=None, query_shape=(8, 8),
        dropout_rate=0.0, image_shapes=None, name=None,
        make_image_summary=False, dropout_broadcast_dims=None):
    """Calculate unmasked dot-product local self-attention 2d on tpu.

    Args:
        q: a Tensor with shape [batch, heads, height, width, depth].
        k: a Tensor with shape [batch, heads, height, width, depth].
        v: a Tensor with shape [batch, heads, height, width, depth].
        bias: bias Tensor.
        max_relative_position: an integer, the max relative embedding
            considered. Changing this invalidates checkpoints.
        query_shape: a two tuple indicating query shape.
        dropout_rate: a floating point number.
        image_shapes: optional tuple of integer scalars.
        name: an optional string.
        make_image_summary: Whether to make an attention image summary.
        dropout_broadcast_dims: an optional list of integers less than 4
            specifying in which dimensions to broadcast the dropout
            decisions. Saves memory.

    Returns:
        [batch, heads, height, width, depth] tensor, the output of attention.
    """
    if max_relative_position:
        raise ValueError("Relative local 2d attention not implemented")
    with tf.variable_scope(
            name,
            default_name="dot_product_unmasked_attention_local_2d_tpu",
            values=[q, k, v]):
        # This calculation only works for self attention.
        # q, k and v must therefore have the same shape.
        q.get_shape().assert_is_compatible_with(k.get_shape())
        q.get_shape().assert_is_compatible_with(v.get_shape())
        orig_q_shape = common_layers.shape_list(q)
        # Pad query, key, value to ensure multiple of corresponding lengths.
        # The memory flange is half a query block on each side.
        memory_flange = [int(query_shape[0] // 2), int(query_shape[1] // 2)]
        q = pad_to_multiple_2d(q, query_shape)
        k = pad_to_multiple_2d(k, query_shape)
        v = pad_to_multiple_2d(v, query_shape)
        q_shape = common_layers.shape_list(q)
        (height, width) = (q_shape[2], q_shape[3])
        _, num_heads, height, width, depth_k = common_layers.shape_list(k)
        depth_v = common_layers.shape_list(v)[-1]
        num_h_blocks = height // query_shape[0]
        num_w_blocks = width // query_shape[1]
        # Extract center queries, keys, and values
        q = tf.reshape(q, [-1, height, width, depth_k])
        queries = _extract_blocks(q, query_shape[0], query_shape[1])
        k = tf.reshape(k, [-1, height, width, depth_k])
        keys = get_2d_local_memory_v2(k, query_shape, memory_flange)
        v = tf.reshape(v, [-1, height, width, depth_v])
        values = get_2d_local_memory_v2(v, query_shape, memory_flange)
        # Each memory block is the query block plus the flange on each side.
        memory_h = query_shape[0] + 2 * memory_flange[0]
        memory_w = query_shape[1] + 2 * memory_flange[1]
        # Flatten the per-block spatial dims so attention is a plain matmul.
        queries = tf.reshape(queries,
                             [-1, num_heads, num_h_blocks, num_w_blocks,
                              query_shape[0] * query_shape[1], depth_k])
        keys = tf.reshape(keys,
                          [-1, num_heads, num_h_blocks, num_w_blocks,
                           memory_h * memory_w, depth_k])
        values = tf.reshape(values,
                            [-1, num_heads, num_h_blocks, num_w_blocks,
                             memory_h * memory_w, depth_v])
        logits = tf.matmul(queries, keys, transpose_b=True)
        if bias is not None:
            logits += bias
        weights = tf.nn.softmax(logits, name="attention_weights")
        # Dropping out the attention links for each of the heads
        weights = common_layers.dropout_with_broadcast_dims(
            weights, 1.0 - dropout_rate,
            broadcast_dims=dropout_broadcast_dims)
        if common_layers.should_generate_summaries() and make_image_summary:
            attention_image_summary(weights, image_shapes)
        ret = tf.matmul(weights, values)
        # we need to get it back to shape [batch, heads, height, width]
        ret = tf.reshape(ret,
                         [-1, num_heads, num_h_blocks, num_w_blocks,
                          query_shape[0], query_shape[1], depth_v])
        ret = tf.transpose(ret, [0, 1, 2, 4, 3, 5, 6])
        ret = tf.reshape(ret,
                         [-1, num_heads, num_h_blocks * query_shape[0],
                          num_w_blocks * query_shape[1], depth_v])
        # slice if padding was introduced
        ret = tf.slice(ret, [0, 0, 0, 0, 0],
                       [-1, -1, orig_q_shape[2], orig_q_shape[3], -1])
        return ret
def _initialize_processes(self, target, num_workers, description):
    # type: (_MultiprocessOffload, function, int, str) -> None
    """Initialize worker processes.

    :param _MultiprocessOffload self: this
    :param function target: target function for process
    :param int num_workers: number of worker processes
    :param str description: description
    :raises ValueError: if num_workers is None or less than 1
    """
    if num_workers is None or num_workers < 1:
        raise ValueError('invalid num_workers: {}'.format(num_workers))
    # BUG FIX: the original conditional was `... if not None else ''`,
    # which is always true, so a None description crashed the concat.
    logger.debug('initializing {}{} processes'.format(
        num_workers, ' ' + description if description else ''))
    for _ in range(num_workers):
        proc = multiprocessing.Process(target=target)
        proc.start()
        self._procs.append(proc)
def markdown_cell(markdown):
    r"""Render *markdown* as a json-formatted ipython notebook markdown cell.

    Args:
        markdown (str):

    Returns:
        str: json formatted ipython notebook markdown cell

    CommandLine:
        python -m ibeis.templates.generate_notebook --exec-markdown_cell

    Example:
        >>> # DISABLE_DOCTEST
        >>> from ibeis.templates.generate_notebook import *  # NOQA
        >>> markdown = '# Title'
        >>> result = markdown_cell(markdown)
        >>> print(result)
    """
    import utool as ut
    # ut.codeblock dedents the triple-quoted template.
    markdown_header = ut.codeblock(
        '''
        {
        "cell_type": "markdown",
        "metadata": {},
        "source": [
        '''
    )
    markdown_footer = ut.codeblock(
        '''
        ]
        }
        '''
    )
    # Body is the repr of the markdown text, indented two spaces to sit
    # inside the "source" list.
    return (markdown_header + '\n' +
            ut.indent(repr_single_for_md(markdown), ' ' * 2) +
            '\n' + markdown_footer)
def expr_order_key(expr):
    """A default order key for arbitrary expressions."""
    # Expressions may carry their own precomputed key.
    if hasattr(expr, '_order_key'):
        return expr._order_key
    try:
        kwargs = expr.kwargs
        if isinstance(kwargs, OrderedDict):
            # Already ordered: keep insertion order.
            key_vals = kwargs.values()
        else:
            # Otherwise order deterministically by sorted key name.
            key_vals = [kwargs[key] for key in sorted(kwargs)]
        parts = (expr.__class__.__name__,)
        parts += tuple(map(expr_order_key, expr.args))
        parts += tuple(map(expr_order_key, key_vals))
        return KeyTuple(parts)
    except AttributeError:
        # Not an expression object: fall back to its string form.
        return str(expr)
def match_reg(self, reg):
    """Match the given compiled regular expression object at the current
    text position.

    If a match occurs, advance the current text position and update the
    matched line/column bookkeeping.

    :param reg: compiled regex object
    :return: the match object, or None if there was no match
    """
    mp = self.match_position
    match = reg.match(self.text, self.match_position)
    if match:
        (start, end) = match.span()
        if end == start:
            # zero-width match: advance one char to guarantee progress
            self.match_position = end + 1
        else:
            self.match_position = end
        self.matched_lineno = self.lineno
        # count newlines consumed by this match to advance self.lineno
        lines = re.findall(r"\n", self.text[mp:self.match_position])
        # scan backwards from just before the match to the previous
        # newline, to compute the match's column position
        cp = mp - 1
        while (cp >= 0 and cp < self.textlength and self.text[cp] != '\n'):
            cp -= 1
        self.matched_charpos = mp - cp
        self.lineno += len(lines)
    return match
def build_node(type, name, content):
    """Wrap up content in to a html node.

    :param type: content type (e.g., doc, section, text, figure)
    :type type: str
    :param name: content name (e.g., the name of the section)
    :type name: str
    :param content: actual content
    :type content: str
    :return: new String with content in html format (None for an
        unrecognized type, matching the original fall-through)
    """
    rendered = {
        "doc": f"<html>{content}</html>",
        "section": f"<section name='{name}'>{content}</section>",
        "text": f"<p name='{name}'>{content}</p>",
        "figure": f"<img name='{name}' src='{content}'/>",
    }
    return rendered.get(type)
def _assign_curtailment(curtailment, edisgo, generators, curtailment_key):
    """Helper function to write curtailment time series to generator objects.

    This function also writes a list of the curtailed generators to
    curtailment in :class:`edisgo.grid.network.TimeSeries` and
    :class:`edisgo.grid.network.Results`.

    Parameters
    ----------
    curtailment : :pandas:`pandas.DataFrame<dataframe>`
        Dataframe containing the curtailment in kW per generator and time
        step for all generators of the type (and in weather cell) specified
        in `curtailment_key` parameter. Index is a
        :pandas:`pandas.DatetimeIndex<datetimeindex>`, columns are the
        generator representatives.
    edisgo : :class:`edisgo.grid.network.EDisGo`
    generators : :pandas:`pandas.DataFrame<dataframe>`
        Dataframe with all generators of the type (and in weather cell)
        specified in `curtailment_key` parameter. See return value of
        :func:`edisgo.grid.tools.get_gen_info` for more information.
    curtailment_key : :obj:`str` or :obj:`tuple` with :obj:`str`
        The technology and weather cell ID if :obj:`tuple` or only
        the technology if :obj:`str` the curtailment is specified for.
    """
    gen_object_list = []
    for gen in curtailment.columns:
        # get generator object from representative
        # NOTE(review): assumes generators' index holds the generator
        # objects and gen_repr values are unique — confirm upstream.
        gen_object = generators.loc[generators.gen_repr == gen].index[0]
        # assign curtailment time series to the individual generator
        gen_object.curtailment = curtailment.loc[:, gen]
        gen_object_list.append(gen_object)
    # set timeseries.curtailment (extend if it already exists)
    if edisgo.network.timeseries._curtailment:
        edisgo.network.timeseries._curtailment.extend(gen_object_list)
        edisgo.network.results._curtailment[curtailment_key] = gen_object_list
    else:
        edisgo.network.timeseries._curtailment = gen_object_list
        # list needs to be copied, otherwise it will be extended every time
        # a new key is added to results._curtailment
        edisgo.network.results._curtailment = {
            curtailment_key: gen_object_list.copy()}
def add_arrow_coord(self, line, arrow_height, arrow_width, recess):
    """Determine the coordinates of an arrow head polygon
    with height (h) and width (w) and recess (r)
    pointing from the one but last to the last point of (poly)line (line).

    Note that the coordinates of an SvgLine and an SvgPolyline
    are stored in different variables.
    """
    # arrow = SvgPolygon(_maxlen=4)
    # (xe, ye) = arrow tip / line end point; (xp, yp) = previous point.
    if line.type == 'polyline':
        xe = line.coordsX[-1]
        ye = line.coordsY[-1]
        xp = line.coordsX[-2]
        yp = line.coordsY[-2]
    else:
        xe = line.attributes['x2']
        ye = line.attributes['y2']
        xp = line.attributes['x1']
        yp = line.attributes['y1']
    h = arrow_height
    # default half-width: one third of the height
    if arrow_width == 0:
        w = arrow_height / 3
    else:
        w = arrow_width
    r = recess
    # tip of the arrow
    self.add_coord(xe, ye)
    # unit direction of the last segment (de = its length)
    # NOTE(review): de == 0 (coincident last two points) would divide by
    # zero below — confirm callers never pass degenerate lines.
    dx = xe - xp
    dy = ye - yp
    de = math.sqrt(dx ** 2 + dy ** 2)
    # (xh, yh): base point of the head, h back along the line
    xh = xe - h * dx / de
    yh = ye - h * dy / de
    # first barb: offset perpendicular to the line by w
    x1 = xh + w * dy / de
    y1 = yh - w * dx / de
    self.add_coord(x1, y1)
    # recess point between the barbs, (h - r) back from the tip
    x2 = xe - (h - r) * dx / de
    y2 = ye - (h - r) * dy / de
    self.add_coord(x2, y2)
    # second barb, mirrored across the line
    x3 = xh - w * dy / de
    y3 = yh + w * dx / de
    self.add_coord(x3, y3)
def l2traceroute_result_output_reason(self, **kwargs):
    """Auto Generated Code.

    Builds <l2traceroute_result><output><reason> XML carrying
    kwargs['reason'] and passes it to the callback
    (kwargs['callback'], defaulting to self._callback).
    """
    # The original built a throwaway ET.Element("config") that was
    # immediately overwritten; it has been removed.
    config = ET.Element("l2traceroute_result")
    output = ET.SubElement(config, "output")
    reason = ET.SubElement(output, "reason")
    reason.text = kwargs.pop('reason')
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
def _set_fallback(elements, src_field, fallback, dest_field=None):
    """Helper function used to set the fallback attributes of an element when
    they are defined by the configuration as "None" or "-"."""
    if dest_field is None:
        dest_field = src_field
    # A string fallback names another element to take the values from.
    if isinstance(fallback, six.string_types):
        fallback = elements[fallback]
    attrs = elements[src_field]
    # Per-position: keep the attribute when set, otherwise use the fallback.
    # fallback[i] is only evaluated when attrs[i] is None (same laziness as
    # the original explicit 3-tuple).
    elements[dest_field] = tuple(
        attrs[i] if attrs[i] is not None else fallback[i] for i in range(3))
def infer_module_name(filename, fspath):
    """Convert a python filename to a module relative to pythonpath."""
    stem, _ = os.path.splitext(filename)
    init_suffix = os.path.sep + "__init__"
    for entry in fspath:
        rel = entry.relative_path(stem)
        if rel:
            # The module name for __init__.py files is the directory.
            if rel.endswith(init_suffix):
                rel = rel[:rel.rfind(os.path.sep)]
            return rel.replace(os.path.sep, '.')
    # We have not found filename relative to anywhere in pythonpath.
    return ''
def get_children_as_time_series(self, json_children, time_field='entity.occurred'):
    """Aggregate child records into a time series of counts.

    For each child, the timestamp is looked up via ``self.get_time`` using
    the dotted *time_field* path (e.g. 'entity.occurred' selects
    child['entity']['occurred']), and identical timestamps are counted.

    :param json_children: iterable of child records
    :param time_field: dotted path to the timestamp field; a pre-split
        list/tuple is also tolerated for legacy callers
    :return: list of {'time': <timestamp>, 'value': <count>} dicts

    BUG FIX: the default used to be the *list* ['entity', 'occurred'],
    which crashed immediately on ``.strip()``; it is now the equivalent
    dotted string (and a list default was also a shared mutable default).
    """
    result = []
    if isinstance(time_field, (list, tuple)):
        # legacy callers may pass the path already split
        time_field_as_path_list = list(time_field)
    else:
        time_field_as_path_list = time_field.strip().split(".")
    if len(time_field_as_path_list) == 0:
        return result
    # count how many children share each timestamp
    result_dict = {}
    for child in json_children:
        # NOTE(review): child[0] (not child) is passed to get_time, as in
        # the original — confirm the child records are wrapped in lists.
        time = self.get_time(child[0], time_field_as_path_list)
        if time in result_dict:
            result_dict[time] += 1
        else:
            result_dict[time] = 1
    # turn lookup dict (result_dict) into final list
    for key, value in result_dict.items():
        result.append({'time': key, 'value': value})
    return result
def not_(self, *query_expressions):
    '''Add a $not expression to the query, negating the query expressions
    given.

    **Examples**: ``query.not_(SomeDocClass.age <= 18)`` becomes
    ``{'age': {'$not': {'$gt': 18}}}``

    :param query_expressions: Instances of
        :class:`ommongo.query_expression.QueryExpression`
    '''
    for expression in query_expressions:
        negated = expression.not_()
        self.filter(negated)
    return self
def get_eip_address_info(addresses=None, allocation_ids=None, region=None,
                         key=None, keyid=None, profile=None):
    '''Get 'interesting' info about some, or all EIPs associated with the
    current account.

    addresses
        (list) - Optional list of addresses. If provided, only the addresses
        associated with those in the list will be returned.
    allocation_ids
        (list) - Optional list of allocation IDs. If provided, only the
        addresses associated with the given allocation IDs will be returned.

    returns
        (list of dicts) - A list of dicts, each containing the info for one
        of the requested EIPs.

    CLI Example:

    .. code-block:: bash

        salt-call boto_ec2.get_eip_address_info addresses=52.4.2.15

    .. versionadded:: 2016.3.0
    '''
    # isinstance handles str subclasses and is the idiomatic check; the
    # original `type(x) == type('string')` comparison did not.
    if isinstance(addresses, str):
        addresses = [addresses]
    if isinstance(allocation_ids, str):
        allocation_ids = [allocation_ids]
    ret = _get_all_eip_addresses(addresses=addresses,
                                 allocation_ids=allocation_ids,
                                 region=region, key=key, keyid=keyid,
                                 profile=profile)
    interesting = ['allocation_id', 'association_id', 'domain', 'instance_id',
                   'network_interface_id', 'network_interface_owner_id',
                   'public_ip', 'private_ip_address']
    return [{attr: getattr(address, attr) for attr in interesting}
            for address in ret]
def get_attrs(self):
    """Retrieve our attributes from the stored HDF attrs node."""
    raw = self.attrs
    self.encoding = _ensure_encoding(getattr(raw, 'encoding', None))
    self.errors = _ensure_decoded(getattr(raw, 'errors', 'strict'))
    # copy every declared attribute onto self, decoding as needed
    for attr_name in self.attributes:
        setattr(self, attr_name,
                _ensure_decoded(getattr(raw, attr_name, None)))
def _refresh_buckets_cache_file(creds, cache_file, multiple_env, environment,
                                prefix):
    '''Retrieve the content of all buckets and cache the metadata to the
    buckets cache file.

    :param creds: S3 credential/connection bundle (key, keyid, bucket, ...)
    :param cache_file: path of the pickle file to (re)write
    :param multiple_env: if True, top-level bucket dirs are saltenvs
    :param environment: saltenv to file everything under when multiple_env
        is False
    :param prefix: S3 key prefix to restrict the listing
    :return: the metadata dict that was written to the cache file
    '''
    # helper s3 query function
    def __get_s3_meta():
        return __utils__['s3.query'](
            key=creds.key, keyid=creds.keyid, kms_keyid=creds.kms_keyid,
            bucket=creds.bucket, service_url=creds.service_url,
            verify_ssl=creds.verify_ssl, location=creds.location,
            return_bin=False, params={'prefix': prefix},
            path_style=creds.path_style, https_enable=creds.https_enable)

    # grab only the files/dirs in the bucket
    def __get_pillar_files_from_s3_meta(s3_meta):
        return [k for k in s3_meta if 'Key' in k]

    # pull out the environment dirs (e.g. the root dirs)
    def __get_pillar_environments(files):
        environments = [(os.path.dirname(k['Key']).split('/', 1))[0]
                        for k in files]
        return set(environments)

    log.debug('Refreshing S3 buckets pillar cache file')
    metadata = {}
    bucket = creds.bucket
    if not multiple_env:
        # Single environment per bucket
        log.debug('Single environment per bucket mode')
        bucket_files = {}
        s3_meta = __get_s3_meta()
        # s3 query returned something
        if s3_meta:
            bucket_files[bucket] = __get_pillar_files_from_s3_meta(s3_meta)
            metadata[environment] = bucket_files
    else:
        # Multiple environments per buckets
        log.debug('Multiple environment per bucket mode')
        s3_meta = __get_s3_meta()
        # s3 query returned data
        if s3_meta:
            files = __get_pillar_files_from_s3_meta(s3_meta)
            environments = __get_pillar_environments(files)
            # pull out the files for the environment
            for saltenv in environments:
                # grab only files/dirs that match this saltenv.
                env_files = [k for k in files
                             if k['Key'].startswith(saltenv)]
                if saltenv not in metadata:
                    metadata[saltenv] = {}
                if bucket not in metadata[saltenv]:
                    metadata[saltenv][bucket] = []
                metadata[saltenv][bucket] += env_files
    # write the metadata to disk (replace any stale cache file first)
    if os.path.isfile(cache_file):
        os.remove(cache_file)
    log.debug('Writing S3 buckets pillar cache file')
    with salt.utils.files.fopen(cache_file, 'w') as fp_:
        pickle.dump(metadata, fp_)
    return metadata
def resource_url(self):
    """str: Root URL for IBM Streams REST API"""
    # Lazily resolve and memoize the URL on first access.
    if not self._resource_url:
        self._resource_url = st.get_rest_api()
    return self._resource_url
def get_attr(self, symbol, attrname):
    """Helper for getting symbol extension attributes"""
    # Look up this extension's attribute bucket on the symbol, then the
    # requested attribute; either lookup may miss, yielding None.
    ext_attrs = symbol.extension_attributes.get(self.extension_name, {})
    return ext_attrs.get(attrname, None)
def fetch(self, is_dl_forced=False):
    """Fetch Flybase data.

    Pulls the configured tables and canned SQL queries from the public
    Flybase postgres server, fetches a subset of the feature table, the
    human-disease-model file, and any plain file resources, then records
    the dataset version.

    :param is_dl_forced: force a re-download even if cached files exist
    :return: None
    """
    # create the connection details for Flybase
    cxn = {'host': 'chado.flybase.org', 'database': 'flybase', 'port': 5432,
           'user': 'flybase', 'password': 'no password'}
    self.dataset.setFileAccessUrl(
        ''.join(('jdbc:postgresql://', cxn['host'], ':', str(cxn['port']),
                 '/', cxn['database'])), is_object_literal=True)
    # process the tables
    # self.fetch_from_pgdb(self.tables, cxn, 100)  # for testing
    self.fetch_from_pgdb(self.tables, cxn, None, is_dl_forced)
    for query_map in self.resources:
        query_path = os.path.join(os.path.dirname(__file__), query_map['query'])
        # Bug fix: use a context manager so the query file handle is closed;
        # the original opened it and never closed it (resource leak).
        with open(query_path, 'r') as query_fh:
            query = query_fh.read()
        self.fetch_query_from_pgdb(query_map['outfile'], query, None, cxn)
    # we want to fetch the features,
    # but just a subset to reduce the processing time
    self.fetch_query_from_pgdb('feature', self.querys['feature'], None, cxn, None, is_dl_forced)
    self._get_human_models_file()
    self.get_files(False)
    self.dataset.set_version_by_num(self.version_num)
    return
def strip_accents(text):
    """Return *text* with accents removed.

    The string is decomposed to NFD so each accented character becomes a
    base character followed by combining marks (category 'Mn'), which are
    then dropped.
    """
    decomposed = unicodedata.normalize('NFD', text)
    return ''.join(ch for ch in decomposed if unicodedata.category(ch) != 'Mn')
def clean(self):
    '''Check to make sure password fields match.'''
    data = super(PasswordForm, self).clean()
    confirm = data.get('confirm_password', None)
    # The new password must match its confirmation...
    if 'new_password' in data and data['new_password'] != confirm:
        raise ValidationError(_('Passwords do not match.'))
    # ...and must actually differ from the current password.
    if confirm == data.get('current_password', None):
        raise ValidationError(_('Old password and new password must be different'))
    return data
def _filter_plans ( attr , name , plans ) :
'''Helper to return list of usage plan items matching the given attribute value .''' | return [ plan for plan in plans if plan [ attr ] == name ] |
def do_list(self, args):
    """List all connected resources."""
    try:
        resources = self.resource_manager.list_resources_info()
    except Exception as exc:
        # Best-effort CLI command: report the failure instead of crashing.
        print(exc)
    else:
        self.resources = []
        for idx, (resource_name, info) in enumerate(resources.items()):
            if not args:
                print('({0:2d}) {1}'.format(idx, resource_name))
                if info.alias:
                    print(' alias: {}'.format(info.alias))
            self.resources.append((resource_name, info.alias or None))
def get_address(name, hash, db, target=None):
    '''fetches the contract address of deployment
    :param hash: the contract file hash
    :return: (string) address of the contract
             error, if any'''
    record, err = db.get(DB.pkey([EZO.DEPLOYED, name, target, hash]))
    if err:
        return None, err
    if not record:
        # no deployment recorded for this hash/target
        return None, None
    return record['address'].lower(), None
def run_car_t_validity_assessment(job, rsem_files, univ_options, reports_options):
    """A wrapper for assess_car_t_validity.

    :param dict rsem_files: Results from running rsem
    :param dict univ_options: Dict of universal options used by almost all tools
    :param dict reports_options: Options specific to reporting modules
    :return: The results of running assess_car_t_validity
    :rtype: toil.fileStore.FileID
    """
    child = job.addChildJobFn(assess_car_t_validity,
                              rsem_files['rsem.genes.results'],
                              univ_options, reports_options)
    return child.rv()
def references(self):
    '''List of URLs of external links on a page.
    May include external links within page that aren't technically cited anywhere.'''
    # Compute once and cache on the instance.
    if not getattr(self, '_references', False):
        links = self.__continued_query({'prop': 'extlinks', 'ellimit': 'max'})
        self._references = [
            link['*'] if link['*'].startswith('http') else 'http:' + link['*']
            for link in links
        ]
    return self._references
def remove_callback(self, name, before=None, after=None):
    """Remove a beforeback, and afterback pair from this Spectator

    If ``before`` and ``after`` are None then all callbacks for
    the given method will be removed. Otherwise, only the exact
    callback pair will be removed.

    Parameters
    ----------
    name : str or list/tuple of str
        The name(s) of the method the callback pair is associated with.
    before : None or callable
        The beforeback that was originally registered to the given method.
    after : None or callable
        The afterback that was originally registered to the given method.

    Raises
    ------
    KeyError
        If ``before`` and ``after`` are both None and ``name`` has no callbacks.
    ValueError
        If the exact ``(before, after)`` pair is not registered for ``name``.
    """
    if isinstance(name, (list, tuple)):
        # Recurse for each method name (fixed: no longer shadows ``name``).
        for method_name in name:
            self.remove_callback(method_name, before, after)
    elif before is None and after is None:
        del self._callback_registry[name]
    else:
        # Bug fix: the original inserted an empty list into the registry for
        # an unknown name before ``list.remove`` raised ValueError, leaving
        # a stale empty entry behind.  Look up without mutating instead.
        callback_list = self._callback_registry.get(name, [])
        callback_list.remove((before, after))  # ValueError if pair unknown
        if not callback_list:  # cleanup if all callbacks are gone
            self._callback_registry.pop(name, None)
def times(x, y):
    """Decorator factory: call the wrapped function a random number of
    times between x and y (inclusive, per ``random.randint``).

    The wrapped function's return values are discarded; the wrapper
    returns None.
    """
    def decorator(fn):
        def wrapped(*args, **kwargs):
            n = random.randint(x, y)
            # Bug fix: the original used range(1, n), which only executed
            # the function n - 1 times (and 0 times when n == 1).
            for _ in range(n):
                fn(*args, **kwargs)
        return wrapped
    return decorator
def get_large_image(self, page=1):
    """Downloads and returns the large sized image of a single page.

    The page kwarg specifies which page to return. One is the default.
    """
    return self._get_url(self.get_large_image_url(page=page))
def drawtree(self):
    '''Loop over the object, process path attribute sets, and drawlines based
    on their current contents.'''
    self.win.erase()
    self.line = 0  # screen row currently being drawn
    for child, depth in self.traverse():
        # Push the current view state down into each node before drawing it.
        child.curline = self.curline
        child.picked = self.picked
        child.expanded = self.expanded
        child.sized = self.sized
        if depth == 0:
            # skip the root itself; only descendants are rendered
            continue
        if self.line == self.curline:
            # Highlight the cursor row and remember its name/children for the
            # header/footer drawn after the loop.
            self.color.curline(child.name, child.picked)
            children = child.children
            name = child.name
        else:
            self.color.default(child.name, child.picked)
        if child.name in self.sized and not self.sized[child.name]:
            # Lazily compute and cache the disk-usage annotation.
            self.sized[child.name] = " [" + du(child.name) + "]"
        child.drawline(depth, self.line, self.win)
        self.line += 1
    self.win.refresh()
    # NOTE(review): ``name`` and ``children`` are only bound when some drawn
    # row matched ``self.curline``; if the cursor is out of range this raises
    # NameError -- confirm curline is always clamped by the caller.
    self.mkheader(name)
    self.mkfooter(name, children)
def _energy_distance_from_distance_matrices ( distance_xx , distance_yy , distance_xy ) :
"""Compute energy distance with precalculated distance matrices .""" | return ( 2 * np . mean ( distance_xy ) - np . mean ( distance_xx ) - np . mean ( distance_yy ) ) |
def load(self, _path, regs=None, meta=None, values=None, full_load=False, file_extension="gdm"):
    """Parses and loads the data into instance attributes.

    The indexes of the data and meta dataframes should be the same.

    :param _path: The path to the dataset on the filesystem
    :param regs: the regions that are to be analyzed
        (defaults to ['chr', 'left', 'right', 'strand'])
    :param meta: the meta-data that are to be analyzed (defaults to all-none)
    :param values: the values that are to be selected (defaults to none)
    :param full_load: Specifies the method of parsing the data. If False then
        the parser omits the parsing of zero (0) values in order to speed up
        and save memory; those zeros are re-inserted when the matrix is built
        (unless a row contains all zero columns). This parsing is strongly
        recommended for sparse datasets. If True, all zero data are read.
    :param file_extension: extension of the region data files

    Note: the original signature used mutable list defaults, which Python
    shares across calls; the None sentinels below preserve the same
    effective defaults without that hazard.
    """
    if regs is None:
        regs = ['chr', 'left', 'right', 'strand']
    if meta is None:
        meta = []
    if values is None:
        values = []
    if not full_load:
        warnings.warn("\n\nYou are using the optimized loading technique. "
                      "All-zero rows are not going to be loaded into memory. "
                      "To load all the data please set the full_load parameter equal to True.")
    p = Parser(_path)
    self.meta = p.parse_meta(meta)
    self.data = p.parse_data(regs, values, full_load=full_load, extension=file_extension)
    self._path = _path
def getConfig(self, key):
    """Get a Config Value"""
    # getattr with a default collapses the hasattr/getattr pair into one
    # lookup; missing keys yield False, matching the original contract.
    return getattr(self, key, False)
def WriteClientSnapshot(self, snapshot, cursor=None):
    """Write new client snapshot.

    Inserts the snapshot and its startup info into the two history tables
    and refreshes the denormalized summary columns on the ``clients`` row.

    Raises:
      db.UnknownClientError: if the client row does not exist (the inserts
        violate the foreign key on client_id).
    """
    insert_history_query = (
        "INSERT INTO client_snapshot_history(client_id, timestamp, "
        "client_snapshot) VALUES (%s, FROM_UNIXTIME(%s), %s)")
    insert_startup_query = (
        "INSERT INTO client_startup_history(client_id, timestamp, "
        "startup_info) VALUES(%s, FROM_UNIXTIME(%s), %s)")
    now = rdfvalue.RDFDatetime.Now()
    client_platform = snapshot.knowledge_base.os
    current_timestamp = mysql_utils.RDFDatetimeToTimestamp(now)
    # Denormalized summary values mirrored onto the clients row.
    client_info = {
        "last_snapshot_timestamp": current_timestamp,
        "last_startup_timestamp": current_timestamp,
        "last_version_string": snapshot.GetGRRVersionString(),
        "last_platform_release": snapshot.Uname(),
    }
    update_clauses = [
        "last_snapshot_timestamp = FROM_UNIXTIME(%(last_snapshot_timestamp)s)",
        "last_startup_timestamp = FROM_UNIXTIME(%(last_startup_timestamp)s)",
        "last_version_string = %(last_version_string)s",
        "last_platform_release = %(last_platform_release)s",
    ]
    if client_platform:
        # Only overwrite the stored platform when the snapshot knows it.
        client_info["last_platform"] = client_platform
        update_clauses.append("last_platform = %(last_platform)s")
    update_query = (
        "UPDATE clients SET {} WHERE client_id = %(client_id)s".format(
            ", ".join(update_clauses)))
    int_client_id = db_utils.ClientIDToInt(snapshot.client_id)
    client_info["client_id"] = int_client_id
    # Serialize the snapshot WITHOUT its startup info (stored in its own
    # table); the attribute is restored in the finally block even on error.
    startup_info = snapshot.startup_info
    snapshot.startup_info = None
    try:
        cursor.execute(insert_history_query,
                       (int_client_id, current_timestamp, snapshot.SerializeToString()))
        cursor.execute(insert_startup_query,
                       (int_client_id, current_timestamp, startup_info.SerializeToString()))
        cursor.execute(update_query, client_info)
    except MySQLdb.IntegrityError as e:
        # Foreign-key violation: the client row is missing.
        raise db.UnknownClientError(snapshot.client_id, cause=e)
    finally:
        snapshot.startup_info = startup_info
def serialize(self, q):
    """Serialize a Q object into a (possibly nested) dict.

    Nested Q children are serialized recursively; leaf children (e.g.
    field/value tuples) are kept as-is.
    """
    children = []
    for child in q.children:
        if isinstance(child, Q):
            children.append(self.serialize(child))
        else:
            children.append(child)
    # Bug fix: ``q.__dict__`` is the live attribute dict of the Q object --
    # the original wrote 'children' straight into it, mutating the Q being
    # serialized.  Work on a shallow copy instead.
    serialized = q.__dict__.copy()
    serialized['children'] = children
    return serialized
def from_domain(cls, domain, *args, **kwargs):
    """Try to download the hive file from the domain using the defined
    beekeeper spec of domain/api/hive.json."""
    version = kwargs.pop('version', None)
    require_https = kwargs.pop('require_https', False)
    hive = Hive.from_domain(domain, version, require_https)
    return cls(hive, *args, **kwargs)
def libvlc_media_player_set_video_title_display(p_mi, position, timeout):
    '''Set if, and how, the video title will be shown when media is played.
    @param p_mi: the media player.
    @param position: position at which to display the title, or libvlc_position_disable to prevent the title from being displayed.
    @param timeout: title display timeout in milliseconds (ignored if libvlc_position_disable).
    @version: libVLC 2.1.0 or later.
    '''
    # Fetch the cached ctypes binding, creating it on first use.  The
    # ((1,), (1,), (1,)) flags mark all three parameters as plain input
    # arguments; the C function returns void.
    f = _Cfunctions.get('libvlc_media_player_set_video_title_display', None) or \
        _Cfunction('libvlc_media_player_set_video_title_display',
                   ((1,), (1,), (1,),), None,
                   None, MediaPlayer, Position, ctypes.c_int)
    return f(p_mi, position, timeout)
def multisplit(s, seps=string.punctuation + string.whitespace, blank=True):
    r"""Just like str.split(), except that a variety (string or iterable)
    of separators is allowed.

    >>> multisplit(r'1-2?3,;.4+-', string.punctuation)
    ['1', '2', '3', '', '', '4', '', '']
    >>> multisplit(r'1-2?3,;.4+-', string.punctuation, blank=False)
    ['1', '2', '3', '4']

    Fixes over the original implementation:
    - the default was a mutable list (shared across calls);
    - separator characters with code points above 255 were silently
      ignored, because the hand-built 256-entry translation table could
      not represent them.  ``str.maketrans`` handles any character.
    """
    seps = str().join(seps)
    # Map every separator to the first one, then split on that single char.
    table = str.maketrans({c: seps[0] for c in seps})
    return [part for part in s.translate(table).split(seps[0]) if (blank or part)]
def set_item(self, index, new_item):
    """Changes item at index in collection. Emit dataChanged signal.

    :param index: Number of row or index of cell
    :param new_item: Dict-like object
    """
    # Accept either a QModelIndex-like object or a bare row number.
    if hasattr(index, "row"):
        row = index.row()
    else:
        row = index
    self.collection[row] = new_item
    first = self.index(row, 0)
    last = self.index(row, self.rowCount() - 1)
    self.dataChanged.emit(first, last)
def businesstime_hours(self, d1, d2):
    """Returns a datetime.timedelta of business hours between d1 and d2,
    based on the length of the businessday"""
    hours_per_day = self.open_hours.seconds / 3600
    delta = self.businesstimedelta(d1, d2)
    # Whole business days count as hours_per_day hours each; the seconds
    # component covers the partial day.
    total_hours = delta.days * hours_per_day + delta.seconds / 3600
    return datetime.timedelta(hours=total_hours)
def format_time(start, end):
    """Returns string with relevant time information formatted properly"""
    try:
        cpu_usr = end[0] - start[0]
        cpu_sys = end[1] - start[1]
    except TypeError:
        # `clock()[1] == None` so subtraction results in a TypeError
        return 'Time elapsed: {}'.format(human_time(cpu_usr))
    else:
        formatted = (human_time(t) for t in (cpu_usr, cpu_sys, cpu_usr + cpu_sys))
        return 'Time elapsed: user: {}, sys: {}, total: {}'.format(*formatted)
def add_double_proxy_for(self, label: str, shape: Collection[int] = None) -> Vertex:
    """Creates a proxy vertex for the given label and adds to the sequence item"""
    unwrapped_label = _VertexLabel(label).unwrap()
    if shape is not None:
        java_vertex = self.unwrap().addDoubleProxyFor(unwrapped_label, shape)
    else:
        java_vertex = self.unwrap().addDoubleProxyFor(unwrapped_label)
    return Vertex._from_java_vertex(java_vertex)
def remove_name(self):
    """Removes the name (short_description node) from the metadata node, if present.

    :return: True if the node is removed. False if the node is not present.
    """
    node = self.metadata.find('short_description')
    if node is None:
        return False
    self.metadata.remove(node)
    return True
def traverse_parents(self, visit, *args, **kwargs):
    """Traverse the hierarchy upwards, visiting all parents and their children except self.
    See "visitor pattern" in literature. This is implemented in pre-order fashion.

    Example:
        parents = []
        self.traverse_parents(parents.append)
        print parents
    """
    if self.has_parent():
        # Flag this node before ascending -- presumably the parent's
        # traverse() skips subtrees whose root is marked visited, which is
        # what excludes ``self`` and its children (confirm in traverse()).
        # Note the double underscore: the attribute is name-mangled to the
        # defining class.
        self.__visited = True
        # Recurse to the top first (pre-order), then let each parent visit
        # its children on the way back down.
        self._parent_.traverse_parents(visit, *args, **kwargs)
        self._parent_.traverse(visit, *args, **kwargs)
        self.__visited = False
def get_absolute_url(self):
    """Get model url"""
    opts = self._meta
    return reverse('trionyx:model-view',
                   kwargs={'app': opts.app_label, 'model': opts.model_name, 'pk': self.id})
def _get_conversion_factor ( old_units , new_units , dtype ) :
"""Get the conversion factor between two units of equivalent dimensions . This
is the number you multiply data by to convert from values in ` old _ units ` to
values in ` new _ units ` .
Parameters
old _ units : str or Unit object
The current units .
new _ units : str or Unit object
The units we want .
dtype : NumPy dtype
The dtype of the conversion factor
Returns
conversion _ factor : float
` old _ units / new _ units `
offset : float or None
Offset between the old unit and new unit .""" | if old_units . dimensions != new_units . dimensions :
raise UnitConversionError ( old_units , old_units . dimensions , new_units , new_units . dimensions )
ratio = old_units . base_value / new_units . base_value
if old_units . base_offset == 0 and new_units . base_offset == 0 :
return ( ratio , None )
else : # the dimensions are the same , so both are temperatures , where
# it ' s legal to convert units so no need to do error checking
return ratio , ratio * old_units . base_offset - new_units . base_offset |
def setup_array_pars(self):
    """main entry point for setting up array multipler parameters

    For every multiplier array produced by ``prep_mlt_arrays()`` this
    writes a PEST template (.tpl) file -- constant, grid, or zone style
    depending on the file's suffix -- collects the resulting parameter
    dataframes into ``self.par_dfs``, runs the pilot-point / grid / KL
    follow-up preparation where applicable, writes ``arr_pars.csv`` and
    all-ones test multiplier arrays, and finally test-runs
    ``apply_array_pars()`` inside the model workspace.
    """
    mlt_df = self.prep_mlt_arrays()
    if mlt_df is None:
        return
    # template file name is the multiplier file's basename + ".tpl"
    mlt_df.loc[:, "tpl_file"] = mlt_df.mlt_file.apply(lambda x: os.path.split(x)[-1] + ".tpl")
    # mlt_df.loc[mlt_df.tpl_file.apply(lambda x: pd.notnull(x.pp_file)), "tpl_file"] = np.NaN
    mlt_files = mlt_df.mlt_file.unique()
    # for suffix, tpl_file, layer, name in zip(self.mlt_df.suffix,
    #     self.mlt_df.tpl, self.mlt_df.layer, self.mlt_df.prefix):
    par_dfs = {}
    for mlt_file in mlt_files:
        # Each multiplier file must map to exactly one suffix, tpl file,
        # prefix and attribute name; anything else is a setup error.
        suffixes = mlt_df.loc[mlt_df.mlt_file == mlt_file, "suffix"]
        if suffixes.unique().shape[0] != 1:
            self.logger.lraise("wrong number of suffixes for {0}".format(mlt_file))
        suffix = suffixes.iloc[0]
        tpl_files = mlt_df.loc[mlt_df.mlt_file == mlt_file, "tpl_file"]
        if tpl_files.unique().shape[0] != 1:
            self.logger.lraise("wrong number of tpl_files for {0}".format(mlt_file))
        tpl_file = tpl_files.iloc[0]
        layers = mlt_df.loc[mlt_df.mlt_file == mlt_file, "layer"]
        # if layers.unique().shape[0] != 1:
        #     self.logger.lraise("wrong number of layers for {0}".format(mlt_file))
        layer = layers.iloc[0]
        names = mlt_df.loc[mlt_df.mlt_file == mlt_file, "prefix"]
        if names.unique().shape[0] != 1:
            self.logger.lraise("wrong number of names for {0}".format(mlt_file))
        name = names.iloc[0]
        attr_names = mlt_df.loc[mlt_df.mlt_file == mlt_file, "attr_name"]
        if attr_names.unique().shape[0] != 1:
            self.logger.lraise("wrong number of attr_names for {0}".format(mlt_file))
        attr_name = attr_names.iloc[0]
        # ib = self.k_zone_dict[layer]
        df = None
        if suffix == self.cn_suffix:
            # one constant multiplier parameter for the whole array
            self.log("writing const tpl:{0}".format(tpl_file))
            # df = self.write_const_tpl(name, tpl_file, self.m.bas6.ibound[layer].array)
            try:
                df = write_const_tpl(name, os.path.join(self.m.model_ws, tpl_file),
                                     self.cn_suffix, self.m.bas6.ibound[layer].array,
                                     (self.m.nrow, self.m.ncol), self.m.sr)
            except Exception as e:
                self.logger.lraise("error writing const template: {0}".format(str(e)))
            self.log("writing const tpl:{0}".format(tpl_file))
        elif suffix == self.gr_suffix:
            # one parameter per active model cell
            self.log("writing grid tpl:{0}".format(tpl_file))
            # df = self.write_grid_tpl(name, tpl_file, self.m.bas6.ibound[layer].array)
            try:
                df = write_grid_tpl(name, os.path.join(self.m.model_ws, tpl_file),
                                    self.gr_suffix, self.m.bas6.ibound[layer].array,
                                    (self.m.nrow, self.m.ncol), self.m.sr)
            except Exception as e:
                self.logger.lraise("error writing grid template: {0}".format(str(e)))
            self.log("writing grid tpl:{0}".format(tpl_file))
        elif suffix == self.zn_suffix:
            # one parameter per zone, using either a per-attribute zone dict
            # (dict of dicts) or a single shared zone dict
            self.log("writing zone tpl:{0}".format(tpl_file))
            if np.all([isinstance(v, dict) for v in self.k_zone_dict.values()]):
                # check is dict of dicts
                if attr_name in [p.split('.')[-1] for p in self.k_zone_dict.keys()]:
                    k_zone_dict = next(k_dict for p, k_dict in self.k_zone_dict.items()
                                       if p.split('.')[-1] == attr_name)
                    # get dict relating to parameter prefix
                else:
                    # NOTE(review): this message references ``k_zone_dict``
                    # before it is assigned on this path -- if the assert
                    # fires it raises NameError instead; confirm intent.
                    assert 'general_zn' in self.k_zone_dict.keys(), \
                        "Neither {0} nor 'general_zn' are in k_zone_dict keys: {1}".format(attr_name, k_zone_dict.keys())
                    k_zone_dict = self.k_zone_dict['general_zn']
            else:
                k_zone_dict = self.k_zone_dict
            # df = self.write_zone_tpl(self.m, name, tpl_file, self.k_zone_dict[layer], self.zn_suffix, self.logger)
            try:
                df = write_zone_tpl(name, os.path.join(self.m.model_ws, tpl_file),
                                    self.zn_suffix, k_zone_dict[layer],
                                    (self.m.nrow, self.m.ncol), self.m.sr)
            except Exception as e:
                self.logger.lraise("error writing zone template: {0}".format(str(e)))
            self.log("writing zone tpl:{0}".format(tpl_file))
        if df is None:
            continue
        if suffix not in par_dfs:
            par_dfs[suffix] = [df]
        else:
            par_dfs[suffix].append(df)
    # one combined parameter dataframe per suffix type
    for suf, dfs in par_dfs.items():
        self.par_dfs[suf] = pd.concat(dfs)
    if self.pp_suffix in mlt_df.suffix.values:
        self.log("setting up pilot point process")
        self.pp_prep(mlt_df)
        self.log("setting up pilot point process")
    if self.gr_suffix in mlt_df.suffix.values:
        self.log("setting up grid process")
        self.grid_prep()
        self.log("setting up grid process")
    if self.kl_suffix in mlt_df.suffix.values:
        self.log("setting up kl process")
        self.kl_prep(mlt_df)
        self.log("setting up kl process")
    mlt_df.to_csv(os.path.join(self.m.model_ws, "arr_pars.csv"))
    # write all-ones multiplier arrays so apply_array_pars() can be test-run
    ones = np.ones((self.m.nrow, self.m.ncol))
    for mlt_file in mlt_df.mlt_file.unique():
        self.log("save test mlt array {0}".format(mlt_file))
        np.savetxt(os.path.join(self.m.model_ws, mlt_file), ones, fmt="%15.6E")
        self.log("save test mlt array {0}".format(mlt_file))
        tpl_files = mlt_df.loc[mlt_df.mlt_file == mlt_file, "tpl_file"]
        if tpl_files.unique().shape[0] != 1:
            self.logger.lraise("wrong number of tpl_files for {0}".format(mlt_file))
        tpl_file = tpl_files.iloc[0]
        if pd.notnull(tpl_file):
            self.tpl_files.append(tpl_file)
            self.in_files.append(mlt_file)
    # for tpl_file, mlt_file in zip(mlt_df.tpl_file, mlt_df.mlt_file):
    #     if pd.isnull(tpl_file):
    #         continue
    #     self.tpl_files.append(tpl_file)
    #     self.in_files.append(mlt_file)
    # smoke-test the forward-run helper inside the model workspace
    os.chdir(self.m.model_ws)
    try:
        apply_array_pars()
    except Exception as e:
        os.chdir("..")
        self.logger.lraise("error test running apply_array_pars():{0}".format(str(e)))
    os.chdir("..")
    line = "pyemu.helpers.apply_array_pars()\n"
    self.logger.statement("forward_run line:{0}".format(line))
    self.frun_pre_lines.append(line)
def _set_get_vnetwork_vms(self, v, load=False):
    """Setter method for get_vnetwork_vms, mapped from YANG variable /brocade_vswitch_rpc/get_vnetwork_vms (rpc)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_get_vnetwork_vms is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_get_vnetwork_vms() directly.

    YANG Description: Shows discovered VMs

    NOTE: auto-generated pyangbind binding code -- do not hand-edit.
    """
    # Coerce wrapped values back to their underlying type before validation.
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        t = YANGDynClass(v, base=get_vnetwork_vms.get_vnetwork_vms, is_leaf=True,
                         yang_name="get-vnetwork-vms", rest_name="get-vnetwork-vms",
                         parent=self, path_helper=self._path_helper,
                         extmethods=self._extmethods, register_paths=False,
                         extensions={u'tailf-common': {u'hidden': u'rpccmd', u'actionpoint': u'vm-name'}},
                         namespace='urn:brocade.com:mgmt:brocade-vswitch',
                         defining_module='brocade-vswitch', yang_type='rpc', is_config=True)
    except (TypeError, ValueError):
        # Re-raise with the generated-type description so callers can see
        # what the value should have looked like.
        raise ValueError({
            'error-string': """get_vnetwork_vms must be of a type compatible with rpc""",
            'defined-type': "rpc",
            'generated-type': """YANGDynClass(base=get_vnetwork_vms.get_vnetwork_vms, is_leaf=True, yang_name="get-vnetwork-vms", rest_name="get-vnetwork-vms", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions={u'tailf-common': {u'hidden': u'rpccmd', u'actionpoint': u'vm-name'}}, namespace='urn:brocade.com:mgmt:brocade-vswitch', defining_module='brocade-vswitch', yang_type='rpc', is_config=True)""",
        })
    self.__get_vnetwork_vms = t
    if hasattr(self, '_set'):
        self._set()
def _mul8(ins):
    """Multiplies 2 last values from the stack.

    Optimizations:
      * If any of the ops is ZERO,
        then do A = 0 ==> XOR A, cause A * 0 = 0 * A = 0
      * If any of the ops is ONE, do NOTHING
        A * 1 = 1 * A = A
    """
    op1, op2 = tuple(ins.quad[2:])
    if _int_ops(op1, op2) is not None:
        # One operand is a known integer constant; _int_ops puts it in op2.
        op1, op2 = _int_ops(op1, op2)
        output = _8bit_oper(op1)
        if op2 == 1:  # A * 1 = 1 * A = A -> nothing to do
            output.append('push af')
            return output
        if op2 == 0:  # A * 0 = 0 -> clear A
            output.append('xor a')
            output.append('push af')
            return output
        if op2 == 2:  # A * 2 == A SLA 1
            output.append('add a, a')
            output.append('push af')
            return output
        if op2 == 4:  # A * 4 == A SLA 2
            output.append('add a, a')
            output.append('add a, a')
            output.append('push af')
            return output
        # Immediate: load the constant into H for the runtime multiply.
        output.append('ld h, %i' % int8(op2))
    else:
        if op2[0] == '_':  # stack optimization
            op1, op2 = op2, op1
        output = _8bit_oper(op1, op2)
    # Fall through: generic 8-bit multiply in the runtime library.
    output.append('call __MUL8_FAST')
    output.append('push af')
    REQUIRES.add('mul8.asm')
    return output
def store_data_blob(self, hexdata, wifs, change_address=None, txouts=None, fee=10000, lock_time=0, dust_limit=common.DUST_LIMIT):
    """Create, fund and publish a transaction carrying *hexdata*.

    Builds a raw transaction (with the given outputs and lock time),
    embeds the data blob, funds it from the given WIF keys, then
    publishes it and returns publish()'s result.
    """
    tx = self.create_tx(txouts=txouts, lock_time=lock_time)
    tx = self.add_data_blob(tx, hexdata, dust_limit=dust_limit)
    tx = self.add_inputs(tx, wifs, change_address=change_address, fee=fee)
    return self.publish(tx)
def as_iterable(iterable_or_scalar):
    """Utility for converting an object to an iterable.

    Returns the empty tuple for None, a 1-tuple for scalars and strings
    (strings are deliberately treated as scalars here), and the object
    itself if it already supports iteration.

    NOTE(review): the previous docstring claimed dicts are wrapped in a
    1-tuple, but dicts define ``__iter__`` and are returned unchanged by
    this code -- documented actual behavior here.

    Examples
    --------
    >>> as_iterable(None)
    ()
    >>> as_iterable(1)
    (1,)
    >>> as_iterable([1, 2, 3])
    [1, 2, 3]
    >>> as_iterable("my string")
    ('my string',)
    """
    if iterable_or_scalar is None:
        return ()
    if isinstance(iterable_or_scalar, string_types):
        return (iterable_or_scalar,)
    if hasattr(iterable_or_scalar, "__iter__"):
        return iterable_or_scalar
    return (iterable_or_scalar,)
def get_records_with_attachments(attachment_table, rel_object_field="REL_OBJECTID"):
    """returns a list of ObjectIDs for rows in the attachment table"""
    if not arcpyFound:
        raise Exception("ArcPy is required to use this function")
    oids = []
    with arcpy.da.SearchCursor(attachment_table, [rel_object_field]) as rows:
        for row in rows:
            oid = "%s" % str(row[0])
            # preserve first-seen order while de-duplicating
            if oid not in oids:
                oids.append(oid)
    return oids
def encode_max_apdu_length_accepted(arg):
    """Return the encoding of the highest encodable value less than the
    value of the arg."""
    # Scan the encoding table from the largest value downwards.
    for code in reversed(range(6)):
        if arg >= _max_apdu_length_encoding[code]:
            return code
    raise ValueError("invalid max APDU length accepted: %r" % (arg,))
def get_money_format(self, amount):
    """Format *amount* using the current currency's money format string.

    :type amount: int or float or str
    :rtype: str

    Usage:
        >>> currency = Currency('USD')
        >>> currency.get_money_format(13)
        '$13'
        >>> currency.get_money_format(13.99)
        '$13.99'
    """
    currency_code = self.get_money_currency()
    fmt = self.money_formats[currency_code]['money_format']
    return fmt.format(amount=amount)
def _should_send_property(self, key, value):
    """Check the property lock (property_lock)

    Returns True if the change to ``key`` should be sent to the front-end,
    False when it merely echoes a value the front-end itself just set, or
    when a sync is being held (in which case the key is queued instead).
    """
    to_json = self.trait_metadata(key, 'to_json', self._trait_to_json)
    if key in self._property_lock:
        # Each split is (model_state, buffer_paths, buffers).
        split_value = _remove_buffers({key: to_json(value, self)})
        split_lock = _remove_buffers({key: self._property_lock[key]})
        # A roundtrip conversion through json in the comparison takes care of
        # idiosyncracies of how python data structures map to json, for example
        # tuples get converted to lists.
        if (jsonloads(jsondumps(split_value[0])) == split_lock[0]
                and split_value[1] == split_lock[1]
                and _buffer_list_equal(split_value[2], split_lock[2])):
            # Identical to the locked (front-end supplied) value: don't echo.
            return False
    if self._holding_sync:
        # Defer: remember the key so the held sync sends it later.
        self._states_to_send.add(key)
        return False
    else:
        return True
def download_url(url):
    '''download a URL and return the content

    Returns None (after printing a message) when the URL cannot be opened.
    Note: uses the Python 2 ``urllib2`` module, imported lazily.
    '''
    import urllib2
    try:
        resp = urllib2.urlopen(url)
    except urllib2.URLError:
        print('Error downloading %s' % url)
        return None
    # Cleanup: the original also read resp.info() into an unused local
    # ``headers`` and bound the exception to an unused ``e``.
    return resp.read()
def mse(exp, obs):
    """Mean Squared Error

    :param exp: expected values
    :type exp: list of float
    :param obs: observed values
    :type obs: list of float
    """
    assert len(exp) == len(obs)
    diff = numpy.array(exp) - numpy.array(obs)
    return numpy.mean(diff ** 2)
def prettify(name, blank=" "):
    """Prettify name of path

    :param name: path Name: to edit
    :param blank: default blanks in name
    :return: Prettier name from given one: replace bad chars with good ones
    """
    if name.startswith("."):  # remove leading dot (hidden-file marker)
        name = name[1:]
    for bad_char in BAD_CHARS:
        name = name.replace(bad_char, blank)
    # collapse repeated blanks (project String helper)
    name = String(name).remove_all(blank)
    for i in range(1, len(name) - 2):
        try:
            are_blanks = name[i - 1] == blank and name[i + 1] == blank
            if are_blanks and name[i] in BAD_CHARS:
                # drop a bad char that is isolated between two blanks
                name = name[:i - 1] + name[i + 2:]
        except IndexError:
            # Bug fix: the original used a bare ``except:`` which swallowed
            # every error; only out-of-bounds indexing (the name shrinks
            # while the range is fixed) is expected here.
            pass
    if name.startswith(blank):
        name = name[1:]
    if name.endswith(blank):  # remove ending replacement
        name = name[:-1]
    return name
def _next_page(self):
    """Get the next page in the iterator.

    Wraps the response from the :class:`~google.gax.PageIterator` in a
    :class:`Page` instance and captures some state at each page.

    Returns:
        Optional[Page]: The next page in the iterator or :data:`None` if
        there are no pages left.
    """
    try:
        items = next(self._gax_page_iter)
    except StopIteration:
        return None
    page = Page(self, items, self.item_to_value)
    self.next_page_token = self._gax_page_iter.page_token or None
    return page
def disconnect_handler(remote, *args, **kwargs):
    """Handle unlinking of remote account.

    This default handler will just delete the remote account link. You may
    wish to extend this module to perform clean-up in the remote service
    before removing the link (e.g. removing install webhooks).

    :param remote: The remote application.
    :returns: Redirect response.
    """
    # Anonymous users cannot unlink anything.
    if not current_user.is_authenticated:
        return current_app.login_manager.unauthorized()
    with db.session.begin_nested():
        linked_account = RemoteAccount.get(user_id=current_user.get_id(),
                                           client_id=remote.consumer_key)
        if linked_account:
            linked_account.delete()
    db.session.commit()
    return redirect(url_for('invenio_oauthclient_settings.index'))
async def save(self):
    """Persist local modifications to this interface.

    Normalises ``self._changed_data`` into a minimal update payload
    (tags joined to a comma-separated string, params reduced to a
    per-key diff, vlan collapsed to its ID) and then delegates the
    actual write to the parent class's ``save``.
    """
    # Tags are compared as sets (order-insensitive).  Only send them when
    # they really differ from the original; otherwise drop a stale entry.
    if set(self.tags) != set(self._orig_data['tags']):
        self._changed_data['tags'] = ','.join(self.tags)
    elif 'tags' in self._changed_data:
        del self._changed_data['tags']
    # Normalise both old and new params to dicts so the diff below is
    # safe even when a non-dict placeholder was stored.
    orig_params = self._orig_data['params']
    if not isinstance(orig_params, dict):
        orig_params = {}
    params = self.params
    if not isinstance(params, dict):
        params = {}
    # Replace any whole-value 'params' entry with a key-level diff.
    self._changed_data.pop('params', None)
    self._changed_data.update(calculate_dict_diff(orig_params, params))
    if 'vlan' in self._changed_data and self._changed_data['vlan']:
        # Update uses the ID of the VLAN, not the VLAN object.
        self._changed_data['vlan'] = self._changed_data['vlan']['id']
        if (self._orig_data['vlan'] and 'id' in self._orig_data['vlan'] and
                self._changed_data['vlan'] == (self._orig_data['vlan']['id'])):
            # VLAN didn't really change; the object was just set to the
            # same VLAN, so don't send it.
            del self._changed_data['vlan']
    await super(Interface, self).save()
def generate_digest(self):
    """Build an HTTP Digest ``Authorization`` header value (RFC 2617,
    MD5 algorithm, no qop)."""
    from hashlib import md5

    def _md5_hex(text):
        # All digest components are hashed as UTF-8 hex digests.
        return md5(text.encode('UTF-8')).hexdigest()

    ha1 = _md5_hex(self.username + ':' + self.realm + ':' + self.password)
    ha2 = _md5_hex(self.method + ':' + self.url)
    response = _md5_hex(ha1 + ':' + self.nonce + ':' + ha2)
    fields = [
        'username="' + self.username + '"',
        'realm="' + self.realm + '"',
        'algorithm="MD5"',
        'nonce="' + self.nonce + '"',
        'uri="' + self.url + '"',
        'response="' + response + '"',
    ]
    return 'Digest ' + ', '.join(fields)
def _gcd_array(X):
    """Return the largest real value h such that every element of X is
    an integer multiple of h."""
    # Fold _gcd over the sequence, starting from 0.0 (gcd identity).
    h = 0.0
    for value in X:
        h = _gcd(h, value)
    return h
def _get_vars_to_collections(variables):
    """Return a dict mapping each variable to the names of the graph
    collections it appears in."""
    mapping = collections.defaultdict(list)
    if isinstance(variables, dict):
        variables = [value for _, value in variable_map_items(variables)]
    for graph in {v.graph for v in variables}:
        for collection_name in list(graph.collections):
            tf_variables = {
                entry for entry in graph.get_collection(collection_name)
                if isinstance(entry, tf.Variable)
            }
            # For legacy reasons, tf.GraphKeys.GLOBAL_VARIABLES == "variables".
            # Report it as "global_variables" to avoid confusion.
            if collection_name == tf.GraphKeys.GLOBAL_VARIABLES:
                collection_name = "global_variables"
            for var in tf_variables.intersection(variables):
                mapping[var].append(collection_name)
    return mapping
def _get_count_pagination(self, base, oldest_neighbor, newest_neighbor):
    """Compute (older_view, newer_view) pagination for count-based views.

    Returns ``(None, None)`` when the ordering is neither 'newest' nor
    'oldest'.
    """
    count = self.spec['count']
    page_spec = {**base, 'count': count, 'order': self._order_by}
    if self._order_by == 'newest':
        older = View({**page_spec, 'last': oldest_neighbor}) if oldest_neighbor else None
        # Probe in the opposite order to locate where the newer page ends.
        probe = View({**base, 'first': newest_neighbor, 'order': 'oldest', 'count': count}) if newest_neighbor else None
        newer = View({**page_spec, 'last': probe.last}) if probe else None
        return older, newer
    elif self._order_by == 'oldest':
        probe = View({**base, 'last': oldest_neighbor, 'order': 'newest', 'count': count}) if oldest_neighbor else None
        older = View({**page_spec, 'first': probe.last}) if probe else None
        newer = View({**page_spec, 'first': newest_neighbor}) if newest_neighbor else None
        return older, newer
    return None, None
def map_remove(self, key, mapkey, **kwargs):
    """Remove an item from a map document.

    :param str key: The document ID
    :param str mapkey: The key in the map to remove
    :param kwargs: See :meth:`mutate_in` for options
    :raise: :exc:`IndexError` if the mapkey does not exist
    :raise: :cb_exc:`NotFoundError` if the document does not exist

    .. Remove a map key-value pair::

        cb.map_remove('a_map', 'some_key')

    .. seealso:: :meth:`map_add`
    """
    result = self.mutate_in(key, SD.remove(mapkey), **kwargs)
    return self._wrap_dsop(result)
def encode(self):
    """Encode this SeqCmdAttrs as a single packed byte."""
    encoded = self.default
    # Each table row describes one attribute bit: set the bit iff the
    # attribute is present and equal to the row's "value1" option.
    for bit, name, _value0, value1, _default in SeqCmdAttrs.Table:
        if name in self.attrs:
            encoded = setBit(encoded, bit, self.attrs[name] == value1)
    return struct.pack('B', encoded)
def _list_records_internal ( self , identifier = None , rtype = None , name = None , content = None ) :
"""Lists all records by the specified criteria""" | response = self . _request_get_dns_zone ( )
if 'records' in response : # Interpret empty string as None because zeep does so too
content_check = content if content != "" else None
name_check = self . _relative_name ( name )
# Stringize the identifier to prevent any rtype differences
identifier_check = str ( identifier ) if identifier is not None else None
filtered_records = [ record for record in response [ 'records' ] if ( identifier is None or str ( record [ 'id' ] ) == identifier_check ) and ( rtype is None or record [ 'type' ] == rtype ) and ( name is None or record [ 'name' ] == name_check ) and ( content is None or ( 'content' in record and record [ 'content' ] == content_check ) ) ]
records = [ self . _create_response_record ( filtered_record ) for filtered_record in filtered_records ]
else :
records = [ ]
return records |
def sendrpc(self, argv=[]):
    """Perform one RPC exchange: parse options, resolve the operation
    (prompting until a known one is given), send the request message and
    process the reply.

    :param argv: extra command-line tokens appended to the stored ones.

    NOTE(review): Python 2 code (``print`` statement, ``raw_input``).
    ``argv=[]`` is a mutable default argument; it is only read here
    (``+=`` mutates ``self._aArgv``), but a ``None`` sentinel would be
    safer.  The bare ``except`` below swallows every error, including
    KeyboardInterrupt -- worth narrowing.
    """
    self._aArgv += argv
    _operation = ''
    try:
        self._cParams.parser(self._aArgv, self._dOptions)
        # Set rpc operation handler; keep prompting until the user
        # names a known operation.
        while self._cParams.get('operation') not in rpc.operations.keys():
            self._cParams.set('operation', raw_input("Enter RPC Operation:\n%s:" % rpc.operations.keys()))
        _operation = self._cParams.get('operation')
        self._hRpcOper = rpc.operations[_operation](opts=self._dOptions)
        # Input missing operation parameters.
        self._hRpcOper.fill(params=self._cParams.get())
        send_msg = self._hRpcOper.readmsg(self._cParams.get())
        # Bump the message id so each request is uniquely numbered.
        self._cParams.set('messageid', self._cParams.get('messageid') + 1)
        self._hConn.sendmsg(send_msg)
        self._cParams.set('sendmsg', send_msg)
        recv_msg = self._hConn.recvmsg()
        self._cParams.set('recvmsg', recv_msg)
        self._hRpcOper.parsemsg(self._cParams.get())
        self._hRpcOper.writemsg(self._cParams.get())
        # Reset operation params for the next call.
        self._cParams.reset()
    except:
        # Failures during a close-session are reported silently.
        if _operation != 'close-session':
            print 'BNClient: Call sendrpc%s fail' % (' <' + _operation + '>' if len(_operation) else '')
        sys.exit()
    """end of function exchgmsg"""
def run_canu(self):
    """Assemble with canu (instead of spades) and normalise output names."""
    command = self._make_canu_command(self.outdir, 'canu')
    success, errs = common.syscall(command, verbose=self.verbose, allow_fail=False)
    if not success:
        raise Error('Error running Canu.')
    # Rename canu's outputs to the generic names the pipeline expects.
    Assembler._rename_canu_contigs(
        os.path.join(self.outdir, 'canu.contigs.fasta'),
        os.path.join(self.outdir, 'contigs.fasta'),
    )
    os.rename(
        os.path.join(self.outdir, 'canu.contigs.gfa'),
        os.path.join(self.outdir, 'contigs.gfa'),
    )
def _scalar_from_string(self, value: str,) -> Union[Period, Timestamp, Timedelta, NaTType]:
    """Construct a scalar of this array's scalar type from a string.

    Parameters
    ----------
    value : str

    Returns
    -------
    Period, Timestamp, Timedelta, or NaT
        Whatever the type of ``self._scalar_type`` is.

    Notes
    -----
    Implementations should call ``self._check_compatible_with`` before
    unboxing the result.
    """
    # Abstract: concrete subclasses must override.
    raise AbstractMethodError(self)
def get(self, path, default_value):
    """Return the value of the config item at *path* as a string.

    A leading slash in *path* is optional and ignored.  Delegates to
    ``zconfig_get``; *default_value* is returned when the item is
    absent.
    """
    return lib.zconfig_get(self._as_parameter_, path, default_value)
def get_size(self):
    """Return the size of the tree.

    Returns:
        tuple: (width, height), computed from the bounding rectangle
        (left, top, right, bottom) and truncated to int.
    """
    rect = self.get_rectangle()
    width = rect[2] - rect[0]
    height = rect[3] - rect[1]
    return (int(width), int(height))
def replace_option_set_by_id(cls, option_set_id, option_set, **kwargs):
    """Replace all attributes of an OptionSet.

    Synchronous by default.  Pass ``async=True`` in *kwargs* to make an
    asynchronous HTTP request, in which case the request thread is
    returned instead of the data::

        thread = api.replace_option_set_by_id(option_set_id, option_set, async=True)
        result = thread.get()

    :param str option_set_id: ID of the optionSet to replace (required)
    :param OptionSet option_set: Attributes of the optionSet to replace (required)
    :return: OptionSet, or the request thread when called asynchronously.
    """
    kwargs['_return_http_data_only'] = True
    # Both the sync and async paths return the underlying call's result
    # directly (the async flag is forwarded via kwargs).
    return cls._replace_option_set_by_id_with_http_info(
        option_set_id, option_set, **kwargs)
def two_sat(formula):
    """Solve a 2-SAT boolean formula.

    :param formula: list of clauses; a clause is a pair of literals over
        X1,...,Xn for some n.  A literal is a non-zero integer, for
        example -1 = not X1, 3 = X3.
    :returns: list of n booleans satisfying the formula, or None when
        the formula is unsatisfiable.
    :complexity: linear
    """
    if not formula:
        # Fix: max() over an empty generator raised ValueError here.
        # An empty formula is trivially satisfiable (zero variables).
        return []
    # -- n is the number of variables
    n = max(abs(clause[p]) for p in (0, 1) for clause in formula)
    # Implication graph over the 2n literal vertices.
    graph = [[] for node in range(2 * n)]
    for x, y in formula:                          # clause (x or y)
        graph[_vertex(-x)].append(_vertex(y))     # -x => y
        graph[_vertex(-y)].append(_vertex(x))     # -y => x
    sccp = tarjan(graph)
    comp_id = [None] * (2 * n)    # for each vertex, the ID of its component
    assignment = [None] * (2 * n)
    for component in sccp:
        rep = min(component)      # representative of the component
        for vtx in component:
            comp_id[vtx] = rep
            if assignment[vtx] is None:
                # Assumes tarjan yields SCCs in reverse topological
                # order -- TODO confirm; first-seen literal gets True,
                # its complement (vtx ^ 1) gets False.
                assignment[vtx] = True
                assignment[vtx ^ 1] = False
    for i in range(n):
        if comp_id[2 * i] == comp_id[2 * i + 1]:
            # A variable shares a component with its negation:
            # the formula is unsatisfiable.
            return None
    # Keep one boolean per variable (vertices come in literal pairs).
    return assignment[::2]
def createRect(self, x, y, width, height, rx=None, ry=None, strokewidth=1, stroke='black', fill='none'):
    """Creates a Rectangle.

    @type x: string or int
    @param x: starting x-coordinate
    @type y: string or int
    @param y: starting y-coordinate
    @type width: string or int
    @param width: width of the rectangle
    @type height: string or int
    @param height: height of the rectangle
    @type rx: string or int
    @param rx: x-axis radius of the ellipse used to round off the corners (rounded rectangles)
    @type ry: string or int
    @param ry: y-axis radius of the ellipse used to round off the corners (rounded rectangles)
    @type strokewidth: string or int
    @param strokewidth: width of the pen used to draw
    @type stroke: string (css constant like "black" or numerical value like "#FFFFF")
    @param stroke: color with which to draw the outer limits
    @type fill: string (css constant like "black" or numerical value like "#FFFFF")
    @param fill: color with which to fill the element (default: no filling)
    @return: a rect object
    """
    style = StyleBuilder({'fill': fill, 'stroke-width': strokewidth, 'stroke': stroke})
    rect = Rect(x, y, width, height, rx, ry)
    rect.set_style(style.getStyle())
    return rect
def listify(val, return_type=tuple):
    """Coerce *val* into a container of the given *return_type*.

    ``None`` becomes an empty container, iterables are converted
    element-wise, and any other value is wrapped as a single element.

    Examples:
    >>> listify('abc', return_type=list)
    ['abc']
    >>> listify(None)
    ()
    >>> listify(False)
    (False,)
    >>> listify(('a', 'b', 'c'), return_type=list)
    ['a', 'b', 'c']
    """
    # TODO: flatlistify((1, 2, 3), 4, (5, 6, 7))
    if val is None:
        return return_type()
    if isiterable(val):
        return return_type(val)
    return return_type((val,))
def generate_data(timeseries_length, timeseries_params):
    """Generate synthetic timeseries from the given parameters.

    Each generated timeseries has ``timeseries_length`` data points and
    is the sum of a linear trend, a periodic component and Gaussian
    noise, clipped below at zero.

    Args:
        timeseries_length: Number of data points per timeseries.
        timeseries_params: One dict per timeseries with keys:
            m: slope of the linear trend.
            b: y-intercept of the linear trend.
            A: amplitude of the periodic component.
            freqcoeff: frequency coefficient of the periodic component.
            rndA: standard deviation of the injected Gaussian noise.
            fn: base periodic function (np.cos or np.sin).

    Returns:
        Multi-timeseries (list of lists).
    """
    xs = range(timeseries_length)
    multi_timeseries = []
    for params in timeseries_params:
        trend = [params["m"] * i + params["b"] for i in xs]
        period = [params["A"] * params["fn"](i / params["freqcoeff"]) for i in xs]
        noise = np.random.normal(0, params["rndA"], timeseries_length).tolist()
        # Sum the components, replacing negative values with zero.
        series = [max(t + p + r, 0) for t, p, r in zip(trend, period, noise)]
        multi_timeseries.append(series)
    return multi_timeseries
def is_sortable_index(self, index_name, catalog):
    """Return whether the named catalog index can be used for sorting."""
    index = self.get_index(index_name, catalog)
    # Only field and date indexes support sorting; a missing index is
    # never sortable.
    return bool(index) and index.meta_type in ("FieldIndex", "DateIndex")
def run_sixteens(self):
    """Run the 16S analyses against the filtered database."""
    SixteensFull(
        args=self,
        pipelinecommit=self.commit,
        startingtime=self.starttime,
        scriptpath=self.homepath,
        analysistype='sixteens_full',
        cutoff=0.985,
    )
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.