signature
stringlengths 29
44.1k
| implementation
stringlengths 0
85.2k
|
|---|---|
def bed(args):
    """% prog bed binfile fastafile
    Write bed files where the bases have at least certain depth ."""
    p = OptionParser(bed.__doc__)
    p.add_option("-o", dest="output", default="stdout",
                 help="Output file name [default: %default]")
    p.add_option("--cutoff", dest="cutoff", default=10, type="int",
                 help="Minimum read depth to report intervals [default: %default]")
    opts, args = p.parse_args(args)

    if len(args) != 2:
        sys.exit(not p.print_help())

    binfile, fastafile = args
    fw = must_open(opts.output, "w")
    cutoff = opts.cutoff
    assert cutoff >= 0, "Need non-negative cutoff"

    b = BinFile(binfile)
    ar = b.array
    fastasize, sizes, offsets = get_offsets(fastafile)
    s = Sizes(fastafile)
    for ctg, ctglen in s.iter_sizes():
        offset = offsets[ctg]
        subarray = ar[offset:offset + ctglen]
        # Group consecutive positions by whether their depth passes the cutoff;
        # each passing run becomes one BED interval.
        passes_cutoff = lambda pos_depth: pos_depth[1] >= cutoff
        for passing, run in groupby(enumerate(subarray), key=passes_cutoff):
            if not passing:
                continue
            run = list(run)
            # positions are 0-based; BED wants 0-based start, 1-based end
            start = run[0][0] + 1
            end = run[-1][0] + 1
            mean_depth = int(sum(depth for _, depth in run) / len(run))
            name = "na"
            print("\t".join(str(x) for x in (ctg, start - 1, end, name, mean_depth)), file=fw)
|
def daylight(self, date=None, local=True, use_elevation=True):
    """Calculate the daylight time (the time between sunrise and sunset).

    :param date: The date for which to calculate daylight.
        Defaults to the current date when omitted.
    :type date: :class:`~datetime.date`
    :param local: True = return times in the location's time zone;
        False = return times in UTC. Defaults to local time.
    :type local: bool
    :param use_elevation: True = account for the location's elevation;
        False = ignore elevation. Defaults to True.
    :type use_elevation: bool
    :returns: A tuple containing the start and end times
    :rtype: tuple(:class:`~datetime.datetime`, :class:`~datetime.datetime`)
    """
    if local and self.timezone is None:
        raise ValueError("Local time requested but Location has no timezone set.")

    # Lazily create the astral calculator on first use.
    if self.astral is None:
        self.astral = Astral()

    if date is None:
        date = datetime.date.today()

    observer_elevation = self.elevation if use_elevation else 0
    start, end = self.astral.daylight_utc(
        date, self.latitude, self.longitude, observer_elevation=observer_elevation)

    if not local:
        return start, end
    return start.astimezone(self.tz), end.astimezone(self.tz)
|
def after_request(response):
    """Modify the response object prior to sending it to the client.

    Used to add CORS headers allowing any origin, the Content-Type and
    Authorization request headers, and the common HTTP verbs.

    Args:
        response: Flask response object

    Returns:
        The same response object with CORS headers added. (The original
        docstring incorrectly claimed `None`; Flask's after_request hooks
        must return the response for it to be sent.)
    """
    response.headers.add('Access-Control-Allow-Origin', '*')
    response.headers.add('Access-Control-Allow-Headers', 'Content-Type,Authorization')
    response.headers.add('Access-Control-Allow-Methods', 'GET,PUT,POST,DELETE')
    return response
|
def set_callback(self, func):
    """Set the 'callback' parameter.

    If supplied, the response will use the JSONP format with a callback
    of the given name.

    :param func: A string containing the name of the callback function
    :raises: TwitterSearchException
    """
    # py2/py3 compatible string check; empty strings are rejected too
    string_type = str if py3k else basestring
    if not (isinstance(func, string_type) and func):
        raise TwitterSearchException(1006)
    self.arguments.update({'callback': '%s' % func})
|
def on_server_start(self):
    """Service run loop function.

    Run the desired docker container with parameters and start parsing the
    monitored file for alerts.
    """
    self._container = self._docker_client.containers.run(
        self.docker_image_name, detach=True, **self.docker_params)
    self.signal_ready()

    for line in self.get_lines():
        try:
            parsed = self.parse_line(line)
            if parsed:
                self.add_alert_to_queue(parsed)
        except Exception:
            # keep consuming lines even if one of them fails to parse/enqueue
            self.logger.exception(None)
|
def dms_maker(self, force_rerun=False):
    """Create surface representation (dms file) of receptor.

    Args:
        force_rerun (bool): If method should be rerun even if output file exists

    Raises:
        ValueError: If the receptor PDB file has not been prepared yet.
    """
    log.debug('{}: running surface representation maker...'.format(self.id))

    if not self.receptorpdb_path:
        # Bug fix: the original did `return ValueError(...)`, silently handing
        # callers an exception instance instead of raising it.
        raise ValueError('Please run protein_only_and_noH')

    dms = op.join(self.dock_dir, '{}_receptor.dms'.format(self.id))
    if ssbio.utils.force_rerun(flag=force_rerun, outfile=dms):
        cmd = 'dms {} -n -w 1.4 -o {}'.format(self.receptorpdb_path, dms)
        os.system(cmd)
    self.dms_path = dms

    if ssbio.utils.is_non_zero_file(dms):
        log.debug('{}: successful dms execution'.format(self.dms_path))
    else:
        log.critical('{}: dms_maker failed to run on receptor file'.format(self.receptorpdb_path))
|
def get_tags_users(self, id_):
    """Get the users tagged with the tag identified by ``id_``."""
    url = _TAGS_USERS.format(
        c_api=_C_API_BEGINNING, api=_API_VERSION, id_=id_, at=self.access_token)
    return _get_request(url)
|
def htmlDocDump(self, f):
    """Dump an HTML document to an open FILE ."""
    # thin wrapper over the C-level binding; self._o is the underlying doc pointer
    return libxml2mod.htmlDocDump(f, self._o)
|
def search(self, query, fetch_messages=False, thread_limit=5, message_limit=5):
    """Search for messages in all threads.

    :param query: Text to search for
    :param fetch_messages: Whether to fetch :class:`models.Message` objects or IDs only
    :param thread_limit: Max. number of threads to retrieve
    :param message_limit: Max. number of messages to retrieve
    :type thread_limit: int
    :type message_limit: int
    :return: Dictionary with thread IDs as keys and generators to get messages as values
    :rtype: generator
    :raises: FBchatException if request failed
    """
    payload = {"query": query, "snippetLimit": thread_limit}
    response = self._post(self.req_url.SEARCH_MESSAGES, payload, fix_request=True, as_json=True)
    snippets = response["payload"]["search_snippets"][query]

    # pick the retrieval strategy: full Message objects or bare IDs
    fetch = self.searchForMessages if fetch_messages else self.searchForMessageIDs
    return {tid: fetch(query, limit=message_limit, thread_id=tid) for tid in snippets}
|
def _indent_change(change, out, options, indent):
    """Recursively print indented change descriptions.

    Children are printed one indent level deeper than their parent, and only
    when the parent itself was shown.
    """
    show_unchanged = getattr(options, "show_unchanged", False)
    show_ignored = getattr(options, "show_ignored", False)

    desc = change.get_description()
    shown = False

    if change.is_change():
        if change.is_ignored(options):
            # ignored changes are hidden unless explicitly requested
            if show_ignored:
                shown = True
                _indent(out, indent, desc, " [IGNORED]")
        else:
            shown = True
            _indent(out, indent, desc)
    elif show_unchanged:
        shown = True
        _indent(out, indent, desc)

    if shown:
        for child in change.collect():
            _indent_change(child, out, options, indent + 1)
|
def get_requires(self, ignored=tuple()):
    """The required API, including all external classes, fields, and
    methods that this class references.

    Entries matching any pattern in ``ignored`` are filtered out.
    """
    # compute and memoize the full requirement set on first call
    if self._requires is None:
        self._requires = set(self._get_requires())
    return [req for req in self._requires if not fnmatches(req, *ignored)]
|
def parse(self, data_model, crit):
    """Take the relevant pieces of the data model json
    and parse into data model and criteria map.

    Parameters
    ----------
    data_model : data model piece of json (nested dicts)
    crit : criteria map piece of json (nested dicts)

    Returns
    -------
    data_model : dictionary of DataFrames
    crit_map : DataFrame
    """
    # --- data model: one DataFrame of columns per table ---
    tables = pd.DataFrame(data_model)
    parsed = {}
    for table_name in tables.columns:
        frame = pd.DataFrame(tables[table_name]['columns']).T
        # normalize np.nan placeholders to None
        parsed[table_name] = frame.where(pd.notnull(frame), None)

    # --- criteria map: one row per criterion, keyed by criterion name ---
    crit_map = pd.DataFrame(list(zip(crit.keys(), crit.values())))
    crit_map.index = crit_map[0]
    crit_map.drop(0, axis='columns', inplace=True)
    crit_map.rename({1: 'criteria_map'}, axis='columns', inplace=True)
    crit_map.index.rename("", inplace=True)
    for table_name in ('measurements', 'specimens', 'samples', 'sites',
                       'locations', 'contribution', 'criteria', 'images', 'ages'):
        crit_map.loc[table_name] = np.nan
    return parsed, crit_map
|
def _clear_entity_type_registry ( entity , ** kwargs ) :
"""Clear the given database / collection object ' s type registry ."""
|
codecopts = entity . codec_options . with_options ( type_registry = None )
return entity . with_options ( codec_options = codecopts , ** kwargs )
|
def pdf_to_img(pdf_file, page_num, pdf_dim=None):
    """Convert one page of a pdf file into an image.

    :param pdf_file: path to the pdf file
    :param page_num: page number to convert (index starting at 1)
    :param pdf_dim: optional (width, height); computed from the pdf when omitted
    :return: wand image object
    """
    if not pdf_dim:
        pdf_dim = get_pdf_dim(pdf_file)
    width, height = pdf_dim
    # wand's filename selector uses 0-based page indices
    page_image = Image(filename=f"{pdf_file}[{page_num - 1}]")
    page_image.resize(width, height)
    return page_image
|
def add_to_class(self, model_class):
    """Replace the `Field` attribute with a named `_FieldDescriptor`.

    .. note::
        This method is called during construction of the `Model`.
    """
    descriptor = _FieldDescriptor(self)
    model_class._meta.add_field(self)
    setattr(model_class, self.name, descriptor)
|
def make_confidence_report ( filepath , train_start = TRAIN_START , train_end = TRAIN_END , test_start = TEST_START , test_end = TEST_END , batch_size = BATCH_SIZE , which_set = WHICH_SET , mc_batch_size = MC_BATCH_SIZE , report_path = REPORT_PATH , base_eps_iter = BASE_EPS_ITER , nb_iter = NB_ITER , save_advx = SAVE_ADVX ) :
    """Load a saved model, gather its predictions, and save a confidence report.

    This function works by running a single MaxConfidence attack on each example.
    This provides a reasonable estimate of the true failure rate quickly, so
    long as the model does not suffer from gradient masking.
    However, this estimate is mostly intended for development work and not
    for publication. A more accurate estimate may be obtained by running
    make_confidence_report_bundled.py instead.

    :param filepath: path to model to evaluate
    :param train_start: index of first training set example to use
    :param train_end: index of last training set example to use
    :param test_start: index of first test set example to use
    :param test_end: index of last test set example to use
    :param batch_size: size of evaluation batches
    :param which_set: 'train' or 'test'
    :param mc_batch_size: batch size for MaxConfidence attack
    :param report_path: path to save the report to; defaults to
        `<filepath minus .joblib>_report.joblib` when None
    :param base_eps_iter: step size if the data were in [0, 1]
        (Step size will be rescaled proportional to the actual data range)
    :param nb_iter: Number of iterations of PGD to run per class
    :param save_advx: bool. If True, saves the adversarial examples to disk.
        On by default, but can be turned off to save memory, etc.
    """
    # Set TF random seed to improve reproducibility
    tf . set_random_seed ( 1234 )
    # Set logging level to see debug information
    set_log_level ( logging . INFO )
    # Create TF session
    sess = tf . Session ( )
    # Default report path is derived from the model path.
    if report_path is None :
        assert filepath . endswith ( '.joblib' )
        report_path = filepath [ : - len ( '.joblib' ) ] + "_report.joblib"
    with sess . as_default ( ) :
        model = load ( filepath )
    assert len ( model . get_params ( ) ) > 0
    # Rebuild the dataset the model was trained on, restricted to the
    # requested example index ranges.
    factory = model . dataset_factory
    factory . kwargs [ 'train_start' ] = train_start
    factory . kwargs [ 'train_end' ] = train_end
    factory . kwargs [ 'test_start' ] = test_start
    factory . kwargs [ 'test_end' ] = test_end
    dataset = factory ( )
    # Derive the actual data value range so attack step sizes can be
    # rescaled from the nominal [0, 1] parameterization.
    center = dataset . kwargs [ 'center' ]
    max_val = dataset . kwargs [ 'max_val' ]
    value_range = max_val * ( 1. + center )
    min_value = 0. - center * max_val
    # Dataset-specific epsilon defaults (standard benchmark settings).
    if 'CIFAR' in str ( factory . cls ) :
        base_eps = 8. / 255.
        if base_eps_iter is None :
            base_eps_iter = 2. / 255.
    elif 'MNIST' in str ( factory . cls ) :
        base_eps = .3
        if base_eps_iter is None :
            base_eps_iter = .1
    else :
        raise NotImplementedError ( str ( factory . cls ) )
    mc_params = { 'eps' : base_eps * value_range , 'eps_iter' : base_eps_iter * value_range , 'nb_iter' : nb_iter , 'clip_min' : min_value , 'clip_max' : max_val }
    x_data , y_data = dataset . get_set ( which_set )
    report = ConfidenceReport ( )
    semantic = Semantic ( model , center , max_val , sess )
    mc = MaxConfidence ( model , sess = sess )
    # Each job: (name, attack, attack_params, batch_size override, save advx?)
    jobs = [ ( 'clean' , None , None , None , False ) , ( 'Semantic' , semantic , None , None , False ) , ( 'mc' , mc , mc_params , mc_batch_size , True ) ]
    for job in jobs :
        name , attack , attack_params , job_batch_size , save_this_job = job
        if job_batch_size is None :
            job_batch_size = batch_size
        t1 = time . time ( )
        # NOTE(review): `devices` is not defined in this function — presumably a
        # module-level list of TF device strings; confirm against the module.
        if save_advx and save_this_job : # If we want to save the adversarial examples to the filesystem , we need
            # to fetch all of them . Otherwise they ' re just computed one batch at a
            # time and discarded
            # The path to save to
            assert report_path . endswith ( '.joblib' )
            advx_path = report_path [ : - len ( '.joblib' ) ] + '_advx_' + name + '.npy'
            # Fetch the adversarial examples
            x_data = run_attack ( sess , model , x_data , y_data , attack , attack_params , batch_size = job_batch_size , devices = devices )
            # Turn off the attack so ` correctness _ and _ confidence ` won ' t run it a
            # second time .
            attack = None
            attack_params = None
            # Save the adversarial examples
            np . save ( advx_path , x_data )
        # Run correctness and confidence evaluation on adversarial examples
        packed = correctness_and_confidence ( sess , model , x_data , y_data , batch_size = job_batch_size , devices = devices , attack = attack , attack_params = attack_params )
        t2 = time . time ( )
        print ( "Evaluation took" , t2 - t1 , "seconds" )
        correctness , confidence = packed
        report [ name ] = ConfidenceReportEntry ( correctness = correctness , confidence = confidence )
        print_stats ( correctness , confidence , name )
    save ( report_path , report )
|
def add_features_to_nglview(view, seqprop, structprop, chain_id, use_representatives=False):
    """Add select features from the selected SeqProp object to an NGLWidget view object.

    Currently parsing for:

    * Single residue features (ie. metal binding sites)
    * Disulfide bonds

    Args:
        view (NGLWidget): NGLWidget view object
        seqprop (SeqProp): SeqProp object
        structprop (StructProp): StructProp object
        chain_id (str): ID of the structure's chain to get annotation from
        use_representatives (bool): If representative sequence-to-structure
            mappings should be used (defaults to False, matching the original
            disulfide-bond branch).

    Raises:
        ValueError: If ``chain_id`` is not present in the structure.
    """
    # Bug fix: the original signature was (view, structure_resnums, chain_id) but the
    # body referenced undefined names (seqprop, structprop, use_representatives) and
    # called self.map_seqprop_resnums_to_structprop_resnums on a free function. The
    # signature now matches the documented parameters and the module-level mapper
    # (already used in the disulfide branch) is called throughout.
    # Parse and store chain seq if not already stored
    if not structprop.chains.has_id(chain_id):
        structprop.parse_structure()
        if not structprop.chains.has_id(chain_id):
            raise ValueError('Chain {} not present in structure {}'.format(chain_id, structprop.id))
    if not seqprop.features:
        log.warning('{}: no stored features'.format(seqprop.id))
    # Loop through any stored features
    for f in seqprop.features:
        # Display disulfide bonds
        if f.type.lower() == 'disulfide bond':
            # TODO: double check if .start or .start + 1
            disulfide = map_seqprop_resnums_to_structprop_resnums(
                resnums=[f.location.start + 1, f.location.end], seqprop=seqprop,
                structprop=structprop, chain_id=chain_id, use_representatives=False)
            to_view = [str(x) + '.CA' for x in list(disulfide.values())]
            view.add_distance(atom_pair=[to_view], color='black')
            log.info('Disulfide bridge at residues {} & {}'.format(f.location.start + 1, f.location.end))
        # Display DNA-binding regions
        if f.type.lower() == 'dna-binding region' or f.type.lower() == 'nucleotide phosphate-binding region':
            impres = map_seqprop_resnums_to_structprop_resnums(
                resnums=[f.location.start + 1, f.location.end], seqprop=seqprop,
                structprop=structprop, chain_id=chain_id, use_representatives=use_representatives)
            # TODO: need to check if f.location.start was mapped and if not, try incrementing.
            # Or input the list of resnums, not just the start and end.
            if f.location.start + 1 in impres and f.location.end in impres:
                mapped_start = impres[f.location.start + 1]
                mapped_end = impres[f.location.end]
                view.add_ball_and_stick(selection=':{} and ( {}-{} )'.format(chain_id, mapped_start, mapped_end), color='black')
                log.info('{} at sequence region {}-{}, structure residues {}-{}'.format(f.type, f.location.start, f.location.end, mapped_start, mapped_end))
        # Display other single residues
        if f.location.end - 1 == f.location.start:
            if f.type.lower() == 'sequence variant' or f.type.lower() == 'mutagenesis site':
                continue
            impres = map_seqprop_resnums_to_structprop_resnums(
                resnums=f.location.end, seqprop=seqprop, structprop=structprop,
                chain_id=chain_id, use_representatives=use_representatives)
            if f.location.end in impres:
                impres_mapped = impres[f.location.end]
                view.add_ball_and_stick(selection=str(impres_mapped), color='black')
                view.add_label(selection=':{} and {}'.format(chain_id, impres_mapped), label_type='res', color='black')
                log.info('{} at sequence residue {}, structure residue {}'.format(f.type, f.location.end, impres_mapped))
|
def _set_precedence ( self , v , load = False ) :
    """Setter method for precedence, mapped from YANG variable
    /routing_system/route_map/content/precedence (list).

    If this variable is read-only (config: false) in the source YANG file,
    then _set_precedence is considered a private method. Backends looking
    to populate this variable should do so via calling
    thisObj._set_precedence() directly.

    YANG Description: Set values.

    :param v: the new value; either an already-typed YANG object or a plain
        Python value to be coerced into the list type below
    :param load: flag passed by generated bindings when loading from
        stored configuration (unused here beyond the standard signature)
    :raises ValueError: if ``v`` cannot be coerced into the YANG list type
    """
    # If v carries its original YANG type constructor, re-wrap it so the
    # coercion below starts from the canonical representation.
    if hasattr ( v , "_utype" ) :
        v = v . _utype ( v )
    # Coerce into the generated list type; TypeError/ValueError means the
    # value is incompatible with the YANG schema.
    try :
        t = YANGDynClass ( v , base = YANGListType ( "precedence_value" , precedence . precedence , yang_name = "precedence" , rest_name = "precedence" , parent = self , is_container = 'list' , user_ordered = False , path_helper = self . _path_helper , yang_keys = 'precedence-value' , extensions = { u'tailf-common' : { u'info' : u'Set values.' , u'cli-no-key-completion' : None , u'cli-suppress-mode' : None , u'cli-suppress-list-no' : None , u'cli-compact-syntax' : None , u'cli-sequence-commands' : None , u'cli-suppress-key-abbreviation' : None , u'cli-incomplete-command' : None , u'callpoint' : u'tvfnexthop-cp' } } ) , is_container = 'list' , yang_name = "precedence" , rest_name = "precedence" , parent = self , path_helper = self . _path_helper , extmethods = self . _extmethods , register_paths = True , extensions = { u'tailf-common' : { u'info' : u'Set values.' , u'cli-no-key-completion' : None , u'cli-suppress-mode' : None , u'cli-suppress-list-no' : None , u'cli-compact-syntax' : None , u'cli-sequence-commands' : None , u'cli-suppress-key-abbreviation' : None , u'cli-incomplete-command' : None , u'callpoint' : u'tvfnexthop-cp' } } , namespace = 'urn:brocade.com:mgmt:brocade-ip-policy' , defining_module = 'brocade-ip-policy' , yang_type = 'list' , is_config = True )
    except ( TypeError , ValueError ) :
        raise ValueError ( { 'error-string' : """precedence must be of a type compatible with list""" , 'defined-type' : "list" , 'generated-type' : """YANGDynClass(base=YANGListType("precedence_value",precedence.precedence, yang_name="precedence", rest_name="precedence", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='precedence-value', extensions={u'tailf-common': {u'info': u'Set values.', u'cli-no-key-completion': None, u'cli-suppress-mode': None, u'cli-suppress-list-no': None, u'cli-compact-syntax': None, u'cli-sequence-commands': None, u'cli-suppress-key-abbreviation': None, u'cli-incomplete-command': None, u'callpoint': u'tvfnexthop-cp'}}), is_container='list', yang_name="precedence", rest_name="precedence", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Set values.', u'cli-no-key-completion': None, u'cli-suppress-mode': None, u'cli-suppress-list-no': None, u'cli-compact-syntax': None, u'cli-sequence-commands': None, u'cli-suppress-key-abbreviation': None, u'cli-incomplete-command': None, u'callpoint': u'tvfnexthop-cp'}}, namespace='urn:brocade.com:mgmt:brocade-ip-policy', defining_module='brocade-ip-policy', yang_type='list', is_config=True)""" , } )
    self . __precedence = t
    # Notify the framework (if wired up) that this object changed.
    if hasattr ( self , '_set' ) :
        self . _set ( )
|
def blend(colour1, colour2):
    r"""Take two :py:class:`Colour`\ s and return the 'average' Colour.

    Args:
        colour1 (colourettu.Colour): a colour
        colour2 (colourettu.Colour): a second colour

    .. note::
        Uses the formula:

        \\[ r_{blended} = \\sqrt \\frac{ r_1^2 + r_2^2 }{ 2 } \\]

        It is shown here for the red channel, but applied independently to
        each of the red, green, and blue channels. A simple average is not
        used because the brightness of the colours is stored on a
        logarithmic scale rather than a linear one. For a fuller
        explanation, Minute Physics has released a great
        `YouTube video <https://youtu.be/LKnqECcg6Gw>`_.

    .. seealso:: :py:func:`Palette.blend`
    """
    # raw docstring is needed so that MathJax will render in generated
    # documentation
    # gamma is the power used for the nonlinear channel mixing
    gamma = 2.0

    def _mix(a, b):
        # root-mean-power blend of a single normalized channel pair
        return math.pow((math.pow(a, gamma) + math.pow(b, gamma)) / 2, 1 / gamma)

    # normalize each channel to [0, 1] before mixing
    channels1 = (colour1.red() / 255., colour1.green() / 255., colour1.blue() / 255.)
    channels2 = (colour2.red() / 255., colour2.green() / 255., colour2.blue() / 255.)
    blended = [_mix(a, b) for a, b in zip(channels1, channels2)]
    return Colour(blended, normalized_rgb=True)
|
def run_fatcat(structure_path_1, structure_path_2, fatcat_sh, outdir='', silent=False, print_cmd=False, force_rerun=False):
    """Run FATCAT on two PDB files, and return the path of the XML result file.

    Args:
        structure_path_1 (str): Path to PDB file
        structure_path_2 (str): Path to PDB file
        fatcat_sh (str): Path to "runFATCAT.sh" executable script
        outdir (str): Path to where FATCAT XML output files will be saved;
            the default empty string writes relative to the current directory
        silent (bool): If stdout should be silenced from showing up in Python console output
        print_cmd (bool): If command to run FATCAT should be printed to stdout
        force_rerun (bool): If FATCAT should be run even if XML output files already exist

    Returns:
        str: Path to XML output file
    """
    filename1 = op.splitext(op.basename(structure_path_1))[0]
    filename2 = op.splitext(op.basename(structure_path_2))[0]

    # Bug fix: with the default outdir='' the original called os.mkdir('')
    # and raised; only create a directory when a non-empty path is given.
    if outdir and not op.exists(outdir):
        os.mkdir(outdir)

    outfile = op.join(outdir, filename1 + '__' + filename2 + '.xml')

    # Run FATCAT on the structures; the tool writes the XML result to outfile
    fatcat_cmd = '{} -file1 {} -file2 {} -outFile {}'.format(fatcat_sh, structure_path_1, structure_path_2, outfile)
    if print_cmd:
        print(fatcat_cmd)

    ssbio.utils.command_runner(fatcat_cmd, force_rerun_flag=force_rerun, outfile_checker=outfile, silent=silent)
    return outfile
|
def db_parse(block_id, txid, vtxindex, op, data, senders, inputs, outputs, fee, db_state=None, **virtualchain_hints):
    """(required by virtualchain state engine)

    Parse a blockstack operation from a transaction. The transaction fields are:

    * `block_id`: the blockchain height at which this transaction occurs
    * `txid`: the transaction ID
    * `vtxindex`: the index of this transaction within the block
    * `op`: the one-character opcode
    * `data`: the scratch area of the transaction that contains the actual
      virtualchain operation (e.g. "id[opcode][payload]")
    * `senders`: a list in 1-to-1 correspondence with `inputs` that contains
      information about what funded the inputs
    * `inputs` / `outputs`: the transaction's inputs and outputs. Some
      blockchains (like Bitcoin) support several of each; others (like
      Ethereum) support only one.
    * `fee`: the transaction fee

    `db_state` is the StateEngine-derived class (a BlockstackDB instance).
    `**virtualchain_hints` must contain `raw_tx`, the hex-encoded raw transaction.

    Returns a dict with the parsed operation on success.
    Returns None on error.
    """
    # basic sanity checks
    if len(senders) == 0:
        raise Exception("No senders given")

    if not check_tx_sender_types(senders, block_id):
        log.warning('Invalid senders for {}'.format(txid))
        return None

    # this virtualchain instance must give the 'raw_tx' hint
    assert 'raw_tx' in virtualchain_hints, 'BUG: incompatible virtualchain: requires raw_tx support'

    # internal sanity check: re-serializing inputs/outputs must reproduce raw_tx
    raw_tx = virtualchain_hints['raw_tx']
    btc_tx_data = virtualchain.btc_tx_deserialize(raw_tx)
    test_btc_tx = virtualchain.btc_tx_serialize({'ins': inputs, 'outs': outputs, 'locktime': btc_tx_data['locktime'], 'version': btc_tx_data['version']})
    assert raw_tx == test_btc_tx, 'TX mismatch: {} != {}'.format(raw_tx, test_btc_tx)

    # make sure each op has all the right fields defined
    try:
        opcode = op_get_opcode_name(op)
        assert opcode is not None, "Unrecognized opcode '%s'" % op
    except Exception as e:
        # Fix: was `except Exception, e` — Python-2-only syntax removed in
        # Python 3; `as e` works on 2.6+ and 3.x alike.
        log.exception(e)
        log.error("Skipping unrecognized opcode")
        return None

    # NOTE(review): data.encode('hex') only works on Python 2 str — this module
    # appears to target py2; confirm before porting.
    log.debug("PARSE %s at (%s, %s): %s" % (opcode, block_id, vtxindex, data.encode('hex')))

    # extract the operation's payload
    op_data = None
    try:
        op_data = op_extract(opcode, data, senders, inputs, outputs, block_id, vtxindex, txid)
    except Exception as e:
        log.exception(e)
        op_data = None

    if op_data is not None:
        # every extracted operation must carry its 'op' field
        try:
            assert 'op' in op_data, 'BUG: missing op'
        except Exception as e:
            log.exception(e)
            log.error("BUG: missing op")
            os.abort()

        # keep a pristine copy before annotating with tx metadata
        original_op_data = copy.deepcopy(op_data)

        # propagate tx data
        op_data['vtxindex'] = int(vtxindex)
        op_data['txid'] = str(txid)
        op_data['__original_op_data__'] = original_op_data
    else:
        log.error("Unparseable op '%s'" % opcode)

    return op_data
|
def df_weighted_average_grouped(dataframe, groupe, varlist):
    '''Aggregate the results of weighted_average_grouped() into a single
    DataFrame for the variables listed in ``varlist``.'''
    return DataFrame({var: collapse(dataframe, groupe, var) for var in varlist})
|
def build_params(self, params=None):
    """Build a params dictionary with the current editId and packageName.

    Optionally merge additional parameters into the resulting dictionary.
    The caller's dict is copied, never mutated.

    Args:
        params (dict | None): extra parameters to merge in; ``editId`` and
            ``packageName`` always win on key collisions.

    Returns:
        dict: the merged parameters.
    """
    # Fix: the original used the mutable default `params={}` — harmless here
    # only because it was copied, but a classic shared-state trap; use None.
    merged = {} if params is None else params.copy()
    merged.update({'editId': self.edit_id, 'packageName': self.package_name})
    return merged
|
def mimeData(self, items):
    """Returns the mime data for dragging for this instance.

    :param items: [<QtGui.QTreeWidgetItem>, ..]
    """
    # a registered data collector takes full control of mime generation
    collector = self.dataCollector()
    if collector:
        return collector(self, items)

    data = super(XTreeWidget, self).mimeData(items)

    # custom drag data is only consulted for a single selected item
    if len(items) != 1:
        return data
    try:
        dragdata = items[0].dragData()
    except AttributeError:
        return data

    if not data:
        data = QtCore.QMimeData()

    urls = []
    for fmt, value in dragdata.items():
        if fmt == 'url':
            urls.append(QtCore.QUrl(value))
        else:
            data.setData(fmt, QtCore.QByteArray(value))
    data.setUrls(urls)
    return data
|
def configure_db(self, hostname, database, username, admin=False):
    """Configure access to database for username from hostname.

    Creates the database when missing, grants the user (admin or plain)
    access if no grant exists yet, and returns the user's password.
    """
    self.connect(password=self.get_mysql_root_password())

    if not self.database_exists(database):
        self.create_database(database)

    remote_ip = self.normalize_address(hostname)
    password = self.get_mysql_password(username)

    if not self.grant_exists(database, username, remote_ip):
        if admin:
            self.create_admin_grant(username, remote_ip, password)
        else:
            self.create_grant(database, username, remote_ip, password)

    self.flush_priviledges()
    return password
|
def commit_buy(self, buy_id, **params):
    """https://developers.coinbase.com/api/v2#commit-a-buy"""
    client = self.api_client
    return client.commit_buy(self.id, buy_id, **params)
|
def add_signature_headers(mail, sigs, error_msg):
    '''Add pseudo headers to the mail indicating whether the signature
    verification was successful.

    :param mail: :class:`email.message.Message` the message to entitle
    :param sigs: list of :class:`gpg.results.Signature`
    :param error_msg: An error message if there is one, or None
    :type error_msg: :class:`str` or `None`
    '''
    assert error_msg is None or isinstance(error_msg, str)

    sig_from = ''
    sig_known = True
    uid_trusted = False

    if not sigs:
        error_msg = error_msg or u'no signature found'
    elif not error_msg:
        try:
            key = crypto.get_key(sigs[0].fpr)
            for uid in key.uids:
                if crypto.check_uid_validity(key, uid.email):
                    sig_from = uid.uid
                    uid_trusted = True
                    break
            else:
                # No trusted uid found, since we did not break from the loop:
                # fall back to the first uid on the key.
                sig_from = key.uids[0].uid
        except GPGProblem:
            # key lookup failed; report the bare fingerprint as unknown
            sig_from = sigs[0].fpr
            sig_known = False

    if error_msg:
        msg = 'Invalid: {}'.format(error_msg)
    elif uid_trusted:
        msg = 'Valid: {}'.format(sig_from)
    else:
        msg = 'Untrusted: {}'.format(sig_from)

    validity = 'False' if (error_msg or not sig_known) else 'True'
    mail.add_header(X_SIGNATURE_VALID_HEADER, validity)
    mail.add_header(X_SIGNATURE_MESSAGE_HEADER, msg)
|
def restrict_args ( func , * args , ** kwargs ) :
'''Restricts the possible arguements to a method to match the func argument .
restrict _ args ( lambda a : a , 1 , 2)'''
|
callargs = getargspec ( func )
if not callargs . varargs :
args = args [ 0 : len ( callargs . args ) ]
return func ( * args , ** kwargs )
|
def _dedent(text, tabsize=8, skip_first_line=False):
    """Dedent ``text`` without expanding tabs to spaces.

    Like ``textwrap.dedent(s)``, except tabs are preserved; ``tabsize`` is
    the tab width used for indent-width calculations. When
    ``skip_first_line`` is True the first line is neither measured nor
    dedented — sometimes useful for docstrings and similar.

    Returns the dedented text.
    """
    # keep line endings so the result can be reassembled losslessly
    lines = text.splitlines(True)
    _dedentlines(lines, tabsize=tabsize, skip_first_line=skip_first_line)
    return ''.join(lines)
|
def _generate_struct_class_properties(self, ns, data_type):
    """Emit a getter/setter/deleter property for each field of a struct.

    :param ns: namespace the struct belongs to; used to resolve the
        Python type name written into the getter's ``:rtype:`` line.
    :param data_type: the struct whose fields are generated.

    The emitted getter returns the stored value when present; otherwise
    ``None`` for nullable fields, the declared default for defaulted
    fields, or raises ``AttributeError``.  The emitted setter validates
    the value being set before storing it.
    """
    for field in data_type.fields:
        field_name = fmt_func(field.name)
        field_name_reserved_check = fmt_func(field.name, check_reserved=True)
        # Unwrap nullable wrappers so documentation and validation refer
        # to the inner (payload) type.
        if is_nullable_type(field.data_type):
            field_dt = field.data_type.data_type
            dt_nullable = True
        else:
            field_dt = field.data_type
            dt_nullable = False
        # generate getter for field
        self.emit('@property')
        self.emit('def {}(self):'.format(field_name_reserved_check))
        with self.indent():
            self.emit('"""')
            if field.doc:
                self.emit_wrapped_text(self.process_doc(field.doc, self._docf))
                # Sphinx wants an extra line between the text and the
                # rtype declaration.
                self.emit()
            self.emit(':rtype: {}'.format(self._python_type_mapping(ns, field_dt)))
            self.emit('"""')
            self.emit('if self._{}_present:'.format(field_name))
            with self.indent():
                self.emit('return self._{}_value'.format(field_name))
            self.emit('else:')
            with self.indent():
                if dt_nullable:
                    self.emit('return None')
                elif field.has_default:
                    self.emit('return {}'.format(
                        self._generate_python_value(ns, field.default)))
                else:
                    self.emit("raise AttributeError(\"missing required field '%s'\")" % field_name)
        self.emit()
        # generate setter for field
        self.emit('@{}.setter'.format(field_name_reserved_check))
        self.emit('def {}(self, val):'.format(field_name_reserved_check))
        with self.indent():
            if dt_nullable:
                # Setting a nullable field to None is the same as deleting it.
                self.emit('if val is None:')
                with self.indent():
                    self.emit('del self.{}'.format(field_name_reserved_check))
                    self.emit('return')
            if is_user_defined_type(field_dt):
                # Composite values are type-checked only; their own fields
                # are validated lazily.
                self.emit('self._%s_validator.validate_type_only(val)' % field_name)
            else:
                self.emit('val = self._{}_validator.validate(val)'.format(field_name))
            self.emit('self._{}_value = val'.format(field_name))
            self.emit('self._{}_present = True'.format(field_name))
        self.emit()
        # generate deleter for field
        self.emit('@{}.deleter'.format(field_name_reserved_check))
        self.emit('def {}(self):'.format(field_name_reserved_check))
        with self.indent():
            self.emit('self._{}_value = None'.format(field_name))
            self.emit('self._{}_present = False'.format(field_name))
        self.emit()
|
def clear_data(self, request):
    """Clear all OAuth related data from the session store.

    :param request: the current request; its ``session`` mapping is
        modified in place.
    """
    # BUG FIX: materialize the key list before deleting.  Deleting from a
    # mapping while iterating over its live key view raises RuntimeError
    # on Python 3 ("dictionary changed size during iteration").
    for key in list(request.session.keys()):
        if key.startswith(constants.SESSION_KEY):
            del request.session[key]
|
def parse_bangrc():
    """Parse ``$HOME/.bangrc`` for global settings and deployer credentials.

    ``.bangrc`` is expected to be a YAML file whose outermost structure
    is a key-value map.  Only the recognized top-level keys (``RC_KEYS``)
    are kept; any other keys present in the file are ignored.

    Returns {} if ``$HOME/.bangrc`` does not exist.

    :rtype: :class:`dict`
    """
    raw = read_raw_bangrc()
    return {key: value for key, value in raw.items() if key in RC_KEYS}
|
def rogers_huff_r(gn):
    """Estimate the linkage disequilibrium parameter *r* for each pair of
    variants using the method of Rogers and Huff (2008).

    Parameters
    ----------
    gn : array_like, int8, shape (n_variants, n_samples)
        Diploid genotypes at biallelic variants, coded as the number of
        alternate alleles per call (0 = hom ref, 1 = het, 2 = hom alt).

    Returns
    -------
    r : ndarray, float, shape (n_variants * (n_variants - 1) // 2,)
        Pairwise *r* values in condensed (squareform) order; a single
        scalar when there is exactly one pair.
    """
    # Validate shape/dtype and make the buffer safe to expose as a
    # memoryview before handing it to the compiled kernel.
    genotypes = asarray_ndim(gn, 2, dtype='i1')
    genotypes = memoryview_safe(genotypes)
    result = gn_pairwise_corrcoef_int8(genotypes)
    # Convenience for singletons: unwrap the lone pairwise value.
    if result.size == 1:
        result = result[0]
    return result
|
def in_venv():
    """:return: True if running from a virtualenv.

    Has to detect the case where the python binary is run directly, so
    VIRTUAL_ENV may not be set.  The result is cached in the module-level
    ``_in_venv`` once determined.
    """
    global _in_venv
    if _in_venv is not None:
        # Cached result from a previous call.
        return _in_venv
    if not (os.path.isfile(ORIG_PREFIX_TXT) or os.path.isfile(PY_VENV_CFG)):
        logger.debug("in_venv no orig_prefix_txt [%s]", ORIG_PREFIX_TXT)
        logger.debug("in_venv no py_venv_cfg [%s]", PY_VENV_CFG)
        # TODO - check this is actually valid!
        _in_venv = False
        return _in_venv
    if 'VIRTUAL_ENV' in os.environ:
        logger.debug("in_venv VIRTUAL_ENV set.")
        _in_venv = True
    else:
        # Find first python in PATH ... if it's not this one,
        # we are in a different python.
        python = basename(sys.executable)
        for p in os.environ['PATH'].split(os.pathsep):
            py_path = join(p, python)
            if isfile(py_path):
                # BUG FIX: the original passed one tuple to a two-slot
                # format string, which makes logging emit a formatting
                # error instead of the message; pass the args separately.
                logger.debug("in_venv py_at [%s] return: %s",
                             py_path, sys.executable != py_path)
                _in_venv = sys.executable != py_path
                break
    # NOTE(review): if no matching python was found on PATH, _in_venv is
    # still None (falsy) here and the result is not cached -- preserved.
    return _in_venv
|
def del_resource(self, service_name, resource_name, base_class=None):
    """Deletes a resource class for a given service.

    Fails silently if no connection is found in the cache.

    :param service_name: The service a given ``Resource`` talks to. Ex.
        ``sqs``, ``sns``, ``dynamodb``, etc.
    :type service_name: string

    :param resource_name: The name of the ``Resource`` entry to delete.
    :type resource_name: string

    :param base_class: (Optional) The base class of the object. Prevents
        "magically" loading the wrong class (one with a different base).
        Default is ``None``.
    :type base_class: class
    """
    # Unlike ``get_resource``, this should be fire & forget.
    # We don't really care, as long as it's not in the cache any longer.
    # Any missing service/resource/classpath key simply means there is
    # nothing to delete.
    try:
        classpath = self.build_classpath(base_class)
        opts = self.services[service_name]['resources'][resource_name]
        del opts[classpath]
    except KeyError:
        pass
|
def get_roles(self):
    """Return the m2m relations connecting me to creators.

    There's some publishing-related complexity here.  The role relations
    (self.creators.through) connect to draft objects, which then need to
    be modified to point to visible() objects.
    """
    ids = self.get_creators().values_list('id', flat=True)
    through = self.creators.through
    return through.objects.filter(
        work=self.get_draft(),
        creator_id__in=ids,
    ).select_related('role')
|
def contractDetails(self, contract_identifier):
    """Return the contract-details dict for a Contract, tickerId or symbol."""
    # Resolve the identifier to a tickerId: Contract objects and symbol
    # strings go through the lookup, numeric ids are used directly.
    if isinstance(contract_identifier, Contract):
        tickerId = self.tickerId(contract_identifier)
    elif str(contract_identifier).isdigit():
        tickerId = contract_identifier
    else:
        tickerId = self.tickerId(contract_identifier)

    # Prefer the public cache, then the private one.
    for cache in (self.contract_details, self._contract_details):
        if tickerId in cache:
            return cache[tickerId]

    # Fall back to a skeleton of default values.
    return {
        'm_category': None,
        'm_contractMonth': '',
        'downloaded': False,
        'm_evMultiplier': 0,
        'm_evRule': None,
        'm_industry': None,
        'm_liquidHours': '',
        'm_longName': '',
        'm_marketName': '',
        'm_minTick': 0.01,
        'm_orderTypes': '',
        'm_priceMagnifier': 0,
        'm_subcategory': None,
        'm_timeZoneId': '',
        'm_tradingHours': '',
        'm_underConId': 0,
        'm_validExchanges': 'SMART',
        'contracts': [Contract()],
        'm_summary': {
            'm_conId': 0,
            'm_currency': 'USD',
            'm_exchange': 'SMART',
            'm_expiry': '',
            'm_includeExpired': False,
            'm_localSymbol': '',
            'm_multiplier': '',
            'm_primaryExch': None,
            'm_right': None,
            'm_secType': '',
            'm_strike': 0.0,
            'm_symbol': '',
            'm_tradingClass': '',
        },
    }
|
def get_division(self, row):
    """Gets the Division object for the given row of election results."""
    level = row["level"]
    # back out of Alaska county
    if level == geography.DivisionLevel.COUNTY and row["statename"] == "Alaska":
        print("Do not take the Alaska county level result")
        return None
    lookup = {"level__name": level}
    name = row["reportingunitname"] or row["statename"]
    # Counties and townships are matched by FIPS code, everything else
    # by name.
    if level in (geography.DivisionLevel.COUNTY, geography.DivisionLevel.TOWNSHIP):
        lookup["code"] = row["fipscode"]
    else:
        lookup["name"] = name
    try:
        return geography.Division.objects.get(**lookup)
    except ObjectDoesNotExist:
        return None
|
def str_fraction(self):
    """Returns the fraction with additional whitespace.

    :returns: e.g. ``  1/1,000`` -- the numerator right-justified to the
        width of the locale-grouped denominator -- or ``None`` when the
        value is undefined.
    """
    if self.undefined:
        return None
    # BUG FIX: locale.format() was deprecated in Python 3.7 and removed
    # in 3.12; locale.format_string() is the drop-in replacement.
    denominator = locale.format_string('%d', self.denominator, grouping=True)
    numerator = self.str_numerator.rjust(len(denominator))
    return '{0}/{1}'.format(numerator, denominator)
|
def insertionpairs(args):
    """%prog insertionpairs endpoints.bed

    Pair up the candidate endpoints. A candidate excision point would contain
    both left-end (LE) and right-end (RE) within a given distance.

    (RE)..... .....(LE)
    """
    p = OptionParser(insertionpairs.__doc__)
    p.add_option("--extend", default=10, type="int",
                 help="Allow insertion sites to match up within distance")
    p.set_outfile()
    opts, args = p.parse_args(args)
    if len(args) != 1:
        sys.exit(not p.print_help())
    bedfile, = args
    # Merge nearby endpoints (within --extend bp), keeping the names of
    # the merged features so each merged record lists its endpoints.
    mergedbedfile = mergeBed(bedfile, d=opts.extend, nms=True)
    bed = Bed(mergedbedfile)
    fw = must_open(opts.outfile, "w")
    # Sort key: most-supported endpoint first (descending read count).
    support = lambda x: -x.reads
    for b in bed:
        names = b.accn.split(",")
        ends = [EndPoint(x) for x in names]
        REs = sorted([x for x in ends if x.leftright == "RE"], key=support)
        LEs = sorted([x for x in ends if x.leftright == "LE"], key=support)
        # A valid excision point needs at least one RE and one LE.
        if not (REs and LEs):
            continue
        mRE, mLE = REs[0], LEs[0]
        pRE, pLE = mRE.position, mLE.position
        # Normalize interval orientation (BED is 0-based half-open).
        if pLE < pRE:
            b.start, b.end = pLE - 1, pRE
        else:
            b.start, b.end = pRE - 1, pLE
        b.accn = "{0}|{1}".format(mRE.label, mLE.label)
        # NOTE(review): score is always pLE - pRE - 1, so it goes negative
        # when the LE precedes the RE -- confirm this sign is intentional.
        b.score = pLE - pRE - 1
        print(b, file=fw)
|
def initialize_worker(self, process_num=None):
    """Prepare this producer for a simulation run on a single process.

    :param process_num: index of the worker process (or None when
        single-process); recorded on the initial state and mixed into
        the RNG seed so every worker draws an independent stream.
    """
    self.random.seed(hash(self.seed) + hash(process_num))
    self.initial_state.process = process_num
|
def expand_factor_conditions(s, env):
    """If *env* matches the expanded factor, return the value part of *s*,
    else return ''.  Strings that do not split into exactly one
    ``factor: value`` pair are returned unchanged.

    Example

    >>> s = 'py{33,34}: docformatter'
    >>> expand_factor_conditions(s, Env(name="py34", ...))
    "docformatter"
    >>> expand_factor_conditions(s, Env(name="py26", ...))
    ''
    """
    parts = re.split(r'\s*\:\s*', s)
    if len(parts) != 2:
        # No single "factor: value" separator -- pass the string through.
        return s
    factor, value = parts
    return value if matches_factor_conditions(factor, env) else ''
|
def add_constraints(self, *args, **kwargs):
    """Add some constraints to the state.

    You may pass in any number of symbolic booleans as variadic
    positional arguments.

    Recognized keyword arguments:

    - ``action``: when constraint tracking is disabled, a truthy value
      still records ``SimActionConstraint`` history entries for the
      symbolic arguments.
    """
    # Guard against the common mistake of passing a collection instead
    # of unpacking it.
    if len(args) > 0 and isinstance(args[0], (list, tuple)):
        raise Exception("Tuple or list passed to add_constraints!")
    if o.TRACK_CONSTRAINTS in self.options and len(args) > 0:
        if o.SIMPLIFY_CONSTRAINTS in self.options:
            constraints = [self.simplify(a) for a in args]
        else:
            constraints = args
        # Let inspection breakpoints observe (and possibly rewrite) the
        # constraint set before it reaches the solver.
        self._inspect('constraints', BP_BEFORE, added_constraints=constraints)
        constraints = self._inspect_getattr("added_constraints", constraints)
        added = self.solver.add(*constraints)
        self._inspect('constraints', BP_AFTER)
        # add actions for the added constraints
        if o.TRACK_CONSTRAINT_ACTIONS in self.options:
            for c in added:
                sac = SimActionConstraint(self, c)
                self.history.add_action(sac)
    else:  # preserve the old action logic for when we don't track constraints (why?)
        if ('action' in kwargs and kwargs['action'] and
                o.TRACK_CONSTRAINT_ACTIONS in self.options and len(args) > 0):
            for arg in args:
                if self.solver.symbolic(arg):
                    sac = SimActionConstraint(self, arg)
                    self.history.add_action(sac)
        if o.ABSTRACT_SOLVER in self.options and len(args) > 0:
            for arg in args:
                # A definitely-false constraint makes the state unsat.
                if self.solver.is_false(arg):
                    self._satisfiable = False
                    return
                if self.solver.is_true(arg):
                    continue
                # `is_true` and `is_false` do not use VSABackend currently (see
                # commits 97a75366 and 2dfba73e in claripy). There is a chance
                # that the VSA backend can in fact handle it, so we try to
                # resolve it with VSABackend again.
                if claripy.backends.vsa.is_false(arg):
                    self._satisfiable = False
                    return
                if claripy.backends.vsa.is_true(arg):
                    continue
                # It's neither True nor False. Let's try to apply the condition:
                # take the argument, extract a list of constrained SIs out of it
                # (if we can), and then replace each original SI with the
                # intersection of the original SI and the constrained one.
                _, converted = self.solver.constraint_to_si(arg)
                for original_expr, constrained_si in converted:
                    if not original_expr.variables:
                        l.error('Incorrect original_expression to replace in add_constraints(). ' 'This is due to defects in VSA logics inside claripy. Please report ' 'to Fish and he will fix it if he\'s free.')
                        continue
                    new_expr = constrained_si
                    self.registers.replace_all(original_expr, new_expr)
                    for _, region in self.memory.regions.items():
                        region.memory.replace_all(original_expr, new_expr)
                l.debug("SimState.add_constraints: Applied to final state.")
        elif o.SYMBOLIC not in self.options and len(args) > 0:
            # Concrete mode: any definitely-false constraint kills the state.
            for arg in args:
                if self.solver.is_false(arg):
                    self._satisfiable = False
                    return
|
def check_earthquake_contour_preprocessor(impact_function):
    """Checker for the contour preprocessor.

    :param impact_function: Impact function to check.
    :type impact_function: ImpactFunction

    :return: If the preprocessor can run.
    :rtype: bool
    """
    hazard = impact_function.hazard
    is_earthquake = hazard.keywords.get('hazard') == hazard_earthquake['key']
    # bool() keeps the documented return type even if is_raster_layer()
    # were to return a truthy non-boolean value.
    return bool(is_earthquake and is_raster_layer(hazard))
|
def _number_zero_start_handler(c, ctx):
    """Handle numeric values that start with zero or negative zero.

    Branches to delegate co-routines according to _ZERO_START_TABLE.
    """
    assert c == _ZERO
    # Only an empty buffer or a lone minus sign may precede this state.
    assert len(ctx.value) == 0 or (len(ctx.value) == 1 and ctx.value[0] == _MINUS)
    ctx.set_ion_type(IonType.INT)
    ctx.value.append(c)
    c, _ = yield
    if _ends_value(c):
        # A bare (possibly negative) zero ends here as an INT scalar.
        trans = ctx.event_transition(IonThunkEvent, IonEventType.SCALAR,
                                     ctx.ion_type, _parse_decimal_int(ctx.value))
        if c == _SLASH:
            # The slash may begin a comment; let the slash-end handler
            # decide before committing the scalar event.
            trans = ctx.immediate_transition(_number_slash_end_handler(c, ctx, trans))
        yield trans
    # Otherwise delegate to the handler selected by the next character
    # (e.g. hex, binary, decimal point, timestamp).
    yield ctx.immediate_transition(_ZERO_START_TABLE[c](c, ctx))
|
def get_valid_actions(name_mapping: Dict[str, str],
                      type_signatures: Dict[str, Type],
                      basic_types: Set[Type],
                      multi_match_mapping: Dict[Type, List[Type]] = None,
                      valid_starting_types: Set[Type] = None,
                      num_nested_lambdas: int = 0) -> Dict[str, List[str]]:
    """Generate all the valid actions starting from each non-terminal.

    For terminals of a specific type, we simply add a production from the
    type to the terminal.  For all terminal *functions*, we additionally
    add a rule that allows their return type to be generated from an
    application of the function.  For example, the function
    ``<e,<r,<d,r>>>``, which takes three arguments and returns an ``r``,
    generates the production rule ``r -> [<e,<r,<d,r>>>, e, r, d]``.

    When ANY_TYPE or placeholder types appear, the ANY_TYPE is substituted
    with every basic type before producing the rule.  For example, the
    identity function ``<#1,#1>`` with basic types ``e`` and ``r`` yields
    ``e -> [<#1,#1>, e]`` and ``r -> [<#1,#1>, r]``.

    We additionally add a valid action from the start symbol to every
    ``valid_starting_types`` entry.

    Parameters
    ----------
    name_mapping : ``Dict[str, str]``
        Mapping of names in your logical form language to their NLTK
        aliases (e.g. the ``COMMON_NAME_MAPPING``).
    type_signatures : ``Dict[str, Type]``
        Mapping from name aliases to their types (e.g. the
        ``COMMON_TYPE_SIGNATURE``).
    basic_types : ``Set[Type]``
        Set of all basic types in the type declaration.
    multi_match_mapping : ``Dict[Type, List[Type]]`` (optional)
        Mapping from ``MultiMatchNamedBasicTypes`` to the types they can
        match in this context; types missing from it fall back to their
        own ``types_to_match``.
    valid_starting_types : ``Set[Type]``, optional
        Valid starting types for the grammar; a "START -> TYPE" rule is
        added for each.  Defaults to ``basic_types``.
    num_nested_lambdas : ``int`` (optional)
        How many nested lambda levels to support; adds rules like
        ``<r,d> -> ['lambda x', d]`` with one variable per nesting level.
    """
    valid_actions: Dict[str, Set[str]] = defaultdict(set)
    valid_starting_types = valid_starting_types or basic_types
    for type_ in valid_starting_types:
        valid_actions[str(START_TYPE)].add(_make_production_string(START_TYPE, type_))
    complex_types = set()
    for name, alias in name_mapping.items():
        # Lambda functions and variables associated with them get produced in
        # specific contexts. So we do not add them to ``valid_actions`` here,
        # and let ``GrammarState`` deal with it.
        # ``var`` is a special function that some languages (like LambdaDCS)
        # use within lambda functions to indicate the use of a variable
        # (eg.: ``(lambda x (fb:row.row.year (var x)))``).  We do not have to
        # produce this function outside the scope of lambda. Even within
        # lambdas, it is a lot easier to not do it, and let the action
        # sequence to logical form transformation logic add it to the output
        # logical forms instead.
        if name in ["lambda", "var", "x", "y", "z"]:
            continue
        name_type = type_signatures[alias]
        # Type to terminal productions.
        for substituted_type in substitute_any_type(name_type, basic_types):
            valid_actions[str(substituted_type)].add(_make_production_string(substituted_type, name))
        # Keeping track of complex types.
        if isinstance(name_type, ComplexType) and name_type != ANY_TYPE:
            complex_types.add(name_type)
    for complex_type in complex_types:
        for substituted_type in substitute_any_type(complex_type, basic_types):
            for head, production in _get_complex_type_production(substituted_type,
                                                                multi_match_mapping or {}):
                valid_actions[str(head)].add(production)
    # We can produce complex types with a lambda expression, though we'll
    # leave out placeholder types for now.
    for i in range(num_nested_lambdas):
        lambda_var = chr(ord('x') + i)
        # We'll only allow lambdas to be functions that take and return basic
        # types as their arguments, for now. Also, we're doing this for all
        # possible complex types where the first and second types are basic
        # types. So we may be overgenerating a bit.
        for first_type in basic_types:
            for second_type in basic_types:
                key = ComplexType(first_type, second_type)
                production_string = _make_production_string(key, ['lambda ' + lambda_var, second_type])
                valid_actions[str(key)].add(production_string)
    # Freeze the sets into deterministic, sorted lists for the caller.
    valid_action_strings = {key: sorted(value) for key, value in valid_actions.items()}
    return valid_action_strings
|
def saveh5(self, h5file, qpi_slice=None, series_slice=None, time_interval=None, count=None, max_count=None):
    """Save the data set as an hdf5 file (qpimage.QPSeries format).

    Parameters
    ----------
    h5file: str, pathlib.Path, or h5py.Group
        Where to store the series data
    qpi_slice: tuple of (slice, slice)
        If not None, only store a slice of each QPImage in `h5file`.
        A value of None is equivalent to ``(slice(0, -1), slice(0, -1))``.
    series_slice: slice
        If None, save the entire series, otherwise only save the images
        specified by this slice.
    time_interval: tuple of (float, float)
        If not None, only store QPImages that were recorded within the
        given time interval.
    count, max_count: multiprocessing.Value
        Can be used to monitor the progress of the algorithm.  Initially,
        `max_count.value` is incremented by the total number of steps.
        At each step, `count.value` is incremented.

    Notes
    -----
    The series "identifier" meta data is only set when all of
    `qpi_slice`, `series_slice`, and `time_interval` are None.
    """
    # set up slice to export
    if series_slice is None:
        sl = range(len(self))
    else:
        # NOTE(review): series_slice.step is ignored here -- confirm that
        # callers only ever pass step-1 slices.
        sl = range(series_slice.start, series_slice.stop)
    # set up time interval (infinite bounds == no filtering)
    if time_interval is None:
        ta = -np.inf
        tb = np.inf
    else:
        ta, tb = time_interval
    # set max_count according to slice
    if max_count is not None:
        max_count.value += len(sl)
    qpskw = {"h5file": h5file,
             "h5mode": "w",
             }
    if (qpi_slice is None and
            series_slice is None and
            time_interval is None):
        # Only add the series identifier if the series is complete.
        # (We assume that if any of the above kwargs is set,
        # the series data is somehow modified.)
        qpskw["identifier"] = self.identifier
    with qpimage.QPSeries(**qpskw) as qps:
        increment = 0
        for ii in sl:
            ti = self.get_time(ii)
            if ti < ta or ti > tb:
                # Not part of the series
                pass
            else:
                increment += 1
                if increment == 1 or len(self._bgdata) != 1:
                    # initial image or series data where each image
                    # has a unique background image
                    qpi = self.get_qpimage(ii)
                    if qpi_slice is not None:
                        qpi = qpi[qpi_slice]
                    qps.add_qpimage(qpi)
                else:
                    # hard-link the background data to the first image
                    qpiraw = self.get_qpimage_raw(ii)
                    if qpi_slice is not None:
                        qpiraw = qpiraw[qpi_slice]
                    qps.add_qpimage(qpiraw, bg_from_idx=0)
            if count is not None:
                count.value += 1
|
def segment_midpoints_by_vertices(self, vertices):
    """Add midpoints to any segment connected to the vertices in the
    list / array provided.
    """
    edges = set()
    for vertex in vertices:
        for neighbour in self.identify_vertex_neighbours(vertex):
            # Canonical orientation so each segment is stored only once.
            edges.add(min((vertex, neighbour), (neighbour, vertex)))
    segs = np.array(list(edges))
    return self.segment_midpoints(segments=segs)
|
def sequences_from_fasta(path):
    """Extract multiple sequences from a FASTA file.

    Returns a mapping of record description -> sequence.
    """
    from Bio import SeqIO
    records = {}
    for record in SeqIO.parse(path, 'fasta'):
        records[record.description] = record.seq
    return records
|
def get_template(self, template_id):
    """Get the template for a given template id.

    :param template_id: id of the template, str
    :return: an ``AgreementTemplate`` built from the contract response,
        or None when the contract does not return the expected 4-tuple.
    """
    raw = self.contract_concise.getTemplate(template_id)
    if not raw or len(raw) != 4:
        return None
    return AgreementTemplate(*raw)
|
def vb_get_network_addresses(machine_name=None, machine=None, wait_for_pattern=None):
    '''
    TODO distinguish between private and public addresses

    A valid machine_name or a machine is needed to make this work!

    Guest prerequisite: GuestAddition

    Thanks to Shrikant Havale for the StackOverflow answer http://stackoverflow.com/a/29335390

    More information on guest properties: https://www.virtualbox.org/manual/ch04.html#guestadd-guestprops

    @param machine_name:
    @type machine_name: str
    @param machine:
    @type machine: IMachine
    @param wait_for_pattern: optional guest property name to wait on
        before reading addresses
    @type wait_for_pattern: str
    @return: All the IPv4 addresses we could get
    @rtype: str[]
    '''
    if machine_name:
        machine = vb_get_box().findMachine(machine_name)
    ip_addresses = []
    log.debug("checking for power on:")
    if machine.state == _virtualboxManager.constants.MachineState_Running:
        log.debug("got power on:")
        # wait on an arbitrary named property, for instance set by a dhcp
        # client script via: VBoxControl guestproperty set dhcp_done 1
        if wait_for_pattern and not machine.getGuestPropertyValue(wait_for_pattern):
            log.debug("waiting for pattern:%s:", wait_for_pattern)
            return None
        _total_slots = machine.getGuestPropertyValue('/VirtualBox/GuestInfo/Net/Count')
        # upon dhcp the net count drops to 0 and it takes some seconds for it to be set again
        if not _total_slots:
            log.debug("waiting for net count:%s:", wait_for_pattern)
            return None
        try:
            total_slots = int(_total_slots)
            for i in range(total_slots):
                try:
                    address = machine.getGuestPropertyValue(
                        '/VirtualBox/GuestInfo/Net/{0}/V4/IP'.format(i))
                    if address:
                        ip_addresses.append(address)
                except Exception as e:
                    # BUG FIX: Exception.message does not exist on Python 3
                    # (it raised AttributeError here); log the exception
                    # object itself, which is portable.
                    log.debug("%s", e)
        except ValueError as e:
            log.debug("%s", e)
            return None
    log.debug("returning ip_addresses:%s:", ip_addresses)
    return ip_addresses
|
def parse(self, arguments=None):
    """Parse the shared [i]did arguments.

    :param arguments: argument list or whitespace-separated string;
        ``self.arguments`` takes precedence when set.
    :returns: the populated options namespace, after the subclass
        ``_parse()`` hook has consumed any leftover arguments.
    """
    # NOTE(review): self.arguments overrides the *arguments* parameter --
    # confirm this precedence is intentional.
    arguments = self.arguments or arguments
    # FIXME: prep/normalize arguments in __init__
    # Split arguments if given as string and run the parser
    # NOTE(review): `basestring` implies Python 2; it is undefined on
    # Python 3 -- confirm the target runtime.
    if isinstance(arguments, basestring):
        arguments = utils.split(arguments)
    # run the wrapped argparser command to gather user set arg values
    # FROM: https://docs.python.org/3/library/argparse.html
    # Sometimes a script may only parse a few of the command-line
    # arguments, passing the remaining arguments on to another script or
    # program. In these cases, the parse_known_args() method can be
    # useful. It works much like parse_args() except that it does not
    # produce an error when extra arguments are present. Instead, it
    # returns a two item tuple containing the populated namespace and
    # the list of remaining argument strings.
    opts, _arguments = self.parser.parse_known_args(arguments)
    if not opts.config_file:
        opts.config_file = os.path.expanduser(DEFAULT_IDID_CONFIG)
    opts.config = Configuration().from_file(opts.config_file)
    # alias shortcut
    self.config = opts.config
    # if we're passing arguments in as a string we might get \n's or null
    # strings '' that we want to be sure to ignore
    # NOTE(review): on Python 3 this filter() is lazy (an iterator, not a
    # list) -- confirm downstream _parse() can handle that.
    _arguments = filter(lambda x: x if x else None, [_.strip() for _ in _arguments if _])
    # Now let the subclass parse the remaining special opts that
    # weren't consumed by default argparser
    opts = self._parse(opts, _arguments)
    return opts
|
def resolve(self, key):
    """Look up a variable like `__getitem__` or `get`, but return an
    :class:`Undefined` object carrying the looked-up name on a miss.
    """
    # Local scope shadows the parent scope; fall back to an Undefined
    # object from the environment when neither contains the key.
    for scope in (self.vars, self.parent):
        if key in scope:
            return scope[key]
    return self.environment.undefined(name=key)
|
def uri_to_iri(uri, charset='utf-8', errors='replace'):
    r"""Convert a URI in a given charset to an IRI.

    Examples for URI versus IRI:

    >>> uri_to_iri(b'http://xn--n3h.net/')
    u'http://\u2603.net/'
    >>> uri_to_iri(b'http://%C3%BCser:p%C3%A4ssword@xn--n3h.net/p%C3%A5th')
    u'http://\xfcser:p\xe4ssword@\u2603.net/p\xe5th'

    Query strings are left unchanged:

    >>> uri_to_iri('/?foo=24&x=%26%2f')
    u'/?foo=24&x=%26%2f'

    .. versionadded:: 0.6

    :param uri: The URI to convert.
    :param charset: The charset of the URI.
    :param errors: The error handling on decode.
    """
    if isinstance(uri, tuple):
        uri = url_unparse(uri)
    parts = url_parse(to_unicode(uri, charset))

    def unquote_part(value, unsafe):
        # Percent-decode one component while keeping its reserved
        # characters (``unsafe``) encoded.
        return url_unquote(value, charset, errors, unsafe)

    path = unquote_part(parts.path, '/;?')
    query = unquote_part(parts.query, ';/?:@&=+,$')
    fragment = unquote_part(parts.fragment, ';/?:@&=+,$')
    return url_unparse((parts.scheme, parts.decode_netloc(),
                        path, query, fragment))
|
def set_xlim_cb(self, redraw=True):
    """Set plot x-limits based on user-entered widget values.

    Either bound that fails to parse as a float falls back to the
    current axis limit, and its widget is refreshed from the plot.

    :param redraw: redraw the plot after applying the limits.
    """
    # BUG FIX: previously, when exactly one of the two fields parsed,
    # set_xlim() was still reached and raised NameError on the unparsed,
    # never-assigned bound.  Seed both bounds from the current limits.
    cur_min, cur_max = self.tab_plot.ax.get_xlim()
    try:
        xmin = float(self.w.x_lo.get_text())
        set_min = False
    except Exception:
        xmin = cur_min
        set_min = True
    try:
        xmax = float(self.w.x_hi.get_text())
        set_max = False
    except Exception:
        xmax = cur_max
        set_max = True
    if set_min or set_max:
        # Refresh the widgets showing the bounds we could not parse.
        self.tab_plot.draw()
        self.set_xlimits_widgets(set_min=set_min, set_max=set_max)
    if not (set_min and set_max):
        self.tab_plot.ax.set_xlim(xmin, xmax)
    if redraw:
        self.tab_plot.draw()
|
def show_low_sls(mods, test=None, queue=False, **kwargs):
    '''
    Display the low data from a specific sls. The default environment is
    ``base``, use ``saltenv`` to specify a different environment.

    saltenv
        Specify a salt fileserver environment to be used when applying states

    pillar
        Custom Pillar values, passed as a dictionary of key-value pairs

        .. code-block:: bash

            salt '*' state.show_low_sls stuff pillar='{"foo": "bar"}'

        .. note::
            Values passed this way will override Pillar values set via
            ``pillar_roots`` or an external Pillar source.

    pillarenv
        Specify a Pillar environment to be used when applying states. This
        can also be set in the minion config file using the
        :conf_minion:`pillarenv` option. When neither the
        :conf_minion:`pillarenv` minion config option nor this CLI argument is
        used, all Pillar environments will be merged together.

    CLI Example:

    .. code-block:: bash

        salt '*' state.show_low_sls foo
        salt '*' state.show_low_sls foo saltenv=dev
    '''
    if 'env' in kwargs:
        # "env" is not supported; Use "saltenv".
        kwargs.pop('env')
    conflict = _check_queue(queue, kwargs)
    if conflict is not None:
        return conflict
    orig_test = __opts__.get('test', None)
    opts = salt.utils.state.get_sls_opts(__opts__, **kwargs)
    opts['test'] = _get_test_value(test, **kwargs)
    # Since this is dealing with a specific SLS file (or files), fall back to
    # the 'base' saltenv if none is configured and none was passed.
    if opts['saltenv'] is None:
        opts['saltenv'] = 'base'
    pillar_override = kwargs.get('pillar')
    pillar_enc = kwargs.get('pillar_enc')
    if pillar_enc is None and pillar_override is not None and not isinstance(pillar_override, dict):
        raise SaltInvocationError('Pillar data must be formatted as a dictionary, unless pillar_enc ' 'is specified.')
    # Proxy minions pass __proxy__; on regular minions that name is
    # undefined, hence the NameError fallback.
    try:
        st_ = salt.state.HighState(opts, pillar_override, proxy=__proxy__, initial_pillar=_get_initial_pillar(opts))
    except NameError:
        st_ = salt.state.HighState(opts, pillar_override, initial_pillar=_get_initial_pillar(opts))
    errors = _get_pillar_errors(kwargs, pillar=st_.opts['pillar'])
    if errors:
        __context__['retcode'] = salt.defaults.exitcodes.EX_PILLAR_FAILURE
        raise CommandExecutionError('Pillar failed to render', info=errors)
    mods = salt.utils.args.split_input(mods)
    st_.push_active()
    try:
        high_, errors = st_.render_highstate({opts['saltenv']: mods})
    finally:
        st_.pop_active()
    errors += st_.state.verify_high(high_)
    if errors:
        __context__['retcode'] = salt.defaults.exitcodes.EX_STATE_COMPILER_ERROR
        return errors
    ret = st_.state.compile_high_data(high_)
    # Work around Windows multiprocessing bug, set __opts__['test'] back to
    # value from before this function was run.
    __opts__['test'] = orig_test
    return ret
|
def create_authz_decision_query(self, destination, action, evidence=None, resource=None, subject=None, message_id=0, consent=None, extensions=None, sign=None, sign_alg=None, digest_alg=None, **kwargs):
    """Create an AuthzDecisionQuery addressed to *destination*.

    All of the actual message assembly is delegated to the generic
    ``self._message`` builder; this method only fixes the message class.

    :param destination: The IdP endpoint
    :param action: The action you want to perform (at least one required)
    :param evidence: Why you should be able to perform the action
    :param resource: The resource you want to perform the action on
    :param subject: Who wants to do the thing
    :param message_id: Message identifier
    :param consent: If the principal gave her consent to this request
    :param extensions: Possible request extensions
    :param sign: Whether the request should be signed or not.
    :return: AuthzDecisionQuery instance
    """
    return self._message(
        AuthzDecisionQuery,
        destination,
        message_id,
        consent,
        extensions,
        sign,
        action=action,
        evidence=evidence,
        resource=resource,
        subject=subject,
        sign_alg=sign_alg,
        digest_alg=digest_alg,
        **kwargs
    )
|
def _fetch_partition_info(self, topic_id, partition_id):
    """Fetch partition info for the given topic-partition.

    Returns an empty dict when the partition znode does not exist.
    """
    znode_path = "/brokers/topics/{topic_id}/partitions/{p_id}".format(
        topic_id=topic_id, p_id=partition_id)
    try:
        # self.get returns a 2-tuple; only the second element is the info.
        _first, partition_info = self.get(znode_path)
    except NoNodeError:
        return {}
    return partition_info
|
def data(self, column, role):
    """Return the data for the specified column and role.

    The column addresses one attribute of the data.

    :param column: the data column
    :type column: int
    :param role: the data role
    :type role: QtCore.Qt.ItemDataRole
    :returns: data depending on the role
    :raises: None
    """
    # Each column entry is a callable that extracts one attribute.
    accessor = self.columns[column]
    return accessor(self._atype, role)
|
def potential_purviews(self, direction, mechanism, purviews=False):
    """Return all purviews that could belong to the |MIC|/|MIE|.

    Filters out trivially-reducible purviews.

    Args:
        direction (str): Either |CAUSE| or |EFFECT|.
        mechanism (tuple[int]): The mechanism of interest.

    Keyword Args:
        purviews (tuple[int]): Optional subset of purviews of interest.
    """
    subsystem = self.system[direction]
    # Only purviews entirely inside this direction's index set survive.
    allowed = set(self.purview_indices(direction))
    candidates = subsystem.potential_purviews(direction, mechanism, purviews)
    return [candidate for candidate in candidates
            if set(candidate).issubset(allowed)]
|
def execute(self):
    """Execute a system command.

    When ``self._decode_output`` is set, stdout is captured and decoded
    line by line; otherwise the command runs through ``os.system`` with
    its output left on the terminal.  Returns ``self`` for chaining.
    """
    if not self._decode_output:
        # Fire-and-forget: run through the shell without capturing output.
        os.system(self.command)
        self._success = True
        return self
    # Capture stdout and decode each line as UTF-8.
    with Popen(self.command, shell=True, stdout=PIPE) as process:
        self._output = [line.decode("utf-8").strip() for line in process.stdout]
        self._success = True
    return self
|
def gen_thin(cachedir, extra_mods='', overwrite=False, so_mods='', python2_bin='python2', python3_bin='python3', absonly=True, compress='gzip', extended_cfg=None):
    '''Generate the salt-thin tarball and print the location of the tarball
    Optional additional mods to include (e.g. mako) can be supplied as a comma
    delimited string.  Permits forcing an overwrite of the output file as well.

    :param cachedir: directory under which the ``thin`` build dir is created
    :param extra_mods: comma-delimited extra python modules to bundle
    :param overwrite: force rebuilding even if an up-to-date tarball exists
    :param so_mods: comma-delimited binary (shared-object) modules to bundle
    :param python2_bin: name/path of the alternative python2 interpreter
    :param python3_bin: name/path of the alternative python3 interpreter
    :param absonly: only package modules whose paths are absolute
    :param compress: archive format, ``gzip`` (default) or ``zip``
    :param extended_cfg: alternative-version configuration (ext tops)
    :return: path to the generated thin archive

    CLI Example:
    .. code-block:: bash
        salt-run thin.generate
        salt-run thin.generate mako
        salt-run thin.generate mako,wempy 1
        salt-run thin.generate overwrite=1'''
    if sys.version_info < (2, 6):
        raise salt.exceptions.SaltSystemExit('The minimum required python version to run salt-ssh is "2.6".')
    # Unknown compression types silently fall back to gzip rather than fail.
    if compress not in ['gzip', 'zip']:
        log.warning('Unknown compression type: "%s". Falling back to "gzip" compression.', compress)
        compress = 'gzip'
    thindir = os.path.join(cachedir, 'thin')
    if not os.path.isdir(thindir):
        os.makedirs(thindir)
    # Paths of all bookkeeping files that end up inside the archive.
    thintar = os.path.join(thindir, 'thin.' + (compress == 'gzip' and 'tgz' or 'zip'))
    thinver = os.path.join(thindir, 'version')
    pythinver = os.path.join(thindir, '.thin-gen-py-version')
    salt_call = os.path.join(thindir, 'salt-call')
    pymap_cfg = os.path.join(thindir, 'supported-versions')
    code_checksum = os.path.join(thindir, 'code-checksum')
    digest_collector = salt.utils.hashutils.DigestCollector()
    with salt.utils.files.fopen(salt_call, 'wb') as fp_:
        fp_.write(_get_salt_call('pyall', **_get_ext_namespaces(extended_cfg)))
    if os.path.isfile(thintar):
        # An archive already exists: rebuild only when the recorded Salt
        # version or the recorded python major version no longer matches.
        if not overwrite:
            if os.path.isfile(thinver):
                with salt.utils.files.fopen(thinver) as fh_:
                    overwrite = fh_.read() != salt.version.__version__
                if overwrite is False and os.path.isfile(pythinver):
                    with salt.utils.files.fopen(pythinver) as fh_:
                        # future lint: disable=blacklisted-function
                        overwrite = fh_.read() != str(sys.version_info[0])
            else:
                overwrite = True
        if overwrite:
            try:
                log.debug('Removing %s archive file', thintar)
                os.remove(thintar)
            except OSError as exc:
                log.error('Error while removing %s file: %s', thintar, exc)
                if os.path.exists(thintar):
                    raise salt.exceptions.SaltSystemExit('Unable to remove {0}. See logs for details.'.format(thintar))
        else:
            # Archive is up to date; nothing to do.
            return thintar
    if _six.PY3:
        # Let's check for the minimum python 2 version requirement, 2.6
        py_shell_cmd = "{} -c 'import sys;sys.stdout.write(\"%s.%s\\n\" % sys.version_info[:2]);'".format(python2_bin)
        cmd = subprocess.Popen(py_shell_cmd, stdout=subprocess.PIPE, shell=True)
        stdout, _ = cmd.communicate()
        if cmd.returncode == 0:
            py2_version = tuple(int(n) for n in stdout.decode('utf-8').strip().split('.'))
            if py2_version < (2, 6):
                raise salt.exceptions.SaltSystemExit('The minimum required python version to run salt-ssh is "2.6".' 'The version reported by "{0}" is "{1}". Please try "salt-ssh ' '--python2-bin=<path-to-python-2.6-binary-or-higher>".'.format(python2_bin, stdout.strip()))
        else:
            log.error('Unable to detect Python-2 version')
            log.debug(stdout)
    tops_failure_msg = 'Failed %s tops for Python binary %s.'
    # Maps python major version -> list of "top" module paths to bundle.
    tops_py_version_mapping = {}
    tops = get_tops(extra_mods=extra_mods, so_mods=so_mods)
    tops_py_version_mapping[sys.version_info.major] = tops
    # Collect tops, alternative to 2.x version
    if _six.PY2 and sys.version_info.major == 2:
        # Get python 3 tops
        py_shell_cmd = "{0} -c 'import salt.utils.thin as t;print(t.gte())' '{1}'".format(python3_bin, salt.utils.json.dumps({'extra_mods': extra_mods, 'so_mods': so_mods}))
        cmd = subprocess.Popen(py_shell_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
        stdout, stderr = cmd.communicate()
        if cmd.returncode == 0:
            try:
                tops = salt.utils.json.loads(stdout)
                tops_py_version_mapping['3'] = tops
            except ValueError as err:
                log.error(tops_failure_msg, 'parsing', python3_bin)
                log.exception(err)
        else:
            log.error(tops_failure_msg, 'collecting', python3_bin)
            log.debug(stderr)
    # Collect tops, alternative to 3.x version
    if _six.PY3 and sys.version_info.major == 3:
        # Get python 2 tops
        py_shell_cmd = "{0} -c 'import salt.utils.thin as t;print(t.gte())' '{1}'".format(python2_bin, salt.utils.json.dumps({'extra_mods': extra_mods, 'so_mods': so_mods}))
        cmd = subprocess.Popen(py_shell_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
        stdout, stderr = cmd.communicate()
        if cmd.returncode == 0:
            try:
                tops = salt.utils.json.loads(stdout.decode('utf-8'))
                tops_py_version_mapping['2'] = tops
            except ValueError as err:
                log.error(tops_failure_msg, 'parsing', python2_bin)
                log.exception(err)
        else:
            log.error(tops_failure_msg, 'collecting', python2_bin)
            log.debug(stderr)
    with salt.utils.files.fopen(pymap_cfg, 'wb') as fp_:
        fp_.write(_get_supported_py_config(tops=tops_py_version_mapping, extended_cfg=extended_cfg))
    # Build into a temporary name, then atomically move into place below.
    tmp_thintar = _get_thintar_prefix(thintar)
    if compress == 'gzip':
        tfp = tarfile.open(tmp_thintar, 'w:gz', dereference=True)
    elif compress == 'zip':
        tfp = zipfile.ZipFile(tmp_thintar, 'w', compression=zlib and zipfile.ZIP_DEFLATED or zipfile.ZIP_STORED)
        # Give ZipFile a tarfile-compatible "add" so the code below works
        # with either archive type.
        tfp.add = tfp.write
    try:
        # cwd may not exist if it was removed but salt was run from it
        start_dir = os.getcwd()
    except OSError:
        start_dir = None
    tempdir = None
    # Pack default data
    log.debug('Packing default libraries based on current Salt version')
    for py_ver, tops in _six.iteritems(tops_py_version_mapping):
        for top in tops:
            if absonly and not os.path.isabs(top):
                continue
            base = os.path.basename(top)
            top_dirname = os.path.dirname(top)
            if os.path.isdir(top_dirname):
                os.chdir(top_dirname)
            else:
                # This is likely a compressed python .egg
                tempdir = tempfile.mkdtemp()
                egg = zipfile.ZipFile(top_dirname)
                egg.extractall(tempdir)
                top = os.path.join(tempdir, base)
                os.chdir(tempdir)
            # Shareable modules go into "pyall"; others into "py2"/"py3".
            site_pkg_dir = _is_shareable(base) and 'pyall' or 'py{}'.format(py_ver)
            log.debug('Packing "%s" to "%s" destination', base, site_pkg_dir)
            if not os.path.isdir(top):
                # top is a single file module
                if os.path.exists(os.path.join(top_dirname, base)):
                    tfp.add(base, arcname=os.path.join(site_pkg_dir, base))
                continue
            for root, dirs, files in salt.utils.path.os_walk(base, followlinks=True):
                for name in files:
                    if not name.endswith(('.pyc', '.pyo')):
                        digest_collector.add(os.path.join(root, name))
                        arcname = os.path.join(site_pkg_dir, root, name)
                        if hasattr(tfp, 'getinfo'):
                            try:
                                # This is a little slow but there's no clear way to detect duplicates
                                tfp.getinfo(os.path.join(site_pkg_dir, root, name))
                                arcname = None
                            except KeyError:
                                log.debug('ZIP: Unable to add "%s" with "getinfo"', arcname)
                        if arcname:
                            tfp.add(os.path.join(root, name), arcname=arcname)
            if tempdir is not None:
                shutil.rmtree(tempdir)
                tempdir = None
    # Pack alternative data
    if extended_cfg:
        log.debug('Packing libraries based on alternative Salt versions')
        for ns, cfg in _six.iteritems(get_ext_tops(extended_cfg)):
            tops = [cfg.get('path')] + cfg.get('dependencies')
            py_ver_major, py_ver_minor = cfg.get('py-version')
            for top in tops:
                base, top_dirname = os.path.basename(top), os.path.dirname(top)
                os.chdir(top_dirname)
                site_pkg_dir = _is_shareable(base) and 'pyall' or 'py{0}'.format(py_ver_major)
                log.debug('Packing alternative "%s" to "%s/%s" destination', base, ns, site_pkg_dir)
                if not os.path.isdir(top):
                    # top is a single file module
                    if os.path.exists(os.path.join(top_dirname, base)):
                        tfp.add(base, arcname=os.path.join(ns, site_pkg_dir, base))
                    continue
                for root, dirs, files in salt.utils.path.os_walk(base, followlinks=True):
                    for name in files:
                        if not name.endswith(('.pyc', '.pyo')):
                            digest_collector.add(os.path.join(root, name))
                            arcname = os.path.join(ns, site_pkg_dir, root, name)
                            if hasattr(tfp, 'getinfo'):
                                try:
                                    tfp.getinfo(os.path.join(site_pkg_dir, root, name))
                                    arcname = None
                                except KeyError:
                                    log.debug('ZIP: Unable to add "%s" with "getinfo"', arcname)
                            if arcname:
                                tfp.add(os.path.join(root, name), arcname=arcname)
    # Record version/checksum metadata and add it to the archive.
    os.chdir(thindir)
    with salt.utils.files.fopen(thinver, 'w+') as fp_:
        fp_.write(salt.version.__version__)
    with salt.utils.files.fopen(pythinver, 'w+') as fp_:
        # future lint: disable=blacklisted-function
        fp_.write(str(sys.version_info.major))
    with salt.utils.files.fopen(code_checksum, 'w+') as fp_:
        fp_.write(digest_collector.digest())
    os.chdir(os.path.dirname(thinver))
    for fname in ['version', '.thin-gen-py-version', 'salt-call', 'supported-versions', 'code-checksum']:
        tfp.add(fname)
    if start_dir:
        os.chdir(start_dir)
    tfp.close()
    shutil.move(tmp_thintar, thintar)
    return thintar
|
def chromosomes_from_fai(genome_fai):
    """Read a fasta index (fai) file and parse the input chromosomes.

    :param str genome_fai: Path to the fai file.
    :return: list of input chromosomes
    :rtype: list[str]
    """
    with open(genome_fai) as fai_file:
        # The chromosome name is the first whitespace-separated column.
        return [record.strip().split()[0] for record in fai_file]
|
def on_update(self, value, *args, **kwargs):
    """Inform the parent of progress.

    :param value: The value of this subprogresscallback
    :param args: Extra positional arguments
    :param kwargs: Extra keyword arguments
    """
    span = self._max - self._min
    if span:
        # Map value from our [min, max] range onto the parent's range.
        fraction = (value - self._min) / span
        parent_value = self._parent_min + fraction * (self._parent_max - self._parent_min)
    else:
        # Degenerate range: report the parent's minimum.
        parent_value = self._parent_min
    self._parent.update(parent_value, *args, **kwargs)
|
def build_columns(self, X, verbose=False):
    """Construct the model matrix columns for the term.

    Parameters
    ----------
    X : array-like
        Input dataset with n rows
    verbose : bool
        whether to show warnings

    Returns
    -------
    scipy sparse array with n rows
    """
    # Pull out this term's feature column and keep it 2-D (n rows, 1 col).
    column = X[:, self.feature][:, np.newaxis]
    return sp.sparse.csc_matrix(column)
|
def decode_params(params):
    """Decode parameters list according to RFC 2231.

    params is a sequence of 2-tuples containing (param name, string value).
    Returns a new list where continued/encoded parameters are reassembled;
    extended values become (charset, language, quoted-value) 3-tuples.
    """
    # Copy params so we don't mess with the original
    params = params[:]
    new_params = []
    # Map parameter's name to a list of continuations.  The values are a
    # 3-tuple of the continuation number, the string value, and a flag
    # specifying whether a particular segment is %-encoded.
    rfc2231_params = {}
    # The first element is the main value (e.g. the content type itself);
    # it is passed through untouched.
    name, value = params.pop(0)
    new_params.append((name, value))
    while params:
        name, value = params.pop(0)
        # A trailing '*' marks an RFC 2231 extended (%-encoded) segment.
        if name.endswith('*'):
            encoded = True
        else:
            encoded = False
        value = unquote(value)
        mo = rfc2231_continuation.match(name)
        if mo:
            # Continuation segment like "title*0" / "title*1*": collect it
            # under the bare name for reassembly below.
            name, num = mo.group('name', 'num')
            if num is not None:
                num = int(num)
            rfc2231_params.setdefault(name, []).append((num, value, encoded))
        else:
            # Plain parameter: re-quote and emit as-is.
            new_params.append((name, '"%s"' % quote(value)))
    if rfc2231_params:
        for name, continuations in rfc2231_params.items():
            value = []
            extended = False
            # Sort by number
            continuations.sort()
            # And now append all values in numerical order, converting
            # %-encodings for the encoded segments.  If any of the
            # continuation names ends in a *, then the entire string, after
            # decoding segments and concatenating, must have the charset and
            # language specifiers at the beginning of the string.
            for num, s, encoded in continuations:
                if encoded:
                    # Decode as "latin-1", so the characters in s directly
                    # represent the percent-encoded octet values.
                    # collapse_rfc2231_value treats this as an octet sequence.
                    s = url_unquote(s, encoding="latin-1")
                    extended = True
                value.append(s)
            value = quote(EMPTYSTRING.join(value))
            if extended:
                charset, language, value = decode_rfc2231(value)
                new_params.append((name, (charset, language, '"%s"' % value)))
            else:
                new_params.append((name, '"%s"' % value))
    return new_params
|
def connection_factory(self, endpoint, *args, **kwargs):
    """Called to create a new connection with proper configuration.

    Intended for internal use only.
    """
    # Merge pool-level defaults into the caller-supplied keyword arguments
    # before handing off to the connection class factory.
    merged_kwargs = self._make_connection_kwargs(endpoint, kwargs)
    return self.connection_class.factory(
        endpoint, self.connect_timeout, *args, **merged_kwargs)
|
def find(self, instance_ids=None, filters=None):
    """Flatten list of reservations to a list of instances.

    :param instance_ids: A list of instance ids to filter by
    :type instance_ids: list
    :param filters: A dict of Filter.N values defined in http://goo.gl/jYNej9
    :type filters: dict
    :return: A flattened list of filtered instances.
    :rtype: list
    """
    reservations = self.retry_on_ec2_error(
        self.ec2.get_all_instances,
        instance_ids=instance_ids,
        filters=filters,
    )
    # Each reservation wraps one or more instances; flatten them all.
    return [instance
            for reservation in reservations
            for instance in reservation.instances]
|
def do_help(self, arg):
    """List available commands with "help" or detailed help with "help cmd"."""
    if arg:
        # XXX check arg syntax
        try:
            # Prefer an explicit help_<cmd> method when one is defined.
            func = getattr(self, 'help_' + arg)
        except AttributeError:
            try:
                # Otherwise fall back to the do_<cmd> docstring.
                doc = getattr(self, 'do_' + arg).__doc__
                if doc:
                    self.stdout.write("%s\n" % str(doc))
                    return
            except AttributeError:
                pass
            # No help_<cmd> and no docstring: print the "no help" message.
            self.stdout.write("%s\n" % str(self.nohelp % (arg,)))
            return
        func()
    else:
        # No argument: print the full help summary, split into documented,
        # undocumented, and miscellaneous help topics.
        names = self.get_names()
        cmds_doc = []
        cmds_undoc = []
        # help_page maps topic name -> 1 for every help_* method found.
        help_page = {}
        for name in names:
            if name[:5] == 'help_':
                help_page[name[5:]] = 1
        names.sort()
        # There can be duplicates if routines overridden
        prevname = ''
        for name in names:
            if name[:3] == 'do_':
                if name == prevname:
                    continue
                prevname = name
                cmd = name[3:]
                if cmd in help_page:
                    # Documented via help_<cmd>; consume the topic so only
                    # standalone topics remain in help_page afterwards.
                    cmds_doc.append(cmd)
                    del help_page[cmd]
                elif getattr(self, name).__doc__:
                    cmds_doc.append(cmd)
                else:
                    cmds_undoc.append(cmd)
        self.stdout.write("%s\n" % str(self.doc_leader))
        self.print_topics(self.doc_header, cmds_doc, 15, 80)
        self.print_topics(self.misc_header, list(help_page.keys()), 15, 80)
        self.print_topics(self.undoc_header, cmds_undoc, 15, 80)
        # Extension over the stdlib cmd.do_help: also print any grouped
        # command topics registered in self.command_topics.
        for topic in self.command_topics:
            topic_cmds = self.command_topics[topic]
            self.print_topics(string.capwords(topic + " commands"), topic_cmds, 15, 80)
|
def set_codes(self, codes, reject=False):
    """Set the accepted or rejected response-code list.

    :param codes: A list of the response codes.
    :param reject: If True, the listed codes will be rejected, and the
                   conversion will format as "-"; if False, only the
                   listed codes will be accepted, and the conversion
                   will format as "-" for all the others.
    """
    # Keep codes as a set for fast membership tests during conversion.
    self.codes = set(codes)
    self.reject = reject
|
def _flush_bits_to_stream(self):
    """Flush the buffered bits to the stream, padded to a byte boundary.

    This is used when a few bits have been read and ``self._bits``
    contains unconsumed/unflushed bits while data is about to be
    written to the stream.
    """
    if not self._bits:
        return 0
    pending = list(self._bits)
    # Pad with zero bits up to the next byte boundary (a full byte of
    # padding is appended when the count is already a multiple of 8,
    # matching the historical behaviour).
    pad_len = 8 - (len(pending) % 8)
    pending.extend([0] * pad_len)
    self._stream.write(bits_to_bytes(pending))
    self._bits.clear()
|
def delete_if_exists(self, **kwargs):
    """Delete an object if it exists according to the given query.

    Args:
        **kwargs: query parameters

    Returns (bool): True when an object was found and deleted,
        False otherwise.
    """
    try:
        # Both lookup and deletion stay inside the try block: a vanishing
        # object at either step counts as "did not exist".
        self.get(**kwargs).blocking_delete()
    except ObjectDoesNotExist:
        return False
    return True
|
def outputdata(self, data):
    """Send output with fixed length data.

    Non-bytes input is stringified and encoded with ``self.encoding``.
    """
    payload = data if isinstance(data, bytes) else str(data).encode(self.encoding)
    self.output(MemoryStream(payload))
|
def get_spot(self, feature=None, **kwargs):
    """Shortcut to :meth:`get_feature` but with kind='spot'."""
    # Only inject the default kind when the caller did not choose one.
    if 'kind' not in kwargs:
        kwargs['kind'] = 'spot'
    return self.get_feature(feature, **kwargs)
|
def wait_for_ready_state_complete(driver, timeout=settings.EXTREME_TIMEOUT):
    """The DOM (Document Object Model) has a property called "readyState".
    When the value of this becomes "complete", page resources are considered
    fully loaded (although AJAX and other loads might still be happening).
    This method will wait until document.readyState == "complete".
    """
    deadline_ms = time.time() * 1000.0 + timeout * 1000.0
    for _attempt in range(int(timeout * 10)):
        try:
            ready_state = driver.execute_script("return document.readyState")
        except WebDriverException:
            # Bug fix for: [Permission denied to access property "document"]
            time.sleep(0.03)
            return True
        if ready_state == u'complete':
            # Better be sure everything is done loading
            time.sleep(0.01)
            return True
        if time.time() * 1000.0 >= deadline_ms:
            break
        time.sleep(0.1)
    raise Exception("Page elements never fully loaded after %s seconds!" % timeout)
|
def _g_func ( self ) :
"""Eq . 20 in Peters and Mathews 1963."""
|
return ( self . n ** 4. / 32. * ( ( jv ( self . n - 2. , self . n * self . e_vals ) - 2. * self . e_vals * jv ( self . n - 1. , self . n * self . e_vals ) + 2. / self . n * jv ( self . n , self . n * self . e_vals ) + 2. * self . e_vals * jv ( self . n + 1. , self . n * self . e_vals ) - jv ( self . n + 2. , self . n * self . e_vals ) ) ** 2. + ( 1. - self . e_vals ** 2. ) * ( jv ( self . n - 2. , self . n * self . e_vals ) - 2. * jv ( self . n , self . n * self . e_vals ) + jv ( self . n + 2. , self . n * self . e_vals ) ) ** 2. + 4. / ( 3. * self . n ** 2. ) * ( jv ( self . n , self . n * self . e_vals ) ) ** 2. ) )
|
def _tp_cache(func):
    """Internal wrapper caching __getitem__ of generic types with a fallback to
    original function for non-hashable arguments."""
    cached_func = functools.lru_cache()(func)
    # Register the cache for clearing at interpreter cleanup.
    _cleanups.append(cached_func.cache_clear)

    @functools.wraps(func)
    def wrapper(*args, **kwds):
        try:
            return cached_func(*args, **kwds)
        except TypeError:
            # Unhashable arguments cannot be cached; fall through.
            pass
        # All real errors (not unhashable args) are raised below.
        return func(*args, **kwds)

    return wrapper
|
def _get_default_annual_spacing ( nyears ) :
"""Returns a default spacing between consecutive ticks for annual data ."""
|
if nyears < 11 :
( min_spacing , maj_spacing ) = ( 1 , 1 )
elif nyears < 20 :
( min_spacing , maj_spacing ) = ( 1 , 2 )
elif nyears < 50 :
( min_spacing , maj_spacing ) = ( 1 , 5 )
elif nyears < 100 :
( min_spacing , maj_spacing ) = ( 5 , 10 )
elif nyears < 200 :
( min_spacing , maj_spacing ) = ( 5 , 25 )
elif nyears < 600 :
( min_spacing , maj_spacing ) = ( 10 , 50 )
else :
factor = nyears // 1000 + 1
( min_spacing , maj_spacing ) = ( factor * 20 , factor * 100 )
return ( min_spacing , maj_spacing )
|
def get_constructor(self):
    """:returns: A function that constructs this gate on variable qubit indices.
    E.g. ``mygate.get_constructor()(1)`` applies the gate to qubit 1.
    """
    if not self.parameters:
        # Parameter-free gate: the constructor takes qubits directly.
        def apply(*qubits):
            return Gate(name=self.name, params=[], qubits=list(map(unpack_qubit, qubits)))
        return apply

    # Parameterized gate: bind the parameters first, then the qubits.
    def bind(*params):
        def apply(*qubits):
            return Gate(name=self.name, params=list(params), qubits=list(map(unpack_qubit, qubits)))
        return apply
    return bind
|
def hotkey(*args, **kwargs):
    """Performs key down presses on the arguments passed in order, then performs
    key releases in reverse order.

    The effect is that calling hotkey('ctrl', 'shift', 'c') would perform a
    "Ctrl-Shift-C" hotkey/keyboard shortcut press.

    Args:
        key(s) (str): The series of keys to press, in order. This can also be a
            list of key strings to press.
        interval (float, optional): The number of seconds in between each press.
            0.0 by default, for no pause in between presses.

    Returns:
        None
    """
    interval = float(kwargs.get('interval', 0.0))
    _failSafeCheck()

    def _normalize(key):
        # Multi-character key names ('ctrl', 'shift', ...) are matched
        # case-insensitively; single characters are kept as-is.
        return key.lower() if len(key) > 1 else key

    # Press every key in order...
    for key in args:
        platformModule._keyDown(_normalize(key))
        time.sleep(interval)
    # ...then release them in reverse order.
    for key in reversed(args):
        platformModule._keyUp(_normalize(key))
        time.sleep(interval)
    _autoPause(kwargs.get('pause', None), kwargs.get('_pause', True))
|
def update(context, id, etag, name, country, parent_id, active, external):
    """update(context, id, etag, name, country, parent_id, active, external)

    Update a team.

    >>> dcictl team-update [OPTIONS]

    :param string id: ID of the team to update [required]
    :param string etag: Entity tag of the resource [required]
    :param string name: Name of the team [required]
    :param string country: Country code where the team is based
    :param boolean active: Set the team in the (in)active state
    :param string parent_id: The ID of the team this team belongs to
    :param boolean external: Set the team as external
    """
    # The boolean "active" flag is converted to the API's string state.
    result = team.update(
        context,
        id=id,
        etag=etag,
        name=name,
        state=utils.active_string(active),
        country=country,
        parent_id=parent_id,
        external=external,
    )
    utils.format_output(result, context.format)
|
def request_xml(url, auth=None):
    '''Fetch *url* and return the response body as UTF-8 encoded bytes.

    Note: despite the docstring of the original version, this does not
    parse the XML itself -- it returns raw bytes suitable for feeding to
    an XML parser (e.g. ``etree.fromstring``).

    :param str url: URL for the resource to load as an XML
    :param auth: optional auth object passed through to ``requests.get``
    :return: UTF-8 encoded response body, or ``None`` on failure
    '''
    try:
        response = requests.get(url, auth=auth, verify=False)
        return response.text.encode('utf-8')
    except Exception:
        # Was ``except BaseException``, which also swallowed
        # KeyboardInterrupt/SystemExit; ``Exception`` keeps the
        # best-effort behaviour without hiding interpreter exits.
        logger.error("Skipping %s (error parsing the XML)" % url)
        return
|
def kill_cursors(cursor_ids):
    """Get a **killCursors** message."""
    # Body layout: 4 reserved zero bytes, the cursor count as a
    # little-endian int32, then each cursor id as little-endian int64.
    parts = [_ZERO_32, struct.pack("<i", len(cursor_ids))]
    for cursor_id in cursor_ids:
        parts.append(struct.pack("<q", cursor_id))
    # 2007 is the OP_KILL_CURSORS opcode.
    return __pack_message(2007, b"".join(parts))
|
def remove_hop_by_hop_headers(headers):
    """Remove all HTTP/1.1 "Hop-by-Hop" headers from a list or
    :class:`Headers` object.  This operation works in-place.

    .. versionadded:: 0.5

    :param headers: a list or :class:`Headers` object.
    """
    # Slice assignment mutates the existing object instead of rebinding,
    # which is what makes the operation in-place for Headers instances.
    kept = [(key, value) for key, value in headers
            if not is_hop_by_hop_header(key)]
    headers[:] = kept
|
def close_stream_on_error(func):
    '''Decorator to close stream on error.'''
    @asyncio.coroutine
    @functools.wraps(func)
    def guarded(self, *args, **kwargs):
        # close_on_error invokes self.close if the wrapped coroutine raises.
        with wpull.util.close_on_error(self.close):
            return (yield from func(self, *args, **kwargs))
    return guarded
|
def find_string_ids(self, substring, suffix_tree_id, limit=None):
    """Returns a set of IDs for strings that contain the given substring."""
    # Locate the edge matching the substring (ln = matched length).
    edge, matched_len = self.find_substring_edge(
        substring=substring, suffix_tree_id=suffix_tree_id)
    if edge is None:
        # Substring not present anywhere in the tree.
        return set()
    # Collect all string IDs beneath the edge's destination node.
    string_ids = get_string_ids(
        node_id=edge.dest_node_id,
        node_repo=self.node_repo,
        node_child_collection_repo=self.node_child_collection_repo,
        stringid_collection_repo=self.stringid_collection_repo,
        length_until_end=edge.length + 1 - matched_len,
        limit=limit,
    )
    return set(string_ids)
|
def plot_di_mean(dec, inc, a95, color='k', marker='o', markersize=20, label='', legend='no'):
    """Plot a mean direction (declination, inclination) with alpha_95 ellipse on
    an equal area plot.

    Before this function is called, a plot needs to be initialized with code
    that looks something like:
    >fignum = 1
    >plt.figure(num=fignum, figsize=(10,10), dpi=160)
    >ipmag.plot_net(fignum)

    Required Parameters
    -----------
    dec : declination of mean being plotted
    inc : inclination of mean being plotted
    a95 : a95 confidence ellipse of mean being plotted

    Optional Parameters (defaults are used if not specified)
    -----------
    color : the default color is black. Other colors can be chosen (e.g. 'r').
    marker : the default is a circle. Other symbols can be chosen (e.g. 's').
    markersize : the default is 20. Other sizes can be chosen.
    label : the default is no label. Labels can be assigned.
    legend : the default is no legend ('no'). Putting 'yes' will plot a legend.
    """
    center = pmag.dimap(dec, inc)
    # Negative inclinations plot as open (white-faced) symbols, positive
    # inclinations as filled symbols.
    face = 'white' if inc < 0 else color
    plt.scatter(center[0], center[1], edgecolors=color, facecolors=face,
                marker=marker, s=markersize, label=label)
    if legend == 'yes':
        plt.legend(loc=2)
    # Trace the alpha_95 confidence ellipse point by point.
    Da95, Ia95 = pmag.circ(dec, inc, a95)
    xs, ys = [], []
    for d, i in zip(Da95, Ia95):
        xy = pmag.dimap(d, i)
        xs.append(xy[0])
        ys.append(xy[1])
    plt.plot(xs, ys, c=color)
    plt.tight_layout()
|
def collection_location(obj):
    """Get the URL for the collection of objects like ``obj``.

    :param obj: Either a type representing a Kubernetes object kind or an
        instance of such a type.

    :return tuple[unicode]: Some path segments to stick on to a base URL to
        construct the location of the collection of objects like the one
        given.
    """
    # TODO kind is not part of IObjectLoader and we should really be loading
    # apiVersion off of this object too.
    prefix = version_to_segments[obj.apiVersion]
    collection = obj.kind.lower() + u"s"
    # Only actual objects (not types standing in for a kind) can carry a
    # namespace.
    namespace = obj.metadata.namespace if IObject.providedBy(obj) else None
    if namespace is None:
        # Un-namespaced area.
        return prefix + (collection,)
    # Namespaced collection.
    return prefix + (u"namespaces", namespace, collection)
|
def _write_dihedral_information(xml_file, structure, ref_energy):
    """Write dihedrals in the system.

    Parameters
    ----------
    xml_file : file object
        The file object of the hoomdxml file being written
    structure : parmed.Structure
        Parmed structure object
    ref_energy : float, default=1.0
        Reference energy for conversion to reduced units
    """
    unique_dihedral_types = set()
    xml_file.write('<dihedral>\n')
    for dihedral in structure.rb_torsions:
        # BUG FIX: the two tuple assignments below had been fused into a
        # single chained statement (t1,t2 = a,b,t3,t4 = c,d), which raised
        # ValueError at runtime (2 values unpacked into 4 targets).
        t1, t2 = dihedral.atom1.type, dihedral.atom2.type
        t3, t4 = dihedral.atom3.type, dihedral.atom4.type
        # Canonicalize the name so A-B-C-D and D-C-B-A map to one type.
        if [t2, t3] == sorted([t2, t3]):
            types_in_dihedral = '-'.join((t1, t2, t3, t4))
        else:
            types_in_dihedral = '-'.join((t4, t3, t2, t1))
        dihedral_type = (types_in_dihedral, dihedral.type.c0, dihedral.type.c1,
                         dihedral.type.c2, dihedral.type.c3, dihedral.type.c4,
                         dihedral.type.c5, dihedral.type.scee, dihedral.type.scnb)
        unique_dihedral_types.add(dihedral_type)
        xml_file.write('{} {} {} {} {}\n'.format(
            dihedral_type[0], dihedral.atom1.idx, dihedral.atom2.idx,
            dihedral.atom3.idx, dihedral.atom4.idx))
    xml_file.write('</dihedral>\n')
    xml_file.write('<dihedral_coeffs>\n')
    xml_file.write('<!-- type k1 k2 k3 k4 -->\n')
    for dihedral_type, c0, c1, c2, c3, c4, c5, scee, scnb in unique_dihedral_types:
        # Convert Ryckaert-Bellemans coefficients to OPLS form, then scale
        # to reduced units.
        opls_coeffs = RB_to_OPLS(c0, c1, c2, c3, c4, c5)
        opls_coeffs /= ref_energy
        xml_file.write('{} {:.5f} {:.5f} {:.5f} {:.5f}\n'.format(dihedral_type, *opls_coeffs))
    xml_file.write('</dihedral_coeffs>\n')
|
def get_unique_values(dictionary_input):
    """Function to retrieve unique values from input dictionary values.

    >>> get_unique_values({'msm': [5, 6, 7, 8], 'is': [10, 11, 7, 5], 'best': [6, 12, 10, 8], 'for': [1, 2, 5]})
    [1, 2, 5, 6, 7, 8, 10, 11, 12]
    >>> get_unique_values({'Built': [7, 1, 9, 4], 'for': [11, 21, 36, 14, 9], 'ISP': [4, 1, 21, 39, 47], 'TV': [1, 32, 38]})
    [1, 4, 7, 9, 11, 14, 21, 32, 36, 38, 39, 47]
    >>> get_unique_values({'F': [11, 13, 14, 17], 'A': [12, 11, 15, 18], 'N': [19, 21, 15, 36], 'G': [37, 36, 35]})
    [11, 12, 13, 14, 15, 17, 18, 19, 21, 35, 36, 37]
    """
    # A set comprehension deduplicates; sorted() already returns a list,
    # so the previous list(sorted(...)) wrapper was redundant.
    return sorted({value for values in dictionary_input.values() for value in values})
|
def _safebuiltins ( ) :
    """Construct a safe builtin environment without I/O functions.

    Every attribute of the ``builtins`` module is copied except the names
    listed in ``BUILTIN_IO_PROPS``.

    :rtype: dict
    """
    return {
        name: getattr(builtins, name)
        for name in dir(builtins)
        if name not in BUILTIN_IO_PROPS
    }
|
def factor ( self , data : [ 'SASdata' , str ] = None , by : str = None , cls : [ str , list ] = None , freq : str = None , paired : str = None , var : str = None , weight : str = None , procopts : str = None , stmtpassthrough : str = None , ** kwargs : dict ) -> 'SASresults' :
    """Python method to call the FACTOR procedure.

    Documentation link:
    https://go.documentation.sas.com/?cdcId=pgmsascdc&cdcVersion=9.4_3.4&docsetId=statug&docsetTarget=statug_factor_syntax.htm&locale=en

    :param data: SASdata object or string. This parameter is required.
    :param by: The by variable can only be a string type.
    :param cls: The cls variable can be a string or list type. It refers to the categorical, or nominal variables.
    :param freq: The freq variable can only be a string type.
    :param paired: The paired variable can only be a string type.
    :param var: The var variable can only be a string type.
    :param weight: The weight variable can only be a string type.
    :param procopts: Generic option available for advanced use. It can only be a string type.
    :param stmtpassthrough: Generic option available for advanced use. It can only be a string type.
    :return: SAS Result Object
    """
    # NOTE(review): the body is intentionally empty here — presumably the
    # actual PROC invocation is injected by a decorator/code generator
    # elsewhere in the package; confirm before relying on this stub.
| |
def _delete_forever_values ( self , forever_key ) :
    """Delete all of the keys that have been stored forever.

    Reads the full list stored under ``forever_key`` and deletes every
    key it contains in a single ``delete`` call.

    :type forever_key: str
    """
    # Fetch the connection once instead of twice as in the original.
    connection = self . _store . connection ( )
    forever = connection . lrange ( forever_key , 0 , - 1 )
    # Pythonic truthiness check replaces len(forever) > 0; delete(*[]) would
    # be an error on most clients, so the guard is still required.
    if forever :
        connection . delete ( * forever )
|
def observe ( self , body ) :
    """Compute the `Astrometric` position of ``body`` from this location.

    The body is first asked for its position at this position's own time
    ``t``.  The distance is divided by the speed of light to estimate the
    light travel time, and the body is then re-queried at successively
    corrected emission times until the result converges on where the body
    was when it emitted the light now arriving here.

    >>> earth.at(t).observe(mars)
    <Astrometric position and velocity at date t>
    """
    position, velocity, t, light_time = body._observe_from_bcrs(self)
    result = Astrometric(position, velocity, t,
                         observer_data=self.observer_data)
    result.light_time = light_time
    return result
|
def plotSolidAngleCMD ( self ) :
    """Plot the solid angle within the mask as a function of color and magnitude.

    Deprecated (ADW 2018-05-05); emits a ``DeprecationWarning`` and then
    delegates to :func:`ugali.utils.plotting.twoDimensionalHistogram`.
    """
    # Bug fix: the original merely instantiated DeprecationWarning(msg),
    # which silently discards the warning; warnings.warn actually emits it.
    import warnings
    msg = "'%s.plotSolidAngleCMD': ADW 2018-05-05" % self . __class__ . __name__
    warnings . warn ( msg , DeprecationWarning )
    import ugali . utils . plotting
    # Note the inverted magnitude limits (bright at top) on the y axis.
    ugali . utils . plotting . twoDimensionalHistogram ( 'mask' , 'color' , 'magnitude' , self . solid_angle_cmd , self . roi . bins_color , self . roi . bins_mag , lim_x = [ self . roi . bins_color [ 0 ] , self . roi . bins_color [ - 1 ] ] , lim_y = [ self . roi . bins_mag [ - 1 ] , self . roi . bins_mag [ 0 ] ] )
|
def return_periods ( eff_time , num_losses ) :
    """:param eff_time: ses_per_logic_tree_path * investigation_time
    :param num_losses: used to determine the minimum period
    :returns: an array of 32 bit periods

    Here are a few examples:

    >>> return_periods(1, 1)
    Traceback (most recent call last):
    AssertionError: eff_time too small: 1
    >>> return_periods(2, 2)
    array([1, 2], dtype=uint32)
    >>> return_periods(2, 10)
    array([1, 2], dtype=uint32)
    >>> return_periods(100, 2)
    array([50, 100], dtype=uint32)
    >>> return_periods(1000, 1000)
    array([   1,    2,    5,   10,   20,   50,  100,  200,  500, 1000],
          dtype=uint32)
    """
    assert eff_time >= 2 , 'eff_time too small: %s' % eff_time
    assert num_losses >= 2 , 'num_losses too small: %s' % num_losses
    min_time = eff_time / num_losses
    periods = [ ]
    magnitude = 1
    done = False
    # Walk the 1-2-5 decade sequence, keeping every value in
    # [min_time, eff_time]; stop at the first value beyond eff_time.
    while not done :
        for multiplier in ( 1 , 2 , 5 ) :
            candidate = magnitude * multiplier
            if candidate < min_time :
                continue
            if candidate > eff_time :
                done = True
                break
            periods . append ( candidate )
        magnitude *= 10
    return U32 ( periods )
|
def separable_series ( h , N = 1 ) :
    """Find the first N rank-1 tensors whose sum best approximates the
    tensor ``h`` (2d or 3d).

    Returns (e.g. for the 3d case) ``res = (hx, hy, hz)[i]`` such that
    ``h ~ sum_i einsum("i,j,k", res[i, 0], res[i, 1], res[i, 2])``.

    Parameters
    ----------
    h : ndarray
        input array (2 or 3 dimensional)
    N : int
        order of approximation

    Returns
    -------
    res, the series of tensors, with ``res[i] = (hx, hy, hz)[i]``
    """
    # Guard-clause dispatch on dimensionality.
    ndim = h . ndim
    if ndim == 3 :
        return _separable_series3 ( h , N )
    if ndim == 2 :
        return _separable_series2 ( h , N )
    raise ValueError ( "unsupported array dimension: %s (only 2d or 3d) " % ndim )
|
def save ( self ) :
    """Save or update this endpoint on the Ariane server.

    Persists the parent node first if it has no id yet, consolidates the
    pending twin-endpoint and property additions/removals into the JSON
    payload, posts it through the configured driver, and on success
    refreshes this object's id and state from the server response.

    :return: None
    """
    LOGGER . debug ( "Endpoint.save" )
    # Ensure the parent node is persisted so its id can be referenced.
    if self . parent_node is not None :
        if self . parent_node . id is None :
            self . parent_node . save ( )
        self . parent_node_id = self . parent_node . id
    post_payload = { }
    consolidated_twin_endpoints_id = [ ]
    consolidated_properties = { }
    consolidated_endpoint_properties = [ ]
    # Only include fields that are actually set; a present id means update.
    if self . id is not None :
        post_payload [ 'endpointID' ] = self . id
    if self . url is not None :
        post_payload [ 'endpointURL' ] = self . url
    if self . parent_node_id is not None :
        post_payload [ 'endpointParentNodeID' ] = self . parent_node_id
    # Consolidate twin endpoints: current ids, minus pending removals,
    # plus pending additions (persisting unsaved twins on the fly).
    if self . twin_endpoints_id is not None :
        consolidated_twin_endpoints_id = copy . deepcopy ( self . twin_endpoints_id )
    if self . twin_endpoints_2_rm is not None :
        for twin_node_2_rm in self . twin_endpoints_2_rm :
            if twin_node_2_rm . id is None :
                twin_node_2_rm . sync ( )
            consolidated_twin_endpoints_id . remove ( twin_node_2_rm . id )
    if self . twin_endpoints_2_add is not None :
        for twin_endpoint_2_add in self . twin_endpoints_2_add :
            if twin_endpoint_2_add . id is None :
                twin_endpoint_2_add . save ( )
            consolidated_twin_endpoints_id . append ( twin_endpoint_2_add . id )
    post_payload [ 'endpointTwinEndpointsID' ] = consolidated_twin_endpoints_id
    # Same consolidation for the properties map (pop with default 0 makes
    # removal of an unknown property a no-op).
    if self . properties is not None :
        consolidated_properties = copy . deepcopy ( self . properties )
    if self . properties_2_rm is not None :
        for n_property_name in self . properties_2_rm :
            consolidated_properties . pop ( n_property_name , 0 )
    if self . properties_2_add is not None :
        for n_property_tuple in self . properties_2_add :
            consolidated_properties [ n_property_tuple [ 0 ] ] = n_property_tuple [ 1 ]
    if consolidated_properties . __len__ ( ) > 0 :
        for key , value in consolidated_properties . items ( ) :
            consolidated_endpoint_properties . append ( DriverTools . property_params ( key , value ) )
        post_payload [ 'endpointProperties' ] = consolidated_endpoint_properties
    params = SessionService . complete_transactional_req ( { 'payload' : json . dumps ( post_payload ) } )
    # Non-REST drivers take an OPERATION key; REST takes HTTP call details.
    if MappingService . driver_type != DriverFactory . DRIVER_REST :
        params [ 'OPERATION' ] = 'createEndpoint'
        args = { 'properties' : params }
    else :
        args = { 'http_operation' : 'POST' , 'operation_path' : '' , 'parameters' : params }
    response = EndpointService . requester . call ( args )
    if MappingService . driver_type != DriverFactory . DRIVER_REST :
        response = response . get ( )
    if response . rc != 0 :
        LOGGER . warning ( 'Endpoint.save - Problem while saving endpoint ' + self . url + '. Reason: ' + str ( response . response_content ) + ' - ' + str ( response . error_message ) + " (" + str ( response . rc ) + ")" )
        # Server overload is escalated to the caller so it can retry later.
        if response . rc == 500 and ArianeMappingOverloadError . ERROR_MSG in response . error_message :
            raise ArianeMappingOverloadError ( "Endpoint.save" , ArianeMappingOverloadError . ERROR_MSG )
        # traceback.print_stack()
    else :
        self . id = response . response_content [ 'endpointID' ]
        # Re-sync twins and the parent so their local state reflects this save.
        if self . twin_endpoints_2_add is not None :
            for twin_endpoint_2_add in self . twin_endpoints_2_add :
                twin_endpoint_2_add . sync ( )
        if self . twin_endpoints_2_rm is not None :
            for twin_node_2_rm in self . twin_endpoints_2_rm :
                twin_node_2_rm . sync ( )
        if self . parent_node is not None :
            self . parent_node . sync ( )
        self . sync ( json_obj = response . response_content )
    # Pending change sets have been applied (or reported); reset them.
    self . twin_endpoints_2_add . clear ( )
    self . twin_endpoints_2_rm . clear ( )
    self . properties_2_add . clear ( )
    self . properties_2_rm . clear ( )
|
def _sim ( self , xg , ancs1 , ancs2 , pfx1 , pfx2 ) :
    """Compare two lineages.

    Collects the cross-ontology neighbors (in graph ``xg``) of ``ancs1``
    that belong to ontology prefix ``pfx2``, then scores the overlap with
    ``ancs2`` using an additive-smoothed Jaccard-like ratio.

    :param xg: cross-ontology graph providing ``neighbors``
    :param ancs1: ancestors of the first lineage
    :param ancs2: ancestors of the second lineage
    :param pfx1: ontology prefix of the first lineage (unused here)
    :param pfx2: ontology prefix to project ``ancs1`` onto
    :return: tuple ``(score, n_shared, n_total)``
    """
    xancs1 = set ( )
    for a in ancs1 :
        if a in xg :
            # TODO: restrict this to neighbors in single ontology
            for n in xg . neighbors ( a ) :
                pfx = self . _id_to_ontology ( n )
                if pfx == pfx2 :
                    xancs1 . add ( n )
    # Compute the intersection once (the original recomputed it) and fix the
    # log format: it had 3 placeholders for 4 arguments, silently dropping
    # the last one. Lazy %-style args avoid formatting when DEBUG is off.
    shared = xancs1 . intersection ( ancs2 )
    n_shared = len ( shared )
    n_total = len ( xancs1 )
    logging . debug ( 'SIM=%s/%s ## %s %s' , n_shared , n_total , shared , xancs1 )
    return ( 1 + n_shared ) / ( 1 + n_total ) , n_shared , n_total
|
def load_projections ( folder , indices = None ) :
    """Load geometry and data stored in Mayo format from folder.

    Parameters
    ----------
    folder : str
        Path to the folder where the Mayo DICOM files are stored.
    indices : optional
        Indices of the projections to load.
        Accepts advanced indexing such as slice or list of indices.

    Returns
    -------
    geometry : ConeFlatGeometry
        Geometry corresponding to the Mayo projector.
    proj_data : `numpy.ndarray`
        Projection data, given as the line integral of the linear attenuation
        coefficient (g/cm^3). Its unit is thus g/cm^2.
    """
    datasets , data_array = _read_projections ( folder , indices )
    # Get the angles (Mayo uses a different angle definition; unwrap and
    # negate to match the geometry convention used below)
    angles = [ d . DetectorFocalCenterAngularPosition for d in datasets ]
    angles = - np . unwrap ( angles ) - np . pi
    # Set minimum and maximum corners of the detector
    shape = np . array ( [ datasets [ 0 ] . NumberofDetectorColumns , datasets [ 0 ] . NumberofDetectorRows ] )
    pixel_size = np . array ( [ datasets [ 0 ] . DetectorElementTransverseSpacing , datasets [ 0 ] . DetectorElementAxialSpacing ] )
    # Correct from center of pixel to corner of pixel
    minp = - ( np . array ( datasets [ 0 ] . DetectorCentralElement ) - 0.5 ) * pixel_size
    maxp = minp + shape * pixel_size
    # Select geometry parameters
    src_radius = datasets [ 0 ] . DetectorFocalCenterRadialDistance
    det_radius = ( datasets [ 0 ] . ConstantRadialDistance - datasets [ 0 ] . DetectorFocalCenterRadialDistance )
    # For unknown reasons, mayo does not include the tag
    # "TableFeedPerRotation", which is what we want.
    # Instead we manually compute the pitch
    pitch = ( ( datasets [ - 1 ] . DetectorFocalCenterAxialPosition - datasets [ 0 ] . DetectorFocalCenterAxialPosition ) / ( ( np . max ( angles ) - np . min ( angles ) ) / ( 2 * np . pi ) ) )
    # Get flying focal spot data
    offset_axial = np . array ( [ d . SourceAxialPositionShift for d in datasets ] )
    offset_angular = np . array ( [ d . SourceAngularPositionShift for d in datasets ] )
    offset_radial = np . array ( [ d . SourceRadialDistanceShift for d in datasets ] )
    # TODO(adler-j): Implement proper handling of flying focal spot.
    # Currently we do not fully account for it, merely making some "first
    # order corrections" to the detector position and radial offset.
    # Update angles with flying focal spot (in plane direction).
    # This increases the resolution of the reconstructions.
    angles = angles - offset_angular
    # We correct for the mean offset due to the rotated angles, we need to
    # shift the detector.
    offset_detector_by_angles = det_radius * np . mean ( offset_angular )
    minp [ 0 ] -= offset_detector_by_angles
    maxp [ 0 ] -= offset_detector_by_angles
    # We currently apply only the mean of the offsets
    src_radius = src_radius + np . mean ( offset_radial )
    # Partially compensate for a movement of the source by moving the object
    # instead. We need to rescale by the magnification to get the correct
    # change in the detector. This approximation is only exactly valid on the
    # axis of rotation.
    mean_offset_along_axis_for_ffz = np . mean ( offset_axial ) * ( src_radius / ( src_radius + det_radius ) )
    # Create partition for detector
    detector_partition = odl . uniform_partition ( minp , maxp , shape )
    # Convert offset to odl definitions
    offset_along_axis = ( mean_offset_along_axis_for_ffz + datasets [ 0 ] . DetectorFocalCenterAxialPosition - angles [ 0 ] / ( 2 * np . pi ) * pitch )
    # Assemble geometry
    angle_partition = odl . nonuniform_partition ( angles )
    geometry = odl . tomo . ConeFlatGeometry ( angle_partition , detector_partition , src_radius = src_radius , det_radius = det_radius , pitch = pitch , offset_along_axis = offset_along_axis )
    # Create a *temporary* ray transform (we need its range)
    spc = odl . uniform_discr ( [ - 1 ] * 3 , [ 1 ] * 3 , [ 32 ] * 3 )
    ray_trafo = odl . tomo . RayTransform ( spc , geometry , interp = 'linear' )
    # Convert coordinates: Mayo stores data on a cylindrical detector,
    # so resample onto the flat-detector (u, v) grid of the ray transform.
    theta , up , vp = ray_trafo . range . grid . meshgrid
    d = src_radius + det_radius
    u = d * np . arctan ( up / d )
    v = d / np . sqrt ( d ** 2 + up ** 2 ) * vp
    # Calculate projection data in rectangular coordinates since we have no
    # backend that supports cylindrical
    proj_data_cylinder = ray_trafo . range . element ( data_array )
    interpolated_values = proj_data_cylinder . interpolation ( ( theta , u , v ) )
    proj_data = ray_trafo . range . element ( interpolated_values )
    return geometry , proj_data . asarray ( )
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.