signature
stringlengths 29
44.1k
| implementation
stringlengths 0
85.2k
|
|---|---|
def _decode_embedded_list ( src ) :
'''Convert enbedded bytes to strings if possible .
List helper .'''
|
output = [ ]
for elem in src :
if isinstance ( elem , dict ) :
elem = _decode_embedded_dict ( elem )
elif isinstance ( elem , list ) :
elem = _decode_embedded_list ( elem )
# pylint : disable = redefined - variable - type
elif isinstance ( elem , bytes ) :
try :
elem = elem . decode ( )
except UnicodeError :
pass
output . append ( elem )
return output
|
def get_git_ref(self, ref):
    """:calls: `GET /repos/:owner/:repo/git/refs/:ref <http://developer.github.com/v3/git/refs>`_

    :param ref: string
    :rtype: :class:`github.GitRef.GitRef`
    """
    # Select the legacy URL form unless the requester opts into the fixed path.
    prefix = "/git/refs/" if self._requester.FIX_REPO_GET_GIT_REF else "/git/"
    assert isinstance(ref, (str, unicode)), ref
    url = self.url + prefix + ref
    headers, data = self._requester.requestJsonAndCheck("GET", url)
    return github.GitRef.GitRef(self._requester, headers, data, completed=True)
|
def process(self, model=None, context=None):
    """Run filtering followed by validation in a single call.

    :param model: object or dict
    :param context: object, dict or None
    :return: shiftschema.result.Result
    """
    # Filtering mutates the model in place; validation produces the result.
    self.filter(model, context)
    result = self.validate(model, context)
    return result
|
def cleandata(inputlist):
    """Helper function for parse.getdata.

    Remove empty variables ('--' placeholders become None) and convert
    the remaining string entries to float.

    Args:
        inputlist: list of rows, each an iterable of string values.

    Returns:
        A new list of rows with None for '--' and floats elsewhere.
    """
    return [
        [None if cell == "--" else float(cell) for cell in row]
        for row in inputlist
    ]
|
def dynamic_content_item_variants(self, id, **kwargs):
    "https://developer.zendesk.com/rest_api/docs/core/dynamic_content#list-variants"
    # Interpolate the item id into the variants endpoint and delegate.
    endpoint = "/api/v2/dynamic_content/items/{id}/variants.json".format(id=id)
    return self.call(endpoint, **kwargs)
|
def patch_namespaced_replication_controller_dummy_scale(self, name, namespace, body, **kwargs):  # noqa: E501
    """patch_namespaced_replication_controller_dummy_scale  # noqa: E501

    Partially update scale of the specified ReplicationControllerDummy.
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.patch_namespaced_replication_controller_dummy_scale(name, namespace, body, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str name: name of the Scale (required)
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param UNKNOWN_BASE_TYPE body: (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :param str dry_run: When present, indicates that modifications should not be persisted.
    :return: ExtensionsV1beta1Scale
             If the method is called asynchronously, returns the request thread.
    """
    # Always ask the lower-level call for the payload only (no status/headers).
    kwargs['_return_http_data_only'] = True
    delegate = self.patch_namespaced_replication_controller_dummy_scale_with_http_info
    if kwargs.get('async_req'):
        # Async: the delegate returns the request thread directly.
        return delegate(name, namespace, body, **kwargs)  # noqa: E501
    (data) = delegate(name, namespace, body, **kwargs)  # noqa: E501
    return data
|
def ogrn ( self ) -> str :
"""Generate random valid ` ` OGRN ` ` .
: return : OGRN .
: Example :
4715113303725."""
|
numbers = [ ]
for _ in range ( 0 , 12 ) :
numbers . append ( self . random . randint ( 1 if _ == 0 else 0 , 9 ) )
ogrn = '' . join ( [ str ( x ) for x in numbers ] )
check_sum = str ( int ( ogrn ) % 11 % 10 )
return '{}{}' . format ( ogrn , check_sum )
|
def refresh(self):
    '''Refresh this item from data on the server.

    Will save any unsaved data first; a failed save is ignored so the
    refresh can still proceed (best effort).

    :raises AttributeError: if this item type has no item path.
    :raises RedmineError: if the item was never fetched from the server.
    '''
    if not self._item_path:
        raise AttributeError('refresh is not available for %s' % self._type)
    if not self.id:
        raise RedmineError('%s did not come from the Redmine server - no link.' % self._type)
    try:
        self.save()
    except Exception:
        # Fix: was a bare `except:`, which also swallowed
        # KeyboardInterrupt/SystemExit; the save remains best-effort.
        pass
    # Mimic the Redmine_Item_Manager.get command
    target = self._item_path % self.id
    json_data = self._redmine.get(target)
    data = self._redmine.unwrap_json(self._type, json_data)
    self._update_data(data=data)
|
def p_fragment_definition1(self, p):
    # NOTE: the docstring below is the PLY grammar production for this rule;
    # the parser generator reads it at build time, so it must not be reworded.
    """fragment_definition : FRAGMENT fragment_name ON type_condition directives selection_set"""
    # Indices follow the grammar symbols above:
    # p[2] = fragment name, p[4] = type condition,
    # p[5] = directives, p[6] = selection set.
    p[0] = FragmentDefinition(name=p[2], type_condition=p[4], selections=p[6], directives=p[5])
|
def description(self):
    """Get the textual description of the category.

    Returns a callable proxy so evaluation is deferred; when the category
    has no payload the proxy resolves to None.
    """
    has_payload = bool(self._meta and self._meta.get_payload())
    if has_payload:
        return utils.TrueCallableProxy(self._description)
    return utils.CallableProxy(None)
|
def _construct_operation_id(self, service_name, protorpc_method_name):
    """Return an operation id for a service method.

    Args:
        service_name: The name of the service.
        protorpc_method_name: The ProtoRPC method name.

    Returns:
        A string representing the operation id.
    """
    # ProtoRPC names are snake_case; the operation id uses headless camelCase.
    camel_cased = util.snake_case_to_headless_camel_case(protorpc_method_name)
    return '{0}_{1}'.format(service_name, camel_cased)
|
def slugify(value):
    """Convert *value* to a lowercase ASCII slug.

    Non-word characters (everything but alphanumerics and underscores)
    are removed, leading/trailing whitespace is stripped, and runs of
    spaces become single hyphens.
    """
    text = str(value) if six.PY3 else unicode(value)
    # Transliterate to ASCII, dropping anything that will not fold down.
    ascii_text = unicodedata.normalize('NFKD', text).encode('ascii', 'ignore').decode('ascii')
    cleaned = to_remove.sub('', ascii_text).strip().lower()
    return remove_dup.sub('-', cleaned)
|
def run_from_command_line():
    """Run Firenado's management commands from a command line"""
    # Import every configured command module so its commands self-register.
    # NOTE(review): exec-based import of a config-supplied module name is only
    # safe if firenado.conf content is trusted — confirm.
    for commands_conf in firenado.conf.management['commands']:
        logger.debug("Loading %s commands from %s." % (
            commands_conf['name'], commands_conf['module']))
        exec('import %s' % commands_conf['module'])
    # Advance past leading option arguments ("-...") to find the index just
    # after the first positional argument (the command name).
    command_index = 1
    for arg in sys.argv[1:]:
        command_index += 1
        if arg[0] != "-":
            break
    parser = FirenadoArgumentParser(prog=os.path.split(sys.argv[0])[1], add_help=False)
    parser.add_argument("-h", "--help", default=argparse.SUPPRESS)
    parser.add_argument("command", default="help", help="Command to executed")
    try:
        # Parse only up to (and including) the command token.
        namespace = parser.parse_args(sys.argv[1:command_index])
        if not command_exists(namespace.command):
            show_command_line_usage(parser)
        else:
            # Hand the command its name plus all remaining arguments.
            run_command(namespace.command, sys.argv[command_index - 1:])
    except FirenadoArgumentError:
        show_command_line_usage(parser, True)
|
def interp_like(self, other, method='linear', assume_sorted=False, kwargs=None):
    """Interpolate this object onto the coordinates of another object,
    filling out of range values with NaN.

    Parameters
    ----------
    other : Dataset or DataArray
        Object with an 'indexes' attribute giving a mapping from dimension
        names to a 1d array-like, which provides coordinates upon
        which to index the variables in this dataset.
    method : string, optional
        {'linear', 'nearest'} for multidimensional array,
        {'linear', 'nearest', 'zero', 'slinear', 'quadratic', 'cubic'}
        for 1-dimensional array. 'linear' is used by default.
    assume_sorted : boolean, optional
        If False, values of coordinates that are interpolated over can be
        in any order and they are sorted first. If True, interpolated
        coordinates are assumed to be an array of monotonically increasing
        values.
    kwargs : dictionary, optional
        Additional keywords passed to scipy's interpolator.

    Returns
    -------
    interpolated : xr.DataArray
        Another dataarray by interpolating this dataarray's data along the
        coordinates of the other object.

    Notes
    -----
    scipy is required.
    If the dataarray has object-type coordinates, reindex is used for these
    coordinates instead of the interpolation.

    See Also
    --------
    DataArray.interp
    DataArray.reindex_like
    """
    # Fix: the default was a shared mutable dict (kwargs={}); any mutation
    # by a callee would leak across calls. Use None as a sentinel instead.
    if kwargs is None:
        kwargs = {}
    if self.dtype.kind not in 'uifc':
        raise TypeError('interp only works for a numeric type array. '
                        'Given {}.'.format(self.dtype))
    ds = self._to_temp_dataset().interp_like(
        other, method=method, kwargs=kwargs, assume_sorted=assume_sorted)
    return self._from_temp_dataset(ds)
|
def index(args):
    """%prog index samfile/bamfile

    If SAM file, convert to BAM, sort and then index, using SAMTOOLS
    """
    p = OptionParser(index.__doc__)
    p.add_option("--fasta", dest="fasta", default=None,
                 help="add @SQ header to the BAM file [default: %default]")
    p.add_option("--unique", default=False, action="store_true",
                 help="only retain uniquely mapped reads [default: %default]")
    p.set_cpus()
    opts, args = p.parse_args(args)
    if len(args) != 1:
        sys.exit(p.print_help())
    samfile, = args
    cpus = opts.cpus
    fastafile = opts.fasta
    if fastafile:
        assert op.exists(fastafile)
    bamfile = samfile.replace(".sam", ".bam")
    if fastafile:
        # Build the FASTA .fai index first so the SAM header can be
        # reconstructed from it (-bt mode).
        faifile = fastafile + ".fai"
        if need_update(fastafile, faifile):
            sh("samtools faidx {0}".format(fastafile))
        cmd = "samtools view -bt {0} {1} -o {2}".format(faifile, samfile, bamfile)
    else:
        cmd = "samtools view -bS {0} -o {1}".format(samfile, bamfile)
    cmd += " -@ {0}".format(cpus)
    if opts.unique:
        # MAPQ >= 1 filters out multi-mapped reads.
        cmd += " -q 1"
    # Only convert when the input really is SAM and the BAM is stale.
    if samfile.endswith(".sam") and need_update(samfile, bamfile):
        sh(cmd)
    # Already sorted?
    if bamfile.endswith(".sorted.bam"):
        sortedbamfile = bamfile
    else:
        prefix = bamfile.replace(".bam", "")
        sortedbamfile = prefix + ".sorted.bam"
    if need_update(bamfile, sortedbamfile):
        cmd = "samtools sort {0} -o {1}".format(bamfile, sortedbamfile)
        cmd += " -@ {0}".format(cpus)
        sh(cmd)
    baifile = sortedbamfile + ".bai"
    if need_update(sortedbamfile, baifile):
        sh("samtools index {0}".format(sortedbamfile))
    # Callers get the path to the sorted, indexed BAM.
    return sortedbamfile
|
def create_osd_keyring(conn, cluster, key):
    """Run on osd node, writes the bootstrap key if not there yet."""
    keyring_path = '/var/lib/ceph/bootstrap-osd/{cluster}.keyring'.format(cluster=cluster)
    if conn.remote_module.path_exists(keyring_path):
        # Keyring already provisioned; nothing to do.
        return
    conn.logger.warning('osd keyring does not exist yet, creating one')
    conn.remote_module.write_keyring(keyring_path, key)
|
def _validate ( self ) :
'''Validate the mappings .'''
|
self . _validate_fasta_vs_seqres ( )
self . _validate_mapping_signature ( )
self . _validate_id_types ( )
self . _validate_residue_types ( )
|
def sequence_prep(self):
    """Create metadata objects for all PacBio assembly FASTA files in the sequencepath.

    Create individual subdirectories for each sample.
    Relative symlink the original FASTA file to the appropriate subdirectory
    """
    # Create a sorted list of all the FASTA files in the sequence path.
    # NOTE(review): the .format() call on '*.fa*' is a no-op (no placeholder
    # in the pattern) — presumably left over from an earlier pattern; confirm.
    strains = sorted(glob(os.path.join(self.fastapath, '*.fa*'.format(self.fastapath))))
    for sample in strains:
        # Create the object
        metadata = MetadataObject()
        # Set the sample name to be the file name of the sequence by removing
        # the path and file extension
        sample_name = os.path.splitext(os.path.basename(sample))[0]
        if sample_name in self.strainset:
            # Extract the OLNID from the dictionary using the SEQID
            samplename = self.straindict[sample_name]
            # samplename = sample_name
            # Set and create the output directory
            outputdir = os.path.join(self.path, samplename)
            make_path(outputdir)
            # Set the name of the JSON file
            json_metadata = os.path.join(outputdir, '{name}.json'.format(name=samplename))
            if not os.path.isfile(json_metadata):
                # Create the name and output directory attributes
                metadata.name = samplename
                metadata.seqid = sample_name
                metadata.outputdir = outputdir
                metadata.jsonfile = json_metadata
                # Set the name of the FASTA file to use in the analyses
                metadata.bestassemblyfile = os.path.join(metadata.outputdir,
                                                         '{name}.fasta'.format(name=metadata.name))
                # Symlink the original file to the output directory
                relative_symlink(sample, outputdir, '{sn}.fasta'.format(sn=metadata.name))
                # Associate the corresponding FASTQ files with the assembly
                metadata.fastqfiles = sorted(glob(os.path.join(self.fastqpath,
                                                               '{name}*.gz'.format(name=metadata.name))))
                # NOTE(review): assumes exactly two FASTQ files (forward and
                # reverse) match per sample — any other count raises
                # ValueError on unpacking; confirm upstream guarantees this.
                metadata.forward_fastq, metadata.reverse_fastq = metadata.fastqfiles
                # Write the object to file
                self.write_json(metadata)
            else:
                # JSON already exists: reload the stored metadata instead.
                metadata = self.read_json(json_metadata)
            # Add the metadata object to the list of objects
            self.metadata.append(metadata)
|
def get_provider_metadata(self):
    """Gets the metadata for a provider.

    return: (osid.Metadata) - metadata for the provider
    *compliance: mandatory -- This method must be implemented.*
    """
    # Copy the template so the stored mdata is never mutated.
    provider_mdata = dict(self._mdata['provider'])
    provider_mdata['existing_id_values'] = self._my_map['providerId']
    return Metadata(**provider_mdata)
|
def save_features_and_arrays(features, arrays, prefix, compressed=False,
                             link_features=False, overwrite=False):
    """Saves NumPy arrays of processed data, along with the features that
    correspond to each row, to files for later use.

    Two files will be saved, both starting with `prefix`:

        prefix.features : a file of features. If GFF features were provided,
        this will be in GFF format, if BED features were provided it will be
        in BED format, and so on.

        prefix.npz : A NumPy .npz file.

    Parameters
    ----------
    arrays : dict of NumPy arrays
        Rows in each array should correspond to `features`. This dictionary
        is passed to np.savez.
    features : iterable of Feature-like objects
        This is usually the same features that were used to create the array
        in the first place.
    link_features : bool
        If True, then assume that `features` is either a pybedtools.BedTool
        pointing to a file, or a filename. In this case, instead of making
        a copy, a symlink will be created to the original features. This
        helps save disk space.
    prefix : str
        Path to where data will be saved.
    compressed : bool
        If True, saves arrays using np.savez_compressed rather than np.savez.
        This will save disk space, but will be slower when accessing the data
        later.
    overwrite : bool
        If True, pass -f to ln so an existing symlink is replaced.
    """
    if link_features:
        if isinstance(features, pybedtools.BedTool):
            # The BedTool must be file-backed for a symlink to make sense.
            assert isinstance(features.fn, basestring)
            features_filename = features.fn
        else:
            assert isinstance(features, basestring)
            features_filename = features
        if overwrite:
            force_flag = '-f'
        else:
            force_flag = ''
        # NOTE(review): shell command assembled by string join — paths with
        # spaces or shell metacharacters would break it (os.symlink or
        # subprocess with a list would be safer). Confirm inputs are trusted.
        cmds = ['ln', '-s', force_flag,
                os.path.abspath(features_filename), prefix + '.features']
        os.system(' '.join(cmds))
    else:
        # Materialize (and possibly convert) the features next to the arrays.
        pybedtools.BedTool(features).saveas(prefix + '.features')
    if compressed:
        np.savez_compressed(prefix, **arrays)
    else:
        np.savez(prefix, **arrays)
|
def get_error(self, errstr):
    '''Extract the most relevant line from an ssh error blob.

    Returns the first 'ssh:' line if it appears before any other
    significant line; otherwise the first line that is not known noise.
    Falls back to the original string when every line is noise.
    '''
    for line in errstr.split('\n'):
        if line.startswith('ssh:'):
            return line
        is_noise = (line.startswith('Pseudo-terminal')
                    or 'to the list of known hosts.' in line)
        if not is_noise:
            return line
    return errstr
|
def project_get(project_id=None, name=None, profile=None, **connection_args):
    '''
    Return a specific projects (keystone project-get)
    Overrides keystone tenant-get form api V2.
    For keystone api V3 only.

    .. versionadded:: 2016.11.0

    project_id
        The project id.
    name
        The project name.
    profile
        Configuration profile - if configuration for multiple openstack accounts required.

    CLI Examples:

    .. code-block:: bash

        salt '*' keystone.project_get c965f79c4f864eaaa9c3b41904e67082
        salt '*' keystone.project_get project_id=c965f79c4f864eaaa9c3b41904e67082
        salt '*' keystone.project_get name=nova
    '''
    auth(profile, **connection_args)
    if _OS_IDENTITY_API_VERSION > 2:
        # Fix: forward the caller's profile instead of hard-coding None, so
        # multi-account configurations resolve the correct credentials.
        return tenant_get(tenant_id=project_id, name=name, profile=profile, **connection_args)
    else:
        return False
|
def ReliefF_compute_scores(inst, attr, nan_entries, num_attributes, mcmap, NN, headers, class_type, X, y, labels_std, data_type):
    """Unique scoring procedure for ReliefF algorithm. Scoring based on k nearest hits and misses of current target instance."""
    # One score per feature; collected lazily into a float64 vector
    # (same dtype as the previous zeros-plus-accumulate formulation).
    per_feature = (
        compute_score(attr, mcmap, NN, feature_num, inst, nan_entries,
                      headers, class_type, X, y, labels_std, data_type)
        for feature_num in range(num_attributes)
    )
    return np.fromiter(per_feature, dtype=np.float64, count=num_attributes)
|
def _run_forever(self):
    """Run configured jobs until termination request."""
    # NOTE(review): asyncore is deprecated and removed in Python 3.12 —
    # a future migration target, not changed here.
    while True:
        try:
            tick = time.time()
            # Drive all registered asyncore channels for one poll cycle.
            asyncore.loop(timeout=self.POLL_TIMEOUT, use_poll=True)
            # Sleep for remaining poll cycle time
            tick += self.POLL_TIMEOUT - time.time()
            if tick > 0:
                # wait POLL_TIMEOUT at most (robust against time shifts)
                time.sleep(min(tick, self.POLL_TIMEOUT))
        except KeyboardInterrupt as exc:
            self.LOG.info("Termination request received (%s)" % exc)
            break
        except SystemExit as exc:
            self.return_code = exc.code or 0
            self.LOG.info("System exit (RC=%r)" % self.return_code)
            break
        else:
            # Idle work: only runs when the poll cycle completed cleanly.
            # self.LOG.warn("IDLE %s %r" % (self.options.guard_file, os.path.exists(self.options.guard_file)))
            if self.options.guard_file and not os.path.exists(self.options.guard_file):
                # A vanished guard file is an external shutdown signal.
                self.LOG.warn("Guard file '%s' disappeared, exiting!" % self.options.guard_file)
                break
|
def cummax(self, axis=0, **kwargs):
    """Cumulative max for each group."""
    if axis == 0:
        # Fast path: dedicated Cython transform.
        return self._cython_transform('cummax', numeric_only=False)
    # Other axes fall back to a per-group NumPy accumulation.
    return self.apply(lambda x: np.maximum.accumulate(x, axis))
|
def create(self, unique_name, domain_suffix=values.unset):
    """Create a new EnvironmentInstance.

    :param unicode unique_name: The unique_name
    :param unicode domain_suffix: The domain_suffix

    :returns: Newly created EnvironmentInstance
    :rtype: twilio.rest.serverless.v1.service.environment.EnvironmentInstance
    """
    # Unset optional fields are filtered out by values.of().
    form_data = values.of({
        'UniqueName': unique_name,
        'DomainSuffix': domain_suffix,
    })
    payload = self._version.create('POST', self._uri, data=form_data, )
    return EnvironmentInstance(
        self._version,
        payload,
        service_sid=self._solution['service_sid'],
    )
|
def query_nvidia_smi(device_ids: List[int], result_queue: multiprocessing.Queue) -> None:
    """Runs nvidia-smi to determine the memory usage.

    :param device_ids: A list of devices for which the memory usage will be queried.
    :param result_queue: The queue to which the result dictionary of device id mapping to a tuple of
           (memory used, memory total) is added.
    """
    device_id_strs = [str(device_id) for device_id in device_ids]
    query = "--query-gpu=index,memory.used,memory.total"
    format_arg = "--format=csv,noheader,nounits"
    try:
        sp = subprocess.Popen(['nvidia-smi', query, format_arg, "-i", ",".join(device_id_strs)],
                              stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        result = sp.communicate()[0].decode("utf-8").rstrip().split("\n")
    except OSError:
        logger.exception("Failed calling nvidia-smi to query memory usage.")
        result_queue.put({})
        return
    try:
        memory_data = {}
        for line in result:
            gpu_id, mem_used, mem_total = line.split(",")
            memory_data[int(gpu_id)] = (int(mem_used), int(mem_total))
        result_queue.put(memory_data)
    except ValueError:
        # Fix: was a bare `except:`, which also swallowed
        # KeyboardInterrupt/SystemExit. Malformed lines raise ValueError
        # (bad unpack or int() conversion) — that is what we report.
        logger.exception("Failed parsing nvidia-smi output %s", "\n".join(result))
        result_queue.put({})
|
def list():
    """Use this function to display all of the stored URLs.

    Prints every saved name/URL pair: names in blue, URLs in green.
    """
    for entry_name, entry_url in get_all_data().items():
        colored_name = style(entry_name, fg='blue')
        colored_url = style(entry_url, fg='green')
        echo('{}: {}'.format(colored_name, colored_url))
|
def get_observation(observation_id: int) -> Dict[str, Any]:
    """Get details about an observation.

    :param observation_id:
    :returns: a dict with details on the observation
    :raises: ObservationNotFound
    """
    response = get_observations(params={'id': observation_id})
    results = response['results']
    if not results:
        raise ObservationNotFound()
    return results[0]
|
def get_param_type_indexes(self, data, name=None, prev=None):
    """Get from a docstring a parameter type indexes.
    In javadoc style it is after @type.

    :param data: string to parse
    :param name: the name of the parameter (Default value = None)
    :param prev: index after the previous element (param or param's description) (Default value = None)
    :returns: start and end indexes of found element else (-1, -1)
              Note: the end index is the index after the last included character or -1 if
              reached the end
    :rtype: tuple
    """
    start, end = -1, -1
    # Tag introducing a type in the active docstring style (e.g. '@type').
    stl_type = self.opt['type'][self.style['in']]['name']
    if not prev:
        # No anchor supplied: search after the parameter's description.
        _, prev = self.get_param_description_indexes(data)
    if prev >= 0:
        if self.style['in'] in self.tagstyles + ['unknown']:
            idx = self.get_elem_index(data[prev:])
            if idx >= 0 and data[prev + idx:].startswith(stl_type):
                # Position just after the type tag itself.
                idx = prev + idx + len(stl_type)
                # Expect "<param-name> <type>" immediately after the tag.
                m = re.match(r'\W*(\w+)\W+(\w+)\W*', data[idx:].strip())
                if m:
                    param = m.group(1).strip()
                    # Accept the match if it names the wanted parameter,
                    # or if no particular parameter was requested.
                    if (name and param == name) or not name:
                        desc = m.group(2)
                        start = data[idx:].find(desc) + idx
                        end = self.get_elem_index(data[start:])
                        if end >= 0:
                            end += start
        if self.style['in'] in ['params', 'unknown'] and (start, end) == (-1, -1):
            # TODO: manage this
            pass
    return (start, end)
|
def run(self):
    """Executes the experiment."""
    # Each stage is a (log message, Java method) pair invoked in order.
    stages = (
        ("Initializing...", "initialize"),
        ("Running...", "runExperiment"),
        ("Finished...", "postProcess"),
    )
    for message, java_method in stages:
        logger.info(message)
        javabridge.call(self.jobject, java_method, "()V")
|
def request(self):
    """Return a sender callable and a receiver iterable for a REQ socket.

    The callable has a ``print``-like signature with an infinite number of
    arguments, each being one part of the complete message; the generator
    yields as many parts as were sent by the replier.

    :rtype: (function, generator)
    """
    req_socket = self.__sock(zmq.REQ)
    sender = self.__send_function(req_socket)
    receiver = self.__recv_generator(req_socket)
    return sender, receiver
|
def revoke_sudo_privileges(request):
    """Explicitly strip sudo privileges from *request*.

    Clears the in-memory flag and, when present, removes the persisted
    sudo marker from the session. The membership check is kept so the
    session is not touched (and not marked modified) when no marker exists.
    """
    request._sudo = False
    if COOKIE_NAME in request.session:
        del request.session[COOKIE_NAME]
|
def simxGetUISlider(clientID, uiHandle, uiButtonID, operationMode):
    '''Please have a look at the function description/documentation in the V-REP user manual'''
    # Out-parameter: the remote API writes the slider position into this
    # ctypes int through the byref pointer.
    position = ct.c_int()
    # Returns (return_code, slider_position).
    return c_GetUISlider(clientID, uiHandle, uiButtonID, ct.byref(position), operationMode), position.value
|
def write(self, path=None, *args, **kwargs):
    """Perform formatting and write the formatted string to a file or stdout.

    Optional arguments can be used to format the editor's contents. If no
    file path is given, prints to standard output.

    Args:
        path (str): Full file path (default None, prints to stdout)
        *args: Positional arguments to format the editor with
        **kwargs: Keyword arguments to format the editor with
    """
    formatted = self.format(*args, **kwargs)
    if path is None:
        print(formatted)
        return
    # newline="" preserves the formatted string's own line endings.
    with io.open(path, 'w', newline="") as handle:
        handle.write(formatted)
|
def color_map_data(self, data: numpy.ndarray) -> None:
    """Store the color-map data and mark the canvas item for updating.

    Data should be an ndarray of shape (256, 3) with type uint8.
    """
    self.__color_map_data = data
    self.update()
|
def aa3_to_aa1(seq):
    """convert string of 3-letter amino acids to 1-letter amino acids

    >>> aa3_to_aa1("CysAlaThrSerAlaArgGluLeuAlaMetGlu")
    'CATSARELAME'

    >>> aa3_to_aa1(None)
    """
    if seq is None:
        return None
    # Walk the sequence three characters at a time and map each triplet.
    triplets = (seq[i:i + 3] for i in range(0, len(seq), 3))
    return "".join(aa3_to_aa1_lut[triplet] for triplet in triplets)
|
def fill_dataset_tree(self, tree, data_sets):
    """Populate *tree* with one row per data set.

    Args:
        tree: Qt tree view backed by a standard item model.
        data_sets: dict mapping acquisition time to a script object
            (reads ``script.settings['tag']`` and ``script.name``).

    Returns:
        None. Existing rows are cleared first.
    """
    model = tree.model()
    model.removeRows(0, model.rowCount())
    for timestamp, script in data_sets.items():
        item_time = QtGui.QStandardItem(str(timestamp))
        item_name = QtGui.QStandardItem(str(script.settings['tag']))
        item_type = QtGui.QStandardItem(str(script.name))
        # Time and type columns are fixed; only the name stays editable.
        item_time.setSelectable(False)
        item_time.setEditable(False)
        item_type.setSelectable(False)
        item_type.setEditable(False)
        model.appendRow([item_time, item_name, item_type])
|
def stream_header(self, f):
    """Stream the block header in the standard way to the file-like object f."""
    # Format "L##LLL": version as uint32, then previous block hash and merkle
    # root (the '#' fields), then timestamp, difficulty and nonce as uint32s.
    # NOTE(review): the exact '#' encoding is defined by stream_struct — confirm.
    stream_struct("L##LLL", f, self.version, self.previous_block_hash, self.merkle_root, self.timestamp, self.difficulty, self.nonce)
|
def plaintext(self, result):
    """Convert the image in *result* into an ASCII-art representation.

    Code adapted from
    http://a-eter.blogspot.com/2010/04/image-to-ascii-art-in-python.html

    :param result: path or file object readable by PIL.Image.open
    :return: the ASCII rendering as a newline-joined string, 80 chars wide
    """
    from PIL import Image
    # Darkest to lightest glyphs; grayscale values 0-255 map to indexes 0-10.
    ascii_chars = ['#', 'A', '@', '%', 'S', '+', '<', '*', ':', ',', '.']

    def image_to_ascii(image):
        image_as_ascii = []
        all_pixels = list(image.getdata())
        for pixel_value in all_pixels:
            # Fix: use floor division — `/` yields a float in Python 3 and
            # floats cannot index a list. 255 // 25 == 10, the last glyph.
            index = pixel_value // 25  # 0 - 10
            image_as_ascii.append(ascii_chars[index])
        return image_as_ascii

    img = Image.open(result)
    width, heigth = img.size
    new_width = 80
    new_heigth = int((heigth * new_width) / width)
    # Downscale then convert to grayscale so each pixel is a single 0-255 value.
    new_image = img.resize((new_width, new_heigth)).convert("L")
    img_as_ascii = ''.join(image_to_ascii(new_image))
    # Re-wrap the flat glyph string into rows of the new width.
    out = [img_as_ascii[c:c + new_width] for c in range(0, len(img_as_ascii), new_width)]
    return "\n".join(out)
|
def removeAxis(self, axis):
    """Remove an axis from this chart either by direct reference or by name.

    :param axis: <projexui.widgets.XChartAxis> || <str>
    """
    target = axis if isinstance(axis, XChartAxis) else self.axis(nativestring(axis))
    try:
        self._axes.remove(target)
    except ValueError:
        # Axis was not attached to this chart; nothing to remove.
        pass
|
def _collapse_root(master, current, dsn, pc):
    """Collapse the root items of the current time series entry

    :param dict master: LiPD data (so far)
    :param dict current: Current time series entry
    :param str dsn: Dataset name
    :param str pc: paleoData or chronData (mode)
    :return dict master:
    :return dict current:
    """
    logger_ts.info("enter collapse_root")
    _tmp_fund = {}
    _tmp_pub = {}
    # The tmp lipd data that we'll place in master later
    _tmp_master = {'pub': [], 'geo': {'geometry': {'coordinates': []}, 'properties': {}},
                   'funding': [], 'paleoData': {}, "chronData": {}}
    # _raw = _switch[pc]
    _c_keys = ['meanLat', 'meanLon', 'meanElev']
    _c_vals = [0, 0, 0]
    try:
        # For all keys in the current time series entry
        for k, v in current.items():
            # Underscore present. Only underscore keys that belong here are
            # funding, geo, and pub
            if "_" in k:
                # FUNDING
                if 'funding' in k:
                    # Group funding items in tmp_fund by number
                    m = re_fund_valid.match(k)
                    try:
                        _tmp_fund[m.group(1)][m.group(2)] = v
                    except Exception:
                        try:
                            # If the first layer is missing, create it and try again
                            _tmp_fund[m.group(1)] = {}
                            _tmp_fund[m.group(1)][m.group(2)] = v
                        except Exception:
                            # Still not working. Give up.
                            pass
                # GEO
                elif 'geo' in k:
                    key = k.split('_')
                    # Coordinates - [LON, LAT, ELEV]
                    if key[1] in _c_keys:
                        if key[1] == 'meanLon' or key[1] == "longitude":
                            _c_vals[0] = v
                        elif key[1] == 'meanLat' or key[1] == "latitude":
                            _c_vals[1] = v
                        elif key[1] == 'meanElev' or key[1] == "elevation":
                            _c_vals[2] = v
                    # Properties
                    else:
                        _tmp_master['geo']['properties'][key[1]] = v
                # PUBLICATION
                elif 'pub' in k:
                    # Group pub items in tmp_pub by number
                    m = re_pub_valid.match(k.lower())
                    if m:
                        # 0 indexed behind the scenes, 1 indexed to user.
                        number = int(m.group(1)) - 1
                        key = m.group(2)
                        # Authors ("Pu, Y.; Nace, T.; etc..")
                        if key == 'author' or key == 'authors':
                            try:
                                _tmp_pub[number]['author'] = _collapse_author(v)
                            except KeyError:
                                # Dictionary not created yet. Assign one first.
                                _tmp_pub[number] = {}
                                _tmp_pub[number]['author'] = _collapse_author(v)
                        # DOI ID
                        elif key == 'DOI':
                            try:
                                _tmp_pub[number]['identifier'] = [{"id": v, "type": "doi", "url": "http://dx.doi.org/" + str(v)}]
                            except KeyError:
                                # Dictionary not created yet. Assign one first.
                                _tmp_pub[number] = {}
                                _tmp_pub[number]['identifier'] = [{"id": v, "type": "doi", "url": "http://dx.doi.org/" + str(v)}]
                        # All others
                        else:
                            try:
                                _tmp_pub[number][key] = v
                            except KeyError:
                                # Dictionary not created yet. Assign one first.
                                _tmp_pub[number] = {}
                                _tmp_pub[number][key] = v
            # No underscore in name, we can rule out the other obvious keys we don't want
            else:
                # Rule out any timeseries keys that we added, and the
                # paleoData/chronData prefixed keys.
                # Fix: the second membership test previously used
                # `i is k.lower()`, an identity comparison on strings; use `==`.
                if not any(i in k.lower() or i == k.lower()
                           for i in ["paleodata", "chrondata", "mode", "tabletype",
                                     "time_id", "depth", "depthunits", "age", "ageunits"]):
                    # Root item
                    _tmp_master[k] = v
                continue
        # Append the compiled data into the master dataset data
        for k, v in _tmp_pub.items():
            _tmp_master['pub'].append(v)
        for k, v in _tmp_fund.items():
            _tmp_master['funding'].append(v)
        # Get rid of elevation coordinate if one was never added.
        if _c_vals[2] == 0:
            del _c_vals[2]
        _tmp_master['geo']['geometry']['coordinates'] = _c_vals
        # Create entry in object master, and set our new data to it.
        master[dsn] = _tmp_master
    except Exception as e:
        logger_ts.error("collapse_root: Exception: {}, {}".format(dsn, e))
    logger_ts.info("exit collapse_root")
    return master, current
|
def init_with_context(self, context):
    """Please refer to
    :meth:`~admin_tools.menu.items.MenuItem.init_with_context`
    documentation from :class:`~admin_tools.menu.items.MenuItem` class.
    """
    for model, perms in self._visible_models(context['request']):
        # Skip models the user can neither change nor view.
        if not (perms['change'] or perms.get('view', False)):
            continue
        self.children.append(MenuItem(
            title=model._meta.verbose_name_plural,
            url=self._get_admin_change_url(model, context),
        ))
|
def parse_value(self, text: str) -> Optional[bool]:
    """Parse boolean value.

    Args:
        text: String representation of the value.

    Returns ``True`` for ``"true"``, ``False`` for ``"false"``,
    and ``None`` for anything else.
    """
    return {"true": True, "false": False}.get(text)
|
def __set_revlookup_auth_string(self, username, password):
    '''Creates and sets the authentication string for accessing the reverse
    lookup servlet. No return, the string is set as an attribute to
    the client instance.

    :param username: Username.
    :param password: Password.'''
    auth_string = b2handle.utilhandle.create_authentication_string(
        username, password)
    self.__revlookup_auth_string = auth_string
|
def to_representation(self, value):
    """List of object instances -> List of dicts of primitive datatypes."""
    represented = {}
    for key, val in value.items():
        # Keys are coerced to text; values delegate to the child serializer.
        represented[six.text_type(key)] = self.child.to_representation(val)
    return represented
|
def predict_without_uncertainties(self, mjd, complain=True):
    """Predict the object position at a given MJD.

    The return value is a tuple ``(ra, dec)``, in radians, giving the
    predicted position of the object at *mjd*. Unlike :meth:`predict`, the
    astrometric uncertainties are ignored. This function is therefore
    deterministic but potentially misleading.

    If *complain* is True, print out warnings for incomplete information.

    This function relies on the external :mod:`skyfield` package.
    """
    # Fix: removed an unused ``import sys`` that served no purpose here.
    self.verify(complain=complain)
    # Load ephemeris and timescale data; might download stuff from the
    # internet on first use.
    planets, ts = load_skyfield_data()
    earth = planets['earth']
    t = ts.tdb(jd=mjd + 2400000.5)  # convert MJD -> Julian Date
    # "Best" position. The implementation here is a bit weird to keep
    # parallelism with predict().
    args = {
        'ra_hours': self.ra * R2H,
        'dec_degrees': self.dec * R2D,
    }
    if self.pos_epoch is not None:
        args['jd_of_position'] = self.pos_epoch + 2400000.5
    if self.promo_ra is not None:
        args['ra_mas_per_year'] = self.promo_ra
        args['dec_mas_per_year'] = self.promo_dec
    if self.parallax is not None:
        args['parallax_mas'] = self.parallax
    if self.vradial is not None:
        args['radial_km_per_s'] = self.vradial
    bestra, bestdec, _ = earth.at(t).observe(PromoEpochStar(**args)).radec()
    return bestra.radians, bestdec.radians
|
def _retrieve(self):
    """Query Apache Tomcat Server Status Page in XML format and return
    the result as an ElementTree object.

    @return: ElementTree object of Status Page XML.
    """
    status_url = "%s://%s:%d/manager/status" % (
        self._proto, self._host, self._port)
    # Request the machine-readable (XML) variant of the status page.
    response = util.get_url(status_url, self._user, self._password,
                            {'XML': 'true'})
    return ElementTree.XML(response)
|
def schedule(self):
    """Initiate distribution of the test collection.

    Initiate scheduling of the items across the nodes. If this gets called
    again later it behaves the same as calling ``._reschedule()`` on all
    nodes so that newly added nodes will start to be used.

    If ``.collection_is_completed`` is True, this is called by the hook:

    - ``DSession.worker_collectionfinish``.
    """
    assert self.collection_is_completed
    # Initial distribution already happened, reschedule on all nodes
    if self.collection is not None:
        for node in self.nodes:
            self._reschedule(node)
        return
    # Check that all nodes collected the same tests
    if not self._check_nodes_have_same_collection():
        self.log("**Different tests collected, aborting run**")
        return
    # Collections are identical, create the final list of items
    self.collection = list(next(iter(self.registered_collections.values())))
    if not self.collection:
        return
    # Determine chunks of work (scopes): workqueue maps
    # scope -> OrderedDict of {nodeid: completed-flag}.
    for nodeid in self.collection:
        scope = self._split_scope(nodeid)
        work_unit = self.workqueue.setdefault(scope, default=OrderedDict())
        work_unit[nodeid] = False
    # Avoid having more workers than work
    extra_nodes = len(self.nodes) - len(self.workqueue)
    if extra_nodes > 0:
        # NOTE(review): "Shuting" typo in these log messages — runtime
        # strings, left unchanged here.
        self.log("Shuting down {0} nodes".format(extra_nodes))
        for _ in range(extra_nodes):
            unused_node, assigned = self.assigned_work.popitem(last=True)
            self.log("Shuting down unused node {0}".format(unused_node))
            unused_node.shutdown()
    # Assign initial workload
    for node in self.nodes:
        self._assign_work_unit(node)
    # Ensure nodes start with at least two work units if possible (#277)
    for node in self.nodes:
        self._reschedule(node)
    # Initial distribution sent all tests, start node shutdown
    if not self.workqueue:
        for node in self.nodes:
            node.shutdown()
|
def instance_admin_api(self):
    """Helper for session-related API calls.

    Lazily constructs the client on first access and caches it on
    ``self._instance_admin_api``.
    """
    api = self._instance_admin_api
    if api is None:
        api = InstanceAdminClient(
            credentials=self.credentials, client_info=_CLIENT_INFO)
        self._instance_admin_api = api
    return api
|
def decompress_messages(self, partitions_offmsgs):
    """Decompress pre-defined compressed fields for each message.

    Yields each partition/offset/message record, replacing a truthy
    'message' value with its decompressed form in place.
    """
    for record in partitions_offmsgs:
        message = record['message']
        if message:
            record['message'] = self.decompress_fun(message)
        yield record
|
def proxy_fetch(self, env, url):
    """Proxy mode only endpoint that handles OPTIONS requests and CORS
    fetches for the preservation (auto fetch) worker.

    Due to normal cross-origin browser restrictions in proxy mode, the auto
    fetch worker cannot access the CSS rules of cross-origin style sheets
    and must re-fetch them in a manner that is CORS safe. This endpoint
    fetches the stylesheets on the worker's behalf and responds with their
    contents.

    :param dict env: The WSGI environment dictionary
    :param str url: The URL of the resource to be fetched
    :return: WbResponse that is either a response to an OPTIONS request or
        the result of fetching url
    :rtype: WbResponse
    """
    if not self.is_proxy_enabled(env):
        # Endpoint is only meaningful in proxy mode.
        return WbResponse.text_response(
            'proxy mode must be enabled to use this endpoint',
            status='403 Forbidden')
    if env.get('REQUEST_METHOD') == 'OPTIONS':
        return WbResponse.options_response(env)
    # Ensure full URL, swapping in /id_ so the content is not rewritten.
    url = env['REQUEST_URI'].replace('/proxy-fetch', '/id_')
    # Update the WSGI environment to target the proxy collection.
    env['REQUEST_URI'] = self.proxy_coll + url
    env['PATH_INFO'] = env['PATH_INFO'].replace(
        '/proxy-fetch', self.proxy_coll + '/id_')
    # Make the request using the normal serve_content pipeline.
    response = self.serve_content(env, self.proxy_coll, url)
    # for WR: let the cross-origin worker read the response
    if isinstance(response, WbResponse):
        response.add_access_control_headers(env=env)
    return response
|
def unique(ar):
    r"""Find the unique elements of an array.

    It uses ``dask.array.unique`` if necessary.

    Args:
        ar (array_like): Input array.

    Returns:
        array_like: the sorted unique elements.
    """
    import dask.array as da
    # Dask arrays must stay lazy; everything else goes through _unique.
    if not isinstance(ar, da.core.Array):
        return _unique(ar)
    return da.unique(ar)
|
def performance_view(dstore):
    """Returns the performance view as a numpy array."""
    records = sorted(dstore['performance_data'], key=operator.itemgetter(0))
    rows = []
    # Aggregate per operation: total time and counts, peak memory.
    for operation, group in itertools.groupby(records, operator.itemgetter(0)):
        total_counts = 0
        total_time = 0
        peak_mem = 0
        for _operation, time_sec, memory_mb, counts_ in group:
            total_counts += counts_
            total_time += time_sec
            peak_mem = max(peak_mem, memory_mb)
        rows.append((operation, total_time, peak_mem, total_counts))
    # sort by time, slowest operation first
    rows.sort(key=operator.itemgetter(1), reverse=True)
    return numpy.array(rows, perf_dt)
|
def contributors(self, sr, limit=None):
    """Login required. GETs list of contributors to subreddit ``sr``.
    Returns :class:`things.ListBlob` object.

    **NOTE**: The :class:`things.Account` objects in the returned ListBlob
    *only* have ``id`` and ``name`` set. This is because that's all reddit
    returns. If you need full info on each contributor, you must
    individually GET them using :meth:`user` or
    :meth:`things.Account.about`.

    URL: ``http://www.reddit.com/r/<sr>/about/contributors/``

    :param sr: name of subreddit
    """
    raw_userlist = self._limit_get(
        'r', sr, 'about', 'contributors', limit=limit)
    return _process_userlist(raw_userlist)
|
def _define_array_view(data_type):
    """Define a new view object for a `Array` type."""
    element_type = data_type.element_type
    element_view = _resolve_view(element_type)
    # Select the mixin according to how elements must be viewed.
    if element_view is None:
        mixins = (_DirectArrayViewMixin,)
    elif isinstance(element_type, _ATOMIC):
        mixins = (_IndirectAtomicArrayViewMixin,)
    else:
        mixins = (_IndirectCompositeArrayViewMixin,)
    attributes = _get_mixin_attributes(mixins)
    # Indirect views need a handle on the element's own view class.
    if element_view is not None:
        attributes['_element_view'] = element_view
    view_name = data_type.name or 'ArrayView'
    return type(view_name, (), attributes)
|
def from_string(cls, s):
    """Instantiate Relations from a relations string."""
    tables = []
    seen = set()
    current_table = None
    lines = list(reversed(s.splitlines()))
    # to pop() in right order
    while lines:
        line = lines.pop().strip()
        # A table header is "name:" on its own line.
        table_m = re.match(r'^(?P<table>\w.+):$', line)
        # A field line: name, optional attrs, optional "# comment" suffix.
        field_m = re.match(r'\s*(?P<name>\S+)' r'(\s+(?P<attrs>[^#]+))?' r'(\s*#\s*(?P<comment>.*)$)?', line)
        if table_m is not None:
            table_name = table_m.group('table')
            if table_name in seen:
                raise ItsdbError('Table {} already defined.'.format(table_name))
            current_table = (table_name, [])
            tables.append(current_table)
            seen.add(table_name)
        elif field_m is not None and current_table is not None:
            name = field_m.group('name')
            # NOTE(review): attrs is None when a field line carries no
            # attributes, so .split() would raise AttributeError rather
            # than ItsdbError — presumably field lines always include a
            # datatype; verify against the file format spec.
            attrs = field_m.group('attrs').split()
            datatype = attrs.pop(0)  # first attribute is the datatype
            key = ':key' in attrs
            partial = ':partial' in attrs
            comment = field_m.group('comment')
            current_table[1].append(Field(name, datatype, key, partial, comment))
        elif line != '':
            raise ItsdbError('Invalid line: ' + line)
    return cls(tables)
|
def create_response(self, request, content, content_type):
    """Returns a response object for the request.

    Can be overridden to return different responses.
    """
    response = HttpResponse(content=content, content_type=content_type)
    return response
|
def _status_query(query, hostname, enumerate=None, service=None):
    '''Send query along to Nagios.

    Returns a dict with 'result' (bool), and either 'json_data' on
    success or 'error' on failure.
    '''
    config = _config()
    data = None
    params = {'hostname': hostname, 'query': query, }
    ret = {'result': False}
    if enumerate:
        # Ask Nagios to enumerate states rather than summarize them.
        params['formatoptions'] = 'enumerate'
    if service:
        params['servicedescription'] = service
    # Use HTTP basic auth only when credentials are configured.
    if config['username'] and config['password'] is not None:
        auth = (config['username'], config['password'], )
    else:
        auth = None
    try:
        result = salt.utils.http.query(config['url'], method='GET', params=params, decode=True, data=data, text=True, status=True, header_dict={}, auth=auth, backend='requests', opts=__opts__, )
    except ValueError:
        ret['error'] = 'Please ensure Nagios is running.'
        ret['result'] = False
        return ret
    if result.get('status', None) == salt.ext.six.moves.http_client.OK:
        try:
            # NOTE(review): a missing 'dict' key raises KeyError, not
            # ValueError, so this except may never fire — verify intent.
            ret['json_data'] = result['dict']
            ret['result'] = True
        except ValueError:
            ret['error'] = 'Please ensure Nagios is running.'
    elif result.get('status', None) == salt.ext.six.moves.http_client.UNAUTHORIZED:
        ret['error'] = 'Authentication failed. Please check the configuration.'
    elif result.get('status', None) == salt.ext.six.moves.http_client.NOT_FOUND:
        ret['error'] = 'URL {0} was not found.'.format(config['url'])
    else:
        ret['error'] = 'Results: {0}'.format(result.text)
    return ret
|
def fixed_length(cls, l, allow_empty=False):
    """Create a sedes for text data with exactly `l` encoded characters."""
    # A fixed length is a range whose lower and upper bounds coincide.
    length = l
    return cls(length, length, allow_empty=allow_empty)
|
def listmetadataformats(**kwargs):
    """Create OAI-PMH response for ListMetadataFormats verb."""
    cfg = current_app.config
    e_tree, e_listmetadataformats = verb(**kwargs)
    if 'identifier' in kwargs:
        # test if record exists (raises if the identifier is unknown)
        OAIIDProvider.get(pid_value=kwargs['identifier'])
    # Emit one <metadataFormat> element per configured metadata prefix.
    for prefix, metadata in cfg.get('OAISERVER_METADATA_FORMATS', {}).items():
        e_metadataformat = SubElement(e_listmetadataformats, etree.QName(NS_OAIPMH, 'metadataFormat'))
        e_metadataprefix = SubElement(e_metadataformat, etree.QName(NS_OAIPMH, 'metadataPrefix'))
        e_metadataprefix.text = prefix
        e_schema = SubElement(e_metadataformat, etree.QName(NS_OAIPMH, 'schema'))
        e_schema.text = metadata['schema']
        e_metadataNamespace = SubElement(e_metadataformat, etree.QName(NS_OAIPMH, 'metadataNamespace'))
        e_metadataNamespace.text = metadata['namespace']
    return e_tree
|
def cast_to_str(obj):
    """Return a string representation of a Seq or SeqRecord.

    Args:
        obj (str, Seq, SeqRecord): Biopython Seq or SeqRecord

    Returns:
        str: String representation of the sequence

    Raises:
        ValueError: if obj is not a string, Seq, or SeqRecord.
    """
    if isinstance(obj, str):
        return obj
    elif isinstance(obj, Seq):
        return str(obj)
    elif isinstance(obj, SeqRecord):
        return str(obj.seq)
    raise ValueError('Must provide a string, Seq, or SeqRecord object.')
|
def hexraw(self, lfilter=None):
    """Same as nsummary(), except that if a packet has a Raw layer, it will be hexdumped  # noqa: E501

    lfilter: a truth function that decides whether a packet must be displayed
    """
    for index, res in enumerate(self.res):
        pkt = self._elt2pkt(res)
        if lfilter is not None and not lfilter(pkt):
            continue
        summary = "%s %s %s" % (
            conf.color_theme.id(index, fmt="%04i"),
            pkt.sprintf("%.time%"),
            self._elt2sum(res),
        )
        print(summary)
        # Hexdump the raw payload when one is present.
        if pkt.haslayer(conf.raw_layer):
            hexdump(pkt.getlayer(conf.raw_layer).load)
|
def select_distinct(self, table, cols='*', execute=True):
    """Query distinct values from a table.

    Delegates to :meth:`select` with a ``SELECT DISTINCT`` statement type.
    """
    distinct_type = 'SELECT DISTINCT'
    return self.select(table, cols, execute, select_type=distinct_type)
|
def create_user(self, name, password):
    """Create user with hashed password.

    Returns a dict holding the user's name and hashed password.
    """
    return {'name': name, 'password': self._password_hasher(password)}
|
def _int_size ( x ) :
"""Return the smallest size int that can store the value"""
|
if - 0x80 <= x <= 0x7F :
return 1
elif - 0x8000 <= x <= 0x7FFF :
return 2
elif - 0x80000000 <= x <= 0x7FFFFFFF :
return 4
elif long ( - 0x8000000000000000 ) <= x <= long ( 0x7FFFFFFFFFFFFFFF ) :
return 8
else :
raise RuntimeError ( "Cannot represent value: " + str ( x ) )
|
def sign(self, keypair):
    """Sign this transaction envelope with a given keypair.

    Note that the signature must not already be in this instance's list of
    signatures.

    :param keypair: The keypair to use for signing this transaction
        envelope.
    :type keypair: :class:`Keypair <stellar_base.keypair.Keypair>`
    :raises: :exc:`SignatureExistError
        <stellar_base.utils.SignatureExistError>`
    """
    assert isinstance(keypair, Keypair)
    decorated_sig = keypair.sign_decorated(self.hash_meta())
    # Compare by attribute dict to detect a duplicate signature.
    existing = [signature.__dict__ for signature in self.signatures]
    if decorated_sig.__dict__ in existing:
        raise SignatureExistError('The keypair has already signed')
    self.signatures.append(decorated_sig)
|
def process_input(self, encoded_stream, value):
    """Process or drop a graph input.

    This method asynchronously queues an item to be processed by the
    sensorgraph worker task in _reset_vector. It must be called from
    inside the emulation loop and returns immediately before the input is
    processed.
    """
    if not self.enabled:
        # Inputs are silently dropped while the sensor graph is disabled.
        return
    # Normalize the stream argument: accept a string, a DataStream, or an
    # already-encoded value, keeping both decoded and encoded forms.
    if isinstance(encoded_stream, str):
        stream = DataStream.FromString(encoded_stream)
        encoded_stream = stream.encode()
    elif isinstance(encoded_stream, DataStream):
        stream = encoded_stream
        encoded_stream = stream.encode()
    else:
        stream = DataStream.FromEncoded(encoded_stream)
    reading = IOTileReading(self.get_timestamp(), encoded_stream, value)
    # Non-blocking hand-off: the worker task consumes (stream, reading)
    # pairs asynchronously.
    self._inputs.put_nowait((stream, reading))
|
from typing import List
def compute_parentheses_depth(parens_str: str) -> List[int]:
    """Function evaluates a string consisting of multiple groups of nested
    parentheses separated by spaces.

    For each group, the function output is the deepest level of parentheses
    nesting. For instance, (()()) has maximum two levels of nesting whereas
    ((())) has three.

    Example:
    >>> compute_parentheses_depth('(()()) ((())) () ((())()())')
    [2, 3, 1, 3]
    """
    depths = []
    for group in parens_str.split(' '):
        if not group:
            continue
        depth = 0
        deepest = 0
        for ch in group:
            # '(' opens a level; any other character closes one.
            depth += 1 if ch == '(' else -1
            if depth > deepest:
                deepest = depth
        depths.append(deepest)
    return depths
|
def safe_pow(base, exp):
    """safe version of pow

    Refuses exponents above MAX_EXPONENT to bound computation cost.
    """
    if exp > MAX_EXPONENT:
        raise RuntimeError(
            "Invalid exponent, max exponent is {}".format(MAX_EXPONENT))
    return pow(base, exp)
|
def restrict_input_to_index(df_or_dict, column_id, index):
    """Restrict df_or_dict to those ids contained in index.

    :param df_or_dict: a pandas DataFrame or a dictionary.
    :type df_or_dict: pandas.DataFrame or dict
    :param column_id: it must be present in the pandas DataFrame or in all
        DataFrames in the dictionary. It is not allowed to have NaN values
        in this column.
    :type column_id: basestring
    :param index: Index containing the ids
    :type index: Iterable or pandas.Series
    :return df_or_dict_restricted: the restricted df_or_dict
    :rtype df_or_dict_restricted: dict or pandas.DataFrame
    :raise: ``TypeError`` if df_or_dict is not of type dict or
        pandas.DataFrame
    """
    if isinstance(df_or_dict, pd.DataFrame):
        return df_or_dict[df_or_dict[column_id].isin(index)]
    if isinstance(df_or_dict, dict):
        return {
            kind: frame[frame[column_id].isin(index)]
            for kind, frame in df_or_dict.items()
        }
    raise TypeError("df_or_dict should be of type dict or pandas.DataFrame")
|
def any_path_path(cls, project, database, document, any_path):
    """Return a fully-qualified any_path string."""
    template = ("projects/{project}/databases/{database}"
                "/documents/{document}/{any_path=**}")
    return google.api_core.path_template.expand(
        template,
        project=project,
        database=database,
        document=document,
        any_path=any_path,
    )
|
def _run_rtg_eval(vrn_file, rm_file, rm_interval_file, base_dir, data, validate_method):
    """Run evaluation of a caller against the truth set using rtg vcfeval.

    Returns a dict mapping 'fp', 'fn' and 'tp' (plus 'tp-calls' when a
    baseline-matched file exists) to output VCF paths under base_dir/rtg.
    """
    out_dir = os.path.join(base_dir, "rtg")
    # A "done" sentinel marks a previously completed run; otherwise start fresh.
    if not utils.file_exists(os.path.join(out_dir, "done")):
        if os.path.exists(out_dir):
            shutil.rmtree(out_dir)
        vrn_file, rm_file, interval_bed = _prepare_inputs(vrn_file, rm_file, rm_interval_file, base_dir, data)
        rtg_ref = tz.get_in(["reference", "rtg"], data)
        if isinstance(rtg_ref, dict) and "base" in rtg_ref:
            rtg_ref = os.path.dirname(rtg_ref["base"])
        assert rtg_ref and os.path.exists(rtg_ref), ("Did not find rtg indexed reference file for validation:\n%s\n" "Run bcbio_nextgen.py upgrade --data --aligners rtg" % rtg_ref)
        # handle CWL where we have a reference to a single file in the RTG directory
        if os.path.isfile(rtg_ref):
            rtg_ref = os.path.dirname(rtg_ref)
        # get core and memory usage from standard configuration
        threads = min(dd.get_num_cores(data), 6)
        resources = config_utils.get_resources("rtg", data["config"])
        memory = config_utils.adjust_opts(resources.get("jvm_opts", ["-Xms500m", "-Xmx1500m"]), {"algorithm": {"memory_adjust": {"magnitude": threads, "direction": "increase"}}})
        # Split JVM options into stack (-Xms, kept verbatim) and heap
        # (-Xmx, stripped to the bare size for RTG_MEM).
        jvm_stack = [x for x in memory if x.startswith("-Xms")]
        jvm_mem = [x for x in memory if x.startswith("-Xmx")]
        jvm_stack = jvm_stack[0] if len(jvm_stack) > 0 else "-Xms500m"
        jvm_mem = jvm_mem[0].replace("-Xmx", "") if len(jvm_mem) > 0 else "3g"
        cmd = ["rtg", "vcfeval", "--threads", str(threads), "-b", rm_file, "--bed-regions", interval_bed, "-c", vrn_file, "-t", rtg_ref, "-o", out_dir]
        if validate_method == "rtg-squash-ploidy":
            cmd += ["--squash-ploidy"]
        rm_samples = vcfutils.get_samples(rm_file)
        # Restrict comparison to the current sample for multi-sample truth sets.
        if len(rm_samples) > 1 and dd.get_sample_name(data) in rm_samples:
            cmd += ["--sample=%s" % dd.get_sample_name(data)]
        cmd += ["--vcf-score-field='%s'" % (_pick_best_quality_score(vrn_file))]
        mem_export = "%s export RTG_JAVA_OPTS='%s' && export RTG_MEM=%s" % (utils.local_path_export(), jvm_stack, jvm_mem)
        cmd = mem_export + " && " + " ".join(cmd)
        do.run(cmd, "Validate calls using rtg vcfeval", data)
    out = {"fp": os.path.join(out_dir, "fp.vcf.gz"), "fn": os.path.join(out_dir, "fn.vcf.gz")}
    tp_calls = os.path.join(out_dir, "tp.vcf.gz")
    tp_baseline = os.path.join(out_dir, "tp-baseline.vcf.gz")
    # Newer rtg versions emit tp-baseline.vcf.gz; prefer it when present.
    if os.path.exists(tp_baseline):
        out["tp"] = tp_baseline
        out["tp-calls"] = tp_calls
    else:
        out["tp"] = tp_calls
    return out
|
def map_drawn_samples(selected_pairs_by_state, trajectories, top=None):
    """Lookup trajectory frames using pairs of (trajectory, frame) indices.

    Parameters
    ----------
    selected_pairs_by_state : array, dtype=int, shape=(n_states, n_samples, 2)
        selected_pairs_by_state[state, sample] gives the (trajectory, frame)
        index associated with a particular sample from that state.
    trajectories : list(md.Trajectory) or list(np.ndarray) or list(filenames)
        The trajectories assocated with sequences, which will be used to
        extract coordinates of the state centers from the raw trajectory
        data. This can also be a list of np.ndarray objects or filenames.
        If they are filenames, mdtraj will be used to load them.
    top : md.Topology, optional, default=None
        Use this topology object to help mdtraj load filenames.

    Returns
    -------
    frames_by_state : mdtraj.Trajectory
        Output will be a list of trajectories such that frames_by_state[state]
        is a trajectory drawn from `state` of length `n_samples`. If
        trajectories are numpy arrays, the output will be numpy arrays
        instead of md.Trajectories.

    Examples
    --------
    >>> selected_pairs_by_state = hmm.draw_samples(sequences, 3)
    >>> samples = map_drawn_samples(selected_pairs_by_state, trajectories)

    Notes
    -----
    YOU are responsible for ensuring that selected_pairs_by_state and
    trajectories correspond to the same dataset!

    See Also
    --------
    ghmm.GaussianHMM.draw_samples : Draw samples from GHMM
    ghmm.GaussianHMM.draw_centroids : Draw centroids from GHMM
    """
    frames_by_state = []
    for state, pairs in enumerate(selected_pairs_by_state):
        # Pick a frame-extraction strategy: filenames are loaded via
        # mdtraj; in-memory trajectories/arrays are indexed directly.
        if isinstance(trajectories[0], str):
            if top:
                process = lambda x, frame: md.load_frame(x, frame, top=top)
            else:
                process = lambda x, frame: md.load_frame(x, frame)
        else:
            process = lambda x, frame: x[frame]
        frames = [process(trajectories[trj], frame) for trj, frame in pairs]
        try:  # If frames are mdtraj Trajectories
            # Get an empty trajectory with correct shape and call the join
            # method on it to merge trajectories
            state_trj = frames[0][0:0].join(frames)
        except AttributeError:
            # Just a bunch of np arrays
            state_trj = np.array(frames)
        frames_by_state.append(state_trj)
    return frames_by_state
|
def run_supernova(ctx, executable, debug, quiet, environment, command, conf, echo, dashboard):
    """You can use supernova with many OpenStack clients and avoid the pain of
    managing multiple sets of environment variables. Getting started is easy
    and there's some documentation that can help:

    http://supernova.readthedocs.org/

    The first step is to get your environment variables packed into a
    configuration file, usually in ~/.supernova. The docs (linked above) have
    some good examples that you can fill in via copy/paste.

    Once you have a configuration ready to go, replace 'prod' below with one
    of your configured environments and try some of these commands:

    supernova prod list (Lists instances via novaclient)

    supernova prod image-list (Lists images via novaclient)

    supernova prod boot ... (Boots an instance via novaclient)

    Have questions, bugs, or comments? Head on over to Github and open an
    issue or submit a pull request!

    https://github.com/major/supernova
    """
    # Retrieve our credentials from the configuration file
    try:
        nova_creds = config.run_config(config_file_override=conf)
    except Exception as e:
        msg = ("\n There's an error in your configuration file:\n\n" " {0}\n").format(e)
        click.echo(msg)
        ctx.exit(1)
    # Warn the user if there are potentially conflicting environment variables
    # already set in the user's environment.
    utils.check_environment_presets()
    # Is our environment argument a single environment or a supernova group?
    if utils.is_valid_group(environment, nova_creds):
        envs = utils.get_envs_in_group(environment, nova_creds)
    elif ',' in environment:
        # Comma-separated list: expand any group names it contains.
        envs = []
        for env in environment.split(','):
            if utils.is_valid_group(env, nova_creds):
                envs.extend(utils.get_envs_in_group(env, nova_creds))
            else:
                envs.append(env)
    elif environment.startswith('/') and environment.endswith('/'):
        # /regex/ form: select every configured environment matching it.
        envs = [nova_env for nova_env in nova_creds.keys() if re.search(environment[1:-1], nova_env)]
    else:
        envs = [environment]
    # These are arguments for supernova and not the executable that supernova
    # will eventually call.
    supernova_args = {'debug': debug, 'executable': executable, 'quiet': quiet, 'echo': echo, 'dashboard': dashboard, }
    # If the user specified a single environment, we need to verify that the
    # environment actually exists in their configuration file.
    if len(envs) == 1 and not utils.is_valid_environment(envs[0], nova_creds):
        msg = ("\nCouldn't find an environment called '{0}' in your " "configuration file.\nTry supernova --list to see all " "configured environments.\n".format(envs[0]))
        click.echo(msg)
        ctx.exit(1)
    # --echo: print the environment variables instead of running anything.
    if supernova_args['echo']:
        if len(envs) > 1:
            msg = ("\nCan't echo a group of environments.\nSpecify a single " "environment when using --echo.")
            click.echo(msg)
            ctx.exit(1)
        env = credentials.prep_shell_environment(envs[0], nova_creds)
        for k in env:
            click.echo('{0}={1}'.format(k, env[k]))
        ctx.exit(0)
    # --dashboard: open the configured dashboard URL in a web browser.
    if supernova_args['dashboard']:
        if len(envs) > 1:
            msg = ("\nCan't open dashboard for a group of environments.\n" "Specify a single environment when using --dashboard.")
            click.echo(msg)
            ctx.exit(1)
        url = nova_creds[envs[0]].get('SUPERNOVA_DASHBOARD_URL')
        if url is None:
            msg = ("\nNo SUPERNOVA_DASHBOARD_URL specified " "for environment: %s" % envs[0])
            click.echo(msg)
            ctx.exit(1)
        webbrowser.open(url)
        ctx.exit(0)
    if len(command) == 0:
        # NOTE(review): this format string has no {0} placeholder, so the
        # .format(envs[0]) argument is silently ignored — verify intent.
        msg = ("\nMissing arguments to pass to executable Run supernova " "--help for examples.\n".format(envs[0]))
        click.echo(msg)
        ctx.exit(1)
    nova_args = list(command)
    # Loop through the single environment (if the user specified one) or all
    # of the environments in a supernova group (if the user specified a group).
    for env in envs:
        supernova_args['nova_env'] = env
        returncode = supernova.run_command(nova_creds, nova_args, supernova_args)
    # NOTE(major): The return code here is the one that comes back from the
    # OS_EXECUTABLE that supernova runs (by default, 'nova'). When using
    # supernova groups, the return code is the one returned by the executable
    # for the last environment in the group.
    # It's not ideal, but it's all I can think of for now. ;)
    sys.exit(returncode)
|
def rel_humid_from_db_dpt(db_temp, dew_pt):
    """Relative Humidity (%) at db_temp (C), and dew_pt (C)."""
    # Saturated vapor pressures at the dry-bulb and dew-point
    # temperatures (both converted from Celsius to Kelvin).
    p_sat_db = saturated_vapor_pressure(db_temp + 273.15)
    p_sat_dp = saturated_vapor_pressure(dew_pt + 273.15)
    return 100 * (p_sat_dp / p_sat_db)
|
def filesFromHere_explore(self, astr_startPath='/'):
    """Return a list of path/files from "here" in the stree, using
    the child explore access.

    :param astr_startPath: path from which to start
    :return: the list of forward paths gathered by the walk
    """
    self.l_fwd = []
    self.treeExplore(startPath=astr_startPath, f=self.fwd)
    # Split each path into components, rooting each list at '/'.
    self.l_allFiles = [path.split('/') for path in self.l_fwd]
    for components in self.l_allFiles:
        components[0] = '/'
    return self.l_fwd
|
def _map_channel_row_to_dict(self, row):
    """Convert dictionary keys from raw csv format (see CHANNEL_INFO_HEADER),
    to ricecooker-like keys, e.g., 'Source ID' --> 'source_id'
    """
    cleaned = _clean_dict(row)
    return dict(
        title=cleaned[CHANNEL_TITLE_KEY],
        description=cleaned[CHANNEL_DESCRIPTION_KEY],
        source_domain=cleaned[CHANNEL_DOMAIN_KEY],
        source_id=cleaned[CHANNEL_SOURCEID_KEY],
        language=cleaned[CHANNEL_LANGUAGE_KEY],
        thumbnail_chan_path=cleaned[CHANNEL_THUMBNAIL_KEY],
    )
|
def preprocess_section(self, section):
    """Preprocess a given section into its components.

    Scans the section line by line (outside fenced code blocks) for
    reST-style ``:param:``, ``:return:``/``:returns:`` and
    ``:raises:``/``:raise:`` fields, collects them under 'Arguments',
    'Returns' and 'Raises' headings, and rewrites ``section.content``
    with those components appended via ``self._append_section``.
    """
    lines = []
    in_codeblock = False
    keyword = None
    components = {}
    for line in section.content.split('\n'):
        line = line.strip()
        if line.startswith("```"):
            # Toggle fenced-code state; markers inside code blocks are
            # passed through untouched.
            in_codeblock = not in_codeblock
        if not in_codeblock:
            match = re.match(r':(?:param|parameter)\s+(\w+)\s*:(.*)?$', line)
            if match:
                keyword = 'Arguments'
                param = match.group(1)
                text = match.group(2).strip()
                component = components.get(keyword, [])
                component.append('- `{}`: {}'.format(param, text))
                components[keyword] = component
                continue
            match = re.match(r':(?:return|returns)\s*:(.*)?$', line)
            if match:
                keyword = 'Returns'
                text = match.group(1).strip()
                component = components.get(keyword, [])
                component.append(text)
                components[keyword] = component
                continue
            # Fix: this pattern was a plain (non-raw) string containing
            # \s and \w, which are invalid escape sequences (deprecated,
            # SyntaxWarning on newer Pythons) and inconsistent with the
            # sibling raw-string patterns above.
            match = re.match(r':(?:raises|raise)\s+(\w+)\s*:(.*)?$', line)
            if match:
                keyword = 'Raises'
                exception = match.group(1)
                text = match.group(2).strip()
                component = components.get(keyword, [])
                component.append('- `{}`: {}'.format(exception, text))
                components[keyword] = component
                continue
        # Continuation lines attach to the most recent component;
        # everything else passes through to the output unchanged.
        if keyword is not None:
            components[keyword].append(line)
        else:
            lines.append(line)
    for key in components:
        self._append_section(lines, key, components)
    section.content = '\n'.join(lines)
|
def _process_model_dict(self, d):
    """Remove redundant items from a model's configuration dict.

    Parameters
    ----------
    d : dict
        Modified in place.

    Returns
    -------
    dict
        Modified `d`.
    """
    # These keys are always redundant for serialized model configs.
    for redundant_key in ('model_type', 'fit_filters', 'predict_filters'):
        del d[redundant_key]
    # Drop values that merely repeat the model-wide defaults.
    if d['model_expression'] == self.default_model_expr:
        del d['model_expression']
    if YTRANSFORM_MAPPING[d['ytransform']] == self.default_ytransform:
        del d['ytransform']
    d["name"] = yamlio.to_scalar_safe(d["name"])
    return d
|
def get_version_string(check_name):
    """Get the version string for the given check.

    Returns None when no version pattern is found in the check's
    version file.
    """
    match = VERSION.search(read_version_file(check_name))
    return match.group(1) if match else None
|
def _check_flood_protection ( self , component , action , clientuuid ) :
"""Checks if any clients have been flooding the node"""
|
if clientuuid not in self . _flood_counter :
self . _flood_counter [ clientuuid ] = 0
self . _flood_counter [ clientuuid ] += 1
if self . _flood_counter [ clientuuid ] > 100 :
packet = { 'component' : 'hfos.ui.clientmanager' , 'action' : 'Flooding' , 'data' : True }
self . fireEvent ( send ( clientuuid , packet ) )
self . log ( 'Flooding from' , clientuuid )
return True
|
def change_vartype(self, vartype, inplace=True):
    """Create a binary quadratic model with the specified vartype.

    Args:
        vartype (:class:`.Vartype`/str/set, optional):
            Variable type for the changed model. Accepted input values:

            * :class:`.Vartype.SPIN`, ``'SPIN'``, ``{-1, 1}``
            * :class:`.Vartype.BINARY`, ``'BINARY'``, ``{0, 1}``

        inplace (bool, optional, default=True):
            If True, the binary quadratic model is updated in-place;
            otherwise, a new binary quadratic model is returned.

    Returns:
        :class:`.BinaryQuadraticModel`. A new binary quadratic model with
        vartype matching input 'vartype'.

    Examples:
        This example creates an Ising model and then creates a QUBO from it.

        >>> import dimod
        >>> bqm_spin = dimod.BinaryQuadraticModel({1: 1, 2: 2}, {(1, 2): 0.5}, 0.5, dimod.SPIN)
        >>> bqm_qubo = bqm_spin.change_vartype('BINARY', inplace=False)
        >>> bqm_spin.offset, bqm_spin.vartype
        (0.5, <Vartype.SPIN: frozenset({1, -1})>)
        >>> bqm_qubo.offset, bqm_qubo.vartype
        (-2.0, <Vartype.BINARY: frozenset({0, 1})>)
    """
    if not inplace:
        # create a new model of the appropriate type, then add self's biases
        # to it; the add_* methods perform the vartype conversion themselves.
        new_model = BinaryQuadraticModel({}, {}, 0.0, vartype)
        new_model.add_variables_from(self.linear, vartype=self.vartype)
        new_model.add_interactions_from(self.quadratic, vartype=self.vartype)
        new_model.add_offset(self.offset)
        return new_model

    # in this case we are doing things in-place, if the desired vartype
    # matches self.vartype, then we don't need to do anything
    if vartype is self.vartype:
        return self

    # compute the converted biases for the requested conversion direction
    if self.vartype is Vartype.SPIN and vartype is Vartype.BINARY:
        linear, quadratic, offset = self.spin_to_binary(self.linear, self.quadratic, self.offset)
    elif self.vartype is Vartype.BINARY and vartype is Vartype.SPIN:
        linear, quadratic, offset = self.binary_to_spin(self.linear, self.quadratic, self.offset)
    else:
        raise RuntimeError("something has gone wrong. unknown vartype conversion.")

    # drop everything currently stored (removing each variable also removes
    # its interactions), then reset the offset to zero before repopulating
    for v in linear:
        self.remove_variable(v)
    self.add_offset(-self.offset)

    # switch the vartype first so the converted biases are added verbatim
    self.vartype = vartype
    self.add_variables_from(linear)
    self.add_interactions_from(quadratic)
    self.add_offset(offset)
    return self
|
def get_last_user(self, refresh=False):
    """Get the last used PIN user id.

    :param refresh: if True, re-query the device before reading the value.
    :return: tuple ``(userid, username)``, or None if the device value
        cannot be parsed.
    """
    if refresh:
        self.refresh_complex_value('sl_UserCode')
    val = self.get_complex_value("sl_UserCode")
    # Syntax string: UserID="<pin_slot>" UserName="<pin_code_name>"
    # See http://wiki.micasaverde.com/index.php/Luup_UPnP_Variables_and_Actions#DoorLock1
    # Parse with a regex rather than str.split(' '): splitting on spaces
    # broke whenever the user name itself contained a space
    # (e.g. UserName="John Doe" produced three tokens and returned None).
    match = re.search(r'UserID="([^"]*)"\s+UserName="([^"]*)"', val or '')
    if match is None:
        logger.error('Got unsupported user string {}'.format(val))
        return None
    return (match.group(1), match.group(2))
|
def list_race_details(self, meeting_ids=None, race_ids=None, session=None, lightweight=None):
    """Search for races to get their details.

    :param dict meeting_ids: Optionally restricts the results to the specified
        meeting IDs. The unique Id for the meeting equivalent to the eventId
        for that specific race as returned by listEvents
    :param str race_ids: Optionally restricts the results to the specified
        race IDs. The unique Id for the race in the format
        meetingid.raceTime (hhmm). raceTime is in GMT
    :param requests.session session: Requests session object
    :param bool lightweight: If True will return dict not a resource

    :rtype: list[resources.RaceDetail]
    """
    # clean_locals(locals()) harvests this function's arguments as the
    # request parameters — presumably dropping self/None values (confirm in
    # clean_locals) — so local names here must match the API parameter names.
    params = clean_locals(locals())
    method = '%s%s' % (self.URI, 'listRaceDetails')
    (response, elapsed_time) = self.request(method, params, session)
    return self.process_response(response, resources.RaceDetails, elapsed_time, lightweight)
|
def get_frequency_dict(lang, wordlist='best', match_cutoff=30):
    """Get a word frequency list as a dictionary, mapping tokens to
    frequencies as floating-point probabilities."""
    buckets = get_frequency_list(lang, wordlist, match_cutoff)
    # Bucket i holds the words whose frequency is cB_to_freq(-i); a later
    # (rarer) bucket would overwrite an earlier entry, same as the loop form.
    return {
        word: cB_to_freq(-bucket_index)
        for bucket_index, bucket in enumerate(buckets)
        for word in bucket
    }
|
def parse_value(val, parsebool=False):
    """Parse input string and return int, float or str depending on format.

    @param val:       Input string.
    @param parsebool: If True parse yes/no, on/off as boolean.
    @return:          Value of type int, float, bool or str.
    """
    # Try numeric conversions first. Catch TypeError in addition to
    # ValueError (the original bare `except:` also swallowed SystemExit and
    # KeyboardInterrupt) so non-string inputs such as None fall through to
    # the later branches instead of raising.
    try:
        return int(val)
    except (TypeError, ValueError):
        pass
    try:
        return float(val)
    except (TypeError, ValueError):
        pass
    if parsebool:
        if re.match('yes|on', str(val), re.IGNORECASE):
            return True
        elif re.match('no|off', str(val), re.IGNORECASE):
            return False
    return val
|
def circularize(self):
    '''Circularize linear DNA.

    :returns: A circularized version of the current sequence.
    :rtype: coral.DNA
    '''
    # Circularization requires both termini to be joinable: a gap ('-') on
    # one strand's end paired with a gap on the other strand's opposite end
    # means the molecule cannot be closed.
    disconnected = (
        (self.top[-1].seq == '-' and self.bottom[0].seq == '-') or
        (self.bottom[-1].seq == '-' and self.top[0].seq == '-')
    )
    if disconnected:
        raise ValueError('Cannot circularize - termini disconnected.')

    circular_copy = self.copy()
    circular_copy.circular = True
    circular_copy.top.circular = True
    circular_copy.bottom.circular = True
    return circular_copy
|
def map_new(w: int, h: int) -> tcod.map.Map:
    """Return a :any:`tcod.map.Map` with a width and height.

    .. deprecated:: 4.5
        Use the :any:`tcod.map` module for working with field-of-view,
        or :any:`tcod.path` for working with path-finding.
    """
    # Thin deprecated wrapper: simply forwards to the tcod.map.Map constructor.
    return tcod.map.Map(w, h)
|
def step_indices(group_idx):
    """Get the edges of areas within group_idx, which are filled
    with the same value.

    Returns an int array of boundary indices: indices[0] is 0,
    indices[-1] is group_idx.size, and the interior entries mark where
    the value in group_idx changes.
    """
    # step_count gives the number of value-change edges; +1 leaves room for
    # the trailing group_idx.size sentinel.
    ilen = step_count(group_idx) + 1
    indices = np.empty(ilen, int)
    indices[0] = 0
    indices[-1] = group_idx.size
    # The interior entries of `indices` are filled in-place by the compiled
    # C snippet `c_step_indices` (weave-style inline; side effect only).
    inline(c_step_indices, ['group_idx', 'indices'], define_macros=c_macros, extra_compile_args=c_args)
    return indices
|
def coerce(self, value, resource):
    """Convert a list of objects in a list of dicts.

    Arguments
    ---------
    value : iterable
        The list (or other iterable) to get values to get some resources from.
    resource : dataql.resources.List
        The ``List`` object used to obtain this value from the original one.

    Returns
    -------
    list
        One entry per iteration of ``value``. With a single sub-resource,
        each entry is that sub-resource's solved value; with several
        sub-resources, each entry is a list of their solved values.

    Raises
    ------
    dataql.solvers.exceptions.NotIterable
        When the value is not iterable.
    """
    if not isinstance(value, Iterable):
        raise NotIterable(resource, self.registry[value])

    subresources = resource.resources

    # Single sub-resource: each result entry is the solved sub-resource.
    if len(subresources) == 1:
        only_resource = subresources[0]
        return [self.registry.solve_resource(entry, only_resource) for entry in value]

    # Several sub-resources: each result entry is a list with one solved
    # value per sub-resource.
    return [
        [self.registry.solve_resource(entry, sub) for sub in subresources]
        for entry in value
    ]
|
def modify_access(src, dst='any', port=None, proto=None, action='allow', index=None):
    """Grant access to an address or subnet.

    :param src: address (e.g. 192.168.1.234) or subnet
                (e.g. 192.168.1.0/24).
    :param dst: destiny of the connection, if the machine has multiple IPs and
                connections to only one of those have to accepted this is the
                field has to be set.
    :param port: destiny port
    :param proto: protocol (tcp or udp)
    :param action: `allow` or `delete`
    :param index: if different from None the rule is inserted at the given
                  `index`.
    """
    if not is_enabled():
        hookenv.log('ufw is disabled, skipping modify_access()', level='WARN')
        return

    # Build the ufw command; 'delete' always removes an 'allow' rule.
    if action == 'delete':
        cmd = ['ufw', 'delete', 'allow']
    elif index is not None:
        cmd = ['ufw', 'insert', str(index), action]
    else:
        cmd = ['ufw', action]

    if src is not None:
        cmd += ['from', src]
    if dst is not None:
        cmd += ['to', dst]
    if port is not None:
        cmd += ['port', str(port)]
    if proto is not None:
        cmd += ['proto', proto]

    hookenv.log('ufw {}: {}'.format(action, ' '.join(cmd)), level='DEBUG')
    # Fix: capture stderr as well. Without stderr=subprocess.PIPE,
    # communicate() returns None for stderr and the error branch below
    # logged nothing useful on failure.
    p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    (stdout, stderr) = p.communicate()

    hookenv.log(stdout, level='INFO')

    if p.returncode != 0:
        hookenv.log(stderr, level='ERROR')
        hookenv.log('Error running: {}, exit code: {}'.format(
            ' '.join(cmd), p.returncode), level='ERROR')
|
def serialize_object(self, obj):
    """Write one item to the object stream.

    Serializes obj's local fields, FK fields, reverse foreign-key and
    generic relations (each via a fresh RecursiveXmlSerializer writing to
    the same xml/stream), extra field descriptors, and m2m fields.
    """
    self.start_object(obj)
    # Plain and FK local fields, honoring selected_fields and the optional
    # per-field 'include_in_xml' marker (defaults to True when absent).
    for field in obj._meta.local_fields:
        if field.serialize and getattr(field, 'include_in_xml', True):
            if field.rel is None:
                if self.selected_fields is None or field.attname in self.selected_fields:
                    self.handle_field(obj, field)
            else:
                # FK attnames end in '_id'; strip the suffix before
                # comparing with selected_fields.
                if self.selected_fields is None or field.attname[:-3] in self.selected_fields:
                    self.handle_fk_field(obj, field)
    # recursively serialize all foreign key relations
    for (foreign_key_descriptor_name, foreign_key_descriptor) in get_foreign_key_desciptors(obj):
        # don't follow foreign keys that have a 'nofollow' attribute
        if foreign_key_descriptor.related.field.serialize and not hasattr(foreign_key_descriptor.related.field, 'nofollow'):
            # __get__(obj) binds the descriptor to obj, yielding a manager
            # whose .all() lists the related objects to serialize.
            bound_foreign_key_descriptor = foreign_key_descriptor.__get__(obj)
            s = RecursiveXmlSerializer()
            s.serialize(bound_foreign_key_descriptor.all(), xml=self.xml, stream=self.stream)
    # recursively serialize all one to one relations
    # TODO: make this work for non abstract inheritance but without infinite recursion
    # for (one_to_one_descriptor_name, one_to_one_descriptor) in get_one_to_one_descriptors(obj):
    #     related_objects = []
    #     try:
    #         related_object = one_to_one_descriptor.__get__(obj)
    #         related_objects.append(related_object)
    #     except Exception as e:
    #         pass
    #     s = RecursiveXmlSerializer()
    #     s.serialize(related_objects, xml=self.xml, stream=self.stream)
    # add generic relations
    for (generic_relation_descriptor_name, generic_relation_descriptor) in get_generic_relation_descriptors(obj):
        # generic relations always have serialize set to False so we always include them.
        bound_generic_relation_descriptor = generic_relation_descriptor.__get__(obj)
        s = RecursiveXmlSerializer()
        s.serialize(bound_generic_relation_descriptor.all(), xml=self.xml, stream=self.stream)
    # serialize the default field descriptors:
    for (default_field_descriptor_name, default_field_descriptor) in get_default_field_descriptors(obj):
        if default_field_descriptor.serialize:
            self.handle_field(obj, default_field_descriptor)
    # many-to-many fields, honoring selected_fields
    for field in obj._meta.many_to_many:
        if field.serialize:
            if self.selected_fields is None or field.attname in self.selected_fields:
                self.handle_m2m_field(obj, field)
    self.end_object(obj)
|
def converged_electronic(self):
    """Checks that electronic step convergence has been reached in the final
    ionic step.

    Returns:
        bool: True if the final ionic step used fewer than NELM electronic
        steps (i.e. SCF stopped before hitting the iteration cap).
    """
    final_esteps = self.ionic_steps[-1]["electronic_steps"]
    if 'LEPSILON' in self.incar and self.incar['LEPSILON']:
        # LEPSILON (response-function) runs append extra entries after the
        # SCF part; count only the leading entries that carry the full SCF
        # key set. NOTE(review): the while loop assumes a trailing entry
        # with a different key set exists before index runs out — confirm,
        # otherwise this can raise IndexError.
        i = 1
        to_check = set(['e_wo_entrp', 'e_fr_energy', 'e_0_energy'])
        while set(final_esteps[i].keys()) == to_check:
            i += 1
        return i + 1 != self.parameters["NELM"]
    # Converged iff SCF stopped before the NELM electronic-step limit.
    return len(final_esteps) < self.parameters["NELM"]
|
def to_dict(self, include_args=True, include_kwargs=True):
    """Converts this object to a dictionary.

    :param include_args: boolean indicating whether to include the
        exception args in the output.
    :param include_kwargs: boolean indicating whether to include the
        exception kwargs in the output.
    """
    # Blank out args/kwargs when the caller opted out of including them.
    exc_args = self.exception_args if include_args else tuple()
    exc_kwargs = self.exception_kwargs if include_kwargs else {}
    data = {
        'exception_str': self.exception_str,
        'traceback_str': self.traceback_str,
        'exc_type_names': self.exception_type_names,
        'exc_args': exc_args,
        'exc_kwargs': exc_kwargs,
        'generated_on': self.generated_on,
    }
    # Recursively serialize the causing failure, if any, with the same flags.
    cause = self._cause
    if cause is not None:
        data['cause'] = cause.to_dict(include_args=include_args, include_kwargs=include_kwargs)
    return data
|
def snmp_server_user_priv_password(self, **kwargs):
    """Auto Generated Code"""
    # Build: config > snmp-server > user > {username, priv-password}
    config = ET.Element("config")
    snmp_server = ET.SubElement(config, "snmp-server",
                                xmlns="urn:brocade.com:mgmt:brocade-snmp")
    user = ET.SubElement(snmp_server, "user")
    ET.SubElement(user, "username").text = kwargs.pop('username')
    ET.SubElement(user, "priv-password").text = kwargs.pop('priv_password')

    # Dispatch through the caller-supplied callback if given, else the
    # instance default.
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
|
def delete(self, resource, **params):
    """Generic TeleSign REST API DELETE handler.

    :param resource: The partial resource URI to perform the request against,
        as a string.
    :param params: Body params to perform the DELETE request with, as a
        dictionary.
    :return: The RestClient Response object.
    """
    # Delegate to the shared request executor with the session's DELETE method.
    delete_method = self.session.delete
    return self._execute(delete_method, 'DELETE', resource, **params)
|
def _import_sub_module ( module , name ) :
"""import _ sub _ module will mimic the function of importlib . import _ module"""
|
module = __import__ ( module . __name__ + "." + name )
for level in name . split ( "." ) :
module = getattr ( module , level )
return module
|
def _smartos_zone_pkgsrc_data ( ) :
'''SmartOS zone pkgsrc information'''
|
# Provides :
# pkgsrcversion
# pkgsrcpath
grains = { 'pkgsrcversion' : 'Unknown' , 'pkgsrcpath' : 'Unknown' , }
pkgsrcversion = re . compile ( '^release:\\s(.+)' )
if os . path . isfile ( '/etc/pkgsrc_version' ) :
with salt . utils . files . fopen ( '/etc/pkgsrc_version' , 'r' ) as fp_ :
for line in fp_ :
line = salt . utils . stringutils . to_unicode ( line )
match = pkgsrcversion . match ( line )
if match :
grains [ 'pkgsrcversion' ] = match . group ( 1 )
pkgsrcpath = re . compile ( 'PKG_PATH=(.+)' )
if os . path . isfile ( '/opt/local/etc/pkg_install.conf' ) :
with salt . utils . files . fopen ( '/opt/local/etc/pkg_install.conf' , 'r' ) as fp_ :
for line in fp_ :
line = salt . utils . stringutils . to_unicode ( line )
match = pkgsrcpath . match ( line )
if match :
grains [ 'pkgsrcpath' ] = match . group ( 1 )
return grains
|
def create_subscription(self, subscription):
    """Create a new subscription.

    :param subscription: the new subscription the client wants to create
    :return: the HTTP status code on success (201)
    :raises DataFailureException: if the service responds with any status
        other than 201
    """
    # Validate every client-supplied identifier before issuing the request.
    if subscription.subscription_id is not None:
        self._validate_uuid(subscription.subscription_id)
    if subscription.endpoint is not None:
        if subscription.endpoint.subscriber_id is not None:
            self._validate_subscriber_id(subscription.endpoint.subscriber_id)
        if subscription.endpoint.endpoint_id is not None:
            self._validate_uuid(subscription.endpoint.endpoint_id)
    if subscription.channel is not None:
        self._validate_uuid(subscription.channel.channel_id)
    url = "/notification/v1/subscription"
    response = NWS_DAO().postURL(url, self._write_headers(), self._json_body(subscription.json_data()))
    # 201 Created is the only accepted outcome.
    if response.status != 201:
        raise DataFailureException(url, response.status, response.data)
    return response.status
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.